hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a909774bf8b8ead0a6b26b707982f6f0737bb165
| 1,479
|
py
|
Python
|
animations/vertical/vanimation.py
|
juliendelplanque/lcddaemon
|
77fe0587fe88418aa72897c3a60eff8e7be01372
|
[
"MIT"
] | null | null | null |
animations/vertical/vanimation.py
|
juliendelplanque/lcddaemon
|
77fe0587fe88418aa72897c3a60eff8e7be01372
|
[
"MIT"
] | 21
|
2015-05-30T16:17:02.000Z
|
2015-07-29T17:30:12.000Z
|
animations/vertical/vanimation.py
|
juliendelplanque/lcddaemon
|
77fe0587fe88418aa72897c3a60eff8e7be01372
|
[
"MIT"
] | null | null | null |
#-*- coding: utf-8 -*-
import time
from animations.abstractanimation import AbstractAnimation
from animations.noanimation.noanimation import MultiLineNoAnimation
class VerticalAnimation(AbstractAnimation):
    """Base class for animations that scroll a multi-line message vertically.

    When the message already fits on the display, it falls back to the
    reusable :class:`MultiLineNoAnimation` instead of scrolling.
    """

    def __init__(self, driver):
        """Store the driver and build the fallback no-op animation.

        :param driver: Display driver exposing ``line_count()``, ``clear()``
            and ``write_lines()``.
        """
        # Call super class constructor.
        super().__init__(driver)
        # Re-use an animation already created.
        self.multi_no_animation = MultiLineNoAnimation(driver)

    def animate(self, message):
        """Scroll *message* if it overflows the display, else show it plainly."""
        strings = message.contents.split('\n')
        if len(strings) > self.driver.line_count():
            self.display(message, strings)
        else:
            self.multi_no_animation.animate(message)

    def display(self, message, strings):
        """Scroll *strings* across the display; implemented by subclasses.

        Bug fix: the abstract signature was ``display(self, strings)`` while
        ``animate`` calls ``self.display(message, strings)`` and every
        subclass overrides with ``(message, strings)`` — a non-overriding
        subclass would have raised TypeError instead of NotImplementedError.
        """
        raise NotImplementedError()
class TopToBottomAnimation(VerticalAnimation):
    """Scrolls the message downwards, advancing one line per frame."""

    def display(self, message, strings):
        """Show successive windows of *strings*, starting from the top."""
        frame_delay = message.duration / len(strings)
        for offset in range(len(strings)):
            frame = strings[offset:offset + self.driver.line_count()]
            self.driver.clear()
            self.driver.write_lines(frame)
            time.sleep(frame_delay)
class BottomToTopAnimation(VerticalAnimation):
    # Scrolls the message upwards: frames are taken from the end of the
    # line list back towards the start.
    def display(self, message, strings):
        """Show two-line windows of *strings*, advancing from the bottom.

        NOTE(review): the window width is hard-coded to 2 lines
        (``strings[i-2:i]``) whereas TopToBottomAnimation uses
        ``self.driver.line_count()`` — presumably written for a 2-line
        LCD; confirm before using with other display sizes.
        """
        time_per_frame = message.duration/len(strings)
        for i in range(len(strings), 1, -1):
            strings_to_display = strings[i-2: i]
            self.driver.clear()
            self.driver.write_lines(strings_to_display)
            time.sleep(time_per_frame)
| 36.073171
| 70
| 0.676133
| 166
| 1,479
| 5.831325
| 0.343373
| 0.072314
| 0.049587
| 0.041322
| 0.454545
| 0.36157
| 0.36157
| 0.36157
| 0.36157
| 0.36157
| 0
| 0.0035
| 0.227181
| 1,479
| 40
| 71
| 36.975
| 0.843395
| 0.0595
| 0
| 0.322581
| 0
| 0
| 0.001441
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.16129
| false
| 0
| 0.096774
| 0
| 0.354839
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a90997632623c70526b57f88d63354bc898f0759
| 5,056
|
py
|
Python
|
django_backbone/healthchecks/viewsets.py
|
Jordan-Kowal/django-backbone
|
19d123adf00b3f7d22e6ef75ba6da0fe7b5e00b0
|
[
"MIT"
] | 1
|
2020-10-05T21:44:18.000Z
|
2020-10-05T21:44:18.000Z
|
django_backbone/healthchecks/viewsets.py
|
Jordan-Kowal/django-backbone
|
19d123adf00b3f7d22e6ef75ba6da0fe7b5e00b0
|
[
"MIT"
] | null | null | null |
django_backbone/healthchecks/viewsets.py
|
Jordan-Kowal/django-backbone
|
19d123adf00b3f7d22e6ef75ba6da0fe7b5e00b0
|
[
"MIT"
] | null | null | null |
"""Viewsets for the 'healthchecks' app"""
# Built-in
import logging
from enum import Enum
from functools import wraps
from secrets import token_urlsafe
# Django
from django.core.cache import cache
from django.core.exceptions import FieldError, ImproperlyConfigured, ObjectDoesNotExist
from django.db import connection
from django.db.migrations.executor import MigrationExecutor
from rest_framework.decorators import action
from rest_framework.response import Response
from rest_framework.status import HTTP_200_OK, HTTP_500_INTERNAL_SERVER_ERROR
# Personal
from jklib.django.drf.permissions import IsAdminUser
from jklib.django.drf.viewsets import ImprovedViewSet
# Local
from .models import HealthcheckDummy
# --------------------------------------------------------------------------------
# > Utilities
# --------------------------------------------------------------------------------
LOGGER = logging.getLogger("healthcheck")
class Service(Enum):
    """List of services with healthchecks"""

    # Values double as human-readable identifiers; ``error_catcher`` logs
    # ``service.name`` for each endpoint call.
    API = "API"
    CACHE = "CACHE"
    DATABASE = "DATABASE"
    MIGRATIONS = "MIGRATIONS"
def error_catcher(service):
    """
    Decorator for the healthchecks API endpoints
    Logs the API call result, and returns a 500 if the service crashes
    :param Service service: Which service is called
    :return: Either the service success Response or a 500
    :rtype: Response
    """

    def decorator(function):
        @wraps(function)
        def wrapper(*args, **kwargs):
            # The decorated callables are viewset methods, so the first
            # positional argument is really ``self`` (the original code
            # misleadingly named it ``request``); pass everything through.
            try:
                response = function(*args, **kwargs)
                # Lazy %-style args avoid building the message when the
                # record is filtered out by the log level.
                LOGGER.info("Service %s is OK", service.name)
                return response
            except Exception as error:
                LOGGER.error("Service %s is KO: %s", service.name, error)
                return Response(None, status=HTTP_500_INTERNAL_SERVER_ERROR)

        return wrapper

    return decorator
# --------------------------------------------------------------------------------
# > ViewSets
# --------------------------------------------------------------------------------
class HealthcheckViewSet(ImprovedViewSet):
    """Viewset for our various healthchecks"""

    viewset_permission_classes = (IsAdminUser,)
    serializer_classes = {"default": None}

    @action(detail=False, methods=["get"])
    @error_catcher(Service.API)
    def api(self, request):
        """Checks if the API is up and running"""
        return Response(None, status=HTTP_200_OK)

    @action(detail=False, methods=["get"])
    @error_catcher(Service.CACHE)
    def cache(self, request):
        """Checks we can write/read/delete in the cache system"""
        random_cache_key = token_urlsafe(30)
        random_cache_value = token_urlsafe(30)
        # Set value
        cache.set(random_cache_key, random_cache_value)
        cached_value = cache.get(random_cache_key, None)
        if cached_value is None:
            raise KeyError("Failed to set a key/value pair in the cache")
        if cached_value != random_cache_value:
            raise ValueError(
                f"Unexpected value stored in the '{random_cache_key}' cache key"
            )
        # Delete value
        # Bug fix: the original deleted and re-read using the cache *value*
        # instead of the key, so the deletion path was never actually tested.
        cache.delete(random_cache_key)
        cached_value = cache.get(random_cache_key, None)
        if cached_value is not None:
            raise AttributeError(
                f"Failed to properly delete the '{random_cache_key}' key in the cache"
            )
        return Response(None, status=HTTP_200_OK)

    @action(detail=False, methods=["get"])
    @error_catcher(Service.DATABASE)
    def database(self, request):
        """Checks we can write/read/delete in the database"""
        # Create
        content = token_urlsafe(50)
        instance = HealthcheckDummy.objects.create(content=content)
        if instance is None:
            raise LookupError("Failed to create the HealthcheckDummy instance")
        # Get
        fetched_instance = HealthcheckDummy.objects.get(pk=instance.id)
        if fetched_instance is None:
            raise ObjectDoesNotExist(
                "Failed to fetch the created HealthcheckDummy instance"
            )
        if fetched_instance.content != content:
            raise FieldError(
                "Unexpected field value for the fetched HealthcheckDummy instance"
            )
        # Delete
        HealthcheckDummy.objects.all().delete()
        if HealthcheckDummy.objects.count() > 0:
            raise RuntimeError(
                "Failed to properly delete all HealthcheckDummy instances"
            )
        return Response(None, status=HTTP_200_OK)

    @action(detail=False, methods=["get"])
    @error_catcher(Service.MIGRATIONS)
    def migrations(self, request):
        """Checks if all migrations have been applied to our database"""
        executor = MigrationExecutor(connection)
        plan = executor.migration_plan(executor.loader.graph.leaf_nodes())
        if plan:
            raise ImproperlyConfigured("There are migrations to apply")
        return Response(None, status=HTTP_200_OK)
| 36.114286
| 87
| 0.625791
| 546
| 5,056
| 5.679487
| 0.302198
| 0.035472
| 0.014511
| 0.038697
| 0.208965
| 0.158659
| 0.158659
| 0.148017
| 0.133183
| 0.103515
| 0
| 0.008779
| 0.233979
| 5,056
| 139
| 88
| 36.374101
| 0.791893
| 0.187896
| 0
| 0.088889
| 0
| 0
| 0.134161
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.077778
| false
| 0
| 0.155556
| 0
| 0.411111
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a90a3b893c4a5282243641ddfdb4b678a42dfab0
| 595
|
py
|
Python
|
slack-notification.py
|
JSourabh/codepipeline
|
77b7fb5963199f7d235861a7aed68631d192147d
|
[
"Apache-2.0"
] | null | null | null |
slack-notification.py
|
JSourabh/codepipeline
|
77b7fb5963199f7d235861a7aed68631d192147d
|
[
"Apache-2.0"
] | null | null | null |
slack-notification.py
|
JSourabh/codepipeline
|
77b7fb5963199f7d235861a7aed68631d192147d
|
[
"Apache-2.0"
] | null | null | null |
def send_message_to_slack(
    text,
    webhook_url="https://hooks.slack.com/services/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",
):
    """Post *text* to a Slack incoming webhook.

    :param text: Message to post.
    :param webhook_url: Slack webhook endpoint; defaults to the original
        hard-coded hook for backward compatibility.
        NOTE(security): a webhook URL is a credential — it should be moved
        to configuration/environment instead of being committed to source.
    """
    from urllib import request  # dropped unused ``parse`` import
    import json

    post = {"text": "{0}".format(text)}
    try:
        json_data = json.dumps(post)
        req = request.Request(
            webhook_url,
            data=json_data.encode('ascii'),
            headers={'Content-Type': 'application/json'},
        )
        # Close the HTTP response instead of leaking the connection.
        with request.urlopen(req):
            pass
    except Exception as em:
        print("EXCEPTION: " + str(em))
send_message_to_slack('Deployment has been completed.... ')
| 31.315789
| 114
| 0.608403
| 65
| 595
| 5.446154
| 0.676923
| 0.062147
| 0.073446
| 0.101695
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025114
| 0.263866
| 595
| 18
| 115
| 33.055556
| 0.783105
| 0
| 0
| 0
| 0
| 0
| 0.278992
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.153846
| 0
| 0.230769
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a9110b5c0117f2af8467c6b63060a19e830d3f66
| 3,381
|
py
|
Python
|
data/image_folder.py
|
VuongTuanKhanh/Brain-MRI-GAN
|
c115b9aa92aac9efe11710df38e0312b3a508b4c
|
[
"MIT"
] | null | null | null |
data/image_folder.py
|
VuongTuanKhanh/Brain-MRI-GAN
|
c115b9aa92aac9efe11710df38e0312b3a508b4c
|
[
"MIT"
] | null | null | null |
data/image_folder.py
|
VuongTuanKhanh/Brain-MRI-GAN
|
c115b9aa92aac9efe11710df38e0312b3a508b4c
|
[
"MIT"
] | null | null | null |
"""A modified image folder class
We modify the official PyTorch image folder (https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py)
so that this class can load images from both current directory and its subdirectories.
"""
import torch.utils.data as data
from PIL import Image
import nibabel as nib
from shutil import rmtree
import numpy as np
import cv2
import os
# Recognised image filename suffixes (lower- and upper-case variants).
IMG_EXTENSIONS = [
    '.jpg', '.JPG', '.jpeg', '.JPEG',
    '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
    '.tif', '.TIF', '.tiff', '.TIFF',
]


def is_image_file(filename):
    """Return True when *filename* ends with a known image extension."""
    # str.endswith accepts a tuple of suffixes — one C-level call.
    return filename.endswith(tuple(IMG_EXTENSIONS))
def make_dataset(dir, max_dataset_size=float("inf")):
    """Collect image paths under *dir*, ordered by their numeric suffix.

    Filenames are expected to look like ``prefix_<number>.<ext>``; the
    integer between the last ``_`` and the extension drives the sort.
    """
    assert os.path.isdir(dir), '%s is not a valid directory' % dir

    found = [
        os.path.join(root, fname)
        for root, _, fnames in sorted(os.walk(dir))
        for fname in fnames
        if is_image_file(fname)
    ]
    # Cap the dataset size, then sort by the trailing frame number.
    found = found[:min(max_dataset_size, len(found))]
    found.sort(key=lambda p: int(p.split('_')[-1].split('.')[0]))
    return found
def default_loader(path):
    """Open *path* with PIL and force a 3-channel RGB image."""
    image = Image.open(path)
    return image.convert('RGB')
def get_mri_images(file):
    """Load a NIfTI volume and scale its intensities by the global maximum.

    Returns the normalised voxel array and the size of its last axis
    (the number of slices).
    """
    volume = nib.load(file).get_fdata()
    normalised = volume / volume.max()
    return normalised, normalised.shape[-1]
def save_image_data(image_folder, target_folder):
    """Render paired MRI slices from *image_folder* as side-by-side JPEGs.

    Assumes the folder name encodes the subject id and contains files named
    ``<subject>_t1.nii.gz`` / ``<subject>_t1ce.nii.gz`` (BraTS-style layout
    — TODO confirm against the actual dataset).
    """
    image_file_format = '{}{}_{}.nii.gz'
    # NOTE(review): the variable is named "t2" but the file loaded is the
    # 't1ce' (contrast-enhanced t1) modality.
    file_path_t1 = image_file_format.format(image_folder, image_folder.split('/')[-2], 't1')
    file_path_t2 = image_file_format.format(image_folder, image_folder.split('/')[-2], 't1ce')
    t1_img, _ = get_mri_images(file_path_t1)
    t2_img, _ = get_mri_images(file_path_t2)
    # Subject number: last '_'-separated token of the folder name.
    file_name = image_folder.split('/')[-2].split('_')[-1]
    image_size = t1_img.shape[0]
    # Slices 30..109 only — presumably the range containing brain tissue;
    # verify for other datasets.
    for i in range(30, 110):
        # Side-by-side canvas: t1 on the left half, t1ce on the right.
        canvas = np.empty((image_size, image_size*2), np.uint8)
        canvas[:, :image_size] = (t1_img[:, :, i] * 255).astype('int')
        canvas[:, image_size:] = (t2_img[:, :, i] * 255).astype('int')
        cv2.imwrite(target_folder + file_name + '_' + str(i) + '.jpg', canvas)
def make_test_dataset(validation_folder, save_folder):
    """Rebuild *save_folder* with slices from one random validation subject."""
    subject_dirs = [validation_folder + name + '/' for name in os.listdir(validation_folder)][:-2]
    # Start from a clean output tree.
    if os.path.isdir(save_folder):
        rmtree(save_folder)
    os.mkdir(save_folder)
    os.mkdir(save_folder + 'test/')
    chosen = np.random.choice(subject_dirs)
    save_image_data(chosen, save_folder + 'test/')
class ImageFolder(data.Dataset):
    """Dataset over every image found (recursively) under *root*."""

    def __init__(self, root, transform=None, return_paths=False,
                 loader=default_loader):
        found = make_dataset(root)
        if len(found) == 0:
            raise(RuntimeError("Found 0 images in: " + root + "\n"
                               "Supported image extensions are: " + ",".join(IMG_EXTENSIONS)))
        self.root = root
        self.imgs = found
        self.transform = transform
        self.return_paths = return_paths
        self.loader = loader

    def __getitem__(self, index):
        """Load image *index*, optionally transformed; path is returned on request."""
        path = self.imgs[index]
        image = self.loader(path)
        if self.transform is not None:
            image = self.transform(image)
        return (image, path) if self.return_paths else image

    def __len__(self):
        """Number of images discovered at construction time."""
        return len(self.imgs)
| 31.305556
| 122
| 0.631174
| 463
| 3,381
| 4.393089
| 0.332613
| 0.043265
| 0.017699
| 0.023599
| 0.108161
| 0.092429
| 0.048181
| 0.048181
| 0.048181
| 0.048181
| 0
| 0.014182
| 0.228335
| 3,381
| 107
| 123
| 31.598131
| 0.765427
| 0.070985
| 0
| 0
| 0
| 0
| 0.062221
| 0
| 0
| 0
| 0
| 0
| 0.012987
| 1
| 0.116883
| false
| 0
| 0.090909
| 0.038961
| 0.311688
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a91339cbacf313c26ddda2e22c112331ab363a81
| 448
|
py
|
Python
|
tests/test_powrap.py
|
awecx/powrap
|
d96763e5838d7b105a672a9dacea70e270290b22
|
[
"MIT"
] | 1
|
2021-01-03T01:54:23.000Z
|
2021-01-03T01:54:23.000Z
|
tests/test_powrap.py
|
awecx/powrap
|
d96763e5838d7b105a672a9dacea70e270290b22
|
[
"MIT"
] | null | null | null |
tests/test_powrap.py
|
awecx/powrap
|
d96763e5838d7b105a672a9dacea70e270290b22
|
[
"MIT"
] | null | null | null |
from pathlib import Path
import pytest
from powrap import powrap
FIXTURE_DIR = Path(__file__).resolve().parent
@pytest.mark.parametrize("po_file", (FIXTURE_DIR / "bad").glob("*.po"))
def test_fail_on_bad_wrapping(po_file):
    """Each badly wrapped fixture must be reported back by check_style."""
    assert powrap.check_style([po_file]) == [po_file]
@pytest.mark.parametrize("po_file", (FIXTURE_DIR / "good").glob("*.po"))
def test_succees_on_good_wrapping(po_file):
    """Correctly wrapped fixtures must produce no style findings.

    NOTE(review): the function name has a typo ("succees"); left unchanged
    because pytest discovers tests by name.
    """
    assert powrap.check_style([po_file]) == []
| 24.888889
| 72
| 0.734375
| 66
| 448
| 4.621212
| 0.393939
| 0.137705
| 0.137705
| 0.15082
| 0.518033
| 0.518033
| 0.518033
| 0.27541
| 0.27541
| 0
| 0
| 0
| 0.107143
| 448
| 17
| 73
| 26.352941
| 0.7625
| 0
| 0
| 0
| 0
| 0
| 0.064732
| 0
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.2
| false
| 0
| 0.3
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a91c4d71080e49a5432f9894381a21354fd82539
| 6,450
|
py
|
Python
|
benchmark.py
|
kwang2049/benchmarking-ann
|
8b98331181286ace0216c7079a38af337a65557d
|
[
"Apache-2.0"
] | null | null | null |
benchmark.py
|
kwang2049/benchmarking-ann
|
8b98331181286ace0216c7079a38af337a65557d
|
[
"Apache-2.0"
] | null | null | null |
benchmark.py
|
kwang2049/benchmarking-ann
|
8b98331181286ace0216c7079a38af337a65557d
|
[
"Apache-2.0"
] | null | null | null |
import json
import faiss
import pickle
import os
import tqdm
import numpy as np
import argparse
import time
from functools import wraps
# ---- CLI -------------------------------------------------------------------
parser = argparse.ArgumentParser()
parser.add_argument('--d', type=int, default=768, help='dimension size')
parser.add_argument('--buffer_size', type=int, default=50000)
parser.add_argument('--topk', type=int, default=10)
parser.add_argument('--batch_size', type=int, default=128, help='for retrieval')
parser.add_argument('--embedded_dir', type=str, default='msmarco-embedded')
parser.add_argument('--output_dir', type=str, default='msmarco-benchmarking')
parser.add_argument('--eval_string', type=str, required=True, help='e.g. pq(384, 8)')
args_cli = parser.parse_args()
# ---- Load pre-computed embeddings and relevance judgements -----------------
# Expected layout of --embedded_dir:
#   embeddings.documents.pkl / embeddings.queries.pkl : pickled vectors
#   ids.txt    : one external document id per line (FAISS positions map here)
#   qrels.json : mapping of query id -> relevant document ids
path_doc_embedding = os.path.join(args_cli.embedded_dir, 'embeddings.documents.pkl')
path_query_embedding = os.path.join(args_cli.embedded_dir, 'embeddings.queries.pkl')
path_ids = os.path.join(args_cli.embedded_dir, 'ids.txt')
path_qrels = os.path.join(args_cli.embedded_dir, 'qrels.json')
print('>>> Loading query embeddings')
with open(path_query_embedding, 'rb') as f:
    queries = pickle.load(f)
print('>>> Loading qrels')
with open(path_qrels, 'r') as f:
    qrels = json.load(f)
print('>>> Loading document embeddings')
with open(path_doc_embedding, 'rb') as f:
    xb = pickle.load(f)
print('>>> Loading ids')
with open(path_ids, 'r') as f:
    ids = []
    for line in f:
        ids.append(line.strip())
os.makedirs(args_cli.output_dir, exist_ok=True)
def faiss_wrapper(indexing_setup_func):
    """Decorator: build-or-load the FAISS index described by the wrapped function.

    The wrapped function returns ``(index, index_name)``; the wrapper returns
    ``(index, index_name, loaded)`` where *loaded* says whether the index was
    read from disk instead of being trained and populated here.
    """
    @wraps(indexing_setup_func)
    def wrapped_function(*args, **kwargs):
        index, index_name = indexing_setup_func(*args, **kwargs)
        index_path = os.path.join(args_cli.output_dir, index_name)
        loaded = os.path.exists(index_path)
        if loaded:
            index = faiss.read_index(index_path)
        else:
            print(f'>>> Doing training for {index_path}')
            for _ in tqdm.trange(1):
                index.train(xb)
            print(f'>>> Adding embeddings to {index_path}')
            # Add in buffered batches to bound peak memory.
            for start in tqdm.trange(0, len(xb), args_cli.buffer_size):
                index.add(xb[start : start + args_cli.buffer_size])
            faiss.write_index(index, index_path)
        return index, index_name, loaded
    return wrapped_function
######################## All the candidate methods ########################
@faiss_wrapper
def flat():
    """Exact (brute-force) inner-product index."""
    return faiss.IndexFlatIP(args_cli.d), 'flat.index'
@faiss_wrapper
def flat_sq(qname):
    """Flat index with scalar quantisation of type *qname*."""
    assert qname in dir(faiss.ScalarQuantizer)  # QT_fp16, QT_8bit_uniform, QT_4bit_uniform ...
    qtype = getattr(faiss.ScalarQuantizer, qname)
    sq_index = faiss.IndexScalarQuantizer(args_cli.d, qtype, faiss.METRIC_INNER_PRODUCT)
    return sq_index, f'flat-{qname}.index'
@faiss_wrapper
def flat_pcq_sq(qname, d_target=args_cli.d // 2):
    """Scalar-quantised flat index behind a PCA reduction to *d_target* dims."""
    assert qname in dir(faiss.ScalarQuantizer)  # QT_fp16, QT_8bit_uniform, QT_4bit_uniform ...
    qtype = getattr(faiss.ScalarQuantizer, qname)
    sq_index = faiss.IndexScalarQuantizer(args_cli.d, qtype, faiss.METRIC_INNER_PRODUCT)
    # Chain the PCA transform in front of the quantised index.
    pca_matrix = faiss.PCAMatrix(args_cli.d, d_target, 0, True)
    combined = faiss.IndexPreTransform(pca_matrix, sq_index)
    return combined, f'flat-{qname}.index'.replace('flat-', 'flat-pca-')
@faiss_wrapper
def flat_ivf(qname, nlist, nprobe):
    """IVF index over scalar-quantised vectors with a flat coarse quantizer."""
    assert qname in dir(faiss.ScalarQuantizer)  # QT_fp16, QT_8bit_uniform, QT_4bit_uniform ...
    qtype = getattr(faiss.ScalarQuantizer, qname)
    coarse = faiss.IndexFlatIP(args_cli.d)
    ivf_index = faiss.IndexIVFScalarQuantizer(coarse, args_cli.d, nlist, qtype, faiss.METRIC_INNER_PRODUCT)
    ivf_index.nprobe = nprobe
    return ivf_index, f'flat-{qname}.index'.replace('flat-', 'flat-ivf-')
@faiss_wrapper
def pq(m, nbits):
    """Product quantisation index.

    m: how many chunks each vector is split into.
    nbits: 2 ** nbits clusters per chunk.
    """
    assert args_cli.d % m == 0
    pq_index = faiss.IndexPQ(args_cli.d, m, nbits, faiss.METRIC_INNER_PRODUCT)
    return pq_index, f'pq-{m}-{nbits}b.index'
@faiss_wrapper
def opq(m, nbits):
    """PQ index behind an OPQ rotation pre-transform."""
    assert args_cli.d % m == 0
    pq_index = faiss.IndexPQ(args_cli.d, m, nbits, faiss.METRIC_INNER_PRODUCT)
    rotation = faiss.OPQMatrix(args_cli.d, m)
    combined = faiss.IndexPreTransform(rotation, pq_index)
    return combined, f'pq-{m}-{nbits}b.index'.replace('pq-', 'opq-')
@faiss_wrapper
def hnsw(store_n, ef_search, ef_construction):
    """HNSW flat index; sets efSearch / efConstruction on the graph."""
    graph_index = faiss.IndexHNSWFlat(args_cli.d, store_n, faiss.METRIC_INNER_PRODUCT)
    graph_index.hnsw.efSearch = ef_search
    graph_index.hnsw.efConstruction = ef_construction
    return graph_index, f'hnsw-{store_n}-{ef_search}-{ef_construction}.index'
##########################################################################
def mrr(index):
    """Mean reciprocal rank at --topk of *index* over the loaded qrels."""
    reciprocal_sum = 0
    qids = list(qrels.keys())
    print('>>> Doing retrieval')
    for start in tqdm.trange(0, len(qrels), args_cli.batch_size):
        batch_qids = qids[start : start + args_cli.batch_size]
        batch_embs = np.vstack([queries[qid] for qid in batch_qids])
        _, I = index.search(batch_embs, args_cli.topk)  # (batch_size, topk)
        for row in range(I.shape[0]):
            qid = batch_qids[row]
            for rank in range(I.shape[1]):
                # FAISS returns row positions; map back to external ids.
                did = ids[I[row, rank]]
                if did in qrels[qid]:
                    reciprocal_sum += 1.0 / (rank + 1)
                    break
    return reciprocal_sum / len(qrels)
# ---- Run one benchmark configuration and record the results ----------------
results = {}
results['batch size'] = args_cli.batch_size
results['eval_string'] = args_cli.eval_string
start = time.time()
# NOTE(security): eval() of a CLI-supplied string executes arbitrary code.
# Acceptable for a local benchmarking script, but never expose this to
# untrusted input; a dispatch dict over the index builders would be safer.
index, index_name, loaded = eval(args_cli.eval_string)
end = time.time()
results['indexing (s)'] = end - start
if loaded:
    # The index came from disk, so the timing above is only load time.
    results['indexing (s)'] = None
results['size (GB)'] = os.path.getsize(os.path.join(args_cli.output_dir, index_name)) / 1024 ** 3
start = time.time()
_mrr = mrr(index)
end = time.time()
results['retrieval (s)'] = end - start
results['per query (s)'] = (end - start) / len(qrels)
results['mrr'] = _mrr
with open(os.path.join(args_cli.output_dir, f'results-{index_name}.json'), 'w') as f:
    json.dump(results, f, indent=4)
| 36.647727
| 106
| 0.666667
| 912
| 6,450
| 4.52193
| 0.218202
| 0.050921
| 0.025218
| 0.038797
| 0.394762
| 0.365179
| 0.343113
| 0.294374
| 0.275461
| 0.189622
| 0
| 0.009256
| 0.179225
| 6,450
| 175
| 107
| 36.857143
| 0.769739
| 0.052713
| 0
| 0.243056
| 0
| 0
| 0.118381
| 0.027487
| 0
| 0
| 0
| 0
| 0.034722
| 1
| 0.069444
| false
| 0
| 0.0625
| 0
| 0.201389
| 0.048611
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a9243210b5050d1609507b8edecbe58a2ea18c39
| 2,243
|
py
|
Python
|
meta/management/commands/bulkcreateusers.py
|
mepsd/CLAC
|
ee15111e9ad12e51fe349d3339319e30b3b69d9e
|
[
"CC0-1.0"
] | 126
|
2015-03-24T17:37:33.000Z
|
2022-03-29T18:37:39.000Z
|
meta/management/commands/bulkcreateusers.py
|
mepsd/CLAC
|
ee15111e9ad12e51fe349d3339319e30b3b69d9e
|
[
"CC0-1.0"
] | 1,815
|
2015-03-16T21:01:30.000Z
|
2019-09-09T18:47:29.000Z
|
meta/management/commands/bulkcreateusers.py
|
mepsd/CLAC
|
ee15111e9ad12e51fe349d3339319e30b3b69d9e
|
[
"CC0-1.0"
] | 69
|
2015-03-27T23:44:26.000Z
|
2021-02-14T09:45:28.000Z
|
import djclick as click
from django.contrib.auth.models import User, Group
from django.db import transaction
from django.core.management.base import CommandError
class DryRunFinished(Exception):
    """Raised to abort the atomic block once a dry run has completed."""
def get_or_create_users(email_addresses):
    """Return one User per non-empty email, creating missing accounts.

    Newly created accounts use the part before '@' as their username.
    """
    users = []
    for email in email_addresses:
        if not email:
            continue  # skip blank entries from the input file
        try:
            account = User.objects.get(email=email)
        except User.DoesNotExist:
            account = User.objects.create_user(
                username=email.split('@')[0],
                email=email,
            )
        users.append(account)
    return users
def add_users_to_group(group, users):
    """Add every user in *users* to *group*, then persist the group."""
    for member in users:
        group.user_set.add(member)
    group.save()
@click.command()
@click.argument('user_file', type=click.File('r'))
@click.option('--group', 'groupname', type=click.STRING,
              help='Name of group to which all users should be added')
@click.option('--dryrun', default=False, is_flag=True,
              help='If set, no changes will be made to the database')
def command(user_file, groupname, dryrun):
    '''
    Bulk creates users from email addresses in the specified text file,
    which should contain one email address per line.
    If the optional "--group <GROUPNAME>" argument is specified, then all the
    users (either found or created) are added to the matching group.
    '''
    if dryrun:
        click.echo('Starting dry run (no database records will be modified).')
    # Bug fix: ``group`` was previously only assigned inside ``if groupname:``,
    # so invoking the command without --group raised NameError at the
    # ``if group:`` check below.
    group = None
    if groupname:
        try:
            group = Group.objects.get(name=groupname)
        except Group.DoesNotExist:
            raise CommandError(
                '"{}" group does not exist. Exiting.'.format(groupname))
    email_addresses = [s.strip() for s in user_file.readlines()]
    try:
        with transaction.atomic():
            users = get_or_create_users(email_addresses)
            click.echo(
                'Created (or found) {} user accounts.'.format(len(users)))
            if group:
                add_users_to_group(group, users)
                click.echo('Added users to "{}" group.'.format(groupname))
            if dryrun:
                # Raising inside the atomic block rolls back every write.
                raise DryRunFinished()
    except DryRunFinished:
        click.echo("Dry run complete.")
| 31.591549
| 78
| 0.623718
| 276
| 2,243
| 4.98913
| 0.398551
| 0.050835
| 0.026144
| 0.023239
| 0.079884
| 0.079884
| 0
| 0
| 0
| 0
| 0
| 0.000616
| 0.27597
| 2,243
| 70
| 79
| 32.042857
| 0.847291
| 0.11547
| 0
| 0.096154
| 0
| 0
| 0.153374
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057692
| false
| 0.019231
| 0.076923
| 0
| 0.173077
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a92832dc73ab475a159970bd2c9e45e28feec5c7
| 544
|
py
|
Python
|
args.py
|
Pragyanstha/ImageRecognition
|
94626185b24ef1406896c81f5a583a5e1898ae29
|
[
"MIT"
] | null | null | null |
args.py
|
Pragyanstha/ImageRecognition
|
94626185b24ef1406896c81f5a583a5e1898ae29
|
[
"MIT"
] | null | null | null |
args.py
|
Pragyanstha/ImageRecognition
|
94626185b24ef1406896c81f5a583a5e1898ae29
|
[
"MIT"
] | null | null | null |
import configargparse
def parse(commands=None):
    """Parse CLI/config-file options; *commands* overrides sys.argv when given."""
    argparser = configargparse.ArgParser()
    argparser.add('-c', '--config', is_config_file=True)
    argparser.add('--mode', default='test', type=str)
    argparser.add('--num_cosines', type=int)
    argparser.add('--dim_subspace', type=int)
    argparser.add('--dim_diffspace', type=int)
    argparser.add('--method', type=str)
    argparser.add('--expname', type=str)
    argparser.add('--sigma', type=float)
    argparser.add('--kernel', type=str)
    if commands:
        return argparser.parse_args(commands)
    return argparser.parse_args()
| 27.2
| 50
| 0.582721
| 76
| 544
| 4.078947
| 0.460526
| 0.116129
| 0.077419
| 0.106452
| 0.090323
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.220588
| 544
| 20
| 51
| 27.2
| 0.731132
| 0
| 0
| 0
| 0
| 0
| 0.172477
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.058824
| 0
| 0.176471
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a92ab375e3c0927ad6ddd2ae99cba78e73a59cc7
| 3,767
|
py
|
Python
|
mmhuman3d/data/data_structures/human_data_cache.py
|
ykk648/mmhuman3d
|
26af92bcf6abbe1855e1a8a48308621410f9c047
|
[
"Apache-2.0"
] | 472
|
2021-12-03T03:12:55.000Z
|
2022-03-31T01:33:13.000Z
|
mmhuman3d/data/data_structures/human_data_cache.py
|
ykk648/mmhuman3d
|
26af92bcf6abbe1855e1a8a48308621410f9c047
|
[
"Apache-2.0"
] | 127
|
2021-12-03T05:00:14.000Z
|
2022-03-31T13:47:33.000Z
|
mmhuman3d/data/data_structures/human_data_cache.py
|
ykk648/mmhuman3d
|
26af92bcf6abbe1855e1a8a48308621410f9c047
|
[
"Apache-2.0"
] | 37
|
2021-12-03T03:23:22.000Z
|
2022-03-31T08:41:58.000Z
|
from typing import List, Optional

import numpy as np

from mmhuman3d.utils.path_utils import (
    Existence,
    check_path_existence,
    check_path_suffix,
)

from .human_data import HumanData
class HumanDataCacheReader():
    """Reads sliced HumanData items from an npz cache file.

    The npz file holds ``slice_size``, ``data_len``, ``keypoints_info``,
    ``non_sliced_data`` and one pickled dict per slice, keyed by the slice
    index as a string.
    """

    def __init__(self, npz_path: str):
        """Load the cheap metadata eagerly; slice data is loaded lazily.

        Args:
            npz_path (str): Path to the cache npz file.
        """
        self.npz_path = npz_path
        # Context manager closes the metadata handle instead of leaking it;
        # get_item() re-opens the file lazily when slices are needed.
        with np.load(npz_path, allow_pickle=True) as npz_file:
            self.slice_size = npz_file['slice_size'].item()
            self.data_len = npz_file['data_len'].item()
            self.keypoints_info = npz_file['keypoints_info'].item()
        self.non_sliced_data = None
        self.npz_file = None

    def __del__(self):
        # getattr() guards against a partially constructed instance
        # (__del__ runs even when __init__ raised before setting attributes).
        npz_file = getattr(self, 'npz_file', None)
        if npz_file is not None:
            npz_file.close()

    def get_item(self, index, required_keys: Optional[List[str]] = None):
        """Return the HumanData for the slice covering *index*.

        Args:
            index (int): Global item index; mapped to its containing slice.
            required_keys (Optional[List[str]]): Non-sliced keys to merge into
                the returned data. Defaults to no extra keys.
                (Bug fix: previously a mutable default argument ``[]``.)
        """
        if required_keys is None:
            required_keys = []
        if self.npz_file is None:
            self.npz_file = np.load(self.npz_path, allow_pickle=True)
        cache_key = str(int(index / self.slice_size))
        base_data = self.npz_file[cache_key].item()
        base_data.update(self.keypoints_info)
        for key in required_keys:
            non_sliced_value = self.get_non_sliced_data(key)
            # Merge dict-valued entries; anything else overwrites wholesale.
            if isinstance(non_sliced_value, dict) and \
                    key in base_data and \
                    isinstance(base_data[key], dict):
                base_data[key].update(non_sliced_value)
            else:
                base_data[key] = non_sliced_value
        ret_human_data = HumanData.new(source_dict=base_data)
        # data in cache is compressed
        ret_human_data.__keypoints_compressed__ = True
        # set missing values and attributes by default method
        ret_human_data.__set_default_values__()
        return ret_human_data

    def get_non_sliced_data(self, key: str):
        """Lazily load and cache the non-sliced payload, then return *key*."""
        if self.non_sliced_data is None:
            if self.npz_file is None:
                npz_file = np.load(self.npz_path, allow_pickle=True)
                self.non_sliced_data = npz_file['non_sliced_data'].item()
            else:
                self.non_sliced_data = self.npz_file['non_sliced_data'].item()
        return self.non_sliced_data[key]
class HumanDataCacheWriter():
    """Assembles cache metadata plus sliced payload and dumps one npz."""

    def __init__(self,
                 slice_size: int,
                 data_len: int,
                 keypoints_info: dict,
                 non_sliced_data: dict,
                 key_strict: bool = True):
        """Store cache-wide metadata; sliced payload is merged in later."""
        self.slice_size = slice_size
        self.data_len = data_len
        self.keypoints_info = keypoints_info
        self.non_sliced_data = non_sliced_data
        self.sliced_data = {}
        self.key_strict = key_strict

    def update_sliced_dict(self, sliced_dict):
        """Merge a mapping of slice-key -> slice payload into the cache."""
        self.sliced_data.update(sliced_dict)

    def dump(self, npz_path: str, overwrite: bool = True):
        """Dump keys and items to an npz file.

        Args:
            npz_path (str):
                Path to a dumped npz file.
            overwrite (bool, optional):
                Whether to overwrite if there is already a file.
                Defaults to True.

        Raises:
            ValueError:
                npz_path does not end with '.npz'.
            FileExistsError:
                When overwrite is False and file exists.
        """
        if not check_path_suffix(npz_path, ['.npz']):
            raise ValueError('Not an npz file.')
        if not overwrite and \
                check_path_existence(npz_path, 'file') == Existence.FileExist:
            raise FileExistsError
        # Sliced entries deliberately take precedence over metadata keys.
        payload = {
            'slice_size': self.slice_size,
            'data_len': self.data_len,
            'keypoints_info': self.keypoints_info,
            'non_sliced_data': self.non_sliced_data,
            'key_strict': self.key_strict,
            **self.sliced_data,
        }
        np.savez_compressed(npz_path, **payload)
| 35.205607
| 78
| 0.609238
| 482
| 3,767
| 4.423237
| 0.20332
| 0.075985
| 0.085366
| 0.055816
| 0.116792
| 0.090994
| 0.036585
| 0.036585
| 0.036585
| 0.036585
| 0
| 0.000383
| 0.307406
| 3,767
| 106
| 79
| 35.537736
| 0.816788
| 0.114415
| 0
| 0.052632
| 0
| 0
| 0.044245
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.092105
| false
| 0
| 0.052632
| 0
| 0.197368
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a92aefa88c9e1677341b516fd325e4f5ca942f90
| 10,174
|
py
|
Python
|
code/instance_tests.py
|
ahillbs/minimum_scan_cover
|
e41718e5a8e0e3039d161800da70e56bd50a1b97
|
[
"MIT"
] | null | null | null |
code/instance_tests.py
|
ahillbs/minimum_scan_cover
|
e41718e5a8e0e3039d161800da70e56bd50a1b97
|
[
"MIT"
] | null | null | null |
code/instance_tests.py
|
ahillbs/minimum_scan_cover
|
e41718e5a8e0e3039d161800da70e56bd50a1b97
|
[
"MIT"
] | null | null | null |
from typing import List
import math
import configargparse
import numpy as np
import sqlalchemy
import yaml
import sys
import tqdm
from IPython import embed
from celery import group
from angular_solver import solve, bulk_solve
from database import Config, ConfigHolder, Graph, Task, TaskJobs, get_session
from solver import ALL_SOLVER
from utils import is_debug_env
class OnMessageCB:
    """Celery ``on_message`` callback that advances a progress bar.

    Each terminal status ('SUCCESS' or 'FAILURE') bumps the bar by one;
    failures are additionally printed. A missing/None progress bar is
    tolerated silently.
    """

    def __init__(self, progressbar: tqdm.tqdm) -> None:
        super().__init__()
        self.progressbar = progressbar

    def __call__(self, body: dict) -> None:
        status = body["status"]
        if status not in ('SUCCESS', 'FAILURE'):
            return
        if status == 'FAILURE':
            print("Found an error:", body)
        try:
            self.progressbar.update()
        except AttributeError:
            # progressbar may be None; a skipped tick is acceptable.
            pass
def _load_config():
    """Parse command-line / config-file arguments for the solver tests.

    Returns:
        configargparse.Namespace: The parsed arguments.
    """
    parser = configargparse.ArgumentParser(description="Parser for the solver tests")
    parser.add_argument(
        '--config',
        type=str,
        help='Path to config file',
        is_config_file_arg=True)
    parser.add_argument('--create-only', action="store_true", help="Only creates task and jobs; not process them")
    parser.add_argument('--solvers', required=True, type=str, nargs='+', help="Name of the solvers that shall be used")
    # Typo fix: "instatiation" -> "instantiation".
    parser.add_argument('--solvers-args', nargs='+', type=yaml.safe_load, help="Arguments for solver instantiation")
    parser.add_argument('--url-path', type=str, help="Path to database")
    parser.add_argument('--min-n', type=int, default=5, help="Minimal amount of vertices a graph can have")
    parser.add_argument('--max-n', type=int, default=None, help="Maximal amount of vertices a graph can have")
    parser.add_argument('--min-m', type=int, default=0, help="Minimal amount of edges a graph can have")
    # Copy-paste fix: --max-m bounds EDGES (cf. --min-m), not vertices.
    parser.add_argument('--max-m', type=int, default=sys.maxsize, help="Maximal amount of edges a graph can have")
    parser.add_argument('--instance-types', type=str, nargs="*", default=[], help="Types of instances you want to select. Default will be all instance types")
    parser.add_argument('--task-id', type=int, default=None, help="Only select instances belonging to a specific task. Default will select from all tasks.")
    parser.add_argument('--max-amount', type=int, help="Maximum amount of instances that will be tested")
    parser.add_argument('--repetitions', type=int, default=1, help="Amount of repetitions for every test for every solver")
    parser.add_argument('--slice-size', type=str, default="auto", help="Slice sizes for bulk solves if needed (Default: auto)")
    parser.add_argument('--manual-query', action="store_true", help="Instead of standard query arguments, open ipython to construct custom query")
    parser.add_argument('--name', type=str, default="Main_instance_test", help="Describing name for the task")
    # Typo fix: "Preious" -> "Previous".
    parser.add_argument('--with-start-sol', action="store_true", default=False, help="NEED: Previous solution from task-id instances! Starts solving with start solution")
    parsed = parser.parse_args()
    return parsed
def _create_task(arg_config, session):
    """Create the parent test task plus one subtask per solver.

    Validates that each requested solver exists, selects the instances
    (graphs or prior task jobs) to test, and builds ``repetitions`` jobs
    per instance per solver. Everything is committed in one transaction.

    Args:
        arg_config: Parsed CLI namespace from ``_load_config``.
        session: Open database session.

    Returns:
        tuple: (task, config) — the persisted parent Task and its
        ConfigHolder.
    """
    solvers = arg_config.solvers
    solvers_args = arg_config.solvers_args
    assert len(solvers) == len(solvers_args),\
        "The amount of solver arguments must match the amount of solvers"
    for solver in solvers:
        assert solver in ALL_SOLVER,\
            f"Solver {solver} not found! Please make sure that all solver are properly named."
    task = Task(task_type="instance_test", status=Task.STATUS_OPTIONS.CREATED, name=arg_config.name)
    config = ConfigHolder.fromNamespace(arg_config, task=task,
                                        ignored_attributes=["url_path", "solvers", "solvers_args", "create_only", "config", "name"])
    jobs = _get_instances(task, config, session)
    for solver, solver_args in zip(solvers, solvers_args):
        subtask = Task(parent=task, name=f"{solver}_test", task_type="instance_test", status=Task.STATUS_OPTIONS.CREATED)
        task.children.append(subtask)
        subconfig_namespace = configargparse.Namespace(solver=solver,
                                                       solver_args=solver_args)
        subconfig = ConfigHolder.fromNamespace(subconfig_namespace, task=subtask)
        # NOTE(review): with_start_sol is read from the SUB config, which
        # presumably falls back to the parent task's config — verify.
        add_prev_job = (subconfig.with_start_sol is not None and subconfig.with_start_sol)
        # Instances are either prior TaskJobs (when --task-id was given,
        # carrying a previous solution) or plain Graph rows.
        if isinstance(jobs[0], TaskJobs):
            for task_job in jobs:
                prev_job = task_job if add_prev_job else None
                for i in range(config.repetitions):
                    subtask.jobs.append(TaskJobs(task=subtask, graph=task_job.graph, prev_job=prev_job))
        else:
            for graph in jobs:
                for i in range(config.repetitions):
                    subtask.jobs.append(TaskJobs(task=subtask, graph=graph))
    session.add(task)
    session.commit()
    return task, config
def _get_instances(task, config: ConfigHolder, session: sqlalchemy.orm.Session):
    """Select the instances this task will run on.

    Either opens an interactive IPython shell for a hand-written query
    (``--manual-query``) or filters graphs by the configured bounds.

    Args:
        task: The new Task; only used to persist the manual query statement.
        config: Holder carrying the filter attributes parsed from the CLI.
        session: Open database session.

    Returns:
        list: Graph rows, or TaskJobs rows when ``config.task_id`` is set.
    """
    if config.manual_query:
        query = None
        # The user is expected to bind `query` inside the IPython shell.
        print("Manual query chosen. Please fill a query. After finishing the query just end ipython.\n\
        Query result must be of type Graph or TaskJobs!")
        embed()
        assert query is not None, "query must be filled!"
        # Persist the hand-written statement for reproducibility.
        session.add(Config(task=task, value=query.statement(), param="statement"))
        return query.all()
    if config.task_id is None:
        query = session.query(Graph)
    else:
        query = session.query(TaskJobs).join(Graph).filter(TaskJobs.task_id == config.task_id)
    # Optional bounds on vertex and edge counts.
    if config.min_n is not None:
        query = query.filter(Graph.vert_amount >= config.min_n)
    if config.max_n is not None:
        query = query.filter(Graph.vert_amount <= config.max_n)
    if config.min_m is not None:
        query = query.filter(Graph.edge_amount >= config.min_m)
    if config.max_m is not None:
        query = query.filter(Graph.edge_amount <= config.max_m)
    if config.instance_types:
        query = query.filter(Graph.i_type.in_(config.instance_types))
    if config.max_amount is not None:
        # Slicing the query materializes at most max_amount rows.
        query = query[:config.max_amount]
    return query[:]
def process_task(config: ConfigHolder, task: Task, session: sqlalchemy.orm.Session):
    """Recursively process a task: its subtasks first, then its own jobs.

    Status transitions (PROCESSING -> FINISHED/ERROR) are committed to
    the database. An exception marks the task as ERROR only if unsolved
    jobs remain; the known celery "no on_message backend" error with
    nothing left to solve still counts as FINISHED.
    """
    try:
        task.status = Task.STATUS_OPTIONS.PROCESSING
        session.commit()
        for subtask in tqdm.tqdm(task.children, desc=f"Task {task.id}: Processing subtasks"):
            # Skip subtasks already in a terminal (or interrupted) state.
            if subtask.status not in [Task.STATUS_OPTIONS.ERROR, Task.STATUS_OPTIONS.INTERRUPTED, Task.STATUS_OPTIONS.FINISHED]:
                subconfig = ConfigHolder(subtask)
                # Propagate the local-execution flag down the tree.
                if config.local:
                    subconfig.local = True
                process_task(subconfig, subtask, session)
        # Only jobs without a stored solution are (re)processed.
        to_process = [job for job in task.jobs if job.solution is None]
        process_jobs(to_process, config, session)
        task.status = Task.STATUS_OPTIONS.FINISHED
    except Exception as e:
        print(e)
        to_process = [job for job in task.jobs if job.solution is None]
        if str(e).lower() != "Backend does not support on_message callback".lower() and to_process:
            task.status = Task.STATUS_OPTIONS.ERROR
            task.error_message = str(e)
            # Re-raise in debug environments so the failure is visible.
            if is_debug_env():
                raise e
        else:
            task.status = Task.STATUS_OPTIONS.FINISHED
    finally:
        # Persist whatever status was decided above.
        session.commit()
def _get_slicing(unsolved, slicing):
if slicing == 'auto':
slice_size = 16
slice_amount = math.ceil(len(unsolved) / 5)
else:
slice_size = slicing
slice_amount = math.ceil(len(unsolved) / slice_size)
return slice_size, slice_amount
def process_jobs(jobs: List[TaskJobs], config: ConfigHolder, session: sqlalchemy.orm.Session):
    """Solve every job, either locally or via celery in slices.

    Local mode solves sequentially and commits after each job; remote
    mode dispatches each slice as a celery group and commits per result.
    Each job's previous solution (if any) seeds the solver and its
    runtime is deducted from / added to the time limit.
    """
    if not jobs:
        return
    processbar = tqdm.tqdm(total=len(jobs), desc=f"Task {jobs[0].task_id}: Process jobs")
    on_message = OnMessageCB(progressbar=processbar)
    # ToDo: To speed up solving time, maybe use bulksolve
    slice_size, slice_amount = _get_slicing(jobs, config.slice_size)
    # First slice_amount-1 full slices; the last slice absorbs the remainder.
    slices = [(i*slice_size, (i+1)*slice_size) for i in range(slice_amount-1)]
    if slice_amount > 0:
        slices.append(tuple([(slice_amount-1)*slice_size, len(jobs)]))
    solver_args = config.solver_args
    # Default per-job time limit (seconds — presumably; confirm) when the
    # solver arguments do not specify one.
    if "time_limit" in solver_args:
        time_limit = solver_args["time_limit"]
    else:
        time_limit = 900
    if hasattr(config, "local") and config.local:
        for job in jobs:
            sol = solve(
                job.graph,
                config.solver,
                solver_config=config.solver_args,
                solve_config={
                    "start_solution":(None if job.prev_job is None else job.prev_job.solution.order),
                    "time_limit":(time_limit if job.prev_job is None else time_limit - job.prev_job.solution.runtime)
                }
            )
            job.solution = sol
            processbar.update()
            # Commit per job so progress survives interruption.
            session.commit()
    else:
        for start, end in slices:
            # Dispatch one celery group per slice and block for its results.
            results = group(solve.s(
                job.graph,
                config.solver,
                solver_config=config.solver_args,
                solve_config={
                    "start_solution":(None if job.prev_job is None else job.prev_job.solution.order),
                    "time_limit":(time_limit if job.prev_job is None else time_limit - job.prev_job.solution.runtime)
                }
            )
                for job in jobs[start:end])().get(on_message=on_message)
            for job, result in zip(jobs[start:end], results):
                result.graph = job.graph
                # Total runtime includes the seeding solution's runtime.
                if job.prev_job is not None:
                    result.runtime = float(result.runtime) + float(job.prev_job.solution.runtime)
                job.solution = result
                if session:
                    session.commit()
def main():
    """Entry point: parse the config, create the task, optionally run it."""
    parsed = _load_config()
    session = get_session(parsed.url_path)
    task, holder = _create_task(parsed, session)
    if parsed.create_only:
        # --create-only: persist the task/jobs without processing them.
        return
    process_task(holder, task, session)


if __name__ == "__main__":
    main()
| 47.32093
| 169
| 0.650285
| 1,323
| 10,174
| 4.842026
| 0.187453
| 0.023884
| 0.045114
| 0.021542
| 0.245551
| 0.214955
| 0.194661
| 0.172651
| 0.167031
| 0.151421
| 0
| 0.001959
| 0.247395
| 10,174
| 215
| 170
| 47.32093
| 0.834661
| 0.005013
| 0
| 0.158163
| 0
| 0
| 0.15659
| 0
| 0
| 0
| 0
| 0.004651
| 0.015306
| 1
| 0.045918
| false
| 0.005102
| 0.071429
| 0
| 0.153061
| 0.015306
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a92f10539fc879fc6144252e415f0ed5e662c25e
| 7,679
|
py
|
Python
|
frads/room.py
|
LBNL-ETA/frads
|
dbd9980c7cfebd363089180d8fb1b7107e73ec92
|
[
"BSD-3-Clause-LBNL"
] | 8
|
2019-11-13T22:26:45.000Z
|
2022-03-23T15:30:37.000Z
|
frads/room.py
|
LBNL-ETA/frads
|
dbd9980c7cfebd363089180d8fb1b7107e73ec92
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
frads/room.py
|
LBNL-ETA/frads
|
dbd9980c7cfebd363089180d8fb1b7107e73ec92
|
[
"BSD-3-Clause-LBNL"
] | 2
|
2021-08-10T18:22:04.000Z
|
2021-08-30T23:16:27.000Z
|
"""Generic room model"""
import argparse
import os
from frads import radgeom
from frads import radutil, util
class Room(object):
    """Make a shoebox.

    A rectangular room built by extruding a floor rectangle upward;
    exposes the six surfaces plus helpers that generate Radiance
    primitives for them.
    """
    def __init__(self, width, depth, height, origin=radgeom.Vector()):
        """Build the box geometry.

        Args:
            width: Extent along X.
            depth: Extent along Y.
            height: Extent along Z.
            origin: Lower-left floor corner; defaults to the zero vector.
                NOTE(review): shared default Vector() instance — safe only
                if Vector is immutable; confirm.
        """
        self.width = width
        self.depth = depth
        self.height = height
        self.origin = origin
        flr_pt2 = origin + radgeom.Vector(width, 0, 0)
        flr_pt3 = flr_pt2 + radgeom.Vector(0, depth, 0)
        self.floor = radgeom.Polygon.rectangle3pts(origin, flr_pt2, flr_pt3)
        # Extrusion index mapping below (1=ceiling, 2..5=walls) follows
        # radgeom.Polygon.extrude's output order — order-critical.
        extrusion = self.floor.extrude(radgeom.Vector(0, 0, height))
        self.clng = extrusion[1]
        self.wall_south = Surface(extrusion[2], 'wall.south')
        self.wall_east = Surface(extrusion[3], 'wall.east')
        self.wall_north = Surface(extrusion[4], 'wall.north')
        self.wall_west = Surface(extrusion[5], 'wall.west')
        self.surfaces = [
            self.clng, self.floor, self.wall_west, self.wall_north, self.wall_east,
            self.wall_south
        ]
    def surface_prim(self):
        """Generate Radiance primitives for all surfaces into self.srf_prims."""
        self.srf_prims = []
        ceiling = radutil.Primitive(
            'white_paint_70', 'polygon', 'ceiling', '0', self.clng.to_real())
        self.srf_prims.append(ceiling)
        floor = radutil.Primitive(
            'carpet_20', 'polygon', 'floor', '0', self.floor.to_real())
        self.srf_prims.append(floor)
        nwall = radutil.Primitive(
            'white_paint_50', 'polygon', self.wall_north.name,
            '0', self.wall_north.polygon.to_real())
        self.srf_prims.append(nwall)
        ewall = radutil.Primitive('white_paint_50', 'polygon', self.wall_east.name,
                                  '0', self.wall_east.polygon.to_real())
        self.srf_prims.append(ewall)
        wwall = radutil.Primitive('white_paint_50', 'polygon', self.wall_west.name,
                                  '0', self.wall_west.polygon.to_real())
        self.srf_prims.append(wwall)
        # Windows on south wall only, for now.
        # The south wall is emitted facet-by-facet from its facade list so
        # window cut-outs are preserved.
        for idx, swall in enumerate(self.wall_south.facade):
            _identifier = '{}.{:02d}'.format(self.wall_south.name, idx)
            _id = radutil.Primitive(
                'white_paint_50', 'polygon', _identifier, '0', swall.to_real())
            self.srf_prims.append(_id)
    def window_prim(self):
        """Generate glass primitives for the south-wall windows (by name)."""
        self.wndw_prims = {}
        for wpolygon in self.wall_south.windows:
            _real_args = self.wall_south.windows[wpolygon].to_real()
            win_prim = radutil.Primitive('glass_60', 'polygon', wpolygon, '0', _real_args)
            self.wndw_prims[wpolygon] = win_prim
class Surface(object):
    """Room wall object.

    Wraps a wall polygon and supports cutting windows into it and
    extruding it into a facade of the given thickness.
    """

    def __init__(self, polygon, name):
        """Cache the polygon, its centroid and two in-plane unit vectors."""
        self.centroid = polygon.centroid()
        self.polygon = polygon
        self.vertices = polygon.vertices
        # Unit vectors along the first two polygon edges; used to place
        # windows within the wall plane.
        self.vect1 = (self.vertices[1] - self.vertices[0]).normalize()
        self.vect2 = (self.vertices[2] - self.vertices[1]).normalize()
        self.name = name
        self.windows = {}

    def make_window(self, dist_left, dist_bot, width, height, wwr=None):
        """Return a window polygon, either by WWR scaling or by placement.

        Args:
            dist_left: Offset from the wall edge along vect2.
            dist_bot: Offset from the wall edge along vect1.
            width: Window extent along vect2.
            height: Window extent along vect1.
            wwr: Optional window-to-wall ratio; when given, the wall
                polygon is scaled about its centroid instead.
        """
        if wwr is not None:
            # isinstance replaces the exact type() comparison (idiom fix;
            # also accepts float subclasses such as numpy floats).
            assert isinstance(wwr, float), 'WWR must be float'
            win_polygon = self.polygon.scale(radgeom.Vector(*[wwr] * 3),
                                             self.centroid)
        else:
            win_pt1 = self.vertices[0]\
                + self.vect1.scale(dist_bot)\
                + self.vect2.scale(dist_left)
            win_pt2 = win_pt1 + self.vect1.scale(height)
            win_pt3 = win_pt1 + self.vect2.scale(width)
            win_polygon = radgeom.Polygon.rectangle3pts(win_pt3, win_pt1, win_pt2)
        return win_polygon

    def add_window(self, name, window_polygon):
        """Cut the window out of the wall and register it by name."""
        self.polygon = self.polygon - window_polygon
        self.windows[name] = window_polygon

    def facadize(self, thickness):
        """Extrude the wall (and window reveals) into a facade.

        Windows are shifted outward by the facade thickness; duplicate
        facets produced by the window extrusions are removed.
        """
        direction = self.polygon.normal().scale(thickness)
        if thickness > 0:
            self.facade = self.polygon.extrude(direction)[:2]
            # Plain loop replaces a side-effect-only list comprehension.
            for wname in self.windows:
                self.facade.extend(self.windows[wname].extrude(direction)[2:])
            # (Dead `uniq = []` assignment removed — it was immediately
            # overwritten by the copy below.)
            uniq = self.facade.copy()
            for idx in range(len(self.facade)):
                # `other` renamed from `re`, which shadowed the regex module.
                for other in self.facade[:idx] + self.facade[idx + 1:]:
                    if set(self.facade[idx].to_list()) == set(other.to_list()):
                        uniq.remove(other)
            self.facade = uniq
        else:
            self.facade = [self.polygon]
        # Offset each window by the facade direction.
        offset_wndw = {}
        for wndw in self.windows:
            offset_wndw[wndw] = radgeom.Polygon(
                [v + direction for v in self.windows[wndw].vertices])
        self.windows = offset_wndw
def make_room(dimension: dict):
    """Build a side-lit shoebox Room from a dimension mapping.

    Keys 'width'/'depth'/'height' size the box; every key starting with
    'window' holds a whitespace-separated parameter string for one
    south-wall window; 'facade_thickness' drives the facade extrusion.
    """
    room = Room(*(float(dimension[key]) for key in ('width', 'depth', 'height')))
    south = room.wall_south
    for wname in (key for key in dimension if key.startswith('window')):
        params = map(float, dimension[wname].split())
        south.add_window(wname, south.make_window(*params))
    south.facadize(float(dimension['facade_thickness']))
    room.surface_prim()
    room.window_prim()
    return room
def genradroom():
    """Commandline interface for generating a generic room.

    Resulting Radiance .rad files will be written to a local
    Objects directory, which will be created if not existed before.
    """
    parser = argparse.ArgumentParser(
        prog='genradroom', description='Generate a generic room')
    parser.add_argument('width', type=float,
                        help='room width along X axis, starting from x=0')
    parser.add_argument('depth', type=float,
                        help='room depth along Y axis, starting from y=0')
    parser.add_argument('height', type=float,
                        help='room height along Z axis, starting from z=0')
    parser.add_argument('-w', dest='window',
                        metavar=('start_x', 'start_z', 'width', 'height'),
                        nargs=4, action='append', type=float,
                        help='Define a window from lower left corner')
    parser.add_argument('-n', dest='name', help='Model name', default='model')
    parser.add_argument('-t', dest='facade_thickness',
                        metavar='Facade thickness', type=float)
    args = parser.parse_args()
    dims = vars(args)
    # BUG FIX: -w is optional and `action='append'` leaves the default at
    # None when no window is given; guard so a window-less room no longer
    # crashes on enumerate(None).
    for idx, window in enumerate(dims['window'] or []):
        dims['window_%s' % idx] = ' '.join(map(str, window))
    dims.pop('window')
    room = make_room(dims)
    name = args.name
    material_primitives = radutil.material_lib()
    util.mkdir_p('Objects')
    with open(os.path.join('Objects', f'materials_{name}.mat'), 'w') as wtr:
        for prim in material_primitives:
            wtr.write(str(prim)+'\n')
    # The three surface files shared one filter/write pattern; loop over
    # the identifier prefixes instead of repeating it.
    for prefix in ('ceiling', 'floor', 'wall'):
        with open(os.path.join('Objects', f'{prefix}_{name}.rad'), 'w') as wtr:
            for prim in room.srf_prims:
                if prim.identifier.startswith(prefix):
                    wtr.write(str(prim)+'\n')
    # One file per window primitive.
    for key, prim in room.wndw_prims.items():
        with open(os.path.join('Objects', f'{key}_{name}.rad'), 'w') as wtr:
            wtr.write(str(prim)+'\n')
| 42.192308
| 90
| 0.594218
| 963
| 7,679
| 4.601246
| 0.208723
| 0.032498
| 0.018957
| 0.017603
| 0.168585
| 0.158429
| 0.130896
| 0.098172
| 0.069059
| 0.069059
| 0
| 0.012357
| 0.272822
| 7,679
| 181
| 91
| 42.425414
| 0.78116
| 0.0405
| 0
| 0.065789
| 0
| 0
| 0.101582
| 0
| 0
| 0
| 0
| 0
| 0.006579
| 1
| 0.059211
| false
| 0
| 0.026316
| 0
| 0.111842
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a931b982d8994d2cd901db4713baba148c9468eb
| 10,825
|
py
|
Python
|
src/mcsdk/git/client.py
|
Stick97/mcsdk-automation-framework-core
|
5c7cc798fd4e0d54dfb3e0b900a828db4a72034e
|
[
"BSD-3-Clause"
] | 9
|
2019-11-03T10:15:06.000Z
|
2022-02-26T06:16:10.000Z
|
src/mcsdk/git/client.py
|
Stick97/mcsdk-automation-framework-core
|
5c7cc798fd4e0d54dfb3e0b900a828db4a72034e
|
[
"BSD-3-Clause"
] | 2
|
2020-07-08T18:23:02.000Z
|
2022-01-17T17:31:18.000Z
|
src/mcsdk/git/client.py
|
Stick97/mcsdk-automation-framework-core
|
5c7cc798fd4e0d54dfb3e0b900a828db4a72034e
|
[
"BSD-3-Clause"
] | 5
|
2020-07-06T16:28:15.000Z
|
2022-02-22T00:51:48.000Z
|
import os
import requests
from requests.auth import HTTPBasicAuth
from ..integration.os.process import Command
from ..integration.os.utils import chdir
class RepoClient:
    """ The class handles the GIT processes.

    Most methods return shell-style codes: 0 on success, 255 on failure
    (or the command's error flag), unless documented otherwise. Commands
    run via the Command wrapper after chdir-ing into the repo directory.
    """
    def __init__(self, root_dir, token, owner, repo, repo_dir):
        """ Git class constructor """
        self.__root_dir = root_dir  # directory to return to after git ops
        self.__github_token = token  # GitHub token for HTTPS auth and API calls
        self.__repo_owner = owner
        self.__repo_name = repo
        self.__repo_dir = repo_dir  # local clone target directory
        self.__repo_remote = 'origin' # TODO: Make optional parameter
    def clone(self):
        """ Executes a git clone command on the target repository.

        Returns 0 on success or when the repo is already cloned, 255 on
        a git error.
        """
        chdir(self.__root_dir)
        # logging the working directory for debug
        print('----- Repo clone: -----')
        # Already cloned if the target dir contains a .git directory.
        if os.path.isdir(self.__repo_dir) and os.path.isdir(os.path.join(self.__repo_dir, '.git')):
            print('Repository {repo_name} is already cloned'.format(repo_name=self.__repo_name) + '\n')
            return 0
        # Command to clone the repo
        # NOTE(review): the token is embedded in the clone URL and could
        # leak through the error output printed below — confirm acceptable.
        cmd = 'git clone https://{owner}:{token}@github.com/{owner}/{repo}.git {repo_folder}'.format(
            owner=self.__repo_owner,
            token=self.__github_token,
            repo=self.__repo_name,
            repo_folder=self.__repo_dir
        )
        command = Command(cmd)
        command.run()
        if command.returned_errors():
            print('Error: ' + command.get_output())
            return 255
        print('Cloned repo {repo} to directory {dir}'.format(repo=self.__repo_name, dir=self.__repo_dir))
        return 0
    def __get_branches(self):
        """ Returns a list of current branches.

        Returns an empty string (not a list) when the repo directory is
        missing; callers only iterate the result, so both types work.
        """
        if not os.path.isdir(self.__repo_dir):
            return ''
        chdir(self.__repo_dir) # Go to repo dir
        command = Command('git branch --all', False)
        command.run()
        chdir(self.__root_dir) # Go to root dir
        branches = command.get_output()
        branches = branches.split('\n')
        print("List of branches: " + '\n'.join(branches))
        return branches
    def branch_exists(self, branch):
        """ Checks if the branch exists (locally or on the remote). """
        if not len(branch):
            raise ValueError('Branch name not provided')
        # Logging
        print('Searching for branch: ' + branch)
        lines = self.__get_branches()
        for line in lines:
            line = line.strip()
            # Match plain, current ('* name') and remote-tracking spellings.
            for variant in ['* ' + branch, branch, 'remotes/' + self.__repo_remote + '/' + branch]:
                if len(line) == len(variant) and line == variant:
                    print('Branch found!')
                    return True
        print('Branch not found!')
        return False
    def branch_current(self):
        """ Returns the current branch.

        Raises RuntimeError when no branch line is marked with '*'.
        """
        # Logging
        print('Getting the current branch')
        lines = self.__get_branches()
        for line in lines:
            if line.find('*') == 0:
                return line.lstrip('* ')
        raise RuntimeError("Could not determine current branch")
    def branch_delete(self, branch):
        """ Runs the branch delete command branch.

        Deletes locally and, if that succeeded, also on the remote.
        Returns the error flag of the last executed command.
        """
        if not len(branch):
            raise ValueError('Branch name not provided')
        # Logging
        print('Deleting branch `{branch}`'.format(branch=branch))
        # Cannot delete the branch we are on; switch to master first.
        self.checkout('master')
        chdir(self.__repo_dir) # The checkout above changes the directory
        # Local delete
        command = Command('git branch -D {branch}'.format(branch=branch))
        command.run()
        print("Branch delete (local): " + command.get_output())
        if not command.returned_errors():
            # Remote delete
            command = Command('git push origin --delete {branch}'.format(branch=branch))
            command.run()
            print("Branch delete (remote): " + command.get_output())
        chdir(self.__root_dir) # Get back to previous directory
        return command.returned_errors()
    def fetch(self):
        """ Runs the fetch command branch """
        chdir(self.__repo_dir)
        command = Command('git fetch --all')
        command.run()
        print("GIT fetch: " + command.get_output())
        chdir(self.__root_dir) # Get back to previous directory
        return command.returned_errors()
    def push(self, remote, branch, new=False):
        """ Executes a git push command of the given branch.

        Args:
            remote: Remote name to push to.
            branch: Branch to push; must be non-empty.
            new: When True, pushes with -u to set the upstream.
        """
        if not len(branch):
            raise ValueError('Branch name not provided')
        chdir(self.__repo_dir)
        # logging the working directory for debug
        print('----- Branch push: -----')
        print('Repo name: ' + self.__repo_name)
        print('Push to Branch: ' + branch)
        # Command spec
        cmd = 'git push {remote} {branch}'.format(remote=remote, branch=branch)
        if new:
            cmd = 'git push -u {remote} {branch}'.format(remote=remote, branch=branch)
        # Command to push to the repo
        command = Command(cmd)
        command.run()
        chdir(self.__root_dir) # Get back to previous directory
        if command.returned_errors():
            print('Could not create a new branch {branch}: '.format(branch=branch) + command.get_output())
            return 255
        print('Branch {branch} has been pushed to {remote}'.format(remote=remote, branch=branch))
        return 0
    def checkout(self, branch, force=False, auto_create=False):
        """ Executes a git checkout command of the given branch.

        Args:
            branch: Branch to check out.
            force: When True, uses `git checkout -f`.
            auto_create: When True, creates the branch (-b) if missing;
                also retried automatically when git reports the branch
                does not match any known file(s).
        """
        # logging the working directory for debug
        print('----- Branch checkout: -----')
        print('Repo name: ' + self.__repo_name)
        print('Checkout branch: ' + branch)
        current_branch = self.branch_current()
        if branch == current_branch:
            print('Already on branch `{branch}`'.format(branch=branch))
            return 0
        branch_exists = self.branch_exists(branch)
        if not auto_create and not branch_exists:
            print('Branch does not exist and will not be created')
            return 255
        chdir(self.__repo_dir)
        # Command spec
        cmd = 'git checkout{flag}{branch}'.format(
            flag=' -b ' if auto_create and not branch_exists else ' -f ' if force else ' ',
            branch=branch
        )
        # Command to checkout the repo
        command = Command(cmd)
        command.run()
        if command.returned_errors():
            if command.get_output().find('did not match any file(s) known to git') != -1:
                print('Branch does not exist. Trying to create it...\n')
                # NOTE(review): the recursive call's return code is
                # discarded; a failed creation still falls through to the
                # final `return 0` — confirm intended.
                self.checkout(branch, False, True) # Creating the branch
            else:
                print('Unknown error occurred')
                print(command.get_output())
                return 255
        else:
            print(command.get_output())
            print('Working branch: {branch}'.format(branch=self.branch_current()) + '\n')
        chdir(self.__root_dir) # Get back to previous directory
        return 0
    def stage_changes(self):
        """ Executes a git add command on the working branch """
        chdir(self.__repo_dir)
        # logging the working directory for debug
        print('----- Stage changes: -----')
        # Command to checkout the repo
        command = Command('git add --all')
        command.run()
        chdir(self.__root_dir) # Get back to previous directory
        if command.returned_errors():
            print('Could not stage changes: ' + command.get_output())
            return 255
        else:
            print('Staged all the changes')
            print(command.get_output())
        return 0
    def commit(self, message):
        """ Executes a git commit on the working branch.

        Returns 0 on success, 255 on error, and -1 when there is nothing
        to commit (deliberately not treated as an error).
        """
        chdir(self.__repo_dir)
        # logging the working directory for debug
        print('----- Committing changes: -----')
        # Command to checkout the repo
        # NOTE(review): `message` is interpolated unquoted; a message
        # containing spaces/shell metacharacters will break the command
        # unless the Command wrapper quotes it — verify.
        command = Command('git commit -m {message}'.format(message=message))
        command.run()
        chdir(self.__root_dir) # Get back to previous directory
        if command.returned_errors():
            print('Could not commit changes: ' + command.get_output())
            return 255
        else:
            print('Commit OK')
            output = command.get_output()
            if output.find('nothing to commit, working tree clean') != -1:
                print('There are no changes on the code, so the branch will not be pushed!')
                print(output)
                # No longer return error when no changes are detected
                return -1
        return 0
    def make_pull_request(self, base_branch, head_branch, title="Automated release"):
        """ The method creates a PR on the target repository.

        Skips creation when an open PR with the same title already exists
        for the head branch. Returns 0 on success (or duplicate), else
        the GitHub API status code.
        """
        # logging the working directory for debug
        print('----- Creating a pull request: -----')
        headers = {
            "Accept": "application/vnd.github.v3+json",
            "Content-type": "application/json"
        }
        # Added the version to the title to make it easier to see
        title += ' ' + base_branch
        # Check if a PR is already present in the target branch
        response = requests.get(
            "https://api.github.com/repos/{owner}/{repo}/pulls".format(owner=self.__repo_owner, repo=self.__repo_name),
            auth=HTTPBasicAuth(self.__repo_owner, self.__github_token),
            headers=headers,
            params={"head": "{owner}:{head_branch}".format(owner=self.__repo_owner, head_branch=head_branch)}
        )
        if response.status_code != 200:
            print("Error response: " + response.content.decode("utf-8"))
            return response.status_code
        # Check if we have PR's open for the branch
        pull_requests = response.json()
        if len(pull_requests) > 0:
            for pr in pull_requests:
                if pr.get("title") == title:
                    print("Automated PR already exists")
                    return 0
        # Creating the pull request
        body = {
            "title": title,
            "body": "This release was done because the API spec may have changed",
            "head": "{owner}:{head_branch}".format(owner=self.__repo_owner, head_branch=head_branch),
            "base": base_branch
        }
        response = requests.post(
            "https://api.github.com/repos/{owner}/{repo}/pulls".format(owner=self.__repo_owner, repo=self.__repo_name),
            auth=HTTPBasicAuth(self.__repo_owner, self.__github_token),
            headers=headers,
            json=body
        )
        # GitHub returns 201 Created for a successful PR creation.
        if response.status_code != 201:
            print("Error response: " + response.content.decode("utf-8"))
            return response.status_code
        print("Created the PR")
        return 0
| 33.410494
| 119
| 0.584388
| 1,267
| 10,825
| 4.828729
| 0.166535
| 0.040536
| 0.023374
| 0.020922
| 0.456522
| 0.393756
| 0.363354
| 0.320039
| 0.291435
| 0.22965
| 0
| 0.005449
| 0.304942
| 10,825
| 323
| 120
| 33.513932
| 0.807682
| 0.140416
| 0
| 0.382353
| 0
| 0
| 0.193952
| 0.010225
| 0
| 0
| 0
| 0.003096
| 0
| 1
| 0.058824
| false
| 0
| 0.02451
| 0
| 0.210784
| 0.205882
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a93610cab32e285ce4c6a541e3cc023ff8fe9e1f
| 25,186
|
py
|
Python
|
src/frontend/gui/popupentry.py
|
C2E2-Development-Team/C2E2-Tool
|
36631bfd75c0c0fb56389f13a9aba68cbed1680f
|
[
"MIT"
] | 1
|
2021-10-04T19:56:25.000Z
|
2021-10-04T19:56:25.000Z
|
src/frontend/gui/popupentry.py
|
C2E2-Development-Team/C2E2-Tool
|
36631bfd75c0c0fb56389f13a9aba68cbed1680f
|
[
"MIT"
] | null | null | null |
src/frontend/gui/popupentry.py
|
C2E2-Development-Team/C2E2-Tool
|
36631bfd75c0c0fb56389f13a9aba68cbed1680f
|
[
"MIT"
] | null | null | null |
from tkinter import *
from tkinter.ttk import *
from frontend.gui.widgets import ToggleFrame
from frontend.mod.automaton import *
from frontend.mod.constants import *
from frontend.mod.hyir import *
from frontend.mod.session import Session
class PopupEntry(Toplevel):
    """Base class for modal popup windows.

    Creates a fixed-size Toplevel with a title label, positions it near
    the mouse cursor, and grabs all input so the main window cannot be
    used until the popup is confirmed or canceled.
    """
    def __init__(self, parent):
        Toplevel.__init__(self, parent)
        self.parent = parent
        self.resizable(width=False, height=False)
        # Title label shared by all subclasses (they overwrite the text).
        self.title_label = Label(self, text='C2E2')
        self.title_label.grid(row=0, column=0, columnspan=2)
        # Default dimensions for subclass text boxes.
        self.TEXTBOX_HEIGHT = 10
        self.TEXTBOX_WIDTH = 30
        # Window appears by cursor position
        self.geometry("+%d+%d" % (Session.window.winfo_pointerx(),
                                  Session.window.winfo_pointery()))
        # Prevent interaction with main window until Popup is Confirmed/Canceled
        # Order matters: the window must be visible before grab_set works.
        self.wait_visibility()
        self.focus_set()
        self.grab_set()
class AutomatonEntry(PopupEntry):
    """
    Popup window for adding/deleting Automata from the hybrid system.

    Args:
        parent (obj): Popup's parent object
        hybrid (obj): Hybrid object - should always be Session.hybrid
        action (str): Action to be performed (ADD or DELETE)
        automaton (obj, optional): Automaton to edit/delete; None when adding
    """

    def __init__(self, parent, hybrid, action, automaton=None):
        PopupEntry.__init__(self, parent)
        self.title_label.config(text="Automaton")
        # Guard: this popup may only edit the session-wide hybrid model.
        if hybrid is not Session.hybrid:
            Session.write("ERROR: Attempting to edit non-Session hybrid.\n")
            self._cancel()
        self.parent = parent
        self.hybrid = hybrid
        self.automaton = automaton
        self.action = action
        self.changed = False  # True once an edit/delete is committed
        self._init_widgets()
        if action == EDIT:
            self._load_session()
        if action == DELETE:
            self._disable_fields()

    def _init_widgets(self):
        """ Initialize GUI elements """
        # Name
        Label(self, text="Name:").grid(row=1, column=0, sticky=W)
        self.name = StringVar()
        self.name_entry = Entry(self, textvariable=self.name)
        self.name_entry.grid(row=1, column=1, sticky=E)
        # Buttons
        self.btn_frame = Frame(self)
        self.cancel_btn = Button(self.btn_frame, text="Cancel",
                                 command=self._cancel)
        self.confirm_btn = Button(self.btn_frame, text="Confirm",
                                  command=self._confirm)
        self.cancel_btn.grid(row=0, column=0)
        self.confirm_btn.grid(row=0, column=1)
        self.btn_frame.grid(row=2, column=0, columnspan=2)
        return

    def _load_session(self):
        """ Populate fields from the automaton being edited """
        # Name
        self.name.set(self.automaton.name)
        return

    def _disable_fields(self):
        """ Lock fields and repurpose the confirm button for deletion """
        # Name
        self.name_entry.config(state=DISABLED)
        self.confirm_btn.config(text="DELETE", command=self._delete)
        return

    def _confirm(self):
        """ Dispatch the confirm action based on ADD vs EDIT """
        if(self.action == ADD):
            self._confirm_add()
        else:
            self._confirm_edit()
        return

    def _confirm_add(self):
        """ Add a new automaton with the entered name """
        self.hybrid.add_automaton(Automaton(self.name.get()))
        Session.write("Automaton Entry Confirmed.\n")
        self.changed = True
        self.destroy()
        return

    def _confirm_edit(self):
        """ Rename the existing automaton """
        self.automaton.name = self.name.get()
        Session.write("Automaton Entry Confirmed.\n")
        self.changed = True
        self.destroy()
        return

    def _delete(self):
        """ Remove the automaton after a yes/no confirmation dialog """
        # NOTE(review): `messagebox` must come from one of the star imports
        # above — `from tkinter import *` alone does not provide it; verify.
        if messagebox.askyesno("Delete Automaton",
                               "Delete " + self.automaton.name + "?"):
            self.hybrid.remove_automaton(self.automaton)
            Session.write("Automaton Deleted.\n")
            self.changed = True
        else:
            Session.write("Automaton Deletion Canceled.\n")
            # BUG FIX: was `self.chagned = False` (typo), which created a
            # stray attribute instead of clearing the real flag.
            self.changed = False
        self.destroy()
        return

    def _cancel(self):
        """ Cancels changes made in popup """
        Session.write("Automaton Entry Canceled.\n")
        self.changed = False
        self.destroy()
        return
class VariableEntry(PopupEntry):
    """
    Popup window for Variable editing.
    The VariableEntry class is designed to be the popup displayed to users when
    editing their model's variables. It controls the GUI elements of the popup,
    and interacts with the Session variables to commit changes to the currently
    active model.
    Args:
        parent (obj): Popup's parent object
        automaton (obj): Automaton whose variables are being edited
    """
    def __init__(self, parent, automaton):
        PopupEntry.__init__(self, parent)
        self.title_label.config(text="Variables")
        self.automaton = automaton
        self.changed = False  # True once _confirm commits to the Session
        # For readability, options differ from those stored in the var object
        self.scope_options = ('Local', 'Input', 'Output')
        self._init_widgets()
        self._load_session()
    def _init_widgets(self):
        """ Initialize GUI elements (header row, row lists, button frame) """
        self.title_label.grid(row=0, column=0, columnspan=4)
        Label(self, text="Name").grid(row=1, column=0)
        Label(self, text="Thin").grid(row=1, column=1)
        Label(self, text="Type").grid(row=1, column=2)
        Label(self, text="Scope").grid(row=1, column=3)
        # Parallel per-row variable lists for an unknown number of inputs
        self.names = []   # StringVar()
        self.thins = []   # BooleanVar()
        self.types = []   # StringVar()
        self.scopes = []  # StringVar()
        # Number of rows added so far; also the index for the next new row
        self.var_index = 0
        # Buttons
        self.btn_frame = Frame(self)
        self.cancel_btn = Button(self.btn_frame, text="Cancel",
                                 command=self._cancel)
        self.add_btn = Button(self.btn_frame, text="Add",
                              command=self._add_row)
        self.confirm_btn = Button(self.btn_frame, text="Confirm",
                                  command=self._confirm)
        self.cancel_btn.grid(row=0, column=0)
        self.add_btn.grid(row=0, column=1)
        self.confirm_btn.grid(row=0, column=2)
        return
    def _load_session(self):
        """ Load current model's values. """
        scope_dict = {  # Convert Variable scopes to options displayed to user
            LOCAL: 'Local',    # LOCAL = 'LOCAL_DATA'
            INPUT: 'Input',    # INPUT = 'INPUT_DATA'
            OUTPUT: 'Output'   # OUTPUT = 'OUTPUT_DATA'
        }
        # Add a blank row if there are no variables (happens with new automata)
        if len(self.automaton.vars) == 0 and len(self.automaton.thinvars) == 0:
            self._add_row()
            return
        # Regular variables first, then thin variables, one GUI row each.
        # _add_row() increments var_index, hence the var_index-1 accesses.
        for var in self.automaton.vars:
            self._add_row()
            self.names[self.var_index-1].set(var.name)
            self.thins[self.var_index-1].set(False)
            self.types[self.var_index-1].set(var.type)
            self.scopes[self.var_index-1].set(scope_dict[var.scope])
        for var in self.automaton.thinvars:
            self._add_row()
            self.names[self.var_index-1].set(var.name)
            self.thins[self.var_index-1].set(True)
            self.types[self.var_index-1].set(var.type)
            self.scopes[self.var_index-1].set(scope_dict[var.scope])
        return
    def _add_row(self):
        """
        Add a new variable row to VariableEntry popup.
        Grid new entry widgets and regrid button frame.
        """
        self.names.append(StringVar())
        self.thins.append(BooleanVar())
        self.types.append(StringVar())
        self.scopes.append(StringVar())
        # Name
        Entry(self, textvariable=self.names[self.var_index])\
            .grid(row=self.var_index+2, column=0)
        # Thin
        Checkbutton(self, var=self.thins[self.var_index])\
            .grid(row=self.var_index+2, column=1)
        # Type (defaults to REAL)
        self.types[self.var_index].set(REAL)
        OptionMenu(self, self.types[self.var_index],
                   self.types[self.var_index].get(),
                   *VARIABLE_TYPES)\
            .grid(row=self.var_index+2, column=2)
        # Scope (defaults to 'Local')
        self.scopes[self.var_index].set('Local')
        OptionMenu(self, self.scopes[self.var_index],
                   self.scopes[self.var_index].get(),
                   *self.scope_options)\
            .grid(row=self.var_index+2, column=3)
        # Keep the button frame below the last variable row
        self.btn_frame.grid(row=self.var_index+3, columnspan=4)
        self.var_index += 1
        return
    def _confirm(self):
        """ Commit changes to Session. Does NOT save these changes. """
        # Rebuild both variable lists from scratch from the GUI rows
        self.automaton.reset_vars()
        self.automaton.reset_thinvars()
        scope_dict = {  # Convert displayed scopes to values stored
            'Local': LOCAL,    # LOCAL = 'LOCAL_DATA'
            'Input': INPUT,    # INPUT = 'INPUT_DATA'
            'Output': OUTPUT   # OUTPUT = 'OUTPUT_DATA'
        }
        for i in range(0, self.var_index):
            name = (self.names[i].get()).strip()
            thin = self.thins[i].get()
            type_ = self.types[i].get()  # Reserved word
            scope = scope_dict[self.scopes[i].get()]
            if not name:  # Delete variables by erasing their name
                continue
            if thin:
                self.automaton.add_thinvar(
                    Variable(name=name, type=type_, scope=scope))
            else:
                self.automaton.add_var(
                    Variable(name=name, type=type_, scope=scope))
        Session.write("Variable Entry Confirmed.\n")
        self.changed = True
        self.destroy()
        return
    def _cancel(self):
        """ Cancels changes made in popup """
        # NOTE(review): message lacks the trailing '\n' used by the other
        # popups' log messages - confirm whether that is intentional.
        Session.write("Variable Entry Canceled.")
        self.changed = False
        self.destroy()
        return
class ModeEntry(PopupEntry):
    """
    Popup window for Mode adding, editing, and deleting.
    The ModeEntry class is designed to be the popup displayed to users when
    editing their model's Modes, or adding/deleting Modes. It controls the GUI
    elements of the popup, and interacts with the Session variables to commit
    changes to the currently active models.
    Args:
        parent (obj): Popup's parent object
        automaton (obj): Automaton the mode belongs to
        action (str): Action to be performed (constants ADD, EDIT, or DELETE)
        mode (Mode obj): Mode to be edited or deleted, not required for ADD
    """
    def __init__(self, parent, automaton, action=ADD, mode=None):
        PopupEntry.__init__(self, parent)
        self.title_label.config(text='Mode')
        self.automaton = automaton
        self.mode = mode
        self.action = action
        self.mode_dict = automaton.mode_dict  # mode_dict[mode.id] = mode.name
        self.changed = False  # True once a change has been committed
        self._init_widgets()
        if(action == ADD):
            self._load_new()
        else:
            self._load_session()
        if(action == DELETE):
            self._disable_fields()
    def _init_widgets(self):
        """ Initialize GUI elements """
        # Name
        Label(self, text='Name:').grid(row=1, column=0, sticky=W)
        self.name = StringVar()
        self.name_entry = Entry(self, textvariable=self.name)
        self.name_entry.grid(row=1, column=1, sticky=E)
        # ID (display only - entry is permanently DISABLED)
        Label(self, text='ID:').grid(row=2, column=0, sticky=W)
        self.mode_id = IntVar()
        self.id_entry = Entry(self, textvariable=self.mode_id, state=DISABLED)
        self.id_entry.grid(row=2, column=1, sticky=E)
        # Initial
        Label(self, text='Initial:').grid(row=3, column=0, sticky=W)
        self.initial = BooleanVar()
        self.initial_checkbutton = Checkbutton(self, var=self.initial)
        self.initial_checkbutton.grid(row=3, column=1)
        # Flows
        self.flow_toggle = ToggleFrame(self, text='Flows:')
        self.flow_toggle.grid(row=4, column=0, columnspan=2, sticky=E+W)
        # Invariants
        self.invariant_toggle = ToggleFrame(self, text='Invariants:')
        self.invariant_toggle.grid(row=5, column=0, columnspan=2, sticky=E+W)
        # Buttons
        self.btn_frame = Frame(self)
        self.cancel_btn = Button(self.btn_frame,
                                 text='Cancel',
                                 command=self._cancel)
        self.confirm_btn = Button(self.btn_frame,
                                  text='Confirm',
                                  command=self._confirm)
        self.cancel_btn.grid(row=0, column=0)
        self.confirm_btn.grid(row=0, column=1)
        self.btn_frame.grid(row=8, column=0, columnspan=2)
        return
    def _load_session(self):
        """ Load selected mode's Session values """
        # Name
        self.name.set(self.mode.name)
        # ID
        self.mode_id.set(self.mode.id)
        # Initial
        self.initial.set(self.mode.initial)
        # Flows - show one blank row when the mode has none
        if(len(self.mode.dais) < 1):
            self.flow_toggle.add_row()
        else:
            for dai in self.mode.dais:
                self.flow_toggle.add_row(text=dai.raw)
        self.flow_toggle.toggle()
        # Invariants - show one blank row when the mode has none
        if(len(self.mode.invariants) < 1):
            self.invariant_toggle.add_row()
        else:
            for invariant in self.mode.invariants:
                self.invariant_toggle.add_row(text=invariant.raw)
        self.invariant_toggle.toggle()
        return
    def _load_new(self):
        """ Load blank row and show toggle fields"""
        self.flow_toggle.add_row()
        self.flow_toggle.toggle()
        self.invariant_toggle.add_row()
        self.invariant_toggle.toggle()
        # New modes get the automaton's next free ID
        self.mode_id.set(self.automaton.next_mode_id)
        return
    def _disable_fields(self):
        """ Disable fields and reconfigure confirm button for deletion """
        self.name_entry.config(state=DISABLED)
        self.id_entry.config(state=DISABLED)
        self.initial_checkbutton.config(state=DISABLED)
        self.flow_toggle.disable_fields()
        self.invariant_toggle.disable_fields()
        self.confirm_btn.config(text='DELETE', command=self._delete)
        return
    def _confirm(self):
        """ Confirm button callback - call confirm method based on action """
        if(self.action == ADD):
            self._confirm_add()
        else:
            self._confirm_edit()
        return
    def _confirm_add(self):
        """ Confirm new mode addition """
        # Create an empty Mode, populate it via _confirm_edit, then add it
        self.mode = Mode()
        self._confirm_edit()
        self.automaton.add_mode(self.mode)
        return
    def _confirm_edit(self):
        """ Commit changes to Session. Does NOT save changes """
        # Name
        self.mode.name = self.name.get()
        # ID
        self.mode.id = self.mode_id.get()
        # Initial
        self.mode.initial = self.initial.get()
        # Flows (rows left blank are skipped, effectively deleting them)
        self.mode.clear_dais()
        for raw_text in self.flow_toggle.get_rows():
            if((raw_text.get()).strip()):
                self.mode.add_dai(DAI(raw_text.get()))
        # Invariants (rows left blank are skipped)
        self.mode.clear_invariants()
        for raw_text in self.invariant_toggle.get_rows():
            if((raw_text.get()).strip()):
                self.mode.add_invariant(Invariant(raw_text.get()))
        Session.write("Mode Entry Confirmed.\n")
        self.changed = True
        self.destroy()
        return
    def _delete(self):
        """ Delete active Mode """
        # Build list of transitions that would be deleted
        del_trans = []
        for tran in self.automaton.transitions:
            if((tran.source == self.mode.id) or \
               (tran.destination == self.mode.id)):
                del_trans.append(tran)
        # Messagebox warning user of transitions that also will be deleted
        msg = "Delete " + self.mode.name + "(" + str(self.mode.id) + ") ?\n"
        msg += "WARNING: The following transitions will also be deleted:\n"
        for tran in del_trans:
            msg += tran.name + '\n'
        if(messagebox.askyesno('Delete Mode', msg)):
            self.automaton.remove_mode(self.mode)
            for tran in del_trans:
                self.automaton.remove_transition(tran)
            Session.write("Mode Deleted.\n")
            self.changed = True
        else:
            Session.write("Mode Deletion Canceled.\n")
            self.changed = False
        self.destroy()
        return
    def _cancel(self):
        """ Cancels changes made in popup """
        Session.write("Mode Entry Canceled.\n")
        self.changed = False
        self.destroy()
        return
class TransitionEntry(PopupEntry):
    """
    Popup window for Transition adding, editing, and deleting.
    The TransitionEntry class is designed to be the popup displayed to users
    when editing their model's Transitions, or adding/deleting Transitions.
    It controls the GUI elements of the popup, and interacts with the Session
    variables to commit changes to the currently active models.
    Args:
        parent (obj): Popup's parent object
        automaton (obj): Automaton the transition belongs to
        action (str): Action to be performed (constants ADD, EDIT, or DELETE)
        transition (Transition obj): Transition to be edited or deleted, not
            required for ADD action
    """
    def __init__(self, parent, automaton, action=ADD, transition=None):
        PopupEntry.__init__(self, parent)
        self.title_label.config(text='Transition')
        self.automaton = automaton
        self.transition = transition
        self.mode_dict = automaton.mode_dict  # mode_dict[mode.id] = mode.name
        self.action = action
        self.changed = False  # True once a change has been committed
        # Load Mode list for Source/Destination Option Menus
        self.mode_list = []
        for mode_id in self.mode_dict:
            self.mode_list.append(self.mode_dict[mode_id])
        self._init_widgets()
        if(action == ADD):
            self._load_new()
        else:
            self._load_session()
        if(action == DELETE):
            self._disable_fields()
    def _init_widgets(self):
        """ Initialize GUI elements """
        # Transition Label ("Source -> Destination"), kept current by
        # _callback_mode_select
        self.transition_str = StringVar()
        Label(self, textvariable=self.transition_str).grid(row=1, column=0,
                                                           columnspan=2)
        # ID (display only)
        Label(self, text='ID:').grid(row=2, column=0, sticky=W)
        self.transition_id = IntVar()
        self.id_entry = Entry(self, textvariable=self.transition_id,
                              state=DISABLED)
        self.id_entry.grid(row=2, column=1, sticky=E)
        # Source and Destination
        Label(self, text='Source:').grid(row=3, column=0, sticky=W)
        Label(self, text='Destination:').grid(row=4, column=0, sticky=W)
        self.source_str = StringVar()
        self.destination_str = StringVar()
        self.source_str.trace_variable('w', self._callback_mode_select)
        self.destination_str.trace_variable('w', self._callback_mode_select)
        # Arbitrarily set default source/destination.
        # These are overwritten to be correct in _load_session when appropriate
        self.source_option_menu = OptionMenu(self,
                                             self.source_str,
                                             self.mode_list[0],
                                             *self.mode_list)
        self.source_option_menu.grid(row=3, column=1, sticky=W+E)
        self.destination_option_menu = OptionMenu(self,
                                                  self.destination_str,
                                                  self.mode_list[0],
                                                  *self.mode_list)
        self.destination_option_menu.grid(row=4, column=1, sticky=W+E)
        # Guards
        Label(self, text='Guards:').grid(row=5, column=0, sticky=W)
        self.guard_str = StringVar()
        self.guard_entry = Entry(self, textvariable=self.guard_str)
        self.guard_entry.grid(row=5, column=1, sticky=E)
        # Actions
        self.action_toggle = ToggleFrame(self, text='Actions:')
        self.action_toggle.grid(row=6, column=0, columnspan=2, sticky=E+W)
        # Buttons
        self.btn_frame = Frame(self)
        self.cancel_btn = Button(self.btn_frame,
                                 text='Cancel',
                                 command=self._cancel)
        self.confirm_btn = Button(self.btn_frame, text='Confirm',
                                  command=self._confirm)
        self.cancel_btn.grid(row=0, column=0)
        self.confirm_btn.grid(row=0, column=1)
        self.btn_frame.grid(row=7, column=0, columnspan=2)
        return
    def _load_session(self):
        """ Load selected transition's Session values """
        # ID
        self.transition_id.set(self.transition.id)
        # Source and Destination
        self.source_str.set(self.mode_dict[self.transition.source])
        self.destination_str.set(self.mode_dict[self.transition.destination])
        # Guard
        self.guard_str.set(self.transition.guard.raw)
        # Actions - show one blank row when the transition has none
        if len(self.transition.actions) == 0:
            self.action_toggle.add_row()
        else:
            for action in self.transition.actions:
                self.action_toggle.add_row(text=action.raw)
        self.action_toggle.toggle()
        return
    def _load_new(self):
        """ Load blank rows and show toggle fields """
        self.action_toggle.add_row()
        self.action_toggle.toggle()
        self.transition_id.set(len(self.automaton.transitions))
        return
    def _disable_fields(self):
        """ Disable fields and reconfigure confirm button for deletion """
        self.id_entry.config(state=DISABLED)
        self.source_option_menu.config(state=DISABLED)
        self.destination_option_menu.config(state=DISABLED)
        self.guard_entry.config(state=DISABLED)
        self.action_toggle.disable_fields()
        self.confirm_btn.config(text='DELETE', command=self._delete)
        return
    def _callback_mode_select(self, *args):
        """ OptionMenu callback, updates transition label at top of window """
        self.transition_str.set(
            self.source_str.get() + " -> " + self.destination_str.get())
        return
    def _selected_mode_ids(self):
        """ Map the selected source/destination mode names back to mode IDs.
        Returns:
            (src, dest): mode IDs, either None when no mode in mode_dict
            carries the selected name.
        BUGFIX: uses two independent comparisons per mode (rather than the
        previous if/elif chain) so that self-loop transitions, where source
        and destination are the same mode, resolve BOTH endpoints. The old
        code left 'dest' unbound in that case.
        """
        src = dest = None
        for mode_id in self.mode_dict:
            if self.mode_dict[mode_id] == self.source_str.get():
                src = mode_id
            if self.mode_dict[mode_id] == self.destination_str.get():
                dest = mode_id
        return (src, dest)
    def _confirm(self):
        """ Confirm button callback - call confirm method based on action """
        if(self.action == ADD):
            self._confirm_add()
        else:
            self._confirm_edit()
        return
    def _confirm_add(self):
        """ Confirm new transition addition """
        # ID
        trans_id = self.transition_id.get()
        # Source and Destination
        src, dest = self._selected_mode_ids()
        # Guard
        guard = Guard(self.guard_str.get())
        # Actions (rows left blank are skipped)
        actions = []
        for action in self.action_toggle.get_rows():
            if((action.get()).strip()):
                actions.append(Action(action.get()))
        transition = Transition(guard, actions, trans_id, src, dest)
        self.automaton.add_transition(transition)
        Session.write("Transition Entry Confirmed.\n")
        self.changed = True
        self.destroy()
        return
    def _confirm_edit(self):
        """ Commits changes to Session. Does NOT save changes """
        # ID
        self.transition.id = self.transition_id.get()
        # Source and Destination - only overwrite endpoints that resolved to
        # a known mode (preserves the old behavior of leaving unmatched
        # endpoints untouched)
        src, dest = self._selected_mode_ids()
        if src is not None:
            self.transition.source = src
        if dest is not None:
            self.transition.destination = dest
        # Guard
        self.transition.guard = Guard(self.guard_str.get())
        # Actions (rows left blank are skipped)
        self.transition.clear_actions()
        for action in self.action_toggle.rows:
            if((action.get()).strip()):
                self.transition.add_action(Action(action.get()))
        Session.write("Transition Entry Confirmed.\n")
        self.changed = True
        self.destroy()
        return
    def _delete(self):
        """ Delete active Transition """
        if messagebox.askyesno('Delete Transition', 'Delete ' + \
                self.transition_str.get() + '?'):
            self.automaton.remove_transition(self.transition)
            Session.write("Transition Deleted.\n")
            self.changed = True
        else:
            Session.write("Transition Deletion Canceled.\n")
            self.changed = False
        self.destroy()
        return
    def _cancel(self):
        """ Cancels changes made in popup """
        Session.write("Transition Entry Canceled.\n")
        self.changed = False
        self.destroy()
        return
| 31.017241
| 92
| 0.581077
| 2,959
| 25,186
| 4.800608
| 0.095978
| 0.025343
| 0.020275
| 0.010841
| 0.590356
| 0.514185
| 0.501443
| 0.441042
| 0.418515
| 0.379585
| 0
| 0.006863
| 0.311522
| 25,186
| 812
| 93
| 31.017241
| 0.812341
| 0.165449
| 0
| 0.50533
| 0
| 0
| 0.04145
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078891
| false
| 0
| 0.014925
| 0
| 0.17484
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a9363e554d610fb201aa75a84231ad4a9b284d4a
| 749
|
py
|
Python
|
MatplotLib/9_PlottingLiveDataInRealTime.py
|
ErfanRasti/PythonCodes
|
5e4569b760b60c9303d5cc68650a2448c9065b6d
|
[
"MIT"
] | 1
|
2021-10-01T09:59:22.000Z
|
2021-10-01T09:59:22.000Z
|
MatplotLib/9_PlottingLiveDataInRealTime.py
|
ErfanRasti/PythonCodes
|
5e4569b760b60c9303d5cc68650a2448c9065b6d
|
[
"MIT"
] | null | null | null |
MatplotLib/9_PlottingLiveDataInRealTime.py
|
ErfanRasti/PythonCodes
|
5e4569b760b60c9303d5cc68650a2448c9065b6d
|
[
"MIT"
] | null | null | null |
"""In this code we wanna plot real-time data."""
# import random
from itertools import count
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.animation import FuncAnimation
plt.style.use('fivethirtyeight')
# Leftover accumulators kept for parity with the original script (unused)
x_vals = []
y_vals = []
index = count()
def animate(i):
    """Plot the graphs with real-time data."""
    # Re-read the whole CSV on every frame and redraw both channels
    frame = pd.read_csv('data/data_6.csv')
    plt.cla()
    plt.plot(frame['x_value'], frame['total_1'], label='Channel 1')
    plt.plot(frame['x_value'], frame['total_2'], label='Channel 2')
    plt.legend(loc='upper left', bbox_to_anchor=(1.05, 1))
    plt.tight_layout()
# Keep a reference to the animation so it is not garbage-collected
ani = FuncAnimation(plt.gcf(), animate, interval=1000)
plt.tight_layout()
plt.show()
| 20.805556
| 59
| 0.635514
| 111
| 749
| 4.189189
| 0.558559
| 0.034409
| 0.051613
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.02906
| 0.218959
| 749
| 35
| 60
| 21.4
| 0.765812
| 0.125501
| 0
| 0.095238
| 0
| 0
| 0.129721
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.190476
| 0
| 0.238095
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a937efbd3e11f0b5981cad6ed7badf54a6c3173d
| 10,688
|
py
|
Python
|
scripts/move_run.py
|
EdinburghGenomics/hesiod
|
70df28714878bd57bd2e315b5b3a60f4dc56e1e3
|
[
"BSD-2-Clause"
] | 1
|
2020-03-12T04:27:26.000Z
|
2020-03-12T04:27:26.000Z
|
scripts/move_run.py
|
EdinburghGenomics/hesiod
|
70df28714878bd57bd2e315b5b3a60f4dc56e1e3
|
[
"BSD-2-Clause"
] | null | null | null |
scripts/move_run.py
|
EdinburghGenomics/hesiod
|
70df28714878bd57bd2e315b5b3a60f4dc56e1e3
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python3
import os, sys, re
import logging as L
import shutil
from argparse import ArgumentParser, ArgumentDefaultsHelpFormatter
from pprint import pformat, pprint
# Truthy when non-empty; main() appends True to enable dry-run mode, and the
# move_* functions consult it before touching the filesystem.
DRY_RUN = []
# Per-category counts of successfully moved items, reported by main() on exit
TALLIES = dict( runs = 0,
                fastqdirs = 0 )
# Could import this from hesiod/__init__.py but I don't want the deps.
def glob(p):
    """Glob with consistent (sorted) result order and '~' expansion.

    Behaves like glob.glob() but expands a leading '~' in the pattern,
    strips any trailing '/' from each match, and always returns the
    matches sorted. Replaces the original self-replacing factory trick
    with a plain function of identical interface.

    Args:
        p (str): glob pattern, possibly starting with '~'

    Returns:
        list of str: sorted matching paths (empty when nothing matches)
    """
    from glob import glob as _glob
    return sorted(f.rstrip('/') for f in _glob(os.path.expanduser(p)))
def main(args):
    """Entry point: configure logging, honour --no_act, dispatch the
    selected sub-command, then report the move tallies."""
    # Verbose format only in debug mode; both use str.format-style fields
    log_level = L.DEBUG if args.debug else L.INFO
    log_format = "{levelname}: {message}" if args.debug else "{message}"
    L.basicConfig( level = log_level,
                   format = log_format,
                   style = '{' )
    if args.no_act:
        DRY_RUN.append(True)
    try:
        args.func(args)
    except AttributeError:
        # No sub-command was given - force a full help message
        parse_args(['--help'])
    for noun, moved in TALLIES.items():
        if moved:
            L.info("Moved {} {}.".format(moved, noun))
def mv_main(args):
    """Move one or more runs to a given location.

    Each item in args.runs may be a run dir or a fastqdata dir; anything
    else is reported and skipped.
    """
    # Validate the destination directory up front
    real_dest = os.path.realpath(args.to_dir)
    L.info("Moving to {}".format(real_dest))
    if not os.path.isdir(real_dest):
        L.error("No such directory {}".format(real_dest))
        return
    # Process each requested run independently
    for src in args.runs:
        run_name = os.path.basename(src.rstrip('/'))
        dest_name = os.path.join(real_dest, run_name)
        # Refuse to clobber an existing destination
        if os.path.exists(dest_name):
            L.error("There is already a directory {}".format(dest_name))
            continue
        # Moving a directory into itself is caught by shutil.move for us.
        # Decide what kind of directory this is and dispatch accordingly.
        if not os.path.isdir(src):
            L.error("No such directory {}".format(src))
            continue
        if is_rundir(src):
            move_rundir(src, dest_name)
        elif is_fastqdir(src):
            move_fastqdir(src, dest_name)
        else:
            L.error("Not a valid run dir or fastq dir {}".format(src))
def is_rundir(somedir):
    """Return True when somedir looks like a run dir.

    Run dirs are identified by having a pipeline/output symlink.
    """
    marker = os.path.join(somedir, 'pipeline', 'output')
    return os.path.islink(marker)
def is_fastqdir(somedir):
    """Return True when somedir looks like a fastqdata dir.

    Fastqdata dirs are identified by having a rundata symlink.
    """
    marker = os.path.join(somedir, 'rundata')
    return os.path.islink(marker)
def move_rundir(arun, dest_name):
    """Given a run and a destination, move it.
    The pipeline/output and pipeline/output/rundata symlinks will be fixed.
    Args:
        arun (str): path of the run directory to move
        dest_name (str): full destination path (including the run name)
    Honours the global DRY_RUN flag (log-only) and increments
    TALLIES['runs'] on completion.
    """
    # This should be already done by the caller. Doing it here is problematic for
    # dry runs where the directory may in fact not exist!
    #dest_name = os.path.realpath(dest_name)
    # Read the pipeline/output symlink. This may be a relative link so we always
    # convert it to an absolute link by putting it through os.path.realpath()
    output_link = os.path.join(arun, 'pipeline', 'output')
    output_link_dest = os.readlink(output_link)
    output_link_abs = os.path.realpath(output_link)
    if not os.path.isdir(output_link_abs):
        # The link is broken. So we'll not touch it.
        L.warning("{} link is invalid. Will not modify links.".format(output_link))
        output_link_abs = None
    else:
        # rundata_link needs to be the real path of the link (as opposed to the real path of
        # where the link points!)
        rundata_link = os.path.join(output_link_abs, 'rundata')
        rundata_link_dest = os.readlink(rundata_link)
        rundata_link_abs = os.path.realpath(rundata_link)
        # Now the rundata_link should point back to arun or we're in trouble!
        if not rundata_link_abs == os.path.realpath(arun):
            L.error("{} link does not point back to {}".format(rundata_link, arun))
            return
    # OK we're ready to move the run
    L.info("shutil.move({!r}, {!r})".format(arun, dest_name))
    if not DRY_RUN:
        shutil.move(arun, dest_name)
    # And this changes where the output link is
    output_link = os.path.join(dest_name, 'pipeline', 'output')
    # Re-point pipeline/output only when it was valid and was not already
    # an absolute link to the right place
    if output_link_abs and output_link_abs != output_link_dest:
        L.warning("Converting pipeline/output link to an absolute path")
        L.info("os.symlink({!r}, {!r})".format(output_link_abs, output_link))
        if not DRY_RUN:
            os.unlink(output_link)
            os.symlink(output_link_abs, output_link)
    # And finally, rundata_link must change unless output_link was dangling.
    if output_link_abs:
        L.info("os.symlink({!r}, {!r})".format(dest_name, rundata_link))
        if not DRY_RUN:
            os.unlink(rundata_link)
            os.symlink(dest_name, rundata_link)
    L.info("Renamed {} to {}{}".format(arun, dest_name, " [DRY_RUN]" if DRY_RUN else ""))
    TALLIES['runs'] += 1
# Note - I could abstract this function and avoid copy-paste but it would be a lot less legible.
def move_fastqdir(afqd, dest_name):
    """Given a fastqdata directory and a destination, move it.
    The rundata/pipeline/output and rundata symlinks will be fixed.
    Args:
        afqd (str): path of the fastqdata directory to move
        dest_name (str): full destination path (including the dir name)
    Honours the global DRY_RUN flag (log-only) and increments
    TALLIES['fastqdirs'] on completion. Mirror image of move_rundir().
    """
    # This should be already done by the caller.
    dest_name = os.path.realpath(dest_name)
    # Read the rundata symlink. This may be a relative link so we always
    # convert it to an absolute link by putting it through os.path.realpath()
    rundata_link = os.path.join(afqd, 'rundata')
    rundata_link_dest = os.readlink(rundata_link)
    rundata_link_abs = os.path.realpath(rundata_link)
    if not os.path.isdir(rundata_link_abs):
        # The link is broken. So we'll not touch it.
        L.warning("{} link is invalid. Will not modify links.".format(rundata_link))
        rundata_link_abs = None
    else:
        # output_link needs to be the real path of the link (as opposed to the real path of
        # where the link points!)
        output_link = os.path.join(rundata_link_abs, 'pipeline', 'output')
        output_link_dest = os.readlink(output_link)
        output_link_abs = os.path.realpath(output_link)
        # Now the output_link should point back to afqd or we're in trouble!
        if not output_link_abs == os.path.realpath(afqd):
            L.error("{} link does not point back to {}".format(output_link, afqd))
            return
    # OK we're ready to move the run
    L.info("shutil.move({!r}, {!r})".format(afqd, dest_name))
    if not DRY_RUN:
        shutil.move(afqd, dest_name)
    # And this changes where the rundata link is
    rundata_link = os.path.join(dest_name, 'rundata')
    # Re-point rundata only when it was valid and was not already an
    # absolute link to the right place
    if rundata_link_abs and rundata_link_abs != rundata_link_dest:
        L.warning("Converting rundata link to an absolute path")
        L.info("os.symlink({!r}, {!r})".format(rundata_link_abs, rundata_link))
        if not DRY_RUN:
            os.unlink(rundata_link)
            os.symlink(rundata_link_abs, rundata_link)
    # And finally, output_link must change unless rundata_link was dangling.
    if rundata_link_abs:
        L.info("os.symlink({!r}, {!r})".format(dest_name, output_link))
        if not DRY_RUN:
            os.unlink(output_link)
            os.symlink(dest_name, output_link)
    L.info("Renamed {} to {}{}".format(afqd, dest_name, " [DRY_RUN]" if DRY_RUN else ""))
    TALLIES['fastqdirs'] += 1
def rebatch_main(args):
    """Performs a batch of move_rundir operations to reflect a desired PROM_RUNS_BATCH mode.
    Rebatching always happens in the CWD.
    Args:
        args: parsed arguments; args.mode is one of 'year', 'month', 'none'
    Finds run dirs laid out in any of the OTHER batch modes, moves each to
    the layout for args.mode, then removes any directories left empty.
    """
    # Glob patterns for the three supported directory layouts
    runglobs = dict( year = '0000/00000000_*/',
                     month = '0000-00/00000000_*/',
                     none = '00000000_*/' )
    # We need to search for directories matching patterns other than args.mode
    scanglobs = [ v.replace('0', '[0-9]') for k, v in runglobs.items() if k != args.mode ]
    # Now actually look for candidates to rename.
    runs_found = [ d for p in scanglobs for d in glob(p) ]
    L.debug("{} directories match the glob patterns {}".format(len(runs_found), scanglobs))
    runs_found = [ d for d in runs_found if is_rundir(d) ]
    L.debug("{} of these look like actual runs".format(len(runs_found)))
    if not runs_found:
        L.error("Nothing suitable found to rebatch.")
        return
    all_run_bases = set()
    for arun in runs_found:
        # See where it is now.
        run_base, run_name = os.path.split(arun)
        # Work out where it belongs. Run names start with a YYYYMMDD date,
        # so the year/month subdir is sliced straight off the name.
        subdir = dict( year = '{}'.format(run_name[0:4]),
                       month = '{}-{}'.format(run_name[0:4], run_name[4:6]),
                       none = '' )[args.mode]
        # Remember the run base for later
        if run_base:
            all_run_bases.add(run_base)
        # Make a home for it
        if subdir:
            try:
                if not DRY_RUN:
                    os.mkdir(subdir)
                L.debug("Created subdir {}".format(subdir))
            except OSError:
                # Presumably it exists
                pass
        # Finally move the thing.
        dest_name = os.path.join(os.path.realpath('.'), subdir, run_name)
        move_rundir(arun, dest_name)
    # After renaming all, clean empty directories.
    for d in all_run_bases:
        try:
            if not DRY_RUN:
                os.rmdir(d)
            L.debug("Removed now-empty directory {}".format(d))
        except OSError:
            # Probably not empty.
            pass
def parse_args(*args):
    """Declare the command-line interface and parse the arguments.

    Defines two sub-commands ('mv' and 'rebatch') plus global flags, and
    returns the parsed namespace. With no arguments, sys.argv is parsed.
    """
    description = """Moves a Hesiod rundir or fastqdir, or else bulk moves all directories
to an alternative PROM_RUNS_BATCH mode.
"""
    parser = ArgumentParser( description=description,
                             formatter_class = ArgumentDefaultsHelpFormatter )
    sparsers = parser.add_subparsers()
    # 'mv' sub-command - move one or more named runs
    mv = sparsers.add_parser('mv', help="Move a rundir or fastqdir")
    mv.add_argument('-t', '--to_dir', default='.')
    mv.add_argument('runs', nargs='+')
    mv.set_defaults(func=mv_main)  # as suggested in the docs.
    # 'rebatch' sub-command - re-lay-out every run in the CWD
    rebatch = sparsers.add_parser('rebatch', help="Rebatch all rundirs in CWD")
    rebatch.add_argument('mode', choices='year month none'.split())
    rebatch.set_defaults(func=rebatch_main)
    # Global flags, shared by both sub-commands
    parser.add_argument("-d", "--debug", action="store_true",
                        help="Print more verbose debugging messages.")
    parser.add_argument("-n", "--no_act", action="store_true",
                        help="Dry run only.")
    return parser.parse_args(*args)
# Script entry point: parse the command line and dispatch
if __name__ == "__main__":
    main(parse_args())
| 37.900709
| 96
| 0.625561
| 1,507
| 10,688
| 4.288653
| 0.202389
| 0.054154
| 0.025994
| 0.013616
| 0.420084
| 0.337769
| 0.289339
| 0.273867
| 0.264892
| 0.219712
| 0
| 0.006104
| 0.264222
| 10,688
| 281
| 97
| 38.035587
| 0.815743
| 0.239989
| 0
| 0.224138
| 0
| 0
| 0.157237
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.051724
| false
| 0.011494
| 0.034483
| 0
| 0.132184
| 0.005747
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a9386e5cb5a9cdff5dcde3ebc831677dfecb0b4f
| 1,478
|
py
|
Python
|
run.py
|
pacyu/visualize
|
0f5523ea5181af7972abb2534bb0fa8af0519125
|
[
"MIT"
] | 5
|
2020-03-01T09:24:57.000Z
|
2020-10-14T07:52:22.000Z
|
run.py
|
yomikochan/visualize
|
0f5523ea5181af7972abb2534bb0fa8af0519125
|
[
"MIT"
] | null | null | null |
run.py
|
yomikochan/visualize
|
0f5523ea5181af7972abb2534bb0fa8af0519125
|
[
"MIT"
] | 5
|
2020-02-28T14:57:25.000Z
|
2020-10-14T07:59:34.000Z
|
import audio_visual
import argparse
import sys
# Command-line front end: parse options, build the visualizer, and launch
# the requested effect. A '-f/--filename' selects the music_* variant of
# each effect; otherwise the audio_* (live input) variant is used.
if __name__ == "__main__":
    parser = argparse.ArgumentParser(prog='Audio visualization', conflict_handler='resolve')
    parser.add_argument('-e', '--effect',
                        help='visualization effect: 1d or 2d or 3d')
    parser.add_argument('-f', '--filename', type=str,
                        help='play audio file')
    parser.add_argument('-r', '--playback-rate', type=float,
                        help='Specify the playback rate.(e.g. 1.2)', default=1.)
    parser.add_argument('-d', '--delay', type=float,
                        help='Specify the delay time to play the animation.(unit second)', default=4)
    cmd = parser.parse_args(sys.argv[1:])
    parser.print_help()
    run = audio_visual.AudioVisualize(filename=cmd.filename,
                                      rate=cmd.playback_rate,
                                      delay=cmd.delay)
    if cmd.effect == '1' or cmd.effect == '1d' or cmd.effect == '1D':
        if cmd.filename:
            run.music_visualize_1d()
        else:
            run.audio_visualize_1d()
    elif cmd.effect == '2' or cmd.effect == '2d' or cmd.effect == '2D':
        if cmd.filename:
            run.music_visualize_2d()
        else:
            run.audio_visualize_2d()
    elif cmd.effect == '3' or cmd.effect == '3d' or cmd.effect == '3D':
        if cmd.filename:
            run.music_visualize_3d()
        else:
            # NOTE(review): this branch falls back to the 1d visualizer,
            # unlike the 1d/2d branches which call their matching
            # audio_visualize_*d() - confirm whether audio_visualize_3d()
            # was intended here.
            run.audio_visualize_1d()
| 38.894737
| 101
| 0.56157
| 178
| 1,478
| 4.494382
| 0.337079
| 0.10125
| 0.0825
| 0.06
| 0.2275
| 0.1125
| 0
| 0
| 0
| 0
| 0
| 0.022417
| 0.305819
| 1,478
| 37
| 102
| 39.945946
| 0.75731
| 0
| 0
| 0.242424
| 0
| 0
| 0.163957
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.090909
| 0
| 0.090909
| 0.030303
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a9396c4b2a5db6f805687ea1be44d44fa3d0fd9d
| 1,091
|
py
|
Python
|
feeds/tasks.py
|
ralphqq/rss-apifier
|
cd056654abf24fd178f1e5d8661cafcb3cc1236b
|
[
"MIT"
] | null | null | null |
feeds/tasks.py
|
ralphqq/rss-apifier
|
cd056654abf24fd178f1e5d8661cafcb3cc1236b
|
[
"MIT"
] | 5
|
2020-06-06T01:01:48.000Z
|
2021-09-22T18:16:22.000Z
|
feeds/tasks.py
|
ralphqq/rss-apifier
|
cd056654abf24fd178f1e5d8661cafcb3cc1236b
|
[
"MIT"
] | null | null | null |
import logging
from celery import shared_task
from .models import Feed
@shared_task(name='fetch-entries')
def fetch_entries():
    """Fetches and saves all new entries for each RSS feed in the db.

    Per-feed failures are logged and skipped so one bad feed cannot
    abort the whole task.

    Returns:
        int: count of all RSS entries successfully saved (implicitly
        None when the database holds no feeds)
    """
    logging.info('Fetching new RSS entries')
    feeds = Feed.objects.all()
    # Guard clause: nothing to do when the database holds no feeds
    if not feeds.exists():
        logging.warning('No RSS feeds found in the database')
        return
    logging.info(f'Found {feeds.count()} total RSS feeds to process')
    total_entries_saved = 0
    for feed in feeds:
        try:
            entries_saved = feed.update_feed_entries()
        except Exception as e:
            logging.error(e)
        else:
            logging.info(
                f'Saved {entries_saved} new entries from {feed.link}'
            )
            total_entries_saved += entries_saved
    logging.info(f'Processed and saved a total of {total_entries_saved} new RSS Entries')
    return total_entries_saved
| 28.710526
| 94
| 0.590284
| 133
| 1,091
| 4.721805
| 0.421053
| 0.133758
| 0.10828
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001379
| 0.335472
| 1,091
| 37
| 95
| 29.486486
| 0.864828
| 0.114574
| 0
| 0.083333
| 0
| 0
| 0.26044
| 0.023077
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041667
| false
| 0
| 0.125
| 0
| 0.208333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a93f9ae56030b3bb1d933321fd043f4b7c020edb
| 2,473
|
py
|
Python
|
sublime-text-3/Packages/CommandOnSave/CommandOnSave.py
|
lvancrayelynghe/dotfiles
|
6cbf95368f18d26adc3520b4223157a0ed6acebc
|
[
"MIT"
] | 17
|
2019-03-25T23:43:40.000Z
|
2022-03-08T17:56:06.000Z
|
sublime-text-3/Packages/CommandOnSave/CommandOnSave.py
|
pection/dotfiles
|
b93759598a601833b14d87fc38ff034f027faea0
|
[
"MIT"
] | null | null | null |
sublime-text-3/Packages/CommandOnSave/CommandOnSave.py
|
pection/dotfiles
|
b93759598a601833b14d87fc38ff034f027faea0
|
[
"MIT"
] | 6
|
2019-03-20T18:17:22.000Z
|
2020-12-11T04:38:22.000Z
|
import sublime
import sublime_plugin
import subprocess
import re
import time
import threading
class CommandOnSave(sublime_plugin.EventListener):
    """Runs user-configured shell commands after a file is saved.

    Commands come from the ``commands`` mapping (path regex -> command list)
    in CommandOnSave.sublime-settings; a short timer suppresses re-runs
    triggered by saves in quick succession.
    """
    def __init__(self):
        # Seconds during which subsequent saves are ignored.
        self.timeout = 2
        self.timer = None
    def cancel_timer(self):
        """Stop the pending suppression timer, if any."""
        if self.timer is not None:
            self.timer.cancel()
    def start_timer(self):
        """Start the suppression timer; clear() re-enables command runs."""
        self.timer = threading.Timer(self.timeout, self.clear)
        self.timer.start()
    def clear(self):
        """Timer callback: allow commands to run again."""
        print("Command on Save Cleared")
        self.timer = None
    def on_post_save(self, view):
        """Run every configured command whose path regex matches the saved file."""
        view.erase_status('command_on_save')
        settings = sublime.load_settings('CommandOnSave.sublime-settings').get('commands')
        enabled = sublime.load_settings('CommandOnSave.sublime-settings').get('enabled')
        file = view.file_name()
        # Skip while the suppression timer is active, when no commands are
        # configured, or when the plugin is disabled.  (`is None` replaces the
        # non-idiomatic `== None` tests; `enabled == True` is kept so only an
        # explicit boolean True enables the plugin, as before.)
        if self.timer is None and settings is not None and enabled == True:
            for path in settings.keys():
                commands = settings.get(path)
                match = re.match(path, file, re.M | re.I)
                if match and commands:
                    print("Command on Save:")
                    for command in commands:
                        # NOTE(review): shell=True runs the configured string
                        # through the shell.  Commands come from the user's own
                        # settings file, so this is by design -- but they must
                        # stay trusted input.
                        p = subprocess.Popen([command], shell=True, stdout=subprocess.PIPE)
                        out, err = p.communicate()
                        print(command)
                        print(out.decode('utf-8'))
            self.start_timer()
class ToggleCommandOnSave(sublime_plugin.ApplicationCommand):
    """Command that flips the plugin's ``enabled`` setting and shows the state."""
    def __init__(self):
        # Seconds the status-bar message stays visible.
        self.timeout = 3
        self.timer = None
    def run(self):
        """Toggle the ``enabled`` flag and display the new state in the status bar."""
        settings = sublime.load_settings("CommandOnSave.sublime-settings")
        # Anything other than exactly True counts as disabled, so toggling it
        # enables.  (Replaces the redundant `True if x != True else False`.)
        value = settings.get("enabled", True) != True
        if value:
            sublime.active_window().active_view().set_status('command_on_save', "[Command on Save Enabled]")
        else:
            sublime.active_window().active_view().set_status('command_on_save', "[Command on Save Disabled]")
        settings.set("enabled", value)
        self.start_timer()
        # sublime.save_settings("CommandOnSave.sublime-settings")
    def cancel_timer(self):
        """Stop the pending status-clear timer, if any."""
        if self.timer is not None:
            self.timer.cancel()
    def start_timer(self):
        """Schedule clear() to remove the status message after `timeout` seconds."""
        self.timer = threading.Timer(self.timeout, self.clear)
        self.timer.start()
    def clear(self):
        """Timer callback: remove the toggle status message."""
        sublime.active_window().active_view().erase_status("command_on_save")
| 34.830986
| 109
| 0.608573
| 285
| 2,473
| 5.133333
| 0.249123
| 0.073821
| 0.071087
| 0.051948
| 0.467532
| 0.420369
| 0.382092
| 0.270677
| 0.270677
| 0.270677
| 0
| 0.00225
| 0.281035
| 2,473
| 70
| 110
| 35.328571
| 0.820585
| 0.02224
| 0
| 0.368421
| 0
| 0
| 0.113411
| 0.037252
| 0
| 0
| 0
| 0
| 0
| 1
| 0.175439
| false
| 0
| 0.105263
| 0
| 0.315789
| 0.070175
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a9413ba0562d76a3686a4648b8027621dd78c41b
| 2,736
|
py
|
Python
|
src/erb/_parse.py
|
lyy289065406/pyyaml-erb
|
e0723bda98fae97c3cfeb1e9377821bd88f7ea2d
|
[
"MIT"
] | null | null | null |
src/erb/_parse.py
|
lyy289065406/pyyaml-erb
|
e0723bda98fae97c3cfeb1e9377821bd88f7ea2d
|
[
"MIT"
] | null | null | null |
src/erb/_parse.py
|
lyy289065406/pyyaml-erb
|
e0723bda98fae97c3cfeb1e9377821bd88f7ea2d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Author : EXP
# -----------------------------------------------
import os
import re
import numbers
def _parse_dict(conf_dict):
    """Recursively resolve the expressions in a config dict's values.

    :param conf_dict: raw configuration dict
    :return: a new dict with every expression value resolved
    """
    resolved = {}
    for key, raw in conf_dict.items():
        if isinstance(raw, dict):
            # Nested section: recurse.
            resolved[key] = _parse_dict(raw)
        elif isinstance(raw, list):
            # Resolve each list element independently.
            resolved[key] = [_parse_expression(item) for item in raw]
        else:
            resolved[key] = _parse_expression(raw)
    return resolved
def _parse_expression(expression):
    """Resolve an ERB-style expression such as ``<%= ENV['JAVA_OME'] || 'default' %>``.

    :param expression: raw value; None, numbers and dicts are returned as-is
    :return: the resolved value
    """
    # Values that can never contain an expression pass through untouched.
    if expression is None or \
       isinstance(expression, numbers.Number) or \
       isinstance(expression, dict):
        return expression
    wrapper = re.search(r'^<%=(.+)%>$', expression.strip())
    if not wrapper:
        # Not wrapped in <%= ... %>: treat the whole thing as a literal.
        return _parse_text(expression)
    value = None
    # Alternatives may be chained with " || " or " or "; first truthy one wins.
    for candidate in re.split(r' \|\| | or ', wrapper.group(1)):
        candidate = candidate.strip()
        env_ruby = re.search(r'^ENV\[(.+)\]$', candidate)
        env_shell = re.search(r'^\$\{(.+)\}$', candidate)
        if env_ruby:
            value = value or _parse_environment(env_ruby.group(1))
        elif env_shell:
            value = value or _parse_environment(env_shell.group(1))
        else:
            value = value or _parse_text(candidate)
    return value
def _parse_environment(variable):
    """Look up an environment variable.

    :param variable: variable name, possibly wrapped in quotes
    :return: the variable's value, or None when unset
    """
    return os.getenv(_remove_quotes(variable))
def _parse_text(text):
    """Parse a text literal, converting numbers and boolean/None keywords.

    :param text: raw text token
    :return: float/int/bool/None, or the unquoted string
    """
    # Decimal number (e.g. "1.5") takes priority over plain integer.
    float_match = re.search(r'^(\d+\.\d+)$', text)
    if float_match:
        return float(float_match.group(1))
    int_match = re.search(r'^(\d+)$', text)
    if int_match:
        return int(int_match.group(1))
    val = _remove_quotes(text)
    if val is not None and isinstance(val, str):
        lowered = val.lower()
        # Keyword literals map onto Python values (case-insensitive).
        if lowered in ('none', 'null', 'nil'):
            return None
        if lowered == 'true':
            return True
        if lowered == 'false':
            return False
    return val
def _remove_quotes(text) :
'''
移除文本两端的引号(双引号或单引号)
:param text: 文本
:return: 文本
'''
if text == '""' or text == "''" :
text = ''
else :
mth = re.search(r'^[\'"](.+)["\']$', text)
if mth :
text = mth.group(1)
return text
| 23.186441
| 83
| 0.50402
| 308
| 2,736
| 4.340909
| 0.275974
| 0.035901
| 0.040389
| 0.038145
| 0.094989
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008782
| 0.334064
| 2,736
| 117
| 84
| 23.384615
| 0.725027
| 0.142544
| 0
| 0.134328
| 0
| 0
| 0.045373
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.074627
| false
| 0
| 0.044776
| 0
| 0.208955
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a946ab769d869df40935f6c4d6219757e390f7ee
| 1,750
|
py
|
Python
|
auto/lookup.py
|
ggicci/fuck-leetcode
|
45b488530b9dbcc8b7c0b90160ea45b1ab4f8475
|
[
"MIT"
] | null | null | null |
auto/lookup.py
|
ggicci/fuck-leetcode
|
45b488530b9dbcc8b7c0b90160ea45b1ab4f8475
|
[
"MIT"
] | null | null | null |
auto/lookup.py
|
ggicci/fuck-leetcode
|
45b488530b9dbcc8b7c0b90160ea45b1ab4f8475
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os
import sys
import json
from argparse import ArgumentParser
ROOT = os.path.dirname(os.path.abspath(__file__))
DB_FILE = os.path.join(ROOT, 'problems.json')
def parse_args():
    """Parse CLI tool options.

    Returns:
        argparse.Namespace with ``problem_id`` (int), ``field`` (str or
        None), ``markdown`` (bool flag) and ``context`` (str or None).
    """
    parser = ArgumentParser()
    parser.add_argument('problem_id', type=int)
    parser.add_argument('--field', type=str, help='extract field value')
    # BUG FIX: argparse's `type=bool` is a trap -- bool("False") is True, so
    # any value passed to --markdown (even "False") enabled it.  A
    # store_true flag is the standard way to express a boolean switch.
    parser.add_argument('--markdown',
                        action='store_true',
                        default=False,
                        help='print markdown content')
    parser.add_argument('--context',
                        type=str,
                        help='additional context to lookup')
    return parser.parse_args()
def lookup(problem_id: int, context: str = None):
    """Find a problem record by id, preferring the optional JSON context.

    :param problem_id: numeric problem id
    :param context: optional JSON document that may already hold the record
    :return: the problem dict, or None when not found
    """
    if context:
        # The serialized context may already carry the record we need.
        candidate = json.loads(context)
        if int(candidate.get('id', -1)) == problem_id:
            return candidate
    # Fall back to the bundled problems database, indexed by integer id.
    with open(DB_FILE, 'r') as f:
        by_id = {int(entry['id']): entry for entry in json.load(f)}
    return by_id.get(problem_id)
def main():
    """CLI entry point: look up the requested problem and print it."""
    opts = parse_args()
    problem = lookup(opts.problem_id, context=opts.context)
    if not problem:
        sys.exit('Problem Not Found')
    # Derive the canonical problem URL from the slug.
    problem['url'] = f'https://leetcode.com/problems/{problem["slug"]}/'
    if opts.field:
        # Only a single field was requested; print just its value.
        field_value = problem.get(opts.field)
        if field_value is None:
            sys.exit('Field Not Found')
        print(field_value)
    elif opts.markdown is True:
        # Markdown link form: [id - title](url)
        print(f'[{problem["id"]} - {problem["title"]}]({problem["url"]})')
    else:
        # Default: dump the whole record as pretty JSON.
        print(json.dumps(problem, indent=4))
if __name__ == '__main__':
main()
| 25
| 74
| 0.582286
| 219
| 1,750
| 4.534247
| 0.383562
| 0.054381
| 0.068479
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00159
| 0.281143
| 1,750
| 69
| 75
| 25.362319
| 0.787758
| 0.070857
| 0
| 0.044444
| 0
| 0
| 0.166976
| 0.022882
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.088889
| 0
| 0.266667
| 0.088889
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a9470a504b0eced5d1fe21002e68de978c63f971
| 6,789
|
py
|
Python
|
src/application/dungeon.py
|
meteoric-minks/code-jam
|
b094350176e54d873a04a483dc37d70533013c37
|
[
"MIT"
] | 1
|
2021-07-09T14:41:12.000Z
|
2021-07-09T14:41:12.000Z
|
src/application/dungeon.py
|
meteoric-minks/code-jam
|
b094350176e54d873a04a483dc37d70533013c37
|
[
"MIT"
] | null | null | null |
src/application/dungeon.py
|
meteoric-minks/code-jam
|
b094350176e54d873a04a483dc37d70533013c37
|
[
"MIT"
] | null | null | null |
from __future__ import annotations # Fixes an issue with some annotations
from .ascii_box import Light, LineChar
from .ascii_drawing import DrawingChar
class Item:
    """Represents an item within a Room.

    Coordinates are relative to the containing room; ``interact`` marks
    whether the player can trigger ``command()`` on it.
    """
    def __init__(self, x: int, y: int, c: DrawingChar, interact: bool = False):
        self.x, self.y = x, y  # Coords relative to room
        self.char = c
        # BUG FIX: the flag was hard-coded to False, so interactive items
        # could never be created; store the constructor argument instead.
        self.interact = interact
    def __repr__(self):
        return "<Item '{}' at {}, {}>".format(self.char, self.x, self.y)
    def command(self) -> None:
        """Command to run when interacted with; subclasses override this."""
        pass
class Room:
    """Represents a single room in a dungeon.

    An axis-aligned rectangle at (x, y) (top-left, dungeon coordinates) of
    the given width/height, drawn with the line-drawing characters in `c`
    and holding a list of `Item`s.
    """
    def __init__(self,
                 x: int,  # Coords of Top Left
                 y: int,
                 width: int = 10,  # Width and Height
                 height: int = 6,
                 c: LineChar = Light,  # Which drawing chars to use
                 ):
        self.x, self.y = x, y
        self.width, self.height = width, height
        self.char = c
        # Items placed in this room (filled via add_item).
        self.items = []
    def __repr__(self):
        return "<Room of size {}x{} at {}, {}>".format(self.width, self.height, self.x, self.y)
    def add_item(self, item: Item) -> None:
        """Add an item to the room.

        Raises ValueError when the item's room-relative coordinates fall on
        or outside the border cells (the border cells are the walls).
        """
        if 0 < item.x < self.width - 1 and 0 < item.y < self.height - 1:  # Ensure item is within room
            self.items.append(item)
        else:
            raise ValueError("Item {} is not within Room {}".format(item, self))
    def intersects(self, x0: int, y0: int, x1: int, y1: int) -> bool:
        """Calculate if the room intersects some box.
        Will be used to check if the room should be rendered at a given time.
        x0,y0 will represent the top left, x1,y1 represents the bottom right.
        Note: this is inclusive, i.e. if the rectangles only touch it is still counted as intersecting.
        """
        # Separating-axis test: the shapes overlap unless one lies strictly
        # to one side of the other on either axis.
        if (
            (x0 > (self.x + self.width))  # Box is to the right of room
            or (self.x > x1)  # Room is to the right of box
        ):
            return False
        elif (
            (y0 > (self.y + self.height))  # Box is below room
            or (self.y > y1)  # Room is below box
        ):
            return False
        else:  # If none of these conditions are true, they must overlap
            return True
    def render(self) -> list[str]:
        """Will return a rendered box of the room and should include anything within the room.
        Returns a list of one-line strings.
        Returning a list will make it much easier to add spaces on the left so it can be rendered in the correct
        place on the screen.
        """
        # Start with a blank 2D list
        # Lists are much easier to work with since individual items can be set, unlike strings
        image = [[" " for x in range(self.width)] for y in range(self.height)]
        # Corners first, then the top and bottom walls
        image[0][0] = self.char.DownRight.value
        image[0][-1] = self.char.DownLeft.value
        image[-1][0] = self.char.UpRight.value
        image[-1][-1] = self.char.UpLeft.value
        for n in range(1, self.width - 1):
            image[0][n] = self.char.Horizontal.value
            image[-1][n] = self.char.Horizontal.value
        # Sides
        for n in range(1, self.height - 1):
            image[n][0] = self.char.Vertical.value
            image[n][-1] = self.char.Vertical.value
        # Add items (room-relative coordinates index straight into the image)
        for item in self.items:
            image[item.y][item.x] = item.char.value
        # Join rows into one string per line
        image = list(map(lambda x: "".join(x), image))
        return image
class Dungeon:
    """Represents an entire dungeon.
    A single instance will likely represent either the world or a single level.
    """
    def __init__(self):
        # Rooms making up this dungeon (filled via add_room).
        self.rooms = []
    def add_room(self, room: Room) -> None:
        """Adds a room to the dungeon."""
        self.rooms.append(room)
    def set_character(self, char: Character) -> None:
        """Sets the dungeon's character (drawn on top of the rooms)."""
        self.character = char
    def render(self,
               x0: int,  # Coord, in the dungeon, of the top left of the screen
               y0: int,
               x1: int,  # Coord, in the dungeon, of the bottom right of the screen
               y1: int,
               ) -> list[str]:
        """Renders the entire dungeon.

        Draws every room intersecting the (x0, y0)-(x1, y1) viewport,
        clipping rooms that straddle the viewport edges, then draws the
        character on top.  Returns one string per screen row.
        """
        result = [[" " for x in range(x1 - x0 + 1)] for y in range(y1 - y0 + 1)]  # Use a list of lists for now
        # This makes it much easier to set specific locations in the output
        for r in self.rooms:
            if r.intersects(x0, y0, x1, y1):
                r_rend = r.render()
                # Position of the room's top-left relative to the viewport.
                x_offset = r.x - x0
                y_offset = r.y - y0
                if y_offset >= 0:
                    ys = y_offset  # Y Pos to start Drawing
                else:
                    # Room starts above the viewport: drop the hidden top rows.
                    r_rend = r_rend[-y_offset:]
                    ys = 0
                if x_offset >= 0:
                    xs = x_offset  # X Pos to start drawing
                else:
                    # Room starts left of the viewport: drop the hidden columns.
                    r_rend = list(map(lambda x: x[-x_offset:],
                                      r_rend))
                    xs = 0
                # Copy the (clipped) room image, skipping cells that would
                # fall past the right/bottom viewport edges.
                for y in [y for y in range(len(r_rend)) if y + ys <= y1 - y0]:
                    for x in [x for x in range(len(r_rend[0])) if x + xs <= x1 - x0]:
                        result[y + ys][x + xs] = r_rend[y][x]
        # Draw the character last so it appears on top of the room tiles.
        result[self.character.y - y0][self.character.x - x0] = self.character.char.value
        result = list(map(lambda x: "".join(x), result))
        return result
    def in_room(self, x: int, y: int) -> bool:
        """Will be used to check if the character is able to move to a given coordinate."""
        results = []
        for r in self.rooms:
            # A degenerate box (x, y, x, y) tests a single cell.
            results.append(r.intersects(x, y, x, y))
        return any(results)
class Character:
    """Represents a movable character onscreen."""
    # Unit offsets for the four directions: N, E, S, W.
    directions = [
        [0, -1],
        [1, 0],
        [0, 1],
        [-1, 0],
    ]
    def __init__(self, dungeon: Dungeon, x: int = 0, y: int = 0, c: DrawingChar = DrawingChar.Character):
        self.dungeon = dungeon
        self.x, self.y = x, y
        self.char = c
    def move(self, dir: int) -> None:
        """Move the character.
        Direction: 0=N, 1=E, 2=S, 3=W
        The move is applied only when the target square lies inside a room.
        """
        newx, newy = self.x + self.directions[dir][0], self.y + self.directions[dir][1]
        if self.dungeon.in_room(newx, newy):
            self.x, self.y = newx, newy
    def interact(self) -> None:
        """Interact with any interactive item the player is on or adjacent to."""
        for r in self.dungeon.rooms:
            for i in r.items:
                # BUG FIX: the second range test repeated the x-axis check, so
                # the y position was never constrained; compare y against i.y.
                if i.interact and i.x - 1 <= self.x <= i.x + 1 and i.y - 1 <= self.y <= i.y + 1:
                    i.command()
| 32.328571
| 112
| 0.531153
| 956
| 6,789
| 3.718619
| 0.210251
| 0.019691
| 0.020253
| 0.016878
| 0.149086
| 0.081013
| 0.057947
| 0.007876
| 0.007876
| 0
| 0
| 0.018963
| 0.355281
| 6,789
| 209
| 113
| 32.483254
| 0.793237
| 0.261158
| 0
| 0.157025
| 0
| 0
| 0.016974
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.132231
| false
| 0.008264
| 0.024793
| 0.016529
| 0.264463
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a94809d2a1b0b2d4efef4518fceb1a00c7233013
| 3,836
|
py
|
Python
|
math2/graph/graphs.py
|
AussieSeaweed/math2
|
9e83fa8a5a5d227d72fec1b08f6759f0f0f41fca
|
[
"MIT"
] | 2
|
2021-03-29T03:15:57.000Z
|
2021-03-29T03:23:21.000Z
|
math2/graph/graphs.py
|
AussieSeaweed/math2
|
9e83fa8a5a5d227d72fec1b08f6759f0f0f41fca
|
[
"MIT"
] | 1
|
2021-04-07T11:07:17.000Z
|
2021-04-07T11:07:17.000Z
|
math2/graph/graphs.py
|
AussieSeaweed/math2
|
9e83fa8a5a5d227d72fec1b08f6759f0f0f41fca
|
[
"MIT"
] | null | null | null |
from abc import ABC, abstractmethod
from collections import defaultdict
from functools import partial
from auxiliary import default
class Edge:
    """A (possibly weighted / capacitated) edge between vertices u and v."""
    def __init__(self, u, v, *, weight=None, capacity=None):
        self.u = u
        self.v = v
        self.weight = weight
        self.capacity = capacity
        self.flow = 0
    def invert(self):
        """Return a new edge pointing the opposite way (flow not copied)."""
        return Edge(self.v, self.u, weight=self.weight, capacity=self.capacity)
    def match(self, u, v):
        """True when this edge runs from u to v; None acts as a wildcard."""
        return self.u == default(u, self.u) and self.v == default(v, self.v)
    def other(self, vertex):
        """Return the endpoint opposite to `vertex`."""
        if vertex == self.u:
            return self.v
        if vertex == self.v:
            return self.u
        raise ValueError('The vertex is not one of the endpoints')
    def residual_capacity(self, vertex):
        """Residual capacity in the direction of `vertex`."""
        if vertex == self.u:
            return self.flow
        if vertex == self.v:
            return self.capacity - self.flow
        raise ValueError('The vertex is not one of the endpoints')
    def add_residual_capacity(self, vertex, delta):
        """Push `delta` units of flow toward `vertex`."""
        if vertex == self.u:
            self.flow -= delta
        elif vertex == self.v:
            self.flow += delta
        else:
            raise ValueError('The vertex is not one of the endpoints')
class Graph(ABC):
    """Abstract base for graph representations; tracks the vertex set."""
    def __init__(self, directed=False):
        self.directed = directed
        self.__nodes = set()
    @property
    def nodes(self):
        """Iterator over all vertices seen so far."""
        return iter(self.__nodes)
    @property
    def node_count(self):
        """Number of distinct vertices."""
        return len(self.__nodes)
    def add(self, edge):
        """Record the edge's endpoints; subclasses store the edge itself."""
        self.__nodes.update((edge.u, edge.v))
    @abstractmethod
    def edges(self, u=None, v=None):
        """Yield edges, optionally filtered by endpoints (None = wildcard)."""
        pass
class EdgeList(Graph):
    """Graph stored as a flat list of edges."""
    def __init__(self, directed=False):
        super().__init__(directed)
        self.__edges = []
    def add(self, edge):
        """Store the edge; undirected graphs also store the reverse edge."""
        super().add(edge)
        self.__edges.append(edge)
        if not self.directed:
            self.__edges.append(edge.invert())
    def edges(self, u=None, v=None):
        """Generate stored edges whose endpoints match u/v (None = wildcard)."""
        return (e for e in self.__edges if e.match(u, v))
class AdjacencyMatrix(Graph):
    """Graph stored as a nested mapping u -> v -> [edges]."""
    def __init__(self, directed=False):
        super().__init__(directed)
        self.__matrix = defaultdict(partial(defaultdict, list))
    def add(self, edge):
        """Store the edge; undirected graphs also store the reverse edge."""
        super().add(edge)
        self.__matrix[edge.u][edge.v].append(edge)
        if not self.directed:
            self.__matrix[edge.v][edge.u].append(edge.invert())
    def edges(self, u=None, v=None):
        """Return an iterator over edges filtered by endpoints (None = wildcard)."""
        edges = []
        if u is None and v is None:
            for adj_lists in self.__matrix.values():
                # BUG FIX: iterating a dict yields its *keys* (vertices); the
                # original extended `edges` with those vertex objects instead
                # of the stored edge lists.  Iterate the values.
                for adj_list in adj_lists.values():
                    edges.extend(adj_list)
        elif u is None:
            for adj_lists in self.__matrix.values():
                edges.extend(adj_lists[v])
        elif v is None:
            for adj_list in self.__matrix[u].values():
                edges.extend(adj_list)
        else:
            edges = self.__matrix[u][v]
        return iter(edges)
class AdjacencyLists(Graph):
    """Graph stored as per-vertex outgoing edge lists."""
    def __init__(self, directed=False):
        super().__init__(directed)
        self.__lists = defaultdict(list)
    def add(self, edge):
        """Store the edge; undirected graphs also store the reverse edge."""
        super().add(edge)
        self.__lists[edge.u].append(edge)
        if not self.directed:
            self.__lists[edge.v].append(edge.invert())
    def edges(self, u=None, v=None):
        """Return an iterator over edges filtered by endpoints (None = wildcard)."""
        if u is None and v is None:
            # Full scan: flatten every per-vertex list.
            all_edges = []
            for adj in self.__lists.values():
                all_edges.extend(adj)
            return iter(all_edges)
        if u is None:
            # Filter the full scan by destination only.
            return (e for e in self.edges() if e.match(None, v))
        if v is None:
            return iter(self.__lists[u])
        return (e for e in self.edges(u) if e.match(u, v))
| 25.573333
| 79
| 0.574035
| 503
| 3,836
| 4.202783
| 0.133201
| 0.033113
| 0.026017
| 0.035951
| 0.53264
| 0.464995
| 0.441343
| 0.373699
| 0.31315
| 0.280038
| 0
| 0.000381
| 0.316476
| 3,836
| 149
| 80
| 25.744966
| 0.805873
| 0
| 0
| 0.481481
| 0
| 0
| 0.029718
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.185185
| false
| 0.009259
| 0.037037
| 0.046296
| 0.398148
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a948b58d4adf86897d648d15d474fef3166794ec
| 5,734
|
py
|
Python
|
src/models/test_ensemble.py
|
nybupt/athena
|
2808f5060831382e603e5dc5ec6a9e9d8901a3b2
|
[
"MIT"
] | null | null | null |
src/models/test_ensemble.py
|
nybupt/athena
|
2808f5060831382e603e5dc5ec6a9e9d8901a3b2
|
[
"MIT"
] | 8
|
2020-09-25T22:32:00.000Z
|
2022-02-10T01:17:17.000Z
|
src/models/test_ensemble.py
|
nybupt/athena
|
2808f5060831382e603e5dc5ec6a9e9d8901a3b2
|
[
"MIT"
] | 1
|
2021-08-12T12:48:51.000Z
|
2021-08-12T12:48:51.000Z
|
import os
import sys
import time
import numpy as np
from sklearn.metrics import accuracy_score
from utils.config import TRANSFORMATION
from utils.ensemble import load_models, prediction, ensemble_defenses_util
def testOneData(
        datasetFilePath,
        models,
        nClasses,
        transformationList,
        EnsembleIDs,
        trueLabels,
        useLogit=False
):
    """Evaluate the ensemble defenses on one dataset file.

    :param datasetFilePath: .npy file holding the samples to evaluate
    :param models: list of weak-defense models
    :param nClasses: number of classes
    :param transformationList: transformations matching `models`
    :param EnsembleIDs: ensemble strategy ids to evaluate (probability mode)
    :param trueLabels: ground-truth label vector
    :param useLogit: when True, only ensemble strategy 2 is run on logits
    :return: (accuracies, transformation times, prediction times,
             ensemble times) as numpy arrays
    """
    accs = []
    ensembleTCs = []
    data = np.load(datasetFilePath)
    data = np.clip(data, 0, 1)  # ensure its values are inside [0, 1]
    print("Prediction...")
    rawPred, transTCs, predTCs = prediction(data, models, nClasses, transformationList)
    def _evaluate(ensembleID):
        # Shared per-strategy work (previously duplicated in both branches):
        # time the ensemble vote and record its rounded accuracy.
        start_time = time.time()
        labels = ensemble_defenses_util(rawPred, ensembleID)
        ensembleTCs.append(time.time() - start_time)
        accs.append(round(accuracy_score(trueLabels, labels), 4))
    if not useLogit:
        # use probability: evaluate every requested ensemble strategy
        for ensembleID in EnsembleIDs:
            print("Processing ensembleID {} using probability".format(ensembleID))
            _evaluate(ensembleID)
    else:
        # use logit and EnsembleID 2
        ensembleID = 2
        print("Processing ensembleID {} using logit".format(ensembleID))
        _evaluate(ensembleID)
    return np.array(accs), np.array(transTCs), np.array(predTCs), np.array(ensembleTCs)
# CLI arguments:
#   argv[1] - path to the benign-sample (BS) one-hot label file (.npy)
#   argv[2] - root directory holding the sample sub-directories
#   argv[3] - directory holding the trained weak-defense models
BSLabelFP=sys.argv[1]
samplesDir=sys.argv[2]
modelsDir=sys.argv[3]
# Adversarial-example (AE) variants per attack family; each entry names the
# parameterized .npy files to evaluate for that attack.
AETypes = {
    "biml2": ["bim_ord2_nbIter100_eps1000", "bim_ord2_nbIter100_eps250", "bim_ord2_nbIter100_eps500"],
    "bimli":["bim_ordinf_nbIter100_eps100", "bim_ordinf_nbIter100_eps90", "bim_ordinf_nbIter100_eps75"],
    "cwl2":["cw_l2_lr350_maxIter100", "cw_l2_lr500_maxIter100", "cw_l2_lr700_maxIter100"],
    "dfl2":["deepfool_l2_overshoot20", "deepfool_l2_overshoot30", "deepfool_l2_overshoot50"],
    "fgsm":["fgsm_eps100", "fgsm_eps250", "fgsm_eps300"],
    "jsma":["jsma_theta30_gamma50", "jsma_theta50_gamma50", "jsma_theta50_gamma70"],
    "mim":["mim_eps20_nbIter1000", "mim_eps30_nbIter1000", "mim_eps50_nbIter1000"],
    "op":["onepixel_pxCount15_maxIter30_popsize100", "onepixel_pxCount30_maxIter30_popsize100", "onepixel_pxCount5_maxIter30_popsize100"],
    "pgd":["pgd_eps250", "pgd_eps100", "pgd_eps300"]
}
# Sub-directories processed in this run (others left commented out).
sampleSubDirs=[
    "legitimates"#, "fgsm"
    #"biml2", "bimli", "cwl2", "dfl2"
    #"fgsm", "jsma", "mim", "op", "pgd"
]
# (nSamples, <sample dimension>, nChannels)
# (nClasses)
trueLabelVec=np.load(BSLabelFP)
trueLabels = np.argmax(trueLabelVec, axis=1)
nClasses = trueLabelVec.shape[1]
# Ensemble strategy ids evaluated in probability mode.
EnsembleIDs=[0,1,2,3]
rows=0
cols=1+len(EnsembleIDs)
# One result row for the BS set plus three rows (three variants) per AE family.
if "legitimates" in sampleSubDirs:
    rows=1+3*(len(sampleSubDirs) - 1)
else:
    rows=3*len(sampleSubDirs)
accs = np.zeros((rows, cols))
modelFilenamePrefix="mnist-cnn" # dataset name and network architecture
# include "clean" type: no transformation.
# transformationList[0] is "clean"
transformationList=TRANSFORMATION.supported_types()
# remove "clean" because the correspondingly model will not be used in ensemble
transformationList.remove("clean")
nTrans = len(transformationList)
# Timing matrices: transformation / prediction / ensemble costs per result row.
transTCs_Prob = np.zeros((rows, nTrans))
transTCs_Logit = np.zeros((rows, nTrans))
predTCs_Prob = np.zeros((rows, nTrans))
predTCs_Logit = np.zeros((rows, nTrans))
ensembleTCs = np.zeros((rows, 5))
rowIdx=0
rowHeaders=[]
AEFilenamePrefix="test_AE-mnist-cnn-clean"
# Resolve every dataset file to evaluate, recording a header per result row.
datasetFilePaths = []
for subDirName in sampleSubDirs:
    if subDirName == "legitimates": # BS
        datasetFilePaths.append(
            os.path.join(os.path.join(samplesDir, subDirName), "test_BS-mnist-clean.npy"))
        rowHeaders.append("BS")
    else: # AE
        AETags = AETypes[subDirName]
        for AETag in AETags:
            datasetFilePaths.append(
                os.path.join(os.path.join(samplesDir, subDirName), AEFilenamePrefix+"-"+AETag+".npy"))
            rowHeaders.append(AETag)
# Pass 1: probability-based ensembles fill accs columns 0-3.
useLogit = False
print("Loading prob models")
models = load_models(modelsDir, modelFilenamePrefix, transformationList, convertToLogit=useLogit)
for datasetFilePath in datasetFilePaths:
    accs[rowIdx, 0:4], transTCs_Prob[rowIdx], predTCs_Prob[rowIdx], ensembleTCs[rowIdx, 0:4] = testOneData(
        datasetFilePath,
        models,
        nClasses,
        transformationList,
        EnsembleIDs,
        trueLabels,
        useLogit=useLogit
    )
    rowIdx+=1
del models
# Pass 2: logit-based ensemble (strategy 2) fills accs column 4.
useLogit=True
print("Loading logit models")
logitModels = load_models(modelsDir, modelFilenamePrefix, transformationList, convertToLogit=useLogit)
rowIdx=0
for datasetFilePath in datasetFilePaths:
    accs[rowIdx, 4], transTCs_Logit[rowIdx], predTCs_Logit[rowIdx], ensembleTCs[rowIdx, 4] = testOneData(
        datasetFilePath,
        logitModels,
        nClasses,
        transformationList,
        EnsembleIDs,
        trueLabels,
        useLogit=useLogit
    )
    rowIdx+=1
del logitModels
# Persist the accuracy matrix both as .npy and as a tab-separated report.
np.save("acc_ensemble_test.npy", accs)
with open("acc_ensemble_test.txt", "w") as fp:
    fp.write("Acc\tRD\tMV\tAVEP\tT2MV\tAVEL\n")
    for ridx in range(len(rowHeaders)):
        fp.write("{}\t{}\t{}\t{}\t{}\t{}\n".format(
            rowHeaders[ridx],
            accs[ridx, 0],
            accs[ridx, 1],
            accs[ridx, 2],
            accs[ridx, 3],
            accs[ridx, 4]))
# Transformation cost is averaged over the two passes; the rest saved as-is.
transTCs = (transTCs_Prob + transTCs_Logit)/2
np.save("transTCs.npy", transTCs)
np.save("predTCs_Prob.npy", predTCs_Prob)
np.save("predTCs_Logit.npy", predTCs_Logit)
np.save("ensembleTCs.npy", ensembleTCs)
| 33.144509
| 142
| 0.671085
| 638
| 5,734
| 5.87931
| 0.296238
| 0.012797
| 0.017595
| 0.018129
| 0.279126
| 0.252466
| 0.227939
| 0.18635
| 0.154625
| 0.115702
| 0
| 0.04044
| 0.206488
| 5,734
| 172
| 143
| 33.337209
| 0.783956
| 0.070108
| 0
| 0.294118
| 0
| 0
| 0.190145
| 0.103254
| 0
| 0
| 0
| 0
| 0
| 1
| 0.007353
| false
| 0
| 0.051471
| 0
| 0.066176
| 0.036765
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a949db919cd36868c22671e2839695a92034044f
| 3,117
|
py
|
Python
|
config.py
|
eicc27/Pixcrawl-Full
|
dfa36ee5b9990ff2781a9bc39a6a60c12b1c9bdb
|
[
"MIT"
] | null | null | null |
config.py
|
eicc27/Pixcrawl-Full
|
dfa36ee5b9990ff2781a9bc39a6a60c12b1c9bdb
|
[
"MIT"
] | null | null | null |
config.py
|
eicc27/Pixcrawl-Full
|
dfa36ee5b9990ff2781a9bc39a6a60c12b1c9bdb
|
[
"MIT"
] | null | null | null |
from msedge.selenium_tools import Edge, EdgeOptions
from lxml import html
import time
import curses
# Set up the curses screen and make sure the terminal is tall enough for the
# prompts this script draws.
stdscr = curses.initscr()
max_y = stdscr.getmaxyx()[0] - 1
if max_y < 16:
    raise Exception("Terminal row size must be more then 17, but now it is %d." % (max_y + 1))
# changelog: more OOP.
# class: illust, illustName, picList (made up of pic classes)
def driver_init():
    """Create an Edge (Chromium) webdriver bound to the local user profile."""
    opts = EdgeOptions()
    opts.use_chromium = True
    # Reuse the signed-in Edge profile so existing pixiv cookies are available.
    opts.add_argument(r"--user-data-dir=C:\Users\Chan\AppData\Local\Microsoft\Edge\User Data")
    # Suppress noisy driver logging in the console.
    opts.add_experimental_option('excludeSwitches', ['enable-logging'])
    return Edge(options=opts)
# Interactive flow: confirm the user wants to (re)configure pixiv's R-18
# settings, show the current state, then optionally toggle each switch.
stdscr.addstr("Config R18?\nWarning: you must quit all edge browsers and kill their process in task manager!")
# When getstr(), auto-refresh
f0_config = bytes.decode(stdscr.getstr())
# Y / y / plain Enter all mean "yes".
if f0_config == 'Y' or f0_config == 'y' or f0_config == '':
    driver = driver_init()
    driver.get("https://www.pixiv.net/setting_user.php")
    etree = html.etree
    initial_page = driver.page_source
    initial_dom = etree.HTML(initial_page)
    # Checked radio values for the r18 and r18g switches, in page order.
    r18Switch = initial_dom.xpath(
        '//input[(@name="r18" or @name="r18g") and @checked]/@value')
    if r18Switch[0] == 'hide':
        stdscr.addstr('R-18 disabled.\n')
    else:
        stdscr.addstr('R-18 enabled.\n')
    if r18Switch[1] == '1':
        stdscr.addstr('R-18G disabled.\n')
    else:
        stdscr.addstr('R-18G enabled.\n')
    stdscr.refresh()
    stdscr.addstr(
        'Do you want confirm the r-18 settings?\nPress Y or Enter to navigate you to the settings page, or by default '
        'NO.\n')
    f1_config = bytes.decode(stdscr.getstr())
    if f1_config == 'y' or f1_config == 'Y' or f1_config == '':
        # Ask for both switches up front, then click the matching radio
        # buttons on the settings page.
        stdscr.addstr('Unleash R-18?\n')
        r18Config = bytes.decode(stdscr.getstr())
        stdscr.addstr('Unleash R-18G?\n')
        r18gConfig = bytes.decode(stdscr.getstr())
        if r18Config == 'y' or r18Config == 'Y' or r18Config == '':
            driver.find_element_by_xpath(
                '//input[@name="r18" and @value="show"]').click()
            stdscr.addstr('R-18 has been ON.\n')
        else:
            driver.find_element_by_xpath(
                '//input[@name="r18" and @value="hide"]').click()
            stdscr.addstr('R-18 is now OFF.\n')
        # Give a timely feedback
        stdscr.refresh()
        if r18gConfig == 'Y' or r18gConfig == 'y' or r18gConfig == '':
            driver.find_element_by_xpath(
                '//input[@name="r18g" and @value="2"]').click()
            stdscr.addstr('R-18G has been ON.\n')
        else:
            driver.find_element_by_xpath(
                '//input[@name="r18g" and @value="1"]').click()
            stdscr.addstr('R-18G is now OFF.\n')
        stdscr.refresh()
        # Submit the form and give the site a moment to persist the change.
        driver.find_element_by_xpath('//input[@name="submit"]').click()
        time.sleep(2)
        stdscr.addstr('Config saved. Now refreshing...\n')
        stdscr.refresh()
        driver.refresh()
    driver.quit()
| 39.961538
| 120
| 0.600898
| 410
| 3,117
| 4.468293
| 0.37561
| 0.085153
| 0.056769
| 0.051856
| 0.283843
| 0.225437
| 0.130459
| 0.112445
| 0.112445
| 0.112445
| 0
| 0.03392
| 0.252807
| 3,117
| 77
| 121
| 40.480519
| 0.752684
| 0.041065
| 0
| 0.176471
| 0
| 0.029412
| 0.290678
| 0.029584
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014706
| false
| 0
| 0.058824
| 0
| 0.088235
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a94bba226fe399a457f809ece3327258a884ffc0
| 1,181
|
py
|
Python
|
dev/tools/roadnet_convert/geo/formats/osm.py
|
gusugusu1018/simmobility-prod
|
d30a5ba353673f8fd35f4868c26994a0206a40b6
|
[
"MIT"
] | 50
|
2018-12-21T08:21:38.000Z
|
2022-01-24T09:47:59.000Z
|
dev/tools/roadnet_convert/geo/formats/osm.py
|
gusugusu1018/simmobility-prod
|
d30a5ba353673f8fd35f4868c26994a0206a40b6
|
[
"MIT"
] | 2
|
2018-12-19T13:42:47.000Z
|
2019-05-13T04:11:45.000Z
|
dev/tools/roadnet_convert/geo/formats/osm.py
|
gusugusu1018/simmobility-prod
|
d30a5ba353673f8fd35f4868c26994a0206a40b6
|
[
"MIT"
] | 27
|
2018-11-28T07:30:34.000Z
|
2022-02-05T02:22:26.000Z
|
from geo.position import Location
import geo.helper
class RoadNetwork:
    """The primary container class for OSM road networks. (See: simmob.py)

    Note that key/value properties are reduced to lowercase for both keys
    and values.
    """
    def __init__(self):
        self.bounds = []  # [Location, Location]: minimum point, maximum point
        self.nodes = {}   # origId => Node
        self.ways = {}    # origId => Way
class Node:
    """A single OSM node: an original id, a Location, and lowercased tags."""
    def __init__(self, nodeId, lat, lng, props):
        geo.helper.assert_non_null(nodeId, lat, lng, props, msg="Null args in Node constructor")
        self.nodeId = str(nodeId)
        # Coordinates arrive as strings from the XML; convert before storing.
        lat_value, lng_value = float(lat), float(lng)
        self.loc = Location(lat_value, lng_value)
        self.props = geo.helper.dict_to_lower(props)
class Way:
    """An ordered sequence of Nodes with lowercased tag properties.

    Ways are somewhat different from Links: they don't have "from" and "to"
    Nodes, but rather feature an ordered sequence of Nodes.
    """
    def __init__(self, wayId, nodes, props):
        geo.helper.assert_non_null(wayId, nodes, props, msg="Null args in Way constructor")
        if len(nodes) < 2:
            raise Exception('Way cannot be made with less than 2 Nodes.')
        self.wayId = str(wayId)
        self.nodes = nodes  # ordered [Node]
        self.props = geo.helper.dict_to_lower(props)
| 31.078947
| 92
| 0.675699
| 172
| 1,181
| 4.523256
| 0.5
| 0.057841
| 0.071979
| 0.043702
| 0.203085
| 0.156812
| 0.087404
| 0.087404
| 0
| 0
| 0
| 0.002134
| 0.206605
| 1,181
| 37
| 93
| 31.918919
| 0.828175
| 0.298899
| 0
| 0.095238
| 0
| 0
| 0.125
| 0
| 0
| 0
| 0
| 0
| 0.095238
| 1
| 0.142857
| false
| 0
| 0.095238
| 0
| 0.380952
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a94dde590f87aeb3b20de4c6b4b586cab3f571b5
| 1,441
|
py
|
Python
|
Sorts/bubble_sort_recursive.py
|
Neiva07/Algorithms
|
cc2b22d1f69f0af7b91a8326550e759abfba79c8
|
[
"MIT"
] | 199
|
2019-12-01T01:23:34.000Z
|
2022-02-28T10:30:40.000Z
|
Sorts/bubble_sort_recursive.py
|
Neiva07/Algorithms
|
cc2b22d1f69f0af7b91a8326550e759abfba79c8
|
[
"MIT"
] | 35
|
2020-06-08T17:59:22.000Z
|
2021-11-11T04:00:29.000Z
|
Sorts/bubble_sort_recursive.py
|
Neiva07/Algorithms
|
cc2b22d1f69f0af7b91a8326550e759abfba79c8
|
[
"MIT"
] | 106
|
2020-02-05T01:28:19.000Z
|
2022-03-11T05:38:54.000Z
|
# Script: bubble_sort_recursive.py
# Author: Joseph L. Crandal
# Purpose: Demonstrate bubble sort with recursion
# data will be the list to be sorted
data = [ 0, 5, 2, 3, 10, 123, -53, 23, 9, 2 ]
dataOrig = [ 0, 5, 2, 3, 10, 123, -53, 23, 9, 2 ]
# In a bubble sort you will work your way through the dataset
# and move the elements that are adjacent
# Recursive functions call on themselves to process data until a goal has been met or it runs out of items to process
# In this example it continues to go over the dataset until it doesn't see any further change in position from sorting
def bubbleSort(arr):
    """Recursively bubble-sort ``arr`` in place (ascending).

    One pass swaps adjacent out-of-order elements; the function then calls
    itself until a pass makes no swaps, which means the list is sorted.
    Returns None; the input list is mutated.
    """
    length = len(arr)
    # changed tracks whether anything moved on this pass
    changed = False
    for i in range(length - 1):
        if arr[i] > arr[i + 1]:
            # Swap so the smaller value comes first
            arr[i], arr[i + 1] = arr[i + 1], arr[i]
            changed = True
    # A pass with no swaps means sorting is done -- stop recursing.
    # (Idiomatic truthiness test instead of `changed == False`.)
    if changed:
        bubbleSort(arr)
# Execute the sort (sorts `data` in place)
bubbleSort(data)
# Show sorted array versus original.
# Iterate the lists directly instead of `for i in range(len(...))`.
print("Unsorted array: ")
for value in dataOrig:
    print(value)
print("Sorted array: ")
for value in data:
    print(value)
| 35.146341
| 118
| 0.668286
| 241
| 1,441
| 3.987552
| 0.53112
| 0.024974
| 0.01873
| 0.034339
| 0.098855
| 0.07076
| 0.031217
| 0.031217
| 0.031217
| 0.031217
| 0
| 0.03154
| 0.251908
| 1,441
| 40
| 119
| 36.025
| 0.859926
| 0.575989
| 0
| 0
| 0
| 0
| 0.050336
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0
| 0
| 0.1
| 0.2
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a94f07dd94305ef8cca149684b5c8e4ef5b6072f
| 19,260
|
py
|
Python
|
mcv_consoler/plugins/tempest/runner.py
|
vladryk/mcv
|
ee74beafc65053ce200e03da423784cee0724e23
|
[
"Apache-2.0"
] | null | null | null |
mcv_consoler/plugins/tempest/runner.py
|
vladryk/mcv
|
ee74beafc65053ce200e03da423784cee0724e23
|
[
"Apache-2.0"
] | null | null | null |
mcv_consoler/plugins/tempest/runner.py
|
vladryk/mcv
|
ee74beafc65053ce200e03da423784cee0724e23
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015-2016 Mirantis, Inc
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ConfigParser
import datetime
import json
import logging
import os.path
import subprocess
import traceback
from oslo_config import cfg
from mcv_consoler.common.config import DEFAULT_CIRROS_IMAGE
from mcv_consoler.common.config import MOS_TEMPEST_MAP
from mcv_consoler.common.config import TIMES_DB_PATH
from mcv_consoler.common.errors import TempestError
from mcv_consoler.plugins.rally import runner as rrunner
from mcv_consoler import utils
# Module-level logger and oslo.config handle.
LOG = logging.getLogger(__name__)
CONF = cfg.CONF

# Extra tempest options merged into the generated tempest.conf via
# `rally verify genconfig --add-options`; the 'auth' section is filled
# in later when pre-existing users are configured.
tempest_additional_conf = {
    'compute':
        {'fixed_network_name': CONF.networking.network_ext_name},
    'object-storage':
        {'operator_role': 'admin',
         'reseller_admin_role': 'admin'},
    'auth': {}
}
class TempestOnDockerRunner(rrunner.RallyOnDockerRunner):
    """Run Tempest verification suites inside a Docker container.

    Builds on the Rally-on-Docker runner: brings up an 'mcv-tempest'
    container with cloud credentials, installs tempest through rally,
    runs the requested test sets and collects HTML/JSON/detail reports.
    """

    failure_indicator = TempestError.NO_RUNNER_ERROR
    identity = 'tempest'

    def __init__(self, ctx):
        super(TempestOnDockerRunner, self).__init__(ctx)
        self.path = self.ctx.work_dir.base_dir
        self.container = None
        self.failed_cases = 0
        self.home = '/mcv'                          # path inside the container
        self.homedir = '/home/mcv/toolbox/tempest'  # matching path on the host

    def _verify_rally_container_is_up(self):
        self.verify_container_is_up("tempest")

    def create_cirros_image(self):
        """Upload the functional-test cirros image unless it already exists.

        Returns the glance image id, or None when the image file cannot
        be opened.
        """
        i_list = self.glanceclient.images.list()
        for im in i_list:
            if im.name == 'mcv-test-functional-cirros':
                return im.id
        img_fp = None
        try:
            img_fp = open(DEFAULT_CIRROS_IMAGE)
        except IOError as e:
            LOG.debug('Cannot open file {path}: {err}'.format(
                path=DEFAULT_CIRROS_IMAGE,
                err=str(e)))
            return
        try:
            im = self.glanceclient.images.create(
                name='mcv-test-functional-cirros',
                disk_format="qcow2",
                is_public=True,
                container_format="bare",
                data=img_fp)
        finally:
            # FIX: the file handle was previously leaked.
            img_fp.close()
        return im.id

    def cleanup_cirros_image(self):
        self.cleanup_image('mcv-test-functional-cirros')

    def start_container(self):
        """Start the tempest container with cloud credentials injected as env vars."""
        LOG.debug("Bringing up Tempest container with credentials")
        add_host = ""
        # TODO(albartash): Refactor this place!
        if self.access_data["auth_fqdn"] != '':
            add_host = "--add-host={fqdn}:{endpoint}".format(
                fqdn=self.access_data["auth_fqdn"],
                endpoint=self.access_data["public_endpoint_ip"])

        res = subprocess.Popen(
            ["docker", "run", "-d", "-P=true"] +
            # list-by-bool multiplication appends the option only when set
            [add_host] * (add_host != "") +
            ["-p", "6001:6001",
             "-e", "OS_AUTH_URL=" + self.access_data["auth_url"],
             "-e", "OS_TENANT_NAME=" + self.access_data["tenant_name"],
             # FIX: '=' was missing here, producing a malformed env entry
             # like 'OS_REGION_NAMERegionOne' so the region was never set.
             "-e", "OS_REGION_NAME=" + self.access_data["region_name"],
             "-e", "OS_USERNAME=" + self.access_data["username"],
             "-e", "OS_PASSWORD=" + self.access_data["password"],
             "-e", "KEYSTONE_ENDPOINT_TYPE=publicUrl",
             "-v", '%s:/home/rally/.rally/tempest' % self.homedir,
             "-v", "%s:%s" % (self.homedir, self.home), "-w", self.home,
             "-t", "mcv-tempest"],
            stdout=subprocess.PIPE,
            preexec_fn=utils.ignore_sigint).stdout.read()
        LOG.debug('Finish bringing up Tempest container.'
                  'ID = %s' % str(res))
        self.verify_container_is_up()
        self._patch_rally()
        # Hotfix. set rally's permission for .rally/ folder
        # Please remove this. Use: `sudo -u rally docker run` when
        # rally user gets its permissions to start docker containers
        cmd = 'docker exec -t {cid} sudo chown rally:rally /home/rally/.rally'
        utils.run_cmd(cmd.format(cid=self.container_id))
        self.copy_config()
        self.install_tempest()

    def _patch_rally(self):
        """Patch tempest requirements inside the container and commit the change."""
        dist = '/tempest/requirements.txt'
        LOG.debug('Patching tempest requirements')
        tempest_patch = '/mcv/custom_patches/requirements.patch'
        self._os_patch(dist, tempest_patch, self.container_id)
        git_commit_cmd = (
            'cd /tempest && git config --global user.name \"mcv-team\" && '
            'git config --global user.email '
            '\"mirantis-cloud-validation-support@mirantis.com\" && '
            'sudo git add . && sudo git commit -m \"added markupsafe to '
            'requirements, which is needed for pbr\"')
        utils.run_cmd('docker exec -t {cid} sh -c "{cmd}"'.format(
            cid=self.container_id,
            cmd=git_commit_cmd))

    def make_detailed_report(self, task):
        """Convert the subunit stream of a run into a plain-text detail report.

        Writes <task>.txt under reports/details/ in the container, copies
        it to the host report path, and stores the tempest configs used.
        """
        LOG.debug('Generating detailed report')
        details_dir = os.path.join(self.home, 'reports/details/')
        details_file = os.path.join(details_dir, task + '.txt')
        cmd = "docker exec -t %(cid)s " \
              "rally deployment list | grep existing | awk \'{print $2}\'" \
              % dict(cid=self.container_id)
        deployment_id = utils.run_cmd(cmd, quiet=True).strip()
        cmd = 'docker exec -t {cid} mkdir -p {out_dir}'
        utils.run_cmd(cmd.format(cid=self.container_id, out_dir=details_dir),
                      quiet=True)
        # store tempest.conf
        self.store_config(os.path.join(self.homedir,
                                       "for-deployment-{ID}/tempest.conf"
                                       .format(ID=deployment_id)))
        self.store_config(os.path.join(self.homedir, "conf/existing.json"))
        # Note(ogrytsenko): tool subunit2pyunit returns exit code '1' if
        # at leas one test failed in a test suite. It also returns exit
        # code '1' if some error occurred during processing a file, like:
        # "Permission denied".
        # We force 'exit 0' here and will check the real status lately
        # by calling 'test -e <details_file>'
        cmd = 'docker exec -t {cid} /bin/sh -c \" ' \
              'subunit2pyunit /mcv/for-deployment-{ID}/subunit.stream ' \
              '2> {out_file}\"; ' \
              'exit 0'.format(cid=self.container_id,
                              ID=deployment_id,
                              out_file=details_file)
        out = utils.run_cmd(cmd, quiet=True)
        cmd = 'docker exec -t {cid} test -e {out_file} ' \
              '&& echo -n yes || echo -n no'.format(cid=self.container_id,
                                                    out_file=details_file)
        exists = utils.run_cmd(cmd)
        if exists == 'no':
            LOG.debug('ERROR: Failed to create detailed report for '
                      '{task} set. Output: {out}'.format(task=task, out=out))
            return
        cmd = 'mkdir -p {path}/details'.format(path=self.path)
        utils.run_cmd(cmd, quiet=True)
        reports_dir = os.path.join(self.homedir, 'reports')
        cmd = 'cp {reports}/details/{task}.txt {path}/details'
        utils.run_cmd(
            cmd.format(reports=reports_dir, task=task, path=self.path),
            quiet=True
        )
        LOG.debug(
            "Finished creating detailed report for '{task}'. "
            "File: {details_file}".format(task=task, details_file=details_file)
        )

    def fill_additional_conf(self):
        """Point tempest at the pre-created accounts file when existing users are used."""
        if CONF.rally.existing_users:
            tempest_additional_conf['auth'].update(
                test_accounts_file=os.path.join(
                    self.home, 'additional_users.yaml'),
                use_dynamic_credentials=False)

    def install_tempest(self):
        """Install tempest into the rally deployment and generate its config."""
        LOG.debug("Searching for installed tempest")
        super(TempestOnDockerRunner, self)._rally_deployment_check()
        self.fill_additional_conf()
        LOG.debug("Generating additional config")
        path_to_conf = os.path.join(self.homedir, 'additional.conf')
        with open(path_to_conf, 'wb') as conf_file:
            config = ConfigParser.ConfigParser()
            # Injecting _sections directly is a shortcut: ConfigParser has
            # no public API for loading a nested dict wholesale.
            config._sections = tempest_additional_conf
            config.write(conf_file)
        LOG.debug("Installing tempest...")
        version = MOS_TEMPEST_MAP.get(self.access_data['mos_version'])
        if not version:
            cmd = ("docker exec -t {cid} "
                   "rally verify install --system-wide "
                   "--deployment existing --source /tempest ").format(
                cid=self.container_id)
        else:
            cmd = ("docker exec -t {cid} "
                   "rally verify install --system-wide "
                   "--deployment existing --source /tempest "
                   "--version {version} ").format(
                cid=self.container_id,
                version=version)
        utils.run_cmd(cmd, quiet=True)
        cmd = "docker exec -t %(container)s rally verify genconfig " \
              "--add-options %(conf_path)s" % \
              {"container": self.container_id,
               "conf_path": os.path.join(self.home, 'additional.conf')}
        utils.run_cmd(cmd, quiet=True)

    def _run_tempest_on_docker(self, task, *args, **kwargs):
        """Run one tempest set via rally and return its JSON results string.

        Returns '' when the verification itself failed to produce results.
        """
        LOG.debug("Starting verification")
        # With pre-existing users tempest cannot create credentials on the
        # fly, so tests must not run concurrently.
        if CONF.rally.existing_users:
            concurr = 1
        else:
            concurr = 0
        run_by_name = kwargs.get('run_by_name')
        if run_by_name:
            cmd = ("docker exec -t {cid} rally "
                   "--log-file {home}/log/tempest.log --rally-debug"
                   " verify start --system-wide "
                   "--regex {_set} --concurrency {con}").format(
                cid=self.container_id,
                home=self.home,
                _set=task,
                con=concurr)
        else:
            cmd = ("docker exec -t {cid} rally "
                   "--log-file {home}/log/tempest.log --rally-debug"
                   " verify start --system-wide "
                   "--set {_set} --concurrency {con}").format(
                cid=self.container_id,
                home=self.home,
                _set=task,
                con=concurr)
        utils.run_cmd(cmd, quiet=True)
        cmd = "docker exec -t {cid} rally verify list".format(
            cid=self.container_id)
        # TODO(ogrytsenko): double-check this approach
        try:
            p = utils.run_cmd(cmd)
        except subprocess.CalledProcessError as e:
            LOG.error("Task %s failed with: %s" % (task, e))
            return ''
        # Status column of the last verification row in the table output.
        run = p.split('\n')[-3].split('|')[8]
        if run == 'failed':
            LOG.error('Verification failed, unable to generate report')
            return ''
        LOG.debug('Generating html report')
        cmd = ("docker exec -t {cid} rally verify results --html "
               "--out={home}/reports/{task}.html").format(
            cid=self.container_id, home=self.home, task=task)
        utils.run_cmd(cmd, quiet=True)
        reports_dir = os.path.join(self.homedir, 'reports')
        cmd = "cp {reports}/{task}.html {path} ".format(
            reports=reports_dir, task=task, path=self.path)
        utils.run_cmd(cmd, quiet=True)
        try:
            self.make_detailed_report(task)
        except Exception:
            # Detail report is best-effort; log and continue to JSON results.
            LOG.debug('ERROR: \n' + traceback.format_exc())
        cmd = "docker exec -t {cid} /bin/sh -c " \
              "\"rally verify results --json 2>/dev/null\" " \
              .format(cid=self.container_id)
        return utils.run_cmd(cmd, quiet=True)

    def parse_results(self, res, task):
        """Parse rally's JSON verification output and record pass/fail state.

        Returns True when the set passed; False otherwise. Updates
        test_success/test_failures, failure_indicator and failed_cases.
        """
        LOG.debug("Parsing results")
        if res == '':
            LOG.debug("Results of test set '%s': FAILURE" % task)
            self.failure_indicator = TempestError.VERIFICATION_FAILED
            self.test_failures.append(task)
            LOG.info(" * FAILED")
            return False
        try:
            self.task = json.loads(res)
        except ValueError:
            LOG.debug("Results of test set '%s': "
                      "FAILURE, gotten not-JSON object. "
                      "Please see logs" % task)
            LOG.debug("Not-JSON object: %s", res)
            self.test_failures.append(task)
            LOG.info(" * FAILED")
            return False
        time_of_tests = float(self.task.get('time', '0'))
        time_of_tests = str(round(time_of_tests, 3)) + 's'
        self.time_of_tests[task] = {'duration': time_of_tests}
        if self.task.get('tests', 0) == 0:
            self.test_failures.append(task)
            LOG.debug("Task '%s' was skipped. Perhaps the service "
                      "is not working" % task)
            LOG.info(" * FAILED")
            return False
        failures = self.task.get('failures')
        success = self.task.get('success')
        self.failed_cases += failures
        LOG.debug("Results of test set '%s': "
                  "SUCCESS: %d FAILURES: %d" % (task, success, failures))
        if not failures:
            self.test_success.append(task)
            LOG.info(" * PASSED")
            return True
        else:
            self.test_failures.append(task)
            self.failure_indicator = TempestError.TESTS_FAILED
            LOG.info(" * FAILED")
            return False

    def cleanup_toolbox(self):
        LOG.info('Uninstalling tempest ...')
        cmd = ('docker exec -t {cid} ' 'rally verify uninstall '
               '--deployment existing'.format(cid=self.container_id))
        utils.run_cmd(cmd, quiet=True)

    def run_batch(self, tasks, *args, **kwargs):
        """Run all requested tempest sets, tracking timing and failure limits.

        Returns a summary dict with failed/succeeded/not-found sets, their
        durations, and per-set result details.
        """
        with self.store('rally.log', 'tempest.log'):
            tool_name = kwargs["tool_name"]
            all_time = kwargs["all_time"]
            elapsed_time = kwargs["elapsed_time"]
            # Note (ayasakov): the database execution time of each test.
            # In the first run for each test tool calculate the multiplier,
            # which shows the difference of execution time between testing
            # on our cloud and the current cloud.
            db = kwargs.get('db')
            first_run = True
            multiplier = 1.0
            test_time = 0
            all_time -= elapsed_time

            self.create_cirros_image()
            self._setup_rally_on_docker()
            # NOTE(ogrytsenko): only test-suites are discoverable for tempest
            if not kwargs.get('run_by_name'):
                cid = self.container_id
                tasks, missing = self.discovery(cid).match(tasks)
                self.test_not_found.extend(missing)

            t = []
            tempest_task_results_details = {}
            LOG.info("Time start: %s UTC\n" % str(datetime.datetime.utcnow()))
            for task in tasks:
                LOG.info("-" * 60)
                task = task.replace(' ', '')
                if kwargs.get('event').is_set():
                    LOG.info("Keyboard interrupt. Set %s won't start" % task)
                    break
                time_start = datetime.datetime.utcnow()
                LOG.info('Running %s tempest set' % task)
                LOG.debug("Time start: %s UTC" % str(time_start))
                if not CONF.times.update:
                    try:
                        test_time = db[tool_name][task]
                    except KeyError:
                        test_time = 0
                    exp_time = utils.seconds_to_humantime(test_time *
                                                          multiplier)
                    msg = "Expected time to complete %s: %s"
                    if not test_time:
                        LOG.debug(msg, task, exp_time)
                    else:
                        LOG.info(msg, task, exp_time)

                self.run_individual_task(task, *args, **kwargs)

                time_end = datetime.datetime.utcnow()
                time = time_end - time_start
                LOG.debug("Time end: %s UTC" % str(time_end))

                if CONF.times.update:
                    # Record this run's duration in the times DB.
                    if tool_name in db.keys():
                        db[tool_name].update({task: time.seconds})
                    else:
                        db.update({tool_name: {task: time.seconds}})
                else:
                    if first_run:
                        first_run = False
                        if test_time:
                            multiplier = float(time.seconds) / float(test_time)
                    all_time -= test_time
                    persent = 1.0
                    if kwargs["all_time"]:
                        persent -= float(all_time) / float(kwargs["all_time"])
                    persent = int(persent * 100)
                    persent = 100 if persent > 100 else persent
                    line = 'Completed %s' % persent + '%'
                    time_str = utils.seconds_to_humantime(all_time *
                                                          multiplier)
                    if all_time and multiplier:
                        line += ' and remaining time %s' % time_str
                    LOG.info(line)

                LOG.info("-" * 60)
                t.append(self.task['test_cases'].keys())
                tempest_task_results_details[task] = {
                    # overall number of tests in suit
                    "tests": self.task.get("tests", 0),
                    "test_succeed": self.task.get("success", 0),
                    "test_failed": self.task.get("failures", 0),
                    "test_skipped": self.task.get("skipped", 0),
                    "expected_failures": self.task.get("expected_failures", 0)
                }
                if self.failed_cases > self.max_failed_tests:
                    LOG.info('*LIMIT OF FAILED TESTS EXCEEDED! STOP RUNNING.*')
                    self.failure_indicator = \
                        TempestError.FAILED_TEST_LIMIT_EXCESS
                    break

            if CONF.times.update:
                with open(TIMES_DB_PATH, "w") as f:
                    json.dump(db, f)
            LOG.info("\nTime end: %s UTC" % str(datetime.datetime.utcnow()))
            self.cleanup_toolbox()
            self.cleanup_cirros_image()
            return {"test_failures": self.test_failures,
                    "test_success": self.test_success,
                    "test_not_found": self.test_not_found,
                    "time_of_tests": self.time_of_tests,
                    "tempest_tests_details": tempest_task_results_details,
                    }

    @utils.developer_mode
    def run_individual_task(self, task, *args, **kwargs):
        """Run a single tempest set, dump raw output and parse the results."""
        results = self._run_tempest_on_docker(task, *args, **kwargs)
        # store raw results
        self.dump_raw_results(task, results)
        self.parse_results(results, task)
        return True
| 40.125
| 79
| 0.546625
| 2,182
| 19,260
| 4.662236
| 0.20165
| 0.017301
| 0.025066
| 0.020643
| 0.237983
| 0.167502
| 0.14106
| 0.12602
| 0.098889
| 0.079524
| 0
| 0.004958
| 0.340291
| 19,260
| 479
| 80
| 40.208768
| 0.795687
| 0.077362
| 0
| 0.204787
| 0
| 0
| 0.207173
| 0.030619
| 0
| 0
| 0
| 0.002088
| 0
| 1
| 0.037234
| false
| 0.005319
| 0.037234
| 0
| 0.117021
| 0.00266
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a9525cb4b63e18ce45a9ca957c592c3c20ea53fe
| 1,385
|
py
|
Python
|
docsource/sphinx/source/auto_examples/hammersleypoints/plot_hamm_points_sphere.py
|
EricHughesABC/pygamma_gallery
|
64565d364e68a185aeee25b904813d795ecbe87c
|
[
"MIT"
] | null | null | null |
docsource/sphinx/source/auto_examples/hammersleypoints/plot_hamm_points_sphere.py
|
EricHughesABC/pygamma_gallery
|
64565d364e68a185aeee25b904813d795ecbe87c
|
[
"MIT"
] | null | null | null |
docsource/sphinx/source/auto_examples/hammersleypoints/plot_hamm_points_sphere.py
|
EricHughesABC/pygamma_gallery
|
64565d364e68a185aeee25b904813d795ecbe87c
|
[
"MIT"
] | null | null | null |
"""
#################
Hammersley Sphere
#################
"""
import numpy as np
from matplotlib import pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
def return_point(m, n, p, l=10):
    """Return one coordinate of the m-th Hammersley point.

    m is the index number of the Hammersley point to calculate
    n is the maximum number of points
    p is the order (base) of the Hammersley point, 1,2,3,4,... etc
    l is the highest power of p considered in the radical inverse;
      previously hard-coded to 10, now a parameter with the same default
      so existing callers are unaffected.

    For p == 1 the coordinate is the regular lattice m/n; otherwise it is
    the radical-inverse of m in base p.
    :return type double
    """
    if p == 1:
        return m / float(n)
    v = 0.0
    for j in range(l, -1, -1):
        num = m // p ** j
        if num > 0:
            m -= num * p ** j
            v += num / (p ** (j + 1))
    return v
if __name__ == "__main__":
    npts = 500
    # Two Hammersley components per point: base-1 (uniform lattice) drives
    # the azimuth, base-7 drives the polar placement.
    h_1 = np.array([return_point(m, npts, 1) for m in range(npts)])
    h_7 = np.array([return_point(m, npts, 7) for m in range(npts)])

    azimuth = h_1 * 2.0 * np.pi
    cos_theta = 2.0 * h_7 - 1.0  # map from [0,1] to [-1,1]
    sin_theta = np.sqrt(1.0 - cos_theta * cos_theta)

    # Spherical -> Cartesian on the unit sphere.
    xxx = sin_theta * np.cos(azimuth)
    yyy = sin_theta * np.sin(azimuth)
    zzz = cos_theta

    fig = plt.figure()
    ax = fig.gca(projection='3d')
    ax.plot(xxx, yyy, zzz, '.')
    ticks = [-1.0, -0.5, 0.0, 0.5, 1.0]
    ax.set_xticks(ticks)
    ax.set_yticks(ticks)
    ax.set_zticks(ticks)
    ax.set_title("Ham Points, 1 and 7", fontsize=14)
    plt.show()
| 22.704918
| 76
| 0.519856
| 252
| 1,385
| 2.769841
| 0.365079
| 0.028653
| 0.025788
| 0.017192
| 0.113181
| 0.06447
| 0.06447
| 0.06447
| 0.06447
| 0.06447
| 0
| 0.081527
| 0.300361
| 1,385
| 61
| 77
| 22.704918
| 0.638803
| 0.231769
| 0
| 0
| 0
| 0
| 0.02924
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029412
| false
| 0
| 0.088235
| 0
| 0.176471
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a953cb0fff14bcb71d5e717da31296569a25a401
| 11,261
|
py
|
Python
|
org/heather/setup/__init__.py
|
PandaLunatiquePrivate/Heather
|
a50ce59a7a61ac103003434fc0defc0e3bb4860c
|
[
"Apache-2.0"
] | 2
|
2021-03-06T20:15:14.000Z
|
2021-03-28T16:58:13.000Z
|
org/heather/setup/__init__.py
|
PandaLunatiquePrivate/Heather
|
a50ce59a7a61ac103003434fc0defc0e3bb4860c
|
[
"Apache-2.0"
] | null | null | null |
org/heather/setup/__init__.py
|
PandaLunatiquePrivate/Heather
|
a50ce59a7a61ac103003434fc0defc0e3bb4860c
|
[
"Apache-2.0"
] | null | null | null |
import enum
import json
import os
import requests
import yaml
import socket
import sqlite3
import traceback
from org.heather.api.tools import Tools
from org.heather.api.log import Log, LogLevel
@enum.unique
class VerifyResult(enum.Enum):
    """Installation-check result codes (presumably returned by a full
    Setup.verify implementation; not referenced elsewhere in this
    chunk -- confirm with callers)."""
    OK = 0
    NEED_SETUP = 1
    NEED_REPAIR = 2
class Setup():
    """First-run installer for Heather.

    The wizard creates the directory layout, provisions the sqlite
    database, downloads locale and avatar assets, and writes heather.conf.
    All methods are static; the class is a namespace only.
    """

    @staticmethod
    def verify(installationPath):
        print('TODO: setup verification')
        return True

    @staticmethod
    def is_config_valid(path):
        """Return True if <path>/heather.conf exists and parses as JSON."""
        full_path = os.path.abspath(path + "heather.conf" if path.endswith('/') else path + "/heather.conf")
        if os.path.exists(full_path):
            with open(full_path, 'r') as f:
                try:
                    json.load(f)
                    return True
                # FIX: was a bare `except:`; json.load signals invalid
                # content with ValueError (JSONDecodeError is a subclass).
                except ValueError:
                    return False
        else:
            return False

    @staticmethod
    def wizard(rootPath):
        """Interactive first-time setup: prompts for a path and locale,
        then builds directories, database, assets and the config file."""
        Log.do(LogLevel.ALL, 'Launching Heather setup wizard...', up_spacing=1, bottom_spacing=1)
        Log.do(LogLevel.INFO, f'Please specify an valid installation path:\nCurrently in {rootPath}', up_spacing=1, bottom_spacing=1)

        # Prompt until we get a writable, existing directory.
        while True:
            setupParentPath = os.path.normpath(input('Installation path: '))
            if len(setupParentPath) > 0 and os.path.isdir(setupParentPath):
                setupParentPath = os.path.abspath(setupParentPath)
                if os.access(setupParentPath, os.W_OK):
                    break
                else:
                    Log.do(LogLevel.ERROR, 'Permission denied! Can access to the specified directory! (Writting or reading)', up_spacing=1)
            else:
                Log.do(LogLevel.ERROR, 'Invalid directory!', up_spacing=1)
            Log.do(LogLevel.INFO, 'Please specify an valid installation path:')

        Log.do(LogLevel.INFO, 'Downloading locales files...', up_spacing=1)
        data = requests.get('https://pastebin.com/raw/48kzz6Y9')
        locales = yaml.load(data.text, Loader=yaml.CLoader)
        Log.do(LogLevel.GOOD, f'Found {len(locales)} locales availables!')
        Log.do(LogLevel.INFO, f'Select a default language:', up_spacing=1)

        # Prompt until a known locale is chosen.
        while True:
            for locale in locales:
                Log.do(LogLevel.ALL, f'- {locale}')
            setupLocale = input('Locale language: ')
            if len(setupLocale) > 0 and locales.get(setupLocale) != None:
                break
            else:
                Log.do(LogLevel.ERROR, 'Invalid locale!', up_spacing=1)
                Log.do(LogLevel.INFO, f'Select a default language:')

        Log.do(LogLevel.INFO, f'Start automatic setup...', up_spacing=1)

        # Setup: Directories
        directories = [
            os.path.normpath(setupParentPath + "/avatars"),
            os.path.normpath(setupParentPath + "/database"),
            os.path.normpath(setupParentPath + "/locales"),
            os.path.normpath(setupParentPath + "/logs"),
            os.path.normpath(setupParentPath + "/files"),
            os.path.normpath(setupParentPath + "/files/movies"),
            os.path.normpath(setupParentPath + "/files/series")
        ]
        for directory in directories:
            Log.do(LogLevel.ALL, f'Creating directory {directory}', delay=0.1)
            try:
                os.mkdir(directory)
            # FIX: was a bare `except:`; only ignore filesystem errors
            # (typically "directory already exists").
            except OSError:
                pass

        # Setup: Database
        Log.do(LogLevel.ALL, f'Setting up database...', delay=0.1)
        database = sqlite3.connect(os.path.normpath(setupParentPath + "/database/database.db"))
        Log.do(LogLevel.ALL, f'Creating tables...', delay=0.1)
        queries = [
            'CREATE TABLE profiles (ID INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE NOT NULL, UID VARCHAR(32) NOT NULL UNIQUE, GROUP_UID VARCHAR(32) NOT NULL, NAME VARCHAR(32) NOT NULL UNIQUE DEFAULT "New profile", AVATAR VARCHAR(256) DEFAULT NULL, PIN VARCHAR(4) NOT NULL DEFAULT "0000")',
            'CREATE TABLE movies (ID INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE NOT NULL, UID VARCHAR(32) NOT NULL UNIQUE, TITLE VARCHAR(64) NOT NULL DEFAULT "Unknown", TRAILER_LINK VARCHAR(256), RELEASE_DATE VARCHAR(64), GENRE TEXT, DURATION INTEGER, REAL_DURATION INTEGER, RATING REAL, POPULAR_QUOTE TEXT, SYNOPSIS TEXT, COUNTRY VARCHAR(128), PRODUCTION TEXT, DIRECTOR TEXT, CASTS TEXT, ORIGINAL_VERSION VARCHAR(16) NOT NULL, FILE_PATH VARCHAR(256), QUALITY VARCHAR(32))',
            'CREATE TABLE series (ID INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE NOT NULL, UID VARCHAR(32) NOT NULL UNIQUE, TITLE VARCHAR(64) NOT NULL DEFAULT "Unknown", EPISODES INTEGER, EPISODE_NAME VARCHAR(32) DEFAULT "EPISODE", SEASONS INTEGER, SEASON_NAME VARCHAR(32) DEFAULT "SEASON", TRAILERS_LINK VARCHAR(256), RELEASES_DATE VARCHAR(64), GENRE TEXT, TOTAL_DURATION INTEGER, RATING REAL, POPULAR_QUOTE TEXT, SYNOPSIS TEXT, COUNTRY VARCHAR(128), PRODUCTION TEXT, DIRECTOR TEXT, CASTS TEXT, ORIGINAL_VERSION VARCHAR(16) NOT NULL)',
            'CREATE TABLE seasons (ID INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE NOT NULL, UID VARCHAR(32) NOT NULL UNIQUE, SERIE_UID VARCHAR(32) NOT NULL, SEASON INTEGER, SEASON_TITLE VARCHAR(64) NOT NULL DEFAULT "Unknown", RELEASES_DATE VARCHAR(64), TOTAL_DURATION INTEGER, POPULAR_QUOTE TEXT, SYNOPSIS TEXT, PRODUCTION TEXT, DIRECTOR TEXT, CASTS TEXT, ORIGINAL_VERSION VARCHAR(16) NOT NULL)',
            'CREATE TABLE episodes (ID INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE NOT NULL, UID VARCHAR(32) NOT NULL UNIQUE, SEASON_UID VARCHAR(32) NOT NULL, EPISODE INTEGER, EPISODE_TITLE VARCHAR(64) NOT NULL DEFAULT "Unknown", RELEASES_DATE VARCHAR(64), DURATION INTEGER, SYNOPSIS TEXT, CASTS TEXT, FILE_PATH VARCHAR(256), QUALITY VARCHAR(32))',
            'CREATE TABLE groups (ID INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE NOT NULL, UID VARCHAR(32) NOT NULL UNIQUE, NAME VARCHAR(32) NOT NULL UNIQUE DEFAULT "New group", MANAGE_GROUPS INTEGER, MANAGE_PROFILES INTEGER, IS_KID_FRIENDLY INTEGER, PRIORITY INTEGER NOT NULL)',
            'CREATE TABLE modules (ID INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE NOT NULL, UID VARCHAR(32) NOT NULL UNIQUE, NAME VARCHAR(32) NOT NULL UNIQUE DEFAULT "New module", PATH VARCHAR(256) DEFAULT NULL)',
            'CREATE TABLE directories (ID INTEGER PRIMARY KEY AUTOINCREMENT UNIQUE NOT NULL, UID VARCHAR(32) NOT NULL UNIQUE, NAME VARCHAR(32) NOT NULL UNIQUE DEFAULT "New directory", PATH VARCHAR(256) DEFAULT NULL, IS_RECURSIVE INTEGER)'
        ]
        for query in queries:
            Log.do(LogLevel.ALL, 'Creating a table...')
            database.execute(query)
            Log.do(LogLevel.COMMON, f'> {query}', delay=0.1)

        Log.do(LogLevel.ALL, f'Inserting default profiles and groups...', delay=0.1)
        new_group = Tools.get_uid(32)
        new_group_kid = Tools.get_uid(32)
        new_profile = Tools.get_uid(32)
        new_profile_kid = Tools.get_uid(32)
        new_profile_avatar = os.path.normpath(setupParentPath + "/avatars/beatrice.jpg")
        new_profile_kid_avatar = os.path.normpath(setupParentPath + "/avatars/lucie.jpg")
        new_directory_movies = Tools.get_uid(32)
        new_directory_movies_path = os.path.normpath(setupParentPath + "/files/movies")
        new_directory_series = Tools.get_uid(32)
        new_directory_series_path = os.path.normpath(setupParentPath + "/files/series")
        queries = [
            f'INSERT INTO groups (UID, NAME, MANAGE_GROUPS, MANAGE_PROFILES, IS_KID_FRIENDLY, PRIORITY) VALUES ("{new_group}", "Owner", 1, 1, 0, 10)',
            f'INSERT INTO groups (UID, NAME, MANAGE_GROUPS, MANAGE_PROFILES, IS_KID_FRIENDLY, PRIORITY) VALUES ("{new_group_kid}", "Kid", 0, 0, 1, 1)',
            f'INSERT INTO profiles (UID, GROUP_UID, NAME, AVATAR, PIN) VALUES ("{new_profile}", "{new_group}", "Heather", "{new_profile_avatar}", "0000")',
            f'INSERT INTO profiles (UID, GROUP_UID, NAME, AVATAR, PIN) VALUES ("{new_profile_kid}", "{new_group_kid}", "Kids", "{new_profile_kid_avatar}", "0000")',
            f'INSERT INTO directories (UID, NAME, PATH, IS_RECURSIVE) VALUES ("{new_directory_movies}", "Default movies", "{new_directory_movies_path}", 1)',
            f'INSERT INTO directories (UID, NAME, PATH, IS_RECURSIVE) VALUES ("{new_directory_series}", "Default series", "{new_directory_series_path}", 1)'
        ]
        for query in queries:
            database.execute(query)
            Log.do(LogLevel.COMMON, f'> {query}', delay=0.1)
        database.commit()

        # Setup: Download locales
        Log.do(LogLevel.ALL, f'Downloading locales...', delay=0.1)
        for locale in locales:
            Log.do(LogLevel.ALL, f'Downloading {locale}.lang file...', delay=0.1)
            try:
                data = requests.get(locales[locale]).content
                with open(os.path.normpath(setupParentPath + f"/locales/{locale}.lang"), 'wb+') as f:
                    f.write(data)
                Log.do(LogLevel.GOOD, f'Downloaded {locale}.lang!', delay=0.05)
            # FIX: was a bare `except:`; download is best-effort, but
            # KeyboardInterrupt/SystemExit must not be swallowed.
            except Exception:
                Log.do(LogLevel.WARN, f'Can\'t download {locale}.lang!', delay=0.05)

        # Setup: Download avatars
        Log.do(LogLevel.ALL, f'Downloading avatars...', delay=0.1)
        Log.do(LogLevel.ALL, f'Gettings avatars list...', delay=0.1)
        data = requests.get('https://pastebin.com/raw/A3PX5iAP')
        avatars = yaml.load(data.text, Loader=yaml.CLoader)
        for avatar in avatars:
            Log.do(LogLevel.ALL, f'Downloading {avatar} file...', delay=0.1)
            try:
                data = requests.get(avatars[avatar]).content
                with open(os.path.normpath(setupParentPath + f"/avatars/{avatar}"), 'wb+') as f:
                    f.write(data)
                Log.do(LogLevel.GOOD, f'Downloaded {avatar}!', delay=0.05)
            # FIX: was a bare `except:` (see note above for locales).
            except Exception:
                Log.do(LogLevel.WARN, f'Can\'t download {avatar}!', delay=0.05)

        # Determine the host's private address (via a UDP "connect" that
        # sends nothing) and its public address (via ipify).
        _s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
        _s.connect(('8.8.8.8', 80))
        privateAddress = _s.getsockname()[0]
        publicAddress = requests.get('https://api.ipify.org/?format=json').json()['ip']
        _s.close()

        config = {
            "general": {
                "parent_path": setupParentPath,
                "paths": {
                    "avatars": "avatars",
                    "database": "database",
                    "locales": "locales",
                    "logs": "logs"
                },
                "locale": setupLocale,
                "updater": {
                    "enable": True,
                    "interval": 3600
                },
                "plugins": {
                    "enable": True
                },
                "availability": {
                    "public": {
                        "enable": False,
                        "endpoint": publicAddress
                    },
                    "private": {
                        "enable": True,
                        "endpoint": privateAddress
                    }
                }
            }
        }
        with open(os.path.normpath(rootPath + "/heather.conf"), 'w+') as f:
            json.dump(config, f, indent=4)
        Log.do(LogLevel.GOOD, f'Everything is setup! Ready to start!', delay=0.05)
| 42.334586
| 534
| 0.610958
| 1,350
| 11,261
| 5.008889
| 0.186667
| 0.033126
| 0.055753
| 0.035492
| 0.560337
| 0.489352
| 0.404022
| 0.360101
| 0.316474
| 0.280982
| 0
| 0.023833
| 0.273422
| 11,261
| 265
| 535
| 42.49434
| 0.802616
| 0.007282
| 0
| 0.20904
| 0
| 0.079096
| 0.431218
| 0.022107
| 0
| 0
| 0
| 0
| 0
| 1
| 0.016949
| false
| 0.00565
| 0.056497
| 0
| 0.124294
| 0.00565
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a955fd4758fdef6a817f379d021c4f3cc6b7730c
| 5,421
|
py
|
Python
|
utils/belief_prop.py
|
atitus5/ocr-869
|
1d714dd28e933fb320b099a4631d25e93bb01678
|
[
"MIT"
] | null | null | null |
utils/belief_prop.py
|
atitus5/ocr-869
|
1d714dd28e933fb320b099a4631d25e93bb01678
|
[
"MIT"
] | null | null | null |
utils/belief_prop.py
|
atitus5/ocr-869
|
1d714dd28e933fb320b099a4631d25e93bb01678
|
[
"MIT"
] | null | null | null |
import math
import sys
import time
from nltk import word_tokenize
import numpy as np
def bp_error_correction(kjv, all_predictions):
    """Correct OCR character predictions with belief propagation.

    Tokenizes the predicted character stream; every multi-character token
    not found in the KJV word set has its character labels re-estimated by
    a chain-structured belief propagation (run_belief_prop).

    kjv             -- corpus object exposing full_text, unique_chars(),
                       int_to_char and char_bigram_matrix()
    all_predictions -- (N, num_chars) score matrix, or a length-N label
                       vector (expanded to one-hot internally)

    Returns the corrected label vector. NOTE(review): when a label vector
    is passed, the return value aliases it and corrections are written in
    place -- confirm callers don't rely on the input staying untouched.
    """
    start_t = time.time()

    # Run belief propagation to correct any words not found in dictionary
    print("Setting up word set and tokenizing predictions...")
    word_set = set(word_tokenize(kjv.full_text))
    if len(all_predictions.shape) > 1:
        predicted_char_ints = np.argmax(all_predictions, axis=1)
    else:
        # Labels were passed directly; expand to one-hot scores so the
        # per-token slicing below works uniformly.
        predicted_char_ints = all_predictions
        all_predictions = np.zeros((len(all_predictions), kjv.unique_chars()), dtype=float)
        for i in range(len(all_predictions)):
            all_predictions[i, int(predicted_char_ints[i])] = 1.0
    predicted_chars = list(map(lambda x: kjv.int_to_char[int(x)], predicted_char_ints))
    predicted_sentence = "".join(predicted_chars)
    predicted_tokens = word_tokenize(predicted_sentence)
    print("Done setting up.")

    # Add in backoff to keep probabilities relatively localized (think
    # exponential moving avg)
    char_dist_1pct = 5  # Arbitrary; can be changed
    backoff_alpha = math.pow(0.01, (1.0 / float(char_dist_1pct)))
    print("Using backoff alpha %.6f (1%% contrib at %d char distance)" % (backoff_alpha, char_dist_1pct))

    # Correct only words that don't fall into our word set
    print("Correcting character errors with belief propagation...")
    char_bigram_matrix = kjv.char_bigram_matrix()
    corrected_predictions = predicted_char_ints
    char_idx = 0
    print_interval = max(int(len(predicted_tokens) / 100), 1)
    for token_idx in range(len(predicted_tokens)):
        if token_idx % print_interval == 0:
            # Print update in place.
            # BUG FIX: the original divided by (len * 100.0), which always
            # printed 0%; percent complete is idx / len * 100.
            sys.stdout.write("\rError correction %d%% complete"
                             % int(token_idx * 100.0 / len(predicted_tokens)))
            sys.stdout.flush()
        token = predicted_tokens[token_idx]
        if len(token) > 1 and token not in word_set:
            # Attempt to fix the error
            start = char_idx
            end = char_idx + len(token)
            new_char_predictions = run_belief_prop(char_bigram_matrix,
                                                   all_predictions[start:end, :],
                                                   backoff_alpha=backoff_alpha)
            corrected_predictions[start:end] = new_char_predictions
        # Only worry about start character index of next token if not at end
        char_idx += len(token)
        if token_idx < len(predicted_tokens) - 1:
            next_token = predicted_tokens[token_idx + 1]
            while predicted_sentence[char_idx] != next_token[0]:
                char_idx += 1

    # Insert newline to reset in-place update timer
    sys.stdout.write("\rError correction 100% complete!\n")
    sys.stdout.flush()

    end_t = time.time()
    print("Corrected errors with belief prop in %.3f seconds" % (end_t - start_t))
    return corrected_predictions
def run_belief_prop(char_bigram_matrix, predictions, backoff_alpha=1.0):
    """Run message passing over a linear chain of character nodes.

    Arguments:
        char_bigram_matrix: (num_chars, num_chars) character-bigram statistics,
            used as the pairwise potential between adjacent character positions.
        predictions: (num_nodes, num_chars) array of per-position character
            scores (the unary potentials), one row per character.
        backoff_alpha: damping factor multiplied into each incoming message so
            that a node's influence decays with distance (1.0 = no decay).

    Returns:
        1-D float array of length num_nodes holding the argmax character label
        for each position.

    NOTE(review): assumes num_nodes >= 2 — with a single node the edge-message
    indexing below would fail. The visible caller only invokes this for tokens
    of length > 1, but confirm before reusing elsewhere.
    NOTE(review): the endpoint marginals (first/last node) and interior
    marginals use only the incoming messages, not the node's own unary
    `predictions` row — looks intentional (the unary term is already folded
    into each outgoing message), but verify against the intended model.
    """
    # Message_{i,j,k} is message from node i to node j (with dimension k = # unique chars)
    num_nodes, num_chars = predictions.shape[0:2]
    inc_msgs = np.zeros((num_nodes - 1, num_chars))  # Index i is message from i to (i + 1)
    dec_msgs = np.zeros((num_nodes - 1, num_chars))  # Index i is message from (i + 1) to i
    # BELIEF PROP
    # Compute edge conditions, normalizing in process
    inc_msgs[0, :] = np.matmul(char_bigram_matrix, predictions[0,:])
    inc_msgs[0, :] /= float(sum(inc_msgs[0, :]))
    dec_msgs[num_nodes - 2, :] = np.matmul(np.transpose(char_bigram_matrix), predictions[num_nodes - 1, :])
    dec_msgs[num_nodes - 2, :] /= float(sum(dec_msgs[num_nodes - 2, :]))
    # Compute all remaining messages. Operates bidirectionally:
    # forward messages sweep left->right, backward messages sweep right->left.
    current_inc_msg = 1
    current_dec_msg = num_nodes - 3
    for i in range(num_nodes - 2):
        # Compute message in increasing direction, normalizing in process.
        # backoff_alpha damps the previous message before it is combined with
        # the local prediction, so distant evidence contributes less.
        inc_msgs[current_inc_msg, :] = np.matmul(char_bigram_matrix,
                                                 np.multiply(backoff_alpha * inc_msgs[current_inc_msg - 1, :],
                                                             predictions[current_inc_msg, :]))
        inc_msgs[current_inc_msg, :] /= float(sum(inc_msgs[current_inc_msg, :]))
        current_inc_msg += 1
        # Compute message in decreasing direction, normalizing in process
        dec_msgs[current_dec_msg, :] = np.matmul(np.transpose(char_bigram_matrix),
                                                 np.multiply(backoff_alpha * dec_msgs[current_dec_msg + 1, :],
                                                             predictions[current_dec_msg + 1, :]))
        dec_msgs[current_dec_msg, :] /= float(sum(dec_msgs[current_dec_msg, :]))
        current_dec_msg -= 1
    # Compute final marginal probabilities by multiplying incoming messages together
    # Uses labels instead of one-hot due to memory constraints
    final_predictions = np.zeros(num_nodes)
    # First node; edge case (only has a message from its right neighbor)
    final_predictions[0] = np.argmax(dec_msgs[0, :])
    # Normal nodes: product of the message from the left and from the right
    for idx in range(1, num_nodes - 1):
        final_predictions[idx] = np.argmax(np.multiply(inc_msgs[idx - 1, :], dec_msgs[idx, :]))
    # Last node; edge case (only has a message from its left neighbor)
    final_predictions[num_nodes - 1] = np.argmax(inc_msgs[num_nodes - 2, :])
    return final_predictions
| 46.333333
| 120
| 0.643239
| 725
| 5,421
| 4.571034
| 0.257931
| 0.031382
| 0.038624
| 0.015691
| 0.235063
| 0.085697
| 0.068196
| 0.028968
| 0.028968
| 0.028968
| 0
| 0.016447
| 0.259731
| 5,421
| 116
| 121
| 46.732759
| 0.80937
| 0.182808
| 0
| 0.025641
| 0
| 0
| 0.066485
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025641
| false
| 0
| 0.064103
| 0
| 0.115385
| 0.089744
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a956dee6345202cc212985e79e8f74cb1e26aa99
| 1,065
|
py
|
Python
|
botx/clients/methods/errors/unauthorized_bot.py
|
ExpressApp/pybotx
|
97c8b1ce5d45a05567ed01d545cb43174a2dcbb9
|
[
"MIT"
] | 13
|
2021-01-21T12:43:10.000Z
|
2022-03-23T11:11:59.000Z
|
botx/clients/methods/errors/unauthorized_bot.py
|
ExpressApp/pybotx
|
97c8b1ce5d45a05567ed01d545cb43174a2dcbb9
|
[
"MIT"
] | 259
|
2020-02-26T08:51:03.000Z
|
2022-03-23T11:08:36.000Z
|
botx/clients/methods/errors/unauthorized_bot.py
|
ExpressApp/pybotx
|
97c8b1ce5d45a05567ed01d545cb43174a2dcbb9
|
[
"MIT"
] | 5
|
2019-12-02T16:19:22.000Z
|
2021-11-22T20:33:34.000Z
|
"""Definition for "invalid bot credentials" error."""
from typing import NoReturn
from botx.clients.methods.base import APIErrorResponse, BotXMethod
from botx.clients.types.http import HTTPResponse
from botx.exceptions import BotXAPIError
class InvalidBotCredentials(BotXAPIError):
    """Raised when BotX rejects the bot's credentials during token retrieval."""

    # Interpolated with the failing bot_id when the error is raised.
    message_template = "Can't get token for bot {bot_id}. Make sure bot credentials is correct"
def handle_error(method: BotXMethod, response: HTTPResponse) -> NoReturn:
    """Translate an "invalid bot credentials" response into an exception.

    Arguments:
        method: the BotX method whose call produced this error response.
        response: raw HTTP response received from the BotX API.

    Raises:
        InvalidBotCredentials: unconditionally, after validating the payload.
    """
    error_payload = response.json_body
    # Validate that the body matches the standard BotX error envelope.
    APIErrorResponse[dict].parse_obj(error_payload)
    raise InvalidBotCredentials(
        url=method.url,
        method=method.http_method,
        response_content=error_payload,
        status_content=response.status_code,
        bot_id=method.bot_id,  # type: ignore
    )
| 30.428571
| 80
| 0.71831
| 122
| 1,065
| 6.172131
| 0.508197
| 0.074369
| 0.083665
| 0.069057
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.200939
| 1,065
| 34
| 81
| 31.323529
| 0.884841
| 0.296714
| 0
| 0
| 0
| 0
| 0.09887
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.235294
| 0
| 0.411765
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a9578f51cee02781b1cf946c958d1259116e97c7
| 16,515
|
py
|
Python
|
ld38/game_scene.py
|
irskep/rogue_basement
|
f92637d7870662a401ca7bb745e3855364b5ac9c
|
[
"MIT"
] | 16
|
2017-04-24T02:29:43.000Z
|
2021-07-31T15:53:15.000Z
|
ld38/game_scene.py
|
irskep/rogue_basement
|
f92637d7870662a401ca7bb745e3855364b5ac9c
|
[
"MIT"
] | 4
|
2017-04-24T20:13:45.000Z
|
2017-05-07T16:22:52.000Z
|
ld38/game_scene.py
|
irskep/rogue_basement
|
f92637d7870662a401ca7bb745e3855364b5ac9c
|
[
"MIT"
] | 2
|
2017-05-14T20:57:38.000Z
|
2017-05-19T22:08:37.000Z
|
# This file has a lot going on in it because really ties the game together,
# just like The Dude's rug. You can probably read it start to finish, but
# by all means start jumping around from here.
# Dependencies for rendering the UI
from clubsandwich.ui import (
LabelView,
LayoutOptions,
UIScene,
)
# including some ones written specifically for this game
from .views import ProgressBarView, GameView, StatsView
# Whenever you go to another "screen," you're visiting a scene. These are the
# scenes you can get to from the game scene.
from .scenes import PauseScene, WinScene, LoseScene
# This object stores the state of the whole game, so we're definitely gonna
# need that.
from .game_state import GameState
# When keys are pressed, we'll call these functions to have the player do
# things.
from .actions import (
action_throw,
action_close,
action_move,
action_pickup_item,
)
# When things happen, we need to show status messages at the bottom of the
# screen. Since more than one thing can happen in a frame, there's some
# subtle logic encapsulated in this Logger object.
from .logger import Logger
# Constructing arbitrary English sentences from component parts is not always
# simple. This function makes it read nicer in code.
from .sentences import simple_declarative_sentence
# There are four tracks that can play at any given time. Pyglet (the library
# used for audio) doesn't have easy "fade" support, so this object tracks and
# modifies volumes for each track per frame.
from .music import NTrackPlayer
# const.py does some interesting things that you should look at when you're
# interested. For now, here are some hints:
from .const import (
# Enums are collections of unique identifiers. In roguelikes it's usually
# better to keep everything in data files, but for a small game like this
# it's not a big deal to have a few small ones.
EnumEventNames,
EnumFeature,
EnumMonsterMode,
# These are collections of values from data files:
verbs, # from verbs.csv
key_bindings, # from key_bindings.csv
# This is a reverse mapping of key_bindings.csv so we can turn
# a raw key value into a usable command.
BINDINGS_BY_KEY,
# Map of key binding ID to a clubsandwich.geom.Point object representing a
# direction.
KEYS_TO_DIRECTIONS,
)
# At some point this game was slow. This flag enables profiling. You can
# ignore it.
DEBUG_PROFILE = False
if DEBUG_PROFILE:
    import cProfile
    # Module-global profiler; the scenes below call pr.enable()/pr.disable()
    # around each frame and dump stats on exit.
    pr = cProfile.Profile()
# All game scenes share an instance of the player because the audio should be
# continuous. It's a bit of a hack that it's a global variable, but this was a
# 48-hour game, so deal with it.
N_TRACK_PLAYER = NTrackPlayer(['Q1.mp3', 'Q2.mp3', 'Q3.mp3', 'Q4.mp3'])
# This is the text that appears at the bottom left of the screen.
# NOTE(review): the original layout of this string (e.g. whether "hjklyubn"
# is indented under "arrows") was lost in transit — confirm against upstream
# before shipping, since the string is rendered verbatim.
TEXT_HELP = """
======= Keys =======
Move: arrows, numpad
hjklyubn
Get rock: g
Throw rock: t
Close: c
""".strip()
# While you're playing the game, there are actually 3 modes of input:
#
# * Normal: move, wait, get, close, throw
# * Prompting for throw direction
# * Prompting for close direction
#
# These states were originally handled with a "mode" property, but it turns out
# to be MUCH simpler if there are just 3 completely different scenes for these
# things that happen to draw the screen the same way. That way you never have
# any "if mode == PROMPT_THROW_DIRECTION" blocks or anything.
#
# So those 3 scenes all inherit from this base class.
class GameAppearanceScene(UIScene):
    """Base scene for all in-game screens.

    Builds the shared UI layout (game view, stats sidebar, help text, log
    line), owns the per-scene Logger, and steps the music player and event
    queue every frame. The three gameplay scenes below all inherit from it.
    """

    def __init__(self, game_state, *args, **kwargs):
        # All the game scenes share a GameState object.
        self.game_state = game_state
        # They also use the global player, but access it via a property just in
        # case I change my mind later.
        self.n_track_player = N_TRACK_PLAYER
        # Make some views. Read the clubsandwich docs for details on this stuff.
        # Some of them we just add as subviews and forget about, but the stats
        # view will need to be updated from time to time, so hang onto a reference
        # to it.
        sidebar_width = 21
        # The game drawing is all done by this GameView object. It happens every
        # frame, so we can mostly forget about it for now.
        game_view = GameView(
            self.game_state,
            layout_options=LayoutOptions().with_updates(left=sidebar_width, bottom=1))
        log_view = LabelView(
            text="", align_horz='left', color_bg='#333333', clear=True,
            layout_options=LayoutOptions.row_bottom(1)
                .with_updates(left=sidebar_width))
        help_view = LabelView(
            text=TEXT_HELP, align_horz='left',
            layout_options=LayoutOptions.column_left(sidebar_width)
                .with_updates(top=None, height='intrinsic'))
        self.stats_view = StatsView(
            self.game_state, layout_options=LayoutOptions.column_left(sidebar_width))
        views = [
            game_view,
            self.stats_view,
            help_view,
            log_view,
        ]
        super().__init__(views, *args, **kwargs)
        # Each game scene has its own log controller. It's defined after the super()
        # call because it needs log_view to exist.
        self.logger = Logger(log_view)
        # This boolean signals to DirectorLoop that it doesn't need to draw any
        # scenes behind this one. (Compare with the pause screen, which wants the
        # game scene to be drawn behind it, since it's a popup window!)
        self.covers_screen = True

    def enter(self, ctx):
        super().enter(ctx)
        # When this scene becomes active, clear everything. There is no convention
        # for who clears the screen, so just handle it on all changes.
        self.ctx.clear()

    def exit(self):
        super().exit()
        # same reason as enter()
        self.ctx.clear()

    # This function is called by DirectorLoop every frame. It does important
    # things!
    def terminal_update(self, is_active=True):
        if DEBUG_PROFILE: pr.enable()
        # Fade music in/out if necessary
        self.n_track_player.step()
        # Tell the LevelState object to deal with any events in its queue. The
        # event system is pretty sophisticated, more on that later.
        self.game_state.level.consume_events()
        # Tell the logger to display any log entries in its queue, or leave the
        # log unchanged.
        self.logger.update_log()
        # The superclass draws all the views
        super().terminal_update(is_active)
        if DEBUG_PROFILE: pr.disable()
# This is another abstract base class, subclassing the one above. Two of the
# three game scenes are just waiting for a single keystroke for input. This
# class abstracts that behavior.
class GameModalInputScene(GameAppearanceScene):
    """Abstract scene that reads exactly one keystroke, acts on it, then pops
    itself off the scene stack. Subclasses implement handle_key().
    """

    # DirectorLoop calls terminal_read() on the active scene when input is
    # available. You might want to read the BearLibTerminal docs for
    # terminal_read(). `val` is the return value of that function.
    def terminal_read(self, val):
        # Ignore input from unbound keys
        if val not in BINDINGS_BY_KEY:
            return
        # Read one keystroke and pop back to the previous scene.
        # (DirectorLoop stores scenes as a stack.)
        # (Fix: dropped an unused `level_state` local that the original
        # assigned here and never read.)
        self.handle_key(BINDINGS_BY_KEY[val])
        self.director.pop_scene()

    # `k` in this function is one of the values in the left column from
    # key_bindings.csv.
    def handle_key(self, k):
        """Act on one key-binding ID. Must be overridden by subclasses."""
        raise NotImplementedError()
# Finally, some real action! This is the main game scene, as the name says.
# This object has a lot of responsibilities:
#
# * Reset things for a new game
# * Display world events to the user
# * Act on main game input
# * Assorted hacks
#
# Let's dive in!
class GameMainScene(GameAppearanceScene):
    """The main gameplay scene.

    Responsibilities: reset state for a new game, relay world events to the
    player as log messages / scene changes, and dispatch normal-mode input
    (move / get / wait / close / throw / pause).
    """

    def __init__(self, *args, **kwargs):
        # Create a fresh GameState object
        super().__init__(GameState(), *args, **kwargs)
        # Reset the music player in case this isn't the first game since the
        # process launched
        self.n_track_player.reset()
        # Subscribe to a bunch of events. This probably looks a little weird, so
        # you might want to read the docs for clubsandwich.event_dispatcher.
        level_state = self.game_state.level
        # But basically, this means "when the 'door_open' event is fired on the
        # player entity, call self.on_door_open(event)."
        level_state.dispatcher.add_subscriber(self, EnumEventNames.door_open, level_state.player)
        level_state.dispatcher.add_subscriber(self, EnumEventNames.entity_bumped, level_state.player)
        level_state.dispatcher.add_subscriber(self, EnumEventNames.entity_moved, level_state.player)
        level_state.dispatcher.add_subscriber(self, EnumEventNames.entity_took_damage, level_state.player)
        # These event handlers respond to all events with matching names,
        # regardless of which entity they are attached to.
        level_state.dispatcher.add_subscriber(self, EnumEventNames.entity_picked_up_item, None)
        level_state.dispatcher.add_subscriber(self, EnumEventNames.entity_died, None)
        level_state.dispatcher.add_subscriber(self, EnumEventNames.entity_attacking, None)
        level_state.dispatcher.add_subscriber(self, EnumEventNames.score_increased, None)

    def exit(self):
        super().exit()
        # Stop the music and write profiler data to disk when the game ends.
        self.n_track_player.stop()
        if DEBUG_PROFILE: pr.dump_stats('profile')

    ### event handlers ###
    # (things that happen in response to world events)

    ## player events ##
    # (only called when these events are attached to the player)

    def on_entity_moved(self, event):
        # Fires only for the player (see the subscriptions in __init__).
        level_state = self.game_state.level
        # Here is the first appearance of the tilemap API. This just gets us a
        # RogueBasementCell object (see level_generator.py) for a given position.
        cell = level_state.tilemap.cell(event.entity.position)
        # This game only has one level, so exit stairs means game win! Yay!
        # And "winning" means "show a cute dialog." And the dialog looks almost
        # exactly like the losing dialog, except it says "you win" instead of
        # "you lose." How satisfying!
        if cell.feature == EnumFeature.STAIRS_DOWN:
            self.director.push_scene(WinScene(self.game_state.score))
        # "Annotations" are just little notes left to us by the level generator.
        # These annotations in particular mean "this cell is part of a corridor
        # leading between two areas of different difficulty."
        if cell.annotations & {'transition-1-2', 'transition-2-3', 'transition-3-4'}:
            # Fade the music out. DRAMA!!!
            self.n_track_player.set_active_track(None)
            ### HACK HACK HACK HACK ###
            # For "balance", replenish health between rooms.
            # This was added in the last hour or so of the compo. It might be better
            # to implement this as a cell Feature instead of this annotation, but eh,
            # at this point it's not worth fixing.
            level_state.player.state['hp'] = level_state.player.stats['hp_max']
            self.logger.log("The glowing corridor restores you to health.")
            # Whenever we update player state, we have to manually update the stats
            # view. Not really the best workflow; the stats view ought to update
            # itself every frame! But again, eh, whatever, it works.
            self.stats_view.update()
        # The level generator creates Room objects which know what area they are
        # in. We can look them up by position. If this cell has a Room, then tell
        # the music player to play the relevant track.
        room = level_state.tilemap.get_room(event.entity.position)
        if room and room.difficulty is not None:
            self.n_track_player.set_active_track(room.difficulty)

    def on_entity_bumped(self, event):
        # Player walked into a wall or other obstacle.
        self.logger.log("Oof!")

    def on_entity_took_damage(self, event):
        # HP changed, so redraw the sidebar.
        self.stats_view.update()

    def on_door_open(self, event):
        self.logger.log("You opened the door.")

    ## global events ##
    # (called no matter what the entity is)

    def on_entity_attacking(self, event):
        # "You hit the verp. The verp hits you."
        self.logger.log(simple_declarative_sentence(
            event.entity.monster_type.id, verbs.HIT, event.data.monster_type.id))
        if event.data.mode == EnumMonsterMode.STUNNED:
            # This only happens to monsters, otherwise we'd have to
            # account for it in our text generator. How fortunate!
            self.logger.log("It is stunned.")

    def on_entity_died(self, event):
        # "You die." "The wibble dies."
        self.logger.log(simple_declarative_sentence(
            event.entity.monster_type.id, verb=verbs.DIE))
        if event.entity == self.game_state.level.player:
            # Funny how losing looks just like winning...
            self.director.push_scene(LoseScene(self.game_state.score))

    def on_entity_picked_up_item(self, event):
        # Only narrate pickups the player can actually see.
        if self.game_state.level.get_can_player_see(event.entity.position):
            self.logger.log(simple_declarative_sentence(
                event.entity.monster_type.id,
                verbs.PICKUP,
                event.data.item_type.id,
                'a'
            ))
        self.stats_view.update()  # inventory count may have changed!

    def on_score_increased(self, event):
        # Coins are a special case. If you pick one up, the entity_picked_up_item
        # event is not fired. Instead, you get this score_increased event.
        #
        # The reason is that the inventory system is very stupid, and keeping coins
        # in it would be useless.
        self.stats_view.update()  # score changed
        self.logger.log(simple_declarative_sentence(
            'PLAYER', verbs.PICKUP, 'GOLD', 'a'))

    # ooh, we got a keystroke!
    def terminal_read(self, val):
        # Ignore unbound keys
        if val not in BINDINGS_BY_KEY:
            return
        key = BINDINGS_BY_KEY[val]
        self.logger.clear()
        self.handle_key(key)

    def handle_key(self, k):
        """Dispatch one normal-mode key binding ID to the matching action."""
        level_state = self.game_state.level
        # Remember that `k` is one of the left column values in key_bindings.csv.
        if k in KEYS_TO_DIRECTIONS:
            # If the key represents a direction, try to move in that direction.
            point = level_state.player.position + KEYS_TO_DIRECTIONS[k]
            action_move(level_state, level_state.player, point)
        elif k == 'GET':
            action_pickup_item(level_state, level_state.player)
        elif k == 'WAIT':
            # The easiest implementation of "wait" is to just fire the event that
            # says "the player did something, you can move now" without the player
            # having actually done anything.
            level_state.fire_player_took_action_if_alive()
        elif k == 'CLOSE':
            # Now it's time to push one of those fancy modal-input scenes I've talked
            # so much about!
            self.director.push_scene(GameCloseScene(self.game_state))
        elif k == 'THROW':
            if level_state.player.inventory:
                # Ooh, another one!
                self.director.push_scene(GameThrowScene(self.game_state))
            else:
                # HAHA LOL PLAYER U SUX
                self.logger.log("You don't have anything to throw.")
        elif k == 'CANCEL':
            self.director.push_scene(PauseScene())
# At this point, you should be able to read the last two classes yourself
# without my help. From here, you should jump around to whatever interests you!
# I would suggest a reading order of something like:
# * const.py
# * entity.py
# * game_state.py
# * level_state.py
# * behavior.py
# * actions.py
# * level_generator.py
# * views.py
# * draw_game.py
class GameThrowScene(GameModalInputScene):
    """Modal scene: ask for a direction and throw the first inventory item."""

    def enter(self, ctx):
        super().enter(ctx)
        self.logger.log("Throw in what direction?")

    def handle_key(self, k):
        level = self.game_state.level
        if k == 'CANCEL':
            return
        try:
            direction = KEYS_TO_DIRECTIONS[k]
        except KeyError:
            self.logger.log("Invalid direction")
            return
        player = level.player
        rock = player.inventory[0]
        # Aim far along the chosen direction; the throw action resolves what
        # actually gets hit along the way.
        target = player.position + direction * 1000
        if action_throw(level, player, rock, target, 2):
            self.logger.log(simple_declarative_sentence('PLAYER', verbs.THROW, 'ROCK'))
        else:
            self.logger.log("You can't throw that in that direction.")
class GameCloseScene(GameModalInputScene):
    """Modal scene: ask for a direction and close the door in that direction."""

    def enter(self, ctx):
        super().enter(ctx)
        self.logger.log("Close door in what direction?")

    def handle_key(self, k):
        level = self.game_state.level
        if k == 'CANCEL':
            return
        if k not in KEYS_TO_DIRECTIONS:
            self.logger.log("Invalid direction")
            return
        target = level.player.position + KEYS_TO_DIRECTIONS[k]
        # action_close reports whether there was actually a door to close.
        if action_close(level, level.player, target):
            self.logger.log("You closed the door.")
        else:
            self.logger.log("There is no door there.")
| 37.44898
| 102
| 0.711051
| 2,462
| 16,515
| 4.657595
| 0.25264
| 0.032267
| 0.019273
| 0.014128
| 0.186274
| 0.158629
| 0.134211
| 0.113718
| 0.095404
| 0.084765
| 0
| 0.002673
| 0.207206
| 16,515
| 440
| 103
| 37.534091
| 0.873138
| 0.482652
| 0
| 0.246377
| 0
| 0
| 0.064593
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.10628
| false
| 0
| 0.048309
| 0
| 0.207729
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a958227f8764279c1268ab44258acb82a4b5a6c0
| 4,882
|
py
|
Python
|
main.py
|
yanxurui/portfolio
|
032cf47ccac1c5815fd4827bf0d5f3cf43cec990
|
[
"MIT"
] | null | null | null |
main.py
|
yanxurui/portfolio
|
032cf47ccac1c5815fd4827bf0d5f3cf43cec990
|
[
"MIT"
] | null | null | null |
main.py
|
yanxurui/portfolio
|
032cf47ccac1c5815fd4827bf0d5f3cf43cec990
|
[
"MIT"
] | null | null | null |
import os
import shutil
import argparse
from pathlib import Path
from time import time
from collections import defaultdict
import torch
import numpy as np
import pandas as pd
torch.manual_seed(0)
def allocate(a):
    """Clamp negative weights to zero (in place) and cap the total at 1.

    Returns `a` itself when the clamped weights already sum to at most 1;
    otherwise returns a new array rescaled to sum to exactly 1.
    """
    np.clip(a, 0, None, out=a)  # zero out negative positions, mutating the input
    total = a.sum()
    return a / total if total > 1 else a
def ret(output, y):
    """Per-sample portfolio return: allocate each output row, then weight `y`."""
    weights = np.apply_along_axis(allocate, -1, output)
    return np.sum(weights * y, axis=1)
def train_batch(X, target, y):
    """Run one optimization step on a single batch.

    Arguments:
        X: batch of network inputs (converted to torch.Tensor here).
        target: training targets for the loss (converted to torch.Tensor here).
        y: per-asset returns for the batch; used only to report the realized
           portfolio return via ret(), not part of the gradient computation.

    Returns:
        Tuple of (loss value as a Python float, per-sample returns from ret()).

    NOTE(review): relies on module-level globals `net`, `optimizer` and
    `criterion` (imported from config_global in the __main__ block below).
    """
    net.train()
    X, target = torch.Tensor(X), torch.Tensor(target)
    optimizer.zero_grad()  # zero the gradient buffers
    output = net(X)
    loss = criterion(output, target)
    loss.backward()
    optimizer.step()  # Does the update
    output = output.detach().numpy()
    return (
        loss.item(),
        ret(output, y)
    )
def test_batch(X, y):
    """Run the network on one batch in eval mode, without tracking gradients.

    Arguments:
        X: batch of network inputs (converted to torch.Tensor here).
        y: per-asset returns for the batch, forwarded to ret().

    Returns:
        Tuple of (raw network output as a numpy array, per-sample returns).

    NOTE(review): relies on the module-level global `net`.
    """
    net.eval()
    X = torch.Tensor(X)
    # Fix: run inference under no_grad so the autograd graph is never built
    # during evaluation — numerically identical output, less memory.
    with torch.no_grad():
        output = net(X)
    output = output.numpy()
    return (
        output,
        ret(output, y)
    )
def train():
    """Train `net` for `epoch` epochs, checkpointing the best validation model.

    Writes to save_dir:
        state.pt              -- net/optimizer/criterion state at the best epoch
        train_summary.csv     -- per-epoch loss and train/validation returns
        train_last_epoch.csv  -- per-sample train returns of the final epoch
        valid_best_epoch.csv  -- per-sample validation returns of the best epoch

    NOTE(review): relies on module-level globals `net`, `optimizer`,
    `criterion`, `data`, `epoch` and `save_dir` (set up in the __main__ block).
    """
    print('Train...')
    start_time = time()
    net.reset_parameters()  # repeat training in jupyter notebook
    summary = []
    best_val_ret = None
    # loop over epoch and batch
    for e in range(epoch):
        current_epoch = defaultdict(list)
        for i, X, target, y in data.train():
            tr_loss, tr_ret = train_batch(X, target, y)
            current_epoch['tr_loss'].append(tr_loss)
            current_epoch['tr_ind'].extend(i)
            current_epoch['tr_ret'].extend(tr_ret)
        # evaluate
        for i, X, y in data.valid():
            _, val_ret = test_batch(X, y)
            current_epoch['val_ind'].extend(i)
            current_epoch['val_ret'].extend(val_ret)
        # 3 values: loss, train average daily % return, valid ...
        aggregate = [np.mean(current_epoch['tr_loss']),
                     np.mean(current_epoch['tr_ret'])*100,
                     np.mean(current_epoch['val_ret'])*100]
        print("epoch:{:3d}, tr_loss:{:+.3f}, tr_ret:{:+.3f}, val_ret:{:+.3f}".format(
            e+1, *aggregate))
        # only save the best model on validation set
        # Fix: the original `if not best_val_ret` also treated a best return of
        # exactly 0.0 as "no best yet"; compare against None explicitly.
        if best_val_ret is None or aggregate[-1] >= best_val_ret:
            best_val_ret = aggregate[-1]
            val_epoch = current_epoch
            torch.save({
                'net': net.state_dict(),
                'optimizer': optimizer.state_dict(),
                'criterion': criterion.state_dict()
            }, save_dir.joinpath('state.pt'))
        summary.append(aggregate)
    summary = pd.DataFrame(summary, columns=['tr_loss', 'tr_ret', 'val_ret'])
    summary.to_csv(save_dir.joinpath('train_summary.csv'))
    pd.DataFrame({'ret':current_epoch['tr_ret']}, index=current_epoch['tr_ind']).to_csv(
        save_dir.joinpath('train_last_epoch.csv'))
    pd.DataFrame({'ret':val_epoch['val_ret']}, index=val_epoch['val_ind']).to_csv(
        save_dir.joinpath('valid_best_epoch.csv'))
    print('Training finished after {:.1f}s'.format(time()-start_time))
    print('Early stop epoch: {}'.format(summary['val_ret'].values.argmax()+1))
    print('-'*20)
def test():
    """Evaluate the current model on the test set and write results to CSV.

    When `online_train` is enabled, performs additional training steps after
    each test batch (online learning).

    NOTE(review): relies on module-level globals `data`, `online_train` and
    `save_dir`; the __main__ block loads the checkpointed model before calling
    this.
    """
    # always load model from disk
    # 1. to repeat test without training
    # 2. for the sake of online learning
    print('Test...')
    summary = []
    outputs = []
    for i, X, y in data.test():
        output, r = test_batch(X, y)
        outputs.extend(zip(i, output))
        summary.extend(zip(i, r))
        if online_train:
            # Online learning: keep adapting the model between test batches.
            for j, X, target, y in data.online_train():
                train_batch(X, target, y)
    summary = pd.DataFrame(summary, columns=['index', 'ret'])
    summary = summary.set_index('index')
    summary.to_csv(save_dir.joinpath('test_summary.csv'))
    # Cumulative return: product of (1 + daily return) over the test period.
    print('ret: {:+.3f}'.format((summary['ret']+1).prod()))
    outputs = dict(outputs)
    outputs = pd.DataFrame(outputs).T
    outputs.to_csv(save_dir.joinpath('test_output.csv'))
def load_model(path):
    """Load net/optimizer/criterion state from a checkpoint saved by train().

    Mutates the module-level `net`, `optimizer` and `criterion` in place
    (and puts the net in eval mode), returning them for convenience.
    """
    checkpoint = torch.load(path)
    net.load_state_dict(checkpoint['net'])
    net.eval()
    optimizer.load_state_dict(checkpoint['optimizer'])
    criterion.load_state_dict(checkpoint['criterion'])
    return net, optimizer, criterion
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='Null')
    parser.add_argument('path', help='path of experiment, must contain config.py')
    parser.add_argument('--test', action='store_true', help='test only')
    args = parser.parse_args()
    # Presumably read by config_global to locate the experiment-local config —
    # it must be set before the import below. TODO confirm.
    os.environ['CONFIG_LOCAL_DIR'] = args.path
    # variables defined here are global/model level
    save_dir = Path(args.path)
    if not os.path.isfile(os.path.join(save_dir, 'config.py')):
        raise Exception('{}: wrong path or no local config'.format(save_dir))
    # Deliberately imported late: depends on the env var set above.
    from config_global import epoch, net, optimizer, criterion, data, online_train
    if not args.test:
        train()
    # Always evaluate the best checkpoint, not the last epoch's weights.
    net, optimizer, criterion = load_model(save_dir.joinpath('state.pt'))
    test()
| 33.438356
| 88
| 0.621262
| 661
| 4,882
| 4.416036
| 0.258699
| 0.024666
| 0.033573
| 0.020555
| 0.169236
| 0.055498
| 0
| 0
| 0
| 0
| 0
| 0.007505
| 0.235764
| 4,882
| 145
| 89
| 33.668966
| 0.774859
| 0.07374
| 0
| 0.1
| 0
| 0
| 0.123087
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058333
| false
| 0
| 0.083333
| 0
| 0.191667
| 0.058333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a95cdc019a431df0ac19c35d5980e2ea22fe3fdc
| 2,508
|
py
|
Python
|
toolkit/retry.py
|
blackmatrix7/iphone_hunter
|
1df7bee48f4d67397fae821f8a675115525f4ef8
|
[
"Apache-2.0"
] | 2
|
2017-09-27T14:11:59.000Z
|
2022-02-28T06:38:30.000Z
|
toolkit/retry.py
|
blackmatrix7/iphone_hunter
|
1df7bee48f4d67397fae821f8a675115525f4ef8
|
[
"Apache-2.0"
] | 1
|
2021-06-01T21:38:59.000Z
|
2021-06-01T21:38:59.000Z
|
toolkit/retry.py
|
blackmatrix7/iphone_hunter
|
1df7bee48f4d67397fae821f8a675115525f4ef8
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2017/8/18 上午9:50
# @Author : Matrix
# @Github : https://github.com/blackmatrix7/
# @Blog : http://www.cnblogs.com/blackmatrix/
# @File : retry.py
# @Software: PyCharm
import time
from functools import wraps
__author__ = 'blackmatrix'
"""
在函数执行出现异常时自动重试的简单装饰器
"""
class StopRetry(Exception):
    """Raised when all retries are exhausted without an accepted result."""

    def __repr__(self):
        return 'retry stop'


def retry(max_retries: int = 5, delay: float = 0, step: float = 0,
          exceptions=BaseException,
          sleep=time.sleep, callback=None, validate=None):
    """Decorator that automatically retries a function when it raises.

    :param max_retries: maximum number of attempts.
    :param delay: delay before each retry, in seconds.
    :param step: amount added to the delay after every retry, in seconds.
    :param exceptions: exception type(s) that trigger a retry; a single
        exception class or a tuple/list of classes.
    :param sleep: function used to wait, ``time.sleep`` by default. In async
        frameworks such as tornado, ``time.sleep`` would block the loop;
        pass a custom callable with the same one-argument signature.
    :param callback: called with the exception each time one is caught.
        If it returns True the exception is considered handled: retrying
        stops and nothing is raised. Any other return value continues
        retrying. If the callback itself raises, its exception propagates.
    :param validate: called with the result after every successful attempt.
        If it returns False the result is rejected and the call is retried.
        Any other return value accepts the result, which is then returned.
        If it raises one of ``exceptions`` the call is retried; any other
        exception propagates.
    :return: the decorated function's (validated) result; raises the last
        captured exception, or StopRetry if no exception was captured, once
        the retry budget is exhausted.
    """
    def wrapper(func):
        @wraps(func)
        def _wrapper(*args, **kwargs):
            # BUG FIX: the original declared `nonlocal max_retries, delay, step`
            # and mutated them, so the retry budget and the growing delay were
            # shared across *all* calls of the decorated function — after one
            # call exhausted its retries, every later call failed immediately.
            # Keep the mutable counters local to each invocation instead.
            remaining = max_retries
            wait = delay
            func_ex = StopRetry
            while remaining > 0:
                remaining -= 1
                try:
                    result = func(*args, **kwargs)
                    # A validator returning False rejects the result; retry.
                    if not (callable(validate) and validate(result) is False):
                        return result
                except exceptions as ex:
                    # A callback returning True means "handled": stop retrying.
                    if callable(callback) and callback(ex) is True:
                        return
                    func_ex = ex
                # Only sleep when another attempt will actually happen (the
                # original also slept after a success and after the final
                # failure, delaying returns and raises for no benefit).
                if remaining > 0 and (wait > 0 or step > 0):
                    sleep(wait)
                    wait += step
            raise func_ex
        return _wrapper
    return wrapper
if __name__ == '__main__':
pass
| 30.585366
| 77
| 0.598086
| 237
| 2,508
| 6.219409
| 0.594937
| 0.033921
| 0.012212
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01092
| 0.30622
| 2,508
| 81
| 78
| 30.962963
| 0.836207
| 0.415072
| 0
| 0.055556
| 0
| 0
| 0.021674
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0.027778
| 0.055556
| 0.027778
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a968e5c87a6a2fba1534a27a1696dd6c0f7117a1
| 1,568
|
py
|
Python
|
apetools/proletarians/setuprun.py
|
rsnakamura/oldape
|
b4d1c77e1d611fe2b30768b42bdc7493afb0ea95
|
[
"Apache-2.0"
] | null | null | null |
apetools/proletarians/setuprun.py
|
rsnakamura/oldape
|
b4d1c77e1d611fe2b30768b42bdc7493afb0ea95
|
[
"Apache-2.0"
] | null | null | null |
apetools/proletarians/setuprun.py
|
rsnakamura/oldape
|
b4d1c77e1d611fe2b30768b42bdc7493afb0ea95
|
[
"Apache-2.0"
] | null | null | null |
# apetools Libraries
from apetools.baseclass import BaseClass
from apetools.builders import builder
from apetools.lexicographers.lexicographer import Lexicographer
class SetUp(BaseClass):
    """
    Lazily builds the test infrastructure (lexicographer and builder).
    """
    def __init__(self, arguments, *args, **kwargs):
        """
        :param:

         - `arguments`: namespace produced by an ArgumentParser
        """
        super(SetUp, self).__init__(*args, **kwargs)
        self.arguments = arguments
        self._lexicographer = None
        self._builder = None
        return

    @property
    def lexicographer(self):
        """
        :return: Lexicographer that maps config-files
        """
        if self._lexicographer is None:
            glob = self.arguments.glob
            self.logger.debug("Building Lexicographer with glob ({0})".format(glob))
            self._lexicographer = Lexicographer(glob)
        return self._lexicographer

    @property
    def builder(self):
        """
        :return: A builder of objects
        """
        if self._builder is None:
            lex = self.lexicographer
            self.logger.debug("Building builder with Lexicographer '{0}'".format(str(lex)))
            self._builder = builder.Builder(lex)
        return self._builder

    def __call__(self):
        """
        Runs the builder.hortator's `run` method
        """
        self.logger.debug("Calling the hortator's run.")
        self.builder.hortator()
        return
# end SetUp
| 28
| 80
| 0.588648
| 154
| 1,568
| 5.863636
| 0.363636
| 0.094131
| 0.049834
| 0.048726
| 0.057586
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00186
| 0.314413
| 1,568
| 55
| 81
| 28.509091
| 0.83814
| 0.153061
| 0
| 0.2
| 0
| 0
| 0.087603
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.1
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a969c4d30c2cfa4664e0f50b541bf7d5cc4223f3
| 11,423
|
py
|
Python
|
bleu.py
|
divyang02/English_to_Hindi_Machine_language_translator
|
0502b7bb1f86f45d452868a8701009d421765b64
|
[
"MIT"
] | 1
|
2022-02-22T04:10:34.000Z
|
2022-02-22T04:10:34.000Z
|
bleu.py
|
divyang02/English_to_Hindi_Machine_language_translator
|
0502b7bb1f86f45d452868a8701009d421765b64
|
[
"MIT"
] | null | null | null |
bleu.py
|
divyang02/English_to_Hindi_Machine_language_translator
|
0502b7bb1f86f45d452868a8701009d421765b64
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Sentence level and Corpus level BLEU score calculation tool
"""
from __future__ import division, print_function
import io
import os
import math
import sys
import argparse
from fractions import Fraction
from collections import Counter
from functools import reduce
from operator import or_
# Prefer NLTK's ngrams helper; fall back to a pure-Python equivalent when
# NLTK is not installed.
try:
    from nltk import ngrams
except ImportError:  # fix: was a bare `except:`, which masked unrelated errors
    def ngrams(sequence, n):
        """Yield successive n-length tuples from `sequence`.

        Minimal stand-in for nltk.ngrams: yields nothing when the sequence
        has fewer than n items.
        """
        sequence = iter(sequence)
        history = []
        while n > 1:
            # fix: guard next() — under PEP 479 a StopIteration escaping a
            # generator becomes RuntimeError for sequences shorter than n-1.
            try:
                history.append(next(sequence))
            except StopIteration:
                return
            n -= 1
        for item in sequence:
            history.append(item)
            yield tuple(history)
            del history[0]
def modified_precision(references, hypothesis, n):
    """
    Clipped (modified) n-gram precision of *hypothesis* against *references*.

    Each hypothesis n-gram count is clipped at the maximum count of that
    n-gram across the references; the result is the ratio of clipped matches
    to total hypothesis n-grams, as a Fraction.
    """
    hyp_counts = Counter(ngrams(hypothesis, n))
    if not hyp_counts:
        # Hypothesis too short to contain any n-gram of this order.
        return Fraction(0)
    # Element-wise maximum of per-reference n-gram counts (Counter `|`).
    max_ref_counts = reduce(or_, (Counter(ngrams(ref, n)) for ref in references))
    clipped_total = sum(min(count, max_ref_counts[gram])
                        for gram, count in hyp_counts.items())
    return Fraction(clipped_total, sum(hyp_counts.values()))
def corpus_bleu(list_of_references, hypotheses, weights=(0.25, 0.25, 0.25, 0.25),
                segment_level=False, smoothing=0, epsilon=1, alpha=1,
                k=5):
    """
    Corpus-level BLEU over parallel streams of hypotheses and references.

    :param list_of_references: iterable whose i-th item is the tuple/list of
        tokenized references for the i-th hypothesis.
    :param hypotheses: iterable of tokenized hypotheses.
    :param weights: per-n-gram-order weights; its length sets the max order.
    :param segment_level: if True, also print a smoothed sentence-level score
        for every segment to stdout.
    :param smoothing: Chen & Cherry smoothing method for the segment scores.
    :param epsilon: smoothing parameter forwarded to chen_and_cherry.
    :param alpha: smoothing parameter forwarded to chen_and_cherry.
    :param k: NOTE(review) — accepted but never forwarded to chen_and_cherry
        below, so method 4 always uses chen_and_cherry's default k.
    :return: (score, per-order precisions, hyp_lengths, ref_lengths); a
        summary line is also printed to stderr.
    """
    # Initialize the numbers.
    p_numerators = Counter()  # Key = ngram order, and value = no. of ngram matches.
    p_denominators = Counter()  # Key = ngram order, and value = no. of ngram in ref.
    hyp_lengths, ref_lengths = 0, 0
    # Iterate through each hypothesis and their corresponding references.
    for references, hypothesis in zip(list_of_references, hypotheses):
        # Calculate the hypothesis length and the closest reference length.
        # Adds them to the corpus-level hypothesis and reference counts.
        hyp_len = len(hypothesis)
        hyp_lengths += hyp_len
        ref_lens = (len(reference) for reference in references)
        # Tie-break on the smaller reference length when distances are equal.
        closest_ref_len = min(ref_lens, key=lambda ref_len: (abs(ref_len - hyp_len), ref_len))
        ref_lengths += closest_ref_len
        # Calculates the modified precision for each order of ngram.
        segment_level_precision = []
        for i, _ in enumerate(weights, start=1):
            p_i = modified_precision(references, hypothesis, i)
            p_numerators[i] += p_i.numerator
            p_denominators[i] += p_i.denominator
            segment_level_precision.append(p_i)
        # Optionally, outputs segment level scores.
        if segment_level:
            if hyp_len == 0:
                print(0)
            else:
                # Sentence-level brevity penalty.
                _bp = min(math.exp(1 - closest_ref_len / hyp_len), 1.0)
                segment_level_precision = chen_and_cherry(references, hypothesis,
                                                          segment_level_precision,
                                                          hyp_len, smoothing, epsilon,
                                                          alpha)
                segment_pn = [w*math.log(p_i) if p_i != 0 else 0 for p_i, w in
                              zip(segment_level_precision, weights)]
                print(_bp * math.exp(math.fsum(segment_pn)))
    # Calculate corpus-level brevity penalty.
    bp = min(math.exp(1 - ref_lengths / hyp_lengths), 1.0)
    # Calculate corpus-level modified precision.
    p_n = []
    p_n_str = []
    for i, w in enumerate(weights, start=1):
        # NOTE(review): Fraction is built from a *float* ratio here, so its
        # numerator/denominator reflect the float's binary expansion, not the
        # match counts; also raises ZeroDivisionError when p_denominators[i]
        # is 0 (e.g. empty input) — confirm whether that is intended.
        p_i = Fraction(p_numerators[i] / p_denominators[i])
        p_n_str.append(p_i)
        try:
            # math.log raises ValueError for p_i == 0; treated as 0 weight.
            p_n.append(w* math.log(p_i))
        except ValueError:
            p_n.append(0)
    # Final bleu score.
    score = bp * math.exp(math.fsum(p_n))
    bleu_output = ("BLEU = {}, {} (BP={}, ratio={}, hyp_len={}, ref_len={})".format(
        round(score*100, 2), '/'.join(map(str, [round(p_i*100, 1) for p_i in p_n_str])),
        round(bp,3), round(hyp_lengths/ref_lengths, 3), hyp_lengths, ref_lengths))
    print(bleu_output, file=sys.stderr)
    return score, p_n_str, hyp_lengths, ref_lengths
def chen_and_cherry(references, hypothesis, p_n, hyp_len,
                    smoothing=0, epsilon=0.1, alpha=5, k=5):
    """
    Boxing Chen and Collin Cherry (2014) A Systematic Comparison of Smoothing
    Techniques for Sentence-Level BLEU. In WMT14.

    :param references: tokenized references (used only by method 5, which
        needs an extra precision order).
    :param hypothesis: tokenized hypothesis (used by methods 5 and 6).
    :param p_n: list of per-order precisions to smooth; methods 3-7 mutate
        and return this same list in place.
    :param hyp_len: hypothesis length in tokens (used by method 4).
    :param smoothing: which method (0-7) to apply; 0 is a no-op.
    :param epsilon: additive count for method 1.
    :param alpha: interpolation weight for method 6.
    :param k: scaling constant for method 4.
    """
    # No smoothing.
    if smoothing == 0:
        return p_n
    # Smoothing method 1: Add *epsilon* counts to precision with 0 counts.
    if smoothing == 1:
        return [Fraction(p_i.numerator + epsilon, p_i.denominator)
                if p_i.numerator == 0 else p_i for p_i in p_n]
    # Smoothing method 2: Add 1 to both numerator and denominator (Lin and Och 2004)
    if smoothing == 2:
        return [Fraction(p_i.numerator + 1, p_i.denominator + 1)
                for p_i in p_n]
    # Smoothing method 3: NIST geometric sequence smoothing
    # The smoothing is computed by taking 1 / ( 2^k ), instead of 0, for each
    # precision score whose matching n-gram count is null.
    # k is 1 for the first 'n' value for which the n-gram match count is null/
    # For example, if the text contains:
    # - one 2-gram match
    # - and (consequently) two 1-gram matches
    # the n-gram count for each individual precision score would be:
    # - n=1 => prec_count = 2 (two unigrams)
    # - n=2 => prec_count = 1 (one bigram)
    # - n=3 => prec_count = 1/2 (no trigram, taking 'smoothed' value of 1 / ( 2^k ), with k=1)
    # - n=4 => prec_count = 1/4 (no fourgram, taking 'smoothed' value of 1 / ( 2^k ), with k=2)
    if smoothing == 3:
        incvnt = 1  # From the mteval-v13a.pl, it's referred to as k.
        for i, p_i in enumerate(p_n):
            if p_i == 0:
                p_n[i] = 1 / 2**incvnt
                incvnt+=1
        return p_n
    # Smoothing method 4:
    # Shorter translations may have inflated precision values due to having
    # smaller denominators; therefore, we give them proportionally
    # smaller smoothed counts. Instead of scaling to 1/(2^k), Chen and Cherry
    # suggests dividing by 1/ln(len(T), where T is the length of the translation.
    if smoothing == 4:
        incvnt = 1
        for i, p_i in enumerate(p_n):
            if p_i == 0:
                p_n[i] = incvnt * k / math.log(hyp_len)  # Note that this K is different from the K from NIST.
                incvnt+=1
        return p_n
    # Smoothing method 5:
    # The matched counts for similar values of n should be similar. To a
    # calculate the n-gram matched count, it averages the n−1, n and n+1 gram
    # matched counts.
    if smoothing == 5:
        m = {}
        # Requires an precision value for an addition ngram order.
        p_n_plus5 = p_n + [modified_precision(references, hypothesis, 5)]
        m[-1] = p_n[0] + 1
        for i, p_i in enumerate(p_n):
            p_n[i] = (m[i-1] + p_i + p_n_plus5[i+1]) / 3
            m[i] = p_n[i]
        return p_n
    # Smoothing method 6:
    # Interpolates the maximum likelihood estimate of the precision *p_n* with
    # a prior estimate *pi0*. The prior is estimated by assuming that the ratio
    # between pn and pn−1 will be the same as that between pn−1 and pn−2.
    if smoothing == 6:
        for i, p_i in enumerate(p_n):
            # NOTE(review): enumerate starts at 0, so i=0 is NOT skipped and
            # pi0 reads p_n[-1]/p_n[-2] (wrap-around). The comment below says
            # the first two *orders* are skipped — likely should be i in [0, 1];
            # confirm against the paper before changing.
            if i in [1,2]:  # Skips the first 2 orders of ngrams.
                continue
            else:
                pi0 = p_n[i-1]**2 / p_n[i-2]
                # No. of ngrams in translation.
                l = sum(1 for _ in ngrams(hypothesis, i+1))
                p_n[i] = (p_i + alpha * pi0) / (l + alpha)
        return p_n
    # Smoothing method 7: composition of methods 4 then 5.
    if smoothing == 7:
        p_n = chen_and_cherry(references, hypothesis, p_n, hyp_len, smoothing=4)
        p_n = chen_and_cherry(references, hypothesis, p_n, hyp_len, smoothing=5)
        return p_n
def sentence_bleu_nbest(reference, hypotheses, weights=(0.25, 0.25, 0.25, 0.25),
                        smoothing=0, epsilon=0.1, alpha=5, k=5):
    """
    Yield a smoothed sentence-level BLEU score for each hypothesis of an
    n-best list against a single tokenized *reference*.

    :param reference: one tokenized reference sentence.
    :param hypotheses: iterable of tokenized candidate translations.
    :param weights: per-n-gram-order weights.
    :param smoothing, epsilon, alpha, k: forwarded to chen_and_cherry.
    """
    for hi, hypothesis in enumerate(hypotheses):
        print('Translation {}... '.format(hi), file=sys.stderr, end="")
        bleu_output = corpus_bleu([(reference,)], [hypothesis], weights)
        bleu_score, p_n, hyp_len, ref_len = bleu_output
        # Bug fixes: pass the *current* hypothesis (the original passed the
        # whole n-best list), wrap the single reference in a tuple to match
        # chen_and_cherry's `references` contract (mirroring the corpus_bleu
        # call above), and forward alpha/k instead of dropping them.
        p_n = chen_and_cherry((reference,), hypothesis, p_n, hyp_len,
                              smoothing, epsilon, alpha, k)
        segment_pn = [w * math.log(p_i) if p_i != 0 else 0
                      for p_i, w in zip(p_n, weights)]
        _bp = min(math.exp(1 - ref_len / hyp_len), 1.0)
        yield _bp * math.exp(math.fsum(segment_pn))
if __name__ == '__main__':
    # Command-line front end: compute corpus-level (and optionally
    # sentence-level) BLEU for a hypothesis/reference pair of files or strings.
    parser = argparse.ArgumentParser(description='Arguments for calculating BLEU')
    parser.add_argument('-t', '--translation', type=str, required=True,
                        help="translation file or string")
    parser.add_argument('-r', '--reference', type=str, required=True,
                        help="reference file or string")
    parser.add_argument('-s', '--smooth', type=int, default=3, metavar='INT', required=False,
                        help="smoothing method type (default: %(default)s)")
    parser.add_argument('-w', '--weights', type=str, default='0.25 0.25 0.25 0.25',
                        help="weights for ngram (default: %(default)s)")
    parser.add_argument('-sl', '--sentence-level', action='store_true',
                        help="print sentence level BLEU score (default: %(default)s)")
    parser.add_argument('-se', '--smooth-epsilon', type=float, default=0.1,
                        help="empirical smoothing parameter for method 1 (default: %(default)s)")
    parser.add_argument('-sk', '--smooth-k', type=int, default=5,
                        help="empirical smoothing parameter for method 4 (default: %(default)s)")
    parser.add_argument('-sa', '--smooth-alpha', type=int, default=5,
                        help="empirical smoothing parameter for method 6 (default: %(default)s)")
    args = parser.parse_args()
    hypothesis_file = args.translation
    reference_file = args.reference
    # Weights arrive as a space-separated string, e.g. "0.25 0.25 0.25 0.25".
    weights = tuple(map(float, args.weights.split()))
    segment_level = args.sentence_level
    smoothing_method = args.smooth
    epsilon = args.smooth_epsilon
    alpha = args.smooth_alpha
    k = args.smooth_k
    # Calculate BLEU scores.
    # Set --sentence-level and other params to calc sentence-level BLEU in a FILE or string
    if os.path.isfile(reference_file):
        # File mode: one whitespace-tokenized sentence per line; each line of
        # the reference file is wrapped in a 1-tuple (single reference per hyp).
        with io.open(reference_file, 'r', encoding='utf8') as reffin, \
                io.open(hypothesis_file, 'r', encoding='utf8') as hypfin:
            list_of_references = ((r.split(),) for r in reffin)
            hypotheses = (h.split() for h in hypfin)
            corpus_bleu(list_of_references, hypotheses,
                        weights=weights, segment_level=segment_level,
                        smoothing=smoothing_method, epsilon=epsilon, alpha=alpha, k=k)
    else:
        # String mode: treat the arguments as one sentence each; forces
        # segment_level=True so the single score is printed.
        reffin = [reference_file]
        hypfin = [hypothesis_file]
        list_of_references = ((r.split(),) for r in reffin)
        hypotheses = (h.split() for h in hypfin)
        corpus_bleu(list_of_references, hypotheses,
                    weights=weights, segment_level=True,
                    smoothing=smoothing_method, epsilon=epsilon, alpha=alpha, k=k)
| 45.692
| 109
| 0.604044
| 1,578
| 11,423
| 4.227503
| 0.190748
| 0.012292
| 0.005396
| 0.008095
| 0.286914
| 0.239694
| 0.192475
| 0.162344
| 0.15335
| 0.105531
| 0
| 0.022946
| 0.290379
| 11,423
| 249
| 110
| 45.875502
| 0.799531
| 0.247133
| 0
| 0.188571
| 0
| 0
| 0.076743
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028571
| false
| 0
| 0.062857
| 0
| 0.154286
| 0.034286
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a972d95469a20ffc4d590103acea6ae8f6b2b426
| 1,746
|
py
|
Python
|
src/elm_doc/tasks/html.py
|
brilliantorg/elm-doc
|
69ddbcd57aee3da6283c2497d735951d95b85426
|
[
"BSD-3-Clause"
] | 29
|
2017-02-01T11:58:44.000Z
|
2021-05-21T15:18:33.000Z
|
src/elm_doc/tasks/html.py
|
brilliantorg/elm-doc
|
69ddbcd57aee3da6283c2497d735951d95b85426
|
[
"BSD-3-Clause"
] | 143
|
2017-07-26T17:34:44.000Z
|
2022-03-01T18:01:43.000Z
|
src/elm_doc/tasks/html.py
|
brilliantorg/elm-doc
|
69ddbcd57aee3da6283c2497d735951d95b85426
|
[
"BSD-3-Clause"
] | 7
|
2018-03-09T10:04:45.000Z
|
2021-10-19T19:17:40.000Z
|
import json
import html
from pathlib import Path
from elm_doc.utils import Namespace
# Note: title tag is omitted, as the Elm app sets the title after
# it's initialized.
# `str.format` placeholders: {mount_point} is the URL prefix for assets and
# artifacts; {init} is the JSON-encoded flags object for Elm.Main.init.
# Literal JS braces are doubled ({{ }}) to survive .format().
PAGE_TEMPLATE = '''
<!DOCTYPE html>
<html>
<head>
<meta charset="UTF-8">
<link rel="shortcut icon" size="16x16, 32x32, 48x48, 64x64, 128x128, 256x256" href="{mount_point}/assets/favicon.ico">
<link rel="stylesheet" href="{mount_point}/assets/style.css">
<script src="{mount_point}/artifacts/elm.js"></script>
<script src="{mount_point}/assets/highlight/highlight.pack.js"></script>
<link rel="stylesheet" href="{mount_point}/assets/highlight/styles/default.css">
</head>
<body>
<script>
try {{
const fontsLink = document.createElement("link");
fontsLink.href = "{mount_point}/assets/fonts/" + ((navigator.userAgent.indexOf("Macintosh") > -1) ? "_hints_off.css" : "_hints_on.css");
fontsLink.rel = "stylesheet";
document.head.appendChild(fontsLink);
}} catch(e) {{
// loading the font is not essential; log the error and move on
console.log(e);
}}
Elm.Main.init({init});
</script>
</body>
</html>
''' # noqa: E501
def _render(mount_point: str = ''):
    """Render the docs index page with all asset URLs rooted at *mount_point*.

    A single trailing slash is dropped so the template's "{mount_point}/..."
    references don't produce double slashes.
    """
    if mount_point.endswith('/'):
        mount_point = mount_point[:-1]
    flags = {
        'flags': {
            'mountedAt': mount_point,
        },
    }
    return PAGE_TEMPLATE.format(
        mount_point=html.escape(mount_point),
        init=json.dumps(flags))
class actions(Namespace):
    def write(output_path: Path, mount_point: str = ''):
        """Render the index page and write it to *output_path*, creating any
        missing parent directories first."""
        output_path.parent.mkdir(parents=True, exist_ok=True)
        rendered = _render(mount_point=mount_point)
        with open(str(output_path), 'w') as out_file:
            out_file.write(rendered)
| 30.103448
| 142
| 0.643757
| 225
| 1,746
| 4.862222
| 0.506667
| 0.155393
| 0.073126
| 0.073126
| 0.067642
| 0.067642
| 0.067642
| 0
| 0
| 0
| 0
| 0.025054
| 0.199885
| 1,746
| 57
| 143
| 30.631579
| 0.758053
| 0.052692
| 0
| 0
| 0
| 0.042553
| 0.595152
| 0.246061
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042553
| false
| 0
| 0.085106
| 0
| 0.170213
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a9767449042e9e6827a47f70074761e36edb412a
| 2,666
|
py
|
Python
|
nb.py
|
corytaitchison/online-reviews
|
10de9218137658269ba36849dfa7e8f643335d01
|
[
"MIT"
] | null | null | null |
nb.py
|
corytaitchison/online-reviews
|
10de9218137658269ba36849dfa7e8f643335d01
|
[
"MIT"
] | null | null | null |
nb.py
|
corytaitchison/online-reviews
|
10de9218137658269ba36849dfa7e8f643335d01
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
###
import nltk
from nltk.corpus import stopwords
from nltk.tokenize import word_tokenize
# from nltk.stem import WordNetLemmatizer
from nltk.stem import PorterStemmer
###
from sklearn.naive_bayes import MultinomialNB
from sklearn.metrics import confusion_matrix, classification_report
from sklearn.model_selection import train_test_split
from sklearn.feature_extraction.text import CountVectorizer
###
from loadRandom import loadRandom2
ps = PorterStemmer()


def textProcess(text):
    """Tokenize *text*, drop English stopwords, and Porter-stem the rest."""
    stop_set = set(stopwords.words('english'))
    tokens = word_tokenize(text)
    return [ps.stem(tok) for tok in tokens if tok not in stop_set]
if __name__ == '__main__':
    # Train a Multinomial Naive Bayes classifier to predict a review's
    # "interaction" quantile from its bag-of-words text features.
    _seed = 123
    _observations = 1e4
    _subsets = [1, 2, 3, 4]
    location = '/Users/caitchison/Documents/Yelp/yelp_dataset/restaurants_only.csv'
    # assumes loadRandom2 samples _observations rows from the n-row CSV — TODO confirm
    data = loadRandom2(location, _observations, seed=_seed, n=3778803).loc[:,
        ('text', 'useful', 'cool', 'funny', 'stars_x')]
    # Calculate "interaction" score
    data['interactions'] = data.useful + data.cool + data.funny
    data = data[data['interactions'] >= _subsets[0]].dropna()
    # Subset to get equal amounts of low-useful and high-useful
    masks = [data.interactions == x for x in _subsets]
    masks.append(data.interactions > _subsets[-1])
    subsetSize = min([sum(mask) for mask in masks])
    print("Creating subsets of size %i" % subsetSize)
    newData = pd.DataFrame([])
    for mask in masks:
        df = data[mask].sample(n=subsetSize, random_state=_seed)
        newData = newData.append(df)
    data = newData
    # Split interactions into quantiles (5)
    data['group'] = pd.qcut(data['interactions'], q=5, labels=False)
    print(pd.qcut(data['interactions'], q=5).cat.categories)
    # NOTE(review): the result of rename() is discarded (no assignment and no
    # inplace=True), so the 'stars_x' column is never actually renamed —
    # likely intended `data = data.rename(...)`.
    data.rename(columns={"stars_x": "stars"})
    # Create a bag of words and convert the text to a sparse matrix
    text = np.array(data['text'])
    bow = CountVectorizer(analyzer=textProcess).fit(text)
    print("Unique (Not Stop) Words:", len(bow.vocabulary_))
    text = bow.transform(text)
    # Split into features for testing and training at 30%
    xTrain, xTest, yTrain, yTest = train_test_split(
        text, np.array(data['group']), test_size=0.3, random_state=_seed)
    # Train model (Multinomial Naive Bayes)
    nb = MultinomialNB()
    nb.fit(xTrain, yTrain)
    # Test and Evaluate Model
    preds = nb.predict(xTest)
    print(confusion_matrix(yTest, preds))
    print('\n')
    print(classification_report(yTest, preds))
| 33.746835
| 122
| 0.686422
| 340
| 2,666
| 5.267647
| 0.447059
| 0.053601
| 0.0134
| 0.020101
| 0.026801
| 0.026801
| 0
| 0
| 0
| 0
| 0
| 0.012664
| 0.2003
| 2,666
| 78
| 123
| 34.179487
| 0.827392
| 0.140285
| 0
| 0
| 0
| 0
| 0.102948
| 0.029037
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02
| false
| 0
| 0.24
| 0
| 0.28
| 0.12
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a976a9884a077db66cbb3f3d300b2d865662f9c4
| 4,346
|
py
|
Python
|
docker-images/slack-prs/main.py
|
kiteco/kiteco-public
|
74aaf5b9b0592153b92f7ed982d65e15eea885e3
|
[
"BSD-3-Clause"
] | 17
|
2022-01-10T11:01:50.000Z
|
2022-03-25T03:21:08.000Z
|
docker-images/slack-prs/main.py
|
kiteco/kiteco-public
|
74aaf5b9b0592153b92f7ed982d65e15eea885e3
|
[
"BSD-3-Clause"
] | 1
|
2022-01-13T14:28:47.000Z
|
2022-01-13T14:28:47.000Z
|
docker-images/slack-prs/main.py
|
kiteco/kiteco-public
|
74aaf5b9b0592153b92f7ed982d65e15eea885e3
|
[
"BSD-3-Clause"
] | 7
|
2022-01-07T03:58:10.000Z
|
2022-03-24T07:38:20.000Z
|
import time
import json
import argparse
import websocket
import requests
import github
MY_NAME = 'kit'  # should be able to avoid this in the future
TOKEN = 'XXXXXXX'
GITHUB_USERNAME_BY_SLACK_USERNAME = {
    "adam": "adamsmith",
    # XXXXXXX ...
}
channel_ids_by_name = {}  # populated by main() from the rtm.start response
channel_names_by_id = {}
next_id = 0  # monotonically increasing RTM message id


def send(conn, channel, text):
    """Send *text* to *channel* over the RTM websocket *conn*.

    *channel* may be a channel name (resolved via channel_ids_by_name) or an
    already-resolved channel ID. Bumps the module-level message id counter
    and records the send time in `last_send_timestamp`.
    """
    global next_id, last_send_timestamp
    channel_id = channel_ids_by_name.get(channel, channel)
    payload = dict(
        id=next_id,
        type="message",
        channel=channel_id,
        text=text)
    msg = json.dumps(payload)
    # Fix: the payload was serialized twice (json.dumps called again for the
    # actual send); reuse the already-dumped string.
    conn.send(msg)
    next_id += 1
    last_send_timestamp = time.time()
def slack_escape(s):
    """Escape the three characters Slack's message API treats specially
    (&, <, >) so *s* renders literally in a message."""
    table = str.maketrans({"&": "&amp;", "<": "&lt;", ">": "&gt;"})
    return s.translate(table)
def pr_queue_for(github_username, prs, comments_by_pr):
    """
    Build a Slack-formatted, per-PR summary of everything *github_username*
    is assigned to, with new-comment counts.

    :param prs: PR dicts as returned by github.fetch_prs().
    :param comments_by_pr: PR-number -> comments cache; filled lazily and
        mutated in place so callers can reuse fetched comments across users.
    :return: multi-line message, or a fixed fallback string when the user is
        on no pull requests.
    """
    response = ""
    for role, pr in github.prs_for(github_username, prs):
        title, url, number = pr["title"], pr["html_url"], pr["number"]
        comments = comments_by_pr.get(number, None)
        if not comments:
            # Cache miss: fetch once and remember for subsequent users.
            comments = github.fetch_comments(number)
            comments_by_pr[number] = comments
        updates_by_user = github.summarize_updates_for(github_username, comments)
        if len(updates_by_user) == 0:
            update_msg = "no updates"
        else:
            update_msg = ", ".join("%d new from %s" % (count, user) for user, count in updates_by_user.items())
        response += 'you are *%s* for %s %s: *%s*\n' % (role, url, slack_escape(title), update_msg)
    if response == "":
        return "you are not on any pull requests"
    else:
        return response
def updates_since(github_username, prs, comments_by_pr, since):
    """
    Like pr_queue_for, but only report PRs with comment activity after the
    *since* timestamp; returns "" when there is nothing new.

    :param comments_by_pr: PR-number -> comments cache, mutated in place.
    """
    response = ""
    for role, pr in github.prs_for(github_username, prs):
        title, url, number = pr["title"], pr["html_url"], pr["number"]
        comments = comments_by_pr.get(number, None)
        if not comments:
            comments = github.fetch_comments(number)
            comments_by_pr[number] = comments
        updates_by_user = github.summarize_updates_since(github_username, comments, since)
        if updates_by_user:
            status = ", ".join("%d new from %s" % (count, user) for user, count in updates_by_user.items())
            response += '*%s* (%s) %s\n' % (status, url, slack_escape(title))
    return response
def main():
    """Connect to Slack RTM and DM each mapped user their PR status: the full
    queue with --daily, otherwise only updates since the --since file's
    timestamp (which is rewritten with the current time)."""
    parser = argparse.ArgumentParser()
    parser.add_argument("--daily", action="store_true")
    parser.add_argument("--since", type=str)
    args = parser.parse_args()
    conn = None
    user_ids_by_name = {}
    user_names_by_id = {}
    im_channel_by_user = {}
    # Get messaging setup info
    payload = dict(token=TOKEN)
    r = requests.post('https://slack.com/api/rtm.start', data=payload).json()
    if r["ok"]:
        print("Successfully connected to messaging API")
    else:
        print("Error:\n" + str(r))
        return
    # Unpack general info
    dial_url = r["url"]
    # Unpack user info
    users = r["users"]
    for user in users:
        name = user["name"]
        id = user["id"]
        user_ids_by_name[name] = id
        user_names_by_id[id] = name
    # Unpack channel info (fills the module-level channel maps used by send)
    channels = r["channels"]
    for channel in channels:
        name = channel["name"]
        id = channel["id"]
        channel_ids_by_name[name] = id
        channel_names_by_id[id] = name
    # Map each user name to their direct-message channel id.
    for im_channel in r["ims"]:
        im_channel_by_user[user_names_by_id[im_channel["user"]]] = im_channel["id"]
    # Open websocket
    conn = websocket.create_connection(dial_url)
    print("Connected")
    # Send private messages
    prs = github.fetch_prs()
    comments = {}  # shared PR-comment cache across all users
    if args.daily:
        for user, ch in im_channel_by_user.items():
            github_username = GITHUB_USERNAME_BY_SLACK_USERNAME.get(user, None)
            if github_username:
                print('Sending PM to %s...' % user)
                msg = pr_queue_for(github_username, prs, comments)
                print(msg.replace("\n", "\n    "))
                send(conn, ch, "Here is your daily pull request update:\n" + msg)
    else:
        since = 0
        try:
            if args.since:
                # Read prev timestamp
                with open(args.since) as f:
                    since = float(f.read().strip())
                # Write new timestamp
                with open(args.since, "w") as f:
                    f.write(str(time.time()))
        except (IOError, ValueError):
            # Missing or malformed timestamp file: report everything (since=0).
            pass
        for user, ch in im_channel_by_user.items():
            github_username = GITHUB_USERNAME_BY_SLACK_USERNAME.get(user, None)
            if github_username:
                msg = updates_since(github_username, prs, comments, since)
                if msg:
                    print('Sending PM to %s...' % user)
                    print(msg)
                    send(conn, ch, msg)
if __name__ == '__main__':
    try:
        main()
    except KeyboardInterrupt:
        # Allow a clean Ctrl-C exit without a traceback.
        pass
| 24.834286
| 102
| 0.685228
| 645
| 4,346
| 4.396899
| 0.237209
| 0.074048
| 0.035966
| 0.028209
| 0.422426
| 0.375882
| 0.299013
| 0.27433
| 0.27433
| 0.27433
| 0
| 0.001118
| 0.176484
| 4,346
| 174
| 103
| 24.977011
| 0.791282
| 0.049701
| 0
| 0.25
| 0
| 0
| 0.108764
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.039063
| false
| 0.015625
| 0.046875
| 0
| 0.125
| 0.054688
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a9775f738c3044fcff42b57c7ed49ac310db7479
| 656
|
py
|
Python
|
commands/meme.py
|
EFFLUX110/efflux-discord-bot
|
fe382fc822f852efab8d4742daa756045a17bff3
|
[
"MIT"
] | null | null | null |
commands/meme.py
|
EFFLUX110/efflux-discord-bot
|
fe382fc822f852efab8d4742daa756045a17bff3
|
[
"MIT"
] | 4
|
2022-02-03T18:24:32.000Z
|
2022-02-03T19:24:51.000Z
|
commands/meme.py
|
EFFLUX110/efflux-discord-bot
|
fe382fc822f852efab8d4742daa756045a17bff3
|
[
"MIT"
] | 1
|
2022-02-03T18:12:44.000Z
|
2022-02-03T18:12:44.000Z
|
import discord
import requests
from discord.ext import commands
class Meme(commands.Cog):
    """Cog providing the ``meme`` command: posts a random meme as an embed."""

    def __init__(self, bot):
        self.bot = bot

    @commands.command()
    async def meme(self, ctx):
        """Fetch a random meme from memes.blademaker.tv and send it as an
        embed, crediting the requesting user in the footer."""
        # NOTE: blocking HTTP call inside an async command — consider an async
        # client if this ever causes latency.
        r = requests.get("https://memes.blademaker.tv/api?lang=en")
        res = r.json()
        title = res['title']
        sub = res['subreddit']
        # Fix: the API's 'ups'/'downs' fields were read into unused locals;
        # they are simply not needed here.
        m = discord.Embed(title=f"{title}\nsubreddit: {sub}")
        m.set_image(url=res["image"])
        m.set_footer(text=f"Requested by {ctx.author}", icon_url=ctx.author.avatar_url)
        await ctx.send(embed=m)
def setup(bot):
    # Extension entry point: discord.py calls this when the extension loads.
    bot.add_cog(Meme(bot))
| 28.521739
| 87
| 0.617378
| 94
| 656
| 4.212766
| 0.531915
| 0.035354
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.22561
| 656
| 23
| 88
| 28.521739
| 0.779528
| 0
| 0
| 0
| 0
| 0
| 0.17656
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.15
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a977697bb7ffe10b5b5f5a391df5f58451adfd57
| 717
|
py
|
Python
|
45.py
|
brianfl/project-euler
|
9f83a3c2da04fd0801a4a575081add665edccd5f
|
[
"MIT"
] | null | null | null |
45.py
|
brianfl/project-euler
|
9f83a3c2da04fd0801a4a575081add665edccd5f
|
[
"MIT"
] | null | null | null |
45.py
|
brianfl/project-euler
|
9f83a3c2da04fd0801a4a575081add665edccd5f
|
[
"MIT"
] | null | null | null |
# Project Euler 45: find the next number after 40755 that is triangular,
# pentagonal and hexagonal. H(j) = j*(2j-1) is the j-th hexagonal number;
# inverting P(n) = n(3n-1)/2 and T(n) = n(n+1)/2 with the quadratic formula,
# H is pentagonal iff (1 + sqrt(1+24H))/6 is an integer and triangular iff
# (-1 + sqrt(1+8H))/2 is an integer.
target_num = 0
j = 0
while target_num == 0:
    num = j*(2*j-1)
    pent_ind = float((1 + ( 1 + 24*num)**.5)/6)
    # Bug fix: the original omitted the square root (**.5) here, so tri_ind
    # was always the integer 4*H and the triangular test never filtered
    # anything. (Every hexagonal number is triangular, so the final answer
    # happened to be unaffected.)
    tri_ind = float((-1 + (1 + 8*num)**.5)/2)
    if pent_ind.is_integer() and tri_ind.is_integer():
        # Skip the two known smaller solutions given in the problem statement.
        if num != 1 and num != 40755:
            target_num = num
    j += 1
print(target_num) # 1533776805
"""
I had a brute force solution, but it was a bit over a minute.
By solving for the index values of pentagon and triangle numbers
in terms of the index value of the hexagon numbers,
the formulas in pent_ind and tri_ind pop out of the quadratic equation.
Basically those variables will only be integers if j is a valid index
for a pentagon number and triangle number as well.
"""
| 29.875
| 71
| 0.661088
| 133
| 717
| 3.473684
| 0.481203
| 0.077922
| 0.019481
| 0.025974
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065455
| 0.232915
| 717
| 24
| 72
| 29.875
| 0.774545
| 0.013947
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a97827ef5e7685a79286da4ad9d58d63d84d97d6
| 801
|
py
|
Python
|
client.py
|
hani9/smartlockers
|
bd7a996be58769341367d58d5c80c70ad7bd1cb6
|
[
"MIT"
] | null | null | null |
client.py
|
hani9/smartlockers
|
bd7a996be58769341367d58d5c80c70ad7bd1cb6
|
[
"MIT"
] | null | null | null |
client.py
|
hani9/smartlockers
|
bd7a996be58769341367d58d5c80c70ad7bd1cb6
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
# Smart-locker client: waits for the server to send a GPIO pin number and
# pulses that pin high for one second (e.g. to trip a lock relay).
# Import the libraries
import socket
import RPi.GPIO as GPIO
import time
# Basic GPIO configuration
GPIO.setwarnings(False)
GPIO.setmode(GPIO.BCM)
GPIO.setup(18, GPIO.OUT)  # Only pin 18 is used. A loop could enable several at once.
# Server IP and communication port
host = "PLACE_YOUR_SERVER_IP_HERE"
port = 12345
# Infinite loop: one short-lived connection per command
while 1:
    s = socket.socket()  # Create the socket
    s.connect((host, port))  # Connect to the server
    data = s.recv(1024)  # Receive data (expected: a GPIO pin number as text)
    GPIO.output(int(data), GPIO.HIGH)  # The received value selects the pin driven high
    time.sleep(1)  # Wait 1 second
    GPIO.output(int(data), GPIO.LOW)  # Drive the pin low again
    s.close()  # Close the connection
| 26.7
| 103
| 0.705368
| 134
| 801
| 4.186567
| 0.641791
| 0.024955
| 0.046346
| 0.060606
| 0.074866
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026275
| 0.19226
| 801
| 29
| 104
| 27.62069
| 0.840804
| 0.50437
| 0
| 0
| 0
| 0
| 0.065274
| 0.065274
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.1875
| 0
| 0.1875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a978a3e063f71ae417a8f86e87e70e36b033503d
| 16,820
|
py
|
Python
|
src/mlpro/rl/pool/envmodels/mlp_robotinhtm.py
|
fhswf/MLPro
|
e944b69bed9c2d5548677711270e4a4fe868aea9
|
[
"Apache-2.0"
] | 5
|
2022-01-31T15:52:19.000Z
|
2022-03-21T18:34:27.000Z
|
src/mlpro/rl/pool/envmodels/mlp_robotinhtm.py
|
fhswf/MLPro
|
e944b69bed9c2d5548677711270e4a4fe868aea9
|
[
"Apache-2.0"
] | 61
|
2021-12-17T13:03:59.000Z
|
2022-03-31T10:24:37.000Z
|
src/mlpro/rl/pool/envmodels/mlp_robotinhtm.py
|
fhswf/MLPro
|
e944b69bed9c2d5548677711270e4a4fe868aea9
|
[
"Apache-2.0"
] | null | null | null |
## -------------------------------------------------------------------------------------------------
## -- Project : MLPro - A Synoptic Framework for Standardized Machine Learning Tasks
## -- Package : mlpro.rl.envmodels
## -- Module : mlp_robotinhtm
## -------------------------------------------------------------------------------------------------
## -- History :
## -- yyyy-mm-dd Ver. Auth. Description
## -- 2021-12-17 0.0.0 MRD Creation
## -- 2021-12-17 1.0.0 MRD Released first version
## -- 2021-12-20 1.0.1 DA Replaced 'done' by 'success'
## -- 2021-12-21 1.0.2 DA Class MLPEnvMdel: renamed method reset() to _reset()
## -- 2022-01-02 2.0.0 MRD Refactoring due to the changes on afct pool on
## -- TorchAFctTrans
## -- 2022-02-25 2.0.1 SY Refactoring due to auto generated ID in class Dimension
## -------------------------------------------------------------------------------------------------
"""
Ver. 2.0.1 (2022-02-25)
This module provides Environment Model based on MLP Neural Network for
robotinhtm environment.
"""
import torch
import transformations
from mlpro.rl.models import *
from mlpro.rl.pool.envs.robotinhtm import RobotArm3D
from mlpro.rl.pool.envs.robotinhtm import RobotHTM
from mlpro.sl.pool.afct.afctrans_pytorch import TorchAFctTrans
from torch.utils.data.sampler import SubsetRandomSampler
from collections import deque
def init(module, weight_init, bias_init, gain=1):
    """Apply *weight_init* (scaled by *gain*) to the module's weight and
    *bias_init* to its bias, in place, then return the module so the call
    can be used as an expression."""
    weight_init(module.weight.data, gain=gain)
    bias_init(module.bias.data)
    return module
class RobotMLPModel(torch.nn.Module):
    """MLP that maps a joint state/action vector to per-link 7-D poses
    (position + quaternion, judging by the 7-wide output reshape)."""

    def __init__(self, n_joint, timeStep):
        """
        :param n_joint: number of robot joints (input is 2 values per joint).
        :param timeStep: integration step applied to the first half of the
            input in forward().
        """
        super(RobotMLPModel, self).__init__()
        self.n_joint = n_joint
        self.timeStep = timeStep
        self.hidden = 128
        # Orthogonal weight init with gain sqrt(2), zero bias.
        init_ = lambda m: init(m, torch.nn.init.orthogonal_, lambda x: torch.nn.init.
                               constant_(x, 0), np.sqrt(2))
        # 3 hidden Tanh layers; output is 7 values per link (n_joint + 1 links).
        self.model1 = torch.nn.Sequential(
            init_(torch.nn.Linear(self.n_joint,self.hidden)),
            torch.nn.Tanh(),
            init_(torch.nn.Linear(self.hidden,self.hidden)),
            torch.nn.Tanh(),
            init_(torch.nn.Linear(self.hidden,self.hidden)),
            torch.nn.Tanh(),
            init_(torch.nn.Linear(self.hidden,7*(self.n_joint+1))),
            torch.nn.Tanh()
        )

    def forward(self, I):
        """
        :param I: batch of flat vectors reshaped to (batch, 2, n_joint);
            presumably [rates, angles] per joint since the first row is scaled
            by timeStep before the two rows are summed — TODO confirm.
        :return: tensor of shape (batch, n_joint + 1, 7).
        """
        BatchSize=I.shape[0]
        # Scale the first n_joint values by timeStep, leave the second group
        # unchanged, then collapse the two groups by summation.
        newI = I.reshape(BatchSize,2,self.n_joint) * torch.cat([torch.Tensor([self.timeStep]).repeat(1,self.n_joint), torch.ones(1,self.n_joint)])
        newI = torch.sum(newI,dim=1)
        out2 = self.model1(newI)
        out2 = out2.reshape(BatchSize,self.n_joint+1,7)
        return out2
class IOElement(BufferElement):
    """Buffer element pairing one model input tensor with its target output."""

    def __init__(self, p_input: torch.Tensor, p_output: torch.Tensor):
        # Fixed keys "input"/"output" are what the training buffer indexes on.
        payload = {"input": p_input, "output": p_output}
        super().__init__(payload)
# Buffer
class MyOwnBuffer(Buffer, torch.utils.data.Dataset):
    """A Buffer that doubles as a torch Dataset and tracks the lifetime
    number of elements ever added (independent of the current size)."""

    def __init__(self, p_size=1):
        Buffer.__init__(self, p_size=p_size)
        self._internal_counter = 0

    def add_element(self, p_elem: BufferElement):
        """Store *p_elem* and bump the lifetime add counter."""
        Buffer.add_element(self, p_elem)
        self._internal_counter = self._internal_counter + 1

    def get_internal_counter(self):
        """Total number of elements ever added, not the current size."""
        return self._internal_counter

    def __getitem__(self, idx):
        """Dataset protocol: return the (input, output) pair at *idx*."""
        pair = (self._data_buffer["input"][idx], self._data_buffer["output"][idx])
        return pair
class RobothtmAFct(TorchAFctTrans):
C_NAME = "Robothtm Adaptive Function"
C_BUFFER_CLS = MyOwnBuffer
def _setup_model(self):
self.joint_num = self._output_space.get_num_dim() - 6
self.net_model = RobotMLPModel(self.joint_num, 0.01)
self.optimizer = torch.optim.Adam(self.net_model.parameters(), lr=3e-4)
self.loss_dyn = torch.nn.MSELoss()
self.train_model = True
self.input_temp = None
self.sim_env = RobotArm3D()
joints = []
jointType = []
vectLinkLength = [[0, 0, 0], [0, 0, 0]]
jointType.append("rz")
for joint in range(self.joint_num - 1):
vectLinkLength.append([0, 0.7, 0])
jointType.append("rx")
jointType.append("f")
for x in range(len(jointType)):
vectorLink = dict(x=vectLinkLength[x][0], y=vectLinkLength[x][1], z=vectLinkLength[x][2])
joint = dict(
Joint_name="Joint %d" % x,
Joint_type=jointType[x],
Vector_link_length=vectorLink,
)
joints.append(joint)
for robo in joints:
self.sim_env.add_link_joint(
lvector=torch.Tensor(
[
[
robo["Vector_link_length"]["x"],
robo["Vector_link_length"]["y"],
robo["Vector_link_length"]["z"],
]
]
),
jointAxis=robo["Joint_type"],
thetaInit=torch.Tensor([np.radians(0)]),
)
self.sim_env.update_joint_coords()
def _input_preproc(self, p_input: torch.Tensor) -> torch.Tensor:
input = torch.cat([p_input[0][6+self.joint_num:], p_input[0][6:6+self.joint_num]])
input = input.reshape(1,self.joint_num*2)
self.input_temp = p_input[0][:3].reshape(1,3)
return input
def _output_postproc(self, p_output: torch.Tensor) -> torch.Tensor:
angles = torch.Tensor([])
thets = torch.zeros(3)
for idx in range(self.joint_num):
angle = torch.Tensor(transformations.euler_from_quaternion(p_output[-1][idx][3:].detach().numpy(), axes="rxyz")) - thets
thets = torch.Tensor(transformations.euler_from_quaternion(p_output[-1][idx][3:].detach().numpy(), axes="rxyz"))
angles = torch.cat([angles, torch.norm(angle).reshape(1, 1)], dim=1)
output = torch.cat([self.input_temp, p_output[-1][-1][:3].reshape(1,3)], dim=1)
output = torch.cat([output, angles], dim=1)
return output
def _adapt(self, p_input: Element, p_output: Element) -> bool:
model_input = deque(p_input.get_values()[6:])
model_input.rotate(self.joint_num)
model_input = torch.Tensor([list(model_input)])
self.sim_env.set_theta(torch.Tensor([p_output.get_values()[6 : 6 + self.joint_num]]))
self.sim_env.update_joint_coords()
model_output = self.sim_env.convert_to_quaternion().reshape(1,self.joint_num+1,7)
self._add_buffer(IOElement(model_input, model_output))
if self._buffer.get_internal_counter() % 100 != 0:
return False
# Divide Test and Train
if self.train_model:
dataset_size = len(self._buffer)
indices = list(range(dataset_size))
split = int(np.floor(0.3 * dataset_size))
np.random.seed(random.randint(1,1000))
np.random.shuffle(indices)
train_indices, test_indices = indices[split:], indices[:split]
train_sampler = SubsetRandomSampler(train_indices)
test_sampler = SubsetRandomSampler(test_indices)
trainer = torch.utils.data.DataLoader(self._buffer, batch_size=100, sampler=train_sampler)
tester = torch.utils.data.DataLoader(self._buffer, batch_size=100, sampler=test_sampler)
# Training
self.net_model.train()
for i, (In, Label) in enumerate(trainer):
outputs = self.net_model(In)
loss = self.loss_dyn(outputs, Label)
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
test_loss = 0
self.net_model.eval()
for i, (In, Label) in enumerate(tester):
outputs = self.net_model(In)
loss = self.loss_dyn(outputs, Label)
test_loss += loss.item()
if test_loss/len(tester) < 5e-9:
self.train_model = False
return True
    def _add_buffer(self, p_buffer_element: IOElement):
        """Append one input/label sample to the internal training buffer."""
        self._buffer.add_element(p_buffer_element)
class MLPEnvModel(EnvModel, Mode):

    C_NAME = "HTM Env Model"

## -------------------------------------------------------------------------------------------------
    def __init__(
        self,
        p_num_joints=4,
        p_target_mode="Random",
        p_ada=True,
        p_logging=False,
    ):
        """Environment model of a p_num_joints robot arm with an adaptive
        (HTM-based) state transition function.

        Parameters:
            p_num_joints: number of rotational joints (base 'rz' joint plus
                p_num_joints-1 'rx' joints with 0.7 m links, plus a fixed
                end effector).
            p_target_mode: 'random' resamples one of five fixed targets on
                every reset; any other value keeps a fixed target at
                (0.5, 0.5, 0.5).
                NOTE(review): the default 'Random' does NOT equal the
                lower-case 'random' compared against below, so the default
                takes the fixed-target branch -- confirm intent.
            p_ada: adaptivity switch passed through to EnvModel/AFctSTrans.
            p_logging: logging switch passed through to EnvModel/Mode.
        """
        # Build the kinematic chain configuration.
        self.RobotArm1 = RobotArm3D()
        roboconf = {}
        roboconf["Joints"] = []
        jointType = []
        vectLinkLength = [[0, 0, 0], [0, 0, 0]]
        jointType.append("rz")
        for _ in range(p_num_joints - 1):
            vectLinkLength.append([0, 0.7, 0])
            jointType.append("rx")
        jointType.append("f")
        for x in range(len(jointType)):
            vectorLink = dict(x=vectLinkLength[x][0], y=vectLinkLength[x][1], z=vectLinkLength[x][2])
            joint = dict(
                Joint_name="Joint %d" % x,
                Joint_type=jointType[x],
                Vector_link_length=vectorLink,
            )
            roboconf["Joints"].append(joint)
        roboconf["Target_mode"] = p_target_mode
        roboconf["Update_rate"] = 0.01

        # Instantiate the arm from the configuration, all joints at 0 rad.
        for robo in roboconf["Joints"]:
            self.RobotArm1.add_link_joint(
                lvector=torch.Tensor(
                    [
                        [
                            robo["Vector_link_length"]["x"],
                            robo["Vector_link_length"]["y"],
                            robo["Vector_link_length"]["z"],
                        ]
                    ]
                ),
                jointAxis=robo["Joint_type"],
                thetaInit=torch.Tensor([np.radians(0)]),
            )
        self.RobotArm1.update_joint_coords()
        self.jointangles = self.RobotArm1.thetas
        self.dt = roboconf["Update_rate"]
        self.modes = roboconf["Target_mode"]
        self.target = None
        self.init_distance = None
        self.num_joint = self.RobotArm1.get_num_joint()
        # Maximum reach: end-effector distance from the origin at zero pose.
        self.reach = torch.norm(torch.Tensor([[0.0, 0.0, 0.0]]) - self.RobotArm1.joints[:3, [-1]].reshape(1, 3))

        # Setup space
        # 1 Setup state space: 3 target coords, 3 end-effector coords, then
        # one angle per joint.
        # NOTE(review): the Px/Py/Pz dimensions reuse the 'Targetx/y/z' base
        # names -- looks like a copy/paste slip; kept byte-identical here.
        obs_space = ESpace()
        obs_space.add_dim(Dimension("Tx", "Targetx", "", "m", "m", p_boundaries=[-np.inf, np.inf]))
        obs_space.add_dim(Dimension("Ty", "Targety", "", "m", "m", p_boundaries=[-np.inf, np.inf]))
        obs_space.add_dim(Dimension("Tz", "Targetz", "", "m", "m", p_boundaries=[-np.inf, np.inf]))
        obs_space.add_dim(Dimension("Px", "Targetx", "", "m", "m", p_boundaries=[-np.inf, np.inf]))
        obs_space.add_dim(Dimension("Py", "Targety", "", "m", "m", p_boundaries=[-np.inf, np.inf]))
        obs_space.add_dim(Dimension("Pz", "Targetz", "", "m", "m", p_boundaries=[-np.inf, np.inf]))
        for idx in range(self.num_joint):
            obs_space.add_dim(
                Dimension("J%i" % (idx), "Joint%i" % (idx), "", "deg", "deg", p_boundaries=[-np.inf, np.inf])
            )

        # 2 Setup action space: one angular velocity per joint.
        # NOTE(review): "\frac" contains the escape \f (form feed) -- a raw
        # string was probably intended; kept byte-identical here.
        action_space = ESpace()
        for idx in range(self.num_joint):
            action_space.add_dim(
                Dimension(
                    "A%i" % (idx),
                    "AV%i" % (idx),
                    "",
                    "rad/sec",
                    "\frac{rad}{sec}",
                    p_boundaries=[-np.pi, np.pi],
                )
            )

        # Setup the adaptive state transition function (HTM-based).
        afct_strans = AFctSTrans(
            RobothtmAFct,
            p_state_space=obs_space,
            p_action_space=action_space,
            p_threshold=-1,
            p_buffer_size=10000,
            p_ada=p_ada,
            p_logging=p_logging,
        )

        EnvModel.__init__(
            self,
            p_observation_space=obs_space,
            p_action_space=action_space,
            p_latency=timedelta(seconds=self.dt),
            p_afct_strans=afct_strans,
            p_afct_reward=None,
            p_afct_success=None,
            p_afct_broken=None,
            p_ada=p_ada,
            p_logging=p_logging,
        )
        Mode.__init__(self, p_mode=Mode.C_MODE_SIM, p_logging=p_logging)

        if self.modes == "random":
            self._sample_target()
        else:
            self._set_target(torch.Tensor([[0.5, 0.5, 0.5]]))
        self.reset()

## -------------------------------------------------------------------------------------------------
    def _set_target(self, p_target):
        """Set the target position and cache the arm's current distance to it."""
        self.target = p_target
        self.init_distance = torch.norm(self.RobotArm1.joints[:3, [-1]].reshape(1, 3) - self.target)

## -------------------------------------------------------------------------------------------------
    def _sample_target(self):
        """Draw one of five fixed workspace targets uniformly at random."""
        num = random.random()
        if num < 0.2:
            self._set_target(torch.Tensor([[0.5, 0.5, 0.5]]))
        elif num < 0.4:
            self._set_target(torch.Tensor([[0.0, 0.5, 0.5]]))
        elif num < 0.6:
            self._set_target(torch.Tensor([[-0.5, 0.0, 0.5]]))
        elif num < 0.8:
            self._set_target(torch.Tensor([[0.0, -0.5, 0.5]]))
        else:
            self._set_target(torch.Tensor([[-0.5, -0.5, 0.5]]))

## -------------------------------------------------------------------------------------------------
    def _compute_success(self, p_state: State = None) -> bool:
        """Success when the end effector is within 0.1 m of the target."""
        disterror = np.linalg.norm(np.array(p_state.get_values())[:3] - np.array(p_state.get_values())[3:6])
        if disterror <= 0.1:
            self._state.set_terminal(True)
            return True
        return False

## -------------------------------------------------------------------------------------------------
    def _compute_broken(self, p_state: State) -> bool:
        """The simulated arm can never be broken."""
        return False

## -------------------------------------------------------------------------------------------------
    def _compute_reward(self, p_state_old: State, p_state_new: State) -> Reward:
        """Reward = -(distance / initial distance) - 0.1, plus a +1 bonus
        when the end effector is within 0.1 m of the target."""
        reward = Reward(self.C_REWARD_TYPE)
        disterror = np.linalg.norm(np.array(p_state_new.get_values())[:3] - np.array(p_state_new.get_values())[3:6])
        ratio = disterror / self.init_distance.item()
        rew = -np.ones(1) * ratio
        rew = rew - 10e-2
        if disterror <= 0.1:
            rew = rew + 1
        rew = rew.astype("float64")
        reward.set_overall_reward(rew)
        return reward

## -------------------------------------------------------------------------------------------------
    def set_theta(self, theta):
        """Overwrite the joint angles and recompute forward kinematics."""
        self.RobotArm1.thetas = theta.reshape(self.num_joint)
        self.RobotArm1.update_joint_coords()
        self.jointangles = self.RobotArm1.thetas

## -------------------------------------------------------------------------------------------------
    def _reset(self, p_seed=None) -> None:
        """Zero all joints, optionally resample the target, and rebuild the
        state vector [target(3), end effector(3), joint angles(n)]."""
        self.set_random_seed(p_seed)
        theta = torch.zeros(self.RobotArm1.get_num_joint())
        self.RobotArm1.set_theta(theta)
        self.RobotArm1.update_joint_coords()
        self.jointangles = self.RobotArm1.thetas
        if self.modes == "random":
            self._sample_target()
        obs = torch.cat(
            [
                self.target,
                self.RobotArm1.joints[:3, [-1]].reshape(1, 3),
                self.RobotArm1.thetas.reshape(1, self.num_joint),
            ],
            dim=1,
        )
        obs = obs.cpu().flatten().tolist()
        self._state = State(self._state_space)
        self._state.set_values(obs)
| 39.299065
| 146
| 0.542866
| 2,052
| 16,820
| 4.26462
| 0.15692
| 0.007542
| 0.006514
| 0.006399
| 0.46429
| 0.426237
| 0.397555
| 0.378928
| 0.365558
| 0.348989
| 0
| 0.03303
| 0.289001
| 16,820
| 427
| 147
| 39.391101
| 0.698721
| 0.102259
| 0
| 0.341615
| 0
| 0
| 0.028382
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.059006
| false
| 0
| 0.024845
| 0.009317
| 0.145963
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a97a18817825892c952ac7174c04fcf55fabab56
| 6,441
|
py
|
Python
|
MTL/features.py
|
usc-sail/mica-riskybehavior-identification
|
dd8d1bb795ca1b8273625713887c6c4b747fd542
|
[
"MIT"
] | 2
|
2020-11-19T21:22:53.000Z
|
2021-02-25T00:29:38.000Z
|
MTL/features.py
|
usc-sail/mica-riskybehavior-identification
|
dd8d1bb795ca1b8273625713887c6c4b747fd542
|
[
"MIT"
] | null | null | null |
MTL/features.py
|
usc-sail/mica-riskybehavior-identification
|
dd8d1bb795ca1b8273625713887c6c4b747fd542
|
[
"MIT"
] | 1
|
2021-02-05T22:45:51.000Z
|
2021-02-05T22:45:51.000Z
|
import os
import numpy as np
import torch
from transformers import BertTokenizer
from tensorflow.keras.utils import to_categorical
from NewDataLoader import *
from config import *
import warnings
class Features:
    """Loads precomputed per-batch word/sentiment features and the
    violence/sex/drugs labels.

    Keyword Args:
        max_len (int): keep only the last `max_len` timesteps of each
            feature sequence (default 250).
        categorical (bool): if True, map ordinal ratings to High/Med/Low
            and one-hot encode them (default True).
        wordrepr (str): key of FEATS_SIZES naming the word-level
            representation (default 'toronto_sent2vec').
        sentrepr (str): key of FEATS_SIZES naming the sentence-level
            representation (default 'sentiment').
        bert_selector (str): 'first'/'last' to keep one half of a
            concatenated BERT feature; anything else keeps it whole
            (default 'None').
    """

    def __init__(self, **kwargs):
        self.max_len = kwargs.get('max_len', 250)
        self.categorical = kwargs.get('categorical', True)
        self.wordrepr = kwargs.get('wordrepr', 'toronto_sent2vec')
        self.sentrepr = kwargs.get('sentrepr', 'sentiment')
        self.bert_selector = kwargs.get('bert_selector', 'None')
        # Vectorized categorize() so whole label arrays can be transformed.
        self.categorize_F = np.vectorize(self.categorize)
        # Feature sizes; halved when only one half of a BERT vector is kept.
        self.WORD_SIZE = FEATS_SIZES[self.wordrepr]
        if self.bert_selector in ("first", "last"):
            self.WORD_SIZE = int(self.WORD_SIZE / 2)
        self.SENT_SIZE = FEATS_SIZES[self.sentrepr]
        if self.sentrepr == "bert" and self.bert_selector in ("first", "last"):
            self.SENT_SIZE = int(self.SENT_SIZE / 2)
        print("Features:", self.wordrepr, self.sentrepr, self.max_len, self.bert_selector)

    ################################################
    # Transform ordinal ratings into categorical
    ################################################
    def categorize(self, rating):
        """Map an ordinal rating to 0 (HIGH, >=4), 1 (MED, >2) or 2 (LOW)."""
        if rating >= 4:
            return 0  # HIGH
        elif rating > 2:
            return 1  # MED
        else:
            return 2  # LOW

    ################################################
    # Loads features and trims them to max_len
    ################################################
    def get_feats(self, label_f, batch_dir=None):
        """Load one batch of (word, sentiment, genre) features and labels.

        Args:
            label_f: path to the batch's label file; its basename starts
                with the batch index.
            batch_dir: directory of the batch's feature files (defaults to
                the directory containing `label_f`).

        Returns:
            ([word_features, sentiment_features, batch_genre], y) with
            y = [violence, sex, drugs] label arrays.

        Raises:
            ValueError: if `wordrepr` or `sentrepr` names an unknown
                representation (previously surfaced as an opaque NameError).
        """
        if not batch_dir:
            batch_dir = os.path.dirname(label_f)
        # Labels
        batch_labels, additional_labels = load_labels(label_f)
        batch_labels = np.c_[batch_labels, additional_labels]
        if self.categorical:
            batch_labels = self.categorize_F(batch_labels)  # H/M/L
            batch_labels = to_categorical(batch_labels, num_classes=3)  # One-hot encoding
        # NOTE(review): the 3-D indexing below assumes the one-hot
        # (categorical) path; with categorical=False labels are 2-D -- verify.
        vio, sex, drugs = batch_labels[:, 0, :], batch_labels[:, 1, :], batch_labels[:, 2, :]
        y = [vio, sex, drugs]
        # Get the index from the filename
        i = os.path.basename(label_f).split("_")[0]
        i = i.replace('.npz', '')
        # Genre
        batch_genre = load_genre(i, batch_dir)
        # Words
        if self.wordrepr in ['sent2vec', 'word2vec', 'script_word2vec', 'toronto_sent2vec']:
            word_features = load_w2v_or_p2v(i, batch_dir, FEATS_SIZES, self.wordrepr)
        elif self.wordrepr in ['bert_large', 'bert_base', 'sst', 'moviebert']:
            word_features = load_BERT(i, batch_dir, FEATS_SIZES, mode=self.wordrepr, bert_selector=self.bert_selector)
        elif self.wordrepr in ['ngrams', 'tfidf']:
            word_features = load_tf_or_idf(i, batch_dir, self.wordrepr)
        else:
            raise ValueError("Unknown wordrepr: {}".format(self.wordrepr))
        # Sentiment
        if self.sentrepr in ['sentiment']:
            sentiment_features = load_w2v_or_p2v(i, batch_dir, FEATS_SIZES, "sentiment")
        elif self.sentrepr in ['bert_large', 'bert_base', 'sst', 'moviebert']:
            sentiment_features = load_BERT(i, batch_dir, FEATS_SIZES, mode=self.sentrepr, bert_selector=self.bert_selector)
        else:
            raise ValueError("Unknown sentrepr: {}".format(self.sentrepr))
        # Trim to the last max_len timesteps.
        word_features = word_features[:, -self.max_len:, :]
        sentiment_features = sentiment_features[:, -self.max_len:, :]
        return ([word_features, sentiment_features, batch_genre], y)

    def get_feats_any_only(self, label_f, index=0, batch_dir=None):
        """Like get_feats, but keep only the label set at `index`."""
        inputs, y = self.get_feats(label_f, batch_dir=batch_dir)
        return (inputs, y[index])

    def get_feats_vio_only(self, label_f, batch_dir=None):
        """Features paired with the violence labels only."""
        return self.get_feats_any_only(label_f, index=0, batch_dir=batch_dir)

    def get_feats_sex_only(self, label_f, batch_dir=None):
        """Features paired with the sex labels only."""
        return self.get_feats_any_only(label_f, index=1, batch_dir=batch_dir)

    def get_feats_drugs_only(self, label_f, batch_dir=None):
        """Features paired with the drugs labels only."""
        return self.get_feats_any_only(label_f, index=2, batch_dir=batch_dir)

    def get_concat_feats(self, label_f, batch_dir=None):
        """Word+sentiment features concatenated on the feature axis, paired
        with the first (violence) label set only."""
        (word_features, sentiment_features, batch_genre), batch_labels = self.get_feats(label_f, batch_dir)
        feats = np.concatenate([word_features, sentiment_features], axis=2)
        return [feats, batch_genre], batch_labels[0]
class BertFeatures(Features):
    """This class goes from text to padded transformer features"""
    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        self.name = kwargs.get('bert_name', 'bert-base-uncased')
        self.tokenizer = BertTokenizer.from_pretrained(self.name)
        # NOTE(review): `tokenizer.max_len` is the pre-transformers-4.x
        # attribute name (now `model_max_length`) -- confirm the pinned
        # transformers version.
        self.max_len = kwargs.get('max_len', self.tokenizer.max_len)
        self.categorical = kwargs.get('categorical', True)
        if self.max_len > self.tokenizer.max_len:
            warnings.warn("max_len > tokenizer({}).max_len.".format(self.name))
        print("BertFeatures:", self.name, self.max_len)
    def get_feats(self, label_f, batch_dir = None):
        """Load labels/genre for the batch `label_f` and tokenize its text.

        Unlike Features.get_feats, labels are indexed [:, k] (not [:, k, :])
        and the returned features are BERT input-id tensors built from the
        batch's raw text instead of precomputed embeddings.
        """
        if not batch_dir:
            batch_dir = os.path.dirname(label_f)
        # Labels
        batch_labels, additional_labels = load_labels(label_f)
        batch_labels = np.c_[batch_labels, additional_labels]
        if self.categorical:
            batch_labels = self.categorize_F(batch_labels) #H/M/L
            batch_labels = to_categorical(batch_labels, num_classes = 3) #One-hot encoding
        vio, sex, drugs = batch_labels[:, 0], batch_labels[:, 1], batch_labels[:, 2]
        y = [vio, sex, drugs]
        # Get the index from the filename
        i = os.path.basename(label_f).split("_")[0]
        i = i.replace('.npz', '')
        # Genre
        batch_genre = load_genre(i, batch_dir)
        # Tokenize every text row of the batch.
        features = []
        for row in load_text(i, batch_dir):
            # Tokenize and trim (keep the last max_len tokens)
            text = self.tokenizer.tokenize(row)[-self.max_len:]
            # Encode text
            input_ids = torch.tensor([self.tokenizer.encode(text, add_special_tokens = True)])
            features.append(input_ids)
        # Convert to tensor
        # NOTE(review): torch.cat on dim 0 requires every row to encode to
        # the same length -- presumably load_text yields fixed-length rows;
        # verify.
        features = torch.cat(features, dim = 0)
        return ([features, batch_genre], y)
| 38.568862
| 127
| 0.621798
| 816
| 6,441
| 4.639706
| 0.186275
| 0.059165
| 0.029054
| 0.029583
| 0.542261
| 0.523772
| 0.465399
| 0.403592
| 0.379292
| 0.352351
| 0
| 0.007336
| 0.238162
| 6,441
| 166
| 128
| 38.801205
| 0.764214
| 0.070796
| 0
| 0.3
| 0
| 0
| 0.059701
| 0.003818
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.08
| 0.03
| 0.3
| 0.02
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a97af6a55423ad89ce397dfb867db2824473473b
| 1,233
|
py
|
Python
|
project_4_data_pipelines/airflow/plugins/helpers/sparkify_dim_subdag.py
|
jpuris/udacity-data-engineering-submissions
|
e71e2569241c76b5e6c3cd074667b19bde4d7b9e
|
[
"MIT"
] | null | null | null |
project_4_data_pipelines/airflow/plugins/helpers/sparkify_dim_subdag.py
|
jpuris/udacity-data-engineering-submissions
|
e71e2569241c76b5e6c3cd074667b19bde4d7b9e
|
[
"MIT"
] | null | null | null |
project_4_data_pipelines/airflow/plugins/helpers/sparkify_dim_subdag.py
|
jpuris/udacity-data-engineering-submissions
|
e71e2569241c76b5e6c3cd074667b19bde4d7b9e
|
[
"MIT"
] | null | null | null |
from airflow import DAG
from operators import LoadDimensionOperator
def load_dim_subdag(
    parent_dag_name: str,
    task_id: str,
    redshift_conn_id: str,
    sql_statement: str,
    do_truncate: bool,
    table_name: str,
    **kwargs,
):
    """
    Airflow's subdag wrapper. Implements LoadDimensionOperator operator.

    Subdag's name will be f'{parent_dag_name}.{task_id}'

    Subdag related keyword arguments:
    - parent_dag_name -- Parent DAG name
    - task_id -- Task ID for the subdag to use

    Keyword arguments:
    redshift_conn_id -- Airflow connection name for Redshift detail
    sql_statement    -- SQL statement to run
    do_truncate      -- Does the table need to be truncated before running
                        SQL statement
    table_name       -- Dimension table name

    All remaining keyword arguments are passed through to the DAG.
    """
    dag = DAG(f'{parent_dag_name}.{task_id}', **kwargs)
    # Instantiating the operator with dag=dag registers it on the DAG; the
    # instance itself needs no further reference (the former bare
    # `load_dimension_table` expression statement was a no-op and is gone).
    LoadDimensionOperator(
        task_id=task_id,
        dag=dag,
        redshift_conn_id=redshift_conn_id,
        sql_query=sql_statement,
        do_truncate=do_truncate,
        table_name=table_name,
    )
    return dag
| 26.804348
| 75
| 0.673155
| 155
| 1,233
| 5.096774
| 0.329032
| 0.053165
| 0.082278
| 0.064557
| 0.074684
| 0.050633
| 0
| 0
| 0
| 0
| 0
| 0
| 0.262774
| 1,233
| 45
| 76
| 27.4
| 0.869087
| 0.47283
| 0
| 0
| 0
| 0
| 0.045763
| 0.045763
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.090909
| 0
| 0.181818
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a97e81a89bda65fad9ab35f52160822fa9349f8c
| 11,572
|
py
|
Python
|
geetools/collection/modis.py
|
carderne/gee_tools
|
4003e75ffb0ffefc9f41b1a34d849eebdb486161
|
[
"MIT"
] | null | null | null |
geetools/collection/modis.py
|
carderne/gee_tools
|
4003e75ffb0ffefc9f41b1a34d849eebdb486161
|
[
"MIT"
] | null | null | null |
geetools/collection/modis.py
|
carderne/gee_tools
|
4003e75ffb0ffefc9f41b1a34d849eebdb486161
|
[
"MIT"
] | null | null | null |
# coding=utf-8
""" Google Earth Engine MODIS Collections """
from . import Collection, TODAY, Band
from functools import partial
# Earth Engine asset ids of the supported MODIS collection-6 products.
IDS = [
    'MODIS/006/MOD09GQ', 'MODIS/006/MYD09GQ',
    'MODIS/006/MOD09GA', 'MODIS/006/MYD09GA',
    'MODIS/006/MOD13Q1', 'MODIS/006/MYD13Q1'
]
# Start date used for each product.
START = {
    'MODIS/006/MOD09GQ': '2000-02-24',
    'MODIS/006/MYD09GQ': '2000-02-24',
    'MODIS/006/MOD09GA': '2000-02-24',
    'MODIS/006/MYD09GA': '2000-02-24',
    'MODIS/006/MOD13Q1': '2000-02-18',
    'MODIS/006/MYD13Q1': '2000-02-18',
}
# End date per product; TODAY because the collections are still updated.
END = {
    'MODIS/006/MOD09GQ': TODAY,
    'MODIS/006/MYD09GQ': TODAY,
    'MODIS/006/MOD09GA': TODAY,
    'MODIS/006/MYD09GA': TODAY,
    'MODIS/006/MOD13Q1': TODAY,
    'MODIS/006/MYD13Q1': TODAY,
}
class MODIS(Collection):
    """ MODIS Collections """

    # Short, human-readable alias per asset id.
    SHORTS = {
        'MODIS/006/MOD09GQ': 'TERRA_SR_250_DAILY',
        'MODIS/006/MYD09GQ': 'AQUA_SR_250_DAILY',
        'MODIS/006/MOD09GA': 'TERRA_SR_1KM_DAILY',
        'MODIS/006/MYD09GA': 'AQUA_SR_1KM_DAILY',
        'MODIS/006/MOD13Q1': 'TERRA_IND_250_16DAYS',
        'MODIS/006/MYD13Q1': 'AQUA_IND_250_16DAYS'
    }

    def __init__(self, product_id):
        """ Initialize a MODIS collection with it's product id """
        super(MODIS, self).__init__()
        self.product_id = product_id
        self._id = self._make_id()
        self._bands = self._make_bands()
        # dates
        self.start_date = START[self._id]
        self.end_date = END[self._id]
        self.spacecraft = 'MODIS'
        # No cloud-cover property is configured for these products.
        self.cloud_cover = None
        self.short_name = self.SHORTS.get(self.id)
        # Default QA-based masks per product family.
        if self._id in ['MODIS/006/MOD09GQ', 'MODIS/006/MYD09GQ']:
            self.common_masks = [self.qc250]
        if self._id in ['MODIS/006/MOD09GA', 'MODIS/006/MYD09GA']:
            self.common_masks = [self.state_1km]
        if self._id in ['MODIS/006/MOD13Q1', 'MODIS/006/MYD13Q1']:
            self.common_masks = [self.detailed_qa]

    def state_1km(self, image, classes=('cloud', 'shadow', 'snow',
                  'average_cirrus', 'high_cirrus'), renamed=False):
        """Apply the 'state_1km' QA band mask for the given classes
        (delegates to Collection.applyMask)."""
        return self.applyMask(image, 'state_1km', classes, renamed)

    def qc250(self, image, classes=('B1_highest_quality', 'B2_highest_quality'),
              renamed=False):
        """Apply the 'QC_250m' quality band as a positive (keep) mask for
        the given classes (delegates to Collection.applyPositiveMask)."""
        return self.applyPositiveMask(image, 'QC_250m', classes, renamed)

    def detailed_qa(self, image, classes=('cloud', 'shadow', 'snow'),
                    renamed=False):
        """Apply the detailed QA band mask; the band name depends on whether
        the image's bands have been renamed."""
        if renamed:
            band ='DetailedQA'
        else:
            band = 'detailed_qa'
        return self.applyMask(image, band, classes, renamed)

    def _make_bands(self):
        """Build the Band list matching this product's Earth Engine bands.

        A fixed-size placeholder list is filled by index per product family
        and trimmed of unused (None) slots at the end.
        """
        bands = [None]*30
        # Partial bands shared across products; scale is supplied later.
        sur_refl_b01 = partial(Band, id='sur_refl_b01', name='red',
                               precision='int16', min=-100,
                               max=16000, reference='optical')
        sur_refl_b02 = partial(Band, id='sur_refl_b02', name='nir',
                               precision='int16', min=-100,
                               max=16000, reference='optical')
        num_observations = partial(Band, precision='int8', min=0, max=127,
                                   reference='classification')
        QC_250m = Band('QC_250m', 'QC_250m', 'uint16', 250, 0, 4096,
                       'bits', bits={
                           '4-7': {0: 'B1_highest_quality'},
                           '8-11': {0: 'B2_highest_quality'},
                           '12': {1: 'atmospheric_corrected'}
                       })
        obscov = partial(Band, precision='int8', min=0, max=100,
                         reference='classification')
        iobs_res = partial(Band, id='iobs_res', name='obs_number',
                           precision='uint8', min=0, max=254,
                           reference='classification')
        orbit_pnt = partial(Band, id='orbit_pnt', name='orbit_pointer',
                            precision='int8', min=0, max=15,
                            reference='classification')
        granule_pnt = partial(Band, id='granule_pnt', name='granule_pointer',
                              precision='uint8', min=0, max=254,
                              reference='classification')
        state_1km = Band('state_1km', 'state_1km', 'uint16', 1000, 0, 57335,
                         'bits', bits={
                             '0-1': {0: 'clear', 1:'cloud', 2:'mix'},
                             '2': {1: 'shadow'},
                             '8-9': {1: 'small_cirrus', 2: 'average_cirrus',
                                     3: 'high_cirrus'},
                             '13': {1: 'adjacent'},
                             '15': {1: 'snow'}
                         })
        sezenith = Band('SensorZenith', 'sensor_zenith', 'int16', 1000, 0,
                        18000, 'classification')
        seazimuth = Band('SensorAzimuth', 'sensor_azimuth', 'int16', 1000,
                         -18000, 18000, 'classification')
        range_band = Band('Range', 'range', 'uint16', 1000, 27000, 65535,
                          'classification')
        sozenith = Band('SolarZenith', 'solar_zenith', 'int16', 1000, 0,
                        18000, 'classification')
        soazimuth = Band('SolarAzimuth', 'solar_azimuth', 'int16', 1000,
                         -18000, 18000, 'classification')
        gflags = Band('gflags', 'geolocation_flags', 'uint8', 1000, 0, 248,
                      'bits')
        sur_refl_b03 = partial(Band, id='sur_refl_b03', name='blue',
                               precision='int16', min=-100, max=16000,
                               reference='optical')
        sur_refl_b04 = partial(Band, id='sur_refl_b04', name='green',
                               precision='int16', min=-100, max=16000,
                               reference='optical')
        sur_refl_b05 = partial(Band, id='sur_refl_b05', name='swir3',
                               precision='int16', min=-100, max=16000,
                               reference='optical')
        sur_refl_b06 = partial(Band, id='sur_refl_b06', name='swir',
                               precision='int16', min=-100, max=16000,
                               reference='optical')
        sur_refl_b07 = partial(Band, id='sur_refl_b07', name='swir2',
                               precision='int16', min=-100, max=16000,
                               reference='optical')
        QC_500m = Band('QC_500m', 'QC_500m', 'uint32', 500, 0, 4294966019,
                       'bits', bits={
                           '2-5': {0: 'B1_highest_quality'},
                           '6-9': {0: 'B2_highest_quality'},
                           '10-13': {0: 'B3_highest_quality'},
                           '14-17': {0: 'B4_highest_quality'},
                           '18-21': {0: 'B5_highest_quality'},
                           '22-25': {0: 'B6_highest_quality'},
                           '26-29': {0: 'B7_highest_quality'},
                       })
        qscan = Band('q_scan', 'q_scan', 'uint8', 250, 0, 254, 'bits')
        NDVI = Band('NDVI', 'ndvi', 'int16', 250, -2000, 10000, 'classification')
        EVI = Band('EVI', 'evi', 'int16', 250, -2000, 10000, 'classification')
        DetailedQA = Band('DetailedQA', 'detailed_qa', 'uint16', 250, 0, 65534,
                          'bits', bits={
                              '0-1': {0: 'good_qa'},
                              '2-5': {0: 'highest_qa'},
                              '8': {1: 'adjacent'},
                              '10': {1: 'cloud'},
                              '14': {1: 'snow'},
                              '15': {1: 'shadow'}
                          })
        view_zenith = Band('ViewZenith', 'view_zenith', 'int16', 250, 0, 18000,
                           'classification')
        relative_azimuth = Band('RelativeAzimuth', 'relative_azimuth', 'int16',
                                250, -18000, 18000, 'classification')
        DayOfYear = Band('DayOfYear', 'day_of_year', 'int16', 250, 1, 366,
                         'classification')
        SummaryQA = Band('SummaryQA', 'summary_qa', 'int8', 250, 0, 3, 'bits',
                         bits={
                             '0-1': {0: 'clear', 1: 'marginal', 2: 'snow',
                                     3: 'cloud'}
                         })
        # 250 m daily surface reflectance products.
        if self.product_id in ['MOD09GQ', 'MYD09GQ']:
            bands[0] = num_observations(id='num_observations',
                                        name='num_observations', scale=250)
            bands[1] = sur_refl_b01(scale=250)
            bands[2] = sur_refl_b02(scale=250)
            bands[3] = QC_250m
            bands[4] = obscov(id='obscov', name='observation_coverage', scale=250)
            bands[5] = iobs_res(scale=250)
            bands[6] = orbit_pnt(scale=250)
            bands[7] = granule_pnt(scale=250)
        # 500 m / 1 km daily surface reflectance products.
        if self.product_id in ['MOD09GA', 'MYD09GA']:
            bands[0] = num_observations(id='num_observations_1km', scale=1000,
                                        name='num_observations_1km')
            bands[1] = state_1km
            bands[2] = sezenith
            bands[3] = seazimuth
            bands[4] = range_band
            bands[5] = sozenith
            bands[6] = soazimuth
            bands[7] = gflags
            bands[8] = orbit_pnt(scale=500)
            bands[9] = granule_pnt(scale=500)
            bands[10] = num_observations(id='num_observations_500m', scale=500,
                                         name='num_observations_500m')
            bands[11] = sur_refl_b01(scale=500)
            bands[12] = sur_refl_b02(scale=500)
            bands[13] = sur_refl_b03(scale=500)
            bands[14] = sur_refl_b04(scale=500)
            bands[15] = sur_refl_b05(scale=500)
            bands[16] = sur_refl_b06(scale=500)
            bands[17] = sur_refl_b07(scale=500)
            bands[18] = QC_500m
            bands[19] = obscov(id='obscov_500m', scale=500,
                               name='observation_coverage_500m')
            bands[20] = iobs_res(scale=500)
            bands[21] = qscan
        # 250 m 16-day vegetation index composites.
        if self.product_id in ['MOD13Q1', 'MYD13Q1']:
            bands[0] = NDVI
            bands[1] = EVI
            bands[2] = DetailedQA
            bands[3] = sur_refl_b01(scale=250)
            bands[4] = sur_refl_b02(scale=250)
            bands[5] = sur_refl_b03(scale=250)
            bands[6] = sur_refl_b07(scale=250)
            bands[7] = view_zenith
            bands[8] = sozenith
            bands[9] = relative_azimuth
            bands[10] = DayOfYear
            bands[11] = SummaryQA
        # Drop unused placeholder slots.
        return [b for b in bands if b]

    def _make_id(self):
        """Return the full Earth Engine asset id for this product."""
        return 'MODIS/006/{}'.format(self.product_id)

    @staticmethod
    def fromId(id):
        """ Make a MODIS collection from its ID """
        def error():
            msg = 'Collection {} not available'
            raise ValueError(msg.format(id))
        if id not in IDS: error()
        splitted = id.split('/')
        prod = splitted[2]
        return MODIS(prod)

    # Convenience constructors, one per supported product.
    @classmethod
    def MOD09GQ(cls):
        return cls(product_id='MOD09GQ')

    @classmethod
    def MYD09GQ(cls):
        return cls(product_id='MYD09GQ')

    @classmethod
    def MOD09GA(cls):
        return cls(product_id='MOD09GA')

    @classmethod
    def MYD09GA(cls):
        return cls(product_id='MYD09GA')

    @classmethod
    def MOD13Q1(cls):
        return cls(product_id='MOD13Q1')

    @classmethod
    def MYD13Q1(cls):
        return cls(product_id='MYD13Q1')
| 38.317881
| 89
| 0.497753
| 1,224
| 11,572
| 4.529412
| 0.191176
| 0.044733
| 0.023449
| 0.020202
| 0.306818
| 0.202561
| 0.109848
| 0.078824
| 0.045996
| 0.045996
| 0
| 0.123437
| 0.364328
| 11,572
| 301
| 90
| 38.445183
| 0.630234
| 0.015555
| 0
| 0.190678
| 0
| 0
| 0.213022
| 0.007743
| 0
| 0
| 0
| 0
| 0
| 1
| 0.059322
| false
| 0
| 0.008475
| 0.038136
| 0.127119
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a980ed05ffe9a9c97a1b948b9c9b922dc89fb870
| 847
|
py
|
Python
|
sympy/printing/printer.py
|
certik/sympy-oldcore
|
eb5bd061c309d88cdfb502bfd5df511b30368458
|
[
"BSD-3-Clause"
] | 1
|
2016-05-08T17:54:57.000Z
|
2016-05-08T17:54:57.000Z
|
sympy/printing/printer.py
|
certik/sympy-oldcore
|
eb5bd061c309d88cdfb502bfd5df511b30368458
|
[
"BSD-3-Clause"
] | null | null | null |
sympy/printing/printer.py
|
certik/sympy-oldcore
|
eb5bd061c309d88cdfb502bfd5df511b30368458
|
[
"BSD-3-Clause"
] | null | null | null |
class Printer(object):
    """Base printer that dispatches on the expression's class hierarchy.

    Subclasses define methods named ``_print_<ClassName>``; `_print` walks
    the expression's MRO and uses the first such method it finds, falling
    back to `emptyPrinter` (plain ``str`` by default).
    """

    def __init__(self):
        self._depth = -1
        self._str = str
        self.emptyPrinter = str

    def doprint(self, expr):
        """Return the pretty representation of `expr` as a string."""
        return self._str(self._print(expr))

    def _print(self, expr):
        self._depth += 1
        # Find a printer method for the most specific known class of expr.
        printer = None
        for klass in type(expr).__mro__:
            candidate = getattr(self, '_print_' + klass.__name__, None)
            if candidate is not None:
                printer = candidate
                break
        result = printer(expr) if printer is not None else None
        # Unknown object (or printer yielded None): fall back to the
        # default string representation.
        if result is None:
            result = self.emptyPrinter(expr)
        self._depth -= 1
        return result
| 27.322581
| 70
| 0.565525
| 105
| 847
| 4.266667
| 0.447619
| 0.060268
| 0.066964
| 0.0625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005367
| 0.340024
| 847
| 30
| 71
| 28.233333
| 0.796064
| 0.251476
| 0
| 0
| 0
| 0
| 0.022801
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0
| 0
| 0.333333
| 0.277778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a981fd9db88834f380bdfbae5402c0c579a7fa58
| 272
|
py
|
Python
|
pleiades/transforms.py
|
jcwright77/pleiades
|
e3e208e94feee299589a094f361b301131d1bd15
|
[
"MIT"
] | 3
|
2020-03-27T19:27:01.000Z
|
2021-07-15T16:28:54.000Z
|
pleiades/transforms.py
|
jcwright77/pleiades
|
e3e208e94feee299589a094f361b301131d1bd15
|
[
"MIT"
] | 6
|
2020-03-30T17:12:42.000Z
|
2020-07-14T03:07:02.000Z
|
pleiades/transforms.py
|
jcwright77/pleiades
|
e3e208e94feee299589a094f361b301131d1bd15
|
[
"MIT"
] | 6
|
2020-03-30T17:05:58.000Z
|
2021-08-18T19:21:00.000Z
|
import math
import numpy as np
def rotate(pts, angle, pivot=(0., 0.)):
    """Rotate 2-D row-vector points by `angle` degrees about `pivot`.

    Points are multiplied on the right by the rotation matrix, so the
    convention matches the original row-vector formulation.
    """
    origin = np.asarray(pivot)
    theta = math.pi * angle / 180
    cos_t = np.cos(theta)
    sin_t = np.sin(theta)
    matrix = np.array([[cos_t, -sin_t], [sin_t, cos_t]])
    shifted = np.asarray(pts) - origin
    return shifted @ matrix + origin
| 24.727273
| 55
| 0.602941
| 44
| 272
| 3.727273
| 0.477273
| 0.109756
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023364
| 0.213235
| 272
| 10
| 56
| 27.2
| 0.742991
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.25
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a98465a5dbaaa69b7d18d16711f08102c5a830eb
| 3,414
|
py
|
Python
|
wholeslidedata/annotation/write_mask2.py
|
kaczmarj/pathology-whole-slide-data
|
3adb86af716ca89f336b6c935f90bd13183572b7
|
[
"Apache-2.0"
] | 1
|
2022-02-17T19:47:14.000Z
|
2022-02-17T19:47:14.000Z
|
wholeslidedata/annotation/write_mask2.py
|
kaczmarj/pathology-whole-slide-data
|
3adb86af716ca89f336b6c935f90bd13183572b7
|
[
"Apache-2.0"
] | null | null | null |
wholeslidedata/annotation/write_mask2.py
|
kaczmarj/pathology-whole-slide-data
|
3adb86af716ca89f336b6c935f90bd13183572b7
|
[
"Apache-2.0"
] | null | null | null |
from pathlib import Path
from typing import List
import cv2
import numpy as np
from shapely import geometry
from shapely.strtree import STRtree
from wholeslidedata.annotation.structures import Annotation, Point, Polygon
from wholeslidedata.image.wholeslideimage import WholeSlideImage
from wholeslidedata.image.wholeslideimagewriter import WholeSlideMaskWriter
from wholeslidedata.samplers.utils import shift_coordinates
def select_annotations(
    stree: STRtree, center_x: int, center_y: int, width: int, height: int
):
    """Return annotations intersecting a width x height box centred on
    (center_x, center_y), ordered by area, largest first."""
    half_w = width // 2
    half_h = height // 2
    query_box = geometry.box(
        center_x - half_w,
        center_y - half_h,
        center_x + half_w,
        center_y + half_h,
    )
    hits = stree.query(query_box)
    return sorted(hits, key=lambda hit: hit.area, reverse=True)
def get_mask(stree, point, size, ratio):
    """Rasterize the annotations around `point` into a label mask.

    Args:
        stree: STRtree over the annotation structures (Polygon/Point).
        point: patch centre in full-resolution coordinates.
        size: (width, height) of the output mask in pixels.
        ratio: downsampling ratio between full-resolution coordinates and
            mask pixels.

    Returns:
        (height, width) uint8 array of annotation label values.
    """
    center_x, center_y = point.x, point.y
    width, height = size
    # get annotations, largest area first so smaller (nested) annotations
    # drawn later overwrite the larger ones that contain them
    annotations = select_annotations(
        stree, center_x, center_y, (width * ratio) - 1, (height * ratio) - 1
    )
    # create mask placeholder
    mask = np.zeros((height, width), dtype=np.int32)
    # set labels of all selected annotations
    for annotation in annotations:
        coordinates = np.copy(annotation.coordinates)
        # map full-resolution coordinates into patch-local pixel coordinates
        coordinates = shift_coordinates(
            coordinates, center_x, center_y, width, height, ratio
        )
        if isinstance(annotation, Polygon):
            # Save the pre-fill mask values inside each hole: fillPoly marks
            # hole pixels with 1, then those pixels are overwritten with the
            # current mask values (everything else stays -1 = untouched).
            holemask = np.ones((height, width), dtype=np.int32) * -1
            for hole in annotation.holes:
                hcoordinates = shift_coordinates(
                    hole, center_x, center_y, width, height, ratio
                )
                cv2.fillPoly(holemask, np.array([hcoordinates], dtype=np.int32), 1)
                holemask[holemask != -1] = mask[holemask != -1]
            cv2.fillPoly(
                mask,
                np.array([coordinates], dtype=np.int32),
                annotation.label.value,
            )
            # Restore the saved values inside the holes after the fill.
            mask[holemask != -1] = holemask[holemask != -1]
        elif isinstance(annotation, Point):
            # A point annotation labels exactly one pixel (x -> column,
            # y -> row).
            mask[int(coordinates[1]), int(coordinates[0])] = annotation.label.value
    return mask.astype(np.uint8)
def convert_annotations_to_mask(
    wsi: WholeSlideImage,
    annotations: List[Annotation],
    spacing: float,
    mask_output_path: Path,
    tile_size: int = 1024,
):
    """Rasterize `annotations` into a whole-slide mask, written tile by tile.

    Args:
        wsi: source slide, used for shape and spacing lookups.
        annotations: annotation structures to rasterize.
        spacing: requested spacing of the output mask.
        mask_output_path: file path the mask is written to.
        tile_size: edge length (pixels) of the square tiles written out.
    """
    stree = STRtree(annotations)
    # Fix: `ratio` was computed twice in the original; once is enough.
    ratio = wsi.get_downsampling_from_spacing(spacing)
    shape = wsi.shapes[wsi.get_level_from_spacing(spacing)]
    write_spacing = wsi.get_real_spacing(spacing)
    wsm_writer = WholeSlideMaskWriter()
    wsm_writer.write(
        path=mask_output_path,
        spacing=write_spacing,
        dimensions=(shape[0], shape[1]),
        tile_shape=(tile_size, tile_size),
    )
    # Walk the slide grid; tile centres are scaled back to full-resolution
    # coordinates via `ratio` for the spatial query inside get_mask.
    for y_pos in range(0, shape[1], tile_size):
        for x_pos in range(0, shape[0], tile_size):
            mask = get_mask(
                stree,
                geometry.Point(
                    (x_pos + tile_size // 2) * ratio,
                    (y_pos + tile_size // 2) * ratio,
                ),
                (tile_size, tile_size),
                ratio,
            )
            # Skip all-background tiles to keep the output sparse.
            if np.any(mask):
                wsm_writer.write_tile(tile=mask, coordinates=(int(x_pos), int(y_pos)))
    print("closing...")
    wsm_writer.save()
    print("done")
| 32.207547
| 86
| 0.621558
| 395
| 3,414
| 5.21519
| 0.263291
| 0.034951
| 0.025243
| 0.027184
| 0.158738
| 0.095146
| 0.095146
| 0.026214
| 0
| 0
| 0
| 0.015416
| 0.277973
| 3,414
| 105
| 87
| 32.514286
| 0.820284
| 0.022847
| 0
| 0.047059
| 0
| 0
| 0.004202
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035294
| false
| 0
| 0.117647
| 0
| 0.176471
| 0.023529
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a984e763170541feb20e89e4a6245f1b8e706963
| 578
|
py
|
Python
|
tuples_05/tests/test_slicing_tuples.py
|
njoroge33/py_learn
|
6ad55f37789045bc5c03f3dd668cf1ea497f4e84
|
[
"MIT"
] | null | null | null |
tuples_05/tests/test_slicing_tuples.py
|
njoroge33/py_learn
|
6ad55f37789045bc5c03f3dd668cf1ea497f4e84
|
[
"MIT"
] | 2
|
2019-04-15T06:29:55.000Z
|
2019-04-19T17:34:32.000Z
|
tuples_05/tests/test_slicing_tuples.py
|
njoroge33/py_learn
|
6ad55f37789045bc5c03f3dd668cf1ea497f4e84
|
[
"MIT"
] | 1
|
2019-11-19T04:51:18.000Z
|
2019-11-19T04:51:18.000Z
|
import pytest
from ..slicing_tuples import tuple_slice
@pytest.mark.parametrize('names, ages, cities, expected', [
    (
        ('Gitau', 'Kanyoi', 'Ndegwa'),
        (13, 24, 5),
        ('Njogu-ini', 'Limuru', 'Kamae'),
        (
            ('Gitau', 13, 'Njogu-ini'),
            ('Kanyoi', 24, 'Limuru'),
            ('Ndegwa', 5, 'Kamae'),
        ),
    ),
    (
        ('Totua', 'Suhi'),
        (95, 12, 36, 78),
        ('Tokyo', 'Vatican', 'Hyderabad'),
        (
            ('Totua', 95, 'Tokyo'),
            ('Suhi', 12, 'Vatican'),
        ),
    ),
])
def test_tuple_slice(names, ages, cities, expected):
    """tuple_slice pairs the three tuples element-wise, stopping at the
    shortest input (second case: only two result triples)."""
    assert tuple_slice(names, ages, cities) == expected
| 36.125
| 88
| 0.570934
| 67
| 578
| 4.850746
| 0.537313
| 0.092308
| 0.138462
| 0.141538
| 0.153846
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047312
| 0.195502
| 578
| 15
| 89
| 38.533333
| 0.651613
| 0
| 0
| 0.153846
| 0
| 0
| 0.266436
| 0
| 0
| 0
| 0
| 0
| 0.076923
| 1
| 0.076923
| false
| 0
| 0.153846
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a9856cedef8243944a78d8985c56e556db9faae0
| 28,653
|
py
|
Python
|
dftimewolf/lib/state.py
|
hkhalifa/dftimewolf
|
0a6d62fdb362c8618bd373c18a7f446b959f1a0f
|
[
"Apache-2.0"
] | null | null | null |
dftimewolf/lib/state.py
|
hkhalifa/dftimewolf
|
0a6d62fdb362c8618bd373c18a7f446b959f1a0f
|
[
"Apache-2.0"
] | null | null | null |
dftimewolf/lib/state.py
|
hkhalifa/dftimewolf
|
0a6d62fdb362c8618bd373c18a7f446b959f1a0f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""This class maintains the internal dfTimewolf state.
Use it to track errors, abort on global failures, clean up after modules, etc.
"""
from dataclasses import dataclass
from concurrent.futures import ThreadPoolExecutor, Future
import importlib
import logging
import threading
import traceback
from typing import TYPE_CHECKING, Callable, Dict, List, Sequence, Type, Any, TypeVar, cast # pylint: disable=line-too-long
from dftimewolf.cli import curses_display_manager as cdm
from dftimewolf.config import Config
from dftimewolf.lib import errors, utils
from dftimewolf.lib.containers.interface import AttributeContainer
from dftimewolf.lib.errors import DFTimewolfError
from dftimewolf.lib.modules import manager as modules_manager
from dftimewolf.lib.module import ThreadAwareModule, BaseModule
if TYPE_CHECKING:
from dftimewolf.lib import module as dftw_module
from dftimewolf.lib.containers import interface
# Type variable used by container accessors (GetContainers/DedupeContainers).
T = TypeVar("T", bound="interface.AttributeContainer")  # pylint: disable=invalid-name,line-too-long
# TODO(tomchop): Consider changing this to `dftimewolf.state` if we ever need
# more granularity.
logger = logging.getLogger('dftimewolf')
# Issue tracker URL shown to users when unexpected errors are reported.
NEW_ISSUE_URL = 'https://github.com/log2timeline/dftimewolf/issues/new'
@dataclass
class StatsEntry:
  """Container for statistics emitted by one module run.

  Attributes:
    module_type: Class name of the module that produced the statistics.
    module_name: Runtime name of the module; equal to module_type whenever
        the recipe does not override it with a runtime_name.
    stats: Free-form statistics payload, keyed by string.
  """
  module_type: str
  module_name: str
  stats: Dict[str, Any]
class DFTimewolfState(object):
  """The main State class.

  Attributes:
    command_line_options (dict[str, Any]): Command line options passed to
        dftimewolf.
    config (dftimewolf.config.Config): Class to be used throughout execution.
    errors (list[tuple[str, bool]]): errors generated by a module. These
        should be cleaned up after each module run using the CleanUp() method.
    global_errors (list[tuple[str, bool]]): the CleanUp() method moves non
        critical errors to this attribute for later reporting.
    input (list[str]): data that the current module will use as input.
    output (list[str]): data that the current module generates.
    recipe: (dict[str, str]): recipe declaring modules to load.
    store (dict[str, object]): arbitrary data for modules.
    stats_store: store for statistics generated by modules.
  """

  def __init__(self, config: Type[Config]) -> None:
    """Initializes a state."""
    super(DFTimewolfState, self).__init__()
    self.command_line_options = {}  # type: Dict[str, Any]
    # Thread-safe key/value cache shared by modules (AddToCache/GetFromCache).
    self._cache = {}  # type: Dict[str, str]
    # runtime_name -> module instance, populated by LoadRecipe().
    self._module_pool = {}  # type: Dict[str, BaseModule]
    # _state_lock guards _cache and store; _stats_lock guards stats_store.
    self._state_lock = threading.Lock()
    self._stats_lock = threading.Lock()
    # One Event per module, set when that module finishes (or is skipped),
    # used by _RunModuleThread to wait on dependencies.
    self._threading_event_per_module = {}  # type: Dict[str, threading.Event]
    self.config = config
    self.errors = []  # type: List[DFTimewolfError]
    self.global_errors = []  # type: List[DFTimewolfError]
    self.recipe = {}  # type: Dict[str, Any]
    self.store = {}  # type: Dict[str, List[interface.AttributeContainer]]
    self.stats_store = []  # type: List[StatsEntry]
    self.streaming_callbacks = {}  # type: Dict[Type[interface.AttributeContainer], List[Callable[[Any], Any]]]  # pylint: disable=line-too-long
    # Set to True by AddError() on a critical error; checked before each run.
    self._abort_execution = False
    self.stdout_log = True

  def _InvokeModulesInThreads(self, callback: Callable[[Any], Any]) -> None:
    """Invokes the callback function on all the modules in separate threads.

    Args:
      callback (function): callback function to invoke on all the modules.
    """
    threads = []
    for module_definition in self.recipe['modules']:
      thread_args = (module_definition, )
      thread = threading.Thread(target=callback, args=thread_args)
      threads.append(thread)
      thread.start()
    for thread in threads:
      thread.join()
    self.CheckErrors(is_global=True)

  def ImportRecipeModules(self, module_locations: Dict[str, str]) -> None:
    """Dynamically loads the modules declared in a recipe.

    Args:
      module_locations (dict[str, str]): A dfTimewolf module name - Python
          module mapping. e.g.:
          {'GRRArtifactCollector': 'dftimewolf.lib.collectors.grr_hosts'}

    Raises:
      errors.RecipeParseError: if a module requested in a recipe does not
          exist in the mapping, or its Python module cannot be imported.
    """
    for module in self.recipe['modules'] + self.recipe.get('preflights', []):
      name = module['name']
      if name not in module_locations:
        msg = (f'In {self.recipe["name"]}: module {name} cannot be found. '
               'It may not have been declared.')
        raise errors.RecipeParseError(msg)
      logger.debug('Loading module {0:s} from {1:s}'.format(
          name, module_locations[name]))
      location = module_locations[name]
      try:
        # Importing the module registers it with the modules manager.
        importlib.import_module(location)
      except ModuleNotFoundError as exception:
        msg = f'Cannot find Python module for {name} ({location}): {exception}'
        raise errors.RecipeParseError(msg)

  def LoadRecipe(self,
                 recipe: Dict[str, Any],
                 module_locations: Dict[str, str]) -> None:
    """Populates the internal module pool with modules declared in a recipe.

    Args:
      recipe (dict[str, Any]): recipe declaring modules to load.
      module_locations (dict[str, str]): dfTimewolf module name - Python
          module mapping, passed through to ImportRecipeModules().

    Raises:
      RecipeParseError: if a module in the recipe has not been declared.
    """
    self.recipe = recipe
    module_definitions = recipe.get('modules', [])
    preflight_definitions = recipe.get('preflights', [])
    self.ImportRecipeModules(module_locations)
    for module_definition in module_definitions + preflight_definitions:
      # Combine CLI args with args from the recipe description
      module_name = module_definition['name']
      module_class = modules_manager.ModulesManager.GetModuleByName(module_name)
      runtime_name = module_definition.get('runtime_name')
      if not runtime_name:
        runtime_name = module_name
      # pytype: disable=wrong-arg-types
      self._module_pool[runtime_name] = module_class(self, name=runtime_name)
      # pytype: enable=wrong-arg-types

  def FormatExecutionPlan(self) -> str:
    """Formats execution plan.

    Returns information about loaded modules and their corresponding arguments
    to stdout.

    Returns:
      str: String representation of loaded modules and their parameters.
    """
    plan = ""
    maxlen = 0
    modules = self.recipe.get('preflights', []) + self.recipe.get('modules', [])
    # First pass: widest argument name, so values line up in one column.
    for module in modules:
      if not module['args']:
        continue
      spacing = len(max(module['args'].keys(), key=len))
      maxlen = maxlen if maxlen > spacing else spacing
    for module in modules:
      runtime_name = module.get('runtime_name')
      if runtime_name:
        plan += '{0:s} ({1:s}):\n'.format(runtime_name, module['name'])
      else:
        plan += '{0:s}:\n'.format(module['name'])
      new_args = utils.ImportArgsFromDict(
          module['args'], self.command_line_options, self.config)
      if not new_args:
        plan += ' *No params*\n'
      for key, value in new_args.items():
        plan += ' {0:s}{1:s}\n'.format(key.ljust(maxlen + 3), repr(value))
    return plan

  def LogExecutionPlan(self) -> None:
    """Logs the result of FormatExecutionPlan() using the base logger."""
    for line in self.FormatExecutionPlan().split('\n'):
      logger.debug(line)

  def AddToCache(self, name: str, value: Any) -> None:
    """Thread-safe method to add data to the state's cache.

    If the cached item is already in the cache it will be
    overwritten with the new value.

    Args:
      name (str): string with the name of the cache variable.
      value (object): the value that will be stored in the cache.
    """
    with self._state_lock:
      self._cache[name] = value

  def GetFromCache(self, name: str, default_value: Any=None) -> Any:
    """Thread-safe method to get data from the state's cache.

    Args:
      name (str): string with the name of the cache variable.
      default_value (object): the value that will be returned if
          the item does not exist in the cache. Optional argument
          and defaults to None.

    Returns:
      object: object from the cache that corresponds to the name, or
          the value of "default_value" if the cache does not contain
          the variable.
    """
    with self._state_lock:
      return self._cache.get(name, default_value)

  def StoreContainer(self, container: "interface.AttributeContainer") -> None:
    """Thread-safe method to store data in the state's store.

    Args:
      container (AttributeContainer): data to store.
    """
    with self._state_lock:
      self.store.setdefault(container.CONTAINER_TYPE, []).append(container)

  def StoreStats(self, stats_entry: StatsEntry) -> None:
    """Thread-safe method to store stats in the state's stats store.

    Args:
      stats_entry: The stats object to store.
    """
    with self._stats_lock:
      self.stats_store.append(stats_entry)

  def GetStats(self) -> List[StatsEntry]:
    """Get stats entries that have been stored in the state.

    Returns:
      The stats objects stored in the state's stats store.
    """
    with self._stats_lock:
      # NOTE(review): this returns the internal list itself, not a copy;
      # callers mutating it would bypass _stats_lock — confirm intended.
      return self.stats_store

  def GetContainers(self,
                    container_class: Type[T],
                    pop: bool=False) -> Sequence[T]:
    """Thread-safe method to retrieve data from the state's store.

    Args:
      container_class (type): AttributeContainer class used to filter data.
      pop (Optional[bool]): Whether to remove the containers from the state when
          they are retrieved.

    Returns:
      Collection[AttributeContainer]: attribute container objects provided in
          the store that correspond to the container type.
    """
    with self._state_lock:
      container_objects = cast(
          List[T], self.store.get(container_class.CONTAINER_TYPE, []))
      if pop:
        self.store[container_class.CONTAINER_TYPE] = []
      return tuple(container_objects)

  def DedupeContainers(self, container_class: Type[T]) -> None:
    """Thread safe deduping of containers of the given type.

    This requires the container being deduped to override `__eq__()`.

    Args:
      container_class (type): AttributeContainer class to dedupe.
    """
    with self._state_lock:
      deduped = []
      for c in self.store.get(container_class.CONTAINER_TYPE, []):
        if c not in deduped:
          deduped.append(c)
      self.store[container_class.CONTAINER_TYPE] = deduped

  def _SetupModuleThread(self, module_definition: Dict[str, str]) -> None:
    """Calls the module's SetUp() function and sets a threading event for it.

    Callback for _InvokeModulesInThreads.

    Args:
      module_definition (dict[str, str]): recipe module definition.
    """
    module_name = module_definition['name']
    runtime_name = module_definition.get('runtime_name', module_name)
    logger.info('Setting up module: {0:s}'.format(runtime_name))
    new_args = utils.ImportArgsFromDict(
        module_definition['args'], self.command_line_options, self.config)
    module = self._module_pool[runtime_name]
    try:
      self._RunModuleSetUp(module, **new_args)
    except errors.DFTimewolfError:
      msg = "A critical error occurred in module {0:s}, aborting execution."
      logger.critical(msg.format(module.name))
    except Exception as exception:  # pylint: disable=broad-except
      msg = 'An unknown error occurred in module {0:s}: {1!s}'.format(
          module.name, exception)
      logger.critical(msg)
      # We're catching any exception that is not a DFTimewolfError, so we want
      # to generate an error for further reporting.
      error = errors.DFTimewolfError(
          message=msg, name='dftimewolf', stacktrace=traceback.format_exc(),
          critical=True, unexpected=True)
      self.AddError(error)
    # The event is created here (not set); _RunModuleThread sets it later.
    self._threading_event_per_module[runtime_name] = threading.Event()
    self.CleanUp()

  def _RunModuleSetUp(self,
                      module: BaseModule,
                      **new_args: Dict[str, object]) -> None:
    """Runs SetUp of a single module.

    Designed to be wrapped by an output handling subclass.

    Args:
      module: The module that will have SetUp called.
      new_args: kwargs to pass to SetUp."""
    module.SetUp(**new_args)

  def _RunModuleProcess(self, module: BaseModule) -> None:
    """Runs Process of a single module.

    Designed to be wrapped by an output handling subclass.

    Args:
      module: The module to run Process() on."""
    module.Process()

  def _RunModuleProcessThreaded(
      self, module: ThreadAwareModule
  ) -> List[Future]:  # type: ignore
    """Runs Process of a single ThreadAwareModule module.

    Designed to be wrapped by an output handling subclass.

    Args:
      module: The module that will have Process(container) called in a threaded
        fashion."""
    cont_count = len(self.GetContainers(module.GetThreadOnContainerType()))
    logger.info(
        f'Running {cont_count} threads, max {module.GetThreadPoolSize()} '
        f'simultaneous for module {module.name}')
    futures = []
    with ThreadPoolExecutor(max_workers=module.GetThreadPoolSize()) \
        as executor:
      pop = not module.KeepThreadedContainersInState()
      for c in self.GetContainers(module.GetThreadOnContainerType(), pop):
        futures.append(
            executor.submit(module.Process, c))
    # The `with` block above waits for all submitted work to complete, so the
    # returned futures are all done.
    return futures

  def _RunModulePreProcess(self, module: ThreadAwareModule) -> None:
    """Runs PreProcess of a single module.

    Designed to be wrapped by an output handling subclass.

    Args:
      module: The module that will have PreProcess() called."""
    module.PreProcess()

  def _RunModulePostProcess(self, module: ThreadAwareModule) -> None:
    """Runs PostProcess of a single module.

    Designed to be wrapped by an output handling subclass.

    Args:
      module: The module that will have PostProcess() called."""
    module.PostProcess()

  # pylint: disable=unused-argument
  def _HandleFuturesFromThreadedModule(
      self,
      futures: List[Future],  # type: ignore
      runtime_name: str) -> None:
    """Handles any futures raised by the async processing of a module.

    Raises the first exception found among the futures, if any.

    Args:
      futures: A list of futures, returned by RunModuleProcessThreaded().
      runtime_name: runtime name of the module."""
    for fut in futures:
      if fut.exception():
        raise fut.exception()  # type: ignore
  # pylint: disable=unused-argument

  def SetupModules(self) -> None:
    """Performs setup tasks for each module in the module pool.

    Threads declared modules' SetUp() functions. Takes CLI arguments into
    account when replacing recipe parameters for each module.
    """
    # Note that vars() copies the values of argparse.Namespace to a dict.
    self._InvokeModulesInThreads(self._SetupModuleThread)

  def _RunModuleThread(self, module_definition: Dict[str, str]) -> None:
    """Runs the module's Process() function.

    Callback for _InvokeModulesInThreads.

    Waits for any blockers to have finished before running Process(), then
    sets an Event flag declaring the module has completed.

    Args:
      module_definition (dict): module definition.
    """
    module_name = module_definition['name']
    runtime_name = module_definition.get('runtime_name', module_name)
    # Block until every module listed in 'wants' has set its Event.
    for dependency in module_definition['wants']:
      self._threading_event_per_module[dependency].wait()
    module = self._module_pool[runtime_name]
    # Abort processing if a module has had critical failures before.
    if self._abort_execution:
      logger.critical(
          'Aborting execution of {0:s} due to previous errors'.format(
              module.name))
      # Still set the Event so downstream modules waiting on us unblock.
      self._threading_event_per_module[runtime_name].set()
      self.CleanUp()
      return
    logger.info('Running module: {0:s}'.format(runtime_name))
    try:
      if isinstance(module, ThreadAwareModule):
        self._RunModulePreProcess(module)
        futures = self._RunModuleProcessThreaded(module)
        self._RunModulePostProcess(module)
        self._HandleFuturesFromThreadedModule(futures, runtime_name)
      else:
        self._RunModuleProcess(module)
    except errors.DFTimewolfError:
      logger.critical(
          "Critical error in module {0:s}, aborting execution".format(
              module.name))
    except Exception as exception:  # pylint: disable=broad-except
      msg = 'An unknown error occurred in module {0:s}: {1!s}'.format(
          module.name, exception)
      logger.critical(msg)
      # We're catching any exception that is not a DFTimewolfError, so we want
      # to generate an error for further reporting.
      error = errors.DFTimewolfError(
          message=msg, name='dftimewolf', stacktrace=traceback.format_exc(),
          critical=True, unexpected=True)
      self.AddError(error)
    logger.info('Module {0:s} finished execution'.format(runtime_name))
    self._threading_event_per_module[runtime_name].set()
    self.CleanUp()

  def RunPreflights(self) -> None:
    """Runs preflight modules."""
    for preflight_definition in self.recipe.get('preflights', []):
      preflight_name = preflight_definition['name']
      runtime_name = preflight_definition.get('runtime_name', preflight_name)
      args = preflight_definition.get('args', {})
      new_args = utils.ImportArgsFromDict(
          args, self.command_line_options, self.config)
      preflight = self._module_pool[runtime_name]
      try:
        self._RunModuleSetUp(preflight, **new_args)
        self._RunModuleProcess(preflight)
      finally:
        self.CheckErrors(is_global=True)

  def CleanUpPreflights(self) -> None:
    """Executes any cleanup actions defined in preflight modules."""
    for preflight_definition in self.recipe.get('preflights', []):
      preflight_name = preflight_definition['name']
      runtime_name = preflight_definition.get('runtime_name', preflight_name)
      preflight = self._module_pool[runtime_name]
      try:
        preflight.CleanUp()
      finally:
        self.CheckErrors(is_global=True)

  def InstantiateModule(self, module_name: str) -> "BaseModule":
    """Instantiates an arbitrary dfTimewolf module.

    Args:
      module_name (str): The name of the module to instantiate.

    Returns:
      BaseModule: An instance of a dftimewolf Module, which is a subclass of
          BaseModule.
    """
    module_class: Type["BaseModule"]
    module_class = modules_manager.ModulesManager.GetModuleByName(module_name)
    # pytype: disable=wrong-arg-types
    return module_class(self)
    # pytype: enable=wrong-arg-types

  def RunModules(self) -> None:
    """Performs the actual processing for each module in the module pool."""
    self._InvokeModulesInThreads(self._RunModuleThread)

  def RegisterStreamingCallback(
      self,
      target: Callable[["interface.AttributeContainer"], Any],
      container_type: Type["interface.AttributeContainer"]) -> None:
    """Registers a callback for a type of container.

    The function to be registered should accept a single parameter of type
    interface.AttributeContainer.

    Args:
      target (function): function to be called.
      container_type (type[interface.AttributeContainer]): container type on
          which the callback will be called.
    """
    if container_type not in self.streaming_callbacks:
      self.streaming_callbacks[container_type] = []
    self.streaming_callbacks[container_type].append(target)

  def StreamContainer(self, container: "interface.AttributeContainer") -> None:
    """Streams a container to the callbacks that are registered to handle it.

    Args:
      container (interface.AttributeContainer): container instance that will be
          streamed to any registered callbacks.
    """
    for callback in self.streaming_callbacks.get(type(container), []):
      callback(container)

  def AddError(self, error: DFTimewolfError) -> None:
    """Adds an error to the state.

    Args:
      error (errors.DFTimewolfError): The dfTimewolf error to add.
    """
    if error.critical:
      self._abort_execution = True
    self.errors.append(error)

  def CleanUp(self) -> None:
    """Cleans up after running a module.

    The state's output becomes the input for the next stage. Any errors are
    moved to the global_errors attribute so that they can be reported at a
    later stage.
    """
    # Move any existing errors to global errors
    self.global_errors.extend(self.errors)
    self.errors = []

  def CheckErrors(self, is_global: bool=False) -> None:
    """Checks for errors and exits if any of them are critical.

    Args:
      is_global (Optional[bool]): True if the global_errors attribute should
          be checked. False if the error attribute should be checked.

    Raises:
      errors.CriticalError: if any checked error is critical.
    """
    error_objects = self.global_errors if is_global else self.errors
    critical_errors = False
    if error_objects:
      logger.error('dfTimewolf encountered one or more errors:')
      for index, error in enumerate(error_objects):
        logger.error('{0:d}: error from {1:s}: {2:s}'.format(
            index+1, error.name, error.message))
        if error.stacktrace:
          for line in error.stacktrace.split('\n'):
            logger.error(line)
        if error.critical:
          critical_errors = True
    if any(error.unexpected for error in error_objects):
      logger.critical('One or more unexpected errors occurred.')
      logger.critical(
          'Please consider opening an issue: {0:s}'.format(NEW_ISSUE_URL))
    if critical_errors:
      raise errors.CriticalError('Critical error found. Aborting.')

  def PublishMessage(self,
                     source: str,
                     message: str,
                     is_error: bool = False) -> None:
    """Receives a message for publishing.

    The base class does nothing with this (as the method in module also logs the
    message). This method exists to be overridden for other UIs.

    Args:
      source: The source of the message.
      message: The message content.
      is_error: True if the message is an error message, False otherwise."""
class DFTimewolfStateWithCDM(DFTimewolfState):
  """The main state class, extended to wrap methods with updates to a
  CursesDisplayManager object."""

  def __init__(self,
               config: Type[Config],
               cursesdm: cdm.CursesDisplayManager) -> None:
    """Initializes a state."""
    super(DFTimewolfStateWithCDM, self).__init__(config)
    # All module status transitions are mirrored to this display manager.
    self.cursesdm = cursesdm
    # Curses owns the terminal, so plain stdout logging is disabled.
    self.stdout_log = False

  def LoadRecipe(self,
                 recipe: Dict[str, Any],
                 module_locations: Dict[str, str]) -> None:
    """Populates the internal module pool with modules declared in a recipe.

    Also enqueues every preflight and module with the display manager so the
    UI can show the execution plan.

    Args:
      recipe (dict[str, Any]): recipe declaring modules to load.
      module_locations (dict[str, str]): dfTimewolf module name - Python
          module mapping.

    Raises:
      RecipeParseError: if a module in the recipe has not been declared.
    """
    super(DFTimewolfStateWithCDM, self).LoadRecipe(recipe, module_locations)
    module_definitions = recipe.get('modules', [])
    preflight_definitions = recipe.get('preflights', [])
    self.cursesdm.SetRecipe(self.recipe['name'])
    for module_definition in preflight_definitions:
      self.cursesdm.EnqueuePreflight(module_definition['name'],
                                     module_definition.get('wants', []),
                                     module_definition.get('runtime_name'))
    for module_definition in module_definitions:
      self.cursesdm.EnqueueModule(module_definition['name'],
                                  module_definition.get('wants', []),
                                  module_definition.get('runtime_name'))
    self.cursesdm.Draw()

  def _RunModuleSetUp(self,
                      module: BaseModule,
                      **new_args: Dict[str, object]) -> None:
    """Runs SetUp of a single module, updating the UI before and after.

    Args:
      module: The module that will have SetUp called.
      new_args: kwargs to pass to SetUp."""
    self.cursesdm.UpdateModuleStatus(module.name, cdm.Status.SETTINGUP)
    module.SetUp(**new_args)
    self.cursesdm.UpdateModuleStatus(module.name, cdm.Status.PENDING)

  def _RunModuleProcess(self, module: BaseModule) -> None:
    """Runs Process of a single module, updating the UI before and after.

    Args:
      module: The module to run Process() on."""
    self.cursesdm.UpdateModuleStatus(module.name, cdm.Status.PROCESSING)
    module.Process()
    self.cursesdm.UpdateModuleStatus(module.name, cdm.Status.COMPLETED)

  def _RunModuleProcessThreaded(
      self, module: ThreadAwareModule
  ) -> List[Future]:  # type: ignore
    """Runs Process of a single ThreadAwareModule module.

    Each per-container call is wrapped by _WrapThreads so the UI can show
    per-thread progress.

    Args:
      module: The module that will have Process(container) called in a threaded
        fashion."""
    cont_count = len(self.GetContainers(module.GetThreadOnContainerType()))
    logger.info(
        f'Running {cont_count} threads, max {module.GetThreadPoolSize()} '
        f'simultaneous for module {module.name}')
    self.cursesdm.SetThreadedModuleContainerCount(module.name, cont_count)
    self.cursesdm.UpdateModuleStatus(module.name, cdm.Status.PROCESSING)
    futures = []
    with ThreadPoolExecutor(max_workers=module.GetThreadPoolSize()) \
        as executor:
      pop = not module.KeepThreadedContainersInState()
      for c in self.GetContainers(module.GetThreadOnContainerType(), pop):
        futures.append(
            executor.submit(
                self._WrapThreads, module.Process, c, module.name))
    return futures

  def _RunModulePreProcess(self, module: ThreadAwareModule) -> None:
    """Runs PreProcess of a single module, updating the UI before and after.

    Args:
      module: The module that will have PreProcess() called."""
    self.cursesdm.UpdateModuleStatus(module.name, cdm.Status.PREPROCESSING)
    module.PreProcess()
    self.cursesdm.UpdateModuleStatus(module.name, cdm.Status.PENDING)

  def _RunModulePostProcess(self, module: ThreadAwareModule) -> None:
    """Runs PostProcess of a single module, updating the UI before and after.

    Args:
      module: The module that will have PostProcess() called."""
    self.cursesdm.UpdateModuleStatus(module.name, cdm.Status.POSTPROCESSING)
    module.PostProcess()
    self.cursesdm.UpdateModuleStatus(module.name, cdm.Status.COMPLETED)

  def _HandleFuturesFromThreadedModule(
      self,
      futures: List[Future],  # type: ignore
      runtime_name: str) -> None:
    """Handles any futures raised by the async processing of a module.

    Reports the first exception to the UI, then re-raises it.

    Args:
      futures: A list of futures, returned by RunModuleProcessThreaded().
      runtime_name: runtime name of the module."""
    for fut in futures:
      if fut.exception():
        self.cursesdm.SetError(runtime_name, str(fut.exception()))
        raise fut.exception()  # type: ignore

  def _WrapThreads(self,
                   process: Callable[[AttributeContainer], None],
                   container: AttributeContainer,
                   module_name: str) -> None:
    """Wraps a ThreadPoolExecutor call to module.process with the
    CursesDisplayManager status update methods.

    Args:
      process: A callable method: Process, belonging to a ThreadAwareModule.
      container: The Container being processed by the thread.
      module_name: The runtime name of the module."""
    thread_id = threading.current_thread().getName()
    self.cursesdm.UpdateModuleThreadState(
        module_name, cdm.Status.RUNNING, thread_id, str(container))
    process(container)
    self.cursesdm.UpdateModuleThreadState(
        module_name, cdm.Status.COMPLETED, thread_id, str(container))

  def AddError(self, error: DFTimewolfError) -> None:
    """Adds an error to the state, mirroring it to the display manager.

    Args:
      error (errors.DFTimewolfError): The dfTimewolf error to add.
    """
    super(DFTimewolfStateWithCDM, self).AddError(error)
    name = error.name if error.name else 'no_module_name'
    self.cursesdm.SetError(name, error.message)

  def PublishMessage(self,
                     source: str,
                     message: str,
                     is_error: bool = False) -> None:
    """Receives a message for publishing to the list of messages.

    Args:
      source: The source of the message.
      message: The message content.
      is_error: True if the message is an error message, False otherwise."""
    self.cursesdm.EnqueueMessage(source, message, is_error)
| 36.640665
| 144
| 0.682965
| 3,432
| 28,653
| 5.60373
| 0.147436
| 0.020799
| 0.007436
| 0.010867
| 0.478317
| 0.424657
| 0.382955
| 0.33673
| 0.30522
| 0.289933
| 0
| 0.001165
| 0.221094
| 28,653
| 781
| 145
| 36.68758
| 0.860561
| 0.356926
| 0
| 0.441558
| 0
| 0
| 0.084794
| 0.01249
| 0
| 0
| 0
| 0.00128
| 0
| 1
| 0.111688
| false
| 0
| 0.057143
| 0
| 0.205195
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a98a17680f92454408a66d8e581e032e851f1d31
| 1,089
|
py
|
Python
|
tests/test_molecular_signatures_db.py
|
krassowski/gsea-api
|
deb562ea55871b799eb501a798dd49a881ff9523
|
[
"MIT"
] | 8
|
2020-03-06T02:03:40.000Z
|
2022-01-22T15:57:17.000Z
|
tests/test_molecular_signatures_db.py
|
krassowski/gsea-api
|
deb562ea55871b799eb501a798dd49a881ff9523
|
[
"MIT"
] | 3
|
2020-03-06T01:48:53.000Z
|
2021-10-06T04:15:55.000Z
|
tests/test_molecular_signatures_db.py
|
krassowski/gsea-api
|
deb562ea55871b799eb501a798dd49a881ff9523
|
[
"MIT"
] | 2
|
2019-12-01T18:41:07.000Z
|
2020-07-15T14:52:17.000Z
|
from pytest import raises
from gsea_api.molecular_signatures_db import MolecularSignaturesDatabase
def test_load():
    """Gene sets present in the database track the requested MSigDB version."""
    db_new = MolecularSignaturesDatabase('tests/test_msigdb', version=7.1)
    assert db_new.version == '7.1'
    assert db_new.gene_sets == [{'name': 'c2.cp.reactome', 'id_type': 'symbols'}]

    names_new = db_new.load('c2.cp.reactome', 'symbols').gene_sets_by_name
    assert 'REACTOME_NERVOUS_SYSTEM_DEVELOPMENT' in names_new
    assert 'REACTOME_SERINE_BIOSYNTHESIS' not in names_new

    db_old = MolecularSignaturesDatabase('tests/test_msigdb', version=7.0)
    names_old = db_old.load('c2.cp.reactome', 'symbols').gene_sets_by_name
    assert 'REACTOME_NERVOUS_SYSTEM_DEVELOPMENT' not in names_old
    assert 'REACTOME_SERINE_BIOSYNTHESIS' in names_old
def test_fail_no_dir():
    """A nonexistent database directory raises ValueError with a clear message."""
    with raises(ValueError, match='Could not find MSigDB: wrong_dir_name does not exist'):
        MolecularSignaturesDatabase('wrong_dir_name', version=7.1)
| 38.892857
| 90
| 0.747475
| 155
| 1,089
| 4.858065
| 0.309677
| 0.02656
| 0.042497
| 0.074369
| 0.568393
| 0.568393
| 0.446215
| 0.385126
| 0.162019
| 0.162019
| 0
| 0.038293
| 0.160698
| 1,089
| 27
| 91
| 40.333333
| 0.785558
| 0
| 0
| 0
| 0
| 0
| 0.278237
| 0.115702
| 0
| 0
| 0
| 0
| 0.285714
| 1
| 0.095238
| false
| 0
| 0.095238
| 0
| 0.190476
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a98a271a4efe485ccb8f3daffb76dc91992cf6a3
| 11,387
|
py
|
Python
|
froide_govplan/admin.py
|
okfde/froide-govplan
|
1ae085c39c25af7c7a74d90ce39580119942a328
|
[
"MIT"
] | 2
|
2022-03-13T14:49:46.000Z
|
2022-03-14T18:39:04.000Z
|
froide_govplan/admin.py
|
okfde/froide-govplan
|
1ae085c39c25af7c7a74d90ce39580119942a328
|
[
"MIT"
] | 3
|
2022-03-18T11:52:46.000Z
|
2022-03-18T14:13:43.000Z
|
froide_govplan/admin.py
|
okfde/froide-govplan
|
1ae085c39c25af7c7a74d90ce39580119942a328
|
[
"MIT"
] | 1
|
2022-03-18T09:36:20.000Z
|
2022-03-18T09:36:20.000Z
|
from django.contrib import admin, auth
from django.contrib.auth.models import Group
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import path, reverse, reverse_lazy
from django.utils.translation import gettext_lazy as _
from adminsortable2.admin import SortableAdminMixin
from froide.api import api_router
from froide.follow.admin import FollowerAdmin
from froide.helper.admin_utils import make_choose_object_action, make_emptyfilter
from froide.helper.widgets import TagAutocompleteWidget
from froide.organization.models import Organization
from .api_views import GovernmentPlanViewSet
from .auth import get_allowed_plans, has_limited_access
from .forms import (
GovernmentPlanForm,
GovernmentPlanUpdateAcceptProposalForm,
GovernmentPlanUpdateForm,
)
from .models import (
Government,
GovernmentPlan,
GovernmentPlanFollower,
GovernmentPlanSection,
GovernmentPlanUpdate,
)
# Resolve the project's active user model and expose the plan API endpoint on
# the global froide API router.
User = auth.get_user_model()
api_router.register(r"governmentplan", GovernmentPlanViewSet, basename="governmentplan")
class GovPlanAdminSite(admin.AdminSite):
    # Dedicated admin site for government plans: German site header, and the
    # "view site" link points at the public tracker frontend.
    site_header = "Regierungsvorhaben"
    site_url = "/koalitionstracker/"
class GovernmentPlanAdminForm(GovernmentPlanForm):
    """Admin variant of GovernmentPlanForm exposing all model fields.

    Categories are edited with a tag-autocomplete widget backed by the
    "api:category-autocomplete" endpoint.
    """
    class Meta:
        model = GovernmentPlan
        fields = "__all__"
        widgets = {
            "categories": TagAutocompleteWidget(
                autocomplete_url=reverse_lazy("api:category-autocomplete")
            ),
        }
class GovernmentAdmin(admin.ModelAdmin):
    """Admin for Government objects; slug is pre-populated from the name."""
    prepopulated_fields = {"slug": ("name",)}
    list_display = ("name", "public", "start_date", "end_date")
    list_filter = ("public",)
def execute_assign_organization(admin, request, queryset, action_obj):
    # Callback used via make_choose_object_action (see PLAN_ACTIONS below):
    # bulk-assigns the chosen Organization to all selected plans. The
    # (admin, request, queryset, action_obj) signature is presumably dictated
    # by the action factory; note the first parameter shadows the imported
    # `admin` module inside this function.
    queryset.update(organization=action_obj)
def execute_assign_group(admin, request, queryset, action_obj):
    # Callback used via make_choose_object_action (see PLAN_ACTIONS below):
    # bulk-assigns the chosen permission Group to all selected plans.
    queryset.update(group=action_obj)
# Bulk admin actions built from the callbacks above; offered only to users
# without limited access (see GovernmentPlanAdmin.get_actions).
PLAN_ACTIONS = {
    "assign_organization": make_choose_object_action(
        Organization, execute_assign_organization, _("Assign organization...")
    ),
    "assign_group": make_choose_object_action(
        Group, execute_assign_group, _("Assign permission group...")
    ),
}
class GovernmentPlanAdmin(admin.ModelAdmin):
    """Admin for government plans.

    Users flagged by has_limited_access() get a reduced field set, fewer
    filters, and no bulk assignment actions; everyone is restricted to the
    plans returned by get_allowed_plans().
    """

    # Use the admin-specific form so the categories field is rendered with
    # the tag-autocomplete widget.  GovernmentPlanAdminForm was previously
    # defined directly above but never referenced — assigning the base
    # GovernmentPlanForm here silently bypassed its widget configuration.
    form = GovernmentPlanAdminForm
    save_on_top = True
    prepopulated_fields = {"slug": ("title",)}
    search_fields = ("title",)
    raw_id_fields = ("responsible_publicbody",)
    actions = ["make_public"]

    def get_queryset(self, request):
        """Restrict to permitted plans and prefetch change-list relations."""
        qs = get_allowed_plans(request)
        qs = qs.prefetch_related(
            "categories",
            "organization",
            "group",
        )
        return qs

    def view_on_site(self, obj):
        # Avoid Django's redirect through normal admin
        # TODO: remove on https://github.com/django/django/pull/15526
        return obj.get_absolute_url()

    def get_actions(self, request):
        """Add the bulk assign actions, but only for full-access staff."""
        actions = super().get_actions(request)
        if not has_limited_access(request.user):
            admin_actions = {
                action: (
                    func,
                    action,
                    func.short_description,
                )
                for action, func in PLAN_ACTIONS.items()
            }
            actions.update(admin_actions)
        return actions

    def get_urls(self):
        """Prepend the custom accept-proposal view to the default admin URLs."""
        urls = super().get_urls()
        my_urls = [
            path(
                "<int:pk>/accept-proposal/",
                self.admin_site.admin_view(self.accept_proposal),
                name="froide_govplan-plan_accept_proposal",
            ),
        ]
        return my_urls + urls

    def get_list_display(self, request):
        """Hide the permission-group column from limited users."""
        list_display = [
            "title",
            "public",
            "status",
            "rating",
            "organization",
            "get_categories",
        ]
        if not has_limited_access(request.user):
            list_display.append("group")
        return list_display

    def get_list_filter(self, request):
        """Limited users only get the basic status/rating/public filters."""
        list_filter = [
            "status",
            "rating",
            "public",
        ]
        if not has_limited_access(request.user):
            list_filter.extend(
                [
                    make_emptyfilter(
                        "proposals", _("Has change proposals"), empty_value=None
                    ),
                    "organization",
                    "group",
                    "government",
                    "categories",
                ]
            )
        return list_filter

    def get_fields(self, request, obj=None):
        """Limited users may edit only the plan's core content fields."""
        if has_limited_access(request.user):
            return (
                "title",
                "slug",
                "description",
                "quote",
                "public",
                "due_date",
                "measure",
                "status",
                "rating",
                "reference",
            )
        return super().get_fields(request, obj=obj)

    def get_categories(self, obj):
        """
        Return the categories linked in HTML.
        """
        categories = [category.name for category in obj.categories.all()]
        return ", ".join(categories)

    get_categories.short_description = _("category(s)")

    def make_public(self, request, queryset):
        # Bulk action: publish selected plans (no save() signals fire).
        queryset.update(public=True)

    make_public.short_description = _("Make public")

    def accept_proposal(self, request, pk):
        """Review a plan's change proposals: accept one and/or delete some.

        GET renders the review form; POST either creates an unpublished
        update from the accepted proposal or merely deletes the selected
        proposals, then redirects back to the relevant change page.
        """
        obj = get_object_or_404(self.get_queryset(request), pk=pk)
        plan_url = reverse(
            "admin:froide_govplan_governmentplan_change",
            args=(obj.pk,),
            current_app=self.admin_site.name,
        )
        if not obj.proposals:
            # Nothing to review — bounce back to the plan.
            return redirect(plan_url)
        if request.method == "POST":
            proposals = obj.proposals or {}
            proposal_id = request.POST.get("proposal_id")
            delete_proposals = request.POST.getlist("proposal_delete")
            update = None
            if proposal_id:
                # NOTE(review): assumes the posted proposal_id is a key of
                # obj.proposals; an unknown id would raise KeyError here.
                data = proposals[proposal_id]["data"]
                form = GovernmentPlanUpdateAcceptProposalForm(data=data, plan=obj)
                if form.is_valid():
                    update = form.save(
                        proposal_id=proposal_id,
                        delete_proposals=delete_proposals,
                    )
            else:
                # No proposal accepted: only process the deletions.
                form = GovernmentPlanUpdateAcceptProposalForm(data={}, plan=obj)
                form.delete_proposals(delete_proposals)
            if update is None:
                self.message_user(request, _("The proposal has been deleted."))
                return redirect(plan_url)
            self.message_user(
                request,
                _("An unpublished update has been created."),
            )
            update_url = reverse(
                "admin:froide_govplan_governmentplanupdate_change",
                args=(update.pk,),
                current_app=self.admin_site.name,
            )
            return redirect(update_url)
        else:
            form = GovernmentPlanUpdateAcceptProposalForm(plan=obj)
        opts = self.model._meta
        context = {
            "form": form,
            "proposals": form.get_proposals(),
            "object": obj,
            "app_label": opts.app_label,
            "opts": opts,
        }
        return render(
            request,
            "froide_govplan/admin/accept_proposal.html",
            context,
        )
class GovernmentPlanUpdateAdmin(admin.ModelAdmin):
    """Admin for plan updates.

    Limited users only see updates of plans they may access, and updates
    they add get user/organization autofilled.  Users belonging to a plan's
    permission group may view/change its updates even without model perms.
    """

    form = GovernmentPlanUpdateForm
    save_on_top = True
    raw_id_fields = ("user", "foirequest")
    # The original assigned search_fields and date_hierarchy twice; the
    # earlier, narrower assignments were dead (the later ones won), so only
    # the effective definitions are kept.
    list_display = (
        "title",
        "timestamp",
        "plan",
        "user",
        "status",
        "rating",
        "public",
    )
    list_filter = (
        "status",
        "public",
        "organization",
    )
    search_fields = (
        "title",
        "plan__title",
    )
    date_hierarchy = "timestamp"

    def get_queryset(self, request):
        """Prefetch list relations; limited users see allowed plans only."""
        qs = super().get_queryset(request)
        qs = qs.prefetch_related(
            "plan",
            "user",
        )
        if has_limited_access(request.user):
            qs = qs.filter(plan__in=get_allowed_plans(request))
        return qs

    def view_on_site(self, obj):
        # Avoid Django's redirect through normal admin
        # TODO: remove on https://github.com/django/django/pull/15526
        return obj.get_absolute_url()

    def save_model(self, request, obj, form, change):
        """Autofill user/organization when a limited user adds an update."""
        limited = has_limited_access(request.user)
        if not change and limited:
            # When added by a limited user,
            # autofill user and organization
            obj.user = request.user
            if obj.plan.organization:
                # Copy the plan's organization only when the user actually
                # belongs to it.  The original checked .filter(pk=1) — a
                # hard-coded primary key that compared against the wrong
                # organization for every plan except pk 1.
                user_has_org = (
                    request.user.organization_set.all()
                    .filter(pk=obj.plan.organization_id)
                    .exists()
                )
                if user_has_org:
                    obj.organization = obj.plan.organization
        res = super().save_model(request, obj, form, change)
        # Keep the plan's aggregate status/rating in sync with its updates.
        obj.plan.update_from_updates()
        return res

    def get_fields(self, request, obj=None):
        """Limited users may edit only the update's content fields."""
        if has_limited_access(request.user):
            return (
                "plan",
                "title",
                "timestamp",
                "content",
                "url",
                "status",
                "rating",
                "public",
            )
        return super().get_fields(request, obj=obj)

    def formfield_for_foreignkey(self, db_field, request, **kwargs):
        # Limited users can only attach updates to plans they may access.
        if db_field.name == "plan":
            if has_limited_access(request.user):
                kwargs["queryset"] = get_allowed_plans(request)
        return super().formfield_for_foreignkey(db_field, request, **kwargs)

    def user_in_obj_group(self, request, obj):
        """True if the request user is in the plan's permission group."""
        if not obj.plan.group_id:
            return False
        user = request.user
        return User.objects.filter(pk=user.pk, groups=obj.plan.group_id).exists()

    def has_view_permission(self, request, obj=None):
        if obj and self.user_in_obj_group(request, obj):
            return True
        return super().has_view_permission(request, obj=obj)

    def has_add_permission(self, request):
        # Intentionally delegates; kept for symmetry with the overrides above.
        return super().has_add_permission(request)

    def has_change_permission(self, request, obj=None):
        if obj and self.user_in_obj_group(request, obj):
            return True
        return super().has_change_permission(request, obj=obj)
class GovernmentPlanSectionAdmin(SortableAdminMixin, admin.ModelAdmin):
    """Sortable admin for plan sections (SortableAdminMixin adds drag-order)."""

    save_on_top = True
    prepopulated_fields = {"slug": ("title",)}
    search_fields = ("title",)
    raw_id_fields = ("categories",)
    list_display = (
        "title",
        "featured",
    )
    list_filter = (
        "featured",
        "categories",
        "government",
    )
# Register everything on the default admin site ...
admin.site.register(Government, GovernmentAdmin)
admin.site.register(GovernmentPlan, GovernmentPlanAdmin)
admin.site.register(GovernmentPlanUpdate, GovernmentPlanUpdateAdmin)
admin.site.register(GovernmentPlanSection, GovernmentPlanSectionAdmin)
admin.site.register(GovernmentPlanFollower, FollowerAdmin)

# ... and expose the plan/update admins on the dedicated tracker admin site.
govplan_admin_site = GovPlanAdminSite(name="govplanadmin")
govplan_admin_site.register(GovernmentPlan, GovernmentPlanAdmin)
govplan_admin_site.register(GovernmentPlanUpdate, GovernmentPlanUpdateAdmin)
| 30.859079
| 88
| 0.596557
| 1,113
| 11,387
| 5.876909
| 0.185984
| 0.023544
| 0.022015
| 0.02813
| 0.238649
| 0.17612
| 0.158997
| 0.13209
| 0.110075
| 0.110075
| 0
| 0.002283
| 0.307632
| 11,387
| 368
| 89
| 30.942935
| 0.827372
| 0.027136
| 0
| 0.316832
| 0
| 0
| 0.101729
| 0.02154
| 0
| 0
| 0
| 0.005435
| 0
| 1
| 0.069307
| false
| 0
| 0.049505
| 0.009901
| 0.313531
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a98a8630e0f08cab9b6667bd3db9422e0508306a
| 2,995
|
py
|
Python
|
tests/test_xmltompd.py
|
thiblahute/python-mpegdash
|
e7702dec59fe61668888ba5c9e1cb2f495b72c17
|
[
"MIT"
] | 1
|
2021-06-08T04:25:04.000Z
|
2021-06-08T04:25:04.000Z
|
tests/test_xmltompd.py
|
thiblahute/python-mpegdash
|
e7702dec59fe61668888ba5c9e1cb2f495b72c17
|
[
"MIT"
] | null | null | null |
tests/test_xmltompd.py
|
thiblahute/python-mpegdash
|
e7702dec59fe61668888ba5c9e1cb2f495b72c17
|
[
"MIT"
] | 1
|
2021-09-27T12:57:51.000Z
|
2021-09-27T12:57:51.000Z
|
# Prefer the unittest2 backport when it is installed (needed on old
# Pythons); otherwise fall back to the standard library's unittest.
try:
    import unittest2 as unittest
except ImportError:
    # Narrowed from a bare `except:`, which would also have hidden
    # unrelated failures (e.g. SyntaxError inside unittest2 itself).
    import unittest
from mpegdash.parser import MPEGDASHParser
class XML2MPDTestCase(unittest.TestCase):
    """Parse MPEG-DASH manifests from string, file, and URL sources.

    NOTE(review): test_xml2mpd_from_url performs a live HTTP fetch and the
    file tests depend on fixtures under ./tests/mpd-samples/ — these fail
    offline or when run from a different working directory.
    """

    def test_xml2mpd_from_string(self):
        # Minimal static on-demand manifest with a single representation.
        mpd_string = '''
<MPD xmlns="urn:mpeg:DASH:schema:MPD:2011" mediaPresentationDuration="PT0H1M52.43S" minBufferTime="PT1.5S"
profiles="urn:mpeg:dash:profile:isoff-on-demand:2011" type="static">
<Period duration="PT0H1M52.43S" start="PT0S">
<AdaptationSet>
<ContentComponent contentType="video" id="1" />
<Representation bandwidth="4190760" codecs="avc1.640028" height="1080" id="1" mimeType="video/mp4" width="1920">
<BaseURL>motion-20120802-89.mp4</BaseURL>
<SegmentBase indexRange="674-981">
<Initialization range="0-673" />
</SegmentBase>
</Representation>
</AdaptationSet>
</Period>
</MPD>
'''
        self.assert_mpd(MPEGDASHParser.parse(mpd_string))

    def test_xml2mpd_from_file(self):
        # Fixture manifests covering different generators/profiles.
        self.assert_mpd(MPEGDASHParser.parse('./tests/mpd-samples/sample-001.mpd'))
        self.assert_mpd(MPEGDASHParser.parse('./tests/mpd-samples/motion-20120802-manifest.mpd'))
        self.assert_mpd(MPEGDASHParser.parse('./tests/mpd-samples/oops-20120802-manifest.mpd'))
        self.assert_mpd(MPEGDASHParser.parse('./tests/mpd-samples/360p_speciment_dash.mpd'))

    def test_xml2mpd_from_url(self):
        # Requires network access.
        mpd_url = 'http://yt-dash-mse-test.commondatastorage.googleapis.com/media/motion-20120802-manifest.mpd'
        self.assert_mpd(MPEGDASHParser.parse(mpd_url))

    def test_xml2mpd_from_file_with_utc_timing(self):
        # UTCTiming elements must round-trip scheme and value.
        mpd = MPEGDASHParser.parse('./tests/mpd-samples/utc_timing.mpd')
        self.assertEqual(mpd.utc_timings[0].scheme_id_uri, 'urn:mpeg:dash:utc:http-iso:2014')
        self.assertEqual(mpd.utc_timings[0].value, 'https://time.akamai.com/?iso')

    def test_xml2mpd_from_file_with_event_messagedata(self):
        # messageData attribute and element text content are mutually exclusive
        # in the fixture: event 0 has messageData, event 1 has text content.
        mpd = MPEGDASHParser.parse('./tests/mpd-samples/with_event_message_data.mpd')
        self.assertTrue(mpd.periods[0].event_streams[0].events[0].message_data is not None)
        self.assertTrue(mpd.periods[0].event_streams[0].events[0].event_value is None)
        self.assertTrue(mpd.periods[0].event_streams[0].events[1].message_data is None)
        self.assertEqual(mpd.periods[0].event_streams[0].events[1].event_value, "Some Random Event Text")

    def assert_mpd(self, mpd):
        # Structural sanity: at least one period -> adaptation set ->
        # representation with a non-empty id.
        self.assertTrue(mpd is not None)
        self.assertTrue(len(mpd.periods) > 0)
        self.assertTrue(mpd.periods[0].adaptation_sets is not None)
        self.assertTrue(len(mpd.periods[0].adaptation_sets) > 0)
        self.assertTrue(mpd.periods[0].adaptation_sets[0].representations is not None)
        self.assertTrue(len(mpd.periods[0].adaptation_sets[0].representations) > 0)
        self.assertTrue(len(mpd.periods[0].adaptation_sets[0].representations[0].id) > 0)
| 50.762712
| 126
| 0.686477
| 381
| 2,995
| 5.251969
| 0.32021
| 0.069965
| 0.054973
| 0.08096
| 0.517741
| 0.504248
| 0.449275
| 0.37981
| 0.301349
| 0.226387
| 0
| 0.055556
| 0.176628
| 2,995
| 58
| 127
| 51.637931
| 0.75588
| 0
| 0
| 0
| 0
| 0.08
| 0.395659
| 0.173623
| 0
| 0
| 0
| 0
| 0.4
| 1
| 0.12
| false
| 0
| 0.06
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a98cc0ed5054e6dba3e35b5238cafe5ac890c96b
| 513
|
py
|
Python
|
algorithm_toolbox/week_4/03_divide_and_conquer_1_search_array/iterativeBinSearch.py
|
dibyanshushekhardey/data_struct_and_algo_coursera
|
ce579ba0be19d0415dc5a9526fd04bcdb803dbc0
|
[
"MIT"
] | null | null | null |
algorithm_toolbox/week_4/03_divide_and_conquer_1_search_array/iterativeBinSearch.py
|
dibyanshushekhardey/data_struct_and_algo_coursera
|
ce579ba0be19d0415dc5a9526fd04bcdb803dbc0
|
[
"MIT"
] | null | null | null |
algorithm_toolbox/week_4/03_divide_and_conquer_1_search_array/iterativeBinSearch.py
|
dibyanshushekhardey/data_struct_and_algo_coursera
|
ce579ba0be19d0415dc5a9526fd04bcdb803dbc0
|
[
"MIT"
] | null | null | null |
def BinarySearchIt(A, low, high, key):
    """Iterative binary search for `key` in sorted list A over A[low..high].

    Returns the index of `key` if found, else -1.  The original returned
    `low - 1` on failure, which is never -1 when the search starts at
    low >= 0 — so the caller's `index != -1` check always claimed success.
    """
    while low <= high:
        mid = low + ((high - low) // 2)  # overflow-safe midpoint form
        if key == A[mid]:
            return mid
        elif key < A[mid]:
            high = mid - 1
        else:
            low = mid + 1
    return -1
arr = [3, 5, 8, 10, 12, 15, 18, 20, 20, 50, 60]
# Search the whole 0-indexed list.  The original used low=1, high=11,
# which skipped arr[0] and could probe the out-of-range index arr[11].
low = 0
high = len(arr) - 1
key = 50

index = BinarySearchIt(arr, low, high, key)
if index != -1:
    print("Element", key, "is present at index %d" % (index))
else:
    print("Element %d is not present" % (key))
| 23.318182
| 60
| 0.502924
| 78
| 513
| 3.307692
| 0.435897
| 0.108527
| 0.077519
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.085799
| 0.341131
| 513
| 22
| 61
| 23.318182
| 0.677515
| 0
| 0
| 0.105263
| 0
| 0
| 0.105058
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0
| 0
| 0.157895
| 0.105263
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a98fe624f9604a44b5865d4659413307a64a58db
| 2,133
|
py
|
Python
|
2016/day-02.py
|
mharty3/advent_of_code
|
f86e67eb772f4c328e30744610606fc154930aef
|
[
"MIT"
] | null | null | null |
2016/day-02.py
|
mharty3/advent_of_code
|
f86e67eb772f4c328e30744610606fc154930aef
|
[
"MIT"
] | null | null | null |
2016/day-02.py
|
mharty3/advent_of_code
|
f86e67eb772f4c328e30744610606fc154930aef
|
[
"MIT"
] | null | null | null |
#--- Day 2: Bathroom Security ---
from typing import List
def parse(input_data: str) -> List[List[str]]:
    """Split the puzzle input into one list of single-character moves per line."""
    return [list(token) for token in input_data.strip().split()]
def move1(x, y, direction):
    """Take one U/D/L/R step on the 3x3 keypad, clamping to the 0..2 grid.

    Unknown directions leave the position unchanged.
    """
    deltas = {'U': (0, -1), 'D': (0, 1), 'L': (-1, 0), 'R': (1, 0)}
    dx, dy = deltas.get(direction, (0, 0))
    clamped_x = min(max(x + dx, 0), 2)
    clamped_y = min(max(y + dy, 0), 2)
    return clamped_x, clamped_y
def move2(x, y, direction, keypad):
    """Take one step on an arbitrary keypad; '-' cells are walls.

    Stepping onto a '-' cell (or giving an unknown direction that lands on
    one) keeps the previous position.
    """
    deltas = {'U': (0, -1), 'D': (0, 1), 'L': (-1, 0), 'R': (1, 0)}
    dx, dy = deltas.get(direction, (0, 0))
    nx, ny = x + dx, y + dy
    # Same indexing convention as the original: first index is x, second is y.
    if keypad[nx][ny] == '-':
        return x, y
    return nx, ny
def solve1(input_data: str) -> str:
    """Walk the 3x3 numeric keypad; emit one digit per instruction line."""
    keypad = [[1, 2, 3],
              [4, 5, 6],
              [7, 8, 9]]
    x, y = 1, 1  # start on the '5' key
    digits = []
    for moves in parse(input_data):
        for step in moves:
            x, y = move1(x, y, step)
        digits.append(str(keypad[y][x]))
    return ''.join(digits)
def solve2(input_data):
    """Walk the diamond-shaped keypad ('-' cells are walls); one key per line."""
    rows = (
        "-------",
        "---1---",
        "--234--",
        "-56789-",
        "--ABC--",
        "---D---",
        "-------",
    )
    keypad = [list(row) for row in rows]
    x, y = 1, 3  # start on the '5' key
    code = []
    for moves in parse(input_data):
        for step in moves:
            x, y = move2(x, y, step, keypad)
        code.append(keypad[y][x])
    return ''.join(code)
test_data = """ULL
RRDDD
LURDL
UUUUD"""

# Worked example from the puzzle statement — both parts must reproduce the
# published answers before touching the real input.
assert solve1(test_data) == '1985'
assert solve2(test_data) == '5DB3'

if __name__ == '__main__':
    from aocd.models import Puzzle

    # Fetch the personal puzzle input and submit both answers via aocd.
    puzzle = Puzzle(2016, 2)
    answer_1 = solve1(puzzle.input_data)
    print(answer_1)
    puzzle.answer_a = answer_1

    answer_2 = solve2(puzzle.input_data)
    puzzle.answer_b = answer_2
| 21.118812
| 50
| 0.449602
| 270
| 2,133
| 3.440741
| 0.251852
| 0.019376
| 0.09042
| 0.064586
| 0.378902
| 0.331539
| 0.277718
| 0.277718
| 0.277718
| 0.277718
| 0
| 0.046132
| 0.339428
| 2,133
| 100
| 51
| 21.33
| 0.613201
| 0.015002
| 0
| 0.368421
| 0
| 0
| 0.045238
| 0
| 0
| 0
| 0
| 0
| 0.026316
| 1
| 0.065789
| false
| 0
| 0.026316
| 0
| 0.171053
| 0.013158
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a99348b5bc6c6ccf0bf508d81eb41b18d8e6cf18
| 2,875
|
py
|
Python
|
compose.py
|
gicmo/koji-osbuild
|
d8107f23478ca12cd376098a79c7465cc5dd12d1
|
[
"Apache-2.0"
] | null | null | null |
compose.py
|
gicmo/koji-osbuild
|
d8107f23478ca12cd376098a79c7465cc5dd12d1
|
[
"Apache-2.0"
] | null | null | null |
compose.py
|
gicmo/koji-osbuild
|
d8107f23478ca12cd376098a79c7465cc5dd12d1
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python3
import argparse
import koji
import os
from pprint import pprint
def main():
    """CLI client that requests an osbuild image build from a koji hub.

    Parses the build parameters, logs in (plain or GSSAPI), then calls the
    hub's osbuildImage XMLRPC method.  Network side effects only; no return
    value.
    """
    parser = argparse.ArgumentParser(description="osbuild koji client")
    parser.add_argument("--url", metavar="URL", type=str,
                        default="https://localhost/kojihub",
                        help="The URL koji hub API endpoint")
    parser.add_argument("--repo", metavar="REPO", help='The repository to use',
                        type=str, action="append", default=[])
    parser.add_argument("--release", metavar="RELEASE", help='The distribution release')
    parser.add_argument("--user", metavar="USER", default="kojiadmin")
    parser.add_argument("--password", metavar="PASSWORD", default="kojipass")
    parser.add_argument("--principal", metavar="USER", default="osbuild-krb@LOCAL")
    parser.add_argument("--keytab", metavar="FILE", help="kerberos keytab",
                        default="/tmp/osbuild-composer-koji-test/client.keytab")
    parser.add_argument("--serverca", metavar="FILE", help="Server CA",
                        default="/tmp/osbuild-composer-koji-test/ca-crt.pem")
    parser.add_argument("--plain", help="use plain text login",
                        default=False, action="store_true")
    parser.add_argument("name", metavar="NAME", help='The distribution name')
    parser.add_argument("version", metavar="VERSION", help='The distribution version')
    parser.add_argument("distro", metavar="DISTRO", help='The distribution')
    parser.add_argument("target", metavar="TARGET", help='The build target')
    parser.add_argument("arch", metavar="ARCHITECTURE", help='Request the architecture',
                        type=str, nargs="+")
    parser.add_argument("--image-type", metavar="TYPE",
                        help='Request an image-type [default: qcow2]',
                        type=str, action="append", default=[])

    args = parser.parse_args()

    # `serverca` is passed in session options; user/password are only used
    # by the plain login path below.
    opts = {"user": args.user, "password": args.password, "serverca": args.serverca}
    session = koji.ClientSession(args.url, opts)

    if args.plain:
        session.login()
    else:
        session.gssapi_login(principal=args.principal, keytab=args.keytab)

    name, version, arch, target = args.name, args.version, args.arch, args.target
    distro, image_types = args.distro, args.image_type

    # argparse's append action starts from [], so an empty list means the
    # user requested no explicit type; fall back to qcow2.
    if not image_types:
        image_types = ["qcow2"]

    opts = {}
    if args.release:
        opts["release"] = args.release
    if args.repo:
        opts["repo"] = ",".join(args.repo)

    # Echo the request before submitting it to the hub.
    print("name:", name)
    print("version:", version)
    print("distro:", distro)
    print("arches:", ", ".join(arch))
    print("target:", target)
    print("image types ", str(image_types))
    if opts:
        pprint(opts)

    session.osbuildImage(name, version, distro, image_types, target, arch, opts=opts)


if __name__ == "__main__":
    main()
| 39.930556
| 88
| 0.631652
| 332
| 2,875
| 5.373494
| 0.268072
| 0.075673
| 0.142937
| 0.0213
| 0.066144
| 0.036996
| 0
| 0
| 0
| 0
| 0
| 0.001324
| 0.211826
| 2,875
| 71
| 89
| 40.492958
| 0.785966
| 0.005913
| 0
| 0.035088
| 0
| 0
| 0.254113
| 0.030452
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017544
| false
| 0.035088
| 0.070175
| 0
| 0.087719
| 0.140351
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a9971d06d9c16341c965038e22004beaf49e0586
| 2,182
|
py
|
Python
|
profile_python/profile.py
|
heroesofcode/profile-python
|
e4e6ee2f3739ea6edad30999b74b3d42f754a86c
|
[
"MIT"
] | null | null | null |
profile_python/profile.py
|
heroesofcode/profile-python
|
e4e6ee2f3739ea6edad30999b74b3d42f754a86c
|
[
"MIT"
] | 1
|
2021-10-09T01:26:29.000Z
|
2021-10-09T01:26:29.000Z
|
profile_python/profile.py
|
heroesofcode/profile-python
|
e4e6ee2f3739ea6edad30999b74b3d42f754a86c
|
[
"MIT"
] | null | null | null |
from rich.console import Console
from rich.table import Table
from rich.progress import track
from time import sleep
import sys
class Profile(object):
    """Console UI showing a GitHub-style profile and its repositories.

    `values_datas` is assumed to be a dict with keys login/name/bio/company/
    blog/location; `values_repos` an iterable of dicts with name/language/
    forks_count/stargazers_count — shapes inferred from usage here, confirm
    against the caller that fetches them.
    """

    def get_datas(self, datas):
        """Print the basic profile fields, or a notice if they are missing."""
        try:
            print(datas['login'])
            print(datas['name'])
            print(datas['bio'])
            print(datas['company'])
            print(datas['blog'])
            print(datas['location'])
        except (KeyError, TypeError):
            # Narrowed from a bare `except:` so unrelated bugs no longer get
            # swallowed; missing keys / a None payload still mean "no user".
            print("This user does not exist")

    def get_repos(self, repos):
        """Render one table per repository (name, language, forks, stars)."""
        try:
            for repo in repos:
                table = Table(show_header=True, header_style="bold magenta")
                table.add_column("Name Repository")
                table.add_column("Language")
                table.add_column("Forks")
                table.add_column("Stars")
                table.add_row(
                    repo['name'],
                    repo['language'],
                    str(repo['forks_count']),
                    str(repo['stargazers_count'])
                )
                console = Console()
                console.print(table)
        except (KeyError, TypeError):
            # Narrowed from a bare `except:` (see get_datas).
            print("This user does not exist")

    def exist_application(self):
        """Ask for confirmation and terminate the process on 'y'."""
        option_exist = input("Do you really want to exit the system? y/n: ")
        if option_exist == "y":
            sys.exit()

    def process_data(self):
        """Show a short progress bar to signal work in progress."""
        for _ in track(range(100), description='[green]Processing data'):
            sleep(0.02)

    def run_app(self, values_datas, values_repos):
        """Main menu loop: show profile data, repositories, or exit."""
        while True:
            print("-----------------------------------------------")
            print("1 - My datas")
            print("2 - Repositories")
            print("3 - Exit")  # was "Exist" — user-facing typo
            print("-----------------------------------------------")
            option = input("Choose an option: ")
            if option == "1":
                self.process_data()
                self.get_datas(values_datas)
            elif option == "2":
                self.process_data()
                self.get_repos(values_repos)
            elif option == "3":
                self.exist_application()
            else:
                print("This option does not exist")
| 29.486486
| 76
| 0.47846
| 219
| 2,182
| 4.648402
| 0.39726
| 0.058939
| 0.05501
| 0.037328
| 0.11002
| 0.066798
| 0.066798
| 0.066798
| 0
| 0
| 0
| 0.008778
| 0.373511
| 2,182
| 73
| 77
| 29.890411
| 0.735918
| 0
| 0
| 0.169492
| 0
| 0
| 0.186984
| 0.04308
| 0
| 0
| 0
| 0
| 0
| 1
| 0.084746
| false
| 0
| 0.084746
| 0
| 0.186441
| 0.254237
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a99744e768b04af0c0bed6111d20060a12e0cfeb
| 2,459
|
py
|
Python
|
app/view/admin/notification_manage.py
|
G1NTOKI0522/WeChatterBot
|
1a5377713fd3d6c7a6bca1c20e8fdcf70e8215f5
|
[
"BSD-3-Clause"
] | 1
|
2020-04-03T02:54:18.000Z
|
2020-04-03T02:54:18.000Z
|
app/view/admin/notification_manage.py
|
G1NTOKI0522/WeChatterBot
|
1a5377713fd3d6c7a6bca1c20e8fdcf70e8215f5
|
[
"BSD-3-Clause"
] | 7
|
2020-04-11T13:22:50.000Z
|
2020-05-14T00:19:37.000Z
|
app/view/admin/notification_manage.py
|
G1NTOKI0522/WeChatterBot
|
1a5377713fd3d6c7a6bca1c20e8fdcf70e8215f5
|
[
"BSD-3-Clause"
] | 3
|
2020-04-11T12:09:56.000Z
|
2020-12-16T13:26:20.000Z
|
# coding: utf-8
import datetime
from flask_login import login_required, current_user
from flask import Blueprint, request
from app.libs.http import jsonify, error_jsonify
from app.libs.db import session
from app.serializer.notice import NoticeParaSchema
from app.model.notice import Notice
bp_admin_notification = Blueprint('admin_notification', __name__, url_prefix='/admin/notification')
@bp_admin_notification.route("/", methods=["POST"])
@login_required
def notification_manage():  # admin creates a notice
    """Create a notice from the posted JSON body; admin accounts only."""
    if current_user.isAdmin == 0:  # reject non-admin users
        return error_jsonify(10000003)
    json = request.get_json()
    data, errors = NoticeParaSchema().load(json)
    if errors:
        # Schema validation failed.
        return error_jsonify(10000001, errors)
    now = datetime.datetime.now()
    data['created_at'] = now
    data['source'] = '山东省人力资源管理部门'
    data['user_id'] = current_user.id
    new_data = Notice(**data)
    session.add(new_data)
    session.commit()
    return jsonify({})
@bp_admin_notification.route("/", methods=["GET"])
@login_required
def notification_get():  # admin fetches notices
    """Return notices visible to the current admin.

    Province-level admins (isAdmin == 2) see all notices; city-level admins
    (isAdmin == 1) only their own.
    """
    if current_user.isAdmin == 0:  # reject non-admin users
        return error_jsonify(10000003)
    if current_user.isAdmin == 2:  # province-level admin
        res = Notice.query.all()  # all notices
    if current_user.isAdmin == 1:  # city-level admin
        res = Notice.query.filter_by(user_id=current_user.id).all()
    # NOTE(review): for isAdmin values other than 0/1/2, `res` is unbound
    # here and this raises NameError — confirm isAdmin is always 0, 1 or 2.
    data_need, errors = NoticeParaSchema(many=True).dump(res)
    if errors:
        return error_jsonify(10000001, errors)
    return jsonify(data_need)
@bp_admin_notification.route("/<int:id>", methods=["POST"])
@login_required
def notice_manage_id(id):  # admin edits an existing notice
    """Update the notice with the given id from the posted JSON body."""
    if current_user.isAdmin == 0:  # reject non-admin users
        return error_jsonify(10000003)
    json = request.get_json()
    data, errors = NoticeParaSchema().load(json)
    if errors:
        return error_jsonify(10000001, errors)
    data_need = Notice.query.filter_by(id=id)
    if data_need.first() is None:  # no such id — update fails
        return error_jsonify(10000018)
    data_need.update(data)
    session.commit()
    return jsonify({})
@bp_admin_notification.route("/<int:id>", methods=["DELETE"])
@login_required
def notice_manage_delete(id):  # delete the notice with this id
    """Delete the notice with the given id; admin accounts only."""
    if current_user.isAdmin == 0:  # reject non-admin users
        return error_jsonify(10000003)
    data_need = Notice.query.filter_by(id=id).first()
    if data_need is None:
        # No such notice.
        return error_jsonify(10000017)
    session.delete(data_need)
    session.commit()
    return jsonify({})
| 28.264368
| 99
| 0.699471
| 313
| 2,459
| 5.28754
| 0.268371
| 0.072508
| 0.097885
| 0.072508
| 0.489426
| 0.395166
| 0.395166
| 0.341994
| 0.304532
| 0.239275
| 0
| 0.039382
| 0.184221
| 2,459
| 86
| 100
| 28.593023
| 0.785643
| 0.045547
| 0
| 0.4375
| 0
| 0
| 0.046312
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.109375
| 0
| 0.375
| 0.03125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a99aa91e73c38055d1f2d643a8c77c56216293f4
| 6,498
|
py
|
Python
|
colossalai/engine/_base_engine.py
|
rahulgupta9202/ColossalAI
|
993088d45eaa032e39cf5959df2a506f0663bc2e
|
[
"Apache-2.0"
] | 1
|
2022-03-12T04:49:19.000Z
|
2022-03-12T04:49:19.000Z
|
colossalai/engine/_base_engine.py
|
rahulgupta9202/ColossalAI
|
993088d45eaa032e39cf5959df2a506f0663bc2e
|
[
"Apache-2.0"
] | null | null | null |
colossalai/engine/_base_engine.py
|
rahulgupta9202/ColossalAI
|
993088d45eaa032e39cf5959df2a506f0663bc2e
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python
# -*- encoding: utf-8 -*-
from torch.nn import Module
from torch.nn.modules.loss import _Loss
from torch.optim import Optimizer
from colossalai.builder import build_gradient_handler
from colossalai.context import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.logging import get_global_dist_logger
from colossalai.nn import (ZeroRedundancyOptimizer_Level_2,
ZeroRedundancyOptimizer_Level_3)
from .schedule import BaseSchedule
class Engine:
    """Basic engine class for training and evaluation. It runs a specific process method
    :meth:`step` which is based on the given :attr:`schedule` over each batch of a dataset.
    It controls a iteration in training.

    :param model: The neural network model
    :param optimizer: Optimizer for updating the parameters
    :param criterion: Loss function used by the schedule's forward/backward step
    :param step_schedule: Running schedule in :meth:`step`
    :param gradient_handlers: Config dicts for gradient handlers; auto-detected
        for ZeRO and data-parallel setups when None
    :param gradient_accumulation: Steps of gradient accumulation
    :param gradient_clipping: The norm of gradient clipping
    :type model: Module
    :type optimizer: Optimizer
    :type criterion: _Loss
    :type step_schedule: BaseSchedule, optional
    :type gradient_handlers: list, optional
    :type gradient_accumulation: int, optional
    :type gradient_clipping: float, optional
    """

    def __init__(self,
                 model: Module,
                 optimizer: Optimizer,
                 criterion: _Loss,
                 step_schedule: BaseSchedule,
                 gradient_handlers: list = None,
                 gradient_accumulation: int = 1,
                 gradient_clipping: float = 0.0,
                 ):
        self._model = model
        self._optimizer = optimizer
        self._criterion = criterion
        self._schedule = step_schedule

        # schedule initialize
        self._schedule.initialize(model, optimizer)

        # state
        self.training = True  # default

        # gradient accumulation
        assert gradient_accumulation > 0, 'gradient accumulation size must be larger than 0'
        self._grad_accum_size = gradient_accumulation
        self._grad_clip = gradient_clipping
        self._logger = get_global_dist_logger()

        # build gradient handler
        self._gradient_handlers = []

        if gradient_handlers is not None:
            assert isinstance(gradient_handlers, list), \
                f'argument gradient_handler_cfg expected type list, ' \
                f'but got type {type(gradient_handlers)}'
        elif isinstance(optimizer, (ZeroRedundancyOptimizer_Level_2,
                                    ZeroRedundancyOptimizer_Level_3)):
            # ZeRO optimizers shard gradients themselves and need the
            # matching handler regardless of the user's configuration.
            gradient_handlers = [dict(type='ZeROGradientHandler')]
            self._logger.info(
                "Training with zero is detected, ZeROGradientHandler is automatically "
                "added even though not specified in the configuration",
                ranks=[0])
        elif gpc.is_initialized(ParallelMode.DATA) and gpc.get_world_size(
                ParallelMode.DATA) > 1:
            # Plain data parallelism: gradients must be all-reduced across
            # the data-parallel group after each step.
            gradient_handlers = [dict(type='DataParallelGradientHandler')]
            self._logger.info(
                "Data parallel training is detected, DataParallelGradientHandler is automatically "
                "added even though not specified in the configuration",
                ranks=[0])

        if gradient_handlers is None:
            self._logger.warning(
                "No gradient handler is set up, please make sure you do not need "
                "to all-reduce the gradients after a training step.",
                ranks=[0])
        else:
            for cfg in gradient_handlers:
                handler = build_gradient_handler(cfg, model, optimizer)
                self._gradient_handlers.append(handler)

    @property
    def model(self):
        return self._model

    @property
    def optimizer(self):
        return self._optimizer

    @property
    def criterion(self):
        return self._criterion

    @property
    def schedule(self):
        return self._schedule

    @property
    def gradient_accumulation(self):
        return self._grad_accum_size

    def handle_gradient(self):
        """Handles all-reduce operations of gradients across different parallel groups.
        """
        for handler in self._gradient_handlers:
            handler.handle_gradient()

    def train(self):
        """Sets the model to training mode.
        """
        self.training = True
        self._model.train()

    def eval(self):
        """Sets the model to evaluation mode.
        """
        self.training = False
        self._model.eval()

    def step(self,
             data_iter,
             is_last_iteration: bool = False,
             return_loss=True):
        """A running step based on the schedule. Usually, it runs a training or
        evaluation over a batch of dataset.

        :param data_iter: Data iterator of the dataset
        :param is_last_iteration: If True, this iteration is the last iteration in the epoch
        :param return_loss: loss will be returned if True
        :type data_iter: Iterator
        :type is_last_iteration: bool, optional
        :type return_loss: bool, optional
        :return: (output, label, loss)
        """
        if self.training:
            self._optimizer.zero_grad()

        # differentiate training and eval with grad accum
        if self.training:
            # Accumulate gradients over _grad_accum_size micro-batches; the
            # all-reduce and optimizer step only happen after the last one.
            for i in range(self._grad_accum_size):
                output, label, loss = self._schedule.forward_backward_step(
                    data_iter, self._model, self._criterion, self._optimizer,
                    forward_only=False,
                    grad_accum_size=self._grad_accum_size,
                    return_loss=return_loss)

                if i == self._grad_accum_size - 1:
                    # all reduce gradients
                    self.handle_gradient()
                    self._schedule.optimizer_step(self._model, self._optimizer, self._grad_clip)
        else:
            output, label, loss = self._schedule.forward_backward_step(
                data_iter, self._model, self._criterion, self._optimizer,
                forward_only=True,
                grad_accum_size=1,
                return_loss=return_loss)

        # consume the remaining dataset left out due to gradient accumulation
        if is_last_iteration:
            while True:
                try:
                    _ = next(data_iter)
                except StopIteration:
                    break

        # Only the last micro-batch's output/label/loss are returned.
        return output, label, loss
| 36.711864
| 99
| 0.622499
| 706
| 6,498
| 5.529745
| 0.271955
| 0.045082
| 0.023309
| 0.021773
| 0.123975
| 0.114754
| 0.085041
| 0.085041
| 0.085041
| 0.085041
| 0
| 0.003579
| 0.311942
| 6,498
| 176
| 100
| 36.920455
| 0.869604
| 0.231302
| 0
| 0.216216
| 0
| 0
| 0.113777
| 0.016343
| 0
| 0
| 0
| 0
| 0.018018
| 1
| 0.09009
| false
| 0
| 0.081081
| 0.045045
| 0.234234
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a99b36048f5d32ab6c9b6ad9baf0b5a681590fdd
| 718
|
py
|
Python
|
11. Optical Flow/optical_flow.py
|
farhan0syakir/OpenCv-tutorial
|
b3d78f3567f4ea61b8955190f51097b6ceb4b318
|
[
"MIT"
] | 15
|
2021-05-04T15:03:14.000Z
|
2022-03-20T11:57:55.000Z
|
11. Optical Flow/optical_flow.py
|
farhan0syakir/OpenCv-tutorial
|
b3d78f3567f4ea61b8955190f51097b6ceb4b318
|
[
"MIT"
] | 12
|
2020-09-24T16:57:45.000Z
|
2020-10-23T15:13:06.000Z
|
11. Optical Flow/optical_flow.py
|
farhan0syakir/OpenCv-tutorial
|
b3d78f3567f4ea61b8955190f51097b6ceb4b318
|
[
"MIT"
] | 18
|
2020-09-21T13:01:37.000Z
|
2020-10-15T19:42:28.000Z
|
import numpy as np
import cv2
cap = cv2.VideoCapture('motion.avi')

# Seed the tracker: the first frame provides the features to follow.
ret, frame = cap.read()
if not ret:
    raise IOError("could not read the first frame of motion.avi")
gs_im0 = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
points_prev = cv2.goodFeaturesToTrack(gs_im0, 100, 0.03, 9.0, False)

while cap.isOpened():
    ret, frame = cap.read()
    if not ret:
        # End of stream — the original never checked `ret` and crashed in
        # cvtColor on the None frame returned after the last one.
        break
    gs_im1 = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)

    # Call tracker: propagate the feature points into the new frame.
    points, st, err = cv2.calcOpticalFlowPyrLK(gs_im0, gs_im1, points_prev, None, (3, 3))

    for i, p in enumerate(points):
        a, b = p.ravel()
        frame = cv2.circle(frame, (a, b), 3, (255, 255, 255), -1)

    cv2.imshow('frame', frame)
    points_prev = points
    gs_im0 = gs_im1
    if cv2.waitKey(1) & 0xFF == ord('q'):
        break

cap.release()
cv2.destroyAllWindows()
| 25.642857
| 88
| 0.650418
| 110
| 718
| 4.136364
| 0.5
| 0.043956
| 0.048352
| 0.065934
| 0.215385
| 0.140659
| 0
| 0
| 0
| 0
| 0
| 0.076257
| 0.196379
| 718
| 28
| 89
| 25.642857
| 0.712305
| 0.018106
| 0
| 0.1
| 0
| 0
| 0.022727
| 0
| 0
| 0
| 0.005682
| 0
| 0
| 1
| 0
| false
| 0
| 0.1
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a9a00c334939540391cc64f13f7f530cabcf615a
| 7,546
|
py
|
Python
|
unfold/transactions/views.py
|
wesny/unfold
|
6594054f7408ac142fc6e902093b6fc8cbfda94e
|
[
"MIT"
] | null | null | null |
unfold/transactions/views.py
|
wesny/unfold
|
6594054f7408ac142fc6e902093b6fc8cbfda94e
|
[
"MIT"
] | null | null | null |
unfold/transactions/views.py
|
wesny/unfold
|
6594054f7408ac142fc6e902093b6fc8cbfda94e
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.mixins import LoginRequiredMixin
from django.views import View
from django.views.generic import ListView
from django.utils.http import is_safe_url
from django.contrib import messages
from rest_framework import status
from django.core.exceptions import ObjectDoesNotExist
from django.shortcuts import redirect, render
from mama_cas.models import ServiceTicket
from mama_cas.utils import redirect as cas_redirect
from mama_cas.utils import to_bool
from rest_framework.response import Response
from decimal import Decimal
from django.urls import reverse
import urllib
from pinax.stripe.mixins import CustomerMixin
from pinax.stripe.models import Charge
from pinax.stripe.actions import charges
from stripe.error import CardError
from rest_framework_jwt.settings import api_settings
from unfold.transactions.models import Purchase, Article
from unfold.transactions.admin import PurchaseForm
from unfold.users.models import User
# JWT helpers resolved from rest_framework_jwt settings: a payload builder and
# an encoder, used by NewAPIKeyView below to mint API tokens.
jwt_payload_handler = api_settings.JWT_PAYLOAD_HANDLER
jwt_encode_handler = api_settings.JWT_ENCODE_HANDLER
def bad_request(message):
    """Return a DRF HTTP 400 response carrying *message* in an error payload."""
    payload = {
        'status': 'error',
        'message': message,
    }
    return Response(payload, status=status.HTTP_400_BAD_REQUEST)
class PurchaseView(LoginRequiredMixin, View):
    """Article purchase flow.

    GET renders a confirmation form (or redirects straight to the article when
    it was already bought); POST records the purchase and moves the price from
    the buyer's balance to the publisher's.
    """
    template_name = "pages/purchase_article.html"
    form_class = PurchaseForm
    # def test_func(self):
    #     return self.request.user.is_publisher

    def _article_redirect(self, request, article, publisherusername, new_token):
        """Redirect to the article, minting a CAS service ticket when requested."""
        # BUG FIX: the original tested `new_token != None`, which is always
        # true for the bool returned by to_bool(), so the plain-redirect
        # branch was unreachable. Truthiness restores the intended behavior.
        if new_token:
            st = ServiceTicket.objects.create_ticket(
                service=publisherusername + '.com', user=request.user)
            return cas_redirect(article.url, params={'token': st.ticket})
        return redirect(article.url)

    def get(self, request, *args, **kwargs):
        publisherusername = request.GET.get('publisher', None)
        external_id = request.GET.get('id', None)
        new_token = to_bool(request.GET.get('new_token', None))
        if publisherusername is None or external_id is None:
            return bad_request("Invalid Parameters")
        try:
            article = Article.objects.get(
                publisher__username=publisherusername, external_id=external_id)
        except ObjectDoesNotExist:
            return bad_request("Article referenced does not exist")
        purchase = Purchase.objects.filter(article=article, buyer=request.user)
        if purchase.exists():
            # Already bought: skip the form and go straight to the article.
            return self._article_redirect(
                request, article, publisherusername, new_token)
        try:
            publisher = User.objects.get(username=publisherusername)
        except ObjectDoesNotExist:
            return bad_request("Publisher does not exist")
        next_url = ''
        if article.price > request.user.balance:
            # Not enough funds: let the reload flow send the user back here.
            next_url = urllib.parse.quote(request.get_full_path(), safe='~()*!.\'')
        form = self.form_class(initial={
            'external_id': external_id,
            'publisher': publisherusername,
            'price': article.price
        })
        data = {
            'form': form,
            'price': article.price,
            'publisher': publisher.name,
            'title': article.title,
            'balance': request.user.balance,
            'next': next_url or ''
        }
        return render(request, self.template_name, data)

    def post(self, request, *args, **kwargs):
        form = self.form_class(request.POST)
        if form.is_valid():
            external_id = form.cleaned_data['external_id']
            publisherusername = form.cleaned_data['publisher']
            price = form.cleaned_data['price']
            new_token = to_bool(request.GET.get('new_token', None))
            try:
                article = Article.objects.get(
                    publisher__username=publisherusername, external_id=external_id)
            except ObjectDoesNotExist:
                return bad_request("Article referenced does not exist")
            if article.price != price:
                return bad_request("Price has changed since submission")
            purchase = Purchase(article=article, price=price, buyer=request.user)
            purchase.save()
            # NOTE(review): these read-modify-write balance updates are racy
            # without select_for_update()/F() expressions — confirm acceptable.
            request.user.balance = request.user.balance - purchase.price
            request.user.save()
            publisher = User.objects.get(username=publisherusername)
            publisher.balance = publisher.balance + purchase.price
            publisher.save()
            return self._article_redirect(
                request, article, publisherusername, new_token)
        return render(request, self.template_name, {'form': form})
class ReloadView(LoginRequiredMixin, View):
    """Add funds to the logged-in user's balance via a Stripe charge."""
    template_name = "pages/refill_account.html"

    def get_redirect_url(self):
        """Return the validated 'next' URL from POST (or GET), or '' if unsafe."""
        redirect_to = self.request.POST.get(
            'next',
            self.request.GET.get('next', '')
        )
        # NOTE(review): is_safe_url with no allowed_hosts only admits
        # relative URLs on older Django versions — confirm that is intended.
        url_is_safe = is_safe_url(url=redirect_to)
        return redirect_to if url_is_safe else ''

    def get(self, request, *args, **kwargs):
        data = {
            'balance': request.user.balance,
            'can_charge': True
        }
        return render(request, self.template_name, data)

    def post(self, request, *args, **kwargs):
        try:
            add_on = Decimal(request.POST.get('amount'))
        except (TypeError, ArithmeticError):
            # BUG FIX: was a bare `except:`, which also swallowed SystemExit/
            # KeyboardInterrupt. Decimal raises InvalidOperation (a subclass of
            # ArithmeticError) for malformed strings and TypeError for None.
            messages.error(request, 'Amount was not in the desired format.')
            data = {
                'balance': request.user.balance,
                'can_charge': True
            }
            return render(request, self.template_name, data)
        try:
            charges.create(amount=add_on, customer=request.user.customer.stripe_id)
        except CardError as e:
            # Surface Stripe's card-decline message to the user.
            body = e.json_body
            err = body.get('error', {})
            messages.error(request, err.get('message'))
            return redirect("/reload")
        user = User.objects.get(username=request.user.username)
        user.balance = user.balance + add_on
        user.save()
        messages.success(request, "Payment was successfully processed.")
        url = self.get_redirect_url() or '/user'
        return redirect(url)
class NewAPIKeyView(LoginRequiredMixin, View):
    """Regenerate and persist a JWT API token for the current user."""

    def post(self, request, *args, **kwargs):
        user = request.user
        user.token = jwt_encode_handler(jwt_payload_handler(user))
        user.save()
        return redirect('/user')
class StripeAccountFromCustomerMixin(object):
    """Expose the Stripe account (and its id) of an optional ``customer`` attribute."""

    @property
    def stripe_account(self):
        # None when there is no customer attribute, or it is falsy.
        customer = getattr(self, "customer", None)
        if not customer:
            return None
        return customer.stripe_account

    @property
    def stripe_account_stripe_id(self):
        account = self.stripe_account
        if account:
            return account.stripe_id
        return None
    # Label used by the Django admin for this computed column.
    stripe_account_stripe_id.fget.short_description = "Stripe Account"
class ChargeListView(LoginRequiredMixin, CustomerMixin, ListView):
    """List the current customer's Stripe charges, ordered by creation time."""
    model = Charge
    context_object_name = "charge_list"
    template_name = "pinax/stripe/charge_list.html"

    def get_queryset(self):
        # Zero-argument super() — identical behavior on Python 3.
        queryset = super().get_queryset()
        return queryset.order_by("charge_created")
class PurchaseListView(LoginRequiredMixin, ListView):
    """List the purchases made by the logged-in user."""
    model = Purchase
    template_name = "pages/articles_list.html"

    def get_queryset(self):
        # Restrict the listing to the requesting user's own purchases.
        return Purchase.objects.filter(buyer=self.request.user)
| 39.507853
| 111
| 0.658362
| 847
| 7,546
| 5.710744
| 0.198347
| 0.040934
| 0.022328
| 0.021708
| 0.307215
| 0.273723
| 0.219764
| 0.206533
| 0.206533
| 0.206533
| 0
| 0.000528
| 0.247283
| 7,546
| 190
| 112
| 39.715789
| 0.851056
| 0.008216
| 0
| 0.311377
| 0
| 0
| 0.080615
| 0.014037
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065868
| false
| 0
| 0.137725
| 0.023952
| 0.419162
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a9a1965586fb4160c10932687996645bcd809a1c
| 1,843
|
py
|
Python
|
interviewbit/Programming/Arrays/Rotate Matrix/solution.py
|
pablotrinidad/competitive-programming
|
de16d007ca276330cd0a92bd5b75ce4e9e75fb59
|
[
"MIT"
] | null | null | null |
interviewbit/Programming/Arrays/Rotate Matrix/solution.py
|
pablotrinidad/competitive-programming
|
de16d007ca276330cd0a92bd5b75ce4e9e75fb59
|
[
"MIT"
] | null | null | null |
interviewbit/Programming/Arrays/Rotate Matrix/solution.py
|
pablotrinidad/competitive-programming
|
de16d007ca276330cd0a92bd5b75ce4e9e75fb59
|
[
"MIT"
] | null | null | null |
"""InterviewBit.
Programming > Arrays > Rotate Matrix.
"""
class Solution:
    """Solution: rotate an n x n matrix 90 degrees clockwise, ring by ring."""

    def rotate(self, A):
        """Rotate the matrix in place and return it."""
        n = len(A)
        for layer in range(n // 2):
            last = n - 1 - layer
            for off in range(last - layer):
                # Four corner cells of this ring, shifted one step clockwise:
                # left column -> top row -> right column -> bottom row -> left.
                (A[layer][layer + off],
                 A[layer + off][last],
                 A[last][last - off],
                 A[last - off][layer]) = (A[last - off][layer],
                                          A[layer][layer + off],
                                          A[layer + off][last],
                                          A[last][last - off])
        return A
# Demo driver: print each test matrix before and after in-place rotation.
matrices = [
    [[1]],
    [[1, 2],
     [3, 4]],
    [[1, 2, 3],
     [4, 5, 6],
     [7, 8, 9]],
    [['a', 'b', 'c', 'd'],
     ['e', 'f', 'g', 'h'],
     ['i', 'j', 'k', 'l'],
     ['m', 'n', 'o', 'p']],
    # 5x5 and 6x6 grids of zero-padded consecutive numbers ("01", "02", ...).
    [[str(x).zfill(2) for x in range(1 + 5 * r, 6 + 5 * r)] for r in range(5)],
    [[str(x).zfill(2) for x in range(1 + 6 * r, 7 + 6 * r)] for r in range(6)],
]

solution = Solution()
for matrix in matrices:
    print("Matrix before rotation:")
    for row in matrix:
        print('\t', row)
    print("Matrix after rotation:")
    for row in solution.rotate(matrix):
        print('\t', row)
    print('\n' * 3)
| 26.328571
| 119
| 0.429192
| 284
| 1,843
| 2.785211
| 0.271127
| 0.115044
| 0.125158
| 0.139064
| 0.39823
| 0.347661
| 0.347661
| 0.347661
| 0.108723
| 0
| 0
| 0.062447
| 0.365708
| 1,843
| 69
| 120
| 26.710145
| 0.6142
| 0.112859
| 0
| 0.127273
| 0
| 0
| 0.041563
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018182
| false
| 0
| 0
| 0
| 0.054545
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a9a3856b6e71069b01f3d5066c6f323c68f21ce5
| 1,283
|
py
|
Python
|
tests/dao_tests/test_stored_sample_dao.py
|
all-of-us/raw-data-repository
|
d28ad957557587b03ff9c63d55dd55e0508f91d8
|
[
"BSD-3-Clause"
] | 39
|
2017-10-13T19:16:27.000Z
|
2021-09-24T16:58:21.000Z
|
tests/test_stored_sample_dao.py
|
all-of-us/raw-data-repository
|
d28ad957557587b03ff9c63d55dd55e0508f91d8
|
[
"BSD-3-Clause"
] | 312
|
2017-09-08T15:42:13.000Z
|
2022-03-23T18:21:40.000Z
|
tests/test_stored_sample_dao.py
|
all-of-us/raw-data-repository
|
d28ad957557587b03ff9c63d55dd55e0508f91d8
|
[
"BSD-3-Clause"
] | 19
|
2017-09-15T13:58:00.000Z
|
2022-02-07T18:33:20.000Z
|
from rdr_service import clock
from rdr_service.dao.biobank_stored_sample_dao import BiobankStoredSampleDao
from rdr_service.dao.participant_dao import ParticipantDao
from rdr_service.model.biobank_stored_sample import BiobankStoredSample
from rdr_service.model.participant import Participant
from tests.helpers.unittest_base import BaseTestCase
class BiobankStoredSampleDaoTest(BaseTestCase):
    """Tests only that a sample can be written and read; see the reconciliation pipeline."""

    def setUp(self):
        super().setUp()
        self.participant = Participant(participantId=123, biobankId=555)
        ParticipantDao().insert(self.participant)
        self.dao = BiobankStoredSampleDao()

    def test_insert_and_read_sample(self):
        stored_sample_id = "WEB123456"
        expected_test_code = "1U234"
        confirmed_time = clock.CLOCK.now()
        sample = BiobankStoredSample(
            biobankStoredSampleId=stored_sample_id,
            biobankId=self.participant.biobankId,
            biobankOrderIdentifier="KIT",
            test=expected_test_code,
            confirmed=confirmed_time,
        )
        inserted = self.dao.insert(sample)
        retrieved = self.dao.get(stored_sample_id)
        # Round trip: both the insert result and the re-read row carry the code.
        self.assertEqual(expected_test_code, inserted.test)
        self.assertEqual(expected_test_code, retrieved.test)
| 37.735294
| 92
| 0.694466
| 136
| 1,283
| 6.382353
| 0.411765
| 0.040323
| 0.080645
| 0.039171
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016211
| 0.230709
| 1,283
| 33
| 93
| 38.878788
| 0.863222
| 0.063913
| 0
| 0
| 0
| 0
| 0.014226
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 1
| 0.071429
| false
| 0
| 0.214286
| 0
| 0.321429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
a9a3934109af932f3d04644fe8eb5b82a3bf255d
| 2,769
|
py
|
Python
|
server/pantryflask/__init__.py
|
jernaumorat/IntelligentPantry
|
33d1ee867a5b6e0169fb44918069fbec5bfde259
|
[
"MIT"
] | null | null | null |
server/pantryflask/__init__.py
|
jernaumorat/IntelligentPantry
|
33d1ee867a5b6e0169fb44918069fbec5bfde259
|
[
"MIT"
] | null | null | null |
server/pantryflask/__init__.py
|
jernaumorat/IntelligentPantry
|
33d1ee867a5b6e0169fb44918069fbec5bfde259
|
[
"MIT"
] | 1
|
2021-11-11T09:25:34.000Z
|
2021-11-11T09:25:34.000Z
|
import socket, os, atexit
from flask import Flask, jsonify, request
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate
from flask.helpers import send_from_directory, url_for
from zeroconf import ServiceInfo, Zeroconf
from pantryflask.config import FlaskConfig
from pantryflask.auth import token_auth, generate_pairing_code, generate_user_token
from pantryflask.models import AuthToken
from pantryflask.db import db
from pantryflask.pantry_api import bp as pantry_bp
from pantryflask.robot_api import bp as robot_bp
from pantryflask.util import bp as util_bp
# Address the pantry service advertises over mDNS/zeroconf.
# NOTE(review): if LISTEN_IP is unset, ip is None and inet_aton(None) raises
# TypeError at import time — confirm deployment always sets LISTEN_IP.
ip = os.environ.get('LISTEN_IP')
# HTTP advertisement on port 5000.
httpZconf = ServiceInfo(
    "_http._tcp.local.",
    "intpantry._http._tcp.local.",
    addresses=[socket.inet_aton(ip)],
    port=5000)
# HTTPS advertisement on port 5443.
httpsZconf = ServiceInfo(
    "_https._tcp.local.",
    "intpantry._https._tcp.local.",
    addresses=[socket.inet_aton(ip)],
    port=5443)
zc = Zeroconf()
# NOTE(review): only the HTTP service is registered; httpsZconf is built but
# never registered — confirm whether that is intentional.
zc.register_service(httpZconf)
print('Service Registered:', httpZconf)
def app_factory(config=None):
    """Create and configure the Flask application.

    Args:
        config: optional config object for ``app.config.from_object``; when
            falsy (None or ``{}``), the default ``FlaskConfig`` is used.

    Returns:
        Tuple of ``(app, db, migrate)``.
    """
    # IDIOM FIX: the default was the mutable literal `config={}`; None plus a
    # falsy check preserves the original "empty means default config" behavior.
    app = Flask(__name__)
    app.config.from_object(FlaskConfig if not config else config)
    db.init_app(app)
    migrate = Migrate(app, db)

    @app.route('/')
    def get_root():
        # Introspection endpoint: list every route with methods and endpoint.
        links = []
        for rule in app.url_map.iter_rules():
            methods = ','.join(rule.methods)
            links.append((f'{rule}', methods, rule.endpoint))
        return jsonify(links)

    @app.route('/cert', methods=['GET'])
    def get_cert():
        # Serve the self-signed certificate so clients can trust/pin it.
        response = send_from_directory(os.path.join('.', 'static'), 'ssr.crt')
        return response

    @app.route('/pair', methods=['GET'])
    def pair_device():
        code = request.args.get('code')
        # First-run bootstrap: with no user tokens yet and no code supplied,
        # hand out a pairing code instead of a token.
        if len(AuthToken.query.filter_by(token_class='user').all()) == 0 and not code:
            return jsonify(generate_pairing_code())
        token = generate_user_token(code)
        if token is None:
            # Invalid or expired pairing code.
            return jsonify(None), 401
        return jsonify(token), 201

    @app.route('/pair', methods=['POST'])
    @token_auth.login_required(role=['user'])
    def get_pairing_code():
        return jsonify(generate_pairing_code())

    @app.route('/pair', methods=['DELETE'])
    @token_auth.login_required(role=['user'])
    def delete_token():
        # Revoke the caller's own bearer token ("Bearer <token>" header).
        token = request.headers.get('Authorization')
        print(token)
        token = token.split(' ')[1]
        db.session.delete(AuthToken.query.get(token))
        db.session.commit()
        return jsonify('OK')

    app.register_blueprint(pantry_bp)
    app.register_blueprint(robot_bp)
    app.register_blueprint(util_bp)
    return app, db, migrate
@atexit.register
def shutdown():
    # Withdraw the mDNS advertisement when the process exits.
    zc.unregister_all_services()

# Build the module-level app so WSGI servers can import it directly.
app, db, migrate = app_factory()
| 29.457447
| 91
| 0.669195
| 349
| 2,769
| 5.120344
| 0.332378
| 0.058758
| 0.031897
| 0.031897
| 0.118635
| 0.118635
| 0.078344
| 0.04141
| 0
| 0
| 0
| 0.007266
| 0.204767
| 2,769
| 94
| 92
| 29.457447
| 0.804269
| 0
| 0
| 0.082192
| 0
| 0
| 0.07509
| 0.019856
| 0
| 0
| 0
| 0
| 0
| 1
| 0.09589
| false
| 0
| 0.178082
| 0.013699
| 0.383562
| 0.068493
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d10162b60dc80362847021a74c900fd613e0ff7
| 39,370
|
py
|
Python
|
lingua_franca/lang/parse_eu.py
|
OpenVoiceOS/ovos-lingua-franca
|
392cc37cbfde3b8d6f11258c1e148e63ba2fb951
|
[
"Apache-2.0"
] | null | null | null |
lingua_franca/lang/parse_eu.py
|
OpenVoiceOS/ovos-lingua-franca
|
392cc37cbfde3b8d6f11258c1e148e63ba2fb951
|
[
"Apache-2.0"
] | 13
|
2022-01-26T03:43:46.000Z
|
2022-03-25T17:00:18.000Z
|
lingua_franca/lang/parse_eu.py
|
OpenVoiceOS/ovos-lingua-franca
|
392cc37cbfde3b8d6f11258c1e148e63ba2fb951
|
[
"Apache-2.0"
] | 1
|
2022-01-18T21:11:44.000Z
|
2022-01-18T21:11:44.000Z
|
#
# Copyright 2017 Mycroft AI Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Parse functions for Basque (eu)
TODO: numbers greater than 999999
"""
from datetime import datetime
from dateutil.relativedelta import relativedelta
from dateutil.tz import gettz
from lingua_franca.lang.format_eu import pronounce_number_eu
from lingua_franca.lang.parse_common import *
from lingua_franca.lang.common_data_eu import _NUM_STRING_EU
def isFractional_eu(input_str):
    """
    This function takes the given text and checks if it is a fraction.

    Args:
        input_str (str): the string to check if fractional
    Returns:
        (bool) or (float): False if not a fraction, otherwise the fraction
    """
    if input_str.endswith('s', -1):
        input_str = input_str[:len(input_str) - 1]  # e.g. "fifths"
    key = input_str.lower()
    aFrac = {"erdia": 2, "erdi": 2, "heren": 3, "laurden": 4,
             "laurdena": 4, "bosten": 5, "bostena": 5, "seiren": 6, "seirena": 6,
             "zazpiren": 7, "zapirena": 7, "zortziren": 8, "zortzirena": 8,
             "bederatziren": 9, "bederatzirena": 9, "hamarren": 10, "hamarrena": 10,
             "hamaikaren": 11, "hamaikarena": 11, "hamabiren": 12, "hamabirena": 12}
    # BUG FIX: the membership test used input_str.lower() while the lookup used
    # the raw string, raising KeyError for capitalized input such as "Erdia".
    # The literal comparisons below now also use the lowered key, a
    # backward-compatible generalization.
    if key in aFrac:
        return 1.0 / aFrac[key]
    if key in ("hogeiren", "hogeirena"):
        return 1.0 / 20
    if key in ("hogeita hamarren", "hogeita hamarrena"):
        return 1.0 / 30
    if key in ("ehunen", "ehunena"):
        return 1.0 / 100
    if key in ("milaren", "milarena"):
        return 1.0 / 1000
    return False
# TODO: short_scale and ordinals don't do anything here.
# The parameters are present in the function signature for API compatibility
# reasons.
#
# Returns incorrect output on certain fractional phrases like, "cuarto de dos"
def extract_number_eu(text, short_scale=True, ordinals=False):
    """
    This function prepares the given text for parsing by making
    numbers consistent, getting rid of contractions, etc.

    Args:
        text (str): the string to normalize
    Returns:
        (int) or (float): The value of extracted number

    Note: short_scale and ordinals are accepted for API compatibility only;
    they are not used by this implementation (see the module-level TODO).
    """
    aWords = text.lower().split()
    count = 0            # index of the word currently being examined
    result = None        # running numeric value; None until a number is seen
    while count < len(aWords):
        val = 0
        word = aWords[count]
        next_next_word = None
        # Peek at the following one/two words for fraction and "eta" handling.
        if count + 1 < len(aWords):
            next_word = aWords[count + 1]
            if count + 2 < len(aWords):
                next_next_word = aWords[count + 2]
        else:
            next_word = None
        # is current word a number?
        if word in _NUM_STRING_EU:
            val = _NUM_STRING_EU[word]
        elif word.isdigit():  # doesn't work with decimals
            val = int(word)
        elif is_numeric(word):
            val = float(word)
        elif isFractional_eu(word):
            # Fraction word: multiply the running result (or an implicit 1).
            if next_word in _NUM_STRING_EU:
                # erdi bat, heren bat, etab (e.g. "half one")
                result = _NUM_STRING_EU[next_word]
                # skip the following number word (bat, bi, ...)
                next_word = None
                count += 2
            elif not result:
                result = 1
                count += 1
            result = result * isFractional_eu(word)
            continue
        if not val:
            # look for fractions like "2/3"
            aPieces = word.split('/')
            # if (len(aPieces) == 2 and is_numeric(aPieces[0])
            #    and is_numeric(aPieces[1])):
            if look_for_fractions(aPieces):
                val = float(aPieces[0]) / float(aPieces[1])
        if val:
            if result is None:
                result = 0
            # handle fractions: "en"/"ren" marks a denominator suffix
            if next_word == "en" or next_word == "ren":
                result = float(result) / float(val)
            else:
                result = val
        if next_word is None:
            break
        # number word and fraction joined by "eta" ("and")
        ands = ["eta"]
        if next_word in ands:
            zeros = 0
            if result is None:
                count += 1
                continue
            # Recursively parse everything after the "eta".
            newWords = aWords[count + 2:]
            newText = ""
            for word in newWords:
                newText += word + " "
            afterAndVal = extract_number_eu(newText[:-1])
            if afterAndVal:
                if result < afterAndVal or result < 20:
                    # Treat the tail as a fractional part: scale it below 1,
                    # honoring any explicit leading zeros ("zero"/"0").
                    while afterAndVal > 1:
                        afterAndVal = afterAndVal / 10.0
                    for word in newWords:
                        if word == "zero" or word == "0":
                            zeros += 1
                        else:
                            break
                    for _ in range(0, zeros):
                        afterAndVal = afterAndVal / 10.0
                result += afterAndVal
                break
        elif next_next_word is not None:
            # "X <word> eta Y": add the value parsed after the "eta".
            if next_next_word in ands:
                newWords = aWords[count + 3:]
                newText = ""
                for word in newWords:
                    newText += word + " "
                afterAndVal = extract_number_eu(newText[:-1])
                if afterAndVal:
                    if result is None:
                        result = 0
                    result += afterAndVal
                    break
        # Decimal separators: spoken ("puntu"/"koma") or literal ("."/",").
        decimals = ["puntu", "koma", ".", ","]
        if next_word in decimals:
            zeros = 0
            newWords = aWords[count + 2:]
            newText = ""
            for word in newWords:
                newText += word + " "
            # Count leading zeros of the fractional part, then glue the
            # remainder onto the integer part as digits after the point.
            for word in newWords:
                if word == "zero" or word == "0":
                    zeros += 1
                else:
                    break
            afterDotVal = str(extract_number_eu(newText[:-1]))
            afterDotVal = zeros * "0" + afterDotVal
            result = float(str(result) + "." + afterDotVal)
            break
        count += 1
    # Return the $str with the number related words removed
    # (now empty strings, so strlen == 0)
    # aWords = [word for word in aWords if len(word) > 0]
    # text = ' '.join(aWords)
    if "." in str(result):
        integer, dec = str(result).split(".")
        # cast float to int when the fractional part is exactly zero
        if dec == "0":
            result = int(integer)
    return result or False
# TODO Not parsing 'cero'
def eu_number_parse(words, i):
    """Recursive-descent parse of a Basque number phrase in *words* at index *i*.

    Returns (value, next_index) on success, or None if no number starts at i.
    NOTE: may mutate words[i] in place (stripping a "-ta" compound suffix).
    """
    def eu_cte(i, s):
        # Match the exact literal word s at position i.
        if i < len(words) and s == words[i]:
            return s, i + 1
        return None

    def eu_number_word(i, mi, ma):
        # Match a dictionary number word whose value lies in [mi, ma].
        if i < len(words):
            v = _NUM_STRING_EU.get(words[i])
            if v and v >= mi and v <= ma:
                return v, i + 1
        return None

    def eu_number_1_99(i):
        # Numbers 1..99: either a direct word (1-29) or a tens word (20-90)
        # optionally compounded with a "-ta" suffix plus a 1-19 word.
        if i >= len(words):
            return None
        r1 = eu_number_word(i, 1, 29)
        if r1:
            return r1
        composed = False
        if words[i] != "eta" and words[i][-2:] == "ta":
            # Strip the compounding suffix (e.g. "hogeita" -> "hogei").
            composed = True
            words[i] = words[i][:-2]
        r1 = eu_number_word(i, 20, 90)
        if r1:
            v1, i1 = r1
            if composed:
                # i2 = r2[1]
                r3 = eu_number_word(i1, 1, 19)
                if r3:
                    v3, i3 = r3
                    return v1 + v3, i3
            return r1
        return None

    def eu_number_1_999(i):
        # Hundreds word (100-900), optionally followed by "eta" + 1-99.
        r1 = eu_number_word(i, 100, 900)
        if r1:
            v1, i1 = r1
            r2 = eu_cte(i1, "eta")
            if r2:
                i2 = r2[1]
                r3 = eu_number_1_99(i2)
                if r3:
                    v3, i3 = r3
                    return v1 + v3, i3
            else:
                return r1
        # [1-99]
        r1 = eu_number_1_99(i)
        if r1:
            return r1
        return None

    def eu_number(i):
        # check for cero (zero)
        r1 = eu_number_word(i, 0, 0)
        if r1:
            return r1
        # check for [1-999] (mila [0-999])?  -- "mila" = thousand
        r1 = eu_number_1_999(i)
        if r1:
            v1, i1 = r1
            r2 = eu_cte(i1, "mila")
            if r2:
                i2 = r2[1]
                r3 = eu_number_1_999(i2)
                if r3:
                    v3, i3 = r3
                    return v1 * 1000 + v3, i3
                else:
                    return v1 * 1000, i2
            else:
                return r1
        return None
    return eu_number(i)
def extract_numbers_eu(text, short_scale=True, ordinals=False):
    """
    Takes in a string and extracts a list of numbers.

    Args:
        text (str): the string to extract a number from
        short_scale (bool): Use "short scale" or "long scale" for large
            numbers -- over a million. The default is short scale, which
            is now common in most English speaking countries.
            See https://en.wikipedia.org/wiki/Names_of_large_numbers
        ordinals (bool): consider ordinal numbers, e.g. third=3 instead of 1/3
    Returns:
        list: list of extracted numbers as floats
    """
    # Delegates to the generic splitter, using the Basque pronounce/extract
    # pair to locate number spans.
    return extract_numbers_generic(text, pronounce_number_eu, extract_number_eu,
                                   short_scale=short_scale, ordinals=ordinals)
def normalize_eu(text, remove_articles=True):
    """Basque string normalization: collapse whitespace and convert spoken
    numbers into digits.

    Args:
        text (str): input text
        remove_articles (bool): accepted for API compatibility; unused here.
    Returns:
        str: normalized text
    """
    # BUG FIX: removed an unreachable `return text` that followed the
    # function's actual return statement (dead code).
    words = text.split()  # this also removes extra spaces
    normalized = ""
    i = 0
    while i < len(words):
        word = words[i]
        # Convert numbers into digits; eu_number_parse advances i past the
        # whole number phrase it consumed.
        r = eu_number_parse(words, i)
        if r:
            v, i = r
            normalized += " " + str(v)
            continue
        normalized += " " + word
        i += 1
    return normalized[1:]  # strip the initial space
# TODO MycroftAI/mycroft-core#2348
def extract_datetime_eu(input_str, anchorDate=None, default_time=None):
    """Extract a datetime from a Basque natural-language phrase.

    Recognises relative day words (gaur/bihar/atzo/herenegun/etzi...),
    counts of days/weeks/months/years, weekday and month names, and clock
    times with am/pm-style qualifiers, then combines them relative to
    *anchorDate*.

    Args:
        input_str (str): text to parse; "" returns None immediately
        anchorDate (datetime): reference "now"; defaults to datetime.now()
        default_time: time applied when a date but no time was found
            (only .hour and .minute are read)
    Returns:
        [datetime, str] | None: extracted datetime plus the leftover text
        with consumed words blanked out, or None when nothing date-like
        was found.
    """
    def clean_string(s):
        # cleans the input string of unneeded punctuation and capitalization
        # among other things
        symbols = [".", ",", ";", "?", "!", "."]
        # noise_words = ["entre", "la", "del", "al", "el", "de",
        #                "para", "una", "cualquier", "a",
        #                "e'", "esta", "este"]
        # TODO
        # NOTE(review): several Spanish noise words remain in this Basque
        # list — presumably leftovers from the es parser this was ported
        # from; confirm they are intentional.
        noise_words = ["artean", "tartean", "edozein", "hau", "hontan", "honetan",
                       "para", "una", "cualquier", "a",
                       "e'", "esta", "este"]

        for word in symbols:
            s = s.replace(word, "")
        for word in noise_words:
            s = s.replace(" " + word + " ", " ")
        s = s.lower().replace(
            "-",
            " ").replace(
            "_",
            "")
        # handle synonyms and equivalents, "tomorrow early = tomorrow morning
        synonyms = {"goiza": ["egunsentia", "goiz", "oso goiz"],
                    "arratsaldea": ["arratsa", "bazkalostea", "arratsalde", "arrats"],
                    "gaua": ["iluntzea", "berandu", "gau", "gaba"]}
        for syn in synonyms:
            for word in synonyms[syn]:
                s = s.replace(" " + word + " ", " " + syn + " ")
        # relevant plurals: strip the trailing characters of the plural forms
        # (str.rstrip strips a character *set*, which happens to work here)
        wordlist = ["goizak", "arratsaldeak", "gauak", "egunak", "asteak",
                    "urteak", "minutuak", "segunduak", "hurrengoak",
                    "datozenak", "orduak", "hilabeteak"]
        for _, word in enumerate(wordlist):
            s = s.replace(word, word.rstrip('ak'))
        # s = s.replace("meses", "mes").replace("anteriores", "anterior")
        return s

    def date_found():
        # True once any date/time component has been recognised by either pass.
        return found or \
            (
                datestr != "" or
                yearOffset != 0 or monthOffset != 0 or
                dayOffset is True or hrOffset != 0 or
                hrAbs or minOffset != 0 or
                minAbs or secOffset != 0
            )

    if input_str == "":
        return None
    if anchorDate is None:
        anchorDate = datetime.now()

    # dayOffset doubles as a "nothing found yet" sentinel (False) and a count.
    found = False
    daySpecified = False
    dayOffset = False
    monthOffset = 0
    yearOffset = 0
    dateNow = anchorDate
    today = dateNow.strftime("%w")
    currentYear = dateNow.strftime("%Y")
    fromFlag = False
    datestr = ""
    hasYear = False
    timeQualifier = ""

    words = clean_string(input_str).split(" ")
    timeQualifiersList = ['goiza', 'arratsaldea', 'gaua']
    # NOTE(review): time_indicators and froms are still mostly Spanish —
    # presumably pending translation (see the TODOs); confirm.
    time_indicators = ["en", "la", "al", "por", "pasados",
                       "pasadas", "día", "hora"]
    days = ['astelehena', 'asteartea', 'asteazkena',
            'osteguna', 'ostirala', 'larunbata', 'igandea']
    months = ['urtarrila', 'otsaila', 'martxoa', 'apirila', 'maiatza', 'ekaina',
              'uztaila', 'abuztua', 'iraila', 'urria', 'azaroa',
              'abendua']
    monthsShort = ['urt', 'ots', 'mar', 'api', 'mai', 'eka', 'uzt', 'abu',
                   'ira', 'urr', 'aza', 'abe']
    nexts = ["hurrengo", "datorren", "ondorengo"]
    suffix_nexts = ["barru"]
    lasts = ["azken", "duela"]
    suffix_lasts = ["aurreko"]
    nxts = ["ondorengo", "hurrengo", "datorren"]
    prevs = ["aurreko", "duela", "previo", "anterior"]
    # TODO
    froms = ["desde", "en", "para", "después de", "por", "próximo",
             "próxima", "de"]
    thises = ["hau"]
    froms += thises
    lists = nxts + prevs + froms + time_indicators

    # ---- Pass 1: dates (day offsets, weekdays, months, years) ----
    for idx, word in enumerate(words):
        if word == "":
            continue
        wordPrevPrev = words[idx - 2] if idx > 1 else ""
        wordPrev = words[idx - 1] if idx > 0 else ""
        wordNext = words[idx + 1] if idx + 1 < len(words) else ""
        wordNextNext = words[idx + 2] if idx + 2 < len(words) else ""
        wordNextNextNext = words[idx + 3] if idx + 3 < len(words) else ""

        start = idx
        used = 0
        # save timequalifier for later
        if word in timeQualifiersList:
            timeQualifier = word

        # parse today, tomorrow, yesterday
        elif (word == "gaur" or word == "gaurko") and not fromFlag:
            dayOffset = 0
            used += 1
        elif (word == "bihar" or word == "biharko") and not fromFlag:
            dayOffset = 1
            used += 1
        elif (word == "atzo" or word == "atzoko") and not fromFlag:
            dayOffset -= 1
            used += 1
        # before yesterday
        elif (word == "herenegun" or word == "herenegungo") and not fromFlag:
            dayOffset -= 2
            used += 1
        # if wordNext == "ayer":
        #     used += 1
        # elif word == "ante" and wordNext == "ante" and wordNextNext == \
        #         "ayer" and not fromFlag:
        #     dayOffset -= 3
        #     used += 3
        # elif word == "ante anteayer" and not fromFlag:
        #     dayOffset -= 3
        #     used += 1
        # day after tomorrow
        elif (word == "etzi" or word == "etziko") and not fromFlag:
            dayOffset += 2
            used = 1
        elif (word == "etzidamu" or word == "etzidamuko") and not fromFlag:
            dayOffset += 3
            used = 1
        # parse 5 days, 10 weeks, last week, next week, week after
        elif word == "egun" or word == "eguna" or word == "eguneko":
            if wordPrevPrev and wordPrevPrev == "duela":
                # "duela N egun" -> N days ago
                used += 1
                if wordPrev and wordPrev[0].isdigit():
                    dayOffset -= int(wordPrev)
                    start -= 1
                    used += 1
            elif (wordPrev and wordPrev[0].isdigit() and
                    wordNext not in months and
                    wordNext not in monthsShort):
                # "N egun" -> N days ahead
                dayOffset += int(wordPrev)
                start -= 1
                used += 2
            elif wordNext and wordNext[0].isdigit() and wordNextNext not in \
                    months and wordNextNext not in monthsShort:
                dayOffset += int(wordNext)
                start -= 1
                used += 2
        # NOTE(review): `and` binds tighter than `or`, so `not fromFlag`
        # only guards the `word == "asteko"` alternative here (and in the
        # hilabete/urte branches below) — likely unintended precedence.
        elif word == "aste" or word == "astea" or word == "asteko" and not fromFlag:
            if wordPrev[0].isdigit():
                dayOffset += int(wordPrev) * 7
                start -= 1
                used = 2
            for w in nexts:
                if wordPrev == w:
                    dayOffset = 7
                    start -= 1
                    used = 2
            for w in lasts:
                if wordPrev == w:
                    dayOffset = -7
                    start -= 1
                    used = 2
            for w in suffix_nexts:
                if wordNext == w:
                    dayOffset = 7
                    start -= 1
                    used = 2
            for w in suffix_lasts:
                if wordNext == w:
                    dayOffset = -7
                    start -= 1
                    used = 2
        # parse 10 months, next month, last month
        elif word == "hilabete" or word == "hilabetea" or word == "hilabeteko" and not fromFlag:
            if wordPrev[0].isdigit():
                monthOffset = int(wordPrev)
                start -= 1
                used = 2
            # NOTE(review): monthOffset = ±7 below looks copy-pasted from
            # the week branch — "next/last month" should presumably be ±1.
            for w in nexts:
                if wordPrev == w:
                    monthOffset = 7
                    start -= 1
                    used = 2
            for w in lasts:
                if wordPrev == w:
                    monthOffset = -7
                    start -= 1
                    used = 2
            for w in suffix_nexts:
                if wordNext == w:
                    monthOffset = 7
                    start -= 1
                    used = 2
            for w in suffix_lasts:
                if wordNext == w:
                    monthOffset = -7
                    start -= 1
                    used = 2
        # parse 5 years, next year, last year
        elif word == "urte" or word == "urtea" or word == "urteko" and not fromFlag:
            if wordPrev[0].isdigit():
                yearOffset = int(wordPrev)
                start -= 1
                used = 2
            for w in nexts:
                if wordPrev == w:
                    yearOffset = 1
                    start -= 1
                    used = 2
            for w in lasts:
                if wordPrev == w:
                    yearOffset = -1
                    start -= 1
                    used = 2
            for w in suffix_nexts:
                if wordNext == w:
                    yearOffset = 1
                    start -= 1
                    used = 2
            for w in suffix_lasts:
                if wordNext == w:
                    yearOffset = -1
                    start -= 1
                    used = 2
        # parse Monday, Tuesday, etc., and next Monday,
        # last Tuesday, etc.
        elif word in days and not fromFlag:
            d = days.index(word)
            # %w gives 0=Sunday; days[] starts at Monday, hence the +1.
            dayOffset = (d + 1) - int(today)
            used = 1
            if dayOffset < 0:
                dayOffset += 7
            if wordPrev == "hurrengo":
                dayOffset += 7
                used += 1
                start -= 1
            elif wordPrev == "aurreko":
                dayOffset -= 7
                used += 1
                start -= 1
            if wordNext == "hurrengo":
                # dayOffset += 7
                used += 1
            elif wordNext == "aurreko":
                # dayOffset -= 7
                used += 1
        # parse 15 of July, June 20th, Feb 18, 19 of February
        elif word in months or word in monthsShort:
            try:
                m = months.index(word)
            except ValueError:
                m = monthsShort.index(word)
            used += 1
            datestr = months[m]
            if wordPrev and wordPrev[0].isdigit():
                # day number precedes the month, e.g. "13 maiatza"
                datestr += " " + wordPrev
                start -= 1
                used += 1
                if wordNext and wordNext[0].isdigit():
                    datestr += " " + wordNext
                    used += 1
                    hasYear = True
                else:
                    hasYear = False
            elif wordNext and wordNext[0].isdigit():
                # month precedes the day number, e.g. "maiatza 13"
                datestr += " " + wordNext
                used += 1
                if wordNextNext and wordNextNext[0].isdigit():
                    datestr += " " + wordNextNext
                    used += 1
                    hasYear = True
                else:
                    hasYear = False
            elif wordPrevPrev and wordPrevPrev[0].isdigit():
                # e.g. "13 <day-word> maiatza"
                datestr += " " + wordPrevPrev
                start -= 2
                used += 2
                # NOTE(review): checks word[0].isdigit() but appends
                # wordNext — the sibling branches test wordNext[0];
                # possibly a typo for wordNext[0].isdigit().
                if wordNext and word[0].isdigit():
                    datestr += " " + wordNext
                    used += 1
                    hasYear = True
                else:
                    hasYear = False
            elif wordNextNext and wordNextNext[0].isdigit():
                datestr += " " + wordNextNext
                used += 2
                if wordNextNextNext and wordNextNextNext[0].isdigit():
                    datestr += " " + wordNextNextNext
                    used += 1
                    hasYear = True
                else:
                    hasYear = False
            # a bare month with no day number is not a parseable datestr
            if datestr in months:
                datestr = ""
        # parse 5 days from tomorrow, 10 weeks from next thursday,
        # 2 months from July
        validFollowups = days + months + monthsShort
        validFollowups.append("gaur")
        validFollowups.append("bihar")
        validFollowups.append("atzo")
        # validFollowups.append("atzoko")
        validFollowups.append("herenegun")
        validFollowups.append("orain")
        validFollowups.append("oraintxe")
        # validFollowups.append("ante")
        # TODO
        if word in froms and wordNext in validFollowups:

            if not (word == "bihar" or word == "herenegun" or word == "atzo"):
                used = 1
                fromFlag = True
            if wordNext == "bihar":
                dayOffset += 1
            elif wordNext == "atzo" or wordNext == "atzoko":
                dayOffset -= 1
            elif wordNext == "herenegun":
                dayOffset -= 2
            # elif (wordNext == "ante" and wordNext == "ante" and
            #       wordNextNextNext == "ayer"):
            #     dayOffset -= 3
            elif wordNext in days:
                d = days.index(wordNext)
                tmpOffset = (d + 1) - int(today)
                used = 2
                # if wordNextNext == "feira":
                #     used += 1
                if tmpOffset < 0:
                    tmpOffset += 7
                if wordNextNext:
                    if wordNextNext in nxts:
                        tmpOffset += 7
                        used += 1
                    elif wordNextNext in prevs:
                        tmpOffset -= 7
                        used += 1
                dayOffset += tmpOffset
            elif wordNextNext and wordNextNext in days:
                d = days.index(wordNextNext)
                tmpOffset = (d + 1) - int(today)
                used = 3
                if wordNextNextNext:
                    if wordNextNextNext in nxts:
                        tmpOffset += 7
                        used += 1
                    elif wordNextNextNext in prevs:
                        tmpOffset -= 7
                        used += 1
                dayOffset += tmpOffset
                # if wordNextNextNext == "feira":
                #     used += 1
        if wordNext in months:
            used -= 1
        if used > 0:
            # blank out the consumed words (plus a leading list word, if any)
            if start - 1 > 0 and words[start - 1] in lists:
                start -= 1
                used += 1

            for i in range(0, used):
                words[i + start] = ""

            if start - 1 >= 0 and words[start - 1] in lists:
                words[start - 1] = ""

            found = True
            daySpecified = True

    # ---- Pass 2: times (noon, midnight, H:MM, offsets) ----
    hrOffset = 0
    minOffset = 0
    secOffset = 0
    hrAbs = None
    minAbs = None

    for idx, word in enumerate(words):
        if word == "":
            continue
        wordPrevPrev = words[idx - 2] if idx > 1 else ""
        wordPrev = words[idx - 1] if idx > 0 else ""
        wordNext = words[idx + 1] if idx + 1 < len(words) else ""
        wordNextNext = words[idx + 2] if idx + 2 < len(words) else ""
        wordNextNextNext = words[idx + 3] if idx + 3 < len(words) else ""
        # parse noon, midnight, morning, afternoon, evening
        used = 0
        if word == "eguerdi" or word == "eguerdia" or word == "eguerdian":
            hrAbs = 12
            used += 2
        elif word == "gauerdi" or word == "gauerdia" or word == "gauerdian":
            hrAbs = 0
            used += 2
        elif word == "goiza":
            if not hrAbs:
                hrAbs = 8
            used += 1
        elif word == "arratsaldea" or word == "arratsa" or word == "arratsean" or word == "arratsaldean":
            if not hrAbs:
                hrAbs = 15
            used += 1
        # TODO
        # elif word == "media" and wordNext == "tarde":
        #     if not hrAbs:
        #         hrAbs = 17
        #     used += 2
        elif word == "iluntze" or word == "iluntzea" or word == "iluntzean":
            if not hrAbs:
                hrAbs = 20
            used += 2
        # TODO
        # elif word == "media" and wordNext == "mañana":
        #     if not hrAbs:
        #         hrAbs = 10
        #     used += 2
        # elif word == "fim" and wordNext == "tarde":
        #     if not hrAbs:
        #         hrAbs = 19
        #     used += 2
        elif word == "egunsentia" or word == "egunsentian" or word == "egunsenti":
            if not hrAbs:
                hrAbs = 6
            used += 1
        # elif word == "madrugada":
        #     if not hrAbs:
        #         hrAbs = 1
        #     used += 2
        elif word == "gaua" or word == "gauean" or word == "gau":
            if not hrAbs:
                hrAbs = 21
            used += 1
        # parse half an hour, quarter hour
        # TODO
        elif (word == "hora" and
                (wordPrev in time_indicators or wordPrevPrev in
                 time_indicators)):
            if wordPrev == "media":
                minOffset = 30
            elif wordPrev == "cuarto":
                minOffset = 15
            elif wordPrevPrev == "cuarto":
                minOffset = 15
                if idx > 2 and words[idx - 3] in time_indicators:
                    words[idx - 3] = ""
                words[idx - 2] = ""
            else:
                hrOffset = 1
            if wordPrevPrev in time_indicators:
                words[idx - 2] = ""
            words[idx - 1] = ""
            used += 1
            hrAbs = -1
            minAbs = -1
        # parse 5:00 am, 12:00 p.m., etc
        elif word[0].isdigit():
            isTime = True
            strHH = ""
            strMM = ""
            remainder = ""
            if ':' in word:
                # parse colons
                # "3:00 in the morning"
                # simple 3-stage scanner: digits -> ':' -> digits -> suffix
                stage = 0
                length = len(word)
                for i in range(length):
                    if stage == 0:
                        if word[i].isdigit():
                            strHH += word[i]
                        elif word[i] == ":":
                            stage = 1
                        else:
                            stage = 2
                            i -= 1
                    elif stage == 1:
                        if word[i].isdigit():
                            strMM += word[i]
                        else:
                            stage = 2
                            i -= 1
                    elif stage == 2:
                        remainder = word[i:].replace(".", "")
                        break
                if remainder == "":
                    nextWord = wordNext.replace(".", "")
                    if nextWord == "am" or nextWord == "pm":
                        remainder = nextWord
                        used += 1
                    elif wordNext == "goiza" or wordNext == "egunsentia" or wordNext == "goizeko" or wordNext == "egunsentiko":
                        remainder = "am"
                        used += 1
                    elif wordPrev == "arratsaldeko" or wordPrev == "arratsaldea" or wordPrev == "arratsaldean":
                        remainder = "pm"
                        used += 1
                    elif wordNext == "gaua" or wordNext == "gauean" or wordNext == "gaueko":
                        # NOTE(review): int(word[0]) only looks at the first
                        # digit, so "12:00 gauean" is treated as before-6am.
                        if 0 < int(word[0]) < 6:
                            remainder = "am"
                        else:
                            remainder = "pm"
                        used += 1
                    elif wordNext in thises and (wordNextNext == "goiza" or wordNextNext == "goizean" or wordNextNext == "goizeko"):
                        remainder = "am"
                        used = 2
                    elif wordNext in thises and \
                            (wordNextNext == "arratsaldea" or wordNextNext == "arratsaldean" or wordNextNext == "arratsaldeko"):
                        remainder = "pm"
                        used = 2
                    elif wordNext in thises and (wordNextNext == "gaua" or wordNextNext == "gauean" or wordNextNext == "gaueko"):
                        remainder = "pm"
                        used = 2
                    else:
                        if timeQualifier != "":
                            # NOTE(review): strHH is still a str here, so
                            # `strHH <= 12` raises TypeError on Python 3
                            # whenever this branch is reached — latent bug.
                            if strHH <= 12 and \
                                    (timeQualifier == "goiza" or
                                     timeQualifier == "arratsaldea"):
                                strHH += 12
            else:
                # try to parse # s without colons
                # 5 hours, 10 minutes etc.
                length = len(word)
                strNum = ""
                remainder = ""
                for i in range(length):
                    if word[i].isdigit():
                        strNum += word[i]
                    else:
                        remainder += word[i]

                if remainder == "":
                    remainder = wordNext.replace(".", "").lstrip().rstrip()

                if (
                        remainder == "pm" or
                        wordNext == "pm" or
                        remainder == "p.m." or
                        wordNext == "p.m."):
                    strHH = strNum
                    remainder = "pm"
                    used = 1
                elif (
                        remainder == "am" or
                        wordNext == "am" or
                        remainder == "a.m." or
                        wordNext == "a.m."):
                    strHH = strNum
                    remainder = "am"
                    used = 1
                else:
                    if (wordNext == "pm" or
                            wordNext == "p.m." or
                            wordPrev == "arratsaldeko"):
                        strHH = strNum
                        remainder = "pm"
                        used = 0
                    elif (wordNext == "am" or
                          wordNext == "a.m." or
                          wordPrev == "goizeko"):
                        strHH = strNum
                        remainder = "am"
                        used = 0
                    elif (int(word) > 100 and
                            (
                                # wordPrev == "o" or
                                # wordPrev == "oh" or
                                wordPrev == "zero"
                            )):
                        # 0800 hours (pronounced oh-eight-hundred)
                        # NOTE(review): true division leaves strHH a float
                        # (e.g. 8.0); integer division was probably meant.
                        strHH = int(word) / 100
                        strMM = int(word) - strHH * 100
                        if wordNext == "orduak":
                            used += 1
                    elif (
                            wordNext == "orduak" and
                            word[0] != '0' and
                            (
                                int(word) < 100 and
                                int(word) > 2400
                            )):
                        # ignores military time
                        # "in 3 hours"
                        # NOTE(review): `< 100 and > 2400` can never both be
                        # true, so this branch is unreachable — likely `or`
                        # was intended.
                        hrOffset = int(word)
                        used = 2
                        isTime = False
                        hrAbs = -1
                        minAbs = -1
                    elif wordNext == "minutu":
                        # "in 10 minutes"
                        minOffset = int(word)
                        used = 2
                        isTime = False
                        hrAbs = -1
                        minAbs = -1
                    elif wordNext == "segundu":
                        # in 5 seconds
                        secOffset = int(word)
                        used = 2
                        isTime = False
                        hrAbs = -1
                        minAbs = -1
                    elif int(word) > 100:
                        strHH = int(word) / 100
                        strMM = int(word) - strHH * 100
                        if wordNext == "ordu":
                            used += 1
                    elif wordNext == "" or (
                            wordNext == "puntuan"):
                        strHH = word
                        strMM = 00
                        if wordNext == "puntuan":
                            used += 2
                            if wordNextNextNext == "arratsaldea":
                                remainder = "pm"
                                used += 1
                            elif wordNextNextNext == "goiza":
                                remainder = "am"
                                used += 1
                            elif wordNextNextNext == "gaua":
                                # NOTE(review): `0 > strHH > 6` is always
                                # False (and strHH is a str here); probably
                                # `0 < int(strHH) < 6` was intended.
                                if 0 > strHH > 6:
                                    remainder = "am"
                                else:
                                    remainder = "pm"
                                used += 1
                    elif wordNext[0].isdigit():
                        strHH = word
                        strMM = wordNext
                        used += 1
                        if wordNextNext == "orduak":
                            used += 1
                    else:
                        isTime = False

            strHH = int(strHH) if strHH else 0
            strMM = int(strMM) if strMM else 0
            strHH = strHH + 12 if (remainder == "pm" and
                                   0 < strHH < 12) else strHH
            # NOTE(review): `0 < strHH >= 12` means strHH >= 12; the chained
            # form is confusing but not wrong for positive hours.
            strHH = strHH - 12 if (remainder == "am" and
                                   0 < strHH >= 12) else strHH
            if strHH > 24 or strMM > 59:
                isTime = False
                used = 0
            if isTime:
                hrAbs = strHH * 1
                minAbs = strMM * 1
                used += 1
        if used > 0:
            # removed parsed words from the sentence
            for i in range(used):
                words[idx + i] = ""

            if wordPrev == "puntuan":
                words[words.index(wordPrev)] = ""

            if idx > 0 and wordPrev in time_indicators:
                words[idx - 1] = ""
            if idx > 1 and wordPrevPrev in time_indicators:
                words[idx - 2] = ""

            # NOTE(review): rebinding the loop variable does not skip
            # iterations in a for-loop; consumed words are re-visited but
            # are "" by then, so `continue` fires.
            idx += used - 1
            found = True

    # check that we found a date
    if not date_found():
        return None

    if dayOffset is False:
        dayOffset = 0

    # perform date manipulation

    extractedDate = dateNow
    extractedDate = extractedDate.replace(microsecond=0,
                                          second=0,
                                          minute=0,
                                          hour=0)
    if datestr != "":
        # translate Basque month names to English so strptime(%B/%d) works
        en_months = ['january', 'february', 'march', 'april', 'may', 'june',
                     'july', 'august', 'september', 'october', 'november',
                     'december']
        en_monthsShort = ['jan', 'feb', 'mar', 'apr', 'may', 'june', 'july',
                          'aug',
                          'sept', 'oct', 'nov', 'dec']
        for idx, en_month in enumerate(en_months):
            datestr = datestr.replace(months[idx], en_month)
        for idx, en_month in enumerate(en_monthsShort):
            datestr = datestr.replace(monthsShort[idx], en_month)

        temp = datetime.strptime(datestr, "%B %d")
        temp = temp.replace(tzinfo=None)

        if not hasYear:
            temp = temp.replace(year=extractedDate.year, tzinfo=extractedDate.tzinfo)
            # no explicit year: pick this year if the date is still ahead,
            # otherwise next year
            if extractedDate < temp:
                extractedDate = extractedDate.replace(year=int(currentYear),
                                                      month=int(
                                                          temp.strftime(
                                                              "%m")),
                                                      day=int(temp.strftime(
                                                          "%d")))
            else:
                extractedDate = extractedDate.replace(
                    year=int(currentYear) + 1,
                    month=int(temp.strftime("%m")),
                    day=int(temp.strftime("%d")))
        else:
            extractedDate = extractedDate.replace(
                year=int(temp.strftime("%Y")),
                month=int(temp.strftime("%m")),
                day=int(temp.strftime("%d")))

    if yearOffset != 0:
        extractedDate = extractedDate + relativedelta(years=yearOffset)
    if monthOffset != 0:
        extractedDate = extractedDate + relativedelta(months=monthOffset)
    if dayOffset != 0:
        extractedDate = extractedDate + relativedelta(days=dayOffset)

    if hrAbs is None and minAbs is None and default_time:
        hrAbs = default_time.hour
        minAbs = default_time.minute
    if hrAbs != -1 and minAbs != -1:
        extractedDate = extractedDate + relativedelta(hours=hrAbs or 0,
                                                      minutes=minAbs or 0)
        # a bare time in the past rolls over to tomorrow
        if (hrAbs or minAbs) and datestr == "":
            if not daySpecified and dateNow > extractedDate:
                extractedDate = extractedDate + relativedelta(days=1)
    if hrOffset != 0:
        extractedDate = extractedDate + relativedelta(hours=hrOffset)
    if minOffset != 0:
        extractedDate = extractedDate + relativedelta(minutes=minOffset)
    if secOffset != 0:
        extractedDate = extractedDate + relativedelta(seconds=secOffset)

    resultStr = " ".join(words)
    resultStr = ' '.join(resultStr.split())
    # resultStr = pt_pruning(resultStr)
    return [extractedDate, resultStr]
def get_gender_eu(word, raw_string=""):
    """Return the grammatical gender of *word*.

    Basque nouns carry no grammatical gender, so there is never a gender
    to report.

    Args:
        word (str): word to inspect (ignored)
        raw_string (str): surrounding sentence, for API parity (ignored)

    Returns:
        bool: always False, meaning "no gender"
    """
    return False
| 36.218951
| 132
| 0.436297
| 3,755
| 39,370
| 4.526232
| 0.170173
| 0.015004
| 0.010591
| 0.011003
| 0.286303
| 0.232819
| 0.191045
| 0.14374
| 0.130854
| 0.119793
| 0
| 0.030273
| 0.46637
| 39,370
| 1,086
| 133
| 36.252302
| 0.778714
| 0.120726
| 0
| 0.446026
| 0
| 0
| 0.054884
| 0
| 0
| 0
| 0
| 0.003683
| 0
| 1
| 0.016607
| false
| 0
| 0.007117
| 0.001186
| 0.065243
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d1378b3e67d5a0964ccf48994e4da6105c0ae60
| 472
|
py
|
Python
|
move_py_files.py
|
rune-l/coco-annotator
|
a7ae8004c5e1ca74e5bbc41d09edc5cfab117a14
|
[
"MIT"
] | null | null | null |
move_py_files.py
|
rune-l/coco-annotator
|
a7ae8004c5e1ca74e5bbc41d09edc5cfab117a14
|
[
"MIT"
] | null | null | null |
move_py_files.py
|
rune-l/coco-annotator
|
a7ae8004c5e1ca74e5bbc41d09edc5cfab117a14
|
[
"MIT"
] | null | null | null |
"""Copy every image in the local test set to the EC2 annotation host via scp."""
import os
import subprocess

# Local directory holding the test-set images.
test_set_path = '/Users/runelangergaard/Documents/SmartAnnotation/data/test_set'
test_imgs = os.listdir(test_set_path)
# Fix: the original had a bare `test_imgs` expression here — a no-op REPL
# leftover with no effect in a script; removed.

# scp is invoked with a key file resolved relative to this directory.
cwd_path = '/Users/runelangergaard'
os.chdir(cwd_path)

for img in test_imgs:
    full_path = os.path.join(test_set_path, img)
    # Argument-list form (shell=False) passes the filename verbatim;
    # check=True aborts the loop on the first failed transfer instead of
    # silently continuing with an unchecked return code.
    subprocess.run([
        "scp",
        "-i",
        "coco-anno.pem",
        full_path,
        "ec2-user@ec2-34-211-193-133.us-west-2.compute.amazonaws.com:/datasets/tmp"
    ], check=True)
| 23.6
| 83
| 0.684322
| 69
| 472
| 4.478261
| 0.57971
| 0.090615
| 0.106796
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036269
| 0.182203
| 472
| 19
| 84
| 24.842105
| 0.764249
| 0
| 0
| 0
| 0
| 0.0625
| 0.37155
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d13e8253f51474a77c77b964813f16a0d1c345f
| 304
|
py
|
Python
|
examples/apply.py
|
PictElm/grom
|
52e28efad1edae447347dd396e80a665c283b05d
|
[
"Apache-2.0"
] | 1
|
2019-06-29T18:53:31.000Z
|
2019-06-29T18:53:31.000Z
|
examples/apply.py
|
PictElm/grom
|
52e28efad1edae447347dd396e80a665c283b05d
|
[
"Apache-2.0"
] | null | null | null |
examples/apply.py
|
PictElm/grom
|
52e28efad1edae447347dd396e80a665c283b05d
|
[
"Apache-2.0"
] | null | null | null |
"""Example: invert the raw pixel bytes of a BMP file with the grom library."""
import random
import grom

# Silence grom's debug output for this example run.
grom.debug(False)

dirName = "dump\\"
inputName = "example.bmp"
outputName = "output.bmp"

# Partition the file: a 0x76-byte 'head' segment, then the rest as 'raw'.
# NOTE(review): ('raw') is just the string 'raw', not a 1-tuple — confirm
# grom accepts a bare name here, otherwise ('raw',) may have been intended.
g = grom.Genome(dirName + inputName, partition=[
    ('head', 0x76),
    ('raw')
])
print(g)
print(g.partition)

# Invert every byte (255 - x) of the 'raw' segment, leaving the header intact.
g.apply(lambda x: 255 - x, ['raw'])

# Write the modified genome back out.
# NOTE(review): pause semantics inferred from the keyword name — verify in
# the grom documentation.
g(dirName + outputName, pause=False)
| 16
| 48
| 0.661184
| 41
| 304
| 4.902439
| 0.585366
| 0.059701
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023438
| 0.157895
| 304
| 18
| 49
| 16.888889
| 0.761719
| 0
| 0
| 0
| 0
| 0
| 0.121711
| 0
| 0
| 0
| 0.013158
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d14a69daed26d53510912624929725162594aec
| 3,351
|
py
|
Python
|
statefun-sdk-python/statefun/statefun_builder.py
|
MartijnVisser/flink-statefun
|
66b2fc5a178d916756428f65a197095fbb43f57d
|
[
"Apache-2.0"
] | null | null | null |
statefun-sdk-python/statefun/statefun_builder.py
|
MartijnVisser/flink-statefun
|
66b2fc5a178d916756428f65a197095fbb43f57d
|
[
"Apache-2.0"
] | 7
|
2022-02-24T17:20:28.000Z
|
2022-03-25T13:18:44.000Z
|
statefun-sdk-python/statefun/statefun_builder.py
|
MartijnVisser/flink-statefun
|
66b2fc5a178d916756428f65a197095fbb43f57d
|
[
"Apache-2.0"
] | null | null | null |
################################################################################
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
################################################################################
import typing
from statefun.core import ValueSpec
from statefun.context import Context
from statefun.messages import Message
from statefun.storage import make_address_storage_spec, StorageSpec
import inspect
class StatefulFunction(object):
    """A registered stateful function: the user callable, its storage
    specification, and whether the callable is a coroutine function."""
    __slots__ = ("fun", "storage_spec", "is_async")

    def __init__(self,
                 fun: typing.Callable[[Context, Message], None],
                 specs: StorageSpec,
                 is_async: bool):
        # Reject missing pieces up front, before any attribute is kept.
        if fun is None:
            raise ValueError("function code is missing.")
        if specs is None:
            raise ValueError("storage spec is missing.")
        self.fun = fun
        self.storage_spec = specs
        self.is_async = is_async
class StatefulFunctions(object):
    """A registry mapping function typenames to StatefulFunction instances."""
    __slots__ = ("_functions",)

    def __init__(self):
        # typename (str) -> StatefulFunction
        self._functions = {}

    def register(self, typename: str, fun, specs: typing.Optional[typing.List[ValueSpec]] = None):
        """registers a StatefulFunction function instance, under the given namespace with the given function type. """
        if fun is None:
            raise ValueError("function instance must be provided")
        if not typename:
            raise ValueError("function typename must be provided")
        storage_spec = make_address_storage_spec(specs if specs else [])
        is_async = inspect.iscoroutinefunction(fun)
        # A stateful function must accept exactly (context, message).
        sig = inspect.getfullargspec(fun)
        if len(sig.args) != 2:
            raise ValueError(
                f"The registered function {typename} does not expect a context and a message but rather {sig.args}.")
        self._functions[typename] = StatefulFunction(fun=fun, specs=storage_spec, is_async=is_async)

    def bind(self, typename, specs: typing.Optional[typing.List[ValueSpec]] = None):
        """wraps a StatefulFunction instance with a given namespace and type.

        for example:

        s = StatefulFunctions()

        @s.bind("com.foo.bar/greeter")
        def greeter(context, message):
            print("Hi there")

        This would add an invokable stateful function that can accept messages
        sent to "com.foo.bar/greeter".
        """
        # Fix: the docstring example previously used @s.define(...), a method
        # that does not exist on this class; corrected to @s.bind(...).
        # Also: `specs` now uses an explicit Optional annotation instead of
        # the deprecated implicit-Optional `typing.List[...] = None` form.
        def wrapper(function):
            self.register(typename, function, specs)
            return function

        return wrapper

    def for_typename(self, typename: str) -> StatefulFunction:
        """Look up a registered function; raises KeyError when unknown."""
        return self._functions[typename]
| 39.423529
| 118
| 0.640107
| 393
| 3,351
| 5.361323
| 0.394402
| 0.036545
| 0.01851
| 0.0299
| 0.050309
| 0.032273
| 0.032273
| 0
| 0
| 0
| 0
| 0.001959
| 0.238436
| 3,351
| 84
| 119
| 39.892857
| 0.823668
| 0.349448
| 0
| 0.047619
| 0
| 0.02381
| 0.129455
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.142857
| 0.02381
| 0.452381
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d17091c2b65264aa06f866332b484a8ae11e68d
| 2,195
|
py
|
Python
|
Solutions/236.py
|
ruppysuppy/Daily-Coding-Problem-Solutions
|
37d061215a9af2ce39c51f8816c83039914c0d0b
|
[
"MIT"
] | 70
|
2021-03-18T05:22:40.000Z
|
2022-03-30T05:36:50.000Z
|
Solutions/236.py
|
ungaro/Daily-Coding-Problem-Solutions
|
37d061215a9af2ce39c51f8816c83039914c0d0b
|
[
"MIT"
] | null | null | null |
Solutions/236.py
|
ungaro/Daily-Coding-Problem-Solutions
|
37d061215a9af2ce39c51f8816c83039914c0d0b
|
[
"MIT"
] | 30
|
2021-03-18T05:22:43.000Z
|
2022-03-17T10:25:18.000Z
|
"""
Problem:
You are given a list of N points (x1, y1), (x2, y2), ..., (xN, yN) representing a
polygon. You can assume these points are given in order; that is, you can construct the
polygon by connecting point 1 to point 2, point 2 to point 3, and so on, finally
looping around to connect point N to point 1.
Determine if a new point p lies inside this polygon. (If p is on the boundary of the
polygon, you should return False).
"""
from typing import List, Tuple
Point = Tuple[int, int]


def is_inside(points: List[Point], p: Point) -> bool:
    """Ray-casting parity test for point-in-polygon.

    For every edge of the closed polygon whose y-span contains p's y and
    which lies entirely to the right of p (x <= min of the edge's x's), one
    crossing is counted — with a per-vertex guard so an edge endpoint at
    exactly p's height is not counted twice. An odd number of crossings
    means p is inside; boundary points are intended to return False.

    Details:
    https://www.geeksforgeeks.org/how-to-check-if-a-given-point-lies-inside-a-polygon
    """
    # Fewer than three vertices cannot enclose anything.
    if len(points) in (0, 1, 2):
        return False

    x, y = p
    crossings = 0
    counted_vertices = set()

    # Walk every edge of the closed polygon, including the edge joining the
    # last vertex back to the first.
    for tail, head in zip(points, points[1:] + points[:1]):
        x1, y1 = tail
        x2, y2 = head
        if min(y1, y2) <= y <= max(y1, y2) and x <= min(x1, x2):
            if y2 == y and head not in counted_vertices:
                crossings += 1
                counted_vertices.add(head)
            elif y1 == y and tail not in counted_vertices:
                crossings += 1
                counted_vertices.add(tail)

    return crossings % 2 == 1
if __name__ == "__main__":
    # Quick demo against a diamond-shaped quadrilateral: one probe point to
    # the left of it, one at its centre.
    print(is_inside([(4, 3), (5, 4), (6, 3), (5, 2)], (3, 3)))
    print(is_inside([(4, 3), (5, 4), (6, 3), (5, 2)], (5, 3)))
"""
SPECS:
TIME COMPLEXITY: O(n)
SPACE COMPLEXITY: O(n)
"""
| 29.662162
| 87
| 0.596811
| 348
| 2,195
| 3.706897
| 0.333333
| 0.069767
| 0.027907
| 0.046512
| 0.271318
| 0.271318
| 0.271318
| 0.24186
| 0.24186
| 0.24186
| 0
| 0.042335
| 0.289749
| 2,195
| 73
| 88
| 30.068493
| 0.785119
| 0.384055
| 0
| 0.5
| 0
| 0
| 0.00626
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027778
| false
| 0
| 0.027778
| 0
| 0.138889
| 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d19a458c0aeddafe12f42faf41b63a52a85ae7f
| 2,546
|
py
|
Python
|
Oblig3/test_benchmark.py
|
fabiorodp/IN5550_Neural_Methods_in_Natural_Language_Processing
|
4d3b2ed56b56e016413ae1544e19ad2a2c0ef047
|
[
"MIT"
] | null | null | null |
Oblig3/test_benchmark.py
|
fabiorodp/IN5550_Neural_Methods_in_Natural_Language_Processing
|
4d3b2ed56b56e016413ae1544e19ad2a2c0ef047
|
[
"MIT"
] | null | null | null |
Oblig3/test_benchmark.py
|
fabiorodp/IN5550_Neural_Methods_in_Natural_Language_Processing
|
4d3b2ed56b56e016413ae1544e19ad2a2c0ef047
|
[
"MIT"
] | null | null | null |
# Author: Fabio Rodrigues Pereira
# E-mail: fabior@uio.no
# Author: Per Morten Halvorsen
# E-mail: pmhalvor@uio.no
# Author: Eivind Grønlie Guren
# E-mail: eivindgg@ifi.uio.no
# Import the project packages either from the repository root (Oblig3.*) or,
# when running from inside the Oblig3 directory, as top-level packages.
try:
    from Oblig3.packages.preprocess import load_raw_data, filter_raw_data, pad
    from Oblig3.packages.preprocess import OurCONLLUDataset
    from Oblig3.packages.model import Transformer
except ImportError:
    # Fix: this was a bare `except:`, which also swallowed SystemExit and
    # KeyboardInterrupt; only a failed import should trigger the fallback.
    from packages.preprocess import load_raw_data, filter_raw_data, pad
    from packages.preprocess import OurCONLLUDataset
    from packages.model import Transformer

from sklearn.model_selection import train_test_split
from torch.utils.data import DataLoader
from transformers import BertTokenizer
import torch

# first step
# datapath = '/cluster/projects/nn9851k/IN5550/norne-nb-in5550-train.conllu'
# NORBERT = '/cluster/shared/nlpl/data/vectors/latest/216'
datapath = 'Oblig3/saga/norne-nb-in5550-train.conllu'
NORBERT = 'Oblig3/saga/216/'

device = "cuda" if torch.cuda.is_available() else "cpu"
# Free any cached GPU memory before loading data and model.
# (Was an `X if cond else None` expression-statement; a plain `if` is clearer.)
if torch.cuda.is_available():
    torch.cuda.empty_cache()

# loading raw data
con_df = load_raw_data(datapath=datapath)
con_df = filter_raw_data(df=con_df, min_entities=5)

# splitting data
train_df, val_df = train_test_split(
    con_df,
    # train_size=0.50,
    test_size=0.25,
    random_state=1,
    shuffle=True,
)

tokenizer = BertTokenizer.from_pretrained(NORBERT)

# creating data sets
train_dataset = OurCONLLUDataset(
    df=train_df,
    tokenizer=tokenizer,
    device=device
)
val_dataset = OurCONLLUDataset(
    df=val_df,
    tokenizer=tokenizer,
    label_vocab=train_dataset.label_vocab,
    device=device
)

# creating data loaders; batches are padded with the dataset's IGNORE_ID
train_loader = DataLoader(
    train_dataset,
    batch_size=32,
    collate_fn=lambda batch: pad(batch, train_dataset.IGNORE_ID)
)
val_loader = DataLoader(
    val_dataset,
    batch_size=len(val_dataset),
    collate_fn=lambda batch: pad(batch, train_dataset.IGNORE_ID)
)

# calling transformer model
transformer = Transformer(
    NORBERT=NORBERT,
    num_labels=len(train_dataset.label_indexer),
    NOT_ENTITY_ID=train_dataset.label_indexer['O'],
    device=device,
    epochs=100,  # 12 for the optimal
    lr_scheduler=False,
    factor=0.1,
    patience=2,
    loss_funct='cross-entropy',
    random_state=1,
    verbose=True,
    lr=0.01,
    momentum=0.9,
    epoch_patience=1,  # 0 for the optimal
    label_indexer=train_dataset.label_indexer
)
transformer.fit(
    loader=train_loader,
    test=val_loader,
    verbose=True
)

torch.save(transformer, "transformer_benchmark_12ep.pt")
| 24.480769
| 78
| 0.749411
| 350
| 2,546
| 5.254286
| 0.391429
| 0.052202
| 0.052202
| 0.039152
| 0.23056
| 0.174008
| 0.112017
| 0.112017
| 0.112017
| 0.112017
| 0
| 0.025593
| 0.155931
| 2,546
| 103
| 79
| 24.718447
| 0.830154
| 0.180283
| 0
| 0.115942
| 0
| 0
| 0.051232
| 0.033349
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.144928
| 0
| 0.144928
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d1acd1c8212f19c55510b4dd8c3544bf2548519
| 11,176
|
py
|
Python
|
test/test_box/test_box_storage.py
|
cmc333333/parsons
|
50804a3627117797570f1e9233c9bbad583f7831
|
[
"Apache-2.0"
] | null | null | null |
test/test_box/test_box_storage.py
|
cmc333333/parsons
|
50804a3627117797570f1e9233c9bbad583f7831
|
[
"Apache-2.0"
] | 2
|
2021-11-24T19:39:57.000Z
|
2022-01-03T23:03:35.000Z
|
test/test_box/test_box_storage.py
|
cmc333333/parsons
|
50804a3627117797570f1e9233c9bbad583f7831
|
[
"Apache-2.0"
] | null | null | null |
import logging
import os
import random
import string
import unittest
import warnings
from boxsdk.exception import BoxAPIException, BoxOAuthException
from parsons.box import Box
from parsons.etl import Table
"""Prior to running, you should ensure that the relevant environment
variables have been set, e.g. via
# Note: these are fake keys, provided as examples.
export BOX_CLIENT_ID=txqedp4rqi0cz5qckz361fziavdtdwxz
export BOX_CLIENT_SECRET=bk264KHMDLVy89TeuUpSRa4CN5o35u9h
export BOX_ACCESS_TOKEN=boK97B39m3ozIGyTcazbWRbi5F2SSZ5J
"""
# Live-test credentials, read from the environment (see the module docstring
# for the expected export commands); None when not set.
TEST_CLIENT_ID = os.getenv('BOX_CLIENT_ID')
TEST_BOX_CLIENT_SECRET = os.getenv('BOX_CLIENT_SECRET')
TEST_ACCESS_TOKEN = os.getenv('BOX_ACCESS_TOKEN')
def generate_random_string(length):
    """Utility to generate a random alphabetic string of *length* characters,
    used for throwaway file/folder names."""
    chars = [random.choice(string.ascii_letters) for _ in range(length)]
    return ''.join(chars)
@unittest.skipIf(not os.getenv('LIVE_TEST'), 'Skipping because not running live test')
class TestBoxStorage(unittest.TestCase):
    """Live integration tests for ``parsons.box.Box``.

    Requires the BOX_* credentials (see module docstring) plus the
    LIVE_TEST environment variable.  Every test works inside a randomly
    named temp folder created in setUp and removed in tearDown, so
    parallel runs do not collide.
    """

    def setUp(self) -> None:
        warnings.filterwarnings(action="ignore", message="unclosed", category=ResourceWarning)
        # Create a client that we'll use to manipulate things behind the scenes
        self.client = Box()
        # Create test folder that we'll use for all our manipulations
        self.temp_folder_name = generate_random_string(24)
        logging.info(f'Creating temp folder {self.temp_folder_name}')
        self.temp_folder_id = self.client.create_folder(self.temp_folder_name)

    def tearDown(self) -> None:
        logging.info(f'Deleting temp folder {self.temp_folder_name}')
        self.client.delete_folder_by_id(self.temp_folder_id)

    def test_list_files_by_id(self) -> None:
        """Files/folders created by id are listed; deletes are reflected."""
        # Count on environment variables being set
        box = Box()
        subfolder = box.create_folder_by_id(folder_name='id_subfolder',
                                            parent_folder_id=self.temp_folder_id)
        # Create a couple of files in the temp folder
        table = Table([['phone_number', 'last_name', 'first_name'],
                       ['4435705355', 'Warren', 'Elizabeth'],
                       ['5126993336', 'Obama', 'Barack']])
        box.upload_table_to_folder_id(table, 'temp1', folder_id=subfolder)
        box.upload_table_to_folder_id(table, 'temp2', folder_id=subfolder)
        box.create_folder_by_id(folder_name='temp_folder1', parent_folder_id=subfolder)
        box.create_folder_by_id(folder_name='temp_folder2', parent_folder_id=subfolder)
        file_list = box.list_files_by_id(folder_id=subfolder)
        self.assertEqual(['temp1', 'temp2'], file_list['name'])
        # Check that if we delete a file, it's no longer there
        for box_file in file_list:
            if box_file['name'] == 'temp1':
                box.delete_file_by_id(box_file['id'])
                break
        file_list = box.list_files_by_id(folder_id=subfolder)
        self.assertEqual(['temp2'], file_list['name'])
        folder_list = box.list_folders_by_id(folder_id=subfolder)['name']
        self.assertEqual(['temp_folder1', 'temp_folder2'], folder_list)

    def test_list_files_by_path(self) -> None:
        """Same listing/deleting behavior when addressing items by path."""
        # Count on environment variables being set
        box = Box()
        # Make sure our test folder is in the right place
        found_default = False
        for item in box.list():
            if item['name'] == self.temp_folder_name:
                found_default = True
                break
        self.assertTrue(found_default,
                        f'Failed to find test folder f{self.temp_folder_name} '
                        f'in default Box folder')
        subfolder_name = 'path_subfolder'
        subfolder_path = f'{self.temp_folder_name}/{subfolder_name}'
        box.create_folder(path=subfolder_path)
        # Create a couple of files in the temp folder
        table = Table([['phone_number', 'last_name', 'first_name'],
                       ['4435705355', 'Warren', 'Elizabeth'],
                       ['5126993336', 'Obama', 'Barack']])
        box.upload_table(table, f'{subfolder_path}/temp1')
        box.upload_table(table, f'{subfolder_path}/temp2')
        box.create_folder(f'{subfolder_path}/temp_folder1')
        box.create_folder(f'{subfolder_path}/temp_folder2')
        file_list = box.list(path=subfolder_path, item_type='file')
        self.assertEqual(['temp1', 'temp2'], file_list['name'])
        # Check that if we delete a file, it's no longer there
        for box_file in file_list:
            if box_file['name'] == 'temp1':
                box.delete_file(path=f'{subfolder_path}/temp1')
                break
        file_list = box.list(path=subfolder_path, item_type='file')
        self.assertEqual(['temp2'], file_list['name'])
        folder_list = box.list(path=subfolder_path, item_type='folder')
        self.assertEqual(['temp_folder1', 'temp_folder2'], folder_list['name'])
        # Make sure we can delete by path
        box.delete_folder(f'{subfolder_path}/temp_folder1')
        folder_list = box.list(path=subfolder_path, item_type='folder')
        self.assertEqual(['temp_folder2'], folder_list['name'])

    def test_upload_file(self) -> None:
        """Tables survive an upload/download round trip in csv and json."""
        # Count on environment variables being set
        box = Box()
        table = Table([['phone_number', 'last_name', 'first_name'],
                       ['4435705355', 'Warren', 'Elizabeth'],
                       ['5126993336', 'Obama', 'Barack']])
        box_file = box.upload_table_to_folder_id(table, 'phone_numbers',
                                                 folder_id=self.temp_folder_id)
        new_table = box.get_table_by_file_id(box_file.id)
        # Check that what we saved is equal to what we got back
        self.assertEqual(str(table), str(new_table))
        # Check that things also work in JSON
        box_file = box.upload_table_to_folder_id(table, 'phone_numbers_json',
                                                 folder_id=self.temp_folder_id,
                                                 format='json')
        new_table = box.get_table_by_file_id(box_file.id, format='json')
        # Check that what we saved is equal to what we got back
        self.assertEqual(str(table), str(new_table))
        # Now check the same thing with paths instead of file_id
        path_filename = 'path_phone_numbers'
        box_file = box.upload_table(table, f'{self.temp_folder_name}/{path_filename}')
        new_table = box.get_table(path=f'{self.temp_folder_name}/{path_filename}')
        # Bug fix: the path-based round trip fetched the table but never
        # verified it — assert equality like the id-based checks above.
        self.assertEqual(str(table), str(new_table))
        # Check that we throw an exception with bad formats
        with self.assertRaises(ValueError):
            box.upload_table_to_folder_id(table, 'phone_numbers', format='illegal_format')
        with self.assertRaises(ValueError):
            box.get_table_by_file_id(box_file.id, format='illegal_format')

    def test_download_file(self) -> None:
        """download_file returns a local file whose bytes match the upload."""
        box = Box()
        table = Table([['phone_number', 'last_name', 'first_name'],
                       ['4435705355', 'Warren', 'Elizabeth'],
                       ['5126993336', 'Obama', 'Barack']])
        uploaded_file = table.to_csv()
        path_filename = f'{self.temp_folder_name}/my_path'
        box.upload_table(table, path_filename)
        downloaded_file = box.download_file(path_filename)
        with open(uploaded_file) as uploaded, open(downloaded_file) as downloaded:
            self.assertEqual(str(uploaded.read()), str(downloaded.read()))

    def test_get_item_id(self) -> None:
        """get_item_id resolves paths to ids and rejects malformed paths."""
        # Count on environment variables being set
        box = Box()
        # Create a subfolder in which we'll do this test
        sub_sub_folder_name = 'item_subfolder'
        sub_sub_folder_id = box.create_folder_by_id(folder_name=sub_sub_folder_name,
                                                    parent_folder_id=self.temp_folder_id)
        table = Table([['phone_number', 'last_name', 'first_name'],
                       ['4435705355', 'Warren', 'Elizabeth'],
                       ['5126993336', 'Obama', 'Barack']])
        # This first upload only exists to later exercise the
        # "file in the middle of a path" error case below.
        box_file = box.upload_table_to_folder_id(table, 'file_in_subfolder',
                                                 folder_id=self.temp_folder_id)
        box_file = box.upload_table_to_folder_id(table, 'phone_numbers',
                                                 folder_id=sub_sub_folder_id)
        # Now try getting various ids
        file_path = f'{self.temp_folder_name}/item_subfolder/phone_numbers'
        self.assertEqual(box_file.id, box.get_item_id(path=file_path))
        file_path = f'{self.temp_folder_name}/item_subfolder'
        self.assertEqual(sub_sub_folder_id, box.get_item_id(path=file_path))
        file_path = self.temp_folder_name
        self.assertEqual(self.temp_folder_id, box.get_item_id(path=file_path))
        # Trailing "/"
        with self.assertRaises(ValueError):
            file_path = f'{self.temp_folder_name}/item_subfolder/phone_numbers/'
            box.get_item_id(path=file_path)
        # Nonexistent file
        with self.assertRaises(ValueError):
            file_path = f'{self.temp_folder_name}/item_subfolder/nonexistent/phone_numbers'
            box.get_item_id(path=file_path)
        # File (rather than folder) in middle of path
        with self.assertRaises(ValueError):
            file_path = f'{self.temp_folder_name}/file_in_subfolder/phone_numbers'
            box.get_item_id(path=file_path)

    def test_errors(self) -> None:
        """Bad formats raise ValueError; bad ids/credentials raise Box errors."""
        # Count on environment variables being set
        box = Box()
        nonexistent_id = '9999999'
        table = Table([['phone_number', 'last_name', 'first_name'],
                       ['4435705355', 'Warren', 'Elizabeth'],
                       ['5126993336', 'Obama', 'Barack']])
        # Upload a bad format
        with self.assertRaises(ValueError):
            box.upload_table_to_folder_id(table, 'temp1', format='bad_format')
        # Download a bad format
        with self.assertRaises(ValueError):
            box.get_table_by_file_id(file_id=nonexistent_id, format='bad_format')
        # Upload to non-existent folder
        with self.assertLogs(level=logging.WARNING):
            with self.assertRaises(BoxAPIException):
                box.upload_table_to_folder_id(table, 'temp1', folder_id=nonexistent_id)
        # Download a non-existent file
        with self.assertLogs(level=logging.WARNING):
            with self.assertRaises(BoxAPIException):
                box.get_table_by_file_id(nonexistent_id, format='json')
        # Create folder in non-existent parent
        with self.assertRaises(ValueError):
            box.create_folder('nonexistent_path/path')
        # Create folder in non-existent parent
        with self.assertLogs(level=logging.WARNING):
            with self.assertRaises(BoxAPIException):
                box.create_folder_by_id(folder_name='subfolder', parent_folder_id=nonexistent_id)
        # Try using bad credentials
        box = Box(access_token='5345345345')
        with self.assertLogs(level=logging.WARNING):
            with self.assertRaises(BoxOAuthException):
                box.list_files_by_id()
| 42.656489
| 97
| 0.642895
| 1,399
| 11,176
| 4.8599
| 0.154396
| 0.041183
| 0.049419
| 0.042359
| 0.621121
| 0.569348
| 0.548022
| 0.490955
| 0.460656
| 0.434917
| 0
| 0.022583
| 0.2551
| 11,176
| 261
| 98
| 42.819923
| 0.794114
| 0.113636
| 0
| 0.411043
| 0
| 0
| 0.182209
| 0.06863
| 0
| 0
| 0
| 0
| 0.184049
| 1
| 0.055215
| false
| 0
| 0.055215
| 0
| 0.122699
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d1b66ad840bf7a208b29ea852c07fe8f18d11de
| 3,961
|
py
|
Python
|
Task2.py
|
sahil7pathak/Image_Segmentation_and_Point_Detection
|
7cf00f1c0a10ee0384eba7cbbb17f0779642cfa3
|
[
"MIT"
] | null | null | null |
Task2.py
|
sahil7pathak/Image_Segmentation_and_Point_Detection
|
7cf00f1c0a10ee0384eba7cbbb17f0779642cfa3
|
[
"MIT"
] | null | null | null |
Task2.py
|
sahil7pathak/Image_Segmentation_and_Point_Detection
|
7cf00f1c0a10ee0384eba7cbbb17f0779642cfa3
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
import numpy as np
import cv2
'''Erosion Method'''
def erosion(image, kernel):
    """Binary erosion of ``image`` by the structuring element ``kernel``.

    A result pixel is 1 only when the kernel-sized neighbourhood centred
    on it is exactly equal to ``kernel``; border pixels that the kernel
    cannot fully cover remain 0.

    Args:
        image: 2-D array of 0/1 values.
        kernel: 2-D structuring element of 0/1 values (odd dimensions
            expected so it has a well-defined centre).

    Returns:
        2-D numpy int array with the same shape as ``image``.
    """
    img_height = image.shape[0]
    img_width = image.shape[1]
    kernel_height = kernel.shape[0]
    kernel_width = kernel.shape[1]
    h = kernel_height // 2
    w = kernel_width // 2
    # np.zeros replaces the nested list-comprehension + np.array round trip;
    # the explicit "else: res[i][j] = 0" branch was redundant with zero init.
    res = np.zeros((img_height, img_width), dtype=int)
    for i in range(h, img_height - h):
        for j in range(w, img_width - w):
            window = np.array(image[(i - h):(i - h) + kernel_height,
                                    (j - w):(j - w) + kernel_width])
            if np.array_equal(window, kernel):
                res[i][j] = 1
    return res
'''Point Detection Method'''
def point_detection(image, kernel):
    """Detect isolated points by thresholding a Laplacian response.

    Applies ``cv2.Laplacian`` to the image, correlates every kernel-sized
    window with ``kernel``, and marks pixels whose absolute response sum
    exceeds the threshold ``T``.

    Args:
        image: 2-D grayscale image.
        kernel: point-detection mask (e.g. the 3x3 all -1 / centre 8 mask).

    Returns:
        (res, co_ord): binary response mask (1 where response > T), and
        the (row, col) of the last pixel that exceeded the threshold, or
        ``None`` when no pixel did.
    """
    img_height = image.shape[0]
    img_width = image.shape[1]
    image = cv2.Laplacian(image, cv2.CV_32F)
    kernel_height = kernel.shape[0]
    kernel_width = kernel.shape[1]
    h = kernel_height // 2
    w = kernel_width // 2
    # Threshold chosen to be a value which is 90% of maximum sum value
    T = 8382
    sum_arr = []
    res = np.zeros((img_height, img_width), dtype=int)
    # Bug fix: co_ord was previously unbound (UnboundLocalError at return)
    # when no pixel exceeded T.
    co_ord = None
    for i in range(h, img_height - h):
        for j in range(w, img_width - w):
            window = np.array(image[(i - h):(i - h) + kernel_height,
                                    (j - w):(j - w) + kernel_width])
            out = np.multiply(kernel, window)
            window_sum = np.abs(np.sum(out))  # renamed: avoid shadowing builtin sum
            sum_arr.append(window_sum)
            if window_sum > T:
                co_ord = (i, j)
                res[i][j] = 1
    print("Maximum sum: ", np.max(np.array(sum_arr)))
    return res, co_ord
def check_segment(image):
    """Threshold ``image`` into a binary foreground/background mask.

    Pixels strictly brighter than the threshold become 255 (foreground),
    everything else 0.

    Args:
        image: 2-D grayscale array.

    Returns:
        2-D numpy int array of 0/255 values, same shape as ``image``.
    """
    # Threshold chosen by observing the plotted histogram
    T = 204
    # Vectorised replacement for the original per-pixel double loop.
    return np.where(image > T, 255, 0)
# --- Point detection --------------------------------------------------
# Load the test image in grayscale (flag 0) and run the Laplacian-based
# detector with the standard 8-connected point-detection mask.
img = cv2.imread("point.jpg",0)
sample = img
kernel = np.array([[-1,-1,-1],
                   [-1,8,-1],
                   [-1,-1,-1]])
output, co_ord = point_detection(img, kernel)
# Scale the 0/1 mask to 0/255 so it is visible when written as an image.
output = output*255
output = np.asarray(output, np.uint8)
# Hand-placed white box around the detected point (corners in col,row order).
cv2.rectangle(output,(424,230),(464,272),(255,255,255),2)
cv2.imwrite("res_point.jpg",output)
'''Code for segmenting the object from the background'''
img2 = cv2.imread("segment.jpg", 0)
seg = check_segment(img2)
seg = np.asarray(seg, np.uint8)
# Hand-placed bounding boxes around the four segmented objects.
cv2.rectangle(seg,(155,115),(208,172),(255,255,255),2)
cv2.rectangle(seg,(245,68),(300,223),(255,255,255),2)
cv2.rectangle(seg,(322,13),(370,291),(255,255,255),2)
cv2.rectangle(seg,(382,33),(430,264),(255,255,255),2)
'''Observed co-ordinates of bounding boxes, in col, row format'''
print("1st box: ")
print("Upper left: (155,115)")
print("Upper right: (208,115)")
print("Bottom left: (155,172)")
print("Bottom right: (208,172)\n")
print("2nd box: ")
print("Upper left: (245,68)")
print("Upper right: (300,68)")
print("Bottom left: (245,223)")
print("Bottom right: (300,223)\n")
print("3rd box: ")
print("Upper left: (322,13)")
print("Upper right: (370,13)")
print("Bottom left: (322,291)")
print("Bottom right: (370,291)\n")
print("4th box: ")
print("Upper left: (382,33)")
print("Upper right: (430,33)")
print("Bottom left: (382,264)")
print("Bottom right: (430,264)")
cv2.imwrite("res_segment.jpg",seg)
'''Plotting Histogram'''
# Build a {gray level: pixel count} histogram of the segmentation input.
my_dict = {}
for i in range(np.unique(img2).shape[0]):
    a = np.unique(img2)[i]
    count = np.sum(img2 == a)
    my_dict[a] = count
# NOTE(review): sorted_by_value is computed but never used below.
sorted_by_value = sorted(my_dict.items(), key=lambda kv: kv[1])
uniq = list(np.unique(img2))
val = list(my_dict.values())
# Plot counts for all gray levels except the darkest one.
plt.plot(uniq[1:],val[1:])
plt.show()
| 30.705426
| 85
| 0.578642
| 621
| 3,961
| 3.613527
| 0.227053
| 0.040553
| 0.026738
| 0.022282
| 0.341355
| 0.335562
| 0.335562
| 0.285205
| 0.285205
| 0.285205
| 0
| 0.092246
| 0.244635
| 3,961
| 128
| 86
| 30.945313
| 0.657754
| 0
| 0
| 0.326923
| 0
| 0
| 0.127811
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028846
| false
| 0
| 0.028846
| 0
| 0.086538
| 0.201923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d213f69d083136ed499e8028606ef1e8d49f01e
| 2,495
|
py
|
Python
|
covid_phylo/src/analysis.py
|
mrubio-chavarria/covidMonitor
|
8d59b17dbff46a781527de181f22b115565e5c2d
|
[
"MIT"
] | 1
|
2021-03-22T17:05:52.000Z
|
2021-03-22T17:05:52.000Z
|
covid_phylo/src/analysis.py
|
mrubio-chavarria/covidMonitor
|
8d59b17dbff46a781527de181f22b115565e5c2d
|
[
"MIT"
] | 6
|
2020-06-06T01:51:21.000Z
|
2022-01-13T02:39:02.000Z
|
covid_phylo/src/analysis.py
|
mrubio-chavarria/covidMonitor
|
8d59b17dbff46a781527de181f22b115565e5c2d
|
[
"MIT"
] | null | null | null |
import align_tools as at
import matplotlib.pyplot as plt
import numpy as np
from collections import Counter
def h(x):
    """Heaviside step function: 1 for positive ``x``, 0 otherwise."""
    return 1 if x > 0 else 0
def get_counter(arr, lower_sat=None, upper_sat=None):
result = {}
for val in arr:
if (upper_sat is None or val < upper_sat) and (lower_sat is None or val > lower_sat):
result[val] = result.get(val, 0) + 1
elif upper_sat is not None and val >= upper_sat:
result[upper_sat] = result.get(upper_sat, 0) + 1
else:
result[lower_sat] = result.get(lower_sat, 0) + 1
return result
def analyse_gaps(num_gaps, collaps_factor=1):
    """Plot how many alignment positions per window contain a gap.

    Args:
        num_gaps: per-position gap counts of the alignment.
        collaps_factor: number of consecutive positions binned into one bar.
    """
    print(get_counter(num_gaps, upper_sat=1))
    # Collapse the counts into a 0/1 "has at least one gap" indicator.
    has_gaps = [h(num_gap) for num_gap in num_gaps]
    # Sum the indicator over consecutive windows of collaps_factor positions.
    num_gaps_collaps = [
        sum(has_gaps[max([collaps_factor * i, 0]):min([collaps_factor * (i + 1), len(has_gaps)])])
        for i in range(int(len(has_gaps) / collaps_factor) + 1)
    ]
    ax = plt.subplot(111)
    # (removed an unused intermediate copy of num_gaps_collaps)
    ax.bar(range(len(num_gaps_collaps)), num_gaps_collaps)
    plt.xlabel('x')
    plt.ylabel('y')
    plt.title('Posiciones con gaps')
    plt.show()
def analyse_changes(num_vars_det, num_vars_all):
    """Plot histograms of distinct nucleotides per alignment position.

    Compares per-position variant counts using only determined bases
    (``num_vars_det``) against counts also including undetermined bases
    (``num_vars_all``); both are saturated into the range [0, 4].
    """
    vars_det_sites = get_counter(num_vars_det, 0, 4)
    vars_all_sites = get_counter(num_vars_all, 0, 4)
    print('only determined')
    print([f'k={k}: {vars_det_sites.get(k, 0)}, {vars_det_sites.get(k, 0) / len(num_vars_det) * 100:.2f}%' for k in vars_det_sites])
    print('also undetermined')
    print([f'k={k}: {vars_all_sites.get(k, 0)}, {vars_all_sites.get(k, 0) / len(num_vars_all) * 100:.2f}%' for k in vars_all_sites])
    x = [n for n in vars_det_sites]
    y = [vars_det_sites.get(n, 0) for n in x]
    # Bug fix: use .get(n, 0) so a count present only among determined
    # bases no longer raises KeyError (matches the handling of y above).
    z = [vars_all_sites.get(n, 0) for n in x]
    ax = plt.subplot(111)
    bar1 = ax.bar(np.array(x) - 0.1, y, width=0.2, color='b', align='center')
    bar2 = ax.bar(np.array(x) + 0.1, z, width=0.2, color='r', align='center')
    ax.legend((bar1[0], bar2[0]), ('Solo bases conocidas', 'Incluyendo bases desconocidas'))
    plt.xlabel('k (saturación en 4)')
    plt.xticks([1, 2, 3, 4])
    plt.ylabel('n_k')
    plt.title('Histograma de nucleotidos distintos por posición')
    plt.show()
def main():
    """Load the 'complete' aligned records and run both analyses."""
    records = at.aligned_records_by_tag("complete")
    num_gaps, num_vars_det, num_vars_all = at.analyse_alignment(records)
    print("done analysis")  # fixed typo: was "done anaylsis"
    analyse_gaps(num_gaps, collaps_factor=300)
    analyse_changes(num_vars_det, num_vars_all)


if __name__ == '__main__':
    main()
| 34.178082
| 161
| 0.658116
| 428
| 2,495
| 3.600467
| 0.266355
| 0.049968
| 0.05451
| 0.038936
| 0.26087
| 0.193381
| 0.089552
| 0.044127
| 0
| 0
| 0
| 0.029515
| 0.198798
| 2,495
| 73
| 162
| 34.178082
| 0.741371
| 0
| 0
| 0.105263
| 0
| 0.035088
| 0.159856
| 0.035256
| 0
| 0
| 0
| 0
| 0
| 1
| 0.087719
| false
| 0
| 0.070175
| 0
| 0.210526
| 0.105263
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d21d5ac301b7c2c83e332f0f0cea5a96ae6d81d
| 1,266
|
py
|
Python
|
pygears_vivado/vivmod.py
|
Anari-AI/pygears-vivado
|
a9d928d9914b479739ff8fc1e208813292c4b711
|
[
"MIT"
] | 1
|
2022-03-19T02:11:12.000Z
|
2022-03-19T02:11:12.000Z
|
pygears_vivado/vivmod.py
|
Anari-AI/pygears-vivado
|
a9d928d9914b479739ff8fc1e208813292c4b711
|
[
"MIT"
] | null | null | null |
pygears_vivado/vivmod.py
|
Anari-AI/pygears-vivado
|
a9d928d9914b479739ff8fc1e208813292c4b711
|
[
"MIT"
] | 1
|
2021-06-01T13:21:12.000Z
|
2021-06-01T13:21:12.000Z
|
import os
from pygears.hdl.sv import SVModuleInst
from .ip_resolver import IPResolver
class SVVivModuleInst(SVModuleInst):
    """Module instance backed by a generated Vivado IP.

    Delegates language and HDL resolution for the node to ``IPResolver``.
    """

    def __init__(self, node, lang=None):
        # The resolver decides the actual HDL language of the generated IP;
        # NOTE(review): the ``lang`` argument is accepted but ignored here.
        resolver = IPResolver(node)
        super().__init__(node, resolver.lang, resolver)

    @property
    def is_generated(self):
        # Vivado IP wrappers are always generated artifacts.
        return True

    @property
    def include(self):
        # HDL sources of the packaged IP live under <ipdir>/hdl.
        # NOTE(review): ``self.ipdir`` is provided by the resolver/base
        # class — not visible in this file; confirm its origin.
        return [os.path.join(self.ipdir, 'hdl')]

    def get_wrap_portmap(self, parent_lang):
        """Build ``(port_map, sig_map)`` for wrapping this instance.

        When this module itself is SystemVerilog, ports map 1:1 by name.
        Otherwise the AXI-Stream style ``_tvalid``/``_tready``/``_tdata``
        pins map either onto SV interface members (``name.valid`` ...)
        or plain Verilog nets (``name_valid`` ...), depending on
        ``parent_lang``; any other parent language falls back to 1:1
        port mapping.
        """
        sig_map = {}
        # Pass node-level signals through unchanged.
        for s in self.node.meta_kwds['signals']:
            sig_map[s.name] = s.name
        port_map = {}
        for p in self.node.in_ports + self.node.out_ports:
            name = p.basename
            if self.lang == 'sv':
                port_map[name] = name
            elif parent_lang == 'sv':
                sig_map[f'{name}_tvalid'] = f'{name}.valid'
                sig_map[f'{name}_tready'] = f'{name}.ready'
                sig_map[f'{name}_tdata'] = f'{name}.data'
            elif parent_lang == 'v':
                sig_map[f'{name}_tvalid'] = f'{name}_valid'
                sig_map[f'{name}_tready'] = f'{name}_ready'
                sig_map[f'{name}_tdata'] = f'{name}_data'
            else:
                port_map[name] = name
        return port_map, sig_map
| 30.878049
| 59
| 0.553712
| 162
| 1,266
| 4.080247
| 0.351852
| 0.090772
| 0.06354
| 0.099849
| 0.239032
| 0.239032
| 0.239032
| 0.239032
| 0.239032
| 0.239032
| 0
| 0
| 0.316746
| 1,266
| 40
| 60
| 31.65
| 0.764162
| 0
| 0
| 0.121212
| 0
| 0
| 0.127172
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.121212
| false
| 0
| 0.090909
| 0.060606
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d24383aba0b77760774f695ed82a4ade6ace738
| 1,841
|
py
|
Python
|
commodore/inventory/render.py
|
projectsyn/commodore
|
afd924a2aa8abb79cd6a8970ff225756469dd2b3
|
[
"BSD-3-Clause"
] | 39
|
2019-12-17T13:40:19.000Z
|
2021-12-31T08:22:52.000Z
|
commodore/inventory/render.py
|
projectsyn/commodore
|
afd924a2aa8abb79cd6a8970ff225756469dd2b3
|
[
"BSD-3-Clause"
] | 161
|
2020-02-14T18:32:49.000Z
|
2022-03-25T09:23:35.000Z
|
commodore/inventory/render.py
|
projectsyn/commodore
|
afd924a2aa8abb79cd6a8970ff225756469dd2b3
|
[
"BSD-3-Clause"
] | 12
|
2019-12-18T15:43:09.000Z
|
2021-06-28T11:51:59.000Z
|
import shutil
import tempfile
from pathlib import Path
from typing import Dict
import click
from commodore.config import Config
from .parameters import ClassNotFound, InventoryFactory, InventoryFacts
def _cleanup_work_dir(cfg: Config, work_dir: Path):
    """Remove the reclass work directory unless debug mode wants it kept."""
    if cfg.debug:
        # In debug mode keep the directory around for inspection.
        return
    shutil.rmtree(work_dir)
def extract_components(
    cfg: Config, invfacts: InventoryFacts
) -> Dict[str, Dict[str, str]]:
    """Render the reclass inventory and return its ``components`` parameter.

    Builds a temporary reclass working directory from the given global
    (and optional tenant) config directories, renders the inventory, and
    extracts the resulting components mapping.  The work dir is cleaned
    up on every exit path (unless ``cfg.debug`` keeps it).

    Args:
        cfg: Commodore configuration; ``debug`` enables echo and keeps
            the temp work dir.
        invfacts: facts describing which configs/classes to render.

    Returns:
        Mapping of component name to its parameters.

    Raises:
        NotImplementedError: when the config locations are not existing
            local directories (cloning repos is not implemented yet).
        ValueError: when a required class is missing and missing classes
            are not allowed.
    """
    if cfg.debug:
        click.echo(
            f"Called with: global_config={invfacts.global_config} "
            + f"tenant_config={invfacts.tenant_config} "
            + f"extra_classes={invfacts.extra_classes} "
            + f"allow_missing_classes={invfacts.allow_missing_classes}."
        )
    global_dir = Path(invfacts.global_config).resolve().absolute()
    tenant_dir = None
    if invfacts.tenant_config:
        tenant_dir = Path(invfacts.tenant_config).resolve().absolute()
    # Scratch directory for reclass rendering.
    work_dir = Path(tempfile.mkdtemp(prefix="commodore-reclass-")).resolve()
    if global_dir.is_dir() and (not tenant_dir or tenant_dir.is_dir()):
        invfactory = InventoryFactory.from_repo_dirs(
            work_dir, global_dir, tenant_dir, invfacts
        )
    else:
        # Clean up before raising so the temp dir is not leaked.
        _cleanup_work_dir(cfg, work_dir)
        raise NotImplementedError("Cloning global or tenant repo not yet implemented")
    try:
        inv = invfactory.reclass(invfacts)
        components = inv.parameters("components")
    except ClassNotFound as e:
        _cleanup_work_dir(cfg, work_dir)
        raise ValueError(
            "Unable to render inventory with `--no-allow-missing-classes`. "
            + f"Class '{e.name}' not found. "
            + "Verify the provided values or allow missing classes."
        ) from e
    _cleanup_work_dir(cfg, work_dir)
    return components
| 30.683333
| 86
| 0.674633
| 227
| 1,841
| 5.273128
| 0.356828
| 0.070175
| 0.046784
| 0.056809
| 0.070175
| 0.070175
| 0.070175
| 0
| 0
| 0
| 0
| 0
| 0.233569
| 1,841
| 59
| 87
| 31.20339
| 0.848335
| 0.0239
| 0
| 0.068182
| 0
| 0
| 0.22507
| 0.110306
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.159091
| 0
| 0.227273
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d29d50d0c950b859290e95b7cb057e02fb60ee8
| 4,045
|
py
|
Python
|
profit/models/torch/vae.py
|
ayushkarnawat/profit
|
f3c4d601078b52513af6832c3faf75ddafc59ac5
|
[
"MIT"
] | null | null | null |
profit/models/torch/vae.py
|
ayushkarnawat/profit
|
f3c4d601078b52513af6832c3faf75ddafc59ac5
|
[
"MIT"
] | 1
|
2021-09-15T13:13:12.000Z
|
2021-09-15T13:13:12.000Z
|
profit/models/torch/vae.py
|
ayushkarnawat/profit
|
f3c4d601078b52513af6832c3faf75ddafc59ac5
|
[
"MIT"
] | null | null | null |
"""Variational autoencoder model."""
from typing import Tuple
import torch
from torch import nn
from torch.nn import functional as F
class BaseVAE(nn.Module):
    """Base class for creating variational autoencoders (VAEs).

    The module is designed to connect user-specified encoder/decoder
    layers to form a latent space representation of the data.
    A general overview of the model can be described by:
    https://lilianweng.github.io/lil-log/2018/08/12/from-autoencoder-to-beta-vae.html
    """

    def __init__(self) -> None:
        super(BaseVAE, self).__init__()

    def encode(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Builds the encoded representation of the input.

        The encoded model outputs the mean and logvar of the latent
        space embeddings/distribution, or in more mathematical terms,
        :math:: `q(z|x) = \\mathcal{N}(z| \\mu(x), \\sigma(x))`
        """
        raise NotImplementedError

    def reparameterize(self, mu: torch.Tensor, logvar: torch.Tensor) -> torch.Tensor:
        """Reparamaterization trick.

        Computes the latent vector (`z`), which is a compressed low-dim
        representation of the input.
        This trick allows us to express the gradient of the expectation
        as the expectation of the gradient [1]. Additionally, it makes
        the variance of the estimate an order of magnitude lower than
        without using it. This allows us to compute the gradient during
        the backward pass more accurately, with better estimates [2].

        References:
        -----------
        -[1] https://gregorygundersen.com/blog/2018/04/29/reparameterization/
        -[2] https://stats.stackexchange.com/a/226136
        """
        std = torch.exp(0.5*logvar)
        # eps=N(0,I), where the I is an identity matrix of same size as std
        eps = torch.randn_like(std)
        return mu + std*eps

    def decode(self, z: torch.Tensor) -> torch.Tensor:
        """Decodes the sampled latent vector (`z`) into the reconstructed
        output (`x'`).

        Ideally, the reconstructed output (`x'`) is identical to the
        original input (`x`).
        """
        raise NotImplementedError

    def forward(
        self, x: torch.Tensor
    ) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor, torch.Tensor]:
        # Annotation corrected: four values are returned, not three —
        # (reconstruction, mu, logvar, z).
        mu, logvar = self.encode(x)
        z = self.reparameterize(mu, logvar)
        return self.decode(z), mu, logvar, z
class SequenceVAE(BaseVAE):
    """CbAS VAE model for (one-hot) encoded sequences.

    Fully-connected encoder/decoder operating on flattened
    ``(seqlen * vocab_size)`` one-hot sequences.
    """

    def __init__(self,
                 seqlen: int,
                 vocab_size: int,
                 hidden_size: int = 64,
                 latent_size: int = 20) -> None:
        super(SequenceVAE, self).__init__()
        # sequence length (number of positions)
        self.seqlen = seqlen
        # alphabet size per position (one-hot width)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.latent_size = latent_size
        # Probablistic encoder: fc21 emits mu, fc22 emits logvar.
        self.fc1 = nn.Linear(seqlen * vocab_size, hidden_size)
        self.fc21 = nn.Linear(hidden_size, latent_size)
        self.fc22 = nn.Linear(hidden_size, latent_size)
        # Probablistic decoder
        self.fc3 = nn.Linear(latent_size, hidden_size)
        self.fc4 = nn.Linear(hidden_size, seqlen * vocab_size)
        # Reshape occurs here (see self.decode())
        # size is now: (seqlen * vocab_size) -> (seqlen, vocab_size)
        self.fc5 = nn.Linear(vocab_size, vocab_size)

    def encode(self, x: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
        """Return (mu, logvar) of the latent distribution for input x."""
        # Flatten (n, seqlen, vocab_size) -> (n, seqlen * vocab_size)
        x = x.view(x.size(0), -1)
        h1 = F.relu(self.fc1(x))
        return self.fc21(h1), self.fc22(h1)

    def decode(self, z: torch.Tensor) -> torch.Tensor:
        """Decode latent z into per-position vocabulary logits."""
        # Input tensor: Latent vector z = (num_samples, latent_size)
        h3 = F.relu(self.fc3(z))
        h4 = self.fc4(h3)
        reshaped = h4.view(h4.size(0), self.seqlen, self.vocab_size)
        # Return logits since F.cross_entropy computes log_softmax internally
        return self.fc5(reshaped)
| 36.116071
| 90
| 0.634611
| 536
| 4,045
| 4.701493
| 0.365672
| 0.074206
| 0.044444
| 0.061111
| 0.109127
| 0.109127
| 0.086905
| 0.086905
| 0.058333
| 0.04127
| 0
| 0.01994
| 0.256119
| 4,045
| 111
| 91
| 36.441441
| 0.817547
| 0.433622
| 0
| 0.133333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.177778
| false
| 0
| 0.088889
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d2ae38a47c725cb399a9f327008d51a718980eb
| 2,037
|
py
|
Python
|
backend/export/views.py
|
dmryutov/otus-python-0319-final
|
de07f36ee4bbd57dbfb16defaf762b08ec41fb0e
|
[
"Apache-2.0"
] | null | null | null |
backend/export/views.py
|
dmryutov/otus-python-0319-final
|
de07f36ee4bbd57dbfb16defaf762b08ec41fb0e
|
[
"Apache-2.0"
] | 6
|
2020-06-05T23:05:14.000Z
|
2022-02-10T10:42:31.000Z
|
backend/export/views.py
|
dmryutov/otus-python-0319-final
|
de07f36ee4bbd57dbfb16defaf762b08ec41fb0e
|
[
"Apache-2.0"
] | null | null | null |
from django.http.response import HttpResponse
from rest_framework import serializers, viewsets
from rest_framework.decorators import action
from rest_framework.permissions import IsAuthenticated
from .excel import Excel
# MIME type for .xlsx workbooks (OpenXML spreadsheet format).
XLSX_MIME = 'application/vnd.openxmlformats-officedocument.spreadsheetml.sheet'
class ExportViewSet(viewsets.GenericViewSet):
    """DRF viewset exporting analysis results to Excel downloads."""

    serializer_class = serializers.Serializer
    permission_classes = (IsAuthenticated,)

    @staticmethod
    def download_file(file_name, export_func, *args, **kwargs):
        """
        Generate file and send it to client

        Args:
            file_name (str): Excel file name
            export_func (str): Name of the ``Excel`` method to invoke
            args: Export function args
            kwargs: Export function kwargs
        Returns:
            django.http.response.HttpResponse: HTTP response carrying the
            workbook as an attachment
        """
        response = HttpResponse(content_type=XLSX_MIME)
        response['Content-Disposition'] = 'attachment; filename="{}.xlsx"'.format(file_name)
        # Look up the export method by name on a fresh Excel workbook and
        # stream the saved result straight into the response body.
        getattr(Excel(file_name), export_func)(*args, **kwargs).save(response)
        return response

    @action(methods=['post'], detail=False)
    def stl(self, request):
        """
        Export time series decomposition results to Excel file
        """
        self.check_permissions(request)
        data = request.data.get('data', [])
        result = request.data.get('result', {})
        return self.download_file('STL', 'export_stl', data, result)

    @action(methods=['post'], detail=False)
    def forecast(self, request):
        """
        Export time series forecasting results to Excel file
        """
        self.check_permissions(request)
        data = request.data.get('data', [])
        result = request.data.get('result', {})
        # Defaults mirror the frontend's initial state: weekly periods
        # starting 2018-01-01 — NOTE(review): confirm against the client.
        date_start = request.data.get('date_start', '2018-01-01')
        period_type = request.data.get('period_type', 'W')
        return self.download_file('Forecast', 'export_forecast', data, result,
                                  date_start, period_type)
| 32.333333
| 92
| 0.650957
| 220
| 2,037
| 5.9
| 0.359091
| 0.067797
| 0.064715
| 0.041602
| 0.297381
| 0.234206
| 0.143297
| 0.143297
| 0.143297
| 0.143297
| 0
| 0.005175
| 0.241041
| 2,037
| 62
| 93
| 32.854839
| 0.834411
| 0.174276
| 0
| 0.266667
| 0
| 0
| 0.134961
| 0.041774
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.166667
| 0
| 0.466667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d2bec83c642f547afb331d447ae8ff19041fd5a
| 1,111
|
py
|
Python
|
src/tests/tests_get_formatted_items.py
|
kazqvaizer/checklistbot
|
f715280fbe7035bc2ce4f69cbf95595d9fe3a225
|
[
"MIT"
] | 5
|
2020-10-06T13:42:45.000Z
|
2021-12-21T07:35:08.000Z
|
src/tests/tests_get_formatted_items.py
|
kazqvaizer/checklistbot
|
f715280fbe7035bc2ce4f69cbf95595d9fe3a225
|
[
"MIT"
] | null | null | null |
src/tests/tests_get_formatted_items.py
|
kazqvaizer/checklistbot
|
f715280fbe7035bc2ce4f69cbf95595d9fe3a225
|
[
"MIT"
] | null | null | null |
import pytest
from models import TodoItem
# Apply the database fixture to every test in this module.
pytestmark = [
    pytest.mark.usefixtures("use_db"),
]
@pytest.fixture
def chat(factory):
    """A chat instance created via the project's model factory."""
    return factory.chat()
@pytest.fixture
def items(factory, chat):
    """Two todo items ("Hello", "Nice!") attached to the chat fixture."""
    return [
        factory.item(chat=chat, text="Hello"),
        factory.item(chat=chat, text="Nice!"),
    ]
def test_format_without_strike(items, chat):
    """Unchecked items render as a plain numbered list."""
    rendered = chat.get_formatted_items()
    assert rendered.split("\n") == ["1. Hello", "2. Nice!"]
def test_format_with_strike(items, chat):
    """Checked items are wrapped in <s> strike-through tags."""
    first = items[0]
    first.is_checked = True
    first.save()
    rendered = chat.get_formatted_items()
    assert rendered.split("\n") == ["<s>1. Hello</s>", "2. Nice!"]
def test_respect_order_by_id(items, chat):
    """Items are listed in id order, not creation order."""
    # Push the first item's id past the second one's.
    TodoItem.update(id=100500).where(TodoItem.id == items[0].id).execute()
    rendered = chat.get_formatted_items()
    assert rendered.split("\n") == ["1. Nice!", "2. Hello"]
def test_no_items_is_okay(chat):
    """An empty checklist renders as an empty string."""
    formatted = chat.get_formatted_items()
    assert formatted == ""
| 20.574074
| 74
| 0.640864
| 157
| 1,111
| 4.382166
| 0.318471
| 0.040698
| 0.093023
| 0.122093
| 0.388081
| 0.321221
| 0.321221
| 0.321221
| 0.321221
| 0.234012
| 0
| 0.026786
| 0.193519
| 1,111
| 53
| 75
| 20.962264
| 0.741071
| 0
| 0
| 0.294118
| 0
| 0
| 0.069307
| 0
| 0
| 0
| 0
| 0
| 0.294118
| 1
| 0.176471
| false
| 0
| 0.058824
| 0.058824
| 0.294118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d2fec927240532eb03988da6b6277edf3bec73d
| 2,859
|
py
|
Python
|
cart/tests/test_views.py
|
mohsenamoon1160417237/ECommerce-app
|
4cca492214b04b56f625aef2a2979956a8256710
|
[
"MIT"
] | null | null | null |
cart/tests/test_views.py
|
mohsenamoon1160417237/ECommerce-app
|
4cca492214b04b56f625aef2a2979956a8256710
|
[
"MIT"
] | null | null | null |
cart/tests/test_views.py
|
mohsenamoon1160417237/ECommerce-app
|
4cca492214b04b56f625aef2a2979956a8256710
|
[
"MIT"
] | null | null | null |
from django.test import TestCase
from shop.models import Product
from django.contrib.auth.models import User
from coupons.forms import CouponForm
class CartAddViewTest(TestCase):
    """Tests for the add-to-cart view at /cart/add/<product_id>/."""

    def setUp(self):
        # POST payload: quantity to add; update=False means "increment".
        self.data = {"quantity": 2,
                     "update": False}
        self.product = Product.objects.create(name='clothes',
                                              description='clothes',
                                              price=12.00)
        self.product.save()
        # NOTE(review): User.objects.create stores the password unhashed;
        # create_user() would hash it — confirm whether login relies on it.
        self.user = User.objects.create(username='mohsen',
                                        email='dramatic225@gmail.com',
                                        password='mohsen1160417237')
        self.user.save()
        self.url = '/cart/add/{}/'.format(self.product.id)

    def test_get_method_not_allowed(self):
        # The view is POST-only; GET must return 405.
        response = self.client.get(self.url, follow=True)
        self.assertEqual(response.status_code, 405)

    def test_cart_add_user_authenticated(self):
        # A logged-in user is redirected to the cart detail page.
        self.client.force_login(self.user)
        response = self.client.post(self.url, data=self.data, follow=True)
        redirect_url = response.request['PATH_INFO']
        self.assertEqual(response.status_code, 200)
        self.assertEqual(redirect_url, '/cart/detail/')

    def test_cart_add_user_not_authenticated(self):
        # An anonymous user is redirected to the login page instead.
        self.client.logout()
        response = self.client.post(self.url, data=self.data, follow=True)
        redirect_url = response.request['PATH_INFO']
        self.assertEqual(response.status_code, 200)
        self.assertEqual(redirect_url, '/account/login/')
class CartRemoveViewTest(TestCase):
    """Tests for the cart removal view at /cart/remove/<product_id>/."""

    def setUp(self):
        self.product = Product.objects.create(name='clothes',
                                              description='clothes',
                                              price=12.00)
        self.product.save()
        self.url = '/cart/remove/{}/'.format(self.product.id)

    def test_get_method_not_allowed(self):
        # The view is POST-only; GET must return 405.
        response = self.client.get(self.url, follow=True)
        self.assertEqual(response.status_code, 405)

    def test_cart_remove_ok(self):
        # A successful removal redirects back to the cart detail page.
        response = self.client.post(self.url, follow=True)
        redirect_url = response.request['PATH_INFO']
        self.assertEqual(response.status_code, 200)
        self.assertEqual(redirect_url, '/cart/detail/')
class CartDetailViewTest(TestCase):
    """Tests for the cart detail page."""

    def setUp(self):
        # Fixed: the original assigned {''} -- a *set* containing an empty
        # string -- where an (unused) request-payload dict was clearly meant.
        self.data = {}
        self.url = '/cart/detail/'

    def test_cart_detail_ok(self):
        """POSTing to the detail view renders the page successfully."""
        response = self.client.post(self.url)
        self.assertEqual(response.status_code, 200)
| 28.878788
| 77
| 0.550892
| 288
| 2,859
| 5.333333
| 0.256944
| 0.041016
| 0.070313
| 0.113281
| 0.712891
| 0.690755
| 0.608073
| 0.608073
| 0.5625
| 0.5625
| 0
| 0.02139
| 0.345925
| 2,859
| 98
| 78
| 29.173469
| 0.8
| 0
| 0
| 0.5
| 0
| 0
| 0.070627
| 0.007606
| 0
| 0
| 0
| 0
| 0.160714
| 1
| 0.160714
| false
| 0.017857
| 0.071429
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d341997147380f82b39848b173c8f836285f331
| 2,134
|
py
|
Python
|
tests/conftest.py
|
gpontesss/botus_receptus
|
bf29f5f70a2e7ae3548a44287c636515f78e7e77
|
[
"BSD-3-Clause"
] | 3
|
2019-04-15T01:45:46.000Z
|
2020-04-07T13:31:19.000Z
|
tests/conftest.py
|
gpontesss/botus_receptus
|
bf29f5f70a2e7ae3548a44287c636515f78e7e77
|
[
"BSD-3-Clause"
] | 244
|
2020-04-20T22:10:23.000Z
|
2022-03-31T23:03:48.000Z
|
tests/conftest.py
|
gpontesss/botus_receptus
|
bf29f5f70a2e7ae3548a44287c636515f78e7e77
|
[
"BSD-3-Clause"
] | 1
|
2021-11-08T08:52:32.000Z
|
2021-11-08T08:52:32.000Z
|
from __future__ import annotations
import asyncio
from typing import Any
import asynctest.mock # type: ignore
import pytest # type: ignore
import pytest_mock._util # type: ignore
# Route pytest-mock's mock implementation through asynctest so fixtures
# created via ``mocker`` hand out coroutine-aware (async) mocks.
pytest_mock._util._mock_module = asynctest.mock
class EventLoopClockAdvancer:
    """
    Callable helper that fast-forwards an event loop's clock.

    Awaiting an instance advances the loop's notion of time by the given
    number of seconds, yielding briefly before and after the jump so any
    handlers scheduled around the new time get a chance to run.
    """

    __slots__ = ("offset", "loop", "sleep_duration", "_base_time")

    def __init__(self, loop, sleep_duration=1e-4):
        self.loop = loop
        self.sleep_duration = sleep_duration
        self.offset = 0.0
        self._base_time = loop.time
        # Patch the loop so every time() call goes through our offset.
        self.loop.time = self.time

    def time(self):
        """Event-loop time shifted forward by the accumulated offset."""
        return self.offset + self._base_time()

    async def __call__(self, seconds):
        """Advance the clock by ``seconds``; await to let handlers wake."""
        # Let everything already runnable proceed first.
        await asyncio.sleep(self.sleep_duration)
        if seconds <= 0:
            return
        self.offset += seconds
        # Yield once more so tasks triggered by the jump can start.
        await asyncio.sleep(self.sleep_duration)
@pytest.fixture
def advance_time(event_loop):
    """Fixture: callable that fast-forwards ``event_loop``'s clock."""
    return EventLoopClockAdvancer(event_loop)
@pytest.fixture
def mock_aiohttp(mocker: Any) -> None:
    """Fixture: replace aiohttp.ClientSession with an autospec mock."""
    mocker.patch('aiohttp.ClientSession', autospec=True)
@pytest.fixture
def mock_discord_bot(mocker: Any) -> None:
    """Fixture: stub out the discord.py Bot class."""
    mocker.patch('discord.ext.commands.Bot')
@pytest.fixture(autouse=True)
def add_async_mocks(mocker: Any) -> None:
    """Expose asynctest's CoroutineMock on every ``mocker`` instance."""
    mocker.CoroutineMock = mocker.mock_module.CoroutineMock
| 28.837838
| 79
| 0.680412
| 281
| 2,134
| 5.014235
| 0.395018
| 0.038325
| 0.034067
| 0.040454
| 0.082328
| 0.048261
| 0
| 0
| 0
| 0
| 0
| 0.003102
| 0.244611
| 2,134
| 73
| 80
| 29.232877
| 0.870968
| 0.266167
| 0
| 0.147059
| 0
| 0
| 0.060305
| 0.034351
| 0
| 0
| 0
| 0
| 0
| 1
| 0.176471
| false
| 0
| 0.176471
| 0.029412
| 0.470588
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d352ba96be56207cce46e2dc458765a09de6f97
| 1,247
|
py
|
Python
|
Shark_Training/pyimagesearch/preprocessing/meanpreprocessor.py
|
crpurcell/MQ_DPI_Release
|
97444513e8b8d48ec91ff8a43b9dfaed0da029f9
|
[
"MIT"
] | null | null | null |
Shark_Training/pyimagesearch/preprocessing/meanpreprocessor.py
|
crpurcell/MQ_DPI_Release
|
97444513e8b8d48ec91ff8a43b9dfaed0da029f9
|
[
"MIT"
] | null | null | null |
Shark_Training/pyimagesearch/preprocessing/meanpreprocessor.py
|
crpurcell/MQ_DPI_Release
|
97444513e8b8d48ec91ff8a43b9dfaed0da029f9
|
[
"MIT"
] | null | null | null |
#=============================================================================#
# #
# MODIFIED: 15-Jan-2019 by C. Purcell #
# #
#=============================================================================#
import cv2
#-----------------------------------------------------------------------------#
class MeanPreprocessor:
    """Subtracts fixed per-channel colour means from an image."""

    def __init__(self, rMean, gMean, bMean, rgbOrder=True):
        # Per-channel means to remove; rgbOrder selects the channel layout.
        self.rMean = rMean
        self.gMean = gMean
        self.bMean = bMean
        self.rgbOrder = rgbOrder

    def preprocess(self, image):
        """Return ``image`` as float32 with the channel means subtracted."""
        channels = cv2.split(image.astype("float32"))
        if self.rgbOrder:
            red, green, blue = channels
        else:
            blue, green, red = channels
        red -= self.rMean
        green -= self.gMean
        blue -= self.bMean
        # Re-merge in the same channel order the image arrived with.
        ordered = [red, green, blue] if self.rgbOrder else [blue, green, red]
        return cv2.merge(ordered)
| 35.628571
| 79
| 0.36648
| 103
| 1,247
| 4.398058
| 0.436893
| 0.059603
| 0.06181
| 0.083885
| 0.11479
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019157
| 0.372093
| 1,247
| 34
| 80
| 36.676471
| 0.559387
| 0.481957
| 0
| 0.210526
| 0
| 0
| 0.022187
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.052632
| 0
| 0.315789
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d36012ec39c8b5de0335c08778adaf22f20af3c
| 985
|
py
|
Python
|
aiida_quantumespresso/parsers/constants.py
|
unkcpz/aiida-quantumespresso
|
fbac0993bb8b6cdeba85717453debcf0ab062b5a
|
[
"MIT"
] | null | null | null |
aiida_quantumespresso/parsers/constants.py
|
unkcpz/aiida-quantumespresso
|
fbac0993bb8b6cdeba85717453debcf0ab062b5a
|
[
"MIT"
] | null | null | null |
aiida_quantumespresso/parsers/constants.py
|
unkcpz/aiida-quantumespresso
|
fbac0993bb8b6cdeba85717453debcf0ab062b5a
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Physical or mathematical constants.
Since every code has its own conversion units, this module defines what
QE understands as for an eV or other quantities.
Whenever possible, we try to use the constants defined in
:py:mod:aiida.common.constants:, but if some constants are slightly different
among different codes (e.g., different standard definition), we define
the constants in this file.
"""
from aiida.common.constants import (
ang_to_m,
bohr_si,
bohr_to_ang,
hartree_to_ev,
invcm_to_THz,
ry_si,
ry_to_ev,
timeau_to_sec,
)
# From the definition of Quantum ESPRESSO, conversion from atomic mass
# units to Rydberg units:
#   REAL(DP), PARAMETER :: AMU_SI = 1.660538782E-27_DP ! Kg
#   REAL(DP), PARAMETER :: ELECTRONMASS_SI = 9.10938215E-31_DP ! Kg
#   REAL(DP), PARAMETER :: AMU_AU = AMU_SI / ELECTRONMASS_SI
#   REAL(DP), PARAMETER :: AMU_RY = AMU_AU / 2.0_DP
# One atomic mass unit expressed in Rydberg atomic units (AMU_AU / 2).
amu_Ry = 911.4442421323
| 31.774194
| 77
| 0.700508
| 149
| 985
| 4.463087
| 0.597315
| 0.03609
| 0.090226
| 0.081203
| 0.057143
| 0
| 0
| 0
| 0
| 0
| 0
| 0.050129
| 0.210152
| 985
| 30
| 78
| 32.833333
| 0.804627
| 0.783756
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.090909
| 0
| 0.090909
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d3f8941dd6434ce1537415533cd51f289916f52
| 5,554
|
py
|
Python
|
configstruct/config_struct.py
|
bradrf/configstruct
|
aeea8fbba1e2daa0a0c38eeb9622d1716c0bb3e8
|
[
"MIT"
] | null | null | null |
configstruct/config_struct.py
|
bradrf/configstruct
|
aeea8fbba1e2daa0a0c38eeb9622d1716c0bb3e8
|
[
"MIT"
] | 16
|
2016-10-13T09:53:46.000Z
|
2022-03-24T15:04:51.000Z
|
configstruct/config_struct.py
|
bradrf/configstruct
|
aeea8fbba1e2daa0a0c38eeb9622d1716c0bb3e8
|
[
"MIT"
] | null | null | null |
import os
import sys
import logging
from configparser import ConfigParser
from .open_struct import OpenStruct
from .section_struct import SectionStruct
# TODO: use file lock when read/write
def choose_theirs(section, option, mine, theirs):
    '''Conflict resolver that always keeps the on-disk (file) value.'''
    return theirs
def choose_mine(section, option, mine, theirs):
    '''Conflict resolver that always keeps the in-memory value.'''
    return mine
# Ordered most-verbose first; list position is used to derive the "one level
# stricter" threshold applied to other modules' logging.
LOG_LEVELS = ['debug-all', 'debug', 'info', 'warning', 'error', 'critical']
# Defaults installed under the log-options parent section when enabled.
LOG_OPTIONS = {'log_level': 'info', 'log_file': 'STDERR'}
class OtherLoggingFilter(logging.Filter):
    '''Quell logs from other modules using a different minimum level.

    Records from the whitelisted module always pass; records from any other
    logger pass only when at/above ``minimum_other_level``.
    '''

    def __init__(self, whitelisted_module, minimum_other_level):
        # Fixed: use zero-argument super() -- the original
        # super(self.__class__, self) recurses infinitely if this class is
        # ever subclassed.
        super().__init__(whitelisted_module)
        self._minimum_other_level = minimum_other_level

    def filter(self, record):
        rc = super().filter(record)
        if rc != 0:
            return rc  # matched the whitelisted module
        return record.levelno >= self._minimum_other_level
class ConfigStruct(OpenStruct):
    '''Provides simplified access for managing typed configuration options saved in a file.

    :param config_file: path to file that should house configuration items.
    :param log_options_parent: option key to use if this instance is expected to use the
        `LOG_OPTIONS` default values and allow configuration of basic logging
    :param sections_defaults: options that are provided as defaults (will be overridden by any
        options read from the `config_file`)
    '''

    def __init__(self, config_file, log_options_parent=None, **sections_defaults):
        super(ConfigStruct, self).__init__()
        self._config_file = config_file
        self._log_options_parent = log_options_parent
        if log_options_parent:
            # Layer caller-provided options for the parent section on top of
            # the stock LOG_OPTIONS defaults.
            parent_options = sections_defaults.get(log_options_parent, {})
            sections_defaults[log_options_parent] = LOG_OPTIONS.copy()
            sections_defaults[log_options_parent].update(parent_options)
        for (name, items) in sections_defaults.items():
            self[name] = SectionStruct(name, **items)
        self._load(choose_theirs)  # because above were basic defaults for the keys

    def configure_basic_logging(self, main_module_name, **kwargs):
        '''Use common logging options to configure all logging.

        Basic logging configuration is used to set levels for all logs from the main module
        and to filter out logs from other modules unless they are of one level in priority
        higher.

        :param main_module_name: name of the primary module for normal logging
        :raises ValueError: when no log_options_parent was configured
        '''
        if not self._log_options_parent:
            raise ValueError('Missing log_options_parent')
        options = self[self._log_options_parent]
        log_level_index = LOG_LEVELS.index(options.log_level)
        log_kwargs = {
            'level': getattr(logging, options.log_level.upper()),
            'format': '[%(asctime)s #%(process)d] %(levelname)-8s %(name)-12s %(message)s',
            'datefmt': '%Y-%m-%dT%H:%M:%S%z',
        }
        if options.log_file == 'STDERR':
            log_kwargs['stream'] = sys.stderr
        elif options.log_file == 'STDOUT':
            log_kwargs['stream'] = sys.stdout
        else:
            log_kwargs['filename'] = options.log_file
        log_kwargs.update(kwargs)  # allow overrides from caller
        logging.basicConfig(**log_kwargs)
        # now filter out any other module's logging unless it's one level above the main
        other_log_level = getattr(logging, LOG_LEVELS[log_level_index + 1].upper())
        other_filter = OtherLoggingFilter(main_module_name, other_log_level)
        for handler in logging.root.handlers:
            handler.addFilter(other_filter)

    def save(self, conflict_resolver=choose_mine):
        '''Save all options in memory to the `config_file`.

        Options are read once more from the file (to allow other writers to save
        configuration), keys in conflict are resolved, and the final results are written
        back to the file.

        :param conflict_resolver: a simple lambda or function to choose when an option key is
            provided from an outside source (THEIRS, usually a file on disk) but is also
            already set on this ConfigStruct (MINE)
        '''
        config = self._load(conflict_resolver)  # in case some other process has added items
        # Fixed: ConfigParser.write() emits str, so the file must be opened in
        # text mode ('w'); the original 'wb' raises TypeError on Python 3.
        with open(self._config_file, 'w') as cf:
            config.write(cf)

    ######################################################################
    # private

    def _load(self, resolver):
        # Read the file (if present), merge it into our sections via the
        # resolver, then add any in-memory sections the file lacked.
        config = ConfigParser()
        if os.path.exists(self._config_file):
            with open(self._config_file) as cf:
                # read_file works on the open file object (like the removed
                # readfp did) so mockfs in tests is not circumvented.
                config.read_file(cf)
        loaded = self._sync_sections_with(config, resolver)
        self._add_new_sections(config, loaded)
        return config

    def _sync_sections_with(self, config, resolver):
        # Merge every section found in the parsed file; returns their names.
        loaded = set()
        for name in config.sections():
            if name not in self:
                self[name] = SectionStruct(name)
            self[name].sync_with(config, resolver)
            loaded.add(name)
        return loaded

    def _add_new_sections(self, config, seen):
        # Push sections that only exist in memory into the parser.
        for name in self:
            if name not in seen:
                self[name].sync_with(config, choose_mine)  # new ones, so always "mine"
| 40.540146
| 100
| 0.659705
| 707
| 5,554
| 4.975955
| 0.292786
| 0.039795
| 0.050028
| 0.017055
| 0.088403
| 0.027288
| 0.027288
| 0.027288
| 0.027288
| 0
| 0
| 0.001192
| 0.245049
| 5,554
| 136
| 101
| 40.838235
| 0.837825
| 0.308786
| 0
| 0
| 0
| 0.012658
| 0.062931
| 0
| 0
| 0
| 0
| 0.007353
| 0
| 1
| 0.126582
| false
| 0
| 0.075949
| 0
| 0.303797
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d4042ed9b0586457ce903d2cc6db6a880c03485
| 10,327
|
py
|
Python
|
test_apps/python_app/tests/compiler_test.py
|
Origen-SDK/o2
|
5b0f9a6d113ddebc73c7ee224931e8b2d0301794
|
[
"MIT"
] | null | null | null |
test_apps/python_app/tests/compiler_test.py
|
Origen-SDK/o2
|
5b0f9a6d113ddebc73c7ee224931e8b2d0301794
|
[
"MIT"
] | 127
|
2019-11-23T17:09:35.000Z
|
2021-09-02T11:06:20.000Z
|
test_apps/python_app/tests/compiler_test.py
|
Origen-SDK/o2
|
5b0f9a6d113ddebc73c7ee224931e8b2d0301794
|
[
"MIT"
] | null | null | null |
import origen # pylint: disable=import-error
import pytest, pathlib, os, stat, abc
from os import access, W_OK, X_OK, R_OK
from tests.shared import clean_falcon, clean_compiler, tmp_dir
def user_compiler():
    ''' End users should access the compiler via ``origen.app.compiler``. '''
    return origen.app.compiler
# Renderer classes under test; Jinja support is not wired up yet.
MakoRenderer = origen.compiler.MakoRenderer
# JinjaRenderer = origen.compiler.JinjaRenderer
def test_compiler_inits(clean_falcon):
    """The app compiler starts empty with only the Mako renderer registered."""
    # isinstance() already returns a bool; comparing "== True" was redundant.
    assert isinstance(user_compiler(), origen.compiler.Compiler)
    assert user_compiler().stack == []
    assert user_compiler().renders == []
    assert user_compiler().output_files == []
    assert 'mako' in user_compiler().renderers
    assert user_compiler().renderers['mako'] is MakoRenderer
def test_copmiler_selects_appropriate_syntax(clean_falcon):
    """select_syntax maps known template extensions to a syntax name."""
    # NOTE(review): "copmiler" in the name looks like a typo for "compiler";
    # kept as-is so collected test IDs stay stable.
    cases = [
        ("myfile.txt.mako", 'mako'),
        ("myfile.txt.jinja", 'jinja'),
        ("myfile.txt", None),
    ]
    for filename, expected in cases:
        assert user_compiler().select_syntax(filename) == expected
        assert user_compiler().select_syntax(pathlib.Path(filename)) == expected
def test_compiler_text_render_requires_syntax(clean_falcon):
    """Rendering a raw string without an explicit syntax= must raise."""
    with pytest.raises(origen.compiler.ExplicitSyntaxRequiredError):
        user_compiler().render("Test...", direct_src=True)
class FixtureCompilerTest(abc.ABC):
    ''' Fixture conformance testing the child renderer.

    Subclasses define ``extension``, ``syntax`` and ``templatify`` and
    inherit this battery of renderer tests.
    '''

    @property
    @abc.abstractclassmethod
    def extension(cls):
        raise NotImplementedError

    @property
    @abc.abstractclassmethod
    def syntax(cls):
        raise NotImplementedError

    @property
    def str_render(self):
        return "Hello " + self.templatify('"Origen"') + "!"

    @property
    def str_render_with_standard_context(self):
        return f"Hello from Origen version {self.templatify('origen.version')}!"

    @property
    def str_render_with_additional_context(self):
        return f"Hello from template compiler \"{self.templatify('test_renderer_name')}\"!"

    @property
    def expected_str_render(self):
        return "Hello Origen!"

    @property
    def expected_str_render_with_standard_context(self):
        # Make sure origen.version isn't woefully broken
        assert isinstance(origen.version, str)
        assert len(origen.version) > 0
        return f"Hello from Origen version {origen.version}!"

    @property
    def expected_str_render_with_additional_context(self):
        return f"Hello from template compiler \"{self.syntax}\"!"

    @property
    def dummy_input_filename(self):
        return pathlib.Path(
            str(self.expected_output_filename) + f'.{self.extension}')

    @property
    def expected_output_filename(self):
        # Fixed: dropped a pointless f-string prefix (no placeholders).
        return tmp_dir().joinpath('test_file.txt')

    @property
    def expected_default_output_filename(self):
        # Fixed: removed an unused local that fetched the renderer to no effect.
        return origen.app.output_dir.joinpath('compiled/test_file.txt')

    @property
    def input_filename(self):
        return origen.root.joinpath('templates/dut_info.txt' +
                                    f'.{self.extension}')

    @property
    def output_filename(self):
        return tmp_dir().joinpath('dut_info.txt')

    @property
    def expected_dut_info_output(self):
        return "\n".join([
            self.expected_str_render_with_standard_context,
            self.expected_str_render_with_additional_context,
            'The application name is "example"'
        ])

    def test_compiler_resolves_default_filenames(self):
        # Test as string
        f = str(self.dummy_input_filename)
        r = user_compiler().resolve_filename(f)
        assert r == self.expected_default_output_filename
        # Test as pathlib.Path
        assert user_compiler().resolve_filename(
            self.dummy_input_filename) == self.expected_default_output_filename

    def test_compiler_resolves_filenames(self):
        # Test as string
        assert user_compiler().resolve_filename(
            str(self.dummy_input_filename),
            output_dir=tmp_dir()) == self.expected_output_filename
        # Test as pathlib.Path
        assert user_compiler().resolve_filename(
            self.dummy_input_filename,
            output_dir=tmp_dir()) == self.expected_output_filename

    @property
    def additional_context(self):
        return {'test_renderer_name': self.syntax}

    def test_render_file(self):
        ''' Test that the renderer can render a given file '''
        rendered = user_compiler().render(self.input_filename,
                                          syntax=self.syntax,
                                          direct_src=False,
                                          output_dir=tmp_dir(),
                                          context=self.additional_context)
        assert isinstance(rendered, pathlib.Path)
        assert rendered == self.output_filename
        # Fixed: Path.exists is a method -- asserting the bare attribute was
        # always truthy and never actually checked the filesystem.
        assert rendered.exists()
        assert open(rendered, 'r').read() == self.expected_dut_info_output

    def test_render_str(self):
        ''' Test that the renderer can render a given string '''
        rendered = user_compiler().render(self.str_render,
                                          syntax=self.syntax,
                                          direct_src=True)
        assert rendered == self.expected_str_render

    def test_render_with_standard_context(self):
        ''' Renders output using the standard context '''
        rendered = user_compiler().render(
            self.str_render_with_standard_context,
            syntax=self.syntax,
            direct_src=True)
        assert rendered == self.expected_str_render_with_standard_context

    def test_render_with_additional_context(self):
        ''' Renders output using additional context given as an option
            -> Test that the renderer supports the 'additional_context' option
        '''
        rendered = user_compiler().render(
            self.str_render_with_additional_context,
            syntax=self.syntax,
            direct_src=True,
            context={'test_renderer_name': self.syntax})
        assert rendered == self.expected_str_render_with_additional_context

    @abc.abstractclassmethod
    def templatify(self, input):
        raise NotImplementedError
class TestMakoCompiler(FixtureCompilerTest):
    # Concrete fixture: runs every FixtureCompilerTest case under Mako.
    extension = 'mako'
    syntax = 'mako'
    def templatify(self, input):
        # Wrap an expression in Mako's ${...} substitution syntax.
        return "${" + input + "}"
# class TestJinjaCompiler:
# pass
class TestCompilerStack():
    ''' Tests the compiler's stack-like interface '''
    test_cases = TestMakoCompiler()
    ''' Borrow the Mako test cases for use here '''

    def test_compiler_can_accept_requests(self, clean_falcon, clean_compiler):
        ''' Push can accept either a straight pathlib.Path or str object (interpreted as a file)
            or a tuple consisting of a 'src' and 'options'
        '''
        assert len(user_compiler().stack) == 0
        user_compiler().push('test.mako')
        assert len(user_compiler().stack) == 1
        # A stack entry is ([pathlib.Path, ...], options_dict).
        assert isinstance(user_compiler().stack[0], tuple)
        assert isinstance(user_compiler().stack[0][0], list)
        assert isinstance(user_compiler().stack[0][0][0], pathlib.Path)
        assert user_compiler().stack[0][1] == {}

    def test_compiler_can_clear_itself(self):
        # NOTE(review): this first assertion relies on state left behind by
        # the previous test (no clean_compiler fixture here) -- the tests are
        # order-coupled; confirm that is intentional.
        assert len(user_compiler().stack) > 0
        user_compiler().clear()
        assert user_compiler().stack == []
        assert user_compiler().renders == []
        assert user_compiler().output_files == []

    def test_compiler_renders_text(self, clean_falcon, clean_compiler):
        # First render: plain string, no extra context.
        origen.app.compile(self.test_cases.str_render,
                           direct_src=True,
                           syntax='mako')
        assert len(user_compiler().renders) == 1
        assert len(user_compiler().stack) == 0
        assert user_compiler(
        ).renders[0] == self.test_cases.expected_str_render
        # Second render: string that needs the additional context option.
        origen.app.compile(self.test_cases.str_render_with_additional_context,
                           context=self.test_cases.additional_context,
                           direct_src=True,
                           syntax='mako')
        assert len(user_compiler().renders) == 2
        assert len(user_compiler().stack) == 0
        assert user_compiler().renders[
            1] == self.test_cases.expected_str_render_with_additional_context
        assert user_compiler().renders[-1] == user_compiler().last_render

    def test_compiler_text_render_requires_syntax(self, clean_falcon,
                                                  clean_compiler):
        assert len(user_compiler().stack) == 0
        with pytest.raises(origen.compiler.ExplicitSyntaxRequiredError):
            origen.app.compile(self.test_cases.str_render, direct_src=True)

    def test_compiler_returns_templates_dir(self):
        assert user_compiler().templates_dir == origen.app.root.joinpath(
            'templates')

    def test_compiler_renders_files(self, clean_falcon, clean_compiler):
        origen.app.compile('dut_info.txt.mako',
                           output_dir=tmp_dir(),
                           context=self.test_cases.additional_context,
                           templates_dir=user_compiler().templates_dir)
        assert len(user_compiler().stack) == 0
        assert len(user_compiler().output_files) == 1
        compiled_file = user_compiler().output_files[0]
        compiled_file_status = os.stat(compiled_file)
        assert isinstance(compiled_file, pathlib.PurePath) == True
        assert compiled_file.exists() == True
        assert access(compiled_file, R_OK) == True
        # Check file permissions
        assert bool(compiled_file_status.st_mode & stat.S_IRUSR) == True
        assert bool(compiled_file_status.st_mode & stat.S_IWUSR) == True
        # NOTE(review): the S_IWUSR check below duplicates the one above;
        # one of them was perhaps meant to be S_IXUSR.
        assert bool(compiled_file_status.st_mode & stat.S_IWUSR) == True
| 39.117424
| 97
| 0.637165
| 1,151
| 10,327
| 5.454387
| 0.150304
| 0.09366
| 0.06021
| 0.03345
| 0.554317
| 0.438038
| 0.340714
| 0.255336
| 0.190188
| 0.17187
| 0
| 0.003042
| 0.267745
| 10,327
| 263
| 98
| 39.26616
| 0.827162
| 0.078629
| 0
| 0.31746
| 0
| 0
| 0.056672
| 0.008786
| 0
| 0
| 0
| 0
| 0.275132
| 1
| 0.174603
| false
| 0
| 0.021164
| 0.063492
| 0.306878
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d42c2702dd5a391e27f8a389f8a934778ba0c95
| 999
|
py
|
Python
|
api/api.py
|
devSessions/crvi
|
1ecc68d6c968294bcc5ceea747604ee237f6080c
|
[
"MIT"
] | 25
|
2017-12-31T06:51:54.000Z
|
2021-11-17T11:29:30.000Z
|
api/api.py
|
amittomar-1/crvi
|
1ecc68d6c968294bcc5ceea747604ee237f6080c
|
[
"MIT"
] | 23
|
2020-01-28T21:34:12.000Z
|
2022-03-11T23:11:54.000Z
|
api/api.py
|
amittomar-1/crvi
|
1ecc68d6c968294bcc5ceea747604ee237f6080c
|
[
"MIT"
] | 11
|
2018-01-04T12:30:33.000Z
|
2020-12-01T18:08:59.000Z
|
from flask import Flask, jsonify, request
import predict
import socket
# WSGI application object; routes below attach to it.
app = Flask(__name__)
@app.route('/')
@app.route('/home')
def home():
    """Renders the home page."""
    return "Welcome Guest!!!"
# Prediction endpoint mounted below the base url.
@app.route('/api', methods=['POST'])
def get_tasks():
    """Run the prediction model on an uploaded file and return the result.

    Expects a multipart POST with the file under the ``url`` field and
    responds with JSON of the form ``{"cash": <predicted value>}``.
    (Removed a large slab of commented-out multi-task scaffolding that was
    never executed.)
    """
    # The image arrives as an uploaded file, not a form value.
    url = request.files['url']
    # Hand the file to the predictor and fetch its single output value.
    sender = predict.predict(url)
    rec = sender.predict_only()
    return jsonify({'cash': rec})
if __name__ == '__main__':
    # Bind to this machine's LAN address so remote hosts can reach the API.
    ip = socket.gethostbyname(socket.gethostname())
    app.run(port=5000,host=ip)
    # For local-only debugging use instead:
    #app.run(debug=True, port=5000)
| 19.211538
| 51
| 0.58959
| 120
| 999
| 4.791667
| 0.508333
| 0.041739
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014845
| 0.258258
| 999
| 52
| 52
| 19.211538
| 0.761134
| 0.383383
| 0
| 0
| 0
| 0
| 0.076142
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.105263
| false
| 0
| 0.157895
| 0
| 0.368421
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d4492744de35276bcea0bf1ccb409c9aa59295e
| 418
|
py
|
Python
|
Special_Viewer.py
|
Akivamelka/unsupervised_mid_semester
|
5393185d7b0327bbb7cd4b3700d4d00704a5623f
|
[
"MIT"
] | null | null | null |
Special_Viewer.py
|
Akivamelka/unsupervised_mid_semester
|
5393185d7b0327bbb7cd4b3700d4d00704a5623f
|
[
"MIT"
] | null | null | null |
Special_Viewer.py
|
Akivamelka/unsupervised_mid_semester
|
5393185d7b0327bbb7cd4b3700d4d00704a5623f
|
[
"MIT"
] | null | null | null |
from Dimension_Reduction import Viewer
import pandas as pd
# Visualise each clustering model's labels against the 2-D reduced data.
view_tool = Viewer()
reduc = 'pca'  # dimensionality-reduction method used in the csv filenames
suffix = '5'   # dataset-variant suffix used in the csv filenames
data_plot = pd.read_csv(f"{reduc}_dim2_{suffix}.csv", delimiter=",")
# Clustering model short-names; one labels csv is expected per model.
models = ['km', 'fuzz', 'gmm', 'dbsc', 'hier', 'spec' ]
for model in models:
    print(model)
    labels = pd.read_csv(f"labels_{model}_{suffix}.csv", delimiter=",")
    view_tool.view_vs_target(data_plot, labels, suffix, model)
| 32.153846
| 72
| 0.669856
| 60
| 418
| 4.45
| 0.583333
| 0.059925
| 0.067416
| 0.074906
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005714
| 0.162679
| 418
| 13
| 73
| 32.153846
| 0.757143
| 0
| 0
| 0
| 0
| 0
| 0.194103
| 0.127764
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.181818
| 0
| 0.181818
| 0.090909
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d4a0164b56629bd4e65dd24b9c1a1fba70a5ea1
| 810
|
py
|
Python
|
mac/redRMacUpdater.py
|
PiRSquared17/r-orange
|
6bc383f1db3c10c59e16b39daffc44df904ce031
|
[
"Apache-2.0"
] | 1
|
2019-04-15T13:50:30.000Z
|
2019-04-15T13:50:30.000Z
|
mac/redRMacUpdater.py
|
PiRSquared17/r-orange
|
6bc383f1db3c10c59e16b39daffc44df904ce031
|
[
"Apache-2.0"
] | null | null | null |
mac/redRMacUpdater.py
|
PiRSquared17/r-orange
|
6bc383f1db3c10c59e16b39daffc44df904ce031
|
[
"Apache-2.0"
] | 1
|
2016-01-21T23:00:21.000Z
|
2016-01-21T23:00:21.000Z
|
import tarfile, sys,os
from PyQt4.QtCore import *
from PyQt4.QtGui import *
# Qt application used only to display the update-result dialog.
app = QApplication(sys.argv)
try:
    # argv[1]: path to the .tar.gz update archive; argv[2]: extraction dir.
    zfile = tarfile.open(sys.argv[1], "r:gz" )
    zfile.extractall(sys.argv[2])
    zfile.close()
    mb = QMessageBox('Red-R Updated', "Red-R has been updated'",
        QMessageBox.Information, QMessageBox.Ok | QMessageBox.Default,
        QMessageBox.NoButton, QMessageBox.NoButton)
except:
    # NOTE(review): bare except hides the real failure and reports only the
    # exception *type* (sys.exc_info()[0]), not its message.
    mb = QMessageBox('Red-R Updated', "There was an Error in updating Red-R.\n\n%s" % sys.exc_info()[0],
        QMessageBox.Information, QMessageBox.Ok | QMessageBox.Default,
        QMessageBox.NoButton, QMessageBox.NoButton)
app.setActiveWindow(mb)
mb.setFocus()
mb.show()
app.exit(0)
#mb.exec_()
sys.exit(app.exec_())
# NOTE(review): unreachable -- sys.exit() raises before this line runs, so
# the downloaded archive is never deleted.
os.remove(sys.argv[1])
| 30
| 105
| 0.646914
| 104
| 810
| 5.009615
| 0.461538
| 0.053743
| 0.03071
| 0.065259
| 0.441459
| 0.349328
| 0.349328
| 0.349328
| 0.349328
| 0.349328
| 0
| 0.011024
| 0.216049
| 810
| 27
| 106
| 30
| 0.809449
| 0.012346
| 0
| 0.190476
| 0
| 0
| 0.12
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d4be9a3c0385e4ebdfd3712a699e128c38acafc
| 9,346
|
py
|
Python
|
darknet_websocket_demo.py
|
wutianze/darknet-superb-service
|
fdee5a932c8a3898701c1e302e4642fbff853630
|
[
"MIT"
] | null | null | null |
darknet_websocket_demo.py
|
wutianze/darknet-superb-service
|
fdee5a932c8a3898701c1e302e4642fbff853630
|
[
"MIT"
] | null | null | null |
darknet_websocket_demo.py
|
wutianze/darknet-superb-service
|
fdee5a932c8a3898701c1e302e4642fbff853630
|
[
"MIT"
] | null | null | null |
from ctypes import *
#from multiprocessing import Process, Queue
import queue
import time
from threading import Lock,Thread
from fastapi import FastAPI
from fastapi import Request
from fastapi import WebSocket, WebSocketDisconnect
import uvicorn
#from yolo_service import *
import socket
import random
from typing import List
import darknet
import cv2
import time
import io
import struct
import os
import numpy as np
import base64
import json
from jtracer.tracing import init_tracer
import pynng
from PIL import Image
from opentracing.propagation import Format
def convert2relative(bbox, darknet_height, darknet_width):
    """
    YOLO format use relative coordinates for annotation
    """
    x, y, w, h = bbox
    return (x / darknet_width, y / darknet_height,
            w / darknet_width, h / darknet_height)
def convert2original(image, bbox, darknet_height, darknet_width):
    """Scale a network-space bbox back into the source image's pixel space."""
    rel_x, rel_y, rel_w, rel_h = convert2relative(bbox, darknet_height,
                                                  darknet_width)
    image_h, image_w, _channels = image.shape
    return (int(rel_x * image_w),
            int(rel_y * image_h),
            int(rel_w * image_w),
            int(rel_h * image_h))
class SuperbFrame:
    """Carries one received frame plus its detection results and timings."""
    def __init__(self,darknet_height,darknet_width):
        self.image = None  # decoded RGB frame (ndarray), set by the receiver
        self.results = None  # darknet detections: (label, confidence, bbox)
        self.darknet_image = darknet.make_image(darknet_width,darknet_height,3)  # network-sized buffer
        self.recv_timestamp = 0  # ms, when the message arrived
        self.send_timestamp = 0  # ms, sender's timestamp from the header
        self.inference_time = 0  # ms spent inside darknet.detect_image
        self.final_image = None  # frame with boxes drawn on it
        self.bytes = None  # base64-encoded PNG of final_image (optional)
        self.span = None  # opentracing span continuing the client's trace
def port_is_used(port, ip="0.0.0.0"):
    """Return True when something accepts TCP connections on (ip, port).

    :param port: TCP port number to probe.
    :param ip: host address to probe; defaults to the all-interfaces address.
    """
    # The context manager guarantees the probe socket is closed even when
    # connect() fails -- the original leaked the file descriptor on failure.
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        try:
            s.connect((ip, port))
            s.shutdown(2)
            return True
        except OSError:
            # Connection refused / unreachable -> nothing is listening.
            return False
# ASGI application; HTTP and websocket routes below attach to it.
app = FastAPI()
class ConnectionManager:
    """Tracks live websocket connections and the data ports handed out."""
    def __init__(self):
        # Holds the currently active websocket connection objects
        self.active_connections: List[WebSocket] = []
        self.ports = set()  # data ports currently allocated to clients
        self.port_lock = Lock()  # guards self.ports across handlers
    async def connect(self, ws: WebSocket):
        # Complete the websocket handshake
        await ws.accept()
        # Remember the connection object
        self.active_connections.append(ws)
    def disconnect(self, ws: WebSocket):
        # Drop the websocket object when the connection closes
        self.active_connections.remove(ws)
# Process-wide singleton shared by all route handlers.
manager = ConnectionManager()
@app.get("/get_port")
def get_port(request:Request):
    """Allocate an unused port in [SUPB_MIN_PORT, SUPB_MAX_PORT] for a client.

    The returned port number doubles as the client's key. Loops until a
    random port is found that is neither already handed out nor currently
    in use on this host.
    NOTE(review): ports are never removed from manager.ports here, so the
    pool can be exhausted over time -- confirm a release path exists.
    """
    while True:
        manager.port_lock.acquire()
        port_tmp = random.randint(int(os.getenv("SUPB_MIN_PORT")),int(os.getenv("SUPB_MAX_PORT")))
        if port_tmp in manager.ports or port_is_used(port_tmp):
            manager.port_lock.release()
            continue
        else:
            manager.ports.add(port_tmp)
            manager.port_lock.release()
            return port_tmp # port_tmp is the key for a client
def parse_data(data, tracer):
    """Split a framed message into (tracing span context, payload bytes).

    Wire layout: two native ints (header length, payload length) followed by
    a JSON tracing header of that length and then the payload. Headers of
    two bytes or fewer carry no span information.
    """
    head_length, msg_length = struct.unpack("ii", data[0:8])
    layout = "ii{}s{}s".format(head_length, msg_length)
    _, _, msg_head, msg = struct.unpack(layout, data)
    if head_length <= 2:
        return None, msg
    # Re-hydrate the caller's tracing span context from the JSON header.
    span_ctx = tracer.extract(Format.TEXT_MAP, json.loads(msg_head))
    return span_ctx, msg
def send_index(send_queue, sock,keep_alive):
    # Worker loop: forward acknowledgement indices from send_queue to the
    # peer socket until keep_alive becomes falsy.
    # NOTE(review): keep_alive is received by value; if callers pass a plain
    # bool the condition can never change from outside -- confirm intent.
    while keep_alive:
        try:
            span_reply = send_queue.get(block=False,timeout=20)
            sock.send(span_reply)
        except pynng.Timeout:
            print("sock.send timeout")
        except:
            pass # no msg to send
def send_then_recv(input_address,send_queue,input_queue,tracer,darknet_width,darknet_height,sock,keep_alive):
    """Receive framed images from ``sock`` and queue them for inference.

    Each message is parsed for an optional tracing header, decoded into an
    RGB ndarray, resized into the frame's darknet buffer, and pushed onto
    ``input_queue`` as a SuperbFrame.
    """
    #sock = pynng.Pair1(recv_timeout=100,send_timeout=100)
    #sock.listen(input_address)
    while keep_alive:
        #try:
        #    span_reply = send_queue.get(block=False,timeout=20)
        #    sock.send(span_reply)
        #except pynng.Timeout:
        #    print("sock.send timeout")
        #except:
        #    pass # no msg to send
        try:
            msg = sock.recv()
        except pynng.Timeout:
            continue
        recv_time = time.time()
        newFrame = SuperbFrame(darknet_height,darknet_width)
        newFrame.recv_timestamp = int(recv_time*1000.0) # in ms
        # msg handling
        span_ctx, msg_content = parse_data(msg,tracer)
        if span_ctx is not None:
            # Continue the client's trace with an image-processing span.
            newFrame.span = tracer.start_span('image_procss',child_of=span_ctx)
        # First 24 bytes: height, width, channels (ints) + send time (double).
        header = msg_content[0:24]
        hh,ww,cc,tt = struct.unpack('iiid',header)
        newFrame.send_timestamp = int(tt*1000.0)
        # Re-unpack including the pixel payload now the size is known.
        hh,ww,cc,tt,ss = struct.unpack('iiid'+str(hh*ww*cc)+'s',msg_content)
        newFrame.image = cv2.cvtColor((np.frombuffer(ss,dtype=np.uint8)).reshape(hh,ww,cc), cv2.COLOR_BGR2RGB)
        darknet.copy_image_from_bytes(newFrame.darknet_image,cv2.resize(newFrame.image,(darknet_width,darknet_height),interpolation=cv2.INTER_LINEAR).tobytes())
        #if span_ctx is not None:
        #    newFrame.span.finish()
        try:
            input_queue.put(newFrame,block=False,timeout=100)
        except:
            print("input_queue is full, discard current msg")
            continue
def keep_inference(send_queue, input_queue, result_queue, network, class_names, keep_alive):
    """Worker loop: pull decoded frames from input_queue, run darknet
    detection on each and push the annotated frames onto result_queue.

    When a frame carries a tracing span, the span's 'index' baggage item
    is queued on send_queue so the client learns which frame finished.
    """
    while keep_alive:
        try:
            #print("get newFrame")
            # NOTE(review): block=False makes timeout=100 dead here
            newFrame = input_queue.get(block=False, timeout=100)
        except:
            #print("inference get fail")
            continue
        prev_time = time.time()
        newFrame.results = darknet.detect_image(network, class_names, newFrame.darknet_image, thresh=0.2)
        newFrame.inference_time = int((time.time()-prev_time)*1000.0)  # s -> ms
        # the darknet-side image buffer is no longer needed
        darknet.free_image(newFrame.darknet_image)
        if newFrame.span is not None:
            index = newFrame.span.get_baggage_item('index')
            newFrame.span.finish()
            try:
                # NOTE(review): put() with defaults blocks when the queue
                # is full, so the "full" message below is unlikely to fire
                send_queue.put(index.encode())
                #sock.send(index.encode())
            except:
                print("send_queue is full, discard current msg")
        try:
            result_queue.put(newFrame, block=False, timeout=10)
        except:
            print("result_queue is full, discard current msg")
            continue
def generate_output(result_queue, need_bytes, keep_alive, class_colors, darknet_height, darknet_width, resizew=960, resizeh=480):
    """Take one inferred frame off result_queue, draw its detections and
    return the annotated SuperbFrame.

    Polls until a frame is available.  Frames are only returned when
    need_bytes is truthy (matching the original behaviour); in that case
    .bytes is set to a base64-encoded PNG resized to (resizew, resizeh).
    """
    while keep_alive:
        try:
            # BUG FIX: the original used block=False with timeout=30 (the
            # timeout is ignored in that mode) and a bare ``except:``;
            # catch the specific empty-queue condition instead.
            newFrame = result_queue.get(block=False)
        except queue.Empty:
            continue
        detections_adjusted = []
        if newFrame is not None:
            # scale bounding boxes from network coordinates back to the
            # original image size
            for label, confidence, bbox in newFrame.results:
                bbox_adjusted = convert2original(newFrame.image, bbox, darknet_height, darknet_width)
                detections_adjusted.append((str(label), confidence, bbox_adjusted))
            image = darknet.draw_boxes(detections_adjusted, newFrame.image, class_colors)
            # BUG FIX: the original called cv2.cvtColor(image, COLOR_BGR2RGB)
            # and discarded the result.  newFrame.image is already RGB
            # (converted on receive), so the call was dead code and is
            # removed rather than assigned.
            newFrame.final_image = image
            if need_bytes:
                img = Image.fromarray(image).resize((resizew, resizeh))
                img_byte_arr = io.BytesIO()
                img.save(img_byte_arr, format='PNG')
                img_byte_arr.seek(0)
                newFrame.bytes = base64.b64encode(img_byte_arr.read()).decode()
                return newFrame
        # no frame returned: keep draining the queue
@app.websocket("/ws/{port}")  # {port} is the port_tmp previously handed to this client
async def stream_handler(websocket: WebSocket, port: str):
    """Serve one client: receive frames over pynng on the client's port,
    run YOLO inference in worker threads, and stream annotated images back
    over the websocket as base64 PNG payloads with timing fields."""
    print("a new websocket connected")
    await manager.connect(websocket)
    network, class_names, class_colors = darknet.load_network(
        "./cfg/yolov4.cfg",
        "./cfg/coco.data",
        "./yolov4.weights",
        batch_size=1
    )
    darknet_width = darknet.network_width(network)
    darknet_height = darknet.network_height(network)
    tracer = init_tracer("image-process")
    input_queue = queue.Queue(maxsize=5)
    result_queue = queue.Queue(maxsize=5)
    send_queue = queue.Queue(maxsize=5)
    input_address = "tcp://0.0.0.0:" + port
    sock = pynng.Pair1(recv_timeout=100, send_timeout=100)
    sock.listen(input_address)
    # NOTE(review): keep_alive is an immutable bool captured by value by
    # the threads below, so setting it False later cannot stop them and
    # the joins may hang; a shared threading.Event would be needed.
    keep_alive = True
    p0 = Thread(target=send_then_recv, args=(input_address, send_queue, input_queue, tracer,
                                             darknet_width, darknet_height, sock, keep_alive))
    p1 = Thread(target=keep_inference, args=(send_queue, input_queue, result_queue,
                                             network, class_names, keep_alive))
    p2 = Thread(target=send_index, args=(send_queue, sock, keep_alive))
    p0.start()
    p1.start()
    p2.start()
    try:
        while keep_alive:
            # BUG FIX: generate_output's signature takes darknet_height
            # before darknet_width; the original call passed width first,
            # swapping the two for non-square networks.
            superbFrame = generate_output(result_queue, True, keep_alive, class_colors,
                                          darknet_height, darknet_width)
            send1_time = int(time.time() * 1000.0)
            payload = {"img": "data:image/png;base64,%s" % (superbFrame.bytes),
                       "send0_time": superbFrame.send_timestamp,
                       "recv_time": superbFrame.recv_timestamp,
                       "send1_time": send1_time}
            await websocket.send_json(payload)
    except WebSocketDisconnect:
        keep_alive = False
        p0.join()
        p1.join()
        p2.join()
        sock.close()
        manager.disconnect(websocket)
        # BUG FIX: manager.ports stores ints (the allocator adds the int
        # port_tmp) while the path parameter is a str, so discard(port)
        # never freed the port; convert before discarding.
        manager.ports.discard(int(port))
if __name__ == "__main__":
    # Entry point: serve the FastAPI app.  SUPB_SERVICE_PORT must be set
    # in the environment, otherwise int(None) raises TypeError.
    uvicorn.run("darknet_websocket_demo:app",host="0.0.0.0",port=int(os.getenv("SUPB_SERVICE_PORT")),log_level="info")
| 35.003745
| 180
| 0.652044
| 1,201
| 9,346
| 4.860117
| 0.227311
| 0.023128
| 0.027411
| 0.029981
| 0.25287
| 0.201302
| 0.136029
| 0.136029
| 0.112387
| 0.112387
| 0
| 0.017161
| 0.24556
| 9,346
| 266
| 181
| 35.135338
| 0.810665
| 0.067516
| 0
| 0.172249
| 0
| 0
| 0.050847
| 0.005765
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057416
| false
| 0.004785
| 0.114833
| 0
| 0.220096
| 0.023923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d4d42f7498f1a4af52daeaede069016fb2ef667
| 2,389
|
py
|
Python
|
tests/unit/test_sherman_morrison.py
|
willwheelera/pyqmc
|
0c8d1f308bbccb1560aa680a5a75e7a4fe7a69fb
|
[
"MIT"
] | 44
|
2019-06-04T13:53:26.000Z
|
2022-03-31T08:36:30.000Z
|
tests/unit/test_sherman_morrison.py
|
willwheelera/pyqmc
|
0c8d1f308bbccb1560aa680a5a75e7a4fe7a69fb
|
[
"MIT"
] | 121
|
2019-05-13T14:05:20.000Z
|
2022-02-16T19:24:37.000Z
|
tests/unit/test_sherman_morrison.py
|
willwheelera/pyqmc
|
0c8d1f308bbccb1560aa680a5a75e7a4fe7a69fb
|
[
"MIT"
] | 35
|
2019-04-26T21:57:50.000Z
|
2022-02-14T07:56:34.000Z
|
import numpy as np
from pyqmc.slater import sherman_morrison_row
from pyqmc.slater import sherman_morrison_ms
def test_sherman_morrison():
    """Check both the single-determinant and multi-determinant update
    paths produce ratios/inverses matching direct linear algebra."""
    for multi in (False, True):
        ratio_err, inv_err = run_sherman_morrison(ms=multi)
        assert ratio_err < 1e-13, f"ratios don't match {ratio_err}"
        assert inv_err < 1e-13, f"inverses don't match {inv_err}"
def construct_mat(nconf, n, ndet=None):
    """Build random, well-conditioned n x n matrices.

    Random orthogonal factors from an SVD are recombined with singular
    values of magnitude in [1, 2), so no matrix is near singular.
    Returns shape (nconf, n, n), or (nconf, ndet, n, n) when ndet is given.
    """
    u, _, v = np.linalg.svd(np.random.randn(n, n))
    shape = (nconf, n) if ndet is None else (nconf, ndet, n)
    svals = (np.random.rand(*shape) + 1) * np.random.choice([-1, 1], shape)
    return np.einsum("ij,...hj,jk->...hik", u, svals, v)
def construct_vec(matrix, nconf, n, e, ndet=None):
    """Build a replacement for row e of matrix.

    The vector is a random combination of the other rows plus a
    component along row e whose coefficient has magnitude >= 0.5, so
    the updated matrix stays well away from singular.
    """
    if ndet is None:
        coef = np.random.randn(nconf, n - 1)
    else:
        coef = np.random.randn(nconf, ndet, n - 1)
    keep = np.arange(n) != e
    in_span = np.einsum("i...j,i...jk->i...k", coef, matrix[..., keep, :])
    proj = (np.random.random(nconf) - 1) * 2
    proj += np.sign(proj) * 0.5  # push |proj| away from zero
    return in_span + np.einsum("i...k,i->i...k", matrix[..., e, :], proj)
def run_sherman_morrison(ms=False):
    """Compare Sherman-Morrison row updates against direct linear algebra.

    Builds a well-conditioned matrix, replaces row e, and returns the
    maximum absolute errors (ratio_err, inv_err) between the update
    formula and np.linalg det/inv.  ms=True exercises the
    multi-determinant variant.
    """
    n, nconf, e = 10, 4, 2
    ndet = 8 if ms else None
    # construct matrix that isn't near singular
    matrix = construct_mat(nconf, n, ndet=ndet)
    inv = np.linalg.inv(matrix)
    # replace row e, keeping the new matrix away from singular
    newmatrix = matrix.copy()
    vec = construct_vec(matrix, nconf, n, e, ndet=ndet)
    newmatrix[..., e, :] = vec
    # compute ratios and inverses directly and by update
    update = sherman_morrison_row if ndet is None else sherman_morrison_ms
    smratio, sminv = update(e, inv, vec)
    npratio = np.linalg.det(newmatrix) / np.linalg.det(matrix)
    npinv = np.linalg.inv(newmatrix)
    ratio_err = np.amax(np.abs(npratio - smratio))
    inv_err = np.amax(np.abs(npinv - sminv))
    return ratio_err, inv_err
if __name__ == "__main__":
    # Stability smoke-check: run many random trials, print the worst
    # errors, then a histogram of log10(inverse error) per decade bin.
    r_err, inv_err = list(zip(*[run_sherman_morrison() for i in range(2000)]))
    print(np.amax(r_err))
    print(np.amax(inv_err))
    counts, bins = np.histogram(np.log10(inv_err), bins=np.arange(-16, 0))
    print(np.stack([counts, bins[1:]]))
| 30.628205
| 78
| 0.631226
| 385
| 2,389
| 3.774026
| 0.283117
| 0.045423
| 0.0468
| 0.022023
| 0.326222
| 0.246387
| 0.196834
| 0.11287
| 0.11287
| 0.11287
| 0
| 0.019282
| 0.218501
| 2,389
| 77
| 79
| 31.025974
| 0.758972
| 0.055672
| 0
| 0.181818
| 0
| 0
| 0.079929
| 0
| 0
| 0
| 0
| 0
| 0.072727
| 1
| 0.072727
| false
| 0
| 0.054545
| 0
| 0.181818
| 0.054545
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d4df1f93edc3b8bb4e583e03cb8610d1cc0439f
| 1,543
|
py
|
Python
|
script/licel-plotter.py
|
FedeVerstraeten/smn-lidar-controller
|
7850fd48702d5f2e00d07b499812b3b2fb2b7676
|
[
"MIT"
] | null | null | null |
script/licel-plotter.py
|
FedeVerstraeten/smn-lidar-controller
|
7850fd48702d5f2e00d07b499812b3b2fb2b7676
|
[
"MIT"
] | 1
|
2021-10-05T03:53:55.000Z
|
2021-10-05T03:53:55.000Z
|
script/licel-plotter.py
|
FedeVerstraeten/smnar-lidar-controller
|
7850fd48702d5f2e00d07b499812b3b2fb2b7676
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import sys
import socket
import time
import numpy as np
import matplotlib.pyplot as plt
HOST = '10.49.234.234'
PORT = 2055
def command_to_licel(licelcommand):
    """Send one command to the Licel controller over TCP and return the
    raw reply bytes (up to 8192)."""
    with socket.socket(socket.AF_INET, socket.SOCK_STREAM) as s:
        s.connect((HOST, PORT))
        s.sendall(bytes(licelcommand + '\r\n', 'utf-8'))
        time.sleep(2)  # wait TCP adquisition
        data = s.recv(8192)  # 8192 = 4096 * 2
    print("Len:", len(data), "type:", type(data))
    return data
if __name__ == '__main__':
    # Acquisition sequence: select a transient recorder, clear its memory,
    # record for 5 seconds, stop, then fetch and plot the raw data.
    # Select TR
    command_select='SELECT 0'
    rsp=repr(command_to_licel(command_select))
    print('Received',rsp)
    # Clear memory
    command_clear='MCLEAR'
    rsp=repr(command_to_licel(command_clear))
    print('Received',rsp)
    # Start TR
    command_start='MSTART'
    rsp=repr(command_to_licel(command_start))
    print('Received',rsp)
    time.sleep(5)
    # Stop TR
    command_stop='MSTOP'
    rsp=repr(command_to_licel(command_stop))
    print('Received',rsp)
    # Get data (raw bytes, not repr'd like the status replies above)
    command_data='DATA? 0 4001 LSW A'
    rsp=command_to_licel(command_data)
    #print('Received',rsp)
    # with open('outputlicel', 'w') as f:
    #   f.write(rsp)
    data_output=rsp
    # Plot: iterating a bytes object yields ints already; int() is a no-op
    t = np.arange(0, len(data_output), 1)
    data_arr=[]
    for data_byte in data_output:
        data_arr.append(int(data_byte))
    fig, ax = plt.subplots()
    ax.plot(t, data_arr)
    # NOTE(review): axis labels claim seconds/mV but the data are raw
    # bytes per sample index -- confirm scaling against the device docs
    ax.set(xlabel='time (s)', ylabel='voltage (mV)',title='SMN LICEL')
    ax.grid()
    fig.savefig("test.png")
    plt.show()
| 24.109375
| 70
| 0.644848
| 226
| 1,543
| 4.225664
| 0.460177
| 0.056545
| 0.087958
| 0.109948
| 0.117277
| 0.117277
| 0
| 0
| 0
| 0
| 0
| 0.032125
| 0.213221
| 1,543
| 63
| 71
| 24.492063
| 0.75453
| 0.121841
| 0
| 0.093023
| 0
| 0
| 0.112435
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.023256
| false
| 0
| 0.116279
| 0
| 0.162791
| 0.116279
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d500786de7e53c7c13f50132e8ecbc760d095db
| 13,860
|
py
|
Python
|
horizon/openstack_dashboard/dashboards/identity/account/tables.py
|
yianjiajia/openstack_horizon
|
9e36a4c3648ef29d0df6912d990465f51d6124a6
|
[
"Apache-2.0"
] | null | null | null |
horizon/openstack_dashboard/dashboards/identity/account/tables.py
|
yianjiajia/openstack_horizon
|
9e36a4c3648ef29d0df6912d990465f51d6124a6
|
[
"Apache-2.0"
] | null | null | null |
horizon/openstack_dashboard/dashboards/identity/account/tables.py
|
yianjiajia/openstack_horizon
|
9e36a4c3648ef29d0df6912d990465f51d6124a6
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012 Nebula, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import logging
import json
from django.utils.translation import ugettext_lazy as _
from django.utils.translation import ungettext_lazy
from django.conf import settings
from horizon import forms
from horizon import tables
from horizon.utils import filters
from openstack_dashboard import api
from openstack_dashboard import policy
LOG = logging.getLogger(__name__)
POLICY_CHECK = getattr(settings, "POLICY_CHECK_FUNCTION", lambda p, r: True)
class CreateAccount(tables.LinkAction):
    """Table-level action linking to the 'create account' modal form."""
    name = "create"
    verbose_name = _("Create Account")
    url = "horizon:identity:account:create"
    classes = ("ajax-modal",)
    icon = "plus"
    policy_rules = (("identity", "identity:create_user"),)
class DeleteAccountAction(tables.DeleteAction):
    """Disable a user and its default project, then delete the matching
    billing account."""
    help_text = _(
        "This Operation will delete all configuration and resources(network, images, servers, disks, VPN, firewall, keypair) and !!! Please confirm your operation.")

    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Delete User",
            u"Delete Users",
            count
        )

    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Deleted User",
            u"Deleted Users",
            count
        )

    name = "delete"
    policy_rules = (("identity", "identity:update_user"),)

    def allowed(self, request, user):
        """Only enabled users may be deleted, and only when keystone
        permits user edits."""
        if not api.keystone.keystone_can_edit_user():
            return False
        self.enabled = True
        if not user:
            return False
        return user.enabled

    def delele_billing_account(self, request, obj_id):
        """Delete the billing account tied to project ``obj_id``.

        Raises RuntimeError when the billing API reports failure; callers
        catch this under ``except Exception``.
        """
        client = api.billing.RequestClient(request)
        account = client.get_account(obj_id)
        if account:
            ret = client.api_request('/account/delete/' + account['account_id'],
                                     method='DELETE')
            user = json.loads(ret.read())
            if user['success'] != 'success':
                # BUG FIX: a bare ``raise`` with no active exception raises
                # "RuntimeError: No active exception to re-raise"; raise an
                # explicit error describing the failure instead.
                raise RuntimeError('Failed to delete billing account %s'
                                   % account['account_id'])

    def delete(self, request, obj_id):
        """Disable user + project and delete billing; log the outcome."""
        LOG.info('Deleting User "%s".' % obj_id)
        try:
            api.keystone.user_update_enabled(request, obj_id, False)
            user = api.keystone.user_get(request, obj_id)
            api.keystone.tenant_update(request, user.default_project_id, enabled=False)
            self.delele_billing_account(request, user.default_project_id)
            # operation log
            config = _('User ID: %s') % obj_id
            api.logger.Logger(request).create(resource_type='account', action_name='Deletes User',
                                              resource_name='Account', config=config,
                                              status='Success')
        except Exception:
            # operation log
            config = _('User ID: %s') % obj_id
            api.logger.Logger(request).create(resource_type='account', action_name='Deletes User',
                                              resource_name='Account', config=config,
                                              status='Error')
class EnableAccountAction(tables.DeleteAction):
    """Re-enable a disabled user, its default project and billing account."""
    help_text = _(
        "This Operation will enable the user and project!!! Please confirm your operation.")

    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Enable User",
            u"Enable Users",
            count
        )

    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Enabled User",
            u"Enabled Users",
            count
        )

    name = "enable"
    policy_rules = (("identity", "identity:update_user"),)

    def allowed(self, request, user):
        """Only disabled users may be enabled, and only when keystone
        permits user edits."""
        if not api.keystone.keystone_can_edit_user():
            return False
        self.enabled = True
        if not user:
            return False
        return not user.enabled

    def enable_billing_account(self, request, obj_id):
        """Set the billing account of project ``obj_id`` back to 'normal'.

        Raises RuntimeError when the billing API reports failure; callers
        catch this under ``except Exception``.
        """
        client = api.billing.RequestClient(request)
        account = client.get_account(obj_id)
        if account:
            params = {}
            params['account'] = {}
            params['account']['status'] = 'normal'
            params['account']['frozen_status'] = 'normal'
            ret = client.api_request('/account/update/' + account['account_id'],
                                     method='PUT', data=json.dumps(params))
            user = json.loads(ret.read())
            if user['success'] != 'success':
                # BUG FIX: a bare ``raise`` with no active exception raises
                # "RuntimeError: No active exception to re-raise"; raise an
                # explicit error describing the failure instead.
                raise RuntimeError('Failed to enable billing account %s'
                                   % account['account_id'])

    def action(self, request, obj_id):
        """Enable user + project and billing; log the outcome."""
        LOG.info('Enable User "%s".' % obj_id)
        try:
            api.keystone.user_update_enabled(request, obj_id, True)
            user = api.keystone.user_get(request, obj_id)
            api.keystone.tenant_update(request, user.default_project_id, enabled=True)
            self.enable_billing_account(request, user.default_project_id)
            # operation log
            config = _('User ID: %s') % obj_id
            api.logger.Logger(request).create(resource_type='account', action_name='Enables User',
                                              resource_name='Account', config=config,
                                              status='Success')
        except Exception:
            # operation log
            config = _('User ID: %s') % obj_id
            api.logger.Logger(request).create(resource_type='account', action_name='Enables User',
                                              resource_name='Account', config=config,
                                              status='Error')
class AccountFilterAction(tables.FilterAction):
    """Server-side table filter by user name, company name or status."""
    name = "filter_account"
    filter_type = "server"
    filter_choices = (('sname', _("Name"), True),
                      ('scompany', _("Company Name"), True),
                      ('enabled', _("Status"), True),)
class EditAccountInfoLink(tables.LinkAction):
    """Row action linking to the account-information edit form."""
    name = "edit"
    verbose_name = _("Edit")
    url = "horizon:identity:account:update_info"
    classes = ("ajax-modal",)
    icon = "pencil"
    policy_rules = (("identity", "identity:update_user"),)

    def allowed(self, request, datum=None):
        """Offer editing only for existing, enabled users."""
        if not datum:
            return False
        return api.keystone.user_get(request, datum).enabled
class AdjustQuotaLink(tables.LinkAction):
    """Row action linking to the quota-modification form."""
    name = "update_quota"
    verbose_name = _("Modify Quotas")
    url = "horizon:identity:account:update_quota"
    classes = ("ajax-modal",)
    icon = "pencil"
    policy_rules = (("identity", "identity:update_project"),)

    def allowed(self, request, datum=None):
        """Show the action only for enabled users that have the current
        services region."""
        # BUG FIX: the original dereferenced ``datum.id`` before the
        # ``not datum`` guard, so a missing datum raised AttributeError
        # instead of hiding the action.  Guard first.
        if not datum:
            return False
        # only display when the modified user have this region
        region_choices = []
        regions = api.keystone.list_regions_for_user(request, datum.id)
        for region in regions:
            region_choices.append(region['id'])
        if request.user.services_region not in region_choices:
            return False
        user = api.keystone.user_get(request, datum)
        return user.enabled
class RoleChangeLink(tables.BatchAction):
    """Promote a user from the default role to project-admin role."""
    name = "adjust_quota"
    classes = ('btn-danger',)
    icon = "pencil"
    help_text = _("Please do it carefully!")
    policy_rules = (("identity", "identity:update_user"),)

    @staticmethod
    def action_present(count):
        return ungettext_lazy(
            u"Role Change",
            u"Role Changes",
            count
        )

    @staticmethod
    def action_past(count):
        return ungettext_lazy(
            u"Role Changed",
            u"Role Changed",
            count
        )

    def allowed(self, request, datum=None):
        """Offer the change only for enabled users still holding the
        default role, and only to callers passing the grant policies."""
        # renamed from ``policy`` to avoid shadowing the imported
        # openstack_dashboard.policy module
        required_policy = (("identity", "identity:create_grant"),
                           ("identity", "identity:revoke_grant"),)
        # only normal user can change their role
        # only support and admin can do this action
        if not datum:
            return False
        user = api.keystone.user_get(request, datum)
        if not user.enabled:
            return False
        default_role = api.keystone.get_default_role(request)
        if user.default_role_id != default_role.id:
            return False
        return POLICY_CHECK(required_policy, request)

    def action(self, request, obj_id):
        """Swap the user's default role for the project-admin role and
        log the outcome."""
        # BUG FIX: the role names were only bound inside the try block, so
        # a failure before they were fetched made the except branch crash
        # with UnboundLocalError.  Bind fallbacks first.
        old_role_name = new_role_name = 'unknown'
        try:
            user = api.keystone.user_get(request, obj_id)
            default_user_role = api.keystone.get_default_role(request)
            default_project_admin_role = api.keystone.get_default_project_admin_role(request)
            old_role_name = default_user_role.name
            new_role_name = default_project_admin_role.name
            api.keystone.remove_tenant_user_role(request, project=user.default_project_id,
                                                 user=user.id, role=default_user_role.id)
            api.keystone.user_update(request, obj_id, **{'default_role_id': default_project_admin_role.id})
            api.keystone.add_tenant_user_role(request, project=user.default_project_id,
                                              user=user.id, role=default_project_admin_role.id)
            # operation log
            config = _('Old role %s, new role %s') % (old_role_name, new_role_name)
            api.logger.Logger(request).create(resource_type='account', action_name='Role_Change',
                                              resource_name='Account', config=config,
                                              status='Success')
        except Exception:
            # operation log
            config = _('Old role %s, new role %s') % (old_role_name, new_role_name)
            api.logger.Logger(request).create(resource_type='account', action_name='Role_Change',
                                              resource_name='Account', config=config,
                                              status='Error')
class ChangePasswordLink(policy.PolicyTargetMixin, tables.LinkAction):
    """Row action opening the change-password modal for a user."""
    name = "change_password"
    verbose_name = _("Change Password")
    url = "horizon:identity:account:change_password"
    classes = ("ajax-modal",)
    icon = "key"
    policy_rules = (("identity", "identity:change_password"),)
    policy_target_attrs = (("user_id", "id"),)

    def allowed(self, request, datum=None):
        """Visible only for enabled users when keystone allows edits."""
        if not datum:
            return False
        target = api.keystone.user_get(request, datum)
        return target.enabled and api.keystone.keystone_can_edit_user()
class UpdateRegionsLink(policy.PolicyTargetMixin, tables.LinkAction):
    """Row action opening the per-user region update form."""
    name = "regions"
    verbose_name = _("Update Regions")
    url = "horizon:identity:account:regions"
    classes = ("ajax-modal",)
    icon = "pencil"
    policy_rules = (("identity", "identity:update_user_regions"),)

    def allowed(self, request, datum=None):
        """Visible only for existing, enabled users."""
        if not datum:
            return False
        return api.keystone.user_get(request, datum).enabled
class UpdateMembersLink(tables.LinkAction):
    """Row action opening the project-membership management form."""
    name = "users"
    verbose_name = _("Manage Members")
    url = "horizon:identity:account:update_member"
    classes = ("ajax-modal",)
    icon = "pencil"
    policy_rules = (("identity", "identity:list_users"),
                    ("identity", "identity:list_grants"))

    def allowed(self, request, datum=None):
        """Visible only for existing, enabled users."""
        if not datum:
            return False
        return api.keystone.user_get(request, datum).enabled
# Maps the boolean ``enabled`` flag to the label shown in the Status column.
STATUS_DISPLAY_CHOICES = (
    (False, _("Delete")),
    (True, _("Normal")),
)
class AccountsTable(tables.DataTable):
    """Account list table: columns plus the actions defined above."""
    id = tables.Column('id', hidden=True)
    # project_id = tables.Column('project_id', hidden=True)
    name = tables.Column('name',
                         verbose_name=_('User Name'),
                         form_field=forms.CharField(),
                         link='horizon:identity:account:detail'
                         )
    company = tables.Column('company',
                            verbose_name=_('Company Name'),
                            form_field=forms.CharField())
    # email = tables.Column('email', verbose_name=_('Email'),
    #                       form_field=forms.CharField(required=False),
    #                       filters=(lambda v: defaultfilters
    #                                .default_if_none(v, ""),
    #                                defaultfilters.escape,
    #                                defaultfilters.urlize)
    #                       )
    enabled = tables.Column('enabled', verbose_name=_('Status'),
                            # status=True,
                            # status_choices=STATUS_CHOICES,
                            display_choices=STATUS_DISPLAY_CHOICES,
                            empty_value="False")
    created_at = tables.Column('created_at',
                               verbose_name=_('Created_at'),
                               filters=[filters.parse_isotime])

    class Meta(object):
        # horizon table metadata: registered name, title, and which
        # actions appear at table level vs per row
        name = "accounts"
        verbose_name = _("AccountList")
        table_actions = (AccountFilterAction, CreateAccount)
        row_actions = (EditAccountInfoLink, AdjustQuotaLink, UpdateRegionsLink, UpdateMembersLink,
                       RoleChangeLink, ChangePasswordLink, DeleteAccountAction, EnableAccountAction)
| 37.258065
| 165
| 0.581818
| 1,427
| 13,860
| 5.466713
| 0.189909
| 0.032432
| 0.023074
| 0.03115
| 0.530573
| 0.477118
| 0.449942
| 0.429689
| 0.424048
| 0.407896
| 0
| 0.000845
| 0.3171
| 13,860
| 371
| 166
| 37.358491
| 0.823349
| 0.090837
| 0
| 0.501754
| 0
| 0.003509
| 0.153008
| 0.030474
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0.021053
| 0.035088
| 0.021053
| 0.435088
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d5291b6a1ce7e03aab2c5b10e8c178dc0212bb3
| 2,278
|
py
|
Python
|
3Sum.py
|
Muthu2093/Algorithms-practice
|
999434103a9098a4361104fd39cba5913860fa9d
|
[
"MIT"
] | null | null | null |
3Sum.py
|
Muthu2093/Algorithms-practice
|
999434103a9098a4361104fd39cba5913860fa9d
|
[
"MIT"
] | null | null | null |
3Sum.py
|
Muthu2093/Algorithms-practice
|
999434103a9098a4361104fd39cba5913860fa9d
|
[
"MIT"
] | null | null | null |
## Given an array nums of n integers, are there elements a, b, c in nums such that a + b + c = 0? Find all unique triplets in the array which gives the sum of zero.
## Note:
## The solution set must not contain duplicate triplets.
## Example:
## Given array nums = [-1, 0, 1, 2, -1, -4],
## A solution set is:
## [
## [-1, 0, 1],
## [-1, -1, 2]
## ]
class Solution:
    """3Sum: find all unique triplets in nums summing to zero."""

    def quickSort(self, nums, l, r):
        """In-place quicksort of nums[l..r] inclusive (kept for reference;
        threeSum uses list.sort())."""
        if (l < r):
            pi = self.partition(nums, l, r)
            self.quickSort(nums, l, pi - 1)
            self.quickSort(nums, pi + 1, r)

    def partition(self, nums, low, high):
        """Lomuto partition using nums[high] as pivot; returns pivot index."""
        pivot = nums[high]
        j = low - 1
        for i in range(low, high):
            if nums[i] <= pivot:
                j += 1
                nums[i], nums[j] = nums[j], nums[i]
        nums[high], nums[j + 1] = nums[j + 1], nums[high]
        return (j + 1)

    def threeSum(self, nums):
        """
        :type nums: List[int]
        :rtype: List[List[int]]

        BUG FIX: the original fixed the *middle* index and two-pointer
        scanned around it, which could miss or duplicate triplets, and its
        len(nums)==3 shortcut returned a list-of-list while the main path
        returned tuples.  Replaced with the standard algorithm: sort,
        fix the smallest element, two-pointer the remainder, and skip
        duplicates in place (no final set() dedup needed).  Triplets are
        returned uniformly as tuples in ascending order.
        """
        if len(nums) < 3:
            return []
        nums.sort()
        res = []
        for i in range(len(nums) - 2):
            if nums[i] > 0:
                break  # all remaining values positive -> no zero sum possible
            if i > 0 and nums[i] == nums[i - 1]:
                continue  # skip duplicate first elements
            l, r = i + 1, len(nums) - 1
            while l < r:
                s = nums[i] + nums[l] + nums[r]
                if s < 0:
                    l += 1
                elif s > 0:
                    r -= 1
                else:
                    res.append((nums[i], nums[l], nums[r]))
                    while l < r and nums[l] == nums[l + 1]:
                        l += 1
                    while l < r and nums[r] == nums[r - 1]:
                        r -= 1
                    l += 1
                    r -= 1
        return res
| 27.780488
| 164
| 0.368306
| 293
| 2,278
| 2.863481
| 0.242321
| 0.047676
| 0.053635
| 0.035757
| 0.125149
| 0.060787
| 0.042908
| 0.042908
| 0
| 0
| 0
| 0.037133
| 0.491659
| 2,278
| 81
| 165
| 28.123457
| 0.687392
| 0.178666
| 0
| 0.163265
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061224
| false
| 0
| 0
| 0
| 0.163265
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d5338ad6760bdfbd08440494b1ea9d0eab1dc53
| 1,809
|
py
|
Python
|
developers_chamber/scripts/gitlab.py
|
dstlmrk/developers-chamber
|
93f928048f57c049f1c85446d18078b73376462a
|
[
"MIT"
] | 8
|
2019-08-23T15:46:30.000Z
|
2021-03-23T20:12:21.000Z
|
developers_chamber/scripts/gitlab.py
|
dstlmrk/developers-chamber
|
93f928048f57c049f1c85446d18078b73376462a
|
[
"MIT"
] | 14
|
2019-09-17T20:24:18.000Z
|
2021-05-18T21:10:12.000Z
|
developers_chamber/scripts/gitlab.py
|
dstlmrk/developers-chamber
|
93f928048f57c049f1c85446d18078b73376462a
|
[
"MIT"
] | 6
|
2019-08-23T15:46:21.000Z
|
2022-02-18T11:01:18.000Z
|
import os
import click
from developers_chamber.git_utils import get_current_branch_name
from developers_chamber.gitlab_utils import \
create_merge_request as create_merge_request_func
from developers_chamber.scripts import cli
DEFAULT_API_URL = os.environ.get('GITLAB_API_URL', 'https://gitlab.com/api/v4')
DEFAULT_PROJECT = os.environ.get('GITLAB_PROJECT')
DEFAULT_TARGET_BRANCH = os.environ.get('GITLAB_TARGET_BRANCH', 'next')
DEFAULT_TOKEN = os.environ.get('GITLAB_TOKEN')
@cli.group()
def gitlab():
    """GitLab commands"""
    # Click command group; the docstring above doubles as the CLI help
    # text, so it is left unchanged.  Subcommands attach via
    # @gitlab.command().
@gitlab.command()
@click.option('--api-url', help='GitLab instance API URL (defaults to gitlab.com)', type=str, required=True,
              default=DEFAULT_API_URL)
@click.option('--token', help='token (can be set as env variable GITLAB_TOKEN)', type=str, required=True,
              default=DEFAULT_TOKEN)
@click.option('--source-branch', help='source Git branch', type=str)
@click.option('--target-branch', help='target Git branch (defaults to env variable GITLAB_TARGET_BRANCH)', type=str,
              default=DEFAULT_TARGET_BRANCH)
@click.option('--project', help='GitLab project name (defaults to env variable GITLAB_PROJECT)', type=str,
              required=True, default=DEFAULT_PROJECT)
def create_release_merge_request(api_url, token, source_branch, target_branch, project):
    """Create a new merge request in GitLab project after release"""
    # the docstring above is the CLI help text and is left unchanged
    # default to the branch currently checked out when none was given
    if not source_branch:
        source_branch = get_current_branch_name()
    mr_url = create_merge_request_func(
        api_url=api_url,
        token=token,
        title=f'Merge branch "{source_branch}"',
        description='',
        source_branch=source_branch,
        target_branch=target_branch,
        project=project,
    )
    click.echo(f'Merge request was successfully created: {mr_url}')
| 38.489362
| 116
| 0.726368
| 244
| 1,809
| 5.147541
| 0.266393
| 0.038217
| 0.038217
| 0.057325
| 0.121815
| 0.078822
| 0
| 0
| 0
| 0
| 0
| 0.000657
| 0.159204
| 1,809
| 46
| 117
| 39.326087
| 0.825115
| 0.040907
| 0
| 0
| 0
| 0
| 0.266821
| 0.012181
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057143
| false
| 0
| 0.142857
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d5577a30127caeb2ef24f4e9b841abc050103d0
| 15,790
|
py
|
Python
|
tests_pytest/state_machines/autoinstall/test_autoinstall_smbase.py
|
tessia-project/tessia
|
b9ded8dc7f0b9a7a0ea00d95b5ccc4af4d2e7540
|
[
"Apache-2.0"
] | 5
|
2020-06-04T10:20:33.000Z
|
2020-10-26T15:09:19.000Z
|
tests_pytest/state_machines/autoinstall/test_autoinstall_smbase.py
|
tessia-project/tessia
|
b9ded8dc7f0b9a7a0ea00d95b5ccc4af4d2e7540
|
[
"Apache-2.0"
] | null | null | null |
tests_pytest/state_machines/autoinstall/test_autoinstall_smbase.py
|
tessia-project/tessia
|
b9ded8dc7f0b9a7a0ea00d95b5ccc4af4d2e7540
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Test base autoinstall machine
A smallest implementation on SmBase is used to test common features
"""
# pylint: disable=invalid-name # we have really long test names
# pylint: disable=redefined-outer-name # use of fixtures
# pylint: disable=unused-argument # use of fixtures for their side effects
#
# IMPORTS
#
from pathlib import Path
from tessia.baselib.hypervisors.hmc.volume_descriptor import FcpVolumeDescriptor
from tessia.server.config import Config
from tessia.server.state_machines.autoinstall import plat_lpar, plat_zvm, plat_kvm
from tessia.server.state_machines.autoinstall import plat_base, sm_base
from tessia.server.state_machines.autoinstall.model import AutoinstallMachineModel
from tessia.server.state_machines.autoinstall.sm_base import SmBase
from tests_pytest.decorators import tracked
from tests_pytest.state_machines.ssh_stub import SshClient
from tests_pytest.state_machines.null_hypervisor import NullHypervisor
import pytest
import yaml
#
# CONSTANTS AND DEFINITIONS
#
CREDS = {'user': 'unit', 'password': 'test'}
#
# CODE
#
class NullMachine(SmBase):
    """
    Concrete SmBase implementation

    This implementation helps trigger all common paths without having
    any distro specifics (i.e. termination conditions or log lines)
    """
    def __init__(self, model: AutoinstallMachineModel,
                 platform: plat_base.PlatBase, *args, **kwargs):
        """
        Initialize SmBase
        """
        super().__init__(model, platform, *args, **kwargs)

    @property
    @classmethod
    def DISTRO_TYPE(cls):  # pylint: disable=invalid-name
        """
        Return the type of linux distribution supported.

        NOTE(review): stacking @property on @classmethod does not yield a
        working class-level property on most Python versions (it was only
        supported in 3.9-3.10 and removed again in 3.11); accessing
        NullMachine.DISTRO_TYPE likely returns the property object rather
        than "null" -- confirm against how SmBase reads this attribute.
        """
        return "null"
    # DISTRO_TYPE

    def wait_install(self):
        """
        Consider operating system installed and return immediately
        """
    # wait_install()
class NullPostInstallChecker:
    """
    PostInstallChecked that checks that it has been called
    """
    @tracked
    def verify(self):
        """
        Public method to verify installed system

        Returns an empty issue list; @tracked records the invocation so
        tests can assert verify ran (see checker.verify.called_once).
        """
        return []
class TestModelUpdate:
    """
    Test model updates during autoinstallation
    """
    class UpdatingHypervisor(NullHypervisor):
        """
        Hypervisor that returns some valid data about storage volumes
        """
        @tracked
        def query_dpm_storage_devices(self, guest_name):
            """Query storage devices on DPM"""
            # fixed FCP answer whose uuid/paths the install flow should
            # copy into the machine model (asserted in the test below)
            return [
                FcpVolumeDescriptor(
                    **{'uri': '/api/storage-volumes/1', 'attachment': 'fcp',
                       'is_fulfilled': True, 'size': 19.07,
                       'uuid': '6005076309FFD435000000000000CD0F',
                       'paths': [{'device_nr': 'FC00',
                                  'wwpn': '5005076309049435',
                                  'lun': 'CD0F0000'}]
                       })]

    @pytest.fixture
    def scsi_volume_without_paths(self):
        """
        A single-partition SCSI volume

        Deliberately created without FCP paths so the install flow has to
        fill them in from the hypervisor query above.
        """
        result = AutoinstallMachineModel.ZfcpVolume(
            'cd0f0000', 20_000_000, multipath=True,
            wwid='36005076309ffd435000000000000cd0f')
        result.set_partitions('msdos', [{
            'mount_point': '/data',
            'size': 18_000,
            'filesystem': 'ext4',
            'part_type': 'primary',
            'mount_opts': None,
        }])
        yield result

    @pytest.fixture(autouse=True)
    def mock_hypervisors(self, monkeypatch):
        """
        Use hypevisor stub instead of real sessions

        Overrides the module-level autouse fixture of the same name with
        the path-returning UpdatingHypervisor for this class.
        """
        monkeypatch.setattr(plat_lpar, 'HypervisorHmc',
                            TestModelUpdate.UpdatingHypervisor)

    def test_model_update_adds_fcp_paths(
            self, lpar_scsi_system, default_os_tuple, tmpdir,
            scsi_volume_without_paths):
        """
        Attempt to install "nothing" on an LPAR on SCSI disk

        Verify that hypervisor is called with correct parameters
        and post-install checker is run
        """
        model = AutoinstallMachineModel(*default_os_tuple,
                                        lpar_scsi_system, CREDS)
        model.system_profile.add_volume(scsi_volume_without_paths)
        checker = NullPostInstallChecker()
        hyp = plat_lpar.PlatLpar.create_hypervisor(model)
        platform = plat_lpar.PlatLpar(model, hyp)
        # autoinstall machines use their own working directory
        # and have to be initialized in a temporary environment
        with tmpdir.as_cwd():
            smbase = NullMachine(model, platform, checker)
            smbase.start()
        # the pathless volume should now carry paths from the hypervisor
        assert len(model.system_profile.volumes) == 2
        assert model.system_profile.volumes[1].paths
@pytest.fixture(autouse=True)
def mock_config(monkeypatch, tmp_path):
    """
    Set default configuration

    Autouse fixture: every test in the module runs with Config.get_config
    patched to a minimal auto_install section rooted in a per-test
    tmp_path.
    """
    def get_config():
        """
        Configuration for use in tests
        """
        # use a temporary path for storing templates
        return {
            'auto_install': {
                'url': 'http://server_1:5000/',
                'dir': str(tmp_path),
                'live_img_passwd': 'liveimage'
            }
        }
    monkeypatch.setattr(Config, 'get_config', get_config)
@pytest.fixture(autouse=True)
def mock_hypervisors(monkeypatch):
    """
    Use hypevisor stub instead of real sessions

    Replaces each platform module's hypervisor client with NullHypervisor
    so no real HMC/zVM/KVM connections are attempted.
    """
    monkeypatch.setattr(plat_lpar, 'HypervisorHmc', NullHypervisor)
    monkeypatch.setattr(plat_zvm, 'HypervisorZvm', NullHypervisor)
    monkeypatch.setattr(plat_kvm, 'HypervisorKvm', NullHypervisor)
@pytest.fixture(autouse=True)
def mock_ssh(monkeypatch):
    """Use an ssh stub instead of real sessions."""
    for module in (plat_base, plat_kvm, sm_base):
        monkeypatch.setattr(module, 'SshClient', SshClient)
def test_boot_and_postinstall_check_on_lpar_dasd(
        lpar_dasd_system, default_os_tuple, tmpdir):
    """
    Attempt to install "nothing" on an LPAR on DASD disk.

    Verify that the hypervisor is called with the correct parameters
    and that the post-install checker is run.
    """
    machine_model = AutoinstallMachineModel(
        *default_os_tuple, lpar_dasd_system, CREDS)
    post_checker = NullPostInstallChecker()
    hypervisor = plat_lpar.PlatLpar.create_hypervisor(machine_model)
    lpar_platform = plat_lpar.PlatLpar(machine_model, hypervisor)
    # autoinstall machines use their own working directory,
    # so initialize them inside a temporary environment
    with tmpdir.as_cwd():
        machine = NullMachine(machine_model, lpar_platform, post_checker)
        machine.start()

    # NOTE(review): called_once is read as a plain attribute here;
    # confirm NullPostInstallChecker records calls this way
    assert post_checker.verify.called_once
    name, num_cpus, memory, attrs, *_ = hypervisor.start.calls[0]
    boot_options = lpar_dasd_system.hypervisor.boot_options
    assert name == boot_options['partition-name']
    assert num_cpus == lpar_dasd_system.cpus
    assert memory == lpar_dasd_system.memory
    # installation device does not show up in HmcHypervisor boot,
    # it is only used later during installation
    assert attrs['boot_params']['boot_method'] == 'dasd'
    assert attrs['boot_params']['devicenr'] == boot_options['boot-device']
def test_boot_and_postinstall_check_on_lpar_scsi(
        lpar_scsi_system, default_os_tuple, tmpdir):
    """
    Attempt to install "nothing" on an LPAR on SCSI disk.

    Verify that the hypervisor is called with the correct parameters
    and that the post-install checker is run.
    """
    machine_model = AutoinstallMachineModel(
        *default_os_tuple, lpar_scsi_system, CREDS)
    post_checker = NullPostInstallChecker()
    hypervisor = plat_lpar.PlatLpar.create_hypervisor(machine_model)
    lpar_platform = plat_lpar.PlatLpar(machine_model, hypervisor)
    # autoinstall machines use their own working directory,
    # so initialize them inside a temporary environment
    with tmpdir.as_cwd():
        machine = NullMachine(machine_model, lpar_platform, post_checker)
        machine.start()

    assert post_checker.verify.called_once
    name, num_cpus, memory, attrs, *_ = hypervisor.start.calls[0]
    boot_options = lpar_scsi_system.hypervisor.boot_options
    assert name == boot_options['partition-name']
    assert num_cpus == lpar_scsi_system.cpus
    assert memory == lpar_scsi_system.memory
    # installation device does not show up in HmcHypervisor boot,
    # it is only used later during installation.
    # NOTE(review): 'dasd' even for the SCSI profile — the HMC boot
    # device itself appears to be DASD-based; confirm against the fixture
    assert attrs['boot_params']['boot_method'] == 'dasd'
    assert attrs['boot_params']['devicenr'] == boot_options['boot-device']
def test_boot_and_postinstall_check_on_vm_dasd(
        vm_dasd_system, default_os_tuple, tmpdir):
    """
    Attempt to install "nothing" on a VM on DASD disk.

    Verify that the hypervisor is called with the correct parameters
    and that the post-install checker is run.
    """
    machine_model = AutoinstallMachineModel(
        *default_os_tuple, vm_dasd_system, CREDS)
    post_checker = NullPostInstallChecker()
    hypervisor = plat_zvm.PlatZvm.create_hypervisor(machine_model)
    zvm_platform = plat_zvm.PlatZvm(machine_model, hypervisor)
    # autoinstall machines use their own working directory,
    # so initialize them inside a temporary environment
    with tmpdir.as_cwd():
        machine = NullMachine(machine_model, zvm_platform, post_checker)
        machine.start()

    assert post_checker.verify.called_once
    name, num_cpus, memory, attrs, *_ = hypervisor.start.calls[0]
    assert name == vm_dasd_system.system_name
    assert num_cpus == vm_dasd_system.cpus
    assert memory == vm_dasd_system.memory
    assert (vm_dasd_system.volumes[0].device_id
            == attrs['storage_volumes'][0]['devno'])
def test_boot_and_postinstall_check_on_vm_scsi(
        vm_scsi_system, default_os_tuple, tmpdir):
    """
    Attempt to install "nothing" on a VM on SCSI disk.

    Verify that the hypervisor is called with the correct parameters
    and that the post-install checker is run.
    """
    machine_model = AutoinstallMachineModel(
        *default_os_tuple, vm_scsi_system, CREDS)
    post_checker = NullPostInstallChecker()
    hypervisor = plat_zvm.PlatZvm.create_hypervisor(machine_model)
    zvm_platform = plat_zvm.PlatZvm(machine_model, hypervisor)
    # autoinstall machines use their own working directory,
    # so initialize them inside a temporary environment
    with tmpdir.as_cwd():
        machine = NullMachine(machine_model, zvm_platform, post_checker)
        machine.start()

    assert post_checker.verify.called_once
    name, num_cpus, memory, attrs, *_ = hypervisor.start.calls[0]
    assert name == vm_scsi_system.system_name
    assert num_cpus == vm_scsi_system.cpus
    assert memory == vm_scsi_system.memory
    assert (vm_scsi_system.volumes[0].lun
            == attrs['storage_volumes'][0]['lun'])
# Renamed from testboot_... : every sibling test uses the test_ prefix,
# and under a strict `python_functions = test_*` glob the old name would
# silently not be collected.
def test_boot_and_postinstall_check_on_kvm_scsi(
        kvm_scsi_system, default_os_tuple, tmpdir):
    """
    Attempt to install "nothing" on a KVM guest on SCSI disk.

    Verify correct device paths, that the hypervisor is called with the
    correct parameters, and that the post-install checker is run.
    """
    machine_model = AutoinstallMachineModel(
        *default_os_tuple, kvm_scsi_system, CREDS)
    post_checker = NullPostInstallChecker()
    hypervisor = plat_kvm.PlatKvm.create_hypervisor(machine_model)
    kvm_platform = plat_kvm.PlatKvm(machine_model, hypervisor)
    # autoinstall machines use their own working directory,
    # so initialize them inside a temporary environment
    with tmpdir.as_cwd():
        machine = NullMachine(machine_model, kvm_platform, post_checker)
        machine.start()

    assert post_checker.verify.called_once
    name, num_cpus, memory, attrs, *_ = hypervisor.start.calls[0]
    assert name == kvm_scsi_system.system_name
    assert num_cpus == kvm_scsi_system.cpus
    assert memory == kvm_scsi_system.memory
    assert (kvm_scsi_system.volumes[0].lun
            == attrs['storage_volumes'][0]['volume_id'])
    # every volume must be addressed via a stable by-path ccw device node
    for volume in machine_model.system_profile.volumes:
        assert '/dev/disk/by-path/ccw' in volume.device_path
def test_network_boot_on_lpar_scsi(
        scsi_volume, osa_iface, default_os_tuple, tmpdir):
    """
    Attempt to install "nothing" on an LPAR on SCSI disk using
    network boot.

    Verify that the hypervisor is called with the correct parameters.
    """
    ins_location = 'user@password:inst.local/some-os/boot.ins'
    boot_options = {
        'partition-name': 'LP10',
        'boot-method': 'network',
        'boot-uri': 'ftp://' + ins_location,
    }
    hmc_hypervisor = AutoinstallMachineModel.HmcHypervisor(
        'hmc', 'hmc.local', {'user': '', 'password': ''}, boot_options)
    system = AutoinstallMachineModel.SystemProfile(
        'lp10', 'default',
        hypervisor=hmc_hypervisor,
        hostname='lp10.local',
        cpus=2, memory=8192,
        volumes=[scsi_volume],
        interfaces=[(osa_iface, True)]
    )
    machine_model = AutoinstallMachineModel(*default_os_tuple, system, CREDS)
    hypervisor = plat_lpar.PlatLpar.create_hypervisor(machine_model)
    lpar_platform = plat_lpar.PlatLpar(machine_model, hypervisor)
    with tmpdir.as_cwd():
        NullMachine(machine_model, lpar_platform).start()

    name, num_cpus, memory, attrs, *_ = hypervisor.start.calls[0]
    assert name == hmc_hypervisor.boot_options['partition-name']
    assert num_cpus == system.cpus
    assert memory == system.memory
    # a network boot-method is translated into an ftp insfile boot
    assert attrs['boot_params']['boot_method'] == 'ftp'
    assert attrs['boot_params']['insfile'] == ins_location
def test_template_lpar_dasd(lpar_dasd_system, default_os_tuple, tmpdir):
    """
    Test major template parameters of the generated autofile
    for an LPAR/DASD installation.
    """
    # drop the repo entries from the os tuple; they are passed explicitly
    *os_tuple, _, _ = default_os_tuple
    package_repo = AutoinstallMachineModel.PackageRepository(
        'aux', 'http://example.com/repo', 'package repo')
    machine_model = AutoinstallMachineModel(
        *os_tuple, [], [package_repo], lpar_dasd_system, CREDS)
    hypervisor = plat_lpar.PlatLpar.create_hypervisor(machine_model)
    lpar_platform = plat_lpar.PlatLpar(machine_model, hypervisor)
    with tmpdir.as_cwd():
        machine = NullMachine(machine_model, lpar_platform)
        autofile_path = Path.cwd() / 'lp10-default'
        machine.start()
        autofile = yaml.safe_load(autofile_path.read_text())

    assert autofile['system']['type'] == 'LPAR'
    assert autofile['system']['hostname'] == 'lp10.local'
    gw_iface = autofile['gw_iface']
    assert gw_iface['type'] == 'OSA'
    assert gw_iface['osname'] == 'enccw0b01'
    assert gw_iface['search_list'] == ['example.com', 'local']
    assert autofile['ifaces'][0]['osname'] == 'enccw0b01'
    assert autofile['volumes'][0]['type'] == 'DASD'
    assert autofile['volumes'][0]['partitions'] == [
        {'fs': 'ext4', 'mp': '/', 'size': '18000M'}
    ]
    assert autofile['repos'][0]['name'] == 'os-repo'
    assert autofile['repos'][1]['name'] == 'aux'
def test_template_kvm_scsi(kvm_scsi_system, default_os_tuple, tmpdir):
"""
Test major template parameters
"""
model = AutoinstallMachineModel(*default_os_tuple,
kvm_scsi_system, CREDS)
hyp = plat_kvm.PlatKvm.create_hypervisor(model)
platform = plat_kvm.PlatKvm(model, hyp)
with tmpdir.as_cwd():
smbase = NullMachine(model, platform)
autofile_path = (Path.cwd() / 'kvm54-default')
smbase.start()
autofile = yaml.safe_load(autofile_path.read_text())
assert autofile['system']['type'] == 'KVM'
assert autofile['system']['hostname'] == 'kvm54.local'
assert autofile['gw_iface']['type'] == 'MACVTAP'
assert autofile['gw_iface']['osname'] == 'eth0'
assert autofile['ifaces'][0]['is_gateway']
| 34.326087
| 82
| 0.664155
| 1,836
| 15,790
| 5.528322
| 0.198802
| 0.021675
| 0.024828
| 0.017734
| 0.573695
| 0.546207
| 0.512808
| 0.492414
| 0.46601
| 0.424039
| 0
| 0.013665
| 0.235275
| 15,790
| 459
| 83
| 34.400871
| 0.826915
| 0.228689
| 0
| 0.338645
| 0
| 0
| 0.103389
| 0.012816
| 0
| 0
| 0
| 0
| 0.199203
| 1
| 0.079681
| false
| 0.015936
| 0.047809
| 0
| 0.159363
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d5852ea5b1463bc9be5da885619fc756c5bd1fc
| 4,329
|
py
|
Python
|
personal/Ervin/Word2Vec_recommender.py
|
edervishaj/spotify-recsys-challenge
|
4077201ac7e4ed9da433bd10a92c183614182437
|
[
"Apache-2.0"
] | 3
|
2018-10-12T20:19:57.000Z
|
2019-12-11T01:11:38.000Z
|
personal/Ervin/Word2Vec_recommender.py
|
kiminh/spotify-recsys-challenge
|
5e7844a77ce3c26658400f161d2d74d682f30e69
|
[
"Apache-2.0"
] | null | null | null |
personal/Ervin/Word2Vec_recommender.py
|
kiminh/spotify-recsys-challenge
|
5e7844a77ce3c26658400f161d2d74d682f30e69
|
[
"Apache-2.0"
] | 4
|
2018-10-27T20:30:18.000Z
|
2020-10-14T07:43:27.000Z
|
import time
import numpy as np
import scipy.sparse as sps
from gensim.models import Word2Vec
from tqdm import tqdm
from recommenders.recommender import Recommender
from utils.datareader import Datareader
from utils.evaluator import Evaluator
from utils.post_processing import eurm_to_recommendation_list
from recommenders.similarity.s_plus import dot_product
class W2VRecommender(Recommender):
    """
    Word2Vec-based recommender.

    Treats each URM row (presumably one playlist — confirm against the
    Datareader) as a "sentence" of track-id tokens, trains a gensim
    Word2Vec model on them, and derives ratings from most_similar
    queries on the trained keyed vectors.

    Requires gensim package: pip install gensim
    """
    RECOMMENDER_NAME = "W2VRecommender"

    def __init__(self):
        # Bug fix: the original called `super()` without `.__init__()`,
        # which only builds the proxy and never runs the base initializer.
        super().__init__()

    def compute_model(self, negative=5, sg=1, size=50, min_count=1,
                      workers=64, iter=1, window=None, verbose=False):
        """
        Train the Word2Vec model on self.urm and store the keyed vectors.

        :param negative: number of negative samples per positive sample
        :param sg: 1 for skip-gram, 0 for CBOW
        :param size: embedding dimensionality
        :param min_count: minimum token frequency to keep
        :param workers: number of training threads
        :param iter: number of training epochs (named to match the gensim
            keyword; intentionally shadows the builtin)
        :param window: context window; defaults to the longest row so every
            token in a row sees every other one
        :param verbose: print progress and timing information
        """
        sentences = []
        for row in tqdm(range(self.urm.shape[0]), desc='Generating sentences'):
            words = self.urm.indices[self.urm.indptr[row]:self.urm.indptr[row + 1]]
            # gensim expects string tokens; use builtin str —
            # the deprecated np.str alias was removed in numpy >= 1.24
            words = words.astype(str)
            if len(words) > 0:
                sentences.append(words.tolist())
        if verbose:
            print('[ Building Word2Vec model ]')
            start_time = time.time()
        if window is None:
            window = np.max(self.urm.sum(axis=1).A1)
        w2v = Word2Vec(sentences=sentences, sg=sg, size=size,
                       min_count=min_count, workers=workers, iter=iter,
                       seed=123, negative=negative, window=window)
        # L2-normalize vectors in place; original vectors are discarded
        w2v.init_sims(replace=True)
        self.kv = w2v.wv
        if verbose:
            print("time: " + str(int(time.time() - start_time) / 60))

    def compute_rating(self, verbose=False, small=False, mode="offline", top_k=750):
        """
        Build the estimated rating matrix (self.eurm) from vector similarity.

        :param verbose: print progress and timing information
        :param small: restrict the URM to the test playlists (self.pid)
        :param mode: kept for interface compatibility; unused here
        :param top_k: number of similar tracks fetched per row
        """
        if small:
            self.urm = sps.csr_matrix(self.urm)[self.pid]
        self.eurm = sps.lil_matrix(self.urm.shape, dtype=np.float32)
        if verbose:
            print('[ Computing ratings ]')
            start_time = time.time()
        # rows 0..999 are skipped — presumably a known/seed portion of the
        # matrix; TODO confirm against the datareader layout
        for row in tqdm(range(1000, self.urm.shape[0]),
                        desc='Calculating similarities'):
            test_words = self.urm.indices[self.urm.indptr[row]:self.urm.indptr[row + 1]]
            test_words = test_words.astype(str)
            most_sim = self.kv.most_similar(positive=test_words, topn=top_k)
            tracks = [tup[0] for tup in most_sim]
            sim = [tup[1] for tup in most_sim]
            self.eurm[row, tracks] = sim
        self.eurm = self.eurm.tocsr()
        self.eurm.eliminate_zeros()
        if verbose:
            print("time: " + str(int(time.time() - start_time) / 60))
if __name__ == '__main__':
    # Offline evaluation run: load data, train, score, evaluate.
    datareader = Datareader(only_load=True, mode='offline', test_num='1',
                            verbose=False)
    test_pids = datareader.get_test_playlists().transpose()[0]
    urm = datareader.get_urm()
    # binarize interactions: every known interaction counts as 1
    urm.data = np.ones(urm.data.shape[0])
    evaluator = Evaluator(datareader=datareader)
    recommender = W2VRecommender()
    recommender.fit(urm, test_pids)
    recommender.compute_model(verbose=True, size=50)
    recommender.compute_rating(verbose=True, small=True, top_k=750)
    rec_list = eurm_to_recommendation_list(
        recommender.eurm, remove_seed=True, datareader=datareader)
    evaluator.evaluate(recommendation_list=rec_list, name="W2V",
                       old_mode=False)
| 37.973684
| 116
| 0.613075
| 570
| 4,329
| 4.52807
| 0.254386
| 0.05153
| 0.03797
| 0.037195
| 0.443627
| 0.430453
| 0.430453
| 0.376986
| 0.376986
| 0.376986
| 0
| 0.023053
| 0.258489
| 4,329
| 114
| 117
| 37.973684
| 0.780997
| 0.270501
| 0
| 0.129032
| 0
| 0
| 0.046228
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.048387
| false
| 0
| 0.16129
| 0
| 0.241935
| 0.064516
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d5933b202fa0260d94c68bc7edbd14a32abb844
| 2,930
|
py
|
Python
|
visualize.py
|
jcamstan3370/MachineLearningPerovskites
|
d7bc433bac349bf53473dc6d636954cae996b8d2
|
[
"MIT"
] | 6
|
2020-05-09T17:18:00.000Z
|
2021-09-22T09:37:40.000Z
|
visualize.py
|
jstanai/ml_perovskites
|
d7bc433bac349bf53473dc6d636954cae996b8d2
|
[
"MIT"
] | null | null | null |
visualize.py
|
jstanai/ml_perovskites
|
d7bc433bac349bf53473dc6d636954cae996b8d2
|
[
"MIT"
] | 1
|
2021-03-24T04:21:31.000Z
|
2021-03-24T04:21:31.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
@author: Jared
"""
import numpy as np
import pandas as pd
import myConfig
import matplotlib.pyplot as plt
from ast import literal_eval
from plotter import getTrendPlot1
from matplotlib.pyplot import figure

# Load model predictions for the extended crystal set and the matching
# feature table; drop incomplete feature rows.
df = pd.read_csv(myConfig.extOutput)
dffExt = pd.read_csv(myConfig.featurePathExt)
dffExt = dffExt.copy().dropna(axis=0, how='any').reset_index()
y_predict_ext = df['yhat_ext']
print('Num dummy crystals: {}'.format(len(y_predict_ext)))
# NOTE(review): assumes 'p_' prefixes descriptor columns — confirm upstream
print([n for n in dffExt.columns if 'p_' not in n])

# Trend of predictions vs. Cl fraction.
s = 'fracCl'
dffExt['yhat_ext'] = df['yhat_ext']
ylabel = '$E_{g}$ (eV)'
getTrendPlot1(dffExt, y_predict_ext, s,
              ylabel = ylabel,
              xlabel = s,
              title = 'Trend')
plt.show()

# Disabled: per-Cl-fraction volume trend plots.
'''
s = 'volume'
g = dffExt.groupby('fracCl')
for i, group in g:
    getTrendPlot1(group, y_predict_ext, s,
                  ylabel = ylabel,
                  xlabel = s,
                  title = 'Trend',
                  scatter = False)
    plt.show()
'''

# Trend of predictions vs. Cs fraction, one plot per Sn-fraction group.
s = 'fracCs'
g = dffExt.groupby('fracSn')
for i, group in g:
    getTrendPlot1(group, y_predict_ext, s,
                  ylabel = ylabel,
                  xlabel = s,
                  title = 'Trend',
                  scatter = False)
    plt.show()

# Disabled: debug dump of the composition columns.
'''
print(dffExt[['fracCs', 'fracRb', 'fracK', 'fracNa',
              'fracSn' , 'fracGe',
              'fracCl', 'fracI', 'fracBr', 'yhat_ext']].head(10))
'''

# Group predictions by full composition so each unique stoichiometry
# contributes one label and one mean value.
g = dffExt.groupby([
    'fracCs', 'fracRb', 'fracK', 'fracNa',
    'fracSn' , 'fracGe',
    'fracCl', 'fracI', 'fracBr'])
x = []      # one formula label per unique stoichiometry
y = []      # mean prediction per unique stoichiometry
x_all = []  # label repeated for every member of the group
y_all = []  # every individual prediction
for (gr, gi) in g:
    labels = ['Cs', 'Rb', 'K', 'Na', 'Sn', 'Ge',
              'Cl', 'I', 'Br']
    #print(gr)
    sarr = []
    # Build a formula string like Cs$_{4}$Sn$_{4}$I$_{12}$.
    # The first six entries get multiplier 1, the halides multiplier 3 —
    # presumably site multiplicities of the perovskite cell; TODO confirm.
    for i, n in enumerate(gr):
        if i < 6:
            m = 1
        else:
            m = 3
        if n != 0:
            #if n == 1.0:
            sarr.append(labels[i] + '$_{' + str(int(4*m*n)) + '}$')
            #else:
            #sarr.append(labels[i] + '$_{' + str(4*m*n) + '}$')
    #print(sarr, gr)
    x += [''.join(sarr)]
    y.append(gi['yhat_ext'].mean())
    x_all += [''.join(sarr)]*len(gi)
    y_all += gi['yhat_ext'].tolist()
print(len(x_all), len(x))

# Scatter plot of per-composition mean predictions.
fig = plt.figure(figsize=(13, 4), dpi=200)
#(Atomic 3%, Lattice 10%)
#plt.title('Stability Trends')
plt.title('Direct Bandgap Trends')
#plt.ylabel('$\Delta E_{hull}$ (meV/atom)')
plt.ylabel('$E_{g}$ (eV)')
plt.xticks(rotation=90)
plt.scatter(x, y)
#figure(num=None, figsize=(8, 6), dpi=200, facecolor='w', edgecolor='k')
plt.savefig('/Users/Jared/Documents/test.png', bbox_inches='tight')
plt.show()

# Disabled: scatter of every individual prediction.
'''
plt.title('Bandgap Trends (Atomic 5%, Lattice 5%)')
plt.ylabel('E$_{g}$ (eV)')
plt.xticks(rotation=90)
plt.scatter(x_all, y_all)
figure(num=None, figsize=(8, 6), dpi=200, facecolor='w', edgecolor='k')
'''
| 23.821138
| 72
| 0.531058
| 387
| 2,930
| 3.930233
| 0.364341
| 0.027613
| 0.03616
| 0.019724
| 0.359632
| 0.333333
| 0.333333
| 0.333333
| 0.333333
| 0.264957
| 0
| 0.019849
| 0.277816
| 2,930
| 123
| 73
| 23.821138
| 0.69896
| 0.10785
| 0
| 0.114754
| 0
| 0
| 0.126332
| 0.015728
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.114754
| 0
| 0.114754
| 0.04918
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d595677f62dbebf986ab917f4b41f5f89af2fea
| 13,409
|
py
|
Python
|
InstagramCrawler.py
|
Bagas8015/Instagram-Posts-Crawler-Users-v1
|
82d5da12f7f6caf8c085085135134f58affb1ec7
|
[
"MIT"
] | null | null | null |
InstagramCrawler.py
|
Bagas8015/Instagram-Posts-Crawler-Users-v1
|
82d5da12f7f6caf8c085085135134f58affb1ec7
|
[
"MIT"
] | null | null | null |
InstagramCrawler.py
|
Bagas8015/Instagram-Posts-Crawler-Users-v1
|
82d5da12f7f6caf8c085085135134f58affb1ec7
|
[
"MIT"
] | null | null | null |
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
import time
import emoji
import string
import csv
import os
def getFileSize(nameFile):
    """Return the size of the file at ``nameFile`` in bytes."""
    return os.path.getsize(nameFile)
# Shared browser instance driven by every function below.
# NOTE(review): launching Chrome at import time is a side effect every
# importer pays for; consider creating it lazily.
browser = webdriver.Chrome()
def loginInstagram(url, username, password):
    """Log in to Instagram and navigate to the user's own profile page.

    Drives the shared module-level ``browser`` through hard-coded XPaths;
    the sleeps give each page a chance to finish loading.
    """
    browser.get(url)  # open the landing page
    time.sleep(2)  # give the page a chance to load
    # click through to the login page
    browser.find_element_by_xpath('/html/body/span/section/main/article/div[2]/div[2]/p/a').click()
    # the next three calls fill in the login form and submit it
    print("Mengisi form login ....")
    time.sleep(2)
    browser.find_element_by_xpath('/html/body/span/section/main/div/article/div/div[1]/div/form/div[2]/div/label/input').send_keys(username)
    browser.find_element_by_xpath('/html/body/span/section/main/div/article/div/div[1]/div/form/div[3]/div/label/input').send_keys(password)
    browser.find_element_by_xpath('/html/body/span/section/main/div/article/div/div[1]/div/form/div[4]/button').click()
    time.sleep(3)  # give the page a chance to load
    # dismiss the pop-up that appears after login
    browser.find_element_by_xpath('/html/body/div[3]/div/div/div[3]/button[2]').click()
    # go to the user's profile page
    browser.find_element_by_xpath('/html/body/span/section/nav/div[2]/div/div/div[3]/div/div[3]/a/span').click()
def getListFollowers(username, jml_followers = 0):
    """Open the followers dialog of ``username`` and scrape follower names.

    :param username: profile whose followers are scraped (its page must
        already be loaded in the shared ``browser``)
    :param jml_followers: known follower count; 0 means "read it from
        the profile header"
    :return: list of follower usernames (capped at roughly 200)
    """
    print("Sedang mengload data daftar followers " + username + " ....")
    time.sleep(3)  # wait until the profile page finished loading
    if jml_followers == 0:
        # read the follower count from the profile header
        jml_followers = browser.find_element_by_xpath('/html/body/span/section/main/div/header/section/ul/li[2]/a/span').get_attribute('title')
        # Bug fix: str.replace returns a new string; the original discarded
        # the result, so comma-formatted counts like "1,234" crashed the
        # int() conversions below.
        jml_followers = jml_followers.replace(',', '')
    # click the followers link to open the followers dialog
    browser.find_element_by_xpath('/html/body/span/section/main/div/header/section/ul/li[2]/a').click()
    time.sleep(2)
    followersList = browser.find_element_by_xpath('/html/body/div[3]/div/div[2]/ul')
    # number of follower entries rendered so far
    lengthListFollowers = len(followersList.find_elements_by_css_selector('li'))
    time.sleep(2)
    followersList.click()  # focus the list so keystrokes scroll it
    actionChain = webdriver.ActionChains(browser)
    daftar = []
    nilai_berulang = 0
    batas_berulang = 0
    # keep scrolling until every follower is rendered, capped at 200
    while lengthListFollowers < int(jml_followers) and lengthListFollowers < 200:
        time.sleep(1)
        # click near the bottom entry so a SPACE key scrolls the dialog
        browser.find_element_by_xpath('/html/body/div[3]/div/div[2]/ul/div/li[' + str(lengthListFollowers-2) + ']').click()
        actionChain.key_down(Keys.SPACE).key_up(Keys.SPACE).perform()
        # stop after 4 consecutive rounds without progress (end of list)
        if nilai_berulang == lengthListFollowers:
            batas_berulang += 1
            if batas_berulang == 4:
                break
        else:
            batas_berulang = 0
        nilai_berulang = lengthListFollowers
        lengthListFollowers = len(browser.find_elements_by_xpath('/html/body/div[3]/div/div[2]/ul/div/li'))
    # the dialog markup differs when there are 12 or fewer followers
    for i in range(1, lengthListFollowers+1):
        if int(jml_followers) > 12:
            daftar.append(browser.find_element_by_xpath('/html/body/div[3]/div/div[2]/ul/div/li['+str(i)+']/div/div[1]/div[2]/div[1]/a').get_attribute('title'))
        else:
            daftar.append(browser.find_element_by_xpath('/html/body/div[3]/div/div[2]/ul/div/li['+str(i)+']/div/div[2]/div[1]/div/div/a').get_attribute('title'))
    return daftar
def writeToCSVandGTF(index, username, namafile):  # GTF = Get Total Followers from the target; used to pick the next crawl target
    """Crawl every post of ``username`` into a CSV and return follower count.

    :param index: running row counter; 0 means the CSV header must be written
    :param username: profile to crawl (its page is already open in ``browser``)
    :param namafile: CSV output path
    :return: (total followers of the target, updated ``index``)
    """
    print('Sedang Crawling target ' + username + ' ....')
    try:
        # private-account check: if this <h2> exists the profile is private
        # and cannot be crawled; otherwise find_element raises and the
        # actual crawl happens in the except branch below.
        browser.find_element_by_xpath('/html/body/span/section/main/div/div/article/div[1]/div/h2')
        return 0, index
    except:
        time.sleep(2)
        # translation table that strips all punctuation from captions
        translator = str.maketrans('', '', string.punctuation)

        def give_emoji_free_text(text):
            # Remove every emoji from the text.
            # NOTE(review): `str` is used as the loop variable, shadowing
            # the builtin inside these comprehensions.
            allchars = [str for str in text.encode('ascii', 'ignore').decode('utf-8')]
            emoji_list = [c for c in allchars if c in emoji.UNICODE_EMOJI]
            clean_text = ' '.join([str for str in text.encode('ascii', 'ignore').decode('utf-8').split() if not any(i in str for i in emoji_list)])
            return clean_text

        def hashtag(text):
            # Extract the list of '#tag' tokens from a caption.
            char = text.encode('ascii', 'ignore').decode('utf-8').replace('\n',' ')
            tag = []
            teks = ''
            tulis = 0  # 1 while we are inside a hashtag
            for i in range(len(char)):
                if tulis == 1:
                    teks = teks + char[i]
                if char[i] == '#':
                    tulis = 1
                elif (char[i] == ' ' or i == len(char)-1) and teks != '':
                    teks = '#' + teks
                    tag.append(teks)
                    tulis = 0
                    teks = ''
            return tag

        # total followers and total posts from the profile header
        jml_followers = browser.find_element_by_xpath('/html/body/span/section/main/div/header/section/ul/li[2]/a/span').get_attribute('title')
        jml_posts = browser.find_element_by_xpath('/html/body/span/section/main/div/header/section/ul/li[1]/span/span').text
        jml_followers = jml_followers.replace(',','')
        jml_posts = jml_posts.replace(',','')
        if int(jml_posts) == 0:
            return int(jml_followers), index
        # open the first post; the thumbnail grid's index varies, so probe
        # div[tes] until a click succeeds (give up after 10 misses)
        tes = 0
        galat = 0
        benar = 1
        while benar == 1 and int(jml_posts) != 0:
            try:
                browser.find_element_by_xpath('/html/body/span/section/main/div/div['+str(tes)+']/article/div[1]/div/div[1]/div[1]').click()
                benar = 0
            except:
                tes += 1
                galat += 1
                if galat == 10:
                    break
                continue
        time.sleep(1)
        # Crawl the posts one by one.
        # NOTE(review): galat never exceeds 10, so `galat != 11` is
        # always true — confirm the intended guard was `galat != 10`.
        limit = 0
        while limit < int(jml_posts)-1 and int(jml_posts) != 0 and galat != 11:
            #print("Sedang crawling data posts target " + username + " ....")
            loading = False
            kanan = False  # "right" arrow was clicked
            kiri = False   # "left" arrow was clicked
            try:
                time.sleep(3)
                # if the loading-spinner <svg> is present, skip this post
                # by clicking the navigation arrow
                browser.find_element_by_xpath('/html/body/div[3]/div[2]/div/div/div/svg')
                if limit > 0:
                    browser.find_element_by_xpath('/html/body/div[3]/div[1]/div/div/a').click()
                    loading = True
                    kanan = True
                else:
                    browser.find_element_by_xpath('/html/body/div[3]/div[1]/div/div/a').click()
                    loading = True
                    kiri = True
            except:
                try:
                    ### Handle the loading spinner case
                    if loading:
                        if kiri:
                            time.sleep(2)
                            browser.find_element_by_xpath('/html/body/div[3]/div[1]/div/div/a').click()
                            loading = False
                            kiri = False
                            continue
                        elif kanan:
                            time.sleep(2)
                            browser.find_element_by_xpath('/html/body/div[3]/div[1]/div/div/a[2]').click()
                            loading = False
                            kanan = False
                            continue
                    ### Otherwise grab the caption of the current post
                    teks = browser.find_element_by_xpath('/html/body/div[3]/div[2]/div/article/div[2]/div[1]/ul/div/li/div/div/div[2]/span').text
                    tag = hashtag(teks)  # collect the hashtags
                    if len(tag) == 0:
                        tag = ''
                    teks = give_emoji_free_text(teks)  # strip emojis
                    teks = teks.translate(translator).lower()  # normalize
                except:
                    teks = ''
                    tag = ''
                try:
                    try:
                        # posts with many likes expose the count in a <span>
                        likes = browser.find_element_by_xpath('/html/body/div[3]/div[2]/div/article/div[2]/section[2]/div/div/button/span').text
                    except:
                        # posts with few likes only have the button text
                        likes = browser.find_element_by_xpath('/html/body/div[3]/div[2]/div/article/div[2]/section[2]/div/div/button').text
                    # strip the 'like this' / 'like' suffix
                    likes = likes.replace('like this','').replace('like','')
                except:
                    # videos report views/likes in a different element
                    likes = browser.find_element_by_xpath('/html/body/div[3]/div[2]/div/article/div[2]/section[2]/div/span/span').text
                #print(teks, likes, tag)
                try:
                    # number of comment threads on the post
                    commentlist = len(browser.find_elements_by_xpath('/html/body/div[3]/div[2]/div/article/div[2]/div[1]/ul/ul'))
                    comment = []
                    ##print(commentlist)
                    for i in range(1,commentlist+1):
                        morecomment = []
                        commentter = browser.find_element_by_xpath('/html/body/div[3]/div[2]/div/article/div[2]/div[1]/ul/ul['+str(i)+']/div/li/div/div[1]/div[2]/h3/a').text
                        teksc = browser.find_element_by_xpath('/html/body/div[3]/div[2]/div/article/div[2]/div[1]/ul/ul['+str(i)+']/div/li/div/div[1]/div[2]/span').text
                        teksc = give_emoji_free_text(teksc)
                        teksc = teksc.translate(translator).lower()
                        morecomment.append(commentter)
                        morecomment.append(teksc)
                        comment.append(morecomment)
                        #print(commentter,teks)
                    if len(comment) == 0:
                        comment = ''
                except:
                    comment = ''
                if index == 0:
                    # first row overall: create the file with a header
                    with open(namafile,'a',newline='') as csvfile:
                        writer = csv.writer(csvfile)
                        writer.writerow(['username','post','tag','likes','comment'])
                        writer.writerow([username, teks, tag, likes, comment])
                        index += 1
                else:
                    # append the new row to the existing CSV
                    with open(namafile, 'a', newline = '') as csvfile:
                        writer = csv.writer(csvfile)
                        #print(username, teks, tag, likes, comment)
                        writer.writerow([username, teks, tag, likes, comment])
                        index += 1
                # move to the next post (first post only has one arrow)
                if limit == 0:
                    browser.find_element_by_xpath('/html/body/div[3]/div[1]/div/div/a').click()
                else:
                    browser.find_element_by_xpath('/html/body/div[3]/div[1]/div/div/a[2]').click()
                #print()
                time.sleep(2)
                limit += 1
        return int(jml_followers), index
def mulaiProgram(url, username, password):
    """Run the crawler until the output CSV reaches roughly 100 MB.

    Starting from ``username``, scrape every follower's posts with
    writeToCSVandGTF, then hop to the follower with the most followers
    and repeat.

    :param url: Instagram base URL
    :param username: account to log in with and to start crawling from
    :param password: password for the login
    """
    loginInstagram(url, username, password)
    hitung = 0  # number of profiles crawled so far
    sizeOfFile = 0
    namafile = input("Masukkan nama file: ")
    namafix = namafile + '.csv'
    while sizeOfFile < 1024*1024*100:  # stop at ~100 MB of CSV data
        tertinggi = 0
        indekss = 0
        try:
            listTotalFollowersFromTarget = []
            listFollowers = []
            listFollowers = getListFollowers(username, tertinggi)
            for usertarget in listFollowers:
                browser.get(url + '/' + usertarget)
                time.sleep(3)
                totalFollowers, indekss = writeToCSVandGTF(indekss, usertarget, namafix)
                listTotalFollowersFromTarget.append(totalFollowers)
                hitung += 1
            # pick the follower with the highest follower count as the
            # next crawl target
            tertinggi = max(listTotalFollowersFromTarget)
            indeks = listTotalFollowersFromTarget.index(tertinggi)
            browser.get(url + '/' + username)
            time.sleep(2)
            username = listFollowers[indeks]
            browser.get(url + '/' + username)
        except Exception:
            # Bug fix: the original bare `except:` also swallowed
            # KeyboardInterrupt/SystemExit, making the crawler
            # impossible to stop with Ctrl-C; retry on ordinary errors only.
            continue
        sizeOfFile = getFileSize(namafix)
# Run the crawler only when executed as a script; the original module-level
# statements prompted for credentials unconditionally on import.
if __name__ == '__main__':
    user = input('Masukkan username akun anda: ')
    passwo = input('Masukkan password akun anda: ')
    url = 'https://www.instagram.com'
    username = user
    password = passwo
    mulaiProgram(url, username, password)
    browser.quit()
| 50.220974
| 205
| 0.548736
| 1,541
| 13,409
| 4.680727
| 0.188189
| 0.02745
| 0.047276
| 0.064467
| 0.382504
| 0.351865
| 0.347428
| 0.33398
| 0.33398
| 0.32788
| 0
| 0.018929
| 0.330226
| 13,409
| 266
| 206
| 50.409774
| 0.784211
| 0.122604
| 0
| 0.369369
| 0
| 0.103604
| 0.185719
| 0.15714
| 0
| 0
| 0
| 0
| 0
| 1
| 0.031532
| false
| 0.031532
| 0.031532
| 0.004505
| 0.094595
| 0.013514
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d5b40af3f077c2c14c5035c4efe391b9a38cc70
| 527
|
py
|
Python
|
DesignPatterns/MVC/server/controllers/index.py
|
TigranGit/CodeBase
|
d58e30b1d83fab4b388ec2cdcb868fa751c62188
|
[
"Apache-2.0"
] | 1
|
2020-08-13T19:09:27.000Z
|
2020-08-13T19:09:27.000Z
|
DesignPatterns/MVC/server/controllers/index.py
|
TigranGit/CodeBase
|
d58e30b1d83fab4b388ec2cdcb868fa751c62188
|
[
"Apache-2.0"
] | null | null | null |
DesignPatterns/MVC/server/controllers/index.py
|
TigranGit/CodeBase
|
d58e30b1d83fab4b388ec2cdcb868fa751c62188
|
[
"Apache-2.0"
] | null | null | null |
from .base_controller import BaseController
from ..helper.utils import render_template
from ..helper.constants import STATUS_OK
class IndexController(BaseController):
    """Controller serving the index ("Home") page."""

    def __init__(self, client_address):
        # client_address is an (ip, port) pair from the server socket
        ip, port = client_address[0], client_address[1]
        self.user_ip = ip
        self.user_port = str(port)
        self.title = "Home"

    def get(self):
        """Render index.html with the visitor's address details."""
        body = render_template(
            "index.html",
            title=self.title,
            user_ip=self.user_ip,
            user_port=self.user_port,
        )
        return STATUS_OK, body
| 27.736842
| 47
| 0.654649
| 64
| 527
| 5.109375
| 0.484375
| 0.097859
| 0.061162
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005102
| 0.256167
| 527
| 18
| 48
| 29.277778
| 0.829082
| 0
| 0
| 0
| 0
| 0
| 0.026565
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.2
| 0.066667
| 0.466667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
8d5f94f57caf92571a35ef22a1aa7566e2df0d65
| 1,582
|
py
|
Python
|
tasks/tests/ui/conftest.py
|
MisterLenivec/django_simple_todo_app
|
8e694a67df43de7feaae785c0b3205534c701923
|
[
"MIT"
] | null | null | null |
tasks/tests/ui/conftest.py
|
MisterLenivec/django_simple_todo_app
|
8e694a67df43de7feaae785c0b3205534c701923
|
[
"MIT"
] | 4
|
2020-06-07T01:25:14.000Z
|
2021-06-10T18:34:10.000Z
|
tasks/tests/ui/conftest.py
|
MisterLenivec/django_simple_todo_app
|
8e694a67df43de7feaae785c0b3205534c701923
|
[
"MIT"
] | null | null | null |
from django.conf import settings
from selenium import webdriver
from selenium.webdriver.chrome.options import Options
import pytest
import os
@pytest.fixture(scope='session')
def django_db_setup():
    """Point Django's default database at the local test Postgres."""
    env = os.environ.get
    settings.DATABASES['default'] = {
        'ENGINE': 'django.db.backends.postgresql',
        'NAME': env('simple_todo_db_name'),
        'USER': env('simple_todo_db_user'),
        'PASSWORD': env('simple_todo_db_password'),
        'HOST': '127.0.0.1',
        'PORT': '5432',
    }
def pytest_addoption(parser):
    """Register the --browser_name command line option with pytest."""
    parser.addoption(
        '--browser_name',
        action='store',
        default="chrome",
        help="Choose browser: chrome or firefox",
    )
def chrome_options():
    """Build Chrome options: headless (no visible browser), full-HD window."""
    options = Options()
    for argument in ("--headless", "--window-size=1920x1080"):
        options.add_argument(argument)
    return options
def firefox_options():
    """Return a default Firefox profile for the webdriver."""
    return webdriver.FirefoxProfile()
@pytest.fixture(scope="session")
def browser(request):
browser_name = request.config.getoption("browser_name")
browser = None
if browser_name == "chrome":
print("\nstart chrome browser for test..")
browser = webdriver.Chrome(
options=chrome_options()
)
elif browser_name == "firefox":
print("\nstart firefox browser for test..")
browser = webdriver.Firefox(
firefox_profile=firefox_options()
)
else:
raise pytest.UsageError("--browser_name should be chrome or firefox")
yield browser
print("\nquit browser..")
browser.quit()
| 27.275862
| 77
| 0.653603
| 180
| 1,582
| 5.605556
| 0.411111
| 0.065411
| 0.035679
| 0.053518
| 0.186323
| 0.071358
| 0
| 0
| 0
| 0
| 0
| 0.014551
| 0.218078
| 1,582
| 57
| 78
| 27.754386
| 0.801132
| 0.009482
| 0
| 0
| 0
| 0
| 0.252396
| 0.047923
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0.022222
| 0.111111
| 0
| 0.266667
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|