**Schema** (113 columns):

| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
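For orientation, a minimal sketch of how rows with this schema might be loaded and filtered. This assumes the split is stored as a Parquet file; the filename `data.parquet` and the duplication threshold are illustrative, not from the dataset card:

```python
import pandas as pd

# Hypothetical filename; the actual shard layout depends on how the split is stored.
df = pd.read_parquet("data.parquet")

# Keep Python files that parse (cate_ast == 1) and have little duplicated text.
mask = (
    (df["lang"] == "Python")
    & (df["qsc_codepython_cate_ast_quality_signal"] == 1)
    & (df["qsc_code_frac_chars_dupe_5grams_quality_signal"] < 0.5)
)
print(df.loc[mask, ["max_stars_repo_name", "max_stars_repo_path", "size"]].head())
```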
**Record 1: hexsha `dba89946ffbf4b4e0ca04987e645e105d52edb8a`**

- size: 2,412 bytes · ext: py · lang: Python
- max_stars: repo `rvacaru/airflow-training-skeleton` @ `45fc6a8938d055b98c62c85b7c8085cb7d6f23ba`, path `dags/mailsdag.py`, licenses ["Apache-2.0"], count: null, events: null → null
- max_issues: same repo, head, path, and licenses; count: null, events: null → null
- max_forks: same repo, head, path, and licenses; count: null, events: null → null
- content:
# -*- coding: utf-8 -*-
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
"""Example DAG demonstrating the usage of the BashOperator."""
from datetime import timedelta
import datetime
import airflow
from airflow.models import DAG
from airflow.operators.bash_operator import BashOperator
from airflow.operators.dummy_operator import DummyOperator
from airflow.operators.python_operator import PythonOperator
from airflow.operators.python_operator import BranchPythonOperator
args = {
'owner': 'Airflow',
'start_date': airflow.utils.dates.days_ago(14),
}
dag = DAG(
dag_id='exercise_weekday',
default_args=args,
schedule_interval='0 0 * * *',
dagrun_timeout=timedelta(minutes=60),
)
dummy_last = DummyOperator(
task_id='run_this_last',
dag=dag,
trigger_rule='one_success',
)
def print_weekday(**context):
day = context["execution_date"].strftime('%a')
print(day)
return day
weekday_task = PythonOperator(
task_id='weekday_task',
python_callable=print_weekday,
provide_context=True,
dag=dag,
)
# optimize with try/except
weekday_person = {
"Mon": "bob",
"Tue": "joe",
"Thu": "joe",
}
def define_oncall(**context):
day = print_weekday(**context)
try:
task_id = weekday_person[day]
except KeyError:
return "ali"
return task_id
branch_task = BranchPythonOperator(
task_id='branch_task',
python_callable=define_oncall,
provide_context=True,
dag=dag,
)
tasks = ["bob", "joe", "ali"]
for p in tasks:
taski = DummyOperator(
task_id=p,
dag=dag,
)
branch_task >> taski
taski >> dummy_last
weekday_task >> branch_task
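An aside on the `# optimize with try/except` note in the file above: the lookup-with-fallback in `define_oncall` can be written without exception handling by using `dict.get` with a default. A minimal sketch (same `weekday_person` table as in the DAG; behavior is unchanged):

```python
def define_oncall(**context):
    day = print_weekday(**context)
    # dict.get returns the fallback instead of raising KeyError.
    return weekday_person.get(day, "ali")
```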
- avg_line_length: 25.125 · max_line_length: 66 · alphanum_fraction: 0.717247
- qsc_code quality signals: num_words 319; num_chars 2,412; mean_word_length 5.291536; frac_words_unique 0.45768; frac_chars_top_2/3/4grams 0.035545 / 0.047393 / 0.018957; frac_chars_dupe_5..10grams 0.075829 / 0.047393 / 0 / 0 / 0 / 0; frac_chars_replacement_symbols 0; frac_chars_digital 0.005612; frac_chars_whitespace 0.187396; size_file_byte 2,412; num_lines 95; num_chars_line_max 67; num_chars_line_mean 25.389474; frac_chars_alphabet 0.855612; frac_chars_comments 0.354892; cate_xml_start 0; frac_lines_dupe_lines 0.1; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.091384; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
- qsc_codepython quality signals: cate_ast 1; frac_lines_func_ratio 0.033333; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.133333; frac_lines_simplefunc 0; score_lines_no_logic 0.216667; frac_lines_print 0.066667
- raw qsc counters: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null)
- effective: 1 · hits: 0
**Record 2: hexsha `dba99e90b4b43e354898ce74c9ce989b11885ee9`**

- size: 1,359 bytes · ext: py · lang: Python
- max_stars: repo `speedplane/python-compat-runtime` @ `743ade7e1350c790c4aaa48dd2c0893d06d80cee`, path `appengine-compat/exported_appengine_sdk/google/storage/speckle/proto/jdbc_type.py`, licenses ["Apache-2.0"], count: 26, events: 2015-01-20T08:02:38.000Z → 2020-06-10T04:57:41.000Z
- max_issues: same repo, head, path, and licenses; count: 53, events: 2016-04-06T21:10:43.000Z → 2018-03-19T23:14:33.000Z
- max_forks: same repo, head, path, and licenses; count: 23, events: 2016-04-19T05:45:26.000Z → 2021-12-31T23:22:36.000Z
- content:
#!/usr/bin/env python
#
# Copyright 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Python equivalent of jdbc_type.h.
Python definition of the JDBC type constant values defined in Java class
java.sql.Types. Since the values don't fall into the range allowed by
a protocol buffer enum, we use Python constants instead.
If you update this, update jdbc_type.py also.
"""
BIT = -7
TINYINT = -6
SMALLINT = 5
INTEGER = 4
BIGINT = -5
FLOAT = 6
REAL = 7
DOUBLE = 8
NUMERIC = 2
DECIMAL = 3
CHAR = 1
VARCHAR = 12
LONGVARCHAR = -1
DATE = 91
TIME = 92
TIMESTAMP = 93
BINARY = -2
VARBINARY = -3
LONGVARBINARY = -4
NULL = 0
OTHER = 1111
JAVA_OBJECT = 2000
DISTINCT = 2001
STRUCT = 2002
ARRAY = 2003
BLOB = 2004
CLOB = 2005
REF = 2006
DATALINK = 70
BOOLEAN = 16
ROWID = -8
NCHAR = -15
NVARCHAR = -9
LONGNVARCHAR = -16
NCLOB = 2011
SQLXML = 2009
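Since these JDBC type codes are plain module-level constants rather than a protocol buffer enum (per the docstring above), a reverse lookup table can be handy when decoding values. A minimal sketch, assuming the module is importable as `jdbc_type`:

```python
import jdbc_type

# Map each constant's value back to its name, e.g. 12 -> "VARCHAR".
JDBC_TYPE_NAMES = {
    value: name
    for name, value in vars(jdbc_type).items()
    if name.isupper() and isinstance(value, int)
}

print(JDBC_TYPE_NAMES[12])   # VARCHAR
print(JDBC_TYPE_NAMES[-5])   # BIGINT
```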
- avg_line_length: 20.283582 · max_line_length: 74 · alphanum_fraction: 0.725533
- qsc_code quality signals: num_words 217; num_chars 1,359; mean_word_length 4.529954; frac_words_unique 0.723502; frac_chars_top_2/3/4grams 0.061038 / 0.02645 / 0.032553; frac_chars_dupe_5..10grams all 0; frac_chars_replacement_symbols 0; frac_chars_digital 0.074886; frac_chars_whitespace 0.19426; size_file_byte 1,359; num_lines 66; num_chars_line_max 75; num_chars_line_mean 20.590909; frac_chars_alphabet 0.822831; frac_chars_comments 0.626932; cate_xml_start 0; frac_lines_dupe_lines 0; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
- qsc_codepython quality signals: cate_ast 1; frac_lines_func_ratio 0; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0; frac_lines_simplefunc 0; score_lines_no_logic 0; frac_lines_print 0
- raw qsc counters: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null)
- effective: 1 · hits: 0
**Record 3: hexsha `dbaa5fe4d5410450515867da0876df0842647fcf`**

- size: 2,406 bytes · ext: py · lang: Python
- max_stars: repo `osabogal10/GestiREDBackend` @ `99aa3b01bd67910cc0f96751c88d0f4e83763392`, path `GestiRED/views.py`, licenses ["MIT"], count: null, events: null → null
- max_issues: same repo, head, path, and licenses; count: null, events: null → null
- max_forks: same repo, head, path, and licenses; count: 1, events: 2018-11-19T00:08:05.000Z → 2018-11-19T00:08:05.000Z
- content:
from django.http import HttpResponse
from django.core.mail import send_mail
import json
from django.shortcuts import render
from django.views.decorators.csrf import csrf_exempt
from GestiRED.models import User
from GestiRED.models import QualityControl, Phase, Resource, ResourceType,PhaseType
from django.core import serializers
from django.db.models import Q
# Create your views here.
def index(request):
return HttpResponse("GestiRED app UP")
@csrf_exempt
def quality_review_notification(request):
if request.method == 'POST':
data = json.loads(request.body)
qualityControl_id = data["qualityControl_id"]
resource_name = data["resource_name"]
responsible_name = data["responsible_name"]
qualityControl = QualityControl.objects.get(pk=qualityControl_id)
user = qualityControl.responsible
send_mail('Revision Calidad',
'Recurso: ' + resource_name + '\n Observaciones: Se ha asignado para control de calidad a: ' + responsible_name,
'clipstaragil6@gmail.com',
[user.email],
fail_silently=False)
res = {"status": "Ok", "Content:": "Email enviado"}
return HttpResponse(json.dumps(res), content_type="application/json")
@csrf_exempt
def resources_filters(request):
qs_json={}
if request.method == 'GET':
phaseType = request.GET.get('phaseType')
if phaseType != None : phaseType= phaseType.split(',')
resourceType = request.GET.get('resourceType')
if resourceType != None : resourceType = resourceType.split(',')
responsible = request.GET.get('responsible')
if responsible != None: responsible = responsible.split(',')
labels = request.GET.get('labels')
my_dict = {'phase__phaseType__in':phaseType,
'resourceType__in': resourceType,
'responsibles__in':responsible,
'labels__icontains': labels} # Your dict with fields
or_condition = Q()
for key, value in my_dict.items():
if value != None:
or_condition.add(Q(**{key: value}), Q.AND)
lp = set()
lp=Resource.objects.filter(or_condition).all().distinct()
data = list([res.json() for res in lp])
qs_json =json.dumps({'objects':data})
return HttpResponse( qs_json, content_type='application/json')
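The filter-building pattern in `resources_filters` (accumulating `Q` objects only for the query parameters that were actually supplied) can be isolated as a small helper. A hedged sketch of the same idea; `build_and_filter` is an illustrative name, not from the project, and the field names follow the view above:

```python
from django.db.models import Q

def build_and_filter(params):
    """AND together Q objects for every parameter that is not None."""
    condition = Q()
    for field, value in params.items():
        if value is not None:
            condition &= Q(**{field: value})
    return condition

# Usage, mirroring the view:
# Resource.objects.filter(build_and_filter({
#     "phase__phaseType__in": ["1", "2"],
#     "labels__icontains": None,  # skipped, was not supplied
# })).distinct()
```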
- avg_line_length: 36.454545 · max_line_length: 130 · alphanum_fraction: 0.656692
- qsc_code quality signals: num_words 269; num_chars 2,406; mean_word_length 5.732342; frac_words_unique 0.386617; frac_chars_top_2/3/4grams 0.038911 / 0.033722 / 0.031128; frac_chars_dupe_5..10grams all 0; frac_chars_replacement_symbols 0; frac_chars_digital 0.000542; frac_chars_whitespace 0.232751; size_file_byte 2,406; num_lines 65; num_chars_line_max 131; num_chars_line_mean 37.015385; frac_chars_alphabet 0.834778; frac_chars_comments 0.018703; cate_xml_start 0; frac_lines_dupe_lines 0.039216; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.150191; frac_chars_long_word_length 0.009758; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
- qsc_codepython quality signals: cate_ast 1; frac_lines_func_ratio 0.058824; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.176471; frac_lines_simplefunc 0.019608; score_lines_no_logic 0.294118; frac_lines_print 0
- raw qsc counters: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null)
- effective: 1 · hits: 0
**Record 4: hexsha `dbaa65a763de8c1cfbc863205e539ed71151b214`**

- size: 2,181 bytes · ext: py · lang: Python
- max_stars: repo `sipeed/python3-maix` @ `9ced31b8f1c1e4ef93b6a57bbfced27ae9e3361e`, path `ext_modules/_maix_nn/example/yolo2_camera.py`, licenses ["MIT"], count: 93, events: 2021-01-12T01:56:06.000Z → 2022-03-30T12:52:01.000Z
- max_issues: repo `JasperG1998/MaixPy3` @ `b36800b8d6aebf55018894c215c23a73d2fe406d`, same path and licenses; count: 29, events: 2021-02-04T10:37:26.000Z → 2022-03-20T15:10:55.000Z
- max_forks: repo `JasperG1998/MaixPy3` @ `b36800b8d6aebf55018894c215c23a73d2fe406d`, same path and licenses; count: 25, events: 2021-01-25T18:10:09.000Z → 2022-03-31T13:55:36.000Z
- content:
from maix import nn
from PIL import Image, ImageDraw, ImageFont
from maix import display, camera
import time
from maix.nn import decoder
def draw_rectangle_with_title(draw, box, disp_str, bg_color=(255, 0, 0, 255), font_color=(255, 255, 255, 255)):
# draw = ImageDraw.Draw(img)
font = ImageFont.load_default()
font_w, font_h = font.getsize(disp_str)
draw.rectangle((box[0], box[1], box[0] + box[2], box[1] + box[3]), fill=None, outline=bg_color, width=2)
draw.rectangle((box[0], box[1] - font_h, box[0] + font_w, box[1]), fill=bg_color)
draw.text((box[0], box[1] - font_h), disp_str, fill=font_color, font=font)
camera.config(size=(224, 224))
model = {
"param": "/root/models/yolo2_face_awnn.param",
"bin": "/root/models/yolo2_face_awnn.bin"
}
options = {
"model_type": "awnn",
"inputs": {
"input0": (224, 224, 3)
},
"outputs": {
"output0": (7, 7, (1+4+1)*5)
},
"mean": [127.5, 127.5, 127.5],
"norm": [0.0078125, 0.0078125, 0.0078125],
}
print("-- load model:", model)
m = nn.load(model, opt=options)
print("-- load ok")
print("-- read image")
w = options["inputs"]["input0"][1]
h = options["inputs"]["input0"][0]
# # img.show()
print("-- read image ok")
labels = ["person"]
anchors = [1.19, 1.98, 2.79, 4.59, 4.53, 8.92, 8.06, 5.29, 10.32, 10.65]
yolo2_decoder = decoder.Yolo2(len(labels), anchors, net_in_size=(w, h), net_out_size=(7, 7))
while 1:
img = camera.capture()
if not img:
time.sleep(0.01)
continue
t = time.time()
out = m.forward(img, quantize=True, layout="hwc")
print("-- forward: ", time.time() - t )
t = time.time()
boxes, probs = yolo2_decoder.run(out, nms=0.3, threshold=0.5, img_size=(240, 240))
print("-- decode: ", time.time() - t )
t = time.time()
for i, box in enumerate(boxes):
class_id = probs[i][0]
prob = probs[i][1][class_id]
disp_str = "{}:{:.2f}%".format(labels[class_id], prob*100)
draw_rectangle_with_title(display.get_draw(), box, disp_str)
print("-- draw: ", time.time() - t )
t = time.time()
display.show()
print("-- show: ", time.time() - t )
- avg_line_length: 27.2625 · max_line_length: 111 · alphanum_fraction: 0.596057
- qsc_code quality signals: num_words 342; num_chars 2,181; mean_word_length 3.690058; frac_words_unique 0.345029; frac_chars_top_2/3/4grams 0.050713 / 0.022187 / 0.019017; frac_chars_dupe_5..10grams 0.126783 / 0.090333 / 0 / 0 / 0 / 0; frac_chars_replacement_symbols 0; frac_chars_digital 0.087982; frac_chars_whitespace 0.202659; size_file_byte 2,181; num_lines 79; num_chars_line_max 112; num_chars_line_mean 27.607595; frac_chars_alphabet 0.637723; frac_chars_comments 0.017423; cate_xml_start 0; frac_lines_dupe_lines 0.068966; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.121425; frac_chars_long_word_length 0.030942; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
- qsc_codepython quality signals: cate_ast 1; frac_lines_func_ratio 0.017241; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.086207; frac_lines_simplefunc 0; score_lines_no_logic 0.103448; frac_lines_print 0.137931
- raw qsc counters: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null)
- effective: 1 · hits: 0
**Record 5: hexsha `dbaa6f31a1ce95280bfdff82b4090e6bc54d2002`**

- size: 10,143 bytes · ext: py · lang: Python
- max_stars: repo `Fatal1ty/mashumaro` @ `f32acf98f7cc7cdf638b921fe3fde96bef4fbefb`, path `tests/test_metadata_options.py`, licenses ["Apache-2.0"], count: 394, events: 2018-11-09T11:55:11.000Z → 2022-03-27T07:39:48.000Z
- max_issues: same repo, head, path, and licenses; count: 70, events: 2018-12-10T19:43:01.000Z → 2022-03-17T07:37:45.000Z
- max_forks: same repo, head, path, and licenses; count: 29, events: 2018-12-10T19:44:19.000Z → 2022-03-11T00:12:26.000Z
- content:
from dataclasses import dataclass, field
from datetime import date, datetime, time, timezone
from pathlib import Path
from typing import Any, Dict, Optional, Union
import ciso8601
import pytest
from mashumaro import DataClassDictMixin
from mashumaro.exceptions import UnserializableField
from mashumaro.types import SerializationStrategy
from .entities import (
MutableString,
MyList,
ThirdPartyType,
TypedDictRequiredKeys,
)
def test_ciso8601_datetime_parser():
@dataclass
class DataClass(DataClassDictMixin):
x: datetime = field(metadata={"deserialize": "ciso8601"})
should_be = DataClass(x=datetime(2021, 1, 2, 3, 4, 5, tzinfo=timezone.utc))
instance = DataClass.from_dict({"x": "2021-01-02T03:04:05Z"})
assert instance == should_be
def test_ciso8601_date_parser():
@dataclass
class DataClass(DataClassDictMixin):
x: date = field(metadata={"deserialize": "ciso8601"})
should_be = DataClass(x=date(2021, 1, 2))
instance = DataClass.from_dict({"x": "2021-01-02T03:04:05Z"})
assert instance == should_be
def test_ciso8601_time_parser():
@dataclass
class DataClass(DataClassDictMixin):
x: time = field(metadata={"deserialize": "ciso8601"})
should_be = DataClass(x=time(3, 4, 5))
instance = DataClass.from_dict({"x": "2021-01-02T03:04:05Z"})
assert instance == should_be
def test_pendulum_datetime_parser():
@dataclass
class DataClass(DataClassDictMixin):
x: datetime = field(metadata={"deserialize": "pendulum"})
should_be = DataClass(x=datetime(2008, 12, 29, 7, tzinfo=timezone.utc))
instance = DataClass.from_dict({"x": "2009-W01 0700"})
assert instance == should_be
def test_pendulum_date_parser():
@dataclass
class DataClass(DataClassDictMixin):
x: date = field(metadata={"deserialize": "pendulum"})
should_be = DataClass(x=date(2008, 12, 29))
instance = DataClass.from_dict({"x": "2009-W01"})
assert instance == should_be
def test_pendulum_time_parser():
@dataclass
class DataClass(DataClassDictMixin):
x: time = field(metadata={"deserialize": "pendulum"})
should_be = DataClass(x=time(3, 4, 5))
instance = DataClass.from_dict({"x": "2009-W01 030405"})
assert instance == should_be
def test_unsupported_datetime_parser_engine():
with pytest.raises(UnserializableField):
@dataclass
class DataClass(DataClassDictMixin):
x: datetime = field(metadata={"deserialize": "unsupported"})
def test_global_function_datetime_parser():
@dataclass
class DataClass(DataClassDictMixin):
x: datetime = field(
metadata={"deserialize": ciso8601.parse_datetime_as_naive}
)
should_be = DataClass(x=datetime(2021, 1, 2, 3, 4, 5))
instance = DataClass.from_dict({"x": "2021-01-02T03:04:05+03:00"})
assert instance == should_be
def test_local_function_datetime_parser():
def parse_dt(s):
return ciso8601.parse_datetime_as_naive(s)
@dataclass
class DataClass(DataClassDictMixin):
x: datetime = field(metadata={"deserialize": parse_dt})
should_be = DataClass(x=datetime(2021, 1, 2, 3, 4, 5))
instance = DataClass.from_dict({"x": "2021-01-02T03:04:05+03:00"})
assert instance == should_be
def test_class_method_datetime_parser():
class DateTimeParser:
@classmethod
def parse_dt(cls, s: str) -> datetime:
return datetime.fromisoformat(s)
@dataclass
class DataClass(DataClassDictMixin):
x: datetime = field(metadata={"deserialize": DateTimeParser.parse_dt})
should_be = DataClass(x=datetime(2021, 1, 2, 3, 4, 5))
instance = DataClass.from_dict({"x": "2021-01-02T03:04:05"})
assert instance == should_be
def test_class_instance_method_datetime_parser():
class DateTimeParser:
def __call__(self, s: str) -> datetime:
return datetime.fromisoformat(s)
@dataclass
class DataClass(DataClassDictMixin):
x: datetime = field(metadata={"deserialize": DateTimeParser()})
should_be = DataClass(x=datetime(2021, 1, 2, 3, 4, 5))
instance = DataClass.from_dict({"x": "2021-01-02T03:04:05"})
assert instance == should_be
def test_callable_class_instance_datetime_parser():
class CallableDateTimeParser:
def __call__(self, s):
return ciso8601.parse_datetime(s)
@dataclass
class DataClass(DataClassDictMixin):
x: datetime = field(metadata={"deserialize": CallableDateTimeParser()})
should_be = DataClass(x=datetime(2021, 1, 2, 3, 4, 5, tzinfo=timezone.utc))
instance = DataClass.from_dict({"x": "2021-01-02T03:04:05Z"})
assert instance == should_be
def test_lambda_datetime_parser():
@dataclass
class DataClass(DataClassDictMixin):
x: datetime = field(
metadata={"deserialize": lambda s: ciso8601.parse_datetime(s)}
)
should_be = DataClass(x=datetime(2021, 1, 2, 3, 4, 5, tzinfo=timezone.utc))
instance = DataClass.from_dict({"x": "2021-01-02T03:04:05Z"})
assert instance == should_be
def test_derived_dataclass_metadata_deserialize_option():
@dataclass
class A:
x: datetime = field(metadata={"deserialize": ciso8601.parse_datetime})
@dataclass
class B(A, DataClassDictMixin):
y: datetime = field(metadata={"deserialize": ciso8601.parse_datetime})
should_be = B(
x=datetime(2021, 1, 2, 3, 4, 5, tzinfo=timezone.utc),
y=datetime(2021, 1, 2, 3, 4, 5, tzinfo=timezone.utc),
)
instance = B.from_dict(
{"x": "2021-01-02T03:04:05Z", "y": "2021-01-02T03:04:05Z"}
)
assert instance == should_be
def test_bytearray_overridden():
@dataclass
class DataClass(DataClassDictMixin):
x: bytearray = field(
metadata={"deserialize": lambda s: s.upper().encode()}
)
should_be = DataClass(x=bytearray(b"ABC"))
instance = DataClass.from_dict({"x": "abc"})
assert instance == should_be
def test_path_like_overridden():
@dataclass
class DataClass(DataClassDictMixin):
x: Path = field(
metadata={"deserialize": lambda s: Path(str(s).upper())}
)
should_be = DataClass(x=Path("/ABC"))
instance = DataClass.from_dict({"x": "/abc"})
assert instance == should_be
def test_datetime_serialize_option():
@dataclass
class DataClass(DataClassDictMixin):
x: datetime = field(
metadata={"serialize": lambda v: v.strftime("%Y-%m-%d %H:%M:%S")}
)
should_be = {"x": "2021-01-02 03:04:05"}
instance = DataClass(x=datetime(2021, 1, 2, 3, 4, 5, tzinfo=timezone.utc))
assert instance.to_dict() == should_be
def test_third_party_type_overridden():
@dataclass
class DataClass(DataClassDictMixin):
x: ThirdPartyType = field(
metadata={
"deserialize": lambda v: ThirdPartyType(v),
"serialize": lambda v: v.value,
}
)
should_be = DataClass(x=ThirdPartyType(123))
instance = DataClass.from_dict({"x": 123})
assert instance == should_be
assert instance.to_dict() == {"x": 123}
def test_serializable_type_overridden():
@dataclass
class DataClass(DataClassDictMixin):
x: MutableString = field(
metadata={
"deserialize": lambda s: MutableString(s.upper()),
"serialize": lambda v: str(v).lower(),
}
)
should_be = DataClass(x=MutableString("ABC"))
instance = DataClass.from_dict({"x": "abc"})
assert instance == should_be
assert instance.to_dict() == {"x": "abc"}
def test_optional_overridden():
@dataclass
class DataClass(DataClassDictMixin):
x: Optional[ThirdPartyType] = field(
metadata={
"deserialize": lambda v: ThirdPartyType(v),
"serialize": lambda v: v.value,
}
)
instance = DataClass.from_dict({"x": 123})
assert instance
assert instance.x.value == 123
dct = instance.to_dict()
assert dct["x"] == 123
def test_union_overridden():
@dataclass
class DataClass(DataClassDictMixin):
x: Union[int, str, float, ThirdPartyType] = field(
metadata={
"deserialize": lambda v: ThirdPartyType(v),
"serialize": lambda v: v.value,
}
)
instance = DataClass.from_dict({"x": 1})
assert instance == DataClass(x=ThirdPartyType(value=1))
assert instance.to_dict() == {"x": 1}
def test_serialization_strategy():
class TestSerializationStrategy(SerializationStrategy):
def serialize(self, value):
return [value]
def deserialize(self, value):
return value[0]
@dataclass
class DataClass(DataClassDictMixin):
x: int = field(
metadata={"serialization_strategy": TestSerializationStrategy()}
)
instance = DataClass(x=123)
assert DataClass.from_dict({"x": [123]}) == instance
assert instance.to_dict() == {"x": [123]}
def test_collection_derived_custom_class():
@dataclass
class DataClass(DataClassDictMixin):
x: MyList = field(
metadata={"serialize": lambda v: v, "deserialize": lambda v: v}
)
instance = DataClass(x=[1, 2, 3])
assert DataClass.from_dict({"x": [1, 2, 3]}) == instance
assert instance.to_dict() == {"x": [1, 2, 3]}
def test_dataclass_with_typed_dict_overridden():
def serialize_x(x: TypedDictRequiredKeys) -> Dict[str, Any]:
return {"int": int(x["int"]), "float": float(x["float"])}
def deserialize_x(x: Dict[str, Any]) -> TypedDictRequiredKeys:
return TypedDictRequiredKeys(int=x["int"], float=x["float"])
@dataclass
class DataClass(DataClassDictMixin):
x: TypedDictRequiredKeys = field(
metadata={"serialize": serialize_x, "deserialize": deserialize_x}
)
obj = DataClass(x=TypedDictRequiredKeys(int=1, float=2.0))
data = {"x": {"int": 1, "float": 2.0}}
assert DataClass.from_dict(data) == obj
assert obj.to_dict() == data
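The pattern these tests exercise, overriding (de)serialization per field via `field(metadata=...)`, looks like this in application code. A minimal sketch using only constructs shown in the tests above; the `Event` class is illustrative:

```python
from dataclasses import dataclass, field
from datetime import datetime

import ciso8601
from mashumaro import DataClassDictMixin

@dataclass
class Event(DataClassDictMixin):
    # Parse incoming strings with ciso8601 instead of the default parser.
    start: datetime = field(metadata={"deserialize": ciso8601.parse_datetime})

event = Event.from_dict({"start": "2021-01-02T03:04:05Z"})
print(event.start)  # 2021-01-02 03:04:05+00:00
```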
- avg_line_length: 30.1875 · max_line_length: 79 · alphanum_fraction: 0.648822
- qsc_code quality signals: num_words 1,165; num_chars 10,143; mean_word_length 5.498712; frac_words_unique 0.108155; frac_chars_top_2/3/4grams 0.044958 / 0.082579 / 0.147206; frac_chars_dupe_5..10grams 0.688885 / 0.615673 / 0.561973 / 0.514518 / 0.455354 / 0.433812; frac_chars_replacement_symbols 0; frac_chars_digital 0.053177; frac_chars_whitespace 0.219462; size_file_byte 10,143; num_lines 335; num_chars_line_max 80; num_chars_line_mean 30.277612; frac_chars_alphabet 0.755968; frac_chars_comments 0; cate_xml_start 0; frac_lines_dupe_lines 0.414343; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.077098; frac_chars_long_word_length 0.007098; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0.123506
- qsc_codepython quality signals: cate_ast 1; frac_lines_func_ratio 0.12749; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.039841; frac_lines_simplefunc 0.031873; score_lines_no_logic 0.414343; frac_lines_print 0
- raw qsc counters: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null)
- effective: 1 · hits: 0
**Record 6: hexsha `dbaae886d43e46ac193d1e7f28a6367192d2a640`**

- size: 7,552 bytes · ext: py · lang: Python
- max_stars: repo `owennewo/kfserving` @ `89f73c87525b8e06ea799f69f2979c4ad272fcb3`, path `vendor/github.com/tensorflow/tensorflow/tensorflow/python/ops/list_ops.py`, licenses ["Apache-2.0"], count: 2, events: 2018-12-12T23:33:05.000Z → 2019-02-26T07:20:22.000Z
- max_issues: same repo, head, path, and licenses; count: 13, events: 2020-11-13T18:53:29.000Z → 2022-03-12T00:33:00.000Z
- max_forks: same repo, head, path, and licenses; count: 2, events: 2020-10-06T09:24:31.000Z → 2020-12-20T15:10:56.000Z
- content:
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Ops to manipulate lists of tensors."""
# pylint: disable=g-bad-name
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import gen_list_ops
# go/tf-wildcard-import
# pylint: disable=wildcard-import
from tensorflow.python.ops.gen_list_ops import *
# pylint: enable=wildcard-import
ops.NotDifferentiable("TensorListConcatLists")
ops.NotDifferentiable("TensorListElementShape")
ops.NotDifferentiable("TensorListLength")
ops.NotDifferentiable("TensorListPushBackBatch")
def empty_tensor_list(element_shape,
element_dtype,
max_num_elements=None,
name=None):
if max_num_elements is None:
max_num_elements = -1
return gen_list_ops.empty_tensor_list(
element_shape=_build_element_shape(element_shape),
element_dtype=element_dtype,
max_num_elements=max_num_elements,
name=name)
def tensor_list_reserve(element_shape, num_elements, element_dtype, name=None):
return gen_list_ops.tensor_list_reserve(
element_shape=_build_element_shape(element_shape),
num_elements=num_elements,
element_dtype=element_dtype,
name=name)
def tensor_list_from_tensor(tensor, element_shape, name=None):
return gen_list_ops.tensor_list_from_tensor(
tensor=tensor,
element_shape=_build_element_shape(element_shape),
name=name)
def tensor_list_concat(input_handle, element_dtype, name=None):
# Ignore the lengths output of TensorListConcat. It is only used during
# gradient computation.
return gen_list_ops.tensor_list_concat(
input_handle=input_handle, element_dtype=element_dtype, name=name)[0]
def tensor_list_split(tensor, element_shape, lengths, name=None):
return gen_list_ops.tensor_list_split(
tensor=tensor,
element_shape=_build_element_shape(element_shape),
lengths=lengths,
name=name)
@ops.RegisterGradient("TensorListPushBack")
def _PushBackGrad(op, dresult):
return gen_list_ops.tensor_list_pop_back(
dresult, element_dtype=op.get_attr("element_dtype"))
@ops.RegisterGradient("TensorListPopBack")
def _PopBackGrad(op, dlist, delement):
if dlist is None:
dlist = empty_tensor_list(
element_dtype=delement.dtype,
element_shape=gen_list_ops.tensor_list_element_shape(
op.outputs[0], shape_type=dtypes.int32))
return gen_list_ops.tensor_list_push_back(dlist, delement)
@ops.RegisterGradient("TensorListStack")
def _TensorListStackGrad(unused_op, dtensor):
return tensor_list_from_tensor(dtensor, element_shape=dtensor.shape[1:])
@ops.RegisterGradient("TensorListConcat")
def _TensorListConcatGrad(op, dtensor, unused_dlengths):
# TODO(srbs): We lose the element_shape information in tensor_list_concat.
# Consider providing that as an output of TensorListConcat?
if dtensor.shape.rank is None:
element_shape = None
else:
element_shape = [None] + dtensor.shape.as_list()[1:]
return tensor_list_split(
dtensor,
element_shape=_build_element_shape(element_shape),
lengths=op.outputs[1])
@ops.RegisterGradient("TensorListSplit")
def _TensorListSplitGrad(op, dlist):
return tensor_list_concat(dlist, element_dtype=op.inputs[0].dtype), None, None
@ops.RegisterGradient("TensorListFromTensor")
def _TensorListFromTensorGrad(op, dlist):
"""Gradient for TensorListFromTensor."""
if op.inputs[0].shape.dims and op.inputs[0].shape.dims[0].value is not None:
num_elements = op.inputs[0].shape.dims[0].value
else:
num_elements = None
if dlist is None:
dlist = empty_tensor_list(
element_dtype=op.inputs[0].dtype,
element_shape=gen_list_ops.tensor_list_element_shape(
op.outputs[0], shape_type=dtypes.int32))
tensor_grad = gen_list_ops.tensor_list_stack(
dlist, element_dtype=op.inputs[0].dtype, num_elements=num_elements)
shape_grad = None
return tensor_grad, shape_grad
@ops.RegisterGradient("TensorListGetItem")
def _TensorListGetItemGrad(op, ditem):
"""Gradient for TensorListGetItem."""
list_size = gen_list_ops.tensor_list_length(op.inputs[0])
list_grad = gen_list_ops.tensor_list_set_item(
gen_list_ops.tensor_list_reserve(
gen_list_ops.tensor_list_element_shape(op.inputs[0],
shape_type=dtypes.int32),
list_size, element_dtype=ditem.dtype),
index=op.inputs[1],
item=ditem)
index_grad = None
return list_grad, index_grad
@ops.RegisterGradient("TensorListSetItem")
def _TensorListSetItemGrad(op, dlist):
_, index, item = op.inputs
list_grad = gen_list_ops.tensor_list_set_item(
dlist, index=index, item=array_ops.zeros_like(item))
index_grad = None
element_grad = gen_list_ops.tensor_list_get_item(
dlist, index, element_dtype=item.dtype)
return list_grad, index_grad, element_grad
@ops.RegisterGradient("TensorListGather")
def _TensorListGatherGrad(op, dtensor):
_, indices = op.inputs
return gen_list_ops.tensor_list_scatter(
tensor=dtensor, indices=indices,
element_shape=ops.convert_to_tensor(-1, dtype=dtypes.int32)), None
@ops.RegisterGradient("TensorListScatter")
def _TensorListScatterGrad(op, dlist):
t, indices, _ = op.inputs
return gen_list_ops.tensor_list_gather(
dlist, indices, element_dtype=t.dtype), None
def _build_element_shape(shape):
"""Converts shape to a format understood by list_ops for element_shape.
If `shape` is already a `Tensor` it is returned as-is. We do not perform a
type check here.
If shape is None or a TensorShape with unknown rank, -1 is returned.
If shape is a scalar, an int32 tensor with an empty list is returned. Note we
do not directly return an empty list, since ops.convert_to_tensor would convert it
to a float32, which is not a valid type for element_shape.
If shape is a sequence of dims, None's in the list are replaced with -1. We
do not check the dtype of the other dims.
Args:
shape: Could be None, Tensor, TensorShape or a list of dims (each dim could
be a None, scalar or Tensor).
Returns:
A None-free shape that can be converted to a tensor.
"""
if isinstance(shape, ops.Tensor):
return shape
if isinstance(shape, tensor_shape.TensorShape):
# `TensorShape.as_list` requires rank to be known.
shape = shape.as_list() if shape else None
# Shape is unknown.
if shape is None:
return -1
# Shape is a scalar.
if not shape:
return ops.convert_to_tensor(shape, dtype=dtypes.int32)
# Shape is a sequence of dimensions. Convert None dims to -1.
return [d if d is not None else -1 for d in shape]
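To summarize `_build_element_shape`'s contract (restated from its docstring, not new behavior; `existing_tensor` is a stand-in for any `Tensor`):

```python
# Illustrative calls, mirroring the docstring above:
_build_element_shape(None)             # unknown shape             -> -1
_build_element_shape([])               # scalar                    -> int32 tensor with empty list
_build_element_shape([None, 3])        # None dims replaced by -1  -> [-1, 3]
_build_element_shape(existing_tensor)  # a Tensor is returned as-is
```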
- avg_line_length: 34.801843 · max_line_length: 80 · alphanum_fraction: 0.742585
- qsc_code quality signals: num_words 1,047; num_chars 7,552; mean_word_length 5.115568; frac_words_unique 0.217765; frac_chars_top_2/3/4grams 0.071695 / 0.037341 / 0.050784; frac_chars_dupe_5..10grams 0.316468 / 0.218447 / 0.165982 / 0.130134 / 0.094473 / 0.046303; frac_chars_replacement_symbols 0; frac_chars_digital 0.007289; frac_chars_whitespace 0.164327; size_file_byte 7,552; num_lines 216; num_chars_line_max 81; num_chars_line_mean 34.962963; frac_chars_alphabet 0.841388; frac_chars_comments 0.263639; cate_xml_start 0; frac_lines_dupe_lines 0.209302; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.04794; frac_chars_long_word_length 0.012031; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0.00463; frac_lines_assert 0
- qsc_codepython quality signals: cate_ast 1; frac_lines_func_ratio 0.124031; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.069767; frac_lines_simplefunc 0.054264; score_lines_no_logic 0.341085; frac_lines_print 0.007752
- raw qsc counters: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null)
- effective: 1 · hits: 0
**Record 7: hexsha `dbaf82c57c0e8e70a6ba6faeba1bc88a6aa96173`**

- size: 996 bytes · ext: py · lang: Python
- max_stars: repo `lydaaa/fzutils` @ `5f775d046876e3ce35d0b1174b5a3db96e9d627e`, path `requires.py`, licenses ["MIT"], count: 1, events: 2018-08-04T13:55:03.000Z → 2018-08-04T13:55:03.000Z
- max_issues: same repo, head, path, and licenses; count: null, events: null → null
- max_forks: same repo, head, path, and licenses; count: null, events: null → null
- content:
# coding:utf-8
'''
@author = super_fazai
@File : requires.py
@Time : 2016/8/3 12:59
@connect : superonesfazai@gmail.com
'''
install_requires = [
'ipython',
'wheel',
'utils',
'db',
'greenlet==0.4.13',
'web.py==0.40.dev1',
'pytz',
'requests',
'selenium==3.8.0', # 3.8.1 and later no longer support phantomjs
'asyncio',
'psutil',
'pyexecjs',
'setuptools',
'colorama',
'twine',
'numpy',
'pprint',
'selenium',
'chardet',
'bs4',
'scrapy',
'demjson',
'pymssql',
'sqlalchemy',
'gevent',
'aiohttp',
'celery',
'jsonpath',
'matplotlib',
'wget',
'flask',
'flask_login',
'mitmproxy', # shell packet-capture proxy
'pymongo',
'pyexcel',
'pyexcel-xlsx',
'fabric',
'shadowsocks',
# 'pycurl==7.43.0.1',
'furl',
'yarl',
'prettytable',
'xlrd',
'pandas',
'jieba',
'geopandas',
'scikit-image',
'wordcloud', # word cloud
'pygame',
]
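A list named `install_requires` is conventionally consumed by setuptools. A hypothetical sketch of how this module might be wired into a `setup.py` (the wiring is an assumption, not taken from the repo):

```python
from setuptools import setup, find_packages

from requires import install_requires  # the list defined above

setup(
    name="fzutils",
    packages=find_packages(),
    install_requires=install_requires,
)
```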
- avg_line_length: 16.6 · max_line_length: 53 · alphanum_fraction: 0.491968
- qsc_code quality signals: num_words 93; num_chars 996; mean_word_length 5.236559; frac_words_unique 0.860215; frac_chars_top_2/3/4grams 0.008214 / 0 / 0; frac_chars_dupe_5..10grams all 0; frac_chars_replacement_symbols 0; frac_chars_digital 0.044993; frac_chars_whitespace 0.308233; size_file_byte 996; num_lines 60; num_chars_line_max 54; num_chars_line_mean 16.6; frac_chars_alphabet 0.661829; frac_chars_comments 0.179719; cate_xml_start 0; frac_lines_dupe_lines 0; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.447891; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
- qsc_codepython quality signals: cate_ast 1; frac_lines_func_ratio 0; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0; frac_lines_simplefunc 0; score_lines_no_logic 0; frac_lines_print 0.02
- raw qsc counters: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null)
- effective: 1 · hits: 0
**Record 8: hexsha `dbafb8e5a5c72fd3abd02eb1cca23ac263bc48ce`**

- size: 2,433 bytes · ext: py · lang: Python
- max_stars: repo `venkatarjun/Python3` @ `606adf8588a74a53d592e62e07e81a5a1530b993`, path `m15_dos/dos.py`, licenses ["MIT"], count: 80, events: 2020-11-14T19:19:27.000Z → 2022-03-10T17:43:17.000Z
- max_issues: repo `nerbertb/python-52-weeks` @ `55add5d75d1aabed4c59d445e1d1b773ede047b0`, same path and licenses; count: 10, events: 2020-11-24T06:19:45.000Z → 2022-02-27T23:53:28.000Z
- max_forks: repo `nerbertb/python-52-weeks` @ `55add5d75d1aabed4c59d445e1d1b773ede047b0`, same path and licenses; count: 58, events: 2020-11-13T18:35:22.000Z → 2022-03-28T06:40:08.000Z
- content:
import subprocess
import requests
import argparse
from concurrent.futures import ThreadPoolExecutor
from time import sleep
from datetime import datetime
ICMP_ATTACK = "ICMP"
HTTP_ATTACK = "HTTP"
valid_attacks = {HTTP_ATTACK, ICMP_ATTACK}
parser = argparse.ArgumentParser(description="DoS HTTP")
parser.add_argument('-P', '--poolsize', default=10, help='Size of the threadpool')
parser.add_argument('-T', '--target', default='localhost', help='Target URL for http request')
parser.add_argument('-D', '--delay', default=0, help='Amount of time to wait between requests')
parser.add_argument('-A', '--attack', help='Type of attack (e.g. HTTP, ICMP)')
args = parser.parse_args()
threadpool_size = int(args.poolsize)
target = args.target
delay = int(args.delay)
attack = args.attack.upper()
if attack not in valid_attacks:
print(f"Invalid attack type, must be one of: {valid_attacks}")
exit()
terminate = False
def http_request(url):
global terminate
while True and not terminate:
response = requests.get(url)
if not response.ok:
print(f"{str(datetime.now())[:-3]} !!! HTTP request failed, code: {response.status_code}")
else:
print(f"{str(datetime.now())[:-3]} ---> HTTP request successful")
if delay > 0:
for _ in range(0, delay): sleep(1)
print("...http_request thread terminated")
def ping_host(ip):
global terminate
while True and not terminate:
try:
subprocess.check_output(["ping", "-c3", "-n", "-i0.5", "-W2", ip])
print(f"{str(datetime.now())[:-3]} ---> Ping successful: {ip}")
except subprocess.CalledProcessError:
print(f"{str(datetime.now())[:-3]} !!! Ping failed: {ip}")
if delay > 0:
for _ in range(0, delay): sleep(1)
def main():
global terminate
try:
targets = [target for _ in range(0, threadpool_size)]
with ThreadPoolExecutor(max_workers=threadpool_size) as executor:
if attack == HTTP_ATTACK:
executor.map(http_request, targets)
elif attack == ICMP_ATTACK:
executor.map(ping_host, targets)
else:
return # should not have gotten here
except KeyboardInterrupt:
print("... terminating application ...", end="")
terminate = True
print("terminated")
if __name__ == "__main__":
main()
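A small note on the worker loops above: `while True and not terminate` is equivalent to `while not terminate`, since `True and x` reduces to `x`. A trimmed, self-contained sketch of the same loop shape:

```python
import requests

terminate = False  # flipped by the caller to stop the loop

def http_request(url):
    # `while True and not terminate` in the original reduces to this:
    while not terminate:
        response = requests.get(url)
        print("ok" if response.ok else f"failed: {response.status_code}")
```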
- avg_line_length: 27.337079 · max_line_length: 102 · alphanum_fraction: 0.630908
- qsc_code quality signals: num_words 300; num_chars 2,433; mean_word_length 4.996667; frac_words_unique 0.386667; frac_chars_top_2/3/4grams 0.044029 / 0.045364 / 0.045364; frac_chars_dupe_5..10grams 0.168112 / 0.168112 / 0.168112 / 0.082722 / 0.040027 / 0.040027; frac_chars_replacement_symbols 0; frac_chars_digital 0.009651; frac_chars_whitespace 0.233457; size_file_byte 2,433; num_lines 88; num_chars_line_max 103; num_chars_line_mean 27.647727; frac_chars_alphabet 0.794102; frac_chars_comments 0.011097; cate_xml_start 0; frac_lines_dupe_lines 0.213115; cate_autogen 0; frac_lines_long_string 0.016393; frac_chars_string_length 0.238769; frac_chars_long_word_length 0.052413; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
- qsc_codepython quality signals: cate_ast 1; frac_lines_func_ratio 0.04918; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.098361; frac_lines_simplefunc 0; score_lines_no_logic 0.163934; frac_lines_print 0.131148
- raw qsc counters: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null)
- effective: 1 · hits: 0
**Record 9: hexsha `dbb02044e102ff75841402e288f20f24bd0e7921`**

- size: 3,444 bytes · ext: py · lang: Python
- max_stars: repo `estudio89/maestro-python` @ `331079cb3f0c10de2e19210cbade793544510f33`, path `maestro/backends/django/contrib/signals.py`, licenses ["BSD-3-Clause"], count: null, events: null → null
- max_issues: same repo, head, path, and licenses; count: null, events: null → null
- max_forks: same repo, head, path, and licenses; count: null, events: null → null
- content:
from django.apps import apps
from django.db import models
from django.db.models.signals import post_save, pre_delete
from typing import Type, Optional, List, cast, TYPE_CHECKING
from maestro.backends.django.settings import maestro_settings
from maestro.backends.django.contrib.factory import create_django_data_store
from maestro.backends.django.utils import model_to_entity_name
from maestro.core.metadata import Operation
from .middleware import _add_operation_to_queue
import copy
if TYPE_CHECKING:
from maestro.backends.django import DjangoDataStore
def model_saved_signal(
sender: "Type[models.Model]",
instance: "models.Model",
created: "bool",
raw: "bool",
using: "str",
update_fields: "Optional[List[str]]",
**kwargs,
):
operation: "Operation"
if created:
operation = Operation.INSERT
else:
operation = Operation.UPDATE
data_store: "DjangoDataStore" = create_django_data_store()
entity_name = model_to_entity_name(instance)
data_store.commit_item_change(
operation=operation,
entity_name=entity_name,
item_id=str(instance.pk),
item=copy.deepcopy(instance),
execute_operation=False,
)
_add_operation_to_queue(operation=operation, item=copy.deepcopy(instance))
def model_pre_delete_signal(
sender: "Type[models.Model]", instance: "models.Model", using: "str", **kwargs
):
data_store: "DjangoDataStore" = create_django_data_store()
entity_name = model_to_entity_name(instance)
data_store.commit_item_change(
operation=Operation.DELETE,
entity_name=entity_name,
item_id=str(instance.pk),
item=copy.deepcopy(instance),
execute_operation=False,
)
_add_operation_to_queue(operation=Operation.DELETE, item=copy.deepcopy(instance))
def _connect_signal(model: "models.Model"):
full_label = (
cast("str", model._meta.app_label) + "_" + cast("str", model._meta.model_name)
)
post_save.connect(
receiver=model_saved_signal,
sender=model,
dispatch_uid=full_label + "_update_sync",
)
pre_delete.connect(
receiver=model_pre_delete_signal,
sender=model,
dispatch_uid=full_label + "_delete_sync",
)
def connect_signals():
for app_model in maestro_settings.MODELS:
model = apps.get_model(app_model)
_connect_signal(model=model)
def _disconnect_signal(model: "models.Model"):
full_label = (
cast("str", model._meta.app_label) + "_" + cast("str", model._meta.model_name)
)
post_save.disconnect(
receiver=model_saved_signal,
sender=model,
dispatch_uid=full_label + "_update_sync",
)
pre_delete.disconnect(
receiver=model_pre_delete_signal,
sender=model,
dispatch_uid=full_label + "_delete_sync",
)
class _DisableSignalsContext:
def __init__(self, model: "Type[models.Model]"):
self.model = model
def __enter__(self):
_disconnect_signal(model=self.model)
def __exit__(self, type, value, traceback):
label = self.model._meta.app_label + "." + self.model._meta.model_name
enabled_models = [label.lower() for label in maestro_settings.MODELS]
if label in enabled_models:
_connect_signal(model=self.model)
def temporarily_disable_signals(model: "Type[models.Model]"):
return _DisableSignalsContext(model=model)
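Because `_DisableSignalsContext` implements `__enter__`/`__exit__`, `temporarily_disable_signals` is used as a context manager. A hedged usage sketch; `MyModel` and `incoming_rows` are stand-ins, and the import path is assumed from the file's location above:

```python
from maestro.backends.django.contrib.signals import temporarily_disable_signals

# Bulk-load rows without emitting a sync operation for each save.
with temporarily_disable_signals(MyModel):
    for row in incoming_rows:
        MyModel.objects.create(**row)
# On exit, signals are reconnected only if MyModel is still enabled
# in maestro_settings.MODELS, matching __exit__ above.
```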
- avg_line_length: 30.210526 · max_line_length: 86 · alphanum_fraction: 0.702962
- qsc_code quality signals: num_words 418; num_chars 3,444; mean_word_length 5.45933; frac_words_unique 0.210526; frac_chars_top_2/3/4grams 0.039439 / 0.033304 / 0.043821; frac_chars_dupe_5..10grams 0.539001 / 0.489045 / 0.456617 / 0.456617 / 0.416301 / 0.416301; frac_chars_replacement_symbols 0; frac_chars_digital 0; frac_chars_whitespace 0.196574; size_file_byte 3,444; num_lines 113; num_chars_line_max 87; num_chars_line_mean 30.477876; frac_chars_alphabet 0.82472; frac_chars_comments 0; cate_xml_start 0; frac_lines_dupe_lines 0.344086; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.074042; frac_chars_long_word_length 0; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
- qsc_codepython quality signals: cate_ast 1; frac_lines_func_ratio 0.096774; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.11828; frac_lines_simplefunc 0.010753; score_lines_no_logic 0.236559; frac_lines_print 0
- raw qsc counters: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null)
- effective: 1 · hits: 0
**Record 10: hexsha `dbb4a7b40915f984e1d6c4fb86487617ba753bc3`**

- size: 2,421 bytes · ext: py · lang: Python
- max_stars: repo `mike72353/FragFeatureNet` @ `ef61ae52e3d6dcc6d2d56df2a6bd5fe1a298c930`, path `Scripts/ReduceFragments.py`, licenses ["BSD-3-Clause"], count: 1, events: 2021-10-13T11:49:37.000Z → 2021-10-13T11:49:37.000Z
- max_issues: same repo, head, path, and licenses; count: null, events: null → null
- max_forks: same repo, head, path, and licenses; count: 1, events: 2021-09-09T04:42:20.000Z → 2021-09-09T04:42:20.000Z
- content:
"""
Remove Fragments not in Knowledgebase
"""
__author__ = "Michael Suarez"
__email__ = "masv@connect.ust.hk"
__copyright__ = "Copyright 2019, Hong Kong University of Science and Technology"
__license__ = "3-clause BSD"
from argparse import ArgumentParser
import numpy as np
import pickle
parser = ArgumentParser(description="Build Files")
parser.add_argument("--datadir", type=str, default="Data", help="input - XXX.YYY ")
parser.add_argument("--envNewAcronym", type=str, default="PRT.SNW", help="input - XXX.YYY ")
args = parser.parse_args()
# Check the Bound Fragments
BoundFrags = np.loadtxt("../%s/%s/%s.Homogenised.boundfrags_zeros.txt" %(args.datadir, args.envNewAcronym, args.envNewAcronym), delimiter=',')
normalDF = pickle.load(open("../%s/GrandCID.dict" %(args.datadir), "rb"))
binding = np.full(BoundFrags.shape,-1)
mlength = 0
for r, i in enumerate(BoundFrags):
for c, j in enumerate(i[i!=0]):
try:
# Checks whether the Fragment can be found in the 59k Fragment Base
binding[r,c]=normalDF.index.get_loc(int(j))
except:
continue
temp = binding[r]
if temp[temp!=-1].shape[0] > mlength:
mlength = temp[temp!=-1].shape[0]
print(mlength) #Finds the maximum number of Fragments per environment -> 705
indices = np.empty(binding.shape[0])
red_binding = np.full((binding.shape[0], mlength), -1)
for j, i in enumerate(binding):
indices[j] = i[i!=-1].shape[0]
red_binding[j][:int(indices[j])] = i[i!=-1]
red_binding = np.delete(red_binding, np.where(indices==0), axis=0)
pickle.dump(red_binding, open("../%s/%s/%s.binding.mtr" %(args.datadir, args.envNewAcronym, args.envNewAcronym), "wb"))
# Removes environments without binding Fragments
Features_all = pickle.load(open("../%s/%s/%s.Homogenised.property.pvar" %(args.datadir, args.envNewAcronym, args.envNewAcronym), "rb"))
Features_all = np.delete(Features_all, np.where(indices==0), axis=0)
pickle.dump(Features_all, open("../%s/%s/%s.Homogenised.property.pvar" %(args.datadir, args.envNewAcronym, args.envNewAcronym), "wb"))
# Removes environment annotiation without binding fragments
with open("../%s/%s/%s.Homogenised.annotation.txt" %(args.datadir, args.envNewAcronym, args.envNewAcronym), "r+") as f:
lines = f.readlines()
for i in np.where(indices==0)[0][::-1]:
del lines[i]
f.seek(0)
f.truncate()
f.writelines(lines)
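One caution about the lookup loop above: the bare `except:` silently swallows every error, not just missing fragments. `pandas.Index.get_loc` raises `KeyError` for labels it cannot find, so catching that alone is safer. A sketch of the narrower form, reusing `binding`, `normalDF`, `r`, `c`, and `j` from the script above:

```python
try:
    binding[r, c] = normalDF.index.get_loc(int(j))
except KeyError:
    # Fragment not in the 59k-fragment knowledgebase; leave the cell as -1.
    continue
```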
- avg_line_length: 38.428571 · max_line_length: 142 · alphanum_fraction: 0.687732
- qsc_code quality signals: num_words 340; num_chars 2,421; mean_word_length 4.808824; frac_words_unique 0.388235; frac_chars_top_2/3/4grams 0.012232 / 0.009174 / 0.085627; frac_chars_dupe_5..10grams 0.268502 / 0.225688 / 0.225688 / 0.166972 / 0.091743 / 0.091743; frac_chars_replacement_symbols 0; frac_chars_digital 0.014983; frac_chars_whitespace 0.145394; size_file_byte 2,421; num_lines 62; num_chars_line_max 143; num_chars_line_mean 39.048387; frac_chars_alphabet 0.775254; frac_chars_comments 0.12185; cate_xml_start 0; frac_lines_dupe_lines 0; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.186377; frac_chars_long_word_length 0.084674; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0
- qsc_codepython quality signals: cate_ast 1; frac_lines_func_ratio 0; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.071429; frac_lines_simplefunc 0; score_lines_no_logic 0.071429; frac_lines_print 0.02381
- raw qsc counters: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null)
- effective: 1 · hits: 0
**Record 11: hexsha `dbb4ba3a72efae417ef662fbf9ea83724f57fdc1`**

- size: 11,352 bytes · ext: py · lang: Python
- max_stars: repo `vbohinc/CommunityCellularManager` @ `ab330fcb1bc70ee3a8e9bcdac2846ab6c327f87c`, path `client/core/tests/billing_tests.py`, licenses ["BSD-3-Clause"], count: null, events: null → null
- max_issues: same repo, head, path, and licenses; count: 3, events: 2021-03-20T00:02:37.000Z → 2022-02-11T03:46:59.000Z
- max_forks: same repo, head, path, and licenses; count: null, events: null → null
- content:
"""Tests for core.billing.
Run this test from the project root
$ nosetests core.tests.billing_tests
Copyright (c) 2016-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import unittest
import random
import math
from core.billing import get_call_cost
from core.billing import get_prefix_from_number
from core.billing import get_sms_cost
from core.billing import process_prices
from core.billing import round_to_billable_unit
from core.billing import round_up_to_nearest_100
from core import config_database
TARIFF = 100
class GetCostTest(unittest.TestCase):
"""Testing core.billing.get_call_cost."""
@classmethod
def setUpClass(cls):
# Setup the config db.
cls.config_db = config_database.ConfigDB()
cls.config_db['bts_secret'] = 'hokay'
cls.config_db['free_seconds'] = '5'
cls.config_db['billable_unit'] = '1'
# Setup some price data like what would be sent back from the cloud.
price_data = [
{
'directionality': 'off_network_send',
'prefix': '509',
'country_name': 'Haiti',
'country_code': 'HT',
'cost_to_subscriber_per_sms': 900,
'cost_to_subscriber_per_min': 1100,
'billable_unit': 1,
}, {
'directionality': 'off_network_send',
'prefix': '56',
'country_name': 'Chile',
'country_code': 'CL',
'cost_to_subscriber_per_sms': 1000,
'cost_to_subscriber_per_min': 800,
'billable_unit': 1,
}, {
'directionality': 'off_network_send',
'prefix': '63',
'country_name': 'Philippines',
'country_code': 'PH',
'cost_to_subscriber_per_sms': 100,
'cost_to_subscriber_per_min': 600,
'billable_unit': 30,
}, {
'directionality': 'off_network_receive',
'cost_to_subscriber_per_sms': 200,
'cost_to_subscriber_per_min': 100,
'billable_unit': 1,
}, {
'directionality': 'on_network_send',
'cost_to_subscriber_per_sms': 400,
'cost_to_subscriber_per_min': 300,
'billable_unit': 1,
}, {
'directionality': 'on_network_receive',
'cost_to_subscriber_per_sms': 500,
'cost_to_subscriber_per_min': 200,
'billable_unit': 1,
}
]
# Populate the config db with prices
process_prices(price_data, cls.config_db)
def test_on_receive_call(self):
"""We can get the subscriber price for an on-network received call."""
billable_seconds = 170
# Recall that the expected cost is rounded to the nearest value of 100.
expected_cost = 600
self.assertEqual(expected_cost,
get_call_cost(billable_seconds, 'on_network_receive'))
def test_on_receive_sms(self):
"""We can get the subscriber price for an on-network received SMS."""
expected_cost = 500
self.assertEqual(expected_cost, get_sms_cost('on_network_receive'))
def test_off_receive_call(self):
"""We can get the subscriber price for an off-network received call."""
billable_seconds = 700
expected_cost = 1200
self.assertEqual(
expected_cost,
get_call_cost(billable_seconds, 'off_network_receive'))
def test_off_receive_sms(self):
"""We can get the subscriber price for an off-network received SMS."""
expected_cost = 200
self.assertEqual(expected_cost, get_sms_cost('off_network_receive'))
def test_on_send_call(self):
"""We can get the subscriber price for an on-network sent call."""
billable_seconds = 190
expected_cost = 1000
self.assertEqual(expected_cost,
get_call_cost(billable_seconds, 'on_network_send'))
def test_on_send_sms(self):
"""We can get the subscriber price for an on-network sent SMS."""
expected_cost = 400
self.assertEqual(expected_cost, get_sms_cost('on_network_send'))
def test_call_to_chile(self):
"""We can get the cost of a call to Chile."""
billable_seconds = 830
expected_cost = 11000
number = ''.join(['56', '1235554567'])
actual_cost = get_call_cost(billable_seconds, 'off_network_send',
destination_number=number)
self.assertEqual(expected_cost, actual_cost)
def test_sms_to_chile(self):
"""We can get the price to a subscriber of an SMS sent to Chile."""
expected_cost = 1000
number = ''.join(['56', '1235554567'])
actual_cost = get_sms_cost('off_network_send',
destination_number=number)
self.assertEqual(expected_cost, actual_cost)
def test_call_to_ph(self):
""" We bill for calls to PH correctly. """
billable_seconds = 70
expected_cost = 900
number = ''.join(['63', '5551234567'])
actual_cost = get_call_cost(billable_seconds, 'off_network_send',
destination_number=number)
self.assertEqual(expected_cost, actual_cost)
def test_nonexistent_prefix(self):
"""If the prefix doesn't exist, it's free.
The prefix price key might not exist if, say, the billing tier data
has not yet been loaded.
"""
expected_cost = 0
number = ''.join(['9999', '1235554567'])
actual_cost = get_sms_cost('off_network_send',
destination_number=number)
self.assertEqual(expected_cost, actual_cost)
class GetPrefixFromNumberTest(unittest.TestCase):
"""Testing core.billing.get_prefix_from_number."""
@classmethod
def setUpClass(cls):
# Setup the config db.
cls.config_db = config_database.ConfigDB()
cls.config_db['bts_secret'] = 'yup'
# Load up some pricing data into the config db. We use this data to
# determine what prefixes are available.
# 2015dec9(shasan): This is a legacy billing response, lacking billable
# units. This also tests we can handle that case.
price_data = [
{
'directionality': 'off_network_send',
'prefix': '789',
'country_name': 'Ocenaia',
'country_code': 'OC',
'cost_to_subscriber_per_sms': 300,
'cost_to_subscriber_per_min': 20,
}, {
'directionality': 'off_network_send',
'prefix': '78',
'country_name': 'Eurasia',
'country_code': 'EU',
'cost_to_subscriber_per_sms': 400,
'cost_to_subscriber_per_min': 10,
}, {
'directionality': 'off_network_send',
'prefix': '7',
'country_name': 'Eastasia',
'country_code': 'EA',
'cost_to_subscriber_per_sms': 500,
'cost_to_subscriber_per_min': 30,
}, {
'directionality': 'off_network_send',
'prefix': '3',
'country_name': 'London',
'country_code': 'LN',
'cost_to_subscriber_per_sms': 5000,
'cost_to_subscriber_per_min': 3000,
}
]
# Populate the config db with prices
process_prices(price_data, cls.config_db)
def test_get_one_digit_prefix(self):
"""We can get a one digit prefix."""
number = ''.join(['7', '1235557890'])
self.assertEqual('7', get_prefix_from_number(number))
def test_get_two_digit_prefix(self):
"""We can get a two digit prefix."""
number = ''.join(['78', '1235557890'])
self.assertEqual('78', get_prefix_from_number(number))
def test_get_three_digit_prefix(self):
"""We can get a three digit prefix."""
number = ''.join(['789', '1235557890'])
self.assertEqual('789', get_prefix_from_number(number))
def test_get_one_digit_uncommon_prefix(self):
"""We can get a one digit uncommon prefix."""
number = ''.join(['3', '1235557890'])
self.assertEqual('3', get_prefix_from_number(number))
class RoundCostToBillableUnit(unittest.TestCase):
"""Testing core.billing.round_to_billable_unit."""
def test_billable_unit_rounding_sans_free_seconds(self):
for i in range(100):
billsec = random.randint(1, 5000)
expected_cost = int(billsec * (TARIFF / 60.0))
print('%s seconds should cost %s' % (billsec, expected_cost))
self.assertEqual(expected_cost,
round_to_billable_unit(billsec, TARIFF))
def test_billable_unit_rounding_with_free_seconds(self):
for i in range(100):
billsec = random.randint(100, 5000)
free = random.randint(1, 100)
expected_cost = int((billsec - free) * (TARIFF / 60.0))
print('%s seconds with %s free should cost %s' %
(billsec, free, expected_cost))
self.assertEqual(expected_cost,
round_to_billable_unit(billsec, TARIFF, free))
def test_billable_unit_rounding_with_units(self):
"""Test the "rows" of this table: (billsec, expected_cost)."""
tests = [
# base case
(0, 60, 0, 30, 0),
# call too short
(5, 60, 0, 30, 30),
# changing the units
(5, 60, 0, 60, 60),
# call slightly too long
(61, 60, 0, 60, 120),
# weird non-uniform per minute
(61, 72, 0, 30, 108),
# including free seconds
(61, 60, 10, 60, 60)
]
for test in tests:
billsec = test[0]
rate = test[1]
free = test[2]
unit = test[3]
expected_cost = test[4]
actual_cost = round_to_billable_unit(billsec, rate, free, unit)
print('%s sec with %s free and a unit of %s sec '
'expected cost %s, actual cost %s' %
(billsec, free, unit, expected_cost, actual_cost))
self.assertEqual(expected_cost, actual_cost)
class RoundCostUpToNearest100(unittest.TestCase):
"""Testing core.billing.round_up_to_nearest_100."""
def test_round_negatives(self):
# test negatives
for i in [-10000, -100, -1]:
self.assertEqual(0, round_up_to_nearest_100(i))
def test_round_positives(self):
for i in range(0, 5000):
self.assertEqual(int(math.ceil(i / float(100))) * 100,
round_up_to_nearest_100(i))
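The rounding rule these tests pin down can be reconstructed. The following is a sketch consistent with the test expectations above, not the actual `core.billing` implementation: chargeable seconds are rounded up to whole billable-unit blocks and billed at the per-minute rate.

```python
import math

def round_to_billable_unit(billsec, rate_per_min, free_seconds=0, billable_unit=1):
    """Sketch matching the tests above (e.g. 61 s at 72/min in 30 s units -> 108)."""
    chargeable = max(billsec - free_seconds, 0)
    # Round the chargeable time up to a whole number of billable_unit blocks.
    units = math.ceil(chargeable / billable_unit)
    return int(units * billable_unit * (rate_per_min / 60.0))
```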
- avg_line_length: 38.481356 · max_line_length: 79 · alphanum_fraction: 0.587738
- qsc_code quality signals: num_words 1,338; num_chars 11,352; mean_word_length 4.703288; frac_words_unique 0.189088; frac_chars_top_2/3/4grams 0.06102 / 0.05085 / 0.060385; frac_chars_dupe_5..10grams 0.574289 / 0.457969 / 0.388527 / 0.332751 / 0.290958 / 0.267917; frac_chars_replacement_symbols 0; frac_chars_digital 0.047747; frac_chars_whitespace 0.313689; size_file_byte 11,352; num_lines 294; num_chars_line_max 80; num_chars_line_mean 38.612245; frac_chars_alphabet 0.759979; frac_chars_comments 0.177149; cate_xml_start 0; frac_lines_dupe_lines 0.271429; cate_autogen 0; frac_lines_long_string 0; frac_chars_string_length 0.179624; frac_chars_long_word_length 0.05654; frac_lines_string_concat 0; cate_encoded_data 0; frac_chars_hex_words 0; frac_lines_prompt_comments 0; frac_lines_assert 0.090476
- qsc_codepython quality signals: cate_ast 1; frac_lines_func_ratio 0.1; cate_var_zero false; frac_lines_pass 0; frac_lines_import 0.066667; frac_lines_simplefunc 0; score_lines_no_logic 0.185714; frac_lines_print 0.019048
- raw qsc counters: all 0 (qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat are null)
- effective: 1 · hits: 0
dbba66cc16504421bbf294d9cd7ab892cc735e8e
| 4,880
|
py
|
Python
|
apps/greencheck/forms.py
|
BR0kEN-/admin-portal
|
0c38dc0d790031f45bf07660bce690e972fe2858
|
[
"Apache-2.0"
] | null | null | null |
apps/greencheck/forms.py
|
BR0kEN-/admin-portal
|
0c38dc0d790031f45bf07660bce690e972fe2858
|
[
"Apache-2.0"
] | null | null | null |
apps/greencheck/forms.py
|
BR0kEN-/admin-portal
|
0c38dc0d790031f45bf07660bce690e972fe2858
|
[
"Apache-2.0"
] | null | null | null |
from django import forms
from django.forms import ModelForm
from django.contrib.auth import get_user_model
from django.core.exceptions import ValidationError
from .choices import ActionChoice
from .choices import StatusApproval
from .models import GreencheckIp
from .models import GreencheckIpApprove
from .models import GreencheckASN, GreencheckASNapprove
User = get_user_model()
class ApprovalMixin:
ApprovalModel = None
def _save_approval(self):
"""
Save the approval request, be it an IP range or an AS network,
so that staff can review it before the record goes live.
"""
if self.ApprovalModel is None:
raise NotImplementedError("Approval model missing")
model_name = self.ApprovalModel._meta.model_name
if not self.cleaned_data["is_staff"]:
hosting_provider = self.instance.hostingprovider
# `changed` here indicates whether this edits an existing record or creates a new one
action = ActionChoice.update if self.changed else ActionChoice.new
status = StatusApproval.update if self.changed else StatusApproval.new
kwargs = {
"action": action,
"status": status,
"hostingprovider": hosting_provider,
}
if model_name == "greencheckasnapprove":
self.instance = GreencheckASNapprove(asn=self.instance.asn, **kwargs)
else:
self.instance = GreencheckIpApprove(
ip_end=self.instance.ip_end,
ip_start=self.instance.ip_start,
**kwargs
)
hosting_provider.mark_as_pending_review(self.instance)
def clean_is_staff(self):
try:
# when using this form `is_staff` should always be available
# or else something has gone wrong...
return self.data["is_staff"]
except KeyError:
raise ValidationError("Alert staff: a bug has occurred.")
class GreencheckAsnForm(ModelForm, ApprovalMixin):
ApprovalModel = GreencheckASNapprove
is_staff = forms.BooleanField(
label="user_is_staff", required=False, widget=forms.HiddenInput()
)
class Meta:
model = GreencheckASN
fields = (
"active",
"asn",
)
def save(self, commit=True):
self._save_approval()
return super().save(commit=commit)
class GreencheckIpForm(ModelForm, ApprovalMixin):
"""This form is meant for admin
If a non staff user fills in the form it would return
an unsaved approval record instead of greencheckip record
"""
ApprovalModel = GreencheckIpApprove
is_staff = forms.BooleanField(
label="user_is_staff", required=False, widget=forms.HiddenInput()
)
class Meta:
model = GreencheckIp
fields = (
"active",
"ip_start",
"ip_end",
)
def save(self, commit=True):
"""
If a non-staff user creates an IP, we save an approval record
instead of saving the IP record directly. The actual record is
only created once the request has been approved, so we return an
approval instance rather than a GreencheckIp instance; it gets
saved a bit later.
"""
self._save_approval()
return super().save(commit=commit)
class GreencheckAsnApprovalForm(ModelForm):
class Meta:
model = GreencheckASNapprove
fields = ("action", "asn", "status")
def save(self, commit=True):
instance = self.instance.greencheck_asn
if commit is True:
if instance:
instance.asn = self.instance.asn
instance.save()
else:
instance = GreencheckASN.objects.create(
active=True,
asn=self.instance.asn,
hostingprovider=self.instance.hostingprovider,
)
self.instance.greencheck_asn = instance
return super().save(commit=commit)
class GreencheckIpApprovalForm(ModelForm):
field_order = ("ip_start", "ip_end")
class Meta:
model = GreencheckIpApprove
fields = "__all__"
def save(self, commit=True):
ip_instance = self.instance.greencheck_ip
if commit is True:
if ip_instance:
ip_instance.ip_end = self.instance.ip_end
ip_instance.ip_start = self.instance.ip_start
ip_instance.save()
else:
ip_instance = GreencheckIp.objects.create(
active=True,
ip_end=self.instance.ip_end,
ip_start=self.instance.ip_start,
hostingprovider=self.instance.hostingprovider,
)
self.instance.greencheck_ip = ip_instance
return super().save(commit=commit)
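# ---------------------------------------------------------------------------
# A hedged usage sketch for the forms above. It assumes the app's Django
# context plus an existing hosting provider, so it is shown commented out;
# the field values are hypothetical.
# provider = ...  # an existing hosting provider instance
# form = GreencheckIpForm(
#     data={"active": True, "ip_start": "192.0.2.1",
#           "ip_end": "192.0.2.24", "is_staff": False},
#     instance=GreencheckIp(hostingprovider=provider),
# )
# if form.is_valid():
#     pending = form.save()  # a GreencheckIpApprove awaiting staff review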
| 30.886076
| 85
| 0.608811
| 509
| 4,880
| 5.711198
| 0.269155
| 0.078431
| 0.028896
| 0.023392
| 0.280358
| 0.21431
| 0.184727
| 0.094255
| 0.094255
| 0.094255
| 0
| 0
| 0.317623
| 4,880
| 157
| 86
| 31.082803
| 0.872973
| 0.127049
| 0
| 0.317757
| 0
| 0
| 0.050181
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.056075
| false
| 0
| 0.084112
| 0
| 0.327103
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbbc25c0d987a2badd4b10e9df8a681d25f102e8
| 23,904
|
py
|
Python
|
ipamanager/entities.py
|
Tjev/freeipa-manager
|
0d40e64d81a86d4312b4e22cd57dcaecf25d0801
|
[
"BSD-3-Clause"
] | null | null | null |
ipamanager/entities.py
|
Tjev/freeipa-manager
|
0d40e64d81a86d4312b4e22cd57dcaecf25d0801
|
[
"BSD-3-Clause"
] | null | null | null |
ipamanager/entities.py
|
Tjev/freeipa-manager
|
0d40e64d81a86d4312b4e22cd57dcaecf25d0801
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# SPDX-License-Identifier: BSD-3-Clause
# Copyright © 2017-2019, GoodData Corporation. All rights reserved.
"""
FreeIPA Manager - entity module
Object representations of the entities configured in FreeIPA.
"""
import os
import re
import voluptuous
import yaml
from abc import ABCMeta, abstractproperty
import schemas
from command import Command
from core import FreeIPAManagerCore
from errors import ConfigError, ManagerError, IntegrityError
class FreeIPAEntity(FreeIPAManagerCore):
"""
General FreeIPA entity (user, group etc.) representation.
Can only be used via subclasses, not directly.
"""
__metaclass__ = ABCMeta
entity_id_type = 'cn' # entity name identifier in FreeIPA
key_mapping = {} # attribute name mapping between local config and FreeIPA
ignored = [] # list of ignored entities for each entity type
allowed_members = []
def __init__(self, name, data, path=None):
"""
:param str name: entity name (user login, group name etc.)
:param dict data: dictionary of entity configuration values
:param str path: path to file the entity was parsed from;
if None, indicates creation of entity from FreeIPA
"""
super(FreeIPAEntity, self).__init__()
if not data: # may be None; we want to ensure dictionary
data = dict()
self.name = name
self.path = path
self.metaparams = data.pop('metaparams', dict())
if self.path: # created from local config
try:
self.validation_schema(data)
except voluptuous.Error as e:
raise ConfigError('Error validating %s: %s' % (name, e))
if not path.endswith('.yaml'): # created from template tool
path, name = os.path.split(self.path)
self.path = '%s.yaml' % os.path.join(
path, name.replace('-', '_'))
self.data_ipa = self._convert_to_ipa(data)
self.data_repo = data
else: # created from FreeIPA
self.data_ipa = data
self.data_repo = self._convert_to_repo(data)
def _convert_to_ipa(self, data):
"""
Convert entity data to IPA format.
:param dict data: entity data in repository format
:returns: dictionary of data in IPA format
:rtype: dict
"""
result = dict()
for key, value in data.iteritems():
new_key = self.key_mapping.get(key, key).lower()
if new_key == 'memberof':
self._check_memberof(value)
result[new_key] = value
elif isinstance(value, bool):
result[new_key] = value
elif isinstance(value, list):
result[new_key] = tuple(unicode(i) for i in value)
else:
result[new_key] = (unicode(value),)
return result
def _convert_to_repo(self, data):
"""
Convert entity data to repo format.
:param dict data: entity data in IPA format
:returns: dictionary of data in repository format
:rtype: dict
"""
result = dict()
for attr in self.managed_attributes_pull:
if attr.lower() in data:
key = attr
# find reverse (IPA -> repo) attribute name mapping
for k, v in self.key_mapping.iteritems():
if v == attr:
key = k
break
value = data[attr.lower()]
if isinstance(value, tuple):
if len(value) > 1:
result[key] = list(value)
else:
result[key] = value[0]
else:
result[key] = value
return result
def _check_memberof(self, member_of):
for entity_type in member_of:
try:
self.get_entity_class(entity_type)
except KeyError:
raise ConfigError(
'Cannot be a member of non-existent entity type %s'
% entity_type)
def create_commands(self, remote_entity=None):
"""
Create commands to execute in order
to sync entity with its FreeIPA counterpart.
:param FreeIPAEntity remote_entity: remote entity
:returns: list of Command objects to execute
:rtype: list(Command)
"""
diff = dict()
for key in self.managed_attributes_push:
local_value = self.data_ipa.get(key.lower(), ())
if not remote_entity:
if local_value:
diff[key.lower()] = local_value
else:
remote_value = remote_entity.data_ipa.get(key.lower(), ())
if sorted(local_value) != sorted(remote_value):
diff[key.lower()] = local_value
if diff or not remote_entity: # create entity even without params
if remote_entity: # modify existing entity
command = '%s_mod' % self.entity_name
else: # add new entity
command = '%s_add' % self.entity_name
return [Command(command, diff, self.name, self.entity_id_type)]
return []
def update_repo_data(self, additional):
"""
Update repo-format data with additional attributes.
Used for adding membership attributes to data.
:param dict additional: dictionary to update entity data with
:rtype: None
"""
self.data_repo.update(additional or {})
def normalize(self):
"""
Re-structure entity's data in such a way that it can be stored
into the configuration file in a normalized format. This is used
when round-trip loading and saving a configuration.
"""
memberof = self.data_repo.pop('memberOf', None)
if memberof:
for target_type, target_list in memberof.iteritems():
memberof[target_type] = sorted(target_list)
self.data_repo['memberOf'] = memberof
def write_to_file(self):
if not self.path:
raise ManagerError(
'%s has no file path, nowhere to write.' % repr(self))
if self.metaparams:
self.data_repo.update({'metaparams': self.metaparams})
# don't write default attributes into file
for key in self.default_attributes:
self.data_repo.pop(key, None)
try:
with open(self.path, 'w') as target:
data = {self.name: self.data_repo or None}
yaml.dump(data, stream=target, Dumper=EntityDumper,
default_flow_style=False, explicit_start=True)
self.lg.debug('%s written to file', repr(self))
except (IOError, OSError, yaml.YAMLError) as e:
raise ConfigError(
'Cannot write %s to %s: %s' % (repr(self), self.path, e))
def delete_file(self):
if not self.path:
raise ManagerError(
'%s has no file path, cannot delete.' % repr(self))
try:
os.unlink(self.path)
self.lg.debug('%s config file deleted', repr(self))
except OSError as e:
raise ConfigError(
'Cannot delete %s at %s: %s' % (repr(self), self.path, e))
@staticmethod
def get_entity_class(name):
for entity_class in [
FreeIPAHBACRule, FreeIPAHBACService,
FreeIPAHBACServiceGroup, FreeIPAHostGroup, FreeIPAPermission,
FreeIPAPrivilege, FreeIPARole, FreeIPAService,
FreeIPASudoRule, FreeIPAUser, FreeIPAUserGroup]:
if entity_class.entity_name == name:
return entity_class
raise KeyError(name)
@abstractproperty
def validation_schema(self):
"""
:returns: entity validation schema
:rtype: voluptuous.Schema
"""
@abstractproperty
def managed_attributes_push(self):
"""
Return a list of properties that are managed for the given entity type
when pushing configuration from local repo to FreeIPA.
NOTE: the list should NOT include attributes that are managed via
separate commands, like memberOf/memberHost/memberUser or ipasudoopt.
:returns: list of entity's managed attributes
:rtype: list(str)
"""
@property
def managed_attributes_pull(self):
"""
Return a list of properties that are managed for the given entity type
when pulling configuration from FreeIPA to the local repository.
:returns: list of entity's managed attributes
:rtype: list(str)
"""
return self.managed_attributes_push
@property
def default_attributes(self):
"""
Return a list of default attributes for each entity of the given type.
These attributes will not be written into the YAML file when pulling.
:returns: list of entity's attributes that have single default value
:rtype: list(str)
"""
return []
def __repr__(self):
return '%s %s' % (self.entity_name, self.name)
def __str__(self):
return self.name
def __eq__(self, other):
return type(self) is type(other) and self.name == other.name
def __ne__(self, other):
return not (self == other)
def __gt__(self, other):
return self.name > other.name
def __lt__(self, other):
return self.name < other.name
class FreeIPAGroup(FreeIPAEntity):
"""Abstract representation a FreeIPA group entity (host/user group)."""
managed_attributes_push = ['description']
@abstractproperty
def allowed_members(self):
"""
:returns: list of entity types that can be members of this entity
:rtype: list(FreeIPAEntity)
"""
class FreeIPAHostGroup(FreeIPAGroup):
"""Representation of a FreeIPA host group entity."""
entity_name = 'hostgroup'
allowed_members = ['hostgroup']
validation_schema = voluptuous.Schema(schemas.schema_hostgroups)
class FreeIPAUserGroup(FreeIPAGroup):
"""Representation of a FreeIPA user group entity."""
entity_name = 'group'
managed_attributes_pull = ['description', 'posix']
allowed_members = ['user', 'group']
validation_schema = voluptuous.Schema(schemas.schema_usergroups)
def __init__(self, name, data, path=None):
"""
:param str name: entity name (user login, group name etc.)
:param dict data: dictionary of entity configuration values
:param str path: path to file the entity was parsed from;
if None, indicates creation of entity from FreeIPA
"""
if not path: # entity created from FreeIPA, not from config
data['posix'] = u'posixgroup' in data.get(u'objectclass', [])
super(FreeIPAUserGroup, self).__init__(name, data, path)
self.posix = self.data_repo.get('posix', True)
def can_contain_users(self, pattern):
"""
Check whether the group can contain users directly.
If the pattern is None, no restrictions are applied.
:param str pattern: regex to check name by (not enforced if empty)
"""
return not pattern or re.match(pattern, self.name)
def cannot_contain_users(self, pattern):
"""
Check whether the group cannot contain users directly.
Used for determining if the group can be a member of a sudo/HBAC rule.
If the pattern is None, no restrictions are applied.
:param str pattern: regex to check name by (not enforced if empty)
"""
return not pattern or not re.match(pattern, self.name)
def _process_posix_setting(self, remote_entity):
posix_diff = dict()
description = None
if remote_entity:
if self.posix and not remote_entity.posix:
posix_diff = {u'posix': True}
description = 'group_mod %s (make POSIX)' % self.name
elif not self.posix and remote_entity.posix:
posix_diff = {'setattr': (u'gidnumber=',),
'delattr': (u'objectclass=posixgroup',)}
description = 'group_mod %s (make non-POSIX)' % self.name
elif not self.posix: # creation of new non-POSIX group
posix_diff = {u'nonposix': True}
return (posix_diff, description)
def create_commands(self, remote_entity=None):
"""
Create commands to execute in order to update the rule.
Extends the basic command creation with POSIX/non-POSIX setting.
:param dict remote_entity: remote rule data
:returns: list of commands to execute
:rtype: list(Command)
"""
commands = super(FreeIPAUserGroup, self).create_commands(remote_entity)
posix_diff, description = self._process_posix_setting(remote_entity)
if posix_diff:
if not commands: # no diff but POSIX setting, new command needed
cmd = Command('group_mod', posix_diff,
self.name, self.entity_id_type)
cmd.description = description
return [cmd]
else: # update POSIX setting as part of existing command
commands[0].update(posix_diff)
return commands
class FreeIPAUser(FreeIPAEntity):
"""Representation of a FreeIPA user entity."""
entity_name = 'user'
entity_id_type = 'uid'
managed_attributes_push = ['givenName', 'sn', 'initials', 'mail',
'ou', 'manager', 'carLicense', 'title']
key_mapping = {
'emailAddress': 'mail',
'firstName': 'givenName',
'lastName': 'sn',
'organizationUnit': 'ou',
'githubLogin': 'carLicense'
}
validation_schema = voluptuous.Schema(schemas.schema_users)
class FreeIPARule(FreeIPAEntity):
"""Abstract class covering HBAC and sudo rules."""
def create_commands(self, remote_entity=None):
"""
Create commands to execute in order to update the rule.
Extends the basic command creation
to account for adding/removing rule members.
:param dict remote_entity: remote rule data
:returns: list of commands to execute
:rtype: list(Command)
"""
result = super(FreeIPARule, self).create_commands(remote_entity)
result.extend(self._process_rule_membership(remote_entity))
return result
def _process_rule_membership(self, remote_entity):
"""
Prepare commands for an HBAC/sudo rule membership update.
If the rule previously had any members, these are removed,
as a rule can only have one usergroup and one hostgroup as members.
:param FreeIPARule remote_entity: remote entity data (may be None)
"""
commands = []
for key, member_type, cmd_key in (
('memberhost', 'hostgroup', 'host'),
('memberuser', 'group', 'user'),
('memberservice', 'hbacsvc', 'service')):
local_members = set(self.data_ipa.get(key, []))
if remote_entity:
search_key = '%s_%s' % (key, member_type)
remote_members = set(
remote_entity.data_ipa.get(search_key, []))
else:
remote_members = set()
command = '%s_add_%s' % (self.entity_name, cmd_key)
for member in local_members - remote_members:
diff = {member_type: member}
commands.append(
Command(command, diff, self.name, self.entity_id_type))
command = '%s_remove_%s' % (self.entity_name, cmd_key)
for member in remote_members - local_members:
diff = {member_type: member}
commands.append(
Command(command, diff, self.name, self.entity_id_type))
return commands
class FreeIPAHBACRule(FreeIPARule):
"""Representation of a FreeIPA HBAC (host-based access control) rule."""
entity_name = 'hbacrule'
default_attributes = ['serviceCategory']
managed_attributes_push = ['description', 'serviceCategory']
validation_schema = voluptuous.Schema(schemas.schema_hbac)
def __init__(self, name, data, path=None):
"""
Create an HBAC rule instance.
This override is needed to set the serviceCategory parameter.
"""
if path: # only edit local entities
if not data: # may be None; we want to ensure dictionary
data = dict()
if 'memberService' not in data:
data.update({'serviceCategory': 'all'})
elif 'serviceCategory' in data:
raise IntegrityError(
'%s cannot contain both memberService and serviceCategory'
% name)
super(FreeIPAHBACRule, self).__init__(name, data, path)
class FreeIPASudoRule(FreeIPARule):
"""Representation of a FreeIPA sudo rule."""
entity_name = 'sudorule'
default_attributes = [
'cmdCategory', 'options', 'runAsGroupCategory', 'runAsUserCategory']
managed_attributes_push = [
'cmdCategory', 'description',
'ipaSudoRunAsGroupCategory', 'ipaSudoRunAsUserCategory']
managed_attributes_pull = managed_attributes_push + ['ipaSudoOpt']
key_mapping = {
'options': 'ipaSudoOpt',
'runAsGroupCategory': 'ipaSudoRunAsGroupCategory',
'runAsUserCategory': 'ipaSudoRunAsUserCategory'
}
validation_schema = voluptuous.Schema(schemas.schema_sudo)
def __init__(self, name, data, path=None):
"""
Create a sudorule instance.
This override is needed to set the options & runAs params.
"""
if path: # only edit local entities
if not data: # may be None; we want to ensure dictionary
data = dict()
data.update({'options': ['!authenticate', '!requiretty'],
'cmdCategory': 'all',
'runAsUserCategory': 'all',
'runAsGroupCategory': 'all'})
super(FreeIPASudoRule, self).__init__(name, data, path)
def _convert_to_repo(self, data):
result = super(FreeIPASudoRule, self)._convert_to_repo(data)
if isinstance(result.get('options'), unicode):
result['options'] = [result['options']]
return result
def create_commands(self, remote_entity=None):
"""
Create commands to execute in order to update the rule.
Extends the basic command creation with sudorule option update.
:param dict remote_entity: remote rule data
:returns: list of commands to execute
:rtype: list(Command)
"""
result = super(FreeIPASudoRule, self).create_commands(remote_entity)
result.extend(self._parse_sudo_options(remote_entity))
return result
def _parse_sudo_options(self, remote_entity):
"""
Prepare commands for sudo rule options update. This includes
deletion of old options that are no longer in configuration
as well as addition of new options.
:param dict remote_entity: remote entity data (can be None)
:returns: list of sudorule option update commands to execute
:rtype: list(Command)
"""
commands = []
local_options = set(self.data_repo.get('options', []))
if remote_entity:
remote_options = set(remote_entity.data_ipa.get('ipasudoopt', []))
else:
remote_options = set()
command = 'sudorule_add_option'
for opt in local_options - remote_options:
diff = {'ipasudoopt': [opt]}
commands.append(
Command(command, diff, self.name, self.entity_id_type))
command = 'sudorule_remove_option'
for opt in remote_options - local_options:
diff = {'ipasudoopt': [opt]}
commands.append(
Command(command, diff, self.name, self.entity_id_type))
return commands
class FreeIPAHBACService(FreeIPAEntity):
"""Entity to hold the info about FreeIPA HBACServices"""
entity_name = 'hbacsvc'
managed_attributes_push = ['description']
managed_attributes_pull = managed_attributes_push
validation_schema = voluptuous.Schema(schemas.schema_hbacservices)
class FreeIPAHBACServiceGroup(FreeIPAEntity):
"""Entity to hold the info about FreeIPA HBACServiceGroups"""
entity_name = 'hbacsvcgroup'
managed_attributes_push = ['description']
managed_attributes_pull = managed_attributes_push
allowed_members = ['hbacsvc']
validation_schema = voluptuous.Schema(schemas.schema_hbacsvcgroups)
class FreeIPARole(FreeIPAEntity):
"""Entity to hold the info about FreeIPA Roles"""
entity_name = 'role'
managed_attributes_pull = ['description']
managed_attributes_push = managed_attributes_pull
allowed_members = ['user', 'group', 'service', 'hostgroup']
validation_schema = voluptuous.Schema(schemas.schema_roles)
class FreeIPAPrivilege(FreeIPAEntity):
"""Entity to hold the info about FreeIPA Privilege"""
entity_name = 'privilege'
managed_attributes_pull = ['description']
managed_attributes_push = managed_attributes_pull
allowed_members = ['role']
validation_schema = voluptuous.Schema(schemas.schema_privileges)
class FreeIPAPermission(FreeIPAEntity):
"""Entity to hold the info about FreeIPA Permission"""
entity_name = 'permission'
managed_attributes_pull = ['description', 'subtree', 'attrs',
'ipapermlocation', 'ipapermright',
'ipapermdefaultattr']
managed_attributes_push = managed_attributes_pull
key_mapping = {
'grantedRights': 'ipapermright',
'attributes': 'attrs',
'location': 'ipapermlocation',
'defaultAttr': 'ipapermdefaultattr'
}
allowed_members = ['privilege']
validation_schema = voluptuous.Schema(schemas.schema_permissions)
class FreeIPAService(FreeIPAEntity):
"""
Entity to hold the info about FreeIPA Services
PUSH NOT SUPPORTED yet
"""
entity_name = 'service'
entity_id_type = 'krbcanonicalname'
managed_attributes_push = [] # Empty because we don't support push
managed_attributes_pull = ['managedby_host', 'description']
key_mapping = {
'managedBy': 'managedby_host',
}
validation_schema = voluptuous.Schema(schemas.schema_services)
def write_to_file(self):
"""
Converts the file name format from xyz/hostname.int.na.intgdc.com
to xyz-hostname_int_na_intgdc_com.yaml
"""
path, file_name = os.path.split(self.path)
service_name, _ = file_name.split('@')
self.path = ('%s-%s.yaml' % (path, service_name.replace('.', '_')))
super(FreeIPAService, self).write_to_file()
class EntityDumper(yaml.SafeDumper):
"""YAML dumper subclass used to fix under-indent of lists when dumping."""
def __init__(self, *args, **kwargs):
super(EntityDumper, self).__init__(*args, **kwargs)
self.add_representer(type(None), self._none_representer())
def increase_indent(self, flow=False, indentless=False):
return super(EntityDumper, self).increase_indent(flow, False)
def _none_representer(self):
"""
Enable correct representation of empty values in config
by representing None as empty string instead of 'null'.
"""
def representer(dumper, value):
return dumper.represent_scalar(u'tag:yaml.org,2002:null', '')
return representer
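# ---------------------------------------------------------------------------
# A hedged usage sketch (Python 2, like the module above). The config values
# are hypothetical and must satisfy schemas.schema_usergroups;
# create_commands(None) yields the commands needed to create the entity in
# FreeIPA from scratch.
if __name__ == '__main__':
    group = FreeIPAUserGroup(
        'developers',
        {'description': 'Developer accounts'},
        path='groups/developers.yaml')
    for cmd in group.create_commands(remote_entity=None):
        print cmd  # e.g. a Command wrapping 'group_add'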
| 39.058824
| 79
| 0.616508
| 2,657
| 23,904
| 5.392548
| 0.164471
| 0.027638
| 0.023451
| 0.024567
| 0.382119
| 0.329146
| 0.25998
| 0.226549
| 0.196678
| 0.181044
| 0
| 0.001006
| 0.293047
| 23,904
| 611
| 80
| 39.12275
| 0.846796
| 0.255982
| 0
| 0.246032
| 0
| 0
| 0.104401
| 0.009874
| 0
| 0
| 0
| 0
| 0
| 1
| 0.100529
| false
| 0
| 0.02381
| 0.021164
| 0.388889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbbe56b29123b2a0ee8c4986b892e3949b69a274
| 2,362
|
py
|
Python
|
__main__.py
|
SHUcream00/MLBPitchVisual
|
a3092cef7cbd4e73f8d0010dd62811df6cc36cac
|
[
"MIT"
] | null | null | null |
__main__.py
|
SHUcream00/MLBPitchVisual
|
a3092cef7cbd4e73f8d0010dd62811df6cc36cac
|
[
"MIT"
] | null | null | null |
__main__.py
|
SHUcream00/MLBPitchVisual
|
a3092cef7cbd4e73f8d0010dd62811df6cc36cac
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
def visualize(dataframe, balltype):
df = dataframe
#Filter by balltype
res = df[df["pitch_type"] == balltype]
#Group by results
groups = res.groupby("description")
for name, group in groups:
if name == "miss":
plt.plot(group["plate_x"], group["plate_z"], marker="o", linestyle="", color="none", ms = 3, mec="#9A9A9A", label=name)
else:
plt.plot(group["plate_x"], group["plate_z"], marker="o", linestyle="", color="none", ms = 3, mec="#03A77F", label=name)
#Fixing the viewpoint of the plot
axes = plt.gca()
axes.set_xlim([-2.50,2.50])
axes.set_ylim([0.00,5.00])
#Setting strike zone
sz_top_avg = res["sz_top"].mean()
sz_bottom_avg = res["sz_bot"].mean()
sz_left = -0.85
sz_right = 0.85
#Drawing strike zone
plt.plot((sz_left, sz_right), (sz_top_avg, sz_top_avg), 'k-')
plt.plot((sz_left, sz_right), (sz_bottom_avg, sz_bottom_avg), 'k-')
plt.plot((sz_left, sz_left), (sz_top_avg, sz_bottom_avg), 'k-')
plt.plot((sz_right, sz_right), (sz_top_avg, sz_bottom_avg), 'k-')
#Setting labels
plt.xlabel("Horizontal Location")
plt.ylabel("Vertical Location")
plt.title(f"{player_name} 2018\n {ballname_dict.get(balltype, balltype)}")
plt.legend()
plt.show()
#Setting up Name and CSV location
player_name = "Put player name"
file_src = "Put target csv"
raw = pd.read_csv(file_src)
df = pd.DataFrame(raw)
#For filtering cases
replace_dict = {"description": {"hit_into_play_no_out": "contact", "hit_into_play": "contact", "hit_into_play_score": "contact", "swinging_strike": "miss", "swinging_strike_blocked": "miss"}}
ballname_dict = {"FF": "4-Seam Fastball", "CH": "Changeup", "CU": "Curveball", "SL": "Slider", "FT": "2-Seam Fastball", "AB": "Automatic Ball",
"AS": "Automatic Strike", "EP": "Eephus", "FC": "Cutter", "FO": "Forkball", "FS": "Splitter", "GY": "Gyroball", "IN": "Intentional Ball",
"KC": "Knuckle Curve", "NP": "No Pitch", "PO": "Pitchout", "SC": "Screwball", "SI": "Sinker", "UN": "Unknown"}
df = df.replace(replace_dict)
df = df[df["description"].isin(["contact", "miss"])]
for i in df["pitch_type"].unique():
visualize(df, i)
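# ---------------------------------------------------------------------------
# A minimal smoke test for visualize() with a hand-built frame instead of a
# Statcast CSV; the column names match the ones the function reads.
# Uncomment to try it without a real data file.
# demo = pd.DataFrame({
#     "pitch_type": ["FF", "FF", "SL"],
#     "description": ["contact", "miss", "miss"],
#     "plate_x": [-0.3, 0.5, 0.9],
#     "plate_z": [2.1, 3.0, 1.4],
#     "sz_top": [3.4, 3.4, 3.4],
#     "sz_bot": [1.6, 1.6, 1.6],
# })
# visualize(demo, "FF")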
| 37.492063
| 192
| 0.615157
| 336
| 2,362
| 4.142857
| 0.443452
| 0.030172
| 0.028736
| 0.028017
| 0.188218
| 0.188218
| 0.177443
| 0.12069
| 0.086207
| 0.086207
| 0
| 0.017507
| 0.201948
| 2,362
| 62
| 193
| 38.096774
| 0.720955
| 0.07155
| 0
| 0
| 0
| 0
| 0.286387
| 0.024023
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025
| false
| 0
| 0.075
| 0
| 0.1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbc0541470856937f6eef50be9d0887839277ab1
| 3,472
|
py
|
Python
|
utils/ghost.py
|
JayJJChen/LoveXueXiQiangGuo
|
648a38cd73d1eb7ed7267721f1a23c90afb0daee
|
[
"MIT"
] | 3
|
2019-04-16T07:52:20.000Z
|
2021-08-16T03:07:14.000Z
|
utils/ghost.py
|
JayJJChen/LoveXueXiQiangGuo
|
648a38cd73d1eb7ed7267721f1a23c90afb0daee
|
[
"MIT"
] | 1
|
2019-04-17T02:23:32.000Z
|
2020-12-24T11:04:52.000Z
|
utils/ghost.py
|
JayJJChen/LoveXueXiQiangGuo
|
648a38cd73d1eb7ed7267721f1a23c90afb0daee
|
[
"MIT"
] | 2
|
2019-04-17T04:00:55.000Z
|
2019-09-18T00:57:35.000Z
|
import os
import time
from utils.eye import Eye
from utils.finger import Finger
class Ghost:
"""class to navigate the app, with Eye and Finger"""
def __init__(self, adb_path, temp_path, sleep_sec=2):
self.eye = Eye(adb_path, temp_path)
self.finger = Finger(adb_path, sleep_sec=sleep_sec)
def to_main(self):
"""back to main page, doesn't support back from exam"""
num_attempts = 0
max_try = 10
while not self._in_main():
if self._in_exam():
self._exit_exam()
else:
self.finger.back()
num_attempts += 1
if num_attempts >= max_try: # failsafe
input("I'm lost! Please help me go to main page! Hit Enter to continue")
def to_score(self):
"""click the score from main page"""
self._bottom_tab(2)
self._goto("score")
def to_exam_root(self):
"""go to the exam page root from main page"""
self._bottom_tab(4)
self._goto("exam_icon")
def _exit_exam(self):
"""exit during exam to main"""
self.finger.back()
self._goto("exit_exam")
self.finger.back()
def swipe_up(self):
self.finger.swipe(500, 1000, 500, 500)
def swipe_down(self):
self.finger.swipe(500, 500, 500, 1000)
def _find_weekly_exam(self):
"""find available weekly exam in weekly exam page"""
path = self._image_path("start_exam")
coords = self.eye.find(path, multi_target=False)
fail_count = 0
while coords is None:
# swipe up if there's no "start_exam"
time.sleep(2)
self.swipe_up()
coords = self.eye.find(path, multi_target=False)
fail_count += 1  # count retries so the failsafe below can trigger
if (fail_count > 10) and (coords is None):
raise RuntimeError("I'm lost! Exiting!")
self.finger.tap(*coords[0])
def _goto(self, img_name):
path = self._image_path(img_name)
coords = self.eye.find(path, multi_target=False)
fail_count = 0
while coords is None:
time.sleep(2)
coords = self.eye.find(path, multi_target=False)
fail_count += 1  # count retries so the failsafe below can trigger
if (fail_count > 5) and (coords is None):
raise RuntimeError("I'm lost! Exiting!")
self.finger.tap(*coords[0])
def _bottom_tab(self, n):
"""
navigate to bottom n_th tab, the screen resolution is 1080x1920
args
n: int, n_th bottom tab
{
n=0: 消息 (Messages)
n=1: 关注 (Following)
n=2: 学习 (Study)
n=3: 视频学习 (Video learning)
n=4: 我的 (Me)
}
"""
x = [108 + 108 * 2 * i for i in range(5)]
y = 1850
self.finger.tap(x[n], y)
def _in_exam(self):
image = self.eye.see()
in_exam = self.eye.find(self._image_path("in_exam"), img=image, multi_target=False)
if in_exam is not None:
return True
else:
return False
def _in_main(self):
image = self.eye.see()
main_act = self.eye.find(self._image_path("main_act"), img=image, multi_target=False)
main_inact = self.eye.find(self._image_path("main_inact"), img=image, multi_target=False)
if (main_act is not None) or (main_inact is not None):
return True
else:
return False
@staticmethod
def _image_path(img_name):
path = os.path.join("images", "{}.png".format(img_name))
return path
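# ---------------------------------------------------------------------------
# A hedged usage sketch; the adb/temp paths are hypothetical and Eye/Finger
# need a connected Android device, so this is illustrative only.
if __name__ == "__main__":
    ghost = Ghost(adb_path="adb", temp_path="temp", sleep_sec=2)
    ghost.to_main()       # navigate back to the main page first
    ghost.to_exam_root()  # then open the exam section from main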
| 30.191304
| 97
| 0.563652
| 484
| 3,472
| 3.855372
| 0.254132
| 0.037513
| 0.041265
| 0.036442
| 0.383708
| 0.326902
| 0.259378
| 0.229368
| 0.192926
| 0.192926
| 0
| 0.028657
| 0.326613
| 3,472
| 114
| 98
| 30.45614
| 0.769461
| 0.128168
| 0
| 0.337662
| 0
| 0
| 0.058276
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.168831
| false
| 0
| 0.051948
| 0
| 0.298701
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbc489c4f1e6739cd6d3b2e54cc4268da59045a7
| 336
|
py
|
Python
|
quran_text/urls.py
|
Quran-Tafseer/tafseer_api
|
49eede15a6e50812a4bab1e0e1e38069fcb0da4d
|
[
"MIT"
] | 16
|
2019-03-02T13:08:59.000Z
|
2022-02-26T17:26:09.000Z
|
quran_text/urls.py
|
EmadMokhtar/tafseer_api
|
abb2d53eb917f58db1e09f7d92180b0eb8001a40
|
[
"MIT"
] | 45
|
2017-10-25T06:17:50.000Z
|
2018-12-08T17:01:41.000Z
|
quran_text/urls.py
|
Quran-Tafseer/tafseer_api
|
49eede15a6e50812a4bab1e0e1e38069fcb0da4d
|
[
"MIT"
] | 6
|
2019-02-09T03:57:09.000Z
|
2021-12-29T02:54:29.000Z
|
from django.urls import path
from . import views
urlpatterns = [
path('',
view=views.SuraListView.as_view(), name='sura-list'),
path('<int:sura_num>/<int:number>/',
view=views.AyahTextView.as_view(), name='ayah-detail'),
path('<int:sura_num>/<int:number>',
view=views.AyahTextView.as_view()),
]
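# ---------------------------------------------------------------------------
# A hedged sketch of resolving these routes by name; it assumes this urlconf
# is mounted at the project root, hence the commented form.
# from django.urls import reverse
# reverse('sura-list')                                         # -> '/'
# reverse('ayah-detail', kwargs={'sura_num': 1, 'number': 1})  # -> '/1/1/'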
| 25.846154
| 64
| 0.630952
| 43
| 336
| 4.813953
| 0.44186
| 0.130435
| 0.096618
| 0.135266
| 0.483092
| 0.483092
| 0.483092
| 0.483092
| 0.483092
| 0.483092
| 0
| 0
| 0.181548
| 336
| 12
| 65
| 28
| 0.752727
| 0
| 0
| 0
| 0
| 0
| 0.223214
| 0.16369
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.2
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbc6237f7856e6445933721e9b53e17ec980bef0
| 8,205
|
py
|
Python
|
main.py
|
PotentialParadox/PyReparm
|
70062e351eebacb9c6cb3dc0262e97256c52be3d
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
PotentialParadox/PyReparm
|
70062e351eebacb9c6cb3dc0262e97256c52be3d
|
[
"Apache-2.0"
] | null | null | null |
main.py
|
PotentialParadox/PyReparm
|
70062e351eebacb9c6cb3dc0262e97256c52be3d
|
[
"Apache-2.0"
] | null | null | null |
import random
from evaluation import Evaluator
from generator import generator
from mutate import mutateset
from deap import base
from deap import creator
from deap import tools
from parameter_group import ParameterGroup
import gaussian_output
from analysis import Analysis
from gaussian_input import GaussianInput
from gaussian import gaussian_single
from header import Header
from reparm_data import ReparmData
from genesis import Genesis
import numpy as np
from scipy.optimize import minimize
from copy import deepcopy
from sklearn.cross_validation import train_test_split
from sklearn.preprocessing import StandardScaler
from sklearn import svm
from sklearn.linear_model import RidgeCV
from sklearn.ensemble import RandomForestRegressor
#############################################
# BEGIN USER INPUT
#############################################
fin = open("reparm.in", 'r')
file = fin.read()
reparm_data = ReparmData(file)
if reparm_data.reparm_input.should_continue:
reparm_data.load()
else:
Genesis(reparm_data=reparm_data)
reparm_data.save()
############################################
# END USER INPUT
############################################
#############################################
# BEGIN USER INPUT
#############################################
# Number of generations
NGEN = reparm_data.reparm_input.number_generations
# PopulationSize
PSIZE = reparm_data.reparm_input.population_size
# Crossover Probability
CXPB = reparm_data.reparm_input.crossover_probability
# Mutation Probability
# How likely an individual will be mutated
MUTPB = reparm_data.reparm_input.mutation_probability
# Mutation Rate
# How likely a member of an individual will be mutated
MUTR = reparm_data.reparm_input.mutation_rate
# Crowding Factor
CWD = reparm_data.reparm_input.crowding_factor
# Mutation Perturbation
MUTPT = reparm_data.reparm_input.mutation_perturbation
# Initial Perturbation
IMUTPT = 0.05
# Initial List of parameters
IL = []
for i in range(0, len(reparm_data.best_am1_individual.inputs[0].parameters[0].p_floats), 4):
IL.append(reparm_data.best_am1_individual.inputs[0].parameters[0].p_floats[i])
# The evaluator (fitness, cost) function
eval = Evaluator(reparm_data=reparm_data)
if reparm_data.best_fitness is None:
reparm_data.best_fitness = list(eval.eval(IL))
reparm_data.original_fitness = deepcopy(reparm_data.best_fitness)
else:
reparm_data.best_fitness = list(eval.eval(IL))
print("original_fitness", reparm_data.original_fitness)
print("starting at", reparm_data.best_fitness)
#############################################
# END USER INPUT
#############################################
#############################################
# BEGIN DEAP SETUP
#############################################
creator.create("FitnessMax", base.Fitness, weights=(-1.0, 0, 0))
creator.create("ParamSet", list, fitness=creator.FitnessMax, best=None)
toolbox = base.Toolbox()
toolbox.register("individual", generator, IL, IMUTPT)
toolbox.register("population", tools.initRepeat, list, toolbox.individual)
toolbox.register("mate", tools.cxSimulatedBinary)
toolbox.register("mutate", mutateset, pert=MUTPT, chance=MUTR)
toolbox.register("select", tools.selTournament, tournsize=3)
toolbox.register("evaluate", eval.eval)
pop = toolbox.population(n=PSIZE)
#############################################
# END DEAP SETUP
#############################################
#############################################
# BEGIN GENETIC ALGORITHM
#############################################
for g in range(NGEN):
print("Starting gen:", g)
offspring = toolbox.select(pop, len(pop))
offspring = list(map(toolbox.clone, offspring))
for child1, child2 in zip(offspring[::2], offspring[1::2]):
if random.random() < CXPB:
toolbox.mate(child1, child2, CWD)
del child1.fitness.values
del child2.fitness.values
for mutant in offspring:
if random.random() < MUTPB:
toolbox.mutate(mutant)
del mutant.fitness.values
invalid_ind = [ind for ind in offspring if not ind.fitness.valid]
fitnesses = []
for i in invalid_ind:
try:
fitness = toolbox.evaluate(i)
fitnesses.append(fitness)
reparm_data.observations.append(list(i))
i.fitness.values = fitness
if not reparm_data.best_fitness or fitness[0] < reparm_data.best_fitness[0]:
print("Previous Best", reparm_data.best_fitness)
reparm_data.best_fitness = list(fitness)
reparm_data.best_am1_individual.set_pfloats(i)
print("NewBest Found:", reparm_data.best_fitness)
except TypeError:
fitnesses.append(None)
reparm_data.save()
pop[:] = offspring
#############################################
# End Genetic Algorithm
#############################################
#############################################
# Begin Particle Simulation
#############################################
# for g in range(NGEN):
# for part in pop:
# part.fitness.values = toolbox.evaluate(part)
# if not part.best or part.best.fitness < part.fitness:
# part.best = creator.ParamSet(part)
# part.best.fitness.values = part.fitness.values
# if not best or best.fitness < part.fitness:
# best = creator.ParamSet(part)
# best.fitness.values = part.fitness.values
# for part in pop:
# toolbox.mutate(part)
# print(best, "with fitness", best.fitness)
#############################################
# End Particle Simulation
#############################################
#############################################
# Begin Print Out
#############################################
gin_best = reparm_data.best_am1_individual.inputs[0]
s_opt_header = "#P AM1(Input,Print) opt\n\nAM1\n"
opt_header = Header(s_opt_header)
gin_opt = GaussianInput(header=opt_header,
coordinates=gin_best.coordinates[0],
parameters=gin_best.parameters[0])
fout = open("reparm_best_opt.com", 'w')
fout.write(gin_opt.str())
fout.close()
try:
gout = gaussian_single(gin_opt.str())
fout = open("reparm_best_opt.log", 'w')
fout.write(gout)
fout.close()
except TypeError:
print("Could not get output file from input,"
"most likely, optimization failed to converge")
#############################################
# End Print Out
#############################################
#############################################
# Begin ScikitLearn
#############################################
# # Preprocessor
# targets = np.array(reparm_data.targets)
# X = np.array(reparm_data.observations)
# y = targets[:, 0] # 0, 1, 2 for total, energy, and dipole
# X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)
# stdsc = StandardScaler()
# X_train_std = stdsc.fit_transform(X_train)
# X_test_std = stdsc.transform(X_test)
#
# # Training
# clf = svm.SVR(C=1.3, kernel='rbf')
# # clf = RandomForestRegressor(n_estimators=20)
# clf.fit(X_train, y_train)
# print("Using {} samples with fitness score {}".format(len(y), clf.score(X_test, y_test)))
#
# initial_guess = np.array(IL)
# fun = lambda x: clf.predict(stdsc.transform(x.reshape(1, -1)))
# print("Predicting best parameters")
# min_params = (minimize(fun, initial_guess)).x
# stdsc.inverse_transform(min_params)
# params = min_params.tolist()
# skl_best = deepcopy(reparm_data.best_am1_individual)
# skl_best.set_pfloats(params)
# open("skl_best.com", 'w').write(skl_best.inputs[0].str())
# skl_fitness = eval.eval(params)
# if skl_fitness:
# print("skl_fitness:", skl_fitness)
#############################################
# End ScikitLearn
#############################################
#############################################
# Begin Analysis
#############################################
anal = Analysis(reparm_data)
anal.trithiophene()
#############################################
# End Analysis
#############################################
| 36.145374
| 92
| 0.584156
| 905
| 8,205
| 5.140331
| 0.259669
| 0.081685
| 0.045142
| 0.045142
| 0.11221
| 0.061049
| 0.061049
| 0.037403
| 0.022356
| 0.022356
| 0
| 0.006928
| 0.155637
| 8,205
| 226
| 93
| 36.30531
| 0.66455
| 0.271176
| 0
| 0.110092
| 0
| 0
| 0.065442
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.211009
| 0
| 0.211009
| 0.055046
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbc6b99c48a68e88a0554cb932a77dac52c1e5c0
| 1,460
|
py
|
Python
|
repo/script.module.liveresolver/lib/liveresolver/resolvers/finecast.py
|
Hades01/Addons
|
710da97ac850197498a3cd64be1811c593610add
|
[
"Apache-2.0"
] | 3
|
2020-03-03T13:21:44.000Z
|
2021-07-21T09:53:31.000Z
|
repo/script.module.liveresolver/lib/liveresolver/resolvers/finecast.py
|
Hades01/Addons
|
710da97ac850197498a3cd64be1811c593610add
|
[
"Apache-2.0"
] | null | null | null |
repo/script.module.liveresolver/lib/liveresolver/resolvers/finecast.py
|
Hades01/Addons
|
710da97ac850197498a3cd64be1811c593610add
|
[
"Apache-2.0"
] | 2
|
2020-04-01T22:11:12.000Z
|
2020-05-07T23:54:52.000Z
|
# -*- coding: utf-8 -*-
import re,urlparse,cookielib,os,urllib
from liveresolver.modules import client,recaptcha_v2,control,constants, decryptionUtils
from liveresolver.modules.log_utils import log
cookieFile = os.path.join(control.dataPath, 'finecastcookie.lwp')
def resolve(url):
#try:
try:
referer = urlparse.parse_qs(urlparse.urlparse(url).query)['referer'][0]
except:
referer=url
id = urlparse.parse_qs(urlparse.urlparse(url).query)['u'][0]
cj = get_cj()
url = 'http://www.finecast.tv/embed4.php?u=%s&vw=640&vh=450'%id
rs = client.request(url,referer=referer,cj=cj)
sitekey = re.findall('data-sitekey="([^"]+)', rs)[0]
token = recaptcha_v2.UnCaptchaReCaptcha().processCaptcha(sitekey, lang='de')
#1:04
result = client.request(url, post=urllib.urlencode(token), referer=referer)
log(result)
file = re.findall('[\'\"](.+?.stream)[\'\"]',result)[0]
auth = re.findall('[\'\"](\?wmsAuthSign.+?)[\'\"]',result)[0]
rtmp = 'http://play.finecast.tv:1935/live/%s/playlist.m3u8%s'%(file,auth)
return rtmp
#except:
# return
def get_cj():
cookieJar=None
try:
cookieJar = cookielib.LWPCookieJar()
cookieJar.load(cookieFile,ignore_discard=True)
except:
cookieJar=None
if not cookieJar:
cookieJar = cookielib.LWPCookieJar()
return cookieJar
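# ---------------------------------------------------------------------------
# A hedged usage sketch (Python 2, like the module): resolving an embed URL
# into a playable m3u8. The channel id and referer are hypothetical, and the
# reCAPTCHA step needs user interaction, so this is shown commented out.
# stream = resolve('http://host/embed?u=1234&referer=http://example.com/page')
# print stream  # e.g. http://play.finecast.tv:1935/live/<file>/playlist.m3u8...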
| 30.416667
| 87
| 0.619178
| 171
| 1,460
| 5.239766
| 0.502924
| 0.030134
| 0.051339
| 0.051339
| 0.087054
| 0.087054
| 0.087054
| 0
| 0
| 0
| 0
| 0.021016
| 0.217808
| 1,460
| 48
| 88
| 30.416667
| 0.763573
| 0.031507
| 0
| 0.258065
| 0
| 0.032258
| 0.139815
| 0.031228
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064516
| false
| 0
| 0.096774
| 0
| 0.225806
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbc7c8fe7bece88307002636b27bacde286985d2
| 3,520
|
py
|
Python
|
app.py
|
pizzapanther/google-actions-python-example
|
40d13fc1821e1e11f15cc7413571cb5bd6327024
|
[
"MIT"
] | 9
|
2017-11-17T07:09:08.000Z
|
2020-07-03T13:32:16.000Z
|
app.py
|
pizzapanther/google-actions-python-example
|
40d13fc1821e1e11f15cc7413571cb5bd6327024
|
[
"MIT"
] | 2
|
2019-08-10T05:49:47.000Z
|
2021-04-30T20:51:40.000Z
|
app.py
|
pizzapanther/google-actions-python-example
|
40d13fc1821e1e11f15cc7413571cb5bd6327024
|
[
"MIT"
] | 5
|
2018-05-04T08:05:55.000Z
|
2021-08-25T05:49:18.000Z
|
#!/usr/bin/env python
import os
import json
import tornado.ioloop
import tornado.log
import tornado.web
from google.oauth2 import id_token
from google.auth.transport import requests as google_requests
import jwt
import requests
API_KEY = os.environ.get('OPEN_WEATHER_MAP_KEY', None)
PROJECT_ID = os.environ.get('PROJECT_ID', None)
class WeatherHandler(tornado.web.RequestHandler):
def start_conversation (self):
response = {
'expectUserResponse': True,
'expectedInputs': [
{
'possibleIntents': {'intent': 'actions.intent.TEXT'},
'inputPrompt': {
'richInitialPrompt': {
'items': [
{
'simpleResponse': {
'ssml': '<speak>What city would you like the weather for?</speak>'
}
}
]
}
}
}
]
}
self.set_header("Content-Type", 'application/json')
self.set_header('Google-Assistant-API-Version', 'v2')
self.write(json.dumps(response, indent=2))
def get_weather (self, city):
api_response = requests.get(
'http://api.openweathermap.org/data/2.5/weather',
params={'q': city, 'APPID': API_KEY}
)
data = api_response.json()
if 'main' not in data:
response = {
'expectUserResponse': False,
'finalResponse': {
'richResponse': {
'items': [
{
'simpleResponse': {
'ssml': '<speak>City not found - meow!</speak>'
}
}
]
}
}
}
else:
temp = round(1.8 * (data['main']['temp'] - 273) + 32)
response = {
'expectUserResponse': False,
'finalResponse': {
'richResponse': {
'items': [
{
'simpleResponse': {
'ssml': '<speak>The temperature in {} is {} degrees.</speak>'.format(city, temp)
}
}
]
}
}
}
self.set_header("Content-Type", 'application/json')
self.set_header('Google-Assistant-API-Version', 'v2')
self.write(json.dumps(response, indent=2))
def get (self):
city = self.get_query_argument('city', '')
if city:
self.get_weather(city)
else:
self.start_conversation()
def post (self):
token = self.request.headers.get("Authorization")
jwt_data = jwt.decode(token, verify=False)
if jwt_data['aud'] != PROJECT_ID:
self.set_status(401)
self.write('Token Mismatch')
else:
request = google_requests.Request()
try:
# Makes external request, remove if not needed to speed things up
id_info = id_token.verify_oauth2_token(token, request, PROJECT_ID)
except:
self.set_status(401)
self.write('Token Mismatch')
return  # don't process the request body with an unverified token
data = json.loads(self.request.body.decode('utf-8'))
intent = data['inputs'][0]['intent']
print(intent)
print(data['conversation']['conversationId'])
if intent == 'actions.intent.MAIN':
self.start_conversation()
else:
city = data['inputs'][0]['arguments'][0]['textValue']
self.get_weather(city)
def make_app():
return tornado.web.Application([
(r"/weather-app", WeatherHandler),
], autoreload=True)
if __name__ == "__main__":
tornado.log.enable_pretty_logging()
app = make_app()
app.listen(int(os.environ.get('PORT', '8000')))
tornado.ioloop.IOLoop.current().start()
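# ---------------------------------------------------------------------------
# A hedged smoke test for the GET branch above, run from a separate shell
# while the app is listening; assumes the default PORT and a valid
# OPEN_WEATHER_MAP_KEY in the server's environment.
# import requests
# r = requests.get("http://localhost:8000/weather-app", params={"city": "Boston"})
# items = r.json()["finalResponse"]["richResponse"]["items"]
# print(items[0]["simpleResponse"]["ssml"])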
| 25.693431
| 98
| 0.563352
| 368
| 3,520
| 5.263587
| 0.391304
| 0.021683
| 0.026846
| 0.043366
| 0.247806
| 0.247806
| 0.247806
| 0.247806
| 0.20857
| 0.121838
| 0
| 0.011722
| 0.297159
| 3,520
| 136
| 99
| 25.882353
| 0.771221
| 0.023864
| 0
| 0.294643
| 0
| 0
| 0.224585
| 0.016312
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044643
| false
| 0
| 0.080357
| 0.008929
| 0.142857
| 0.017857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbc99a75d68d09d60f840eae7b285af4fedbeeae
| 2,988
|
py
|
Python
|
azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/bms_container_query_object.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2018-07-23T08:59:24.000Z
|
2018-07-23T08:59:24.000Z
|
azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/bms_container_query_object.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2018-11-29T14:46:42.000Z
|
2018-11-29T14:46:42.000Z
|
azure-mgmt-recoveryservicesbackup/azure/mgmt/recoveryservicesbackup/models/bms_container_query_object.py
|
JonathanGailliez/azure-sdk-for-python
|
f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b
|
[
"MIT"
] | 1
|
2018-08-28T14:36:47.000Z
|
2018-08-28T14:36:47.000Z
|
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from msrest.serialization import Model
class BMSContainerQueryObject(Model):
"""The query filters that can be used with the list containers API.
All required parameters must be populated in order to send to Azure.
:param backup_management_type: Required. Backup management type for this
container. Possible values include: 'Invalid', 'AzureIaasVM', 'MAB',
'DPM', 'AzureBackupServer', 'AzureSql', 'AzureStorage', 'AzureWorkload',
'DefaultBackup'
:type backup_management_type: str or
~azure.mgmt.recoveryservicesbackup.models.BackupManagementType
:param container_type: Type of container for filter. Possible values
include: 'Invalid', 'Unknown', 'IaasVMContainer',
'IaasVMServiceContainer', 'DPMContainer', 'AzureBackupServerContainer',
'MABContainer', 'Cluster', 'AzureSqlContainer', 'Windows', 'VCenter',
'VMAppContainer', 'SQLAGWorkLoadContainer', 'StorageContainer',
'GenericContainer', 'SqlCluster', 'ExchangeDAG', 'SharepointFarm',
'HyperVCluster', 'WindowsClient'
:type container_type: str or
~azure.mgmt.recoveryservicesbackup.models.ContainerType
:param backup_engine_name: Backup engine name
:type backup_engine_name: str
:param fabric_name: Fabric name for filter
:type fabric_name: str
:param status: Status of registration of this container with the Recovery
Services Vault.
:type status: str
:param friendly_name: Friendly name of this container.
:type friendly_name: str
"""
_validation = {
'backup_management_type': {'required': True},
}
_attribute_map = {
'backup_management_type': {'key': 'backupManagementType', 'type': 'str'},
'container_type': {'key': 'containerType', 'type': 'str'},
'backup_engine_name': {'key': 'backupEngineName', 'type': 'str'},
'fabric_name': {'key': 'fabricName', 'type': 'str'},
'status': {'key': 'status', 'type': 'str'},
'friendly_name': {'key': 'friendlyName', 'type': 'str'},
}
def __init__(self, **kwargs):
super(BMSContainerQueryObject, self).__init__(**kwargs)
self.backup_management_type = kwargs.get('backup_management_type', None)
self.container_type = kwargs.get('container_type', None)
self.backup_engine_name = kwargs.get('backup_engine_name', None)
self.fabric_name = kwargs.get('fabric_name', None)
self.status = kwargs.get('status', None)
self.friendly_name = kwargs.get('friendly_name', None)
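# ---------------------------------------------------------------------------
# A minimal construction example for the model above; the values are
# hypothetical but use enum strings listed in the docstring.
if __name__ == '__main__':
    query = BMSContainerQueryObject(
        backup_management_type='AzureIaasVM',  # the only required field
        container_type='IaasVMContainer',
        friendly_name='my-vm')
    print(query.backup_management_type)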
| 44.597015
| 81
| 0.662316
| 309
| 2,988
| 6.249191
| 0.433657
| 0.029001
| 0.072501
| 0.029001
| 0.047644
| 0.047644
| 0.047644
| 0
| 0
| 0
| 0
| 0.000405
| 0.174364
| 2,988
| 66
| 82
| 45.272727
| 0.782327
| 0.593039
| 0
| 0
| 0
| 0
| 0.305657
| 0.060219
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.047619
| 0
| 0.238095
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbca2b427044c8866cf81d44e473638aa489abca
| 274
|
py
|
Python
|
ia870/iagradm.py
|
rdenadai/ia870p3
|
c4823efc4b8e5f187a64f8a4e9962e328bf86967
|
[
"BSD-2-Clause"
] | 5
|
2018-10-15T12:02:03.000Z
|
2022-02-11T12:47:12.000Z
|
ia870/iagradm.py
|
rdenadai/ia870p3
|
c4823efc4b8e5f187a64f8a4e9962e328bf86967
|
[
"BSD-2-Clause"
] | 1
|
2018-10-15T12:04:36.000Z
|
2019-01-25T12:04:35.000Z
|
ia870/iagradm.py
|
rdenadai/ia870p3
|
c4823efc4b8e5f187a64f8a4e9962e328bf86967
|
[
"BSD-2-Clause"
] | 4
|
2019-01-25T11:13:48.000Z
|
2020-12-20T01:42:33.000Z
|
# -*- encoding: utf-8 -*-
# Module iagradm
def iagradm(f, Bdil=None, Bero=None):
from ia870 import iasubm,iadil,iaero,iasecross
if Bdil is None: Bdil = iasecross()
if Bero is None: Bero = iasecross()
y = iasubm( iadil(f,Bdil),iaero(f,Bero))
return y
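# ---------------------------------------------------------------------------
# A hedged usage sketch of the morphological gradient above on a tiny binary
# image; assumes ia870 is installed and accepts boolean numpy arrays, as
# elsewhere in the toolbox.
# import numpy as np
# f = np.array([[0, 0, 0, 0],
#               [0, 1, 1, 0],
#               [0, 0, 0, 0]], dtype=bool)
# g = iagradm(f)  # dilation minus erosion with the default cross SE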
| 21.076923
| 50
| 0.642336
| 41
| 274
| 4.292683
| 0.512195
| 0.056818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018692
| 0.218978
| 274
| 12
| 51
| 22.833333
| 0.803738
| 0.138686
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.166667
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbcc6f4ccb0dabce5252e1dd4108228b2c863f99
| 721
|
py
|
Python
|
web/web-lemonthinker/src/app/app.py
|
NoXLaw/RaRCTF2021-Challenges-Public
|
1a1b094359b88f8ebbc83a6b26d27ffb2602458f
|
[
"MIT"
] | 2
|
2021-08-09T17:08:12.000Z
|
2021-08-09T17:08:17.000Z
|
web/web-lemonthinker/src/app/app.py
|
NoXLaw/RaRCTF2021-Challenges-Public
|
1a1b094359b88f8ebbc83a6b26d27ffb2602458f
|
[
"MIT"
] | null | null | null |
web/web-lemonthinker/src/app/app.py
|
NoXLaw/RaRCTF2021-Challenges-Public
|
1a1b094359b88f8ebbc83a6b26d27ffb2602458f
|
[
"MIT"
] | 1
|
2021-10-09T16:51:56.000Z
|
2021-10-09T16:51:56.000Z
|
from flask import Flask, request, redirect, url_for
import os
import random
import string
import time # lemonthink
clean = time.time()
app = Flask(__name__)
chars = list(string.ascii_letters + string.digits)
@app.route('/')
def main():
return open("index.html").read()
@app.route('/generate', methods=['POST'])
def upload():
global clean
if time.time() - clean > 60:
os.system("rm static/images/*")
clean = time.time()
text = request.form.getlist('text')[0]
text = text.replace("\"", "")
filename = "".join(random.choices(chars,k=8)) + ".png"
os.system(f"python3 generate.py {filename} \"{text}\"")
return redirect(url_for('static', filename='images/' + filename), code=301)
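# ---------------------------------------------------------------------------
# A hedged client-side sketch for the endpoint above; the port is an
# assumption (whatever the app is served on in the challenge container).
# import requests
# r = requests.post("http://localhost:5000/generate",
#                   data={"text": "hello"}, allow_redirects=False)
# print(r.headers["Location"])  # /static/images/<8 random chars>.png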
| 28.84
| 79
| 0.653259
| 96
| 721
| 4.833333
| 0.572917
| 0.051724
| 0.060345
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013267
| 0.163662
| 721
| 25
| 79
| 28.84
| 0.756219
| 0.01387
| 0
| 0.090909
| 0
| 0
| 0.138028
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.227273
| 0.045455
| 0.409091
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbccbf08a5c6a38fe09196877c8bb3f8a56251c4
| 816
|
py
|
Python
|
aprendizado/codewars/descending_order.py
|
renatodev95/Python
|
2adee4a01de41f8bbb68fce563100c135a5ab549
|
[
"MIT"
] | null | null | null |
aprendizado/codewars/descending_order.py
|
renatodev95/Python
|
2adee4a01de41f8bbb68fce563100c135a5ab549
|
[
"MIT"
] | null | null | null |
aprendizado/codewars/descending_order.py
|
renatodev95/Python
|
2adee4a01de41f8bbb68fce563100c135a5ab549
|
[
"MIT"
] | null | null | null |
# Your task is to make a function that can take any non-negative integer as an argument and return it with its digits in descending order. Essentially, rearrange the digits to create the highest possible number.
# First version
def descending_order(num):
new_num = str(num)
new_num1 = [int(x) for x in new_num]
new_num1 = sorted(new_num1, reverse=True)
string = ''
for x in new_num1:
string += str(x)
return int(string)
# Refactoring of the first version (using a list comprehension)
def descending_order_two(num):
return int(''.join(sorted(str(num), reverse=True)))
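# ---------------------------------------------------------------------------
# Quick self-checks for both versions; the expected values follow directly
# from the kata definition (digits sorted high-to-low).
if __name__ == '__main__':
    assert descending_order(0) == 0
    assert descending_order(42145) == 54421
    assert descending_order_two(123456789) == 987654321
    print('descending_order examples passed')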
| 38.857143
| 211
| 0.734069
| 127
| 816
| 4.645669
| 0.622047
| 0.047458
| 0.030508
| 0.023729
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006061
| 0.191176
| 816
| 20
| 212
| 40.8
| 0.887879
| 0.582108
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0
| 0.1
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbd1044b9a9e2ac21f72f6855560f0e23688f3f9
| 8,025
|
py
|
Python
|
docs/conf.py
|
urm8/django-translations
|
e8f66710af9433044937b75c061e1988add398a5
|
[
"BSD-3-Clause"
] | 100
|
2018-11-20T19:30:49.000Z
|
2022-03-10T07:46:27.000Z
|
docs/conf.py
|
urm8/django-translations
|
e8f66710af9433044937b75c061e1988add398a5
|
[
"BSD-3-Clause"
] | 30
|
2018-11-27T19:53:53.000Z
|
2022-02-04T14:56:52.000Z
|
docs/conf.py
|
urm8/django-translations
|
e8f66710af9433044937b75c061e1988add398a5
|
[
"BSD-3-Clause"
] | 25
|
2019-05-30T13:41:47.000Z
|
2022-03-25T04:28:17.000Z
|
# -*- coding: utf-8 -*-
#
# Configuration file for the Sphinx documentation builder.
#
# This file does only contain a selection of the most common options. For a
# full list see the documentation:
# http://www.sphinx-doc.org/en/master/config
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
#
import os
import sys
import json
import datetime
# `Django setup` below, will add the path to `translations` module
# automatically because it's been included in `project.settings`, so no need
# to import it here
# -- Django setup ------------------------------------------------------------
# generated project settings
import django
sys.path.insert(
0,
os.path.join(os.path.dirname(os.path.abspath('.')), 'project')
)
os.environ['DJANGO_SETTINGS_MODULE'] = 'project.settings'
django.setup()
# -- Project information -----------------------------------------------------
with open(
os.path.join(
os.path.dirname(os.path.abspath('.')),
'config.json'
), 'r') as fh:
info = json.load(fh)
# project
project = info['project']['name']
# description
description = info['project']['desc']
# author
author = info['author']['name']
# The short X.Y version
version = info['release']['version']
# The full version, including alpha/beta/rc tags
release = info['release']['name']
# github
github_user = info['github']['user']
github_repo = info['github']['repo']
# donation
donate_url = info['urls']['funding']
# logo
logo = info['project']['logo']
# documentation
documentation = '{} {}'.format(project, 'Documentation')
# year
year = datetime.datetime.now().year
# copyright
copyright = '{year}, {author}'.format(year=year, author=author)
# -- General configuration ---------------------------------------------------
# If your documentation needs a minimal Sphinx version, state it here.
#
# needs_sphinx = '1.0'
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.doctest',
'sphinx.ext.intersphinx',
'sphinx.ext.viewcode',
'sphinx.ext.githubpages',
]
# Add any paths that contain templates here, relative to this directory.
templates_path = ['_templates']
# The suffix(es) of source filenames.
# You can specify multiple suffix as a list of string:
#
# source_suffix = ['.rst', '.md']
source_suffix = '.rst'
# The master toctree document.
master_doc = 'index'
# The language for content autogenerated by Sphinx. Refer to documentation
# for a list of supported languages.
#
# This is also used if you do content translation via gettext catalogs.
# Usually you set "language" from the command line for these cases.
language = None
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path .
exclude_patterns = ['_build', 'Thumbs.db', '.DS_Store']
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'monokai'
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = 'alabaster'
# Theme options are theme-specific and customize the look and feel of a theme
# further. For a list of options available for each theme, see the
# documentation.
#
html_theme_options = {
'note_bg': '#fec',
'note_border': '#ffe2a8',
'show_relbars': True,
'logo': logo,
'touch_icon': logo,
'logo_name': True,
'description': description,
'github_user': github_user,
'github_repo': github_repo,
'github_banner': True,
}
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
html_static_path = ['_static']
# Custom sidebar templates, must be a dictionary that maps document names
# to template names.
#
# The default sidebars (for documents that don't match any pattern) are
# defined by theme itself. Builtin themes are using these templates by
# default: ``['localtoc.html', 'relations.html', 'sourcelink.html',
# 'searchbox.html']``.
#
# html_sidebars = {}
# -- Options for HTMLHelp output ---------------------------------------------
# Output file base name for HTML help builder.
htmlhelp_basename = 'DjangoTranslationsdoc'
# -- Options for LaTeX output ------------------------------------------------
latex_elements = {
# The paper size ('letterpaper' or 'a4paper').
#
# 'papersize': 'letterpaper',
# The font size ('10pt', '11pt' or '12pt').
#
# 'pointsize': '10pt',
# Additional stuff for the LaTeX preamble.
#
# 'preamble': '',
# Latex figure (float) alignment
#
# 'figure_align': 'htbp',
}
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title,
# author, documentclass [howto, manual, or own class]).
latex_documents = [
(master_doc, 'DjangoTranslations.tex', documentation,
author, 'manual'),
]
# -- Options for manual page output ------------------------------------------
# One entry per manual page. List of tuples
# (source start file, name, description, authors, manual section).
man_pages = [
(master_doc, 'djangotranslations', documentation,
[author], 1)
]
# -- Options for Texinfo output ----------------------------------------------
# Grouping the document tree into Texinfo files. List of tuples
# (source start file, target name, title, author,
# dir menu entry, description, category)
texinfo_documents = [
(master_doc, 'DjangoTranslations', documentation,
author, 'DjangoTranslations', description,
'Miscellaneous'),
]
# -- Extension configuration -------------------------------------------------
# -- Options for intersphinx extension ---------------------------------------
# Example configuration for intersphinx: refer to the Python standard library.
intersphinx_mapping = {
'python': ('https://docs.python.org/', None),
'django': ('http://django.readthedocs.org/en/latest/', None),
}
# -- Options for doctest extension -------------------------------------------
doctest_global_setup = """
import builtins
from django.db import connection
from django.test import TestCase
from sample.utils import create_samples
import beautifier
# Turn on the test database for the doctests
connection.creation.create_test_db(verbosity=0)
TestCase.setUpClass()
# Beautify `testoutput`
def print(value='', end='\\n'):
builtins.print(beautifier.beautify(value, False), end=end)
# Sample creation
def create_doc_samples(translations=True):
if translations:
create_samples(
continent_names=['europe', 'asia'],
country_names=['germany', 'south korea'],
city_names=['cologne', 'seoul'],
continent_fields=['name', 'denonym'],
country_fields=['name', 'denonym'],
city_fields=['name', 'denonym'],
langs=['de']
)
else:
create_samples(
continent_names=['europe', 'asia'],
country_names=['germany', 'south korea'],
city_names=['cologne', 'seoul'],
)
"""
doctest_global_cleanup = """
import builtins
from django.db import connection
from django.test import TestCase
# Normalize `testoutput`
def print(value='', end='\\n'):
builtins.print(value, end=end)
# Turn off the test database for the doctests
TestCase.tearDownClass()
connection.creation.destroy_test_db(verbosity=0)
"""
| 27.389078
| 79
| 0.642492
| 947
| 8,025
| 5.372756
| 0.370644
| 0.015723
| 0.005503
| 0.005896
| 0.186124
| 0.129127
| 0.112421
| 0.112421
| 0.096698
| 0.082547
| 0
| 0.002711
| 0.172586
| 8,025
| 292
| 80
| 27.482877
| 0.763554
| 0.530841
| 0
| 0.152542
| 0
| 0
| 0.536845
| 0.148199
| 0
| 0
| 0
| 0.003425
| 0
| 1
| 0
| false
| 0
| 0.110169
| 0
| 0.110169
| 0.033898
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbd2339bf7055960ea772c1eecf31ab430a3ae71
| 5,297
|
py
|
Python
|
src/waldur_core/core/tests/helpers.py
|
geant-multicloud/MCMS-mastermind
|
81333180f5e56a0bc88d7dad448505448e01f24e
|
[
"MIT"
] | 26
|
2017-10-18T13:49:58.000Z
|
2021-09-19T04:44:09.000Z
|
src/waldur_core/core/tests/helpers.py
|
geant-multicloud/MCMS-mastermind
|
81333180f5e56a0bc88d7dad448505448e01f24e
|
[
"MIT"
] | 14
|
2018-12-10T14:14:51.000Z
|
2021-06-07T10:33:39.000Z
|
src/waldur_core/core/tests/helpers.py
|
geant-multicloud/MCMS-mastermind
|
81333180f5e56a0bc88d7dad448505448e01f24e
|
[
"MIT"
] | 32
|
2017-09-24T03:10:45.000Z
|
2021-10-16T16:41:09.000Z
|
import copy
from django.conf import settings
from django.test.utils import override_settings
from rest_framework import status, test
class PermissionsTest(test.APITransactionTestCase):
"""
Abstract class for permissions tests.
Methods `get_urls_configs`, `get_users_with_permission`,
`get_users_without_permissions` have to be overridden.
Logical example:
class ExamplePermissionsTest(PermissionsTest):
def get_users_with_permission(self, url, method):
if is_unreachable(url):
                    # no one has access to an unreachable url
return []
return [user_with_permission]
def get_users_without_permissions(self, url, method):
if is_unreachable(url):
                    # nobody has access to an unreachable url
return [user_with_permission, user_without_permission]
return [user_without_permission]
def get_urls_configs(self):
            yield {'url': 'http://testserver/some/url', 'method': 'GET'}
yield {'url': 'http://testserver/some/unreachable/url', 'method': 'POST'}
...
"""
def get_urls_configs(self):
"""
        Return a list or generator of url configs.
        Each url config is a dictionary with the following keys:
        - url: the url itself
        - method: request method
        - data: data which will be sent in the request
url config example:
{
'url': 'http://testserver/api/backup/',
'method': 'POST',
'data': {'backup_source': 'backup/source/url'}
}
"""
raise NotImplementedError()
def get_users_with_permission(self, url, method):
"""
        Return a list of users who can access the given url with the given method
"""
raise NotImplementedError()
def get_users_without_permissions(self, url, method):
"""
        Return a list of users who cannot access the given url with the given method
"""
raise NotImplementedError()
def test_permissions(self):
"""
        Go through all url configs and check that users with permissions
        can request them and users without permissions cannot
"""
for conf in self.get_urls_configs():
url, method = conf['url'], conf['method']
data = conf['data'] if 'data' in conf else {}
for user in self.get_users_with_permission(url, method):
self.client.force_authenticate(user=user)
response = getattr(self.client, method.lower())(url, data=data)
self.assertFalse(
response.status_code
in (status.HTTP_403_FORBIDDEN, status.HTTP_404_NOT_FOUND),
'Error. User %s can not reach url: %s (method:%s). (Response status code %s, data %s)'
% (user, url, method, response.status_code, response.data),
)
for user in self.get_users_without_permissions(url, method):
self.client.force_authenticate(user=user)
response = getattr(self.client, method.lower())(url, data=data)
unreachable_statuses = (
status.HTTP_403_FORBIDDEN,
status.HTTP_404_NOT_FOUND,
status.HTTP_409_CONFLICT,
)
self.assertTrue(
response.status_code in unreachable_statuses,
'Error. User %s can reach url: %s (method:%s). (Response status code %s, data %s)'
% (user, url, method, response.status_code, response.data),
)
class ListPermissionsTest(test.APITransactionTestCase):
"""
    Abstract class that tests which objects a user receives in a list.
    Method `get_users_and_expected_results` has to be overridden.
    Method `get_url` has to be defined.
"""
def get_url(self):
return None
def get_users_and_expected_results(self):
"""
        Return a list or generator of dictionaries with the following keys:
        - user - the user we want to test
        - expected_results - list of dictionaries with the fields the user
          is expected to receive in the server's response
"""
pass
def test_list_permissions(self):
for user_and_expected_result in self.get_users_and_expected_results():
user = user_and_expected_result['user']
expected_results = user_and_expected_result['expected_results']
self.client.force_authenticate(user=user)
response = self.client.get(self.get_url())
self.assertEqual(
len(expected_results),
len(response.data),
                'User %s received the wrong number of objects. Expected: %s, received %s'
% (user, len(expected_results), len(response.data)),
)
for actual, expected in zip(response.data, expected_results):
for key, value in expected.items():
self.assertEqual(actual[key], value)
def override_waldur_core_settings(**kwargs):
waldur_settings = copy.deepcopy(settings.WALDUR_CORE)
waldur_settings.update(kwargs)
return override_settings(WALDUR_CORE=waldur_settings)
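# Editor's sketch (hypothetical setting key, illustrative only): overriding a
# single WALDUR_CORE entry for one test without replacing the whole dict.
class ExampleSettingsTest(test.APITransactionTestCase):
    @override_waldur_core_settings(EXAMPLE_FLAG=True)
    def test_example_flag(self):
        self.assertTrue(settings.WALDUR_CORE['EXAMPLE_FLAG'])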
| 37.302817
| 106
| 0.605815
| 596
| 5,297
| 5.20302
| 0.233221
| 0.028378
| 0.034827
| 0.028378
| 0.403096
| 0.320542
| 0.268301
| 0.242825
| 0.20058
| 0.112222
| 0
| 0.004093
| 0.308099
| 5,297
| 141
| 107
| 37.567376
| 0.842019
| 0.343968
| 0
| 0.163934
| 0
| 0.032787
| 0.085113
| 0
| 0
| 0
| 0
| 0
| 0.065574
| 1
| 0.131148
| false
| 0.016393
| 0.065574
| 0.016393
| 0.262295
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbd3d31bd1a8e525699ace640bf7abf893c326e1
| 1,121
|
py
|
Python
|
data/benchmark.py
|
Gummary/denet
|
00d814d75eea54d5b259fce128ae7b625a900140
|
[
"MIT"
] | 343
|
2020-04-02T06:22:18.000Z
|
2022-03-25T12:51:55.000Z
|
data/benchmark.py
|
sanglee325/cutblur
|
1589718b27973bec41289bbd5ad5a71ebe2e9925
|
[
"MIT"
] | 26
|
2020-04-30T03:23:15.000Z
|
2022-02-20T07:31:42.000Z
|
data/benchmark.py
|
sanglee325/cutblur
|
1589718b27973bec41289bbd5ad5a71ebe2e9925
|
[
"MIT"
] | 66
|
2020-04-02T06:55:37.000Z
|
2022-03-10T15:44:19.000Z
|
"""
CutBlur
Copyright 2020-present NAVER corp.
MIT license
"""
import os
import glob
import data
class BenchmarkSR(data.BaseDataset):
def __init__(self, phase, opt):
root = opt.dataset_root
self.scale = opt.scale
dir_HQ, dir_LQ = self.get_subdir()
self.HQ_paths = sorted(glob.glob(os.path.join(root, dir_HQ, "*.png")))
self.LQ_paths = sorted(glob.glob(os.path.join(root, dir_LQ, "*.png")))
super().__init__(phase, opt)
def get_subdir(self):
dir_HQ = "HR"
dir_LQ = "X{}".format(self.scale)
return dir_HQ, dir_LQ
class BenchmarkDN(BenchmarkSR):
def __init__(self, phase, opt):
self.sigma = opt.sigma
super().__init__(phase, opt)
def get_subdir(self):
dir_HQ = "HQ"
dir_LQ = "{}".format(self.sigma)
return dir_HQ, dir_LQ
class BenchmarkJPEG(BenchmarkSR):
def __init__(self, phase, opt):
self.quality = opt.quality
super().__init__(phase, opt)
def get_subdir(self):
dir_HQ = "HQ"
dir_LQ = "{}".format(self.quality)
return dir_HQ, dir_LQ
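# Editor's sketch: the minimal `opt` fields read directly above; the
# data.BaseDataset base class may require more fields in practice.
if __name__ == "__main__":
    from types import SimpleNamespace
    opt = SimpleNamespace(dataset_root="dataset/Set5", scale=2)
    # Expects dataset/Set5/HR/*.png and dataset/Set5/X2/*.png to exist.
    dataset = BenchmarkSR("test", opt)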
| 22.877551
| 78
| 0.611954
| 152
| 1,121
| 4.203947
| 0.276316
| 0.062598
| 0.065728
| 0.062598
| 0.571205
| 0.516432
| 0.450704
| 0.344288
| 0.344288
| 0.231612
| 0
| 0.00479
| 0.255129
| 1,121
| 48
| 79
| 23.354167
| 0.760479
| 0.048171
| 0
| 0.451613
| 0
| 0
| 0.021719
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.193548
| false
| 0
| 0.096774
| 0
| 0.483871
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbd4271941c1c0d5952f6d9d574008a25b255d3d
| 917
|
py
|
Python
|
pytglib/api/types/update_chat_is_pinned.py
|
iTeam-co/pytglib
|
e5e75e0a85f89b77762209b32a61b0a883c0ae61
|
[
"MIT"
] | 6
|
2019-10-30T08:57:27.000Z
|
2021-02-08T14:17:43.000Z
|
pytglib/api/types/update_chat_is_pinned.py
|
iTeam-co/python-telegram
|
e5e75e0a85f89b77762209b32a61b0a883c0ae61
|
[
"MIT"
] | 1
|
2021-08-19T05:44:10.000Z
|
2021-08-19T07:14:56.000Z
|
pytglib/api/types/update_chat_is_pinned.py
|
iTeam-co/python-telegram
|
e5e75e0a85f89b77762209b32a61b0a883c0ae61
|
[
"MIT"
] | 5
|
2019-12-04T05:30:39.000Z
|
2021-05-21T18:23:32.000Z
|
from ..utils import Object
class UpdateChatIsPinned(Object):
"""
A chat was pinned or unpinned
Attributes:
ID (:obj:`str`): ``UpdateChatIsPinned``
Args:
chat_id (:obj:`int`):
Chat identifier
is_pinned (:obj:`bool`):
New value of is_pinned
order (:obj:`int`):
New value of the chat order
Returns:
Update
Raises:
:class:`telegram.Error`
"""
ID = "updateChatIsPinned"
def __init__(self, chat_id, is_pinned, order, **kwargs):
self.chat_id = chat_id # int
self.is_pinned = is_pinned # bool
self.order = order # int
@staticmethod
def read(q: dict, *args) -> "UpdateChatIsPinned":
chat_id = q.get('chat_id')
is_pinned = q.get('is_pinned')
order = q.get('order')
return UpdateChatIsPinned(chat_id, is_pinned, order)
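# Editor's sketch: round-tripping a raw update dict through read()
# (keys taken from the constructor above; values are illustrative).
if __name__ == "__main__":
    update = UpdateChatIsPinned.read(
        {"chat_id": 123456, "is_pinned": True, "order": "0"}
    )
    print(update.chat_id, update.is_pinned, update.order)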
| 22.365854
| 60
| 0.563795
| 106
| 917
| 4.698113
| 0.386792
| 0.128514
| 0.104418
| 0.084337
| 0.076305
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.320611
| 917
| 40
| 61
| 22.925
| 0.799358
| 0.368593
| 0
| 0
| 0
| 0
| 0.112426
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.076923
| 0
| 0.461538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbd513568e3fe748df68592f5efb0230845ec0a5
| 990
|
py
|
Python
|
setup.py
|
dylancrockett/iot.io
|
472767186a5500e05b02d821f32e1208f3652418
|
[
"MIT"
] | null | null | null |
setup.py
|
dylancrockett/iot.io
|
472767186a5500e05b02d821f32e1208f3652418
|
[
"MIT"
] | null | null | null |
setup.py
|
dylancrockett/iot.io
|
472767186a5500e05b02d821f32e1208f3652418
|
[
"MIT"
] | null | null | null |
from setuptools import setup
import iotio
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="iot.io",
version=iotio.__version__,
packages=["iotio"],
author="Dylan Crockett",
author_email="dylanrcrockett@gmail.com",
license="MIT",
description="A management API for connecting and managing Clients via websocket connections.",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/dylancrockett/iot.io",
project_urls={
"Documentation": "https://iotio.readthedocs.io/",
"Source Code": "https://github.com/dylancrockett/iot.io"
},
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent"
],
install_requires=[
'gevent',
'gevent-websocket',
'flask',
'flask-sockets',
],
python_requires='>=3.7'
)
| 28.285714
| 98
| 0.639394
| 106
| 990
| 5.839623
| 0.669811
| 0.096931
| 0.061389
| 0.096931
| 0.103393
| 0.103393
| 0
| 0
| 0
| 0
| 0
| 0.003881
| 0.219192
| 990
| 34
| 99
| 29.117647
| 0.796895
| 0
| 0
| 0.0625
| 0
| 0
| 0.441414
| 0.024242
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.0625
| 0
| 0.0625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbd57373c1091216c9a267bad2a40451008902b2
| 1,820
|
py
|
Python
|
trellominer/api/trello.py
|
xnoder/trellominer
|
629d8f916486aa94a5bfa3a9497c36316c2864ed
|
[
"MIT"
] | null | null | null |
trellominer/api/trello.py
|
xnoder/trellominer
|
629d8f916486aa94a5bfa3a9497c36316c2864ed
|
[
"MIT"
] | null | null | null |
trellominer/api/trello.py
|
xnoder/trellominer
|
629d8f916486aa94a5bfa3a9497c36316c2864ed
|
[
"MIT"
] | null | null | null |
import os
import requests
from trellominer.config import yaml
class HTTP(object):
def __init__(self):
self.config = yaml.read(os.getenv("TRELLO_CONFIG", default=os.path.join(os.path.expanduser('~'), ".trellominer.yaml")))
self.api_url = os.getenv("TRELLO_URL", default=self.config['api']['url'])
self.api_key = os.getenv("TRELLO_API_KEY", default=self.config['api']['key'])
self.api_token = os.getenv("TRELLO_API_TOKEN", default=self.config['api']['token'])
self.organization = os.getenv("TRELLO_ORGANIZATION", default=self.config['api']['organization'])
self.output_file = os.getenv("TRELLO_OUTPUT_FILE", default=self.config['api']['output_file_name'])
class Trello(HTTP):
def __init__(self):
super().__init__()
def boards(self):
url = "{0}/organizations/{1}/boards?key={2}&token={3}".format(
self.api_url, self.organization, self.api_key, self.api_token)
req = requests.get(url, params=None)
return req.json()
def cards(self, board_id):
url = "{0}/boards/{1}/cards?fields=shortLink,name,desc,idList,due,dueComplete,closed,idMembers&members=true&member_fields=fullName&key={2}&token={3}".format(
self.api_url, board_id, self.api_key, self.api_token)
req = requests.get(url, params=None)
return req.json()
def lists(self, list_id):
url = "{0}/lists/{1}?key={2}&token={3}".format(self.api_url, list_id, self.api_key, self.api_token)
req = requests.get(url, params=None)
return req.json()
def checklists(self, card_id):
url = "{0}/cards/{1}/checklists?key={2}&token={3}".format(
self.api_url, card_id, self.api_key, self.api_token)
req = requests.get(url, params=None)
return req.json()
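# Editor's sketch: typical call sequence, assuming the TRELLO_* environment
# variables (or ~/.trellominer.yaml) described in HTTP.__init__ are set, and
# that the API returns a JSON list of board objects.
if __name__ == "__main__":
    client = Trello()
    for board in client.boards():
        print(board.get("id"), board.get("name"))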
| 40.444444
| 165
| 0.644505
| 255
| 1,820
| 4.419608
| 0.239216
| 0.093168
| 0.074534
| 0.088731
| 0.341615
| 0.325643
| 0.325643
| 0.325643
| 0.233363
| 0.233363
| 0
| 0.010804
| 0.186264
| 1,820
| 44
| 166
| 41.363636
| 0.750169
| 0
| 0
| 0.30303
| 0
| 0.030303
| 0.231868
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 1
| 0.181818
| false
| 0
| 0.090909
| 0
| 0.454545
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbd5cd5e6175ef560ba478a76fe061ded7bfc8d7
| 2,337
|
py
|
Python
|
alexnet_guided_bp_vanilla.py
|
wezteoh/face_perception_thru_backprop
|
449f78ce330876ff25fbcdf892023fd2ba86005c
|
[
"MIT"
] | null | null | null |
alexnet_guided_bp_vanilla.py
|
wezteoh/face_perception_thru_backprop
|
449f78ce330876ff25fbcdf892023fd2ba86005c
|
[
"MIT"
] | null | null | null |
alexnet_guided_bp_vanilla.py
|
wezteoh/face_perception_thru_backprop
|
449f78ce330876ff25fbcdf892023fd2ba86005c
|
[
"MIT"
] | null | null | null |
import numpy as np
import tensorflow as tf
import os
from scipy.io import savemat
from scipy.io import loadmat
from scipy.misc import imread
from scipy.misc import imsave
from alexnet_face_classifier import *
import matplotlib.pyplot as plt
plt.switch_backend('agg')
class backprop_graph:
def __init__(self, num_classes, nhid, cnn):
self.num_classes = num_classes
self.inputs = tf.placeholder(tf.float32, shape = [None, 227, 227, 3], name='input')
self.labels_1hot = tf.placeholder(tf.float32, shape=[None, self.num_classes])
self.cnn = cnn(self.inputs, None, self.num_classes)
self.cnn.preprocess()
self.cnn.convlayers()
self.cnn.fc_layers(transfer_learning=False, nhid=nhid)
def classifier_graph(self, temp=3.0):
self.probabilities = tf.nn.softmax(self.cnn.fc2/temp)
self.probability = tf.tensordot(self.probabilities, self.labels_1hot, axes=[[1],[1]])
self.log_probability = tf.log(self.probability)
def guided_backprop_graph(self):
self.grad_fc2 = tf.nn.relu(tf.gradients(self.probability, self.cnn.fc2)[0])
self.grad_fc1 = tf.nn.relu(tf.gradients(self.cnn.fc2, self.cnn.fc1, grad_ys=self.grad_fc2)[0])
self.grad_conv5 = tf.nn.relu(tf.gradients(self.cnn.fc1, self.cnn.conv5, grad_ys=self.grad_fc1)[0])
self.grad_conv4 = tf.nn.relu(tf.gradients(self.cnn.conv5, self.cnn.conv4, grad_ys=self.grad_conv5)[0])
self.grad_conv3 = tf.nn.relu(tf.gradients(self.cnn.conv4, self.cnn.conv3, grad_ys=self.grad_conv4)[0])
self.grad_conv2 = tf.nn.relu(tf.gradients(self.cnn.conv3, self.cnn.conv2, grad_ys=self.grad_conv3)[0])
self.grad_conv1 = tf.nn.relu(tf.gradients(self.cnn.conv2, self.cnn.conv1, grad_ys=self.grad_conv2)[0])
self.grad_image = tf.nn.relu(tf.gradients(self.cnn.conv1, self.inputs, grad_ys=self.grad_conv1)[0])
###
def guided_backprop(graph, image, one_hot, sess):
image = np.expand_dims(image, 0)
one_hot = np.expand_dims(one_hot, 0)
saliency_map = sess.run(graph.grad_image, feed_dict={graph.inputs:image, graph.labels_1hot:one_hot})[0]
scaling_adjustment = 1E-20
saliency_map_scaled = saliency_map/(np.max(saliency_map)+scaling_adjustment)
return saliency_map_scaled
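# Editor's sketch: wiring the graph for a single saliency map (TF1-style
# session). The classifier class comes from the star import above; its exact
# name is an assumption, so the sketch is left commented out.
# graph = backprop_graph(num_classes=10, nhid=4096, cnn=alexnet_face_classifier)
# graph.classifier_graph()
# graph.guided_backprop_graph()
# with tf.Session() as sess:
#     sess.run(tf.global_variables_initializer())
#     smap = guided_backprop(graph, image, one_hot, sess)  # image: (227, 227, 3)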
| 37.095238
| 110
| 0.693624
| 360
| 2,337
| 4.330556
| 0.255556
| 0.085311
| 0.041052
| 0.051315
| 0.20077
| 0.20077
| 0.116742
| 0
| 0
| 0
| 0
| 0.031672
| 0.175867
| 2,337
| 62
| 111
| 37.693548
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0.003432
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.102564
| false
| 0
| 0.230769
| 0
| 0.384615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbd6ae222f06041fd60daf0b6a6b62ee66225c4f
| 18,729
|
py
|
Python
|
tests/test_sqlalchemy_registry.py
|
AferriDaniel/coaster
|
3ffbc9d33c981284593445299aaee0c3cc0cdb0b
|
[
"BSD-2-Clause"
] | 48
|
2015-01-15T08:57:24.000Z
|
2022-01-26T04:04:34.000Z
|
tests/test_sqlalchemy_registry.py
|
AferriDaniel/coaster
|
3ffbc9d33c981284593445299aaee0c3cc0cdb0b
|
[
"BSD-2-Clause"
] | 169
|
2015-01-16T13:17:38.000Z
|
2021-05-31T13:23:23.000Z
|
tests/test_sqlalchemy_registry.py
|
AferriDaniel/coaster
|
3ffbc9d33c981284593445299aaee0c3cc0cdb0b
|
[
"BSD-2-Clause"
] | 17
|
2015-02-15T07:39:04.000Z
|
2021-10-05T11:20:22.000Z
|
"""Registry and RegistryMixin tests."""
from types import SimpleNamespace
import pytest
from coaster.db import db
from coaster.sqlalchemy import BaseMixin
from coaster.sqlalchemy.registry import Registry
# --- Fixtures -------------------------------------------------------------------------
@pytest.fixture()
def CallableRegistry(): # noqa: N802
"""Callable registry with a positional parameter."""
class CallableRegistry:
registry = Registry()
return CallableRegistry
@pytest.fixture()
def PropertyRegistry(): # noqa: N802
"""Registry with property and a positional parameter."""
class PropertyRegistry:
registry = Registry(property=True)
return PropertyRegistry
@pytest.fixture()
def CachedPropertyRegistry(): # noqa: N802
"""Registry with cached property and a positional parameter."""
class CachedPropertyRegistry:
registry = Registry(cached_property=True)
return CachedPropertyRegistry
@pytest.fixture()
def CallableParamRegistry(): # noqa: N802
"""Callable registry with a keyword parameter."""
class CallableParamRegistry:
registry = Registry('kwparam')
return CallableParamRegistry
@pytest.fixture()
def PropertyParamRegistry(): # noqa: N802
"""Registry with property and a keyword parameter."""
class PropertyParamRegistry:
registry = Registry('kwparam', property=True)
return PropertyParamRegistry
@pytest.fixture()
def CachedPropertyParamRegistry(): # noqa: N802
"""Registry with cached property and a keyword parameter."""
class CachedPropertyParamRegistry:
registry = Registry('kwparam', cached_property=True)
return CachedPropertyParamRegistry
@pytest.fixture()
def all_registry_hosts(
CallableRegistry, # noqa: N803
PropertyRegistry,
CachedPropertyRegistry,
CallableParamRegistry,
PropertyParamRegistry,
CachedPropertyParamRegistry,
):
"""All test registries as a list."""
return [
CallableRegistry,
PropertyRegistry,
CachedPropertyRegistry,
CallableParamRegistry,
PropertyParamRegistry,
CachedPropertyParamRegistry,
]
@pytest.fixture(scope='module')
def registry_member():
"""Test registry member function."""
def member(pos=None, kwparam=None):
pass
return member
@pytest.fixture(scope='session')
def registrymixin_models():
"""Fixtures for RegistryMixin tests."""
# We have two sample models and two registered items to test that
# the registry is unique to each model and is not a global registry
# in the base RegistryMixin class.
# Sample model 1
class RegistryTest1(BaseMixin, db.Model):
"""Registry test model 1."""
__tablename__ = 'registry_test1'
# Sample model 2
class RegistryTest2(BaseMixin, db.Model):
"""Registry test model 2."""
__tablename__ = 'registry_test2'
# Sample registered item (form or view) 1
class RegisteredItem1:
"""Registered item 1."""
def __init__(self, obj=None):
"""Init class."""
self.obj = obj
# Sample registered item 2
@RegistryTest2.views('test')
class RegisteredItem2:
"""Registered item 2."""
def __init__(self, obj=None):
"""Init class."""
self.obj = obj
# Sample registered item 3
@RegistryTest1.features('is1')
@RegistryTest2.features()
def is1(obj):
"""Assert object is instance of RegistryTest1."""
return isinstance(obj, RegistryTest1)
RegistryTest1.views.test = RegisteredItem1
return SimpleNamespace(**locals())
# --- Tests ----------------------------------------------------------------------------
# --- Creating a registry
def test_registry_set_name():
"""Registry's __set_name__ gets called."""
# Registry has no name unless added to a class
assert Registry()._name is None
class RegistryUser:
reg1 = Registry()
reg2 = Registry()
assert RegistryUser.reg1._name == 'reg1'
assert RegistryUser.reg2._name == 'reg2'
def test_registry_reuse_error():
"""Registries cannot be reused under different names."""
# Registry raises TypeError from __set_name__, but Python recasts as RuntimeError
with pytest.raises(RuntimeError):
class RegistryUser:
a = b = Registry()
def test_registry_reuse_okay():
"""Registries be reused with the same name under different hosts."""
reusable = Registry()
assert reusable._name is None
class HostA:
registry = reusable
assert HostA.registry._name == 'registry'
class HostB:
registry = reusable
assert HostB.registry._name == 'registry'
assert HostA.registry is HostB.registry
assert HostA.registry is reusable
def test_registry_param_type():
"""Registry's param must be string or None."""
r = Registry()
assert r._param is None
r = Registry('')
assert r._param is None
r = Registry(1)
assert r._param == '1'
r = Registry('obj')
assert r._param == 'obj'
r = Registry(param='foo')
assert r._param == 'foo'
def test_registry_property_cached_property():
"""A registry can have property or cached_property set, but not both."""
r = Registry()
assert r._default_property is False
assert r._default_cached_property is False
r = Registry(property=True)
assert r._default_property is True
assert r._default_cached_property is False
r = Registry(cached_property=True)
assert r._default_property is False
assert r._default_cached_property is True
with pytest.raises(TypeError):
Registry(property=True, cached_property=True)
# --- Populating a registry
def test_add_to_registry(
CallableRegistry, # noqa: N803
PropertyRegistry,
CachedPropertyRegistry,
CallableParamRegistry,
PropertyParamRegistry,
CachedPropertyParamRegistry,
):
"""A member can be added to registries and accessed as per registry settings."""
@CallableRegistry.registry()
@PropertyRegistry.registry()
@CachedPropertyRegistry.registry()
@CallableParamRegistry.registry()
@PropertyParamRegistry.registry()
@CachedPropertyParamRegistry.registry()
def member(pos=None, kwparam=None):
return (pos, kwparam)
callable_host = CallableRegistry()
property_host = PropertyRegistry()
cached_property_host = CachedPropertyRegistry()
callable_param_host = CallableParamRegistry()
property_param_host = PropertyParamRegistry()
cached_property_param_host = CachedPropertyParamRegistry()
assert callable_host.registry.member(1) == (callable_host, 1)
assert property_host.registry.member == (property_host, None)
assert cached_property_host.registry.member == (cached_property_host, None)
assert callable_param_host.registry.member(1) == (1, callable_param_host)
assert property_param_host.registry.member == (None, property_param_host)
assert cached_property_param_host.registry.member == (
None,
cached_property_param_host,
)
def test_property_cache_mismatch(
PropertyRegistry, CachedPropertyRegistry # noqa: N803
):
"""A registry's default setting must be explicitly turned off if conflicting."""
with pytest.raises(TypeError):
@PropertyRegistry.registry(cached_property=True)
def member1(pos=None, kwparam=None):
return (pos, kwparam)
with pytest.raises(TypeError):
@CachedPropertyRegistry.registry(property=True)
def member2(pos=None, kwparam=None):
return (pos, kwparam)
@PropertyRegistry.registry(cached_property=True, property=False)
@CachedPropertyRegistry.registry(property=True, cached_property=False)
def member(pos=None, kwparam=None):
return (pos, kwparam)
def test_add_to_registry_host(
CallableRegistry, # noqa: N803
PropertyRegistry,
CachedPropertyRegistry,
CallableParamRegistry,
PropertyParamRegistry,
CachedPropertyParamRegistry,
):
"""A member can be added as a function, overriding default settings."""
@CallableRegistry.registry()
@PropertyRegistry.registry(property=False)
@CachedPropertyRegistry.registry(cached_property=False)
@CallableParamRegistry.registry()
@PropertyParamRegistry.registry(property=False)
@CachedPropertyParamRegistry.registry(cached_property=False)
def member(pos=None, kwparam=None):
return (pos, kwparam)
callable_host = CallableRegistry()
property_host = PropertyRegistry()
cached_property_host = CachedPropertyRegistry()
callable_param_host = CallableParamRegistry()
property_param_host = PropertyParamRegistry()
cached_property_param_host = CachedPropertyParamRegistry()
assert callable_host.registry.member(1) == (callable_host, 1)
assert property_host.registry.member(2) == (property_host, 2)
assert cached_property_host.registry.member(3) == (cached_property_host, 3)
assert callable_param_host.registry.member(4) == (4, callable_param_host)
assert property_param_host.registry.member(5) == (5, property_param_host)
assert cached_property_param_host.registry.member(6) == (
6,
cached_property_param_host,
)
def test_add_to_registry_property(
CallableRegistry, # noqa: N803
PropertyRegistry,
CachedPropertyRegistry,
CallableParamRegistry,
PropertyParamRegistry,
CachedPropertyParamRegistry,
):
"""A member can be added as a property, overriding default settings."""
@CallableRegistry.registry(property=True)
@PropertyRegistry.registry(property=True)
@CachedPropertyRegistry.registry(property=True, cached_property=False)
@CallableParamRegistry.registry(property=True)
@PropertyParamRegistry.registry(property=True)
@CachedPropertyParamRegistry.registry(property=True, cached_property=False)
def member(pos=None, kwparam=None):
return (pos, kwparam)
callable_host = CallableRegistry()
property_host = PropertyRegistry()
cached_property_host = CachedPropertyRegistry()
callable_param_host = CallableParamRegistry()
property_param_host = PropertyParamRegistry()
cached_property_param_host = CachedPropertyParamRegistry()
assert callable_host.registry.member == (callable_host, None)
assert property_host.registry.member == (property_host, None)
assert cached_property_host.registry.member == (cached_property_host, None)
assert callable_param_host.registry.member == (None, callable_param_host)
assert property_param_host.registry.member == (None, property_param_host)
assert cached_property_param_host.registry.member == (
None,
cached_property_param_host,
)
def test_add_to_registry_cached_property(
CallableRegistry, # noqa: N803
PropertyRegistry,
CachedPropertyRegistry,
CallableParamRegistry,
PropertyParamRegistry,
CachedPropertyParamRegistry,
):
"""A member can be added as a property, overriding default settings."""
@CallableRegistry.registry(property=True)
@PropertyRegistry.registry(property=True)
@CachedPropertyRegistry.registry(property=True, cached_property=False)
@CallableParamRegistry.registry(property=True)
@PropertyParamRegistry.registry(property=True)
@CachedPropertyParamRegistry.registry(property=True, cached_property=False)
def member(pos=None, kwparam=None):
return (pos, kwparam)
callable_host = CallableRegistry()
property_host = PropertyRegistry()
cached_property_host = CachedPropertyRegistry()
callable_param_host = CallableParamRegistry()
property_param_host = PropertyParamRegistry()
cached_property_param_host = CachedPropertyParamRegistry()
assert callable_host.registry.member == (callable_host, None)
assert property_host.registry.member == (property_host, None)
assert cached_property_host.registry.member == (cached_property_host, None)
assert callable_param_host.registry.member == (None, callable_param_host)
assert property_param_host.registry.member == (None, property_param_host)
assert cached_property_param_host.registry.member == (
None,
cached_property_param_host,
)
def test_add_to_registry_custom_name(all_registry_hosts, registry_member):
"""Members can be added to a registry with a custom name."""
assert registry_member.__name__ == 'member'
for host in all_registry_hosts:
# Mock decorator call
host.registry('custom')(registry_member)
# This adds the member under the custom name
assert host.registry.custom is registry_member
# The default name of the function is not present...
with pytest.raises(AttributeError):
assert host.registry.member is registry_member
# ... but can be added
host.registry()(registry_member)
assert host.registry.member is registry_member
def test_add_to_registry_underscore(all_registry_hosts, registry_member):
"""Registry member names cannot start with an underscore."""
for host in all_registry_hosts:
with pytest.raises(ValueError):
host.registry('_new_member')(registry_member)
def test_add_to_registry_dupe(all_registry_hosts, registry_member):
"""Registry member names cannot be duplicates of an existing name."""
for host in all_registry_hosts:
host.registry()(registry_member)
with pytest.raises(ValueError):
host.registry()(registry_member)
host.registry('custom')(registry_member)
with pytest.raises(ValueError):
host.registry('custom')(registry_member)
def test_cached_properties_are_cached(
PropertyRegistry, # noqa: N803
CachedPropertyRegistry,
PropertyParamRegistry,
CachedPropertyParamRegistry,
):
"""Cached properties are truly cached."""
# Register registry member
@PropertyRegistry.registry()
@CachedPropertyRegistry.registry()
@PropertyParamRegistry.registry()
@CachedPropertyParamRegistry.registry()
def member(pos=None, kwparam=None):
return [pos, kwparam] # Lists are different each call
property_host = PropertyRegistry()
cached_property_host = CachedPropertyRegistry()
property_param_host = PropertyParamRegistry()
cached_property_param_host = CachedPropertyParamRegistry()
# The properties and cached properties work
assert property_host.registry.member == [property_host, None]
assert cached_property_host.registry.member == [cached_property_host, None]
assert property_param_host.registry.member == [None, property_param_host]
assert cached_property_param_host.registry.member == [
None,
cached_property_param_host,
]
# The properties and cached properties return equal values on each access
assert property_host.registry.member == property_host.registry.member
assert cached_property_host.registry.member == cached_property_host.registry.member
assert property_param_host.registry.member == property_param_host.registry.member
assert (
cached_property_param_host.registry.member
== cached_property_param_host.registry.member
)
# Only the cached properties return the same value every time
assert property_host.registry.member is not property_host.registry.member
assert cached_property_host.registry.member is cached_property_host.registry.member
assert (
property_param_host.registry.member is not property_param_host.registry.member
)
assert (
cached_property_param_host.registry.member
is cached_property_param_host.registry.member
)
# TODO:
# test_registry_member_cannot_be_called_clear_cache
# test_multiple_positional_and_keyword_arguments
# test_registry_iter
# test_registry_members_must_be_callable
# test_add_by_directly_sticking_in
# test_instance_registry_is_cached
# test_clear_cache_for
# test_clear_cache
# test_registry_mixin_config
# test_registry_mixin_subclasses
# --- RegistryMixin tests --------------------------------------------------------------
def test_access_item_from_class(registrymixin_models):
"""Registered items are available from the model class."""
assert (
registrymixin_models.RegistryTest1.views.test
is registrymixin_models.RegisteredItem1
)
assert (
registrymixin_models.RegistryTest2.views.test
is registrymixin_models.RegisteredItem2
)
assert (
registrymixin_models.RegistryTest1.views.test
is not registrymixin_models.RegisteredItem2
)
assert (
registrymixin_models.RegistryTest2.views.test
is not registrymixin_models.RegisteredItem1
)
assert registrymixin_models.RegistryTest1.features.is1 is registrymixin_models.is1
assert registrymixin_models.RegistryTest2.features.is1 is registrymixin_models.is1
def test_access_item_class_from_instance(registrymixin_models):
"""Registered items are available from the model instance."""
r1 = registrymixin_models.RegistryTest1()
r2 = registrymixin_models.RegistryTest2()
# When accessed from the instance, we get a partial that resembles
# the wrapped item, but is not the item itself.
assert r1.views.test is not registrymixin_models.RegisteredItem1
assert r1.views.test.func is registrymixin_models.RegisteredItem1
assert r2.views.test is not registrymixin_models.RegisteredItem2
assert r2.views.test.func is registrymixin_models.RegisteredItem2
assert r1.features.is1 is not registrymixin_models.is1
assert r1.features.is1.func is registrymixin_models.is1
assert r2.features.is1 is not registrymixin_models.is1
assert r2.features.is1.func is registrymixin_models.is1
def test_access_item_instance_from_instance(registrymixin_models):
"""Registered items can be instantiated from the model instance."""
r1 = registrymixin_models.RegistryTest1()
r2 = registrymixin_models.RegistryTest2()
i1 = r1.views.test()
i2 = r2.views.test()
assert isinstance(i1, registrymixin_models.RegisteredItem1)
assert isinstance(i2, registrymixin_models.RegisteredItem2)
assert not isinstance(i1, registrymixin_models.RegisteredItem2)
assert not isinstance(i2, registrymixin_models.RegisteredItem1)
assert i1.obj is r1
assert i2.obj is r2
assert i1.obj is not r2
assert i2.obj is not r1
def test_features(registrymixin_models):
"""The features registry can be used for feature tests."""
r1 = registrymixin_models.RegistryTest1()
r2 = registrymixin_models.RegistryTest2()
assert r1.features.is1() is True
assert r2.features.is1() is False
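# Editor's sketch: the Registry surface exercised above, condensed into one
# self-contained example (mirrors the fixtures; not an additional test case).
def example_registry_usage():
    class Host:
        registry = Registry(property=True)

    @Host.registry()
    def greeting(obj):
        # With property=True the host instance is passed as the first argument.
        return 'hello from {!r}'.format(obj)

    host = Host()
    return host.registry.greeting  # property access invokes greeting(host)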
| 33.148673
| 88
| 0.724171
| 2,008
| 18,729
| 6.539343
| 0.10757
| 0.070368
| 0.063057
| 0.038535
| 0.646028
| 0.587389
| 0.518849
| 0.467977
| 0.409717
| 0.374914
| 0
| 0.010394
| 0.183245
| 18,729
| 564
| 89
| 33.207447
| 0.848009
| 0.17545
| 0
| 0.543478
| 0
| 0
| 0.009265
| 0
| 0
| 0
| 0
| 0.001773
| 0.225543
| 1
| 0.105978
| false
| 0.002717
| 0.013587
| 0.021739
| 0.244565
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbd936c5bdf9f66abffeaa3d4ec25c893af108da
| 4,239
|
py
|
Python
|
api/controller/activity.py
|
DXCChina/pms
|
c779a69f25fb08101593c6ff0451debc0abce6e4
|
[
"MIT"
] | 27
|
2017-11-06T06:58:30.000Z
|
2021-04-23T02:47:23.000Z
|
api/controller/activity.py
|
DXCChina/pms
|
c779a69f25fb08101593c6ff0451debc0abce6e4
|
[
"MIT"
] | 3
|
2017-12-08T02:55:42.000Z
|
2019-06-04T15:23:03.000Z
|
api/controller/activity.py
|
DXCChina/pms
|
c779a69f25fb08101593c6ff0451debc0abce6e4
|
[
"MIT"
] | 16
|
2017-10-12T03:06:39.000Z
|
2020-12-24T09:00:49.000Z
|
# -*- coding: utf-8 -*-
'''Activity management API'''
from flask import request
from model.db import database, Activity, ActivityMember, Demand, ActivityBase, ProjectMember, User
from model.role import identity
from flask_jwt_extended import (fresh_jwt_required)
def demand_activity_add(activity_id, data):
    '''Add demands to an activity'''
for demand_id in data:
demand = Demand.get(Demand.id == demand_id)
if not demand.activityId:
demand.activityId = activity_id
# Demand.update(activityId=activity_id).where(Demand.id == demand_id).execute()
demand.save()
def demand_activity_del(activity_id, data):
    '''Remove demands from an activity'''
for demand_id in data:
demand = Demand.get(Demand.id == demand_id)
if demand.activityId == activity_id:
demand.activityId = None
# Demand.update(activityId=activity_id).where(Demand.id == demand_id).execute()
demand.save()
def demand_activity_done(activity_id, data):
    '''Update activity demands'''
for demand_id in data:
demand = Demand.get(Demand.id == demand_id)
if demand.activityId == activity_id:
demand.status = 1
# Demand.update(activityId=activity_id).where(Demand.id == demand_id).execute()
demand.save()
@fresh_jwt_required
@identity.check_permission("create", 'task')
def activity_add():
    '''Create a project activity'''
data = request.json
if 'memberId' in data and data['memberId']:
data['status'] = 'dev-ing'
with database.atomic():
activity_id = ActivityBase.create(**data).id
if 'memberId' in data and data['memberId']:
for member_id in data['memberId']:
role = ProjectMember.get(
ProjectMember.projectId == data['projectId'],
ProjectMember.memberId == member_id).role
ActivityMember.create(**{
'activityId': activity_id,
'memberId': member_id,
'role': role
})
demand_activity_add(activity_id, data['demand'])
return {"msg": 'ok'}
@fresh_jwt_required
@identity.check_permission("update", 'task')
def activity_update():
    '''Update a project activity'''
data = request.json
activity_id = data.pop('activityId')
with database.atomic():
if 'del_memberId' in data:
for member_id in data.pop('del_memberId'):
ActivityMember.delete().where(
(ActivityMember.activityId == activity_id) &
(ActivityMember.memberId == member_id)).execute()
if 'memberId' in data:
if not 'status' in data or not data['status']:
data['status'] = 'dev-ing'
for member_id in data.pop('memberId'):
ActivityMember.get_or_create(
activityId=activity_id,
memberId=member_id,
role=ProjectMember.get(
(ProjectMember.projectId == data['projectId'])
& (ProjectMember.memberId == member_id)).role)
if 'done_demand' in data:
demand_activity_done(activity_id, data.pop('done_demand'))
if 'demand' in data:
demand_activity_add(activity_id, data.pop('demand'))
if 'del_demand' in data:
demand_activity_del(activity_id, data.pop('del_demand'))
Activity.update(**data).where(Activity.id == activity_id).execute()
return {"msg": 'ok'}
@fresh_jwt_required
def activity_detail(activity_id):
    '''Query activity details
GET /api/activity/<int:activity_id>
'''
activity = Activity.findOne(Activity.id == activity_id)
activity['member'] = list(
ActivityMember.find(ActivityMember.role, User.username,
User.email, User.id).join(User)
.where(ActivityMember.activityId == activity_id))
activity['demand'] = list(
Demand.find().where(Demand.activityId == activity_id))
return activity
@fresh_jwt_required
def project_user(project_id):
    '''Query project members'''
return {
"data":
list(
ProjectMember.find(
ProjectMember.role,
User).join(User).where(ProjectMember.projectId == project_id))
}
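# Editor's sketch: the request body shape activity_add() consumes, inferred
# from the keys read above (values illustrative; extra keys pass straight
# through to ActivityBase.create(**data)).
EXAMPLE_ACTIVITY_ADD_BODY = {
    'projectId': 1,
    'memberId': [2, 3],   # optional; when present, status becomes 'dev-ing'
    'demand': [10, 11],   # demand ids attached via demand_activity_add()
}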
| 35.033058
| 98
| 0.599198
| 459
| 4,239
| 5.352941
| 0.169935
| 0.10582
| 0.08954
| 0.039072
| 0.524217
| 0.460317
| 0.319088
| 0.293854
| 0.258038
| 0.258038
| 0
| 0.000655
| 0.279547
| 4,239
| 120
| 99
| 35.325
| 0.803864
| 0.082095
| 0
| 0.3
| 0
| 0
| 0.071466
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.077778
| false
| 0
| 0.044444
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbd96b797fa91e96b8a7f838f8fb68571c587fa0
| 326
|
py
|
Python
|
math/9. Palindrome number.py
|
Rage-ops/Leetcode-Solutions
|
48d4ecbb92a0bb7a7bb74a1445b593a67357ac02
|
[
"MIT"
] | 1
|
2020-11-23T13:52:11.000Z
|
2020-11-23T13:52:11.000Z
|
math/9. Palindrome number.py
|
harsha-sam/Leetcode-Solutions
|
48d4ecbb92a0bb7a7bb74a1445b593a67357ac02
|
[
"MIT"
] | null | null | null |
math/9. Palindrome number.py
|
harsha-sam/Leetcode-Solutions
|
48d4ecbb92a0bb7a7bb74a1445b593a67357ac02
|
[
"MIT"
] | null | null | null |
# Easy
# https://leetcode.com/problems/palindrome-number/
# Time Complexity: O(log(x) to base 10)
# Space Complexity: O(1)
class Solution:
def isPalindrome(self, x: int) -> bool:
temp = x
rev = 0
while temp > 0:
rev = rev * 10 + temp % 10
temp //= 10
return rev == x
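# Editor's sketch: quick checks. The loop also handles 0 (rev stays 0) and
# rejects negatives, since `while temp > 0` never runs and rev != x.
if __name__ == "__main__":
    s = Solution()
    assert s.isPalindrome(121) is True
    assert s.isPalindrome(10) is False
    assert s.isPalindrome(-121) is False
    print("palindrome examples passed")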
| 27.166667
| 50
| 0.542945
| 44
| 326
| 4.022727
| 0.659091
| 0.124294
| 0.090395
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.050459
| 0.331288
| 326
| 12
| 51
| 27.166667
| 0.761468
| 0.349693
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0
| 0
| 0.375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbdc8acd947df0cf5d903b9fd18f947cd84ecb24
| 4,762
|
py
|
Python
|
prtg/client.py
|
kevinschoon/prtg-py
|
714e0750606e55b2cd4c7dff8770d94057fa932b
|
[
"MIT"
] | null | null | null |
prtg/client.py
|
kevinschoon/prtg-py
|
714e0750606e55b2cd4c7dff8770d94057fa932b
|
[
"MIT"
] | null | null | null |
prtg/client.py
|
kevinschoon/prtg-py
|
714e0750606e55b2cd4c7dff8770d94057fa932b
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Python library for Paessler's PRTG (http://www.paessler.com/)
"""
import logging
import xml.etree.ElementTree as Et
from urllib import request
from prtg.cache import Cache
from prtg.models import Sensor, Device, Status, PrtgObject
from prtg.exceptions import BadTarget, UnknownResponse
class Connection(object):
"""
PRTG Connection Object
"""
def __init__(self):
self.response = list()
@staticmethod
def _encode_response(response, tag):
out = list()
        if any([tag == 'devices', tag == 'sensors']):
for item in response.findall('item'):
i = dict()
for attrib in item:
i[attrib.tag] = attrib.text
if tag == 'devices':
out.append(Device(**i))
if tag == 'sensors':
out.append(Sensor(**i))
if tag == 'status':
i = dict()
for item in response:
i[item.tag] = item.text
out.append(Status(**i))
if tag == 'prtg':
i = dict()
for item in response:
i[item.tag] = item.text
out.append(PrtgObject(**i))
return out
def _process_response(self, response, expect_return=True):
"""
Process the response from the server.
"""
if expect_return:
try:
resp = Et.fromstring(response.read().decode('utf-8'))
except Et.ParseError as e:
raise UnknownResponse(e)
try:
ended = resp.attrib['listend'] # Catch KeyError and return finished
except KeyError:
ended = 1
return self._encode_response(resp, resp.tag), ended
def _build_request(self, query):
"""
Build the HTTP request.
"""
req, method = str(query), query.method
logging.debug('REQUEST: target={} method={}'.format(req, method))
return request.Request(url=req, method=method)
def get_request(self, query):
"""
Make a single HTTP request
"""
req = self._build_request(query)
logging.info('Making request: {}'.format(query))
resp, ended = self._process_response(request.urlopen(req))
self.response += resp
if not int(ended): # Recursively request until PRTG indicates "listend"
query.increment()
self.get_request(query)
class Client(object):
def __init__(self, endpoint, username, password):
self.endpoint = endpoint
self.username = username
self.password = password
self.cache = Cache()
@staticmethod
def query(query):
conn = Connection()
conn.get_request(query)
return conn.response
"""
def refresh(self, query):
logging.info('Refreshing content: {}'.format(content))
devices = Query(target='table', endpoint=self.endpoint, username=self.username, password=self.password, content=content, counter=content)
self.connection.get_paginated_request(devices)
self.cache.write_content(devices.response)
def update(self, content, attribute, value, replace=False):
for index, obj in enumerate(content):
logging.debug('Updating object: {} with {}={}'.format(obj, attribute, value))
if attribute == 'tags':
tags = value.split(',')
if replace:
obj.tags = value.split(',')
else:
obj.tags += [x for x in tags if x not in obj.tags]
content[index] = obj
self.cache.write_content(content, force=True)
def content(self, content_name, parents=False, regex=None, attribute=None):
response = list()
for resp in self.cache.get_content(content_name):
if not all([regex, attribute]):
response.append(resp)
else:
if RegexMatch(resp, expression=regex, attribute=attribute):
response.append(resp)
if all([content_name == 'sensors', parents is True]):
logging.info('Searching for parents.. this may take a while')
p = list()
ids = set()
for index, child in enumerate(response):
parent = self.cache.get_object(str(child.parentid)) # Parent device.
if parent:
ids.add(str(parent.objid)) # Lookup unique parent ids.
else:
logging.warning('Unable to find sensor parent')
for parent in ids:
p.append(self.cache.get_object(parent))
response = p
return response
"""
| 32.616438
| 145
| 0.558169
| 518
| 4,762
| 5.063707
| 0.295367
| 0.020587
| 0.010294
| 0.019443
| 0.035837
| 0.035837
| 0.035837
| 0.035837
| 0.035837
| 0.035837
| 0
| 0.00094
| 0.330113
| 4,762
| 145
| 146
| 32.841379
| 0.821317
| 0.059219
| 0
| 0.166667
| 0
| 0
| 0.040552
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.106061
| false
| 0.030303
| 0.090909
| 0
| 0.287879
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbdd97337631bf234182cdf6ceb595a8b38fcc53
| 359
|
py
|
Python
|
pyunitwizard/_private_tools/parsers.py
|
uibcdf/pyunitwizard
|
54cdce7369e1f2a3771a1f05a4a6ba1d7610a5e7
|
[
"MIT"
] | null | null | null |
pyunitwizard/_private_tools/parsers.py
|
uibcdf/pyunitwizard
|
54cdce7369e1f2a3771a1f05a4a6ba1d7610a5e7
|
[
"MIT"
] | null | null | null |
pyunitwizard/_private_tools/parsers.py
|
uibcdf/pyunitwizard
|
54cdce7369e1f2a3771a1f05a4a6ba1d7610a5e7
|
[
"MIT"
] | null | null | null |
parsers = ['openmm.unit', 'pint', 'unyt']
def digest_parser(parser: str) -> str:
""" Check if parser is correct."""
if parser is not None:
if parser.lower() in parsers:
return parser.lower()
else:
raise ValueError
else:
from pyunitwizard.kernel import default_parser
return default_parser
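# Editor's sketch: digest_parser lower-cases and validates its argument.
if __name__ == "__main__":
    assert digest_parser('Pint') == 'pint'
    assert digest_parser('unyt') == 'unyt'
    # digest_parser('numpy') raises ValueError; digest_parser(None) falls
    # back to pyunitwizard.kernel.default_parser.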
| 25.642857
| 54
| 0.601671
| 42
| 359
| 5.071429
| 0.619048
| 0.112676
| 0.093897
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.29805
| 359
| 13
| 55
| 27.615385
| 0.845238
| 0.075209
| 0
| 0.2
| 0
| 0
| 0.058642
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.1
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
dbddb2e414eaaea37bf5ee700d9d3c21f697c101
| 6,606
|
py
|
Python
|
metric_wsd/utils/data_utils.py
|
bartonlin/MWSD
|
70ad446ee7f00a11988acb290270e32d8e6af925
|
[
"MIT"
] | 4
|
2021-04-27T16:28:51.000Z
|
2021-08-30T11:10:28.000Z
|
metric_wsd/utils/data_utils.py
|
bartonlin/MWSD
|
70ad446ee7f00a11988acb290270e32d8e6af925
|
[
"MIT"
] | null | null | null |
metric_wsd/utils/data_utils.py
|
bartonlin/MWSD
|
70ad446ee7f00a11988acb290270e32d8e6af925
|
[
"MIT"
] | 2
|
2021-08-25T14:29:45.000Z
|
2022-02-12T02:09:45.000Z
|
'''
Copyright (c) Facebook, Inc. and its affiliates.
All rights reserved.
This source code is licensed under the license found in the
LICENSE file in the root directory of this source tree.
Code taken from: https://github.com/facebookresearch/wsd-biencoders/blob/master/wsd_models/util.py
'''
import os
import re
import torch
import subprocess
from transformers import *
import random
pos_converter = {'NOUN':'n', 'PROPN':'n', 'VERB':'v', 'AUX':'v', 'ADJ':'a', 'ADV':'r'}
def generate_key(lemma, pos):
if pos in pos_converter.keys():
pos = pos_converter[pos]
key = '{}+{}'.format(lemma, pos)
return key
def load_pretrained_model(name):
if name == 'roberta-base':
model = RobertaModel.from_pretrained('roberta-base')
hdim = 768
elif name == 'roberta-large':
model = RobertaModel.from_pretrained('roberta-large')
hdim = 1024
elif name == 'bert-large':
model = BertModel.from_pretrained('bert-large-uncased')
hdim = 1024
else: #bert base
model = BertModel.from_pretrained('bert-base-uncased')
hdim = 768
return model, hdim
def load_tokenizer(name):
if name == 'roberta-base':
tokenizer = RobertaTokenizer.from_pretrained('roberta-base')
elif name == 'roberta-large':
tokenizer = RobertaTokenizer.from_pretrained('roberta-large')
elif name == 'bert-large':
tokenizer = BertTokenizer.from_pretrained('bert-large-uncased')
else: #bert base
tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
return tokenizer
def load_wn_senses(path):
wn_senses = {}
with open(path, 'r', encoding="utf8") as f:
for line in f:
line = line.strip().split('\t')
lemma = line[0]
pos = line[1]
senses = line[2:]
key = generate_key(lemma, pos)
wn_senses[key] = senses
return wn_senses
def get_label_space(data):
#get set of labels from dataset
labels = set()
for sent in data:
for _, _, _, _, label in sent:
if label != -1:
labels.add(label)
labels = list(labels)
labels.sort()
labels.append('n/a')
label_map = {}
for sent in data:
for _, lemma, pos, _, label in sent:
if label != -1:
key = generate_key(lemma, pos)
label_idx = labels.index(label)
if key not in label_map: label_map[key] = set()
label_map[key].add(label_idx)
return labels, label_map
def process_encoder_outputs(output, mask, as_tensor=False):
combined_outputs = []
position = -1
avg_arr = []
for idx, rep in zip(mask, torch.split(output, 1, dim=0)):
#ignore unlabeled words
if idx == -1: continue
#average representations for units in same example
elif position < idx:
position=idx
if len(avg_arr) > 0: combined_outputs.append(torch.mean(torch.stack(avg_arr, dim=-1), dim=-1))
avg_arr = [rep]
else:
assert position == idx
avg_arr.append(rep)
#get last example from avg_arr
if len(avg_arr) > 0: combined_outputs.append(torch.mean(torch.stack(avg_arr, dim=-1), dim=-1))
if as_tensor: return torch.cat(combined_outputs, dim=0)
else: return combined_outputs
#run WSD Evaluation Framework scorer within python
def evaluate_output(scorer_path, gold_filepath, out_filepath):
eval_cmd = ['java','-cp', scorer_path, 'Scorer', gold_filepath, out_filepath]
output = subprocess.Popen(eval_cmd, stdout=subprocess.PIPE).communicate()[0]
output = [x.decode("utf-8") for x in output.splitlines()]
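#the Scorer prints 'P=', 'R=' and 'F1=' lines ending in '%'; strip the '%' and parse the three scores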
p,r,f1 = [float(output[i].split('=')[-1].strip()[:-1]) for i in range(3)]
return p, r, f1
def load_data(datapath, name):
text_path = os.path.join(datapath, '{}.data.xml'.format(name))
gold_path = os.path.join(datapath, '{}.gold.key.txt'.format(name))
#load gold labels
gold_labels = {}
with open(gold_path, 'r', encoding="utf8") as f:
for line in f:
line = line.strip().split(' ')
instance = line[0]
#this means we are ignoring other senses if labeled with more than one
#(happens at least in SemCor data)
key = line[1]
gold_labels[instance] = key
#load train examples + annotate sense instances with gold labels
sentences = []
s = []
with open(text_path, 'r', encoding="utf8") as f:
for line in f:
line = line.strip()
if line == '</sentence>':
sentences.append(s)
s=[]
elif line.startswith('<instance') or line.startswith('<wf'):
word = re.search('>(.+?)<', line).group(1)
lemma = re.search('lemma="(.+?)"', line).group(1)
pos = re.search('pos="(.+?)"', line).group(1)
#clean up data
word = re.sub(''', '\'', word)
lemma = re.sub(''', '\'', lemma)
sense_inst = -1
sense_label = -1
if line.startswith('<instance'):
sense_inst = re.search('instance id="(.+?)"', line).group(1)
#annotate sense instance with gold label
sense_label = gold_labels[sense_inst]
s.append((word, lemma, pos, sense_inst, sense_label))
return sentences
#normalize ids list, masks to whatever the passed in length is
def normalize_length(ids, attn_mask, o_mask, max_len, pad_id):
if max_len == -1:
return ids, attn_mask, o_mask
else:
if len(ids) < max_len:
while len(ids) < max_len:
ids.append(torch.tensor([[pad_id]]))
attn_mask.append(0)
o_mask.append(-1)
else:
ids = ids[:max_len-1]+[ids[-1]]
attn_mask = attn_mask[:max_len]
o_mask = o_mask[:max_len]
assert len(ids) == max_len
assert len(attn_mask) == max_len
assert len(o_mask) == max_len
return ids, attn_mask, o_mask
#filters down training dataset to (up to) k examples per sense
#for few-shot learning of the model
def filter_k_examples(data, k):
#shuffle data so we don't only get examples for (common) senses from beginning
random.shuffle(data)
#track number of times sense from data is used
sense_dict = {}
#store filtered data
filtered_data = []
example_count = 0
for sent in data:
filtered_sent = []
for form, lemma, pos, inst, sense in sent:
#treat unlabeled words normally
if sense == -1:
x = (form, lemma, pos, inst, sense)
elif sense in sense_dict:
if sense_dict[sense] < k:
#increment sense count and add example to filtered data
sense_dict[sense] += 1
x = (form, lemma, pos, inst, sense)
example_count += 1
else: #if the data already has k examples of this sense
#add example with no instance or sense label to data
x = (form, lemma, pos, -1, -1)
else:
#add labeled example to filtered data and sense dict
sense_dict[sense] = 1
x = (form, lemma, pos, inst, sense)
example_count += 1
filtered_sent.append(x)
filtered_data.append(filtered_sent)
print("k={}, training on {} sense examples...".format(k, example_count))
return filtered_data
#EOF
| 29.891403
| 98
| 0.673479
| 990
| 6,606
| 4.369697
| 0.261616
| 0.020342
| 0.011558
| 0.014794
| 0.251271
| 0.111188
| 0.092233
| 0.092233
| 0.085761
| 0.085761
| 0
| 0.011389
| 0.189222
| 6,606
| 220
| 99
| 30.027273
| 0.796303
| 0.189979
| 0
| 0.2375
| 0
| 0
| 0.079526
| 0
| 0
| 0
| 0
| 0
| 0.025
| 1
| 0.0625
| false
| 0
| 0.0375
| 0
| 0.1625
| 0.00625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9155e8339948407989efd32f44f9c2682f1c678e
| 931
|
py
|
Python
|
groclient/constants.py
|
eric-gro/api-client
|
0ca73422c25b5065907d068a44b72bdc43fea79f
|
[
"MIT"
] | 18
|
2019-01-10T21:06:17.000Z
|
2022-03-15T06:22:18.000Z
|
groclient/constants.py
|
eric-gro/api-client
|
0ca73422c25b5065907d068a44b72bdc43fea79f
|
[
"MIT"
] | 138
|
2019-01-16T15:35:35.000Z
|
2022-03-23T13:05:03.000Z
|
groclient/constants.py
|
eric-gro/api-client
|
0ca73422c25b5065907d068a44b72bdc43fea79f
|
[
"MIT"
] | 24
|
2019-02-22T19:24:54.000Z
|
2022-03-15T10:17:37.000Z
|
"""Constants about the Gro ontology that can be imported and re-used anywhere."""
REGION_LEVELS = {
'world': 1,
'continent': 2,
'country': 3,
'province': 4, # Equivalent to state in the United States
'district': 5, # Equivalent to county in the United States
'city': 6,
'market': 7,
'other': 8,
'coordinate': 9
}
ENTITY_TYPES_PLURAL = ['metrics', 'items', 'regions', 'frequencies', 'sources', 'units']
DATA_SERIES_UNIQUE_TYPES_ID = [
'metric_id',
'item_id',
'region_id',
'partner_region_id',
'frequency_id',
'source_id'
]
ENTITY_KEY_TO_TYPE = {
'metric_id': 'metrics',
'item_id': 'items',
'region_id': 'regions',
'partner_region_id': 'regions',
'source_id': 'sources',
'frequency_id': 'frequencies',
'unit_id': 'units'
}
DATA_POINTS_UNIQUE_COLS = DATA_SERIES_UNIQUE_TYPES_ID + [
'reporting_date',
'start_date',
'end_date'
]
| 22.707317
| 88
| 0.628357
| 115
| 931
| 4.782609
| 0.582609
| 0.058182
| 0.04
| 0.061818
| 0.083636
| 0
| 0
| 0
| 0
| 0
| 0
| 0.012346
| 0.216971
| 931
| 40
| 89
| 23.275
| 0.742112
| 0.170784
| 0
| 0
| 0
| 0
| 0.415686
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9156c4aa90ea0469b8acd15340e3ebcae1eab123
| 1,535
|
py
|
Python
|
asv_bench/benchmarks/tslibs/period.py
|
CitizenB/pandas
|
ee1efb6d923a2c3e5a912efe20a336179614993d
|
[
"PSF-2.0",
"Apache-2.0",
"BSD-3-Clause-No-Nuclear-License-2014",
"MIT",
"ECL-2.0",
"BSD-3-Clause"
] | 6
|
2020-09-10T15:03:25.000Z
|
2021-04-01T22:48:33.000Z
|
asv_bench/benchmarks/tslibs/period.py
|
ivan-vasilev/pandas
|
4071dde86e33434e1bee8304fa62074949f813cc
|
[
"BSD-3-Clause"
] | 7
|
2015-08-30T23:51:00.000Z
|
2018-12-29T19:52:35.000Z
|
asv_bench/benchmarks/tslibs/period.py
|
ivan-vasilev/pandas
|
4071dde86e33434e1bee8304fa62074949f813cc
|
[
"BSD-3-Clause"
] | 5
|
2017-10-04T22:24:49.000Z
|
2021-08-06T13:50:13.000Z
|
"""
Period benchmarks that rely only on tslibs. See benchmarks.period for
Period benchmarks that rely on other parts fo pandas.
"""
from pandas import Period
from pandas.tseries.frequencies import to_offset
class PeriodProperties:
params = (
["M", "min"],
[
"year",
"month",
"day",
"hour",
"minute",
"second",
"is_leap_year",
"quarter",
"qyear",
"week",
"daysinmonth",
"dayofweek",
"dayofyear",
"start_time",
"end_time",
],
)
param_names = ["freq", "attr"]
def setup(self, freq, attr):
self.per = Period("2012-06-01", freq=freq)
def time_property(self, freq, attr):
getattr(self.per, attr)
class PeriodUnaryMethods:
params = ["M", "min"]
param_names = ["freq"]
def setup(self, freq):
self.per = Period("2012-06-01", freq=freq)
def time_to_timestamp(self, freq):
self.per.to_timestamp()
def time_now(self, freq):
self.per.now(freq)
def time_asfreq(self, freq):
self.per.asfreq("A")
class PeriodConstructor:
params = [["D"], [True, False]]
param_names = ["freq", "is_offset"]
def setup(self, freq, is_offset):
if is_offset:
self.freq = to_offset(freq)
else:
self.freq = freq
def time_period_constructor(self, freq, is_offset):
Period("2012-06-01", freq=freq)
| 21.619718
| 70
| 0.536808
| 173
| 1,535
| 4.641619
| 0.381503
| 0.099626
| 0.054795
| 0.07472
| 0.117061
| 0.117061
| 0.089664
| 0.089664
| 0.089664
| 0.089664
| 0
| 0.023369
| 0.330945
| 1,535
| 70
| 71
| 21.928571
| 0.75852
| 0.080782
| 0
| 0.040816
| 0
| 0
| 0.119743
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.163265
| false
| 0
| 0.040816
| 0
| 0.387755
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
915bb507e25fc7cb08c5d136b971e88a2d706d9b
| 1,934
|
py
|
Python
|
tests/integration/test_infrastructure_persistence.py
|
othercodes/sample-todo-list-hexagonal-achitecture
|
a958c6906d8e777e837c8348c754b637b89a7031
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/test_infrastructure_persistence.py
|
othercodes/sample-todo-list-hexagonal-achitecture
|
a958c6906d8e777e837c8348c754b637b89a7031
|
[
"Apache-2.0"
] | null | null | null |
tests/integration/test_infrastructure_persistence.py
|
othercodes/sample-todo-list-hexagonal-achitecture
|
a958c6906d8e777e837c8348c754b637b89a7031
|
[
"Apache-2.0"
] | null | null | null |
from typing import Optional
from complexheart.domain.criteria import Criteria
from sqlalchemy import create_engine
from sqlalchemy.engine import Engine
from sqlalchemy.orm import sessionmaker
from to_do_list.tasks.domain.models import Task
from to_do_list.tasks.infrastructure.persistence.relational import RelationalTaskRepository, DBInstaller
db_engine: Optional[Engine] = None
def setup_function():
global db_engine
db_engine = create_engine('sqlite:///:memory:')
DBInstaller(db_engine).install()
def test_repository_should_save_new_task_successfully(task_factory):
session = sessionmaker(bind=db_engine)()
repository = RelationalTaskRepository(session)
task = repository.save(task_factory({}))
assert session.query(Task).get(task.id)
def test_repository_should_find_task_successfully(task_factory):
session = sessionmaker(bind=db_engine)()
repository = RelationalTaskRepository(session)
task = repository.save(task_factory({}))
assert repository.find(task.id)
def test_repository_should_match_task_by_criteria_successfully(task_factory):
session = sessionmaker(bind=db_engine)()
repository = RelationalTaskRepository(session)
for i in range(11):
repository.save(task_factory({'description': 'My task {i}'.format(i=i)}))
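# '%task 1%' matches 'My task 1' and 'My task 10' out of tasks 0..10, hence the expected count of 2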
tasks = repository.match(
Criteria() \
.filter('description', 'like', '%task 1%') \
.order_by(['id'])
)
for task in tasks:
assert isinstance(task, Task)
assert len(tasks) == 2
def test_repository_should_get_all_tasks_successfully(task_factory):
session = sessionmaker(bind=db_engine)()
repository = RelationalTaskRepository(session)
for i in range(10):
repository.save(task_factory({'description': 'My task {i}'.format(i=i)}))
tasks = repository.all()
for task in tasks:
assert isinstance(task, Task)
assert len(tasks) == 10
| 27.628571
| 104
| 0.73061
| 231
| 1,934
| 5.909091
| 0.281385
| 0.046886
| 0.049817
| 0.067399
| 0.591941
| 0.567033
| 0.524542
| 0.524542
| 0.524542
| 0.524542
| 0
| 0.00496
| 0.165977
| 1,934
| 69
| 105
| 28.028986
| 0.84129
| 0
| 0
| 0.363636
| 0
| 0
| 0.044984
| 0
| 0
| 0
| 0
| 0
| 0.136364
| 1
| 0.113636
| false
| 0
| 0.159091
| 0
| 0.272727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
915c531ce1a9edc3c8480b0c6bf84bed9c0ec81f
| 2,934
|
py
|
Python
|
wagtail_jinja2/extensions.py
|
minervaproject/wagtail-jinja2-extensions
|
708f2f873273312ead80d67c3eff0555f152d072
|
[
"MIT"
] | 6
|
2015-09-25T15:33:17.000Z
|
2021-11-17T23:25:52.000Z
|
wagtail_jinja2/extensions.py
|
minervaproject/wagtail-jinja2-extensions
|
708f2f873273312ead80d67c3eff0555f152d072
|
[
"MIT"
] | 1
|
2015-09-29T15:53:40.000Z
|
2015-09-29T15:53:40.000Z
|
wagtail_jinja2/extensions.py
|
minervaproject/wagtail-jinja2-extensions
|
708f2f873273312ead80d67c3eff0555f152d072
|
[
"MIT"
] | null | null | null |
from jinja2.ext import Extension
from jinja2 import nodes
from jinja2 import Markup
from wagtail.wagtailadmin.templatetags.wagtailuserbar import wagtailuserbar as original_wagtailuserbar
from wagtail.wagtailimages.models import Filter, SourceImageIOError
class WagtailUserBarExtension(Extension):
tags = set(['wagtailuserbar'])
def parse(self, parser):
call = self.call_method('_render', args=[nodes.ContextReference()])
return nodes.Output([nodes.MarkSafe(call)]).set_lineno(next(parser.stream).lineno)
def _render(self, context):
return Markup(original_wagtailuserbar(context))
class WagtailImagesExtension(Extension):
tags = set(['image'])
def parse(self, parser):
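# template usage: {% image <image_expr> <filter_spec> [as <var_name>] %}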
lineno = next(parser.stream).lineno
image_expr = parser.parse_expression()
filter_spec = parser.parse_expression()
if parser.stream.skip_if('name:as'):
output_var_name = parser.parse_expression()
output_var_name = nodes.Const(output_var_name.name)
else:
output_var_name = nodes.Const(None)
if output_var_name.value is not None:
return nodes.Assign(nodes.Name(output_var_name.value, 'store'),
self.call_method('_render', [image_expr, filter_spec, output_var_name]))
else:
return nodes.Output([
self.call_method('_render', [image_expr, filter_spec, output_var_name])
]).set_lineno(lineno)
def filter(self, filter_spec):
_filter, _ = Filter.objects.get_or_create(spec=filter_spec)
return _filter
def _render(self, image, filter_spec, output_var_name=None):
if not image:
return ''
try:
rendition = image.get_rendition(self.filter(filter_spec))
except SourceImageIOError:
# It's fairly routine for people to pull down remote databases to their
# local dev versions without retrieving the corresponding image files.
# In such a case, we would get a SourceImageIOError at the point where we try to
# create the resized version of a non-existent image. Since this is a
# bit catastrophic for a missing image, we'll substitute a dummy
# Rendition object so that we just output a broken link instead.
Rendition = image.renditions.model # pick up any custom Image / Rendition classes that may be in use
rendition = Rendition(image=image, width=0, height=0)
rendition.file.name = 'not-found'
if output_var_name:
# store the rendition object in the given variable
return rendition
else:
# render the rendition's image tag now
# resolved_attrs = {}
# for key in self.attrs:
# resolved_attrs[key] = self.attrs[key].resolve(context)
return rendition.img_tag({})
| 40.75
| 113
| 0.65576
| 354
| 2,934
| 5.279661
| 0.378531
| 0.048154
| 0.069556
| 0.032103
| 0.122525
| 0.055645
| 0.055645
| 0.055645
| 0.055645
| 0.055645
| 0
| 0.002313
| 0.263122
| 2,934
| 71
| 114
| 41.323944
| 0.862165
| 0.224608
| 0
| 0.108696
| 0
| 0
| 0.026979
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108696
| false
| 0
| 0.108696
| 0.021739
| 0.478261
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
915d76b7f2fcca50d25cf033042e2f1d7c43e461
| 14,694
|
py
|
Python
|
nn_dataflow/tests/unit_test/test_network.py
|
Pingziwalk/nn_dataflow
|
5ae8eeba4e243df6e9a69127073513a852a62d17
|
[
"BSD-3-Clause"
] | 170
|
2017-02-28T01:33:11.000Z
|
2022-03-12T09:56:47.000Z
|
nn_dataflow/tests/unit_test/test_network.py
|
Pingziwalk/nn_dataflow
|
5ae8eeba4e243df6e9a69127073513a852a62d17
|
[
"BSD-3-Clause"
] | 24
|
2017-09-18T20:14:51.000Z
|
2022-01-23T06:43:28.000Z
|
nn_dataflow/tests/unit_test/test_network.py
|
Pingziwalk/nn_dataflow
|
5ae8eeba4e243df6e9a69127073513a852a62d17
|
[
"BSD-3-Clause"
] | 71
|
2017-02-07T17:36:17.000Z
|
2022-03-26T00:45:00.000Z
|
""" $lic$
Copyright (C) 2016-2020 by Tsinghua University and The Board of Trustees of
Stanford University
This program is free software: you can redistribute it and/or modify it under
the terms of the Modified BSD-3 License as published by the Open Source
Initiative.
This program is distributed in the hope that it will be useful, but WITHOUT ANY
WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A
PARTICULAR PURPOSE. See the BSD-3 License for more details.
You should have received a copy of the Modified BSD-3 License along with this
program. If not, see <https://opensource.org/licenses/BSD-3-Clause>.
"""
import unittest
from nn_dataflow.core import Network
from nn_dataflow.core import Layer, InputLayer, ConvLayer, FCLayer, \
PoolingLayer, EltwiseLayer
class TestNetwork(unittest.TestCase):
''' Tests for Network. '''
# pylint: disable=too-many-public-methods
def setUp(self):
''' Set up. '''
self.network = Network('test_net')
self.network.set_input_layer(InputLayer(3, 224))
self.network.add('c1', ConvLayer(3, 64, 224, 3))
self.network.add('p1', PoolingLayer(64, 7, 32))
self.network.add('f1', FCLayer(64, 1000, 7))
def test_set_input_layer(self):
''' Modifier set_input_layer. '''
network = Network('test_net')
network.set_input_layer(InputLayer(3, 24))
self.assertIsInstance(network.input_layer(), InputLayer)
self.assertEqual(network.input_layer().nofm, 3)
self.assertEqual(network.input_layer().hofm, 24)
self.assertEqual(network.input_layer().wofm, 24)
self.assertEqual(len(network), 0)
def test_set_input_layer_type(self):
''' Modifier set_input_layer type. '''
network = Network('test_net')
with self.assertRaisesRegex(TypeError, 'Network: .*input_layer.*'):
network.set_input_layer(Layer(3, 24))
with self.assertRaisesRegex(TypeError, 'Network: .*input_layer.*'):
network.set_input_layer(ConvLayer(3, 8, 24, 3))
def test_set_input_layer_duplicate(self):
''' Modifier set_input_layer duplicate. '''
network = Network('test_net')
network.set_input_layer(InputLayer(3, 24))
with self.assertRaisesRegex(KeyError, 'Network: .*input.*'):
network.set_input_layer(InputLayer(3, 24))
def test_add(self):
''' Modifier add. '''
self.assertEqual(len(self.network), 3)
self.network.add('f2', FCLayer(64, 2000, 7), prevs='p1')
self.network.add('f3', FCLayer(3000, 1000), prevs=('f1', 'f2'))
self.network.add('e4', EltwiseLayer(1000, 1, 2), prevs=('f1', 'f3'))
self.network.add('f4', FCLayer(1000, 1000), prevs='e4')
self.assertEqual(len(self.network), 7)
def test_add_same_key(self):
''' Modifier add same key. '''
network = Network('test_net')
network.set_input_layer(InputLayer(3, 224))
network.add('c1', ConvLayer(3, 64, 224, 3))
with self.assertRaisesRegex(KeyError, 'Network: .*c1.*'):
network.add('c1', ConvLayer(64, 128, 224, 3))
def test_add_no_input(self):
''' Modifier add no input. '''
network = Network('test_net')
with self.assertRaisesRegex(RuntimeError, 'Network: .*input.*'):
network.add('c1', ConvLayer(3, 64, 224, 3))
def test_add_no_prev(self):
''' Modifier add no prevs. '''
network = Network('test_net')
network.set_input_layer(InputLayer(3, 224))
network.add('c1', ConvLayer(3, 64, 224, 3))
with self.assertRaisesRegex(KeyError, 'Network: .*prev.*p1.*'):
network.add('p1', PoolingLayer(64, 7, 32), prevs='p1')
def test_add_invalid_type(self):
''' Modifier add invalid type. '''
network = Network('test_net')
network.set_input_layer(InputLayer(3, 224))
with self.assertRaisesRegex(TypeError, 'Network: .*Layer.*'):
network.add('c1', (3, 64, 224, 3))
def test_add_unmatch_prev(self):
''' Modifier add unmatch prevs. '''
network = Network('test_net')
network.set_input_layer(InputLayer(3, 224))
network.add('c1', ConvLayer(3, 64, 224, 3))
with self.assertRaisesRegex(ValueError,
'Network: .*c1.*p1.*mismatch fmap.*'):
network.add('p1', PoolingLayer(64, 7, 2))
self.assertEqual(len(network), 1)
with self.assertRaisesRegex(ValueError,
'Network: .*c1.*c2.*mismatch fmap.*'):
network.add('c2', ConvLayer(64, 128, 220, 3))
self.assertEqual(len(network), 1)
with self.assertRaisesRegex(ValueError, 'Network: .*c1.*prev.*p1.*'):
network.add('p1', PoolingLayer(32, 7, 32))
self.assertEqual(len(network), 1)
with self.assertRaisesRegex(ValueError, 'Network: .*c1.*prev.*c2.*'):
network.add('c2', ConvLayer(32, 128, 224, 3))
self.assertEqual(len(network), 1)
network.add('c2', ConvLayer(64, 128, 224, 3))
with self.assertRaisesRegex(ValueError,
r'Network: .*c1 | c2.*prev.*p1.*'):
network.add('p1', PoolingLayer(128, 7, 32), prevs=('c1', 'c2'))
self.assertEqual(len(network), 2)
def test_add_ext(self):
''' Modifier add_ext. '''
self.assertEqual(len(self.network), 3)
self.network.add_ext('e0', InputLayer(3, 24))
self.assertIsInstance(self.network['e0'], InputLayer)
self.assertEqual(self.network['e0'].nofm, 3)
self.assertEqual(self.network['e0'].hofm, 24)
self.assertEqual(self.network['e0'].wofm, 24)
self.network.add_ext('e1', InputLayer(5, (16, 20)))
self.assertIsInstance(self.network['e1'], InputLayer)
self.assertEqual(self.network['e1'].nofm, 5)
self.assertEqual(self.network['e1'].hofm, 16)
self.assertEqual(self.network['e1'].wofm, 20)
self.assertEqual(len(self.network), 3)
def test_add_ext_same_key(self):
''' Modifier add_ext same key. '''
network = Network('test_net')
network.add_ext('e0', InputLayer(3, 24))
with self.assertRaisesRegex(KeyError, 'Network: .*ext.*'):
network.add_ext('e0', InputLayer(3, 24))
def test_add_ext_invalid_type(self):
''' Modifier add_ext invalid type. '''
network = Network('test_net')
with self.assertRaisesRegex(TypeError, 'Network: .*external layer.*'):
network.add_ext('e0', Layer(3, 24))
with self.assertRaisesRegex(TypeError, 'Network: .*external layer.*'):
network.add_ext('e0', ConvLayer(3, 8, 24, 3))
def test_prevs(self):
''' Get prevs. '''
self.network.add('f2', FCLayer(64, 2000, 7), prevs='p1')
self.network.add('f3', FCLayer(3000, 1000), prevs=('f1', 'f2'))
prevs = self.network.prevs('f1')
self.assertTupleEqual(prevs, ('p1',))
prevs = self.network.prevs('f2')
self.assertTupleEqual(prevs, ('p1',))
prevs = self.network.prevs('f3')
self.assertTupleEqual(prevs, ('f1', 'f2'))
def test_prevs_first(self):
''' Get prevs first layer. '''
self.network.add('c2', ConvLayer(3, 3, 224, 1),
prevs=self.network.INPUT_LAYER_KEY)
prevs = self.network.prevs('c1')
self.assertTupleEqual(prevs, (None,))
prevs = self.network.prevs('c2')
self.assertTupleEqual(prevs, (None,))
def test_prevs_input(self):
''' Get prevs input layer. '''
with self.assertRaisesRegex(ValueError, 'Network: .*input.*'):
_ = self.network.prevs(self.network.INPUT_LAYER_KEY)
def test_prevs_ext_next(self):
''' Get prevs next layer of an external layer. '''
self.network.add_ext('e0', InputLayer(3, 224))
self.network.add('n', ConvLayer(6, 3, 224, 1),
prevs=(self.network.INPUT_LAYER_KEY, 'e0'))
prevs = self.network.prevs('n')
self.assertTupleEqual(prevs, (None, 'e0'))
def test_prevs_ext(self):
''' Get prevs external layer. '''
self.network.add_ext('e0', InputLayer(3, 3))
with self.assertRaisesRegex(ValueError, 'Network: .*ext.*'):
_ = self.network.prevs('e0')
def test_nexts(self):
''' Get nexts. '''
self.network.add('f2', FCLayer(64, 2000, 7), prevs='p1')
self.network.add('f3', FCLayer(3000, 1000), prevs=('f1', 'f2'))
self.network.add('e4', EltwiseLayer(1000, 1, 2), prevs=('f1', 'f3'))
self.network.add('f4', FCLayer(1000, 1000), prevs='e4')
nexts = self.network.nexts('p1')
self.assertTupleEqual(nexts, ('f1', 'f2'))
nexts = self.network.nexts('f1')
self.assertTupleEqual(nexts, ('f3', 'e4'))
nexts = self.network.nexts('f2')
self.assertTupleEqual(nexts, ('f3',))
nexts = self.network.nexts('f3')
self.assertTupleEqual(nexts, ('e4',))
def test_nexts_last(self):
''' Get nexts first layer. '''
nexts = self.network.nexts('f1')
self.assertTupleEqual(nexts, (None,))
self.network.add('f2', FCLayer(64, 2000, 7), prevs='p1')
nexts = self.network.nexts('f1')
self.assertTupleEqual(nexts, (None,))
nexts = self.network.nexts('f2')
self.assertTupleEqual(nexts, (None,))
def test_nexts_input(self):
''' Get nexts input layer. '''
nexts = self.network.nexts(self.network.INPUT_LAYER_KEY)
self.assertTupleEqual(nexts, ('c1',))
self.network.add('c2', ConvLayer(3, 3, 224, 1),
prevs=self.network.INPUT_LAYER_KEY)
self.network.add('c3', ConvLayer(6, 4, 224, 1),
prevs=(self.network.INPUT_LAYER_KEY, 'c2'))
nexts = self.network.nexts(self.network.INPUT_LAYER_KEY)
self.assertTupleEqual(nexts, ('c1', 'c2', 'c3'))
def test_firsts(self):
''' Get firsts. '''
firsts = self.network.firsts()
self.assertTupleEqual(firsts, ('c1',))
self.network.add('c2', ConvLayer(3, 3, 224, 1),
prevs=self.network.INPUT_LAYER_KEY)
self.network.add('c3', ConvLayer(6, 4, 224, 1),
prevs=(self.network.INPUT_LAYER_KEY, 'c2'))
firsts = self.network.firsts()
self.assertTupleEqual(firsts, ('c1', 'c2'))
self.assertIn('c1', firsts)
self.assertNotIn('c3', firsts)
def test_firsts_ext(self):
''' Get firsts with external layers. '''
self.network.add_ext('e0', InputLayer(3, 224))
self.network.add('c2', ConvLayer(3, 3, 224, 1), prevs=('e0',))
self.network.add('c3', ConvLayer(67, 3, 224, 1), prevs=('e0', 'c1'))
self.network.add('c4', ConvLayer(6, 3, 224, 1),
prevs=(self.network.INPUT_LAYER_KEY, 'e0',))
firsts = self.network.firsts()
self.assertIn('c2', firsts)
self.assertNotIn('c3', firsts)
self.assertIn('c4', firsts)
def test_lasts(self):
''' Get lasts. '''
lasts = self.network.lasts()
self.assertTupleEqual(lasts, ('f1',))
self.network.add('f2', FCLayer(64, 2000, 7), prevs='p1')
lasts = self.network.lasts()
self.assertTupleEqual(lasts, ('f1', 'f2'))
def test_ext_layers(self):
''' Get external layers. '''
self.assertTupleEqual(self.network.ext_layers(), tuple())
self.network.add_ext('e0', InputLayer(3, 224))
self.assertTupleEqual(self.network.ext_layers(), ('e0',))
self.network.add_ext('e1', InputLayer(3, 224))
self.assertTupleEqual(self.network.ext_layers(), ('e0', 'e1'))
def test_contains(self):
''' Whether contains. '''
self.assertIn('c1', self.network)
self.assertIn('p1', self.network)
self.assertIn('f1', self.network)
self.assertNotIn('f2', self.network)
self.network.add('f2', FCLayer(64, 2000, 7), prevs='p1')
self.assertIn('f2', self.network)
def test_len(self):
''' Accessor len. '''
self.assertEqual(len(self.network), 3)
network = Network('test_net')
self.assertEqual(len(network), 0)
network.set_input_layer(InputLayer(3, 224))
self.assertEqual(len(network), 0)
network.add('c1', ConvLayer(3, 4, 224, 1))
self.assertEqual(len(network), 1)
self.network.add('f2', FCLayer(64, 2000, 7), prevs='p1')
self.assertEqual(len(self.network), 4)
self.network.add('f3', FCLayer(3000, 1000), prevs=('f1', 'f2'))
self.assertEqual(len(self.network), 5)
self.network.add('e4', EltwiseLayer(1000, 1, 2), prevs=('f1', 'f3'))
self.assertEqual(len(self.network), 6)
self.network.add('f4', FCLayer(1000, 1000), prevs='e4')
self.assertEqual(len(self.network), 7)
def test_iter(self):
''' Accessor iter. '''
num = 0
for layer in self.network:
self.assertIn(layer, self.network)
self.assertIsInstance(self.network[layer], Layer)
num += 1
self.assertEqual(len(self.network), num)
network = Network('test_net')
network.set_input_layer(InputLayer(3, 224))
with self.assertRaises(StopIteration):
_ = next(iter(network))
def test_contains_ext(self):
''' Whether contains external layer. '''
self.assertNotIn('e0', self.network)
self.network.add_ext('e0', InputLayer(3, 224))
self.assertIn('e0', self.network)
def test_len_ext(self):
''' Accessor len external layer. '''
self.assertEqual(len(self.network), 3)
self.network.add_ext('e0', InputLayer(3, 224))
self.assertEqual(len(self.network), 3)
def test_iter_ext(self):
''' Accessor iter external layer. '''
self.network.add_ext('e0', InputLayer(3, 224))
for layer in self.network:
self.assertNotEqual(layer, 'e0')
def test_getitem(self):
''' Accessor getitem. '''
self.assertIsInstance(self.network['c1'], ConvLayer)
self.assertIsInstance(self.network['p1'], PoolingLayer)
self.assertIsInstance(self.network['f1'], FCLayer)
def test_getitem_error(self):
''' Accessor getitem. '''
with self.assertRaisesRegex(KeyError, 'Network: .*c2.*'):
_ = self.network['c2']
def test_str(self):
''' Accessor str. '''
string = str(self.network)
for layer in self.network:
self.assertIn(layer, string)
| 38.365535
| 79
| 0.598884
| 1,793
| 14,694
| 4.815393
| 0.104852
| 0.143966
| 0.063238
| 0.031619
| 0.690294
| 0.555594
| 0.485986
| 0.452861
| 0.35754
| 0.329743
| 0
| 0.054428
| 0.242276
| 14,694
| 382
| 80
| 38.465969
| 0.721035
| 0.101538
| 0
| 0.42126
| 0
| 0
| 0.062774
| 0
| 0
| 0
| 0
| 0
| 0.366142
| 1
| 0.133858
| false
| 0
| 0.011811
| 0
| 0.149606
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
915df7659ad33f08dc46ec91edcb67d2d6a2b9af
| 365
|
py
|
Python
|
apps/division/urls.py
|
Jingil-Integrated-Management/JIM_backend
|
f0e7860d57eddaee034531a52ab91d6715d12c18
|
[
"Apache-2.0"
] | null | null | null |
apps/division/urls.py
|
Jingil-Integrated-Management/JIM_backend
|
f0e7860d57eddaee034531a52ab91d6715d12c18
|
[
"Apache-2.0"
] | null | null | null |
apps/division/urls.py
|
Jingil-Integrated-Management/JIM_backend
|
f0e7860d57eddaee034531a52ab91d6715d12c18
|
[
"Apache-2.0"
] | null | null | null |
from django.urls import path
from .views import DivisionListCreateAPIView, DivisionRetrieveUpdateDestroyAPIView, MainDivisionListAPIView
urlpatterns = [
path('division/', DivisionListCreateAPIView.as_view()),
path('division/<division_pk>', DivisionRetrieveUpdateDestroyAPIView.as_view()),
path('division/main/', MainDivisionListAPIView.as_view()),
]
| 33.181818
| 107
| 0.794521
| 30
| 365
| 9.533333
| 0.5
| 0.125874
| 0.06993
| 0.125874
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.09589
| 365
| 10
| 108
| 36.5
| 0.866667
| 0
| 0
| 0
| 0
| 0
| 0.123288
| 0.060274
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.285714
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
915ff60df252f62c3f259d30deba52d17fbf124c
| 9,077
|
py
|
Python
|
sympy/solvers/tests/test_pde.py
|
nashalex/sympy
|
aec3e6512be46f0558f5dbcf2b4d723496c91649
|
[
"BSD-3-Clause"
] | 8,323
|
2015-01-02T15:51:43.000Z
|
2022-03-31T13:13:19.000Z
|
sympy/solvers/tests/test_pde.py
|
nashalex/sympy
|
aec3e6512be46f0558f5dbcf2b4d723496c91649
|
[
"BSD-3-Clause"
] | 15,102
|
2015-01-01T01:33:17.000Z
|
2022-03-31T22:53:13.000Z
|
sympy/solvers/tests/test_pde.py
|
nashalex/sympy
|
aec3e6512be46f0558f5dbcf2b4d723496c91649
|
[
"BSD-3-Clause"
] | 4,490
|
2015-01-01T17:48:07.000Z
|
2022-03-31T17:24:05.000Z
|
from sympy import (Derivative as D, Eq, exp, sin,
Function, Symbol, symbols, cos, log)
from sympy.core import S
from sympy.solvers.pde import (pde_separate, pde_separate_add, pde_separate_mul,
pdsolve, classify_pde, checkpdesol)
from sympy.testing.pytest import raises
a, b, c, x, y = symbols('a b c x y')
def test_pde_separate_add():
x, y, z, t = symbols("x,y,z,t")
F, T, X, Y, Z, u = map(Function, 'FTXYZu')
eq = Eq(D(u(x, t), x), D(u(x, t), t)*exp(u(x, t)))
res = pde_separate_add(eq, u(x, t), [X(x), T(t)])
assert res == [D(X(x), x)*exp(-X(x)), D(T(t), t)*exp(T(t))]
def test_pde_separate():
x, y, z, t = symbols("x,y,z,t")
F, T, X, Y, Z, u = map(Function, 'FTXYZu')
eq = Eq(D(u(x, t), x), D(u(x, t), t)*exp(u(x, t)))
raises(ValueError, lambda: pde_separate(eq, u(x, t), [X(x), T(t)], 'div'))
def test_pde_separate_mul():
x, y, z, t = symbols("x,y,z,t")
c = Symbol("C", real=True)
Phi = Function('Phi')
F, R, T, X, Y, Z, u = map(Function, 'FRTXYZu')
r, theta, z = symbols('r,theta,z')
# Something simple :)
eq = Eq(D(F(x, y, z), x) + D(F(x, y, z), y) + D(F(x, y, z), z), 0)
# Duplicate arguments in functions
raises(
ValueError, lambda: pde_separate_mul(eq, F(x, y, z), [X(x), u(z, z)]))
# Wrong number of arguments
raises(ValueError, lambda: pde_separate_mul(eq, F(x, y, z), [X(x), Y(y)]))
# Wrong variables: [x, y] -> [x, z]
raises(
ValueError, lambda: pde_separate_mul(eq, F(x, y, z), [X(t), Y(x, y)]))
assert pde_separate_mul(eq, F(x, y, z), [Y(y), u(x, z)]) == \
[D(Y(y), y)/Y(y), -D(u(x, z), x)/u(x, z) - D(u(x, z), z)/u(x, z)]
assert pde_separate_mul(eq, F(x, y, z), [X(x), Y(y), Z(z)]) == \
[D(X(x), x)/X(x), -D(Z(z), z)/Z(z) - D(Y(y), y)/Y(y)]
# wave equation
wave = Eq(D(u(x, t), t, t), c**2*D(u(x, t), x, x))
res = pde_separate_mul(wave, u(x, t), [X(x), T(t)])
assert res == [D(X(x), x, x)/X(x), D(T(t), t, t)/(c**2*T(t))]
# Laplace equation in cylindrical coords
eq = Eq(1/r * D(Phi(r, theta, z), r) + D(Phi(r, theta, z), r, 2) +
1/r**2 * D(Phi(r, theta, z), theta, 2) + D(Phi(r, theta, z), z, 2), 0)
# Separate z
res = pde_separate_mul(eq, Phi(r, theta, z), [Z(z), u(theta, r)])
assert res == [D(Z(z), z, z)/Z(z),
-D(u(theta, r), r, r)/u(theta, r) -
D(u(theta, r), r)/(r*u(theta, r)) -
D(u(theta, r), theta, theta)/(r**2*u(theta, r))]
# Lets use the result to create a new equation...
eq = Eq(res[1], c)
# ...and separate theta...
res = pde_separate_mul(eq, u(theta, r), [T(theta), R(r)])
assert res == [D(T(theta), theta, theta)/T(theta),
-r*D(R(r), r)/R(r) - r**2*D(R(r), r, r)/R(r) - c*r**2]
# ...or r...
res = pde_separate_mul(eq, u(theta, r), [R(r), T(theta)])
assert res == [r*D(R(r), r)/R(r) + r**2*D(R(r), r, r)/R(r) + c*r**2,
-D(T(theta), theta, theta)/T(theta)]
def test_issue_11726():
x, t = symbols("x t")
f = symbols("f", cls=Function)
X, T = symbols("X T", cls=Function)
u = f(x, t)
eq = u.diff(x, 2) - u.diff(t, 2)
res = pde_separate(eq, u, [T(x), X(t)])
assert res == [D(T(x), x, x)/T(x), D(X(t), t, t)/X(t)]
def test_pde_classify():
# When more number of hints are added, add tests for classifying here.
f = Function('f')
eq1 = a*f(x,y) + b*f(x,y).diff(x) + c*f(x,y).diff(y)
eq2 = 3*f(x,y) + 2*f(x,y).diff(x) + f(x,y).diff(y)
eq3 = a*f(x,y) + b*f(x,y).diff(x) + 2*f(x,y).diff(y)
eq4 = x*f(x,y) + f(x,y).diff(x) + 3*f(x,y).diff(y)
eq5 = x**2*f(x,y) + x*f(x,y).diff(x) + x*y*f(x,y).diff(y)
eq6 = y*x**2*f(x,y) + y*f(x,y).diff(x) + f(x,y).diff(y)
for eq in [eq1, eq2, eq3]:
assert classify_pde(eq) == ('1st_linear_constant_coeff_homogeneous',)
for eq in [eq4, eq5, eq6]:
assert classify_pde(eq) == ('1st_linear_variable_coeff',)
def test_checkpdesol():
f, F = map(Function, ['f', 'F'])
eq1 = a*f(x,y) + b*f(x,y).diff(x) + c*f(x,y).diff(y)
eq2 = 3*f(x,y) + 2*f(x,y).diff(x) + f(x,y).diff(y)
eq3 = a*f(x,y) + b*f(x,y).diff(x) + 2*f(x,y).diff(y)
for eq in [eq1, eq2, eq3]:
assert checkpdesol(eq, pdsolve(eq))[0]
eq4 = x*f(x,y) + f(x,y).diff(x) + 3*f(x,y).diff(y)
eq5 = 2*f(x,y) + 1*f(x,y).diff(x) + 3*f(x,y).diff(y)
eq6 = f(x,y) + 1*f(x,y).diff(x) + 3*f(x,y).diff(y)
assert checkpdesol(eq4, [pdsolve(eq5), pdsolve(eq6)]) == [
(False, (x - 2)*F(3*x - y)*exp(-x/S(5) - 3*y/S(5))),
(False, (x - 1)*F(3*x - y)*exp(-x/S(10) - 3*y/S(10)))]
for eq in [eq4, eq5, eq6]:
assert checkpdesol(eq, pdsolve(eq))[0]
sol = pdsolve(eq4)
sol4 = Eq(sol.lhs - sol.rhs, 0)
raises(NotImplementedError, lambda:
checkpdesol(eq4, sol4, solve_for_func=False))
def test_solvefun():
f, F, G, H = map(Function, ['f', 'F', 'G', 'H'])
eq1 = f(x,y) + f(x,y).diff(x) + f(x,y).diff(y)
assert pdsolve(eq1) == Eq(f(x, y), F(x - y)*exp(-x/2 - y/2))
assert pdsolve(eq1, solvefun=G) == Eq(f(x, y), G(x - y)*exp(-x/2 - y/2))
assert pdsolve(eq1, solvefun=H) == Eq(f(x, y), H(x - y)*exp(-x/2 - y/2))
def test_pde_1st_linear_constant_coeff_homogeneous():
f, F = map(Function, ['f', 'F'])
u = f(x, y)
eq = 2*u + u.diff(x) + u.diff(y)
assert classify_pde(eq) == ('1st_linear_constant_coeff_homogeneous',)
sol = pdsolve(eq)
assert sol == Eq(u, F(x - y)*exp(-x - y))
assert checkpdesol(eq, sol)[0]
eq = 4 + (3*u.diff(x)/u) + (2*u.diff(y)/u)
assert classify_pde(eq) == ('1st_linear_constant_coeff_homogeneous',)
sol = pdsolve(eq)
assert sol == Eq(u, F(2*x - 3*y)*exp(-S(12)*x/13 - S(8)*y/13))
assert checkpdesol(eq, sol)[0]
eq = u + (6*u.diff(x)) + (7*u.diff(y))
assert classify_pde(eq) == ('1st_linear_constant_coeff_homogeneous',)
sol = pdsolve(eq)
assert sol == Eq(u, F(7*x - 6*y)*exp(-6*x/S(85) - 7*y/S(85)))
assert checkpdesol(eq, sol)[0]
eq = a*u + b*u.diff(x) + c*u.diff(y)
sol = pdsolve(eq)
assert checkpdesol(eq, sol)[0]
def test_pde_1st_linear_constant_coeff():
f, F = map(Function, ['f', 'F'])
u = f(x,y)
eq = -2*u.diff(x) + 4*u.diff(y) + 5*u - exp(x + 3*y)
sol = pdsolve(eq)
assert sol == Eq(f(x,y),
(F(4*x + 2*y)*exp(x/2) + exp(x + 4*y)/15)*exp(-y))
assert classify_pde(eq) == ('1st_linear_constant_coeff',
'1st_linear_constant_coeff_Integral')
assert checkpdesol(eq, sol)[0]
eq = (u.diff(x)/u) + (u.diff(y)/u) + 1 - (exp(x + y)/u)
sol = pdsolve(eq)
assert sol == Eq(f(x, y), F(x - y)*exp(-x/2 - y/2) + exp(x + y)/3)
assert classify_pde(eq) == ('1st_linear_constant_coeff',
'1st_linear_constant_coeff_Integral')
assert checkpdesol(eq, sol)[0]
eq = 2*u + -u.diff(x) + 3*u.diff(y) + sin(x)
sol = pdsolve(eq)
assert sol == Eq(f(x, y),
F(3*x + y)*exp(x/5 - 3*y/5) - 2*sin(x)/5 - cos(x)/5)
assert classify_pde(eq) == ('1st_linear_constant_coeff',
'1st_linear_constant_coeff_Integral')
assert checkpdesol(eq, sol)[0]
eq = u + u.diff(x) + u.diff(y) + x*y
sol = pdsolve(eq)
assert sol.expand() == Eq(f(x, y),
x + y + (x - y)**2/4 - (x + y)**2/4 + F(x - y)*exp(-x/2 - y/2) - 2).expand()
assert classify_pde(eq) == ('1st_linear_constant_coeff',
'1st_linear_constant_coeff_Integral')
assert checkpdesol(eq, sol)[0]
eq = u + u.diff(x) + u.diff(y) + log(x)
assert classify_pde(eq) == ('1st_linear_constant_coeff',
'1st_linear_constant_coeff_Integral')
def test_pdsolve_all():
f, F = map(Function, ['f', 'F'])
u = f(x,y)
eq = u + u.diff(x) + u.diff(y) + x**2*y
sol = pdsolve(eq, hint = 'all')
keys = ['1st_linear_constant_coeff',
'1st_linear_constant_coeff_Integral', 'default', 'order']
assert sorted(sol.keys()) == keys
assert sol['order'] == 1
assert sol['default'] == '1st_linear_constant_coeff'
assert sol['1st_linear_constant_coeff'].expand() == Eq(f(x, y),
-x**2*y + x**2 + 2*x*y - 4*x - 2*y + F(x - y)*exp(-x/2 - y/2) + 6).expand()
def test_pdsolve_variable_coeff():
f, F = map(Function, ['f', 'F'])
u = f(x, y)
eq = x*(u.diff(x)) - y*(u.diff(y)) + y**2*u - y**2
sol = pdsolve(eq, hint="1st_linear_variable_coeff")
assert sol == Eq(u, F(x*y)*exp(y**2/2) + 1)
assert checkpdesol(eq, sol)[0]
eq = x**2*u + x*u.diff(x) + x*y*u.diff(y)
sol = pdsolve(eq, hint='1st_linear_variable_coeff')
assert sol == Eq(u, F(y*exp(-x))*exp(-x**2/2))
assert checkpdesol(eq, sol)[0]
eq = y*x**2*u + y*u.diff(x) + u.diff(y)
sol = pdsolve(eq, hint='1st_linear_variable_coeff')
assert sol == Eq(u, F(-2*x + y**2)*exp(-x**3/3))
assert checkpdesol(eq, sol)[0]
eq = exp(x)**2*(u.diff(x)) + y
sol = pdsolve(eq, hint='1st_linear_variable_coeff')
assert sol == Eq(u, y*exp(-2*x)/2 + F(y))
assert checkpdesol(eq, sol)[0]
eq = exp(2*x)*(u.diff(y)) + y*u - u
sol = pdsolve(eq, hint='1st_linear_variable_coeff')
assert sol == Eq(u, F(x)*exp(-y*(y - 2)*exp(-2*x)/2))
| 38.299578
| 84
| 0.537512
| 1,794
| 9,077
| 2.634894
| 0.074136
| 0.041464
| 0.041252
| 0.038502
| 0.660038
| 0.614132
| 0.545166
| 0.471123
| 0.453353
| 0.416966
| 0
| 0.030389
| 0.220557
| 9,077
| 236
| 85
| 38.461864
| 0.637739
| 0.036245
| 0
| 0.391304
| 0
| 0
| 0.093407
| 0.080357
| 0
| 0
| 0
| 0
| 0.282609
| 1
| 0.059783
| false
| 0
| 0.021739
| 0
| 0.081522
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91613dad90fa3ec0c081f265b28f59e30cdfc17e
| 6,376
|
py
|
Python
|
GCN/GCN.py
|
EasternJournalist/learn-deep-learning
|
cc424713ffc57b8a796ebd81354a1b887f9c5092
|
[
"MIT"
] | 6
|
2021-08-18T03:29:12.000Z
|
2022-03-22T13:15:35.000Z
|
GCN/GCN.py
|
EasternJournalist/learn-deep-learning
|
cc424713ffc57b8a796ebd81354a1b887f9c5092
|
[
"MIT"
] | null | null | null |
GCN/GCN.py
|
EasternJournalist/learn-deep-learning
|
cc424713ffc57b8a796ebd81354a1b887f9c5092
|
[
"MIT"
] | 2
|
2022-01-06T12:25:02.000Z
|
2022-03-22T13:15:36.000Z
|
import torch
import torch.nn.functional as F
import pandas as pd
import numpy as np
from torch_geometric.data import Data
from torch_geometric.nn import GCNConv, PairNorm
from torch_geometric.utils.undirected import to_undirected
import random
import matplotlib.pyplot as plt
data_name = 'citeseer' # 'cora' or 'citeseer'
data_edge_path = f'datasets/{data_name}/{data_name}.cites'
data_content_path = f'datasets/{data_name}/{data_name}.content'
raw_content = pd.read_table(data_content_path, header=None, dtype={0: str})
raw_edge = pd.read_table(data_edge_path, header=None, dtype=str)
paper_ids = raw_content[0]
paper_id_map = {}
for i, pp_id in enumerate(paper_ids):
paper_id_map[pp_id] = i
edge_index = torch.from_numpy(raw_edge.apply(lambda col: col.map(paper_id_map)).dropna().values).long().t().contiguous()
x = torch.from_numpy(raw_content.values[:, 1:-1].astype(float)).float()
labels = np.unique(raw_content[raw_content.keys()[-1]]).tolist()
y = torch.from_numpy(raw_content[raw_content.keys()[-1]].map(lambda x: labels.index(x)).values).long()
def get_mask(y:torch.tensor):
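# build a boolean train mask with 30 randomly chosen nodes per class; all remaining nodes form the test set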
train_mask = torch.tensor([False] * y.shape[0])
for i in torch.unique(y).unbind():
temp = torch.arange(0, y.shape[0])[y == i].tolist()
random.shuffle(temp)
train_mask[temp[:30]] = True
train_mask = torch.tensor(train_mask)
test_mask = ~train_mask
return train_mask, test_mask
train_mask, test_mask = get_mask(y)
data = Data(x=x, edge_index=edge_index, y=y, train_mask=train_mask, test_mask=test_mask)
def drop_edge(edge_index, keep_ratio:float=1.):
num_keep = int(keep_ratio * edge_index.shape[1])
temp = [True] * num_keep + [False] * (edge_index.shape[1] - num_keep)
random.shuffle(temp)
return edge_index[:, temp]
class GCNNodeClassifier(torch.nn.Module):
def __init__(self,
dim_features,
num_classes,
num_layers,
add_self_loops:bool=True,
use_pairnorm:bool=False,
drop_edge:float=1.,
activation:str='relu',
undirected:bool=False
):
super(GCNNodeClassifier, self).__init__()
dim_hidden = 32
self.gconvs = torch.nn.ModuleList(
[GCNConv(in_channels=dim_features, out_channels=dim_hidden, add_self_loops=add_self_loops)]
+ [GCNConv(in_channels=dim_hidden, out_channels=dim_hidden, add_self_loops=add_self_loops) for i in range(num_layers - 2)]
)
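# the final conv below maps hidden features to per-class logits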
self.final_conv = GCNConv(in_channels=dim_hidden, out_channels=num_classes, add_self_loops=add_self_loops)
self.use_pairnorm = use_pairnorm
if self.use_pairnorm:
self.pairnorm = PairNorm()
self.drop_edge = drop_edge
activations_map = {'relu':torch.relu, 'tanh':torch.tanh, 'sigmoid':torch.sigmoid, 'leaky_relu':torch.nn.LeakyReLU(0.1)}
self.activation_fn = activations_map[activation]
def forward(self, x, edge_index):
for l in self.gconvs:
edges = drop_edge(edge_index, self.drop_edge)
x = l(x, edges)
if self.use_pairnorm:
x = self.pairnorm(x)
x = self.activation_fn(x)
x = self.final_conv(x, edge_index)
return x
def eval_acc(y_pred, y):
return ((torch.argmax(y_pred, dim=-1) == y).float().sum() / y.shape[0]).item()
num_epochs = 100
test_cases = [
{'num_layers':2, 'add_self_loops':True, 'use_pairnorm':False, 'drop_edge':1., 'activation':'relu', 'undirected':False},
# num layers
{'num_layers':4, 'add_self_loops':True, 'use_pairnorm':False, 'drop_edge':1., 'activation':'relu', 'undirected':False},
{'num_layers':6, 'add_self_loops':True, 'use_pairnorm':False, 'drop_edge':1., 'activation':'relu', 'undirected':False},
# self loop
{'num_layers':2, 'add_self_loops':False, 'use_pairnorm':False, 'drop_edge':1., 'activation':'relu', 'undirected':False},
# pair norm
{'num_layers':2, 'add_self_loops':True, 'use_pairnorm':True, 'drop_edge':1., 'activation':'relu', 'undirected':False},
{'num_layers':4, 'add_self_loops':True, 'use_pairnorm':True, 'drop_edge':1., 'activation':'relu', 'undirected':False},
{'num_layers':6, 'add_self_loops':True, 'use_pairnorm':True, 'drop_edge':1., 'activation':'relu', 'undirected':False},
# drop edge
{'num_layers':2, 'add_self_loops':True, 'use_pairnorm':False, 'drop_edge':0.6, 'activation':'relu', 'undirected':False},
{'num_layers':4, 'add_self_loops':True, 'use_pairnorm':False, 'drop_edge':0.6, 'activation':'relu', 'undirected':False},
# activation fn
{'num_layers':2, 'add_self_loops':True, 'use_pairnorm':False, 'drop_edge':1., 'activation':'tanh', 'undirected':False},
{'num_layers':2, 'add_self_loops':True, 'use_pairnorm':False, 'drop_edge':1., 'activation':'leaky_relu', 'undirected':False},
# undirected
{'num_layers':2, 'add_self_loops':True, 'use_pairnorm':False, 'drop_edge':1., 'activation':'relu', 'undirected':True},
{'num_layers':4, 'add_self_loops':True, 'use_pairnorm':True, 'drop_edge':1., 'activation':'relu', 'undirected':True},
{'num_layers':4, 'add_self_loops':True, 'use_pairnorm':False, 'drop_edge':0.8, 'activation':'relu', 'undirected':True},
]
for i_case, kwargs in enumerate(test_cases):
print(f'Test Case {i_case:>2}')
model = GCNNodeClassifier(x.shape[1], len(labels), **kwargs)
optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
history_test_acc = []
input_edge_index = to_undirected(edge_index) if kwargs['undirected'] else edge_index
for i_epoch in range(0, num_epochs):
print(f'Epoch {i_epoch:>3} ', end='')
y_pred = model(x, input_edge_index)
train_acc = eval_acc(y_pred[train_mask], y[train_mask])
# Train
loss = F.cross_entropy(y_pred[train_mask], y[train_mask])
optimizer.zero_grad()
loss.backward()
optimizer.step()
# Test
test_acc = eval_acc(y_pred[test_mask], y[test_mask])
history_test_acc.append(test_acc)
print(f'Train Acc = {train_acc}. Test Acc = {test_acc}')
kwargs['best_acc'] = max(history_test_acc)
plt.plot(list(range(num_epochs)), history_test_acc, label=f'case_{str(i_case).zfill(2)}')
plt.legend()
plt.savefig(f'{data_name}-HistoryAcc.jpg')
pd.DataFrame(test_cases).to_csv(f'{data_name}-Result.csv')
| 44.587413
| 134
| 0.674875
| 932
| 6,376
| 4.345494
| 0.187768
| 0.041481
| 0.062222
| 0.051358
| 0.377531
| 0.354815
| 0.318272
| 0.276543
| 0.276543
| 0.274074
| 0
| 0.01186
| 0.166876
| 6,376
| 143
| 135
| 44.587413
| 0.750565
| 0.015213
| 0
| 0.035714
| 0
| 0
| 0.201914
| 0.024402
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044643
| false
| 0
| 0.080357
| 0.008929
| 0.169643
| 0.026786
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9163007875867d67440a283e2e9737b0b98baef2
| 3,724
|
py
|
Python
|
esg_leipzig_homepage_2015/views.py
|
ESG-Leipzig/Homepage-2015
|
6b77451881031dcb640d2e61ce862617d634f9ac
|
[
"MIT"
] | null | null | null |
esg_leipzig_homepage_2015/views.py
|
ESG-Leipzig/Homepage-2015
|
6b77451881031dcb640d2e61ce862617d634f9ac
|
[
"MIT"
] | 4
|
2015-03-31T22:37:09.000Z
|
2015-10-22T21:37:17.000Z
|
esg_leipzig_homepage_2015/views.py
|
ESG-Leipzig/Homepage-2015
|
6b77451881031dcb640d2e61ce862617d634f9ac
|
[
"MIT"
] | 3
|
2015-02-03T10:23:24.000Z
|
2018-04-11T12:29:23.000Z
|
import datetime
import json
from django.conf import settings
from django.http import Http404
from django.utils import timezone
from django.views import generic
from .models import Event, FlatPage, News
class HomeView(generic.ListView):
"""
View for the first page called 'Home'.
"""
context_object_name = 'event_list'
model = Event
template_name = 'home.html'
def get_queryset(self):
"""
Returns a queryset of all future events that should appear on home.
Uses settings.EVENT_DELAY_IN_MINUTES to determine the range.
"""
time_to_hide = timezone.now() - datetime.timedelta(
minutes=settings.EVENT_DELAY_IN_MINUTES)
queryset = super().get_queryset().filter(begin__gte=time_to_hide)
result = []
for event in queryset:
time_to_show = timezone.now() + datetime.timedelta(
days=event.on_home_before_begin)
if event.on_home_before_begin > 0 and event.begin <= time_to_show:
result.append(event)
return result
def get_context_data(self, **context):
"""
Adds all news to the context.
"""
news_list = News.objects.all()
return super().get_context_data(news_list=news_list, **context)
class CalendarView(generic.ListView):
"""
View for a calendar with all events.
"""
model = Event
template_name = 'calendar.html'
def get_context_data(self, **context):
"""
Returns the template context. Adds event data as JSON for use in
Javascript calendar.
"""
context = super().get_context_data(**context)
event_list = []
for event in context['event_list']:
event_dict = {
'title': event.title,
'start': event.begin.isoformat(),
'description': event.content,
'className': event.css_class_name}
if event.duration:
event_dict['end'] = event.end.isoformat()
event_list.append(event_dict)
context['event_list_json'] = json.dumps(event_list)
return context
class FlatPageView(generic.DetailView):
"""
View for static pages.
"""
model = FlatPage
def get_object(self, queryset=None):
"""
Returns the flatpage instance. Raises Http404 if inexistent.
"""
queryset = queryset or self.get_queryset()
url = self.kwargs.get('url')
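# for/else below: Http404 is raised only when no candidate flatpage matches the full URL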
for flatpage in queryset.filter(slug=url.split('/')[-1]):
if flatpage.get_absolute_url().strip('/') == url:
obj = flatpage
break
else:
raise Http404
return obj
def get_template_names(self):
"""
Returns the template names for the view as list. The name
'flatpage_default.html' is always appended.
"""
template_names = []
if self.object.template_name:
template_names.append(self.object.template_name)
template_names.append('flatpage_default.html')
return template_names
def get_context_data(self, **context):
"""
Returns the template context. Adds breadcrumb to it if neccessary.
"""
context = super().get_context_data(**context)
parent = context['flatpage'].parent
if parent is None:
breadcrumb_list = []
else:
breadcrumb_list = [context['flatpage']]
while parent is not None:
breadcrumb_list.append(parent)
parent = parent.parent
breadcrumb_list.reverse()
context['breadcrumb_list'] = breadcrumb_list
return context
| 31.559322
| 78
| 0.603652
| 419
| 3,724
| 5.186158
| 0.295943
| 0.02485
| 0.038656
| 0.02347
| 0.178555
| 0.133456
| 0.090198
| 0.052462
| 0.052462
| 0.052462
| 0
| 0.004231
| 0.301826
| 3,724
| 117
| 79
| 31.82906
| 0.831538
| 0.153867
| 0
| 0.150685
| 0
| 0
| 0.049949
| 0.007136
| 0
| 0
| 0
| 0
| 0
| 1
| 0.082192
| false
| 0
| 0.09589
| 0
| 0.383562
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91632bfaf2e874f47f67ae904c5dae1d1c06cb7a
| 3,509
|
py
|
Python
|
train.py
|
ronniechong/tensorflow-trainer
|
79e58d224ce1e5ae687abee2bfd81deb49bd41dd
|
[
"MIT"
] | null | null | null |
train.py
|
ronniechong/tensorflow-trainer
|
79e58d224ce1e5ae687abee2bfd81deb49bd41dd
|
[
"MIT"
] | 6
|
2021-06-08T21:56:34.000Z
|
2022-03-12T00:39:34.000Z
|
train.py
|
ronniechong/tensorflow-trainer
|
79e58d224ce1e5ae687abee2bfd81deb49bd41dd
|
[
"MIT"
] | null | null | null |
from dotenv import load_dotenv
load_dotenv()
from flask import Flask, flash, request, redirect, url_for
from flask_ngrok import run_with_ngrok
from flask_cors import CORS
from werkzeug.utils import secure_filename
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras.applications import vgg16
from tensorflow.keras import layers, models, Model, optimizers
from tensorflow.keras.preprocessing import image
import numpy as np
import os
import base64
ALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}
app = Flask(__name__)
app.secret_key = os.getenv('SECRETKEY')
CORS(app)
# run_with_ngrok(app)
# https://github.com/gstaff/flask-ngrok/issues/2
category_names = os.getenv('CATEGORIES').split(',')
nb_categories = len(category_names)
mode = os.getenv('MODE')
if mode == 'checkpoint':
# Load via checkpoints
img_height, img_width = 200,200
conv_base = vgg16.VGG16(weights='imagenet', include_top=False, pooling='max', input_shape = (img_width, img_height, 3))
model_layers = [
conv_base,
layers.Dense(nb_categories, activation='softmax')
]
model = models.Sequential(model_layers)
model.load_weights('./model/cp2-0010.ckpt')
else:
# Load saved model
model = models.load_model('./model/model_vgg16.h5')
def allowed_file(filename):
return '.' in filename and \
filename.rsplit('.', 1)[1].lower() in ALLOWED_EXTENSIONS
@app.route('/')
def home():
return 'Nothing to see here'
@app.route('/v2/predict', methods=['POST'])
def predictFileUpload():
if request.method == 'POST':
print(request)
if 'file' not in request.files:
return {
'Error': 'No file part'
}
file = request.files['file']
if file.filename == '':
return {
'Error': 'No selected file'
}
if file and allowed_file(file.filename):
filename = secure_filename(file.filename)
file.save(os.path.join('./uploads', filename))
img_width, img_height = 200, 200
img = image.load_img(os.path.join('./uploads', filename), target_size = (img_width, img_height))
img = image.img_to_array(img)
img = np.expand_dims(img, axis = 0)
class_prob=model.predict(img)
y_pred = np.argmax(class_prob, axis=1)
count = 0
for a in class_prob[0]:
# print(category_names[count] + ': ' + "{:.2f}".format(a))
count = count + 1
return {
'filename': filename,
'prediction': category_names[y_pred[0]]
}
return 'nothing to see here'
@app.route('/v1/predict', methods=['POST'])
def predictBase64():
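# expects a JSON body with 'name' (filename) and 'image' (base64-encoded file contents)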
if request.method == 'POST':
data = request.get_json()
if data is None:
return {
'Error': 'No image'
}
else:
img_data = data['image']
filename = data['name']
with open(os.path.join('./uploads', filename), "wb") as fh:
fh.write(base64.decodebytes(img_data.encode()))
# fh.close()
img_width, img_height = 200, 200
img = image.load_img(os.path.join('./uploads', filename), target_size = (img_width, img_height))
img = image.img_to_array(img)
img = np.expand_dims(img, axis = 0)
class_prob=model.predict(img)
y_pred = np.argmax(class_prob, axis=1)
count = 0
for a in class_prob[0]:
# print(category_names[count] + ': ' + "{:.2f}".format(a))
count = count + 1
return {
'filename': filename,
'prediction': category_names[y_pred[0]]
}
return 'nothing to see here'
if __name__ == '__main__':
app.run(host='0.0.0.0')
| 29
| 121
| 0.654032
| 478
| 3,509
| 4.638075
| 0.32636
| 0.035183
| 0.024808
| 0.03834
| 0.331078
| 0.308525
| 0.308525
| 0.291385
| 0.291385
| 0.291385
| 0
| 0.022159
| 0.202622
| 3,509
| 121
| 122
| 29
| 0.770193
| 0.065261
| 0
| 0.319588
| 0
| 0
| 0.112741
| 0.013138
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041237
| false
| 0
| 0.134021
| 0.020619
| 0.268041
| 0.010309
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91633c0b686a90b166f71428baf166c3cd9fcb51
| 4,555
|
py
|
Python
|
src/models/train_model.py
|
sandorfoldi/chess_positions_recognition
|
b051f5ba066876d54c435d96cf7e339dfc369b3b
|
[
"FTL"
] | null | null | null |
src/models/train_model.py
|
sandorfoldi/chess_positions_recognition
|
b051f5ba066876d54c435d96cf7e339dfc369b3b
|
[
"FTL"
] | null | null | null |
src/models/train_model.py
|
sandorfoldi/chess_positions_recognition
|
b051f5ba066876d54c435d96cf7e339dfc369b3b
|
[
"FTL"
] | 1
|
2022-01-08T20:26:08.000Z
|
2022-01-08T20:26:08.000Z
|
import random
import matplotlib.pyplot as plt
import wandb
import hydra
import torch
import torch.utils.data as data_utils
from model import ChessPiecePredictor
from torch import nn, optim
from google.cloud import storage
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision.datasets import ImageFolder
@hydra.main(config_path="../conf", config_name="config")
def train(cfg):
print(f"Training started with parameters: {cfg}")
device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
wandb.init()
torch.manual_seed(cfg.seed)
model = ChessPiecePredictor(
image_size=cfg.image_size,
patch_size=cfg.patch_size,
in_channels=cfg.in_channels,
embed_dim=cfg.embed_dim,
num_heads=cfg.num_heads,
)
model = model.to(device)  # the training loop moves batches to `device`, so the model must live there too
wandb.watch(model)
t = transforms.Compose(
[
transforms.Resize((cfg.image_size, cfg.image_size)),
transforms.Grayscale(num_output_channels=cfg.in_channels),
transforms.ToTensor(),
]
)
train_data = ImageFolder(f"{cfg.data_path}/train", transform=t)
validation_data = ImageFolder(f"{cfg.data_path}/test", transform=t)
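# randomly subsample 5000 train / 1000 validation images to keep epochs short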
indices_train = random.sample(range(1, 60000), 5000)
indices_valid = random.sample(range(1, 30000), 1000)
train_data = data_utils.Subset(train_data, indices_train)
validation_data = data_utils.Subset(validation_data, indices_valid)
train_loader = DataLoader(train_data, batch_size=cfg.batch_size, shuffle=True)
validation_loader = DataLoader(validation_data, batch_size=cfg.batch_size, shuffle=True)
criterion = nn.CrossEntropyLoss()
optimizer = optim.Adam(model.parameters(), lr=cfg.lr)
print("Training started...")
train_losses = []
validation_losses = []
batch_count = len(train_loader)
epochs = 2
for e in range(epochs):
train_loss = 0
train_correct = 0
validation_loss = 0
validation_correct = 0
i = 0
for images, labels in train_loader:
# in case we use cuda to train on gpu
images = images.to(device)
labels = labels.to(device)
optimizer.zero_grad()
preds = model(images)
loss = criterion(preds, labels)
loss.backward()
optimizer.step()
train_loss += loss.item()
# accuracy
_, preds_indices = torch.max(preds, dim=1)
train_correct += (preds_indices == labels).sum()
i += 1
if i % 100 == 0:
print(
f"Epoch: {e+1} / {epochs}"
f" - progress: {i} / {batch_count}"
f" - loss: {loss.data.mean()}"
)
# validation pass: disable gradient tracking and switch to eval mode so
# dropout/batch-norm layers behave deterministically
model.eval()
with torch.no_grad():
    for images, labels in validation_loader:
        images = images.to(device)
        labels = labels.to(device)
        preds = model(images)
        loss = criterion(preds, labels)
        validation_loss += loss.item()
        # accuracy
        _, preds_indices = torch.max(preds, dim=1)
        validation_correct += (preds_indices == labels).sum()
model.train()
# divide by the true dataset sizes so a partial final batch does not skew accuracy
train_accuracy = float(train_correct) / len(train_data)
validation_accuracy = float(validation_correct) / len(validation_data)
wandb.log({
"train_loss": train_loss,
"validation_loss": validation_loss,
"train_accuracy": train_accuracy,
"validation_accuracy": validation_accuracy,
})
train_losses.append(train_loss / len(train_loader))
validation_losses.append(validation_loss / len(validation_loader))
# plotting
plt.plot(list(range(1, len(train_losses) + 1)), train_losses, label="Training loss")
print("Train losses:", train_losses)
plt.plot(list(range(1, len(validation_losses) + 1)), validation_losses, label="Validation loss")
print("Validation losses:", validation_losses)
plt.xlabel("epoch")
plt.ylabel("loss")
plt.legend()
fig_path = "training_run.png"
plt.savefig(fig_path)
print(f"Saved training loss figure to {fig_path}")
model_path = "trained_model.pth"
torch.save(model.state_dict(), model_path)
print(f"Saved trained model to {model_path}")
storage_client = storage.Client()
bucket = storage_client.bucket("chess_predictor")
blob = bucket.blob("model_blob")
# upload the checkpoint that was actually written above
blob.upload_from_filename(model_path)
if __name__ == "__main__":
train()
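# --- A minimal sketch of the Hydra config this script consumes. The field
# names mirror every cfg.* access in train(); the values are illustrative and
# the real ../conf/config.yaml may differ.
from omegaconf import OmegaConf

cfg_sketch = OmegaConf.create({
    "seed": 0,
    "image_size": 32,
    "patch_size": 4,
    "in_channels": 1,
    "embed_dim": 64,
    "num_heads": 4,
    "data_path": "data/processed",
    "batch_size": 64,
    "lr": 1e-3,
})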
| 29.967105
| 100
| 0.636004
| 542
| 4,555
| 5.130996
| 0.273063
| 0.019417
| 0.01726
| 0.012226
| 0.187702
| 0.152463
| 0.118662
| 0.089896
| 0.035239
| 0.035239
| 0
| 0.011498
| 0.255324
| 4,555
| 151
| 101
| 30.165563
| 0.808373
| 0.013611
| 0
| 0.091743
| 0
| 0
| 0.108289
| 0.004679
| 0
| 0
| 0
| 0
| 0
| 1
| 0.009174
| false
| 0
| 0.110092
| 0
| 0.119266
| 0.06422
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9164b76283b749a665c678ccd635362448fe685d
| 10,817
|
py
|
Python
|
dfn/tests/test_FractureNetworkThermal.py
|
richardhaslam/discrete-fracture-network
|
2a235fdd3aedfb80dbd9f441d07c5713a6d6c74f
|
[
"MIT"
] | 1
|
2021-06-01T17:38:15.000Z
|
2021-06-01T17:38:15.000Z
|
dfn/tests/test_FractureNetworkThermal.py
|
richardhaslam/discrete-fracture-network
|
2a235fdd3aedfb80dbd9f441d07c5713a6d6c74f
|
[
"MIT"
] | null | null | null |
dfn/tests/test_FractureNetworkThermal.py
|
richardhaslam/discrete-fracture-network
|
2a235fdd3aedfb80dbd9f441d07c5713a6d6c74f
|
[
"MIT"
] | null | null | null |
import copy
import unittest
import networkx as nx
import numpy as np
from scipy.special import erf
from dfn import Fluid, FractureNetworkThermal
class TestFractureNetworkThermal(unittest.TestCase):
def __init__(self, *args, **kwargs):
super(TestFractureNetworkThermal, self).__init__(*args, **kwargs)
# fluid properties
cp_w = 4300.0
rho_w = 1000.0
mu_w = 1E-3
self.fluid = Fluid(density=rho_w, viscosity=mu_w, heat_capacity=cp_w)
# reservoir properties
k_r = 2.9
cp_r = 1050.0
rho_r = 2700.0
alpha_r = k_r / (rho_r * cp_r)
# first network
conn_1 = [(0, 1), (1, 2), (1, 3), (2, 4), (3, 4), (4, 5)]
L_1 = [100, 500, 500, 500, 500, 100]
H_1 = [500, 500, 500, 500, 500, 500]
w_1 = [1E-3, 1E-3, 1E-3, 1E-3, 1E-3, 1E-3]
self.network_1 = FractureNetworkThermal(conn_1, L_1, H_1, w_1, k_r,
alpha_r)
# second network
conn_2 = [(0, 1), (1, 2), (2, 3), (1, 4), (2, 5), (3, 6), (4, 5),
(5, 6), (4, 7), (5, 8), (6, 9), (7, 8), (8, 9), (9, 10)]
L_2 = 250 * np.ones(len(conn_2))
L_2[0] = 100
L_2[-1] = 100
H_2 = 500 * np.ones(len(conn_2))
w_2 = 1E-3 * np.ones(len(conn_2))
self.network_2 = FractureNetworkThermal(conn_2, L_2, H_2, w_2, k_r,
alpha_r)
def copy_networks(self):
"""Return a copy of the fracture networks."""
return copy.copy(self.network_1), copy.copy(self.network_2)
def networks_with_flow(self):
"""Return networks with the mass flow calculated."""
network_1, network_2 = self.copy_networks()
P_0 = 0.0
m_inj = 50.0
network_1.calculate_flow(self.fluid, {0: P_0}, {5: -m_inj})
network_2.calculate_flow(self.fluid, {0: P_0}, {10: -m_inj})
return network_1, network_2
def reverse_nodes(self, network, segments):
"""Reverse the node order for given segments."""
conn = network.connectivity
for seg in segments:
inlet, outlet = conn[seg]
conn[seg, :] = outlet, inlet
network.connectivity = conn
return network
def test_no_mass_flow(self):
"""Test if TypeError is raised for networks without flow calculated."""
with self.assertRaises(TypeError):
self.network_1._check_if_calculated()
with self.assertRaises(TypeError):
self.network_2._check_if_calculated()
def test_neg_mass_flow(self):
"""Test if valueError is raised for networks with negative flow."""
network_1, network_2 = self.networks_with_flow()
network_1 = self.reverse_nodes(network_1, [1])
network_2 = self.reverse_nodes(network_2, [1])
network_1.calculate_flow(self.fluid, {0: 0}, {5: -1.0})
network_2.calculate_flow(self.fluid, {0: 0}, {10: -1.0})
with self.assertRaises(ValueError):
network_1.calculate_temperature(self.fluid, 0, [0], [1])
with self.assertRaises(ValueError):
network_2.calculate_temperature(self.fluid, 0, [0], [1])
def test_construct_graph(self):
"""Test _construct_graph method."""
network_1, network_2 = self.networks_with_flow()
network_1._construct_graph()
network_2._construct_graph()
# construct graph for network 1
G_1 = nx.MultiDiGraph()
edge_data_1 = [(0, 1, {'index': 0}), (1, 2, {'index': 1}),
(1, 3, {'index': 2}), (2, 4, {'index': 3}),
(3, 4, {'index': 4}), (4, 5, {'index': 5})]
G_1.add_edges_from(edge_data_1)
# construct graph for network 2
G_2 = nx.MultiDiGraph()
edge_data_2 = [(0, 1, {'index': 0}), (1, 2, {'index': 1}),
(2, 3, {'index': 2}), (1, 4, {'index': 3}),
(2, 5, {'index': 4}), (3, 6, {'index': 5}),
(4, 5, {'index': 6}), (5, 6, {'index': 7}),
(4, 7, {'index': 8}), (5, 8, {'index': 9}),
(6, 9, {'index': 10}), (7, 8, {'index': 11}),
(8, 9, {'index': 12}), (9, 10, {'index': 13})]
G_2.add_edges_from(edge_data_2)
# return True if graphs are the same
is_isomorphic_1 = nx.is_isomorphic(network_1.graph, G_1)
is_isomorphic_2 = nx.is_isomorphic(network_2.graph, G_2)
self.assertTrue(is_isomorphic_1)
self.assertTrue(is_isomorphic_2)
def test_find_injection_nodes(self):
"""Test _find_injection_nodes method."""
network_1, network_2 = self.networks_with_flow()
network_1._construct_graph()
network_2._construct_graph()
self.assertEqual(network_1._find_injection_nodes(), [0])
self.assertEqual(network_2._find_injection_nodes(), [0])
def test_mass_contribution(self):
"""Test _mass_contribution method."""
network_1, network_2 = self.networks_with_flow()
chi_1 = network_1._mass_contribution()
chi_2 = network_2._mass_contribution()
# first network
for i in (0, 1, 2, 5):
self.assertAlmostEqual(chi_1[i], 1.0, 12)
self.assertAlmostEqual(chi_1[3] + chi_1[4], 1.0, 12)
# second network
for i in (0, 1, 2, 3, 8, 13):
self.assertAlmostEqual(chi_2[i], 1.0, 12)
for i, j in [(4, 6), (5, 7), (9, 11), (10, 12)]:
self.assertAlmostEqual(chi_2[i] + chi_2[j], 1.0, 12)
def test_find_paths(self):
"""Test find_paths method."""
# .find_paths method calls .construct_graph if needed. Manually call
# .construct_graph() on one network for testing both True and False
# conditions
network_1, network_2 = self.networks_with_flow()
network_1._construct_graph()
path_1 = {(0, 1, 3), (0, 2, 4)}
path_2 = {(0, 1, 2, 5, 10), (0, 1, 4, 7, 10), (0, 3, 6, 7, 10),
(0, 3, 6, 9, 12), (0, 3, 8, 11, 12), (0, 1, 4, 9, 12)}
self.assertEqual(path_1, set(network_1.find_paths(0, 4)))
self.assertEqual(path_2, set(network_2.find_paths(0, 9)))
def test_calculate_temperature_inlet_segment(self):
"""Test calculate_temperature ability to handle the inlet segment."""
# operational parameters for temperature
t_end = 86400 * 365.25 * 20
time = t_end * np.linspace(1.0 / 100, 1.0, 100)
distance = np.linspace(0.0, 100.0, 100)
z, t = np.meshgrid(distance, time)
network_1, network_2 = self.networks_with_flow()
# create parameters for temperature manually
m_1 = network_1.mass_flow[0]
m_2 = network_2.mass_flow[0]
beta_1 = 2 * network_1.thermal_cond * network_1.thickness[0] / \
(m_1 * network_1.fluid.c_f)
beta_2 = 2 * network_2.thermal_cond * network_2.thickness[0] / \
(m_2 * network_2.fluid.c_f)
xi_1 = beta_1 * z / (2 * np.sqrt(network_1.thermal_diff * t))
xi_2 = beta_2 * z / (2 * np.sqrt(network_2.thermal_diff * t))
Theta_1 = erf(xi_1)
Theta_2 = erf(xi_2)
# difference between manual and automatic construction
diff_1 = Theta_1 - network_1.calculate_temperature(self.fluid, 0,
distance, time)
diff_2 = Theta_2 - network_2.calculate_temperature(self.fluid, 0,
distance, time)
self.assertAlmostEqual((diff_1**2).sum() / (Theta_1**2).sum(), 0, 12)
self.assertAlmostEqual((diff_2**2).sum() / (Theta_2**2).sum(), 0, 12)
def test_calculate_temperature(self):
"""Test calculate_temperature by constructing manual the equations."""
# operational parameters for temperature
t_end = 86400 * 365.25 * 20
time = t_end * np.linspace(1.0 / 100, 1.0, 100)
distance = np.linspace(0.0, 100.0, 100)
z, t = np.meshgrid(distance, time)
network_1, network_2 = self.networks_with_flow()
# create parameters for temperature manually
chi_1 = np.array([1.0, 1.0, 1.0, 0.5, 0.5, 1.0])
chi_2 = np.ones(network_2.n_segments)
chi_2[4:8] = 0.5
chi_2[9:13] = 0.5
m_1 = network_1.mass_flow
m_2 = network_2.mass_flow
beta_1 = 2 * network_1.thermal_cond * network_1.thickness / \
(m_1 * network_1.fluid.c_f)
beta_2 = 2 * network_2.thermal_cond * network_2.thickness / \
(m_2 * network_2.fluid.c_f)
xi_1 = np.einsum('i,jk->ijk', beta_1 * network_1.length,
1 / (2 * np.sqrt(network_1.thermal_diff * t)))
xi_2 = np.einsum('i,jk->ijk', beta_2 * network_2.length,
1 / (2 * np.sqrt(network_2.thermal_diff * t)))
a = xi_1[[0, 2, 4], :, :].sum(axis=0)
b = xi_1[[0, 1, 3], :, :].sum(axis=0)
xi_seg = beta_1[-1] * z / (2 * np.sqrt(network_1.thermal_diff * t))
Theta_1 = chi_1[0] * chi_1[2] * chi_1[4] * erf(a + xi_seg) + \
chi_1[0] * chi_1[1] * chi_1[3] * erf(b + xi_seg)
a = xi_2[[0, 1, 2, 5, 10], :, :].sum(axis=0)
b = xi_2[[0, 1, 4, 7, 10], :, :].sum(axis=0)
c = xi_2[[0, 3, 6, 7, 10], :, :].sum(axis=0)
d = xi_2[[0, 3, 6, 9, 12], :, :].sum(axis=0)
e = xi_2[[0, 3, 8, 11, 12], :, :].sum(axis=0)
f = xi_2[[0, 1, 4, 9, 12], :, :].sum(axis=0)
C_1 = chi_2[0] * chi_2[1] * chi_2[2] * chi_2[5] * chi_2[10]
C_2 = chi_2[0] * chi_2[1] * chi_2[4] * chi_2[7] * chi_2[10]
C_3 = chi_2[0] * chi_2[3] * chi_2[6] * chi_2[7] * chi_2[10]
C_4 = chi_2[0] * chi_2[3] * chi_2[6] * chi_2[9] * chi_2[12]
C_5 = chi_2[0] * chi_2[3] * chi_2[8] * chi_2[11] * chi_2[12]
C_6 = chi_2[0] * chi_2[1] * chi_2[4] * chi_2[9] * chi_2[12]
xi_seg = beta_2[-1] * z / (2 * np.sqrt(network_2.thermal_diff * t))
Theta_2 = C_1 * erf(a + xi_seg) + C_2 * erf(b + xi_seg) + \
C_3 * erf(c + xi_seg) + C_4 * erf(d + xi_seg) + \
C_5 * erf(e + xi_seg) + C_6 * erf(f + xi_seg)
# difference between manual and automatic construction
diff_1 = Theta_1 - network_1.calculate_temperature(self.fluid, 5,
distance, time)
diff_2 = Theta_2 - network_2.calculate_temperature(self.fluid, 13,
distance, time)
self.assertAlmostEqual((diff_1**2).sum() / (Theta_1**2).sum(), 0, 12)
self.assertAlmostEqual((diff_2**2).sum() / (Theta_2**2).sum(), 0, 12)
if __name__ == '__main__':
unittest.main()
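# --- Note on the hand-built benchmarks above: they reproduce the
# conduction-limited single-fracture temperature solution
#     Theta(z, t) = erf( beta * z / (2 * sqrt(alpha_r * t)) ),
#     beta = 2 * k_r * H / (m * c_f),
# and, for networks, sum the dimensionless xi terms along each flow path
# weighted by the mass-mixing factors chi. This reading is inferred from the
# test code itself, not from external documentation.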
| 38.222615
| 79
| 0.547194
| 1,627
| 10,817
| 3.38783
| 0.119238
| 0.056604
| 0.016328
| 0.026125
| 0.49746
| 0.421081
| 0.393324
| 0.328374
| 0.312591
| 0.297714
| 0
| 0.101461
| 0.303874
| 10,817
| 282
| 80
| 38.358156
| 0.630544
| 0.102894
| 0
| 0.209945
| 0
| 0
| 0.013083
| 0
| 0
| 0
| 0
| 0
| 0.099448
| 1
| 0.066298
| false
| 0
| 0.033149
| 0
| 0.121547
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91674cee92c414668d806e044c2d5ffc326ce9fc
| 10,775
|
py
|
Python
|
dataapi/AWS/getawsdata.py
|
gusamarante/Quantequim
|
3968d9965e8e2c3b5850f1852b56c485859a9c89
|
[
"MIT"
] | 296
|
2018-10-19T21:00:53.000Z
|
2022-03-29T21:50:55.000Z
|
dataapi/AWS/getawsdata.py
|
gusamarante/Quantequim
|
3968d9965e8e2c3b5850f1852b56c485859a9c89
|
[
"MIT"
] | 11
|
2019-06-18T11:43:35.000Z
|
2021-11-14T21:39:20.000Z
|
dataapi/AWS/getawsdata.py
|
gusamarante/FinanceLab
|
3968d9965e8e2c3b5850f1852b56c485859a9c89
|
[
"MIT"
] | 102
|
2018-10-18T14:14:34.000Z
|
2022-03-06T00:34:53.000Z
|
"""
Author: Gustavo Amarante
"""
import numpy as np
import pandas as pd
from datetime import datetime
class TrackerFeeder(object):
"""
Feeder for the trackers of the FinanceHub database.
"""
def __init__(self, db_connect):
"""
Feeder construction
:param db_connect: sql connection engine from sqlalchemy
"""
self.conn = db_connect.connection
def fetch(self, fh_ticker):
"""
grabs trackers from the FH database
:param fh_ticker: str or list with the tickers from the database trackers
:return: pandas DataFrame with tickers on the columns
"""
assert type(fh_ticker) is str or type(fh_ticker) is list or type(fh_ticker) is dict, \
"'tickers' must be a string, list or dict"
sql_query = 'SELECT time_stamp, fh_ticker, value FROM "trackers" WHERE '
if type(fh_ticker) is str:
sql_query = sql_query + "fh_ticker IN ('" + fh_ticker + "')"
elif type(fh_ticker) is list:
sql_query = sql_query + "fh_ticker IN ('" + "', '".join(fh_ticker) + "')"
elif type(fh_ticker) is dict:
sql_query = sql_query + "fh_ticker IN ('" + "', '".join(list(fh_ticker.keys())) + "')"
df = pd.read_sql(sql=sql_query, con=self.conn)
df = df.pivot(index='time_stamp', columns='fh_ticker', values='value')
if type(fh_ticker) is dict:
df = df.rename(fh_ticker, axis=1)
df.index = pd.to_datetime(df.index)
df = df.dropna(how='all')
df = df.sort_index()
return df
def fetch_metadata(self):
"""
Returns the full metadata table of the FH trackers, which is useful to do custom filters and look at what
is in the database.
:return: pandas Dataframe
"""
sql_query = 'SELECT * FROM "trackers_description"'
df = pd.read_sql(sql=sql_query, con=self.conn)
return df
def filter_fetch(self, filter_dict, ret='series'):
"""
Grabs the trackers from the FH database that satisfy the criteria given by 'filter_dict'.
:param filter_dict: dict. Keys must be column names from the metadata table. Values must be
either str or list of str
:param ret: If 'series', returns a dataframe with the tracker series that satisfy the conditions.
If 'tickers', returns a list of the tickers that satisfy the conditions.
:return: list or pandas DataFrame
"""
assert type(filter_dict) is dict, "'filter_dict' must be a dict"
assert len(filter_dict) > 0, "'filter_dict' is empty"
assert ret.lower() in ['series', 'tickers'], "'ret' must be either 'series' or 'ticker'"
desc_query = 'SELECT fh_ticker FROM trackers_description WHERE '
for col in filter_dict.keys():
if type(filter_dict[col]) is list:
desc_query = desc_query + col + " IN ('" + "', '".join(filter_dict[col]) + "')"
else:
desc_query = desc_query + col + f" IN ('{filter_dict[col]}')"
desc_query = desc_query + ' and '
desc_query = desc_query[:-5]
df = pd.read_sql(sql=desc_query, con=self.conn)
tickers = df.values.flatten().tolist()
if ret == 'tickers':
return tickers
df = self.fetch(tickers)
return df
def filter_parameters(self):
"""
Grabs the possible columns and their respective unique values from the metadata table.
:return: dict. Keys are the column names, values are list of unique values of the column.
"""
df = self.fetch_metadata()
param_dict = {}
for col in df.columns:
param_dict[col] = df[col].unique().tolist()
return param_dict
def fetch_everything(self):
sql_query = 'SELECT time_stamp, fh_ticker, value FROM "trackers"'
df = pd.read_sql(sql=sql_query, con=self.conn)
df = df.pivot(index='time_stamp', columns='fh_ticker', values='value')
df.index = pd.to_datetime(df.index)
df = df.dropna(how='all')
df = df.sort_index()
return df
class FocusFeeder(object):
def __init__(self, db_connect):
"""
Feeder construction
:param db_connect: sql connection engine from sqlalchemy
"""
self.conn = db_connect.connection
def fetch(self, index='ipca', frequency='yearly', prediction_scope=None,
dt_ini=None, dt_end=None):
"""
Grabs data from the database and pivots the results into a dataframe. To ensure consistency, the function
can only take one index and one frequency at a time. Only 'prediction_scope' can be a list.
If no prediction scope is passed, all available prediction scopes are returned.
:param index: String containing the name of the index.
:param frequency: String. 'yearly', 'monthly' or 'quarterly' (availability depends on the index)
:param prediction_scope: string, float or list. Years that the forecasts are for.
:param dt_ini: string. Initial date for the series
:param dt_end: string. End date for the series
:return: pandas DataFrame with the pivoted data.
"""
# Error Checking
self._basic_assertions(index, frequency, prediction_scope)
# Handle formats
index, frequency, prediction_scope, dt_ini, dt_end, pivot \
= self._map_inputs(index, frequency, prediction_scope, dt_ini, dt_end)
# build sql query
sql_query = self._build_sql_query(index, frequency, prediction_scope, dt_ini, dt_end)
# get data
df = pd.read_sql(sql=sql_query, con=self.conn)
df = df.drop_duplicates()
# pivoting
df = df.pivot(index='date', columns=pivot, values='value')
df.index = pd.to_datetime(df.index)
return df
def years_ahead(self, index='IPCA', years=1, dt_ini=None, dt_end=None):
"""
The metric attribute is set to 'mean' by default because further projections change smoothly
"""
# Error checking
self._basic_assertions_years_ahead(index, years)
# Handle formats
index, dt_ini, dt_end = self._map_inputs_years_ahead(index, dt_ini, dt_end)
# grabs the index for all available years for each date
df = self.fetch(index=index, frequency='yearly', prediction_scope=None,
dt_ini=dt_ini, dt_end=dt_end)
# creates the new dataframe
df_weighted = pd.DataFrame(index=df.index)
df_weighted[index + ' ' + str(years) + ' year ahead'] = np.nan
# days until year end
df_weighted['D2YE'] = ((df_weighted.index + pd.offsets.YearEnd()) -
pd.to_datetime(df_weighted.index.tolist())).days
for ind in df_weighted.index:
if ind.day == 31 and ind.month == 12:
df_weighted.loc[ind, 'D2YE'] = 0
# loops on each date
for date in df_weighted.index:
df_weighted.loc[date, index + ' ' + str(years) + ' year ahead'] = \
(df.loc[date, str(date.year + years - 1)] * df_weighted.loc[date, 'D2YE'] +
df.loc[date, str(date.year + years)] * (365 - df_weighted.loc[date, 'D2YE'])) / 365
df = df_weighted[[index + ' ' + str(years) + ' year ahead']].interpolate()
df.index = pd.to_datetime(df.index)
return df
@staticmethod
def _basic_assertions(index, frequency, prediction_scope):
"""Check basic assertions"""
assert type(index) is str, 'index must be a string'
assert type(frequency) is str, 'frequency must be a string'
@staticmethod
def _map_inputs(index, frequency, prediction_scope, dt_ini, dt_end):
"""Handle formats of the inputs"""
# index
if type(index) is str:
index = index.lower()
elif type(index) is list:
index = [x.lower() for x in index]
# frequency
frequency = frequency.lower()
# prediction_scope
if type(prediction_scope) is str:
    prediction_scope = prediction_scope.lower()
elif type(prediction_scope) is list:
    prediction_scope = [str(x).lower() for x in prediction_scope]
elif prediction_scope is not None:
    prediction_scope = str(prediction_scope).lower()
# dates
if dt_ini is None:
dt_ini = '1900-01-01'
if dt_end is None:
dt_end = datetime.now().strftime('%Y-%m-%d')
# pivot variable (while we have no metrics, its always the prediction scope)
pivot = 'prediction_scope'
return index, frequency, prediction_scope, dt_ini, dt_end, pivot
@staticmethod
def _build_sql_query(index, frequency, prediction_scope, dt_ini, dt_end):
sql_query = 'SELECT DATE, VALUE, PREDICTION_SCOPE FROM "focus_survey" WHERE '
# index (must not be None)
if type(index) is str:
sql_query = sql_query + "lower(INDEX) IN ('" + index + "')"
elif type(index) is list:
sql_query = sql_query + "lower(INDEX) IN ('" + "', '".join(index) + "')"
# frequency
if type(frequency) is str:
sql_query = sql_query + " AND lower(FREQUENCY) IN ('" + frequency + "')"
elif type(frequency) is list:
sql_query = sql_query + " AND lower(FREQUENCY) IN ('" + "', '".join(frequency) + "')"
# prediction scope
if type(prediction_scope) is str:
sql_query = sql_query + " AND lower(PREDICTION_SCOPE) IN ('" + prediction_scope + "')"
elif type(prediction_scope) is list:
sql_query = sql_query + " AND lower(PREDICTION_SCOPE) IN ('" + "', '".join(prediction_scope) + "')"
sql_query = sql_query + " AND DATE BETWEEN '" + dt_ini + "' AND '" + dt_end + "'"
sql_query = sql_query + ' ORDER BY DATE;'
return sql_query
@staticmethod
def _basic_assertions_years_ahead(index, years):
"""Check basic assertions"""
assert type(index) is str, 'index must be a string'
assert (type(years) is int) and (1 <= years <= 4), 'number of years must be an integer between 1 and 4'
@staticmethod
def _map_inputs_years_ahead(index, dt_ini, dt_end):
"""Handles the format of the inputs of the years_ahead method"""
index = index.lower()
# dates
if dt_ini is None:
dt_ini = '1900-01-01'
if dt_end is None:
dt_end = datetime.now().strftime('%Y-%m-%d')
return index, dt_ini, dt_end
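# --- Hypothetical usage sketch. TrackerFeeder expects an object exposing a
# `.connection` attribute (a DB-API connection); the wrapper class and the
# connection URL below are illustrative assumptions, not part of the
# FinanceHub API.
from sqlalchemy import create_engine

class _ConnWrapper:
    def __init__(self, engine):
        self.connection = engine.raw_connection()

engine = create_engine("postgresql://user:password@localhost/financehub")  # placeholder URL
feeder = TrackerFeeder(_ConnWrapper(engine))
meta = feeder.fetch_metadata()
df = feeder.fetch(meta["fh_ticker"].iloc[0])  # fetch the first available tracker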
| 35.212418
| 117
| 0.604826
| 1,408
| 10,775
| 4.464489
| 0.169744
| 0.044543
| 0.020999
| 0.030544
| 0.43923
| 0.36987
| 0.328349
| 0.276487
| 0.240057
| 0.189628
| 0
| 0.005101
| 0.290487
| 10,775
| 305
| 118
| 35.327869
| 0.817135
| 0.227749
| 0
| 0.344595
| 0
| 0
| 0.132171
| 0.011372
| 0
| 0
| 0
| 0
| 0.081081
| 1
| 0.094595
| false
| 0
| 0.02027
| 0
| 0.202703
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9167fe0a7f3eeef9305940bbccf9dcc614aaf736
| 569
|
py
|
Python
|
assets/utils/config.py
|
mklew/quickstart-data-lake-qubole
|
bb9b4a559815fc293b0fa06aa7e536fe14ced6dd
|
[
"Apache-2.0"
] | null | null | null |
assets/utils/config.py
|
mklew/quickstart-data-lake-qubole
|
bb9b4a559815fc293b0fa06aa7e536fe14ced6dd
|
[
"Apache-2.0"
] | null | null | null |
assets/utils/config.py
|
mklew/quickstart-data-lake-qubole
|
bb9b4a559815fc293b0fa06aa7e536fe14ced6dd
|
[
"Apache-2.0"
] | null | null | null |
from configparser import ConfigParser
CONFIG_INT_KEYS = {
'hadoop_max_nodes_count',
'hadoop_ebs_volumes_count',
'hadoop_ebs_volume_size',
'spark_max_nodes_count',
'spark_ebs_volumes_count',
'spark_ebs_volume_size'
}
def read_config(config_path):
parser = ConfigParser()
parser.read(config_path)
config = {}
for section in parser.sections():
for (config_key, config_value) in parser.items(section):
config[config_key] = int(config_value) if config_key in CONFIG_INT_KEYS else config_value
return config
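# --- Self-contained smoke test for read_config (the INI content and path are
# illustrative): keys listed in CONFIG_INT_KEYS come back as int, everything
# else stays a string, and all sections are flattened into one dict.
if __name__ == '__main__':
    import os, tempfile
    ini = "[cluster]\nhadoop_max_nodes_count = 4\nregion = us-east-1\n"
    with tempfile.NamedTemporaryFile('w', suffix='.ini', delete=False) as f:
        f.write(ini)
        path = f.name
    config = read_config(path)
    assert config['hadoop_max_nodes_count'] == 4
    assert config['region'] == 'us-east-1'
    os.remove(path)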
| 27.095238
| 101
| 0.72232
| 75
| 569
| 5.066667
| 0.386667
| 0.071053
| 0.068421
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.193322
| 569
| 20
| 102
| 28.45
| 0.827887
| 0
| 0
| 0
| 0
| 0
| 0.233743
| 0.233743
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.058824
| 0
| 0.176471
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
916aaa2f9132fad05b66933ca386d50c7aed073b
| 6,083
|
py
|
Python
|
project/starter_code/student_utils.py
|
nihaagarwalla/nd320-c1-emr-data-starter
|
6ce6bb65e89b38f1c2119a739b892ad2504adf7d
|
[
"MIT"
] | null | null | null |
project/starter_code/student_utils.py
|
nihaagarwalla/nd320-c1-emr-data-starter
|
6ce6bb65e89b38f1c2119a739b892ad2504adf7d
|
[
"MIT"
] | null | null | null |
project/starter_code/student_utils.py
|
nihaagarwalla/nd320-c1-emr-data-starter
|
6ce6bb65e89b38f1c2119a739b892ad2504adf7d
|
[
"MIT"
] | null | null | null |
import pandas as pd
import numpy as np
import os
import tensorflow as tf
import functools
####### STUDENTS FILL THIS OUT ######
#Question 3
def reduce_dimension_ndc(df, ndc_df):
'''
df: pandas dataframe, input dataset
ndc_df: pandas dataframe, drug code dataset used for mapping in generic names
return:
df: pandas dataframe, output dataframe with joined generic drug name
'''
ndc_df["Non-proprietary Name"]= ndc_df["Non-proprietary Name"].str.replace("Hcl", "Hydrochloride")
ndc_df["Non-proprietary Name"]= ndc_df["Non-proprietary Name"].str.replace(" And ", "-")
ndc_df["Non-proprietary Name"]= (ndc_df["Non-proprietary Name"].str.strip()).str.upper()
# ndc_df["Dosage Form"]= ndc_df["Dosage Form"].str.replace("Tablet, Film Coated", "TABLET")
# ndc_df["Dosage Form"]= ndc_df["Dosage Form"].str.replace("Tablet, Coated", "TABLET")
# ndc_df["Dosage Form"]= ndc_df["Dosage Form"].str.replace("Tablet, Film Coated, Extended Release", "Tablet Extended Release")
# ndc_df["Dosage Form"]= ndc_df["Dosage Form"].str.replace("Tablet, Extended Release", "Tablet Extended Release")
# ndc_df["Dosage Form"]= ndc_df["Dosage Form"].str.replace("For Suspension, Extended Release", "For Suspension Extended Release")
# ndc_df["Dosage Form"]= ndc_df["Dosage Form"].str.replace("Powder, Metered", "Powder Metered")
# ndc_df["Dosage Form"]= (ndc_df["Dosage Form"].str.strip()).str.upper()
# ndc_df["generic_drug_name"]= ndc_df["Non-proprietary Name"]+"_"+ndc_df["Dosage Form"]
ndc_df["generic_drug_name"]= ndc_df["Non-proprietary Name"]
df_reduce_dimension = pd.merge(df, ndc_df, on=['ndc_code'], how='inner')
df_reduce_dimension['LABEL'] = 0
reduce_dim_df= df_reduce_dimension.drop(columns=['Proprietary Name', 'Non-proprietary Name', 'Dosage Form', 'Route Name', 'Company Name', 'Product Type'])
return reduce_dim_df
#Question 4
def select_first_encounter(df):
'''
df: pandas dataframe, dataframe with all encounters
return:
- first_encounter_df: pandas dataframe, dataframe with only the first encounter for a given patient
'''
first_encounter_df = df.sort_values('encounter_id').groupby('patient_nbr').first()
first_encounter_df = first_encounter_df.reset_index()
return first_encounter_df
#Question 6
def patient_dataset_splitter(df, key='patient_nbr'):
'''
df: pandas dataframe, input dataset that will be split
patient_key: string, column that is the patient id
return:
- train: pandas dataframe,
- validation: pandas dataframe,
- test: pandas dataframe,
'''
df = df.iloc[np.random.permutation(len(df))]
unique_values = df[key].unique()
total_values = len(unique_values)
train_size = round(total_values * (1 - 0.4 ))
train = df[df[key].isin(unique_values[:train_size])].reset_index(drop=True)
left_size = len(unique_values[train_size:])
validation_size = round(left_size*0.5)
validation = df[df[key].isin(unique_values[train_size:train_size+validation_size])].reset_index(drop=True)
test = df[df[key].isin(unique_values[validation_size+train_size:])].reset_index(drop=True)
return train, validation, test
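# --- Hypothetical smoke test for the splitter: 100 synthetic patients, one
# row each, should land 60/20/20 across the splits, keyed on 'patient_nbr'
# so no patient appears in more than one split.
if __name__ == '__main__':
    _demo = pd.DataFrame({'patient_nbr': np.arange(100), 'feature': np.random.rand(100)})
    _train, _valid, _test = patient_dataset_splitter(_demo, key='patient_nbr')
    print(len(_train), len(_valid), len(_test))  # -> 60 20 20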
#Question 7
def create_tf_categorical_feature_cols(categorical_col_list,
vocab_dir='./diabetes_vocab/'):
'''
categorical_col_list: list, categorical field list that will be transformed with TF feature column
vocab_dir: string, the path where the vocabulary text files are located
return:
output_tf_list: list of TF feature columns
'''
output_tf_list = []
for c in categorical_col_list:
vocab_file_path = os.path.join(vocab_dir, c + "_vocab.txt")
tf_categorical_feature_column = tf.feature_column.categorical_column_with_vocabulary_file(
key=c, vocabulary_file = vocab_file_path, num_oov_buckets=1)
one_hot_origin_feature = tf.feature_column.indicator_column(tf_categorical_feature_column)
output_tf_list.append(one_hot_origin_feature)
return output_tf_list
#Question 8
def normalize_numeric_with_zscore(col, mean, std):
'''
This function can be used in conjunction with the tf feature column for normalization
'''
return (col - mean)/std
def create_tf_numeric_feature(col, MEAN, STD, default_value=0):
'''
col: string, input numerical column name
MEAN: the mean for the column in the training data
STD: the standard deviation for the column in the training data
default_value: the value that will be used for imputing the field
return:
tf_numeric_feature: tf feature column representation of the input field
'''
normalizer = functools.partial(normalize_numeric_with_zscore, mean=MEAN, std=STD)
tf_numeric_feature= tf.feature_column.numeric_column(
key=col, default_value = default_value, normalizer_fn=normalizer, dtype=tf.float64)
return tf_numeric_feature
#Question 9
def get_mean_std_from_preds(diabetes_yhat):
'''
diabetes_yhat: TF Probability prediction object
'''
m = diabetes_yhat.mean()
s = diabetes_yhat.stddev()
return m, s
# Question 10
def get_student_binary_prediction(df, col):
    '''
    df: pandas dataframe prediction output dataframe
    col: str, probability mean prediction field
    return:
        student_binary_prediction: numpy array of binary labels
    '''
    # the >= 25 threshold follows the reference convert_to_binary() snippet
    # that shipped with the starter code
    student_binary_prediction = df[col].apply(lambda x: 1 if x >= 25 else 0).values
    return student_binary_prediction
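# --- Hypothetical end-to-end sketch of the feature-column helpers above
# (column names, vocab directory, and statistics are placeholders):
# cat_cols = create_tf_categorical_feature_cols(['primary_diagnosis_code'])
# num_col = create_tf_numeric_feature('num_lab_procedures', MEAN=43.1, STD=19.7)
# feature_layer = tf.keras.layers.DenseFeatures(cat_cols + [num_col])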
| 40.553333
| 158
| 0.706395
| 848
| 6,083
| 4.840802
| 0.245283
| 0.035323
| 0.040195
| 0.054811
| 0.325213
| 0.27162
| 0.231181
| 0.197077
| 0.181486
| 0.173447
| 0
| 0.005239
| 0.18412
| 6,083
| 149
| 159
| 40.825503
| 0.821882
| 0.444518
| 0
| 0
| 0
| 0
| 0.115149
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.096154
| 0
| 0.403846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
916ad498f5f7937a47cd76bb93a7df7cec38d72f
| 5,354
|
py
|
Python
|
core_tools/utility/plotting/plot_1D.py
|
peendebak/core_tools
|
2e43edf0bbc1d7ceb7042559db499535e8f6a076
|
[
"BSD-2-Clause"
] | null | null | null |
core_tools/utility/plotting/plot_1D.py
|
peendebak/core_tools
|
2e43edf0bbc1d7ceb7042559db499535e8f6a076
|
[
"BSD-2-Clause"
] | null | null | null |
core_tools/utility/plotting/plot_1D.py
|
peendebak/core_tools
|
2e43edf0bbc1d7ceb7042559db499535e8f6a076
|
[
"BSD-2-Clause"
] | null | null | null |
import matplotlib.pyplot as plt
import matplotlib as mpl
import numpy as np
import copy
from core_tools.utility.plotting.plot_settings import plot_layout, graph_settings_1D, _1D_raw_plot_data
from core_tools.utility.plotting.plot_general import _data_plotter
class plotter_1D(_data_plotter):
def __init__(self, plt_layout = plot_layout(), graph_settings = graph_settings_1D()):
    self.plot_layout = plt_layout
    self.local_data = np.empty([plt_layout.n_plots_y, plt_layout.n_plots_x], dtype = _1D_plot_single)
    for i in range(self.local_data.size):
        self.local_data.flat[i] = _1D_plot_single(graph_settings)
class _1D_plot_single:
def __init__(self, graph_settings):
self.settings = copy.copy(graph_settings) #default settings
self.data = []
self.x_lim = None
self.y_lim = None
def set_labels(self, xlabel, ylabel):
self.settings.xlabel = xlabel
self.settings.ylabel = ylabel
def set_range(self, x_range=None, y_range=None):
if x_range is not None:
self.x_lim = x_range
if y_range is not None:
self.y_lim = y_range
def add_data(self, x, y, xerr = None, yerr = None, label = None, settings = None, w=None, c=None, alpha=None):
if settings is None:
settings = copy.copy(self.settings)
else:
settings = copy.copy(settings)
if label is not None:
settings.label = label
if w is not None:
if 'l' not in w:
settings.linestyle = ''
if 'p' in w:
settings.marker = 'o'
if c is not None:
settings.color = c
if alpha is not None:
settings.alpha = alpha
self.data += [_1D_raw_plot_data(x,y, xerr, yerr, settings)]
def _render(self, ax, layout_settings, index, scaler = 1, figure=None):
ax.locator_params(axis='x', nbins=layout_settings.xbins)
ax.locator_params(axis='y', nbins=layout_settings.ybins)
ax.xaxis.set_minor_locator(mpl.ticker.AutoMinorLocator())
ax.yaxis.set_minor_locator(mpl.ticker.AutoMinorLocator())
ax.tick_params(direction='in', which='both', top=True, right=True)
if self.settings.xlog:
    ax.set_xscale('log')
if self.settings.ylog:
    ax.set_yscale('log')
if self.x_lim is not None:
ax.set_xlim(*self.x_lim)
if self.y_lim is not None:
ax.set_ylim(*self.y_lim)
labels = False
for i in range(len(self.data)):
data = self.data[i]
if data.x_error is None and data.y_error is None:
ax.plot(data.x_data, data.y_data, **data.settings.plot_settings_to_dict(i, scaler))
else:
pass
# ax.errorbar(a, c, yerr = b/10,ecolor='g',linewidth=1.2,elinewidth=0.7)
if data.settings.label is not None:
labels = True
if self.settings.xlabel is not None:
if layout_settings.share_x == False:
ax.set_xlabel(self.settings.xlabel)
elif index[0] == layout_settings.n_plots_x-1 :
ax.set_xlabel(self.settings.xlabel)
if self.settings.ylabel is not None:
if layout_settings.share_y == False:
ax.set_ylabel(self.settings.ylabel)
elif index[1] == 0 :
ax.set_ylabel(self.settings.ylabel)
if labels:
ax.legend()
# TODO add log scale support !!!
if __name__ == '__main__':
from colors import MATERIAL_COLOR, Red
# global settings
g = graph_settings_1D()
g.color = Red[::-1]
g.linewidth = 1
a = plotter_1D(graph_settings=g)
a[0].set_labels('x_label', 'y_label')
a[0].add_data(np.linspace(0,50,200), np.sin(np.linspace(10,50,200)), w = 'p', alpha = 1, c=Red[5])
a[0].add_data(np.linspace(0,50,200), np.sin(np.linspace(10,50,200)), w = 'l', alpha = 0.3, c=Red[5])
# a.plot()
a.save('test1D_single.svg')
a = plotter_1D(plot_layout(n_plots_x = 1,n_plots_y = 2))
a[0].set_labels('x_label', 'y_label')
a[0].add_data(np.linspace(10,50,50), np.random.random([50]))
a[0,1].set_labels('x_label', 'y_label')
a[0,1].add_data(np.linspace(10,50,50), np.random.random([50]))
a.save('test1D_12.svg')
# a.plot()
a = plotter_1D(plot_layout(n_plots_x = 2,n_plots_y = 2, share_x=True, share_y=True))
a[0].set_labels('x_label', 'y_label')
a[0].add_data(np.linspace(10,50,50), np.random.random([50]), label='test 1')
a[0,1].set_labels('x_label', 'y_label')
a[0,1].add_data(np.linspace(10,50,50), np.random.random([50]), label='test 2')
a[0,1].add_data(np.linspace(10,50,50), np.random.random([50]))
a[1,0].set_labels('x_label', 'y_label')
a[1,0].add_data(np.linspace(10,50,50), np.random.random([50]))
a[1,1].set_labels('x_label', 'y_label')
a[1,1].add_data(np.linspace(10,50,50), np.sin(np.linspace(10,50,50)))
a.save('test1D_22.svg')
# a.plot()
a = plotter_1D(plot_layout((300, 70), n_plots_x = 6,n_plots_y = 1, share_x=False, share_y=True))
a[0].set_labels('time (ns)', 'Spin up probability (%)')
a[0].add_data(np.linspace(0,500,50), np.sin(np.linspace(10,50,50)))
a[1].set_labels('time (ns)', 'Spin up probability (%)')
a[1].add_data(np.linspace(0,500,50), np.sin(np.linspace(10,50,50)))
a[2].set_labels('time (ns)', 'Spin up probability (%)')
a[2].add_data(np.linspace(0,500,50), np.sin(np.linspace(10,50,50)))
a[3].set_labels('time (ns)', 'Spin up probability (%)')
a[3].add_data(np.linspace(0,500,50), np.sin(np.linspace(10,50,50)))
a[4].set_labels('time (ns)', 'Spin up probability (%)')
a[4].add_data(np.linspace(0,500,50), np.sin(np.linspace(10,50,50)))
a[5].set_labels('time (ns)', 'Spin up probability (%)')
a[5].add_data(np.linspace(0,500,50), np.sin(np.linspace(10,50,50)))
print(a)
a.save('test1D_61.svg')
a.plot()
| 31.309942
| 111
| 0.686216
| 972
| 5,354
| 3.587449
| 0.156379
| 0.068827
| 0.055062
| 0.064239
| 0.440206
| 0.429596
| 0.368225
| 0.320333
| 0.229137
| 0.215945
| 0
| 0.054048
| 0.139522
| 5,354
| 171
| 112
| 31.309942
| 0.702844
| 0.030071
| 0
| 0.107438
| 0
| 0
| 0.070754
| 0
| 0
| 0
| 0
| 0.005848
| 0
| 1
| 0.049587
| false
| 0.008264
| 0.057851
| 0
| 0.123967
| 0.008264
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
916bb212bcbe679ba4c75cb54521ee006fb78140
| 5,110
|
py
|
Python
|
v0.3/achat.py
|
Forec/lan-ichat
|
f2ae85ef6a8f2b30126be787e52785971c926d8c
|
[
"0BSD"
] | 63
|
2016-10-25T06:05:29.000Z
|
2021-06-11T01:13:30.000Z
|
v0.3/achat.py
|
yyfhust/lan-ichat
|
f2ae85ef6a8f2b30126be787e52785971c926d8c
|
[
"0BSD"
] | 1
|
2018-10-16T10:06:19.000Z
|
2018-10-16T10:06:19.000Z
|
v0.3/achat.py
|
yyfhust/lan-ichat
|
f2ae85ef6a8f2b30126be787e52785971c926d8c
|
[
"0BSD"
] | 55
|
2016-10-25T06:05:33.000Z
|
2021-12-10T04:58:57.000Z
|
# last edit date: 2016/11/2
# author: Forec
# LICENSE
# Copyright (c) 2015-2017, Forec <forec@bupt.edu.cn>
# Permission to use, copy, modify, and/or distribute this code for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
from socket import *
import threading
import pyaudio
import struct
import pickle
import time
CHUNK = 1024
FORMAT = pyaudio.paInt16
CHANNELS = 2
RATE = 44100
RECORD_SECONDS = 0.5
class Audio_Server(threading.Thread):
def __init__(self, remoteIP, remotePort, remoteVersion) :
threading.Thread.__init__(self)
self.setDaemon(True)
self.ADDR = (remoteIP, remotePort)
if remoteVersion == 4:
self.sock = socket(AF_INET ,SOCK_STREAM)
else:
self.sock = socket(AF_INET6 ,SOCK_STREAM)
self.p = pyaudio.PyAudio()
self.stream = None
def __del__(self):
if self.sock is not None:
self.sock.close()
if self.stream is not None:
try:
self.stream.stop_stream()
self.stream.close()
except:
pass
if self.p is not None:
try:
self.p.terminate()
except:
pass
def run(self):
print ("AUDIO server starts...")
while True:
try:
self.sock.connect(self.ADDR)
break
except:
time.sleep(3)
continue
print ("audio server <-> remote server success connected...")
check = "F"
check = self.sock.recv(1)
if check.decode("utf-8") != "S":
return
data = "".encode("utf-8")
payload_size = struct.calcsize("L")
self.stream = self.p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
output=True,
frames_per_buffer = CHUNK
)
while True:
while len(data) < payload_size:
data += self.sock.recv(81920)
packed_size = data[:payload_size]
data = data[payload_size:]
msg_size = struct.unpack("L", packed_size)[0]
while len(data) < msg_size:
data += self.sock.recv(81920)
frame_data = data[:msg_size]
data = data[msg_size:]
frames = pickle.loads(frame_data)
for frame in frames:
self.stream.write(frame, CHUNK)
class Audio_Client(threading.Thread):
def __init__(self ,serverIP, serverPort, serverVersion):
threading.Thread.__init__(self)
self.setDaemon(True)
self.ADDR = (serverIP, serverPort)
if serverVersion == 4:
self.sock = socket(AF_INET, SOCK_STREAM)
else:
self.sock = socket(AF_INET6, SOCK_STREAM)
self.p = pyaudio.PyAudio()
self.stream = None
def __del__(self) :
if self.sock is not None:
self.sock.close()
if self.stream is not None:
try:
self.stream.stop_stream()
self.stream.close()
except:
pass
if self.p is not None:
try:
self.p.terminate()
except:
pass
def run(self):
print ("AUDIO client starts...")
while True:
try:
self.sock.connect(self.ADDR)
break
except:
time.sleep(3)
continue
print ("audio client <-> remote server success connected...")
check = "F"
check = self.sock.recv(1)
if check.decode("utf-8") != "S":
return
print ("remote AUDIO client connected...")
self.stream = self.p.open(format=FORMAT,
channels=CHANNELS,
rate=RATE,
input=True,
frames_per_buffer=CHUNK)
while self.stream.is_active():
frames = []
for i in range(0, int(RATE / CHUNK * RECORD_SECONDS)):
data = self.stream.read(CHUNK)
frames.append(data)
senddata = pickle.dumps(frames)
try:
self.sock.sendall(struct.pack("L", len(senddata)) + senddata)
except:
break
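# --- Hypothetical wiring sketch: each peer runs a receiving Audio_Server and
# a sending Audio_Client pointed at the other side. The IP, ports, and IP
# version below are placeholders, not values from the source.
if __name__ == '__main__':
    server = Audio_Server('192.168.1.10', 10087, 4)
    client = Audio_Client('192.168.1.10', 10088, 4)
    server.start()
    client.start()
    while server.is_alive() and client.is_alive():
        time.sleep(1)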
| 34.066667
| 77
| 0.541292
| 574
| 5,110
| 4.722997
| 0.329268
| 0.044264
| 0.019919
| 0.023608
| 0.451494
| 0.432313
| 0.392475
| 0.392475
| 0.392475
| 0.357064
| 0
| 0.016184
| 0.371233
| 5,110
| 150
| 78
| 34.066667
| 0.827575
| 0.154599
| 0
| 0.6
| 0
| 0
| 0.046447
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046154
| false
| 0.030769
| 0.076923
| 0
| 0.153846
| 0.038462
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
916e1ddff0241cef174fcd4e5ccac0206688c76b
| 636
|
py
|
Python
|
initcmds/models.py
|
alldevic/mtauksync
|
1a5d325ca8a7878aba5b292d7835546b24bb554c
|
[
"MIT"
] | null | null | null |
initcmds/models.py
|
alldevic/mtauksync
|
1a5d325ca8a7878aba5b292d7835546b24bb554c
|
[
"MIT"
] | null | null | null |
initcmds/models.py
|
alldevic/mtauksync
|
1a5d325ca8a7878aba5b292d7835546b24bb554c
|
[
"MIT"
] | null | null | null |
from django.db import models
TASK_STATUS = (
("c", "created"),
("p", "progress"),
("s", "success"),
("f", "failed")
)
class TaskModel(models.Model):
lastrunned = models.DateTimeField(
"lastrunned", auto_now=False, auto_now_add=False)
taskname = models.CharField("taskname", max_length=50)
status = models.CharField(max_length=1, choices=TASK_STATUS, default='c')
fail = models.TextField("fail", blank=True, null=True)
def __str__(self) -> str:
return f"{self.taskname} - {self.lastrunned}"
class Meta:
verbose_name = "запуск"
verbose_name_plural = "запуски"
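# --- Usage sketch (hypothetical; requires a configured Django project):
# from django.utils import timezone
# TaskModel.objects.create(lastrunned=timezone.now(), taskname="mtauk_sync", status="p")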
| 27.652174
| 77
| 0.636792
| 75
| 636
| 5.213333
| 0.626667
| 0.051151
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005952
| 0.207547
| 636
| 22
| 78
| 28.909091
| 0.769841
| 0
| 0
| 0
| 0
| 0
| 0.16195
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.055556
| 0.055556
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
916fbb01e62cdbb436021c5d032e0ff8b5532255
| 3,171
|
py
|
Python
|
src/Data.py
|
jhlee93/WNet-cGAN-Keras
|
89666be91083735c3259e04907bbfbe1c89fc8f8
|
[
"MIT"
] | 7
|
2019-07-09T15:16:52.000Z
|
2021-05-13T14:14:48.000Z
|
src/Data.py
|
jhlee93/WNet-cGAN-Keras
|
89666be91083735c3259e04907bbfbe1c89fc8f8
|
[
"MIT"
] | 4
|
2019-07-24T13:35:11.000Z
|
2021-04-20T07:59:49.000Z
|
src/Data.py
|
jhlee93/WNet-cGAN-Keras
|
89666be91083735c3259e04907bbfbe1c89fc8f8
|
[
"MIT"
] | 1
|
2021-12-16T13:19:13.000Z
|
2021-12-16T13:19:13.000Z
|
import glob
import numpy as np
import rasterio  # required by get_data() below; missing from the original imports
class Data:
def __init__(self, path, random=False):
"""
input:
path: path to the folder with subfolders: DSM, PAN, LABEL
max_num: int, num of samples
random: bool, to load samples randomly or from 0 to num_max
"""
self.DSM = sorted(glob.glob(path+"/DSM/*.tif"))
self.PAN = sorted(glob.glob(path+"/PAN/*.tif"))
self.LABEL = sorted(glob.glob(path+"/LABEL/*.tif"))
if len(self.DSM) != len(self.PAN) or len(self.LABEL) != len(self.PAN):
raise ValueError('DSM, PAN or LABEL do not match')
def get_data(self, start=0, num=10, as_arr=True, random=False):
"""
function: load max_num of XY into lists
output: list of numpy arrays, X (images) and Y (labels)
"""
DSM_out = []
PAN_out = []
LABEL_out = []
if random:
idx = np.random.choice(list(range(len(self.DSM))), num, replace=False)  # self.DSM (not self.X) holds the remaining tile paths
print('randomly loading {0} tiles from {1} tiles'.format(num, len(self.DSM)))
else:
idx = list(range(start, start+num))
print('loading {0} - {1} image tiles'.format(start, start+num-1))
for i in idx:
DSM_out.append(np.moveaxis(rasterio.open(self.DSM[i]).read(),0,2))
PAN_out.append(np.moveaxis(rasterio.open(self.PAN[i]).read(),0,2))
LABEL_out.append(np.moveaxis(rasterio.open(self.LABEL[i]).read(),0,2))
DSM_remove = [self.DSM[i] for i in idx]
PAN_remove = [self.PAN[i] for i in idx]
LABEL_remove = [self.LABEL[i] for i in idx]
for i in range(len(DSM_remove)):
self.DSM.remove(DSM_remove[i])
self.PAN.remove(PAN_remove[i])
self.LABEL.remove(LABEL_remove[i])
if as_arr:
return np.asarray(DSM_out), np.asarray(PAN_out), np.asarray(LABEL_out)
else:
return DSM_out, PAN_out, LABEL_out
def split_trn_vld_tst(self, vld_rate=0.2, tst_rate=0.0, random=True, seed=10):
np.random.seed(seed)
num = len(self.DSM)
vld_num = int(num*vld_rate)
tst_num = int(num*tst_rate)
print('split into {0} train, {1} validation, {2} test samples'.format(num-vld_num-tst_num, vld_num, tst_num))
idx = np.arange(num)
if random:
np.random.shuffle(idx)
DSM_tst, PAN_tst, LABEL_tst = [self.DSM[k] for k in idx[:tst_num]], [self.PAN[k] for k in idx[:tst_num]], [self.LABEL[k] for k in idx[:tst_num]]
DSM_vld, PAN_vld, LABEL_vld = [self.DSM[k] for k in idx[tst_num:tst_num+vld_num]], [self.PAN[k] for k in idx[tst_num:tst_num+vld_num]], [self.LABEL[k] for k in idx[tst_num:tst_num+vld_num]]
DSM_trn, PAN_trn, LABEL_trn = [self.DSM[k] for k in idx[tst_num+vld_num:]], [self.PAN[k] for k in idx[tst_num+vld_num:]], [self.LABEL[k] for k in idx[tst_num+vld_num:]]
return DSM_trn, PAN_trn, LABEL_trn, DSM_vld, PAN_vld, LABEL_vld, DSM_tst, PAN_tst, LABEL_tst
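# --- Hypothetical usage (directory layout per the docstring: DSM/, PAN/ and
# LABEL/ subfolders of .tif tiles; the path is a placeholder):
# data = Data('data/train')
# dsm, pan, label = data.get_data(start=0, num=10)
# splits = data.split_trn_vld_tst(vld_rate=0.2, tst_rate=0.1)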
| 44.661972
| 198
| 0.571429
| 495
| 3,171
| 3.50303
| 0.191919
| 0.051903
| 0.025952
| 0.036332
| 0.339677
| 0.306805
| 0.214533
| 0.153979
| 0.153979
| 0.130911
| 0
| 0.010662
| 0.290129
| 3,171
| 70
| 199
| 45.3
| 0.759662
| 0.082308
| 0
| 0.085106
| 0
| 0
| 0.067367
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06383
| false
| 0
| 0.042553
| 0
| 0.191489
| 0.06383
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
917058eae76c95edb3644d77520d9eb1f3e8a1e9
| 8,908
|
py
|
Python
|
liberaforms/views/admin.py
|
ngi-nix/liberaforms
|
5882994736292e7ab34c4c9207805b307478a6c7
|
[
"MIT"
] | 3
|
2021-09-02T16:45:42.000Z
|
2022-02-21T19:06:25.000Z
|
liberaforms/views/admin.py
|
ngi-nix/liberaforms
|
5882994736292e7ab34c4c9207805b307478a6c7
|
[
"MIT"
] | 2
|
2021-08-17T04:13:10.000Z
|
2021-09-14T22:48:21.000Z
|
liberaforms/views/admin.py
|
ngi-nix/liberaforms
|
5882994736292e7ab34c4c9207805b307478a6c7
|
[
"MIT"
] | 1
|
2021-08-17T07:13:15.000Z
|
2021-08-17T07:13:15.000Z
|
"""
This file is part of LiberaForms.
# SPDX-FileCopyrightText: 2020 LiberaForms.org
# SPDX-License-Identifier: AGPL-3.0-or-later
"""
import os, json
from flask import g, request, render_template, redirect
from flask import session, flash, Blueprint
from flask import send_file, after_this_request
from flask_babel import gettext as _
from liberaforms.models.user import User
from liberaforms.models.form import Form
from liberaforms.models.site import Site
from liberaforms.models.invite import Invite
from liberaforms.utils.wraps import *
from liberaforms.utils import utils
from liberaforms.utils.utils import make_url_for, JsonResponse
from liberaforms.utils.dispatcher import Dispatcher
from liberaforms.utils import wtf
from pprint import pprint
admin_bp = Blueprint('admin_bp', __name__,
template_folder='../templates/admin')
@admin_bp.route('/admin', methods=['GET'])
@admin_required
def site_admin():
return render_template('admin-panel.html',
user=g.current_user,
app_version=utils.get_app_version(),
site=g.site)
""" User management """
@admin_bp.route('/admin/users', methods=['GET'])
@admin_required
def list_users():
return render_template('list-users.html',
users=User.find_all(),
invites=Invite.find_all())
@admin_bp.route('/admin/users/<int:id>', methods=['GET'])
@admin_required
def inspect_user(id):
user=User.find(id=id)
if not user:
flash(_("User not found"), 'warning')
return redirect(make_url_for('admin_bp.list_users'))
return render_template('inspect-user.html', user=user)
@admin_bp.route('/admin/users/toggle-blocked/<int:id>', methods=['POST'])
@admin_required
def toggle_user_blocked(id):
user=User.find(id=id)
if not user:
return JsonResponse(json.dumps({}))
if user.id == g.current_user.id:
# current_user cannot disable themself
blocked=user.blocked
else:
blocked=user.toggle_blocked()
return JsonResponse(json.dumps({'blocked':blocked}))
@admin_bp.route('/admin/users/toggle-admin/<int:id>', methods=['POST'])
@admin_required
def toggle_admin(id):
user=User.find(id=id)
if not user:
return JsonResponse(json.dumps({}))
if user.username == g.current_user.username:
# current_user cannot remove their own admin permission
is_admin=True
else:
is_admin=user.toggle_admin()
return JsonResponse(json.dumps({'admin':is_admin}))
@admin_bp.route('/admin/users/toggle-uploads-enabled/<int:id>', methods=['POST'])
@admin_required
def toggle_uploads_enabled(id):
user=User.find(id=id)
if not user:
return JsonResponse(json.dumps({}))
uploads_enabled=user.toggle_uploads_enabled()
return JsonResponse(json.dumps({'uploads_enabled':uploads_enabled}))
@admin_bp.route('/admin/users/delete/<int:id>', methods=['GET', 'POST'])
@admin_required
def delete_user(id):
user=User.find(id=id)
if not user:
flash(_("User not found"), 'warning')
return redirect(make_url_for('admin_bp.list_users'))
if request.method == 'POST' and 'username' in request.form:
if user.is_root_user():
flash(_("Cannot delete root user"), 'warning')
return redirect(make_url_for('admin_bp.inspect_user', id=user.id))
if user.id == g.current_user.id:
flash(_("Cannot delete yourself"), 'warning')
return redirect(make_url_for('admin_bp.inspect_user',
username=user.username))
if user.username == request.form['username']:
user.delete_user()
flash(_("Deleted user '%s'" % (user.username)), 'success')
return redirect(make_url_for('admin_bp.list_users'))
else:
flash(_("Username does not match"), 'warning')
return render_template('delete-user.html', user=user)
@admin_bp.route('/admin/users/csv', methods=['GET'])
@admin_required
def csv_users():
csv_file = g.site.write_users_csv()
@after_this_request
def remove_file(response):
os.remove(csv_file)
return response
return send_file(csv_file, mimetype="text/csv", as_attachment=True)
""" Form management """
@admin_bp.route('/admin/forms', methods=['GET'])
@admin_required
def list_forms():
return render_template('list-forms.html', forms=Form.find_all())
@admin_bp.route('/admin/forms/toggle-public/<int:id>', methods=['GET'])
@admin_required
def toggle_form_public_admin_prefs(id):
queriedForm = Form.find(id=id)
if not queriedForm:
flash(_("Can't find that form"), 'warning')
return redirect(make_url_for('form_bp.my_forms'))
queriedForm.toggle_admin_form_public()
return redirect(make_url_for('form_bp.inspect_form', form_id=id))
""" Invitations """
@admin_bp.route('/admin/invites', methods=['GET'])
@admin_required
def list_invites():
return render_template('list-invites.html', invites=Invite.find_all())
@admin_bp.route('/admin/invites/new', methods=['GET', 'POST'])
@admin_required
def new_invite():
wtform=wtf.NewInvite()
if wtform.validate_on_submit():
message=wtform.message.data
token = utils.create_token(Invite)
#pprint(token)
new_invite=Invite( email=wtform.email.data,
message=message,
token=token,
admin=wtform.admin.data)
new_invite.save()
status = Dispatcher().send_invitation(new_invite)
if status['email_sent'] == True:
flash_text = _("We have sent an invitation to %s" % new_invite.email)
flash(flash_text, 'success')
else:
flash(status['msg'], 'warning')
return redirect(make_url_for('admin_bp.list_invites'))
wtform.message.data=Invite.default_message()
return render_template('new-invite.html',
wtform=wtform,
total_invites=Invite.find_all().count())
@admin_bp.route('/admin/invites/delete/<int:id>', methods=['GET'])
@admin_required
def delete_invite(id):
invite=Invite.find(id=id)
if invite:
invite.delete()
# i18n: Invitation to dave@example.com deleted OK
flash(_("Invitation to %s deleted OK" % invite.email), 'success')
else:
flash(_("Opps! We can't find that invitation"), 'error')
return redirect(make_url_for('admin_bp.list_invites'))
""" Personal Admin preferences """
@admin_bp.route('/admin/toggle-newuser-notification', methods=['POST'])
@admin_required
def toggle_newUser_notification():
return json.dumps({'notify': g.current_user.toggle_new_user_notification()})
@admin_bp.route('/admin/toggle-newform-notification', methods=['POST'])
@admin_required
def toggle_newForm_notification():
return json.dumps({'notify': g.current_user.toggle_new_form_notification()})
""" ROOT_USERS functions
"""
@admin_bp.route('/admin/forms/change-author/<int:form_id>', methods=['GET', 'POST'])
@rootuser_required
def change_author(form_id):
queriedForm = Form.find(id=form_id)
if not queriedForm:
flash(_("Can't find that form"), 'warning')
return redirect(make_url_for('user_bp.my_forms'))
if request.method == 'POST':
author = queriedForm.author
if not ('old_author_username' in request.form and \
request.form['old_author_username']==author.username):
flash(_("Current author incorrect"), 'warning')
return render_template('change-author.html', form=queriedForm)
if 'new_author_username' in request.form:
new_author=User.find(username=request.form['new_author_username'])
if new_author:
if new_author.enabled:
old_author=author
if queriedForm.change_author(new_author):
log_text = _("Changed author from %s to %s" % (
old_author.username,
new_author.username))
queriedForm.add_log(log_text)
flash(_("Changed author OK"), 'success')
return redirect(make_url_for('form_bp.inspect_form',
form_id=queriedForm.id))
else:
flash(_("Cannot use %s. The user is not enabled" % (
request.form['new_author_username']),
), 'warning')
else:
flash(_("Can't find username %s" % (
request.form['new_author_username'])
), 'warning')
return render_template('change-author.html', form=queriedForm)
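# --- Registration sketch (hypothetical app factory; names illustrative):
# from liberaforms.views.admin import admin_bp
# app.register_blueprint(admin_bp)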
| 36.064777
| 84
| 0.635608
| 1,088
| 8,908
| 4.993566
| 0.153493
| 0.032211
| 0.03534
| 0.050064
| 0.441377
| 0.351555
| 0.283821
| 0.242039
| 0.207436
| 0.147985
| 0
| 0.001176
| 0.236529
| 8,908
| 246
| 85
| 36.211382
| 0.797677
| 0.03132
| 0
| 0.270833
| 0
| 0
| 0.182428
| 0.049464
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088542
| false
| 0
| 0.078125
| 0.03125
| 0.322917
| 0.015625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91705feef5320bb231c5d61b510ee6321361c934
| 29,405
|
py
|
Python
|
python/zephyr/datasets/score_dataset.py
|
r-pad/zephyr
|
c8f45e207c11bfc2b21df169db65a7df892d2848
|
[
"MIT"
] | 18
|
2021-05-27T04:40:38.000Z
|
2022-02-08T19:46:31.000Z
|
python/zephyr/datasets/score_dataset.py
|
r-pad/zephyr
|
c8f45e207c11bfc2b21df169db65a7df892d2848
|
[
"MIT"
] | null | null | null |
python/zephyr/datasets/score_dataset.py
|
r-pad/zephyr
|
c8f45e207c11bfc2b21df169db65a7df892d2848
|
[
"MIT"
] | 2
|
2021-11-07T12:42:00.000Z
|
2022-03-01T12:51:54.000Z
|
import os, copy
import cv2
from functools import partial
import numpy as np
import torch
import torchvision
from torch.utils.data import Dataset
from zephyr.data_util import to_np, vectorize, img2uint8
from zephyr.utils import torch_norm_fast
from zephyr.utils.mask_edge import getRendEdgeScore
from zephyr.utils.edges import generate_distance_image
from zephyr.normals import compute_normals
from zephyr.utils.timer import TorchTimer
try:
from zephyr.datasets.bop_raw_dataset import BopRawDataset
except ImportError:
pass
from zephyr.datasets.prep_dataset import PrepDataset
IMPORTANCE_ORDER = [
28, 27, 32, 33, 36, 35, 29, 16, 26, 22, 13, 4, 26, 21, 22
]
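# NOTE: indices 26 and 22 each appear twice in IMPORTANCE_ORDER, so slicing
# the top N features can select duplicate columns; whether that is intended
# is not clear from the source.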
class ScoreDataset(Dataset):
def __init__(self, datapoints, dataset_root, dataset_name, args, mode='train', timing = False):
self.args = args
self.datapoints = datapoints
self.dataset_root = dataset_root
self.dataset_name = dataset_name
self.mode = mode
self.return_full_data = False
self.feature_size = args.feature_size
self.norm_cos_weight = args.norm_cos_weight
self.top_n_feat = args.top_n_feat
self.max_hypos = args.max_hypos
self.ppf_only = args.ppf_only
self.n_ppf_hypos = args.n_ppf_hypos
self.n_sift_hypos = args.n_sift_hypos
self.use_mask_test = args.use_mask_test
if args.raw_bop_dataset:
self.loader = BopRawDataset(
args.bop_root, self.dataset_name, args.split, args.split_name, args.ppf_result_file, no_sift=args.ppf_only, no_ppf=args.sift_only
)
else:
self.loader = PrepDataset(
self.dataset_root, self.feature_size
)
self.dim_point = 0
self.dim_render = 0
self.dim_agg = 0
# About timing
self.timing = timing
self.timing_list = []
if args.model_name == "maskconv":
print("Using Point Render dataset")
self.return_rend, self.return_points, self.return_agg = True, True, False
else:
self.return_rend = False
if args.dataset == "feat":
print("Using Agg Dataset")
self.return_points, self.return_agg = False, True
else: # Use PointNet dataset
if "mix" in args.dataset:
print("Using Mix Dataset")
self.return_points, self.return_agg = True, True
else:
print("Using PointNet Dataset")
self.return_points, self.return_agg = True, False
'''For aggregated features Data'''
if self.return_agg:
self.std = None
self.mean = None
self.feature_inliers = True
self.use_hsv = True
self.normalize = True
self.fs_thresh = 0.02
if args.selected_features is not None:
self.selected_features = args.selected_features
print("Using feature indices:", self.selected_features)
elif self.top_n_feat is not None:
self.selected_features = IMPORTANCE_ORDER[:self.top_n_feat]
print("ScoreDataset: Using top features N =", self.top_n_feat)
print("Using feature indices:", self.selected_features)
args.selected_features = self.selected_features
else:
self.selected_features = list(range(39))
print("Using all aggregated features")
args.selected_features = self.selected_features
self.dim_agg = len(self.selected_features)
self.vectorize = partial(vectorize,
use_hsv=self.use_hsv,
feature_inliers=self.feature_inliers,
norm_cos_weight=self.norm_cos_weight,
fs_thresh=self.fs_thresh
)
self.agg_cache = [None for _ in range(len(self.datapoints))]
'''For PointNet Data'''
self.point_x_labels = []
if self.return_points:
self.max_points = args.max_points
args.xyz_channel = [] # indices of point_x channels that define coordinates
args.model_channel = [] # indices of point_x channels that are specific to the object model
'''Mask channel'''
num_features = 0
# valid_proj.unsqueeze(-1).float(),
# valid_depth.unsqueeze(-1).float(),
if not self.args.no_valid_proj:
self.point_x_labels += ['valid_proj']
num_features += 1
if not self.args.no_valid_depth:
self.point_x_labels += ["valid_depth"]
num_features += 1
'''XYZ channel'''
self.uvd, self.uv = False, False
if "uvd" in args.dataset:
self.uvd = True
args.xyz_channel = list(range(num_features, num_features + 3))
num_features +=3
self.point_x_labels += ['u', 'v', 'd']
elif "uv" in args.dataset:
self.uv = True
args.xyz_channel = list(range(num_features, num_features + 2))
num_features += 2
self.point_x_labels += ['u', 'v']
else:
num_features += 0
args.model_channel += args.xyz_channel
num_non_data = num_features
'''Data channel'''
if "cos" in args.dataset:
self.point_x_labels += ['cam_norm_cos']
self.RGB, self.HSV, self.D, self.diff, self.cos, self.edge, self.ppfscore, self.norm_cos = \
False, False, False, False, False, False, False, False
if "RGB" in args.dataset:
self.RGB, self.HSV = True, False
args.model_channel += list(range(num_features, num_features + 3))
num_features += 6
self.point_x_labels += ['R_diff', 'G_diff', 'B_diff'] if "diff" in args.dataset else ["R1", "G1", "B1", "R2", "G2", "B2"]
elif "HSV" in args.dataset:
self.RGB, self.HSV = True, True
args.model_channel += list(range(num_features, num_features + 3))
num_features += 6
self.point_x_labels += ['H_diff', 'S_diff', 'V_diff'] if "diff" in args.dataset else ["H1", "S1", "V1", "H2", "S2", "V2"]
if "D" in args.dataset:
self.D = True
args.model_channel += list(range(num_features, num_features + 1))
num_features += 2
self.point_x_labels += ["D_diff"] if "diff" in args.dataset else ["D1", "D2"]
if "diff" in args.dataset:
self.diff = True
num_features = num_non_data + (num_features-num_non_data) // 2
if "cos" in args.dataset:
self.cos = True
num_features += 1
if "edge" in args.dataset:
self.edge = True
self.edgecos = "edgecos" in args.dataset
self.edgexnor = "edgexnor" in args.dataset
num_features += 1 if (self.edgecos or self.edgexnor) else 2
if self.edgecos:
self.point_x_labels += ['obs_edge_score']
elif self.edgexnor:
self.point_x_labels += ['edge_xnor']
else:
self.point_x_labels += ['obs_edge_score', "rend_edge_score"]
if "ppfscore" in args.dataset:
self.ppfscore = True
num_features += 1
self.point_x_labels += ['ppf_score']
if "norm" in args.dataset:
self.norm_cos = True
num_features += 1
self.point_x_labels += ['norm_cos']
self.seg_mask = False
if "seg" in args.dataset:
self.seg_mask = True
num_features += 1
self.point_x_labels += ['mask', "mask_edge"]
self.dim_point = num_features
'''Train/Test specific config'''
if self.mode == 'train':
print("Initializating training dataset", self.point_x_labels)
self.cojitter = args.cojitter
self.drop_ratio = args.drop_ratio
self.uv_rot = args.uv_rot
else:
print("Initializating %s dataset" % mode, self.point_x_labels)
self.cojitter = False
self.drop_ratio = 0
self.uv_rot = False
self.transform = torchvision.transforms.Compose([
torchvision.transforms.ToPILImage(),
torchvision.transforms.ColorJitter(brightness=0.2, contrast=0.2, saturation=0.2, hue=0.05),
torchvision.transforms.ToTensor(),
])
if self.cojitter:
self.transform_cojitter = torchvision.transforms.Compose([
torchvision.transforms.ToPILImage(),
torchvision.transforms.ColorJitter(brightness=0.5, contrast=0.5, saturation=0.5, hue=0.5),
torchvision.transforms.ToTensor(),
])
print("ScorePointnetDataset: Using cojitter")
if self.return_rend:
self.dim_render = self.dim_point - 1
def __len__(self):
return len(self.datapoints)
def setNormalization(self, var, mean):
var = torch.from_numpy(np.asarray(var))
mean = torch.from_numpy(np.asarray(mean))
self.std = torch.sqrt(var[self.selected_features]).float()
self.mean = mean[self.selected_features].float()
'''Return [n_hypo, n_features]'''
def getAggData(self, data):
x = self.vectorize(data)
x = x[:, self.selected_features]
if self.normalize:
x = (x-self.mean)/self.std
return x
'''Return [n_hypo, n_points, n_features]'''
def getPointNetData(self, data, return_uv_original=False):
with TorchTimer("Data convert 1", agg_list=self.timing_list, timing = self.timing, verbose=False):
img = data['img'].float() # float [0, 1]
depth = data['depth'].float()
if "pbr" in self.dataset_root and self.mode == "train":
# print("blur depth image")
depth = depth * (torch.ones_like(depth) + 0.003 * torch.randn_like(depth))
transforms = data['transforms'].float()
model_points = data['model_points'].float()
model_colors = data['model_colors'].float() # float [0, 1]
model_normals = data['model_normals'].float()
meta_data = data['meta_data']
with TorchTimer("Transform and project", agg_list=self.timing_list, timing = self.timing, verbose=False):
# Transform and project point cloud
trans_pts = torch.einsum('ijk,mk->imj', transforms[:,:3,:3], model_points) + transforms[:,:3,3].unsqueeze(1)
f_cam = torch.tensor([meta_data['camera_fx'], meta_data['camera_fy']])
c_cam = torch.tensor([meta_data['camera_cx'], meta_data['camera_cy']])
proj_pts = trans_pts[:,:,:2]/trans_pts[:,:,2:]*f_cam + c_cam
uv = proj_pts.long()
invalid_proj = (uv[:,:,1]>=img.shape[0]) + (uv[:,:,1]<0) \
+ (uv[:,:,0]>=img.shape[1]) + (uv[:,:,0]< 0)
uv[invalid_proj] = 0
# Projected depth
proj_depth = trans_pts[:,:,-1]
'''Jitter the color as data augmentation'''
if self.mode == "train":
img = img.permute(2, 0, 1) # (H, W, C) to (C, H, W)
img = self.transform(img)
img = img.permute(1, 2, 0) # (C, H, W) to (H, W, C)
if self.cojitter:
H, W, C = img.shape # (H, W, C)
N, _ = model_colors.shape
data_cojitter = torch.cat([
img.reshape((1, -1, 3)),
model_colors.reshape((1, -1, 3))
], dim=1)
data_cojitter = data_cojitter.permute(2, 0, 1)
cojittered = self.transform_cojitter(data_cojitter)
cojittered = cojittered.permute(1, 2, 0)
img = cojittered[0, :H*W, :].reshape((H, W, C))
model_colors = cojittered[0, H*W:, :].reshape((N, C))
# RGB to HSV
with TorchTimer("RGB to HSV", agg_list=self.timing_list, timing = self.timing, verbose=False):
if self.HSV:
with np.errstate(divide='ignore'):
img_rgb = img2uint8(to_np(img))
# img_hsv = rgb2hsv(img_rgb) # this will convert it to range [0, 1]
img_hsv = cv2.cvtColor(img_rgb,cv2.COLOR_RGB2HSV)
img_hsv = img_hsv.astype(float) / 255.0
img = torch.from_numpy(img_hsv).to(img.device).float()
model_colors_rgb = img2uint8(np.expand_dims(to_np(model_colors), 0))
# model_colors_hsv = rgb2hsv(model_colors_rgb)[0]
model_colors_hsv = cv2.cvtColor(model_colors_rgb,cv2.COLOR_RGB2HSV)[0]
model_colors_hsv = model_colors_hsv.astype(float) / 255.0
model_colors = torch.from_numpy(model_colors_hsv).to(model_colors.device).float()
# Sample the observed HSVD
with TorchTimer("Sample obvervation", agg_list=self.timing_list, timing = self.timing, verbose=False):
obs_color = img[uv[:,:,1], uv[:,:,0], :]
obs_depth = depth[uv[:,:,1], uv[:,:,0]]
with TorchTimer("Hypo Pruning", agg_list=self.timing_list, timing = self.timing, verbose=False):
if self.args.inconst_ratio_th is not None and self.mode == "test":
d_diff = proj_depth - obs_depth
n_points = model_points.shape[0]
invalid_count = (d_diff < -0.02).sum(-1).float()
invalid_ratio = invalid_count / n_points
th = self.args.inconst_ratio_th
idx = invalid_ratio < (th/100.0)
idx[-1] = True
# At least preserve some non-oracle hypos
if idx.sum() == 1:
idx[0] = True
pruning_mask = idx
transforms = transforms[idx]
trans_pts = trans_pts[idx]
obs_color = obs_color[idx]
obs_depth = obs_depth[idx]
uv = uv[idx]
invalid_proj = invalid_proj[idx]
proj_depth = proj_depth[idx]
self.SelectDataByIdx(data, idx)
uv_original = copy.deepcopy(uv)
data['uv_original'] = uv_original
# Transform normals
with TorchTimer("Transform and project 2", agg_list=self.timing_list, timing = self.timing, verbose=False):
trans_norms = torch.einsum('ijk,mk->imj', transforms[:,:3,:3], model_normals)
cam_norm_cos = (- trans_pts * trans_norms).sum(-1) / (torch_norm_fast(trans_pts, -1) * torch_norm_fast(trans_norms, -1))
valid_norm = cam_norm_cos > 0
valid_proj = valid_norm * torch.bitwise_not(invalid_proj)
data['valid_proj'] = valid_proj
# x = []
x = model_points.new_empty((len(transforms), len(model_points), self.dim_point))
idx_feat = 0
with TorchTimer("Valid proj/depth", agg_list=self.timing_list, timing = self.timing, verbose=False):
valid_depth = obs_depth > 0
'''Mask channel'''
if not self.args.no_valid_proj:
# x += [valid_proj.unsqueeze(-1).float()]
x[:, :, idx_feat] = valid_proj.float()
idx_feat += 1
if not self.args.no_valid_depth:
# x += [valid_depth.unsqueeze(-1).float()]
x[:, :, idx_feat] = valid_depth.float()
idx_feat += 1
'''XYZ channel'''
with TorchTimer("Normalize uv", agg_list=self.timing_list, timing = self.timing, verbose=False):
if self.uv or self.uvd:
uv = uv.float()
uv_mean = uv.mean(dim=1, keepdim=True)
uv_std = uv.std(dim=1, keepdim=True)
uv = (uv - uv_mean) / uv_std
if self.uv_rot:
n_hypo, n_point, n_coord = uv.shape
'''random flip'''
flip_mat = torch.rand((n_hypo, 1, n_coord)) > 0.5
flip_mat = (flip_mat.type(uv.dtype) - 0.5) * 2
uv = uv * flip_mat
'''random rotation'''
rot_mat = torch.rand((n_hypo, 1, 1)) * 2 * np.pi
rot_mat = torch.cat([
torch.cos(rot_mat), -torch.sin(rot_mat),
torch.sin(rot_mat), torch.cos(rot_mat)
], 2).reshape((-1, 1, 2, 2))
uv = uv.unsqueeze(-1)
uv = torch.matmul(rot_mat, uv)
uv = uv.squeeze()
# x += [uv]
x[:, :, idx_feat:idx_feat+2] = uv
idx_feat += 2
if self.uvd:
d_diff = proj_depth.unsqueeze(-1) - obs_depth.unsqueeze(-1)
d_diff = (d_diff - d_diff.mean(dim=1, keepdim=True)) / d_diff.std(dim=1, keepdim=True)
# x += [d_diff]
x[:, :, idx_feat:idx_feat+1] = d_diff
idx_feat += 1
'''Point data channel'''
if self.cos:
# x += [cam_norm_cos.unsqueeze(-1).float()]
x[:, :, idx_feat] = cam_norm_cos.float()
idx_feat += 1
with TorchTimer("Compute RGBD/HSVD diff", agg_list=self.timing_list, timing = self.timing, verbose=False):
if self.RGB or self.HSV:
if self.diff:
color_diff = model_colors.unsqueeze(0).expand(obs_color.shape) - obs_color
if self.HSV:
color_diff[:,:,0] = color_diff[:,:,0].abs()
color_diff[:,:,0] = np.minimum(color_diff[:,:,0], 1-color_diff[:,:,0])
# x += [color_diff]
x[:, :, idx_feat:idx_feat+3] = color_diff
idx_feat += 3
else:
# x += [model_colors.unsqueeze(0).expand(obs_color.shape), obs_color]
x[:, :, idx_feat:idx_feat+3] = model_colors.unsqueeze(0).expand(obs_color.shape)
idx_feat += 3
x[:, :, idx_feat:idx_feat+3] = obs_color
idx_feat += 3
if self.D:
if self.diff:
# x += [proj_depth.unsqueeze(-1) - obs_depth.unsqueeze(-1)]
x[:, :, idx_feat] = proj_depth - obs_depth
idx_feat += 1
else:
# x += [proj_depth.unsqueeze(-1), obs_depth.unsqueeze(-1)]
x[:, :, idx_feat] = proj_depth
idx_feat += 1
x[:, :, idx_feat] = obs_depth
idx_feat += 1
'''Edge channel'''
with TorchTimer("Edge", agg_list=self.timing_list, timing = self.timing, verbose=False):
if self.edge:
'''Observed edges'''
if "depth_for_edge" in data:
depth_for_edge = data['depth_for_edge']
# print("Using depth_for_edge", depth_for_edge.min(), depth_for_edge.max())
else:
depth_for_edge = depth
with TorchTimer("generate_distance_image", agg_list=self.timing_list, timing = self.timing, verbose=False):
edge_obs = generate_distance_image(depth_for_edge, canny_l=20, canny_h=50)[0,0]
with TorchTimer("Edge sampling", agg_list=self.timing_list, timing = self.timing, verbose=False):
uv = copy.deepcopy(uv_original) # Re-fetch uv, since it was normalized/pruned above
edge_score_obs = edge_obs[uv[:,:,1], uv[:,:,0]]
edge_score_obs = torch.exp(-edge_score_obs / 24)
'''Projected edges'''
with TorchTimer("getRendEdgeScore", agg_list=self.timing_list, timing = self.timing, verbose=False):
if "edge_score_rend" in data:
edge_score_rend = data['edge_score_rend']
else:
with torch.no_grad():
edge_score_rend = getRendEdgeScore(img.to(self.args.edge_gpu), uv_original.to(self.args.edge_gpu)).to(uv_original.device)
'''Normalized edge scores'''
edge_score_rend = edge_score_rend / edge_score_rend.max(1, keepdim=True)[0]
# edge_score_obs = torch.exp(-edge_score_obs / )
if self.edgexnor:
edge_score = edge_score_rend * edge_score_obs + (1 - edge_score_rend) * (1 - edge_score_obs)
# x += [edge_score.unsqueeze(-1)]
x[:, :, idx_feat] = edge_score
idx_feat += 1
elif self.edgecos:
# x += [edge_score_obs.unsqueeze(-1)]
x[:, :, idx_feat] = edge_score_obs
idx_feat += 1
else:
# x += [edge_score_obs.unsqueeze(-1)]
# x += [edge_score_rend.unsqueeze(-1)]
x[:, :, idx_feat] = edge_score_obs
idx_feat += 1
x[:, :, idx_feat] = edge_score_rend
idx_feat += 1
if self.args.camera_scale is not None:
meta_data['camera_scale'] = self.args.camera_scale
'''Use the cos of the angle between observed and rendered normal vectors'''
with TorchTimer("Normal vector", agg_list=self.timing_list, timing = self.timing, verbose=False):
if self.norm_cos:
norm_downsample = self.args.norm_downsample
uv = uv_original # Re-fetch uv, since it was modified above
normals = compute_normals(to_np(depth)[::norm_downsample, ::norm_downsample].astype(np.double), meta_data = meta_data)
normals = torch.from_numpy(normals).float()
scene_normals_proj = normals[uv[:,:,1]//norm_downsample, uv[:,:,0]//norm_downsample]
model_normals_proj = trans_norms
norm_cos = (scene_normals_proj * model_normals_proj).sum(dim=-1) / (torch_norm_fast(scene_normals_proj, -1) * torch_norm_fast(model_normals_proj, -1))
norm_cos[norm_cos != norm_cos] = 0 # NaN != NaN, so this zeroes NaNs from degenerate normals
# x += [norm_cos.unsqueeze(-1).float()]
x[:, :, idx_feat] = norm_cos.float()
idx_feat += 1
# with TorchTimer("torch.cat()", agg_list=self.timing_list, timing = self.timing, verbose=False):
# x = torch.cat(x, dim=-1)
# print(x.shape)
if self.args.hard_mask:
x[~valid_proj.bool()]=0
'''Sample the points'''
if self.drop_ratio >= 0 and self.mode == 'train':
n_hypo = x.shape[0]
n_point = x.shape[1]
n_point_kept = int((1.0-self.drop_ratio) * n_point)
if self.max_points is not None and n_point_kept > self.max_points:
n_point_kept = self.max_points
idx = []
for i in range(n_hypo):
idx.append(torch.randperm(n_point)[:n_point_kept].unsqueeze(0))
idx = torch.cat(idx, dim=0)
x = x[torch.arange(n_hypo).unsqueeze(1).expand(n_hypo, n_point_kept), idx]
uv_sampled = uv_original[torch.arange(n_hypo).unsqueeze(1).expand(n_hypo, n_point_kept), idx]
else:
uv_sampled = uv_original
if return_uv_original:
return x, uv_sampled
else:
return x
def getPointRenderData(self, data):
point_x, uv = self.getPointNetData(data, True)
crop_size = 96
pad_size = 2
n_hypo = uv.shape[0]
n_point = uv.shape[1]
span_min = pad_size
span_max = crop_size - pad_size
mask_index = [0]
# data_index = [0, 1] + list(range(4, point_x.shape[2]))
data_index = list(range(point_x.shape[2]))
n_feat = len(data_index)
point_mask = point_x[:, :, mask_index].bool()
point_data = point_x[:, :, data_index]
uv = uv.float()
uv_max = uv.max(dim=1, keepdim=True)[0]
uv_min = uv.min(dim=1, keepdim=True)[0]
uv_center = (uv_max + uv_min) / 2.0
uv_radius = (uv_max - uv_min).max(-1, True)[0] / 2.0
uv_norm = (uv - uv_center) / uv_radius # range in [-1, 1]
uv_resize = (uv_norm + 1) / 2 * (span_max - span_min) + span_min
uv_resize = uv_resize.long()
u = uv_resize[:, :, 0]
v = uv_resize[:, :, 1]
feature_map = torch.zeros(n_hypo, n_feat, crop_size, crop_size)
t = torch.arange(n_hypo).view(-1,1).repeat(1, n_point)
u = u.reshape(-1)[point_mask.view(-1)]
v = v.reshape(-1)[point_mask.view(-1)]
t = t.view(-1)[point_mask.view(-1)]
feature_map[t.view(-1), :, v.view(-1), u.view(-1)] = point_data.view(-1, n_feat)[point_mask.view(-1)]
mask_map = feature_map[:, 0:1, :, :]
data_map = feature_map[:, 1:, :, :]
return mask_map, data_map
def SelectDataByIdx(self, data, idx):
data['transforms'] = data['transforms'][idx]
data['pp_err'] = data['pp_err'][idx]
if "edge_score_rend" in data:
data['edge_score_rend'] = data['edge_score_rend'][idx]
return data
def __getitem__(self, idx):
dp = self.datapoints[idx]
to_return = {"object_id": dp[0], "scene_id": dp[1], "im_id": dp[2]}
obj_id = dp[0]
scene_id = dp[1]
im_id = dp[2]
'''If only aggregated features are used, return the cached entry'''
if self.return_agg and not self.return_points and self.agg_cache[idx] is not None:
to_return['agg_x'], to_return['pp_err'], to_return['transforms'] = self.agg_cache[idx]
return to_return
# data = loadData(*dp, feature_size = self.feature_size, base_path = self.dataset_root)
# '''Get the model data and send it into the processing function'''
# model_data = self.getModelData(dp[0])
# data.update(model_data)
data = self.loader.loadData(*dp)
assert len(data['pp_err']) == 101 or len(data['pp_err']) == 1101 or len(data['pp_err']) == 301
assert not (self.args.ppf_only and self.args.sift_only)
if self.args.ppf_only:
assert len(data['pp_err']) >= self.args.n_ppf_hypos + 1
idx = list(np.arange(self.args.n_ppf_hypos)) + [-1]
self.SelectDataByIdx(data, idx)
if self.args.sift_only:
assert len(data['pp_err']) >= self.args.n_ppf_hypos + self.args.n_sift_hypos + 1
idx = list(range(self.n_ppf_hypos, self.n_ppf_hypos+self.n_sift_hypos)) + [-1]
data = self.SelectDataByIdx(data, idx)
'''Sample the hypotheses'''
point_x = self.getPointNetData(data)
n_hypo = len(point_x)
to_return['object_id'] = to_return['object_id'].repeat(n_hypo)
to_return['scene_id'] = to_return['scene_id'].repeat(n_hypo)
to_return['im_id'] = to_return['im_id'].repeat(n_hypo)
to_return['pp_err'] = data['pp_err'].reshape(-1)
to_return['transforms'] = data['transforms']
if self.return_agg:
to_return['agg_x'] = self.getAggData(data)
self.agg_cache[idx] = (to_return['agg_x'], to_return['pp_err'], to_return['transforms'])
if self.return_points:
if self.return_rend:
to_return['rend_mask'], to_return['x_rend'] = self.getPointRenderData(data)
to_return['mask_x'] = to_return['rend_mask']
to_return['rend_x'] = to_return['x_rend']
else:
to_return['point_x'] = point_x
# print("to_return['pp_err']", to_return['pp_err'])
# print("to_return['pp_err']", to_return['pp_err'].shape)
# print("to_return['transforms']", to_return['transforms'].shape)
# print("to_return['point_x']", to_return['point_x'].shape)
to_return['dataset_i'] = 0
# For ICP post-processing
to_return['depth'] = data['depth']
to_return['meta_data'] = data['meta_data']
to_return['uv_original'] = data['uv_original']
to_return['model_points'] = data['model_points']
return to_return
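# ---------------------------------------------------------------------------
# Hedged usage sketch (not part of the original file): constructing the
# dataset in its simplest "feat" (aggregated-features-only) configuration.
# Every Namespace field below is inferred from the attribute reads in
# __init__ above; the datapoint tuple and dataset root are hypothetical.
from argparse import Namespace

args = Namespace(
    feature_size=24, norm_cos_weight=0.5, top_n_feat=None, max_hypos=None,
    ppf_only=False, n_ppf_hypos=100, n_sift_hypos=100, use_mask_test=False,
    raw_bop_dataset=False, model_name="pointnet", dataset="feat",
    selected_features=None,
)
datapoints = [(1, 1, 1)]  # hypothetical (object_id, scene_id, im_id)
ds = ScoreDataset(datapoints, "/path/to/prep", "lmo", args, mode="test")
print(len(ds), ds.dim_agg)  # 1 39 -- all 39 aggregated features selected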
| 44.218045
| 167
| 0.534875
| 3,604
| 29,405
| 4.122087
| 0.107658
| 0.017905
| 0.015751
| 0.018309
| 0.349825
| 0.287964
| 0.238018
| 0.186457
| 0.1458
| 0.132337
| 0
| 0.018448
| 0.347424
| 29,405
| 664
| 168
| 44.284639
| 0.755745
| 0.067948
| 0
| 0.168357
| 0
| 0
| 0.054035
| 0.001689
| 0
| 0
| 0
| 0
| 0.008114
| 1
| 0.016227
| false
| 0.002028
| 0.036511
| 0.002028
| 0.070994
| 0.022312
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 91762cf01e789ac760eedf4942c7a866b5214252
| 632
| py
| Python
| src/lingcomp/farm/features.py
| CharlottePouw/interpreting-complexity
| b9a73c0aff18e4c6b4209a6511d00639494c70da
| ["Apache-2.0"]
| 2
| 2020-12-18T12:26:22.000Z
| 2020-12-19T18:47:07.000Z
| src/lingcomp/farm/features.py
| CharlottePouw/interpreting-complexity
| b9a73c0aff18e4c6b4209a6511d00639494c70da
| ["Apache-2.0"]
| null | null | null
| src/lingcomp/farm/features.py
| CharlottePouw/interpreting-complexity
| b9a73c0aff18e4c6b4209a6511d00639494c70da
| ["Apache-2.0"]
| 1
| 2021-05-19T13:39:45.000Z
| 2021-05-19T13:39:45.000Z
|
import torch
from farm.data_handler.samples import Sample
from farm.modeling.prediction_head import RegressionHead
class FeaturesEmbeddingSample(Sample):
def __init__(self, id, clear_text, tokenized=None, features=None, feat_embeds=None):
super().__init__(id, clear_text, tokenized, features)
self.feats_embed = feat_embeds
class FeaturesRegressionHead(RegressionHead):
"""A regression head mixing [CLS] representation
and explicit features for prediction"""
def forward(self, x, feats, **kwargs):
x = torch.cat((x, feats), 1)
logits = self.feed_forward(x)
return logits
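# Hedged shape sketch (not in the original file) of the forward contract
# above: the [CLS] vector and the explicit feature vector are concatenated
# before the feed-forward layer. nn.Linear stands in for the head's real
# feed_forward module; the 768/5 dimensions are illustrative assumptions.
import torch
from torch import nn

feed_forward = nn.Linear(768 + 5, 1)   # stand-in for self.feed_forward
x = torch.randn(8, 768)                # [CLS] representations
feats = torch.randn(8, 5)              # explicit linguistic features
logits = feed_forward(torch.cat((x, feats), 1))
print(logits.shape)                    # torch.Size([8, 1])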
| 31.6
| 88
| 0.724684
| 77
| 632
| 5.74026
| 0.584416
| 0.036199
| 0.049774
| 0.090498
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001934
| 0.181962
| 632
| 19
| 89
| 33.263158
| 0.852998
| 0.129747
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.25
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 9176ff87702ba5b114dba78865e902b3d3390b83
| 2,259
| py
| Python
| dashboard/dashboard.py
| TrustyJAID/Toxic-Cogs
| 870d92067ba2a99b9ade2f957f945b95fdbc80f7
| ["MIT"]
| null | null | null
| dashboard/dashboard.py
| TrustyJAID/Toxic-Cogs
| 870d92067ba2a99b9ade2f957f945b95fdbc80f7
| ["MIT"]
| null | null | null
| dashboard/dashboard.py
| TrustyJAID/Toxic-Cogs
| 870d92067ba2a99b9ade2f957f945b95fdbc80f7
| ["MIT"]
| null | null | null
|
from collections import defaultdict
import discord
from redbot.core import Config, checks, commands
from redbot.core.bot import Red
from redbot.core.utils.chat_formatting import box, humanize_list, inline
from abc import ABC
# ABC Mixins
from dashboard.abc.abc import MixinMeta
from dashboard.abc.mixin import DBMixin, dashboard
# Command Mixins
from dashboard.abc.roles import DashboardRolesMixin
from dashboard.abc.webserver import DashboardWebserverMixin
from dashboard.abc.settings import DashboardSettingsMixin
# RPC Mixins
from dashboard.baserpc import HUMANIZED_PERMISSIONS, DashboardRPC
from dashboard.menus import ClientList, ClientMenu
THEME_COLORS = ["red", "primary", "blue", "green", "greener", "yellow"]
class CompositeMetaClass(type(commands.Cog), type(ABC)):
"""This allows the metaclass used for proper type detection to coexist with discord.py's
metaclass."""
# Thanks to Flare for showing how to use group commands across multiple files. If this breaks, it's his fault
class Dashboard(
DashboardRolesMixin,
DashboardWebserverMixin,
DashboardSettingsMixin,
DBMixin,
commands.Cog,
metaclass=CompositeMetaClass,
):
__version__ = "0.1.6a"
def __init__(self, bot: Red, *args, **kwargs):
super().__init__(*args, **kwargs)
self.bot = bot
self.config = Config.get_conf(self, identifier=473541068378341376)
self.config.register_global(
secret="[Not set]",
redirect="http://127.0.0.1:42356/callback",
clientid=0,
blacklisted=[],
disallowedperms=[],
support="",
defaultcolor="red",
meta={"title": "", "icon": "", "description": "", "color": ""},
)
self.config.register_guild(roles=[])
self.configcache = defaultdict(self.cache_defaults)
self.rpc = DashboardRPC(self)
def cog_unload(self):
self.configcache.clear()
self.rpc.unload()
def cache_defaults(self):
return {"roles": []}
async def initialize(self):
config = await self.config.all_guilds()
for k, v in config.items():
self.configcache[k] = v
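# Hedged illustration (not in the original cog): why configcache uses a
# defaultdict with cache_defaults -- guilds that were never configured still
# yield a safe default instead of raising KeyError.
from collections import defaultdict

cache = defaultdict(lambda: {"roles": []})
print(cache[123456789])  # {'roles': []} -- unseen guild id falls back to the default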
| 30.945205
| 110
| 0.657371
| 248
| 2,259
| 5.895161
| 0.516129
| 0.062244
| 0.05472
| 0.030096
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019197
| 0.239044
| 2,259
| 72
| 111
| 31.375
| 0.831297
| 0.107127
| 0
| 0
| 0
| 0
| 0.057453
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06
| false
| 0
| 0.26
| 0.02
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 91773a1b99193243fe941616b2fc5339f203eb98
| 410
| py
| Python
| algorithms/162.Find-Peak-Element/Python/solution_2.py
| hopeness/leetcode
| 496455fa967f0704d729b4014f92f52b1d69d690
| ["MIT"]
| null | null | null
| algorithms/162.Find-Peak-Element/Python/solution_2.py
| hopeness/leetcode
| 496455fa967f0704d729b4014f92f52b1d69d690
| ["MIT"]
| null | null | null
| algorithms/162.Find-Peak-Element/Python/solution_2.py
| hopeness/leetcode
| 496455fa967f0704d729b4014f92f52b1d69d690
| ["MIT"]
| null | null | null
|
"""
https://leetcode.com/problems/find-peak-element/submissions/
"""
from typing import List
class Solution:
def findPeakElement(self, nums: List[int]) -> int:
l, r = 0, len(nums)-1
while l < r:
lmid = (l + r) // 2
rmid = lmid + 1
if nums[lmid] < nums[rmid]:
l = lmid + 1
else:
r = rmid - 1
return l
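# Quick hedged check of the binary search above: nums[lmid] < nums[rmid]
# implies a peak lies strictly right of lmid; otherwise a peak lies at or
# left of lmid, so the interval always keeps at least one peak.
if __name__ == "__main__":
    print(Solution().findPeakElement([1, 2, 3, 1]))            # 2
    print(Solution().findPeakElement([1, 2, 1, 3, 5, 6, 4]))   # 5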
| 22.777778
| 60
| 0.473171
| 51
| 410
| 3.803922
| 0.607843
| 0.030928
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024292
| 0.397561
| 410
| 17
| 61
| 24.117647
| 0.761134
| 0.146341
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.083333
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 9177bf15b6da687a6ae646c46fc3addf65d8004a
| 2,684
| py
| Python
| data_loader.py
| vinbigdata-medical/MIDL2021-Xray-Classification
| 51359126d07573053059c36e3cd95a7fd7100e0e
| ["MIT"]
| 4
| 2021-04-14T08:04:08.000Z
| 2021-08-10T10:15:00.000Z
| data_loader.py
| vinbigdata-medical/MIDL2021-Xray-Classification
| 51359126d07573053059c36e3cd95a7fd7100e0e
| ["MIT"]
| 1
| 2022-01-13T12:51:31.000Z
| 2022-01-13T12:51:31.000Z
| data_loader.py
| vinbigdata-medical/MIDL2021-Xray-Classification
| 51359126d07573053059c36e3cd95a7fd7100e0e
| ["MIT"]
| null | null | null
|
from torchvision.datasets import ImageFolder
from torchvision import transforms
import random
import os
import torch
from torch.utils.data.dataloader import DataLoader
from utils import constants, get_default_device
from image_folder_with_path import ImageFolderWithPaths
def to_device(data, device):
"""Move tensor(s) to chosen device"""
if isinstance(data, (list, tuple)):
return [to_device(x, device) for x in data]
return data.to(device, non_blocking=True)
class DeviceDataLoader():
""" wrap a Dataloader to move data to a device """
def __init__(self, dl, device):
self.dl = dl
self.device = device
def __iter__(self):
""" yield a batch of data after moving it to device """
for b in self.dl:
yield to_device(b, self.device)
def __len__(self):
""" return number of batch size """
return len(self.dl)
default_device = get_default_device.default_device
train_transforms = transforms.Compose([
transforms.RandomHorizontalFlip(p=0.5),
transforms.RandomRotation(degrees=random.uniform(5, 10)),
transforms.Resize((512, 512)),
transforms.ToTensor(),
])
test_transforms = transforms.Compose([
transforms.Resize((512, 512)),
transforms.ToTensor(),
])
classes = os.listdir(constants.DATA_PATH + constants.TRAIN_PATH)
training_dataset = ImageFolder(constants.DATA_PATH + constants.TRAIN_PATH, transform=train_transforms)
valid_dataset = ImageFolder(constants.DATA_PATH + constants.VAL_PATH, transform=test_transforms)
# testing_dataset = ImageFolder(constants.DATA_PATH + constants.TEST_PATH, transform=test_transforms)
# training_dataset = ImageFolderWithPaths(constants.DATA_PATH + constants.TRAIN_PATH, transform=train_transforms)
# valid_dataset = ImageFolderWithPaths(constants.DATA_PATH + constants.VAL_PATH, transform=test_transforms)
testing_dataset = ImageFolderWithPaths(constants.DATA_PATH + constants.TEST_PATH, transform=test_transforms)
torch.manual_seed(constants.RANDOM_SEED)
train_dl = DataLoader(training_dataset, constants.BATCH_SIZE, shuffle=True, num_workers=8, pin_memory=True)
val_dl = DataLoader(valid_dataset, constants.BATCH_SIZE, num_workers=8, pin_memory=True)
test_dl = DataLoader(testing_dataset, constants.BATCH_SIZE, num_workers=8, pin_memory=True)
"""
Now we can wrap our training and validation data loaders using DeviceDataLoader for automatically transferring batches
of data to GPU (if available), and use to_device to move our model to GPU (if available)
"""
train_dl = DeviceDataLoader(train_dl, default_device)
val_dl = DeviceDataLoader(val_dl, default_device)
test_dl = DeviceDataLoader(test_dl, default_device)
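# Hedged usage sketch (not in the original file): DeviceDataLoader yields
# batches already on default_device, so the training loop needs no manual
# .to(device) calls. Runs only if the constants.DATA_PATH folders exist.
if __name__ == "__main__":
    for images, labels in train_dl:
        print(images.shape, labels.shape, images.device)
        break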
| 37.277778
| 118
| 0.770492
| 351
| 2,684
| 5.663818
| 0.287749
| 0.045775
| 0.059859
| 0.091549
| 0.372233
| 0.372233
| 0.248491
| 0.248491
| 0.248491
| 0.191147
| 0
| 0.008651
| 0.138599
| 2,684
| 72
| 119
| 37.277778
| 0.851211
| 0.175857
| 0
| 0.139535
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.093023
| false
| 0
| 0.186047
| 0
| 0.372093
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 9177c031d705388dfe8031bad5b727ad1032aa9e
| 4,254
| py
| Python
| calliope/test/test_analysis.py
| sjpfenninger/calliope
| a4e49c3b7d37f908bafc84543510eec0b4cf5d9f
| ["Apache-2.0"]
| 1
| 2019-11-11T15:50:16.000Z
| 2019-11-11T15:50:16.000Z
| calliope/test/test_analysis.py
| mhdella/calliope
| a4e49c3b7d37f908bafc84543510eec0b4cf5d9f
| ["Apache-2.0"]
| null | null | null
| calliope/test/test_analysis.py
| mhdella/calliope
| a4e49c3b7d37f908bafc84543510eec0b4cf5d9f
| ["Apache-2.0"]
| 1
| 2019-11-11T15:50:18.000Z
| 2019-11-11T15:50:18.000Z
|
# import matplotlib
# matplotlib.use('Qt5Agg') # Prevents `Invalid DISPLAY variable` errors
import pytest
import tempfile
from calliope import Model
from calliope.utils import AttrDict
from calliope import analysis
from . import common
from .common import assert_almost_equal, solver, solver_io
import matplotlib.pyplot as plt
plt.switch_backend('agg') # Prevents `Invalid DISPLAY variable` errors
class TestModel:
@pytest.fixture(scope='module')
def model(self):
locations = """
locations:
1:
techs: ['ccgt', 'demand_power']
override:
ccgt:
constraints:
e_cap.max: 100
demand_power:
constraints:
r: -50
metadata:
map_boundary: [-10, 35, 5, 45]
location_coordinates:
1: [40, -2]
links:
"""
config_run = """
mode: plan
model: ['{techs}', '{locations}']
subset_t: ['2005-01-01', '2005-01-02']
"""
with tempfile.NamedTemporaryFile(delete=False) as f:
f.write(locations.encode('utf-8'))
f.read()
override_dict = AttrDict({
'solver': solver,
'solver_io': solver_io,
})
model = common.simple_model(config_run=config_run,
config_locations=f.name,
override=override_dict)
model.run()
return model
@pytest.fixture(scope='module')
def builtin_model(self):
model = Model()
model.run()
return model
def test_plot_carrier_production(self, model):
# Just make sure this doesn't raise any exceptions
analysis.plot_carrier_production(model.solution)
def test_plot_timeseries(self, model):
# Just make sure this doesn't raise any exceptions
analysis.plot_timeseries(model.solution,
model.solution['e'].loc[dict(c='power')].sum(dim='x'),
carrier='power', demand='demand_power')
def test_plot_installed_capacities(self, model):
# Just make sure this doesn't raise any exceptions
analysis.plot_installed_capacities(model.solution)
def test_plot_transmission(self, model):
# Just make sure this doesn't raise any exceptions
analysis.plot_transmission(model.solution, map_resolution='c')
def test_get_delivered_cost(self, model):
# TODO this should be tested with a more complex model
assert_almost_equal(analysis.get_delivered_cost(model.solution), 0.1)
def test_get_levelized_cost(self, model):
lcoe = analysis.get_levelized_cost(model.solution)
assert_almost_equal(lcoe.at['ccgt'], 0.1)
def test_get_group_share(self, model):
# TODO this should be tested with a more complex model
share = analysis.get_group_share(model.solution, techs=['ccgt'])
assert share == 1.0
def test_get_unmet_demand_hours(self, builtin_model):
# TODO this should be tested with a more complex model
unmet = analysis.get_unmet_demand_hours(builtin_model.solution)
assert unmet == 1
def test_recompute_levelized_costs(self, model):
# Cost in solution
sol = model.solution
assert_almost_equal(sol['summary'].to_pandas().loc['ccgt', 'levelized_cost_monetary'], 0.1)
# Recomputed cost must be the same
dm = analysis.SolutionModel(model.solution)
recomputed = dm.recompute_levelized_costs('ccgt')
assert_almost_equal(recomputed['total'], 0.1)
def test_recompute_levelized_costs_after_changes(self, model):
# Make changes
dm = analysis.SolutionModel(model.solution)
dm.config_model.techs.ccgt.costs.monetary.e_cap = 50
dm.config_model.techs.ccgt.costs.monetary.om_fuel = 1.0
# Recomputed cost
recomputed = dm.recompute_levelized_costs('ccgt')
assert_almost_equal(recomputed['total'], 1.0, tolerance=0.001)
| 36.672414
| 99
| 0.603197
| 485
| 4,254
| 5.103093
| 0.305155
| 0.06303
| 0.041212
| 0.027475
| 0.40404
| 0.270707
| 0.245657
| 0.217374
| 0.217374
| 0.217374
| 0
| 0.018945
| 0.305125
| 4,254
| 115
| 100
| 36.991304
| 0.818336
| 0.132581
| 0
| 0.166667
| 0
| 0
| 0.211214
| 0.011976
| 0
| 0
| 0
| 0.008696
| 0.095238
| 1
| 0.142857
| false
| 0
| 0.095238
| 0
| 0.27381
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 917a93c6b5689f031c6779f12176c0d60e186575
| 13,198
| py
| Python
| cinder/tests/unit/targets/test_spdknvmf.py
| lightsey/cinder
| e03d68e42e57a63f8d0f3e177fb4287290612b24
| ["Apache-2.0"]
| 3
| 2015-04-02T21:44:36.000Z
| 2016-04-29T21:19:04.000Z
| cinder/tests/unit/targets/test_spdknvmf.py
| lightsey/cinder
| e03d68e42e57a63f8d0f3e177fb4287290612b24
| ["Apache-2.0"]
| 3
| 2016-04-29T21:45:26.000Z
| 2016-05-04T19:41:23.000Z
| cinder/tests/unit/targets/test_spdknvmf.py
| lightsey/cinder
| e03d68e42e57a63f8d0f3e177fb4287290612b24
| ["Apache-2.0"]
| 4
| 2016-01-27T00:25:52.000Z
| 2021-03-25T19:54:08.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import json
from unittest import mock
from cinder import test
from cinder.volume import configuration as conf
from cinder.volume.targets import spdknvmf as spdknvmf_driver
BDEVS = [{
"num_blocks": 4096000,
"name": "Nvme0n1",
"driver_specific": {
"nvme": {
"trid": {
"trtype": "PCIe",
"traddr": "0000:00:04.0"
},
"ns_data": {
"id": 1
},
"pci_address": "0000:00:04.0",
"vs": {
"nvme_version": "1.1"
},
"ctrlr_data": {
"firmware_revision": "1.0",
"serial_number": "deadbeef",
"oacs": {
"ns_manage": 0,
"security": 0,
"firmware": 0,
"format": 0
},
"vendor_id": "0x8086",
"model_number": "QEMU NVMe Ctrl"
},
"csts": {
"rdy": 1,
"cfs": 0
}
}
},
"supported_io_types": {
"reset": True,
"nvme_admin": True,
"unmap": False,
"read": True,
"write_zeroes": False,
"write": True,
"flush": True,
"nvme_io": True
},
"claimed": False,
"block_size": 512,
"product_name": "NVMe disk",
"aliases": ["Nvme0n1"]
}, {
"num_blocks": 8192,
"uuid": "70efd305-4e66-49bd-99ff-faeda5c3052d",
"aliases": [
"Nvme0n1p0"
],
"driver_specific": {
"lvol": {
"base_bdev": "Nvme0n1",
"lvol_store_uuid": "58b17014-d4a1-4f85-9761-093643ed18f1",
"thin_provision": False
}
},
"supported_io_types": {
"reset": True,
"nvme_admin": False,
"unmap": True,
"read": True,
"write_zeroes": True,
"write": True,
"flush": False,
"nvme_io": False
},
"claimed": False,
"block_size": 4096,
"product_name": "Split Disk",
"name": "Nvme0n1p0"
}, {
"num_blocks": 8192,
"uuid": "70efd305-4e66-49bd-99ff-faeda5c3052d",
"aliases": [
"Nvme0n1p1"
],
"driver_specific": {
"lvol": {
"base_bdev": "Nvme0n1",
"lvol_store_uuid": "58b17014-d4a1-4f85-9761-093643ed18f1",
"thin_provision": False
}
},
"supported_io_types": {
"reset": True,
"nvme_admin": False,
"unmap": True,
"read": True,
"write_zeroes": True,
"write": True,
"flush": False,
"nvme_io": False
},
"claimed": False,
"block_size": 4096,
"product_name": "Split Disk",
"name": "Nvme0n1p1"
}, {
"num_blocks": 8192,
"uuid": "70efd305-4e66-49bd-99ff-faeda5c3052d",
"aliases": [
"lvs_test/lvol0"
],
"driver_specific": {
"lvol": {
"base_bdev": "Malloc0",
"lvol_store_uuid": "58b17014-d4a1-4f85-9761-093643ed18f1",
"thin_provision": False
}
},
"supported_io_types": {
"reset": True,
"nvme_admin": False,
"unmap": True,
"read": True,
"write_zeroes": True,
"write": True,
"flush": False,
"nvme_io": False
},
"claimed": False,
"block_size": 4096,
"product_name": "Logical Volume",
"name": "58b17014-d4a1-4f85-9761-093643ed18f1_4294967297"
}, {
"num_blocks": 8192,
"uuid": "8dec1964-d533-41df-bea7-40520efdb416",
"aliases": [
"lvs_test/lvol1"
],
"driver_specific": {
"lvol": {
"base_bdev": "Malloc0",
"lvol_store_uuid": "58b17014-d4a1-4f85-9761-093643ed18f1",
"thin_provision": True
}
},
"supported_io_types": {
"reset": True,
"nvme_admin": False,
"unmap": True,
"read": True,
"write_zeroes": True,
"write": True,
"flush": False,
"nvme_io": False
},
"claimed": False,
"block_size": 4096,
"product_name": "Logical Volume",
"name": "58b17014-d4a1-4f85-9761-093643ed18f1_4294967298"
}]
NVMF_SUBSYSTEMS = [{
"listen_addresses": [],
"subtype": "Discovery",
"nqn": "nqn.2014-08.org.nvmexpress.discovery",
"hosts": [],
"allow_any_host": True
}, {
"listen_addresses": [],
"subtype": "NVMe",
"hosts": [{
"nqn": "nqn.2016-06.io.spdk:init"
}],
"namespaces": [{
"bdev_name": "Nvme0n1p0",
"nsid": 1,
"name": "Nvme0n1p0"
}],
"allow_any_host": False,
"serial_number": "SPDK00000000000001",
"nqn": "nqn.2016-06.io.spdk:cnode1"
}, {
"listen_addresses": [],
"subtype": "NVMe",
"hosts": [],
"namespaces": [{
"bdev_name": "Nvme1n1p0",
"nsid": 1,
"name": "Nvme1n1p0"
}],
"allow_any_host": True,
"serial_number": "SPDK00000000000002",
"nqn": "nqn.2016-06.io.spdk:cnode2"
}]
class JSONRPCException(Exception):
def __init__(self, message):
self.message = message
class JSONRPCClient(object):
def __init__(self, addr=None, port=None):
self.methods = {"bdev_get_bdevs": self.get_bdevs,
"construct_nvmf_subsystem":
self.construct_nvmf_subsystem,
"nvmf_delete_subsystem": self.delete_nvmf_subsystem,
"nvmf_create_subsystem": self.nvmf_subsystem_create,
"nvmf_subsystem_add_listener":
self.nvmf_subsystem_add_listener,
"nvmf_subsystem_add_ns":
self.nvmf_subsystem_add_ns,
"nvmf_get_subsystems": self.get_nvmf_subsystems}
self.bdevs = copy.deepcopy(BDEVS)
self.nvmf_subsystems = copy.deepcopy(NVMF_SUBSYSTEMS)
def __del__(self):
pass
def get_bdevs(self, params=None):
if params and 'name' in params:
for bdev in self.bdevs:
for alias in bdev['aliases']:
if params['name'] in alias:
return json.dumps({"result": [bdev]})
if bdev['name'] == params['name']:
return json.dumps({"result": [bdev]})
return json.dumps({"error": "Not found"})
return json.dumps({"result": self.bdevs})
def get_nvmf_subsystems(self, params=None):
return json.dumps({"result": self.nvmf_subsystems})
def construct_nvmf_subsystem(self, params=None):
nvmf_subsystem = {
"listen_addresses": [],
"subtype": "NVMe",
"hosts": [],
"namespaces": [{
"bdev_name": "Nvme1n1p0",
"nsid": 1,
"name": "Nvme1n1p0"
}],
"allow_any_host": True,
"serial_number": params['serial_number'],
"nqn": params['nqn']
}
self.nvmf_subsystems.append(nvmf_subsystem)
return json.dumps({"result": nvmf_subsystem})
def delete_nvmf_subsystem(self, params=None):
found_id = -1
i = 0
for nvmf_subsystem in self.nvmf_subsystems:
if nvmf_subsystem['nqn'] == params['nqn']:
found_id = i
i += 1
if found_id != -1:
del self.nvmf_subsystems[found_id]
return json.dumps({"result": {}})
def nvmf_subsystem_create(self, params=None):
nvmf_subsystem = {
"namespaces": [],
"nqn": params['nqn'],
"serial_number": "S0000000000000000001",
"allow_any_host": False,
"subtype": "NVMe",
"hosts": [],
"listen_addresses": []
}
self.nvmf_subsystems.append(nvmf_subsystem)
return json.dumps({"result": nvmf_subsystem})
def nvmf_subsystem_add_listener(self, params=None):
for nvmf_subsystem in self.nvmf_subsystems:
if nvmf_subsystem['nqn'] == params['nqn']:
nvmf_subsystem['listen_addresses'].append(
params['listen_address']
)
return json.dumps({"result": ""})
def nvmf_subsystem_add_ns(self, params=None):
for nvmf_subsystem in self.nvmf_subsystems:
if nvmf_subsystem['nqn'] == params['nqn']:
nvmf_subsystem['namespaces'].append(
params['namespace']
)
return json.dumps({"result": ""})
def call(self, method, params=None):
req = {}
req['jsonrpc'] = '2.0'
req['method'] = method
req['id'] = 1
if (params):
req['params'] = params
response = json.loads(self.methods[method](params))
if not response:
return {}
if 'error' in response:
msg = "\n".join(["Got JSON-RPC error response",
"request:",
json.dumps(req, indent=2),
"response:",
json.dumps(response['error'], indent=2)])
raise JSONRPCException(msg)
return response['result']
class Target(object):
def __init__(self, name="Nvme0n1p0"):
self.name = name
class SpdkNvmfDriverTestCase(test.TestCase):
def setUp(self):
super(SpdkNvmfDriverTestCase, self).setUp()
self.configuration = mock.Mock(conf.Configuration)
self.configuration.target_ip_address = '192.168.0.1'
self.configuration.target_port = '4420'
self.configuration.target_prefix = ""
self.configuration.nvmet_port_id = "1"
self.configuration.nvmet_ns_id = "fake_id"
self.configuration.nvmet_subsystem_name = "nqn.2014-08.io.spdk"
self.configuration.target_protocol = "nvmet_rdma"
self.configuration.spdk_rpc_ip = "127.0.0.1"
self.configuration.spdk_rpc_port = 8000
self.driver = spdknvmf_driver.SpdkNvmf(configuration=
self.configuration)
self.jsonrpcclient = JSONRPCClient()
def test__get_spdk_volume_name(self):
with mock.patch.object(self.driver, "_rpc_call",
self.jsonrpcclient.call):
bdevs = self.driver._rpc_call("bdev_get_bdevs")
bdev_name = bdevs[0]['name']
volume_name = self.driver._get_spdk_volume_name(bdev_name)
self.assertEqual(bdev_name, volume_name)
volume_name = self.driver._get_spdk_volume_name("fake")
self.assertIsNone(volume_name)
def test__get_nqn_with_volume_name(self):
with mock.patch.object(self.driver, "_rpc_call",
self.jsonrpcclient.call):
nqn = self.driver._get_nqn_with_volume_name("Nvme0n1p0")
nqn_tmp = self.driver._rpc_call("nvmf_get_subsystems")[1]['nqn']
self.assertEqual(nqn, nqn_tmp)
nqn = self.driver._get_nqn_with_volume_name("fake")
self.assertIsNone(nqn)
def test__get_first_free_node(self):
with mock.patch.object(self.driver, "_rpc_call",
self.jsonrpcclient.call):
free_node = self.driver._get_first_free_node()
self.assertEqual(3, free_node)
def test_create_nvmeof_target(self):
with mock.patch.object(self.driver, "_rpc_call",
self.jsonrpcclient.call):
subsystems_first = self.driver._rpc_call("nvmf_get_subsystems")
self.driver.create_nvmeof_target("Nvme0n1p1",
"nqn.2016-06.io.spdk",
"192.168.0.1",
4420, "rdma", -1, -1, "")
subsystems_last = self.driver._rpc_call("nvmf_get_subsystems")
self.assertEqual(len(subsystems_first) + 1, len(subsystems_last))
def test_delete_nvmeof_target(self):
with mock.patch.object(self.driver, "_rpc_call",
self.jsonrpcclient.call):
subsystems_first = self.driver._rpc_call("nvmf_get_subsystems")
target = Target()
self.driver.delete_nvmeof_target(target)
subsystems_last = self.driver._rpc_call("nvmf_get_subsystems")
self.assertEqual(len(subsystems_first) - 1, len(subsystems_last))
target.name = "fake"
self.driver.delete_nvmeof_target(target)
self.assertEqual(len(subsystems_first) - 1, len(subsystems_last))
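# Hedged illustration (not part of the test suite): the fake JSONRPCClient
# can be exercised directly, which is exactly what the mock.patch.object
# calls above rely on.
if __name__ == "__main__":
    client = JSONRPCClient()
    print(client.call("bdev_get_bdevs")[0]["name"])            # Nvme0n1
    print(client.call("nvmf_get_subsystems")[0]["subtype"])    # Discovery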
| 32.268949
| 78
| 0.539855
| 1,342
| 13,198
| 5.076006
| 0.195976
| 0.051527
| 0.020992
| 0.027452
| 0.4931
| 0.430123
| 0.412214
| 0.391368
| 0.381092
| 0.361568
| 0
| 0.057786
| 0.329974
| 13,198
| 408
| 79
| 32.348039
| 0.712541
| 0.04137
| 0
| 0.451253
| 0
| 0
| 0.233758
| 0.048113
| 0
| 0
| 0.000475
| 0
| 0.022284
| 1
| 0.050139
| false
| 0.002786
| 0.016713
| 0.002786
| 0.111421
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 917afbcd55aefac0dcfd4785b8010a4e43b0d1c3
| 4,204
| py
| Python
| server/algos/euler/transformer.py
| yizhang7210/Acre
| c98cf8a4fdfb223a1958e8e61df759f889a1b13f
| ["MIT"]
| 2
| 2017-11-27T21:55:21.000Z
| 2017-12-30T03:34:40.000Z
| server/algos/euler/transformer.py
| yizhang7210/Acre
| c98cf8a4fdfb223a1958e8e61df759f889a1b13f
| ["MIT"]
| 30
| 2017-09-06T12:00:08.000Z
| 2018-06-20T22:47:46.000Z
| server/algos/euler/transformer.py
| yizhang7210/Acre
| c98cf8a4fdfb223a1958e8e61df759f889a1b13f
| ["MIT"]
| 1
| 2021-04-05T13:59:37.000Z
| 2021-04-05T13:59:37.000Z
|
""" This is algos.euler.transformer module.
This module is responsible for transforming raw candle data into training
samples usable by the Euler algorithm.
"""
import datetime
import decimal
from algos.euler.models import training_samples as ts
from core.models import instruments
from datasource.models import candles
TWO_PLACES = decimal.Decimal('0.01')
def extract_features(day_candle):
""" Extract the features for the learning algorithm from a daily candle.
The Features are:
high_bid, low_bid, close_bid, open_ask, high_ask, low_ask,
and close_ask (all relative to open_bid) in pips.
Args:
day_candle: candles.Candle object representing a daily candle.
Returns:
features: List of Decimals. The features described above, all in two
decimal places.
"""
multiplier = day_candle.instrument.multiplier
features = [
day_candle.high_bid,
day_candle.low_bid,
day_candle.close_bid,
day_candle.open_ask,
day_candle.high_ask,
day_candle.low_ask,
day_candle.close_ask,
]
features = [multiplier * (x - day_candle.open_bid) for x in features]
features = [decimal.Decimal(x).quantize(TWO_PLACES) for x in features]
return features
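# Worked example (hedged, values illustrative): with a JPY-style multiplier
# of 100 (1 pip = 0.01), open_bid = 110.00 and high_bid = 110.50 give the
# high_bid feature 100 * (110.50 - 110.00) = 50.00 pips.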
def get_profitable_change(day_candle):
""" Get the potential daily profitable price change in pips.
If prices rise enough, we have: close_bid - open_ask (> 0), buy.
If prices fall enough, we have: close_ask - open_bid (< 0), sell.
if prices stay relatively still, we don't buy or sell. It's 0.
Args:
day_candle: candles.Candle object representing a daily candle.
Returns:
profitable_change: Decimal. The profitable rate change described
above, in two decimal places.
"""
multiplier = day_candle.instrument.multiplier
change = 0
if day_candle.close_bid > day_candle.open_ask:
change = multiplier * (day_candle.close_bid - day_candle.open_ask)
elif day_candle.close_ask < day_candle.open_bid:
change = multiplier * (day_candle.close_ask - day_candle.open_bid)
return decimal.Decimal(change).quantize(TWO_PLACES)
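# Worked example (hedged, values illustrative): with a 4-decimal pip
# (multiplier 10000), close_bid = 1.10100 > open_ask = 1.10010 gives
# 10000 * (1.10100 - 1.10010) = 9.00 pips (profitable buy); if instead
# close_ask = 1.09900 < open_bid = 1.10000, the change is
# 10000 * (1.09900 - 1.10000) = -10.00 pips (profitable sell).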
def build_sample_row(candle_previous, candle_next):
""" Build one training sample from two consecutive days of candles.
Args:
candle_previous: candles.Candle object. Candle of first day.
candle_next: candles.Candle object. Candle of second day.
Returns:
sample: TrainingSample object. One training sample for learning.
"""
return ts.create_one(
instrument=candle_next.instrument,
date=candle_next.start_time.date() + datetime.timedelta(1),
features=extract_features(candle_previous),
target=get_profitable_change(candle_next))
def get_start_time(instrument):
""" Get the start time for retrieving candles of the given instrument.
This is determined by the last training sample in the database.
Args:
instrument: Instrument object. The given instrument.
Returns:
start_time: Datetime object. The datetime from which to query
candles from to fill the rest of the training samples.
"""
last_sample = ts.get_last(instrument)
if last_sample is not None:
start_date = last_sample.date - datetime.timedelta(1)
return datetime.datetime.combine(start_date, datetime.time())
return datetime.datetime(2005, 1, 1)
def run():
""" Update the training samples in the database from the latest candles.
This should be run daily to ensure the training set is up-to-date.
Args:
None.
"""
all_new_samples = []
for instrument in instruments.get_all():
start_time = get_start_time(instrument)
new_candles = candles.get_candles(
instrument=instrument, start=start_time, order_by='start_time')
for i in range(len(new_candles) - 1):
all_new_samples.append(
build_sample_row(new_candles[i], new_candles[i + 1]))
ts.insert_many(all_new_samples)
| 35.033333
| 80
| 0.674833
| 553
| 4,204
| 4.943942
| 0.251356
| 0.075713
| 0.030724
| 0.018654
| 0.17959
| 0.148135
| 0.148135
| 0.148135
| 0.087783
| 0.046086
| 0
| 0.005411
| 0.252617
| 4,204
| 119
| 81
| 35.327731
| 0.864736
| 0.41746
| 0
| 0.04
| 0
| 0
| 0.006548
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.1
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 917d24af3dd098f693a886046f82e8514c7bd83a
| 2,628
| py
| Python
| CEST/Evaluation/lorenzian.py
| ludgerradke/bMRI
| dcf93749bb2fba3700e6bcfde691355d55090951
| ["MIT"]
| null | null | null
| CEST/Evaluation/lorenzian.py
| ludgerradke/bMRI
| dcf93749bb2fba3700e6bcfde691355d55090951
| ["MIT"]
| null | null | null
| CEST/Evaluation/lorenzian.py
| ludgerradke/bMRI
| dcf93749bb2fba3700e6bcfde691355d55090951
| ["MIT"]
| null | null | null
|
import numpy as np
import math
from scipy.optimize import curve_fit
def calc_lorentzian(CestCurveS, x_calcentires, mask, config):
(rows, colums, z_slices, entires) = CestCurveS.shape
lorenzian = {key: np.zeros((rows, colums, z_slices), dtype=float) for key in config.lorenzian_keys}
for k in range(z_slices):
for i in range(rows):
for j in range(colums):
if mask[i, j, k] != 0:
params = calc_lorenzian_pixel(CestCurveS[i, j, k, :], x_calcentires, config.Lorenzian['MT_f'],
config.Lorenzian['NOE1_f'], config.Lorenzian['NOE2_f'],
config.Lorenzian['OH_f'], config.Lorenzian['NH_f'])
if params is None:
continue
dic = {
'OH_a': params[3],
'OH_w': params[4],
'NH_a': params[5],
'NH_w': params[6],
'NOE1_a': params[7],
'NOE1_w': params[8],
'NOE2_a': params[9],
'NOE2_w': params[10],
'MT_a': params[11],
'MT_w': params[12],
}
for key in config.lorenzian_keys:
lorenzian[key][i, j, k] = dic[key]
return lorenzian
def calc_lorenzian_pixel(values, x_calcentires, MT_f, NOE1_f, NOE2_f, OH_f, NH_f):
# wassr_offset = 0 because the Z-spectra were already corrected beforehand
fit = lorenz_like_matlab(wassr_offset=0, MT_f=MT_f, NOE1_f=NOE1_f, NOE2_f=NOE2_f, OH_f=OH_f, NH_f=NH_f)
try:
param, param_cov = curve_fit(fit, x_calcentires, values, bounds=([0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[10, 10, 10, 10, 10, 10, 10, 10, 10, 10, 10,
10,
10]))
except RuntimeError:
param = None
return param
def lorenz_like_matlab(wassr_offset, MT_f: float = - 2.43, NOE1_f: float = - 1, NOE2_f: float = - 2.6,
OH_f: float = + 1.4, NH_f: float = + 3.2):
# X_f = frequency of pool X
# ret = (a + ak) - (a * ((b ** 2) / 4) / (((b ** 2) / 4) + (x - wassr_offset) ** 2))
# Left unimplemented in the original; a hedged sketch follows below.
pass
def one_lorenz(x, amplitude, width, wassr_offset, frequenz):
return amplitude * ((width ** 2) / 4) / (((width ** 2) / 4) + (x - (wassr_offset + frequenz)) ** 2)
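# Hedged sketch of the fit function left as `pass` above. The 13-parameter
# layout is inferred from the bounds in calc_lorenzian_pixel and the dict in
# calc_lorentzian (params[3:] = OH_a, OH_w, NH_a, NH_w, NOE1_a, NOE1_w,
# NOE2_a, NOE2_w, MT_a, MT_w); params[0:3] are assumed to be a baseline plus
# the water pool amplitude/width. This is an assumption, not the original code.
def lorenz_like_matlab_sketch(wassr_offset, MT_f=-2.43, NOE1_f=-1.0, NOE2_f=-2.6,
                              OH_f=+1.4, NH_f=+3.2):
    def fit(x, base, water_a, water_w, OH_a, OH_w, NH_a, NH_w,
            NOE1_a, NOE1_w, NOE2_a, NOE2_w, MT_a, MT_w):
        # Baseline minus one Lorentzian per pool, each offset by its frequency
        return (base
                - one_lorenz(x, water_a, water_w, wassr_offset, 0.0)
                - one_lorenz(x, OH_a, OH_w, wassr_offset, OH_f)
                - one_lorenz(x, NH_a, NH_w, wassr_offset, NH_f)
                - one_lorenz(x, NOE1_a, NOE1_w, wassr_offset, NOE1_f)
                - one_lorenz(x, NOE2_a, NOE2_w, wassr_offset, NOE2_f)
                - one_lorenz(x, MT_a, MT_w, wassr_offset, MT_f))
    return fit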
| 45.310345
| 117
| 0.459665
| 324
| 2,628
| 3.530864
| 0.287037
| 0.020979
| 0.028846
| 0.034965
| 0.189685
| 0.081294
| 0.034091
| 0.034091
| 0.034091
| 0.034091
| 0
| 0.057516
| 0.417808
| 2,628
| 57
| 118
| 46.105263
| 0.690196
| 0.060122
| 0
| 0
| 0
| 0
| 0.029197
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088889
| false
| 0.022222
| 0.066667
| 0.022222
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 917ddc860e3cb5987c6d77cf2eda4923d9234d7a
| 7,572
| py
| Python
| components/network_models_LSTU.py
| neuralchen/CooGAN
| 3155cbb5a283226474356d3a9f01918609ddd4ec
| ["MIT"]
| 12
| 2020-12-09T07:04:12.000Z
| 2022-03-01T03:30:46.000Z
| components/network_models_LSTU.py
| neuralchen/CooGAN
| 3155cbb5a283226474356d3a9f01918609ddd4ec
| ["MIT"]
| null | null | null
| components/network_models_LSTU.py
| neuralchen/CooGAN
| 3155cbb5a283226474356d3a9f01918609ddd4ec
| ["MIT"]
| 4
| 2020-12-23T03:57:53.000Z
| 2022-03-28T13:56:14.000Z
|
#!/usr/bin/env python3
# -*- coding:utf-8 -*-
#############################################################
# File: network_models_LSTU.py
# Created Date: Tuesday February 25th 2020
# Author: Chen Xuanhong
# Email: chenxuanhongzju@outlook.com
# Last Modified: Tuesday, 25th February 2020 9:57:06 pm
# Modified By: Chen Xuanhong
# Copyright (c) 2020 Shanghai Jiao Tong University
#############################################################
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from functools import partial
import tensorflow as tf
import tensorflow.contrib.slim as slim
import tflib as tl
conv = partial(slim.conv2d, activation_fn=None)
dconv = partial(slim.conv2d_transpose, activation_fn=None)
fc = partial(tl.flatten_fully_connected, activation_fn=None)
relu = tf.nn.relu
lrelu = tf.nn.leaky_relu
sigmoid = tf.nn.sigmoid
tanh = tf.nn.tanh
batch_norm = partial(slim.batch_norm, scale=True, updates_collections=None)
instance_norm = slim.instance_norm
MAX_DIM = 64 * 16
def Genc(x, dim=64, n_layers=5, multi_inputs=1, is_training=True):
bn = partial(batch_norm, is_training=is_training)
conv_bn_lrelu = partial(conv, normalizer_fn=bn, activation_fn=lrelu)
with tf.variable_scope('Genc', reuse=tf.AUTO_REUSE):
h, w = x.shape[1:3]
z = x
zs = []
for i in range(n_layers):
d = min(dim * 2**i, MAX_DIM)
if multi_inputs > i and i > 0:
z = tf.concat([z, tf.image.resize_bicubic(x, (h//(2**i), w//(2**i)))], 3)
z = conv_bn_lrelu(z, d, 4, 2)
zs.append(z)
return zs
def LSTU(in_data, state, out_channel, is_training=True, kernel_size=3, norm='none', pass_state='lstate'):
if norm == 'bn':
norm_fn = partial(batch_norm, is_training=is_training)
elif norm == 'in':
norm_fn = instance_norm
else:
norm_fn = None
gate = partial(conv, normalizer_fn=norm_fn, activation_fn=sigmoid)
info = partial(conv, normalizer_fn=norm_fn, activation_fn=tanh)
with tf.name_scope('ConvGRUCell'):
state_ = dconv(state, out_channel, 4, 2) # upsample and make `channel` identical to `out_channel`
reset_gate = gate(tf.concat([in_data, state_], axis=3), 1, kernel_size)
update_gate = gate(tf.concat([in_data, state_], axis=3), 1, kernel_size)
new_state = reset_gate * state_
new_info = info(tf.concat([in_data, new_state], axis=3), out_channel, kernel_size)
output = (1-update_gate)*state_ + update_gate*new_info
if pass_state == 'gru':
return output, output
elif pass_state == 'direct':
return output, state_
else: # 'stu'
return output, new_state
# state_hat = dconv(old_state, outdim, 4, 2)
# tmp_concat= _concat(x, state_hat, None)
# channelpool1=tf.concat([tf.reduce_max(tmp_concat,3,True), tf.reduce_mean(tmp_concat,3,True)], axis=3)
# r_channel=conv(channelpool1,1,7,1,normalizer_fn=None,activation_fn=sigmoid)
# new_state = r_channel * state_hat
# tmp_concat= _concat(x, new_state, None)
# hidden_info = conv(tmp_concat,outdim,3,1,normalizer_fn=None,activation_fn=tanh)
# tmp_concat= _concat(x, state_hat, None)
# channelpool2=tf.concat([tf.reduce_max(tmp_concat,3,True), tf.reduce_mean(tmp_concat,3,True)], axis=3)
# z=conv(channelpool2,1,7,1,normalizer_fn=None,activation_fn=sigmoid)
# output =z *hidden_info +(1-z)*state_hat
# return output,new_state
def Gstu(zs, _a, dim=64, n_layers=1, inject_layers=0, is_training=True, kernel_size=3, norm='none', pass_state='stu'):
def _concat(z, z_, _a):
feats = [z]
if z_ is not None:
feats.append(z_)
if _a is not None:
_a = tf.reshape(_a, [-1, 1, 1, tl.shape(_a)[-1]])
_a = tf.tile(_a, [1, tl.shape(z)[1], tl.shape(z)[2], 1])
feats.append(_a)
return tf.concat(feats, axis=3)
with tf.variable_scope('Gstu', reuse=tf.AUTO_REUSE):
zs_ = [zs[-1]]
state = _concat(zs[-1], None, _a)
for i in range(n_layers): # n_layers <= 4
d = min(dim * 2**(n_layers - 1 - i), MAX_DIM)
output = LSTU(zs[n_layers - 1 - i],state,d,is_training=is_training,
kernel_size=kernel_size, norm=norm, pass_state=pass_state)
zs_.insert(0, output[0])
if inject_layers > i:
state = _concat(output[1], None, _a)
else:
state = output[1]
return zs_
def Gdec(zs, _a, dim=64, n_layers=5, shortcut_layers=1, inject_layers=0, is_training=True, one_more_conv=0):
bn = partial(batch_norm, is_training=is_training)
dconv_bn_relu = partial(dconv, normalizer_fn=bn, activation_fn=relu)
shortcut_layers = min(shortcut_layers, n_layers - 1)
inject_layers = min(inject_layers, n_layers - 1)
def _concat(z, z_, _a):
feats = [z]
if z_ is not None:
feats.append(z_)
if _a is not None:
_a = tf.reshape(_a, [-1, 1, 1, tl.shape(_a)[-1]])
_a = tf.tile(_a, [1, tl.shape(z)[1], tl.shape(z)[2], 1])
feats.append(_a)
return tf.concat(feats, axis=3)
with tf.variable_scope('Gdec', reuse=tf.AUTO_REUSE):
z = _concat(zs[-1], None, _a)
for i in range(n_layers):
if i < n_layers - 1:
d = min(dim * 2**(n_layers - 1 - i), MAX_DIM)
z = dconv_bn_relu(z, d, 4, 2)
if shortcut_layers > i:
z = _concat(z, zs[n_layers - 2 - i], None)
if inject_layers > i:
z = _concat(z, None, _a)
else:
if one_more_conv: # add one more conv after the decoder
z = dconv_bn_relu(z, dim//4, 4, 2)
x = tf.nn.tanh(dconv(z, 3, one_more_conv))
else:
x = z = tf.nn.tanh(dconv(z, 3, 4, 2))
return x
def D(x, n_att, dim=64, fc_dim=MAX_DIM, n_layers=5):
conv_in_lrelu = partial(conv, normalizer_fn=instance_norm, activation_fn=lrelu)
with tf.variable_scope('D', reuse=tf.AUTO_REUSE):
y = x
for i in range(n_layers):
d = min(dim * 2**i, MAX_DIM)
y = conv_in_lrelu(y, d, 4, 2)
logit_gan = lrelu(fc(y, fc_dim))
logit_gan = fc(logit_gan, 1)
logit_att = lrelu(fc(y, fc_dim))
logit_att = fc(logit_att, n_att)
return logit_gan, logit_att
def gradient_penalty(f, real, fake=None):
def _interpolate(a, b=None):
with tf.name_scope('interpolate'):
if b is None: # interpolation in DRAGAN
beta = tf.random_uniform(shape=tf.shape(a), minval=0., maxval=1.)
_, variance = tf.nn.moments(a, list(range(a.shape.ndims)))  # axes as an explicit list of ints
b = a + 0.5 * tf.sqrt(variance) * beta
shape = [tf.shape(a)[0]] + [1] * (a.shape.ndims - 1)
alpha = tf.random_uniform(shape=shape, minval=0., maxval=1.)
inter = a + alpha * (b - a)
inter.set_shape(a.get_shape().as_list())
return inter
with tf.name_scope('gradient_penalty'):
x = _interpolate(real, fake)
pred = f(x)
if isinstance(pred, tuple):
pred = pred[0]
grad = tf.gradients(pred, x)[0]
norm = tf.norm(slim.flatten(grad), axis=1)
gp = tf.reduce_mean((norm - 1.)**2)
return gp
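# Hedged usage sketch (illustrative; not part of the original model file):
# how `gradient_penalty` is typically combined with D into a WGAN-GP style
# discriminator loss. `xa` (real batch), `xb` (fake batch) and `lambda_gp`
# are assumed placeholder names, not definitions from this file.
def _wgan_gp_d_loss_sketch(xa, xb, n_att, lambda_gp=10.0):
    d_fn = partial(D, n_att=n_att)
    xa_logit_gan, _ = d_fn(xa)
    xb_logit_gan, _ = d_fn(xb)
    wd = tf.reduce_mean(xa_logit_gan) - tf.reduce_mean(xb_logit_gan)  # Wasserstein estimate
    gp = gradient_penalty(d_fn, xa, xb)  # penalizes gradient norms away from 1
    return -wd + lambda_gp * gp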
| 39.233161
| 118
| 0.58822
| 1,119
| 7,572
| 3.748883
| 0.186774
| 0.026698
| 0.013349
| 0.01907
| 0.366865
| 0.325387
| 0.29702
| 0.257926
| 0.20739
| 0.189273
| 0
| 0.026734
| 0.268885
| 7,572
| 192
| 119
| 39.4375
| 0.731033
| 0.148838
| 0
| 0.25
| 0
| 0
| 0.012863
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064286
| false
| 0.035714
| 0.05
| 0
| 0.192857
| 0.007143
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9181932ab3632366f38b401fcbe5e47425259914
| 6,809
|
py
|
Python
|
test/functional/bchn-txbroadcastinterval.py
|
1Crazymoney/bitcoin-cash-node
|
8f82823b3c5d4bcb401b0e4e6b464c1228f936e1
|
[
"MIT"
] | 1
|
2021-11-24T03:54:05.000Z
|
2021-11-24T03:54:05.000Z
|
test/functional/bchn-txbroadcastinterval.py
|
1Crazymoney/bitcoin-cash-node
|
8f82823b3c5d4bcb401b0e4e6b464c1228f936e1
|
[
"MIT"
] | null | null | null |
test/functional/bchn-txbroadcastinterval.py
|
1Crazymoney/bitcoin-cash-node
|
8f82823b3c5d4bcb401b0e4e6b464c1228f936e1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) 2020 The Bitcoin Cash Node developers
# Author matricz
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""
Test that inv messages are sent according to
an exponential distribution with scale -txbroadcastinterval
The outbound interval should be half of the inbound
"""
import time
from test_framework.mininode import P2PInterface, mininode_lock
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import wait_until, connect_nodes, disconnect_nodes
from scipy import stats
class InvReceiver(P2PInterface):
def __init__(self):
super().__init__()
self.invTimes = []
self.invDelays = []
def on_inv(self, message):
timeArrived = time.time()
# If an inv contains more than one transaction, then the number of invs
# (== sample size) will be non-deterministic. This would be an error.
assert(len(message.inv) == 1)
self.invTimes.append(timeArrived)
if len(self.invTimes) > 1:
timediff = self.invTimes[-1] - self.invTimes[-2]
self.invDelays.append(timediff)
class TxBroadcastIntervalTest(BitcoinTestFramework):
# This test has a node create a number of transactions and relay them
# to the mininode InvReceivers (one inbound and one outbound)
# according to the test parameters.
# A third, disconnected node is used only to create signed transactions.
# The nodes are configured with "-txbroadcastrate=1" and
# "-excessiveblocksize=2000000" so that they relay at most one tx per inv.
# This is convenient, because we can then define the exact number of invs
# we want to send (the sample of delays is one element smaller).
# This holds true only for interval values <= 500 ms.
# The mininode InvReceiver just listens, registers the delays between invs,
# and constructs a sample array from these delays.
# This sample is tested against a reference exponential distribution
# with the same parameters using scipy.stats.kstest
# (see https://en.wikipedia.org/wiki/Kolmogorov%E2%80%93Smirnov_test).
# The test passes if the delay sample resembles the reference
# distribution -- more specifically, if the probability of the observed
# sample arising from the theoretical exponential distribution exceeds
# the threshold alpha (pvalue > alpha, default 0.001). A bare-bones
# version of this check is sketched after the end of this file.
# One mininode connects directly to the node that generates transactions;
# this tests the *inbound* connection interval.
# The first node creates an outbound connection to the second node,
# which relays the transactions instantly (-txbroadcastinterval=1)
# to the second mininode, which tests the *outbound* connection interval
# (= 1/2 of the inbound), though this is less reliable for small values
# of -txbroadcastinterval.
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def add_options(self, parser):
parser.add_argument("--interval", dest="interval", type=int, default=500,
help="Set the average send interval in ms")
parser.add_argument("--samplesize", dest="samplesize", type=int, default=100,
help="Set the samplesize (number of inv message delays) for testing")
parser.add_argument("--testoutbound", dest="testoutbound", action="store_true",
help="Set whether to test outbound (along inbound) connection interval")
parser.add_argument("--alpha", dest="alpha", type=float, default="0.001",
help="Set a confidence threshold for the kstest")
def set_test_params(self):
self.scale = self.options.interval / 1000
self.num_nodes = 3
args = [
["-txbroadcastinterval={}".format(self.options.interval),
"-txbroadcastrate=1", "-excessiveblocksize=2000000",
"-limitancestorcount={}".format(self.options.samplesize+1),
"-limitdescendantcount={}".format(self.options.samplesize+1)],
["-txbroadcastinterval=1",
"-txbroadcastrate=1", "-excessiveblocksize=2000000",
"-limitancestorcount={}".format(self.options.samplesize+1),
"-limitdescendantcount={}".format(self.options.samplesize+1)],
["-limitancestorcount={}".format(self.options.samplesize+1),
"-limitdescendantcount={}".format(self.options.samplesize+1)]
]
self.extra_args = args
def setup_network(self):
self.setup_nodes()
connect_nodes(self.nodes[0], self.nodes[1])
connect_nodes(self.nodes[1], self.nodes[2])
# Generate enough coins on the spending nodes
self.nodes[2].generate(20 + 100)
self.sync_all()
# Disconnect node 3 so that it doesn't broadcast the txs it creates
disconnect_nodes(self.nodes[1], self.nodes[2])
self.signedtxs = []
to = self.nodes[2].getnewaddress()
for i in range(self.options.samplesize):
txid = self.nodes[2].sendtoaddress(to, "0.00001", "comment", "comment_to", False, 2)
self.signedtxs.append(self.nodes[2].gettransaction(txid)['hex'])
def run_test(self):
inboundReceiver, outboundReceiver = InvReceiver(), InvReceiver()
self.nodes[0].add_p2p_connection(inboundReceiver)
self.nodes[1].add_p2p_connection(outboundReceiver)
for signedtx in self.signedtxs:
self.nodes[0].sendrawtransaction(signedtx, True)
wait_until(
lambda: len(inboundReceiver.invTimes) == self.options.samplesize,
lock=mininode_lock,
timeout=self.options.samplesize * self.options.interval / 1000 * 2)
wait_until(
lambda: len(outboundReceiver.invTimes) == self.options.samplesize,
lock=mininode_lock,
timeout=self.options.samplesize * self.options.interval / 1000)
inboundkstestresult = stats.kstest(inboundReceiver.invDelays, stats.expon(scale=self.scale).cdf)
outboundkstestresult = stats.kstest(outboundReceiver.invDelays, stats.expon(scale=self.scale / 2).cdf)
self.log.info("kstestresults for interval {}: inbound {}, outbound {}".format(
self.options.interval,
inboundkstestresult,
outboundkstestresult))
assert(inboundkstestresult.pvalue > self.options.alpha), inboundReceiver.invDelays
if self.options.testoutbound:
assert(outboundkstestresult.pvalue > self.options.alpha), outboundReceiver.invDelays
if __name__ == '__main__':
TxBroadcastIntervalTest().main()
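# Hedged standalone sketch (illustrative; not part of the original test):
# the acceptance criterion above reduces to a Kolmogorov-Smirnov test of
# the observed inter-inv delays against an exponential CDF. The synthetic
# sample and the default argument values below are assumptions for
# demonstration only.
def _kstest_demo(scale=0.5, samplesize=100, alpha=0.001):
    import numpy as np
    delays = np.random.exponential(scale, size=samplesize)  # stand-in for measured inv delays
    result = stats.kstest(delays, stats.expon(scale=scale).cdf)
    return result.pvalue > alpha  # True means the sample is consistent with the model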
| 46.958621
| 110
| 0.679101
| 800
| 6,809
| 5.70625
| 0.34125
| 0.045783
| 0.050602
| 0.035487
| 0.147207
| 0.147207
| 0.132749
| 0.121796
| 0.121796
| 0.121796
| 0
| 0.020995
| 0.223528
| 6,809
| 144
| 111
| 47.284722
| 0.842444
| 0.321339
| 0
| 0.121951
| 0
| 0
| 0.143231
| 0.051747
| 0
| 0
| 0
| 0
| 0.036585
| 1
| 0.085366
| false
| 0
| 0.060976
| 0
| 0.170732
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9183b4d3330e5dc6c4da3188d85901cf1703c4d4
| 3,178
|
py
|
Python
|
plugins/voila/voila/__init__.py
|
srinivasreddych/aws-orbit-workbench
|
2d154addff58d26f5459a73c06148aaf5e9fad46
|
[
"Apache-2.0"
] | 94
|
2021-03-19T19:55:11.000Z
|
2022-03-31T19:50:01.000Z
|
plugins/voila/voila/__init__.py
|
srinivasreddych/aws-orbit-workbench
|
2d154addff58d26f5459a73c06148aaf5e9fad46
|
[
"Apache-2.0"
] | 410
|
2021-03-19T18:04:48.000Z
|
2022-03-22T13:56:53.000Z
|
plugins/voila/voila/__init__.py
|
srinivasreddych/aws-orbit-workbench
|
2d154addff58d26f5459a73c06148aaf5e9fad46
|
[
"Apache-2.0"
] | 24
|
2021-03-19T23:16:23.000Z
|
2022-03-04T01:05:18.000Z
|
# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License").
# You may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import os
from typing import TYPE_CHECKING, Any, Dict, Optional
import aws_orbit
from aws_orbit.plugins import hooks
from aws_orbit.remote_files import helm
if TYPE_CHECKING:
from aws_orbit.models.context import Context, TeamContext
_logger: logging.Logger = logging.getLogger("aws_orbit")
CHART_PATH = os.path.join(os.path.dirname(__file__))
@hooks.deploy
def deploy(
plugin_id: str,
context: "Context",
team_context: "TeamContext",
parameters: Dict[str, Any],
) -> None:
_logger.debug("Team Env name: %s | Team name: %s", context.name, team_context.name)
plugin_id = plugin_id.replace("_", "-")
_logger.debug("plugin_id: %s", plugin_id)
chart_path = helm.create_team_charts_copy(team_context=team_context, path=CHART_PATH, target_path=plugin_id)
vars: Dict[str, Optional[str]] = dict(
team=team_context.name,
region=context.region,
account_id=context.account_id,
env_name=context.name,
restart_policy=parameters["restartPolicy"] if "restartPolicy" in parameters else "Always",
path=parameters["path"] if "path" in parameters else "/home/jovyan/shared/voila",
options=parameters["options"] if "options" in parameters else "",
plugin_id=plugin_id,
toolkit_s3_bucket=context.toolkit.s3_bucket,
image_pull_policy="Always" if aws_orbit.__version__.endswith(".dev0") else "IfNotPresent",
image=parameters["image"] if "image" in parameters else team_context.final_image_address,
sts_ep="legacy" if context.networking.data.internet_accessible else "regional",
)
repo_location = team_context.team_helm_repository
if repo_location:
repo = team_context.name
helm.add_repo(repo=repo, repo_location=repo_location)
chart_name, chart_version, chart_package = helm.package_chart(repo=repo, chart_path=chart_path, values=vars)
helm.install_chart(
repo=repo,
namespace=team_context.name,
name=f"{team_context.name}-{plugin_id}",
chart_name=chart_name,
chart_version=chart_version,
)
@hooks.destroy
def destroy(
plugin_id: str,
context: "Context",
team_context: "TeamContext",
parameters: Dict[str, Any],
) -> None:
_logger.debug(
"Delete Plugin %s of Team Env name: %s | Team name: %s",
plugin_id,
context.name,
team_context.name,
)
helm.uninstall_chart(f"{team_context.name}-{plugin_id}", namespace=team_context.name)
| 37.833333
| 116
| 0.701385
| 425
| 3,178
| 5.04
| 0.36
| 0.071895
| 0.056022
| 0.029412
| 0.170868
| 0.118581
| 0.096172
| 0.076564
| 0.076564
| 0.076564
| 0
| 0.002746
| 0.197923
| 3,178
| 83
| 117
| 38.289157
| 0.837583
| 0.193518
| 0
| 0.163934
| 0
| 0
| 0.131083
| 0.034144
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032787
| false
| 0
| 0.114754
| 0
| 0.147541
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91848acd7c9a76b40212893d24a66f1267e0b221
| 4,316
|
py
|
Python
|
tools/generate_driver_list.py
|
aarunsai81/netapp
|
8f0f7bf9be7f4d9fb9c3846bfc639c90a05f86ba
|
[
"Apache-2.0"
] | 11
|
2015-08-25T13:11:18.000Z
|
2020-10-15T11:29:20.000Z
|
tools/generate_driver_list.py
|
aarunsai81/netapp
|
8f0f7bf9be7f4d9fb9c3846bfc639c90a05f86ba
|
[
"Apache-2.0"
] | 5
|
2018-01-25T11:31:56.000Z
|
2019-05-06T23:13:35.000Z
|
tools/generate_driver_list.py
|
aarunsai81/netapp
|
8f0f7bf9be7f4d9fb9c3846bfc639c90a05f86ba
|
[
"Apache-2.0"
] | 11
|
2015-02-20T18:48:24.000Z
|
2021-01-30T20:26:18.000Z
|
#! /usr/bin/env python
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""Generate list of cinder drivers"""
import argparse
import os
from cinder.interface import util
parser = argparse.ArgumentParser(prog="generate_driver_list")
parser.add_argument("--format", default='str', choices=['str', 'dict'],
help="Output format type")
# Keep backwards compatibility with the gate-docs test.
# The tests pass ['docs'] on the command line, but it's never been used.
parser.add_argument("output_list", default=None, nargs='?')
CI_WIKI_ROOT = "https://wiki.openstack.org/wiki/ThirdPartySystems/"
class Output(object):
def __init__(self, base_dir, output_list):
# At this point we don't care what was passed in, just a trigger
# to write this out to the doc tree for now
self.driver_file = None
if output_list:
self.driver_file = open(
'%s/doc/source/drivers.rst' % base_dir, 'w+')
self.driver_file.write('===================\n')
self.driver_file.write('Available Drivers\n')
self.driver_file.write('===================\n\n')
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
if self.driver_file:
self.driver_file.close()
def write(self, text):
if self.driver_file:
self.driver_file.write('%s\n' % text)
else:
print(text)
def format_description(desc, output):
desc = desc or '<None>'
lines = desc.rstrip('\n').split('\n')
for line in lines:
output.write(' %s' % line)
def print_drivers(drivers, config_name, output):
for driver in sorted(drivers, key=lambda x: x.class_fqn):
output.write(driver.class_name)
output.write('-' * len(driver.class_name))
if driver.version:
output.write('* Version: %s' % driver.version)
output.write('* %s=%s' % (config_name, driver.class_fqn))
if driver.ci_wiki_name:
output.write('* CI info: %s%s' % (CI_WIKI_ROOT,
driver.ci_wiki_name))
output.write('* Description:')
format_description(driver.desc, output)
output.write('')
output.write('')
def output_str(cinder_root, args):
with Output(cinder_root, args.output_list) as output:
output.write('Volume Drivers')
output.write('==============')
print_drivers(util.get_volume_drivers(), 'volume_driver', output)
output.write('Backup Drivers')
output.write('==============')
print_drivers(util.get_backup_drivers(), 'backup_driver', output)
output.write('FC Zone Manager Drivers')
output.write('=======================')
print_drivers(util.get_fczm_drivers(), 'zone_driver', output)
def collect_driver_info(driver):
"""Build the dictionary that describes this driver."""
info = {'name': driver.class_name,
'version': driver.version,
'fqn': driver.class_fqn,
'description': driver.desc,
'ci_wiki_name': driver.ci_wiki_name}
return info
def output_dict():
import pprint
driver_list = []
drivers = util.get_volume_drivers()
for driver in drivers:
driver_list.append(collect_driver_info(driver))
pprint.pprint(driver_list)
def main():
tools_dir = os.path.dirname(os.path.abspath(__file__))
cinder_root = os.path.dirname(tools_dir)
cur_dir = os.getcwd()
os.chdir(cinder_root)
args = parser.parse_args()
try:
if args.format == 'str':
output_str(cinder_root, args)
elif args.format == 'dict':
output_dict()
finally:
os.chdir(cur_dir)
if __name__ == '__main__':
main()
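# Hedged illustration (not produced by running this file here): for a
# hypothetical driver, `print_drivers` emits reStructuredText shaped like:
#
#   FooDriver
#   ---------
#   * Version: 1.0
#   * volume_driver=cinder.volume.drivers.foo.FooDriver
#   * CI info: https://wiki.openstack.org/wiki/ThirdPartySystems/Foo_CI
#   * Description:
#     <None>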
| 30.609929
| 78
| 0.621177
| 550
| 4,316
| 4.690909
| 0.345455
| 0.063953
| 0.048837
| 0.029457
| 0.143798
| 0.087209
| 0.066279
| 0
| 0
| 0
| 0
| 0.001225
| 0.243281
| 4,316
| 140
| 79
| 30.828571
| 0.788732
| 0.202271
| 0
| 0.069767
| 0
| 0
| 0.137295
| 0.026932
| 0
| 0
| 0
| 0
| 0
| 1
| 0.116279
| false
| 0
| 0.046512
| 0.011628
| 0.197674
| 0.081395
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9186f6c899c8a19e537fae60a274b21c711b183a
| 7,649
|
py
|
Python
|
demos/odyssey/dodyssey.py
|
steingabelgaard/reportlab
|
b9a537e8386fb4b4b80e9ec89e0cdf392dbd6f61
|
[
"BSD-3-Clause"
] | 55
|
2019-09-21T02:45:18.000Z
|
2021-12-10T13:38:51.000Z
|
demos/odyssey/dodyssey.py
|
cnauroth/reportlab
|
377d4ff58491dc6de48551e730c3d7f72db783e5
|
[
"BSD-3-Clause"
] | 4
|
2019-09-26T03:16:50.000Z
|
2021-12-10T13:40:49.000Z
|
demos/odyssey/dodyssey.py
|
cnauroth/reportlab
|
377d4ff58491dc6de48551e730c3d7f72db783e5
|
[
"BSD-3-Clause"
] | 26
|
2019-09-25T03:54:30.000Z
|
2022-03-21T14:03:12.000Z
|
#Copyright ReportLab Europe Ltd. 2000-2017
#see license.txt for license details
__version__='3.3.0'
__doc__=''
#REPORTLAB_TEST_SCRIPT
import sys, copy, os
from reportlab.platypus import *
_NEW_PARA=os.environ.get('NEW_PARA','0')[0] in ('y','Y','1')
_REDCAP=int(os.environ.get('REDCAP','0'))
_CALLBACK=os.environ.get('CALLBACK','0')[0] in ('y','Y','1')
if _NEW_PARA:
def Paragraph(s,style):
from rlextra.radxml.para import Paragraph as PPPP
return PPPP(s,style)
from reportlab.lib.units import inch
from reportlab.lib.styles import getSampleStyleSheet
from reportlab.lib.enums import TA_LEFT, TA_RIGHT, TA_CENTER, TA_JUSTIFY
import reportlab.rl_config
reportlab.rl_config.invariant = 1
styles = getSampleStyleSheet()
Title = "The Odyssey"
Author = "Homer"
def myTitlePage(canvas, doc):
canvas.saveState()
canvas.restoreState()
def myLaterPages(canvas, doc):
canvas.saveState()
canvas.setFont('Times-Roman',9)
canvas.drawString(inch, 0.75 * inch, "Page %d" % doc.page)
canvas.restoreState()
def go():
def myCanvasMaker(fn,**kw):
from reportlab.pdfgen.canvas import Canvas
canv = Canvas(fn,**kw)
# attach our callback to the canvas
canv.myOnDrawCB = myOnDrawCB
return canv
doc = BaseDocTemplate('dodyssey.pdf',showBoundary=0)
#normal frame as for SimpleFlowDocument
frameT = Frame(doc.leftMargin, doc.bottomMargin, doc.width, doc.height, id='normal')
#Two Columns
frame1 = Frame(doc.leftMargin, doc.bottomMargin, doc.width/2-6, doc.height, id='col1')
frame2 = Frame(doc.leftMargin+doc.width/2+6, doc.bottomMargin, doc.width/2-6,
doc.height, id='col2')
doc.addPageTemplates([PageTemplate(id='First',frames=frameT, onPage=myTitlePage),
PageTemplate(id='OneCol',frames=frameT, onPage=myLaterPages),
PageTemplate(id='TwoCol',frames=[frame1,frame2], onPage=myLaterPages),
])
doc.build(Elements,canvasmaker=myCanvasMaker)
Elements = []
ChapterStyle = copy.deepcopy(styles["Heading1"])
ChapterStyle.alignment = TA_CENTER
ChapterStyle.fontSize = 14  # the ParagraphStyle attribute is fontSize (camelCase)
InitialStyle = copy.deepcopy(ChapterStyle)
InitialStyle.fontSize = 16
InitialStyle.leading = 20
PreStyle = styles["Code"]
def newPage():
Elements.append(PageBreak())
chNum = 0
def myOnDrawCB(canv,kind,label):
print('myOnDrawCB(%s)'%kind, 'Page number=', canv.getPageNumber(), 'label value=', label)
def chapter(txt, style=ChapterStyle):
global chNum
Elements.append(NextPageTemplate('OneCol'))
newPage()
chNum += 1
if _NEW_PARA or not _CALLBACK:
Elements.append(Paragraph(txt, style))
else:
Elements.append(Paragraph(('foo<onDraw name="myOnDrawCB" label="chap %d"/> '%chNum)+txt, style))
Elements.append(Spacer(0.2*inch, 0.3*inch))
if useTwoCol:
Elements.append(NextPageTemplate('TwoCol'))
def fTitle(txt,style=InitialStyle):
Elements.append(Paragraph(txt, style))
ParaStyle = copy.deepcopy(styles["Normal"])
ParaStyle.spaceBefore = 0.1*inch
if 'right' in sys.argv:
ParaStyle.alignment = TA_RIGHT
elif 'left' in sys.argv:
ParaStyle.alignment = TA_LEFT
elif 'justify' in sys.argv:
ParaStyle.alignment = TA_JUSTIFY
elif 'center' in sys.argv or 'centre' in sys.argv:
ParaStyle.alignment = TA_CENTER
else:
ParaStyle.alignment = TA_JUSTIFY
useTwoCol = 'notwocol' not in sys.argv
def spacer(inches):
Elements.append(Spacer(0.1*inch, inches*inch))
def p(txt, style=ParaStyle):
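# Note on the _REDCAP modes below (behaviour unchanged):
#   1: wrap the first letter of the paragraph in a red, larger font;
#   >=2 (and n > 20): also highlight a letter near the middle in blue bold italic;
#   ==3 (and n > 20): also highlight the last letter in green.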
if _REDCAP:
fs, fe = '<font color="red" size="+2">', '</font>'
n = len(txt)
for i in range(n):
if 'a'<=txt[i]<='z' or 'A'<=txt[i]<='Z':
txt = (txt[:i]+(fs+txt[i]+fe))+txt[i+1:]
break
if _REDCAP>=2 and n>20:
j = i+len(fs)+len(fe)+1+int((n-1)/2)
while not ('a'<=txt[j]<='z' or 'A'<=txt[j]<='Z'): j += 1
txt = (txt[:j]+('<b><i><font size="+2" color="blue">'+txt[j]+'</font></i></b>'))+txt[j+1:]
if _REDCAP==3 and n>20:
n = len(txt)
fs = '<font color="green" size="+1">'
for i in range(n-1,-1,-1):
if 'a'<=txt[i]<='z' or 'A'<=txt[i]<='Z':
txt = txt[:i]+((fs+txt[i]+fe)+txt[i+1:])
break
Elements.append(Paragraph(txt, style))
firstPre = 1
def pre(txt, style=PreStyle):
global firstPre
if firstPre:
Elements.append(NextPageTemplate('OneCol'))
newPage()
firstPre = 0
spacer(0.1)
p = Preformatted(txt, style)
Elements.append(p)
def parseOdyssey(fn):
from time import time
E = []
t0=time()
text = open(fn,'r').read()
i0 = text.index('Book I')
endMarker = 'covenant of peace between the two contending parties.'
i1 = text.index(endMarker)+len(endMarker)
PREAMBLE=list(map(str.strip,text[0:i0].split('\n')))
L=list(map(str.strip,text[i0:i1].split('\n')))
POSTAMBLE=list(map(str.strip,text[i1:].split('\n')))
def ambleText(L):
while L and not L[0]: L.pop(0)
while L:
T=[]
while L and L[0]:
T.append(L.pop(0))
yield T
while L and not L[0]: L.pop(0)
def mainText(L):
while L:
B = L.pop(0)
while not L[0]: L.pop(0)
T=[]
while L and L[0]:
T.append(L.pop(0))
while not L[0]: L.pop(0)
P = []
while L and not (L[0].startswith('Book ') and len(L[0].split())==2):
E=[]
while L and L[0]:
E.append(L.pop(0))
P.append(E)
if L:
while not L[0]: L.pop(0)
yield B,T,P
t1 = time()
print("open(%s,'r').read() took %.4f seconds" %(fn,t1-t0))
E.append([spacer,2])
E.append([fTitle,'<font color="red">%s</font>' % Title, InitialStyle])
E.append([fTitle,'<font size="-4">by</font> <font color="green">%s</font>' % Author, InitialStyle])
for T in ambleText(PREAMBLE):
E.append([p,'\n'.join(T)])
for (B,T,P) in mainText(L):
E.append([chapter,B])
E.append([p,'<font size="+1" color="Blue"><b>%s</b></font>' % '\n'.join(T),ParaStyle])
for x in P:
E.append([p,' '.join(x)])
firstPre = 1
for T in ambleText(POSTAMBLE):
E.append([p,'\n'.join(T)])
t3 = time()
print("Parsing into memory took %.4f seconds" %(t3-t1))
del L
t4 = time()
print("Deleting list of lines took %.4f seconds" %(t4-t3))
for i in range(len(E)):
E[i][0](*E[i][1:])
t5 = time()
print("Moving into platypus took %.4f seconds" %(t5-t4))
del E
t6 = time()
print("Deleting list of actions took %.4f seconds" %(t6-t5))
go()
t7 = time()
print("saving to PDF took %.4f seconds" %(t7-t6))
print("Total run took %.4f seconds"%(t7-t0))
import hashlib
print('file digest: %s' % hashlib.md5(open('dodyssey.pdf','rb').read()).hexdigest())
def run():
for fn in ('odyssey.full.txt','odyssey.txt'):
if os.path.isfile(fn):
parseOdyssey(fn)
break
def doProf(profname,func,*args,**kwd):
import hotshot, hotshot.stats
prof = hotshot.Profile(profname)
prof.runcall(func)
prof.close()
stats = hotshot.stats.load(profname)
stats.strip_dirs()
stats.sort_stats('time', 'calls')
stats.print_stats(20)
if __name__=='__main__':
if '--prof' in sys.argv:
doProf('dodyssey.prof',run)
else:
run()
| 31.093496
| 104
| 0.587397
| 1,060
| 7,649
| 4.195283
| 0.248113
| 0.03463
| 0.010119
| 0.006746
| 0.204183
| 0.117158
| 0.07848
| 0.061839
| 0.061839
| 0.037104
| 0
| 0.024272
| 0.245915
| 7,649
| 245
| 105
| 31.220408
| 0.746706
| 0.023532
| 0
| 0.211823
| 0
| 0
| 0.126759
| 0.009782
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083744
| false
| 0
| 0.054187
| 0
| 0.147783
| 0.049261
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9187649de93ea28a41bff761a58a3a5d39922848
| 764
|
py
|
Python
|
tests/test_fred_fred_view.py
|
Traceabl3/GamestonkTerminal
|
922353cade542ce3f62701e10d816852805b9386
|
[
"MIT"
] | null | null | null |
tests/test_fred_fred_view.py
|
Traceabl3/GamestonkTerminal
|
922353cade542ce3f62701e10d816852805b9386
|
[
"MIT"
] | null | null | null |
tests/test_fred_fred_view.py
|
Traceabl3/GamestonkTerminal
|
922353cade542ce3f62701e10d816852805b9386
|
[
"MIT"
] | null | null | null |
""" econ/fred_view.py tests """
import unittest
from unittest import mock
from io import StringIO
import pandas as pd
# pylint: disable=unused-import
from gamestonk_terminal.econ.fred_view import get_fred_data # noqa: F401
fred_data_mock = """
,GDP
2019-01-01,21115.309
2019-04-01,21329.877
2019-07-01,21540.325
2019-10-01,21747.394
2020-01-01,21561.139
2020-04-01,19520.114
2020-07-01,21170.252
2020-10-01,21494.731
"""
class TestFredFredView(unittest.TestCase):
@mock.patch("gamestonk_terminal.econ.fred_view.Fred.get_series")
def test_get_fred_data(self, mock_get_series):
fred_data = pd.read_csv(StringIO(fred_data_mock), header=0, index_col=0)
mock_get_series.return_value = fred_data
get_fred_data(["--noplot"], "gdp")
| 24.645161
| 80
| 0.747382
| 128
| 764
| 4.257813
| 0.492188
| 0.102752
| 0.066055
| 0.091743
| 0.106422
| 0
| 0
| 0
| 0
| 0
| 0
| 0.199102
| 0.125654
| 764
| 30
| 81
| 25.466667
| 0.616766
| 0.086387
| 0
| 0
| 0
| 0
| 0.33913
| 0.071014
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.227273
| 0
| 0.318182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9187b814b570a612e2b93ab230ce46d039efd3f1
| 4,974
|
py
|
Python
|
lecarb/estimator/lw/lw_tree.py
|
anshumandutt/AreCELearnedYet
|
e2286c3621dea8e4961057b6197c1e14e75aea5a
|
[
"MIT"
] | 34
|
2020-12-14T01:21:29.000Z
|
2022-03-29T04:52:46.000Z
|
lecarb/estimator/lw/lw_tree.py
|
anshumandutt/AreCELearnedYet
|
e2286c3621dea8e4961057b6197c1e14e75aea5a
|
[
"MIT"
] | 5
|
2020-12-28T16:06:22.000Z
|
2022-01-19T18:28:53.000Z
|
lecarb/estimator/lw/lw_tree.py
|
anshumandutt/AreCELearnedYet
|
e2286c3621dea8e4961057b6197c1e14e75aea5a
|
[
"MIT"
] | 12
|
2021-02-08T17:50:13.000Z
|
2022-03-28T11:09:06.000Z
|
import time
import logging
from typing import Dict, Any, Tuple
import pickle
import numpy as np
import xgboost as xgb
from .common import load_lw_dataset, encode_query, decode_label
from ..postgres import Postgres
from ..estimator import Estimator
from ..utils import evaluate, run_test
from ...dataset.dataset import load_table
from ...workload.workload import Query
from ...constants import MODEL_ROOT, NUM_THREADS, PKL_PROTO
L = logging.getLogger(__name__)
class Args:
def __init__(self, **kwargs):
self.trees = 16
self.bins = 200
self.train_num = 10000
# overwrite parameters from user
self.__dict__.update(kwargs)
def train_lw_tree(seed, dataset, version, workload, params, sizelimit):
np.random.seed(seed)
# convert the user parameter dict into an Args object (shared with the lw(nn) variant)
L.info(f"params: {params}")
args = Args(**params)
valid_num = args.train_num // 10
table = load_table(dataset, version)
dataset = load_lw_dataset(table, workload, seed, args.bins)
train_X, train_y, _ = dataset['train']
valid_X, valid_y, valid_gt = dataset['valid']
# Train model
model_path = MODEL_ROOT / table.dataset
model_path.mkdir(parents=True, exist_ok=True)
model_file = model_path / f"{table.version}_{workload}-lwxgb_tr{args.trees}_bin{args.bins}_{args.train_num//1000}k-{seed}.pkl"
L.info(f"Start training...")
start_stmp = time.time()
model = xgb.XGBRegressor(objective='reg:squarederror', n_estimators=args.trees, random_state=seed, n_jobs=NUM_THREADS)
model.fit(train_X[:args.train_num], train_y[:args.train_num], eval_set=[(valid_X[:valid_num], valid_y[:valid_num])])
dur_min = (time.time() - start_stmp) / 60
L.info(f"Finish training, time since start: {dur_min:.4f} mins")
L.info(f"Run on valid set...")
preds = np.maximum(np.round(decode_label(model.predict(valid_X[:valid_num]))), 0.0)
gts = valid_gt[:valid_num]
L.info("Q-Error on validation set:")
_, metrics = evaluate(preds, gts)
state = {
'seed': seed,
'args': args,
'device': 'cpu',
'threads': NUM_THREADS,
'dataset': table.dataset,
'version': table.version,
'workload': workload,
'model': model,
'train_time': dur_min,
'valid_error': {workload: metrics}
# 'model_size': model_size,
}
with open(model_file, 'wb') as f:
pickle.dump(state, f, protocol=PKL_PROTO)
L.info(f'All finished! Time spent since training start: {(time.time()-start_stmp)/60:.2f} mins')
L.info(f"Model saved to {model_file}")
class LWTree(Estimator):
def __init__(self, model, model_name, pg_est, table):
super(LWTree, self).__init__(table=table, model=model_name)
self.model = model
self.pg_est = pg_est
def query(self, query):
if isinstance(query, Query):
query = encode_query(self.table, query, self.pg_est)
return self.query_vector(np.expand_dims(query, axis=0))
def query_vector(self, vec):
start_stmp = time.time()
pred = self.model.predict(vec).item()
dur_ms = (time.time() - start_stmp) * 1e3
return np.maximum(np.round(decode_label(pred)), 0.0), dur_ms
def load_lw_tree(dataset: str, model_name: str) -> Tuple[Estimator, Dict[str, Any]]:
model_file = MODEL_ROOT / dataset / f"{model_name}.pkl"
L.info(f"load model from {model_file} ...")
with open(model_file, 'rb') as f:
state = pickle.load(f)
# load model
args = state['args']
model = state['model']
table = load_table(dataset, state['version'])
pg_est = Postgres(table, args.bins, state['seed'])
estimator = LWTree(model, model_name, pg_est, table)
return estimator, state
def test_lw_tree(dataset: str, version: str, workload: str, params: Dict[str, Any], overwrite: bool) -> None:
"""
params:
model: model file name
use_cache: load processed vectors directly instead of build from queries
"""
# uniform thread number
model_file = MODEL_ROOT / dataset / f"{params['model']}.pkl"
L.info(f"Load model from {model_file} ...")
with open(model_file, 'rb') as f:
state = pickle.load(f)
# load the corresponding version of the table
table = load_table(dataset, state['version'])
# load model
args = state['args']
model = state['model']
pg_est = Postgres(table, args.bins, state['seed'])
estimator = LWTree(model, params['model'], pg_est, table)
L.info(f"Load and built lw(tree) estimator: {estimator}")
if params['use_cache']:
# the test table might have a different version than the training one
test_table = load_table(dataset, version)
lw_dataset = load_lw_dataset(test_table, workload, state['seed'], args.bins)
X, _, gt = lw_dataset['test']
run_test(dataset, version, workload, estimator, overwrite, lw_vec=(X, gt))
else:
run_test(dataset, version, workload, estimator, overwrite)
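# Hedged usage sketch (illustrative; dataset/workload names and the model
# file name are assumptions, not taken from this file): training and
# testing are driven by the two entry points above, e.g.
def _lw_tree_demo():
    params = {'trees': 16, 'bins': 200, 'train_num': 10000}  # matches the Args defaults
    train_lw_tree(seed=123, dataset='census', version='original',
                  workload='base', params=params, sizelimit=0)
    test_lw_tree(dataset='census', version='original', workload='base',
                 params={'model': '<saved-model-name>', 'use_cache': True},
                 overwrite=False)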
| 34.783217
| 130
| 0.65963
| 696
| 4,974
| 4.518678
| 0.242816
| 0.015898
| 0.01717
| 0.026709
| 0.231797
| 0.201908
| 0.133545
| 0.103657
| 0.081399
| 0.081399
| 0
| 0.007366
| 0.208484
| 4,974
| 142
| 131
| 35.028169
| 0.791466
| 0.06916
| 0
| 0.137255
| 0
| 0.019608
| 0.14208
| 0.032855
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068627
| false
| 0
| 0.127451
| 0
| 0.245098
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9187ef6ed78f1f18095fecd6ea3ce015376d4dfc
| 2,525
|
py
|
Python
|
fsim/utils.py
|
yamasampo/fsim
|
30100789b03981dd9ea11c5c2e17a3c53910f724
|
[
"MIT"
] | null | null | null |
fsim/utils.py
|
yamasampo/fsim
|
30100789b03981dd9ea11c5c2e17a3c53910f724
|
[
"MIT"
] | null | null | null |
fsim/utils.py
|
yamasampo/fsim
|
30100789b03981dd9ea11c5c2e17a3c53910f724
|
[
"MIT"
] | null | null | null |
import os
import configparser
from warnings import warn
def read_control_file(control_file):
# Initialize ConfigParser object
config = configparser.ConfigParser(
strict=True,
comment_prefixes=('/*', ';', '#'),
inline_comment_prefixes=('/*', ';', '#')
)
# Parse control file
paths = config.read(control_file)
# Check number of read control files.
if len(paths) == 0:
raise FileNotFoundError(
f'Specified control file, {control_file}, is not found.')
elif len(paths) > 1:
raise TypeError(f'An iterable of type {type(control_file)} was given as a '\
'control file. Only one control file is supported.')
# Check sections. Only 'REQUIRED' and 'OPTIONAL' sections will be used.
assert 'REQUIRED' in config.sections(), \
f'REQUIRED section is not found in {control_file}.'
expected_sections = ['REQUIRED', 'OPTIONAL']
not_expected_sections = [
s for s in config.sections() if s not in expected_sections]
if len(not_expected_sections) >= 1:
msg = f'Unexpected sections, {", ".join(not_expected_sections)}, '\
'were found. These are not used in '\
'the analysis. If you wish to include in the analysis, please '\
'specify in "REQUIRED" or "OPTIONAL" sections.'
warn(msg)
converters_d = {
'pop_size': int,
'ns': float,
'init_mut_num': int,
'generation_num': int,
'total_site_num': int,
'var_site_num': int,
'poly_site_num': int,
'fix_site_num': int,
'output_only_fixation': lambda s: True if s == 'True' else (False if s == 'False' else -9)
}
flattened = [
(opt, converters_d[opt](v))
if opt in converters_d.keys() else (opt, v)
for s in expected_sections
for opt, v in config[s].items()
]
return dict(flattened)
def write_info_to_file(file_handle, separator, *args, **kw_args):
""" Write arguments or keyword arguments to a file. Values will be
separated by a given separator.
"""
output_lines = []
if len(args) > 0:
output_lines.append(separator.join(args))
if len(kw_args) > 0:
for k, v in kw_args.items():
output_lines.append(f'{k}{separator}{v}')
print('\n'.join(output_lines), file=file_handle)
def write_settings(file_handle, **kw_args):
print('[Setting]', file=file_handle)
write_info_to_file(file_handle, separator=' = ', **kw_args)
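# Hedged usage sketch (illustrative; the control-file contents below are an
# assumed minimal example, not shipped with fsim): the REQUIRED section is
# mandatory, and option values are converted via converters_d.
def _read_control_file_demo(path='example.ctl'):
    with open(path, 'w') as f:
        f.write(
            '[REQUIRED]\n'
            'pop_size = 100   ; parsed to int by converters_d\n'
            'ns = 0.001       ; parsed to float\n'
            '[OPTIONAL]\n'
            'output_only_fixation = True\n'
        )
    settings = read_control_file(path)
    assert settings['pop_size'] == 100 and settings['ns'] == 0.001
    return settings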
| 32.371795
| 98
| 0.613861
| 325
| 2,525
| 4.593846
| 0.369231
| 0.073677
| 0.026792
| 0.029471
| 0.045546
| 0.045546
| 0.045546
| 0
| 0
| 0
| 0
| 0.003231
| 0.264554
| 2,525
| 77
| 99
| 32.792208
| 0.800754
| 0.100198
| 0
| 0
| 0
| 0
| 0.253221
| 0.013772
| 0
| 0
| 0
| 0
| 0.017857
| 1
| 0.053571
| false
| 0
| 0.053571
| 0
| 0.125
| 0.035714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
918946b8867e4746cc6439a71e8ab2ad6d7dc6a7
| 2,950
|
py
|
Python
|
src/pymortests/function.py
|
mahgadalla/pymor
|
ee2806b4c93748e716294c42454d611415da7b5e
|
[
"Unlicense"
] | 1
|
2021-07-26T12:58:50.000Z
|
2021-07-26T12:58:50.000Z
|
src/pymortests/function.py
|
mahgadalla/pymor
|
ee2806b4c93748e716294c42454d611415da7b5e
|
[
"Unlicense"
] | null | null | null |
src/pymortests/function.py
|
mahgadalla/pymor
|
ee2806b4c93748e716294c42454d611415da7b5e
|
[
"Unlicense"
] | null | null | null |
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2017 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
import numpy as np
import pytest
from pymor.core.pickle import dumps, loads
from pymor.functions.basic import ConstantFunction, GenericFunction
from pymortests.fixtures.function import function, picklable_function, function_argument
from pymortests.fixtures.parameter import parameters_of_type
from pymortests.pickling import assert_picklable, assert_picklable_without_dumps_function
# monkey-patch np.testing.assert_allclose to behave the same as np.allclose;
# for some reason, the default atol of np.testing.assert_allclose is 0
# while it is 1e-8 for np.allclose
real_assert_allclose = np.testing.assert_allclose
def monkey_allclose(a, b, rtol=1.e-5, atol=1.e-8):
real_assert_allclose(a, b, rtol=rtol, atol=atol)
np.testing.assert_allclose = monkey_allclose
def test_evaluate(function):
f = function
mus = parameters_of_type(f.parameter_type, 4711)
for count in [0, 1, 5, (0, 1), (2, 2, 2)]:
arg = function_argument(f, count, 454)
result = f.evaluate(arg, next(mus))
assert result.shape == arg.shape[:-1] + f.shape_range
def test_lincomb_function():
for steps in (1, 10):
x = np.linspace(0, 1, num=steps)
zero = ConstantFunction(0.0, dim_domain=steps)
for zero in (ConstantFunction(0.0, dim_domain=steps),
GenericFunction(lambda X: np.zeros(X.shape[:-1]), dim_domain=steps)):
for one in (ConstantFunction(1.0, dim_domain=steps),
GenericFunction(lambda X: np.ones(X.shape[:-1]), dim_domain=steps), 1.0):
add = (zero + one) + 0
sub = (zero - one) + np.zeros(())
neg = - zero
assert np.allclose(sub(x), [-1])
assert np.allclose(add(x), [1.0])
assert np.allclose(neg(x), [0.0])
(repr(add), str(add), repr(one), str(one)) # just to cover the respective special funcs too
mul = neg * 1.
assert np.allclose(mul(x), [0.0])
with pytest.raises(AssertionError):
zero + ConstantFunction(dim_domain=steps + 1)
with pytest.raises(AssertionError):
zero * ConstantFunction(dim_domain=steps)
with pytest.raises(AssertionError):
ConstantFunction(dim_domain=0)
def test_pickle(function):
assert_picklable(function)
def test_pickle_without_dumps_function(picklable_function):
assert_picklable_without_dumps_function(picklable_function)
def test_pickle_by_evaluation(function):
f = function
f2 = loads(dumps(f))
mus = parameters_of_type(f.parameter_type, 47)
for arg in function_argument(f, 10, 42):
mu = next(mus)
assert np.all(f.evaluate(arg, mu) == f2.evaluate(arg, mu))
| 38.815789
| 108
| 0.671186
| 410
| 2,950
| 4.697561
| 0.297561
| 0.037383
| 0.050883
| 0.047767
| 0.275182
| 0.188474
| 0.141225
| 0.106957
| 0.066459
| 0
| 0
| 0.029055
| 0.218305
| 2,950
| 75
| 109
| 39.333333
| 0.806158
| 0.146102
| 0
| 0.096154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.288462
| 1
| 0.115385
| false
| 0
| 0.134615
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
918a293306bf241e1a965c6b6c86f2b524157237
| 4,603
|
py
|
Python
|
Code/userIDCrawler.py
|
CarberZ/social-media-mining
|
41aee64a41244a0692987b75b30dedbd0552be49
|
[
"MIT"
] | 2
|
2018-10-16T23:09:00.000Z
|
2018-11-14T04:08:00.000Z
|
Code/userIDCrawler.py
|
CarberZ/social-media-mining
|
41aee64a41244a0692987b75b30dedbd0552be49
|
[
"MIT"
] | 1
|
2018-11-14T04:06:13.000Z
|
2018-11-14T04:15:56.000Z
|
Code/userIDCrawler.py
|
CarberZ/social-media-mining
|
41aee64a41244a0692987b75b30dedbd0552be49
|
[
"MIT"
] | 1
|
2018-11-14T04:06:31.000Z
|
2018-11-14T04:06:31.000Z
|
'''
step 1
get the user IDs and their locations
put them all into a database
'''
from bs4 import BeautifulSoup
import urllib
import sqlite3
from selenium import webdriver
import time
import re
from urllib import request
import random
import pickle
import os
import pytesseract
url_dog = "https://www.douban.com/group/lovelydog/members?start="
url_cat = "https://www.douban.com/group/cat/members?start="
'''
cat = 1 ~ 336770
dog = 1 ~ 156240
'''
class getInfo(object):
memberList = []
type = None
url = None
memberNumber = 0
conn = None
cursor = None
def __init__(self, type):
getInfo.type = type
if type == "cat":
getInfo.url = url_cat
getInfo.memberNumber = 336770
else:
getInfo.url = url_dog
getInfo.memberNumber = 156240
dbName = "CDPeopleDB.sqlite"
# initiate the start point
if not os.path.isfile('stopPoint.pickle'):
with open('stopPoint.pickle', 'wb') as file:  # 'wb', not 'rb': we are writing the initial position
pickle.dump(1, file)
conn = sqlite3.connect(dbName)
getInfo.conn = conn
getInfo.cursor = getInfo.conn.cursor()
# if getInfo.type == 'dog':
# getInfo.cursor.execute("drop table if exists DogPeople")
# getInfo.cursor.execute("create table DogPeople(id varchar(48), location varchar(48))")
# else:
# getInfo.cursor.execute("drop table if exists CatPeople")
# getInfo.cursor.execute("create table CatPeople(id varchar(48), location varchar(48))")
def sliceContent(self, pageContent):
pageContent = re.sub(r"<ul>(.*)</ul>", "\\1", pageContent.replace("\n", ""))
# print(pageContent)
memberList = re.sub(r'<li class=""> (.*?) </li>', "\\1mark", pageContent.strip())
memberList = re.split(r"mark", memberList)
for member in memberList:
if member.strip() != '':
inforContent = re.findall(r'<div class="name">(.*?)</div>', member)
if len(inforContent)!= 0:
inforContent = inforContent[0].strip()
identities = re.findall(r'https://www.douban.com/people/(.*?)/', inforContent)
if len(identities) != 0:
id = identities[0]
location = re.findall(r'<span class="pl">\((.*?)\)</span>', inforContent)
if len(location) != 0:
coordinate = str(location[0])
else:
coordinate = 'Unknown'
else:
continue
if getInfo.type == 'dog':
getInfo.cursor.execute("insert into DogPeople values(?, ?)", (id, coordinate))
else:
getInfo.cursor.execute("insert into CatPeople values(?, ?)", (id, coordinate))
getInfo.conn.commit()
def crawler(self):
opener = urllib.request.build_opener(urllib.request.HTTPSHandler)
header = ("User-Agent",
" Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/61.0.3163.100 Safari/537.36")
opener.addheaders = [header]
driver = webdriver.Chrome()
driver.get(getInfo.url)
time.sleep(20)
# store the current position in case something goes wrong during crawling
with open('stopPoint.pickle', 'rb') as file:
startPoint = pickle.load(file)
# use the record as the start position
for i in range(startPoint, getInfo.memberNumber, 35):
driver.get(getInfo.url+str(i))
page = driver.page_source
soup = BeautifulSoup(page, "html5lib")
print(i)
with open('stopPoint.pickle', 'wb') as file:
pickle.dump(i, file)
memberList = soup.find('div', {'class': 'member-list'}).ul
content = str(memberList)
getInfo.sliceContent(self, pageContent=content)
time.sleep(2+random.random())
# info_dog = getInfo("dog")
# info_dog.crawler()
info_cat = getInfo("cat")
info_cat.crawler()
'''
create table CatPeople
as
select distinct *
from CatPeople_backup
WHERE not location GLOB '*[A-Za-z]*';
pre-processing to delete locations outside China
'''
| 30.483444
| 138
| 0.550728
| 497
| 4,603
| 5.070423
| 0.374245
| 0.036111
| 0.047619
| 0.020238
| 0.184127
| 0.12619
| 0.103968
| 0.029365
| 0
| 0
| 0
| 0.027653
| 0.324354
| 4,603
| 150
| 139
| 30.686667
| 0.782637
| 0.13274
| 0
| 0.072289
| 0
| 0.012048
| 0.167916
| 0.020816
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036145
| false
| 0
| 0.13253
| 0
| 0.253012
| 0.012048
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
918a81c6af8725a4b95ff16551cc06a18c633a21
| 709
|
py
|
Python
|
tbase/network/polices_test.py
|
iminders/TradeBaselines
|
26eb87f2bcd5f6ff479149219b38b17002be6a40
|
[
"MIT"
] | 16
|
2020-03-19T15:12:28.000Z
|
2021-12-20T06:02:32.000Z
|
tbase/network/polices_test.py
|
iminders/TradeBaselines
|
26eb87f2bcd5f6ff479149219b38b17002be6a40
|
[
"MIT"
] | 14
|
2020-03-23T03:57:00.000Z
|
2021-12-20T05:53:33.000Z
|
tbase/network/polices_test.py
|
iminders/TradeBaselines
|
26eb87f2bcd5f6ff479149219b38b17002be6a40
|
[
"MIT"
] | 7
|
2020-03-25T00:30:18.000Z
|
2021-01-31T18:45:09.000Z
|
import unittest
import numpy as np
from tbase.common.cmd_util import set_global_seeds
from tbase.network.polices import RandomPolicy
class TestPolices(unittest.TestCase):
@classmethod
def setUpClass(self):
set_global_seeds(0)
def test_random_policy(self):
policy = RandomPolicy(2)
# action 1
actual = policy.select_action([])
expected = [1.0, -0.2534131770209437]
self.assertEqual(expected, list(actual.astype(np.float)))
# action 2
actual = policy.select_action([])
expected = [-1.0, 0.8324962832376306]
self.assertEqual(expected, list(actual.astype(np.float)))
if __name__ == '__main__':
unittest.main()
| 25.321429
| 65
| 0.671368
| 84
| 709
| 5.464286
| 0.5
| 0.039216
| 0.061002
| 0.104575
| 0.352941
| 0.352941
| 0.352941
| 0.352941
| 0
| 0
| 0
| 0.076087
| 0.221439
| 709
| 27
| 66
| 26.259259
| 0.755435
| 0.023977
| 0
| 0.222222
| 0
| 0
| 0.011611
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 1
| 0.111111
| false
| 0
| 0.222222
| 0
| 0.388889
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
918a8725328fa6920f55c21e0bb7c5f7406c3135
| 36,887
|
py
|
Python
|
keystone/tests/unit/core.py
|
knikolla/keystone
|
50f0a50cf4d52d3f61b64713bd4faa7a4626ae53
|
[
"Apache-2.0"
] | null | null | null |
keystone/tests/unit/core.py
|
knikolla/keystone
|
50f0a50cf4d52d3f61b64713bd4faa7a4626ae53
|
[
"Apache-2.0"
] | null | null | null |
keystone/tests/unit/core.py
|
knikolla/keystone
|
50f0a50cf4d52d3f61b64713bd4faa7a4626ae53
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2012 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import atexit
import base64
import contextlib
import datetime
import functools
import hashlib
import json
import ldap
import os
import shutil
import socket
import sys
import uuid
import warnings
import fixtures
import flask
from flask import testing as flask_testing
import http.client
from oslo_config import fixture as config_fixture
from oslo_context import context as oslo_context
from oslo_context import fixture as oslo_ctx_fixture
from oslo_log import fixture as log_fixture
from oslo_log import log
from oslo_utils import timeutils
from sqlalchemy import exc
import testtools
from testtools import testcase
import keystone.api
from keystone.common import context
from keystone.common import json_home
from keystone.common import provider_api
from keystone.common import sql
import keystone.conf
from keystone import exception
from keystone.identity.backends.ldap import common as ks_ldap
from keystone import notifications
from keystone.resource.backends import base as resource_base
from keystone.server.flask import application as flask_app
from keystone.server.flask import core as keystone_flask
from keystone.tests.unit import ksfixtures
keystone.conf.configure()
keystone.conf.set_config_defaults()
PID = str(os.getpid())
TESTSDIR = os.path.dirname(os.path.abspath(__file__))
TESTCONF = os.path.join(TESTSDIR, 'config_files')
ROOTDIR = os.path.normpath(os.path.join(TESTSDIR, '..', '..', '..'))
VENDOR = os.path.join(ROOTDIR, 'vendor')
ETCDIR = os.path.join(ROOTDIR, 'etc')
def _calc_tmpdir():
env_val = os.environ.get('KEYSTONE_TEST_TEMP_DIR')
if not env_val:
return os.path.join(TESTSDIR, 'tmp', PID)
return os.path.join(env_val, PID)
TMPDIR = _calc_tmpdir()
CONF = keystone.conf.CONF
PROVIDERS = provider_api.ProviderAPIs
log.register_options(CONF)
IN_MEM_DB_CONN_STRING = 'sqlite://'
# Strictly matches ISO 8601 timestamps with subsecond precision like:
# 2016-06-28T20:48:56.000000Z
TIME_FORMAT = '%Y-%m-%dT%H:%M:%S.%fZ'
TIME_FORMAT_REGEX = r'^\d{4}-[0-1]\d-[0-3]\dT[0-2]\d:[0-5]\d:[0-5]\d\.\d{6}Z$'
exception._FATAL_EXCEPTION_FORMAT_ERRORS = True
os.makedirs(TMPDIR)
atexit.register(shutil.rmtree, TMPDIR)
class dirs(object):
@staticmethod
def root(*p):
return os.path.join(ROOTDIR, *p)
@staticmethod
def etc(*p):
return os.path.join(ETCDIR, *p)
@staticmethod
def tests(*p):
return os.path.join(TESTSDIR, *p)
@staticmethod
def tmp(*p):
return os.path.join(TMPDIR, *p)
@staticmethod
def tests_conf(*p):
return os.path.join(TESTCONF, *p)
@atexit.register
def remove_test_databases():
db = dirs.tmp('test.db')
if os.path.exists(db):
os.unlink(db)
pristine = dirs.tmp('test.db.pristine')
if os.path.exists(pristine):
os.unlink(pristine)
def skip_if_cache_disabled(*sections):
"""Skip a test if caching is disabled, this is a decorator.
Caching can be disabled either globally or for a specific section.
In the code fragment::
@skip_if_cache_is_disabled('assignment', 'token')
def test_method(*args):
...
The method test_method would be skipped if caching is disabled globally via
the `enabled` option in the `cache` section of the configuration or if
the `caching` option is set to false in either `assignment` or `token`
sections of the configuration. This decorator can be used with no
arguments to only check global caching.
If a specified configuration section does not define the `caching` option,
this decorator treats caching as enabled if the `enabled` option in the
`cache` section of the configuration is true.
"""
def wrapper(f):
@functools.wraps(f)
def inner(*args, **kwargs):
if not CONF.cache.enabled:
raise testcase.TestSkipped('Cache globally disabled.')
for s in sections:
conf_sec = getattr(CONF, s, None)
if conf_sec is not None:
if not getattr(conf_sec, 'caching', True):
raise testcase.TestSkipped('%s caching disabled.' % s)
return f(*args, **kwargs)
return inner
return wrapper
def skip_if_cache_is_enabled(*sections):
def wrapper(f):
@functools.wraps(f)
def inner(*args, **kwargs):
if CONF.cache.enabled:
for s in sections:
conf_sec = getattr(CONF, s, None)
if conf_sec is not None:
if getattr(conf_sec, 'caching', True):
raise testcase.TestSkipped('%s caching enabled.' %
s)
return f(*args, **kwargs)
return inner
return wrapper
def skip_if_no_multiple_domains_support(f):
"""Decorator to skip tests for identity drivers limited to one domain."""
@functools.wraps(f)
def wrapper(*args, **kwargs):
test_obj = args[0]
if not test_obj.identity_api.multiple_domains_supported:
raise testcase.TestSkipped('No multiple domains support')
return f(*args, **kwargs)
return wrapper
class UnexpectedExit(Exception):
pass
def new_region_ref(parent_region_id=None, **kwargs):
ref = {
'id': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'parent_region_id': parent_region_id}
ref.update(kwargs)
return ref
def new_service_ref(**kwargs):
ref = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'enabled': True,
'type': uuid.uuid4().hex,
}
ref.update(kwargs)
return ref
NEEDS_REGION_ID = object()
def new_endpoint_ref(service_id, interface='public',
region_id=NEEDS_REGION_ID, **kwargs):
ref = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'interface': interface,
'service_id': service_id,
'url': 'https://' + uuid.uuid4().hex + '.com',
}
if region_id is NEEDS_REGION_ID:
ref['region_id'] = uuid.uuid4().hex
elif region_id is None and kwargs.get('region') is not None:
# pre-3.2 form endpoints are not supported by this function
raise NotImplementedError("use new_endpoint_ref_with_region")
else:
ref['region_id'] = region_id
ref.update(kwargs)
return ref
def new_endpoint_group_ref(filters, **kwargs):
ref = {
'id': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'filters': filters,
'name': uuid.uuid4().hex
}
ref.update(kwargs)
return ref
def new_endpoint_ref_with_region(service_id, region, interface='public',
**kwargs):
"""Define an endpoint_ref having a pre-3.2 form.
Contains the deprecated 'region' instead of 'region_id'.
"""
ref = new_endpoint_ref(service_id, interface, region=region,
region_id='invalid', **kwargs)
del ref['region_id']
return ref
def new_domain_ref(**kwargs):
ref = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'enabled': True,
'tags': [],
'options': {}
}
ref.update(kwargs)
return ref
def new_project_ref(domain_id=None, is_domain=False, **kwargs):
ref = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'enabled': True,
'domain_id': domain_id,
'is_domain': is_domain,
'tags': [],
'options': {}
}
# NOTE(henry-nash): We don't include parent_id in the initial list above
# since specifying it is optional depending on where the project sits in
# the hierarchy (and a parent_id of None has meaning - i.e. it's a top
# level project).
ref.update(kwargs)
return ref
def new_user_ref(domain_id, project_id=None, **kwargs):
ref = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'enabled': True,
'domain_id': domain_id,
'email': uuid.uuid4().hex,
'password': uuid.uuid4().hex,
}
if project_id:
ref['default_project_id'] = project_id
ref.update(kwargs)
return ref
def new_federated_user_ref(idp_id=None, protocol_id=None, **kwargs):
ref = {
'idp_id': idp_id or 'ORG_IDP',
'protocol_id': protocol_id or 'saml2',
'unique_id': uuid.uuid4().hex,
'display_name': uuid.uuid4().hex,
}
ref.update(kwargs)
return ref
def new_mapping_ref(mapping_id=None, rules=None, **kwargs):
ref = {
'id': mapping_id or uuid.uuid4().hex,
'rules': rules or []
}
ref.update(kwargs)
return ref
def new_protocol_ref(protocol_id=None, idp_id=None, mapping_id=None, **kwargs):
ref = {
'id': protocol_id or 'saml2',
'idp_id': idp_id or 'ORG_IDP',
'mapping_id': mapping_id or uuid.uuid4().hex
}
ref.update(kwargs)
return ref
def new_identity_provider_ref(idp_id=None, **kwargs):
ref = {
'id': idp_id or 'ORG_IDP',
'enabled': True,
'description': '',
}
ref.update(kwargs)
return ref
def new_service_provider_ref(**kwargs):
ref = {
'auth_url': 'https://' + uuid.uuid4().hex + '.com',
'enabled': True,
'description': uuid.uuid4().hex,
'sp_url': 'https://' + uuid.uuid4().hex + '.com',
'relay_state_prefix': CONF.saml.relay_state_prefix
}
ref.update(kwargs)
return ref
def new_group_ref(domain_id, **kwargs):
ref = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'domain_id': domain_id
}
ref.update(kwargs)
return ref
def new_credential_ref(user_id, project_id=None, type='cert', **kwargs):
ref = {
'id': uuid.uuid4().hex,
'user_id': user_id,
'type': type,
}
if project_id:
ref['project_id'] = project_id
if 'blob' not in kwargs:
ref['blob'] = uuid.uuid4().hex
ref.update(kwargs)
return ref
def new_cert_credential(user_id, project_id=None, blob=None, **kwargs):
if blob is None:
blob = {'access': uuid.uuid4().hex, 'secret': uuid.uuid4().hex}
credential = new_credential_ref(user_id=user_id,
project_id=project_id,
blob=json.dumps(blob),
type='cert',
**kwargs)
return blob, credential
def new_ec2_credential(user_id, project_id=None, blob=None, **kwargs):
if blob is None:
blob = {
'access': uuid.uuid4().hex,
'secret': uuid.uuid4().hex,
'trust_id': None
}
if 'id' not in kwargs:
access = blob['access'].encode('utf-8')
kwargs['id'] = hashlib.sha256(access).hexdigest()
credential = new_credential_ref(user_id=user_id,
project_id=project_id,
blob=json.dumps(blob),
type='ec2',
**kwargs)
return blob, credential
def new_totp_credential(user_id, project_id=None, blob=None):
if not blob:
# NOTE(notmorgan): 20 bytes of data from os.urandom for
# a totp secret.
blob = base64.b32encode(os.urandom(20)).decode('utf-8')
credential = new_credential_ref(user_id=user_id,
project_id=project_id,
blob=blob,
type='totp')
return credential
def new_application_credential_ref(roles=None,
name=None,
expires=None,
secret=None):
ref = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
}
if roles:
ref['roles'] = roles
if secret:
ref['secret'] = secret
if isinstance(expires, str):
ref['expires_at'] = expires
elif isinstance(expires, dict):
ref['expires_at'] = (
timeutils.utcnow() + datetime.timedelta(**expires)
).strftime(TIME_FORMAT)
elif expires is None:
pass
else:
raise NotImplementedError('Unexpected value for "expires"')
return ref
def new_role_ref(**kwargs):
ref = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'domain_id': None,
'options': {},
}
ref.update(kwargs)
return ref
def new_policy_ref(**kwargs):
ref = {
'id': uuid.uuid4().hex,
'name': uuid.uuid4().hex,
'description': uuid.uuid4().hex,
'enabled': True,
# Store serialized JSON data as the blob to mimic real world usage.
'blob': json.dumps({'data': uuid.uuid4().hex, }),
'type': uuid.uuid4().hex,
}
ref.update(kwargs)
return ref
def new_domain_config_ref(**kwargs):
ref = {
"identity": {
"driver": "ldap"
},
"ldap": {
"url": "ldap://myldap.com:389/",
"user_tree_dn": "ou=Users,dc=my_new_root,dc=org"
}
}
ref.update(kwargs)
return ref
def new_trust_ref(trustor_user_id, trustee_user_id, project_id=None,
impersonation=None, expires=None, role_ids=None,
role_names=None, remaining_uses=None,
allow_redelegation=False, redelegation_count=None, **kwargs):
ref = {
'id': uuid.uuid4().hex,
'trustor_user_id': trustor_user_id,
'trustee_user_id': trustee_user_id,
'impersonation': impersonation or False,
'project_id': project_id,
'remaining_uses': remaining_uses,
'allow_redelegation': allow_redelegation,
}
if isinstance(redelegation_count, int):
ref.update(redelegation_count=redelegation_count)
if isinstance(expires, str):
ref['expires_at'] = expires
elif isinstance(expires, dict):
ref['expires_at'] = (
timeutils.utcnow() + datetime.timedelta(**expires)
).strftime(TIME_FORMAT)
elif expires is None:
pass
else:
raise NotImplementedError('Unexpected value for "expires"')
role_ids = role_ids or []
role_names = role_names or []
if role_ids or role_names:
ref['roles'] = []
for role_id in role_ids:
ref['roles'].append({'id': role_id})
for role_name in role_names:
ref['roles'].append({'name': role_name})
ref.update(kwargs)
return ref
def new_registered_limit_ref(**kwargs):
ref = {
'service_id': uuid.uuid4().hex,
'resource_name': uuid.uuid4().hex,
'default_limit': 10,
'description': uuid.uuid4().hex
}
ref.update(kwargs)
return ref
def new_limit_ref(**kwargs):
ref = {
'service_id': uuid.uuid4().hex,
'resource_name': uuid.uuid4().hex,
'resource_limit': 10,
'description': uuid.uuid4().hex
}
ref.update(kwargs)
return ref
def create_user(api, domain_id, **kwargs):
"""Create a user via the API. Keep the created password.
The password is saved and restored when api.create_user() is called.
Only use this routine if there is a requirement for the user object to
have a valid password after api.create_user() is called.
"""
user = new_user_ref(domain_id=domain_id, **kwargs)
password = user['password']
user = api.create_user(user)
user['password'] = password
return user
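# A minimal usage sketch (hypothetical; assumes `PROVIDERS.identity_api` and a
# domain dict are available in the calling test):
#     user = create_user(PROVIDERS.identity_api, domain_id=domain['id'])
#     # user['password'] still holds the generated plaintext password, so the
#     # test can authenticate as this user afterwards.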
def _assert_expected_status(f):
"""Add `expected_status_code` as an argument to the test_client methods.
`expected_status_code` must be passed as a kwarg.
"""
TEAPOT_HTTP_STATUS = 418
_default_expected_responses = {
'get': http.client.OK,
'head': http.client.OK,
'post': http.client.CREATED,
'put': http.client.NO_CONTENT,
'patch': http.client.OK,
'delete': http.client.NO_CONTENT,
}
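    # Hedged usage sketch (hypothetical test body): with this decorator
    # applied, a caller can override the default expectation per request:
    #     client.get('/v3/unknown-path',
    #                expected_status_code=http.client.NOT_FOUND)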
@functools.wraps(f)
def inner(*args, **kwargs):
        # Get the "expected_status_code" kwarg if supplied. If not supplied,
        # use the `_default_expected_responses` mapping, or fall through to
        # "HTTP OK" if the method is somehow unknown.
expected_status_code = kwargs.pop(
'expected_status_code',
_default_expected_responses.get(
f.__name__.lower(), http.client.OK))
response = f(*args, **kwargs)
# Logic to verify the response object is sane. Expand as needed
if response.status_code == TEAPOT_HTTP_STATUS:
            # NOTE(morgan): We use 418 internally during tests to indicate
            # that an un-routed HTTP call was made. This lets us distinguish
            # an HTTP 404 raised by Flask's routing from an HTTP 404 returned
            # for a resource that does not exist (e.g. USER NOT FOUND).
raise AssertionError("I AM A TEAPOT(418): %s" % response.data)
if response.status_code != expected_status_code:
raise AssertionError(
'Expected HTTP Status does not match observed HTTP '
'Status: %(expected)s != %(observed)s (%(data)s)' % {
'expected': expected_status_code,
'observed': response.status_code,
'data': response.data})
# return the original response object
return response
return inner
class KeystoneFlaskTestClient(flask_testing.FlaskClient):
"""Subclass of flask.testing.FlaskClient implementing assertions.
Implements custom "expected" HTTP Status assertion for
GET/HEAD/PUT/PATCH/DELETE.
"""
@_assert_expected_status
def get(self, *args, **kwargs):
return super(KeystoneFlaskTestClient, self).get(*args, **kwargs)
@_assert_expected_status
def head(self, *args, **kwargs):
return super(KeystoneFlaskTestClient, self).head(*args, **kwargs)
@_assert_expected_status
def post(self, *args, **kwargs):
return super(KeystoneFlaskTestClient, self).post(*args, **kwargs)
@_assert_expected_status
def patch(self, *args, **kwargs):
return super(KeystoneFlaskTestClient, self).patch(*args, **kwargs)
@_assert_expected_status
def put(self, *args, **kwargs):
return super(KeystoneFlaskTestClient, self).put(*args, **kwargs)
@_assert_expected_status
def delete(self, *args, **kwargs):
return super(KeystoneFlaskTestClient, self).delete(*args, **kwargs)
class BaseTestCase(testtools.TestCase):
    """Lightweight base test class.
    This is a placeholder that will eventually go away once the
    setup/teardown in TestCase is properly trimmed down to the bare
    essentials. It is really just an attempt to speed up the tests by
    eliminating unnecessary work.
    """
def setUp(self):
super(BaseTestCase, self).setUp()
self.useFixture(fixtures.NestedTempfile())
self.useFixture(fixtures.TempHomeDir())
self.useFixture(fixtures.MockPatchObject(sys, 'exit',
side_effect=UnexpectedExit))
self.useFixture(log_fixture.get_logging_handle_error_fixture())
warnings.filterwarnings('error', category=DeprecationWarning,
module='^keystone\\.')
warnings.filterwarnings(
'ignore', category=DeprecationWarning,
message=r"Using function/method 'db_version\(\)' is deprecated")
warnings.simplefilter('error', exc.SAWarning)
if hasattr(exc, "RemovedIn20Warning"):
warnings.simplefilter('ignore', exc.RemovedIn20Warning)
self.addCleanup(warnings.resetwarnings)
# Ensure we have an empty threadlocal context at the start of each
# test.
self.assertIsNone(oslo_context.get_current())
self.useFixture(oslo_ctx_fixture.ClearRequestContext())
orig_debug_level = ldap.get_option(ldap.OPT_DEBUG_LEVEL)
self.addCleanup(ldap.set_option, ldap.OPT_DEBUG_LEVEL,
orig_debug_level)
orig_tls_cacertfile = ldap.get_option(ldap.OPT_X_TLS_CACERTFILE)
if orig_tls_cacertfile is None:
orig_tls_cacertfile = ''
self.addCleanup(ldap.set_option, ldap.OPT_X_TLS_CACERTFILE,
orig_tls_cacertfile)
orig_tls_cacertdir = ldap.get_option(ldap.OPT_X_TLS_CACERTDIR)
# Setting orig_tls_cacertdir to None is not allowed.
if orig_tls_cacertdir is None:
orig_tls_cacertdir = ''
self.addCleanup(ldap.set_option, ldap.OPT_X_TLS_CACERTDIR,
orig_tls_cacertdir)
orig_tls_require_cert = ldap.get_option(ldap.OPT_X_TLS_REQUIRE_CERT)
self.addCleanup(ldap.set_option, ldap.OPT_X_TLS_REQUIRE_CERT,
orig_tls_require_cert)
self.addCleanup(ks_ldap.PooledLDAPHandler.connection_pools.clear)
def cleanup_instance(self, *names):
"""Create a function suitable for use with self.addCleanup.
:returns: a callable that uses a closure to delete instance attributes
"""
def cleanup():
for name in names:
# TODO(dstanek): remove this 'if' statement once
# load_backend in test_backend_ldap is only called once
# per test
if hasattr(self, name):
delattr(self, name)
return cleanup
def skip_if_env_not_set(self, env_var):
if not os.environ.get(env_var):
self.skipTest('Env variable %s is not set.' % env_var)
def skip_test_overrides(self, *args, **kwargs):
if self._check_for_method_in_parents(self._testMethodName):
return super(BaseTestCase, self).skipTest(*args, **kwargs)
raise Exception('%r is not a previously defined test method'
% self._testMethodName)
def _check_for_method_in_parents(self, name):
# skip first to get to parents
for cls in self.__class__.__mro__[1:]:
if hasattr(cls, name):
return True
return False
def loadapp(self, name='public'):
app = flask_app.application_factory(name)
app.testing = True
app.test_client_class = KeystoneFlaskTestClient
        # NOTE(morgan): any unexpected 404 not handled by the routed apis
        # is a hard error and should not pass testing.
def page_not_found_teapot(e):
content = (
'TEST PROGRAMMING ERROR - Reached a 404 from an unrouted (`%s`'
') path. Be sure the test is requesting the right resource '
'and that all blueprints are registered with the flask app.' %
flask.request.url)
return content, 418
app.register_error_handler(404, page_not_found_teapot)
self.test_client = app.test_client
self.test_request_context = app.test_request_context
self.cleanup_instance('test_request_context')
self.cleanup_instance('test_client')
return keystone_flask.setup_app_middleware(app)
class TestCase(BaseTestCase):
def config_files(self):
return []
def _policy_fixture(self):
return ksfixtures.Policy(self.config_fixture)
@contextlib.contextmanager
def make_request(self, path='/', **kwargs):
        # stand up a fake app and request context with a passed-in/known
        # environment.
is_admin = kwargs.pop('is_admin', False)
environ = kwargs.setdefault('environ', {})
query_string = kwargs.pop('query_string', None)
if query_string:
# Make sure query string is properly added to the context
path = '{path}?{qs}'.format(path=path, qs=query_string)
if not environ.get(context.REQUEST_CONTEXT_ENV):
environ[context.REQUEST_CONTEXT_ENV] = context.RequestContext(
is_admin=is_admin,
authenticated=kwargs.pop('authenticated', True))
# Create a dummy flask app to work with
app = flask.Flask(__name__)
with app.test_request_context(path=path, environ_overrides=environ):
yield
def config_overrides(self):
# NOTE(morganfainberg): enforce config_overrides can only ever be
# called a single time.
assert self.__config_overrides_called is False
self.__config_overrides_called = True
signing_certfile = 'examples/pki/certs/signing_cert.pem'
signing_keyfile = 'examples/pki/private/signing_key.pem'
self.useFixture(self._policy_fixture())
self.config_fixture.config(
# TODO(morganfainberg): Make Cache Testing a separate test case
# in tempest, and move it out of the base unit tests.
group='cache',
backend='dogpile.cache.memory',
enabled=True,
proxies=['oslo_cache.testing.CacheIsolatingProxy'])
self.config_fixture.config(
group='catalog',
driver='sql',
template_file=dirs.tests('default_catalog.templates'))
self.config_fixture.config(
group='saml', certfile=signing_certfile, keyfile=signing_keyfile)
self.config_fixture.config(
default_log_levels=[
'amqp=WARN',
'amqplib=WARN',
'boto=WARN',
'qpid=WARN',
'sqlalchemy=WARN',
'suds=INFO',
'oslo.messaging=INFO',
'iso8601=WARN',
'requests.packages.urllib3.connectionpool=WARN',
'routes.middleware=INFO',
'stevedore.extension=INFO',
'keystone.notifications=INFO',
'keystone.identity.backends.ldap.common=INFO',
])
# NOTE(notmorgan): Set password rounds low here to ensure speedy
# tests. This is explicitly set because the tests here are not testing
# the integrity of the password hashing, just that the correct form
# of hashing has been used. Note that 4 is the lowest for bcrypt
# allowed in the `[identity] password_hash_rounds` setting
self.config_fixture.config(group='identity', password_hash_rounds=4)
self.useFixture(
ksfixtures.KeyRepository(
self.config_fixture,
'fernet_tokens',
CONF.fernet_tokens.max_active_keys
)
)
self.useFixture(
ksfixtures.KeyRepository(
self.config_fixture,
'fernet_receipts',
CONF.fernet_receipts.max_active_keys
)
)
def _assert_config_overrides_called(self):
assert self.__config_overrides_called is True
def setUp(self):
super(TestCase, self).setUp()
self.__config_overrides_called = False
self.__load_backends_called = False
self.config_fixture = self.useFixture(config_fixture.Config(CONF))
self.addCleanup(delattr, self, 'config_fixture')
self.config(self.config_files())
# NOTE(morganfainberg): mock the auth plugin setup to use the config
# fixture which automatically unregisters options when performing
# cleanup.
def mocked_register_auth_plugin_opt(conf, opt):
self.config_fixture.register_opt(opt, group='auth')
self.useFixture(fixtures.MockPatchObject(
keystone.conf.auth, '_register_auth_plugin_opt',
new=mocked_register_auth_plugin_opt))
self.config_overrides()
# explicitly load auth configuration
keystone.conf.auth.setup_authentication()
# NOTE(morganfainberg): ensure config_overrides has been called.
self.addCleanup(self._assert_config_overrides_called)
self.useFixture(fixtures.FakeLogger(level=log.DEBUG))
# NOTE(morganfainberg): This code is a copy from the oslo-incubator
# log module. This is not in a function or otherwise available to use
# without having a CONF object to setup logging. This should help to
# reduce the log size by limiting what we log (similar to how Keystone
# would run under mod_wsgi).
for pair in CONF.default_log_levels:
mod, _sep, level_name = pair.partition('=')
logger = log.getLogger(mod)
logger.logger.setLevel(level_name)
self.useFixture(ksfixtures.Cache())
# Clear the registry of providers so that providers from previous
# tests aren't used.
self.addCleanup(provider_api.ProviderAPIs._clear_registry_instances)
# Clear the registry of JSON Home Resources
self.addCleanup(json_home.JsonHomeResources._reset)
# Ensure Notification subscriptions and resource types are empty
self.addCleanup(notifications.clear_subscribers)
self.addCleanup(notifications.reset_notifier)
def config(self, config_files):
sql.initialize()
CONF(args=[], project='keystone', default_config_files=config_files)
    def load_backends(self):
        """Initialize each manager and assign it to an attribute."""
# TODO(morgan): Ensure our tests only ever call load_backends
# a single time via this method. for now just clear the registry
# if we are reloading.
provider_api.ProviderAPIs._clear_registry_instances()
self.useFixture(ksfixtures.BackendLoader(self))
def load_fixtures(self, fixtures):
"""Hacky basic and naive fixture loading based on a python module.
Expects that the various APIs into the various services are already
defined on `self`.
"""
# NOTE(dstanek): create a list of attribute names to be removed
# from this instance during cleanup
fixtures_to_cleanup = []
# TODO(termie): doing something from json, probably based on Django's
# loaddata will be much preferred.
if (hasattr(self, 'identity_api') and
hasattr(self, 'assignment_api') and
hasattr(self, 'resource_api')):
try:
PROVIDERS.resource_api.create_domain(
resource_base.NULL_DOMAIN_ID, fixtures.ROOT_DOMAIN)
except exception.Conflict:
# the root domain already exists, skip now.
pass
for domain in fixtures.DOMAINS:
rv = PROVIDERS.resource_api.create_domain(domain['id'], domain)
attrname = 'domain_%s' % domain['id']
setattr(self, attrname, rv)
fixtures_to_cleanup.append(attrname)
for project in fixtures.PROJECTS:
project_attr_name = 'project_%s' % project['name'].lower()
rv = PROVIDERS.resource_api.create_project(
project['id'], project)
setattr(self, project_attr_name, rv)
fixtures_to_cleanup.append(project_attr_name)
for role in fixtures.ROLES:
rv = PROVIDERS.role_api.create_role(role['id'], role)
attrname = 'role_%s' % role['name']
setattr(self, attrname, rv)
fixtures_to_cleanup.append(attrname)
for user in fixtures.USERS:
user_copy = user.copy()
projects = user_copy.pop('projects')
# For users, the manager layer will generate the ID
user_copy = PROVIDERS.identity_api.create_user(user_copy)
# Our tests expect that the password is still in the user
# record so that they can reference it, so put it back into
# the dict returned.
user_copy['password'] = user['password']
# fixtures.ROLES[2] is the _member_ role.
for project_id in projects:
PROVIDERS.assignment_api.add_role_to_user_and_project(
user_copy['id'], project_id, fixtures.ROLES[2]['id'])
# Use the ID from the fixture as the attribute name, so
# that our tests can easily reference each user dict, while
# the ID in the dict will be the real public ID.
attrname = 'user_%s' % user['name']
setattr(self, attrname, user_copy)
fixtures_to_cleanup.append(attrname)
for role_assignment in fixtures.ROLE_ASSIGNMENTS:
role_id = role_assignment['role_id']
user = role_assignment['user']
project_id = role_assignment['project_id']
user_id = getattr(self, 'user_%s' % user)['id']
PROVIDERS.assignment_api.add_role_to_user_and_project(
user_id, project_id, role_id)
self.addCleanup(self.cleanup_instance(*fixtures_to_cleanup))
def assertCloseEnoughForGovernmentWork(self, a, b, delta=3):
"""Assert that two datetimes are nearly equal within a small delta.
:param delta: Maximum allowable time delta, defined in seconds.
"""
if a == b:
# Short-circuit if the values are the same.
return
        msg = '%s != %s within %s delta' % (a, b, delta)
        # Use total_seconds() so deltas larger than a day are not truncated.
        self.assertLessEqual(abs(a - b).total_seconds(), delta, msg)
def assertTimestampEqual(self, expected, value):
        # Compare two timestamps but ignore the microseconds part
        # of the expected timestamp. Keystone does not track microseconds and
        # is working to eliminate microseconds from its datetime values.
expected = timeutils.parse_isotime(expected).replace(microsecond=0)
value = timeutils.parse_isotime(value).replace(microsecond=0)
self.assertEqual(
expected,
value,
"%s != %s" % (expected, value))
    def assertNotEmpty(self, iterable):
        self.assertGreater(len(iterable), 0)
def assertUserDictEqual(self, expected, observed, message=''):
"""Assert that a user dict is equal to another user dict.
User dictionaries have some variable values that should be ignored in
        the comparison. This method is a helper that strips those elements out
        when comparing the user dictionary, normalizing differences that
        should not affect the comparison.
"""
# NOTE(notmorgan): An empty option list is the same as no options being
# specified in the user_ref. This removes options if it is empty in
# observed if options is not specified in the expected value.
if ('options' in observed and not observed['options'] and
'options' not in expected):
observed = observed.copy()
del observed['options']
self.assertDictEqual(expected, observed, message)
@property
def ipv6_enabled(self):
if socket.has_ipv6:
sock = None
try:
sock = socket.socket(socket.AF_INET6)
# NOTE(Mouad): Try to bind to IPv6 loopback ip address.
sock.bind(("::1", 0))
return True
except socket.error:
pass
finally:
if sock:
sock.close()
return False
def skip_if_no_ipv6(self):
if not self.ipv6_enabled:
raise self.skipTest("IPv6 is not enabled in the system")
class SQLDriverOverrides(object):
"""A mixin for consolidating sql-specific test overrides."""
def config_overrides(self):
super(SQLDriverOverrides, self).config_overrides()
# SQL specific driver overrides
self.config_fixture.config(group='catalog', driver='sql')
self.config_fixture.config(group='identity', driver='sql')
self.config_fixture.config(group='policy', driver='sql')
self.config_fixture.config(group='trust', driver='sql')
| 34.217996
| 79
| 0.619676
| 4,443
| 36,887
| 4.979743
| 0.186811
| 0.023593
| 0.031458
| 0.018983
| 0.273446
| 0.231322
| 0.196158
| 0.155299
| 0.134734
| 0.117876
| 0
| 0.007054
| 0.281319
| 36,887
| 1,077
| 80
| 34.249768
| 0.827537
| 0.194893
| 0
| 0.301501
| 0
| 0.001364
| 0.097711
| 0.01699
| 0
| 0
| 0
| 0.001857
| 0.030014
| 1
| 0.100955
| false
| 0.013643
| 0.05457
| 0.017735
| 0.245566
| 0.001364
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
918dd351f71913e5bfee0b534327c85070c34d0b
| 17,327
|
py
|
Python
|
PyISY/Nodes/__init__.py
|
sneelco/PyISY
|
f1f916cd7951b1b6a5235bb36444c695fe3294e1
|
[
"Apache-2.0"
] | null | null | null |
PyISY/Nodes/__init__.py
|
sneelco/PyISY
|
f1f916cd7951b1b6a5235bb36444c695fe3294e1
|
[
"Apache-2.0"
] | null | null | null |
PyISY/Nodes/__init__.py
|
sneelco/PyISY
|
f1f916cd7951b1b6a5235bb36444c695fe3294e1
|
[
"Apache-2.0"
] | null | null | null |
from .group import Group
from .node import (Node, parse_xml_properties, ATTR_ID)
from time import sleep
from xml.dom import minidom
class Nodes(object):
"""
    This class handles the ISY nodes. It can be used as a dictionary to
    navigate through the controller's structure to objects of type
    :class:`~PyISY.Nodes.Node` and :class:`~PyISY.Nodes.Group` that represent
    objects on the controller.
| parent: ISY class
| root: [optional] String representing the current navigation level's ID
| nids: [optional] list of node ids
| nnames: [optional] list of node names
| nparents: [optional] list of node parents
| nobjs: [optional] list of node objects
| ntypes: [optional] list of node types
| xml: [optional] String of xml data containing the configuration data
:ivar allLowerNodes: Returns all nodes beneath current level
:ivar children: A list of the object's children.
:ivar hasChildren: Indicates if object has children
:ivar name: The name of the current folder in navigation.
"""
    def __init__(self, parent, root=None, nids=None, nnames=None,
                 nparents=None, nobjs=None, ntypes=None, xml=None):
        self.parent = parent
        self.root = root
        # NOTE: these lists must be created per instance; defining them as
        # class attributes would share one set of lists between every Nodes
        # object and leak state across instances.
        self.nids = []
        self.nnames = []
        self.nparents = []
        self.nobjs = []
        self.ntypes = []
        if nids is not None and nnames is not None and nparents is not None \
                and nobjs is not None and ntypes is not None:
            self.nids = nids
            self.nnames = nnames
            self.nparents = nparents
            self.nobjs = nobjs
            self.ntypes = ntypes
        elif xml is not None:
            self.parse(xml)
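    # Illustrative (hypothetical) navigation sketch, assuming an initialized
    # ISY connection `isy` with a folder 'Living Room' containing 'Lamp':
    #     lamp = isy.nodes['Living Room']['Lamp']
    #     print(lamp)  # -> Node (<address>)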
def __str__(self):
""" Returns string representation of the nodes/folders/groups. """
if self.root is None:
return 'Folder <root>'
else:
ind = self.nids.index(self.root)
if self.ntypes[ind] == 'folder':
return 'Folder (' + self.root + ')'
elif self.ntypes[ind] == 'group':
return 'Group (' + self.root + ')'
else:
return 'Node (' + self.root + ')'
def __repr__(self):
""" Creates a pretty representation of the nodes/folders/groups. """
# get and sort children
folders = []
groups = []
nodes = []
        for child in self.children:
            # use ==: `is` checks identity and is unreliable for strings
            if child[0] == 'folder':
                folders.append(child)
            elif child[0] == 'group':
                groups.append(child)
            elif child[0] == 'node':
                nodes.append(child)
        # sort each category alphabetically by name
folders.sort(key=lambda x: x[1])
groups.sort(key=lambda x: x[1])
nodes.sort(key=lambda x: x[1])
out = str(self) + '\n' + self.__reprFolders__(folders) + \
self.__reprGroups__(groups) + self.__reprNodes__(nodes)
return out
def __reprFolders__(self, folders):
# format folders
out = ''
for fold in folders:
fold_obj = self[fold[2]]
out += ' + ' + fold[1] + ': Folder(' + fold[2] + ')\n'
for line in repr(fold_obj).split('\n')[1:]:
if len(line) > 0:
out += ' | ' + line + '\n'
out += ' -\n'
return out
def __reprGroups__(self, groups):
# format groups
out = ''
for group in groups:
out += ' ' + group[1] + ': Group(' + group[2] + ')\n'
return out
def __reprNodes__(self, nodes):
# format nodes
out = ''
for node in nodes:
node_obj = self[node[2]]
if node_obj.hasChildren:
out += ' + '
else:
out += ' '
out += node[1] + ': Node(' + node[2] + ')\n'
if node_obj.hasChildren:
for line in repr(node_obj).split('\n')[1:]:
if len(line) > 0:
out += ' | ' + line + '\n'
out += ' -\n'
return out
def __iter__(self):
"""
Returns an iterator for each node below the current navigation level.
"""
iter_data = self.allLowerNodes
return NodeIterator(self, iter_data, delta=1)
def __reversed__(self):
""" Returns the iterator in reverse order. """
iter_data = self.allLowerNodes
return NodeIterator(self, iter_data, delta=-1)
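    # Hedged iteration sketch (hypothetical `isy` object): each item yielded
    # by the iterator is a (path, object) tuple:
    #     for path, node in isy.nodes:
    #         print(path)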
def _upmsg(self, xmldoc):
"""Updates nodes from event stream message."""
nid = xmldoc.getElementsByTagName('node')[0].firstChild.toxml()
nval = int(xmldoc.getElementsByTagName('action')[0].firstChild.toxml())
ctrl = xmldoc.getElementsByTagName('control')[0].firstChild.toxml()
try:
if ctrl == 'ST':
self.getByID(nid).status.update(nval, force=True, silent=True)
self.parent.log.info('ISY Updated Node: ' + nid)
else:
nid = '{}_{}'.format(nid, ctrl)
status = self.getByID(nid).status
status.update(nval, force=True, silent=True)
self.parent.log.info('ISY Updated Property: ' + nid)
except ValueError:
            self.parent.log.warning('Unable to find node: ' + nid)
def _controlmsg(self, xmldoc):
"""Passes Control events from an event stream message to nodes, for
sending out to subscribers."""
try:
nid = xmldoc.getElementsByTagName('node')[0].firstChild.toxml()
cntrl = xmldoc.getElementsByTagName('control')[0].firstChild.toxml()
        except IndexError:
            # If there is no node associated with the control message,
            # ignore it.
            return
self.getByID(nid).controlEvents.notify(cntrl)
self.parent.log.info('ISY Node Control Event: ' + nid + ' ' + cntrl)
def parse(self, xml):
"""
Parses the xml data.
| xml: String of the xml data
"""
        try:
            xmldoc = minidom.parseString(xml)
        except Exception:
            self.parent.log.error('ISY Could not parse nodes, '
                                  + 'poorly formatted XML.')
else:
# get nodes
ntypes = ['folder', 'node', 'group']
for ntype in ntypes:
features = xmldoc.getElementsByTagName(ntype)
for feature in features:
nid = feature.getElementsByTagName('address')[0] \
.firstChild.toxml()
nname = feature.getElementsByTagName('name')[0] \
.firstChild.toxml()
try:
nparent = feature.getElementsByTagName('parent')[0] \
.firstChild.toxml()
except IndexError:
nparent = None
try:
parent_nid = feature.getElementsByTagName('pnode')[0] \
.firstChild.toxml()
except IndexError:
parent_nid = None
try:
type = feature.getElementsByTagName('type')[0] \
.firstChild.toxml()
except IndexError:
type = None
try:
nodeDefId = feature.attributes['nodeDefId'].value
except KeyError:
nodeDefId = None
if ntype == 'folder':
self.insert(nid, nname, nparent, None, ntype)
elif ntype == 'node':
node_xml = self.parent.conn.getNode(nid)
node_doc = minidom.parseString(node_xml) # type: xml.dom.minidom.Document
node = node_doc.getElementsByTagName('node')[0]
(state_val, state_uom, state_prec,
aux_props) = parse_xml_properties(node_doc)
dimmable = '%' in state_uom
self.insert(nid, nname, nparent,
Node(self, nid, state_val, nname,
dimmable,
uom=state_uom, prec=state_prec,
aux_properties=aux_props,
node_def_id=nodeDefId,
parent_nid=parent_nid,
type=type),
ntype)
for id, prop in aux_props.items():
if id == 'ST':
continue
prop_id = '{}_{}'.format(nid, id)
prop_name = '{} {}'.format(nname, id)
self.insert(prop_id, prop_name, nparent,
Node(self, prop_id, prop['value'],
prop_name, False,
uom=prop['uom'],
prec=prop['prec']),
'property')
elif ntype == 'group':
flag = feature.attributes['flag'].value
                        # Ignore groups with the 0x08 flag bit set: that marks
                        # an ISY scene containing every device/scene, which can
                        # include scenes we have not seen yet (and so are not
                        # defined), and in newer ISY versions (5.0.6+) it also
                        # includes the ISY MAC address.
                        if int(flag) & 0x08:
                            self.parent.log.info('Skipping group flag=' + flag + ' ' + nid)
else:
mems = feature.getElementsByTagName('link')
# Build list of members
members = [mem.firstChild.nodeValue for mem in mems]
# Build list of controllers
controllers = []
for mem in mems:
if int(mem.attributes['type'].value) == 16:
controllers.append(mem.firstChild.nodeValue)
self.insert(nid, nname, nparent,
Group(self, nid, nname, members, controllers), ntype)
self.parent.log.info('ISY Loaded Nodes')
def update(self, waitTime=0):
"""
Updates the contents of the class
| waitTime: [optional] Amount of seconds to wait before updating
"""
sleep(waitTime)
xml = self.parent.conn.updateNodes()
if xml is not None:
            try:
                xmldoc = minidom.parseString(xml)
            except Exception:
                self.parent.log.error('ISY Could not parse nodes, '
                                      + 'poorly formatted XML.')
else:
for feature in xmldoc.getElementsByTagName('node'):
nid = feature.attributes['id'].value
(state_val, state_uom, state_prec,
aux_props) = parse_xml_properties(feature)
dimmable = '%' in state_uom
if nid in self.nids:
node = self.getByID(nid)
node.uom = state_uom
node.prec = state_prec
node.dimmable = dimmable
node.status.update(state_val, silent=True)
if len(node.aux_properties) > 0:
node_xml = self.parent.conn.getNode(nid)
node_doc = minidom.parseString(node_xml)
(state_val, state_uom, state_prec,
aux_props) = parse_xml_properties(node_doc)
                            for key, val in aux_props.items():
                                pid = '{}_{}'.format(nid, key)
                                prop = self.getByID(pid)
                                # update the property node with the freshly
                                # parsed value for this property
                                prop.status.update(val['value'], silent=True)
                    else:
                        node = Node(self, nid, state_val, ' ', dimmable,
                                    uom=state_uom, prec=state_prec,
                                    aux_properties=aux_props)
                        self.insert(nid, ' ', None, node, 'node')
self.parent.log.info('ISY Updated Nodes')
else:
self.parent.log.warning('ISY Failed to update nodes.')
def insert(self, nid, nname, nparent, nobj, ntype):
"""
Inserts a new node into the lists.
| nid: node id
| nname: node name
| nparent: node parent
| nobj: node object
| ntype: node type
"""
self.nids.append(nid)
self.nnames.append(nname)
self.nparents.append(nparent)
self.ntypes.append(ntype)
self.nobjs.append(nobj)
def __getitem__(self, val):
"""
Used for navigating through the node tree. Can take names or IDs.
"""
try:
self.nids.index(val)
fun = self.getByID
except ValueError:
try:
self.nnames.index(val)
fun = self.getByName
except ValueError:
try:
val = int(val)
fun = self.getByInd
except ValueError:
fun = None
        if fun:
            output = None
            try:
                output = fun(val)
            except Exception:
                pass
            if output:
                return output
        raise KeyError('Unrecognized Key: [' + str(val) + ']')
    def __setitem__(self, key, value):
        return None
def getByName(self, val):
"""
Gets child object with the given name.
| val: String representing name to look for.
"""
for i in range(len(self.nids)):
if self.nparents[i] == self.root and self.nnames[i] == val:
return self.getByInd(i)
    def getByID(self, nid):
        """
        Gets object with the given ID.
        | nid: String representing the node/group/folder id.
        """
"""
i = self.nids.index(nid)
return self.getByInd(i)
def getByInd(self, i):
"""
Returns the object at the given index in the list.
| i: Integer representing index of node/group/folder.
"""
if self.ntypes[i] in ['group', 'node', 'property']:
return self.nobjs[i]
return Nodes(self.parent, self.nids[i], self.nids, self.nnames,
self.nparents, self.nobjs, self.ntypes)
def parseNotes(self, notes_xml):
spoken = None
if notes_xml is not None and notes_xml != "":
            try:
                notesdom = minidom.parseString(notes_xml)
            except Exception:
                self.parent.log.error('ISY Could not parse node, notes '
                                      + 'poorly formatted XML: ' + notes_xml)
else:
spoken_tag = notesdom.getElementsByTagName('spoken')
                if spoken_tag and spoken_tag[0].firstChild is not None:
spoken = spoken_tag[0].firstChild.toxml()
return { "spoken": spoken }
@property
def children(self):
out = []
for i in range(len(self.nids)):
if self.nparents[i] == self.root:
out.append((self.ntypes[i], self.nnames[i], self.nids[i]))
return out
@property
def hasChildren(self):
        try:
            self.nparents.index(self.root)
            return True
        except ValueError:
            return False
@property
def name(self):
if self.root is None:
return ''
else:
ind = self.nids.index(self.root)
return self.nnames[ind]
@property
def allLowerNodes(self):
output = []
myname = self.name + '/'
for dtype, name, ident in self.children:
if dtype in ['group', 'node', 'property']:
output.append((dtype, myname + name, ident))
else:
output += [(dtype2, myname + name2, ident2)
for (dtype2, name2, ident2)
in self[ident].allLowerNodes]
return output
class NodeIterator(object):
""" Iterates through a list of nodes, returning node objects. """
def __init__(self, parent, iter_data, delta=1):
self._parent = parent
self._iterdata = iter_data
self._len = len(iter_data)
self._delta = delta
if delta > 0:
self._ind = 0
else:
self._ind = self._len - 1
def __next__(self):
if self._ind >= self._len or self._ind < 0:
raise StopIteration
_, path, ident = self._iterdata[self._ind]
self._ind += self._delta
return (path, self._parent[ident])
def __len__(self):
return self._len
| 36.324948
| 100
| 0.48612
| 1,749
| 17,327
| 4.719268
| 0.168096
| 0.024231
| 0.021323
| 0.012358
| 0.221468
| 0.188636
| 0.147807
| 0.12915
| 0.12915
| 0.12915
| 0
| 0.005745
| 0.417326
| 17,327
| 476
| 101
| 36.401261
| 0.811807
| 0.137416
| 0
| 0.302115
| 0
| 0
| 0.047904
| 0
| 0
| 0
| 0.000275
| 0
| 0
| 1
| 0.07855
| false
| 0.003021
| 0.012085
| 0.006042
| 0.190332
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
918e36c7c2d321203012c2cecdfb70b87e94940f
| 1,329
|
py
|
Python
|
easyCore/Utils/Logging.py
|
easyScience/easyCore
|
5d16d5b27803277d0c44886f94dab599f764ae0b
|
[
"BSD-3-Clause"
] | 2
|
2021-11-02T10:22:45.000Z
|
2022-02-18T23:41:19.000Z
|
easyCore/Utils/Logging.py
|
easyScience/easyCore
|
5d16d5b27803277d0c44886f94dab599f764ae0b
|
[
"BSD-3-Clause"
] | 114
|
2020-06-30T08:52:27.000Z
|
2022-03-30T20:47:56.000Z
|
easyCore/Utils/Logging.py
|
easyScience/easyCore
|
5d16d5b27803277d0c44886f94dab599f764ae0b
|
[
"BSD-3-Clause"
] | 1
|
2022-03-04T13:01:09.000Z
|
2022-03-04T13:01:09.000Z
|
# SPDX-FileCopyrightText: 2021 easyCore contributors <core@easyscience.software>
# SPDX-License-Identifier: BSD-3-Clause
# © 2021 Contributors to the easyCore project <https://github.com/easyScience/easyCore>
__author__ = 'github.com/wardsimon'
__version__ = '0.1.0'
import logging
class Logger:
def __init__(self, log_level: int = logging.INFO):
self.logger = logging.getLogger(__name__)
self.level = log_level
self.logger.setLevel(self.level)
    def getLogger(self, logger_name, color: str = '32', defaults: bool = True) -> logging.Logger:
        """
        Create a logger
        :param logger_name: logger name. Usually __name__ on creation
        :param color: ANSI color code used when formatting console output
        :param defaults: Do you want to associate any current file loggers with this logger
        :return: A logger
        """
logger = logging.getLogger(logger_name)
logger.setLevel(self.level)
# self.applyLevel(logger)
# for handler_type in self._handlers:
# for handler in self._handlers[handler_type]:
# if handler_type == 'sys' or defaults:
# handler.formatter._fmt = self._makeColorText(color)
# logger.addHandler(handler)
# logger.propagate = False
# self._loggers.append(logger)
return logger
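# A small usage sketch (illustrative only; `Logger` is this module's class):
#     log = Logger(log_level=logging.DEBUG)
#     logger = log.getLogger(__name__)
#     logger.debug('easyCore logger configured')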
| 36.916667
| 91
| 0.645598
| 152
| 1,329
| 5.434211
| 0.513158
| 0.048426
| 0.053269
| 0.05569
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014213
| 0.258841
| 1,329
| 35
| 92
| 37.971429
| 0.82335
| 0.536494
| 0
| 0
| 0
| 0
| 0.049091
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.083333
| 0
| 0.416667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
919092189581e9b39163223362020fad3bbd08e7
| 3,416
|
py
|
Python
|
defaultsob/core.py
|
honewatson/defaults
|
c6a845ec1f25fc82e7645dfee60dd2df1cfa4e81
|
[
"0BSD"
] | null | null | null |
defaultsob/core.py
|
honewatson/defaults
|
c6a845ec1f25fc82e7645dfee60dd2df1cfa4e81
|
[
"0BSD"
] | null | null | null |
defaultsob/core.py
|
honewatson/defaults
|
c6a845ec1f25fc82e7645dfee60dd2df1cfa4e81
|
[
"0BSD"
] | null | null | null |
# -*- coding: utf-8 -*-
def ordered_set(iterable):
    """Creates an ordered set
    @param iterable: list or tuple (renamed to avoid shadowing builtin iter)
    @return: list with unique values
    """
    final = []
    for i in iterable:
        if i not in final:
            final.append(i)
    return final
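# For example, ordered_set([3, 1, 3, 2, 1]) returns [3, 1, 2]: first
# occurrences win and the input ordering is preserved.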
def class_slots(ob):
"""Get object attributes from child class attributes
@param ob: Defaults object
@type ob: Defaults
@return: Tuple of slots
"""
current_class = type(ob).__mro__[0]
if not getattr(current_class, 'allslots', None) \
and current_class != object:
_allslots = [list(getattr(cls, '__slots__', []))
for cls in type(ob).__mro__]
_fslots = []
for slot in _allslots:
_fslots = _fslots + slot
current_class.allslots = tuple(ordered_set(_fslots))
return current_class.allslots
def use_if_none_cls(alternative_attr):
def use_if_none(original_attr, ob, kwargs):
"""
Try and get a value from kwargs for original_attr. If there
is no original_attr in kwargs use the alternative_attr value
in the object ob
@param alternative_attr: the alternative attribute
@param original_attr: the original attribute
@param ob: the object with the attributes
@param kwargs: key values
@return: final value
"""
return kwargs.get(original_attr, getattr(ob, alternative_attr, None))
return use_if_none
def usef(attr):
"""Use another value as default
@param attr: the name of the attribute to
use as alternative value
@return: value of alternative attribute
"""
return use_if_none_cls(attr)
use_name_if_none = usef('Name')
def choose_alt(attr, ob, kwargs):
"""If the declared class attribute of ob is callable
then use that callable to get a default ob
instance value if a value is not available in kwargs.
@param attr: ob class attribute name
@param ob: the object instance whose default value needs to be set
@param kwargs: the kwargs values passed to the ob __init__ method
@return: value to be used to set ob instance
"""
result = ob.__class__.__dict__.get(attr, None)
if type(result).__name__ == "member_descriptor":
result = None
elif callable(result):
result = result(attr, ob, kwargs)
return result
class Defaults(object):
"""A base class which allows using slots to define
attributes and the ability to set object
instance defaults at the child class level"""
def __init__(self, **kwargs):
"""Assign kwargs to attributes and defaults to attributes"""
allslots = class_slots(self)
for attr in allslots:
setattr(self, attr, kwargs.get(
attr, choose_alt(attr, self, kwargs)))
def to_dict(self):
"""Returns attributes with values as dict
@return: dictionary of attributes with values
"""
allslots = class_slots(self)
return {
item: getattr(self, item, None)
for item in allslots
}
def to_dict_clean(self):
"""Return a dict where there values of None
are not included
@return: dict of the object properties with values
"""
attribs = self.to_dict()
return {
k: v
for k, v in attribs.items() if v
}
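# A hedged usage sketch (the classes below are hypothetical, not part of this
# module). Defaults are declared as class attributes on a subclass whose
# parent owns the __slots__ entries, so they do not collide with the slots:
#
#     class Base(Defaults):
#         __slots__ = ('Name', 'x')
#
#     class Point(Base):
#         __slots__ = ()
#         Name = 'origin'          # plain default value
#         x = use_name_if_none     # computed default: falls back to Name
#
#     Point().to_dict()           # -> {'Name': 'origin', 'x': 'origin'}
#     Point(x=3).to_dict_clean()  # -> {'Name': 'origin', 'x': 3}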
| 27.772358
| 77
| 0.624415
| 447
| 3,416
| 4.599553
| 0.241611
| 0.029183
| 0.01751
| 0.011673
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.000833
| 0.297424
| 3,416
| 122
| 78
| 28
| 0.855833
| 0.425351
| 0
| 0.081633
| 0
| 0
| 0.022183
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.183673
| false
| 0
| 0
| 0
| 0.367347
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9190a55060e46f0f4d728a8eb6583235a5fc4dcf
| 3,140
|
py
|
Python
|
tests/bot_test.py
|
item4/yui
|
8628d0d54b94ada3cbe7d1b0f624063258bad10a
|
[
"MIT"
] | 36
|
2017-06-12T01:09:46.000Z
|
2021-01-31T17:57:41.000Z
|
tests/bot_test.py
|
item4/yui
|
8628d0d54b94ada3cbe7d1b0f624063258bad10a
|
[
"MIT"
] | 145
|
2017-06-21T13:31:29.000Z
|
2021-06-20T01:01:30.000Z
|
tests/bot_test.py
|
item4/yui
|
8628d0d54b94ada3cbe7d1b0f624063258bad10a
|
[
"MIT"
] | 21
|
2017-07-24T15:53:19.000Z
|
2021-12-23T04:18:31.000Z
|
import asyncio
from collections import defaultdict
from datetime import timedelta
import pytest
from yui.api import SlackAPI
from yui.bot import Bot
from yui.box import Box
from yui.types.slack.response import APIResponse
from yui.utils import json
from .util import FakeImportLib
def test_bot_init(event_loop, monkeypatch, bot_config):
importlib = FakeImportLib()
monkeypatch.setattr('importlib.import_module', importlib.import_module)
bot_config.APPS = ['yui.app1', 'yui.app2']
box = Box()
bot = Bot(bot_config, event_loop, using_box=box)
assert bot.config == bot_config
assert bot.channels == []
assert bot.ims == []
assert bot.groups == []
assert bot.restart is False
assert isinstance(bot.api, SlackAPI)
assert bot.box is box
assert isinstance(bot.queue, asyncio.Queue)
assert importlib.import_queue == [
'yui.app1',
'yui.app2',
]
@pytest.mark.asyncio
async def test_call(event_loop, bot_config, response_mock):
token = 'asdf1234'
response_mock.post(
'https://slack.com/api/test11',
body=json.dumps({'res': 'hello world!'}),
headers={'content-type': 'application/json'},
status=200,
)
response_mock.post(
'https://slack.com/api/test12',
body=json.dumps({'res': 'hello world!', 'data': {'extra': 'wow'}}),
headers={'content-type': 'application/json'},
status=200,
)
response_mock.post(
'https://slack.com/api/test21',
body=json.dumps({'error': 'aaa'}),
headers={'content-type': 'application/json'},
status=404,
)
response_mock.post(
'https://slack.com/api/test22',
body=json.dumps({'error': 'aaa'}),
headers={'content-type': 'application/json'},
status=404,
)
response_mock.post(
'https://slack.com/api/test3',
body=json.dumps({'res': 'hello world!'}),
headers={'content-type': 'application/json'},
status=200,
)
box = Box()
bot = Bot(bot_config, event_loop, using_box=box)
bot.api.throttle_interval = defaultdict(lambda: timedelta(0))
res = await bot.call('test11')
assert res == APIResponse(
body={'res': 'hello world!'},
status=200,
headers={'content-type': 'application/json'},
)
res = await bot.call('test12', data={'extra': 'wow'})
assert res == APIResponse(
body={'res': 'hello world!', 'data': {'extra': 'wow'}},
status=200,
headers={'content-type': 'application/json'},
)
res = await bot.call('test21')
assert res == APIResponse(
body={'error': 'aaa'},
status=404,
headers={'content-type': 'application/json'},
)
res = await bot.call('test22', data={'extra': 'wow'})
assert res == APIResponse(
body={'error': 'aaa'},
status=404,
headers={'content-type': 'application/json'},
)
res = await bot.call('test3', token=token)
assert res == APIResponse(
body={'res': 'hello world!'},
status=200,
headers={'content-type': 'application/json'},
)
| 27.787611
| 75
| 0.605414
| 367
| 3,140
| 5.111717
| 0.217984
| 0.074627
| 0.095949
| 0.154584
| 0.567697
| 0.567697
| 0.541045
| 0.491471
| 0.491471
| 0.491471
| 0
| 0.023671
| 0.233121
| 3,140
| 112
| 76
| 28.035714
| 0.755399
| 0
| 0
| 0.446809
| 0
| 0
| 0.214331
| 0.007325
| 0
| 0
| 0
| 0
| 0.148936
| 1
| 0.010638
| false
| 0
| 0.138298
| 0
| 0.148936
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9190bf228865d048848fd87f601781ac36e5057a
| 2,901
|
py
|
Python
|
scripts/marker_filter.py
|
CesMak/aruco_detector_ocv
|
bb45e39664247779cbbbc8d37b89c4556b4984d6
|
[
"BSD-3-Clause"
] | 12
|
2019-03-12T08:47:07.000Z
|
2022-02-09T03:59:39.000Z
|
scripts/marker_filter.py
|
vprooks/simple_aruco_detector
|
40cb7354d7da67028c91b4c4652e8c4a1d2abbbb
|
[
"MIT"
] | 3
|
2020-07-02T04:25:10.000Z
|
2021-08-31T15:56:13.000Z
|
scripts/marker_filter.py
|
CesMak/aruco_detector_ocv
|
bb45e39664247779cbbbc8d37b89c4556b4984d6
|
[
"BSD-3-Clause"
] | 11
|
2019-10-25T17:36:44.000Z
|
2022-02-16T17:12:38.000Z
|
#!/usr/bin/env python
import numpy as np
import rospy
import geometry_msgs.msg
import tf2_ros
from tf.transformations import quaternion_slerp
def translation_to_numpy(t):
return np.array([t.x, t.y, t.z])
def quaternion_to_numpy(q):
return np.array([q.x, q.y, q.z, q.w])
if __name__ == '__main__':
rospy.init_node('marker_filter')
alpha = rospy.get_param('~alpha', 0.9)
parent_frame_id = rospy.get_param('~parent_frame_id', 'kinect2_link')
marker_id = rospy.get_param('~marker_id', 'marker_id0')
marker_filtered_id = rospy.get_param(
'~marker_filtered_id', 'marker_id0_filtered')
rate_value = rospy.get_param('~rate_value', 125)
tfBuffer = tf2_ros.Buffer()
listener = tf2_ros.TransformListener(tfBuffer)
br = tf2_ros.TransformBroadcaster()
marker_pose = None
marker_pose0 = None
rate = rospy.Rate(rate_value)
while not rospy.is_shutdown():
marker_pose0 = marker_pose
# Lookup the transform
try:
marker_pose_new = tfBuffer.lookup_transform(
parent_frame_id, marker_id, rospy.Time())
            if marker_pose_new is not None:
marker_pose = marker_pose_new
except (tf2_ros.LookupException, tf2_ros.ConnectivityException, tf2_ros.ExtrapolationException) as e:
rospy.logwarn(e)
if marker_pose is None:
rate.sleep()
continue
# Apply running average filter to translation and rotation
        if marker_pose0 is not None:
rotation0 = quaternion_to_numpy(marker_pose0.transform.rotation)
rotation = quaternion_to_numpy(marker_pose.transform.rotation)
rotation_interpolated = quaternion_slerp(
rotation0, rotation, 1 - alpha)
translation0 = translation_to_numpy(
marker_pose0.transform.translation)
translation = translation_to_numpy(
marker_pose.transform.translation)
translation = alpha * translation0 + (1 - alpha) * translation
# Update pose of the marker
marker_pose.transform.rotation.x = rotation_interpolated[0]
marker_pose.transform.rotation.y = rotation_interpolated[1]
marker_pose.transform.rotation.z = rotation_interpolated[2]
marker_pose.transform.rotation.w = rotation_interpolated[3]
marker_pose.transform.translation.x = translation[0]
marker_pose.transform.translation.y = translation[1]
marker_pose.transform.translation.z = translation[2]
# Create new transform and broadcast it
t = geometry_msgs.msg.TransformStamped()
t.header.stamp = rospy.Time.now()
t.header.frame_id = parent_frame_id
t.child_frame_id = marker_filtered_id
t.transform = marker_pose.transform
br.sendTransform(t)
rate.sleep()
| 36.2625
| 109
| 0.666322
| 352
| 2,901
| 5.215909
| 0.286932
| 0.092593
| 0.103486
| 0.073529
| 0.08061
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015131
| 0.24819
| 2,901
| 79
| 110
| 36.721519
| 0.826685
| 0.055843
| 0
| 0.033898
| 0
| 0
| 0.045355
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033898
| false
| 0
| 0.084746
| 0.033898
| 0.152542
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9190f1884667aaeb95f3ee0745ae12dfce3341d8
| 3,713
|
py
|
Python
|
src/backbone/utils.py
|
hankyul2/FaceDA
|
73006327df3668923d4206f81d4976ca1240329d
|
[
"Apache-2.0"
] | 20
|
2021-11-26T18:05:30.000Z
|
2022-02-15T12:21:10.000Z
|
src/backbone/utils.py
|
hankyul2/FaceDA
|
73006327df3668923d4206f81d4976ca1240329d
|
[
"Apache-2.0"
] | null | null | null |
src/backbone/utils.py
|
hankyul2/FaceDA
|
73006327df3668923d4206f81d4976ca1240329d
|
[
"Apache-2.0"
] | 1
|
2022-02-15T12:21:17.000Z
|
2022-02-15T12:21:17.000Z
|
import os
import subprocess
from pathlib import Path
from torch.hub import load_state_dict_from_url
import numpy as np
model_urls = {
# ResNet
'resnet18': 'https://download.pytorch.org/models/resnet18-f37072fd.pth',
'resnet34': 'https://download.pytorch.org/models/resnet34-b627a593.pth',
'resnet50': 'https://download.pytorch.org/models/resnet50-0676ba61.pth',
'resnet101': 'https://download.pytorch.org/models/resnet101-63fe2227.pth',
'resnet152': 'https://download.pytorch.org/models/resnet152-394f9c45.pth',
'resnext50_32x4d': 'https://download.pytorch.org/models/resnext50_32x4d-7cdf4587.pth',
'resnext101_32x8d': 'https://download.pytorch.org/models/resnext101_32x8d-8ba56ff5.pth',
'wide_resnet50_2': 'https://download.pytorch.org/models/wide_resnet50_2-95faca4d.pth',
'wide_resnet101_2': 'https://download.pytorch.org/models/wide_resnet101_2-32ee1156.pth',
# MobileNetV2
'mobilenet_v2': 'https://download.pytorch.org/models/mobilenet_v2-b0353104.pth',
# Se ResNet
'seresnet18': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet18-4bb0ce65.pth',
'seresnet34': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-weights/seresnet34-a4004e63.pth',
'seresnet50': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet50-ce0d4300.pth',
'seresnet101': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet101-7e38fcc6.pth',
'seresnet152': 'https://github.com/rwightman/pytorch-image-models/releases/download/v0.1-cadene/se_resnet152-d17c99b7.pth',
'seresnext50_32x4d': 'http://data.lip6.fr/cadene/pretrainedmodels/se_resnext50_32x4d-a260b3a4.pth',
# ViT
'vit_base_patch16_224': 'https://storage.googleapis.com/vit_models/imagenet21k/ViT-B_16.npz',
'vit_base_patch32_224': 'https://storage.googleapis.com/vit_models/imagenet21k/ViT-B_32.npz',
'vit_large_patch16_224': 'https://storage.googleapis.com/vit_models/imagenet21k/ViT-L_16.npz',
'vit_large_patch32_224': 'https://storage.googleapis.com/vit_models/imagenet21k/ViT-L_32.npz',
# Hybrid (resnet50 + ViT)
'r50_vit_base_patch16_224': 'https://storage.googleapis.com/vit_models/imagenet21k/R50+ViT-B_16.npz',
'r50_vit_large_patch32_224': 'https://storage.googleapis.com/vit_models/imagenet21k/R50+ViT-L_32.npz',
}
def load_from_zoo(model, model_name, pretrained_path='pretrained/official'):
model_name = change_384_224(model_name)
Path(os.path.join(pretrained_path, model_name)).mkdir(parents=True, exist_ok=True)
if model_urls[model_name].endswith('pth'):
state_dict = load_state_dict_from_url(url=model_urls[model_name],
model_dir=os.path.join(pretrained_path, model_name),
progress=True, map_location='cpu')
state_dict.pop('fc.weight', None)
state_dict.pop('fc.bias', None)
state_dict.pop('classifier.weight', None)
state_dict.pop('classifier.bias', None)
model.load_state_dict(state_dict, strict=False)
elif model_urls[model_name].endswith('npz'):
npz = load_npz_from_url(url=model_urls[model_name],
file_name=os.path.join(pretrained_path, model_name, os.path.basename(model_urls[model_name])))
model.load_npz(npz)
def change_384_224(model_name):
model_name = model_name.replace('384', '224')
return model_name
def load_npz_from_url(url, file_name):
if not Path(file_name).exists():
subprocess.run(["wget", "-r", "-nc", '-O', file_name, url])
return np.load(file_name)
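# Hedged usage sketch (hypothetical; assumes a torchvision-style model whose
# classification head is named `fc`/`classifier`, which is why those keys are
# popped above):
#     from torchvision.models import resnet50
#     model = resnet50(num_classes=10)
#     load_from_zoo(model, 'resnet50')  # loads backbone weights, skips head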
| 50.175676
| 127
| 0.720442
| 503
| 3,713
| 5.091451
| 0.260437
| 0.052714
| 0.078094
| 0.089809
| 0.56228
| 0.370558
| 0.370558
| 0.283483
| 0.283483
| 0.283483
| 0
| 0.084704
| 0.131969
| 3,713
| 73
| 128
| 50.863014
| 0.709898
| 0.015082
| 0
| 0
| 0
| 0.096154
| 0.552329
| 0.024932
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057692
| false
| 0
| 0.096154
| 0
| 0.192308
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91918e0b4360daa841c2dd658213e7f9249510fa
| 702
|
py
|
Python
|
crawler1.py
|
pjha1994/Scrape_reddit
|
2a00a83854085e09f0cf53aef81969025876039b
|
[
"Apache-2.0"
] | null | null | null |
crawler1.py
|
pjha1994/Scrape_reddit
|
2a00a83854085e09f0cf53aef81969025876039b
|
[
"Apache-2.0"
] | null | null | null |
crawler1.py
|
pjha1994/Scrape_reddit
|
2a00a83854085e09f0cf53aef81969025876039b
|
[
"Apache-2.0"
] | null | null | null |
import requests
from bs4 import BeautifulSoup
def recursiveUrl(url, link, depth):
    if depth == 5:
        return url
    else:
        print(link['href'])
        page = requests.get(url + link['href'])
        soup = BeautifulSoup(page.text, 'html.parser')
        newlink = soup.find('a')
        # soup.find returns None when no anchor is present
        if newlink is None:
            return link
        else:
            return link, recursiveUrl(url, newlink, depth + 1)
def getLinks(url):
    page = requests.get(url)
    soup = BeautifulSoup(page.text, 'html.parser')
    links = soup.find_all('a')
    # collect results in a separate list: appending to `links` while
    # iterating over it would never terminate
    results = []
    for link in links:
        results.append(recursiveUrl(url, link, 0))
    return results
links = getLinks("http://www.reddit.com/")
print(links)
| 27
| 62
| 0.602564
| 88
| 702
| 4.795455
| 0.431818
| 0.106635
| 0.090047
| 0.085308
| 0.165877
| 0.165877
| 0
| 0
| 0
| 0
| 0
| 0.009709
| 0.266382
| 702
| 26
| 63
| 27
| 0.809709
| 0
| 0
| 0.173913
| 0
| 0
| 0.076814
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0.086957
| 0
| 0.347826
| 0.086957
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9191a318c08b49c9339f1e4504f721d3f2d1d83b
| 2,428
|
py
|
Python
|
chime2/tests/normal/models/seir_test.py
|
BrianThomasRoss/CHIME-2
|
f084ab552fac5e50841a922293b74d653450790b
|
[
"BSD-3-Clause"
] | null | null | null |
chime2/tests/normal/models/seir_test.py
|
BrianThomasRoss/CHIME-2
|
f084ab552fac5e50841a922293b74d653450790b
|
[
"BSD-3-Clause"
] | null | null | null |
chime2/tests/normal/models/seir_test.py
|
BrianThomasRoss/CHIME-2
|
f084ab552fac5e50841a922293b74d653450790b
|
[
"BSD-3-Clause"
] | 1
|
2020-11-19T23:08:52.000Z
|
2020-11-19T23:08:52.000Z
|
"""Tests for SEIR model in this repo
* Compares conserved quantities
* Compares model against SEIR wo social policies in limit to SIR
"""
from pandas import Series
from pandas.testing import assert_frame_equal, assert_series_equal
from bayes_chime.normal.models import SEIRModel, SIRModel
from pytest import fixture
from tests.normal.models.sir_test import ( # pylint: disable=W0611
fixture_penn_chime_raw_df_no_policy,
fixture_penn_chime_setup,
fixture_sir_data_wo_policy,
)
COLS_TO_COMPARE = [
"susceptible",
"infected",
"recovered",
# Does not compare census as this repo uses the exponential distribution
]
PENN_CHIME_COMMIT = "188c35be9561164bedded4a8071a320cbde0d2bc"
@fixture(name="seir_data")
def fixture_seir_data(sir_data_wo_policy):
"""Returns data for the SIHR model
"""
x, p = sir_data_wo_policy
pp = p.copy()
xx = x.copy()
pp["alpha"] = 0.5
pp["nu"] = 1
pp["initial_exposed"] = 0
return xx, pp
def test_conserved_n(seir_data):
"""Checks if S + E + I + R is conserved for SEIR
"""
x, pars = seir_data
n_total = 0
for key in SEIRModel.compartments:
n_total += pars[f"initial_{key}"]
seir_model = SEIRModel()
predictions = seir_model.propagate_uncertainties(x, pars)
n_computed = predictions[SEIRModel.compartments].sum(axis=1)
n_expected = Series(data=[n_total] * len(n_computed), index=n_computed.index)
assert_series_equal(n_expected, n_computed)
def test_compare_sir_vs_seir(sir_data_wo_policy, seir_data, monkeypatch):
"""Checks if SEIR and SIR return same results if the code enforces
* alpha = gamma
* E = 0
* dI = dE
"""
x_sir, pars_sir = sir_data_wo_policy
x_seir, pars_seir = seir_data
pars_seir["alpha"] = pars_sir["gamma"] # will be done by hand
def mocked_seir_step(data, **pars):
data["exposed"] = 0
new_data = SEIRModel.simulation_step(data, **pars)
new_data["infected"] += new_data["exposed_new"]
return new_data
seir_model = SEIRModel()
monkeypatch.setattr(seir_model, "simulation_step", mocked_seir_step)
sir_model = SIRModel()
predictions_sir = sir_model.propagate_uncertainties(x_sir, pars_sir)
predictions_seir = seir_model.propagate_uncertainties(x_seir, pars_seir)
assert_frame_equal(
predictions_sir[COLS_TO_COMPARE], predictions_seir[COLS_TO_COMPARE],
)
| 28.232558
| 81
| 0.710461
| 340
| 2,428
| 4.773529
| 0.347059
| 0.033272
| 0.027726
| 0.046211
| 0.039433
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017409
| 0.195634
| 2,428
| 85
| 82
| 28.564706
| 0.81362
| 0.177924
| 0
| 0.039216
| 0
| 0
| 0.083376
| 0.02046
| 0
| 0
| 0
| 0
| 0.058824
| 1
| 0.078431
| false
| 0
| 0.098039
| 0
| 0.215686
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91928996da1f5de4298b9395563c76e7f7e3542f
| 4,681
|
py
|
Python
|
Libraries/mattsLibraries/mathOperations.py
|
mrware91/PhilTransA-TRXS-Limits
|
5592c6c66276cd493d10f066aa636aaf600d3a00
|
[
"MIT"
] | null | null | null |
Libraries/mattsLibraries/mathOperations.py
|
mrware91/PhilTransA-TRXS-Limits
|
5592c6c66276cd493d10f066aa636aaf600d3a00
|
[
"MIT"
] | 2
|
2018-06-19T00:01:27.000Z
|
2018-10-16T18:33:24.000Z
|
Libraries/mattsLibraries/mathOperations.py
|
mrware91/PhilTransA-TRXS-Limits
|
5592c6c66276cd493d10f066aa636aaf600d3a00
|
[
"MIT"
] | null | null | null |
import numpy as np
from scipy.interpolate import interp1d
from pyTools import *
################################################################################
#~~~~~~~~~Log ops
################################################################################
def logPolyVal(p, x):
    # use `order` (not the builtin `ord`) and Python 3's `range`
    order = p.order()
    logs = []
    for idx in range(order + 1):
        logs.append(np.log(p[idx]) + (order - idx) * np.log(x))
    return logs
################################################################################
#~~~~~~~~~Symmeterize data
################################################################################
def symmeterize( x, y, interp_type='cubic' ):
if x.min() <= 0:
raise ValueError('x.min() must be greater than zero.')
xs = np.array([-x,x]).flatten()
xs.sort()
f = interp1d( x , y , kind=interp_type )
return { 'x':xs , 'y':f(np.abs(xs)) }
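# Illustrative use (hypothetical data): mirror samples defined on x > 0 about
# x = 0, evaluating a cubic interpolant at |x|:
#     x = np.linspace(0.1, 1.0, 10)
#     sym = symmeterize(x, x**2)   # sym['x'] has 20 sorted points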
################################################################################
#~~~~~~~~~3D Shapes
################################################################################
def makeSphere(x0=0,y0=0,z0=0,r=1,ntheta=30,nphi=30):
u = np.linspace(0, np.pi, ntheta)
v = np.linspace(0, 2 * np.pi, nphi)
x = np.outer(np.sin(u), np.sin(v))*r
y = np.outer(np.sin(u), np.cos(v))*r
z = np.outer(np.cos(u), np.ones_like(v))*r
return x+x0, y+y0, z+z0
def makeCylinder(x0=0,y0=0,z0=0,r=1,h=10,ntheta=30,nz=30):
u = np.linspace(0, 2*np.pi, ntheta)
z = np.linspace(0, h, nz)
UU,ZZ = np.meshgrid(u,z)
XX = np.cos(UU)*r
YY = np.sin(UU)*r
# ax.plot_wireframe(x, y, z)
return XX+x0, YY+y0, ZZ+z0
def generateLine3D( x0=0, x1=1, y0=0, y1=1, z0=0, z1=0, N=2 ):
return {'line':{'xData':np.linspace(x0,x1,N),
'yData':np.linspace(y0,y1,N),
'zData':np.linspace(z0,z1,N),
'cData':np.ones((N,1))}}
################################################################################
#~~~~~~~~~2D Shapes
################################################################################
def generateCircle(R=1, X0=0, Y0=0, N = 60, thetaMin = 0, thetaMax = 2*np.pi ):
thetas = np.linspace( thetaMin , thetaMax , N)
uY = np.sin( thetas )*R
uX = np.cos( thetas )*R
return {'circle':{'xData':uX+X0, 'yData':uY+Y0}}
def generateEllipse( RX=2, RY=1, X0=0, Y0=0, N = 60, thetaMin = 0, thetaMax = 2*np.pi ):
thetas = np.linspace( thetaMin , thetaMax , N)
uY = np.sin( thetas )*RY
uX = np.cos( thetas )*RX
return {'ellipse':{'xData':uX+X0, 'yData':uY+Y0}}
def makeCylinder2D( L = 10., R = 1., N=60, view_degrees=30. ):
yFac = np.cos(view_degrees * np.pi/180.)
zFac = np.sin(view_degrees * np.pi/180.)
xL = np.ones((2,1))*-R
xR = -xL
y = np.array([0,L])*yFac
cylinder = { 'leftSide':{'xData':xL, 'yData':y},
'rightSide':{'xData':xR, 'yData':y},
'upperEllipse':generateEllipse(RX = R, RY=R*zFac, Y0=L*yFac,N=N)['ellipse'],
'lowerHalfEllipse':generateEllipse(RX = R, RY=R*zFac, thetaMin=np.pi, thetaMax=2*np.pi, N=int(N/2.))['ellipse']}
return cylinder
################################################################################
#~~~~~~~~~Rotations
################################################################################
def rotateObject(x,y,z,ax=None,ay=None,az=None):
if ax is not None:
y,z = rotateAt(y,z,ax)
if ay is not None:
x,z = rotateAt(x,z,-ay)
if az is not None:
x,y = rotateAt(x,y,az)
return x,y,z
def rotateAt(x,y,a):
xp = np.cos(a)*x-np.sin(a)*y
yp = np.cos(a)*y+np.sin(a)*x
return xp, yp
def rotateObj2D( obj_in, degrees ):
obj = obj_in.copy()
keys = obj.keys()
for key in keys:
obj[key] = rotate2D( degrees=degrees, **obj[key] )
return obj
def rotate2D( xData, yData, degrees ):
x = xData.flatten()
y = yData.flatten()
z = np.zeros_like(x)
x,y,z = rotateObject( x, y, z, az=float(degrees)/180.*np.pi )
return {'xData':x, 'yData':y}
def rotateObj3D( obj_in, gamma, theta, phi ):
obj = obj_in.copy()
keys = obj.keys()
for key in keys:
obj[key] = rotate3D( gamma=gamma, theta=theta, phi=phi, **obj[key] )
return obj
def rotate3D( xData, yData, zData, gamma, theta, phi, kwargs_toggle=True, **kwargs ):
ignore_kwargs(kwargs, toggle=kwargs_toggle)
x = xData.flatten()
y = yData.flatten()
z = zData.flatten()
x,y,z = rotateObject( x, y, z, az=float(gamma)/180.*np.pi )
x,y,z = rotateObject( x, y, z, ay=float(theta)/180.*np.pi )
x,y,z = rotateObject( x, y, z, az=float(phi)/180.*np.pi )
return {'xData':x, 'yData':y, 'zData':z}
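################################################################################
#~~~~~~~~~Usage sketch (illustrative addition, not part of the original module)
################################################################################
# A hedged example of composing the helpers above; it only needs numpy and
# runs nothing on import. The parameter values are arbitrary.
if __name__ == '__main__':
    xs, ys, zs = makeSphere(r=2.0, ntheta=10, nphi=10)  # (10, 10) coordinate grids
    circle = generateCircle(R=1.5, N=8)                 # 8-point circle outline
    rotated = rotateObj2D(circle, degrees=45.0)         # rotate it in the plane
    print(xs.shape, rotated['circle']['xData'][:3])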
| 32.734266
| 129
| 0.470626
| 663
| 4,681
| 3.300151
| 0.221719
| 0.014625
| 0.015082
| 0.034278
| 0.326325
| 0.285649
| 0.234461
| 0.190585
| 0.157678
| 0.131627
| 0
| 0.032709
| 0.19013
| 4,681
| 142
| 130
| 32.964789
| 0.544447
| 0.026063
| 0
| 0.147368
| 0
| 0
| 0.053824
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.147368
| false
| 0
| 0.031579
| 0.010526
| 0.326316
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91941908fbc07382f07b7bc44926ab4220545f9d
| 947
|
py
|
Python
|
src/routes/web.py
|
enflo/weather-flask
|
c4d905e1f557b4c9b39d0a578fdbb6fefc839028
|
[
"Apache-2.0"
] | null | null | null |
src/routes/web.py
|
enflo/weather-flask
|
c4d905e1f557b4c9b39d0a578fdbb6fefc839028
|
[
"Apache-2.0"
] | null | null | null |
src/routes/web.py
|
enflo/weather-flask
|
c4d905e1f557b4c9b39d0a578fdbb6fefc839028
|
[
"Apache-2.0"
] | null | null | null |
from flask import Blueprint, render_template
from gateways.models import getWeatherData
web = Blueprint("web", __name__, template_folder='templates')
@web.route("/", methods=['GET'])
def home():
items = getWeatherData.get_last_item()
cityName = items["city"]
return render_template("index.html",
city=cityName[0],
temperature=items["temperature"],
humidity=items["humidity"],
pressure=items["pressure"])
#@web.route("/profile", methods=['GET'])
#def profile():
# items = getWeatherData.get_last_item()
# return render_template("profile.html",
# celcius=items["temperature"],
# humidity=items["humidity"],
# pressure=items["pressure"])
#@web.route("/about", methods=['GET'])
#def about():
# return render_template("about.html")
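# --- Usage sketch (illustrative addition) ---
# A minimal way to serve this blueprint during development; it assumes the
# gateways package from this repo is importable and a weather record exists:
#
#   from flask import Flask
#   app = Flask(__name__)
#   app.register_blueprint(web)
#   app.run(debug=True)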
| 32.655172
| 61
| 0.564942
| 87
| 947
| 6
| 0.37931
| 0.10728
| 0.074713
| 0.099617
| 0.367816
| 0.252874
| 0.252874
| 0.252874
| 0.252874
| 0.252874
| 0
| 0.001486
| 0.289335
| 947
| 28
| 62
| 33.821429
| 0.774146
| 0.414995
| 0
| 0
| 0
| 0
| 0.104779
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.166667
| 0
| 0.333333
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91979003f9cb74dc9f591b8277facbe005dfd825
| 532
|
py
|
Python
|
swapidemo1.py
|
anvytran-dev/mycode
|
3753c19828f0ecc506a6450bb6b71b4a5d651e5f
|
[
"MIT"
] | null | null | null |
swapidemo1.py
|
anvytran-dev/mycode
|
3753c19828f0ecc506a6450bb6b71b4a5d651e5f
|
[
"MIT"
] | null | null | null |
swapidemo1.py
|
anvytran-dev/mycode
|
3753c19828f0ecc506a6450bb6b71b4a5d651e5f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""Star Wars API HTTP response parsing"""
# requests is used to send HTTP requests (get it?)
import requests
URL= "https://swapi.dev/api/people/1"
def main():
"""sending GET request, checking response"""
# SWAPI response is stored in "resp" object
resp = requests.get(URL)
# what kind of python object is "resp"?
print("This object class is:", type(resp), "\n")
# what can we do with it?
print("Methods/Attributes include:", dir(resp))
if __name__ == "__main__":
main()
| 22.166667
| 52
| 0.654135
| 77
| 532
| 4.415584
| 0.675325
| 0.064706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004717
| 0.203008
| 532
| 23
| 53
| 23.130435
| 0.79717
| 0.468045
| 0
| 0
| 0
| 0
| 0.329588
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.125
| 0
| 0.25
| 0.25
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
9198600a03831a59503bb3d3f2827b284d0e1c16
| 2,316
|
bzl
|
Python
|
format/format.bzl
|
harshad-deo/TorchVI
|
f66d1486201368c9906869477ba7ae254d2e7191
|
[
"Apache-2.0"
] | null | null | null |
format/format.bzl
|
harshad-deo/TorchVI
|
f66d1486201368c9906869477ba7ae254d2e7191
|
[
"Apache-2.0"
] | null | null | null |
format/format.bzl
|
harshad-deo/TorchVI
|
f66d1486201368c9906869477ba7ae254d2e7191
|
[
"Apache-2.0"
] | null | null | null |
def _replace_formatted(ctx, manifest, files):
out = ctx.actions.declare_file(ctx.label.name)
# this makes it easier to add variables
file_lines = [
"""#!/bin/bash -e
WORKSPACE_ROOT="${1:-$BUILD_WORKSPACE_DIRECTORY}" """,
"""RUNPATH="${TEST_SRCDIR-$0.runfiles}"/""" + ctx.workspace_name,
"""RUNPATH=(${RUNPATH//bin/ })
RUNPATH="${RUNPATH[0]}"bin
echo $WORKSPACE_ROOT
echo $RUNPATH
while read original formatted; do
if [[ ! -z "$original" ]] && [[ ! -z "$formatted" ]]; then
if ! cmp -s "$WORKSPACE_ROOT/$original" "$RUNPATH/$formatted"; then
echo "Formatting $original"
cp "$RUNPATH/$formatted" "$WORKSPACE_ROOT/$original"
fi
fi
done < "$RUNPATH"/""" + manifest.short_path,
]
file_content = "\n".join(file_lines)
ctx.actions.write(
output = out,
content = file_content,
)
files.append(manifest)
return [DefaultInfo(files = depset(files), executable = out)]
def _build_format_py(ctx):
files = []
manifest_content = []
for src in ctx.files.srcs:
if src.is_source:
file = ctx.actions.declare_file("{}.format.output".format(src.short_path))
files.append(file)
ctx.actions.run(
arguments = [src.path, file.path],
executable = ctx.executable._fmt,
outputs = [file],
inputs = [src, ctx.file._style],
)
manifest_content.append("{} {}".format(src.short_path, file.short_path))
manifest = ctx.actions.declare_file("format/{}/manifest.txt".format(ctx.label.name))
ctx.actions.write(manifest, "\n".join(manifest_content) + "\n")
return manifest, files
def _format_py_impl(ctx):
manifest, files = _build_format_py(ctx)
return _replace_formatted(ctx, manifest, files)
format_py = rule(
implementation = _format_py_impl,
executable = True,
attrs = {
"srcs": attr.label_list(
allow_files = [".py"],
mandatory = True,
),
"_fmt": attr.label(
cfg = "host",
default = "//format:format_py",
executable = True,
),
"_style": attr.label(
allow_single_file = True,
default = ":setup.cfg",
),
},
)
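# --- Usage sketch (illustrative addition) ---
# A hedged example of invoking the rule from a BUILD file; the target name
# and glob pattern are hypothetical:
#
#   load("//format:format.bzl", "format_py")
#
#   format_py(
#       name = "format",
#       srcs = glob(["**/*.py"]),
#   )
#
# and then run it with: bazel run //:format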
| 30.473684
| 88
| 0.577288
| 254
| 2,316
| 5.070866
| 0.346457
| 0.046584
| 0.037267
| 0.048913
| 0.091615
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001786
| 0.274611
| 2,316
| 75
| 89
| 30.88
| 0.764881
| 0.015976
| 0
| 0.09434
| 0
| 0
| 0.055904
| 0.01255
| 0
| 0
| 0
| 0
| 0
| 1
| 0.056604
| false
| 0
| 0
| 0
| 0.113208
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
919e14a6393eda0c7e38c0fd3d5e470f7982030f
| 11,038
|
py
|
Python
|
first-floor.py
|
levabd/smart-climat-daemon
|
8ff273eeb74fb03ea04fda11b0128fa13d35b500
|
[
"MIT"
] | null | null | null |
first-floor.py
|
levabd/smart-climat-daemon
|
8ff273eeb74fb03ea04fda11b0128fa13d35b500
|
[
"MIT"
] | 1
|
2021-06-02T03:55:13.000Z
|
2021-06-02T03:55:13.000Z
|
first-floor.py
|
levabd/smart-climat-daemon
|
8ff273eeb74fb03ea04fda11b0128fa13d35b500
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import json
import argparse
import re
import datetime
import paramiko
import requests
# cmd ['ssh', 'smart',
# 'mkdir -p /home/levabd/smart-home-temp-humidity-monitor;
# cat - > /home/levabd/smart-home-temp-humidity-monitor/lr.json']
from miio import chuangmi_plug
from btlewrap import available_backends, BluepyBackend
from mitemp_bt.mitemp_bt_poller import MiTempBtPoller, \
MI_TEMPERATURE, MI_HUMIDITY, MI_BATTERY
state = {}
f = open('/home/pi/smart-climat-daemon/ac_state.json')
state = json.load(f)
plug_type = 'chuangmi.plug.m1'
def valid_mitemp_mac(mac, pat=re.compile(r"[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}:[0-9A-F]{2}")):
"""Check for valid mac addresses."""
if not pat.match(mac.upper()):
raise argparse.ArgumentTypeError(
'The MAC address "{}" seems to be in the wrong format'.format(mac))
return mac
def turn_on_humidifier():
"""Turn on humidifier on a first floor."""
hummidifier_plug = chuangmi_plug.ChuangmiPlug(
ip='192.168.19.59',
token='14f5b868a58ef4ffaef6fece61c65b16',
start_id=0,
debug=1,
lazy_discover=True,
model=plug_type)
hummidifier_plug.on()
def turn_off_humidifier():
"""Turn off humidifier on a first floor."""
hummidifier_plug = chuangmi_plug.ChuangmiPlug(
ip='192.168.19.59',
token='14f5b868a58ef4ffaef6fece61c65b16',
start_id=0,
debug=1,
lazy_discover=True,
model=plug_type)
hummidifier_plug.off()
def check_if_ac_off():
"""Check if AC is turned off."""
status_url = 'http://smart.levabd.pp.ua:2002/status-bedroom?key=27fbc501b51b47663e77c46816a'
response = requests.get(status_url, timeout=(20, 30))
if ('address' not in response.json()) and ('name' not in response.json()):
return None
if ((response.json()['name'] == "08bc20043df8") and (response.json()['address'] == "192.168.19.54")):
if response.json()['props']['boot'] == 0:
return True
return False
return None
def check_if_ac_cool():
"""Check if AC is turned for a automate cooling."""
status_url = 'http://smart.levabd.pp.ua:2002/status-bedroom?key=27fbc501b51b47663e77c46816a'
response = requests.get(status_url, timeout=(20, 30))
if ('address' not in response.json()) or ('name' not in response.json()):
return None
if ((response.json()['name'] == "08bc20043df8") and (response.json()['address'] == "192.168.19.54")):
if not response.json()['props']['boot'] == 1:
return False
if not response.json()['props']['runMode'] == '001':
return False
if not response.json()['props']['wdNumber'] == 25:
return False
if not response.json()['props']['windLevel'] == '001':
return False
return True
return None
def check_if_ac_heat():
"""Check if AC is turned for a automate heating."""
status_url = 'http://smart.levabd.pp.ua:2003/status/key/27fbc501b51b47663e77c46816a'
response = requests.get(status_url, timeout=(20, 30))
if ('address' not in response.json()) and ('name' not in response.json()):
return None
if ((response.json()['name'] == "08bc20043df8") and (response.json()['address'] == "192.168.19.54")):
if not response.json()['props']['boot'] == 1:
return False
if not response.json()['props']['runMode'] == '100':
return False
if not response.json()['props']['wdNumber'] == 23:
return False
if not response.json()['props']['windLevel'] == '001':
return False
return True
return None
def turn_on_heat_ac():
"""Turn on AC on a first floor for a heating if it was not."""
if (state['wasTurnedHeat'] == 1) and not state['triedTurnedHeat'] == 1:
return
heat_url = 'http://smart.levabd.pp.ua:2003/heat/key/27fbc501b51b47663e77c46816a'
ac_heat = check_if_ac_heat()
if ac_heat is not None:
if not ac_heat:
state['triedTurnedHeat'] = 1
state['wasTurnedHeat'] = 0
with open('/home/pi/smart-climat-daemon/ac_state.json', 'w') as file:
json.dump(state, file)
response = requests.get(heat_url)
print(response.json())
else:
if state['triedTurnedHeat'] == 1:
state['triedTurnedOff'] = 0
state['wasTurnedOff'] = 0
state['triedTurnedCool'] = 0
state['wasTurnedCool'] = 0
state['triedTurnedHeat'] = 0
state['wasTurnedHeat'] = 1
with open('/home/pi/smart-climat-daemon/ac_state.json', 'w') as file:
json.dump(state, file)
def turn_on_cool_ac():
"""Turn on AC on a first floor for a cooling if it was not."""
if (state['wasTurnedCool'] == 1) and not state['triedTurnedCool'] == 1:
return
cool_url = 'http://smart.levabd.pp.ua:2003/cool/key/27fbc501b51b47663e77c46816a'
ac_cool = check_if_ac_cool()
if ac_cool is not None:
if not ac_cool:
state['triedTurnedCool'] = 1
state['wasTurnedCool'] = 0
with open('/home/pi/smart-climat-daemon/ac_state.json', 'w') as file:
json.dump(state, file)
response = requests.get(cool_url)
print(response.json())
else:
if state['triedTurnedCool'] == 1:
state['triedTurnedOff'] = 0
state['wasTurnedOff'] = 0
state['triedTurnedCool'] = 0
state['wasTurnedCool'] = 1
state['triedTurnedHeat'] = 0
state['wasTurnedHeat'] = 0
with open('/home/pi/smart-climat-daemon/ac_state.json', 'w') as file:
json.dump(state, file)
def turn_off_ac():
"""Turn off AC on a first floor."""
if (state['wasTurnedOff'] == 1) and not state['triedTurnedOff'] == 1:
return
turn_url = 'http://smart.levabd.pp.ua:2003/power-off/key/27fbc501b51b47663e77c46816a'
ac_off = check_if_ac_off()
if ac_off is not None:
if not ac_off:
state['triedTurnedOff'] = 1
state['wasTurnedOff'] = 0
with open('/home/pi/smart-climat-daemon/ac_state.json', 'w') as file:
json.dump(state, file)
response = requests.get(turn_url)
print(response.json())
else:
if state['triedTurnedOff'] == 1:
state['triedTurnedOff'] = 0
state['wasTurnedOff'] = 1
state['triedTurnedCool'] = 0
state['wasTurnedCool'] = 0
state['triedTurnedHeat'] = 0
state['wasTurnedHeat'] = 0
with open('/home/pi/smart-climat-daemon/ac_state.json', 'w') as file:
json.dump(state, file)
def record_temp_humid(temperature, humidity):
"""Record temperature and humidity data for web interface monitor"""
dicty = {
"temperature": temperature,
"humidity": humidity
}
ssh = paramiko.SSHClient()
ssh.set_missing_host_key_policy(paramiko.AutoAddPolicy())
ssh.connect('smart.levabd.pp.ua', port = 2001, username='levabd', password='vapipu280.')
sftp = ssh.open_sftp()
with sftp.open('smart-home-temp-humidity-monitor/lr.json', 'w') as outfile:
json.dump(dicty, outfile)
ssh.close()
def poll_temp_humidity():
"""Poll data frstate['triedTurnedOff']om the sensor."""
today = datetime.datetime.today()
backend = BluepyBackend
poller = MiTempBtPoller('58:2d:34:38:c0:91', backend)
temperature = poller.parameter_value(MI_TEMPERATURE)
humidity = poller.parameter_value(MI_HUMIDITY)
print("Month: {}".format(today.month))
print("Getting data from Mi Temperature and Humidity Sensor")
print("FW: {}".format(poller.firmware_version()))
print("Name: {}".format(poller.name()))
print("Battery: {}".format(poller.parameter_value(MI_BATTERY)))
print("Temperature: {}".format(poller.parameter_value(MI_TEMPERATURE)))
print("Humidity: {}".format(poller.parameter_value(MI_HUMIDITY)))
return (today, temperature, humidity)
# scan(args):
# """Scan for sensors."""
# backend = _get_backend(args)
# print('Scanning for 10 seconds...')
# devices = mitemp_scanner.scan(backend, 10)
# devices = []
# print('Found {} devices:'.format(len(devices)))
# for device in devices:
# print(' {}'.format(device))
def list_backends(_):
"""List all available backends."""
backends = [b.__name__ for b in available_backends()]
print('\n'.join(backends))
def main():
"""Main function."""
# check_if_ac_cool()
(today, temperature, humidity) = poll_temp_humidity()
# Record temperature and humidity for monitor
record_temp_humid(temperature, humidity)
try:
if (humidity > 49) and (today.month < 10) and (today.month > 4):
turn_off_humidifier()
if (humidity < 31) and (today.month < 10) and (today.month > 4):
turn_on_humidifier()
if (humidity < 31) and ((today.month > 9) or (today.month < 5)):
turn_on_humidifier()
if (humidity > 49) and ((today.month > 9) or (today.month < 5)):
turn_off_humidifier()
# Prevent Sleep of Xiaomi Smart Plug
hummidifier_plug = chuangmi_plug.ChuangmiPlug(
ip='192.168.19.59',
token='14f5b868a58ef4ffaef6fece61c65b16',
start_id=0,
debug=0,
lazy_discover=True,
model='chuangmi.plug.m1')
print(hummidifier_plug.status())
except Exception:
print("Can not connect to humidifier")
# clear env at night
if today.hour == 4:
state['triedTurnedOff'] = 0
state['wasTurnedOff'] = 0
state['triedTurnedCool'] = 0
state['wasTurnedCool'] = 0
state['triedTurnedHeat'] = 0
state['wasTurnedHeat'] = 0
with open('/home/pi/smart-climat-daemon/ac_state.json', 'w') as file:
json.dump(state, file)
if (today.hour > -1) and (today.hour < 7):
turn_off_ac()
if (temperature > 26.4) and (today.month < 6) and (today.month > 4) and (today.hour < 24) and (today.hour > 10):
turn_on_cool_ac()
if (temperature > 26.4) and (today.month < 10) and (today.month > 8) and (today.hour < 24) and (today.hour > 10):
turn_on_cool_ac()
if (temperature > 27.3) and (today.month < 9) and (today.month > 5) and (today.hour < 24) and (today.hour > 10):
turn_on_cool_ac()
if (temperature < 23.5) and (today.month < 10) and (today.month > 4):
turn_off_ac()
# _if (temperature < 20) and ((today.month > 9) or (today.month < 5)) and (today.hour < 24) and (today.hour > 9):
# turn_on_heat_ac()
if (temperature > 22) and ((today.month > 9) or (today.month < 5)):
turn_off_ac()
if __name__ == '__main__':
main()
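# --- Deployment note (illustrative addition) ---
# This script polls once and exits, so a periodic scheduler is assumed, e.g.
# a cron entry such as:
#   */10 * * * * /usr/bin/python3 /home/pi/smart-climat-daemon/first-floor.py
# The path matches the ac_state.json location used above; the interval is a guess.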
| 37.80137
| 118
| 0.602102
| 1,379
| 11,038
| 4.704133
| 0.171139
| 0.030831
| 0.032064
| 0.018499
| 0.631417
| 0.566209
| 0.539232
| 0.480962
| 0.449669
| 0.433945
| 0
| 0.055024
| 0.252491
| 11,038
| 291
| 119
| 37.931271
| 0.731184
| 0.111705
| 0
| 0.486486
| 0
| 0.004505
| 0.217982
| 0.055858
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058559
| false
| 0.004505
| 0.040541
| 0
| 0.202703
| 0.058559
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
919e36250164a66af6592305ae454fa0dbde1d43
| 642
|
py
|
Python
|
reservior_classification.py
|
Optimist-Prime/QML-for-MNIST-classification
|
7513b3faa548166dba3df927a248e8c7f1ab2a15
|
[
"BSD-3-Clause"
] | 1
|
2020-02-04T12:51:47.000Z
|
2020-02-04T12:51:47.000Z
|
reservior_classification.py
|
Optimist-Prime/QML-for-MNIST-classification
|
7513b3faa548166dba3df927a248e8c7f1ab2a15
|
[
"BSD-3-Clause"
] | null | null | null |
reservior_classification.py
|
Optimist-Prime/QML-for-MNIST-classification
|
7513b3faa548166dba3df927a248e8c7f1ab2a15
|
[
"BSD-3-Clause"
] | null | null | null |
import pickle
from sklearn.neural_network import MLPClassifier
train = pickle.load(open('train_pca_reservoir_output_200samples.pickle','rb'))
test = pickle.load(open('test_pca_reservoir_output_50samples.pickle','rb'))
train_num = 200
test_num = 50
mlp = MLPClassifier(hidden_layer_sizes=(2000,), max_iter=100, alpha=1e-5,
solver='sgd', verbose=10, tol=1e-4, random_state=1,
learning_rate_init=.1, batch_size= 20)
mlp.fit(train[0], train[1][:train_num])
print("Training set score: %f" % mlp.score(train[0], train[1][:train_num]))
print("Test set score: %f" % mlp.score(test[0], test[1][:test_num]))
| 37.764706
| 78
| 0.700935
| 99
| 642
| 4.333333
| 0.535354
| 0.055944
| 0.065268
| 0.055944
| 0.195804
| 0.11655
| 0.11655
| 0
| 0
| 0
| 0
| 0.059783
| 0.140187
| 642
| 16
| 79
| 40.125
| 0.717391
| 0
| 0
| 0
| 0
| 0
| 0.207165
| 0.133956
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
919f4e67778a5a961b0e58f4deb0ff4d5a7ee8e6
| 4,099
|
py
|
Python
|
util.py
|
delmarrerikaine/LPG-PCA
|
deb631ee2c4c88190ce4204fcbc0765ae5cd8f53
|
[
"MIT"
] | 1
|
2021-05-07T01:00:18.000Z
|
2021-05-07T01:00:18.000Z
|
util.py
|
delmarrerikaine/LPG-PCA
|
deb631ee2c4c88190ce4204fcbc0765ae5cd8f53
|
[
"MIT"
] | null | null | null |
util.py
|
delmarrerikaine/LPG-PCA
|
deb631ee2c4c88190ce4204fcbc0765ae5cd8f53
|
[
"MIT"
] | 2
|
2019-06-29T16:30:32.000Z
|
2020-11-18T17:40:47.000Z
|
import numpy as np
import pandas as pd
from skimage import io
import skimage.measure as measure
import os
from lpg_pca_impl import denoise
def getNoisedImage(originalImage, variance):
# return random_noise(originalImage, mode='gaussian', var=variance)
np.random.seed(42)
noise = np.random.normal(size=originalImage.shape)
noise = noise/np.sqrt(np.power(noise, 2).mean())
noisedImage = originalImage + variance*noise
return noisedImage
def clip(img):
img = np.minimum(np.ones(img.shape), img)
img = np.maximum(np.zeros(img.shape), img)
return img
def readImg(path):
return io.imread(path, as_gray=True).astype('float64')/255.0
def showImg(img, name):
print(name)
img = clip(img)
io.imshow((img*255.0).astype('uint8'))
def saveImg(img, path):
img = clip(img)
io.imsave(path, (img*255.0).astype('uint8'))
def compare_psnr(img1, img2):
return measure.compare_psnr(img1, img2)
def compare_ssim(img1, img2):
return measure.compare_ssim(img1, img2)
def generate_images(img_name='mri'):
experiments_folder = 'experiments'
noise_variances = [10, 20, 30, 40]
for noise_variance in noise_variances:
corrected_noise_variance = noise_variance / 255.0
original_img = readImg(os.path.join('images', img_name + '.png'))
noised_img = getNoisedImage(original_img, corrected_noise_variance)
noised_file_name = img_name + '_noised_' + str(noise_variance) + '.png'
saveImg(noised_img, os.path.join(experiments_folder, noised_file_name))
print(noised_file_name + ' started.')
denoised_img = denoise(noised_img, noise_variance)
denoised_file_name = img_name + '_denoised_' + str(noise_variance) + '.png'
saveImg(denoised_img, os.path.join(experiments_folder, denoised_file_name))
print(denoised_file_name + ' finished.')
print("noised PSNR: " + str(compare_psnr(original_img, noised_img)) + ", SSIM: " + str(compare_ssim(original_img, noised_img)))
print("denoised PSNR: " + str(compare_psnr(original_img, denoised_img)) + ", SSIM: " + str(compare_ssim(original_img, denoised_img)))
def generate_latex_tables():
df = pd.read_csv('data.csv')
df = df.round(2)
image_texts = np.array([])
temp_directory = os.path.join(os.path.dirname(__file__), 'temp')
if not os.path.exists(temp_directory):
os.makedirs(temp_directory)
for image_name in list(set(df['image_name'])):
image_df = df[df['image_name'] == image_name]
image_df['denoise_lpg_pca'] = image_df['denoise_psnr_lpg_pca'].map(str) + '(' + image_df['denoise_ssim_lpg_pca'].map(str) + ')'
image_df['denoise_mf'] = image_df['denoise_psnr_mf'].map(str) + '(' + image_df['denoise_ssim_mf'].map(str) + ')'
image_df['denoise_nlm'] = image_df['denoise_psnr_nlm'].map(str) + '(' + image_df['denoise_ssim_nlm'].map(str) + ')'
image_df['denoise_bm3d'] = image_df['denoise_psnr_bm3d'].map(str) + '(' + image_df['denoise_ssim_bm3d'].map(str) + ')'
image_df = image_df[['sigma', 'denoise_lpg_pca', 'denoise_mf', 'denoise_nlm', 'denoise_bm3d']]
image_df['sigma'] = image_df['sigma'].map(int)
image_df.columns = ['sigma', 'LPG-PCA', 'MF', "NLM", 'BM3D']
path = os.path.join(temp_directory, image_name + '.tex')
image_df.to_latex(path, index=False, column_format='lrrrr')
with open(path, 'r') as file:
image_text = file.read()
image_text = image_text.replace(' ', '').replace(r'\toprule', r'\toprule &&' + image_name + r'\\ \midrule')
image_text = r'\noindent\begin{minipage}{.5\linewidth}' + '\n' + image_text + '\n' + r'\end{minipage}'
image_text = image_text.replace('\n\n', '\n').replace('sigma&', '$\\sigma$&')
image_texts = np.append(image_texts, image_text)
os.remove(path)
result = '\n'.join(image_texts)
filename = 'tables.tex'
with open(filename, "w+") as file:
file.write(result)
if(len(os.listdir(temp_directory))) == 0:
os.rmdir(temp_directory)
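# --- Usage sketch (illustrative addition) ---
# A hedged example of the noise/metric helpers above; it assumes an
# images/mri.png file exists, as generate_images() does:
#
#   img = readImg(os.path.join('images', 'mri.png'))
#   noisy = getNoisedImage(img, 20 / 255.0)
#   print('PSNR:', compare_psnr(img, clip(noisy)))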
| 36.598214
| 141
| 0.658941
| 562
| 4,099
| 4.553381
| 0.24911
| 0.051973
| 0.065651
| 0.040641
| 0.227042
| 0.157093
| 0.04533
| 0
| 0
| 0
| 0
| 0.014072
| 0.185167
| 4,099
| 111
| 142
| 36.927928
| 0.752096
| 0.015858
| 0
| 0.026316
| 0
| 0
| 0.141865
| 0.009673
| 0
| 0
| 0
| 0
| 0
| 1
| 0.118421
| false
| 0
| 0.078947
| 0.039474
| 0.263158
| 0.065789
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91a0653094ec563d20865f6d3bbca729f2752582
| 3,178
|
py
|
Python
|
ui/ui.py
|
kringen/wingnut
|
73be4f8393720ff0932ab069543e5f2d2308296d
|
[
"MIT"
] | null | null | null |
ui/ui.py
|
kringen/wingnut
|
73be4f8393720ff0932ab069543e5f2d2308296d
|
[
"MIT"
] | null | null | null |
ui/ui.py
|
kringen/wingnut
|
73be4f8393720ff0932ab069543e5f2d2308296d
|
[
"MIT"
] | null | null | null |
import redis
from rq import Queue, Connection
from flask import Flask, render_template, Blueprint, jsonify, request
import tasks
import rq_dashboard
from wingnut import Wingnut
app = Flask(
__name__,
template_folder="./templates",
static_folder="./static",
)
app.config.from_object(rq_dashboard.default_settings)
app.register_blueprint(rq_dashboard.blueprint, url_prefix="/rq")
@app.route("/", methods=["GET"])
def home():
return render_template("main/home.html")
@app.route("/tasks", methods=["POST"])
def run_task():
task_type = request.form["type"]
with Connection(redis.from_url("redis://localhost:6379")):
q = Queue()
task = q.enqueue(tasks.create_task, task_type)
response_object = {
"status": "success",
"data": {
"task_id": task.get_id()
}
}
return jsonify(response_object), 202
@app.route("/mode", methods=["POST"])
def set_mode():
task_type = request.form["type"]
with Connection(redis.from_url("redis://localhost:6379")):
q = Queue("mode")
task = q.enqueue(tasks.set_mode, task_type)
response_object = {
"status": "success",
"data": {
"task_id": task.get_id()
}
}
return jsonify(response_object), 202
@app.route("/tasks/<task_id>", methods=["GET"])
def get_status(task_id):
with Connection(redis.from_url("redis://localhost:6379")):
q = Queue()
task = q.fetch_job(task_id)
if task:
response_object = {
"status": "success",
"data": {
"task_id": task.get_id(),
"task_status": task.get_status(),
"task_result": task.result,
},
}
else:
response_object = {"status": "error"}
return jsonify(response_object)
@app.route("/configuration", methods=["GET"])
def get_configuration():
wingnut = Wingnut()
response_object = {
"status": "success",
"data": {
"servoPin": wingnut.servoPin,
"leftMotorPin1": wingnut.leftMotorPin1,
"leftMotorPin1": wingnut.leftMotorPin2,
"leftMotorEnablePin": wingnut.leftMotorEnablePin,
"rightMotorPin1": wingnut.rightMotorPin1,
"rightMotorPin1": wingnut.rightMotorPin2,
"rightMotorEnablePin": wingnut.rightMotorEnablePin,
"sonarTriggerPin": wingnut.sonarTriggerPin,
"sonarEchoPin": wingnut.sonarEchoPin
}
}
return jsonify(response_object)
@app.route("/diagnostics", methods=["GET"])
def get_diagnostics():
r = redis.Redis()
diagnostics = {}
diagnostics["power_level"] = r.get("power_level").decode("utf-8")
diagnostics["temperature"] = r.get("temperature").decode("utf-8")
diagnostics["free_memory_mb"] = r.get("free_memory_mb").decode("utf-8")
diagnostics["free_disk_space"] = r.get("free_disk_space").decode("utf-8")
response_object = {
"status": "success",
"data": {
"diagnostics": diagnostics
}
}
return jsonify(response_object)
if __name__ == "__main__":
app.run(host="0.0.0.0",debug=1)
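# --- Usage sketch (illustrative addition) ---
# Example requests against the routes above (endpoints come from this file;
# the form value and task id are hypothetical, port 5000 is Flask's default):
#   curl -X POST -F type=demo http://localhost:5000/tasks
#   curl http://localhost:5000/tasks/<task_id>
#   curl http://localhost:5000/configuration
#   curl http://localhost:5000/diagnostics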
| 29.425926
| 77
| 0.608559
| 337
| 3,178
| 5.52819
| 0.25816
| 0.082662
| 0.064412
| 0.072464
| 0.331723
| 0.271605
| 0.234031
| 0.234031
| 0.234031
| 0.234031
| 0
| 0.014523
| 0.241661
| 3,178
| 107
| 78
| 29.700935
| 0.758506
| 0
| 0
| 0.308511
| 0
| 0
| 0.189113
| 0.020768
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06383
| false
| 0
| 0.06383
| 0.010638
| 0.191489
| 0.021277
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91a63511fb79b5745ac6428aee3eedeaa5046fe6
| 1,410
|
py
|
Python
|
omkar/code.py
|
omi28/ga-learner-dst-repo
|
396c35ea56028717a96aed6ca771e39ebf68dc5b
|
[
"MIT"
] | null | null | null |
omkar/code.py
|
omi28/ga-learner-dst-repo
|
396c35ea56028717a96aed6ca771e39ebf68dc5b
|
[
"MIT"
] | null | null | null |
omkar/code.py
|
omi28/ga-learner-dst-repo
|
396c35ea56028717a96aed6ca771e39ebf68dc5b
|
[
"MIT"
] | null | null | null |
# --------------
# Importing header files
import numpy as np
import warnings
warnings.filterwarnings('ignore')
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
#New record
#Reading file
data = np.genfromtxt(path, delimiter=",", skip_header=1)
data.shape
cenus=np.concatenate((new_record,data),axis=0)
cenus.shape
print(cenus)
age=cenus[:,0]
max_age=age.max()
print(max_age)
min_age=age.min()
mean_age=np.mean(age)
age_std=np.std(age)
race=cenus[:,2]
print(race)
race_0=(race==0)
len_0=len(race[race_0])
print(len_0)
race_1=(race==1)
len_1=len(race[race_1])
race_2=(race==2)
race_3=(race==3)
race_4=(race==4)
len_2=len(race[race_2])
len_3=len(race[race_3])
len_4=len(race[race_4])
minority_race=3 #hard-coded: the len_* counts above were inspected to find the least-frequent race
print(minority_race)
senior_citizen=(age>60)
working_hour_sum=sum(cenus[:,6][senior_citizen])
print(working_hour_sum)
senior_citizen_len=len(age[senior_citizen])
avg_working_hours=working_hour_sum/senior_citizen_len
avg_working_hours=round(avg_working_hours,2)
education_num=cenus[:,1]
print(education_num)
high=education_num>10
#high=education_num[high]
print(high)
low=education_num<=10
#low=education_num[low]
print(low)
INCOME=cenus[:,7][high]
print(INCOME)
print(np.mean(INCOME))
avg_pay_high=round(np.mean(INCOME),2)
print(avg_pay_high)
LOW_AVG=cenus[:,7][low]
avg_pay_low=round(np.mean(LOW_AVG),2)
print(avg_pay_low)
#Code starts here
| 20.434783
| 57
| 0.719858
| 246
| 1,410
| 3.878049
| 0.247967
| 0.050314
| 0.057652
| 0.041929
| 0.062893
| 0.062893
| 0
| 0
| 0
| 0
| 0
| 0.039231
| 0.114184
| 1,410
| 68
| 58
| 20.735294
| 0.72458
| 0.085816
| 0
| 0
| 0
| 0
| 0.005771
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.04
| 0
| 0.04
| 0.26
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91aa65150dc0f4a17f1e9ed16821f5753cc86fa6
| 389
|
py
|
Python
|
python/Excel/enumerateCells.py
|
davidgjy/arch-lib
|
b4402b96d2540995a848e6c5f600b2d99847ded6
|
[
"Apache-2.0"
] | null | null | null |
python/Excel/enumerateCells.py
|
davidgjy/arch-lib
|
b4402b96d2540995a848e6c5f600b2d99847ded6
|
[
"Apache-2.0"
] | null | null | null |
python/Excel/enumerateCells.py
|
davidgjy/arch-lib
|
b4402b96d2540995a848e6c5f600b2d99847ded6
|
[
"Apache-2.0"
] | null | null | null |
import openpyxl
wb = openpyxl.load_workbook('example.xlsx')
sheet = wb['Sheet1']  # get_sheet_by_name() was removed from modern openpyxl
rows = sheet.max_row  # replaces the deprecated get_highest_row()
cols = sheet.max_column  # replaces the deprecated get_highest_column()
for i in range(1, rows + 1):
for j in range(1, cols + 1):
print('%s: %s' % (sheet.cell(row=i, column=j).coordinate, sheet.cell(row=i, column=j).value))
print('---------------------------------------------')
| 32.416667
| 96
| 0.59126
| 57
| 389
| 3.894737
| 0.491228
| 0.072072
| 0.135135
| 0.117117
| 0.18018
| 0.18018
| 0
| 0
| 0
| 0
| 0
| 0.014925
| 0.138817
| 389
| 12
| 97
| 32.416667
| 0.647761
| 0
| 0
| 0
| 0
| 0
| 0.182058
| 0.118734
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.111111
| 0
| 0.111111
| 0.222222
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91ac9d140e7247cc524f64941c877611ed2cbd70
| 6,257
|
py
|
Python
|
CurrencyExchange.py
|
aarana14/CurrencyExchange
|
e3f35c1481acf19683a74a41509b1dd37ae48594
|
[
"MIT"
] | null | null | null |
CurrencyExchange.py
|
aarana14/CurrencyExchange
|
e3f35c1481acf19683a74a41509b1dd37ae48594
|
[
"MIT"
] | null | null | null |
CurrencyExchange.py
|
aarana14/CurrencyExchange
|
e3f35c1481acf19683a74a41509b1dd37ae48594
|
[
"MIT"
] | null | null | null |
#import external libraries used in code
import requests, json
import pycountry
print('Currency Exchange')
currencies = []
def findCurrency():
#Finds all available currencies
allCurrency = (list(pycountry.currencies))
for x in allCurrency:
y = str(x)
y = y[18:21]
#Adds the value of their ISO to the "currencies" list
currencies.append(y)
#Organizes all values in "currency" list
currecyDisplay = ''
inline = 0
for cs in currencies:
currecyDisplay += cs + ' | '
inline += 1
#Allows up to 26 ISOs to be in one line
if inline >= 26:
currecyDisplay += '\n '
inline = 0
#Displays all currency ISOs to user
print('Available Currencies:\n',currecyDisplay)
def help():
#Ask user if they need help
questions = input('Type ? for help or Enter to continue: ')
#If user inputs "?" run help procedure
if questions == '?':
#Display information order
print('--------\nCurrency Exchange Help\nISO currency codes are three-letter alphabetic codes that represent the various currencies\n\nCurrency ISO:\nCurrency Name:\n--------')
#Obtains information of all currencies
allCurrency = (list(pycountry.currencies))
#For each currency obtain the ISO and the name of currency
#Display ISO and Data
for x in allCurrency:
y = str(x)
w = y[18:21]
n = int(y.index(',', y.index(',') + 1))
z = y[30:n-1]
print(w)
print(z + '\n')
print('--------\n')
#Else user does not input "?" continue program
else:
pass
def userInput():
#Program tries asking the user for data input
try:
fromCurrency = input('From (ISO): ').upper()
toCurrency = input('To (ISO): ').upper()
currencyAmount = input('Amount: ')
currencyAmount = int(currencyAmount.replace(',', ''))
#If the entered amount is not a number, inform the user; the string value is later rejected by checkInfo()
except ValueError:
print('Amount Must Be A Number')
#Return inputed data
return currencyAmount, fromCurrency, toCurrency
def checkInfo(fromC, toC, currencyA, check):
#"validCurrency" value increses as data inputed if verified
validCurrency = 0
#Check if each entered ISO is valid
#If the values are valid, "validCurrency" is increased
for givenCurrencies in currencies:
if fromC == givenCurrencies:
validCurrency += 1
for givenCurrencies in currencies:
if toC == givenCurrencies:
validCurrency += 1
#Check if "validCurrency" meets necessary verification value
#Check if "validCurrency" is not 2 (Data is not valid) or inputed amount data is not the correct value
if validCurrency != 2 or type(currencyA) != int:
#Let user know data is invalid
print('Information Invalid\n')
#Ask user if they need help
help()
#Reset "validCurrency"
validCurrency = 0
#Set "check" as False
checks = False
#If type of data is correct and valid "check" is set to True
else:
checks = True
return fromC, toC, currencyA, checks
def dataInput():
#Data has not been checked yet, therefore "check" is False
check = False
#While the data is not valid or not checked repeat data input and data check
while check == False:
currencyAmount, fromCurrency, toCurrency = userInput()
fromC, toC, currencyA, check = checkInfo(fromCurrency, toCurrency, currencyAmount, check)
#Once data is valid and checked return values
return fromC, toC, currencyA
def userData():
#No data if the information provided is correct
correctInfo = ''
#While the user does not approve of data, repeat data input and data check
while correctInfo != 'y':
fromC, toC, currencyA = dataInput()
#Display the data the user entered after it has been checked and validated
print('\nFrom:',fromC)
print('To:',toC)
print('Amount:', currencyA)
#Ask user if the data provided is correct
correctInfo = input('Is the information correct (y/n)?: ').lower()
print('')
help()
#Once data is approved by user, return values
return currencyA, fromC, toC
def realTimeRate(from_currency, to_currency):
#API key provided by Alpha Vanatage
api_key = "1RU6IZY5D9UIISJK"
#Define "url" where data is stored
#"url" varies from user selected data
url = ('https://www.alphavantage.co/query?function=CURRENCY_EXCHANGE_RATE&from_currency=%s&to_currency=%s&apikey=%s' % (from_currency, to_currency, api_key))
#Get response from reqest of "url"
req = requests.get(url)
#Obtain json format and set data for python to read
#"Result" has nested dictionaries
result = req.json()
#Display exchange rate information to user
print("Realtime Currency Exchange Rate for",
result["Realtime Currency Exchange Rate"]
["2. From_Currency Name"], "to",
result["Realtime Currency Exchange Rate"]
["4. To_Currency Name"], "is",
result["Realtime Currency Exchange Rate"]
['5. Exchange Rate'], to_currency)
#Return the value of exchange
return float(result["Realtime Currency Exchange Rate"]
['5. Exchange Rate'])
def completeExchange(rate, cAmount, fCurrency, tCurrency):
#Total of the "to" currency is the rate times the amount of the "from" currency
total = rate * cAmount
end = ' '
#Maintain program Running until user has inputed the Enter key
while end == ' ':
print('\n%s %s is %.2f %s' % (cAmount, fCurrency, total, tCurrency))
end = input('Press Enter To Close')
if __name__ == "__main__":
findCurrency()
help()
currencyAmount, fromCurrency, toCurrency = userData()
rate = realTimeRate(fromCurrency, toCurrency)
completeExchange(rate, currencyAmount, fromCurrency, toCurrency)
| 38.863354
| 189
| 0.61563
| 739
| 6,257
| 5.184032
| 0.281461
| 0.028191
| 0.031323
| 0.036544
| 0.144871
| 0.102062
| 0.052728
| 0.024537
| 0
| 0
| 0
| 0.007687
| 0.293112
| 6,257
| 160
| 190
| 39.10625
| 0.858467
| 0.307496
| 0
| 0.215686
| 0
| 0.019608
| 0.194088
| 0.005331
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078431
| false
| 0.009804
| 0.019608
| 0
| 0.147059
| 0.137255
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91ad8a5fd94219e90c24839542dbfefd0cc9fc70
| 6,142
|
py
|
Python
|
scripts/analyse_bse.py
|
QU-XIAO/yambopy
|
ff65a4f90c1bfefe642ebc61e490efe781709ff9
|
[
"BSD-3-Clause"
] | 21
|
2016-04-07T20:53:29.000Z
|
2021-05-14T08:06:02.000Z
|
scripts/analyse_bse.py
|
alexmoratalla/yambopy
|
8ec0e1e18868ccaadb3eab36c55e6a47021e257d
|
[
"BSD-3-Clause"
] | 22
|
2016-06-14T22:29:47.000Z
|
2021-09-16T15:36:26.000Z
|
scripts/analyse_bse.py
|
alexmoratalla/yambopy
|
8ec0e1e18868ccaadb3eab36c55e6a47021e257d
|
[
"BSD-3-Clause"
] | 15
|
2016-06-14T18:40:57.000Z
|
2021-08-07T13:17:43.000Z
|
# Copyright (C) 2018 Alexandre Morlet, Henrique Pereira Coutada Miranda
# All rights reserved.
#
# This file is part of yambopy
#
from __future__ import print_function
from builtins import range
from yambopy import *
from qepy import *
import json
import matplotlib.pyplot as plt
import numpy as np
import sys
import argparse
import operator
def analyse_bse( folder, var, exc_n, exc_int, exc_degen, exc_max_E, pack, text, draw ):
"""
Using ypp, you can study the convergence of BSE calculations in 2 ways:
Create a .png of all absorption spectra relevant to the variable you study
Look at the eigenvalues of the first n "bright" excitons (given a threshold intensity)
The script reads from <folder> all results from <variable> calculations for processing.
The resulting pictures and data files are saved in the ./analyse_bse/ folder.
By default, the graphical interface is deactivated (assuming you run on a cluster because of ypp calls).
See line 2 inside the script.
"""
# Packing results (o-* files) from the calculations into yambopy-friendly .json files
if pack: # True by default, False if -np used
print('Packing ...')
pack_files_in_folder(folder,mask=var)
pack_files_in_folder(folder,mask='reference')
print('Packing done.')
else:
print('Packing skipped.')
# importing data from .json files in <folder>
print('Importing...')
data = YamboAnalyser(folder)
# extract data according to relevant var
invars = data.get_inputfiles_tag(var)
# Get only files related to the convergence study of the variable,
# ordered to have a smooth plot
keys=[]
sorted_invars = sorted(list(invars.items()), key=operator.itemgetter(1))
for i in range(0,len(sorted_invars)):
key=sorted_invars[i][0]
if key.startswith(var) or key=='reference.json':
keys.append(key)
print('Files detected: ',keys)
# unit of the input value
unit = invars[keys[0]]['variables'][var][1]
######################
# Output-file filename
######################
os.system('mkdir -p analyse_bse')
outname = './analyse_%s/%s_%s'%(folder,folder,var)
# Array that will contain the output
excitons = []
# Loop over all calculations
for key in keys:
jobname=key.replace('.json','')
print(jobname)
# input value
# BndsRn__ is a special case
if var.startswith('BndsRnX'):
# format : [1, nband, ...]
inp = invars[key]['variables'][var][0][1]
else:
inp = invars[key]['variables'][var][0]
print('Preparing JSON file. Calling ypp if necessary.')
### Creating the 'absorptionspectra.json' file
# It will contain the exciton energies
y = YamboOut(folder=folder,save_folder=folder)
# Args : name of job, SAVE folder path, folder where job was run path
a = YamboBSEAbsorptionSpectra(jobname,path=folder)
# Get excitons values (runs ypp once)
a.get_excitons(min_intensity=exc_int,max_energy=exc_max_E,Degen_Step=exc_degen)
# Write .json file with spectra and eigenenergies
a.write_json(filename=outname)
### Loading data from .json file
f = open(outname+'.json')
data = json.load(f)
f.close()
print('JSON file prepared and loaded.')
### Plotting the absorption spectra
# BSE spectra
plt.plot(data['E/ev[1]'], data['EPS-Im[2]'],label=jobname,lw=2)
# # Axes : lines for exciton energies (disabled, would make a mess)
# for n,exciton in enumerate(data['excitons']):
# plt.axvline(exciton['energy'])
### Creating array with exciton values (according to settings)
l = [inp]
for n,exciton in enumerate(data['excitons']):
if n <= exc_n-1:
l.append(exciton['energy'])
excitons.append(l)
if text:
header = 'Columns : '+var+' (in '+unit+') and "bright" excitons eigenenergies in order.'
print(excitons)
np.savetxt(outname+'.dat',excitons,header=header)
#np.savetxt(outname,excitons,header=header,fmt='%1f')
print(outname+'.dat')
else:
print('-nt flag : no text produced.')
if draw:
plt.xlabel('$\omega$ (eV)')
plt.gca().yaxis.set_major_locator(plt.NullLocator())
plt.legend()
#plt.draw()
#plt.show()
plt.savefig(outname+'.png', bbox_inches='tight')
print(outname+'.png')
else:
print('-nd flag : no plot produced.')
print('Done.')
if __name__ == "__main__":
parser = argparse.ArgumentParser(description='Study convergence on BS calculations using ypp calls.')
pa = parser.add_argument
pa('folder', help='Folder containing SAVE and convergence runs.' )
pa('variable', help='Variable tested (e.g. FFTGvecs)' )
pa('-ne','--numbexc', help='Number of excitons to read beyond threshold', default=2,type=int)
pa('-ie','--intexc', help='Minimum intensity for excitons to be considered bright', default=0.05,type=float)
pa('-de','--degenexc', help='Energy threshold under which different peaks are merged (eV)', default=0.01,type=float)
pa('-me','--maxexc', help='Energy threshold after which excitons are not read anymore (eV)', default=8.0,type=float)
pa('-np','--nopack', help='Skips packing o- files into .json files', action='store_false')
pa('-nt','--notext', help='Skips writing the .dat file', action='store_false')
pa('-nd','--nodraw', help='Skips drawing (plotting) the abs spectra', action='store_false')
if len(sys.argv)==1:
parser.print_help()
sys.exit(1)
args = parser.parse_args()
folder = args.folder
var = args.variable
exc_n = args.numbexc
exc_int = args.intexc
exc_degen = args.degenexc
exc_max_E = args.maxexc
pack = args.nopack
text = args.notext
draw = args.nodraw
analyse_bse( folder, var, exc_n, exc_int, exc_degen, exc_max_E, pack=pack, text=text, draw=draw )
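# --- Usage sketch (illustrative addition) ---
# A hedged example invocation; the folder and variable names are hypothetical,
# while the flags come from the argparse definitions above:
#   python analyse_bse.py bse_conv FFTGvecs -ne 3 -ie 0.05 -de 0.01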
| 36.559524
| 122
| 0.632693
| 813
| 6,142
| 4.697417
| 0.369004
| 0.015711
| 0.007332
| 0.00995
| 0.070175
| 0.070175
| 0.042943
| 0.025137
| 0.025137
| 0.025137
| 0
| 0.006644
| 0.240313
| 6,142
| 167
| 123
| 36.778443
| 0.81183
| 0.279551
| 0
| 0.041667
| 0
| 0
| 0.238428
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.010417
| false
| 0
| 0.114583
| 0
| 0.125
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91aeb848169969b77dd6c9be3484be7a02c40a1b
| 2,218
|
py
|
Python
|
tools/acetz.py
|
arkhipenko/AceTime
|
bc6e6aa530e309b62a204b7574322ba013066b06
|
[
"MIT"
] | 1
|
2021-02-23T06:17:36.000Z
|
2021-02-23T06:17:36.000Z
|
tools/acetz.py
|
arkhipenko/AceTime
|
bc6e6aa530e309b62a204b7574322ba013066b06
|
[
"MIT"
] | null | null | null |
tools/acetz.py
|
arkhipenko/AceTime
|
bc6e6aa530e309b62a204b7574322ba013066b06
|
[
"MIT"
] | null | null | null |
from typing import cast, Optional
from datetime import datetime, tzinfo, timedelta
from zonedbpy import zone_infos
from zone_processor.zone_specifier import ZoneSpecifier
from zone_processor.inline_zone_info import ZoneInfo
__version__ = '1.1'
class acetz(tzinfo):
"""An implementation of datetime.tzinfo using the ZoneSpecifier class
from AceTime/tools.
"""
def __init__(self, zone_info: ZoneInfo):
self.zone_info = zone_info
self.zs = ZoneSpecifier(zone_info, use_python_transition=True)
def _offset_info(self, dt: datetime):
"""Shared lookup used by utcoffset(), dst() and tzname() below."""
self.zs.init_for_year(dt.year)
offset_info = self.zs.get_timezone_info_for_datetime(dt)
if not offset_info:
raise Exception(
f'Unknown timezone info for '
f'{dt.year:04}-{dt.month:02}-{dt.day:02} '
f'{dt.hour:02}:{dt.minute:02}:{dt.second:02}'
)
return offset_info
def utcoffset(self, dt: Optional[datetime]) -> timedelta:
assert dt
return timedelta(seconds=self._offset_info(dt).total_offset)
def dst(self, dt: Optional[datetime]) -> timedelta:
assert dt
return timedelta(seconds=self._offset_info(dt).dst_offset)
def tzname(self, dt: Optional[datetime]) -> str:
assert dt
return self._offset_info(dt).abbrev
def zone_specifier(self) -> ZoneSpecifier:
return self.zs
def gettz(zone_name: str) -> acetz:
zone_info = cast(ZoneInfo, zone_infos.ZONE_INFO_MAP.get(zone_name))
if not zone_info:
raise Exception(f"Zone '{zone_name}' not found")
return acetz(zone_info)
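# --- Usage sketch (illustrative addition) ---
# A hedged example; it assumes the bundled zonedbpy database contains the
# named zone:
#
#   from datetime import datetime
#   tz = gettz('America/Los_Angeles')
#   dt = datetime(2021, 6, 1, 12, 0, tzinfo=tz)
#   print(tz.tzname(dt), tz.utcoffset(dt))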
| 34.65625
| 73
| 0.628945
| 300
| 2,218
| 4.453333
| 0.226667
| 0.053892
| 0.067365
| 0.056886
| 0.50524
| 0.50524
| 0.50524
| 0.50524
| 0.50524
| 0.50524
| 0
| 0.023213
| 0.261948
| 2,218
| 63
| 74
| 35.206349
| 0.792914
| 0.038774
| 0
| 0.48
| 0
| 0
| 0.166273
| 0.113368
| 0
| 0
| 0
| 0
| 0.06
| 1
| 0.12
| false
| 0
| 0.1
| 0.02
| 0.34
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
91b37c8672721c9195859e7e71caa5db1a857b4d
| 25,928
|
py
|
Python
|
examples/run_chemistry_parser.py
|
ZhuoyuWei/transformers
|
16d0ebd55d17dd5095231566a0544ecebd56bc9c
|
[
"Apache-2.0"
] | null | null | null |
examples/run_chemistry_parser.py
|
ZhuoyuWei/transformers
|
16d0ebd55d17dd5095231566a0544ecebd56bc9c
|
[
"Apache-2.0"
] | null | null | null |
examples/run_chemistry_parser.py
|
ZhuoyuWei/transformers
|
16d0ebd55d17dd5095231566a0544ecebd56bc9c
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2019 The HuggingFace Inc. team.
# Copyright (c) 2019 The HuggingFace Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Finetuning seq2seq models for sequence generation."""
import argparse
import functools
import logging
import os
import random
import sys
sys.path.append(r'../')
import numpy as np
from tqdm import tqdm, trange
import torch
from torch.optim import Adam
from torch.utils.data import DataLoader, RandomSampler, SequentialSampler
from transformers import (
AutoTokenizer,
BertForMaskedLM,
BertConfig,
PreTrainedEncoderDecoder,
Model2Models,
)
from utils_summarization import (
CNNDailyMailDataset,
encode_for_summarization,
fit_to_block_size,
build_lm_labels,
build_mask,
compute_token_type_ids,
)
from utils_chemistry import (ChemistryDataset,)
'''
class InputExample(object):
def __init__(self,example_id,question_input,question_varible_output=None,condition_output=None):
self.example_id=example_id
self.question_input=question_input
self.question_varible_output=question_varible_output
self.condition_output=condition_output
'''
logger = logging.getLogger(__name__)
logging.basicConfig(stream=sys.stdout, level=logging.INFO)
def set_seed(args):
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
# ------------
# Load dataset
# ------------
def load_and_cache_examples(args, tokenizer, prefix="train"):
dataset = ChemistryDataset(tokenizer, prefix=prefix, data_dir=args.data_dir)
return dataset
def collate(data, tokenizer, input_block_size,output_block_size):
""" List of tuple as an input. """
question_inputs=[]
question_varible_outputs=[]
condition_outputs=[]
for i,example in enumerate(data):
question_input=tokenizer.encode(example.question_input)
question_input=fit_to_block_size(question_input, input_block_size, tokenizer.pad_token_id)
question_inputs.append(question_input)
if example.question_varible_output is not None:
question_varible_output=tokenizer.encode(example.question_varible_output)
else:
question_varible_output=tokenizer.build_inputs_with_special_tokens([])
question_varible_output=fit_to_block_size(question_varible_output, output_block_size, tokenizer.pad_token_id)
question_varible_outputs.append(question_varible_output)
if example.condition_output is not None:
condition_output=tokenizer.encode(example.condition_output)
else:
condition_output=tokenizer.build_inputs_with_special_tokens([])
condition_output=fit_to_block_size(condition_output, output_block_size, tokenizer.pad_token_id)
condition_outputs.append(condition_output)
question_inputs = torch.tensor(question_inputs)
question_varible_outputs = torch.tensor(question_varible_outputs)
condition_outputs = torch.tensor(condition_outputs)
question_inputs_mask = build_mask(question_inputs, tokenizer.pad_token_id)
question_varible_outputs_mask = build_mask(question_varible_outputs, tokenizer.pad_token_id)
condition_outputs_mask = build_mask(condition_outputs, tokenizer.pad_token_id)
question_varible_outputs_mask_lm_labels = build_lm_labels(question_varible_outputs, tokenizer.pad_token_id)
condition_outputs_mask_lm_labels = build_lm_labels(condition_outputs, tokenizer.pad_token_id)
return (
question_inputs,
[question_varible_outputs,condition_outputs],
question_inputs_mask,
[question_varible_outputs_mask,condition_outputs_mask],
[question_varible_outputs_mask_lm_labels,condition_outputs_mask_lm_labels],
)
# ----------
# Optimizers
# ----------
class BertSumOptimizer(object):
""" Specific optimizer for BertSum.
As described in [1], the authors fine-tune BertSum for abstractive
summarization using two Adam Optimizers with different warm-up steps and
learning rate. They also use a custom learning rate scheduler.
[1] Liu, Yang, and Mirella Lapata. "Text summarization with pretrained encoders."
arXiv preprint arXiv:1908.08345 (2019).
"""
def __init__(self, model, lr, warmup_steps, beta_1=0.99, beta_2=0.999, eps=1e-8):
self.encoder = model.encoder
self.decoders = model.decoders
self.lr = lr
self.warmup_steps = warmup_steps
self.decoders_parameters=[]
for decoder in model.decoders:
self.decoders_parameters+=decoder.parameters()
self.optimizers = {
"encoder": Adam(
model.encoder.parameters(),
lr=lr["encoder"],
betas=(beta_1, beta_2),
eps=eps,
),
"decoder": Adam(
self.decoders_parameters,
lr=lr["decoder"],
betas=(beta_1, beta_2),
eps=eps,
),
}
self._step = 0
def _update_rate(self, stack):
return self.lr[stack] * min(
self._step ** (-0.5), self._step * self.warmup_steps[stack] ** (-0.5)
)
def zero_grad(self):
# both optimizers live in self.optimizers; the attributes referenced
# originally (optimizer_decoder/optimizer_encoder) did not exist
for optimizer in self.optimizers.values():
optimizer.zero_grad()
def step(self):
self._step += 1
for stack, optimizer in self.optimizers.items():
new_rate = self._update_rate(stack)
for param_group in optimizer.param_groups:
param_group["lr"] = new_rate
optimizer.step()
# ------------
# Train
# ------------
def train(args, model, tokenizer):
""" Fine-tune the pretrained model on the corpus. """
set_seed(args)
# Load the data
args.train_batch_size = args.per_gpu_train_batch_size * max(1, args.n_gpu)
train_dataset = load_and_cache_examples(args, tokenizer, "train")
train_sampler = RandomSampler(train_dataset)
model_collate_fn = functools.partial(collate, tokenizer=tokenizer,
input_block_size=args.input_block_size,output_block_size=args.output_block_size)
train_dataloader = DataLoader(
train_dataset,
sampler=train_sampler,
batch_size=args.train_batch_size,
collate_fn=model_collate_fn,
)
# Training schedule
if args.max_steps > 0:
t_total = args.max_steps
args.num_train_epochs = t_total // (
len(train_dataloader) // args.gradient_accumulation_steps + 1
)
else:
t_total = (
len(train_dataloader)
// args.gradient_accumulation_steps
* args.num_train_epochs
)
# Prepare the optimizer
#lr = {"encoder": 0.002, "decoder": 0.2}
lr = {"encoder": args.encoder_lr, "decoder": args.decoder_lr}
#warmup_steps = {"encoder": 20000, "decoder": 10000}
warmup_steps = {"encoder": args.encoder_warmup, "decoder": args.decoder_warmup}
optimizer = BertSumOptimizer(model, lr, warmup_steps)
# Train
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_dataset))
logger.info(" Num Epochs = %d", args.num_train_epochs)
logger.info(
" Instantaneous batch size per GPU = %d", args.per_gpu_train_batch_size
)
logger.info(
" Total train batch size (w. parallel, distributed & accumulation) = %d",
args.train_batch_size * args.gradient_accumulation_steps
# * (torch.distributed.get_world_size() if args.local_rank != -1 else 1),
)
logger.info(" Gradient Accumulation steps = %d", args.gradient_accumulation_steps)
logger.info(" Total optimization steps = %d", t_total)
model.zero_grad()
train_iterator = trange(args.num_train_epochs, desc="Epoch", disable=False)
global_step = 0
tr_loss = 0.0
for _ in train_iterator:
epoch_iterator = tqdm(train_dataloader, desc="Iteration", disable=False)
for step, batch in enumerate(epoch_iterator):
source, target, encoder_mask, decoder_mask, lm_labels = batch
#print('source: {}'.format(source))
#print('target: {}'.format(target))
feed_source=None
feed_targets=[None]*len(target)
feed_encoder_mask=None
feed_decoder_masks=[None]*len(decoder_mask)
feed_lm_labels=[None]*len(lm_labels)
feed_source = source.to(args.device)
for i in range(len(target)):
feed_targets[i] = target[i].to(args.device)
feed_encoder_mask = encoder_mask.to(args.device)
for i in range(len(decoder_mask)):
feed_decoder_masks[i] = decoder_mask[i].to(args.device)
for i in range(len(lm_labels)):
feed_lm_labels[i] = lm_labels[i].to(args.device)
model.train()
outputs = model(
feed_source,
feed_targets,
encoder_attention_mask=feed_encoder_mask,
decoder_attention_mask=feed_decoder_masks,
decoder_lm_labels=feed_lm_labels,
)
            # Total loss is the sum of each decoder's LM loss.
            loss = 0
            for i in range(len(model.decoders)):
                loss += outputs[i][0]
if args.gradient_accumulation_steps > 1:
loss /= args.gradient_accumulation_steps
loss.backward()
tr_loss += loss.item()
if (step + 1) % args.gradient_accumulation_steps == 0:
torch.nn.utils.clip_grad_norm_(model.parameters(), args.max_grad_norm)
optimizer.step()
model.zero_grad()
global_step += 1
if args.max_steps > 0 and global_step > args.max_steps:
epoch_iterator.close()
break
if args.max_steps > 0 and global_step > args.max_steps:
train_iterator.close()
break
return global_step, tr_loss / global_step
# ------------
# Evaluate
# ------------
def evaluate(args, model, tokenizer, prefix=""):
set_seed(args)
args.eval_batch_size = args.per_gpu_eval_batch_size * max(1, args.n_gpu)
eval_dataset = load_and_cache_examples(args, tokenizer, prefix="dev")
eval_sampler = SequentialSampler(eval_dataset)
    model_collate_fn = functools.partial(
        collate,
        tokenizer=tokenizer,
        input_block_size=args.input_block_size,
        output_block_size=args.output_block_size,
    )
    eval_dataloader = DataLoader(
        eval_dataset,
        sampler=eval_sampler,
        batch_size=args.eval_batch_size,
        collate_fn=model_collate_fn,
    )
    # Multi-GPU evaluation (DataParallel) is left disabled here.
logger.info("***** Running evaluation {} *****".format(prefix))
logger.info(" Num examples = %d", len(eval_dataset))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_loss = 0.0
nb_eval_steps = 0
model.eval()
    fout = open(os.path.join(args.output_dir, "dev.res"), "w", encoding="utf-8")
    fdebug = open(os.path.join(args.output_dir, "dev.debug.res"), "w", encoding="utf-8")
for batch in tqdm(eval_dataloader, desc="Evaluating"):
source, target, encoder_mask, decoder_mask, lm_labels = batch
        # Move the batch to the target device; list-valued entries hold one
        # tensor per decoder.
        feed_source = source.to(args.device)
        feed_targets = [t.to(args.device) for t in target]
        feed_encoder_mask = encoder_mask.to(args.device)
        feed_decoder_masks = [m.to(args.device) for m in decoder_mask]
        feed_lm_labels = [l.to(args.device) for l in lm_labels]
with torch.no_grad():
if args.decoding_type=='decoding':
tokens_roles=[]
for i in range(len(feed_targets)):
outputs_ids=model.decoding(
feed_source,
feed_targets[i],
encoder_attention_mask=feed_encoder_mask,
decoder_attention_mask=feed_decoder_masks[i],
decoder_lm_labels=feed_lm_labels[i],
decoder=model.decoders[i]
)
                    outputs_ids = outputs_ids.cpu().numpy()
                    batch_tokens = []
                    for sequence_ids in outputs_ids:
                        # Map each id back to a wordpiece; unknown ids fall back to [UNK].
                        tokens = [
                            tokenizer.ids_to_tokens.get(int(token_id), tokenizer.unk_token)
                            for token_id in sequence_ids
                        ]
                        batch_tokens.append(tokens)
                    tokens_roles.append(batch_tokens)
                def subtoken2token(subtokens):
                    """Merge WordPiece subtokens ("##"-prefixed) back into whole tokens."""
                    token = ""
                    tokens = []
                    for subtoken in subtokens:
                        if subtoken.startswith("##"):
                            token += subtoken[2:]
                        else:
                            if token != "":
                                tokens.append(token)
                            token = subtoken
                    if token != "":
                        tokens.append(token)
                    return tokens
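                # For instance, subtoken2token(["play", "##ing", "chess"])
                # returns ["playing", "chess"].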
                # Two decoders are assumed: column 0 carries the question
                # variables, column 1 the conditions (cf. the decoder_0 and
                # decoder_1 checkpoints loaded in main()).
                for i in range(len(tokens_roles[0])):
                    fout.write('\t'.join([' '.join(subtoken2token(tokens_roles[0][i])),
                                          ' '.join(subtoken2token(tokens_roles[1][i]))]) + '\n')
            else:
                # Teacher-forced forward pass; predictions are the argmax of
                # each decoder's LM logits.
outputs = model(
feed_source,
feed_targets,
encoder_attention_mask=feed_encoder_mask,
decoder_attention_mask=feed_decoder_masks,
decoder_lm_labels=feed_lm_labels,
)
                ans_seqs = [[], []]  # one token-sequence list per decoder
                for i in range(len(model.decoders)):
                    predicted_ids = outputs[i][1].argmax(-1).cpu().numpy().tolist()
                    for sequence_ids in predicted_ids:
                        tokens = [
                            tokenizer.ids_to_tokens.get(token_id, tokenizer.unk_token)
                            for token_id in sequence_ids
                        ]
                        ans_seqs[i].append(tokens)
                for i in range(len(ans_seqs[0])):
                    fout.write('\t'.join([' '.join(ans_seqs[0][i]),
                                          ' '.join(ans_seqs[1][i])]) + '\n')
        nb_eval_steps += 1
    # NOTE: no per-batch loss is accumulated above, so `eval_loss` stays 0 and
    # the reported perplexity is a placeholder (exp(0) == 1), not a real metric.
    eval_loss = eval_loss / nb_eval_steps
    perplexity = torch.exp(torch.tensor(eval_loss))
    result = {"perplexity": perplexity}
# Save the evaluation's results
output_eval_file = os.path.join(args.output_dir, "eval_results.txt")
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
with open(output_eval_file, "w") as writer:
logger.info("***** Eval results {} *****".format(prefix))
for key in sorted(result.keys()):
logger.info(" %s = %s", key, str(result[key]))
writer.write("%s = %s\n" % (key, str(result[key])))
fout.flush()
fout.close()
fdebug.flush()
fdebug.close()
return result
def main():
parser = argparse.ArgumentParser()
# Required parameters
parser.add_argument(
"--data_dir",
default=None,
type=str,
required=True,
help="The input training data file (a text file).",
)
parser.add_argument(
"--output_dir",
default=None,
type=str,
required=True,
help="The output directory where the model predictions and checkpoints will be written.",
)
# Optional parameters
parser.add_argument(
"--gradient_accumulation_steps",
type=int,
default=1,
help="Number of updates steps to accumulate before performing a backward/update pass.",
)
    # Boolean switches use store_true: argparse's `type=bool` would treat any
    # non-empty string (including "False") as True.
    parser.add_argument(
        "--do_evaluate",
        action="store_true",
        help="Run model evaluation on out-of-sample data.",
    )
    parser.add_argument("--do_train", action="store_true", help="Run training.")
    parser.add_argument(
        "--do_overwrite_output_dir",
        action="store_true",
        help="Whether to overwrite the output dir.",
    )
parser.add_argument(
"--encoder_model_name_or_path",
default="bert-base-cased",
type=str,
help="The model checkpoint to initialize the encoder's weights with.",
)
parser.add_argument(
"--decoder_model_name_or_path",
default="/data/zhuoyu/semantic_parsing/models",
type=str,
help="The model checkpoint to initialize the decoder's weights with.",
)
parser.add_argument(
"--model_type",
default="bert",
type=str,
help="The decoder architecture to be fine-tuned.",
)
parser.add_argument(
"--max_grad_norm", default=1.0, type=float, help="Max gradient norm."
)
parser.add_argument(
"--max_steps",
default=-1,
type=int,
help="If > 0: set total number of training steps to perform. Override num_train_epochs.",
)
    parser.add_argument(
        "--to_cpu", action="store_true", help="Whether to force training on CPU."
    )
parser.add_argument(
"--num_train_epochs",
default=10,
type=int,
help="Total number of training epochs to perform.",
)
parser.add_argument(
"--per_gpu_eval_batch_size",
default=4,
type=int,
help="Batch size per GPU/CPU for eval.",
)
parser.add_argument(
"--per_gpu_train_batch_size",
default=4,
type=int,
help="Batch size per GPU/CPU for training.",
)
parser.add_argument(
"--input_block_size",
default=256,
type=int,
help="Max seq length for input",
)
parser.add_argument(
"--output_block_size",
default=64,
type=int,
help="Max seq length for output",
)
parser.add_argument(
"--trained_checkpoints",
default="",
type=str,
help="trained_checkpoints",
)
    parser.add_argument(
        "--decoding_type",
        default="pnt",
        type=str,
        help="Use 'decoding' for step-by-step generation; any other value "
        "evaluates with a teacher-forced forward pass.",
    )
parser.add_argument(
"--encoder_lr",
default=5e-4,
type=float,
help="encoder's learning rate",
)
    parser.add_argument(
        "--decoder_lr",
        default=5e-4,
        type=float,
        help="decoder's learning rate",
    )
    parser.add_argument(
        "--encoder_warmup",
        default=10,
        type=int,
        help="encoder's warmup steps",
    )
    parser.add_argument(
        "--decoder_warmup",
        default=100,
        type=int,
        help="decoder's warmup steps",
    )
parser.add_argument("--seed", default=42, type=int)
    parser.add_argument(
        "--decoder_version",
        default="v1",
        type=str,
        help="decoder variant tag",
    )
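    # A typical invocation might look like this (script name and paths are
    # placeholders, not values from this file):
    #   python run_model2models.py --data_dir ./data --output_dir ./out \
    #       --do_train --per_gpu_train_batch_size 4 \
    #       --encoder_lr 5e-4 --decoder_lr 5e-4 \
    #       --encoder_warmup 10 --decoder_warmup 100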
args = parser.parse_args()
if (
os.path.exists(args.output_dir)
and os.listdir(args.output_dir)
and args.do_train
and not args.do_overwrite_output_dir
):
raise ValueError(
"Output directory ({}) already exists and is not empty. Use --do_overwrite_output_dir to overwrite.".format(
args.output_dir
)
)
# Set up training device
if args.to_cpu or not torch.cuda.is_available():
args.device = torch.device("cpu")
args.n_gpu = 0
else:
args.device = torch.device("cuda")
args.n_gpu = torch.cuda.device_count()
# Load pretrained model and tokenizer. The decoder's weights are randomly initialized.
    tokenizer = AutoTokenizer.from_pretrained(
        args.encoder_model_name_or_path,
        never_split=["[unused0]", "[unused1]", "[unused2]", "[unused3]"],
    )
    # One decoder per output role (question variables, conditions); both are
    # initialized from the same pretrained checkpoint.
    decoder_models = [
        BertForMaskedLM.from_pretrained(args.decoder_model_name_or_path),
        BertForMaskedLM.from_pretrained(args.decoder_model_name_or_path),
    ]
    model = Model2Models.from_pretrained(
        args.encoder_model_name_or_path, decoder_model=decoder_models
    )
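    # Conceptually, the wrapper runs a shared-encoder / multi-decoder scheme;
    # in pseudocode (not the actual Model2Models API):
    #   encoder_states = encoder(source)              # encode once
    #   losses = [decoder(target, encoder_states)     # one pass per role
    #             for decoder, target in zip(decoders, targets)]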
# Setup logging
logging.basicConfig(
format="%(asctime)s - %(levelname)s - %(name)s - %(message)s",
datefmt="%m/%d/%Y %H:%M:%S",
level=logging.INFO,
)
logger.warning(
"Process rank: %s, device: %s, n_gpu: %s, distributed training: %s, 16-bits training: %s",
0,
args.device,
args.n_gpu,
False,
False,
)
logger.info("Training/evaluation parameters %s", args)
# Train the model
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
if args.do_train:
model.to(args.device)
global_step, tr_loss = train(args, model, tokenizer)
logger.info(" global_step = %s, average loss = %s", global_step, tr_loss)
logger.info("Saving model checkpoint to %s", args.output_dir)
# Save a trained model, configuration and tokenizer using `save_pretrained()`.
# They can then be reloaded using `from_pretrained()`
model_to_save = (
model.module if hasattr(model, "module") else model
) # Take care of distributed/parallel training
model_to_save.save_pretrained(args.output_dir)
tokenizer.save_pretrained(args.output_dir)
torch.save(args, os.path.join(args.output_dir, "training_arguments.bin"))
# Evaluate the model
results = {}
if args.do_evaluate:
checkpoints = [args.trained_checkpoints]
logger.info("Evaluate the following checkpoints: %s", checkpoints)
for checkpoint in checkpoints:
encoder_checkpoint = os.path.join(checkpoint, "encoder")
        decoder_checkpoint_question_variables = os.path.join(checkpoint, "decoder_0")
        decoder_checkpoint_conditions = os.path.join(checkpoint, "decoder_1")
        decoder_models = [
            BertForMaskedLM.from_pretrained(decoder_checkpoint_question_variables),
            BertForMaskedLM.from_pretrained(decoder_checkpoint_conditions),
        ]
model = Model2Models.from_pretrained(
encoder_checkpoint, decoder_model=decoder_models
)
model.to(args.device)
            # Capture the evaluation metrics instead of a placeholder string.
            results = evaluate(args, model, tokenizer, "test")
return results
if __name__ == "__main__":
main()
[record continues: per-file statistics and quality-signal columns omitted]

hexsha: 91b47f9da5c47dfa6628ace04164ad0d1bc8a057 | size: 1710 | ext: py | lang: Python
path: vimfiles/bundle/ultisnips/test/test_AnonymousExpansion.py
repo: duanqiaobb/vim-for-java (head 01b60e4494e65a73c9a9de00f50259d8a7c8d0bb) | licenses: ["MIT"]
stars/issues/forks metadata: null
from test.vim_test_case import VimTestCase as _VimTest
from test.constant import *
# Anonymous Expansion {{{#
class _AnonBase(_VimTest):
args = ''
def _extra_options_pre_init(self, vim_config):
vim_config.append('inoremap <silent> %s <C-R>=UltiSnips#Anon(%s)<cr>'
% (EA, self.args))
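# Each test class below passes `args` to UltiSnips#Anon, simulates typing
# `keys` in insert mode, and asserts the resulting buffer equals `wanted`;
# EA triggers the anonymous expansion and JF jumps to the next tabstop.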
class Anon_NoTrigger_Simple(_AnonBase):
args = '"simple expand"'
keys = 'abc' + EA
wanted = 'abcsimple expand'
class Anon_NoTrigger_AfterSpace(_AnonBase):
args = '"simple expand"'
keys = 'abc ' + EA
wanted = 'abc simple expand'
class Anon_NoTrigger_BeginningOfLine(_AnonBase):
args = r"':latex:\`$1\`$0'"
keys = EA + 'Hello' + JF + 'World'
wanted = ':latex:`Hello`World'
class Anon_NoTrigger_FirstCharOfLine(_AnonBase):
args = r"':latex:\`$1\`$0'"
keys = ' ' + EA + 'Hello' + JF + 'World'
wanted = ' :latex:`Hello`World'
class Anon_NoTrigger_Multi(_AnonBase):
args = '"simple $1 expand $1 $0"'
keys = 'abc' + EA + '123' + JF + '456'
wanted = 'abcsimple 123 expand 123 456'
class Anon_Trigger_Multi(_AnonBase):
args = '"simple $1 expand $1 $0", "abc"'
keys = '123 abc' + EA + '123' + JF + '456'
wanted = '123 simple 123 expand 123 456'
class Anon_Trigger_Simple(_AnonBase):
args = '"simple expand", "abc"'
keys = 'abc' + EA
wanted = 'simple expand'
class Anon_Trigger_Twice(_AnonBase):
args = '"simple expand", "abc"'
keys = 'abc' + EA + '\nabc' + EX
wanted = 'simple expand\nabc' + EX
class Anon_Trigger_Opts(_AnonBase):
args = '"simple expand", ".*abc", "desc", "r"'
keys = 'blah blah abc' + EA
wanted = 'simple expand'
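# Note on the class above: the trailing "r" option in Anon_Trigger_Opts marks
# the trigger as a regular expression, so ".*abc" matches the whole preceding
# line before the snippet expands.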
# End: Anonymous Expansion #}}}
[record ends: per-file statistics and quality-signal columns omitted]