| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
hexsha: f5d40b58d32d09631a74deab03cacd263794a4ed | size: 3,204 | ext: py | lang: Python
path: look-for.py | repo: barnesrobert/find-aws-resource-in-all-accounts @ 5f02aacca3ce3a28894d7d497c4158ed9b08c238 | licenses: ["Apache-2.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
```python
#--------------------------------------------------------------------------------------------------
# Function: look-for
# Purpose: Loops through all AWS accounts and regions within an Organization to find a specific resource
# Inputs:
#
# {
# "view_only": "true|false",
# "regions": ["us-east-1", ...]
# }
#
# Leave the "regions" list blank to apply to all regions
#
#--------------------------------------------------------------------------------------------------
import json
import boto3
import botocore
from botocore.exceptions import ClientError
from botocore.exceptions import EndpointConnectionError
sts_client = boto3.client('sts')
organizations_client = boto3.client('organizations')
#--------------------------------------------------------------------------------------------------
# Function handler
#--------------------------------------------------------------------------------------------------
def lambda_handler(event, context):
# Determine whether the user just wants to view the orphaned logs.
view_only = ('view_only' in event and event['view_only'].lower() == 'true')
regions = []
#--------------------------------------------------
# Determine which regions to include. Apply to all regions by default.
#--------------------------------------------------
    if 'regions' in event and isinstance(event['regions'], list):
regions = event['regions']
# Get all regions if not otherwise specified.
if not regions:
region_response = boto3.client('ec2').describe_regions()
regions = [region['RegionName'] for region in region_response['Regions']]
# Loop through the accounts in the organization.
response = organizations_client.list_accounts()
for account in response['Accounts']:
if account['Status'] == 'ACTIVE':
member_account = sts_client.assume_role(
RoleArn='arn:aws:iam::{}:role/AWSControlTowerExecution'.format(account['Id']),
RoleSessionName='look_for'
)
loop_through_account(account['Id'], member_account, regions, view_only)
return
#--------------------------------------------------
# function: loop_through_account
#--------------------------------------------------
def loop_through_account(account_id, assumed_role, regions, view_only):
ACCESS_KEY = assumed_role['Credentials']['AccessKeyId']
SECRET_KEY = assumed_role['Credentials']['SecretAccessKey']
SESSION_TOKEN = assumed_role['Credentials']['SessionToken']
#--------------------------------------------------
# Iterate through the specified regions.
#--------------------------------------------------
for region in regions:
print({
"Account": account_id,
"Region": region
}
)
try:
# Create service client using the assumed role credentials, e.g. S3
client = boto3.client(
'SERVICE_NAME',
aws_access_key_id=ACCESS_KEY,
aws_secret_access_key=SECRET_KEY,
aws_session_token=SESSION_TOKEN,
region_name=region
)
for RESOURCE in client.METHOD()['RESOURCES']:
print('DO SOMETHING HERE')
        except botocore.exceptions.SERVICE_METHOD_ERROR as error:  # placeholder exception, like the SERVICE_NAME/METHOD stubs above
print(ValueError(error))
```
avg_line_length: 32.693878 | max_line_length: 105 | alphanum_fraction: 0.542447
qsc_*_quality_signal columns (schema order): 300 | 3,204 | 5.63 | 0.393333 | 0.028419 | 0.052102 | 0.02013 | 0.031972 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002962 | 0.156991 | 3,204 | 97 | 106 | 33.030928 | 0.622362 | 0.415418 | 0 | 0 | 0 | 0 | 0.150759 | 0.024403 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.108696 | 0 | 0.173913 | 0.065217
remaining qsc_* columns: 0 throughout (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
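The header comment documents the expected event shape; a minimal invocation sketch (the handler itself still contains the author's SERVICE_NAME/METHOD placeholders, so it is not runnable against AWS as-is):

```python
# Hypothetical test event for lambda_handler, following the format in the
# file header. Leave "regions" empty to scan every region.
event = {
    "view_only": "true",
    "regions": ["us-east-1", "us-west-2"],
}

# lambda_handler(event, None)  # requires AWS credentials and Organizations access
```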

hexsha: f5d6cff69b0e62527106143d8be0c05d4bcd4fe7 | size: 2,972 | ext: py | lang: Python
path: opennem/spiders/aemo/monitoring.py | repo: paulculmsee/opennem @ 9ebe4ab6d3b97bdeebc352e075bbd5c22a8ddea1 | licenses: ["MIT"]
max_stars_count: 22 (2020-06-30T05:27:21.000Z to 2022-02-21T12:13:51.000Z) | max_issues_count: 71 (2020-08-07T13:06:30.000Z to 2022-03-15T06:44:49.000Z) | max_forks_count: 13 (2020-06-30T03:28:32.000Z to 2021-12-30T08:17:16.000Z)
content:
```python
import logging
from typing import Any, Dict
from pydantic import ValidationError
from scrapy import Spider
from scrapy.http import Response
from opennem.pipelines.aemo.downloads import DownloadMonitorPipeline
from opennem.schema.aemo.downloads import AEMOFileDownloadSection
from opennem.utils.dates import parse_date
from opennem.utils.numbers import filesize_from_string
from opennem.utils.url import strip_query_string
class AEMOMonitorRelSpider(Spider):
name = "au.aemo.downloads"
start_urls = [
"https://aemo.com.au/en/energy-systems/electricity/national-electricity-market-nem/participate-in-the-market/registration",
"https://www.aemo.com.au/energy-systems/electricity/national-electricity-market-nem/nem-forecasting-and-planning/forecasting-and-planning-data/generation-information",
]
pipelines = set([DownloadMonitorPipeline])
def parse(self, response: Any) -> Dict[str, Any]:
file_downloads = []
source_title = response.css("title::text").get()
download_sections = response.xpath("//div[@class='file-list-wrapper']/..")
if not download_sections or len(download_sections) < 1:
raise Exception("{} spider could not find any download sections".format(self.name))
for download_section in download_sections:
date_text = download_section.css("div.field-publisheddate span::text").get()
if not date_text:
raise Exception(
"{} could not get download section published date".format(self.name)
)
published_date = parse_date(date_text)
publish_link_relative = download_section.css("a::attr(href)").get()
if not publish_link_relative:
raise Exception("{} could not get rel published link".format(self.name))
publish_link = response.urljoin(publish_link_relative)
publish_link = strip_query_string(publish_link)
download_title = download_section.css(".field-title::text").get()
download_size_raw = download_section.css(".field-size span::text").get()
download_size = None
if download_size_raw:
download_size, _ = filesize_from_string(download_size_raw)
# create a model from the extracted fields
section_model = None
try:
section_model = AEMOFileDownloadSection(
published_date=published_date,
filename=download_title,
download_url=publish_link,
file_size=download_size,
source_url=response.url,
source_title=source_title,
)
file_downloads.append(section_model)
except ValidationError as e:
self.log("Validation error: {}".format(e), logging.ERROR)
return {"_data": file_downloads, "items": file_downloads}
```
avg_line_length: 37.620253 | max_line_length: 175 | alphanum_fraction: 0.657133
qsc_*_quality_signal columns (schema order): 330 | 2,972 | 5.730303 | 0.342424 | 0.040719 | 0.038075 | 0.033845 | 0.081438 | 0.054997 | 0.054997 | 0 | 0 | 0 | 0 | 0.00045 | 0.252019 | 2,972 | 78 | 176 | 38.102564 | 0.850202 | 0.013459 | 0 | 0 | 0 | 0.037037 | 0.20273 | 0.020137 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018519 | false | 0 | 0.185185 | 0 | 0.296296 | 0
remaining qsc_* columns: 0 throughout (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
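A sketch of driving the spider in-process, assuming the opennem package and a working Scrapy install:

```python
from scrapy.crawler import CrawlerProcess

from opennem.spiders.aemo.monitoring import AEMOMonitorRelSpider

# Run the spider in-process; scraped items flow through DownloadMonitorPipeline.
process = CrawlerProcess(settings={"LOG_LEVEL": "INFO"})
process.crawl(AEMOMonitorRelSpider)
process.start()  # blocks until crawling finishes
```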

hexsha: f5d87e21f9ec6f8ae018914ba1e9c0e382bc83dd | size: 319 | ext: py | lang: Python
path: python/13/servo.py | repo: matsujirushi/raspi_parts_kouryaku @ 35cd6f34d21c5e3160636671175fa8d5aff2d4dc | licenses: ["Apache-2.0"]
max_stars_count: 6 (2022-03-05T02:36:57.000Z to 2022-03-12T12:31:27.000Z) | max_issues_count: null | max_forks_count: null
content:
```python
import wiringpi as pi
pi.wiringPiSetupGpio()
pi.pinMode(18, pi.PWM_OUTPUT)
pi.pwmSetMode(pi.PWM_MODE_MS)
pi.pwmSetClock(2)
pi.pwmSetRange(192000)
while True:
for i in list(range(-90, 90, 10)) + list(range(90, -90, -10)):
pi.pwmWrite(18, int(((i + 90) / 180 * (2.4 - 0.5) + 0.5) / 20 * 192000))
pi.delay(200)
```
avg_line_length: 26.583333 | max_line_length: 76 | alphanum_fraction: 0.652038
qsc_*_quality_signal columns (schema order): 57 | 319 | 3.596491 | 0.596491 | 0.04878 | 0.107317 | 0.126829 | 0.146341 | 0 | 0 | 0 | 0 | 0 | 0 | 0.167286 | 0.15674 | 319 | 11 | 77 | 29 | 0.594796 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.1 | 0
remaining qsc_* columns: 0 throughout (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
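The pwmWrite() expression maps an angle in [-90, 90] degrees to a 0.5-2.4 ms servo pulse inside the 20 ms period set up by pwmSetClock(2)/pwmSetRange(192000) (19.2 MHz / 2 / 192000 = 50 Hz). The same arithmetic in plain Python:

```python
def angle_to_pwm(angle, range_=192000, period_ms=20.0, min_ms=0.5, max_ms=2.4):
    """Same mapping as the pwmWrite() call above: angle -> duty counts."""
    pulse_ms = (angle + 90) / 180 * (max_ms - min_ms) + min_ms
    return int(pulse_ms / period_ms * range_)

for a in (-90, 0, 90):
    print(a, angle_to_pwm(a))  # -90 -> 4800, 0 -> 13920, 90 -> 23040
```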

hexsha: f5d9d9ea4f3e787d1de8f24aa36d4dcbede900ec | size: 2,549 | ext: py | lang: Python
path: src/vswarm/object_detection/blob_detector.py | repo: Faust-Wang/vswarm (forks row: hvourtsis/vswarm) @ d18ce643218c18ef1e762f40562104b2a0926ad7 | licenses: ["MIT"]
max_stars_count: 21 (2021-03-03T10:51:46.000Z to 2022-03-28T11:00:35.000Z) | max_issues_count: 2 (2021-07-21T07:57:16.000Z to 2022-03-17T12:41:51.000Z) | max_forks_count: 8 (2021-02-27T14:29:55.000Z to 2022-01-05T19:40:38.000Z)
content:
```python
import cv2 as cv
from geometry_msgs.msg import Pose2D
from vision_msgs.msg import (BoundingBox2D, Detection2D, Detection2DArray,
ObjectHypothesisWithPose)
THRESHOLD_MAX = 255
THRESHOLD = 240
class BlobDetector:
def __init__(self):
pass
def detect_multi(self, images):
detections_list = []
for image in images:
detections = self.detect(image)
detections_list.append(detections)
return detections_list
def detect(self, image):
# Convert to grayscale if needed
if image.ndim == 3:
image = cv.cvtColor(image, cv.COLOR_BGR2GRAY)
image_height, image_width = image.shape
image_area = image_height * image_width
# Apply (inverse) binary threshold to input image
mask = cv.threshold(image, THRESHOLD, THRESHOLD_MAX, cv.THRESH_BINARY_INV)[1]
# Dilate mask to find more reliable contours
# kernel = np.ones((5, 5), np.uint8)
# mask_dilated = cv.dilate(mask, kernel, iterations=1)
# Find external approximate contours in dilated mask
contours, hierarchy = cv.findContours(mask, cv.RETR_EXTERNAL,
cv.CHAIN_APPROX_SIMPLE)
# Filter out contours that don't qualify as a detection
detections = []
for contour in contours:
            # Filter out if the contour touches the image border
x, y, w, h = cv.boundingRect(contour)
if x == 0 or y == 0 or x + w == image_width or y + h == image_height:
continue
# Filter out if the contour is too small
if cv.contourArea(contour) < 1e-4 * image_area:
continue
detections.append((x, y, w, h))
# Fill detections msg
detection_array_msg = Detection2DArray()
for detection in detections:
x, y, w, h = detection
center_x = x + w / 2.
center_y = y + h / 2.
bbox = BoundingBox2D()
bbox.center = Pose2D(x=center_x, y=center_y, theta=0)
bbox.size_x = w
bbox.size_y = h
object_hypothesis = ObjectHypothesisWithPose()
object_hypothesis.id = 0
object_hypothesis.score = 1.0
detection_msg = Detection2D()
detection_msg.bbox = bbox
detection_msg.results.append(object_hypothesis)
detection_array_msg.detections.append(detection_msg)
return detection_array_msg
```
avg_line_length: 32.265823 | max_line_length: 85 | alphanum_fraction: 0.59592
qsc_*_quality_signal columns (schema order): 298 | 2,549 | 4.939597 | 0.385906 | 0.005435 | 0.006114 | 0.008152 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018779 | 0.331503 | 2,549 | 78 | 86 | 32.679487 | 0.84507 | 0.165947 | 0 | 0.041667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0.020833 | 0.0625 | 0 | 0.1875 | 0
remaining qsc_* columns: 0 throughout (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
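A usage sketch for BlobDetector, assuming a ROS environment where vision_msgs and geometry_msgs are importable; the synthetic frame below is hypothetical:

```python
import numpy as np

from vswarm.object_detection.blob_detector import BlobDetector  # path per the record above

# Bright frame with one dark blob well inside the border, so it survives
# both the border filter and the minimum-area filter.
frame = np.full((240, 320), 255, dtype=np.uint8)
frame[100:140, 150:200] = 0  # dark region falls below THRESHOLD=240

detector = BlobDetector()
msg = detector.detect(frame)
for det in msg.detections:
    c = det.bbox.center
    print(c.x, c.y, det.bbox.size_x, det.bbox.size_y)
```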

hexsha: f5dedc85895871ad1a7086cfc4fa5d80500516b2 | size: 7,557 | ext: py | lang: Python
path: bibref_parser/parser.py | repo: glooney/python-bibref-parser @ 9ca6b99a917659425fe7b4759f523c78f0180124 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
```python
import re
class BibRefParser:
def __init__(self):
self.reset()
def reset(self, reference=''):
self._ref = reference
self.reference = reference
self.title = ''
self.authors = ''
# publication date
self.date = ''
self.publisher = ''
self._ref = self._normalise(self._ref)
@classmethod
def _normalise(cls, s):
return s.replace('“', '"').replace('”', '"').replace('–', '-')
def _extract(self, pattern, field, first=False):
ret = ''
matches = re.findall(pattern, self._ref)
if len(matches):
if (len(matches) == 1) or first:
match = matches[0]
self._ref = self._ref.replace(match[0], '{' + field + '}')
ret = match[1]
return ret
def parse(self, reference):
self.reset(reference)
# get quoted title
self.title = self._extract(r'("([^"]+)")', 'title')
datep = r'(\b(18|19|20)\d\d[abc]?\b)'
while not self.date:
# get bracketed year
self.date = self._extract(
r'(\([^)]*' + datep + r'[^)]*\))', 'date')
# get unique year
if not self.date:
self.date = self._extract(r'(' + datep + r')', 'date')
if not self.date:
self.date = self._extract(
r'(\. ' + datep + r'\.)', 'date'
)
if not self.date:
self.date = self._extract(
r'(, ' + datep + r'\.)', 'date'
)
if not self.date:
self.date = self._extract(
r'(, ' + datep + r',)', 'date'
)
# get unique year not preceded or followed by -
# if 0 and not self.date:
# self.date = self._extract(
# r'((?<![-0-9])' + datep + r'(?![-0-9]))', 'date')
# remove access date
if 1 and not self.date:
access_date = self._extract(
r'(\[[^\]]*' + datep + r'[^\]]*\])', 'access_date')
if not access_date:
break
else:
break
if self.date:
self._extract(r'({date}([.,;]))', 'date')
if 1 and self.title and not self.authors:
# anything in front of title (or date) that isn't a date
# catches 40% of authors on test set
self.authors = self._extract(
r'^((([^{](?!\d{4,4}))+))', 'authors',
)
# if 0:
# # author (without . or ,) -> title
# # Works sometimes BUT
# # NO: b/c title can be after
# if self.authors and not self.title:
# if not re.search(r'\.|,', self.authors):
# self.title = self.authors
# self.authors = ''
if 1 and not self.authors:
# the authors field most likely captured the title
# we need to split them
#
# #80, ACS
# Evans, D. A.; Fitch, D. M.; Smith, T. E.; Cee, V. J.
# #69, AMA
# Venkat Narayan, KM.
# #4, ?
# Bagdikian, B.H.
# 22, APA
# Greene, C. (Producer), del Toro, G.(Director)
#
# sentence with lowercase words (other than and/et) indicate title
#
if not self.authors:
# #32, IEEE
# B. Klaus and P. Horn
# #34
# L. Bass, P. Clements, and R. Kazman
# #84
# W. Zeng, H. Yu, C. Lin
# self.authors = self._extract(
# r'^(((( ?[A-Z]{1,2}\.)+ [^.,]+[,.]( and)?)+))',
# 'authors1'
# )
self.authors = self._extract(
r'^((((^|,|,? and)( ?[A-Z]{1,2}\.)+ ([^,{.](?!and ))+)+))',
'authors1'
)
if not self.authors:
# #10 xxx
# Ellman, M., and F. Germano
# #19 APA
# Carter, S., & Dunbar-Odom, D.
# #20
# Gaudio, J. L., & Snowdon, C. T.
# included = [19, 80, 20, 69, 4, 22]
self.authors = self._extract(
# r'^((([^,.{]+,((| |-)[A-Z]{1,2}\.)+(\s*\([^)]+\))?,?)+))',
r'^((((^|,|,? (and|&) )[^,.{]+,((| |-)[A-Z]{1,2}\.)+(\s*\([^)]+\))?)+))',
'authors2'
)
if not self.authors:
# #49, MLA
# #50
# Smith, John, and Bob Anderson
# #51
# Campbell, Megan, et al.
self.authors = self._extract(
r'^(([A-Z][a-z]+, [A-Z][a-z]+[^.{]+\.))',
'authors3'
)
if 1 and not self.authors:
# #68, AMA
# Boyd B, Basic C, Bethem R, eds
# #70, AMA
# Guyton JL, Crockarell JR
# #76
# Florez H, Martinez R, Chakra W, Strickman-Stein M, Levis S
self.authors = self._extract(
r'^((((^| )[A-Z][a-z][-\w]* [A-Z]{1,2}[,.])+))',
'authors4'
)
if 1 and self.authors:
self.authors += self._extract(
r'(\{authors\d?\}((\.? ?(,? ?(et al|and others)\.?)?(,? ?[Ee]ds\.?))?))',
'authors9',
True
)
if 1 and not self.authors:
# authors = anything from start to . or {
# catches 80%
# BUT also a lot of FALSE POSITIVES
# (i.e. include title and other stuff in the authors)
# e.g. Goh, S. L. Polymer Chemistry
part = self._extract(
# r'^(([^{]+?))(?:\{|(?<![A-Z)])\.)',
r'^((((?<=[A-Z])\.|[^{.])+))',
'authors8'
)
if not self.title and (
re.match(r'(The|A|An) ', part)
# Fast facts
or (
re.search(r' [a-z]+\.?$', part)
and not re.search(r' et al\.?$', part)
)
):
self.title = part
else:
self.authors = part
if 0 and self.authors and not self.title:
# we might have captured the title in the authors
# Michael Pollan, The Omnivore's Dilemma
# if self.authors
pass
if self.authors and self.date and not self.title:
# title = anything between } and { with a dot in it
# assumes that the date is after the title
self.title = self._extract(
r'\}\s*\.*\s*(([^.{}]{2,}))', 'title',
True
)
# clean the title
if self.title:
# Crimson peak [Motion picture]
self.title = re.sub(r'\[[^\]]+\]$', '', self.title)
# The New Media Monopoly, Boston: Beacon Press
self.title = re.sub(r',[^,:]+:[^,:]+$', '', self.title)
self.title = self.title.strip(' ').strip(
'.').strip(',')
self.title = re.sub(r"^'(.+)'$", r"\1", self.title)
```
avg_line_length: 34.040541 | max_line_length: 93 | alphanum_fraction: 0.382162
qsc_*_quality_signal columns (schema order): 764 | 7,557 | 3.736911 | 0.304974 | 0.092469 | 0.075657 | 0.044834 | 0.271454 | 0.225219 | 0.145359 | 0.128546 | 0.117688 | 0.07986 | 0 | 0.021718 | 0.451634 | 7,557 | 221 | 94 | 34.19457 | 0.666988 | 0.285563 | 0 | 0.221239 | 0 | 0.00885 | 0.119366 | 0.029606 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044248 | false | 0.00885 | 0.00885 | 0.00885 | 0.079646 | 0
remaining qsc_* columns: 0 throughout (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
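A usage sketch for BibRefParser; the parser is heuristic, so which authors/title rule fires depends on the citation style of the input:

```python
from bibref_parser.parser import BibRefParser  # path per the record above

parser = BibRefParser()
parser.parse('Smith, John, and Bob Anderson. "A Study of Things." 2019.')
print(parser.title)    # e.g. 'A Study of Things' (quoted-title rule, then cleanup)
print(parser.date)     # e.g. '2019'
print(parser.authors)  # e.g. 'Smith, John, and Bob Anderson. ' (best-effort)
```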

hexsha: f5e2b3958e10bba2c1126d9063cd6d9ca99a6bc2 | size: 1,217 | ext: py | lang: Python
path: kernellib/utils/visualization.py | repo: jejjohnson/kernellib @ eb9f80c1b605c8a6b5e8a324efd4ef07d8f59050 | licenses: ["MIT"]
max_stars_count: 1 (2021-02-04T08:52:04.000Z to 2021-02-04T08:52:04.000Z) | max_issues_count: null | max_forks_count: 1 (2018-04-17T06:42:09.000Z to 2018-04-17T06:42:09.000Z)
content:
```python
import matplotlib.pyplot as plt
def plot_gp(xtest, predictions, std=None, xtrain=None, ytrain=None, title=None, save_name=None):
xtest, predictions = xtest.squeeze(), predictions.squeeze()
fig, ax = plt.subplots()
# Plot the training data
if (xtrain is not None) and (ytrain is not None):
xtrain, ytrain = xtrain.squeeze(), ytrain.squeeze()
ax.scatter(xtrain, ytrain, s=100, color='r', label='Training Data')
# plot the testing data
ax.plot(xtest, predictions, linewidth=5,
color='k', label='Predictions')
# plot the confidence interval
if std is not None:
std = std.squeeze()
upper_bound = predictions + 1.960 * std
lower_bound = predictions - 1.960 * std
ax.fill_between(xtest, upper_bound, lower_bound,
                        color='red', alpha=0.2, label='95% Confidence Interval')
# ax.legend()
if title is not None:
ax.set_title(title)
ax.tick_params(
axis='both',
which='both',
bottom=False,
top=False,
left=False,
labelleft=False,
labelbottom=False)
if save_name:
fig.savefig(save_name)
else:
plt.show()
return fig
```
avg_line_length: 25.354167 | max_line_length: 97 | alphanum_fraction: 0.612161
qsc_*_quality_signal columns (schema order): 157 | 1,217 | 4.675159 | 0.44586 | 0.027248 | 0.049046 | 0.054496 | 0.06267 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018079 | 0.272802 | 1,217 | 47 | 98 | 25.893617 | 0.811299 | 0.069844 | 0 | 0 | 0 | 0 | 0.053239 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0 | 0.033333 | 0 | 0.1 | 0
remaining qsc_* columns: 0 throughout (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
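A usage sketch for plot_gp with synthetic data (all names here are arbitrary):

```python
import numpy as np

from kernellib.utils.visualization import plot_gp  # path per the record above

xtest = np.linspace(0, 10, 200)
predictions = np.sin(xtest)
std = 0.2 * np.ones_like(xtest)           # constant predictive std, for illustration
xtrain = np.linspace(0, 10, 8)
ytrain = np.sin(xtrain)

# With save_name=None the figure is shown via plt.show().
fig = plot_gp(xtest, predictions, std=std, xtrain=xtrain, ytrain=ytrain,
              title='GP fit')
```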

hexsha: f5e5cd56b7a8f566083c50626d4a1f1f2165bd63 | size: 2,284 | ext: py | lang: Python
path: noxutils.py | repo: sphinx-contrib/zopeext @ b749d0023f4fb8b8eea3a8f3216f63397c6272de | licenses: ["BSD-2-Clause"]
max_stars_count: 1 (2020-03-16T07:20:58.000Z to 2020-03-16T07:20:58.000Z) | max_issues_count: 3 (2021-12-19T09:39:45.000Z to 2022-01-06T05:05:03.000Z) | max_forks_count: null
content:
```python
"""
From https://github.com/brechtm/rinohtype/blob/master/noxutil.py
https://github.com/cjolowicz/nox-poetry/discussions/289
"""
import json
from collections.abc import Iterable
from pathlib import Path
from typing import Optional
from urllib.request import urlopen, Request
from poetry.core.factory import Factory
from poetry.core.semver import parse_single_constraint as parse_version
VERSION_PARTS = ("major", "minor", "patch")
def get_versions(
dependency: str,
granularity: str = "minor",
# ascending: bool = False, limit: Optional[int] = None,
# allow_prerelease: bool = False,
) -> Iterable[str]:
"""Yield all versions of `dependency` considering version constraints
Args:
dependency: the name of the dependency
granularity: yield only the newest patch version of each major/minor
release
ascending: count backwards from latest version, by default (not much
use without the 'limit' arg)
limit: maximum number of entries to return
allow_prerelease: whether to include pre-release versions
Yields:
All versions of `dependency` that match the version constraints defined
and in this project's pyproject.toml and the given `granularity`.
"""
package = Factory().create_poetry(Path(__file__).parent).package
for requirement in package.requires:
if requirement.name == dependency:
break
else:
raise ValueError(f"{package.name} has no dependency '{dependency}'")
filtered_versions = [
version
for version in all_versions(dependency)
if requirement.constraint.allows(version)
]
parts = VERSION_PARTS[: VERSION_PARTS.index(granularity) + 1]
result = {}
for version in filtered_versions:
key = tuple(getattr(version, part) for part in parts)
result[key] = max((result[key], version)) if key in result else version
return [str(version) for version in result.values()]
def all_versions(dependency):
request = Request(f"https://pypi.org/pypi/{dependency}/json")
response = urlopen(request)
json_string = response.read().decode("utf8")
json_data = json.loads(json_string)
yield from (parse_version(version) for version in json_data["releases"])
```
avg_line_length: 35.138462 | max_line_length: 79 | alphanum_fraction: 0.700088
qsc_*_quality_signal columns (schema order): 285 | 2,284 | 5.529825 | 0.459649 | 0.030457 | 0.030457 | 0.036168 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002761 | 0.207093 | 2,284 | 64 | 80 | 35.6875 | 0.867477 | 0.343695 | 0 | 0 | 0 | 0 | 0.08223 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057143 | false | 0 | 0.2 | 0 | 0.285714 | 0
remaining qsc_* columns: 0 throughout (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
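A sketch of the intended use from a noxfile.py, assuming the adjacent pyproject.toml declares a sphinx dependency (the session and package names are assumptions):

```python
import nox

from noxutils import get_versions

@nox.session
@nox.parametrize("sphinx", get_versions("sphinx", granularity="minor"))
def tests(session, sphinx):
    # One session per newest patch release of each minor sphinx version
    # allowed by this project's pyproject.toml constraints.
    session.install(f"sphinx=={sphinx}")
    session.install(".")
    session.run("pytest")
```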

hexsha: f5e6d7bb0bd30f9540f1c0b749f54516092b6ca3 | size: 3,806 | ext: py | lang: Python
path: nodes/centered_mocap_and_tag_rebroadcaster.py | repo: rislab/apriltag_tracker @ 41c4deb4b5bcd94e5f666f3d4b1f1d141c705582 | licenses: ["BSD-3-Clause"]
max_stars_count: null | max_issues_count: null | max_forks_count: 1 (2019-02-18T00:40:20.000Z to 2019-02-18T00:40:20.000Z)
content:
```python
#!/usr/bin/env python2.7
from __future__ import division
import roslib
import rospy
import tf
from nav_msgs.msg import Odometry
from nav_msgs.msg import Path
from geometry_msgs.msg import PoseStamped
import numpy as np
import pdb
from message_filters import Subscriber, ApproximateTimeSynchronizer
class GT_cleaner:
def __init__(self):
self.init = [False, False]
self.broadcaster = tf.TransformBroadcaster()
self.mocap_pub = rospy.Publisher(
'/gt_clean_odom', Odometry, queue_size=10)
self.april_pub = rospy.Publisher(
'/april_clean_odom', Odometry, queue_size=10)
self.first_quat = None
self.first_pos = np.array([0, 0, 0])
self.prev_frame = [np.eye(4), np.eye(4)]
self.first_frame = [np.eye(4),np.eye(4)]
self.first_frame_inv = [np.eye(4),np.eye(4)]
self.last_time = [rospy.Time.now(),rospy.Time.now()]
self.sub = ApproximateTimeSynchronizer([Subscriber("/mocap/odom", Odometry),Subscriber("/apriltag_tracker/odom", Odometry)],100, 0.05)
self.sub.registerCallback(self.callback)
def callback(self, mocap_msg, odom_msg):
for i,msg in enumerate([mocap_msg, odom_msg]):
q = msg.pose.pose.orientation
p = msg.pose.pose.position
quat = np.array([q.x, q.y, q.z, q.w])
pos = np.array([p.x, p.y, p.z])
frame = tf.transformations.quaternion_matrix(quat)
frame[:3, 3] = pos
if i==1:
frame = np.linalg.inv(frame) # Because track tag in body is the other way around
if self.init[i] == False:
self.last_time[i] = msg.header.stamp
self.init[i] = True
self.first_frame[i] = frame
self.first_frame_inv[i] = np.linalg.inv(frame)
continue
dt = (msg.header.stamp - self.last_time[i]).to_sec()
self.last_time[i] = msg.header.stamp
frame_in_first = np.dot(self.first_frame_inv[i], frame)
# add to path
odom = Odometry()
odom.header.frame_id = msg.header.frame_id
odom.pose.pose.position.x = frame_in_first[0, 3]
odom.pose.pose.position.y = frame_in_first[1, 3]
odom.pose.pose.position.z = frame_in_first[2, 3]
q = tf.transformations.quaternion_from_matrix(frame_in_first)
odom.pose.pose.orientation.x = q[0]
odom.pose.pose.orientation.y = q[1]
odom.pose.pose.orientation.z = q[2]
odom.pose.pose.orientation.w = q[3]
odom.header.stamp = msg.header.stamp
#Now time for the velocities
# Get the delta transform to obtain the velocities
delta_frame = np.dot(np.linalg.inv(self.prev_frame[i]), frame_in_first)
self.prev_frame[i] = frame_in_first
# Linear part is easy
odom.twist.twist.linear.x = delta_frame[0,3]/dt
odom.twist.twist.linear.y = delta_frame[1,3]/dt
odom.twist.twist.linear.z = delta_frame[2,3]/dt
# For the angular velocity, we compute the angle axis
result = tf.transformations.rotation_from_matrix(delta_frame)
angle = result[0]
direction = result[1]
omega = direction * angle/dt
odom.twist.twist.angular.x = omega[0]
odom.twist.twist.angular.y = omega[1]
odom.twist.twist.angular.z = omega[2]
if i == 0:
self.mocap_pub.publish(odom)
else:
self.april_pub.publish(odom)
if __name__ == '__main__':
rospy.init_node('gt_cleaner', anonymous=True)
cleaner_obj = GT_cleaner()
rospy.spin()
```
avg_line_length: 37.313725 | max_line_length: 142 | alphanum_fraction: 0.59196
qsc_*_quality_signal columns (schema order): 517 | 3,806 | 4.195358 | 0.257253 | 0.033195 | 0.038728 | 0.042416 | 0.187644 | 0.133241 | 0.112033 | 0.026279 | 0.026279 | 0.026279 | 0 | 0.017126 | 0.294272 | 3,806 | 101 | 143 | 37.683168 | 0.790395 | 0.061219 | 0 | 0.026316 | 0 | 0 | 0.022995 | 0.006169 | 0 | 0 | 0 | 0 | 0 | 1 | 0.026316 | false | 0 | 0.131579 | 0 | 0.171053 | 0
remaining qsc_* columns: 0 throughout (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
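The twist computation in the callback is a finite difference between consecutive poses; a pure-numpy sketch of the linear part (the angular part additionally extracts angle-axis from the rotation block of the delta transform):

```python
import numpy as np

def linear_velocity(prev_frame, frame, dt):
    """Linear twist from two consecutive 4x4 homogeneous poses, as in the callback above."""
    delta = np.linalg.inv(prev_frame) @ frame  # relative motion over the interval
    return delta[:3, 3] / dt                   # translation per second
```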

hexsha: f5e74389c152886253bc86c73ff3f6d23bab1e6e | size: 3,266 | ext: py | lang: Python
path: garage.py | repo: DidymusRex/garage-pi @ 4f4dcc0251f8cb5f5150ddaff7dac01a64eac948 | licenses: ["CC0-1.0"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
```python
from datetime import datetime
from gpiozero import DistanceSensor
from garage_door import garage_door
from garage_camera import garage_camera
from MQTT_Config import mqtt_topic, mqtt_account, mqtt_passwd, mqtt_broker
import paho.mqtt.client as mqtt
from temp_sensor import temp_sensor
from time import sleep
"""
GPIO pin assignments:
relays
range finder sensor (echo passes thru voltage converter)
DHT11 temperature/humidity sensor
"""
GPIO_Pins = {'temp_1':21,
'relay_1':6,
'relay_2':12,
'trig_1':17,
'echo_1':18,
'trig_2':22,
'echo_2':23}
"""
MQTT connect callback
Subscribing in on_connect() means that if we lose the connection and
reconnect then subscriptions will be renewed.
"""
def on_connect(client, userdata, flags, rc):
client.subscribe(mqtt_topic)
"""
MQTT receive message callback (garage/command)
Take action on a subject
"""
def on_message(client, userdata, msg):
print("message received ", str(msg.payload.decode("utf-8")))
print("message topic=", msg.topic)
print("message qos=", msg.qos)
print("message retain flag=", msg.retain)
cmd = str(msg.payload.decode("utf-8")).split(",")
bad_command = False
if len(cmd) == 2:
(subject, action) = cmd
if subject in garage_doors:
if action == "open":
garage_doors[subject].open()
elif action == "close":
garage_doors[subject].close()
elif action == "check":
garage_doors[subject].get_position()
else:
bad_command = True
elif subject == "dht11":
dht11.check_temp()
elif subject == "camera":
if action == "still":
garage_cam.take_still()
else:
bad_command = True
else:
bad_command = True
else:
bad_command = True
if bad_command:
print("Invalid payload {}".format(msg.payload.decode("utf-8")))
"""
MQTT publish callback
Mainly for debugging
"""
def on_publish(client, userdata, mid):
print("message id {} published".format(mid))
"""
Just in case
"""
def main():
pass
"""
Create client and connect it to the MQTT broker
"""
mqc = mqtt.Client("garage-pi", clean_session=True)
mqc.on_connect = on_connect
mqc.on_message = on_message
mqc.on_publish = on_publish
mqc.username_pw_set(mqtt_account, mqtt_passwd)
mqc.connect(mqtt_broker)
mqc.loop_start()
mqc.publish("garage/foo", "go!")
"""
Create temperature sensor object
"""
dht11 = temp_sensor(mqc, GPIO_Pins['temp_1'])
"""
Create garage camera object
"""
garage_cam = garage_camera(mqc)
"""
Create garage door objects
"""
garage_doors = dict()
garage_doors["left"] = garage_door(mqc,
"left",
GPIO_Pins['relay_1'],
GPIO_Pins['echo_1'],
GPIO_Pins['trig_1'])
garage_doors["right"] = garage_door(mqc,
"right",
GPIO_Pins['relay_2'],
GPIO_Pins['echo_2'],
GPIO_Pins['trig_2'])
if __name__ == "__main__":
main()
```
avg_line_length: 26.33871 | max_line_length: 72 | alphanum_fraction: 0.580527
qsc_*_quality_signal columns (schema order): 387 | 3,266 | 4.700258 | 0.356589 | 0.035184 | 0.030786 | 0.039582 | 0.06597 | 0.054975 | 0.029687 | 0.029687 | 0 | 0 | 0 | 0.017203 | 0.305879 | 3,266 | 123 | 73 | 26.552846 | 0.785179 | 0 | 0 | 0.106667 | 0 | 0 | 0.107721 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.053333 | false | 0.026667 | 0.106667 | 0 | 0.16 | 0.08
remaining qsc_* columns: 0 throughout (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
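Commands arrive as "subject,action" payloads on the subscribed topic; a publishing sketch, where the topic name, broker host, and credentials are assumptions (the real values live in MQTT_Config, which is not shown):

```python
import paho.mqtt.publish as publish

# Topic, hostname, and auth values below are placeholders; substitute
# whatever MQTT_Config defines.
publish.single("garage/command", payload="left,open",
               hostname="broker.example.org",
               auth={"username": "user", "password": "secret"})
```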

hexsha: f5e7ef3d480cf9bb53271fcd48200dc95c179ef9 | size: 5,887 | ext: py | lang: Python
path: app.py | repo: leemengtaiwan/gist-evernote @ 90d8573870ded37dc82575ba25968d7a06efe219 | licenses: ["MIT"]
max_stars_count: 35 (2018-01-29T00:50:36.000Z to 2021-04-04T13:59:26.000Z) | max_issues_count: 5 (2021-02-08T20:18:24.000Z to 2022-03-11T23:15:12.000Z) | max_forks_count: 4 (2018-02-06T12:13:09.000Z to 2019-12-20T09:12:41.000Z)
content:
```python
# encoding: utf-8
import os
import time
from multiprocessing import Pool, cpu_count
from selenium import webdriver
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.common.exceptions import TimeoutException
from enote.util import get_note, get_notebook, get_notebooks, \
create_resource, create_note, create_notebook, update_note
from github.util import get_user_name, get_all_gists
from web.util import fullpage_screenshot, get_gist_hash, create_chrome_driver
from settings import NOTEBOOK_TO_SYNC
from db import get_db
DATE_FORMAT = "%Y-%m-%dT%H:%M:%SZ"
GIST_BASE_URL = 'https://gist.github.com'
notebook = None
github_user = get_user_name() # get current login github user for fetching gist content
db = get_db() # database to store synchronization info
def app():
start = time.time()
global notebook
# find notebook to put new notes
notebooks = get_notebooks()
for n in notebooks:
if n.name == NOTEBOOK_TO_SYNC:
notebook = get_notebook(n.guid)
# create notebook with the specified name if not found
if not notebook:
notebook = create_notebook(NOTEBOOK_TO_SYNC)
print('Using notebook: %s' % notebook.name)
# initialize, get all available gists
if db.is_empty() or db.is_cold_start():
gists = get_all_gists()
# sync only gists that were pushed after last synchronization
else:
last_sync_date = db.get_last_sync()
print("Find gists that are updated after last sync (UTC): {}".format(last_sync_date))
gists = get_all_gists(after_date=last_sync_date)
print("Total number of gists to be synchronized: %d" % len(gists))
# headless mode to reduce overhead and distraction
driver = create_chrome_driver() if gists else None
for gist in gists:
_ = sync_gist(gist, driver=driver)
if driver:
driver.quit()
# TODO multi-processes + mysql
# setup multiple selenium drivers to speed up if multiple cpu available
# num_processes = min(4, cpu_count() - 1) if cpu_count() > 1 else 1
# print("Number of %d processes being created" % num_processes)
# pool = Pool(num_processes)
#
# notes = pool.map(sync_gist, gists)
#
# pool.terminate()
# pool.close()
# pool.join()
# sync all gists successfully, set to warm-start mode
if db.is_cold_start():
db.toggle_cold_start()
print("Synchronization took {:.0f} seconds.".format(time.time() - start))
def sync_gist(gist, driver):
"""Sync the Github gist to the corresponding Evernote note.
Create a new Evernote note if there is no corresponding one with the gist.
Overwrite existing note's content if gist has been changed.
Parameters
----------
gist : dict
A Gist acquired by Github GraphQL API with format like:
{
'id': 'gist_id',
'name': 'gist_name',
'description': 'description',
'pushAt': '2018-01-15T00:48:23Z'
}
driver : selenium.webdriver
The web driver used to access gist url
Returns
-------
note : evernote.edam.type.ttpyes.Note
None if no new note created or updated
"""
note_exist = False
gist_url = '/'.join((GIST_BASE_URL, gist['name']))
# check existing gist hash before fetch if available
prev_hash = db.get_hash_by_id(gist['id'])
note_guid = db.get_note_guid_by_id(gist['id'])
if prev_hash and note_guid:
note_exist = True
cur_hash = get_gist_hash(github_user, gist['name'])
if prev_hash == cur_hash:
print('Gist {} remain the same, ignore.'.format(gist_url))
db.update_gist(gist, note_guid, cur_hash)
return None
driver.get(gist_url)
# wait at most x seconds for Github rendering gist context
delay_seconds = 10
try:
WebDriverWait(driver, delay_seconds).until(EC.presence_of_element_located((By.CLASS_NAME, 'is-render-ready')))
except TimeoutException:
print("Take longer than {} seconds to load page.".format(delay_seconds))
# get first file name as default note title
    gist_title = driver.find_element(By.CSS_SELECTOR, '.gist-header-title>a').text  # CSS selector; a class name cannot contain '>'
# take screen shot for the gist and save it temporally
image_path = 'images/{}.png'.format(gist['name'])
fullpage_screenshot(driver, image_path)
# build skeleton for note (including screenshot)
resource, _ = create_resource(image_path)
note_title = gist['description'] if gist['description'] else gist_title
note_body = format_note_body(gist)
# get hash of raw gist content and save gist info to database
gist_hash = get_gist_hash(github_user, gist['name'])
# create new note / update existing note
if not note_exist:
note = create_note(note_title, note_body, [resource], parent_notebook=notebook)
db.save_gist(gist, note.guid, gist_hash)
else:
note = get_note(note_guid)
update_note(note, note_title, note_body, note_guid, [resource])
db.update_gist(gist, note_guid, gist_hash)
os.remove(image_path)
print("Finish creating note for gist {}".format(gist_url))
return note
def format_note_body(gist):
"""Create the note content that will be shown before attachments.
Parameters
----------
gist : dict
Dict that contains all information of the gist
Returns
-------
note_body : str
"""
blocks = []
desc = gist['description']
if desc:
blocks.append(desc)
gist_url = '/'.join((GIST_BASE_URL, gist['name']))
blocks.append('<a href="{}">Gist on Github</a>'.format(gist_url))
note_body = '<br/>'.join(blocks)
return note_body
if __name__ == '__main__':
app()
```
avg_line_length: 31.821622 | max_line_length: 118 | alphanum_fraction: 0.674367
qsc_*_quality_signal columns (schema order): 815 | 5,887 | 4.680982 | 0.304294 | 0.016776 | 0.016514 | 0.012582 | 0.065007 | 0.053997 | 0.033028 | 0.033028 | 0 | 0 | 0 | 0.004837 | 0.22745 | 5,887 | 184 | 119 | 31.994565 | 0.833993 | 0.319687 | 0 | 0.044444 | 0 | 0 | 0.115923 | 0 | 0 | 0 | 0 | 0.005435 | 0 | 1 | 0.033333 | false | 0 | 0.144444 | 0 | 0.211111 | 0.077778
remaining qsc_* columns: 0 throughout (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
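For the note body, format_note_body only joins the description and the gist URL; a small check:

```python
# With format_note_body from the module above in scope; the gist dict is a
# made-up example following the docstring's format.
gist = {"name": "0123456789abcdef", "description": "Demo gist"}
print(format_note_body(gist))
# Demo gist<br/><a href="https://gist.github.com/0123456789abcdef">Gist on Github</a>
```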

hexsha: f5e81680dbe98070292ce77eaa7479aa8b7e1630 | size: 326 | ext: py | lang: Python
path: python-leetcode/350.py | repo: MDGSF/interviews @ 9faa9aacdb0cfbb777d4d3d4d1b14b55ca2c9f76 | licenses: ["MIT"]
max_stars_count: 12 (2020-01-16T08:55:27.000Z to 2021-12-02T14:52:39.000Z) | max_issues_count: null | max_forks_count: 1 (2019-12-11T12:00:38.000Z to 2019-12-11T12:00:38.000Z)
content:
```python
import collections
from typing import List
class Solution:
def intersect(self, nums1: List[int], nums2: List[int]) -> List[int]:
m = collections.Counter(nums1)
result = []
for num in nums2:
if num in m:
result.append(num)
if m[num] == 1:
del m[num]
else:
m[num] -= 1
return result
```
avg_line_length: 21.733333 | max_line_length: 71 | alphanum_fraction: 0.546012
qsc_*_quality_signal columns (schema order): 44 | 326 | 4.045455 | 0.522727 | 0.117978 | 0.05618 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027523 | 0.331288 | 326 | 14 | 72 | 23.285714 | 0.788991 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.076923 | 0 | 0.307692 | 0
remaining qsc_* columns: 0 throughout (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
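A quick check of the counter-based intersection, using LeetCode 350's sample case:

```python
# With the Solution class above in scope:
print(Solution().intersect([1, 2, 2, 1], [2, 2]))  # -> [2, 2]
```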

hexsha: f5edd88e2d458d89d6714005f92ae5a2d900050e | size: 564 | ext: py | lang: Python
path: polls/urls.py | repo: SkyFlame00/webpolls @ d137da1aaaa8af78520af7762b8002428842d617 | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
```python
from django.urls import path
from django.conf.urls import url
from . import views
urlpatterns = [
path('', views.index, name='index'),
path('logout/', views.logoutView, name='logout'),
path('signup/', views.signup, name='signup'),
url(r'^activate/(?P<uidb64>[0-9A-Za-z_\-]+)/(?P<token>[0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20})/$', views.activate, name='activate'),
path('myprofile/', views.myprofile, name='myprofile'),
path('myprofile/edit/', views.myprofile_edit, name='myprofile_edit'),
path('testing', views.testing, name='testing')
]
```
avg_line_length: 37.6 | max_line_length: 132 | alphanum_fraction: 0.654255
qsc_*_quality_signal columns (schema order): 80 | 564 | 4.575 | 0.3625 | 0.02459 | 0.040984 | 0.04918 | 0.038251 | 0 | 0 | 0 | 0 | 0 | 0 | 0.028169 | 0.118794 | 564 | 14 | 133 | 40.285714 | 0.70825 | 0 | 0 | 0 | 0 | 0.083333 | 0.33156 | 0.152482 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0
remaining qsc_* columns: 0 throughout (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
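A sketch of reversing the activate route, assuming these URLs are included at the site root; the uidb64 and token values are made up but match the capture groups:

```python
from django.urls import reverse

url = reverse("activate", kwargs={
    "uidb64": "Mjk",                         # urlsafe-base64 user id (example)
    "token": "abc123-0123456789abcdef0123",  # matches [0-9A-Za-z]{1,13}-[0-9A-Za-z]{1,20}
})
print(url)  # e.g. /activate/Mjk/abc123-0123456789abcdef0123/
```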

hexsha: f5ee0fc5d74aae0b09b30c0e37603f02a2ea4deb | size: 14,918 | ext: py | lang: Python
path: forceDAQ/gui/plotter.py | repo: gftabor/pyForceDAQ @ 3eababb41d855b961d228d8366fdd154bb6314ea | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
```python
__version__ = "0.2"
import threading
import numpy as np
import pygame
from expyriment.stimuli import Canvas, Rectangle, TextLine
from expyriment.stimuli._visual import Visual
from expyriment.misc import constants
lock_expyriment = threading.Lock()
Numpy_array_type = type(np.array([]))
class Scaling(object):
"""littel helper object function to handle plotter scaling"""
step_size = 5 # for increasing/decreasing
def __init__(self, min, max,
pixel_min, pixel_max):
"""xy-value arrays"""
self._min = min
self._max = max
self.pixel_min = pixel_min
self.pixel_max = pixel_max
self._update()
@property
def max(self):
return self._max
@max.setter
def max(self, value):
self._max = value
self._update()
@property
def min(self):
return self._min
@min.setter
def min(self, value):
self._min = value
self._update()
def _update(self):
self._zero_shift = (self._min + self._max)/2.0
self._range = float(self._max - self._min)
def get_pixel_factor(self):
return (self.pixel_max - self.pixel_min) / self._range
def increase_data_range(self):
self.min += Scaling.step_size
self.max -= Scaling.step_size
if self.min >= self.max:
self.decrease_data_range()
def decrease_data_range(self):
self.min -= Scaling.step_size
self.max += Scaling.step_size
def data_range_up(self):
self.min += Scaling.step_size
self.max += Scaling.step_size
def data_range_down(self):
self.min -= Scaling.step_size
self.max -= Scaling.step_size
def data2pixel(self, values):
""" values: numeric or numpy array
pixel_min_max: 2D array"""
return (values - self._zero_shift) * \
(self.pixel_max - self.pixel_min) / self._range # pixel_factor
def trim(self, value):
"""trims value to the range, ie. set to min or max if <min or > max """
if value < self.min:
return self.min
elif value > self.max:
return self.max
return value
class PGSurface(Canvas):
"""PyGame Surface: Expyriment Stimulus for direct Pygame operations and
PixelArrays
In contrast to other Expyriment stimuli the class does not generate temporary
surfaces.
"""
def __init__(self, size, position=None, colour=None):
Canvas.__init__(self, size, position, colour)
self._px_array = None
@property
def surface(self):
"""todo"""
if not self.has_surface:
ok = self._set_surface(self._get_surface()) # create surface
if not ok:
raise RuntimeError(Visual._compression_exception_message.format(
"surface"))
return self._surface
@property
def pixel_array(self):
"""todo"""
if self._px_array is None:
self._px_array = pygame.PixelArray(self.surface)
return self._px_array
@pixel_array.setter
def pixel_array(self, value):
if self._px_array is None:
self._px_array = pygame.PixelArray(self.surface)
self._px_array = value
def unlock_pixel_array(self):
"""todo"""
self._px_array = None
def preload(self, inhibit_ogl_compress=False):
self.unlock_pixel_array()
return Canvas.preload(self, inhibit_ogl_compress)
def compress(self):
self.unlock_pixel_array()
return Canvas.compress(self)
def decompress(self):
self.unlock_pixel_array()
return Canvas.decompress(self)
def plot(self, stimulus):
self.unlock_pixel_array()
return Canvas.plot(self, stimulus)
def clear_surface(self):
self.unlock_pixel_array()
return Canvas.clear_surface(self)
def copy(self):
self.unlock_pixel_array()
return Canvas.copy(self)
def unload(self, keep_surface=False):
if not keep_surface:
self.unlock_pixel_array()
return Canvas.unload(self, keep_surface)
def rotate(self, degree):
self.unlock_pixel_array()
return Canvas.rotate(self, degree)
def scale(self, factors):
self.unlock_pixel_array()
return Canvas.scale(self, factors)
# expyriment 0.8.0
# def scale_to_fullscreen(self, keep_aspect_ratio=True):
# self.unlock_pixel_array()
# return Canvas.scale_to_fullscreen(self, keep_aspect_ratio)
def flip(self, booleans):
self.unlock_pixel_array()
return Canvas.flip(self, booleans)
def blur(self, level):
self.unlock_pixel_array()
return Canvas.blur(self, level)
def scramble(self, grain_size):
self.unlock_pixel_array()
return Canvas.scramble(self, grain_size)
def add_noise(self, grain_size, percentage, colour):
self.unlock_pixel_array()
return Canvas.add_noise(self, grain_size, percentage, colour)
class Plotter(PGSurface):
"""Pygame Plotter"""
def __init__(self, n_data_rows, data_row_colours,
width=600, y_range=(-100, 100),
background_colour=(180, 180, 180),
marker_colour=(200, 200, 200),
position=None,
axis_colour=None):
self.n_data_rows = n_data_rows
self.data_row_colours = data_row_colours
self.width = width
self.y_range = y_range
self._background_colour = background_colour
self.marker_colour = marker_colour
self._horizontal_lines = None
if axis_colour is None:
self.axis_colour = background_colour
else:
self.axis_colour = axis_colour
self._previous = [None] * n_data_rows
PGSurface.__init__(self, size=(self.width, self._height),
position=position)
self.clear_area()
@property
def y_range(self):
        return self._y_range
@y_range.setter
def y_range(self, values):
"""tuple with lower and upper values"""
self._y_range = values
self._height = self._y_range[1] - self._y_range[0]
@property
def data_row_colours(self):
return self._data_row_colours
@data_row_colours.setter
def data_row_colours(self, values):
"""data_row_colours: list of colour"""
try:
if not isinstance(values[0], list) and \
not isinstance(values[0], tuple): # one dimensional
values = [values]
except:
values = [[]] # values is not listpixel_array
if len(values) != self.n_data_rows:
raise RuntimeError('Number of data row colour does not match the ' +
'defined number of data rows!')
self._data_row_colours = values
def clear_area(self):
self.pixel_array[:, :] = self._background_colour
def set_horizontal_line(self, y_values):
"""y_values: array"""
try:
self._horizontal_lines = np.array(y_values, dtype=int)
except:
self._horizontal_lines = None
def write_values(self, position, values, set_marker=False,
set_point_marker=False):
"""
additional points: np.array
"""
if set_marker:
self.pixel_array[position, :] = self.marker_colour
else:
self.pixel_array[position, :] = self._background_colour
if set_point_marker:
self.pixel_array[position, 0:2] = self.marker_colour
if self._horizontal_lines is not None:
for c in (self._y_range[1] - self._horizontal_lines):
self.pixel_array[:, c:c+1] = self.marker_colour
for c, plot_value in enumerate(self._y_range[1] - \
np.array(values, dtype=int)):
if plot_value >= 0 and self._previous[c] >= 0 \
and plot_value <= self._height and \
self._previous[c] <= self._height:
if self._previous[c] > plot_value:
self.pixel_array[position,
plot_value:self._previous[c] + 1] = \
self._data_row_colours[c]
else:
self.pixel_array[position,
self._previous[c]:plot_value + 1] = \
self._data_row_colours[c]
self._previous[c] = plot_value
def add_values(self, values, set_marker=False):
""" high level function of write values with type check and shifting to left
not used by plotter thread
"""
if type(values) is not Numpy_array_type and \
not isinstance(values, tuple) and \
not isinstance(values, list):
values = [values]
if len(values) != self.n_data_rows:
raise RuntimeError('Number of data values does not match the ' +
'defined number of data rows!')
# move plot one pixel to the left
self.pixel_array[:-1, :] = self.pixel_array[1:, :]
self.write_values(position=-1, values=values, set_marker=set_marker)
class PlotterThread(threading.Thread):
def __init__(self, n_data_rows, data_row_colours,
width=600, y_range=(-100, 100),
background_colour=(80, 80, 80),
marker_colour=(200, 200, 200),
position=None,
axis_colour=None):
super(PlotterThread, self).__init__()
self._plotter = Plotter(n_data_rows=n_data_rows,
data_row_colours=data_row_colours,
width=width, y_range=y_range,
background_colour=background_colour,
marker_colour=marker_colour,
position=position,
axis_colour=axis_colour)
self._new_values = []
self._lock_new_values = threading.Lock()
self._running = threading.Event()
self._stop_request = threading.Event()
self._clear_area_event = threading.Event()
self.unpause()
def get_plotter_rect(self, screen_size):
half_screen_size = (screen_size[0] / 2, screen_size[1] / 2)
pos = self._plotter.absolute_position
stim_size = self._plotter.surface_size
rect_pos = (pos[0] + half_screen_size[0] - stim_size[0] / 2,
- pos[1] + half_screen_size[1] - stim_size[1] / 2)
return pygame.Rect(rect_pos, stim_size)
def clear_area(self):
self._clear_area_event.set()
def pause(self):
self._running.clear()
def unpause(self):
self._running.set()
def stop(self):
self.join()
def join(self, timeout=None):
self._stop_request.set()
super(PlotterThread, self).join(timeout)
def run(self):
"""the plotter thread is constantly updating the the
pixel_area"""
while not self._stop_request.is_set():
if not self._running.is_set():
self._running.wait(timeout=1)
continue
if self._clear_area_event.is_set():
self._plotter.clear_area()
self._clear_area_event.clear()
# get data
if self._lock_new_values.acquire(False):
values = self._new_values
self._new_values = []
self._lock_new_values.release() # release to receive new values
else:
values = []
n = len(values)
if n > 0:
if n > self._plotter.width:
values = values[-1 * self._plotter.width:] # only the last
n = len(values)
self._plotter.pixel_array[:-1 * n, :] = \
self._plotter.pixel_array[n:, :]
for x in range(-1 * n, 0):
self._plotter.write_values(position=x,
values=values[x][0],
set_marker=values[x][1],
set_point_marker=values[x][2])
# Expyriment present
lock_expyriment.acquire()
self._plotter.present(update=False, clear=False)
lock_expyriment.release()
def set_horizontal_lines(self, y_values):
"""adds new values to the plotter
y_values has to be an array
"""
self._lock_new_values.acquire()
self._plotter.set_horizontal_line(y_values=y_values)
self._lock_new_values.release()
def add_values(self, values, set_marker=False, set_point_marker=False):
"""adds new values to the plotter"""
self._lock_new_values.acquire()
self._new_values.append((values, set_marker, set_point_marker))
self._lock_new_values.release()
def level_indicator(value, text, scaling, width=20,
text_size=14, text_gap=20, position=(0,0), thresholds = None,
colour=constants.C_EXPYRIMENT_ORANGE):
"""make an level indicator in for of an Expyriment stimulus
text_gap: gap between indicator and text
scaling: Scaling object
Returns
--------
expyriment.Canvas
"""
value = scaling.trim(value)
# indicator
height = scaling.pixel_max - scaling.pixel_min
indicator = Canvas(size=[width + 2, height + 2],
colour=(30, 30, 30))
zero = scaling.data2pixel(0)
px_bar_height = scaling.data2pixel(value) - zero
bar = Rectangle(size=(width, abs(px_bar_height)),
position=(0, zero + int((px_bar_height + 1) / 2)),
colour=colour)
bar.plot(indicator)
# levels & horizontal lines
try:
px_horizontal_lines = scaling.data2pixel(values=np.array(thresholds.thresholds))
except:
px_horizontal_lines = None
if px_horizontal_lines is not None:
for px in px_horizontal_lines:
level = Rectangle(size=(width+6, 2),
position=(0, px),
colour=constants.C_WHITE)
level.plot(indicator)
# text labels
txt = TextLine(text=text, text_size=text_size,
position=(0, -1 * (int(height / 2.0) + text_gap)),
text_colour=constants.C_YELLOW)
# make return canvas
w = max(txt.surface_size[0], indicator.size[0])
h = height + 2 * (txt.surface_size[1]) + text_gap
rtn = Canvas(size=(w, h), colour=(0, 0, 0), position=position)
indicator.plot(rtn)
txt.plot(rtn)
return rtn
if __name__ == "__main__":
pass
```
avg_line_length: 32.714912 | max_line_length: 88 | alphanum_fraction: 0.58292
qsc_*_quality_signal columns (schema order): 1,782 | 14,918 | 4.61055 | 0.138047 | 0.035297 | 0.029211 | 0.03408 | 0.310735 | 0.256573 | 0.175998 | 0.11721 | 0.093354 | 0.084104 | 0 | 0.013314 | 0.320284 | 14,918 | 455 | 89 | 32.786813 | 0.796943 | 0.089958 | 0 | 0.246875 | 0 | 0 | 0.011981 | 0 | 0 | 0 | 0 | 0.002198 | 0 | 1 | 0.159375 | false | 0.003125 | 0.01875 | 0.015625 | 0.275 | 0
remaining qsc_* columns: 0 throughout (null for qsc_code_frac_words_unique and qsc_code_frac_lines_string_concat) | effective: 1 | hits: 0
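The Scaling helper maps data values to pixel offsets around the midpoint of its range; a quick check of data2pixel and trim:

```python
# With the Scaling class above in scope:
s = Scaling(min=-100, max=100, pixel_min=0, pixel_max=200)
print(s.data2pixel(0))   # 0.0   (midpoint of the data range)
print(s.data2pixel(50))  # 50.0  ((50 - 0) * (200 - 0) / 200)
print(s.trim(150))       # 100   (clamped to max)
```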

hexsha: f5f03ea17d8bc72c5ae1602cba0dbeef3ed61e6b | size: 2,905 | ext: py | lang: Python
path: app/modules/payments/resources.py | repo: almlys/sample_paymentsapi @ d7ba4d2effeb7654ee06aab6dbb15e22f8d213cc | licenses: ["MIT"]
max_stars_count: null | max_issues_count: null | max_forks_count: null
content:
```python
# encoding: utf-8
# pylint: disable=bad-continuation
"""
RESTful API Payments resources
------------------------------
"""
import logging
from flask_login import current_user
from flask_restplus_patched import Resource
from flask_restplus._http import HTTPStatus
from app.extensions import db
from app.extensions.api import Namespace, abort
from app.extensions.api.parameters import PaginationParameters
from . import parameters, schemas
from .models import Payment
log = logging.getLogger(__name__) # pylint: disable=invalid-name
api = Namespace('payments', description="Payments") # pylint: disable=invalid-name
@api.route('/')
class Payments(Resource):
"""
Manipulations with Payments.
"""
@api.parameters(PaginationParameters())
@api.response(schemas.BasePaymentSchema(many=True))
def get(self, args):
"""
        List of Payments.
        Returns a list of Payments starting from ``offset``, limited by the
        ``limit`` parameter.
"""
return Payment.query.offset(args['offset']).limit(args['limit'])
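    # Illustrative request (path assumes the namespace mounting above):
    # GET /payments/?offset=0&limit=10 returns at most 10 Payment records,
    # serialized with BasePaymentSchema.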
@api.parameters(parameters.CreatePaymentParameters())
@api.response(schemas.DetailedPaymentSchema())
@api.response(code=HTTPStatus.CONFLICT)
def post(self, args):
"""
Create a new instance of Payment.
"""
with api.commit_or_abort(
db.session,
default_error_message="Failed to create a new Payment"
):
payment = Payment(**args)
db.session.add(payment)
return payment
@api.route('/<payment_id>')
@api.response(
code=HTTPStatus.NOT_FOUND,
description="Payment not found.",
)
@api.resolve_object_by_model(Payment, 'payment')
class PaymentByID(Resource):
"""
Manipulations with a specific Payment.
"""
@api.response(schemas.DetailedPaymentSchema())
def get(self, payment):
"""
Get Payment details by ID.
"""
return payment
@api.parameters(parameters.PatchPaymentDetailsParameters())
@api.response(schemas.DetailedPaymentSchema())
@api.response(code=HTTPStatus.CONFLICT)
def patch(self, args, payment):
"""
Patch Payment details by ID.
"""
with api.commit_or_abort(
db.session,
default_error_message="Failed to update Payment details."
):
parameters.PatchPaymentDetailsParameters.perform_patch(args, obj=payment)
db.session.merge(payment)
return payment
@api.response(code=HTTPStatus.CONFLICT)
@api.response(code=HTTPStatus.NO_CONTENT)
def delete(self, payment):
"""
Delete a Payment by ID.
"""
with api.commit_or_abort(
db.session,
default_error_message="Failed to delete the Payment."
):
db.session.delete(payment)
return None
| 27.666667
| 85
| 0.640275
| 304
| 2,905
| 6.023026
| 0.325658
| 0.054069
| 0.040961
| 0.068269
| 0.22556
| 0.178045
| 0.178045
| 0.178045
| 0.178045
| 0.178045
| 0
| 0.000456
| 0.245783
| 2,905
| 104
| 86
| 27.932692
| 0.835235
| 0.154217
| 0
| 0.310345
| 0
| 0
| 0.069147
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086207
| false
| 0
| 0.155172
| 0
| 0.362069
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5f344323771b9cf37b06554ddc6a58b22178367
| 1,616
|
py
|
Python
|
bin/list-teams.py
|
kws/python-msgraphy
|
a5dad8bd834c476974fae151f30865c229e0f798
|
[
"MIT"
] | 1
|
2022-01-06T08:06:47.000Z
|
2022-01-06T08:06:47.000Z
|
bin/list-teams.py
|
kws/python-msgraphy
|
a5dad8bd834c476974fae151f30865c229e0f798
|
[
"MIT"
] | null | null | null |
bin/list-teams.py
|
kws/python-msgraphy
|
a5dad8bd834c476974fae151f30865c229e0f798
|
[
"MIT"
] | null | null | null |
import msgraphy_util
import argparse
from msgraphy import GraphApi
def main(name, starts_with, exact, channels, folder):
api = GraphApi(scopes=["Group.Read.All"])
response = api.team.list_teams(search=name, starts_with=starts_with, exact=exact)
for team in response.value:
print(f"{team.display_name} [{team.id}]")
print(team.description)
if channels or folder:
response = api.team.list_channels(team.id)
for ch in response.value:
print(f"* {ch.display_name} [{ch.id}]")
            if folder:
                response = api.team.get_channel_files_folder(team.id, ch.id)
                if response.ok:
                    files_folder = response.value  # avoid clobbering the 'folder' flag
                    print(f"    {files_folder.web_url}")
                else:
                    print("    [Folder not found]")
print("")
if __name__ == "__main__":
parser = argparse.ArgumentParser(
description='List or search for MS team'
)
parser.add_argument("name", type=str, nargs="?", help="show only teams which contains [name]")
parser.add_argument("--starts_with", "-s", type=str, nargs="?", metavar="value", help="only teams starting with [value]")
parser.add_argument("--exact", "-e", type=str, nargs="?", metavar="value", help="only teams exactly matching [value]")
parser.add_argument("--channels", "-c", action='store_true', help="include channels")
parser.add_argument("--folder", "-f", action='store_true', help="include channel folder (implies -c)")
args = parser.parse_args()
main(**vars(args))
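# Illustrative invocations (team names are made up):
#   python list-teams.py Engineering          # teams containing "Engineering"
#   python list-teams.py -s Dev --channels    # teams starting with "Dev", with channels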
| 41.435897
| 125
| 0.603342
| 196
| 1,616
| 4.826531
| 0.367347
| 0.047569
| 0.089852
| 0.060254
| 0.17759
| 0.078224
| 0.078224
| 0.078224
| 0
| 0
| 0
| 0
| 0.251856
| 1,616
| 38
| 126
| 42.526316
| 0.782465
| 0
| 0
| 0
| 0
| 0
| 0.237624
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0.09375
| 0
| 0.125
| 0.1875
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5f35c0e3a98205f6d6bd8dde9d15ab552f7d436
| 21,372
|
py
|
Python
|
tileEditor.py
|
haywireSSC/Level-Editor
|
34fedbe36b90afeb8c0d995fcecbed845ffd6253
|
[
"CC0-1.0"
] | null | null | null |
tileEditor.py
|
haywireSSC/Level-Editor
|
34fedbe36b90afeb8c0d995fcecbed845ffd6253
|
[
"CC0-1.0"
] | null | null | null |
tileEditor.py
|
haywireSSC/Level-Editor
|
34fedbe36b90afeb8c0d995fcecbed845ffd6253
|
[
"CC0-1.0"
] | null | null | null |
import pygame as p
from math import floor
from copy import deepcopy
import tkinter as Tkinter  # Python 3 names; the f-strings below require Python 3
from tkinter import filedialog as tkFileDialog
root = Tkinter.Tk()
root.withdraw()
p.init()
running = True
tileWidth = 16
tileHeight = 16
mapWidth = 100
mapHeight = 100
camX = 0
camY = 0
scale = 2
uiScale = 2
hand = 1
layerStack = True
file_path = ''
file_path = tkFileDialog.askopenfilename()
if file_path[-3:] != 'png':
exit()
layers = []
currentLayer = 1
layers.append([-1] * (mapWidth * mapHeight))
layers.append([-1] * (mapWidth * mapHeight))
prevLayers = deepcopy(layers)
prevLayerLists = []
prevLayerListsRedo = []
brush = p.image.load('brush.png')
brushHover = p.image.load('brushHover.png')
square = p.image.load('square.png')
squareHover = p.image.load('squareHover.png')
brushRect = brush.get_rect()
squareRect = square.get_rect()
brushRect.width, brushRect.height = brushRect.width * uiScale, brushRect.height * uiScale
squareRect.width, squareRect.height = squareRect.width * uiScale, squareRect.height * uiScale
(width, height) = (480, 360)
p.display.set_caption('Tile Editor')
font = p.font.Font('Minecraftia-Regular.ttf', 8)
s = p.display.set_mode((width, height), p.RESIZABLE)
clock = p.time.Clock()
middleClick = False
leftClick = False
leftClickPrev = False
rightClick = False
rightClickDown = False
rightClickPrev = False
mouseOffset = (0, 0)
mousePos = (0, 0)
buttonClick = False
buttonHover = False
sDown = False
squareT = False
sDownStart = False
startPos = (0,0)
def drawBox(width, height, filled):
surf = p.Surface((width, height))
if(filled):
surf.fill((41,48,50))
else:
surf.fill((0,0,0,0))
p.draw.rect(surf, (113,58,41), (0, 0, width, height), 1)
surf.set_at((0, 0), (0,0,0,0))
surf.set_at((width-1, 0), (0,0,0,0))
surf.set_at((0, height-1), (0,0,0,0))
surf.set_at((width-1, height-1), (0,0,0,0))
p.draw.rect(surf, (10,21,27), (1, 1, width-2, height-2), 1)
surf.set_at((1, 1), (88,41,24))
surf.set_at((width-2, 1), (88,41,24))
surf.set_at((1, height-2), (88,41,24))
surf.set_at((width-2, height-2), (88,41,24))
p.draw.lines(surf, (34,30,21), False, ((2, height-3), (2, 2), (width-3, 2)))
p.draw.lines(surf, (86,92,86), False, ((3, height-3), (width-3, height-3), (width-3, 3)))
#p.draw.rect(surf, (225,0,225), (3, 3, width-6, height-6))
return(p.transform.scale(surf, (uiScale * width, uiScale * height)))
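# Illustrative: with uiScale == 2, drawBox(50, 20, True) builds a filled, bordered
# 50x20 panel and returns it scaled to a 100x40 surface.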
def drawButton(textt, x, y):
global buttonClick
buttonClick = False
global buttonHover
buttonHover = False
text = font.render(textt, False, (251,175,113))
width = text.get_width() + 5
height = text.get_height() + 3
if textt[-1] == str(currentLayer):
text = font.render(textt, False, (150,179,174))
if textt == 'Layer Stack' and layerStack:
text = font.render(textt, False, (150,179,174))
if p.Rect(x, y, width * uiScale, height * uiScale).collidepoint(mousePos[0], mousePos[1]):
text = font.render(textt, False, (150,179,174))
buttonHover = True
if leftClick:
y += uiScale
if not leftClickPrev:
buttonClick = True
surf = p.Surface((width, height), p.SRCALPHA)
surf.fill((41,48,50))
surf.blit(text, (3, 1))
p.draw.rect(surf, (113,58,41), (0, 0, width, height), 1)
surf.set_at((0, 0), (0,0,0,0))
surf.set_at((width-1, 0), (0,0,0,0))
surf.set_at((0, height-1), (0,0,0,0))
surf.set_at((width-1, height-1), (0,0,0,0))
p.draw.rect(surf, (10,21,27), (1, 1, width-2, height-2), 1)
surf.set_at((1, 1), (88,41,24))
surf.set_at((width-2, 1), (88,41,24))
surf.set_at((1, height-2), (88,41,24))
surf.set_at((width-2, height-2), (88,41,24))
p.draw.lines(surf, (34,30,21), False, ((2, height-3), (2, 2), (width-3, 2)))
p.draw.lines(surf, (86,92,86), False, ((3, height-3), (width-3, height-3), (width-3, 3)))
s.blit(p.transform.scale(surf, (uiScale * width, uiScale * height)), (x, y))
tiles = []
sheetHeight = 0
sheetWidth = 0
def load_sheet(path):
global tiles
global sheetHeight
global sheetWidth
sheet = p.image.load(path)
if sheet.get_width() >= tileWidth and sheet.get_height() >= tileHeight:
tiles = []
sheetWidth = sheet.get_width()
sheetHeight = sheet.get_height()
for y in range(sheetHeight // tileHeight):
for x in range(sheetWidth // tileWidth):
image = p.Surface((tileWidth, tileHeight), p.SRCALPHA)
image.blit(sheet, (0, 0), (x * tileWidth, y * tileHeight, tileWidth, tileHeight))
tiles.append((image, x * tileWidth, y * tileHeight))
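# Illustrative: a 64x32 sheet with 16x16 tiles yields 8 tiles appended row-major,
# so tile index 5 is the tile at column 1, row 1 of the sheet.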
load_sheet(file_path)
while running:
windowResize = False
for event in p.event.get():
if event.type == p.QUIT:
running = False
elif event.type == p.MOUSEMOTION:
mousePos = p.mouse.get_pos()
elif event.type == p.MOUSEBUTTONDOWN:
mousePos = p.mouse.get_pos()
if event.button == 2:
                mouseOffset = (mousePos[0] - camX, mousePos[1] - camY)
middleClick = True
elif event.button == 1:
leftClick = True
elif event.button == 3:
rightClick = True
rightClickDown = True
elif event.type == p.MOUSEBUTTONUP:
if event.button == 2:
middleClick = False
elif event.button == 1:
leftClick = False
elif event.button == 3:
rightClick = False
elif event.type == p.MOUSEWHEEL and not middleClick:
scale += event.y
if(scale < 1):
scale = 1
elif event.type == p.VIDEORESIZE:
width = event.w
height = event.h
windowResize = True
elif event.type == p.KEYDOWN:
if event.key == p.K_z and p.key.get_mods() & p.KMOD_CTRL:
if len(prevLayerLists) != 0:
prevLayerListsRedo.append(layers)
layers = prevLayerLists[-1]
del prevLayerLists[-1]
elif event.key == p.K_y and p.key.get_mods() & p.KMOD_CTRL:
if len(prevLayerListsRedo) != 0:
prevLayerLists.append(layers)
layers = prevLayerListsRedo[-1]
del prevLayerListsRedo[-1]
elif event.key == p.K_s:
sDown = True
elif event.type == p.KEYUP:
if event.key == p.K_s:
sDown = False
prevLayers = deepcopy(layers)
if middleClick:
camX, camY = mousePos[0] - mouseOffset[0], mousePos[1] - mouseOffset[1]
x = int(round((mousePos[0] - camX) / (tileWidth * scale)))
y = int(round((mousePos[1] - camY) / (tileHeight * scale)))
layers[0][(y * mapWidth) + x] = hand
if leftClick and not sDownStart:
if(mousePos[0] > (9 * uiScale) and mousePos[0] < (sheetWidth + 9) * uiScale and mousePos[1] > (9 * uiScale) and mousePos[1] < (sheetHeight + 9) * uiScale):
x = int(round((mousePos[0] - (9 * uiScale)) / (tileWidth * uiScale)))
y = int(round((mousePos[1] - (9 * uiScale)) / (tileHeight * uiScale)))
hand = (y * (sheetWidth // (tileWidth))) + x
else:
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[currentLayer][(y * mapWidth) + x] = hand
elif rightClick and not sDown:
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[currentLayer][(y * mapWidth) + x] = -1
s.fill((41,48,50))
renderList = []
for i in range(0, len(layers)):
if not i == 0:
for x in range(mapWidth):
for y in range(mapHeight):
if (x * tileWidth * scale) + camX > tileWidth * -scale and (x * tileWidth * scale) + camX < width and (y * tileHeight * scale) + camY > tileHeight * -scale and (y * tileHeight * scale) + camY < height:
tile = layers[0][y * mapWidth + x]
if not layerStack:
if i == currentLayer and tile != -1 and not [x,y] in renderList:
renderList.append([x,y])
s.blit(p.transform.scale(tiles[tile][0], (tileWidth * scale, tileHeight * scale)), ((x * tileWidth * scale) + camX, (y * tileHeight * scale) + camY))
else:
tile = layers[i][y * mapWidth + x]
if not [x,y] in renderList:
if tile == -1 and i == currentLayer:
if uiScale >= scale:
p.draw.rect(s, (86,92,86), p.Rect((x * tileWidth * scale) + camX, (y * tileHeight * scale) + camY, tileWidth * scale, tileHeight * scale), 1)
else:
p.draw.rect(s, (86,92,86), p.Rect((x * tileWidth * scale) + camX, (y * tileHeight * scale) + camY, tileWidth * scale, tileHeight * scale), uiScale)
elif tile != -1:
renderList.append([x,y])
s.blit(p.transform.scale(tiles[tile][0], (tileWidth * scale, tileHeight * scale)), ((x * tileWidth * scale) + camX, (y * tileHeight * scale) + camY))
else:
if i == currentLayer and tile != -1:
renderList.append([x,y,tile])
else:
tile = layers[i][y * mapWidth + x]
if tile == -1 and i == currentLayer:
if uiScale >= scale:
p.draw.rect(s, (86,92,86), p.Rect((x * tileWidth * scale) + camX, (y * tileHeight * scale) + camY, tileWidth * scale, tileHeight * scale), 1)
else:
p.draw.rect(s, (86,92,86), p.Rect((x * tileWidth * scale) + camX, (y * tileHeight * scale) + camY, tileWidth * scale, tileHeight * scale), uiScale)
elif tile != -1:
renderList.append([x,y,tile])
if layerStack:
for i in range(len(renderList)-1, 0, -1):
s.blit(p.transform.scale(tiles[renderList[i][2]][0], (tileWidth * scale, tileHeight * scale)), ((renderList[i][0] * tileWidth * scale) + camX, (renderList[i][1] * tileHeight * scale) + camY))
i = sheetHeight + int(tileHeight * 1.5 + 12)
s.blit(drawBox(sheetWidth + 12, i, True), (3 * uiScale, 3 * uiScale))
drawButton('New Layer', 3 * uiScale, (i + 6) * uiScale)
if buttonClick:
layers.append([-1] * (mapWidth * mapHeight))
currentLayer = len(layers)-1
for layer in range(0, len(layers)-1):
drawButton('Layer ' + str(layer + 1), 3 * uiScale, (i + 26 * (layer + 1)) * uiScale)
if buttonClick:
currentLayer = layer + 1
if buttonHover and rightClickDown and len(layers) > 2:
prevLayerLists.append(deepcopy(layers))
del layers[layer + 1]
if currentLayer > len(layers) - 1:
currentLayer -= 1
prevLayers = layers
for image in tiles:
s.blit(p.transform.scale(image[0], (tileWidth * uiScale, tileHeight * uiScale)), ((image[1] + 9) * uiScale, (image[2] + 9) * uiScale))
s.blit(p.transform.scale(tiles[hand][0], (tileWidth * uiScale, tileHeight * uiScale)), (9 * uiScale, (sheetHeight + tileHeight) * uiScale))
drawButton('Open Tilesheet', (sheetWidth + 18) * uiScale, 3 * uiScale)
if buttonClick:
file_path = tkFileDialog.askopenfilename()
if file_path[-3:] == 'png':
load_sheet(file_path)
drawButton('Layer Stack', (sheetWidth + 18) * uiScale, 23 * uiScale)
if buttonClick:
layerStack = not layerStack
layers[0] = [-1] * (mapWidth * mapHeight)
if not leftClick and leftClickPrev and sDownStart:
sDownStart = False
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) + 1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) + 1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[currentLayer][(y * mapWidth) + x] = hand
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) - 1, -1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) - 1, -1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[currentLayer][(y * mapWidth) + x] = hand
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) + 1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) - 1, -1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[currentLayer][(y * mapWidth) + x] = hand
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) - 1, -1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) + 1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[currentLayer][(y * mapWidth) + x] = hand
elif leftClick and sDownStart:
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) + 1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) + 1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[0][(y * mapWidth) + x] = hand
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) - 1, -1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) - 1, -1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[0][(y * mapWidth) + x] = hand
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) + 1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) - 1, -1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[0][(y * mapWidth) + x] = hand
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) - 1, -1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) + 1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[0][(y * mapWidth) + x] = hand
if not rightClick and rightClickPrev and sDownStart:
sDownStart = False
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) + 1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) + 1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[currentLayer][(y * mapWidth) + x] = -1
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) - 1, -1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) - 1, -1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[currentLayer][(y * mapWidth) + x] = -1
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) + 1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) - 1, -1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[currentLayer][(y * mapWidth) + x] = -1
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) - 1, -1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) + 1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[currentLayer][(y * mapWidth) + x] = -1
elif rightClick and sDownStart:
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) + 1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) + 1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[0][(y * mapWidth) + x] = -2
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) - 1, -1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) - 1, -1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[0][(y * mapWidth) + x] = -2
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) + 1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) - 1, -1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[0][(y * mapWidth) + x] = -2
for x in range(startPos[0], int(round((mousePos[0] - camX) / (tileWidth * scale))) - 1, -1):
for y in range(startPos[1], int(round((mousePos[1] - camY) / (tileHeight * scale))) + 1):
if(mousePos[0] > camX and mousePos[0] < camX + ((tileWidth * scale) * mapWidth) and mousePos[1] > camY and mousePos[1] < camY + ((tileHeight * scale) * mapHeight)):
layers[0][(y * mapWidth) + x] = -2
if leftClick and not leftClickPrev or rightClick and not rightClickPrev:
if sDown:
sDownStart = True
startPos = (int(round((mousePos[0] - camX) / (tileWidth * scale))), int(round((mousePos[1] - camY) / (tileHeight * scale))))
if prevLayers != layers:
prevLayerLists.append(deepcopy(prevLayers))
leftClickPrev = leftClick
backDown = False
rightClickDown = False
brushRect.x,brushRect.y = (sheetWidth + 18) * uiScale, 43 * uiScale
if brushRect.collidepoint(mousePos[0], mousePos[1]) or not squareT:
if leftClick and brushRect.collidepoint(mousePos[0], mousePos[1]):
squareT = False
sDown = False
s.blit(p.transform.scale(brushHover, (brushRect.width, brushRect.height)), (brushRect.x, brushRect.y + uiScale))
else:
s.blit(p.transform.scale(brushHover, (brushRect.width, brushRect.height)), brushRect)
else:
s.blit(p.transform.scale(brush, (brushRect.width, brushRect.height)), brushRect)
squareRect.x,squareRect.y = (sheetWidth + 34) * uiScale, 43 * uiScale
if squareRect.collidepoint(mousePos[0], mousePos[1]) or squareT:
if leftClick and squareRect.collidepoint(mousePos[0], mousePos[1]):
squareT = True
s.blit(p.transform.scale(squareHover, (squareRect.width, squareRect.height)), (squareRect.x, squareRect.y + uiScale))
else:
s.blit(p.transform.scale(squareHover, (squareRect.width, squareRect.height)), squareRect)
else:
s.blit(p.transform.scale(square, (squareRect.width, squareRect.height)), squareRect)
if squareT:
sDown = True
rightClickPrev = rightClick
p.display.update()
clock.tick(60)
| 48.794521
| 221
| 0.561623
| 2,660
| 21,372
| 4.495113
| 0.071053
| 0.048925
| 0.059798
| 0.066237
| 0.666221
| 0.585849
| 0.555156
| 0.538597
| 0.522121
| 0.508071
| 0
| 0.042743
| 0.282987
| 21,372
| 437
| 222
| 48.906178
| 0.737536
| 0.002667
| 0
| 0.475138
| 0
| 0
| 0.006522
| 0.001079
| 0
| 0
| 0
| 0
| 0
| 1
| 0.008287
| false
| 0
| 0.01105
| 0
| 0.019337
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5f4c4714755e8b9549c5e4949c349f3b753fe90
| 5,148
|
py
|
Python
|
EditGroupWindow.py
|
TheYargonaut/lucre
|
1abd472993df01b443ab4811379dfe52e18cf790
|
[
"MIT"
] | null | null | null |
EditGroupWindow.py
|
TheYargonaut/lucre
|
1abd472993df01b443ab4811379dfe52e18cf790
|
[
"MIT"
] | null | null | null |
EditGroupWindow.py
|
TheYargonaut/lucre
|
1abd472993df01b443ab4811379dfe52e18cf790
|
[
"MIT"
] | null | null | null |
import tkinter as tk
from tkinter.colorchooser import askcolor
from tkinter import ttk
from Scrollable import Scrollable
from ViewLedgerWidget import ViewLedgerWidget
from List import ListView
from Group import Group
# window for editing a group
prevLens = [ 10, 25, 100 ]
class EditGroupWindow( tk.Toplevel ):
def __init__( self, master, group, ledger, psize, *args, **kwargs ):
tk.Toplevel.__init__( self, master, *args, **kwargs )
self.title( "edit group" )
self.groupBack = group
self.group = Group( **dict( group ) )
self.ledger = ledger
self.psize = psize
        self.highlight = self.group.color  # row highlight colour (was "white")
        self.ignored = "#E00E00E00"  # gray, in Tk's #RRRGGGBBB 12-bit-per-channel form
self.view = None
self.build()
self.matchListCb( self.view )
def matchListCb( self, view ):
'set the highlights when group lists change'
mask = self.group.filter( self.ledger.df.head( len( view ) ) )
for r, m in enumerate( mask ):
view.highlightRow( r, self.highlight if m else self.ignored )
def finalize( self ):
self.groupBack.whitelist = [ r for r in self.group.whitelist if r ]
self.groupBack.blacklist = [ r for r in self.group.blacklist if r ]
self.groupBack.negate = self.group.negate
self.groupBack.title = self.group.title
self.groupBack.color = self.group.color
self.ledger.updateCb( self.ledger.df )
self.destroy()
def whiteListCb( self, idx, txt ):
self.group.whitelist[ idx ] = txt
self.matchListCb( self.view )
def blackListCb( self, idx, txt ):
self.group.blacklist[ idx ] = txt
self.matchListCb( self.view )
def nameCb( self, *args ):
self.group.title = self.nameVar.get()
def expenseCb( self, value ):
self.group.negate = value == 'expense'
def colorCb( self ):
self.group.color = askcolor( self.group.color, parent=self )[ 1 ]
self.highlight = self.group.color
self.color.config( fg=self.group.color )
self.matchListCb( self.view )
def build( self ):
self.grid_rowconfigure( 0, weight=1 )
self.grid_columnconfigure( 0, weight=1 )
mainFrame = ttk.Frame( self )
mainFrame.grid( row=0, column=0, sticky=tk.NSEW )
mainFrame.grid_rowconfigure( 1, weight=1 )
mainFrame.grid_columnconfigure( 0, weight=1 )
listFrame = ttk.Frame( self )
listFrame.grid( row=0, column=1, sticky=tk.NSEW )
listFrame.grid_rowconfigure( 0, weight=1 )
listFrame.grid_rowconfigure( 1, weight=1 )
listFrame.grid_columnconfigure( 0, weight=1 )
whiteFrame = ttk.Frame( listFrame )
whiteFrame.grid( row=0, column=0, sticky=tk.NSEW )
whiteLabel = tk.Label( whiteFrame, text='whitelist' )
whiteLabel.pack( side=tk.TOP, fill=tk.X )
whiteScroll = Scrollable( whiteFrame, vertical=True )
whiteScroll.pack( side=tk.TOP, fill=tk.BOTH )
whiteList = ListView( whiteScroll, self.group.whitelist, '+', self.whiteListCb )
whiteList.pack()
blackFrame = ttk.Frame( listFrame )
blackFrame.grid( row=1, column=0, sticky=tk.NSEW )
blackLabel = tk.Label( blackFrame, text='blacklist' )
blackLabel.pack( side=tk.TOP, fill=tk.X )
blackScroll = Scrollable( blackFrame, vertical=True )
blackScroll.pack( side=tk.TOP, fill=tk.BOTH )
blackList = ListView( blackScroll, self.group.blacklist, '+', self.blackListCb )
blackList.pack()
button = ttk.Frame( self )
button.grid( row=1, column=0, columnspan=2, sticky=tk.W + tk.E )
cancel = ttk.Button( button, text="Cancel", command=self.destroy )
cancel.pack( side=tk.RIGHT )
confirm = ttk.Button( button, text="Confirm", command=self.finalize )
confirm.pack( side=tk.RIGHT )
nameFrame = ttk.Frame( mainFrame )
nameFrame.grid( row=0, column=0, sticky=tk.NSEW )
self.color = tk.Button( nameFrame, text="\u2B1B", command=self.colorCb, width=3 )
self.color.config( fg=self.group.color )
self.color.pack( side=tk.LEFT, fill=tk.NONE, expand=False )
self.nameVar = tk.StringVar( nameFrame )
self.nameVar.set( self.group.title )
self.nameVar.trace( 'w', self.nameCb )
name = ttk.Entry( nameFrame, textvariable=self.nameVar, exportselection=0 )
name.pack( side=tk.LEFT, fill=tk.X, expand=True )
style = ttk.OptionMenu( nameFrame, tk.StringVar( nameFrame ), ( "expense" if self.group.negate else "income" ), "income", "expense", command=self.expenseCb )
style.pack( side=tk.RIGHT, fill=tk.NONE, expand=False )
self.view = ViewLedgerWidget( mainFrame, self.ledger.df, lenCb=self.matchListCb )
self.view.grid( row=1, column=0, sticky=tk.NE + tk.S )
def editGroupCb( master, group, ledger, psize ):
def cb( master=master, group=group, ledger=ledger, psize=psize ):
window = EditGroupWindow( master, group, ledger, psize )
master.wait_window( window )
return cb
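# Illustrative wiring (root/group/ledger/psize are assumed host-app objects):
# button = ttk.Button(root, text="Edit group",
#                     command=editGroupCb(root, group, ledger, psize))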
| 43.260504
| 165
| 0.633061
| 635
| 5,148
| 5.107087
| 0.228346
| 0.058279
| 0.027752
| 0.035461
| 0.263336
| 0.142769
| 0.107
| 0.046562
| 0
| 0
| 0
| 0.012126
| 0.247086
| 5,148
| 119
| 166
| 43.260504
| 0.824561
| 0.016123
| 0
| 0.078431
| 0
| 0
| 0.026439
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.107843
| false
| 0
| 0.068627
| 0
| 0.196078
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5f839cc33260b873ad589657cb5b87f8a948df8
| 5,172
|
py
|
Python
|
dialmonkey/nlu/basketball.py
|
alexandergazo/NPFL123
|
c52b6a880abf9fe694ce6a2d775c7db1bd765fba
|
[
"Apache-2.0"
] | null | null | null |
dialmonkey/nlu/basketball.py
|
alexandergazo/NPFL123
|
c52b6a880abf9fe694ce6a2d775c7db1bd765fba
|
[
"Apache-2.0"
] | null | null | null |
dialmonkey/nlu/basketball.py
|
alexandergazo/NPFL123
|
c52b6a880abf9fe694ce6a2d775c7db1bd765fba
|
[
"Apache-2.0"
] | null | null | null |
# Author: Matej Mik
from ..component import Component
from ..da import DAI
import re
def add_team_g(string, attributes):
if 'tym' in string:
if re.search('(muj|moj|meh)[^ ]{0,3} tym', string):
attributes.append('team=default')
else:
team = string.split('tym')[-1].split(' ', 1)[1]
if team.startswith('na '):
team = team[3:]
attributes.append(f'team={team}')
return attributes
def add_team_s(string, attributes):
if 'tym' in string:
if re.search('(vychozi[^ ]{0,2}|(muj|moj|meh)[^ ]{0,3}) tym', string):
attributes.append('default')
team = string.split('tym')[-1].split(' ', 1)[1]
if team.startswith('na '):
team = team[3:]
attributes.append(f'team={team}')
return attributes
def add_type(string, attributes):
if ' hrac' in string:
attributes.append('type=player')
elif ' tym' in string:
attributes.append('type=team')
return attributes
def add_nums(string, attributes):
nums = re.findall('[0-9]+[^ ]?', string)
if len(nums) == 1:
num = nums[0]
if num.endswith('.'):
attributes.append('rank=' + num.rstrip('.'))
else:
attributes.append('value=' + num)
elif any([stem in string for stem in [' nejv', ' nejlepsi']]):
attributes.append('rank=1')
return attributes
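# Illustrative: for 'kdo je 3. nejlepsi strelec', add_nums finds ['3.'] and
# appends 'rank=3'; a bare number like '10' would append 'value=10' instead.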
def add_time(string, attributes):
if ' dnes' in string:
attributes.append('time=today')
elif ' zitr' in string:
        attributes.append('time=tomorrow')
else:
time = re.findall('[0-9]{1,2}[. ]{1,2}[0-9]{1,2}[.]?', string)
if len(time) == 1:
attributes.append(f'time={time[0]}')
return attributes
def add_name(string, attributes):
if re.search('(vychozi[^ ]{0,2}|(muj|moj|meh)[^ ]{0,3}) tym', string):
attributes.append('name=default')
else:
names = re.findall(' hrac.*$', string) + re.findall(' tym.*$', string)
if len(names) == 1:
name = names[0].lstrip().split(' ', 1)
if len(name) == 2:
attributes.append(f'name={name[1]}')
return attributes
def add_stat(string, attributes):
if re.search('dv(.{2}bod|oje?k)', string):
attributes.append('stat=2_pt_made')
elif re.search('tr(.{1,2}bod|oje?k)', string):
attributes.append('stat=3_pt_made')
elif any([stem in string for stem in ['trestn', 'sestk', 'sestek']]):
if any([stem in string for stem in ['uspesn', 'procent']]):
attributes.append('stat=ft_percentage')
else:
attributes.append('stat=ft_made')
elif any([stem in string for stem in ['vyher', 'vyhr']]):
attributes.append('stat=wins')
elif any([stem in string for stem in ['strelec', 'strelc', ' bod']]):
attributes.append('stat=points')
return attributes
def to_DAIs(intent, attributes):
items = []
if intent:
if attributes:
for att in attributes:
items.append(DAI.parse(f'{intent}({att})'))
else:
items.append(DAI.parse(f'{intent}()'))
return items
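# Illustrative: to_DAIs('request_game', ['time=?']) returns the single DAI parsed
# from 'request_game(time=?)'; with no attributes it yields 'request_game()'.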
class BasketballNLU(Component):
def __call__(self, dial, logger):
        intent = ''
attributes = []
if dial['user'].startswith('kde'):
intent = 'request_game'
attributes.append('place=?')
attributes = add_team_g(dial['user'], attributes)
elif dial['user'].startswith('kdy'):
intent = 'request_game'
attributes.append('time=?')
attributes = add_team_g(dial['user'], attributes)
elif any([stem in dial['user'] for stem in ['zapas', 'utkani']]):
intent = 'request_game'
attributes = add_time(dial['user'], attributes)
elif any([dial['user'].startswith(stem) for stem in ['kolik', 'jaky pocet', 'na jake']]):
intent = 'request_stats'
if any([stem in dial['user'] for stem in ['kolikat', 'mist', 'pozic']]):
attributes.append('rank=?')
else:
attributes.append('value=?')
attributes = add_stat(dial['user'], attributes)
attributes = add_type(dial['user'], attributes)
attributes = add_name(dial['user'], attributes)
elif any([dial['user'].startswith(stem) for stem in ['kter', 'kdo', 'jak']]):
intent = 'request_stats'
attributes.append('name=?')
attributes = add_type(dial['user'], attributes)
attributes = add_nums(dial['user'], attributes)
attributes = add_stat(dial['user'], attributes)
elif any([stem in dial['user'] for stem in ['zmen', 'nastav']]):
intent = 'set'
years = re.findall('[0-9]{4}', dial['user'])
if len(years) == 1:
attributes.append(f'season={years[0]}')
attributes = add_team_s(dial['user'], attributes)
for item in to_DAIs(intent, attributes):
dial['nlu'].append(item)
logger.info('NLU: %s', str(dial['nlu']))
return dial
| 37.478261
| 97
| 0.552204
| 623
| 5,172
| 4.521669
| 0.199037
| 0.147675
| 0.031949
| 0.046858
| 0.506922
| 0.396876
| 0.359957
| 0.351438
| 0.243521
| 0.183884
| 0
| 0.013398
| 0.278422
| 5,172
| 138
| 98
| 37.478261
| 0.741426
| 0.003287
| 0
| 0.300813
| 0
| 0.00813
| 0.164726
| 0.008537
| 0
| 0
| 0
| 0
| 0
| 1
| 0.073171
| false
| 0
| 0.02439
| 0
| 0.178862
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5f954fff242094361f8f329de47188d709c63c7
| 1,447
|
py
|
Python
|
test_SSstache.py
|
jonschull/Lyte
|
e9ba2bb1b07c9398b81a6f591898d2474d1a4609
|
[
"MIT"
] | 1
|
2018-06-07T17:54:27.000Z
|
2018-06-07T17:54:27.000Z
|
test_SSstache.py
|
jonschull/Lyte
|
e9ba2bb1b07c9398b81a6f591898d2474d1a4609
|
[
"MIT"
] | 1
|
2018-06-28T05:08:57.000Z
|
2018-06-28T05:08:57.000Z
|
test_SSstache.py
|
jonschull/Lyte
|
e9ba2bb1b07c9398b81a6f591898d2474d1a4609
|
[
"MIT"
] | null | null | null |
from SSstache import *
from plumbum.path.utils import delete
from plumbum.cmd import ls, touch, mkdir
def test_makeSupportScriptStache():
delete('xyz')
assert makeSupportScriptStache(stacheDir='xyz').endswith('xyz')
assert ls('xyz').split()==['RSrun.2.7.min.js', 'glow.2.7.min.js', 'ide.css', 'jquery-ui.custom.css', 'jquery-ui.custom.min.js', 'jquery.min.js']
delete('xyz')
def test_prepareHTMLdir():
delete('xyz')
prepareHTMLdir('xyz')
assert('xyz' in ls().strip())
delete('xyz')
def test_makeHTMLdir():
HTMLdirName = '123'
delete( HTMLdirName )
fakeSSname = 'fakeSupportScripts'
delete(fakeSSname)
mkdir(fakeSSname)
scriptNames=['xyz.test', 'xyz2.test']
for scriptName in scriptNames:
touch(f'{fakeSSname}/{scriptName}')
makeHTMLdir( HTMLdirName ,
stacheDir = fakeSSname,
GLOWPATH='.',
scriptNames= scriptNames)
assert('supportScripts' in ls( HTMLdirName ).split() )
assert( ls('123/supportScripts').split() == scriptNames )
delete( HTMLdirName )
delete(fakeSSname)
def test_putInHTMLdir():
open('box2.py','w').write('box(color=color.green)')
putInHTMLdir('box2.py')
assert( 'box2.py' in ls('box2').split() )
delete('box2.py')
delete('box2')
#prepareHTMLdir(dirName='xyz')
#test_makeHTMLdir()
| 27.301887
| 148
| 0.608846
| 152
| 1,447
| 5.763158
| 0.361842
| 0.031963
| 0.011416
| 0.015982
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015399
| 0.237042
| 1,447
| 53
| 149
| 27.301887
| 0.77808
| 0.032481
| 0
| 0.222222
| 0
| 0
| 0.197284
| 0.050036
| 0
| 0
| 0
| 0
| 0.166667
| 1
| 0.111111
| false
| 0
| 0.083333
| 0
| 0.194444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5fc2d7fa7991a4448eb7eb0d16d8da0aa0e1f7e
| 173
|
py
|
Python
|
graphic/introductions/graficoNormal.py
|
jonathanccardoso/data-science
|
d5977e5cd26b6a9ad05ef8940841158911a91586
|
[
"MIT"
] | null | null | null |
graphic/introductions/graficoNormal.py
|
jonathanccardoso/data-science
|
d5977e5cd26b6a9ad05ef8940841158911a91586
|
[
"MIT"
] | null | null | null |
graphic/introductions/graficoNormal.py
|
jonathanccardoso/data-science
|
d5977e5cd26b6a9ad05ef8940841158911a91586
|
[
"MIT"
] | null | null | null |
import matplotlib.pyplot as plt
x = [1, 2, 5]
y = [2, 3, 7]
plt.title("1 grafico com python")
# Axes
plt.xlabel("Eixo X")
plt.ylabel("Eixo Y")
plt.plot(x,y)
plt.show()
| 12.357143
| 33
| 0.630058
| 34
| 173
| 3.205882
| 0.647059
| 0.073395
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.048951
| 0.17341
| 173
| 13
| 34
| 13.307692
| 0.713287
| 0.028902
| 0
| 0
| 0
| 0
| 0.192771
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
f5fce2318bd81cf7ddc8f556365d8f472f7cc726
| 18,008
|
py
|
Python
|
darknet.py
|
sugey/pytorch-yolov3
|
cb6b46fd798debca5d8d066eabb2bd2e6c679953
|
[
"MIT"
] | 3
|
2019-10-21T16:05:15.000Z
|
2019-10-25T00:43:17.000Z
|
darknet.py
|
sugey/pytorch-yolov3
|
cb6b46fd798debca5d8d066eabb2bd2e6c679953
|
[
"MIT"
] | null | null | null |
darknet.py
|
sugey/pytorch-yolov3
|
cb6b46fd798debca5d8d066eabb2bd2e6c679953
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
import numpy as np
from model.layers import *
from model.build import *
import cv2
from model.utils import *
def get_test_input():
img = cv2.imread("images/dog-cycle-car.png")
img = cv2.resize(img, (416, 416)) # Resize to the input dimension
# BGR -> RGB | H X W C -> C X H X W
img_ = img[:, :, ::-1].transpose((2, 0, 1))
# Add a channel at 0 (for batch) | Normalise
img_ = img_[np.newaxis, :, :, :]/255.0
img_ = torch.from_numpy(img_).float() # Convert to float
img_ = Variable(img_) # Convert to Variable
return img_
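# Note: the tensor returned above has shape (1, 3, 416, 416): a batch of one,
# channels first, with values scaled to [0, 1], the layout forward() expects.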
class Darknet(nn.Module):
"""
Main Darknet class. It is a subclass of nn.Module
"""
def __init__(self, cfgfile):
super(Darknet, self).__init__()
# Translate our YOLOv3 CFG file to blocks
self.blocks = parse_cfg(cfgfile)
# Convert those blocks to a module list for Pytorch
self.net_info, self.module_list = create_modules(self.blocks)
# These are for loading the weights below
self.header = torch.IntTensor([0, 0, 0, 0])
self.seen = 0
def get_blocks(self):
"""
Getter function for blocks
Returns:
blocks
"""
return self.blocks
def get_module_list(self):
"""
Getter function for module_list
Returns:
module_list
"""
return self.module_list
# Main forward pass
def forward(self, x, CUDA):
"""
Does the forward pass
Params:
x: The input
CUDA: Use GPU to accelerate task
"""
detections = []
# We don't want the first block, that contains the network info
modules = self.blocks[1:]
# We cache the output feature maps of every layer in a dict outputs.
        # The keys are the indices of the layers, and the values are
# the feature maps. We can then search through the keys to look up
# a layers feature maps for route or shortcuts.
outputs = {}
write = 0
# Go through every module (layer)
for i in range(len(modules)):
# Get the module type value from the current index
module_type = (modules[i]["type"])
if module_type == "convolutional" or module_type == "upsample" or module_type == "maxpool":
# Not 100% sure, but I think because the module list is a
# Pytorch nn.ModuleList(), you can multiply the index of this list,
# that is, the block, by the inputs to this function (x), to get the output.
# I believe this is the matrix multiplication part.
x = self.module_list[i](x)
# Set the key to the index, and set the value to the computed
# calculation of the block and the input
outputs[i] = x
elif module_type == "route":
layers = modules[i]["layers"]
                # The two layers designated in the route block get turned into a list with indexes
                # of 0 and 1
layers = [int(a) for a in layers]
# Route layers[0] is never greater than 0, so candidate for optimization deletion
if (layers[0]) > 0:
layers[0] = layers[0] - i
                # This happens only on the 2 smaller detection layers, i.e. on a 416x416 image,
# the 13x13 and 26x26 detection region levels
if len(layers) == 1:
                    # Grab the output from the index plus the first value, usually
                    # a -4 in this situation. This is what allows a kind of independent route
                    # for the detection region layers. This will then go back and take the layer
                    # where the split happened, pull those weights forward past the detection
                    # layer, and prepare them as a piece of input for the next convolution.
x = outputs[i + (layers[0])]
else:
# These are the two large skip connections, from layers 37 -> 99 and 62 -> 87
if (layers[1]) > 0:
# Reset layer 1 to the difference between the desired layer index
# and the current layer. So, from 37 - 99 = (-62). We then add
# it to the current layer below in map2
layers[1] = layers[1] - i
# map1 is the output of the previous layer (layers[0] is always a
# negative number), here an upsample layer in the YOLO Cfg
map1 = outputs[i + layers[0]]
# map2 is the previous convolution to pull the data from
map2 = outputs[i + layers[1]]
# We're adding together the values of the outputs from the routed layers
# along the depth of the tensor since the param of 1 corresponds to
# the depth dimension. `Cat` method stands for concatenate.
x = torch.cat((map1, map2), 1)
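                    # Illustrative shapes for YOLOv3 at 416x416, at the 26x26 route:
                    # map1 is (B, 256, 26, 26) from the upsample, map2 is (B, 512, 26, 26)
                    # from the routed convolution, so the concatenated x is (B, 768, 26, 26).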
# Set the key to the current module index, and set the dict value to the computed
# calculation of the block x variable
outputs[i] = x
elif module_type == "shortcut":
from_ = int(modules[i]["from"])
# Grab the output from the previous layer, as well as the `from` layer (which
# is always -3) before. This is either a downsampling, upsampling or shortcut
# connection.This simply adds the weights together without the tensor
# concatenation you find in the routings. The is what creates the residual
# blocks throughout the YOLO network
# x = outputs[i-1] + outputs[i+from_]
x = outputs[i-1] + outputs[i+from_]
# Set the key to the current module index, and value to x variable calculation
outputs[i] = x
elif module_type == 'yolo':
# Get the anchor list
anchors = self.module_list[i][0].anchors
# Get the input dimensions
inp_dim = int(self.net_info["height"])
# Get the number of classes
num_classes = int(modules[i]["classes"])
# Output the result
x = x.data
# Run a prediction on a particular region size
x = predict_transform(x, inp_dim, anchors, num_classes, CUDA)
if type(x) == int:
continue
# If write = 0, that means this is the first detection
if not write:
detections = x
write = 1
                # Otherwise, concatenate the different predictions together along the
# depth of the tensor
else:
detections = torch.cat((detections, x), 1)
# Since this is a detection layer, we still need to pull the weights from the previous layer
                # output, so that we can use it as input to the next layer
outputs[i] = outputs[i-1]
try:
# After all the modules have been gone through, return the detections tensor, which is a
# combined tensor for all three region size
return detections
except:
return 0
def load_weights(self, weightfile):
"""
Loads the weightfile. It is all 32-bit floats with 5 bytes as headers. There
are only weights for convolution and batch_normalization layers.
Params:
weightfile: link to weightfile
Return:
loads weights
"""
# Open the weights file
fp = open(weightfile, "rb")
# The first 4 values are header information
# 1. Major version number
# 2. Minor Version Number
# 3. Subversion number
# 4. Images seen
header = np.fromfile(fp, dtype=np.int32, count=5)
# Turn the numpy header file into a tensor
self.header = torch.from_numpy(header)
# The total number of images seen
self.seen = self.header[3]
# The rest of the values are the weights, let's load them up
# into a numpy
weights = np.fromfile(fp, dtype=np.float32)
# This variable keeps track of where we are in the weight list
# which is different than the module list
ptr = 0
# Let's go through every item in the module list of this
# instantiated class
for i in range(len(self.module_list)):
# We have to add one to this list because the first block
# is the netinfo block. This is different then the module
# list which took the netinfo block out
module_type = self.blocks[i + 1]["type"]
if module_type == "convolutional":
# Grab the current module
model = self.module_list[i]
try:
# If there is batch normalize on this convolutional layer
# let's grab that
batch_normalize = int(self.blocks[i+1]["batch_normalize"])
except:
batch_normalize = 0
# The first value in the model is the Conv2D module, so, for example
# Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
conv = model[0]
if (batch_normalize):
# The second value in the model is a BatchNorm2d module, so, for example
# BatchNorm2d(32, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
bn = model[1]
# Get the number of weights of Batch Norm Layer
# This is the first value in the module, so 32 in previous example
# PyTorch numel method stands for number of elements, which it returns
num_bn_biases = bn.bias.numel()
# Load the weights. Batch norm layers have a sequences of values stored
# for them in weights file. It goes:
# 1. bn_biases
# 2. bn_weights
# 3. bn_running mean
# 4. bn_running_var
# After those 4 items, then the convolutional weights are added, which
# we see once you exit this conditional loop
# Weight values are a numpy file, so we turn them into a tensor here via torch.
# We grab from the current ptr index, which is the (full file - header),
# and then add the number of biases for first section. We then increment the ptr
# variable so we can continue moving through the chunks of file data.
# First time through on 416, we get weights[0:32], so the first 32 bias values
bn_biases = torch.from_numpy(
weights[ptr:ptr + num_bn_biases])
ptr += num_bn_biases
# Grab the weights next. Following previous example, we get weights[32:64], which
# is the next chunk of 32 float values assigned to the weights for this
# batch norm layer
bn_weights = torch.from_numpy(
weights[ptr: ptr + num_bn_biases])
ptr += num_bn_biases
                    # Grab the running_mean next. Following previous example, we get weights[64:96], which
# is the next chunk of 32 float values assigned to the running_mean for this
# batch norm layer
bn_running_mean = torch.from_numpy(
weights[ptr: ptr + num_bn_biases])
ptr += num_bn_biases
# Grab the running variance next. Following previous example, we get weights[96:128],
                    # which is the next chunk of 32 float values assigned to the running_var for this
# batch norm layer
bn_running_var = torch.from_numpy(
weights[ptr: ptr + num_bn_biases])
ptr += num_bn_biases
                    # Cast the loaded weights into dims of model weights. This doesn't
# seem like it's necessary since all of these are currently in
# the proper tensor format. Under consideration for deletion
# under optimization
bn_biases = bn_biases.view_as(bn.bias.data)
bn_weights = bn_weights.view_as(bn.weight.data)
bn_running_mean = bn_running_mean.view_as(bn.running_mean)
bn_running_var = bn_running_var.view_as(bn.running_var)
# Copy all the tensor data pulled from the files to the
# model BatchNorm2d data (bn) which we can process
bn.bias.data.copy_(bn_biases)
bn.weight.data.copy_(bn_weights)
bn.running_mean.copy_(bn_running_mean)
bn.running_var.copy_(bn_running_var)
else:
# Remember the format for the model is:
# Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
# The only places there are biases in convolution layers are in the
# pre-detection layers where there are 255. Three of them in the CFG.
num_biases = conv.bias.numel()
# Load the biases. Convolution layers have a sequences of values stored
# for them in weights file. It goes:
# 1. conv_biases
# 2. conv_weights
# Since we add the conv_weights outside this loop, we only have to focus
# on preparing the biases here. In 416 example, the first ptr and bias
# values are 56367712, 255, which is what we expect since the first
# detection layer isn't until layer 83 out of 106, far into the CFG
conv_biases = torch.from_numpy(
weights[ptr: ptr + num_biases])
ptr = ptr + num_biases
# reshape the loaded weights according to the dims of the model weights
# Again, tensors in proper shape so candidate for
# optimization deletion
conv_biases = conv_biases.view_as(conv.bias.data)
# Copy all the tensor data pulled from the files to the
# model Conv2d data (conv) which we can process
conv.bias.data.copy_(conv_biases)
# Total the weight slots for the Convolutional layers
# Conv2d(3, 32, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
num_weights = conv.weight.numel()
# Load the weights from the weights file into a tensor
# at the current ptr values plus the rest of chunk necessary
# from the file
conv_weights = torch.from_numpy(weights[ptr:ptr+num_weights])
# reset ptr to where we are in file
ptr = ptr + num_weights
# Reformat the weights tensor into a format that matches
# the model conv placeholder tensor
conv_weights = conv_weights.view_as(conv.weight.data)
# Copy the weights into the conv model
conv.weight.data.copy_(conv_weights)
def save_weights(self, savedfile, cutoff=0):
if cutoff <= 0:
cutoff = len(self.blocks) - 1
fp = open(savedfile, 'wb')
# Attach the header at the top of the file
self.header[3] = self.seen
header = self.header
header = header.numpy()
header.tofile(fp)
# Now, let us save the weights
for i in range(len(self.module_list)):
# We have to add one to this list because the first block
            # is the netinfo block. This is different than the module
# list which took the netinfo block out
module_type = self.blocks[i+1]["type"]
if (module_type) == "convolutional":
# Grab the full module
model = self.module_list[i]
try:
# If this is a batch normalize layer
batch_normalize = int(self.blocks[i+1]["batch_normalize"])
except:
batch_normalize = 0
conv = model[0]
if (batch_normalize):
bn = model[1]
                    # If the parameters are on GPU, convert them back to CPU.
                    # We don't convert the parameter itself in place;
                    # instead, we copy the parameter and then convert the copy to CPU.
                    # This is done as weights need to be saved during training
cpu(bn.bias.data).numpy().tofile(fp)
cpu(bn.weight.data).numpy().tofile(fp)
cpu(bn.running_mean).numpy().tofile(fp)
cpu(bn.running_var).numpy().tofile(fp)
else:
cpu(conv.bias.data).numpy().tofile(fp)
# Let us save the weights for the Convolutional layers
cpu(conv.weight.data).numpy().tofile(fp)
model = Darknet("cfg/yolov3.cfg")
model.load_weights("yolov3.weights")
inp = get_test_input()
pred = model(inp, torch.cuda.is_available())
| 44.907731
| 108
| 0.549034
| 2,285
| 18,008
| 4.253829
| 0.197812
| 0.01749
| 0.010185
| 0.011523
| 0.254012
| 0.223251
| 0.186317
| 0.169444
| 0.146193
| 0.138992
| 0
| 0.02043
| 0.385717
| 18,008
| 400
| 109
| 45.02
| 0.858253
| 0.464127
| 0
| 0.23871
| 0
| 0
| 0.020665
| 0.002583
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045161
| false
| 0
| 0.058065
| 0
| 0.141935
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eb03b18815a588a66491abb92833213166f65e34
| 2,271
|
py
|
Python
|
superset/shuju_into_mysql.py
|
LCM1999/superset_secondary_dev
|
293e3df9d46ef6096d35ee7d523ce5c7898902bc
|
[
"Apache-2.0"
] | 1
|
2021-06-29T05:36:30.000Z
|
2021-06-29T05:36:30.000Z
|
superset/shuju_into_mysql.py
|
LCM1999/superset_secondary_dev
|
293e3df9d46ef6096d35ee7d523ce5c7898902bc
|
[
"Apache-2.0"
] | null | null | null |
superset/shuju_into_mysql.py
|
LCM1999/superset_secondary_dev
|
293e3df9d46ef6096d35ee7d523ce5c7898902bc
|
[
"Apache-2.0"
] | null | null | null |
import json
import pymysql
import random
import string
import time
# def get_data():
# with open('E:\\QQ文档\\1420944066\\FileRecv\\Code (2)\\data\\nice looking data\\与gooddata里重复\\20_30(1).json', 'r') as f:
#         camera_text = json.load(f)  # parse each line of data
# print(camera_text)
# return camera_text
# def data_insert(text):
# db = pymysql.connect(host = "localhost",user = "root",password = "lxyroot",database = "superset-test")
# cur = db.cursor()
# try:
# cur.execute("drop table liutu_data")
# cur.execute("create table liutu_data(id int,name char(20),fillcolor char(20),time char(20),size_data TINYTEXT)")
# except:
# cur.execute("create table liutu_data(id int,name char(20),fillcolor char(20),time char(20),size_data TINYTEXT)")
# for i in text:
# for j in range(0,len(text[0]['size'])):
# sql="INSERT INTO liutu_data (id,name,fillcolor,time,size_data) VALUES ('"+str(i['id'])+"','"+i['name']+"','"+i['fillcolor']+"','"+str(j)+"','"+str(i['size'][j])+"');"
# cur.execute(sql)
# db.commit()
# cur.close()
def new_table():
db = pymysql.connect(host = "10.0.2.15",user = "mysqluser",password = "mysqlpw",database = "inventory")
cur = db.cursor()
#cur.execute("drop table refresh_data")
cur.execute("create table refresh_data(id int,name char(20),email char(20),view_data char(30))")
for i in range(0,30):
name = ''.join(random.sample(string.ascii_letters + string.digits, 8))
email = random.choice('abcdefghijklmnopqrstuvwxyz!@#$%^&*()')
view_data = random.random()*100
sql="INSERT INTO refresh_data (id,name,email,view_data) VALUES ("+str(i)+",'"+name+"','"+email+"','"+str(view_data)+"');"
print(sql)
cur.execute(sql)
db.commit()
return cur,db
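# Note: the string-built INSERT above is injection-prone; a parameterized
# pymysql form would be, for example:
# cur.execute("INSERT INTO refresh_data (id,name,email,view_data) VALUES (%s,%s,%s,%s)",
#             (i, name, email, str(view_data)))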
def data_update(cur,update_num,db):
for i in range(0,update_num):
view_data = random.random()*100
sql = 'update refresh_data set view_data="'+str(view_data)+'" where id='+str(random.randint(1,30))+';'
cur.execute(sql)
db.commit()
if __name__ == "__main__":
cur,db = new_table()
i = 0
    while True:
time.sleep(5)
print('one update')
data_update(cur,20,db)
i = i+1
| 37.85
| 180
| 0.607221
| 321
| 2,271
| 4.174455
| 0.317757
| 0.059701
| 0.031343
| 0.047015
| 0.26791
| 0.181343
| 0.128358
| 0.128358
| 0.128358
| 0.128358
| 0
| 0.034884
| 0.204756
| 2,271
| 59
| 181
| 38.491525
| 0.707087
| 0.455306
| 0
| 0.1875
| 0
| 0.03125
| 0.235391
| 0.050206
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0.03125
| 0.15625
| 0
| 0.25
| 0.0625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eb03b84ad235ef7df8266830a1654259db309611
| 3,290
|
py
|
Python
|
Experiments/create_mean_optimization_sets.py
|
ariel415el/PerceptualLossGLO-Pytorch
|
7caa743b719cd95066103a69f3e78a70507de8b5
|
[
"MIT"
] | null | null | null |
Experiments/create_mean_optimization_sets.py
|
ariel415el/PerceptualLossGLO-Pytorch
|
7caa743b719cd95066103a69f3e78a70507de8b5
|
[
"MIT"
] | null | null | null |
Experiments/create_mean_optimization_sets.py
|
ariel415el/PerceptualLossGLO-Pytorch
|
7caa743b719cd95066103a69f3e78a70507de8b5
|
[
"MIT"
] | null | null | null |
import os
import random
import cv2
import numpy as np
import torch
from Experiments.all import load_models, embedd_data, save_batch
from GenerativeModels.utils.data_utils import get_dataset

device = torch.device("cuda")


def sample_latent_neighbors(outputs_dir, models_dir):
    """Find nearest latent neighbors of data samples and create sets of original/reconstructed similar images """
    # Load models
    n = 32
    train_dataset = get_dataset('ffhq', split='train', resize=128, val_percent=0.15)
    encoder, generator = load_models(device, models_dir)
    embeddings = embedd_data(train_dataset, encoder, 32, device)
    for i in [11, 15, 16, 25, 48, 53, 60, 67, 68, 78, 122]:
        os.makedirs(os.path.join(outputs_dir, os.path.basename(models_dir), f"data_neighbors{i}"), exist_ok=True)
        dists = torch.norm(embeddings - embeddings[i], dim=1)
        neighbor_indices = torch.argsort(dists)[:n]
        neighbors = torch.from_numpy(np.array([train_dataset[x][1] for x in neighbor_indices]))
        save_batch(neighbors, os.path.join(outputs_dir, os.path.basename(models_dir), f"data_neighbors{i}"))


def center_crop_image_to_square(img, edge_perc=None):
    h = img.shape[0]
    w = img.shape[1]
    if h > w:
        e = int(np.ceil((h - w) / 2))
        img = img[e:-e]
    elif h < w:
        e = int(np.ceil((w - h) / 2))
        img = img[:, e:-e]
    if edge_perc:
        z = int(img.shape[0] * edge_perc)
        img = img[z:-z, z:-z]
    return img


def make_shift_sets(root, edge_size=7, zoom=0.2):
    for path in os.listdir(root):
        img = cv2.imread(os.path.join(root, path))
        img = center_crop_image_to_square(img, zoom)
        img = cv2.resize(img, (128 + edge_size, 128 + edge_size))
        dir_name = os.path.join(root, 'jitters', f"{os.path.splitext(path)[0]}_e-{edge_size}_z-{zoom}")
        os.makedirs(dir_name, exist_ok=True)
        for i, (x1, y1) in enumerate([(0, 0), (0, edge_size), (edge_size, 0), (edge_size, edge_size)]):
            # x1 = np.random.randint(0, edge_size)
            # y1 = np.random.randint(0, edge_size)
            img2 = img[y1:img.shape[0] - edge_size + y1]
            img2 = img2[:, x1:img.shape[1] - edge_size + x1]
            img2 = cv2.resize(img2, (128, 128))
            x = cv2.imwrite(os.path.join(dir_name, f"{i}.png"), img2)
            print(x)


def create_shifted_colorfull_box_images():
    im_dim = 128
    n_images = 32
    box_dim = 32
    colors = [[128, 128, 255], [255, 128, 128], [128, 255, 128], [0, 128, 255], [255, 0, 128], [128, 255, 0]]
    os.makedirs('color_box_dataset', exist_ok=True)
    for i in range(n_images):
        x = random.choice(range(0, im_dim - box_dim + 3, 3))
        y = random.choice(range(0, im_dim - box_dim + 3, 3))
        im = np.ones((im_dim, im_dim, 3)) * 127
        im[y:y + box_dim, x:x + box_dim] = colors[i % len(colors)]
        cv2.imwrite(f"color_box_dataset/{i}.png", im)


if __name__ == '__main__':
    # sample_latent_neighbors("latent_neighbors_sets", 'trained_models/VGG-None_PT')
    # sample_latent_neighbors("latent_neighbors_sets", 'trained_models/VGG-random')
    make_shift_sets('/home/ariel/university/PerceptualLoss/PerceptualLossExperiments/style_transfer/imgs/textures')
    # create_shifted_colorfull_box_images()
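# --- Added example (not part of the original file) ---
# A quick sanity check of center_crop_image_to_square on a dummy array:
# import numpy as np
# img = np.zeros((100, 60, 3), dtype=np.uint8)  # taller than wide
# out = center_crop_image_to_square(img)
# print(out.shape)  # (60, 60, 3): the 40 extra rows are trimmed evenly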
| 39.166667
| 115
| 0.643161
| 512
| 3,290
| 3.925781
| 0.292969
| 0.047761
| 0.024876
| 0.016915
| 0.277612
| 0.20597
| 0.144279
| 0.144279
| 0.144279
| 0.088557
| 0
| 0.057396
| 0.210942
| 3,290
| 83
| 116
| 39.638554
| 0.716872
| 0.117021
| 0
| 0
| 0
| 0
| 0.087422
| 0.057706
| 0
| 0
| 0
| 0
| 0
| 1
| 0.067797
| false
| 0
| 0.118644
| 0
| 0.20339
| 0.016949
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eb03e3a050ceea7bb9cd25f052a0aa3154068c30
| 1,830
|
py
|
Python
|
run-length-encoding/run_length_encoding.py
|
geekmuse/exercism-python
|
089efc0382147bd48f1e2d68c33ba4cbd58d3dfd
|
[
"MIT"
] | null | null | null |
run-length-encoding/run_length_encoding.py
|
geekmuse/exercism-python
|
089efc0382147bd48f1e2d68c33ba4cbd58d3dfd
|
[
"MIT"
] | null | null | null |
run-length-encoding/run_length_encoding.py
|
geekmuse/exercism-python
|
089efc0382147bd48f1e2d68c33ba4cbd58d3dfd
|
[
"MIT"
] | null | null | null |
def decode(to_be_decoded):
    """
    Decodes a run-length encoded string.
    :param to_be_decoded: run-length encoded string
    :return: run-length decoded string
    """
    to_be_decoded_list = list(to_be_decoded)
    decoded_str_as_list = list()
    num_to_print_as_list = list()
    for c in to_be_decoded_list:
        if c.isdigit():
            num_to_print_as_list.append(c)
        else:
            if len(num_to_print_as_list) > 0:
                num_to_print = int(''.join(num_to_print_as_list))
                append = c * num_to_print
                decoded_str_as_list.append(append)
                num_to_print_as_list = list()
            else:
                decoded_str_as_list.append(c)
    return ''.join(decoded_str_as_list)


def encode(to_be_encoded):
    """
    Run-length encodes a string
    :param to_be_encoded: string to be run-length encoded
    :return: run-length encoded string
    """
    last_seen = None
    last_seen_count = 0
    to_be_encoded_as_list = list(to_be_encoded)
    encoded_str_as_list = list()
    for c in to_be_encoded_as_list:
        if last_seen:
            if last_seen == c:
                last_seen_count += 1
            else:
                if last_seen_count > 1:
                    encoded_str_as_list.append('{}{}'.format(last_seen_count, last_seen))
                else:
                    encoded_str_as_list.append('{}'.format(last_seen))
                last_seen_count = 1
        else:
            last_seen_count += 1
        last_seen = c
    if last_seen_count > 1:
        encoded_str_as_list.append('{}{}'.format(last_seen_count, last_seen))
    else:
        if last_seen:
            encoded_str_as_list.append('{}'.format(last_seen))
        else:
            encoded_str_as_list = list()
    return ''.join(encoded_str_as_list)
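# --- Added example (not part of the original file) ---
# Round-trip sanity check for the two functions above:
# assert encode("AABCCCDEEEE") == "2AB3CD4E"
# assert decode("2AB3CD4E") == "AABCCCDEEEE"
# assert decode(encode("XYZ")) == "XYZ"  # single characters keep no count prefix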
| 30
| 89
| 0.595082
| 252
| 1,830
| 3.892857
| 0.146825
| 0.110092
| 0.100917
| 0.114169
| 0.495413
| 0.352701
| 0.32212
| 0.254842
| 0.140673
| 0.140673
| 0
| 0.005591
| 0.315847
| 1,830
| 60
| 90
| 30.5
| 0.777955
| 0.130055
| 0
| 0.5
| 0
| 0
| 0.007767
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0
| 0
| 0.095238
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eb0791e28d8a88a76f9e3bcff8a0767061c1499e
| 3,816
|
py
|
Python
|
pytorch/benchmarks/operator_benchmark/pt/conv_test.py
|
raghavnauhria/whatmt
|
c20483a437c82936cb0fb8080925e37b9c4bba87
|
[
"MIT"
] | null | null | null |
pytorch/benchmarks/operator_benchmark/pt/conv_test.py
|
raghavnauhria/whatmt
|
c20483a437c82936cb0fb8080925e37b9c4bba87
|
[
"MIT"
] | 1
|
2019-07-22T09:48:46.000Z
|
2019-07-22T09:48:46.000Z
|
pytorch/benchmarks/operator_benchmark/pt/conv_test.py
|
raghavnauhria/whatmt
|
c20483a437c82936cb0fb8080925e37b9c4bba87
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals

import operator_benchmark as op_bench
import torch
import torch.nn as nn

"""
Microbenchmarks for Conv1d and ConvTranspose1d operators.
"""

# Configs for conv-1d ops
conv_1d_configs = op_bench.config_list(
    attrs=[
        [16, 33, 3, 1, 1, 64],
        [16, 33, 3, 2, 16, 128],
    ],
    attr_names=[
        "in_c", "out_c", "kernel", "stride", "N", "L"
    ],
    tags=["short"]
)


class Conv1dBenchmark(op_bench.TorchBenchmarkBase):
    def init(self, in_c, out_c, kernel, stride, N, L):
        self.input = torch.rand(N, in_c, L)
        self.conv1d = nn.Conv1d(in_c, out_c, kernel, stride=stride)
        self.set_module_name("Conv1d")

    def forward(self):
        return self.conv1d(self.input)


class ConvTranspose1dBenchmark(op_bench.TorchBenchmarkBase):
    def init(self, in_c, out_c, kernel, stride, N, L):
        self.input = torch.rand(N, in_c, L)
        self.convtranspose1d = nn.ConvTranspose1d(in_c, out_c, kernel, stride=stride)
        self.set_module_name("ConvTranspose1d")

    def forward(self):
        return self.convtranspose1d(self.input)


op_bench.generate_pt_test(conv_1d_configs, Conv1dBenchmark)
op_bench.generate_pt_test(conv_1d_configs, ConvTranspose1dBenchmark)

"""
Microbenchmarks for Conv2d and ConvTranspose2d operators.
"""

# Configs for Conv2d and ConvTranspose2d
conv_2d_configs = op_bench.config_list(
    attrs=[
        [16, 33, 3, 1, 1, 32, 32],
        [16, 33, 3, 2, 16, 64, 64],
    ],
    attr_names=[
        "in_c", "out_c", "kernel", "stride", "N", "H", "W"
    ],
    tags=["short"]
)


class Conv2dBenchmark(op_bench.TorchBenchmarkBase):
    def init(self, in_c, out_c, kernel, stride, N, H, W):
        self.input = torch.rand(N, in_c, H, W)
        self.conv2d = nn.Conv2d(in_c, out_c, kernel, stride=stride)
        self.set_module_name("Conv2d")

    def forward(self):
        return self.conv2d(self.input)


class ConvTranspose2dBenchmark(op_bench.TorchBenchmarkBase):
    def init(self, in_c, out_c, kernel, stride, N, H, W):
        self.input = torch.rand(N, in_c, H, W)
        self.convtranspose2d = nn.ConvTranspose2d(in_c, out_c, kernel, stride=stride)
        self.set_module_name("ConvTranspose2d")

    def forward(self):
        return self.convtranspose2d(self.input)


op_bench.generate_pt_test(conv_2d_configs, Conv2dBenchmark)
op_bench.generate_pt_test(conv_2d_configs, ConvTranspose2dBenchmark)

"""
Microbenchmarks for Conv3d and ConvTranspose3d operators.
"""

# Configs for Conv3d and ConvTranspose3d
conv_3d_configs = op_bench.config_list(
    attrs=[
        [16, 33, 3, 1, 8, 4, 32, 32],
        [16, 33, 3, 2, 16, 8, 64, 64],
    ],
    attr_names=[
        "in_c", "out_c", "kernel", "stride", "N", "D", "H", "W"
    ],
    tags=["short"]
)


class Conv3dBenchmark(op_bench.TorchBenchmarkBase):
    def init(self, in_c, out_c, kernel, stride, N, D, H, W):
        self.input = torch.rand(N, in_c, D, H, W)
        self.conv3d = nn.Conv3d(in_c, out_c, kernel, stride=stride)
        self.set_module_name("Conv3d")

    def forward(self):
        return self.conv3d(self.input)


class ConvTranspose3dBenchmark(op_bench.TorchBenchmarkBase):
    def init(self, in_c, out_c, kernel, stride, N, D, H, W):
        self.input = torch.rand(N, in_c, D, H, W)
        self.convtranspose3d = nn.ConvTranspose3d(in_c, out_c, kernel, stride=stride)
        self.set_module_name("ConvTranspose3d")

    def forward(self):
        return self.convtranspose3d(self.input)


op_bench.generate_pt_test(conv_3d_configs, Conv3dBenchmark)
op_bench.generate_pt_test(conv_3d_configs, ConvTranspose3dBenchmark)

if __name__ == "__main__":
    op_bench.benchmark_runner.main()
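# --- Added example (not part of the original benchmark file) ---
# A minimal sketch of the operation Conv1dBenchmark measures, using plain
# PyTorch with the first config row (in_c=16, out_c=33, kernel=3, stride=1,
# N=1, L=64):
# import torch
# import torch.nn as nn
# x = torch.rand(1, 16, 64)
# conv = nn.Conv1d(16, 33, 3, stride=1)
# print(conv(x).shape)  # torch.Size([1, 33, 62]); L_out = L - kernel + 1 at stride 1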
| 27.453237
| 85
| 0.673742
| 532
| 3,816
| 4.588346
| 0.156015
| 0.025809
| 0.03687
| 0.043015
| 0.578042
| 0.504302
| 0.504302
| 0.494469
| 0.399426
| 0.386727
| 0
| 0.040616
| 0.199948
| 3,816
| 138
| 86
| 27.652174
| 0.758926
| 0.026468
| 0
| 0.37931
| 0
| 0
| 0.044937
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.137931
| false
| 0
| 0.08046
| 0.068966
| 0.356322
| 0.011494
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eb083967d51239e917a7b39eeaa1d72f732ba81d
| 1,605
|
py
|
Python
|
local_test/course_search/nyuapi/request.py
|
NYUSHer/Widgets
|
b630d01331ca0101778fc7ca44fff7b65412f9ef
|
[
"MIT"
] | 1
|
2018-05-01T06:04:39.000Z
|
2018-05-01T06:04:39.000Z
|
local_test/course_search/nyuapi/request.py
|
NYUSHer/Widgets
|
b630d01331ca0101778fc7ca44fff7b65412f9ef
|
[
"MIT"
] | null | null | null |
local_test/course_search/nyuapi/request.py
|
NYUSHer/Widgets
|
b630d01331ca0101778fc7ca44fff7b65412f9ef
|
[
"MIT"
] | null | null | null |
import requests as R


class reqNYU():
    TOKEN = ""
    BASEURI = "https://sandbox.api.it.nyu.edu/"

    def __init__(self, token=""):
        if not token:
            raise Exception("[Error] Token can not be empty!")
        self.TOKEN = token
        self.ping()

    def ping(self):
        try:
            req = R.get("https://sandbox.api.it.nyu.edu/course-catalog-exp/", headers={
                "Authorization": "Bearer " + self.TOKEN
            }, timeout=10)
        except R.exceptions.ReadTimeout:
            raise Exception("[Error] NYU API not responding!")
        if req.text.find("Invalid or missing token") > -1:
            raise Exception("[Error] Token is not valid!")

    def rawReq(self, uri="", params=None):
        # params defaults to None rather than {} to avoid the shared
        # mutable-default-argument pitfall
        print("A request has been sent.")
        try:
            req = R.get(self.BASEURI + uri, data=params, headers={
                "Authorization": "Bearer " + self.TOKEN
            }, timeout=10)
        except R.exceptions.ReadTimeout:
            raise Exception("[Error] NYU API not responding!")
        return req.json()

    def repeatReq(self, url="", params=None):
        """
        Send the request repeatedly until a valid (list-shaped) response is
        received. If an invalid-token message keeps appearing this loop would
        never finish on its own, so after 10 attempts ping() is re-run, which
        raises if the token is no longer valid. A server monitor is therefore
        still needed.
        """
        counter = 0
        while True:
            response = self.rawReq(url, params)
            counter += 1
            if isinstance(response, list):
                break
            if counter > 10:
                self.ping()
        return response
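# --- Added example (not part of the original file) ---
# Hypothetical usage; the token and the query parameters are placeholders:
# client = reqNYU(token="YOUR_API_TOKEN")
# courses = client.repeatReq("course-catalog-exp/", {"some_param": "value"})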
| 32.1
| 87
| 0.544548
| 180
| 1,605
| 4.833333
| 0.472222
| 0.041379
| 0.087356
| 0.03908
| 0.305747
| 0.305747
| 0.252874
| 0.252874
| 0.252874
| 0.252874
| 0
| 0.009407
| 0.337695
| 1,605
| 49
| 88
| 32.755102
| 0.809031
| 0.109657
| 0
| 0.324324
| 0
| 0
| 0.208063
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.108108
| false
| 0
| 0.027027
| 0
| 0.27027
| 0.027027
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eb0a67e0dac6431fa8a950d7b99db76a91a069c7
| 11,877
|
py
|
Python
|
cnnlstm/preprocessing.py
|
mingjiewong/Kaggle-M5-Forecasting-Accuracy-2020
|
6467a08640990f2d07e517adf7bacd566fb442c4
|
[
"MIT"
] | null | null | null |
cnnlstm/preprocessing.py
|
mingjiewong/Kaggle-M5-Forecasting-Accuracy-2020
|
6467a08640990f2d07e517adf7bacd566fb442c4
|
[
"MIT"
] | null | null | null |
cnnlstm/preprocessing.py
|
mingjiewong/Kaggle-M5-Forecasting-Accuracy-2020
|
6467a08640990f2d07e517adf7bacd566fb442c4
|
[
"MIT"
] | null | null | null |
import numpy as np
import pandas as pd
import os
from sklearn.preprocessing import MinMaxScaler
from data_processing.helpers import Config


class Load:
    def __init__(self, train_sales='', calendar=''):
        """
        Read CSV files for daily sales and calendar input data respectively.
        Args:
            train_sales (str): file path for daily sales input data
            calendar (str): file path for calendar input data
        Attributes:
            train_sales (dataframe): daily sales input data
            calendar (dataframe): calendar input data
            float_cols (arr): list of daily sales with dtype "float64"
            int_cols (arr): list of daily sales with dtype "int32" or "int64"
        """
        self.train_sales = pd.read_csv(train_sales)
        self.calendar = pd.read_csv(calendar)
        self.float_cols = [c for c in self.train_sales if self.train_sales[c].dtype == "float64"]
        self.int_cols = [c for c in self.train_sales if self.train_sales[c].dtype in ["int64", "int32"]]

    def downcast_dtypes(self):
        """
        Downcast daily sales input data to reduce memory usage.
        Attributes:
            train_sales (dataframe): downcasted daily sales input data
        Returns:
            dataframe: downcasted daily sales input data
        """
        self.train_sales[self.float_cols] = self.train_sales[self.float_cols].astype(np.float32)
        self.train_sales[self.int_cols] = self.train_sales[self.int_cols].astype(np.int16)
        return self.train_sales


class Preprocess:
    # Preprocess: remove id, item_id, dept_id, cat_id, store_id, state_id columns
    def __init__(self, loaded_train_sales, loaded_calendar, startDay=350):
        """
        Load preprocessing parameters.
        Args:
            loaded_train_sales (dataframe): daily sales input data
            loaded_calendar (dataframe): calendar input data
            startDay (int): start day
        Attributes:
            loaded_train_sales (dataframe): daily sales input data
            calendar (dataframe): calendar input data
            daysBeforeEvent1 (dataframe): input daily data of festive events
            daysBeforeEvent2 (dataframe): input daily data of sporting events
            snap_CA (dataframe): input daily data of SNAP program in California
            snap_TX (dataframe): input daily data of SNAP program in Texas
            snap_WI (dataframe): input daily data of SNAP program in Wisconsin
        """
        # Remove the first 350 days in train sales data due to zero_inflated data
        self.loaded_train_sales = loaded_train_sales.T[6 + startDay:]
        self.calendar = loaded_calendar
        # Initialize a dataframe with zeros for 1969 days in the calendar
        self.daysBeforeEvent1 = pd.DataFrame(np.zeros((1969, 1)))
        self.daysBeforeEvent2 = pd.DataFrame(np.zeros((1969, 1)))
        self.snap_CA = pd.DataFrame(np.zeros((1969, 1)))
        self.snap_TX = pd.DataFrame(np.zeros((1969, 1)))
        self.snap_WI = pd.DataFrame(np.zeros((1969, 1)))

    def label_calendar(self):
        """
        Label days with festive or sporting events, SNAP programs in California, Texas or Wisconsin.
        Attributes:
            daysBeforeEvent1 (dataframe): input daily data of festive events
            daysBeforeEvent2 (dataframe): input daily data of sporting events
            snap_CA (dataframe): input daily data of SNAP program in California
            snap_TX (dataframe): input daily data of SNAP program in Texas
            snap_WI (dataframe): input daily data of SNAP program in Wisconsin
        Returns:
            dataframe: input daily data of festive events
            dataframe: input daily data of sporting events
            dataframe: input daily data of SNAP program in California
            dataframe: input daily data of SNAP program in Texas
            dataframe: input daily data of SNAP program in Wisconsin
        """
        for x, y in self.calendar.iterrows():
            if not pd.isnull(self.calendar["event_name_1"][x]):
                self.daysBeforeEvent1[0][x - 1] = 1
            if not pd.isnull(self.calendar["event_name_2"][x]):
                self.daysBeforeEvent2[0][x - 1] = 1
            if not pd.isnull(self.calendar["snap_CA"][x]):
                self.snap_CA[0][x] = 1
            if not pd.isnull(self.calendar["snap_TX"][x]):
                self.snap_TX[0][x] = 1
            if not pd.isnull(self.calendar["snap_WI"][x]):
                self.snap_WI[0][x] = 1
        return self.daysBeforeEvent1, self.daysBeforeEvent2, self.snap_CA, self.snap_TX, self.snap_WI


class SplitDataset:
    # split dataset into evaluation (last 2 weeks), validation (first 2 weeks), training
    def __init__(self, loaded_train_sales,
                 daysBeforeEvent1, daysBeforeEvent2,
                 snap_CA, snap_TX, snap_WI, startDay=350):
        """
        Generate training (startDay to day 1941), evaluation (day 1941 to 1969) and validation (day 1913 to 1941) datasets.
        Args:
            load_train_sales (dataframe): daily sales input data
            daysBeforeEvent1 (dataframe): input daily data of festive events
            daysBeforeEvent2 (dataframe): input daily data of sporting events
            snap_CA (dataframe): input daily data of SNAP program in California
            snap_TX (dataframe): input daily data of SNAP program in Texas
            snap_WI (dataframe): input daily data of SNAP program in Wisconsin
            startDay (int): start day
        Attributes:
            load_train_sales (dataframe): daily sales input data
            daysBeforeEvent1_train (dataframe): input daily data of festive events (training)
            daysBeforeEvent2_train (dataframe): input daily data of sporting events (training)
            snap_CA_train (dataframe): input daily data of SNAP program in California (training)
            snap_TX_train (dataframe): input daily data of SNAP program in Texas (training)
            snap_WI_train (dataframe): input daily data of SNAP program in Wisconsin (training)
            daysBeforeEvent1_eval (dataframe): input daily data of festive events (evaluation)
            daysBeforeEvent2_eval (dataframe): input daily data of sporting events (evaluation)
            snap_CA_eval (dataframe): input daily data of SNAP program in California (evaluation)
            snap_TX_eval (dataframe): input daily data of SNAP program in Texas (evaluation)
            snap_WI_eval (dataframe): input daily data of SNAP program in Wisconsin (evaluation)
            daysBeforeEvent1_valid (dataframe): input daily data of festive events (validation)
            daysBeforeEvent2_valid (dataframe): input daily data of sporting events (validation)
            snap_CA_valid (dataframe): input daily data of SNAP program in California (validation)
            snap_TX_valid (dataframe): input daily data of SNAP program in Texas (validation)
            snap_WI_valid (dataframe): input daily data of SNAP program in Wisconsin (validation)
        """
        # Remove the first 350 days in train sales data due to zero_inflated data
        self.loaded_train_sales = loaded_train_sales
        # input for predicting evaluation period day 1941 to 1969
        self.daysBeforeEvent1_eval = daysBeforeEvent1[1941:]
        self.daysBeforeEvent2_eval = daysBeforeEvent2[1941:]
        self.snap_CA_eval = snap_CA[1941:]
        self.snap_TX_eval = snap_TX[1941:]
        self.snap_WI_eval = snap_WI[1941:]
        # input for predicting validation period day 1913 to 1941
        self.daysBeforeEvent1_valid = daysBeforeEvent1[1913:1941]
        self.daysBeforeEvent2_valid = daysBeforeEvent2[1913:1941]
        self.snap_CA_valid = snap_CA[1913:1941]
        self.snap_TX_valid = snap_TX[1913:1941]
        self.snap_WI_valid = snap_WI[1913:1941]
        # input for training as a feature
        self.daysBeforeEvent1_train = daysBeforeEvent1[startDay:1941]
        self.daysBeforeEvent2_train = daysBeforeEvent2[startDay:1941]
        self.snap_CA_train = snap_CA[startDay:1941]
        self.snap_TX_train = snap_TX[startDay:1941]
        self.snap_WI_train = snap_WI[startDay:1941]

    def concatenate(self):
        """
        Generate a daily sales input data with the presence of events and SNAP program at day level.
        Attributes:
            concat_train_sales (dataframe): input daily data of sales, presence of events and SNAP program
        Returns:
            dataframe: input daily data of sales, presence of events and SNAP program
        """
        # Before concatenation with our main data "dt", indexes are made same and column name is changed to "oneDayBeforeEvent"
        self.daysBeforeEvent1_train.columns = ["oneDayBeforeEvent1"]
        self.daysBeforeEvent1_train.index = self.loaded_train_sales.index
        self.daysBeforeEvent2_train.columns = ["oneDayBeforeEvent2"]
        self.daysBeforeEvent2_train.index = self.loaded_train_sales.index
        self.snap_CA_train.columns = ["snap_CA"]
        self.snap_CA_train.index = self.loaded_train_sales.index
        self.snap_TX_train.columns = ["snap_TX"]
        self.snap_TX_train.index = self.loaded_train_sales.index
        self.snap_WI_train.columns = ["snap_WI"]
        self.snap_WI_train.index = self.loaded_train_sales.index
        self.concat_train_sales = pd.concat([self.loaded_train_sales, self.daysBeforeEvent1_train,
                                             self.daysBeforeEvent2_train, self.snap_CA_train,
                                             self.snap_TX_train, self.snap_WI_train], axis=1, sort=False)
        return self.concat_train_sales


class ScalingTrainSales:
    def __init__(self, concat_train_sales, feature_range=(0, 1), startDay=350, config_path=''):
        """
        Load parameters for scaling features in input data.
        Args:
            concat_train_sales (dataframe): input daily data of sales, presence of events and SNAP program
            feature_range ((int, int)): the scaling range
            startDay (int): start day
            config_path (str): file path for config.yaml
        Attributes:
            concat_train_sales (dataframe): input daily data of sales, presence of events and SNAP program
            feature_range ((int, int)): the scaling range
            X_train (arr): training inputs
            y_train (arr): test inputs
            startDay (int): start day
            config (dict): parameter configurations from config.yaml
            timesteps (int): number of timesteps
        """
        self.concat_train_sales = concat_train_sales
        self.feature_range = feature_range
        self.X_train = []
        self.y_train = []
        self.startDay = startDay
        self.config = Config(config_path)
        self.timesteps = self.config.timesteps

    def gen_train_data(self):
        """
        Generate training dataset using Min-Max scaler.
        Attributes:
            X_train (arr): training inputs with dimensions
                [n_timeseries, n_timesteps, n_features]
            y_train (arr): test inputs with dimensions
                [n_timeseries, n_pred_products]
        Returns:
            arr: training inputs with dimensions
                [n_timeseries, n_timesteps, n_features]
            arr: test inputs with dimensions
                [n_timeseries, n_pred_products]
            obj: scaler
        """
        sc = MinMaxScaler(feature_range=self.feature_range)
        train_sales_scaled = sc.fit_transform(self.concat_train_sales)
        for i in range(self.timesteps, 1941 - self.startDay):
            self.X_train.append(train_sales_scaled[i - self.timesteps:i])
            self.y_train.append(train_sales_scaled[i][0:30490])
        # Convert to np array to be able to feed the LSTM model
        self.X_train = np.array(self.X_train)
        self.y_train = np.array(self.y_train)
        return self.X_train, self.y_train, sc
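# --- Added usage sketch (not part of the original module) ---
# Assumes the M5 competition CSVs and a config.yaml with a `timesteps` field;
# the file names below are placeholders:
# loader = Load(train_sales="sales_train.csv", calendar="calendar.csv")
# sales = loader.downcast_dtypes()
# pre = Preprocess(sales, loader.calendar)
# e1, e2, ca, tx, wi = pre.label_calendar()
# split = SplitDataset(pre.loaded_train_sales, e1, e2, ca, tx, wi)
# scaler = ScalingTrainSales(split.concatenate(), config_path="config.yaml")
# X_train, y_train, sc = scaler.gen_train_data()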
| 46.214008
| 126
| 0.667088
| 1,519
| 11,877
| 5.044108
| 0.127716
| 0.058731
| 0.096711
| 0.117071
| 0.543461
| 0.513573
| 0.445837
| 0.396372
| 0.359175
| 0.23858
| 0
| 0.027834
| 0.255873
| 11,877
| 256
| 127
| 46.394531
| 0.839104
| 0.494233
| 0
| 0
| 0
| 0
| 0.023333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088889
| false
| 0
| 0.055556
| 0
| 0.233333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eb0ac6a6f7fdd1cf17fa0a0d491c03fde96fdfc1
| 331
|
py
|
Python
|
Physics250-ME3738/timeIntervalBlinks.py
|
illusion173/Physics250
|
69f2ffdb8af013e8b0739779861c1455b579ddaf
|
[
"MIT"
] | null | null | null |
Physics250-ME3738/timeIntervalBlinks.py
|
illusion173/Physics250
|
69f2ffdb8af013e8b0739779861c1455b579ddaf
|
[
"MIT"
] | null | null | null |
Physics250-ME3738/timeIntervalBlinks.py
|
illusion173/Physics250
|
69f2ffdb8af013e8b0739779861c1455b579ddaf
|
[
"MIT"
] | null | null | null |
import math

speedofLight = 2.9979 * pow(10, 8)


def timeIntervalBlinks():
    time = float(input('Input Time (sec): '))
    speed = float(input('Speed: '))
    speed = speed * pow(10, 8)
    gamma = math.sqrt(1 / (1 - pow((speed / speedofLight), 2)))
    answer = gamma * time
    print(answer)


timeIntervalBlinks()
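# --- Added worked example (not part of the original file) ---
# For time = 1.0 s and speed = 2.0 (interpreted as 2.0e8 m/s):
#   beta  = 2.0e8 / 2.9979e8 ≈ 0.6671
#   gamma = 1 / sqrt(1 - beta**2) ≈ 1.3424
# so the program prints a dilated interval of roughly 1.3424 s.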
| 18.388889
| 56
| 0.592145
| 40
| 331
| 4.9
| 0.5
| 0.132653
| 0.061224
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.056452
| 0.250755
| 331
| 17
| 57
| 19.470588
| 0.733871
| 0
| 0
| 0
| 0
| 0
| 0.075529
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.1
| 0
| 0.2
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eb10c1e56faa83018c15d8d04331071eb6bc524c
| 786
|
py
|
Python
|
PythonTest/Aula18A.py
|
MatthewsTomts/Python_Class
|
f326d521d62c45a4fcb429d2a22cf2ab958492cb
|
[
"MIT"
] | null | null | null |
PythonTest/Aula18A.py
|
MatthewsTomts/Python_Class
|
f326d521d62c45a4fcb429d2a22cf2ab958492cb
|
[
"MIT"
] | null | null | null |
PythonTest/Aula18A.py
|
MatthewsTomts/Python_Class
|
f326d521d62c45a4fcb429d2a22cf2ab958492cb
|
[
"MIT"
] | null | null | null |
teste = list()
teste.append('Matheus')
teste.append(17)
galera = [teste[:]]  # stores an independent copy of teste inside galera
teste[0] = 'Oliver'
teste[1] = 22
galera.append(teste)  # appends a reference, linking teste and galera
print(galera)
pessoas = [['Harvey', 23], ['Madeleine', 19], ['Roger', 250], ['Mark', 20]]
print(pessoas[0][0])  # shows the first value of the first list in this list
for p in pessoas:
    print(f'{p[0]} is {p[1]} years old.')
dados = []
pes = []
for i in range(0, 3):
    print('-=' * 10)
    dados.append(input('Name: '))
    dados.append(int(input('Age: ')))
    pes.append(dados[:])
    dados.clear()  # removes the values stored in dados
for p in pes:
    print(f'{p[0]} is an adult.' if p[1] > 20 else f'{p[0]} is a minor.')
# Exercises 84-89
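# --- Added example (not part of the original file) ---
# The copy-vs-reference distinction above in miniature:
# a = [1, 2]
# b = a[:]     # independent copy: later changes to a do not affect b
# c = a        # alias: c and a are the same list object
# a.append(3)
# print(b, c)  # [1, 2] [1, 2, 3]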
| 27.103448
| 84
| 0.624682
| 129
| 786
| 3.806202
| 0.503876
| 0.01222
| 0.01833
| 0.032587
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.050235
| 0.189567
| 786
| 28
| 85
| 28.071429
| 0.720565
| 0.231552
| 0
| 0
| 0
| 0
| 0.220736
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.227273
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eb17d457b2e3da5e9c6ce129bda974e0910d6212
| 1,967
|
py
|
Python
|
tencentcloud/cat/v20180409/errorcodes.py
|
HS-Gray/tencentcloud-sdk-python
|
b28b19c4beebc9f361aa3221afa36ad1ee047ccc
|
[
"Apache-2.0"
] | 37
|
2017-10-12T01:50:42.000Z
|
2022-02-24T02:44:45.000Z
|
tencentcloud/cat/v20180409/errorcodes.py
|
HS-Gray/tencentcloud-sdk-python
|
b28b19c4beebc9f361aa3221afa36ad1ee047ccc
|
[
"Apache-2.0"
] | null | null | null |
tencentcloud/cat/v20180409/errorcodes.py
|
HS-Gray/tencentcloud-sdk-python
|
b28b19c4beebc9f361aa3221afa36ad1ee047ccc
|
[
"Apache-2.0"
] | 12
|
2018-07-31T10:04:56.000Z
|
2022-02-07T00:08:06.000Z
|
# -*- coding: utf8 -*-
# Copyright (c) 2017-2021 THL A29 Limited, a Tencent company. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# Operation failed.
FAILEDOPERATION = 'FailedOperation'

# Database query error.
FAILEDOPERATION_DBQUERYFAILED = 'FailedOperation.DbQueryFailed'

# Failed to create the database record.
FAILEDOPERATION_DBRECORDCREATEFAILED = 'FailedOperation.DbRecordCreateFailed'

# Failed to update the database record.
FAILEDOPERATION_DBRECORDUPDATEFAILED = 'FailedOperation.DbRecordUpdateFailed'

# ES query error.
FAILEDOPERATION_ESQUERYERROR = 'FailedOperation.ESQueryError'

# No valid nodes.
FAILEDOPERATION_NOVALIDNODES = 'FailedOperation.NoValidNodes'

# The account bill is in arrears.
FAILEDOPERATION_ORDEROUTOFCREDIT = 'FailedOperation.OrderOutOfCredit'

# The resource does not exist.
FAILEDOPERATION_RESOURCENOTFOUND = 'FailedOperation.ResourceNotFound'

# The task is not running.
FAILEDOPERATION_TASKNOTRUNNING = 'FailedOperation.TaskNotRunning'

# The task is not suspended.
FAILEDOPERATION_TASKNOTSUSPENDED = 'FailedOperation.TaskNotSuspended'

# The task status does not allow the current operation.
FAILEDOPERATION_TASKOPERATIONNOTALLOW = 'FailedOperation.TaskOperationNotAllow'

# The batch probe tasks are not of the same type.
FAILEDOPERATION_TASKTYPENOTSAME = 'FailedOperation.TaskTypeNotSame'

# The trial task quota has been exceeded.
FAILEDOPERATION_TRIALTASKEXCEED = 'FailedOperation.TrialTaskExceed'

# Internal error.
INTERNALERROR = 'InternalError'

# Invalid parameter.
INVALIDPARAMETER = 'InvalidParameter'

# Invalid parameter value.
INVALIDPARAMETERVALUE = 'InvalidParameterValue'

# Missing parameter.
MISSINGPARAMETER = 'MissingParameter'

# The resource does not exist.
RESOURCENOTFOUND = 'ResourceNotFound'

# Unknown parameter.
UNKNOWNPARAMETER = 'UnknownParameter'
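# --- Added example (not part of the SDK module) ---
# A minimal sketch of how these constants might be used to map an error code
# string returned by the API to a hint; the mapping itself is hypothetical:
# HANDLED = {
#     FAILEDOPERATION_TASKNOTRUNNING: "start the task first",
#     RESOURCENOTFOUND: "check the resource id",
# }
# def hint_for(code):
#     return HANDLED.get(code, "no hint for " + code)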
| 26.945205
| 82
| 0.804779
| 182
| 1,967
| 8.631868
| 0.598901
| 0.038192
| 0.01655
| 0.020369
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008616
| 0.114896
| 1,967
| 72
| 83
| 27.319444
| 0.893739
| 0.394509
| 0
| 0
| 0
| 0
| 0.427831
| 0.348315
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eb1aab5b6a3a998c629d8d9ed3c85dc9531c3cbf
| 6,248
|
py
|
Python
|
py2.5/processing/reduction.py
|
geofft/multiprocess
|
d998ffea9e82d17662b12b94a236182e7fde46d5
|
[
"BSD-3-Clause"
] | 356
|
2015-06-21T21:05:10.000Z
|
2022-03-30T11:57:08.000Z
|
py2.5/processing/reduction.py
|
geofft/multiprocess
|
d998ffea9e82d17662b12b94a236182e7fde46d5
|
[
"BSD-3-Clause"
] | 103
|
2015-06-22T01:44:14.000Z
|
2022-03-01T03:44:25.000Z
|
py2.5/processing/reduction.py
|
geofft/multiprocess
|
d998ffea9e82d17662b12b94a236182e7fde46d5
|
[
"BSD-3-Clause"
] | 72
|
2015-09-02T14:10:24.000Z
|
2022-03-25T06:49:43.000Z
|
#
# Module to support the pickling of different types of connection
# objects and file objects so that they can be transferred between
# different processes.
#
# processing/reduction.py
#
# Copyright (c) 2006-2008, R Oudkerk --- see COPYING.txt
#

__all__ = []

import os
import sys
import socket
import threading
import copy_reg

import processing
from processing import _processing
from processing.logger import debug, subDebug, subWarning
from processing.forking import thisThreadIsSpawning
from processing.process import _registerAfterFork

#
#
#

connections_are_picklable = (
    sys.platform == 'win32' or hasattr(_processing, 'recvFd')
    )

try:
    fromfd = socket.fromfd
except AttributeError:
    def fromfd(fd, family, type, proto=0):
        s = socket._socket.socket()
        _processing.changeFd(s, fd, family, type, proto)
        return s

#
# Platform specific definitions
#

if sys.platform == 'win32':
    import _subprocess
    from processing._processing import win32

    closeHandle = win32.CloseHandle

    def duplicateHandle(handle):
        return _subprocess.DuplicateHandle(
            _subprocess.GetCurrentProcess(), handle,
            _subprocess.GetCurrentProcess(),
            0, False, _subprocess.DUPLICATE_SAME_ACCESS
            ).Detach()

    def sendHandle(conn, handle, destination_pid):
        process_handle = win32.OpenProcess(
            win32.PROCESS_ALL_ACCESS, False, destination_pid
            )
        try:
            new_handle = _subprocess.DuplicateHandle(
                _subprocess.GetCurrentProcess(), handle,
                process_handle, 0, False, _subprocess.DUPLICATE_SAME_ACCESS
                )
            conn.send(new_handle.Detach())
        finally:
            win32.CloseHandle(process_handle)

    def recvHandle(conn):
        return conn.recv()

    def isInheritableHandle(handle):
        return (win32.GetHandleInformation(handle) & win32.HANDLE_FLAG_INHERIT)

else:
    closeHandle = os.close
    duplicateHandle = os.dup

    def sendHandle(conn, handle, destination_pid):
        _processing.sendFd(conn.fileno(), handle)

    def recvHandle(conn):
        return _processing.recvFd(conn.fileno())

    def isInheritableHandle(handle):
        return True

#
# Support for a per-process server thread which caches pickled handles
#

_cache = set()

def _reset(obj):
    global _lock, _listener, _cache
    for h in _cache:
        closeHandle(h)
    _cache.clear()
    _lock = threading.Lock()
    _listener = None

_reset(None)
_registerAfterFork(_reset, _reset)

def _getListener():
    global _listener
    if _listener is None:
        _lock.acquire()
        try:
            if _listener is None:
                from processing.connection import Listener
                debug('starting listener and thread for sending handles')
                _listener = Listener(authenticate=True)
                t = threading.Thread(target=_serve)
                t.setDaemon(True)
                t.start()
        finally:
            _lock.release()
    return _listener

def _serve():
    while 1:
        try:
            conn = _listener.accept()
            handle_wanted, destination_pid = conn.recv()
            _cache.remove(handle_wanted)
            sendHandle(conn, handle_wanted, destination_pid)
            closeHandle(handle_wanted)
            conn.close()
        except (SystemExit, KeyboardInterrupt):
            raise
        except:
            if not processing.currentProcess()._exiting:
                import traceback
                subWarning(
                    'thread for sharing handles raised exception :\n' +
                    '-'*79 + '\n' + traceback.format_exc() + '-'*79
                    )

#
# Functions to be used for pickling/unpickling objects with handles
#

def reduceHandle(handle):
    if thisThreadIsSpawning() and isInheritableHandle(handle):
        return (None, handle, True)
    dup_handle = duplicateHandle(handle)
    _cache.add(dup_handle)
    subDebug('reducing handle %d', handle)
    return (_getListener().address, dup_handle, False)

def rebuildHandle(pickled_data):
    from processing.connection import Client
    address, handle, inherited = pickled_data
    if inherited:
        return handle
    subDebug('rebuilding handle %d', handle)
    conn = Client(address, authenticate=True)
    conn.send((handle, os.getpid()))
    new_handle = recvHandle(conn)
    conn.close()
    return new_handle

#
# Register `_processing.Connection` with `copy_reg`
#

def reduceConnection(conn):
    return rebuildConnection, (reduceHandle(conn.fileno()),)

def rebuildConnection(reduced_handle):
    fd = rebuildHandle(reduced_handle)
    return _processing.Connection(fd, duplicate=False)

copy_reg.pickle(_processing.Connection, reduceConnection)

#
# Register `socket.socket` with `copy_reg`
#

def reduceSocket(s):
    try:
        Family, Type, Proto = s.family, s.type, s.proto
    except AttributeError:
        # have to guess family, type, proto
        address = s.getsockname()
        Family = type(address) is str and socket.AF_UNIX or socket.AF_INET
        Type = s.getsockopt(socket.SOL_SOCKET, socket.SO_TYPE)
        Proto = 0
    reduced_handle = reduceHandle(s.fileno())
    return rebuildSocket, (reduced_handle, Family, Type, Proto)

def rebuildSocket(reduced_handle, family, type, proto):
    fd = rebuildHandle(reduced_handle)
    _sock = fromfd(fd, family, type, proto)
    closeHandle(fd)
    return socket.socket(_sock=_sock)

copy_reg.pickle(socket.socket, reduceSocket)

#
# Register `_processing.PipeConnection` with `copy_reg`
#

if sys.platform == 'win32':
    def reducePipeConnection(conn):
        return rebuildPipeConnection, (reduceHandle(conn.fileno()),)

    def rebuildPipeConnection(reduced_handle):
        handle = rebuildHandle(reduced_handle)
        return _processing.PipeConnection(handle, duplicate=False)

    copy_reg.pickle(_processing.PipeConnection, reducePipeConnection)
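#
# Added sketch (not part of the original module): the reduce/rebuild round
# trip for a handle within one process, assuming the processing package is
# installed on Python 2.5:
#
#   reduced = reduceHandle(conn.fileno())   # -> (listener_address, dup_fd, False)
#   fd = rebuildHandle(reduced)             # fetches the fd back via the listener
#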
| 28.52968
| 80
| 0.639725
| 623
| 6,248
| 6.242376
| 0.303371
| 0.020571
| 0.026999
| 0.013114
| 0.15531
| 0.077141
| 0
| 0
| 0
| 0
| 0
| 0.008177
| 0.275768
| 6,248
| 218
| 81
| 28.66055
| 0.851271
| 0.091549
| 0
| 0.171233
| 0
| 0
| 0.029119
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.130137
| false
| 0
| 0.10274
| 0.047945
| 0.349315
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eb1afd11fd2f6d89e9d5a3d5e84072981f86d593
| 570
|
py
|
Python
|
data-structures/print-the-elements-of-a-linked-list-in-reverse.py
|
gajubadge11/HackerRank-1
|
7b136ccaa1ed47ae737467ace6b494c720ccb942
|
[
"MIT"
] | 340
|
2018-06-17T19:45:56.000Z
|
2022-03-22T02:26:15.000Z
|
data-structures/print-the-elements-of-a-linked-list-in-reverse.py
|
gajubadge11/HackerRank-1
|
7b136ccaa1ed47ae737467ace6b494c720ccb942
|
[
"MIT"
] | 3
|
2021-02-02T17:17:29.000Z
|
2021-05-18T10:06:04.000Z
|
data-structures/print-the-elements-of-a-linked-list-in-reverse.py
|
gajubadge11/HackerRank-1
|
7b136ccaa1ed47ae737467ace6b494c720ccb942
|
[
"MIT"
] | 229
|
2019-04-20T08:28:49.000Z
|
2022-03-31T04:23:52.000Z
|
"""
Print elements of a linked list in reverse order as standard output
head could be None as well for empty list
Node is defined as
class Node(object):
def __init__(self, data=None, next_node=None):
self.data = data
self.next = next_node
"""
def ReversePrint(head):
if head is None:
return
else:
out = []
node = head
while node != None:
out.append(node.data)
node = node.next
print("\n".join(map(str, out[::-1])))
| 16.285714
| 68
| 0.522807
| 72
| 570
| 4.055556
| 0.569444
| 0.054795
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002841
| 0.382456
| 570
| 34
| 69
| 16.764706
| 0.826705
| 0.438596
| 0
| 0
| 0
| 0
| 0.006667
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0
| 0
| 0.2
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eb1bfe5091ca2f0f84f38e9d762348c024630c00
| 9,088
|
py
|
Python
|
cfd/cfd_rel_perms.py
|
lanetszb/vofpnm
|
520544db894fb13e44a86e989bd17b4690e996d3
|
[
"MIT"
] | null | null | null |
cfd/cfd_rel_perms.py
|
lanetszb/vofpnm
|
520544db894fb13e44a86e989bd17b4690e996d3
|
[
"MIT"
] | null | null | null |
cfd/cfd_rel_perms.py
|
lanetszb/vofpnm
|
520544db894fb13e44a86e989bd17b4690e996d3
|
[
"MIT"
] | null | null | null |
# MIT License
#
# Copyright (c) 2020 Aleksandr Zhuravlyov and Zakhar Lanets
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import os
import numpy as np
import json
import pandas as pd
import copy
import matplotlib.pyplot as plt
import time as tm
from matplotlib import rc

current_path = os.path.dirname(os.path.abspath(__file__))
sys.path.append(os.path.join(current_path, '../../'))

from netgrid import save_files_collection_to_file
from matplotlib.ticker import FormatStrFormatter
from vofpnm.cfd.ini_class import Ini
from vofpnm.cfd.cfd_class import Cfd
from vofpnm.helpers import plot_rel_perms, plot_conesrvation_check, plot_viscs_vels, plot_av_sat, \
    plot_capillary_pressure_curve, plot_capillary_pressures

# rc('text', usetex=True)
# plt.rcParams["font.family"] = "Times New Roman"

start_time = tm.time()

ini = Ini(config_file=sys.argv[1])
cfd = Cfd(ini)

visc_0 = ini.paramsPnm['visc_0']
visc_1 = ini.paramsPnm['visc_1']
ini.throats_viscs = np.tile(visc_0, ini.netgrid.throats_N)

cfd.run_pnm()
throats_volumes = cfd.ini.throats_volumes

# ### validation with openFoam ###
test_case_vofpnm = dict()
times_alpha_avs = dict()
times_u_mgn_avs = dict()
times_F_avs = dict()
times_F_avs_new = dict()
times_V_in = dict()
thrs_velocities_to_output = dict()
thrs_alphas_to_output = dict()
nus = {'1': visc_0, '2': visc_1}
rhos = {'1': ini.paramsPnm['b_dens_fluid1'], '2': ini.paramsPnm['b_dens_fluid1']}
test_case_vofpnm['mus'] = nus
test_case_vofpnm['rhos'] = rhos
test_case_vofpnm['sigma'] = ini.ift

# ### validation with openfoam one-phase ###
throats_vels = np.absolute(np.array(list(cfd.ini.throats_velocities.values())))
u_mgn_av = np.sum((throats_volumes * throats_vels)) / np.sum(throats_volumes)
test_case_vofpnm['ref_u_mgn'] = u_mgn_av
print('ref_u_mgn', u_mgn_av)
throats_widths = np.absolute(np.array(list(cfd.ini.throats_widths.values())))
av_width = np.sum((throats_volumes * throats_widths)) / np.sum(throats_volumes)
test_case_vofpnm['width'] = av_width

ini.flow_0_ref = cfd.calc_rel_flow_rate()
print('flow_0_ref', ini.flow_0_ref)
visc_1 = ini.paramsPnm['visc_1']
ini.throats_viscs = np.tile(visc_1, ini.netgrid.throats_N)
cfd.run_pnm()
ini.flow_1_ref = cfd.calc_rel_flow_rate()

cfd.calc_coupling_params()
cfd.run_pnm()

rel_perms_0 = []
rel_perms_1 = []
capillary_numbers = []
capillary_pressures = []
av_sats = []

throats_volumes = cfd.ini.throats_volumes
throats_av_sats = cfd.ini.equation.throats_av_sats
dens_0 = cfd.ini.paramsPnm['dens_0']
mass_already_in = copy.deepcopy(np.sum(throats_volumes * throats_av_sats * dens_0))
mass_rates_in = []
mass_rates_out = []
masses_inside = []
times = []
viscs = []
vol_rates_in = []
vol_rates_out = []

#################
# Paraview output
#################
os.system('rm -r inOut/*.vtu')
os.system('rm -r inOut/*.pvd')
sats_dict = dict()
file_name = 'inOut/collection.pvd'
files_names = list()
files_descriptions = list()
cells_arrays = cfd.process_paraview_data()
cfd.ini.netgrid.cells_arrays = cells_arrays
files_names.append(str(0) + '.vtu')
files_descriptions.append(str(0))
cfd.ini.netgrid.save_cells('inOut/' + files_names[-1])
save_files_collection_to_file(file_name, files_names, files_descriptions)
#################

time = [0]
time_steps = []
cour_number = np.empty([])
time_curr = 0
time_step_curr = 0
time_output_freq = cfd.ini.time_period / 500.
round_output_time = int(ini.round_output_time)
output_time_step = ini.output_time_step
time_bound = output_time_step
is_output_step = False
is_last_step = False
out_idx = int(0)

while True:
    if cfd.ini.time_step_type == 'const':
        cfd.ini.time_step = cfd.ini.const_time_step
    elif cfd.ini.time_step_type == 'flow_variable':
        cfd.ini.time_step = cfd.ini.local.calc_flow_variable_time_step(
            cfd.ini.throats_velocities)
    elif cfd.ini.time_step_type == 'div_variable':
        cfd.ini.time_step = cfd.ini.local.calc_div_variable_time_step(
            cfd.ini.equation.sats[cfd.ini.equation.i_curr], cfd.ini.throats_velocities)
    time_step_curr = cfd.ini.time_step

    if time_curr + time_step_curr >= time_bound:
        time_step_curr = time_bound - time_curr
        time_bound += output_time_step
        is_output_step = True

    if time_curr + time_step_curr >= cfd.ini.time_period:
        is_last_step = True
        if not is_output_step:
            time_step_curr = cfd.ini.time_period - time_curr

    time_steps.append(time_step_curr)
    time_curr += time_step_curr
    cfd.ini.equation.cfd_procedure_one_step(cfd.ini.throats_velocities, time_step_curr)
    cfd.calc_coupling_params()

    mass_inside = copy.deepcopy(np.sum(throats_volumes * throats_av_sats * dens_0))
    masses_inside.append(mass_inside)

    vol_rate_in, vol_rate_out, vol_rate_in_0, vol_rate_out_1 = cfd.calc_flow_rates(mass_rates_in,
                                                                                   mass_rates_out)
    vol_rates_out.append(vol_rate_out_1)
    cfd.calc_rel_perms(rel_perms_0, rel_perms_1, capillary_numbers, capillary_pressures,
                       av_sats, ini.flow_0_ref, ini.flow_1_ref, vol_rate_in_0)

    print('time_step: ', round(time_step_curr, round_output_time))
    time.append(time_curr)
    cfd.ini.equation.print_cour_numbers(cfd.ini.throats_velocities, cfd.ini.time_step)
    print(' percentage executed:', round((time_curr / cfd.ini.time_period * 100.), 2), '%.', '\n')

    cfd.run_pnm()
    cells_arrays = cfd.process_paraview_data()

    if is_output_step:
        cfd.ini.netgrid.cells_arrays = cells_arrays
        files_names.append(str(round(time_curr, round_output_time)) + '.vtu')
        files_descriptions.append(str(round(time_curr, round_output_time)))
        cfd.ini.netgrid.save_cells('inOut/' + files_names[-1])
        save_files_collection_to_file(file_name, files_names, files_descriptions)
        out_idx += 1
        is_output_step = False

        # ###### validation with openfoam #######
        throats_vels = np.absolute(np.array(list(cfd.ini.throats_velocities.values())))
        u_mgn_av = np.sum(throats_volumes * throats_vels) / np.sum(throats_volumes)
        alpha_av = np.sum(throats_volumes * throats_av_sats) / np.sum(throats_volumes)
        F_av = np.sum(throats_volumes * throats_vels * throats_av_sats) / np.sum(
            throats_volumes * throats_vels)
        times_u_mgn_avs[str(round(time_curr, round_output_time))] = u_mgn_av
        times_alpha_avs[str(round(time_curr, round_output_time))] = alpha_av
        times_F_avs[str(round(time_curr, round_output_time))] = F_av
        times_F_avs_new[str(round(time_curr, round_output_time))] = (
            vol_rate_out - vol_rate_out_1) / vol_rate_out
        times_V_in[str(round(time_curr, round_output_time))] = vol_rate_in
        # ###### validation with openfoam #######
        print(str(round(time_curr, round_output_time)), time_curr)

    throats_vels = np.absolute(np.array(list(cfd.ini.throats_velocities.values())))
    throats_viscs = cfd.ini.throats_viscs
    visc = np.sum(cfd.ini.throats_volumes * throats_viscs) / np.sum(cfd.ini.throats_volumes)
    times.append(time_curr)
    viscs.append(visc)
    vol_rates_in.append(vol_rate_in)

    if is_last_step:
        break

execution_time = tm.time() - start_time
print("--- %s seconds ---" % execution_time)

#############
# Rel perms validation output
#############
test_case_vofpnm['times_alpha_avs'] = times_alpha_avs
test_case_vofpnm['times_u_mgn_avs'] = times_u_mgn_avs
test_case_vofpnm['times_F_avs'] = times_F_avs
test_case_vofpnm['times_F_avs_new'] = times_F_avs_new
test_case_vofpnm['execution_time'] = execution_time
test_case_vofpnm['time_step'] = cfd.ini.output_time_step
test_case_vofpnm['grid_volume'] = cfd.ini.grid_volume
test_case_vofpnm['total_volume'] = np.sum(throats_volumes)
test_case_vofpnm['times_V_in'] = times_V_in

json_file_u_mgns = 'inOut/validation/tmp.json'
with open(json_file_u_mgns, 'w') as f:
    json.dump(test_case_vofpnm, f, sort_keys=False, indent=4 * ' ', ensure_ascii=False)
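# --- Added usage note (not part of the original script) ---
# The script reads its configuration path from sys.argv[1], so a run looks like:
#   python cfd_rel_perms.py <config.yaml>
# (the config file name here is a placeholder).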
| 36.943089
| 125
| 0.725682
| 1,396
| 9,088
| 4.378224
| 0.197708
| 0.04123
| 0.036649
| 0.040412
| 0.450589
| 0.376309
| 0.298429
| 0.229385
| 0.172611
| 0.148069
| 0
| 0.00744
| 0.15702
| 9,088
| 245
| 126
| 37.093878
| 0.790367
| 0.145467
| 0
| 0.135294
| 0
| 0
| 0.052749
| 0.00328
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.082353
| 0
| 0.082353
| 0.041176
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eb1e990c875a84c89463cedf50afc813143a16f2
| 1,330
|
py
|
Python
|
GUI/WifiMonitor/UDP/Utils/gpio_mapping.py
|
gchinellato/XD
|
f6c0134030c5e229a7b9c2621311c5204aed77af
|
[
"MIT"
] | 1
|
2019-10-15T20:31:39.000Z
|
2019-10-15T20:31:39.000Z
|
GUI/WifiMonitor/Utils/gpio_mapping.py
|
gchinellato/XD
|
f6c0134030c5e229a7b9c2621311c5204aed77af
|
[
"MIT"
] | null | null | null |
GUI/WifiMonitor/Utils/gpio_mapping.py
|
gchinellato/XD
|
f6c0134030c5e229a7b9c2621311c5204aed77af
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python
"""
*************************************************
* @Project: Self Balance
* @Description: GPIO Mapping
* @Owner: Guilherme Chinellato
* @Email: guilhermechinellato@gmail.com
*************************************************
"""
"""
#
#Arduino GPIO
#
4x encoder (INT0-D2, INT1-D3, D4, D7)
4x motor enable (D5, D6, D11, D12)
2x PWM (D9, D10)
2x I2C (SCL-A5, SDA-A4)
"""
'''
Deprecated (replaced to Arduino)
#
#Motors GPIOs
#
#Motor A & B PWM outputs (BCM pinout)
MA_PWM_GPIO = 19
MB_PWM_GPIO = 26
#Motor A & B enable outputs (BCM pinout)
MA_CLOCKWISE_GPIO = 5
MA_ANTICLOCKWISE_GPIO = 6
MB_CLOCKWISE_GPIO = 20
MB_ANTICLOCKWISE_GPIO = 21
#
#Encoders GPIOs
#
#Encoders 1 & 2 for each motor (BCM pinout)
MA_ENCODER_1 = 12
MA_ENCODER_2 = 13
MB_ENCODER_1 = 7
MB_ENCODER_2 = 8
'''
#
#PanTilt GPIOs
#
#MicroServo Vertical and Horizontal outputs (BCM pinout)
SERVO_V_GPIO = 18
SERVO_H_GPIO = 23
'''Servo mapping for servoblaster:
0 on P1-7 GPIO-4
1 on P1-11 GPIO-17
*2 on P1-12 GPIO-18*
3 on P1-13 GPIO-27
4 on P1-15 GPIO-22
*5 on P1-16 GPIO-23*
6 on P1-18 GPIO-24
7 on P1-22 GPIO-25'''
#Servo pins
SERVO_H = '2' #pin 12 BCM 18
SERVO_V = '5' #pin 16 BCM 23
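# --- Added sketch (not part of the original mapping file) ---
# Assuming the ServoBlaster daemon is running and exposes /dev/servoblaster
# (pulse widths given in 10 us steps), the mapped servos could be driven so:
# def set_servo(servo_id, width):
#     with open("/dev/servoblaster", "w") as f:
#         f.write("%s=%d\n" % (servo_id, width))
# set_servo(SERVO_H, 150)  # ~1.5 ms pulse: center the horizontal servo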
| 18.472222
| 69
| 0.566165
| 198
| 1,330
| 3.671717
| 0.484848
| 0.044017
| 0.066025
| 0.049519
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.105584
| 0.259399
| 1,330
| 71
| 70
| 18.732394
| 0.632487
| 0.315038
| 0
| 0
| 0
| 0
| 0.02439
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eb212bcaed139e5c9db595186ee8e16677921512
| 8,088
|
py
|
Python
|
mmdet/utils/memory.py
|
Youth-Got/mmdetection
|
2e0a02599804da6e07650dde37b9df538e15d646
|
[
"Apache-2.0"
] | 1
|
2021-12-10T15:08:22.000Z
|
2021-12-10T15:08:22.000Z
|
mmdet/utils/memory.py
|
q3394101/mmdetection
|
ca11860f4f3c3ca2ce8340e2686eeaec05b29111
|
[
"Apache-2.0"
] | null | null | null |
mmdet/utils/memory.py
|
q3394101/mmdetection
|
ca11860f4f3c3ca2ce8340e2686eeaec05b29111
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) OpenMMLab. All rights reserved.
import warnings
from collections import abc
from contextlib import contextmanager
from functools import wraps

import torch

from mmdet.utils import get_root_logger


def cast_tensor_type(inputs, src_type=None, dst_type=None):
    """Recursively convert Tensor in inputs from ``src_type`` to ``dst_type``.

    Args:
        inputs: Inputs that to be casted.
        src_type (torch.dtype | torch.device): Source type.
        src_type (torch.dtype | torch.device): Destination type.

    Returns:
        The same type with inputs, but all contained Tensors have been cast.
    """
    assert dst_type is not None
    if isinstance(inputs, torch.Tensor):
        if isinstance(dst_type, torch.device):
            # convert Tensor to dst_device
            if hasattr(inputs, 'to') and \
                    hasattr(inputs, 'device') and \
                    (inputs.device == src_type or src_type is None):
                return inputs.to(dst_type)
            else:
                return inputs
        else:
            # convert Tensor to dst_dtype
            if hasattr(inputs, 'to') and \
                    hasattr(inputs, 'dtype') and \
                    (inputs.dtype == src_type or src_type is None):
                return inputs.to(dst_type)
            else:
                return inputs
    # we need to ensure that the type of inputs to be casted are the same
    # as the argument `src_type`.
    elif isinstance(inputs, abc.Mapping):
        return type(inputs)({
            k: cast_tensor_type(v, src_type=src_type, dst_type=dst_type)
            for k, v in inputs.items()
        })
    elif isinstance(inputs, abc.Iterable):
        return type(inputs)(
            cast_tensor_type(item, src_type=src_type, dst_type=dst_type)
            for item in inputs)
    # TODO: Currently not supported
    # elif isinstance(inputs, InstanceData):
    #     for key, value in inputs.items():
    #         inputs[key] = cast_tensor_type(
    #             value, src_type=src_type, dst_type=dst_type)
    #     return inputs
    else:
        return inputs


@contextmanager
def _ignore_torch_cuda_oom():
    """A context which ignores CUDA OOM exception from pytorch.

    Code is modified from
    <https://github.com/facebookresearch/detectron2/blob/main/detectron2/utils/memory.py>  # noqa: E501
    """
    try:
        yield
    except RuntimeError as e:
        # NOTE: the string may change?
        if 'CUDA out of memory. ' in str(e):
            pass
        else:
            raise


class AvoidOOM:
    """Try to convert inputs to FP16 and CPU if got a PyTorch's CUDA Out of
    Memory error. It will do the following steps:

        1. First retry after calling `torch.cuda.empty_cache()`.
        2. If that still fails, it will then retry by converting inputs
           to FP16.
        3. If that still fails trying to convert inputs to CPUs.
           In this case, it expects the function to dispatch to
           CPU implementation.

    Args:
        to_cpu (bool): Whether to convert outputs to CPU if get an OOM
            error. This will slow down the code significantly.
            Defaults to True.
        test (bool): Skip `_ignore_torch_cuda_oom` operate that can use
            lightweight data in unit test, only used in
            test unit. Defaults to False.

    Examples:
        >>> from mmdet.utils.memory import AvoidOOM
        >>> AvoidCUDAOOM = AvoidOOM()
        >>> output = AvoidOOM.retry_if_cuda_oom(
        >>>     some_torch_function)(input1, input2)
        >>> # To use as a decorator
        >>> # from mmdet.utils import AvoidCUDAOOM
        >>> @AvoidCUDAOOM.retry_if_cuda_oom
        >>> def function(*args, **kwargs):
        >>>     return None
        ```

    Note:
        1. The output may be on CPU even if inputs are on GPU. Processing
           on CPU will slow down the code significantly.
        2. When converting inputs to CPU, it will only look at each argument
           and check if it has `.device` and `.to` for conversion. Nested
           structures of tensors are not supported.
        3. Since the function might be called more than once, it has to be
           stateless.
    """

    def __init__(self, to_cpu=True, test=False):
        self.to_cpu = to_cpu
        self.test = test

    def retry_if_cuda_oom(self, func):
        """Makes a function retry itself after encountering pytorch's CUDA OOM
        error.

        The implementation logic is referred to
        https://github.com/facebookresearch/detectron2/blob/main/detectron2/utils/memory.py

        Args:
            func: a stateless callable that takes tensor-like objects
                as arguments.
        Returns:
            func: a callable which retries `func` if OOM is encountered.
        """  # noqa: W605

        @wraps(func)
        def wrapped(*args, **kwargs):
            # raw function
            if not self.test:
                with _ignore_torch_cuda_oom():
                    return func(*args, **kwargs)

                # Clear cache and retry
                torch.cuda.empty_cache()
                with _ignore_torch_cuda_oom():
                    return func(*args, **kwargs)

            # get the type and device of first tensor
            dtype, device = None, None
            values = args + tuple(kwargs.values())
            for value in values:
                if isinstance(value, torch.Tensor):
                    dtype = value.dtype
                    device = value.device
                    break
            if dtype is None or device is None:
                raise ValueError('There is no tensor in the inputs, '
                                 'cannot get dtype and device.')

            # Convert to FP16
            fp16_args = cast_tensor_type(args, dst_type=torch.half)
            fp16_kwargs = cast_tensor_type(kwargs, dst_type=torch.half)
            logger = get_root_logger()
            logger.warning(f'Attempting to copy inputs of {str(func)} '
                           'to FP16 due to CUDA OOM')

            # get input tensor type, the output type will same as
            # the first parameter type.
            with _ignore_torch_cuda_oom():
                output = func(*fp16_args, **fp16_kwargs)
                output = cast_tensor_type(
                    output, src_type=torch.half, dst_type=dtype)
                if not self.test:
                    return output
            logger.warning('Using FP16 still meet CUDA OOM')

            # Try on CPU. This will slow down the code significantly,
            # therefore print a notice.
            if self.to_cpu:
                logger.warning(f'Attempting to copy inputs of {str(func)} '
                               'to CPU due to CUDA OOM')
                cpu_device = torch.empty(0).device
                cpu_args = cast_tensor_type(args, dst_type=cpu_device)
                cpu_kwargs = cast_tensor_type(kwargs, dst_type=cpu_device)

                # convert outputs to GPU
                with _ignore_torch_cuda_oom():
                    logger.warning(f'Convert outputs to GPU (device={device})')
                    output = func(*cpu_args, **cpu_kwargs)
                    output = cast_tensor_type(
                        output, src_type=cpu_device, dst_type=device)
                    return output

                warnings.warn('Cannot convert output to GPU due to CUDA OOM, '
                              'the output is now on CPU, which might cause '
                              'errors if the output need to interact with GPU '
                              'data in subsequent operations')
                logger.warning('Cannot convert output to GPU due to '
                               'CUDA OOM, the output is on CPU now.')
                return func(*cpu_args, **cpu_kwargs)
            else:
                # may still get CUDA OOM error
                return func(*args, **kwargs)

        return wrapped


# To use AvoidOOM as a decorator
AvoidCUDAOOM = AvoidOOM()
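# --- Added example (not part of the original module) ---
# Mirroring the class docstring: wrap a tensor op so it retries on CUDA OOM
# (assumes a CUDA-capable setup with mmdet installed):
# import torch
#
# @AvoidCUDAOOM.retry_if_cuda_oom
# def pairwise_dist(x, y):
#     return torch.cdist(x, y)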
| 37.794393
| 103
| 0.574679
| 994
| 8,088
| 4.554326
| 0.246479
| 0.027833
| 0.030926
| 0.023857
| 0.249172
| 0.231942
| 0.212503
| 0.154628
| 0.131213
| 0.098962
| 0
| 0.00706
| 0.352003
| 8,088
| 213
| 104
| 37.971831
| 0.856707
| 0.391568
| 0
| 0.277228
| 0
| 0
| 0.114811
| 0
| 0
| 0
| 0
| 0.004695
| 0.009901
| 1
| 0.049505
| false
| 0.009901
| 0.059406
| 0
| 0.257426
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eb213849d6f5cbf00a64871c3293e7fb777f9ff4
| 2,278
|
py
|
Python
|
game.py
|
YeonjuKim05/Kim_Y_RPS_Fall2020
|
031bfeec09f663686ae2c9418185ab5070af3b7a
|
[
"MIT"
] | null | null | null |
game.py
|
YeonjuKim05/Kim_Y_RPS_Fall2020
|
031bfeec09f663686ae2c9418185ab5070af3b7a
|
[
"MIT"
] | 1
|
2020-11-28T16:29:28.000Z
|
2020-11-28T16:29:28.000Z
|
game.py
|
YeonjuKim05/Kim_Y_RPS_Fall2020
|
031bfeec09f663686ae2c9418185ab5070af3b7a
|
[
"MIT"
] | null | null | null |
# import packages to extend python (just like we extend sublime, or Atom, or VSCode)
from random import randint
from gameComponents import gameVars, chooseWinner
while gameVars.player is False:
print("=======================*/ RPS CONTEST /*=======================")
print("Computer Lives: ", gameVars.ai_lives, "/", gameVars.total_lives)
print("Player Lives: ", gameVars.player_lives, "/", gameVars.total_lives)
print("==============================================")
print("Choose your weapon! or type quit to leave\n")
gameVars.player = input("Choose rock, paper or scissors: \n")
# if the player chose to quit then exit the game
if gameVars.player == "quit":
print("You chose to quit")
exit()
#player = True -> it has a value (rock, paper, or scissors)
# this will be the AI choice -> a random pick from the choices array
computer = gameVars.choices[randint(0, 2)]
# check to see what the user input
# print outputs whatever is in the round brackets -> in this case it outputs player to the command prompt window
print("user chose: " + gameVars.player)
# validate that the random choice worked for the AI
print("AI chose: " + computer)
#--------------------------- MOVE THIS CHUNK OF CODE TO A PACKAGE - START HERE --------------------
if (computer == gameVars.player):
print("tie")
# always check for negative conditions first (the losing case)
elif (computer == "rock"):
if (gameVars.player == "scissors"):
print("you lose")
gameVars.player_lives -= 1
else:
print("you win!")
gameVars.ai_lives -= 1
elif (computer == "paper"):
if (gameVars.player == "rock"):
print("you lose")
gameVars.player_lives -= 1
else:
print("you win!")
gameVars.ai_lives -= 1
elif (computer == "scissors"):
if (gameVars.player == "paper"):
print("you lose")
gameVars.player_lives -= 1
else:
print("you win!")
gameVars.ai_lives -= 1
#--------------------------- stop here - all of the above needs to move -----------------------
# use == for value comparison; 'is 0' relies on int interning and raises a SyntaxWarning on Python 3.8+
if gameVars.player_lives == 0:
chooseWinner.winorlose("lost")
if gameVars.ai_lives == 0:
chooseWinner.winorlose("won")
print("Player has", gameVars.player_lives, "lives left")
print("AI has", gameVars.ai_lives, "lives left")
gameVars.player = False
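The `gameComponents` package is not included in this file; here is a minimal sketch of what `gameVars` and `chooseWinner` would need to expose for the loop above to run (shapes inferred from usage, all values assumed):
# gameComponents/gameVars.py (assumed shape)
choices = ["rock", "paper", "scissors"]
total_lives = 3
player_lives = total_lives
ai_lives = total_lives
player = False  # stays False until the player types a choice

# gameComponents/chooseWinner.py (assumed shape)
def winorlose(outcome):
    print("Game over: you", outcome)
    exit()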
| 26.183908
| 113
| 0.6295
| 299
| 2,278
| 4.749164
| 0.347826
| 0.147887
| 0.06338
| 0.042254
| 0.230282
| 0.15
| 0.15
| 0.15
| 0.15
| 0.15
| 0
| 0.005328
| 0.176032
| 2,278
| 86
| 114
| 26.488372
| 0.751199
| 0.307726
| 0
| 0.333333
| 0
| 0
| 0.248721
| 0.061381
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.044444
| 0
| 0.044444
| 0.377778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eb21b87b5bc6c350c9c4db10e19ca1430b1bd7c2
| 1,227
|
py
|
Python
|
dataset/utils.py
|
tarun-bisht/mlpipe
|
0cd1f0b57a7788222228dc08f0c8a21ed51a7cc1
|
[
"MIT"
] | null | null | null |
dataset/utils.py
|
tarun-bisht/mlpipe
|
0cd1f0b57a7788222228dc08f0c8a21ed51a7cc1
|
[
"MIT"
] | null | null | null |
dataset/utils.py
|
tarun-bisht/mlpipe
|
0cd1f0b57a7788222228dc08f0c8a21ed51a7cc1
|
[
"MIT"
] | null | null | null |
import pandas as pd
import os
def df_from_image_dirs(directory, image_format="jpg",
relative_path=False, verbose=0):
dataframe_dict = {
"images":[],
"classes":[]
}
num_dirs = 0
num_images = 0
images_per_classes = []
classes = []
for dirs in os.listdir(directory):
dir_path = os.path.join(directory,dirs)
if os.path.isdir(dir_path):
# compare against the final extension so dotted names (e.g. "img.v2.jpg")
# and extensionless files don't break the filter
files = [f for f in os.listdir(dir_path) if f.rsplit(".", 1)[-1] == image_format]
num = len(files)
if relative_path:
dataframe_dict["images"] = dataframe_dict["images"]+[os.path.join(dir_path,f) for f in files]
else:
dataframe_dict["images"] = dataframe_dict["images"]+files
dataframe_dict["classes"] = dataframe_dict["classes"]+[dirs]*num
num_images+=num
images_per_classes.append(num)
classes.append(dirs)
num_dirs+=1
if verbose:
print("number of directories(classes)= ",num_dirs)
print("total number of images= ",num_images)
for clss, imgs in zip(classes, images_per_classes):
print(f"{clss} : {imgs}")
return pd.DataFrame.from_dict(dataframe_dict)
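A usage sketch, assuming one sub-directory per class (the `data/train` path is hypothetical):
# hypothetical layout: data/train/cats/001.jpg, data/train/dogs/001.jpg, ...
df = df_from_image_dirs("data/train", image_format="jpg",
                        relative_path=True, verbose=1)
print(df.head())  # columns: "images" (paths), "classes" (directory names)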
| 36.088235
| 109
| 0.597392
| 156
| 1,227
| 4.49359
| 0.301282
| 0.148359
| 0.135521
| 0.019971
| 0.108417
| 0.108417
| 0
| 0
| 0
| 0
| 0
| 0.00565
| 0.278729
| 1,227
| 34
| 110
| 36.088235
| 0.786441
| 0
| 0
| 0
| 0
| 0
| 0.102606
| 0.017101
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0.0625
| 0
| 0.125
| 0.09375
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eb2259b4263e5697783bf6849627924369449a0f
| 1,222
|
py
|
Python
|
THreading.py
|
asd86826/OpticalFlow_Test
|
f4d621994871b4913b95a18f59cb171526d786ae
|
[
"MIT"
] | null | null | null |
THreading.py
|
asd86826/OpticalFlow_Test
|
f4d621994871b4913b95a18f59cb171526d786ae
|
[
"MIT"
] | null | null | null |
THreading.py
|
asd86826/OpticalFlow_Test
|
f4d621994871b4913b95a18f59cb171526d786ae
|
[
"MIT"
] | null | null | null |
import time
from threading import Timer
i = 0
class RepeatedTimer(object):
def __init__(self, interval, function, *args, **kwargs):
self._timer = None
self.interval = interval
self.function = function
self.args = args
self.kwargs = kwargs
self.is_running = False
self.start()  # remove this call if you don't want the timer to start automatically
def _run(self):
self.is_running = False
self.start()
self.function(*self.args, **self.kwargs)
def start(self):
if not self.is_running:
self._timer = Timer(self.interval, self._run)
self._timer.start()
self.is_running = True
def stop(self):
self._timer.cancel()
self.is_running = False
def timeTest():
global i
i = i+1
print ("Hello %d!" % i)
if __name__ == "__main__":
print("Starting...")
rt = RepeatedTimer(0.05, timeTest)  # starts automatically, so rt.start() is not needed
try:
ST = time.time()
time.sleep(5)
except Exception as e:
raise e
finally:
rt.stop()
print(time.time() - ST)
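One caveat: because each `Timer` is re-armed from `_run` with a fixed delay, the schedule drifts by the callback's runtime. A drift-compensating variant (a sketch, not part of the original) re-arms against an absolute deadline instead:
class DriftFreeTimer(RepeatedTimer):
    def start(self):
        if not self.is_running:
            now = time.time()
            # advance an absolute deadline so callback runtime does not accumulate
            self._deadline = getattr(self, "_deadline", now) + self.interval
            self._timer = Timer(max(0.0, self._deadline - now), self._run)
            self._timer.start()
            self.is_running = True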
| 24.44
| 85
| 0.531097
| 143
| 1,222
| 4.377622
| 0.405594
| 0.047923
| 0.103834
| 0.086262
| 0.086262
| 0.086262
| 0
| 0
| 0
| 0
| 0
| 0.007722
| 0.364157
| 1,222
| 49
| 86
| 24.938776
| 0.797941
| 0.06383
| 0
| 0.128205
| 0
| 0
| 0.025618
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.128205
| false
| 0
| 0.051282
| 0
| 0.205128
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eb266bf3b2f0517ce3d9501b3cfc011f8ded2d3e
| 3,817
|
bzl
|
Python
|
defs.bzl
|
attilaolah/bazel-tools
|
823216936ee93ab6884c6111a8e60e9a836fa7cc
|
[
"Apache-2.0"
] | 2
|
2021-09-02T18:59:09.000Z
|
2021-09-20T23:13:17.000Z
|
defs.bzl
|
attilaolah/bazel-tools
|
823216936ee93ab6884c6111a8e60e9a836fa7cc
|
[
"Apache-2.0"
] | null | null | null |
defs.bzl
|
attilaolah/bazel-tools
|
823216936ee93ab6884c6111a8e60e9a836fa7cc
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load("@bazel_skylib//lib:shell.bzl", "shell")
def _json_extract_impl(ctx):
flags = list(ctx.attr.flags)
if ctx.attr.raw:
flags += ["-r"]
outputs = []
for src in ctx.files.srcs:
parts = [ctx.executable._jq.path] + flags
parts += [shell.quote(ctx.attr.query), shell.quote(src.path)]
basename, _, _ = src.basename.rpartition(".json")
output = ctx.actions.declare_file(basename + ctx.attr.suffix)
outputs.append(output)
parts += [">", shell.quote(output.path), "\n"]
cmd = " ".join([part for part in parts if part])
# Using run() would be much nicer, but jq insists on writing to stdout.
ctx.actions.run_shell(
inputs = [src],
outputs = [output],
progress_message = "Executing jq for {}".format(src.short_path),
tools = [ctx.executable._jq],
command = cmd,
)
return [DefaultInfo(
runfiles = ctx.runfiles(files = outputs),
)]
json_extract = rule(
implementation = _json_extract_impl,
attrs = {
"srcs": attr.label_list(
mandatory = True,
allow_files = [".json"],
doc = "List of inputs. Must all be valid JSON files.",
),
"suffix": attr.string(
default = "",
doc = ("Output file extensions. Each input file will be renamed " +
"from basename.json to basename+suffix."),
),
"raw": attr.bool(
default = False,
doc = ("Whether or not to pass -r to jq. Passing -r will result " +
"in raw data being extracted, i.e. non-JSQN output."),
),
"query": attr.string(
default = ".",
doc = ("Query to pass to the jq binary. The default is '.', " +
"meaning just copy the validated input."),
),
"flags": attr.string_list(
allow_empty = True,
doc = "List of flags to pass to the jq binary.",
),
"_jq": attr.label(
executable = True,
cfg = "host",
default = Label("@jq"),
),
},
)
def _json_test_impl(ctx):
inputs = [f.path for f in ctx.files.srcs]
parts = [ctx.executable._jq.short_path, "."] + inputs
parts += [">", "/dev/null"] # silence jq, only show errors
cmd = " ".join([part for part in parts if part])
# Write the file that will be executed by 'bazel test'.
ctx.actions.write(
output = ctx.outputs.test,
content = cmd,
)
return [DefaultInfo(
executable = ctx.outputs.test,
runfiles = ctx.runfiles(files = [
ctx.executable._jq,
] + ctx.files.srcs),
)]
json_test = rule(
implementation = _json_test_impl,
attrs = {
"srcs": attr.label_list(
mandatory = True,
allow_files = [".json"],
doc = ("List of inputs. The test will verify that they are " +
"valid JSON files."),
),
"_jq": attr.label(
executable = True,
cfg = "host",
default = Label("@jq"),
),
},
outputs = {"test": "%{name}.sh"},
test = True,
)
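A BUILD-file usage sketch for the two rules above (the `//:defs.bzl` label and file names are hypothetical):
load("//:defs.bzl", "json_extract", "json_test")

json_extract(
    name = "versions",
    srcs = ["release.json"],
    query = ".version",
    raw = True,
    suffix = ".txt",  # release.json -> release.txt
)

json_test(
    name = "validate_json",
    srcs = glob(["*.json"]),
)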
| 31.545455
| 79
| 0.556196
| 454
| 3,817
| 4.601322
| 0.389868
| 0.028722
| 0.028722
| 0.015318
| 0.189564
| 0.189564
| 0.171374
| 0.171374
| 0.138822
| 0.109143
| 0
| 0.003079
| 0.319361
| 3,817
| 120
| 80
| 31.808333
| 0.801001
| 0.183128
| 0
| 0.32967
| 0
| 0
| 0.189939
| 0.009029
| 0
| 0
| 0
| 0
| 0
| 1
| 0.021978
| false
| 0.032967
| 0
| 0
| 0.043956
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eb289039ceb1e6cb9ff0bbb176aa1f763781e163
| 692
|
py
|
Python
|
tests/test_instrumentation/test_base.py
|
cloudchacho/hedwig-python
|
1e4ca5472fe661ffd9d3cedd10a9ddc2daa0926b
|
[
"Apache-2.0"
] | null | null | null |
tests/test_instrumentation/test_base.py
|
cloudchacho/hedwig-python
|
1e4ca5472fe661ffd9d3cedd10a9ddc2daa0926b
|
[
"Apache-2.0"
] | 3
|
2021-06-25T20:52:50.000Z
|
2021-11-30T16:22:30.000Z
|
tests/test_instrumentation/test_base.py
|
cloudchacho/hedwig-python
|
1e4ca5472fe661ffd9d3cedd10a9ddc2daa0926b
|
[
"Apache-2.0"
] | null | null | null |
from unittest import mock
import pytest
# importorskip takes a module name, not an attribute path
trace = pytest.importorskip('opentelemetry.trace')
get_tracer = trace.get_tracer
@mock.patch('hedwig.backends.base.Message.exec_callback', autospec=True)
def test_message_handler_updates_span_name(mock_exec_callback, message, consumer_backend):
provider_metadata = mock.Mock()
tracer = get_tracer(__name__)
with tracer.start_as_current_span(test_message_handler_updates_span_name.__name__, {}) as span:
assert span.name == test_message_handler_updates_span_name.__name__
consumer_backend.message_handler(*message.serialize(), provider_metadata)
assert span.name == message.type
assert span.get_span_context().is_valid
| 40.705882
| 99
| 0.789017
| 90
| 692
| 5.577778
| 0.433333
| 0.079681
| 0.10757
| 0.149402
| 0.213147
| 0.213147
| 0.14741
| 0
| 0
| 0
| 0
| 0
| 0.124277
| 692
| 16
| 100
| 43.25
| 0.828383
| 0
| 0
| 0
| 0
| 0
| 0.104046
| 0.104046
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.083333
| false
| 0
| 0.25
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eb2a6dfadfc03cbe4b08fd33a47e0c0b3e370224
| 1,184
|
py
|
Python
|
Leetcode/SwapNodesInPairs.py
|
tswsxk/CodeBook
|
01b976418d64f5f94257ae0e2b36751afb93c105
|
[
"MIT"
] | null | null | null |
Leetcode/SwapNodesInPairs.py
|
tswsxk/CodeBook
|
01b976418d64f5f94257ae0e2b36751afb93c105
|
[
"MIT"
] | 1
|
2019-09-24T22:04:03.000Z
|
2019-09-24T22:04:03.000Z
|
Leetcode/SwapNodesInPairs.py
|
tswsxk/CodeBook
|
01b976418d64f5f94257ae0e2b36751afb93c105
|
[
"MIT"
] | null | null | null |
# Definition for singly-linked list.
class ListNode(object):
def __init__(self, x):
self.val = x
self.next = None
class Solution(object):
def swapPairs(self, head):
"""
:type head: ListNode
:rtype: ListNode
"""
nodeRec = []
check = head
precheck = head
count = 0
n = 2
while check:
nodeRec.append(check)
count += 1
if count == n:
count = 0
check = check.next
for i, x in enumerate(nodeRec):
if i > 0:
x.next = nodeRec[i - 1]
else:
x.next = check
if nodeRec[0] == head:
head = nodeRec[n - 1]
else:
precheck.next = nodeRec[n - 1]
precheck = nodeRec[0]
nodeRec = []
continue
check = check.next
return head
def initlist(listnum):
head = ListNode(listnum[0])
tail = head
for num in listnum[1:]:
tail.next = ListNode(num)
tail = tail.next
return head
if __name__ == "__main__":
sol = Solution()
sol.swapPairs(initlist([1,2,3,4]))
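The demo above discards the returned head; a small traversal helper (hypothetical, not in the original file) makes the result visible:
def print_list(head):
    # walk the singly-linked list and print values on one line
    vals = []
    while head:
        vals.append(str(head.val))
        head = head.next
    print(" -> ".join(vals))

print_list(Solution().swapPairs(initlist([1, 2, 3, 4])))  # expected: 2 -> 1 -> 4 -> 3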
| 24.163265
| 44
| 0.47973
| 131
| 1,184
| 4.244275
| 0.351145
| 0.032374
| 0.05036
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.023121
| 0.415541
| 1,184
| 49
| 45
| 24.163265
| 0.780347
| 0.061655
| 0
| 0.243902
| 0
| 0
| 0.007407
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.073171
| false
| 0
| 0
| 0
| 0.170732
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eb2c8b8b8d777e9a0438515ac0aea6cd01f5301b
| 2,696
|
py
|
Python
|
chess-board-0.2.0/chessboard/pieces.py
|
fshelobolin/irohbot
|
4ad4c554ecff1e1005fbecf26ee097c387bf357d
|
[
"MIT"
] | null | null | null |
chess-board-0.2.0/chessboard/pieces.py
|
fshelobolin/irohbot
|
4ad4c554ecff1e1005fbecf26ee097c387bf357d
|
[
"MIT"
] | null | null | null |
chess-board-0.2.0/chessboard/pieces.py
|
fshelobolin/irohbot
|
4ad4c554ecff1e1005fbecf26ee097c387bf357d
|
[
"MIT"
] | null | null | null |
"""
Ahira Justice, ADEFOKUN
justiceahira@gmail.com
"""
import os
import pygame
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
IMAGE_DIR = os.path.join(BASE_DIR, "images")
BLACK = "BLACK"
WHITE = "WHITE"
BISHOP = "BISHOP"
KING = "KING"
KNIGHT = "KNIGHT"
PAWN = "PAWN"
QUEEN = "QUEEN"
ROOK = "ROOK"
class Piece:
bBishop = pygame.image.load(os.path.join(IMAGE_DIR, "bB.png"))
bKing = pygame.image.load(os.path.join(IMAGE_DIR, "bK.png"))
bKnight = pygame.image.load(os.path.join(IMAGE_DIR, "bN.png"))
bPawn = pygame.image.load(os.path.join(IMAGE_DIR, "bP.png"))
bQueen = pygame.image.load(os.path.join(IMAGE_DIR, "bQ.png"))
bRook = pygame.image.load(os.path.join(IMAGE_DIR, "bR.png"))
wBishop = pygame.image.load(os.path.join(IMAGE_DIR, "wB.png"))
wKing = pygame.image.load(os.path.join(IMAGE_DIR, "wK.png"))
wKnight = pygame.image.load(os.path.join(IMAGE_DIR, "wN.png"))
wPawn = pygame.image.load(os.path.join(IMAGE_DIR, "wP.png"))
wQueen = pygame.image.load(os.path.join(IMAGE_DIR, "wQ.png"))
wRook = pygame.image.load(os.path.join(IMAGE_DIR, "wR.png"))
def __init__(self, color, piece, DISPLAYSURF):
self.position = None
self.sprite = None
self.DISPLAYSURF = DISPLAYSURF
self.color = color
self.piece = piece
self.setSprite()
def setPosition(self, position):
self.position = position
def setSprite(self):
if self.piece == BISHOP:
if self.color == BLACK:
self.sprite = Piece.bBishop
elif self.color == WHITE:
self.sprite = Piece.wBishop
elif self.piece == KING:
if self.color == BLACK:
self.sprite = Piece.bKing
elif self.color == WHITE:
self.sprite = Piece.wKing
elif self.piece == KNIGHT:
if self.color == BLACK:
self.sprite = Piece.bKnight
elif self.color == WHITE:
self.sprite = Piece.wKnight
elif self.piece == PAWN:
if self.color == BLACK:
self.sprite = Piece.bPawn
elif self.color == WHITE:
self.sprite = Piece.wPawn
elif self.piece == QUEEN:
if self.color == BLACK:
self.sprite = Piece.bQueen
elif self.color == WHITE:
self.sprite = Piece.wQueen
elif self.piece == ROOK:
if self.color == BLACK:
self.sprite = Piece.bRook
elif self.color == WHITE:
self.sprite = Piece.wRook
def displayPiece(self):
self.DISPLAYSURF.blit(self.sprite, self.position)
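A minimal usage sketch (assumes pygame can initialize a display and the `images/` directory contains the expected sprite files):
import pygame

pygame.init()
surface = pygame.display.set_mode((480, 480))

king = Piece(WHITE, KING, surface)   # sprite resolved in setSprite()
king.setPosition((0, 0))             # top-left corner in pixel coordinates
king.displayPiece()
pygame.display.update()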
| 29.304348
| 66
| 0.582715
| 340
| 2,696
| 4.552941
| 0.202941
| 0.05814
| 0.083979
| 0.131783
| 0.501292
| 0.501292
| 0.482558
| 0.255814
| 0
| 0
| 0
| 0
| 0.28635
| 2,696
| 91
| 67
| 29.626374
| 0.804574
| 0.017062
| 0
| 0.164179
| 0
| 0
| 0.044419
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.059701
| false
| 0
| 0.029851
| 0
| 0.283582
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eb361ceecffd166eeb0b6b3ee13b8be48e6f4d86
| 819
|
py
|
Python
|
setup.py
|
ktvng/cue
|
5f31c8898f3bc53a18956220f609489cd2bbe590
|
[
"MIT"
] | null | null | null |
setup.py
|
ktvng/cue
|
5f31c8898f3bc53a18956220f609489cd2bbe590
|
[
"MIT"
] | null | null | null |
setup.py
|
ktvng/cue
|
5f31c8898f3bc53a18956220f609489cd2bbe590
|
[
"MIT"
] | null | null | null |
"""Cue: Script Orchestration for Data Analysis
Cue lets you package your data analysis into simple actions which can be connected
into a dynamic data analysis pipeline with coverage over even complex data sets.
"""
DOCLINES = (__doc__ or '').split('\n')
from setuptools import find_packages, setup
setup(
name='py-cue',
package_dir={'cue/cue': 'cue'},
packages=find_packages(include=['cue']),
version='0.1.0',
description=DOCLINES[0],
long_description="\n".join(DOCLINES[2:]),
project_urls={
"Source Code": "https://github.com/ktvng/cue"
},
author='ktvng',
license='MIT',
python_requires='>=3.8',
install_requires=['pyyaml>=5.2'],
entry_points={
'console_scripts': [
'cue=cue.cli:run'
]
}
)
| 26.419355
| 85
| 0.616606
| 101
| 819
| 4.871287
| 0.70297
| 0.073171
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.014423
| 0.238095
| 819
| 30
| 86
| 27.3
| 0.774038
| 0.25641
| 0
| 0
| 0
| 0
| 0.211538
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.045455
| 0
| 0.045455
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eb3657629d59fdcbd7874c2822fc0707cfc70c45
| 1,689
|
py
|
Python
|
tests/getz.py
|
deflax/steinvord
|
709326ff219159a78f644c0adf3c5b224ed42804
|
[
"Zlib"
] | 1
|
2021-06-02T19:51:26.000Z
|
2021-06-02T19:51:26.000Z
|
tests/getz.py
|
deflax/steinvord
|
709326ff219159a78f644c0adf3c5b224ed42804
|
[
"Zlib"
] | null | null | null |
tests/getz.py
|
deflax/steinvord
|
709326ff219159a78f644c0adf3c5b224ed42804
|
[
"Zlib"
] | null | null | null |
#!/usr/bin/python3.2
#
# Zabbix API Python usage example
# Christoph Haas <email@christoph-haas.de>
#
username=''
password='1'
hostgroup=''
item_name='system.cpu.load[,avg1]'
zabbix_url=''
import zabbix_api
import sys
# Connect to Zabbix server
z=zabbix_api.ZabbixAPI(server=zabbix_url)
z.login(user=username, password=password)
# Get hosts in the hostgroup
hostgroup = z.hostgroup.get(
{
'filter': { 'name':hostgroup },
'sortfield': 'name',
'sortorder': 'ASC',
'limit':2,
'select_hosts':'extend'
})
print(hostgroup[0])
print("\n")
# iterate over the hosts returned via 'select_hosts' (the 'name' key is just the group name string)
for host in hostgroup[0]['hosts']:
hostname = host['host']
print("Host:", hostname)
print("Host-ID:", host['hostid'])
item = z.item.get({
'output':'extend',
'hostids':host['hostid'],
'filter':{'key_':item_name}})
if item:
print(item[0]['lastvalue'])
print("Item-ID:", item[0]['itemid'])
# Get history
lastvalue = z.history.get({
'history': item[0]['value_type'],
'itemids': item[0]['itemid'],
'output': 'extend',
# Sort by timestamp from new to old
'sortfield':'clock',
'sortorder':'DESC',
# Get only the first (=newest) entry
'limit': 1,
})
# CAVEAT! The history.get function must be told which type the
# values are (float, text, etc.). The item.value_type contains
# the number that needs to be passed to history.get.
if lastvalue:
lastvalue = lastvalue[0]['value']
print("Last value:", lastvalue)
else:
print("No item....")
print("---------------------------")
| 23.788732
| 70
| 0.562463
| 199
| 1,689
| 4.723618
| 0.467337
| 0.021277
| 0.023404
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010425
| 0.261693
| 1,689
| 70
| 71
| 24.128571
| 0.743384
| 0.235642
| 0
| 0.090909
| 0
| 0
| 0.231975
| 0.038401
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.045455
| 0.045455
| 0
| 0.045455
| 0.204545
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eb3b035d6a2b960bc0d338d7dd3785c2208f99f5
| 11,813
|
py
|
Python
|
server.py
|
uanthwal/starter-snake-python
|
6eff23ac9b9b0cfb9dbbf6d756a92a677bbf0417
|
[
"MIT"
] | null | null | null |
server.py
|
uanthwal/starter-snake-python
|
6eff23ac9b9b0cfb9dbbf6d756a92a677bbf0417
|
[
"MIT"
] | null | null | null |
server.py
|
uanthwal/starter-snake-python
|
6eff23ac9b9b0cfb9dbbf6d756a92a677bbf0417
|
[
"MIT"
] | null | null | null |
import copy
import math
import os
import random
import cherrypy
"""
This is a simple Battlesnake server written in Python.
For instructions see https://github.com/BattlesnakeOfficial/starter-snake-python/README.md
"""
class Battlesnake(object):
global neighbours
@cherrypy.expose
@cherrypy.tools.json_out()
def index(self):
# This function is called when you register your Battlesnake on play.battlesnake.com
# It controls your Battlesnake appearance and author permissions.
# TIP: If you open your Battlesnake URL in a browser you should see this data
return {
"apiversion": "1",
"author": "", # TODO: Your Battlesnake Username
"color": "#B765CD", # TODO: Personalize
"head": "default", # TODO: Personalize
"tail": "default", # TODO: Personalize
}
@cherrypy.expose
@cherrypy.tools.json_in()
def start(self):
# This function is called every time your snake is entered into a game.
# cherrypy.request.json contains information about the game that's about to be played.
data = cherrypy.request.json
print("START")
return "ok"
def get_head_radii_coordinates(self, head):
top_btm_coordinates = [
{
'x': head['x'],
'y': head['y'] - 1
}
,
{
'x': head['x'],
'y': head['y'] + 1
}
]
left_right_coordinates = [
{
'x': head['x'] - 1,
'y': head['y']
}
,
{
'x': head['x'] + 1,
'y': head['y']
}
]
diagonal_coord = [
{
'x': head['x'] + 1,
'y': head['y'] + 1
}
,
{
'x': head['x'] - 1,
'y': head['y'] - 1
}
]
return top_btm_coordinates + left_right_coordinates + diagonal_coord
def get_distance_bw_2_points(self, p1, p2):
return math.sqrt(((p1[0] - p2[0]) ** 2) + ((p1[1] - p2[1]) ** 2))
def get_neighbours(self, data):
neighbours = []
min_dist = 9999999
min_dist_id = ""
for snek in data['board']['snakes']:
if snek['id'] != data['you']['id']:
p1 = [data['you']['head']['x'], data['you']['head']['y']]
p2 = [snek['head']['x'], snek['head']['y']]
dist = self.get_distance_bw_2_points(p1, p2)
if dist < min_dist:
min_dist = dist  # remember the best distance; without this every snake overwrites min_dist_id
min_dist_id = snek['id']
neigh_coord = self.get_head_radii_coordinates(data['you']['head'])
for snek_bdy_coord in snek['body']:
if snek_bdy_coord in neigh_coord:
neighbours.append(snek['id'])
break
if len(neighbours) == 0:
neighbours.append(min_dist_id)
return neighbours
def will_go_out_of_bounds(self, data, direction):
head = data['you']['head']
if direction == "up" and head['y'] == data['board']['height'] - 1:
return True
elif direction == "down" and head['y'] == 0:
return True
elif direction == "right" and head['x'] == data['board']['width'] - 1:
return True
elif direction == "left" and head['x'] == 0:
return True
return False
def will_collide_with_self(self, data, direction):
head = data['you']['head']
your_body = data['you']['body']
if direction == "up" and {
'x': head['x'],
'y': head['y'] + 1
} in your_body:
return True
elif direction == "down" and {
'x': head['x'],
'y': head['y'] - 1
} in your_body:
return True
elif direction == "right" and {
'x': head['x'] + 1,
'y': head['y']
} in your_body:
return True
elif direction == "left" and {
'x': head['x'] - 1,
'y': head['y']
} in your_body:
return True
return False
def will_hit_another_snake(self, data, direction, neighbours):
head = data['you']['head']
for snake in data['board']['snakes']:
res = True
if len(neighbours) > 0:
res = data['you']['id'] != snake['id'] and snake['id'] in neighbours
else:
res = data['you']['id'] != snake['id']
if res:
opponent_body = snake['body']
if direction == "up":
if {
'x': head['x'],
'y': head['y'] + 1
} in opponent_body:
return True
elif direction == "down":
if {
'x': head['x'],
'y': head['y'] - 1
} in opponent_body:
return True
elif direction == "right":
if {
'x': head['x'] + 1,
'y': head['y']
} in opponent_body:
return True
elif direction == "left":
if {
'x': head['x'] - 1,
'y': head['y']
} in opponent_body:
return True
return False
def get_safe_move_x_from_data(self, moves_data, data):
move = None
for key in moves_data:
will_hit_another_snake = moves_data[key]['will_hit_another_snake']
will_go_out_of_bounds = moves_data[key]['will_go_out_of_bounds']
will_hit_self = moves_data[key]['will_hit_self']
if not will_hit_another_snake and not will_go_out_of_bounds and not will_hit_self and \
self.check_if_move_is_safe(data, key):
move = key
break
# if no move looks safe after checking with self.check_if_move_is_safe(data, key),
# fall back to any move that avoids immediate collisions and leave survival to luck :D
if move is None:
for key in moves_data:
will_hit_another_snake = moves_data[key]['will_hit_another_snake']
will_go_out_of_bounds = moves_data[key]['will_go_out_of_bounds']
will_hit_self = moves_data[key]['will_hit_self']
if not will_hit_another_snake and not will_go_out_of_bounds and not will_hit_self:
move = key
break
return move
def should_eat_food(self, data):
if data['you']['health'] < 40:
return True
return False
def get_distance_to_food(self, food_pos, head):
return abs(food_pos['x'] - head['x']) + abs(food_pos['y'] - head['y'])
def find_nearest_food(self, data):
if len(data['board']['food']) == 0:
return None
nearest = data['board']['food'][0]
min_distance = self.get_distance_to_food(data['board']['food'][0], data['you']['head'])
for food in data['board']['food']:
current_distance = self.get_distance_to_food(food, data['you']['head'])
if min_distance > current_distance:
nearest = food
min_distance = current_distance
return nearest
def get_direction_to_eat(self, data, moves_data):
nearest_food = self.find_nearest_food(data)
if nearest_food is not None:
print(f"there is food at: {nearest_food}")
shouldGoUp = False
shouldGoRight = False
shouldGoLeft = False
shouldGoDown = False
if nearest_food['x'] > data['you']['head']['x']:
# need to move right
shouldGoRight = True
print("1")
elif nearest_food['x'] < data['you']['head']['x']:
# need to move left
shouldGoLeft = True
print("2")
if nearest_food['y'] > data['you']['head']['y']:
# need to move up
shouldGoUp = True
print("3")
elif nearest_food['y'] < data['you']['head']['y']:
# need to move down
shouldGoDown = True
print("4")
if shouldGoRight and self.can_go_in_direction(moves_data, data, "right"):
return "right"
elif shouldGoLeft and self.can_go_in_direction(moves_data, data, "left"):
return "left"
elif shouldGoUp and self.can_go_in_direction(moves_data, data, "up"):
return "up"
elif shouldGoDown and self.can_go_in_direction(moves_data, data, "down"):
return "down"
return None
def can_go_in_direction(self, moves_data, data, key):
can_go = False
will_hit_another_snake = moves_data[key]['will_hit_another_snake']
will_go_out_of_bounds = moves_data[key]['will_go_out_of_bounds']
will_hit_self = moves_data[key]['will_hit_self']
if not will_hit_another_snake and not will_go_out_of_bounds and not will_hit_self and \
self.check_if_move_is_safe(data, key):
can_go = True
if not can_go:
return not will_hit_another_snake and not will_go_out_of_bounds and not will_hit_self
return can_go
@cherrypy.expose
@cherrypy.tools.json_in()
@cherrypy.tools.json_out()
def move(self):
# This function is called on every turn of a game. It's how your snake decides where to move.
# Valid moves are "up", "down", "left", or "right".
# TODO: Use the information in cherrypy.request.json to decide your next move.
data = cherrypy.request.json
print("data is:****************")
print(data)
print("data is:****************")
neighbours = self.get_neighbours(data)
possible_moves = ["up", "down", "left", "right"]
# random.shuffle(possible_moves)
# moves_data stores data for all 4 directions with their values for will_hit_another_snake and
# will_go_out_of_bounds
moves_data = {
"up": {}, "down": {}, "left": {}, "right": {}
}
for possible_move in possible_moves:
will_go_out_of_bounds = self.will_go_out_of_bounds(data, possible_move)
if not will_go_out_of_bounds:
will_hit_self = self.will_collide_with_self(data, possible_move)
will_hit_another_snake = self.will_hit_another_snake(
data, possible_move, neighbours)
moves_data[possible_move] = {
'will_hit_another_snake': will_hit_another_snake,
'will_hit_self': will_hit_self,
'will_go_out_of_bounds': will_go_out_of_bounds
}
else:
moves_data[possible_move] = {
'will_hit_another_snake': True,
'will_hit_self': True,
'will_go_out_of_bounds': will_go_out_of_bounds
}
move = None
# if self.should_eat_food(data):
# move = self.get_direction_to_eat(data, moves_data)
if move is None:
move = self.get_safe_move_x_from_data(moves_data,
data)
if move is None:
print("************* making a random move ****************")
move = random.choice(possible_moves)
print(f"MOVE: {move}")
return {"move": move}
def check_if_move_is_safe(self, data, move):
your_head_nxt_pos = copy.deepcopy(data['you']['head'])
if move == "up":
your_head_nxt_pos['y'] += 1
possible_heads = [{'x': your_head_nxt_pos['x'] - 1, 'y': your_head_nxt_pos['y']},
{'x': your_head_nxt_pos['x'] + 1, 'y': your_head_nxt_pos['y']},
{'x': your_head_nxt_pos['x'], 'y': your_head_nxt_pos['y'] + 1}]
for snake in data['board']['snakes']:
if snake['id'] != data['you']['id'] and snake['head'] in possible_heads:
return False
if move == "down":
your_head_nxt_pos['y'] -= 1
possible_heads = [{'x': your_head_nxt_pos['x'] - 1, 'y': your_head_nxt_pos['y']},
{'x': your_head_nxt_pos['x'], 'y': your_head_nxt_pos['y'] - 1},
{'x': your_head_nxt_pos['x'] + 1, 'y': your_head_nxt_pos['y']}]
for snake in data['board']['snakes']:
if snake['id'] != data['you']['id'] and snake['head'] in possible_heads:
return False
if move == "left":
your_head_nxt_pos['x'] -= 1
possible_heads = [{'x': your_head_nxt_pos['x'] - 1, 'y': your_head_nxt_pos['y']},
{'x': your_head_nxt_pos['x'], 'y': your_head_nxt_pos['y'] + 1},
{'x': your_head_nxt_pos['x'], 'y': your_head_nxt_pos['y'] - 1}]
for snake in data['board']['snakes']:
if snake['id'] != data['you']['id'] and snake['head'] in possible_heads:
return False
if move == "right":
your_head_nxt_pos['x'] += 1
possible_heads = [{'x': your_head_nxt_pos['x'] + 1, 'y': your_head_nxt_pos['y']},
{'x': your_head_nxt_pos['x'], 'y': your_head_nxt_pos['y'] + 1},
{'x': your_head_nxt_pos['x'], 'y': your_head_nxt_pos['y'] - 1}]
for snake in data['board']['snakes']:
if snake['id'] != data['you']['id'] and snake['head'] in possible_heads:
return False
return True
@cherrypy.expose
@cherrypy.tools.json_in()
def end(self):
# This function is called when a game your snake was in ends.
# It's purely for informational purposes, you don't have to make any decisions here.
data = cherrypy.request.json
print("END")
return "ok"
if __name__ == "__main__":
server = Battlesnake()
cherrypy.config.update({"server.socket_host": "0.0.0.0"})
cherrypy.config.update({
"server.socket_port":
int(os.environ.get("PORT", "8080")),
})
print("Starting Battlesnake Server...")
cherrypy.quickstart(server)
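A quick offline sanity check of the bounds logic above, using a hand-built payload shaped like a Battlesnake move request (no server needed; the board values are made up):
fake_data = {
    "board": {"width": 11, "height": 11, "snakes": [], "food": []},
    "you": {"id": "me", "head": {"x": 0, "y": 0},
            "body": [{"x": 0, "y": 0}], "health": 100},
}
checker = Battlesnake()
assert checker.will_go_out_of_bounds(fake_data, "down") is True   # y == 0
assert checker.will_go_out_of_bounds(fake_data, "left") is True   # x == 0
assert checker.will_go_out_of_bounds(fake_data, "up") is False    # board is 11 tall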
| 31.501333
| 108
| 0.632439
| 1,766
| 11,813
| 3.98188
| 0.12684
| 0.030859
| 0.045364
| 0.057736
| 0.525313
| 0.466866
| 0.38979
| 0.350683
| 0.327361
| 0.306883
| 0
| 0.009097
| 0.209007
| 11,813
| 374
| 109
| 31.585562
| 0.743472
| 0.107678
| 0
| 0.414013
| 0
| 0
| 0.110789
| 0.020749
| 0
| 0
| 0
| 0.002674
| 0
| 1
| 0.05414
| false
| 0
| 0.015924
| 0.009554
| 0.200637
| 0.041401
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eb3c1435400a880f8b3833ff6b37ef02c5237e11
| 59,098
|
py
|
Python
|
google/devtools/testing/v1/devtools-testing-v1-py/google/devtools/testing_v1/types/test_execution.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 7
|
2021-02-21T10:39:41.000Z
|
2021-12-07T07:31:28.000Z
|
google/devtools/testing/v1/devtools-testing-v1-py/google/devtools/testing_v1/types/test_execution.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 6
|
2021-02-02T23:46:11.000Z
|
2021-11-15T01:46:02.000Z
|
google/devtools/testing/v1/devtools-testing-v1-py/google/devtools/testing_v1/types/test_execution.py
|
googleapis/googleapis-gen
|
d84824c78563d59b0e58d5664bfaa430e9ad7e7a
|
[
"Apache-2.0"
] | 4
|
2021-01-28T23:25:45.000Z
|
2021-08-30T01:55:16.000Z
|
# -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import proto # type: ignore
from google.protobuf import duration_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
__protobuf__ = proto.module(
package='google.devtools.testing.v1',
manifest={
'OrchestratorOption',
'RoboActionType',
'InvalidMatrixDetails',
'TestState',
'OutcomeSummary',
'TestMatrix',
'TestExecution',
'TestSpecification',
'SystraceSetup',
'TestSetup',
'IosTestSetup',
'EnvironmentVariable',
'Account',
'GoogleAuto',
'Apk',
'AppBundle',
'DeviceFile',
'ObbFile',
'RegularFile',
'IosDeviceFile',
'AndroidTestLoop',
'IosXcTest',
'IosTestLoop',
'AndroidInstrumentationTest',
'AndroidRoboTest',
'RoboDirective',
'RoboStartingIntent',
'LauncherActivityIntent',
'StartActivityIntent',
'EnvironmentMatrix',
'AndroidDeviceList',
'IosDeviceList',
'AndroidMatrix',
'ClientInfo',
'ClientInfoDetail',
'ResultStorage',
'ToolResultsHistory',
'ToolResultsExecution',
'ToolResultsStep',
'GoogleCloudStorage',
'FileReference',
'Environment',
'AndroidDevice',
'IosDevice',
'TestDetails',
'InvalidRequestDetail',
'ShardingOption',
'UniformSharding',
'ManualSharding',
'TestTargetsForShard',
'Shard',
'CreateTestMatrixRequest',
'GetTestMatrixRequest',
'CancelTestMatrixRequest',
'CancelTestMatrixResponse',
},
)
class OrchestratorOption(proto.Enum):
r"""Specifies how to execute the test."""
ORCHESTRATOR_OPTION_UNSPECIFIED = 0
USE_ORCHESTRATOR = 1
DO_NOT_USE_ORCHESTRATOR = 2
class RoboActionType(proto.Enum):
r"""Actions which Robo can perform on UI elements."""
ACTION_TYPE_UNSPECIFIED = 0
SINGLE_CLICK = 1
ENTER_TEXT = 2
IGNORE = 3
class InvalidMatrixDetails(proto.Enum):
r"""The detailed reason that a Matrix was deemed INVALID."""
INVALID_MATRIX_DETAILS_UNSPECIFIED = 0
DETAILS_UNAVAILABLE = 1
MALFORMED_APK = 2
MALFORMED_TEST_APK = 3
NO_MANIFEST = 4
NO_PACKAGE_NAME = 5
INVALID_PACKAGE_NAME = 31
TEST_SAME_AS_APP = 6
NO_INSTRUMENTATION = 7
NO_SIGNATURE = 20
INSTRUMENTATION_ORCHESTRATOR_INCOMPATIBLE = 18
NO_TEST_RUNNER_CLASS = 19
NO_LAUNCHER_ACTIVITY = 8
FORBIDDEN_PERMISSIONS = 9
INVALID_ROBO_DIRECTIVES = 10
INVALID_RESOURCE_NAME = 33
INVALID_DIRECTIVE_ACTION = 34
TEST_LOOP_INTENT_FILTER_NOT_FOUND = 12
SCENARIO_LABEL_NOT_DECLARED = 13
SCENARIO_LABEL_MALFORMED = 14
SCENARIO_NOT_DECLARED = 15
DEVICE_ADMIN_RECEIVER = 17
MALFORMED_XC_TEST_ZIP = 11
BUILT_FOR_IOS_SIMULATOR = 24
NO_TESTS_IN_XC_TEST_ZIP = 25
USE_DESTINATION_ARTIFACTS = 26
TEST_NOT_APP_HOSTED = 28
PLIST_CANNOT_BE_PARSED = 30
TEST_ONLY_APK = 21
MALFORMED_IPA = 22
MISSING_URL_SCHEME = 35
MALFORMED_APP_BUNDLE = 36
NO_CODE_APK = 23
INVALID_INPUT_APK = 27
INVALID_APK_PREVIEW_SDK = 29
class TestState(proto.Enum):
r"""The state (i.e., progress) of a test execution or matrix."""
TEST_STATE_UNSPECIFIED = 0
VALIDATING = 8
PENDING = 1
RUNNING = 2
FINISHED = 3
ERROR = 4
UNSUPPORTED_ENVIRONMENT = 5
INCOMPATIBLE_ENVIRONMENT = 9
INCOMPATIBLE_ARCHITECTURE = 10
CANCELLED = 6
INVALID = 7
class OutcomeSummary(proto.Enum):
r"""Outcome summary for a finished test matrix."""
OUTCOME_SUMMARY_UNSPECIFIED = 0
SUCCESS = 1
FAILURE = 2
INCONCLUSIVE = 3
SKIPPED = 4
class TestMatrix(proto.Message):
r"""TestMatrix captures all details about a test. It contains the
environment configuration, test specification, test executions
and overall state and outcome.
Attributes:
test_matrix_id (str):
Output only. Unique id set by the service.
project_id (str):
The cloud project that owns the test matrix.
client_info (google.devtools.testing_v1.types.ClientInfo):
Information about the client which invoked
the test.
test_specification (google.devtools.testing_v1.types.TestSpecification):
Required. How to run the test.
environment_matrix (google.devtools.testing_v1.types.EnvironmentMatrix):
Required. The devices the tests are being
executed on.
test_executions (Sequence[google.devtools.testing_v1.types.TestExecution]):
Output only. The list of test executions that
the service creates for this matrix.
result_storage (google.devtools.testing_v1.types.ResultStorage):
Required. Where the results for the matrix
are written.
state (google.devtools.testing_v1.types.TestState):
Output only. Indicates the current progress
of the test matrix.
timestamp (google.protobuf.timestamp_pb2.Timestamp):
Output only. The time this test matrix was
initially created.
invalid_matrix_details (google.devtools.testing_v1.types.InvalidMatrixDetails):
Output only. Describes why the matrix is
considered invalid. Only useful for matrices in
the INVALID state.
flaky_test_attempts (int):
The number of times a TestExecution should be
re-attempted if one or more of its test cases
fail for any reason. The maximum number of
reruns allowed is 10.
Default is 0, which implies no reruns.
outcome_summary (google.devtools.testing_v1.types.OutcomeSummary):
Output Only. The overall outcome of the test.
Only set when the test matrix state is FINISHED.
fail_fast (bool):
If true, only a single attempt at most will
be made to run each execution/shard in the
matrix. Flaky test attempts are not affected.
Normally, 2 or more attempts are made if a
potential infrastructure issue is detected.
This feature is for latency sensitive workloads.
The incidence of execution failures may be
significantly greater for fail-fast matrices and
support is more limited because of that
expectation.
"""
test_matrix_id = proto.Field(
proto.STRING,
number=1,
)
project_id = proto.Field(
proto.STRING,
number=7,
)
client_info = proto.Field(
proto.MESSAGE,
number=10,
message='ClientInfo',
)
test_specification = proto.Field(
proto.MESSAGE,
number=3,
message='TestSpecification',
)
environment_matrix = proto.Field(
proto.MESSAGE,
number=4,
message='EnvironmentMatrix',
)
test_executions = proto.RepeatedField(
proto.MESSAGE,
number=5,
message='TestExecution',
)
result_storage = proto.Field(
proto.MESSAGE,
number=6,
message='ResultStorage',
)
state = proto.Field(
proto.ENUM,
number=8,
enum='TestState',
)
timestamp = proto.Field(
proto.MESSAGE,
number=9,
message=timestamp_pb2.Timestamp,
)
invalid_matrix_details = proto.Field(
proto.ENUM,
number=11,
enum='InvalidMatrixDetails',
)
flaky_test_attempts = proto.Field(
proto.INT32,
number=13,
)
outcome_summary = proto.Field(
proto.ENUM,
number=14,
enum='OutcomeSummary',
)
fail_fast = proto.Field(
proto.BOOL,
number=17,
)
class TestExecution(proto.Message):
r"""A single test executed in a single environment.
Attributes:
id (str):
Output only. Unique id set by the service.
matrix_id (str):
Output only. Id of the containing TestMatrix.
project_id (str):
Output only. The cloud project that owns the
test execution.
test_specification (google.devtools.testing_v1.types.TestSpecification):
Output only. How to run the test.
shard (google.devtools.testing_v1.types.Shard):
Output only. Details about the shard.
environment (google.devtools.testing_v1.types.Environment):
Output only. How the host machine(s) are
configured.
state (google.devtools.testing_v1.types.TestState):
Output only. Indicates the current progress
of the test execution (e.g., FINISHED).
tool_results_step (google.devtools.testing_v1.types.ToolResultsStep):
Output only. Where the results for this
execution are written.
timestamp (google.protobuf.timestamp_pb2.Timestamp):
Output only. The time this test execution was
initially created.
test_details (google.devtools.testing_v1.types.TestDetails):
Output only. Additional details about the
running test.
"""
id = proto.Field(
proto.STRING,
number=1,
)
matrix_id = proto.Field(
proto.STRING,
number=9,
)
project_id = proto.Field(
proto.STRING,
number=10,
)
test_specification = proto.Field(
proto.MESSAGE,
number=3,
message='TestSpecification',
)
shard = proto.Field(
proto.MESSAGE,
number=12,
message='Shard',
)
environment = proto.Field(
proto.MESSAGE,
number=4,
message='Environment',
)
state = proto.Field(
proto.ENUM,
number=5,
enum='TestState',
)
tool_results_step = proto.Field(
proto.MESSAGE,
number=11,
message='ToolResultsStep',
)
timestamp = proto.Field(
proto.MESSAGE,
number=7,
message=timestamp_pb2.Timestamp,
)
test_details = proto.Field(
proto.MESSAGE,
number=8,
message='TestDetails',
)
class TestSpecification(proto.Message):
r"""A description of how to run the test.
Attributes:
test_timeout (google.protobuf.duration_pb2.Duration):
Max time a test execution is allowed to run
before it is automatically cancelled.
The default value is 5 min.
test_setup (google.devtools.testing_v1.types.TestSetup):
Test setup requirements for Android e.g.
files to install, bootstrap scripts.
ios_test_setup (google.devtools.testing_v1.types.IosTestSetup):
Test setup requirements for iOS.
android_instrumentation_test (google.devtools.testing_v1.types.AndroidInstrumentationTest):
An Android instrumentation test.
android_robo_test (google.devtools.testing_v1.types.AndroidRoboTest):
An Android robo test.
android_test_loop (google.devtools.testing_v1.types.AndroidTestLoop):
An Android Application with a Test Loop.
ios_xc_test (google.devtools.testing_v1.types.IosXcTest):
An iOS XCTest, via an .xctestrun file.
ios_test_loop (google.devtools.testing_v1.types.IosTestLoop):
An iOS application with a test loop.
disable_video_recording (bool):
Disables video recording. May reduce test
latency.
disable_performance_metrics (bool):
Disables performance metrics recording. May
reduce test latency.
"""
test_timeout = proto.Field(
proto.MESSAGE,
number=1,
message=duration_pb2.Duration,
)
test_setup = proto.Field(
proto.MESSAGE,
number=6,
oneof='setup',
message='TestSetup',
)
ios_test_setup = proto.Field(
proto.MESSAGE,
number=14,
oneof='setup',
message='IosTestSetup',
)
android_instrumentation_test = proto.Field(
proto.MESSAGE,
number=2,
oneof='test',
message='AndroidInstrumentationTest',
)
android_robo_test = proto.Field(
proto.MESSAGE,
number=3,
oneof='test',
message='AndroidRoboTest',
)
android_test_loop = proto.Field(
proto.MESSAGE,
number=9,
oneof='test',
message='AndroidTestLoop',
)
ios_xc_test = proto.Field(
proto.MESSAGE,
number=13,
oneof='test',
message='IosXcTest',
)
ios_test_loop = proto.Field(
proto.MESSAGE,
number=15,
oneof='test',
message='IosTestLoop',
)
disable_video_recording = proto.Field(
proto.BOOL,
number=10,
)
disable_performance_metrics = proto.Field(
proto.BOOL,
number=11,
)
class SystraceSetup(proto.Message):
r"""
Attributes:
duration_seconds (int):
Systrace duration in seconds.
Should be between 1 and 30 seconds. 0 disables
systrace.
"""
duration_seconds = proto.Field(
proto.INT32,
number=1,
)
class TestSetup(proto.Message):
r"""A description of how to set up the Android device prior to
running the test.
Attributes:
files_to_push (Sequence[google.devtools.testing_v1.types.DeviceFile]):
List of files to push to the device before
starting the test.
directories_to_pull (Sequence[str]):
List of directories on the device to upload to GCS at the
end of the test; they must be absolute paths under /sdcard,
/storage or /data/local/tmp. Path names are restricted to
characters a-z A-Z 0-9 \_ - . + and /
Note: The paths /sdcard and /data will be made available and
treated as implicit path substitutions. E.g. if /sdcard on a
particular device does not map to external storage, the
system will replace it with the external storage path prefix
for that device.
additional_apks (Sequence[google.devtools.testing_v1.types.Apk]):
APKs to install in addition to those being
directly tested. Currently capped at 100.
account (google.devtools.testing_v1.types.Account):
The device will be logged in on this account
for the duration of the test.
network_profile (str):
The network traffic profile used for running the test.
Available network profiles can be queried by using the
NETWORK_CONFIGURATION environment type when calling
TestEnvironmentDiscoveryService.GetTestEnvironmentCatalog.
environment_variables (Sequence[google.devtools.testing_v1.types.EnvironmentVariable]):
Environment variables to set for the test
(only applicable for instrumentation tests).
systrace (google.devtools.testing_v1.types.SystraceSetup):
Systrace configuration for the run.
If set a systrace will be taken, starting on
test start and lasting for the configured
duration. The systrace file thus obtained is put
in the results bucket together with the other
artifacts from the run.
dont_autogrant_permissions (bool):
Whether to prevent all runtime permissions to
be granted at app install
"""
files_to_push = proto.RepeatedField(
proto.MESSAGE,
number=1,
message='DeviceFile',
)
directories_to_pull = proto.RepeatedField(
proto.STRING,
number=2,
)
additional_apks = proto.RepeatedField(
proto.MESSAGE,
number=3,
message='Apk',
)
account = proto.Field(
proto.MESSAGE,
number=4,
message='Account',
)
network_profile = proto.Field(
proto.STRING,
number=5,
)
environment_variables = proto.RepeatedField(
proto.MESSAGE,
number=6,
message='EnvironmentVariable',
)
systrace = proto.Field(
proto.MESSAGE,
number=9,
message='SystraceSetup',
)
dont_autogrant_permissions = proto.Field(
proto.BOOL,
number=23,
)
class IosTestSetup(proto.Message):
r"""A description of how to set up an iOS device prior to running
the test.
Attributes:
network_profile (str):
The network traffic profile used for running the test.
Available network profiles can be queried by using the
NETWORK_CONFIGURATION environment type when calling
TestEnvironmentDiscoveryService.GetTestEnvironmentCatalog.
additional_ipas (Sequence[google.devtools.testing_v1.types.FileReference]):
iOS apps to install in addition to those
being directly tested.
push_files (Sequence[google.devtools.testing_v1.types.IosDeviceFile]):
List of files to push to the device before
starting the test.
pull_directories (Sequence[google.devtools.testing_v1.types.IosDeviceFile]):
List of directories on the device to upload
to Cloud Storage at the end of the test.
Directories should either be in a shared
directory (e.g. /private/var/mobile/Media) or
within an accessible directory inside the app's
filesystem (e.g. /Documents) by specifying the
bundle id.
"""
network_profile = proto.Field(
proto.STRING,
number=1,
)
additional_ipas = proto.RepeatedField(
proto.MESSAGE,
number=2,
message='FileReference',
)
push_files = proto.RepeatedField(
proto.MESSAGE,
number=3,
message='IosDeviceFile',
)
pull_directories = proto.RepeatedField(
proto.MESSAGE,
number=4,
message='IosDeviceFile',
)
class EnvironmentVariable(proto.Message):
r"""A key-value pair passed as an environment variable to the
test.
Attributes:
key (str):
Key for the environment variable.
value (str):
Value for the environment variable.
"""
key = proto.Field(
proto.STRING,
number=1,
)
value = proto.Field(
proto.STRING,
number=2,
)
class Account(proto.Message):
r"""Identifies an account and how to log into it.
Attributes:
google_auto (google.devtools.testing_v1.types.GoogleAuto):
An automatic google login account.
"""
google_auto = proto.Field(
proto.MESSAGE,
number=1,
oneof='account_type',
message='GoogleAuto',
)
class GoogleAuto(proto.Message):
r"""Enables automatic Google account login.
If set, the service automatically generates a Google test
account and adds it to the device, before executing the test.
Note that test accounts might be reused.
Many applications show their full set of functionalities when an
account is present on the device. Logging into the device with
these generated accounts allows testing more functionalities.
"""
class Apk(proto.Message):
r"""An Android package file to install.
Attributes:
location (google.devtools.testing_v1.types.FileReference):
The path to an APK to be installed on the
device before the test begins.
package_name (str):
The java package for the APK to be installed.
Value is determined by examining the
application's manifest.
"""
location = proto.Field(
proto.MESSAGE,
number=1,
message='FileReference',
)
package_name = proto.Field(
proto.STRING,
number=2,
)
class AppBundle(proto.Message):
r"""An Android App Bundle file format, containing a
BundleConfig.pb file, a base module directory, zero or more
dynamic feature module directories. <p>See
https://developer.android.com/guide/app-bundle/build for
guidance on building App Bundles.
Attributes:
bundle_location (google.devtools.testing_v1.types.FileReference):
.aab file representing the app bundle under
test.
"""
bundle_location = proto.Field(
proto.MESSAGE,
number=1,
oneof='bundle',
message='FileReference',
)
class DeviceFile(proto.Message):
r"""A single device file description.
Attributes:
obb_file (google.devtools.testing_v1.types.ObbFile):
A reference to an opaque binary blob file.
regular_file (google.devtools.testing_v1.types.RegularFile):
A reference to a regular file.
"""
obb_file = proto.Field(
proto.MESSAGE,
number=1,
oneof='device_file',
message='ObbFile',
)
regular_file = proto.Field(
proto.MESSAGE,
number=2,
oneof='device_file',
message='RegularFile',
)
class ObbFile(proto.Message):
r"""An opaque binary blob file to install on the device before
the test starts.
Attributes:
obb_file_name (str):
Required. OBB file name which must conform to the format as
specified by Android e.g.
[main|patch].0300110.com.example.android.obb which will be
installed into <shared-storage>/Android/obb/<package-name>/
on the device.
obb (google.devtools.testing_v1.types.FileReference):
Required. Opaque Binary Blob (OBB) file(s) to
install on the device.
"""
obb_file_name = proto.Field(
proto.STRING,
number=1,
)
obb = proto.Field(
proto.MESSAGE,
number=2,
message='FileReference',
)
class RegularFile(proto.Message):
r"""A file or directory to install on the device before the test
starts.
Attributes:
content (google.devtools.testing_v1.types.FileReference):
Required. The source file.
device_path (str):
Required. Where to put the content on the device. Must be an
absolute, allowlisted path. If the file exists, it will be
replaced. The following device-side directories and any of
their subdirectories are allowlisted:
.. raw:: html
<p>${EXTERNAL_STORAGE}, /sdcard, or /storage</p>
<p>${ANDROID_DATA}/local/tmp, or /data/local/tmp</p>
<p>Specifying a path outside of these directory trees is invalid.
.. raw:: html
<p> The paths /sdcard and /data will be made available and treated as
implicit path substitutions. E.g. if /sdcard on a particular device does
not map to external storage, the system will replace it with the external
storage path prefix for that device and copy the file there.
.. raw:: html
<p> It is strongly advised to use the <a href=
"http://developer.android.com/reference/android/os/Environment.html">
Environment API</a> in app and test code to access files on the device in a
portable way.
"""
content = proto.Field(
proto.MESSAGE,
number=1,
message='FileReference',
)
device_path = proto.Field(
proto.STRING,
number=2,
)
class IosDeviceFile(proto.Message):
r"""A file or directory to install on the device before the test
starts.
Attributes:
content (google.devtools.testing_v1.types.FileReference):
The source file
bundle_id (str):
The bundle id of the app where this file
lives.
iOS apps sandbox their own filesystem, so app
files must specify which app installed on the
device.
device_path (str):
Location of the file on the device, inside
the app's sandboxed filesystem
"""
content = proto.Field(
proto.MESSAGE,
number=1,
message='FileReference',
)
bundle_id = proto.Field(
proto.STRING,
number=2,
)
device_path = proto.Field(
proto.STRING,
number=3,
)
class AndroidTestLoop(proto.Message):
r"""A test of an Android Application with a Test Loop.
The intent \<intent-name\> will be implicitly added, since Games
is the only user of this api, for the time being.
Attributes:
app_apk (google.devtools.testing_v1.types.FileReference):
The APK for the application under test.
app_bundle (google.devtools.testing_v1.types.AppBundle):
A multi-apk app bundle for the application
under test.
app_package_id (str):
The java package for the application under
test. The default is determined by examining the
application's manifest.
scenarios (Sequence[int]):
The list of scenarios that should be run
during the test. The default is all test loops,
derived from the application's manifest.
scenario_labels (Sequence[str]):
The list of scenario labels that should be run during the
test. The scenario labels should map to labels defined in
the application's manifest. For example, player_experience
and com.google.test.loops.player_experience add all of the
loops labeled in the manifest with the
com.google.test.loops.player_experience name to the
execution. Scenarios can also be specified in the scenarios
field.
"""
app_apk = proto.Field(
proto.MESSAGE,
number=1,
oneof='app_under_test',
message='FileReference',
)
app_bundle = proto.Field(
proto.MESSAGE,
number=5,
oneof='app_under_test',
message='AppBundle',
)
app_package_id = proto.Field(
proto.STRING,
number=2,
)
scenarios = proto.RepeatedField(
proto.INT32,
number=3,
)
scenario_labels = proto.RepeatedField(
proto.STRING,
number=4,
)
class IosXcTest(proto.Message):
r"""A test of an iOS application that uses the XCTest framework.
Xcode supports the option to "build for testing", which
generates an .xctestrun file that contains a test specification
(arguments, test methods, etc). This test type accepts a zip
file containing the .xctestrun file and the corresponding
contents of the Build/Products directory that contains all the
binaries needed to run the tests.
Attributes:
tests_zip (google.devtools.testing_v1.types.FileReference):
Required. The .zip containing the .xctestrun
file and the contents of the
DerivedData/Build/Products directory. The
.xctestrun file in this zip is ignored if the
xctestrun field is specified.
xctestrun (google.devtools.testing_v1.types.FileReference):
An .xctestrun file that will override the
.xctestrun file in the tests zip. Because the
.xctestrun file contains environment variables
along with test methods to run and/or ignore,
this can be useful for sharding tests. Default
is taken from the tests zip.
xcode_version (str):
The Xcode version that should be used for the
test. Use the TestEnvironmentDiscoveryService to
get supported options. Defaults to the latest
Xcode version Firebase Test Lab supports.
app_bundle_id (str):
Output only. The bundle id for the
application under test.
test_special_entitlements (bool):
The option to test special app entitlements.
Setting this would re-sign the app having
special entitlements with an explicit
application-identifier. Currently supports
testing aps-environment entitlement.
"""
tests_zip = proto.Field(
proto.MESSAGE,
number=1,
message='FileReference',
)
xctestrun = proto.Field(
proto.MESSAGE,
number=2,
message='FileReference',
)
xcode_version = proto.Field(
proto.STRING,
number=3,
)
app_bundle_id = proto.Field(
proto.STRING,
number=4,
)
test_special_entitlements = proto.Field(
proto.BOOL,
number=6,
)
class IosTestLoop(proto.Message):
r"""A test of an iOS application that implements one or more game
loop scenarios. This test type accepts an archived application
(.ipa file) and a list of integer scenarios that will be
executed on the app sequentially.
Attributes:
app_ipa (google.devtools.testing_v1.types.FileReference):
Required. The .ipa of the application to
test.
scenarios (Sequence[int]):
The list of scenarios that should be run
during the test. Defaults to the single scenario
0 if unspecified.
app_bundle_id (str):
Output only. The bundle id for the
application under test.
"""
app_ipa = proto.Field(
proto.MESSAGE,
number=1,
message='FileReference',
)
scenarios = proto.RepeatedField(
proto.INT32,
number=2,
)
app_bundle_id = proto.Field(
proto.STRING,
number=3,
)
class AndroidInstrumentationTest(proto.Message):
r"""A test of an Android application that can control an Android
component independently of its normal lifecycle. Android
instrumentation tests run an application APK and test APK inside the
same process on a virtual or physical AndroidDevice. They also
specify a test runner class, such as com.google.GoogleTestRunner,
which can vary on the specific instrumentation framework chosen.
See http://developer.android.com/tools/testing/testing_android.html
for more information on types of Android tests.
Attributes:
app_apk (google.devtools.testing_v1.types.FileReference):
The APK for the application under test.
app_bundle (google.devtools.testing_v1.types.AppBundle):
A multi-apk app bundle for the application
under test.
test_apk (google.devtools.testing_v1.types.FileReference):
Required. The APK containing the test code to
be executed.
app_package_id (str):
The java package for the application under
test. The default value is determined by
examining the application's manifest.
test_package_id (str):
The java package for the test to be executed.
The default value is determined by examining the
application's manifest.
test_runner_class (str):
The InstrumentationTestRunner class.
The default value is determined by examining the
application's manifest.
test_targets (Sequence[str]):
Each target must be fully qualified with the package name or
class name, in one of these formats:
- "package package_name"
- "class package_name.class_name"
- "class package_name.class_name#method_name"
If empty, all targets in the module will be run.
orchestrator_option (google.devtools.testing_v1.types.OrchestratorOption):
The option of whether to run each test within its own
invocation of instrumentation with Android Test Orchestrator
or not. \*\* Orchestrator is only compatible with
AndroidJUnitRunner version 1.0 or higher! \*\* Orchestrator
offers the following benefits:
- No shared state
- Crashes are isolated
- Logs are scoped per test
See
https://developer.android.com/training/testing/junit-runner.html#using-android-test-orchestrator
for more information about Android Test Orchestrator.
If not set, the test will be run without the orchestrator.
sharding_option (google.devtools.testing_v1.types.ShardingOption):
The option to run tests in multiple shards in
parallel.
"""
app_apk = proto.Field(
proto.MESSAGE,
number=1,
oneof='app_under_test',
message='FileReference',
)
app_bundle = proto.Field(
proto.MESSAGE,
number=8,
oneof='app_under_test',
message='AppBundle',
)
test_apk = proto.Field(
proto.MESSAGE,
number=2,
message='FileReference',
)
app_package_id = proto.Field(
proto.STRING,
number=3,
)
test_package_id = proto.Field(
proto.STRING,
number=4,
)
test_runner_class = proto.Field(
proto.STRING,
number=5,
)
test_targets = proto.RepeatedField(
proto.STRING,
number=6,
)
orchestrator_option = proto.Field(
proto.ENUM,
number=7,
enum='OrchestratorOption',
)
sharding_option = proto.Field(
proto.MESSAGE,
number=9,
message='ShardingOption',
)
class AndroidRoboTest(proto.Message):
r"""A test of an android application that explores the
application on a virtual or physical Android Device, finding
culprits and crashes as it goes. Next tag: 30
Attributes:
app_apk (google.devtools.testing_v1.types.FileReference):
The APK for the application under test.
app_bundle (google.devtools.testing_v1.types.AppBundle):
A multi-apk app bundle for the application
under test.
app_package_id (str):
The java package for the application under
test. The default value is determined by
examining the application's manifest.
app_initial_activity (str):
The initial activity that should be used to
start the app.
max_depth (int):
The max depth of the traversal stack Robo can
explore. Needs to be at least 2 to make Robo
explore the app beyond the first activity.
Default is 50.
max_steps (int):
The max number of steps Robo can execute.
Default is no limit.
robo_directives (Sequence[google.devtools.testing_v1.types.RoboDirective]):
A set of directives Robo should apply during
the crawl. This allows users to customize the
crawl. For example, the username and password
for a test account can be provided.
robo_script (google.devtools.testing_v1.types.FileReference):
A JSON file with a sequence of actions Robo
should perform as a prologue for the crawl.
starting_intents (Sequence[google.devtools.testing_v1.types.RoboStartingIntent]):
The intents used to launch the app for the
crawl. If none are provided, then the main
launcher activity is launched. If some are
provided, then only those provided are launched
(the main launcher activity must be provided
explicitly).
"""
app_apk = proto.Field(
proto.MESSAGE,
number=1,
oneof='app_under_test',
message='FileReference',
)
app_bundle = proto.Field(
proto.MESSAGE,
number=16,
oneof='app_under_test',
message='AppBundle',
)
app_package_id = proto.Field(
proto.STRING,
number=2,
)
app_initial_activity = proto.Field(
proto.STRING,
number=3,
)
max_depth = proto.Field(
proto.INT32,
number=7,
)
max_steps = proto.Field(
proto.INT32,
number=8,
)
robo_directives = proto.RepeatedField(
proto.MESSAGE,
number=11,
message='RoboDirective',
)
robo_script = proto.Field(
proto.MESSAGE,
number=13,
message='FileReference',
)
starting_intents = proto.RepeatedField(
proto.MESSAGE,
number=15,
message='RoboStartingIntent',
)
class RoboDirective(proto.Message):
r"""Directs Robo to interact with a specific UI element if it is
encountered during the crawl. Currently, Robo can perform text
entry or element click.
Attributes:
resource_name (str):
Required. The android resource name of the
target UI element. For example,
in Java: R.string.foo
in xml: @string/foo
Only the "foo" part is needed.
Reference doc:
            https://developer.android.com/guide/topics/resources/accessing-resources.html
input_text (str):
The text that Robo is directed to set. If left empty, the
directive will be treated as a CLICK on the element matching
the resource_name.
action_type (google.devtools.testing_v1.types.RoboActionType):
Required. The type of action that Robo should
perform on the specified element.
"""
resource_name = proto.Field(
proto.STRING,
number=1,
)
input_text = proto.Field(
proto.STRING,
number=2,
)
action_type = proto.Field(
proto.ENUM,
number=3,
enum='RoboActionType',
)
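# Illustrative example (not part of the generated module; the RoboActionType
# values ENTER_TEXT and SINGLE_CLICK are assumed from the published API):
# directives that type a username and then click the sign-in button.
#
#     directives = [
#         RoboDirective(resource_name="username_field",
#                       input_text="testuser@example.com",
#                       action_type=RoboActionType.ENTER_TEXT),
#         RoboDirective(resource_name="sign_in_button",
#                       action_type=RoboActionType.SINGLE_CLICK),
#     ]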
class RoboStartingIntent(proto.Message):
r"""Message for specifying the start activities to crawl.
Attributes:
launcher_activity (google.devtools.testing_v1.types.LauncherActivityIntent):
An intent that starts the main launcher
activity.
start_activity (google.devtools.testing_v1.types.StartActivityIntent):
An intent that starts an activity with
specific details.
timeout (google.protobuf.duration_pb2.Duration):
Timeout in seconds for each intent.
"""
launcher_activity = proto.Field(
proto.MESSAGE,
number=1,
oneof='starting_intent',
message='LauncherActivityIntent',
)
start_activity = proto.Field(
proto.MESSAGE,
number=2,
oneof='starting_intent',
message='StartActivityIntent',
)
timeout = proto.Field(
proto.MESSAGE,
number=3,
message=duration_pb2.Duration,
)
class LauncherActivityIntent(proto.Message):
r"""Specifies an intent that starts the main launcher activity.
"""
class StartActivityIntent(proto.Message):
r"""A starting intent specified by an action, uri, and
categories.
Attributes:
action (str):
Action name. Required for START_ACTIVITY.
uri (str):
URI for the action.
categories (Sequence[str]):
Intent categories to set on the intent.
"""
action = proto.Field(
proto.STRING,
number=2,
)
uri = proto.Field(
proto.STRING,
number=3,
)
categories = proto.RepeatedField(
proto.STRING,
number=4,
)
class EnvironmentMatrix(proto.Message):
r"""The matrix of environments in which the test is to be
executed.
Attributes:
android_matrix (google.devtools.testing_v1.types.AndroidMatrix):
A matrix of Android devices.
android_device_list (google.devtools.testing_v1.types.AndroidDeviceList):
A list of Android devices; the test will be
run only on the specified devices.
ios_device_list (google.devtools.testing_v1.types.IosDeviceList):
A list of iOS devices.
"""
android_matrix = proto.Field(
proto.MESSAGE,
number=1,
oneof='environment_matrix',
message='AndroidMatrix',
)
android_device_list = proto.Field(
proto.MESSAGE,
number=2,
oneof='environment_matrix',
message='AndroidDeviceList',
)
ios_device_list = proto.Field(
proto.MESSAGE,
number=3,
oneof='environment_matrix',
message='IosDeviceList',
)
class AndroidDeviceList(proto.Message):
r"""A list of Android device configurations in which the test is
to be executed.
Attributes:
android_devices (Sequence[google.devtools.testing_v1.types.AndroidDevice]):
Required. A list of Android devices.
"""
android_devices = proto.RepeatedField(
proto.MESSAGE,
number=1,
message='AndroidDevice',
)
class IosDeviceList(proto.Message):
r"""A list of iOS device configurations in which the test is to
be executed.
Attributes:
ios_devices (Sequence[google.devtools.testing_v1.types.IosDevice]):
Required. A list of iOS devices.
"""
ios_devices = proto.RepeatedField(
proto.MESSAGE,
number=1,
message='IosDevice',
)
class AndroidMatrix(proto.Message):
r"""A set of Android device configuration permutations is defined
    by the cross-product of the given axes. Internally, the
given AndroidMatrix will be expanded into a set of
AndroidDevices.
Only supported permutations will be instantiated. Invalid
permutations (e.g., incompatible models/versions) are ignored.
Attributes:
android_model_ids (Sequence[str]):
Required. The ids of the set of Android
device to be used. Use the
TestEnvironmentDiscoveryService to get supported
options.
android_version_ids (Sequence[str]):
Required. The ids of the set of Android OS
version to be used. Use the
TestEnvironmentDiscoveryService to get supported
options.
locales (Sequence[str]):
Required. The set of locales the test device
will enable for testing. Use the
TestEnvironmentDiscoveryService to get supported
options.
orientations (Sequence[str]):
Required. The set of orientations to test
with. Use the TestEnvironmentDiscoveryService to
get supported options.
"""
android_model_ids = proto.RepeatedField(
proto.STRING,
number=1,
)
android_version_ids = proto.RepeatedField(
proto.STRING,
number=2,
)
locales = proto.RepeatedField(
proto.STRING,
number=3,
)
orientations = proto.RepeatedField(
proto.STRING,
number=4,
)
class ClientInfo(proto.Message):
r"""Information about the client which invoked the test.
Attributes:
name (str):
Required. Client name, such as gcloud.
client_info_details (Sequence[google.devtools.testing_v1.types.ClientInfoDetail]):
The list of detailed information about
client.
"""
name = proto.Field(
proto.STRING,
number=1,
)
client_info_details = proto.RepeatedField(
proto.MESSAGE,
number=2,
message='ClientInfoDetail',
)
class ClientInfoDetail(proto.Message):
r"""Key-value pair of detailed information about the client which
invoked the test. Examples: {'Version', '1.0'}, {'Release
Track', 'BETA'}.
Attributes:
key (str):
Required. The key of detailed client
information.
value (str):
Required. The value of detailed client
information.
"""
key = proto.Field(
proto.STRING,
number=1,
)
value = proto.Field(
proto.STRING,
number=2,
)
class ResultStorage(proto.Message):
r"""Locations where the results of running the test are stored.
Attributes:
google_cloud_storage (google.devtools.testing_v1.types.GoogleCloudStorage):
Required.
tool_results_history (google.devtools.testing_v1.types.ToolResultsHistory):
The tool results history that contains the
tool results execution that results are written
to.
If not provided, the service will choose an
appropriate value.
tool_results_execution (google.devtools.testing_v1.types.ToolResultsExecution):
Output only. The tool results execution that
results are written to.
results_url (str):
Output only. URL to the results in the
Firebase Web Console.
"""
google_cloud_storage = proto.Field(
proto.MESSAGE,
number=1,
message='GoogleCloudStorage',
)
tool_results_history = proto.Field(
proto.MESSAGE,
number=5,
message='ToolResultsHistory',
)
tool_results_execution = proto.Field(
proto.MESSAGE,
number=6,
message='ToolResultsExecution',
)
results_url = proto.Field(
proto.STRING,
number=7,
)
class ToolResultsHistory(proto.Message):
r"""Represents a tool results history resource.
Attributes:
project_id (str):
Required. The cloud project that owns the
tool results history.
history_id (str):
Required. A tool results history ID.
"""
project_id = proto.Field(
proto.STRING,
number=1,
)
history_id = proto.Field(
proto.STRING,
number=2,
)
class ToolResultsExecution(proto.Message):
r"""Represents a tool results execution resource.
This has the results of a TestMatrix.
Attributes:
project_id (str):
Output only. The cloud project that owns the
tool results execution.
history_id (str):
Output only. A tool results history ID.
execution_id (str):
Output only. A tool results execution ID.
"""
project_id = proto.Field(
proto.STRING,
number=1,
)
history_id = proto.Field(
proto.STRING,
number=2,
)
execution_id = proto.Field(
proto.STRING,
number=3,
)
class ToolResultsStep(proto.Message):
r"""Represents a tool results step resource.
This has the results of a TestExecution.
Attributes:
project_id (str):
Output only. The cloud project that owns the
tool results step.
history_id (str):
Output only. A tool results history ID.
execution_id (str):
Output only. A tool results execution ID.
step_id (str):
Output only. A tool results step ID.
"""
project_id = proto.Field(
proto.STRING,
number=1,
)
history_id = proto.Field(
proto.STRING,
number=2,
)
execution_id = proto.Field(
proto.STRING,
number=3,
)
step_id = proto.Field(
proto.STRING,
number=4,
)
class GoogleCloudStorage(proto.Message):
r"""A storage location within Google cloud storage (GCS).
Attributes:
gcs_path (str):
Required. The path to a directory in GCS that
will eventually contain the results for this
test. The requesting user must have write access
on the bucket in the supplied path.
"""
gcs_path = proto.Field(
proto.STRING,
number=1,
)
class FileReference(proto.Message):
r"""A reference to a file, used for user inputs.
Attributes:
gcs_path (str):
A path to a file in Google Cloud Storage.
            Example: gs://build-app-1414623860166/app%40debug-unaligned.apk
            These paths are expected to be url encoded (percent encoding).
"""
gcs_path = proto.Field(
proto.STRING,
number=1,
oneof='file',
)
class Environment(proto.Message):
r"""The environment in which the test is run.
Attributes:
android_device (google.devtools.testing_v1.types.AndroidDevice):
An Android device which must be used with an
Android test.
ios_device (google.devtools.testing_v1.types.IosDevice):
An iOS device which must be used with an iOS
test.
"""
android_device = proto.Field(
proto.MESSAGE,
number=1,
oneof='environment',
message='AndroidDevice',
)
ios_device = proto.Field(
proto.MESSAGE,
number=2,
oneof='environment',
message='IosDevice',
)
class AndroidDevice(proto.Message):
r"""A single Android device.
Attributes:
android_model_id (str):
Required. The id of the Android device to be
used. Use the TestEnvironmentDiscoveryService to
get supported options.
android_version_id (str):
Required. The id of the Android OS version to
be used. Use the TestEnvironmentDiscoveryService
to get supported options.
locale (str):
Required. The locale the test device used for
testing. Use the TestEnvironmentDiscoveryService
to get supported options.
orientation (str):
Required. How the device is oriented during
the test. Use the
TestEnvironmentDiscoveryService to get supported
options.
"""
android_model_id = proto.Field(
proto.STRING,
number=1,
)
android_version_id = proto.Field(
proto.STRING,
number=2,
)
locale = proto.Field(
proto.STRING,
number=3,
)
orientation = proto.Field(
proto.STRING,
number=4,
)
class IosDevice(proto.Message):
r"""A single iOS device.
Attributes:
ios_model_id (str):
Required. The id of the iOS device to be
used. Use the TestEnvironmentDiscoveryService to
get supported options.
ios_version_id (str):
Required. The id of the iOS major software
version to be used. Use the
TestEnvironmentDiscoveryService to get supported
options.
locale (str):
Required. The locale the test device used for
testing. Use the TestEnvironmentDiscoveryService
to get supported options.
orientation (str):
Required. How the device is oriented during
the test. Use the
TestEnvironmentDiscoveryService to get supported
options.
"""
ios_model_id = proto.Field(
proto.STRING,
number=1,
)
ios_version_id = proto.Field(
proto.STRING,
number=2,
)
locale = proto.Field(
proto.STRING,
number=3,
)
orientation = proto.Field(
proto.STRING,
number=4,
)
class TestDetails(proto.Message):
r"""Additional details about the progress of the running test.
Attributes:
progress_messages (Sequence[str]):
Output only. Human-readable, detailed descriptions of the
test's progress. For example: "Provisioning a device",
"Starting Test".
During the course of execution new data may be appended to
the end of progress_messages.
error_message (str):
Output only. If the TestState is ERROR, then
this string will contain human-readable details
about the error.
"""
progress_messages = proto.RepeatedField(
proto.STRING,
number=3,
)
error_message = proto.Field(
proto.STRING,
number=4,
)
class InvalidRequestDetail(proto.Message):
r"""Details behind an invalid request.
Attributes:
reason (google.devtools.testing_v1.types.InvalidRequestDetail.Reason):
The reason behind the error.
"""
class Reason(proto.Enum):
r"""Possible invalid request reasons."""
REASON_UNSPECIFIED = 0
REQUEST_INVALID = 1
RESOURCE_TOO_BIG = 2
RESOURCE_NOT_FOUND = 3
UNSUPPORTED = 4
NOT_IMPLEMENTED = 5
reason = proto.Field(
proto.ENUM,
number=1,
enum=Reason,
)
class ShardingOption(proto.Message):
r"""Options for enabling sharding.
Attributes:
uniform_sharding (google.devtools.testing_v1.types.UniformSharding):
Uniformly shards test cases given a total
number of shards.
manual_sharding (google.devtools.testing_v1.types.ManualSharding):
Shards test cases into the specified groups
of packages, classes, and/or methods.
"""
uniform_sharding = proto.Field(
proto.MESSAGE,
number=1,
oneof='option',
message='UniformSharding',
)
manual_sharding = proto.Field(
proto.MESSAGE,
number=2,
oneof='option',
message='ManualSharding',
)
class UniformSharding(proto.Message):
r"""Uniformly shards test cases given a total number of shards.
    For an instrumentation test, this is translated into the "-e numShard" "-e
    shardIndex" AndroidJUnitRunner arguments. With uniform sharding
enabled, specifying these sharding arguments via
environment_variables is invalid.
Attributes:
num_shards (int):
Required. Total number of shards. When any
physical devices are selected, the number must
be >= 1 and <= 50. When no physical devices are
selected, the number must be >= 1 and <= 500.
"""
num_shards = proto.Field(
proto.INT32,
number=1,
)
class ManualSharding(proto.Message):
r"""Shards test cases into the specified groups of packages, classes,
and/or methods.
With manual sharding enabled, specifying test targets via
environment_variables or in InstrumentationTest is invalid.
Attributes:
test_targets_for_shard (Sequence[google.devtools.testing_v1.types.TestTargetsForShard]):
Required. Group of packages, classes, and/or test methods to
be run for each shard. When any physical devices are
selected, the number of test_targets_for_shard must be >= 1
and <= 50. When no physical devices are selected, the number
must be >= 1 and <= 500.
"""
test_targets_for_shard = proto.RepeatedField(
proto.MESSAGE,
number=1,
message='TestTargetsForShard',
)
class TestTargetsForShard(proto.Message):
r"""Test targets for a shard.
Attributes:
test_targets (Sequence[str]):
Group of packages, classes, and/or test methods to be run
for each shard. The targets need to be specified in
AndroidJUnitRunner argument format. For example, "package
com.my.packages" "class com.my.package.MyClass".
The number of shard_test_targets must be greater than 0.
"""
test_targets = proto.RepeatedField(
proto.STRING,
number=1,
)
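# Illustrative example (not part of the generated module): a manual sharding
# option that runs two test classes on two separate shards.
#
#     sharding = ShardingOption(
#         manual_sharding=ManualSharding(
#             test_targets_for_shard=[
#                 TestTargetsForShard(test_targets=["class com.my.package.ClassA"]),
#                 TestTargetsForShard(test_targets=["class com.my.package.ClassB"]),
#             ],
#         ),
#     )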
class Shard(proto.Message):
r"""Output only. Details about the shard.
Attributes:
shard_index (int):
Output only. The index of the shard among all
the shards.
num_shards (int):
Output only. The total number of shards.
test_targets_for_shard (google.devtools.testing_v1.types.TestTargetsForShard):
Output only. Test targets for each shard.
"""
shard_index = proto.Field(
proto.INT32,
number=1,
)
num_shards = proto.Field(
proto.INT32,
number=2,
)
test_targets_for_shard = proto.Field(
proto.MESSAGE,
number=3,
message='TestTargetsForShard',
)
class CreateTestMatrixRequest(proto.Message):
r"""Request to submit a matrix of tests for execution.
Attributes:
project_id (str):
The GCE project under which this job will
run.
test_matrix (google.devtools.testing_v1.types.TestMatrix):
The matrix of tests that the user wants to
run.
request_id (str):
A string id used to detect duplicated
requests. Ids are automatically scoped to a
project, so users should ensure the ID is unique
per-project. A UUID is recommended.
Optional, but strongly recommended.
"""
project_id = proto.Field(
proto.STRING,
number=1,
)
test_matrix = proto.Field(
proto.MESSAGE,
number=2,
message='TestMatrix',
)
request_id = proto.Field(
proto.STRING,
number=3,
)
class GetTestMatrixRequest(proto.Message):
r"""Request to get the Test Matrix with the given id.
Attributes:
project_id (str):
Cloud project that owns the test matrix.
test_matrix_id (str):
Unique test matrix id which was assigned by
the service.
"""
project_id = proto.Field(
proto.STRING,
number=1,
)
test_matrix_id = proto.Field(
proto.STRING,
number=2,
)
class CancelTestMatrixRequest(proto.Message):
r"""Request to stop running all of the tests in the specified
matrix.
Attributes:
project_id (str):
Cloud project that owns the test.
test_matrix_id (str):
Test matrix that will be canceled.
"""
project_id = proto.Field(
proto.STRING,
number=1,
)
test_matrix_id = proto.Field(
proto.STRING,
number=2,
)
class CancelTestMatrixResponse(proto.Message):
r"""Response containing the current state of the specified test
matrix.
Attributes:
test_state (google.devtools.testing_v1.types.TestState):
The current rolled-up state of the test
matrix. If this state is already final, then the
cancelation request will have no effect.
"""
test_state = proto.Field(
proto.ENUM,
number=1,
enum='TestState',
)
__all__ = tuple(sorted(__protobuf__.manifest))
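Taken together, these messages nest into a single CreateTestMatrixRequest. The sketch below is purely illustrative and is not part of the generated module: the TestSpecification wrapper with its android_robo_test field, the top-level import path, and every concrete value (project id, bucket, device model) are assumptions layered on top of the message classes defined above.

# Illustrative sketch only -- TestSpecification/android_robo_test and the
# import path are assumed; all concrete values are placeholders.
import uuid
from google.devtools import testing_v1

matrix = testing_v1.TestMatrix(
    test_specification=testing_v1.TestSpecification(
        android_robo_test=testing_v1.AndroidRoboTest(
            app_apk=testing_v1.FileReference(gcs_path="gs://my-bucket/app.apk"),
            max_depth=50,
        ),
    ),
    environment_matrix=testing_v1.EnvironmentMatrix(
        android_device_list=testing_v1.AndroidDeviceList(
            android_devices=[
                testing_v1.AndroidDevice(
                    android_model_id="Pixel2",
                    android_version_id="28",
                    locale="en",
                    orientation="portrait",
                ),
            ],
        ),
    ),
    result_storage=testing_v1.ResultStorage(
        google_cloud_storage=testing_v1.GoogleCloudStorage(
            gcs_path="gs://my-bucket/results/",
        ),
    ),
)
request = testing_v1.CreateTestMatrixRequest(
    project_id="my-project",
    test_matrix=matrix,
    request_id=str(uuid.uuid4()),  # a UUID, as the docstring recommends
)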
# ---- File: CONTENT/Resources/guides/__UNSORTED/244_shortest_word_distance_ii/shortest.py
# ---- Repo: impastasyndrome/DS-ALGO-OFFICIAL (Python, Apache-2.0)
from collections import defaultdict
class WordDistance(object):
def __init__(self, words):
"""
initialize your data structure here.
:type words: List[str]
"""
self.indice = defaultdict(list)
self.memo = {}
self.MAXLEN = len(words)
for i, word in enumerate(words):
self.indice[word].append(i)
def shortest(self, word1, word2):
"""
        Returns the shortest distance between word1 and word2.
:type word1: str
:type word2: str
:rtype: int
"""
if (word1, word2) in self.memo:
return self.memo[(word1, word2)]
l1, l2 = self.indice[word1], self.indice[word2]
idx1, idx2 = 0, 0
min_distance = self.MAXLEN
while True:
if idx1 >= len(l1) or idx2 >= len(l2):
break
if l1[idx1] < l2[idx2]:
if l2[idx2] - l1[idx1] < min_distance:
min_distance = l2[idx2] - l1[idx1]
idx1 += 1
else:
if l1[idx1] - l2[idx2] < min_distance:
min_distance = l1[idx1] - l2[idx2]
idx2 += 1
self.memo[(word1, word2)] = min_distance
return min_distance
# Your WordDistance object will be instantiated and called as such:
# wordDistance = WordDistance(words)
# wordDistance.shortest("word1", "word2")
# wordDistance.shortest("anotherWord1", "anotherWord2")
# ---- File: Developer-Essentials-Capstone/Python/Includes/Capstone-Setup.py
# ---- Repo: databricks-academy/developer-essentials-capstone (Python, CC0-1.0)
# Databricks notebook source
import builtins as BI
# Setup the capstone
import re, uuid
from pyspark.sql.types import StructType, StringType, IntegerType, TimestampType, DoubleType
from pyspark.sql.functions import col, to_date, weekofyear
from pyspark.sql import DataFrame
static_tests = None
bronze_tests = None
silver_tests = None
gold_tests = None
registration_id = None
final_passed = False
course_name = "Core Partner Enablement"
username = spark.sql("SELECT current_user()").first()[0]
clean_username = re.sub("[^a-zA-Z0-9]", "_", username)
user_db = f"dbacademy_{clean_username}_dev_ess_cap"
working_dir = f"dbfs:/user/{username}/dbacademy/dev-ess-cap"
outputPathBronzeTest = f"{working_dir}/bronze_test"
outputPathSilverTest = f"{working_dir}/silver_test"
outputPathGoldTest = f"{working_dir}/gold_test"
source_path = f"wasbs://courseware@dbacademy.blob.core.windows.net/developer-essentials-capstone/v01"
eventSchema = ( StructType()
.add('eventName', StringType())
.add('eventParams', StructType()
.add('game_keyword', StringType())
.add('app_name', StringType())
.add('scoreAdjustment', IntegerType())
.add('platform', StringType())
.add('app_version', StringType())
.add('device_id', StringType())
.add('client_event_time', TimestampType())
.add('amount', DoubleType())
)
)
class Key:
singleStreamDF = (spark
.readStream
.schema(eventSchema)
.option('streamName','mobilestreaming_test')
.option("maxFilesPerTrigger", 1)
.json(f"{source_path}/solutions/single")
)
bronzeDF = spark.read.format("delta").load(f"{source_path}/solutions/bronze")
correctLookupDF = spark.read.format("delta").load(f"{source_path}/solutions/lookup")
silverDF = spark.read.format("delta").load(f"{source_path}/solutions/silver")
goldDF = spark.read.format("delta").load(f"{source_path}/solutions/gold")
print(f"Declared the following variables:")
print(f" * user_db: {user_db}")
print(f" * working_dir: {working_dir}")
print()
print(f"Declared the following function:")
print(f" * realityCheckBronze(..)")
print(f" * realityCheckStatic(..)")
print(f" * realityCheckSilver(..)")
print(f" * realityCheckGold(..)")
print(f" * realityCheckFinal()")
# COMMAND ----------
def path_exists(path):
try:
return len(dbutils.fs.ls(path)) >= 0
except Exception:
return False
def install_exercise_datasets(reinstall):
global registration_id
min_time = "1 minute"
max_time = "5 minutes"
existing = path_exists(f"{working_dir}/lookup_data") and path_exists(f"{working_dir}/event_source")
if not reinstall and existing:
print(f"Skipping install of existing datasets to\n{working_dir}/lookup_data and\n{working_dir}/event_source")
registration_id = spark.read.json(f"{working_dir}/_meta/config.json").first()["registration_id"]
return
# Remove old versions of the previously installed datasets
if existing:
print(f"Removing previously installed datasets from\n{working_dir}/lookup_data and\n{working_dir}/event_source\n")
dbutils.fs.rm(f"{working_dir}/lookup_data", True)
dbutils.fs.rm(f"{source_path}/event_source", True)
print(f"""Installing the datasets to\n{working_dir}/lookup_data\n{working_dir}/event_source""")
print(f"""\nNOTE: The datasets that we are installing are located in Washington, USA - depending on the
region that your workspace is in, this operation can take as little as {min_time} and
as long as {max_time}, but this is a one-time operation.""")
dbutils.fs.cp(f"{source_path}/lookup_data", f"{working_dir}/lookup_data", True)
dbutils.fs.cp(f"{source_path}/event_source/part-00000-tid-6718866119967790308-cef1b03e-5fda-4259-885e-e992ca3996c3-25700-c000.json",
f"{working_dir}/event_source/file-0.json")
dbutils.fs.cp(f"{source_path}/event_source/part-00001-tid-6718866119967790308-cef1b03e-5fda-4259-885e-e992ca3996c3-25701-c000.json",
f"{working_dir}/event_source/file-1.json")
dbutils.fs.cp(f"{source_path}/event_source/part-00002-tid-6718866119967790308-cef1b03e-5fda-4259-885e-e992ca3996c3-25702-c000.json",
f"{working_dir}/event_source/file-2.json")
registration_id = str(uuid.uuid4()).replace("-","")
payload = f"""\u007b"registration_id": "{registration_id}"\u007d\n"""
dbutils.fs.put(f"{working_dir}/_meta/config.json", payload, overwrite=True)
print(f"""\nThe install of the datasets completed successfully.""")
try: reinstall = dbutils.widgets.get("reinstall").lower() == "true"
except: reinstall = False
install_exercise_datasets(reinstall)
print(f"\nYour Registration ID is {registration_id}")
# COMMAND ----------
# Setup Bronze
from pyspark.sql import DataFrame
import time
def realityCheckBronze(writeToBronze):
global bronze_tests
bronze_tests = TestSuite()
dbutils.fs.rm(outputPathBronzeTest, True)
dbutils.fs.rm(f"{outputPathBronzeTest}_checkpoint", True)
try:
writeToBronze(Key.singleStreamDF, outputPathBronzeTest, "bronze_test")
def groupAndCount(df: DataFrame):
return df.select('eventName').groupBy('eventName').count()
for s in spark.streams.active:
if s.name == "bronze_test":
first = True
while (len(s.recentProgress) == 0):
if first:
print("waiting for stream to start...")
first = False
time.sleep(5)
try:
testDF = (spark
.read
.format("delta")
.load(outputPathBronzeTest))
except Exception as e:
print(e)
testDF = (spark
.read
.load(outputPathBronzeTest))
test_dtype = findColumnDatatype(testDF, 'eventDate')
historyDF = spark.sql("DESCRIBE HISTORY delta.`{}`".format(outputPathBronzeTest))
bronze_tests.test(id = "rc_bronze_delta_format", points = 2, description = "Is in Delta format",
testFunction = lambda: isDelta(outputPathBronzeTest))
bronze_tests.test(id = "rc_bronze_contains_columns", points = 2, description = "Dataframe contains eventDate column",
testFunction = lambda: verifyColumnsExists(testDF, ['eventDate']))
bronze_tests.test(id = "rc_bronze_correct_schema", points = 2, description = "Returns correct schema",
testFunction = lambda: checkSchema(testDF.schema, Key.bronzeDF.schema))
bronze_tests.test(id = "rc_bronze_column_check", points = 2, description = "eventDate column is correct data type",
testFunction = lambda: test_dtype == "date")
bronze_tests.test(id = "rc_bronze_null_check", points = 2, description = "Does not contain nulls",
testFunction = lambda: checkForNulls(testDF, 'eventParams'))
bronze_tests.test(id = "rc_bronze_is_streaming", points = 2, description = "Is streaming DataFrame",
testFunction = lambda: isStreamingDataframe(historyDF))
bronze_tests.test(id = "rc_bronze_output_mode", points = 2, description = "Output mode is Append",
testFunction = lambda: checkOutputMode(historyDF, "Append"))
bronze_tests.test(id = "rc_bronze_correct_rows", points = 2, description = "Returns a Dataframe with the correct number of rows",
testFunction = lambda: testDF.count() == Key.bronzeDF.count())
bronze_tests.test(id = "rc_bronze_correct_df", points = 2, description = "Returns the correct Dataframe",
testFunction = lambda: compareDataFrames(groupAndCount(testDF), groupAndCount(Key.bronzeDF)))
daLogger.logTestSuite("Bronze Reality Check", registration_id, bronze_tests)
bronze_tests.displayResults()
finally:
for s in spark.streams.active:
if s.name == 'bronze_test':
try:
s.stop()
except Exception as e:
print('!!', e)
None
# COMMAND ----------
# Setup Static
def realityCheckStatic(loadStaticData):
global static_tests
static_tests = TestSuite()
testDF = loadStaticData(f"{source_path}/solutions/lookup")
static_tests.test(id = "rc_static_count", points = 2, description = "Has the correct number of rows",
testFunction = lambda: testDF.count() == 475)
static_tests.test(id = "rc_static_schema", points = 2, description = "Returns correct schema",
testFunction = lambda: checkSchema(testDF.schema, Key.correctLookupDF.schema))
daLogger.logTestSuite("Static Reality Check", registration_id, static_tests)
static_tests.displayResults()
None
# COMMAND ----------
# Setup Silver
def realityCheckSilver(bronzeToSilver):
global silver_tests
silver_tests = TestSuite()
dbutils.fs.rm(outputPathSilverTest, True)
dbutils.fs.rm(f"{outputPathSilverTest}_checkpoint", True)
try:
bronzeToSilver(outputPathBronzeTest, outputPathSilverTest, "silver_test", Key.correctLookupDF)
def groupAndCount(df: DataFrame):
try:
return df.select('deviceType').groupBy('deviceType').count()
except:
print("deviceType not found")
for s in spark.streams.active:
first = True
while (len(s.recentProgress) == 0):
if first:
print("waiting for stream to start...")
first = False
time.sleep(5)
try:
testDF = (spark
.read
.format("delta")
.load(outputPathSilverTest))
except Exception as e:
testDF = (spark
.read
.load(outputPathSilverTest))
historyDF = spark.sql("DESCRIBE HISTORY delta.`{}`".format(outputPathSilverTest))
silver_tests.test(id = "rc_silver_delta_format", points = 2, description = "Is in Delta format",
testFunction = lambda: isDelta(outputPathSilverTest))
silver_tests.test(id = "rc_silver_contains_columns", points = 2, description = "Dataframe contains device_id, client_event_time, deviceType columns",
testFunction = lambda: verifyColumnsExists(testDF, ["device_id", "client_event_time", "deviceType"]))
silver_tests.test(id = "rc_silver_correct_schema", points = 2, description = "Returns correct schema",
testFunction = lambda: checkSchema(testDF.schema, Key.silverDF.schema))
silver_tests.test(id = "rc_silver_null_check", points = 2, description = "Does not contain nulls",
testFunction = lambda: checkForNulls(testDF, "eventName"))
silver_tests.test(id = "rc_silver_is_streaming", points = 2, description = "Is streaming DataFrame",
testFunction = lambda: isStreamingDataframe(historyDF))
silver_tests.test(id = "rc_silver_output_mode", points = 2, description = "Output mode is Append",
testFunction = lambda: checkOutputMode(historyDF, "Append"))
silver_tests.test(id = "rc_silver_correct_rows", points = 2, description = "Returns a Dataframe with the correct number of rows",
testFunction = lambda: testDF.count() == Key.silverDF.count())
silver_tests.test(id = "rc_silver_correct_df", points = 2, description = "Returns the correct Dataframe",
testFunction = lambda: compareDataFrames(groupAndCount(testDF), groupAndCount(Key.silverDF)))
daLogger.logTestSuite("Silver Reality Check", registration_id, silver_tests)
silver_tests.displayResults()
finally:
for s in spark.streams.active:
if s.name == 'silver_test':
s.stop()
None
# COMMAND ----------
# Setup Gold
def realityCheckGold(silverToGold):
global gold_tests
gold_tests = TestSuite()
dbutils.fs.rm(outputPathGoldTest, True)
dbutils.fs.rm(f"{outputPathGoldTest}_checkpoint", True)
try:
silverToGold(outputPathSilverTest, outputPathGoldTest, "gold_test")
for s in spark.streams.active:
first = True
while (len(s.recentProgress) == 0):
if first:
print("waiting for stream to start...")
first = False
time.sleep(5)
try:
testDF = (spark
.read
.format("delta")
.load(outputPathGoldTest))
except Exception as e:
testDF = (spark
.read
.load(outputPathGoldTest))
historyDF = spark.sql("DESCRIBE HISTORY delta.`{}`".format(outputPathGoldTest))
gold_tests.test(id = "rc_gold_delta_format", points = 2, description = "Is in Delta format",
testFunction = lambda: isDelta(outputPathGoldTest))
gold_tests.test(id = "rc_gold_contains_columns", points = 2, description = "Dataframe contains week and WAU columns",
testFunction = lambda: verifyColumnsExists(testDF, ["week", "WAU"]))
gold_tests.test(id = "rc_gold_correct_schema", points = 2, description = "Returns correct schema",
testFunction = lambda: checkSchema(testDF.schema, Key.goldDF.schema))
gold_tests.test(id = "rc_gold_null_check", points = 2, description = "Does not contain nulls",
testFunction = lambda: checkForNulls(testDF, "eventName"))
gold_tests.test(id = "rc_gold_is_streaming", points = 2, description = "Is streaming DataFrame",
testFunction = lambda: isStreamingDataframe(historyDF))
gold_tests.test(id = "rc_gold_output_mode", points = 2, description = "Output mode is Complete",
testFunction = lambda: checkOutputMode(historyDF, "Complete"))
gold_tests.test(id = "rc_gold_correct_rows", points = 2, description = "Returns a Dataframe with the correct number of rows",
testFunction = lambda: testDF.count() == Key.goldDF.count())
gold_tests.test(id = "rc_gold_correct_df", points = 2, description = "Returns the correct Dataframe",
testFunction = lambda: compareDataFrames(testDF.sort("week"), Key.goldDF.sort("week")))
daLogger.logTestSuite("Gold Reality Check", registration_id, gold_tests)
gold_tests.displayResults()
finally:
for s in spark.streams.active:
if s.name == 'gold_test':
s.stop()
None
# COMMAND ----------
html_passed = f"""
<html>
<body>
<h2>Congratulations! You're all done!</h2>
While the preliminary evaluation of your project indicates that you have passed, we have a few more validation steps to run on the back-end:<br/>
<ul style="margin:0">
<li> Code & statistical analysis of your capstone project</li>
<li> Correlation of your account in our LMS via your email address, <b>{username}</b></li>
  <li> Final preparation of your badge</li>
</ul>
<p>Assuming there are no issues with our last few steps, you will receive your <b>Databricks Developer Essentials Badge</b> within 2 weeks. Notification will be made by email to <b>{username}</b> regarding the availability of your digital badge via <b>Accredible</b>.
Should we have any issues, such as not finding your email address in our LMS, we will do our best to resolve the issue using the email address provided here.
</p>
<p>Your digital badge will be available in a secure, verifiable, and digital format that you can easily retrieve via <b>Accredible</b>. You can then share your achievement via any number of different social media platforms.</p>
<p>If you have questions about the status of your badge after the initial two-week window, or if the email address listed above is incorrect, please <a href="https://help.databricks.com/s/contact-us?ReqType=training" target="_blank">submit a ticket</a> with the subject "Core Capstone" and your Registration ID (<b>{registration_id}</b>) in the message body. Please allow us 3-5 business days to respond.</p>
One final note: In order to comply with <a href="https://oag.ca.gov/privacy/ccpa" target="_blank">CCPA</a> and <a href="https://gdpr.eu/" target="_blank">GDPR</a>, which regulate the collection of your personal information, the status of this capstone and its correlation to your email address will be deleted within 30 days of its submission.
</body>
</html>
"""
html_failed = f"""
<html>
<body>
<h2>Almost There!</h2>
<p>Our preliminary evaluation of your project indicates that you have not passed.</p>
<p>In order for your project to be submitted <b>all</b> reality checks must pass.</p>
  <p>In some cases this problem can be resolved by simply clearing the notebook's state (<b>Clear State & Results</b>) and then selecting <b>Run All</b> from the toolbar above.</p>
  <p>If your project continues to fail validation, please review each step above to ensure that you have properly addressed all the corresponding requirements.</p>
</body>
</html>
"""
# Setup Final
def realityCheckFinal():
global final_passed
suite = TestSuite()
suite.testEquals(f"final.static-passed", "Reality Check Bronze passed", static_tests.passed, True)
suite.testEquals(f"final.bronze-passed", "Reality Check Static passed", bronze_tests.passed, True)
suite.testEquals(f"final.silver-passed", "Reality Check Silver passed", silver_tests.passed, True)
suite.testEquals(f"final.final-passed", "Reality Check Gold passed", gold_tests.passed, True)
final_passed = suite.passed
daLogger.logTestSuite("Final Reality Check", registration_id, suite)
daLogger.logAggregation("Capstone", registration_id, TestResultsAggregator)
suite.displayResults()
if final_passed and TestResultsAggregator.passed:
displayHTML(html_passed)
daLogger.logCompletion(registration_id, username)
else:
displayHTML(html_failed)
None
# COMMAND ----------
class CapstoneLogger:
def logTestResult(self, event_id, registration_id, result):
self.logEvent(event_id = event_id,
registration_id = registration_id,
description = result.test.description,
passed = result.passed,
points = result.points,
max_points = result.test.points)
def logTestSuite(self, event_id, registration_id, suite):
self.logEvent(event_id = event_id,
registration_id = registration_id,
description = None,
passed = suite.passed,
points = suite.score,
max_points = suite.maxScore)
def logAggregation(self, event_id, registration_id, aggregate):
self.logEvent(event_id = event_id,
registration_id = registration_id,
description = None,
passed = aggregate.passed,
points = aggregate.score,
max_points = aggregate.maxScore)
def logCompletion(self, registration_id:str, email_address:str):
import time, json, requests
try:
content = {
"registration_id": registration_id,
"email_address": email_address,
}
try:
response = requests.put(
url="https://rqbr3jqop0.execute-api.us-west-2.amazonaws.com/prod/capstone/completed",
json=content,
headers={
"Accept": "application/json; charset=utf-8",
"Content-Type": "application/json; charset=utf-8"
})
assert response.status_code == 200, f"Expected HTTP response code 200, found {response.status_code}"
except requests.exceptions.RequestException as e:
raise Exception("Exception sending message") from e
except Exception as e:
raise Exception("Exception constructing message") from e
def logEvent(self, event_id:str, registration_id:str, description:str, passed:str, points:int, max_points:int):
import time, json, requests
try:
content = {
"module_name": "essentials-capstone-v2",
"lesson_name": "Capstone",
"language": "python",
"event_id": event_id,
"event_time": f"{BI.int(BI.round((time.time() * 1000)))}",
"registration_id": registration_id,
"description": description,
"passed": passed,
"points": points,
"max_points": max_points,
}
try:
response = requests.post(
url="https://rqbr3jqop0.execute-api.us-west-2.amazonaws.com/prod/capstone/status",
json=content,
headers={
"Accept": "application/json; charset=utf-8",
"Content-Type": "application/json; charset=utf-8"
})
assert response.status_code == 200, f"Expected HTTP response code 200, found {response.status_code}"
except requests.exceptions.RequestException as e:
raise Exception("Exception sending message") from e
except Exception as e:
raise Exception("Exception constructing message") from e
daLogger = CapstoneLogger()
None
# COMMAND ----------
# These imports are OK to provide for students
import pyspark
from typing import Callable, Any, Iterable, List, Set, Tuple
import uuid
#############################################
# Test Suite classes
#############################################
# Test case
class TestCase(object):
__slots__=('description', 'testFunction', 'id', 'uniqueId', 'dependsOn', 'escapeHTML', 'points')
def __init__(self,
description:str,
testFunction:Callable[[], Any],
id:str=None,
dependsOn:Iterable[str]=[],
escapeHTML:bool=False,
points:int=1):
self.description=description
self.testFunction=testFunction
self.id=id
self.dependsOn=dependsOn
self.escapeHTML=escapeHTML
self.points=points
# Test result
class TestResult(object):
__slots__ = ('test', 'skipped', 'debug', 'passed', 'status', 'points', 'exception', 'message')
def __init__(self, test, skipped = False, debug = False):
try:
self.test = test
self.skipped = skipped
self.debug = debug
if skipped:
self.status = 'skipped'
self.passed = False
self.points = 0
else:
assert test.testFunction() != False, "Test returned false"
self.status = "passed"
self.passed = True
self.points = self.test.points
self.exception = None
self.message = ""
except Exception as e:
self.status = "failed"
self.passed = False
self.points = 0
self.exception = e
self.message = repr(self.exception)
if (debug and not isinstance(e, AssertionError)):
raise e
# Decorator to lazy evaluate - used by TestSuite
def lazy_property(fn):
'''Decorator that makes a property lazy-evaluated.
'''
attr_name = '_lazy_' + fn.__name__
@property
def _lazy_property(self):
if not hasattr(self, attr_name):
setattr(self, attr_name, fn(self))
return getattr(self, attr_name)
return _lazy_property
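# Illustrative example (not in the original): a method decorated with
# lazy_property is computed once on first access and cached on the instance
# under a '_lazy_'-prefixed attribute.
#
#     class Demo:
#         @lazy_property
#         def answer(self):
#             print("computing...")
#             return 42
#
#     d = Demo()
#     d.answer   # prints "computing..." and returns 42
#     d.answer   # returns the cached 42; the body does not run again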
testResultsStyle = """
<style>
table { text-align: left; border-collapse: collapse; margin: 1em; caption-side: bottom; font-family: Sans-Serif; font-size: 16px}
caption { text-align: left; padding: 5px }
th, td { border: 1px solid #ddd; padding: 5px }
th { background-color: #ddd }
.passed { background-color: #97d897 }
.failed { background-color: #e2716c }
.skipped { background-color: #f9d275 }
.results .points { display: none }
.results .message { display: none }
.results .passed::before { content: "Passed" }
.results .failed::before { content: "Failed" }
.results .skipped::before { content: "Skipped" }
.grade .passed .message:empty::before { content:"Passed" }
.grade .failed .message:empty::before { content:"Failed" }
.grade .skipped .message:empty::before { content:"Skipped" }
</style>
""".strip()
# Test suite class
class TestSuite(object):
def __init__(self) -> None:
self.ids = set()
self.testCases = list()
@lazy_property
def testResults(self) -> List[TestResult]:
return self.runTests()
def runTests(self, debug=False) -> List[TestResult]:
import re
failedTests = set()
testResults = list()
for test in self.testCases:
skip = any(testId in failedTests for testId in test.dependsOn)
result = TestResult(test, skip, debug)
if (not result.passed and test.id != None):
failedTests.add(test.id)
if result.test.id: event_id = "Test-"+result.test.id
elif result.test.description: event_id = "Test-"+re.sub("[^a-zA-Z0-9_]", "", result.test.description)
else: event_id = "Test-"+str(uuid.uuid1())
daLogger.logTestResult(event_id, registration_id, result)
testResults.append(result)
TestResultsAggregator.update(result)
return testResults
def _display(self, cssClass:str="results", debug=False) -> None:
from html import escape
testResults = self.testResults if not debug else self.runTests(debug=True)
lines = []
lines.append(testResultsStyle)
lines.append("<table class='"+cssClass+"'>")
lines.append(" <tr><th class='points'>Points</th><th class='test'>Test</th><th class='result'>Result</th></tr>")
for result in testResults:
resultHTML = "<td class='result "+result.status+"'><span class='message'>"+result.message+"</span></td>"
descriptionHTML = escape(str(result.test.description)) if (result.test.escapeHTML) else str(result.test.description)
lines.append(" <tr><td class='points'>"+str(result.points)+"</td><td class='test'>"+descriptionHTML+"</td>"+resultHTML+"</tr>")
lines.append(" <caption class='points'>Score: "+str(self.score)+"</caption>")
lines.append("</table>")
html = "\n".join(lines)
displayHTML(html)
def displayResults(self) -> None:
self._display("results")
def grade(self) -> int:
self._display("grade")
return self.score
def debug(self) -> None:
self._display("grade", debug=True)
@lazy_property
def score(self) -> int:
return __builtins__.sum(map(lambda result: result.points, self.testResults))
@lazy_property
def maxScore(self) -> int:
return __builtins__.sum(map(lambda result: result.test.points, self.testResults))
@lazy_property
def percentage(self) -> int:
return 0 if self.maxScore == 0 else int(100.0 * self.score / self.maxScore)
@lazy_property
def passed(self) -> bool:
return self.percentage == 100
def addTest(self, testCase: TestCase):
if not testCase.id: raise ValueError("The test cases' id must be specified")
if testCase.id in self.ids: raise ValueError(f"Duplicate test case id: {testCase.id}")
self.testCases.append(testCase)
self.ids.add(testCase.id)
return self
def test(self, id:str, description:str, testFunction:Callable[[], Any], points:int=1, dependsOn:Iterable[str]=[], escapeHTML:bool=False):
testCase = TestCase(id=id, description=description, testFunction=testFunction, dependsOn=dependsOn, escapeHTML=escapeHTML, points=points)
return self.addTest(testCase)
def testEquals(self, id:str, description:str, valueA, valueB, points:int=1, dependsOn:Iterable[str]=[], escapeHTML:bool=False):
testFunction = lambda: valueA == valueB
testCase = TestCase(id=id, description=description, testFunction=testFunction, dependsOn=dependsOn, escapeHTML=escapeHTML, points=points)
return self.addTest(testCase)
def testFloats(self, id:str, description:str, valueA, valueB, tolerance=0.01, points:int=1, dependsOn:Iterable[str]=[], escapeHTML:bool=False):
testFunction = lambda: compareFloats(valueA, valueB, tolerance)
testCase = TestCase(id=id, description=description, testFunction=testFunction, dependsOn=dependsOn, escapeHTML=escapeHTML, points=points)
return self.addTest(testCase)
def testRows(self, id:str, description:str, rowA: pyspark.sql.Row, rowB: pyspark.sql.Row, points:int=1, dependsOn:Iterable[str]=[], escapeHTML:bool=False):
testFunction = lambda: compareRows(rowA, rowB)
testCase = TestCase(id=id, description=description, testFunction=testFunction, dependsOn=dependsOn, escapeHTML=escapeHTML, points=points)
return self.addTest(testCase)
def testDataFrames(self, id:str, description:str, dfA: pyspark.sql.DataFrame, dfB: pyspark.sql.DataFrame, points:int=1, dependsOn:Iterable[str]=[], escapeHTML:bool=False):
testFunction = lambda: compareDataFrames(dfA, dfB)
testCase = TestCase(id=id, description=description, testFunction=testFunction, dependsOn=dependsOn, escapeHTML=escapeHTML, points=points)
return self.addTest(testCase)
def testContains(self, id:str, description:str, listOfValues, value, points:int=1, dependsOn:Iterable[str]=[], escapeHTML:bool=False):
testFunction = lambda: value in listOfValues
testCase = TestCase(id=id, description=description, testFunction=testFunction, dependsOn=dependsOn, escapeHTML=escapeHTML, points=points)
return self.addTest(testCase)
class __TestResultsAggregator(object):
testResults = dict()
def update(self, result:TestResult):
self.testResults[result.test.id] = result
return result
@lazy_property
def score(self) -> int:
return __builtins__.sum(map(lambda result: result.points, self.testResults.values()))
@lazy_property
def maxScore(self) -> int:
return __builtins__.sum(map(lambda result: result.test.points, self.testResults.values()))
@lazy_property
def percentage(self) -> int:
return 0 if self.maxScore == 0 else int(100.0 * self.score / self.maxScore)
@lazy_property
def passed(self) -> bool:
return self.percentage == 100
def displayResults(self):
displayHTML(testResultsStyle + f"""
<table class='results'>
<tr><th colspan="2">Test Summary</th></tr>
<tr><td>Number of Passing Tests</td><td style="text-align:right">{self.score}</td></tr>
<tr><td>Number of Failing Tests</td><td style="text-align:right">{self.maxScore-self.score}</td></tr>
<tr><td>Percentage Passed</td><td style="text-align:right">{self.percentage}%</td></tr>
</table>
""")
# Lazy-man's singleton
TestResultsAggregator = __TestResultsAggregator()
None
# COMMAND ----------
from pyspark.sql import Row, DataFrame
def returnTrue():
return True
def compareFloats(valueA, valueB, tolerance=0.01):
# Usage: compareFloats(valueA, valueB) (uses default tolerance of 0.01)
# compareFloats(valueA, valueB, tolerance=0.001)
from builtins import abs
try:
if (valueA == None and valueB == None):
return True
else:
return abs(float(valueA) - float(valueB)) <= tolerance
except:
return False
def compareRows(rowA: Row, rowB: Row):
# Usage: compareRows(rowA, rowB)
# compares two Dictionaries
if (rowA == None and rowB == None):
return True
elif (rowA == None or rowB == None):
return False
else:
return rowA.asDict() == rowB.asDict()
def compareDataFrames(dfA: DataFrame, dfB: DataFrame):
from functools import reduce
# Usage: compareDataFrames(dfA, dfB)
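    # Note (added comment): rows are paired purely by zipWithIndex position,
    # so the comparison is order-sensitive; callers aggregate or sort first
    # (e.g. the gold reality check sorts both frames by "week").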
if (dfA == None and dfB == None):
return True
else:
n = dfA.count()
if (n != dfB.count()):
return False
kv1 = dfA.rdd.zipWithIndex().map(lambda t : (t[1], t[0])).collectAsMap()
kv2 = dfB.rdd.zipWithIndex().map(lambda t : (t[1], t[0])).collectAsMap()
kv12 = [kv1, kv2]
d = {}
for k in kv1.keys():
            d[k] = tuple(kv[k] for kv in kv12)
return reduce(lambda a, b: a and b, [compareRows(rowTuple[0], rowTuple[1]) for rowTuple in d.values()])
def checkSchema(schemaA, schemaB, keepOrder=True, keepNullable=False):
# Usage: checkSchema(schemaA, schemaB, keepOrder=false, keepNullable=false)
from pyspark.sql.types import StructField
if (schemaA == None and schemaB == None):
return True
elif (schemaA == None or schemaB == None):
return False
else:
schA = schemaA
schB = schemaB
if (keepNullable == False):
schA = [StructField(s.name, s.dataType) for s in schemaA]
schB = [StructField(s.name, s.dataType) for s in schemaB]
if (keepOrder == True):
return [schA] == [schB]
else:
return set(schA) == set(schB)
None
# COMMAND ----------
from pyspark.sql import DataFrame
from pyspark.sql.functions import col, sum
import os
def verifyColumnsExists(df: DataFrame, columnNames):
return all(col in df.columns for col in columnNames)
def findColumnDatatype(df: DataFrame, columnName):
try:
return df.select(columnName).dtypes[0][1]
except Exception as e:
return False
def isDelta(path):
found = False
for file in dbutils.fs.ls(path):
if file.name == "_delta_log/":
found = True
return found
def checkForNulls(df: DataFrame, columnName):
    try:
        nullCount = df.select(sum(col(columnName).isNull().astype(IntegerType())).alias('nullCount')).collect()[0].nullCount
        # Return an explicit boolean; the original fell through to an implicit
        # None when no nulls were found, which only passed because the test
        # harness treats anything other than a literal False as success.
        return nullCount == 0
    except Exception as e:
        return True
def isStreamingDataframe(df: DataFrame):
return df.take(1)[0].operation == "STREAMING UPDATE"
def checkOutputMode(df: DataFrame, mode):
return df.take(1)[0].operationParameters['outputMode'] == mode
print("Finished setting up the capstone environment.")
# ---- File: scraper/Scraper.py
# ---- Repo: tiskutis/Capstone24Scraper (Python, MIT)
import requests
from bs4 import BeautifulSoup as bs, BeautifulSoup
import pandas as pd
import numpy as np
import re
import logging
from typing import Optional, Union
class Scraper:
"""
    This is a scraper class, which can scrape California housing information from the https://www.point2homes.com/ website.
The flow:
- First, all California areas are extracted and put into a list.
    - Area list is iterated over. Each area has a number of pages with real estate descriptions. The user can select
    how many pages to go through.
    - The scraper visits every real estate link on the page and scrapes the required information. After all houses are scraped,
    the scraper moves to the next page. When no more pages are left or the user-specified page limit is reached, the scraper
    moves to the next category.
"""
    def __init__(
        self,
        log_file: str = "scraping.log",
        basic_url: str = "https://www.point2homes.com",
    ):
        """
        Initialization method
        :param log_file: path of the text file to log events to
        :param basic_url: url used to construct new urls
        """
        # logging.basicConfig returns None, so calling it inside a default
        # argument (as the original did) left self.logger set to None.
        # Configure the root logger here and keep a real logger instead.
        logging.basicConfig(filename=log_file, filemode="w", level=logging.DEBUG)
        self.logger = logging.getLogger(__name__)
        self.basic_url = basic_url
@staticmethod
    def get_page(url_: str) -> Optional[BeautifulSoup]:
"""
Gets page HTML from the provided url
        :param url_: page you want to scrape from
        :return: the page at the provided url, parsed with the Beautiful Soup library;
        if the response is not ok, the status code is logged and None is returned.
"""
logging.info(f"Getting url: {url_}")
response = requests.get(url_, headers={"User-Agent": "Mozilla/5.0"})
if not response.ok:
logging.error(f"Server response: {response.status_code}")
return None
else:
return bs(response.text, "lxml")
@staticmethod
def get_location_urls(soup: BeautifulSoup) -> list:
"""
Finds all location links in a page and puts them in a list
:param soup: BeautifulSoup object
:return: list with location urls
"""
location_urls_ = []
for elem_ in soup.find_all("a", class_="psrk-events"):
if elem_["href"] not in location_urls_ and "CA" in elem_["href"]:
location_urls_.append(elem_["href"])
return location_urls_
@staticmethod
    def get_price(soup: BeautifulSoup) -> Union[int, float]:
"""
Extracts price from provided BeautifulSoup object
:param soup: BeautifulSoup object
:return: price of type int or np.nan if not found
"""
try:
price = int(
re.findall(
r"[0-9][0-9,.]+",
soup.find("div", class_="price").get_text().strip(),
)[0].replace(",", "")
)
except Exception as err:
logging.warning(f"Price not found. Error message: {err}")
return np.nan
return price
@staticmethod
    def get_bedrooms(soup: BeautifulSoup) -> Union[int, float]:
"""
Extracts number of bedrooms from provided BeautifulSoup object
:param soup: BeautifulSoup object
:return: number of bedrooms of type int or np.nan if not found
"""
try:
bedrooms = int(
re.findall(
r"\d+", soup.find("li", class_="ic-beds").get_text().strip()
)[0]
)
except Exception as err:
logging.warning(f"Bedroom not found. Error message: {err}")
return np.nan
return bedrooms
@staticmethod
    def get_baths(soup: BeautifulSoup) -> Union[int, float]:
"""
Extracts number of baths from provided BeautifulSoup object
:param soup: BeautifulSoup object
:return: number of baths of type int or np.nan if not found
"""
try:
baths = int(
re.findall(
r"\d+", soup.find("li", class_="ic-baths").get_text().strip()
)[0]
)
except Exception as err:
logging.warning(f"Bath not found. Error message: {err}")
return np.nan
return baths
@staticmethod
def get_sqm(soup: BeautifulSoup) -> float:
"""
Extracts house size in square meters from provided BeautifulSoup object
:param soup: BeautifulSoup object
:return: house size in square meters or np.nan if not found
"""
try:
sqm = round(
float(
re.findall(
r"[0-9][0-9,.]+",
soup.find("li", class_="ic-sqft").get_text().strip(),
)[0].replace(",", "")
)
/ 10.764,
2,
)
except Exception as err:
logging.warning(f"Sqm not found. Error message: {err}")
return np.nan
return sqm
@staticmethod
def get_lot_size(soup: BeautifulSoup) -> float:
"""
Extracts lot size in acres from provided BeautifulSoup object
:param soup: BeautifulSoup object
:return: lot size in acres or np.nan if not found
"""
try:
lot_size = float(
re.findall(
r"[0-9][0-9,.]+",
soup.find("li", class_="ic-lotsize").get_text().strip(),
)[0]
)
except Exception as err:
logging.warning(f"Lot size not found. Error message: {err}")
return np.nan
return lot_size
@staticmethod
def description_dictionary(soup: BeautifulSoup) -> dict:
"""
Extracts description information, contained in dt and dd elements
:param soup: BeautifulSoup object
:return: dictionary with dt as keys and dd as values
"""
dt_data = soup.find_all("dt")
dd_data = soup.find_all("dd")
description = {}
for dt, dd in zip(dt_data, dd_data):
description[dt.get_text().strip()] = dd.get_text().strip()
return description
@staticmethod
def demographics_dictionary(soup: BeautifulSoup) -> dict:
"""
Extracts demographics information, contained in td
:param soup: BeautifulSoup object
:return: dictionary with demographics in that area keys (e.g. median income, median age) and values
"""
demographics = soup.find("div", {"id": "demographics_content"}).find_all("td")
demographics_ = {}
for i in range(0, len(demographics), 2):
demographics_[demographics[i].get_text()] = demographics[i + 1].get_text()
return demographics_
    def scrape_info_one_house(self, soup: BeautifulSoup) -> Optional[dict]:
"""
Accepts soup object which contains all the required information about one house.
Scrapes house type, year built, parking spaces, area population, median age, total households,
median year built, median household income, number of baths and bedrooms, size in square meters, lot size in
acres and price.
:param soup: BeautifulSoup object
:return: dictionary with all the required info
"""
house_information = {}
try:
description = self.description_dictionary(soup)
demographics = self.demographics_dictionary(soup)
house_information["Type"] = description["Type"]
house_information["Year Built"] = description["Year Built"]
house_information["Parking Spaces"] = int(
re.findall(r"\d+", description["Parking info"])[0]
)
house_information["Area population"] = int(
demographics["Total population"].replace(",", "")
)
house_information["Median age"] = demographics["Median age"]
house_information["Total households"] = int(
demographics["Total households"].replace(",", "")
)
house_information["Median year built"] = demographics["Median year built"]
house_information["Median household income"] = int(
demographics["Median household income"].replace(",", "")
)
house_information["Bedrooms"] = self.get_bedrooms(soup)
house_information["Baths"] = self.get_baths(soup)
house_information["Square Meters"] = self.get_sqm(soup)
house_information["Lot size (acres)"] = self.get_lot_size(soup)
house_information["Price"] = self.get_price(soup)
return house_information
except Exception as err:
logging.warning(
f"Some of the required information was missing for this house. Error message: {err}"
)
return None
    def get_houses_in_location(
        self,
        location_url_: str,
        houses_in_location: set = None,
        page_limit: int = 1,
        page_number: int = 1,
    ) -> list:
        """
        Accepts a location url and goes through the pages in that location, scraping every house
        until the page limit is reached. Returns a list of dicts with scraped information about every house in that location.
        :param location_url_: string with a link to a specific location in California state
        :param houses_in_location: set with already scraped links. Since retrieved links can be repetitive, there is
        no need to revisit a link which has already been scraped. A set is used for faster lookup
        :param page_limit: how many pages to scrape. If not passed by the user, default is 1
        :param page_number: current page to scrape. Starting number is 1
        :return: list of dictionaries
        """
        # A fresh set per call avoids the shared-mutable-default pitfall.
        if houses_in_location is None:
            houses_in_location = set()
        houses_information = []
        try:
            new_url = self.basic_url + location_url_ + f"?page={page_number}"
            page_ = self.get_page(new_url)
            if page_.find_all("li", class_="lslide"):
                for elem in page_.find_all("li", class_="lslide"):
                    link = elem.find("a")["href"]
                    if link.startswith("/US") and link not in houses_in_location:
                        houses_information.append(
                            self.scrape_info_one_house(
                                self.get_page(self.basic_url + link)
                            )
                        )
                        houses_in_location.add(link)
            if page_number < page_limit:
                page_number += 1
                # Collect the recursive result; the original call discarded it,
                # so only the first page's houses were ever returned.
                houses_information.extend(
                    self.get_houses_in_location(
                        location_url_,
                        houses_in_location,
                        page_limit,
                        page_number=page_number,
                    )
                )
        except Exception as err:
            logging.error(f"Error occurred while scraping locations. Message: {err}")
        return houses_information
def scrape_platform(self, page_limit: int = 1) -> None:
"""
Main scraping function. Accepts page limit - how many pages to scrape, default is 1.
The flow:
- First, all California areas (locations) are extracted and put into a list.
        - The area list is iterated over. Each area has a number of pages with real estate descriptions. The user can
        select how many pages to go through.
        - The scraper visits every real estate link on a page and scrapes the required information. After all houses are
        scraped, it moves to the next page. When no pages are left or the user-denoted page limit is reached, it
        moves to the next area.
:param page_limit: how many pages to scrape per area
:return: None.
"""
starting_url = "https://www.point2homes.com/US/Real-Estate-Listings/CA.html"
houses = []
starting_page = self.get_page(starting_url)
locations = self.get_location_urls(starting_page)
for location in locations:
houses.extend(
self.get_houses_in_location(location, set(), page_limit=page_limit)
)
self.to_dataframe(houses).to_csv("California Housing.csv")
@staticmethod
def to_dataframe(house_list: list) -> pd.DataFrame:
"""
Filters out None values and converts the list to pandas DataFrame
:param house_list: list of dictionaries
:return: pandas DataFrame
"""
return pd.DataFrame([house for house in house_list if house is not None])
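A hedged usage sketch for the class above; it assumes network access to point2homes.com and that the site's markup still matches the selectors used in the methods:
scraper = Scraper()
scraper.scrape_platform(page_limit=2)  # writes "California Housing.csv" to the working directory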
| 38.241692
| 123
| 0.586902
| 1,480
| 12,658
| 4.906081
| 0.187162
| 0.042143
| 0.027269
| 0.034706
| 0.349125
| 0.31015
| 0.286737
| 0.234403
| 0.222559
| 0.165129
| 0
| 0.004688
| 0.325881
| 12,658
| 330
| 124
| 38.357576
| 0.846244
| 0.316717
| 0
| 0.240838
| 0
| 0
| 0.125079
| 0.002774
| 0
| 0
| 0
| 0
| 0
| 1
| 0.073298
| false
| 0
| 0.031414
| 0
| 0.209424
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eb444f1d2f4c6079bc153578e3e68294eef319a0
| 4,344
|
py
|
Python
|
src/gapminder_challenge/dashboard/dash_app2.py
|
UBC-MDS/gapminder_challenge
|
bbc8132a475d483e7c6c46572c8efca40b506afc
|
[
"MIT"
] | 1
|
2022-03-19T03:31:49.000Z
|
2022-03-19T03:31:49.000Z
|
src/gapminder_challenge/dashboard/dash_app2.py
|
imtvwy/gapminder_challenge
|
0f7d9816b0c5baf6422baff24e0413c800d6e62a
|
[
"MIT"
] | 39
|
2022-02-17T05:04:48.000Z
|
2022-03-19T21:37:20.000Z
|
src/gapminder_challenge/dashboard/dash_app2.py
|
imtvwy/gapminder_challenge
|
0f7d9816b0c5baf6422baff24e0413c800d6e62a
|
[
"MIT"
] | 1
|
2022-03-19T03:30:08.000Z
|
2022-03-19T03:30:08.000Z
|
import pandas as pd
from dash import Dash, html, dcc, Input, Output
import altair as alt
df = pd.read_csv('../../data/raw/world-data-gapminder_raw.csv') # local run
# df = pd.read_csv('data/raw/world-data-gapminder_raw.csv') # heroku deployment
url = '/dash_app2/'
def add_dash(server):
"""
    It creates a Dash app that plots a line chart of children per woman from the gapminder dataset
    with 2 widgets: a rangeslider for years and a dropdown for filtering
:param server: The Flask app object
:return: A Dash server
"""
app = Dash(server=server, url_base_pathname=url)
app.layout = html.Div([
html.Iframe(
id='line_children',
style={'border-width': '0', 'width': '600px', 'height': '400px', 'display': 'block',
'margin-left': 'auto', 'margin-right': 'auto'}),
html.Label([
'Zoom in years: ',
dcc.RangeSlider(1918, 2018, 10, value=[1918, 2018], id='year_range_slider',
marks={str(year): str(year) for year in range(1918, 2028, 10)}),
]),
html.Label([
'See breakdown number by: ',
dcc.Dropdown(options=[
{'label': 'All', 'value': 'all'},
{'label': 'Income Group', 'value': 'income_group'},
{'label': 'Region', 'value': 'region'}
],
value='', id='filter_dropdown')
]),
html.Div(id="data_card_2", **{'data-card_2_data': []})
])
# Set up callbacks/backend
@app.callback(
Output('line_children', 'srcDoc'),
Input('year_range_slider', 'value'),
Input('filter_dropdown', 'value')
)
def update_line(year_range_slider, filter_dropdown):
"""
        The function takes in a year range and a filter option and outputs the children-per-woman
        line chart for that year range with the filter applied
:param year_range_slider: The year range to plot
:param filter_dropdown: The filter to plot
:return: The Altair chart is being returned.
"""
filter = filter_dropdown
title_params = alt.TitleParams("Average Number of Children", subtitle=[
"Click on legend entries to mute the corresponding lines"])
if filter == "all" or filter == '':
df_by_year = df.groupby(["year"]).mean()
df_by_year = df_by_year.reset_index()
chart = alt.Chart(df_by_year.query(f'year>={year_range_slider[0]} and year<={year_range_slider[1]}'),
title="Average Number of Children").mark_line().encode(
y=alt.Y("children_per_woman", title="Children per woman"),
x=alt.X("year", title="Year"),
strokeWidth=alt.value(3),
tooltip=['year', 'children_per_woman']).interactive()
else:
# group by filter field and then year to get the average
df_by_year = df.groupby([filter, "year"]).mean()
df_by_year = df_by_year.reset_index()
# add interactive click
click = alt.selection_multi(fields=[filter], bind='legend')
chart = alt.Chart(df_by_year.query(f'year>={year_range_slider[0]} and year<={year_range_slider[1]}'),
title=title_params).mark_line().encode(
y=alt.Y("children_per_woman", title="Children per woman"),
x=alt.X("year", title="Year"),
strokeWidth=alt.value(3),
# color=filter,
color=alt.Color(filter, title=filter.replace('_', ' ').title()),
opacity=alt.condition(click, alt.value(0.9), alt.value(0.2)),
tooltip=['year', 'children_per_woman']).interactive().add_selection(click)
return chart.to_html()
@app.callback(
Output('data_card_2', 'data-card_2_data'),
Input('filter_dropdown', 'value'))
def get_data(filter_dropdown="income_group"):
if filter_dropdown == '':
filter_dropdown = 'income_group'
df_by_year = df.groupby([filter_dropdown, "year"]).mean()
df_viz = df_by_year.reset_index()
df_viz = df_viz[[filter_dropdown, 'year', 'children_per_woman']]
df_viz = df_viz.to_json()
        return df_viz
return app.server
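A hedged sketch of mounting this app on a Flask server, which is what add_dash expects (the Flask import and port are assumptions, not part of this file, and the CSV path read at module import must exist):
from flask import Flask
server = Flask(__name__)
add_dash(server)       # registers the Dash app under /dash_app2/
server.run(port=8050)  # hypothetical local run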
| 42.174757
| 113
| 0.575506
| 538
| 4,344
| 4.464684
| 0.291822
| 0.041216
| 0.033306
| 0.020816
| 0.310158
| 0.273106
| 0.222315
| 0.203997
| 0.203997
| 0.203997
| 0
| 0.016478
| 0.287523
| 4,344
| 102
| 114
| 42.588235
| 0.759612
| 0.157689
| 0
| 0.225352
| 0
| 0
| 0.239204
| 0.043466
| 0
| 0
| 0
| 0
| 0
| 1
| 0.042254
| false
| 0
| 0.042254
| 0
| 0.126761
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
eb448a448b8928b4d93cd021756f058d5d672505
| 4,595
|
py
|
Python
|
emulator/utils/common.py
|
Harry45/emuPK
|
c5cd8a4ab7ef593b196ee58d9df5d826d444a2b9
|
[
"MIT"
] | 2
|
2021-05-10T16:59:34.000Z
|
2021-05-19T16:10:24.000Z
|
emulator/utils/common.py
|
Harry45/emuPK
|
c5cd8a4ab7ef593b196ee58d9df5d826d444a2b9
|
[
"MIT"
] | null | null | null |
emulator/utils/common.py
|
Harry45/emuPK
|
c5cd8a4ab7ef593b196ee58d9df5d826d444a2b9
|
[
"MIT"
] | 2
|
2021-04-16T23:55:16.000Z
|
2021-09-09T12:48:41.000Z
|
# Author: Arrykrishna Mootoovaloo
# Collaborators: Alan Heavens, Andrew Jaffe, Florent Leclercq
# Email : a.mootoovaloo17@imperial.ac.uk
# Affiliation : Imperial Centre for Inference and Cosmology
# Status : Under Development
'''
Perform all additional operations such as interpolations
'''
import os
import logging
import numpy as np
import scipy.interpolate as itp
from typing import Tuple
def indices(nzmax: int) -> Tuple[list, tuple]:
'''
Generates indices for double sum power spectra
:param: nzmax (int) - the maximum number of redshifts (assuming first redshift is zero)
:return: di_ee (list), idx_gi (tuple) - double indices for EE and indices for GI
'''
    # create empty lists to record all indices
    # for the EE power spectrum
di_ee = []
# for GI power spectrum
# ab means alpha, beta
Lab_1 = []
Lab_2 = []
Lba_1 = []
Lba_2 = []
for i in range(1, nzmax + 1):
for j in range(1, nzmax + 1):
di_ee.append(np.min([i, j]))
if i > j:
Lab_1.append(i)
Lab_2.append(j)
elif j > i:
Lba_1.append(i)
Lba_2.append(j)
Lab_1 = np.asarray(Lab_1)
Lab_2 = np.asarray(Lab_2)
Lba_1 = np.asarray(Lba_1)
Lba_2 = np.asarray(Lba_2)
di_ee = np.asarray(di_ee)
idx_gi = (Lab_1, Lab_2, Lba_1, Lba_2)
return di_ee, idx_gi
def dvalues(d: dict) -> np.ndarray:
'''
Returns an array of values instead of dictionary format
:param: d (dict) - a dictionary with keys and values
:return: v (np.ndarray) - array of values
'''
v = np.array(list(d.values()))
return v
def like_interp_2d(inputs: list, int_type: str = 'cubic') -> object:
'''
We want to predict the function for any new point of k and z (example)
:param: inputs (list) - a list containing x, y, f(x,y)
:param: int_type (str) - interpolation type (default: 'cubic')
:return: f (object) - the interpolator
'''
k, z, f_kz = np.log(inputs[0]), inputs[1], inputs[2]
inputs_trans = [k, z, f_kz]
f = itp.interp2d(*inputs_trans)
return f
def two_dims_interpolate(inputs: list, grid: list) -> np.ndarray:
'''
Function to perform 2D interpolation using interpolate.interp2d
:param: inputs (list) : inputs to the interpolation module, that is, we need to specify the following:
- x
- y
- f(x,y)
- 'linear', 'cubic', 'quintic'
:param: grid (list) : a list containing xnew and ynew
:return: pred_new (np.ndarray) : the predicted values on the 2D grid
'''
# check that all elements are greater than 0 for log-transformation to be used
condition = np.all(inputs[2] > 0)
if condition:
# transform k and f to log
k, z, f_kz, int_type = np.log(inputs[0]), inputs[1], np.log(inputs[2]), inputs[3]
else:
# transform in k to log
k, z, f_kz, int_type = np.log(inputs[0]), inputs[1], inputs[2], inputs[3]
inputs_trans = [k, z, f_kz, int_type]
    # transform the grid to log
knew, znew = np.log(grid[0]), grid[1]
grid_trans = [knew, znew]
f = itp.interp2d(*inputs_trans)
if condition:
pred_new = np.exp(f(*grid_trans))
else:
pred_new = f(*grid_trans)
return pred_new
def interpolate(inputs: list) -> np.ndarray:
'''
Function to interpolate the power spectrum along the redshift axis
:param: inputs (list or tuple) : x values, y values and new values of x
:return: ynew (np.ndarray) : an array of the interpolated power spectra
'''
x, y, xnew = inputs[0], inputs[1], inputs[2]
spline = itp.splrep(x, y)
ynew = itp.splev(xnew, spline)
return ynew
def get_logger(name: str, log_name: str, folder_name: str = 'logs'):
    '''
    Create a log file for each Python script
    :param: name (str) - name of the Python script
    :param: log_name (str) - name of the output log file
    :param: folder_name (str) - folder in which log files are stored (default: 'logs')
    '''
# create the folder if it does not exist
if not os.path.exists(folder_name):
os.makedirs(folder_name)
log_format = '%(asctime)s %(name)8s %(levelname)5s %(message)s'
logging.basicConfig(level=logging.DEBUG,
format=log_format,
filename=folder_name + '/' + log_name + '.log',
filemode='w')
console = logging.StreamHandler()
console.setLevel(logging.DEBUG)
console.setFormatter(logging.Formatter(log_format))
logging.getLogger(name).addHandler(console)
return logging.getLogger(name)
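A toy run of the 2D interpolation helper above; note that it wraps scipy.interpolate.interp2d, which has been removed from recent SciPy releases, so this assumes an older SciPy (all values are hypothetical):
import numpy as np
k = np.logspace(-3, 0, 40)     # hypothetical wavenumbers
z = np.linspace(0.0, 2.0, 10)  # hypothetical redshifts
f_kz = np.array([[kk ** -1.5 * (1 + zz) for kk in k] for zz in z])  # positive, so the log branch is used
knew = np.logspace(-2, -1, 100)
pred = two_dims_interpolate([k, z, f_kz, 'cubic'], [knew, z])  # shape (10, 100)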
| 24.972826
| 106
| 0.618498
| 669
| 4,595
| 4.150972
| 0.316891
| 0.008642
| 0.005402
| 0.009003
| 0.131437
| 0.0731
| 0.053655
| 0.053655
| 0.042132
| 0.025927
| 0
| 0.015769
| 0.268553
| 4,595
| 183
| 107
| 25.10929
| 0.810473
| 0.40914
| 0
| 0.088235
| 0
| 0
| 0.026066
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088235
| false
| 0
| 0.073529
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de1a03c3bf2d4b4418706f4fb2057bc7977a7251
| 777
|
py
|
Python
|
client.py
|
juzejunior/HttpBasicServer
|
7e77b49f693d9cfe0d782e93026d8f9261368b69
|
[
"MIT"
] | null | null | null |
client.py
|
juzejunior/HttpBasicServer
|
7e77b49f693d9cfe0d782e93026d8f9261368b69
|
[
"MIT"
] | null | null | null |
client.py
|
juzejunior/HttpBasicServer
|
7e77b49f693d9cfe0d782e93026d8f9261368b69
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
Simple HTTP client to request HTML files (Python 2: uses httplib and raw_input)
Modification: 11/09/2017
Author: J. Júnior
'''
import httplib
import sys
#get http server ip - pass in the command line
http_server = sys.argv[1]
#create a connection with the server
conn = httplib.HTTPConnection(http_server)
while 1:
cmd = raw_input('input command (ex. GET index.html): ')
cmd = cmd.split()
if cmd[0] == 'exit': #type exit to end it
break
#request command to server
conn.request(cmd[0], cmd[1])
#get response from server
rsp = conn.getresponse()
#print server response and data
print(rsp.status, rsp.reason)
print(rsp.getheaders())
data_received = rsp.read()
print(data_received)
#close connection
conn.close()
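A non-interactive sketch of the same request flow (Python 2, matching the httplib import above; the host and path are hypothetical):
import httplib
conn = httplib.HTTPConnection('127.0.0.1:8000')
conn.request('GET', '/index.html')
rsp = conn.getresponse()
print(rsp.status, rsp.reason)
conn.close()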
| 22.852941
| 58
| 0.679537
| 115
| 777
| 4.547826
| 0.582609
| 0.057361
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022472
| 0.198198
| 777
| 33
| 59
| 23.545455
| 0.817014
| 0.413127
| 0
| 0
| 0
| 0
| 0.093458
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.125
| 0
| 0.125
| 0.1875
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de1d5ad5042762573fde2a3a38799da995504ae1
| 6,881
|
py
|
Python
|
pyssh/crypto/asymmetric.py
|
beckjake/pyssh
|
d6b7a6cca7e38d0835f84386723ec10ac5ad621f
|
[
"CC0-1.0"
] | null | null | null |
pyssh/crypto/asymmetric.py
|
beckjake/pyssh
|
d6b7a6cca7e38d0835f84386723ec10ac5ad621f
|
[
"CC0-1.0"
] | null | null | null |
pyssh/crypto/asymmetric.py
|
beckjake/pyssh
|
d6b7a6cca7e38d0835f84386723ec10ac5ad621f
|
[
"CC0-1.0"
] | null | null | null |
"""Implement asymmetric cryptography.
"""
from __future__ import print_function, division, absolute_import
from __future__ import unicode_literals
from cryptography.hazmat.primitives import hashes, serialization
from cryptography.hazmat.primitives.asymmetric import rsa, dsa, utils, padding
from cryptography.hazmat.primitives.asymmetric.padding import PKCS1v15
from cryptography.hazmat.backends import default_backend
from collections import OrderedDict
import io
from builtins import int #pylint: disable=redefined-builtin
from pyssh.constants import ENC_SSH_RSA, ENC_SSH_DSS
from pyssh.base_types import String, MPInt
# pylint:disable=invalid-name
class UnsupportedKeyProtocol(Exception):
"""Key protocol not supported."""
class InvalidAlgorithm(Exception):
"""Mismatched algorithm"""
#TODO: ECDSA (RFC 5656)
class BaseAlgorithm(object):
"""The base algorithm. Has private keys and/or public keys and does
signature creation and/or verification.
"""
FORMAT_STR = None
PUBKEY_CLASS = None
PRIVKEY_CLASS = None
def __init__(self, privkey=None, pubkey=None):
self._privkey = None
self.privkey = privkey
self.pubkey = pubkey
@property
def privkey(self):
"""Getter for the private key."""
return self._privkey
@privkey.setter
def privkey(self, value):
"""When setting the private key, also set the public key to match."""
self._privkey = value
if value:
self.pubkey = value.public_key()
def unpack_pubkey(self, stream):
"""Unpack a public key from a stream."""
raise NotImplementedError('not implemented')
def pack_pubkey(self):
"""Pack a public key into bytes."""
raise NotImplementedError('not implemented')
@classmethod
def _check_keytype(cls, stream):
"""Verify that the keytype from the stream is the expected one."""
keytype = String.unpack_from(stream)
if cls.FORMAT_STR != keytype:
msg = 'Got {!r}, expected {!r}'.format(keytype, cls.FORMAT_STR)
raise InvalidAlgorithm(msg)
def verify_signature(self, signature, data):
"""Verify the signature against the given data. Pubkey must be set."""
raise NotImplementedError('not implemented')
def sign(self, data):
"""Sign some data. Privkey must be set."""
raise NotImplementedError('not implemented')
def read_pubkey(self, data):
"""Read a public key from data in the ssh public key format.
:param bytes data: the data to read.
Sets self.pubkey.
"""
pubkey = serialization.load_ssh_public_key(data, default_backend())
assert isinstance(pubkey.public_numbers(), self.PUBKEY_CLASS)
self.pubkey = pubkey
def read_privkey(self, data, password=None):
"""Read a PEM-encoded private key from data. If a password is set, it
will be used to decode the key.
:param bytes data: the data to read
:param bytes password: The password.
Sets self.privkey.
"""
privkey = serialization.load_pem_private_key(data, password,
default_backend())
assert isinstance(privkey.private_numbers(), self.PRIVKEY_CLASS)
self.privkey = privkey
class RSAAlgorithm(BaseAlgorithm):
"""Support for the RSA algorithm."""
FORMAT_STR = String(ENC_SSH_RSA)
PRIVKEY_CLASS = rsa.RSAPrivateNumbers
PUBKEY_CLASS = rsa.RSAPublicNumbers
def unpack_pubkey(self, stream):
self._check_keytype(stream)
e = MPInt.unpack_from(stream).value
n = MPInt.unpack_from(stream).value
self.pubkey = rsa.RSAPublicNumbers(e, n).public_key(default_backend())
def pack_pubkey(self):
return b''.join([
self.FORMAT_STR.pack(),
MPInt(self.pubkey.public_numbers().e).pack(),
MPInt(self.pubkey.public_numbers().n).pack()
])
def verify_signature(self, signature, data):
stream = io.BytesIO(signature)
self._check_keytype(stream)
blob = String.unpack_from(stream).value
verifier = self.pubkey.verifier(
blob,
padding.PKCS1v15(),
hashes.SHA1()
)
verifier.update(data)
verifier.verify()
def sign(self, data):
signer = self.privkey.signer(
PKCS1v15(),
hashes.SHA1()
)
signer.update(data)
signed = signer.finalize()
return b''.join([
self.FORMAT_STR.pack(),
String(signed).pack()
])
class DSAAlgorithm(BaseAlgorithm):
"""Support for the DSA."""
FORMAT_STR = String(ENC_SSH_DSS)
PRIVKEY_CLASS = dsa.DSAPrivateNumbers
PUBKEY_CLASS = dsa.DSAPublicNumbers
def unpack_pubkey(self, stream):
self._check_keytype(stream)
p = MPInt.unpack_from(stream)
q = MPInt.unpack_from(stream)
g = MPInt.unpack_from(stream)
params = dsa.DSAParameterNumbers(p.value, q.value, g.value)
y = MPInt.unpack_from(stream)
pubnums = dsa.DSAPublicNumbers(y.value, params)
self.pubkey = pubnums.public_key(default_backend())
def pack_pubkey(self):
pubnums = self.pubkey.public_numbers()
return b''.join([
self.FORMAT_STR.pack(),
MPInt(pubnums.parameter_numbers.p).pack(),
MPInt(pubnums.parameter_numbers.q).pack(),
MPInt(pubnums.parameter_numbers.g).pack(),
MPInt(pubnums.y).pack(),
])
def verify_signature(self, signature, data):
stream = io.BytesIO(signature)
self._check_keytype(stream)
blob = String.unpack_from(stream).value
# convert to rfc6979 signature
blob = utils.encode_rfc6979_signature(
r=int.from_bytes(blob[:20], 'big'),
s=int.from_bytes(blob[20:], 'big')
)
verifier = self.pubkey.verifier(
blob,
hashes.SHA1()
)
verifier.update(data)
verifier.verify()
def sign(self, data):
signer = self.privkey.signer(
hashes.SHA1()
)
signer.update(data)
signed = signer.finalize()
r, s = utils.decode_rfc6979_signature(signed)
return b''.join([
self.FORMAT_STR.pack(),
String(int(r).to_bytes(20, 'big') + int(s).to_bytes(20, 'big')).pack(),
])
PUBLIC_KEY_PROTOCOLS = OrderedDict((
(ENC_SSH_RSA, RSAAlgorithm),
(ENC_SSH_DSS, DSAAlgorithm)
))
def get_asymmetric_algorithm(keytype):
"""Get the referenced public key type. If a signature_blob blob is included,
validate it.
"""
try:
handler = PUBLIC_KEY_PROTOCOLS[keytype]
except KeyError:
raise UnsupportedKeyProtocol(keytype)
return handler()
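A hedged sign/verify sketch for the classes above; it assumes a PEM-encoded RSA key on disk (the path is hypothetical) and the pre-2.0 cryptography signer/verifier API this module is written against:
algo = get_asymmetric_algorithm(ENC_SSH_RSA)
with open('test_key.pem', 'rb') as fobj:  # hypothetical key file
    algo.read_privkey(fobj.read(), password=None)
signature = algo.sign(b'payload')
algo.verify_signature(signature, b'payload')  # raises on a bad signature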
| 31.277273
| 83
| 0.636826
| 794
| 6,881
| 5.374055
| 0.224181
| 0.028123
| 0.033747
| 0.029529
| 0.34427
| 0.252637
| 0.221701
| 0.209046
| 0.113897
| 0.091868
| 0
| 0.007263
| 0.259701
| 6,881
| 219
| 84
| 31.420091
| 0.830389
| 0.154919
| 0
| 0.39726
| 0
| 0
| 0.016826
| 0
| 0
| 0
| 0
| 0.004566
| 0.013699
| 1
| 0.130137
| false
| 0.013699
| 0.075342
| 0.006849
| 0.342466
| 0.006849
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de1dfa963d73dc87e79e92fa3fe653f6462539c8
| 1,230
|
py
|
Python
|
books/李航-统计学习/machine_learning_algorithm-master/naive_bayes/naive_bayes.py
|
haohonglin/DeepLearning-1
|
c00eee4738d322f6eb5d61d5bafbcfa7b20152a0
|
[
"Apache-2.0"
] | 1
|
2020-12-01T06:13:21.000Z
|
2020-12-01T06:13:21.000Z
|
books/李航-统计学习/machine_learning_algorithm-master/naive_bayes/naive_bayes.py
|
idonashino/DeepLearning
|
c00eee4738d322f6eb5d61d5bafbcfa7b20152a0
|
[
"Apache-2.0"
] | null | null | null |
books/李航-统计学习/machine_learning_algorithm-master/naive_bayes/naive_bayes.py
|
idonashino/DeepLearning
|
c00eee4738d322f6eb5d61d5bafbcfa7b20152a0
|
[
"Apache-2.0"
] | 1
|
2021-01-01T15:28:36.000Z
|
2021-01-01T15:28:36.000Z
|
"""
@ jetou
@ naive_bayes
@ date 2017 10 31
"""
import numpy as np
class naive_bayes:
    def __init__(self, feature, label):
        self.feature = feature.transpose()
        # flatten() collapses the transposed label column to a 1-D array;
        # the original flatten(1) is not a valid order argument for modern NumPy.
        self.label = label.transpose().flatten()
        self.positive = np.count_nonzero(self.label == 1) * 1.0
        self.negative = np.count_nonzero(self.label == -1) * 1.0
def train(self):
positive_dict = {}
negative_dict = {}
        for i in self.feature:
            unique = set(i)
            for j in unique:
                positive_dict[j] = np.count_nonzero(self.label[i == j] == 1) / self.positive
                negative_dict[j] = np.count_nonzero(self.label[i == j] == -1) / self.negative
return positive_dict, negative_dict
def prediction(self, pre_feature):
positive_chance = self.positive / self.label.shape[0]
negative_chance = self.negative / self.label.shape[0]
positive_dict, negative_dict = self.train()
for i in pre_feature:
i = str(i)
positive_chance *= positive_dict[i]
negative_chance *= negative_dict[i]
if positive_chance > negative_chance:
return 1
else:
return -1
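A toy run of the classifier above (all values are hypothetical; features are strings so the str() lookup in prediction matches the keys built in train):
X = np.array([['1', 'S'], ['1', 'M'], ['2', 'M'], ['2', 'L']])
y = np.array([[-1, -1, 1, 1]])
model = naive_bayes(X, y)
print(model.prediction(['2', 'M']))  # prints 1 for this data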
| 28.604651
| 89
| 0.585366
| 155
| 1,230
| 4.464516
| 0.283871
| 0.09104
| 0.080925
| 0.104046
| 0.176301
| 0.176301
| 0.176301
| 0.176301
| 0.101156
| 0.101156
| 0
| 0.024561
| 0.304878
| 1,230
| 42
| 90
| 29.285714
| 0.784795
| 0.037398
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.107143
| false
| 0
| 0.035714
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de2067c1459291384093f5c6102e9ab0301ade68
| 3,164
|
py
|
Python
|
src/rsa_decryption_125/app.py
|
seanballais/rsa-decryption-125
|
df2ad27d055469e7c58a811f40cfc2c8a6171298
|
[
"MIT"
] | null | null | null |
src/rsa_decryption_125/app.py
|
seanballais/rsa-decryption-125
|
df2ad27d055469e7c58a811f40cfc2c8a6171298
|
[
"MIT"
] | null | null | null |
src/rsa_decryption_125/app.py
|
seanballais/rsa-decryption-125
|
df2ad27d055469e7c58a811f40cfc2c8a6171298
|
[
"MIT"
] | null | null | null |
import tkinter
import tkinter.messagebox  # required so tkinter.messagebox.showerror below resolves
from tkinter import *
from rsa_decryption_125 import decryptor
class AppWindow(Frame):
def __init__(self, master=None):
super().__init__(master)
self.master = master
self.init_window()
def init_window(self):
self.master.title('RSA Decryptor')
self.pack(fill=BOTH, expand=1)
self.encrypted_message_label = Label(self, text='Encrypted Message')
self.encrypted_message_label.place(x=0, y=0)
self.encrypted_message_entrybox = Entry(self)
self.encrypted_message_entrybox.place(x=122, y=0, width=300)
self.public_key_label = Label(self, text='Public Key')
self.public_key_label.place(x=0, y=25)
self.n_label = Label(self, text='n =')
self.n_label.place(x=96, y=40)
self.n_entrybox = Entry(self)
self.n_entrybox.place(x=122, y=40, width=300)
self.e_label = Label(self, text='e =')
self.e_label.place(x=96, y=70)
self.e_entrybox = Entry(self)
self.e_entrybox.place(x=122, y=65, width=300)
self.decrypted_message_label = Label(self, text='Decrypted message')
self.decrypted_message_label.place(x=0, y=95)
self.decrypted_message_box = Text(self, width=60, height=12)
box_scroll = Scrollbar(self, command=self.decrypted_message_box.yview)
self.decrypted_message_box.configure(yscrollcommand=box_scroll.set)
self.decrypted_message_box.place(x=0, y=115)
self.decrypt_button = Button(self, text="Decrypt message", command=self.get_decrypted_message)
self.decrypt_button.place(x=0, y=305)
def get_decrypted_message(self):
self.decrypt_button['text'] = 'Decrypting message...'
self.decrypt_button['state'] = 'disabled'
self.encrypted_message_entrybox['state'] = 'disabled'
self.n_entrybox['state'] = 'disabled'
self.e_entrybox['state'] = 'disabled'
encrypted = str(self.encrypted_message_entrybox.get())
n = int(self.n_entrybox.get())
e = int(self.e_entrybox.get())
decrypted = decryptor.decrypt(encrypted, n, e)
self.decrypted_message_box.delete('1.0', END)
try:
self.decrypted_message_box.insert(END, decryptor.decode_message(decrypted))
except ValueError as ve:
tkinter.messagebox.showerror(
'Error!', '{}. Invalid encrypted message or public key.'.format(ve)
)
        except Exception as err:  # avoid shadowing the public exponent e above
            tkinter.messagebox.showerror(
                'Something went terribly wrong!', err
            )
self.decrypt_button['text'] = 'Decrypt message'
self.decrypt_button['state'] = 'normal'
self.encrypted_message_entrybox['state'] = 'normal'
self.n_entrybox['state'] = 'normal'
self.e_entrybox['state'] = 'normal'
self.decrypted_message_box['state'] = 'normal'
def app_exit(self):
exit()
def main():
root = Tk()
root.geometry('430x350')
root.resizable(False, False)
app = AppWindow(root)
root.mainloop()
if __name__ == '__main__':
main()
| 34.391304
| 102
| 0.631163
| 395
| 3,164
| 4.843038
| 0.255696
| 0.100366
| 0.094093
| 0.084161
| 0.161526
| 0.02091
| 0
| 0
| 0
| 0
| 0
| 0.026349
| 0.244311
| 3,164
| 92
| 103
| 34.391304
| 0.773735
| 0
| 0
| 0.028571
| 0
| 0
| 0.103318
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.042857
| 0
| 0.128571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de207e25aa9bca185c57928c53cd749f04d47818
| 2,031
|
py
|
Python
|
model.py
|
starinsun/multiagent-particle-envs
|
23b1c47fad4d71347ba3de7a5e8cec910f08382d
|
[
"MIT"
] | null | null | null |
model.py
|
starinsun/multiagent-particle-envs
|
23b1c47fad4d71347ba3de7a5e8cec910f08382d
|
[
"MIT"
] | null | null | null |
model.py
|
starinsun/multiagent-particle-envs
|
23b1c47fad4d71347ba3de7a5e8cec910f08382d
|
[
"MIT"
] | null | null | null |
import paddle.fluid as fluid
import parl
from parl import layers
class MAModel(parl.Model):
def __init__(self, act_dim):
self.actor_model = ActorModel(act_dim)
self.critic_model = CriticModel()
def policy(self, obs):
return self.actor_model.policy(obs)
def value(self, obs, act):
return self.critic_model.value(obs, act)
def get_actor_params(self):
return self.actor_model.parameters()
def get_critic_params(self):
return self.critic_model.parameters()
class ActorModel(parl.Model):
def __init__(self, act_dim):
hid1_size = 64
hid2_size = 64
self.fc1 = layers.fc(
size=hid1_size,
act='relu',
param_attr=fluid.initializer.Normal(loc=0.0, scale=0.1))
self.fc2 = layers.fc(
size=hid2_size,
act='relu',
param_attr=fluid.initializer.Normal(loc=0.0, scale=0.1))
self.fc3 = layers.fc(
size=act_dim,
act=None,
param_attr=fluid.initializer.Normal(loc=0.0, scale=0.1))
def policy(self, obs):
hid1 = self.fc1(obs)
hid2 = self.fc2(hid1)
        means = self.fc3(hid2)
        return means
class CriticModel(parl.Model):
def __init__(self):
hid1_size = 64
hid2_size = 64
self.fc1 = layers.fc(
size=hid1_size,
act='relu',
param_attr=fluid.initializer.Normal(loc=0.0, scale=0.1))
self.fc2 = layers.fc(
size=hid2_size,
act='relu',
param_attr=fluid.initializer.Normal(loc=0.0, scale=0.1))
self.fc3 = layers.fc(
size=1,
act=None,
param_attr=fluid.initializer.Normal(loc=0.0, scale=0.1))
def value(self, obs_n, act_n):
inputs = layers.concat(obs_n + act_n, axis=1)
hid1 = self.fc1(inputs)
hid2 = self.fc2(hid1)
Q = self.fc3(hid2)
Q = layers.squeeze(Q, axes=[1])
return Q
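A minimal construction sketch for the model above (the action dimension is hypothetical; the fc layers are PARL wrappers that build their fluid parameters lazily):
model = MAModel(act_dim=5)
# policy() and value() are meant to be called inside a fluid program, typically
# through a parl.Agent as in the standard PARL MADDPG examples.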
| 27.445946
| 68
| 0.573609
| 275
| 2,031
| 4.076364
| 0.189091
| 0.042819
| 0.064228
| 0.133809
| 0.503122
| 0.485281
| 0.485281
| 0.438894
| 0.438894
| 0.438894
| 0
| 0.044872
| 0.308715
| 2,031
| 74
| 69
| 27.445946
| 0.753561
| 0
| 0
| 0.533333
| 0
| 0
| 0.007874
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.15
| false
| 0
| 0.05
| 0.066667
| 0.35
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de20802d519423344cda6384cb09a94946775ee1
| 724
|
py
|
Python
|
src/fmWidgets/FmColorEdit.py
|
ComputerArchitectureGroupPWr/Floorplan-Maker
|
8f2922cdab16501d3bb00f93c3130d3f2c593698
|
[
"MIT"
] | null | null | null |
src/fmWidgets/FmColorEdit.py
|
ComputerArchitectureGroupPWr/Floorplan-Maker
|
8f2922cdab16501d3bb00f93c3130d3f2c593698
|
[
"MIT"
] | null | null | null |
src/fmWidgets/FmColorEdit.py
|
ComputerArchitectureGroupPWr/Floorplan-Maker
|
8f2922cdab16501d3bb00f93c3130d3f2c593698
|
[
"MIT"
] | null | null | null |
from PyQt4.QtGui import QPalette, QColor
__author__ = 'pawel'
from PyQt4 import QtGui
from PyQt4.QtCore import Qt
class FmColorEdit(QtGui.QLineEdit):
def __init__(self, parent):
super(FmColorEdit, self).__init__(parent)
self.setReadOnly(True)
    def mousePressEvent(self, event):
        color = QtGui.QColorDialog.getColor(Qt.blue)
        # Keep the previous colour if the user cancels the dialog.
        if not color.isValid():
            return
        self.color = color
        palette = self.palette()
        palette.setColor(QPalette.Base, self.color)
        self.setPalette(palette)
def currentColor(self):
return self.color.name()
    def setColor(self, color):
        # Store a QColor so currentColor() can call name() whatever the caller passed.
        self.color = QColor(color)
        palette = self.palette()
        palette.setColor(QPalette.Base, self.color)
        self.setPalette(palette)
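A hedged usage sketch for the widget above (standard PyQt4 application boilerplate; nothing here is specific to Floorplan-Maker):
from PyQt4 import QtGui
app = QtGui.QApplication([])
edit = FmColorEdit(None)
edit.setColor('#ff0000')  # hypothetical initial colour
edit.show()
app.exec_()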
| 25.857143
| 57
| 0.672652
| 82
| 724
| 5.792683
| 0.402439
| 0.094737
| 0.075789
| 0.105263
| 0.189474
| 0.189474
| 0.189474
| 0
| 0
| 0
| 0
| 0.005348
| 0.225138
| 724
| 28
| 58
| 25.857143
| 0.841355
| 0
| 0
| 0.2
| 0
| 0
| 0.006897
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.15
| 0.05
| 0.45
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de26d7fc8c223d9eef08edc2aa50933adc8cafe1
| 1,777
|
py
|
Python
|
scripts/geodata/address_expansions/equivalence.py
|
Fillr/libpostal
|
bce153188aff9fbe65aef12c3c639d8069e707fc
|
[
"MIT"
] | 3,489
|
2015-03-03T00:21:38.000Z
|
2022-03-29T09:03:05.000Z
|
scripts/geodata/address_expansions/equivalence.py
|
StephenHildebrand/libpostal
|
d8c9847c5686a1b66056e65128e1774f060ff36f
|
[
"MIT"
] | 488
|
2015-05-29T23:04:28.000Z
|
2022-03-29T11:20:24.000Z
|
scripts/geodata/address_expansions/equivalence.py
|
StephenHildebrand/libpostal
|
d8c9847c5686a1b66056e65128e1774f060ff36f
|
[
"MIT"
] | 419
|
2015-11-24T16:53:07.000Z
|
2022-03-27T06:51:28.000Z
|
import random
import re
import six
from itertools import izip
from geodata.address_expansions.gazetteers import *
from geodata.encoding import safe_decode, safe_encode
from geodata.text.normalize import normalized_tokens
from geodata.text.tokenize import tokenize_raw, token_types
from geodata.text.utils import non_breaking_dash_regex
def canonicals_for_language(data, language):
canonicals = set()
for d in data:
lang, dictionary, is_canonical, canonical = d.split(six.b('|'))
if language is None or lang == language:
canonicals.add(canonical)
return canonicals
def equivalent(s1, s2, gazetteer, language):
'''
Address/place equivalence
-------------------------
OSM discourages abbreviations, but to make our training data map better
to real-world input, we can safely replace the canonical phrase with an
abbreviated version and retain the meaning of the words
'''
tokens_s1 = normalized_tokens(s1)
tokens_s2 = normalized_tokens(s2)
abbreviated_s1 = list(abbreviations_gazetteer.filter(tokens_s1))
abbreviated_s2 = list(abbreviations_gazetteer.filter(tokens_s2))
if len(abbreviated_s1) != len(abbreviated_s2):
return False
for ((t1, c1, l1, d1), (t2, c2, l2, d2)) in izip(abbreviated_s1, abbreviated_s2):
if c1 != token_types.PHRASE and c2 != token_types.PHRASE:
if t1 != t2:
return False
        elif c1 == token_types.PHRASE and c2 == token_types.PHRASE:
canonicals_s1 = canonicals_for_language(d1, language)
canonicals_s2 = canonicals_for_language(d2, language)
if not canonicals_s1 & canonicals_s2:
return False
else:
return False
return True
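A hedged call sketch for equivalent(); it relies on the module-level abbreviations_gazetteer pulled in by the star import above, so libpostal's gazetteer data must be available (the strings are hypothetical):
same = equivalent(u'Main Street', u'Main St', abbreviations_gazetteer, 'en')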
| 31.175439
| 85
| 0.68655
| 226
| 1,777
| 5.230089
| 0.429204
| 0.046531
| 0.054146
| 0.045685
| 0.126904
| 0.062606
| 0.062606
| 0.062606
| 0
| 0
| 0
| 0.024818
| 0.229038
| 1,777
| 56
| 86
| 31.732143
| 0.837956
| 0.141812
| 0
| 0.114286
| 0
| 0
| 0.00067
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.057143
| false
| 0
| 0.257143
| 0
| 0.485714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de28f51f7fb4db9f4c4cfed3b53384caa7188918
| 3,200
|
py
|
Python
|
ssanchors/utilities.py
|
IoSR-Surrey/source-separation-anchors
|
c2c73312bdc7f08f37c088fa3986168813f13799
|
[
"MIT"
] | 4
|
2018-07-06T14:35:29.000Z
|
2019-08-28T17:13:11.000Z
|
ssanchors/utilities.py
|
nd1511/source-separation-anchors
|
c2c73312bdc7f08f37c088fa3986168813f13799
|
[
"MIT"
] | 1
|
2018-06-18T17:08:28.000Z
|
2018-06-19T10:45:58.000Z
|
ssanchors/utilities.py
|
nd1511/source-separation-anchors
|
c2c73312bdc7f08f37c088fa3986168813f13799
|
[
"MIT"
] | 1
|
2018-11-05T19:56:17.000Z
|
2018-11-05T19:56:17.000Z
|
from __future__ import division
import numpy as np
from untwist import data
from untwist import transforms
def target_accompaniment(target, others, sample_rate=None):
"""
Given a target source and list of 'other' sources, this function returns
the target and accompaniment as untwist.data.audio.Wave objects. The
accompaniment is defined as the sum of the other sources.
Parameters
----------
target : np.ndarray or Wave, shape=(num_samples, num_channels)
The true target source.
others : List or single np.ndarray or Wave object
Each object should have the shape=(num_samples, num_channels)
If a single array is given, this should correspond to the
accompaniment.
sample_rate : int, optional
Only needed if Wave objects not provided.
Returns
-------
target : Wave, shape=(num_samples, num_channels)
accompaniment : Wave, shape=(num_samples, num_channels)
"""
if isinstance(others, list):
if not isinstance(others[0], data.audio.Wave):
others = [data.audio.Wave(_, sample_rate) for _ in others]
accompaniment = sum(other for other in others)
else:
if not isinstance(others, data.audio.Wave):
others = data.audio.Wave(others, sample_rate)
accompaniment = others
if not isinstance(target, data.audio.Wave):
target = data.audio.Wave(target, sample_rate)
return target, accompaniment
def stft_istft(num_points=2048, window='hann'):
"""
Returns an STFT and an ISTFT Processor object, both configured with the
same window and transform length. These objects are to be used as follows:
>>> stft, istft = stft_istft()
>>> x = untwist.data.audio.Wave.tone() # Or some Wave
>>> y = stft.process(x)
>>> x = istft.process(y)
Parameters
----------
num_points : int
The number of points to use for the window and the fft transform.
window : str
The type of window to use.
Returns
-------
stft : untwist.transforms.stft.STFT
An STFT processor.
    istft : untwist.transforms.stft.ISTFT
        An ISTFT processor.
"""
stft = transforms.STFT(window, num_points, num_points // 2)
istft = transforms.ISTFT(window, num_points, num_points // 2)
return stft, istft
def ensure_audio_doesnt_clip(list_of_arrays):
"""
Takes a list of arrays and scales them by the same factor such that
none clip.
Parameters
----------
list_of_arrays : list
A list of array_like objects
Returns
-------
new_list_of_arrays : list
A list of scaled array_like objects.
"""
max_peak = 1
for audio in list_of_arrays:
audio_peak = np.max(np.abs(audio))
if audio_peak > max_peak:
max_peak = audio_peak
if max_peak >= 1:
print('Warning: Audio has been attenuated to prevent clipping')
gain = 0.999 / max_peak
new_list_of_arrays = []
for audio in list_of_arrays:
new_list_of_arrays.append(audio * gain)
else:
new_list_of_arrays = list_of_arrays
return new_list_of_arrays
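A toy run of the clipping guard above (arrays are hypothetical):
import numpy as np
loud = np.array([0.5, 1.6, -1.2])    # peak of 1.6 would clip
quiet = np.array([0.1, -0.2, 0.05])
safe_loud, safe_quiet = ensure_audio_doesnt_clip([loud, quiet])
# both arrays are scaled by the same 0.999 / 1.6 factor, preserving their balance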
| 25.806452
| 78
| 0.64625
| 429
| 3,200
| 4.666667
| 0.286713
| 0.041958
| 0.065934
| 0.037463
| 0.200799
| 0.161838
| 0.054945
| 0
| 0
| 0
| 0
| 0.005541
| 0.266875
| 3,200
| 123
| 79
| 26.01626
| 0.847826
| 0.480938
| 0
| 0.114286
| 0
| 0
| 0.040194
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085714
| false
| 0
| 0.114286
| 0
| 0.285714
| 0.028571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de2d96eb9081272f5172b90d540db88b204c04b4
| 427
|
py
|
Python
|
Python_Challenge_115/6/F.py
|
LIkelion-at-KOREATECH/LikeLion_Django_Study_Summary
|
c788182af5bcfd16bdd4b57235a48659758e494b
|
[
"MIT"
] | 28
|
2019-10-15T13:15:26.000Z
|
2021-11-08T08:23:45.000Z
|
Python_Challenge_115/6/F.py
|
jhleed/LikeLion_Django_Study_Summary
|
c788182af5bcfd16bdd4b57235a48659758e494b
|
[
"MIT"
] | null | null | null |
Python_Challenge_115/6/F.py
|
jhleed/LikeLion_Django_Study_Summary
|
c788182af5bcfd16bdd4b57235a48659758e494b
|
[
"MIT"
] | 17
|
2019-09-09T00:15:36.000Z
|
2021-01-28T13:08:51.000Z
|
'''
Statement
Fibonacci numbers are the numbers in the integer sequence starting with 1, 1 where every number after the first two is the sum of the two preceding ones:
1, 1, 2, 3, 5, 8, 13, 21, 34, ...
Given a positive integer n, print the nth Fibonacci number.
Example input
6
Example output
8
'''
num = int(input())
before, curr, i = 0, 1, 1
while num > i:
before, curr = curr, curr + before
i += 1
print(curr)
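A quick trace for the sample input 6: (before, curr) steps through (0, 1) -> (1, 1) -> (1, 2) -> (2, 3) -> (3, 5) -> (5, 8) while i runs from 1 to 5, so print(curr) outputs 8, matching the example output above.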
| 18.565217
| 153
| 0.676815
| 75
| 427
| 3.853333
| 0.613333
| 0.020761
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.060423
| 0.224824
| 427
| 22
| 154
| 19.409091
| 0.812689
| 0.688525
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.166667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de2ffb901bbfbc3af2061583ab91b8842066be1f
| 1,376
|
py
|
Python
|
cluster.py
|
YektaDmrc/UW_GEMSEC
|
b9e0c995e34f098fdb607fa35a3fe47663839086
|
[
"MIT"
] | 1
|
2018-07-10T23:37:47.000Z
|
2018-07-10T23:37:47.000Z
|
cluster.py
|
YektaDmrc/UW_GEMSEC
|
b9e0c995e34f098fdb607fa35a3fe47663839086
|
[
"MIT"
] | null | null | null |
cluster.py
|
YektaDmrc/UW_GEMSEC
|
b9e0c995e34f098fdb607fa35a3fe47663839086
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Fri Jul 13 15:38:11 2018
@author: Yekta
"""
import csv
import numpy as np
from sklearn.cluster import KMeans
clon = list(csv.reader(open("C:/Users/Yekta/Desktop/stajvol3/MoS2BP Binding Characterization_07-11-17_DY.csv")))
for k in range(1,15):
fin=[]
for m in range(1,13):
dataFromCSV = list(csv.reader(open("C:/Users/Yekta/Desktop/stajvol3/573x96/recon/location"+str(m)+"/PCA"+str(k)+".csv")))
dataFromCSV=np.asarray(dataFromCSV)
dataFromCSV=dataFromCSV.T
temp=dataFromCSV[1:,1:]
        temp = temp.astype(float)  # np.float was removed in NumPy 1.24; the builtin works
#clusters according to properties
kmeans = KMeans(n_clusters = 3, init = 'k-means++', random_state = 42)
y_kmeans = kmeans.fit_predict(temp)
fin.append(y_kmeans)
fin=np.asarray(fin)
fin=fin.T
matrix = [[0 for x in range(13)] for y in range(97)]
matrix[0][0]="Index"
for z in range(1,97):
matrix[z][0]=clon[z+1][11]
for x in range(1,13):
matrix[0][x]=x
for y in range(1,97):
matrix[y][x]=fin[y-1,x-1]
matrix=np.asarray(matrix)
with open("C:/Users/Yekta/Desktop/stajvol3/573x96/cluster/clusteredPCA"+str(k)+".csv", 'w', newline='') as myfile:
wr = csv.writer(myfile)
wr.writerows(matrix)
| 32.761905
| 130
| 0.588663
| 204
| 1,376
| 3.936275
| 0.411765
| 0.061021
| 0.049813
| 0.05604
| 0.199253
| 0.159402
| 0.159402
| 0.107098
| 0.107098
| 0
| 0
| 0.065239
| 0.253634
| 1,376
| 42
| 131
| 32.761905
| 0.71665
| 0.077035
| 0
| 0
| 0
| 0
| 0.178542
| 0.149058
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.103448
| 0
| 0.103448
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de319a3d0a027f8b448c09d0528c44c359822d8e
| 1,440
|
py
|
Python
|
test_collision/test_discretedynamicsworld.py
|
Klumhru/boost-python-bullet
|
d9ffae09157280f60cb469d8c9c9fa4c1920e3ce
|
[
"MIT"
] | 2
|
2015-09-16T15:24:39.000Z
|
2015-11-18T11:53:51.000Z
|
test_collision/test_discretedynamicsworld.py
|
Klumhru/boost-python-bullet
|
d9ffae09157280f60cb469d8c9c9fa4c1920e3ce
|
[
"MIT"
] | 1
|
2018-04-04T15:33:20.000Z
|
2018-04-04T15:33:20.000Z
|
test_collision/test_discretedynamicsworld.py
|
Klumhru/boost-python-bullet
|
d9ffae09157280f60cb469d8c9c9fa4c1920e3ce
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_collision.test_discretedynamicsworld
"""
from __future__ import unicode_literals, print_function, absolute_import
import unittest
import bullet
from .test_worlds import WorldTestDataMixin
class DiscreteDynamicsWorldTestCase(WorldTestDataMixin,
unittest.TestCase):
def setUp(self):
super(DiscreteDynamicsWorldTestCase, self).setUp()
self.world = bullet.btDiscreteDynamicsWorld(
self.dispatcher,
self.broadphase,
self.solver,
self.collision_config
)
def test_ctor(self):
pass
def test_step(self):
for i in range(120):
self.world.step_simulation(self.time_step)
def test_sync_states(self):
for i in range(120):
self.world.step_simulation(self.time_step)
self.world.synchronize_motion_states()
def test_gravity(self):
self.world.set_gravity(self.gravity)
        self.assertEqual(self.world.gravity, self.gravity)
        self.world.gravity = bullet.btVector3(0, 0, 0)
        self.assertEqual(self.world.get_gravity(),
                         bullet.btVector3(0, 0, 0))
        self.assertEqual(self.world.gravity,
                         bullet.btVector3(0, 0, 0))
def tearDown(self):
del self.world
super(DiscreteDynamicsWorldTestCase, self).tearDown()
| 28.8
| 72
| 0.634028
| 153
| 1,440
| 5.810458
| 0.372549
| 0.101237
| 0.067492
| 0.084364
| 0.305962
| 0.269966
| 0.269966
| 0.269966
| 0.231721
| 0.231721
| 0
| 0.018112
| 0.271528
| 1,440
| 49
| 73
| 29.387755
| 0.829361
| 0.058333
| 0
| 0.176471
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.088235
| 1
| 0.176471
| false
| 0.029412
| 0.117647
| 0
| 0.323529
| 0.029412
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de31e808778594864eecf61a23f3d4e16b0f2a4b
| 820
|
py
|
Python
|
force_wfmanager/notifications/tests/test_ui_notification_hooks_factory.py
|
force-h2020/force-wfmanager
|
bcd488cd37092cacd9d0c81b544ee8c1654d1d92
|
[
"BSD-2-Clause"
] | 1
|
2019-08-19T16:02:20.000Z
|
2019-08-19T16:02:20.000Z
|
force_wfmanager/notifications/tests/test_ui_notification_hooks_factory.py
|
force-h2020/force-wfmanager
|
bcd488cd37092cacd9d0c81b544ee8c1654d1d92
|
[
"BSD-2-Clause"
] | 396
|
2017-07-18T15:19:55.000Z
|
2021-05-03T06:23:06.000Z
|
force_wfmanager/notifications/tests/test_ui_notification_hooks_factory.py
|
force-h2020/force-wfmanager
|
bcd488cd37092cacd9d0c81b544ee8c1654d1d92
|
[
"BSD-2-Clause"
] | 2
|
2019-03-05T16:23:10.000Z
|
2020-04-16T08:59:11.000Z
|
# (C) Copyright 2010-2020 Enthought, Inc., Austin, TX
# All rights reserved.
import unittest
from force_wfmanager.notifications.ui_notification_hooks_manager \
import \
UINotificationHooksManager
from force_wfmanager.notifications.ui_notification_plugin import \
UINotificationPlugin
class TestUINotificationHooksFactory(unittest.TestCase):
def setUp(self):
self.plugin = UINotificationPlugin()
self.factory = self.plugin.ui_hooks_factories[0]
def test_initialization(self):
self.assertEqual(self.factory.plugin_id, self.plugin.id)
self.assertEqual(self.factory.plugin_name, self.plugin.name)
def test_create_ui_hooks_manager(self):
self.assertIsInstance(
self.factory.create_ui_hooks_manager(),
UINotificationHooksManager)
| 31.538462
| 68
| 0.74878
| 88
| 820
| 6.761364
| 0.454545
| 0.067227
| 0.060504
| 0.104202
| 0.258824
| 0.151261
| 0
| 0
| 0
| 0
| 0
| 0.013274
| 0.173171
| 820
| 25
| 69
| 32.8
| 0.864307
| 0.089024
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.176471
| 1
| 0.176471
| false
| 0
| 0.176471
| 0
| 0.411765
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de35289eea69e5ceb7febfc7fa32b43c5609a79c
| 887
|
py
|
Python
|
src/commands/reload.py
|
zaanposni/umfrageBot
|
3e19dc0629cde394da2ae8706e6e043b4e87059d
|
[
"MIT"
] | 6
|
2019-08-15T20:19:38.000Z
|
2021-02-28T21:33:19.000Z
|
src/commands/reload.py
|
zaanposni/umfrageBot
|
3e19dc0629cde394da2ae8706e6e043b4e87059d
|
[
"MIT"
] | 31
|
2019-08-14T08:42:08.000Z
|
2020-05-07T13:43:43.000Z
|
src/commands/reload.py
|
zaanposni/umfrageBot
|
3e19dc0629cde394da2ae8706e6e043b4e87059d
|
[
"MIT"
] | 5
|
2019-08-17T13:39:53.000Z
|
2020-04-01T07:25:51.000Z
|
from bt_utils.console import Console
from bt_utils.config import cfg
from bt_utils.embed_templates import SuccessEmbed, WarningEmbed
from bt_utils.handle_sqlite import DatabaseHandler
SHL = Console('BundestagsBot Reload')
DB = DatabaseHandler()
settings = {
'name': 'reload',
'channels': ['team'],
'mod_cmd': True
}
async def main(client, message, params):
files_failed = cfg.reload(debug=True)
if files_failed == 0:
embed = SuccessEmbed('Success', 'All files reloaded')
else:
embed = WarningEmbed('Reloading failed', f'Failed to reload {files_failed} file(s)')
roles = cfg.options["roles_stats"].values()
# creates basic table structures if not already present
DB.create_structure(roles)
# updates table structure, e.g. if a new role has been added
DB.update_columns(roles)
await message.channel.send(embed=embed)
| 27.71875
| 92
| 0.713641
| 116
| 887
| 5.344828
| 0.62069
| 0.03871
| 0.070968
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001379
| 0.182638
| 887
| 31
| 93
| 28.612903
| 0.853793
| 0.126268
| 0
| 0
| 0
| 0
| 0.181347
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.190476
| 0
| 0.190476
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de38b348a7c3f728ca43e602a33e53edfd8f033d
| 10,812
|
py
|
Python
|
tests/eth2/beacon/state_machines/forks/test_serenity_block_attestation_validation.py
|
hwwhww/trinity
|
614b083a637c665f84b1af228541f37c25d9c665
|
[
"MIT"
] | 2
|
2020-01-30T21:51:00.000Z
|
2020-07-22T14:51:05.000Z
|
tests/eth2/beacon/state_machines/forks/test_serenity_block_attestation_validation.py
|
hwwhww/trinity
|
614b083a637c665f84b1af228541f37c25d9c665
|
[
"MIT"
] | null | null | null |
tests/eth2/beacon/state_machines/forks/test_serenity_block_attestation_validation.py
|
hwwhww/trinity
|
614b083a637c665f84b1af228541f37c25d9c665
|
[
"MIT"
] | null | null | null |
import pytest
from hypothesis import (
given,
settings,
strategies as st,
)
from eth_utils import (
ValidationError,
)
from eth.constants import (
ZERO_HASH32,
)
from eth2.beacon.committee_helpers import (
get_crosslink_committees_at_slot,
)
from eth2.beacon.state_machines.forks.serenity.block_validation import (
validate_attestation_aggregate_signature,
validate_attestation_latest_crosslink_root,
validate_attestation_justified_block_root,
validate_attestation_justified_epoch,
validate_attestation_crosslink_data_root,
validate_attestation_slot,
)
from eth2.beacon.tools.builder.validator import (
create_mock_signed_attestation,
)
from eth2.beacon.types.attestation_data import AttestationData
from eth2.beacon.types.crosslink_records import CrosslinkRecord
@pytest.mark.parametrize(
('genesis_slot', 'genesis_epoch', 'slots_per_epoch', 'min_attestation_inclusion_delay'),
[
(8, 2, 4, 2),
]
)
@pytest.mark.parametrize(
(
'attestation_slot,'
'state_slot,'
'is_valid,'
),
[
# in bounds at lower end
(8, 2 + 8, True),
# in bounds at high end
(8, 8 + 4, True),
# attestation_slot < genesis_slot
(7, 2 + 8, False),
# state_slot > attestation_data.slot + slots_per_epoch
(8, 8 + 4 + 1, False),
# attestation_data.slot + min_attestation_inclusion_delay > state_slot
(8, 8 - 2, False),
]
)
def test_validate_attestation_slot(sample_attestation_data_params,
attestation_slot,
state_slot,
slots_per_epoch,
genesis_slot,
genesis_epoch,
min_attestation_inclusion_delay,
is_valid):
attestation_data = AttestationData(**sample_attestation_data_params).copy(
slot=attestation_slot,
)
if is_valid:
validate_attestation_slot(
attestation_data,
state_slot,
slots_per_epoch,
min_attestation_inclusion_delay,
genesis_slot,
)
else:
with pytest.raises(ValidationError):
validate_attestation_slot(
attestation_data,
state_slot,
slots_per_epoch,
min_attestation_inclusion_delay,
genesis_slot,
)
@pytest.mark.parametrize(
(
'attestation_slot,'
'attestation_justified_epoch,'
'current_epoch,'
'previous_justified_epoch,'
'justified_epoch,'
'slots_per_epoch,'
'is_valid,'
),
[
# slot_to_epoch(attestation_data.slot + 1, slots_per_epoch) >= current_epoch
(23, 2, 3, 1, 2, 8, True), # attestation_data.justified_epoch == justified_epoch
(23, 1, 3, 1, 2, 8, False), # attestation_data.justified_epoch != justified_epoch
# slot_to_epoch(attestation_data.slot + 1, slots_per_epoch) < current_epoch
(22, 1, 3, 1, 2, 8, True), # attestation_data.justified_epoch == previous_justified_epoch
(22, 2, 3, 1, 2, 8, False), # attestation_data.justified_epoch != previous_justified_epoch
]
)
def test_validate_attestation_justified_epoch(
sample_attestation_data_params,
attestation_slot,
attestation_justified_epoch,
current_epoch,
previous_justified_epoch,
justified_epoch,
slots_per_epoch,
is_valid):
attestation_data = AttestationData(**sample_attestation_data_params).copy(
slot=attestation_slot,
justified_epoch=attestation_justified_epoch,
)
if is_valid:
validate_attestation_justified_epoch(
attestation_data,
current_epoch,
previous_justified_epoch,
justified_epoch,
slots_per_epoch,
)
else:
with pytest.raises(ValidationError):
validate_attestation_justified_epoch(
attestation_data,
current_epoch,
previous_justified_epoch,
justified_epoch,
slots_per_epoch,
)
@pytest.mark.parametrize(
(
'attestation_justified_block_root,'
'justified_block_root,'
'is_valid,'
),
[
(b'\x33' * 32, b'\x22' * 32, False), # attestation.justified_block_root != justified_block_root # noqa: E501
(b'\x33' * 32, b'\x33' * 32, True),
]
)
def test_validate_attestation_justified_block_root(sample_attestation_data_params,
attestation_justified_block_root,
justified_block_root,
is_valid):
attestation_data = AttestationData(**sample_attestation_data_params).copy(
justified_block_root=attestation_justified_block_root,
)
if is_valid:
validate_attestation_justified_block_root(
attestation_data,
justified_block_root
)
else:
with pytest.raises(ValidationError):
validate_attestation_justified_block_root(
attestation_data,
justified_block_root
)
@pytest.mark.parametrize(
(
'attestation_latest_crosslink,'
'attestation_crosslink_data_root,'
'state_latest_crosslink,'
'is_valid,'
),
[
(
CrosslinkRecord(0, b'\x11' * 32),
b'\x33' * 32,
CrosslinkRecord(0, b'\x22' * 32),
False,
),
(
CrosslinkRecord(0, b'\x33' * 32),
b'\x33' * 32,
CrosslinkRecord(0, b'\x11' * 32),
False,
),
(
CrosslinkRecord(0, b'\x11' * 32),
b'\x33' * 32,
CrosslinkRecord(0, b'\x33' * 32),
True,
),
(
CrosslinkRecord(0, b'\x33' * 32),
b'\x22' * 32,
CrosslinkRecord(0, b'\x33' * 32),
True,
),
(
CrosslinkRecord(0, b'\x33' * 32),
b'\x33' * 32,
CrosslinkRecord(0, b'\x33' * 32),
True,
),
]
)
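# Reading of the cases above (hedged): an attestation passes when its
# latest_crosslink equals the state's latest crosslink, or when its
# crosslink_data_root matches the root recorded in the state's latest
# crosslink; any other combination should raise ValidationError.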
def test_validate_attestation_latest_crosslink(sample_attestation_data_params,
attestation_latest_crosslink,
attestation_crosslink_data_root,
state_latest_crosslink,
slots_per_epoch,
is_valid):
    attestation_data = AttestationData(**sample_attestation_data_params).copy(
        latest_crosslink=attestation_latest_crosslink,
        crosslink_data_root=attestation_crosslink_data_root,
    )
if is_valid:
validate_attestation_latest_crosslink_root(
attestation_data,
state_latest_crosslink,
slots_per_epoch=slots_per_epoch,
)
else:
with pytest.raises(ValidationError):
validate_attestation_latest_crosslink_root(
attestation_data,
state_latest_crosslink,
slots_per_epoch=slots_per_epoch,
)
@pytest.mark.parametrize(
(
'attestation_crosslink_data_root,'
'is_valid,'
),
[
(ZERO_HASH32, True),
(b'\x22' * 32, False),
(b'\x11' * 32, False),
]
)
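# Behaviour exercised here: only ZERO_HASH32 is accepted as the
# crosslink_data_root; any other value should raise ValidationError.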
def test_validate_attestation_crosslink_data_root(sample_attestation_data_params,
attestation_crosslink_data_root,
is_valid):
attestation_data = AttestationData(**sample_attestation_data_params).copy(
crosslink_data_root=attestation_crosslink_data_root,
)
if is_valid:
validate_attestation_crosslink_data_root(
attestation_data,
)
else:
with pytest.raises(ValidationError):
validate_attestation_crosslink_data_root(
attestation_data,
)
@settings(max_examples=1)
@given(random=st.randoms())
@pytest.mark.parametrize(
(
'num_validators,'
'slots_per_epoch,'
'target_committee_size,'
'shard_count,'
'is_valid,'
'genesis_slot'
),
[
(10, 2, 2, 2, True, 0),
(40, 4, 3, 5, True, 0),
(20, 5, 3, 2, True, 0),
(20, 5, 3, 2, False, 0),
],
)
def test_validate_attestation_aggregate_signature(genesis_state,
slots_per_epoch,
random,
sample_attestation_data_params,
is_valid,
target_committee_size,
shard_count,
keymap,
committee_config):
state = genesis_state
# choose committee
slot = 0
crosslink_committee = get_crosslink_committees_at_slot(
state=state,
slot=slot,
committee_config=committee_config,
)[0]
committee, shard = crosslink_committee
committee_size = len(committee)
assert committee_size > 0
# randomly select 3/4 participants from committee
votes_count = len(committee) * 3 // 4
assert votes_count > 0
attestation_data = AttestationData(**sample_attestation_data_params).copy(
slot=slot,
shard=shard,
)
attestation = create_mock_signed_attestation(
state,
attestation_data,
committee,
votes_count,
keymap,
slots_per_epoch,
)
if is_valid:
validate_attestation_aggregate_signature(
state,
attestation,
committee_config,
)
else:
# mess up signature
attestation = attestation.copy(
aggregate_signature=(
attestation.aggregate_signature[0] + 10,
attestation.aggregate_signature[1] - 1
)
)
with pytest.raises(ValidationError):
validate_attestation_aggregate_signature(
state,
attestation,
committee_config,
)
| 31.068966
| 117
| 0.561321
| 990
| 10,812
| 5.728283
| 0.120202
| 0.105801
| 0.043555
| 0.066655
| 0.697937
| 0.600247
| 0.541527
| 0.466584
| 0.405396
| 0.350379
| 0
| 0.028185
| 0.363393
| 10,812
| 347
| 118
| 31.158501
| 0.795729
| 0.067055
| 0
| 0.495208
| 0
| 0
| 0.063059
| 0.027408
| 0
| 0
| 0
| 0
| 0.00639
| 1
| 0.019169
| false
| 0
| 0.028754
| 0
| 0.047923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de3966c1044750e98c8968c82831f55e24112044
| 13,679
|
py
|
Python
|
SeqtaSDSBridge.py
|
jacobcurulli/SeqtaSDSBridge
|
19b8da95462d1e0aa8a059c9f8075d8f7ce1b417
|
[
"CC-BY-4.0"
] | null | null | null |
SeqtaSDSBridge.py
|
jacobcurulli/SeqtaSDSBridge
|
19b8da95462d1e0aa8a059c9f8075d8f7ce1b417
|
[
"CC-BY-4.0"
] | 1
|
2021-05-21T04:52:28.000Z
|
2021-05-21T05:00:10.000Z
|
SeqtaSDSBridge.py
|
jacobcurulli/SeqtaSDSBridge
|
19b8da95462d1e0aa8a059c9f8075d8f7ce1b417
|
[
"CC-BY-4.0"
] | 1
|
2021-04-07T13:50:43.000Z
|
2021-04-07T13:50:43.000Z
|
###########################################################################################################
###########################################################################################################
## SeqtaToSDS ##
## Jacob Curulli ##
## This code is shared as is, under Creative Commons Attribution Non-Commercial 4.0 License ##
## Permissions beyond the scope of this license may be available at http://creativecommons.org/ns ##
###########################################################################################################
# Read Me
# This script will likely not work out of the box and will need to be customised
# 1. The approvedClassesCSV is a list of classes in Seqta that will be exported,
# the list is checked against the 'name' column in the public.classunit table.
# 2. A directory called 'sds' will need to be created in the root of where the script is run.
# 3. This script allows for an admin user to be added to every class (section)
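# Example approved_classes.csv layout (illustrative class names only; each
# row must match a 'name' value in the public.classunit table):
#   07ENG01
#   07MAT02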
# import required modules
# psycopg2 isn't usually included with python and may need to be installed separately
# see www.psycopg.org for instructions
import psycopg2
import csv
import os.path
import configparser
from datetime import datetime
# Get the date
dateNow = datetime.now()
# Read the config.ini file
config = configparser.ConfigParser()
config.read('config.ini')
# read config file for seqta database connection details
db_user=config['db']['user']
db_port=config['db']['port']
db_password=config['db']['password']
db_database=config['db']['database']
db_host=config['db']['host']
db_sslmode=config['db']['sslmode']
# read config file for school details
teamsAdminUsername=config['school']['teamsAdminUsername']
teamsAdminFirstName=config['school']['teamsAdminFirstName']
teamsAdminLastName=config['school']['teamsAdminLastName']
teamsAdminID=config['school']['teamsAdminID']
schoolName =config['school']['schoolName']
schoolSISId=config['school']['schoolSISId']
classTermName=config['school']['classTermName']
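# Example config.ini layout matching the keys read above (illustrative
# values only; adjust to your environment):
#   [db]
#   user = seqta_reader
#   port = 5432
#   password = secret
#   database = seqta
#   host = localhost
#   sslmode = require
#
#   [school]
#   teamsAdminUsername = admin@example.edu
#   teamsAdminFirstName = Teams
#   teamsAdminLastName = Admin
#   teamsAdminID = 999999
#   schoolName = Example School
#   schoolSISId = EXS001
#   classTermName = Term 1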
# declare some variables here so we can make sure they are present
staffList = set()
studentList = set()
classArray = tuple()
currentYear = dateNow.strftime("%Y")
print("current year is:", currentYear)
# file locations, this can be changed to suit your environment
csvApprovedClasses = "approved_classes.csv"
csvSchoolFilename = "sds/School.csv"
csvSectionFileName = "sds/Section.csv"
csvStudentFileName = "sds/Student.csv"
csvTeacherFileName = "sds/Teacher.csv"
csvTeacherRosterFileName = "sds/TeacherRoster.csv"
csvStudentEnrollmentFileName = "sds/StudentEnrollment.csv"
# remove the csv files if they already exist so each run starts fresh
for csvFile in (csvSchoolFilename, csvSectionFileName, csvStudentFileName,
                csvTeacherFileName, csvTeacherRosterFileName,
                csvStudentEnrollmentFileName):
    if os.path.exists(csvFile):
        os.remove(csvFile)
try:
# Import CSV file for approved class lists
with open(csvApprovedClasses, newline='', encoding='utf-8-sig') as csvfile:
classList = list(csv.reader(csvfile))
print (type(classList))
print (classList)
print ("Number of classes imported from csv list: ",len(classList))
except Exception as error:
    print("***************************")
    print("Error importing csv file:", error)
# Open connection to Seqta
try:
connection = psycopg2.connect(user=db_user,
port=db_port,
password=db_password,
database=db_database,
host = db_host,
sslmode = db_sslmode)
cursor = connection.cursor()
print(connection.get_dsn_parameters(), "\n")
except (Exception, psycopg2.Error) as error:
print("Error while connecting to PostgreSQL", error)
# Fetch data for classlists
try:
for i in classList:
        className = ', '.join(map(str, i))
print ("**")
print (className)
# Print PostgreSQL version
cursor.execute("SELECT version();")
record = cursor.fetchone()
# Lookup classID from Class name in Seqta
sq_classUnitQuery = "SELECT * FROM public.classunit WHERE name = (%s);"
cursor.execute(sq_classUnitQuery,(className,))
classUnitPull = cursor.fetchall()
print("Getting class information for:", (className))
for row in classUnitPull:
classUnitID = row[0]
classSubjectID = row[4]
classTermID = row[7]
print("Class unit ID (classUnitID) is:", classUnitID)
print("Class subject ID (classSubjectID) is:", classSubjectID)
print("Class term ID (classTermID) is:", classTermID)
# Check if class has a staff member or students
# If they don't we need to stop processing the class and drop it gracefully
# Get subject description for Class
sq_classSubjectQuery = "SELECT * FROM subject WHERE id = (%s);"
cursor.execute(sq_classSubjectQuery, (classSubjectID,))
classSubjectPull = cursor.fetchall()
for row in classSubjectPull:
classSubjectDescription = row[3]
classSubjectName = row[2]
classTeamName = (className + " - " + classSubjectDescription)
print("Class subject Description (classSubjectDescription) is:", classSubjectDescription)
print("Class team name (classTeamName) is:", classTeamName)
print("Class subject Name (classSubjectName) is:", classSubjectName)
# Get StaffID in this classUnit
sq_staffIDQuery = "SELECT staff from public.classinstance WHERE classunit = (%s) and date <= current_date ORDER BY id DESC LIMIT 1;"
cursor.execute(sq_staffIDQuery, (classUnitID,))
staffID_pre = cursor.fetchone()
if staffID_pre is None:
print("Couldn't find a class today or previously for classunit:", classUnitID)
print("Checking for a class up to 14 days in the future and selecting the closest date to today")
sq_staffIDQuery = "SELECT staff from public.classinstance WHERE classunit = (%s) date = current_date + interval '14 day' ORDER BY id DESC LIMIT 1;"
cursor.execute(sq_staffIDQuery, (classUnitID,))
staffID_pre = cursor.fetchone()
staffID = int(staffID_pre[0])
print("Staff ID is:", (staffID))
# Write to teacher ID list
staffList.add(staffID)
else:
staffID = int(staffID_pre[0])
print("Staff ID is:", (staffID))
# Write to teacher ID list
staffList.add(staffID)
# Get Student ID's for this classUnit
sq_studentIDListQuery = "SELECT student from \"classunitStudent\" WHERE classunit = (%s) and removed is NULL;"
cursor.execute(sq_studentIDListQuery, (classUnitID,))
studentIDArray = tuple([r[0] for r in cursor.fetchall()])
print("List of students in class name:", className)
print(studentIDArray)
for row in studentIDArray:
studentList.add(row)
# Check if the csv section file exists
csvSectionFileExists = os.path.isfile(csvSectionFileName)
# Write to the section csv file
with open(csvSectionFileName, 'a', newline='') as csvSection:
writer = csv.writer(csvSection)
# If the csv doesn't exist already we'll need to put in the headers
if not csvSectionFileExists:
writer.writerow(["SIS ID", "School SIS ID", "Section Name", "Section Number", "Term SIS ID", "Term Name", "Course SIS ID", "Course Name", "Course Description"])
writer.writerow([(classUnitID), (schoolSISId), (classTeamName), (classUnitID), (classTermID), (classTermName), (classUnitID), (classSubjectName), (classSubjectDescription)])
print ("Writing class section row")
# Check if the csv teacher roster file exists
csvTeacherRosterFileExists = os.path.isfile(csvTeacherRosterFileName)
# Write to the teacher roster csv file
with open(csvTeacherRosterFileName, 'a', newline='') as csvTeacherRoster:
writer = csv.writer(csvTeacherRoster)
# If the csv doesn't exist already we'll need to put in the headers
if not csvTeacherRosterFileExists:
writer.writerow(["Section SIS ID", "SIS ID"])
writer.writerow([(classUnitID), (staffID)])
# Also include the Teams Admin account as a teacher
writer.writerow([(classUnitID), (teamsAdminID)])
print("Written staff to roster")
# Check if the csv student enrollment file exists
csvStudentEnrollmentFileNameExists = os.path.isfile(csvStudentEnrollmentFileName)
# Write to the student enrollment csv file
with open(csvStudentEnrollmentFileName, 'a', newline='') as csvStudentEnrollment:
writer = csv.writer(csvStudentEnrollment)
# If the csv doesn't exist already we'll need to put in the headers
if not csvStudentEnrollmentFileNameExists:
writer.writerow(["Section SIS ID", "SIS ID"])
for studentInArray in studentIDArray:
writer.writerow([(classUnitID), (studentInArray)])
except Exception as error:
    print("")
    print("***************************")
    print("Error fetching class list data:", error)
    print("")
# Now we will fetch the staff information
try:
print("Print the staff lists now")
print(staffList)
for staff in staffList:
# Now get the staff information
sq_staffQuery = "SELECT * from public.staff WHERE id = (%s);"
cursor.execute(sq_staffQuery, (staff,))
staffPull = cursor.fetchall()
for row in staffPull:
staffFirstName = row[4]
staffLastName = row[7]
staffUsername = row[21]
print("Staff First Name (staffFirstName) is:", staffFirstName)
print("Staff Last Name (staffLastName) is:", staffLastName)
print("Staff username (staffUsername) is:", staffUsername)
print("Staff ID is (staff) is:", staff)
# Now we write this information to the Teacher.csv file
# Check if the csv teacher file exists
csvTeacherFileNameExists = os.path.isfile(csvTeacherFileName)
# Write to the teacher csv file
with open(csvTeacherFileName, 'a', newline='') as csvTeacher:
writer = csv.writer(csvTeacher)
# If the csv doesn't exist already we'll need to put in the headers
if not csvTeacherFileNameExists:
writer.writerow(["SIS ID", "School SIS ID", "First Name", "Last Name", "Username", "Teacher Number"])
# Also include the Teams Admin user as a teacher
writer.writerow(
[(teamsAdminID), (schoolSISId), (teamsAdminFirstName), (teamsAdminLastName), (teamsAdminUsername),
(teamsAdminID)])
writer.writerow([(staff), (schoolSISId), (staffFirstName), (staffLastName), (staffUsername), (staff)])
except Exception as error:
    print("something went wrong getting the staff data:", error)
# Now we will fetch the student information
try:
print("Print the student lists now")
print(studentList)
for student in studentList:
# Now get the student information
sq_studentQuery = "SELECT * from student WHERE id = (%s) AND status = 'FULL';"
cursor.execute(sq_studentQuery, (student,))
studentPull = cursor.fetchall()
for row in studentPull:
studentFirstName = row[3]
studentLastName = row[6]
studentUsername = row[47]
print("Student First Name (studentFirstName) is:", studentFirstName)
print("Student Last Name (studentLastName) is:", studentLastName)
print("Student username (studentUsername) is:", studentUsername)
print("Student ID is (student) is:", student)
# Now we write this information to the Student.csv file
# Check if the csv Student file exists
csvStudentFileNameExists = os.path.isfile(csvStudentFileName)
# Write to the student enrollment csv file
with open(csvStudentFileName, 'a', newline='') as csvStudent:
writer = csv.writer(csvStudent)
# If the csv doesn't exist already we'll need to put in the headers
if not csvStudentFileNameExists:
writer.writerow(["SIS ID", "School SIS ID", "First Name", "Last Name", "Username", "Student Number"])
writer.writerow([(student), (schoolSISId), (studentFirstName), (studentLastName), (studentUsername), (student)])
except Exception as error:
    print("something went wrong getting the student data:", error)
# write the School.csv file
try:
with open('sds/School.csv', 'a', newline='') as csvSchool:
writer = csv.writer(csvSchool)
writer.writerow(["SIS ID","Name"])
writer.writerow([(schoolSISId),(schoolName)])
except Exception as error:
    print("something went wrong writing the school csv file:", error)
finally:
# closing database connection.
if (connection):
cursor.close()
connection.close()
print("PostgreSQL connection is closed")
| 45.445183
| 185
| 0.635865
| 1,483
| 13,679
| 5.840863
| 0.228591
| 0.022628
| 0.009236
| 0.009698
| 0.186793
| 0.143154
| 0.133226
| 0.106673
| 0.106673
| 0.096975
| 0
| 0.003377
| 0.242269
| 13,679
| 301
| 186
| 45.445183
| 0.83232
| 0.208787
| 0
| 0.132353
| 0
| 0.009804
| 0.226853
| 0.012031
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.009804
| 0.034314
| 0
| 0.034314
| 0.220588
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de3daa1f9c197f223b8adf05ac9c7b5634367d5c
| 5,945
|
py
|
Python
|
bin/plot_examples/plot_vars_barchart.py
|
gonzalorodrigo/ScSFWorkload
|
2301dacf486df8ed783c0ba33cbbde6e9978c17e
|
[
"BSD-3-Clause-LBNL"
] | 1
|
2019-03-18T18:27:49.000Z
|
2019-03-18T18:27:49.000Z
|
bin/plot_examples/plot_vars_barchart.py
|
gonzalorodrigo/ScSFWorkload
|
2301dacf486df8ed783c0ba33cbbde6e9978c17e
|
[
"BSD-3-Clause-LBNL"
] | 1
|
2020-12-17T21:33:15.000Z
|
2020-12-17T21:35:41.000Z
|
bin/plot_examples/plot_vars_barchart.py
|
gonzalorodrigo/ScSFWorkload
|
2301dacf486df8ed783c0ba33cbbde6e9978c17e
|
[
"BSD-3-Clause-LBNL"
] | 1
|
2021-01-05T08:23:20.000Z
|
2021-01-05T08:23:20.000Z
|
""" Plots analysis on the workflow variables for experiments with different
workflow types and different %of workflow core hours in the workload.
Resuls are plotted as barchars that show how much the vas deviate in
single and multi from aware.
"""
import matplotlib
from orchestration import get_central_db
from orchestration.definition import ExperimentDefinition
from plot import (plot_multi_bars, produce_plot_config, extract_results,
gen_trace_ids_exps, calculate_diffs, get_args, join_rows,
replace)
from stats.trace import ResultTrace
# remote use no Display
matplotlib.use('Agg')
base_trace_id_percent, lim = get_args(2459, True)
print("Base Exp", base_trace_id_percent)
print("Using analysis of limited workflows:", lim)
db_obj = get_central_db()
edge_keys= {0: "[0,48] core.h", 48*3600:"(48, 960] core.h",
960*3600:"(960, inf.) core.h"}
trace_id_rows = []
base_exp=170
exp=ExperimentDefinition()
exp.load(db_obj, base_exp)
core_seconds_edges=exp.get_machine().get_core_seconds_edges()
# trace_id_rows = [
# [ 4166, 4167, 4168, 4184, 4185, 4186, 4202, 4203, 4204,
# 4220, 4221, 4222, 4238, 4239, 4240 ],
# [ 4169, 4170, 4171, 4187, 4188, 4189, 4205, 4206, 4207,
# 4223, 4224, 4225, 4241, 4242, 4243 ],
# [ 4172, 4173, 4174, 4190, 4191, 4192, 4208, 4209, 4210,
# 4226, 4227, 4228, 4244, 4245, 4246 ],
# [ 4175, 4176, 4177, 4193, 4194, 4195, 4211, 4212, 4213,
# 4229, 4230, 4231, 4247, 4248, 4249],
# [ 4178, 4179, 4180, 4196, 4197, 4198, 4214, 4215, 4216,
# 4232, 4233, 4234, 4250, 4251, 4252],
# [ 4181, 4182, 4183, 4199, 4200, 4201, 4217, 4218, 4219,
# 4235, 4236, 4237, 4253, 4254, 4255],
# ]
pre_base_trace_id_percent = 2549+18
trace_id_rows= join_rows(
gen_trace_ids_exps(pre_base_trace_id_percent,
inverse=False,
group_jump=18, block_count=6,
base_exp_group=None,
group_count=1),
gen_trace_ids_exps(base_trace_id_percent,
inverse=False,
group_jump=18, block_count=6,
base_exp_group=None,
group_count=5)
)
trace_id_colors=join_rows(
gen_trace_ids_exps(pre_base_trace_id_percent+1,
inverse=False, skip=1,
group_jump=18, block_count=6,
base_exp_group=None,
group_count=1,
group_size=2),
gen_trace_ids_exps(base_trace_id_percent+1,
inverse=False,skip=1,
group_jump=18, block_count=6,
base_exp_group=None,
group_count=5,
group_size=2)
)
print("IDS", trace_id_rows)
trace_id_rows=replace(trace_id_rows,
[2489, 2490, 2491,
2507, 2508, 2509,
2525, 2526, 2527],
[2801, 2802, 2803,
2804, 2805, 2806,
2807, 2808, 2809])
print("IDS", trace_id_rows)
print("COLORS", trace_id_colors)
time_labels = ["", "5%", "", "10%", "", "25%",
"", "50%", "", "75%",
"", "100%"]
manifest_label=["floodP", "longW", "wideL",
"cybers", "sipht", "montage"]
y_limits_dic={"[0,48] core.h": (1, 1000),
"(48, 960] core.h":(1,100),
"(960, inf.) core.h":(1,20)}
target_dir="percent"
grouping_types = [["bar", "bar"],
["bar", "bar"],
["bar", "bar"],
["bar", "bar"],
["bar", "bar"],
["bar", "bar"]]
colors, hatches, legend = produce_plot_config(db_obj, trace_id_colors)
#head_file_name="percent"
head_file_name="wf_percent-b{0}".format(base_trace_id_percent)
for (name, result_type) in zip(["Turnaround speedup", "wait time(h.)",
"runtime (h.)", "stretch factor"],
["wf_turnaround", "wf_waittime",
"wf_runtime", "wf_stretch_factor"]):
if lim:
result_type="lim_{0}".format(result_type)
print("Loading: {0}".format(name))
factor=1.0/3600.0
if result_type in ("wf_stretch_factor", "lim_wf_stretch_factor"):
factor=None
edge_plot_results = extract_results(db_obj, trace_id_rows,
result_type, factor=factor,
second_pass=lim)
diffs_results = calculate_diffs(edge_plot_results, base_index=0,
group_count=3, speedup=True)
# for res_row in edge_plot_results:
# print [ x._get("median") for x in res_row]
title="{0}".format(name)
y_limits=(0,4)
print("Plotting figure")
ref_level=1.0
plot_multi_bars(
name=title,
file_name=target_dir+"/{0}-{1}-bars.png".format(head_file_name,
result_type),
title=title,
exp_rows=diffs_results,
y_axis_labels=manifest_label,
x_axis_labels=time_labels,
y_axis_general_label=name,
type_rows=grouping_types,
colors=colors,
hatches=hatches,
y_limits=y_limits,
y_log_scale=False,
legend=legend,
y_tick_count=3,
subtitle="% workflow workload",
ncols=2,
ref_line=ref_level
)
| 36.030303
| 75
| 0.518923
| 684
| 5,945
| 4.23538
| 0.409357
| 0.04591
| 0.031067
| 0.03728
| 0.174318
| 0.153952
| 0.153952
| 0.153952
| 0.143597
| 0.143597
| 0
| 0.146289
| 0.369891
| 5,945
| 165
| 76
| 36.030303
| 0.627069
| 0.17561
| 0
| 0.160714
| 0
| 0
| 0.098831
| 0.004306
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.008929
| 0.044643
| 0
| 0.044643
| 0.0625
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de3df638310dcbe32c189284547dca83d1fe51a7
| 410
|
py
|
Python
|
devpotato_bot/commands/daily_titles/models/inevitable_title.py
|
cl0ne/cryptopotato-bot
|
af62d794adffe186a4f6a4b0aa7ecd4f7e8700a1
|
[
"MIT"
] | 1
|
2021-05-15T23:41:29.000Z
|
2021-05-15T23:41:29.000Z
|
devpotato_bot/commands/daily_titles/models/inevitable_title.py
|
cl0ne/cryptopotato-bot
|
af62d794adffe186a4f6a4b0aa7ecd4f7e8700a1
|
[
"MIT"
] | 1
|
2022-02-19T20:38:33.000Z
|
2022-02-19T23:53:39.000Z
|
devpotato_bot/commands/daily_titles/models/inevitable_title.py
|
cl0ne/cryptopotato-bot
|
af62d794adffe186a4f6a4b0aa7ecd4f7e8700a1
|
[
"MIT"
] | 1
|
2021-05-15T23:42:21.000Z
|
2021-05-15T23:42:21.000Z
|
from __future__ import annotations
from .title import TitleFromGroupChat, Base
class InevitableTitle(TitleFromGroupChat):
__tablename__ = f'{Base.TABLENAME_PREFIX}inevitable_titles'
__group_chat_back_populates__ = 'inevitable_titles'
def __repr__(self):
return ('<InevitableTitle('
f'chat_id={self.chat_id}, '
f'text="{self.text}"'
')>')
| 27.333333
| 63
| 0.660976
| 40
| 410
| 6.175
| 0.6
| 0.129555
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.236585
| 410
| 14
| 64
| 29.285714
| 0.789137
| 0
| 0
| 0
| 0
| 0
| 0.287805
| 0.153659
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.2
| 0.1
| 0.7
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de3e64921cbcc4e464aa3d32a70cc4b3179f2705
| 1,034
|
py
|
Python
|
matplotlib/gas_price_overtime.py
|
MatveiAleksandrovich/Artificial-Intelligence
|
d3d6f253e7c2256f6f9d490b077bdb50ca1da229
|
[
"MIT"
] | null | null | null |
matplotlib/gas_price_overtime.py
|
MatveiAleksandrovich/Artificial-Intelligence
|
d3d6f253e7c2256f6f9d490b077bdb50ca1da229
|
[
"MIT"
] | null | null | null |
matplotlib/gas_price_overtime.py
|
MatveiAleksandrovich/Artificial-Intelligence
|
d3d6f253e7c2256f6f9d490b077bdb50ca1da229
|
[
"MIT"
] | null | null | null |
import requests
import pandas as pd
import matplotlib.pyplot as plt
url_gas_data = 'https://raw.githubusercontent.com/KeithGalli/matplotlib_tutorial/master/gas_prices.csv'
res1 = requests.get(url_gas_data, allow_redirects=True)
with open('gas_prices.csv', 'wb') as file:
file.write(res1.content)
plt.figure(figsize=(12, 5))
gas = pd.read_csv('gas_prices.csv')
plt.title('Gas prices over time (in USD)', fontdict={
'fontweight': 'bold', 'fontsize': 16
})
countries_to_look_at = ['USA', 'Australia', 'South Korea', 'Canada']
for country in gas:
if country in countries_to_look_at:
plt.plot(gas.Year, gas[country], label=country, marker='.')
"""
Other way to pass data:
plt.plot(gas.Year, gas.USA, 'b.-', label='United States')
plt.plot(gas.Year, gas.Canada, 'r.-', label='Canada')
plt.plot(gas.Year, gas['South Korea'], 'g.-', label='South Korea')
plt.plot(gas.Year, gas.Australia, 'y.-', label='Australia')
"""
plt.xticks(gas.Year[::3])
plt.xlabel('Year')
plt.ylabel('US Dollars')
plt.legend()
plt.show()
| 23.5
| 103
| 0.698259
| 159
| 1,034
| 4.440252
| 0.490566
| 0.05949
| 0.070822
| 0.09915
| 0.120397
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008762
| 0.117021
| 1,034
| 43
| 104
| 24.046512
| 0.764513
| 0
| 0
| 0
| 0
| 0
| 0.275229
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de40955063f239619674a2b5ecbf4dbaa910621e
| 2,305
|
py
|
Python
|
integration_tests/test_surveys.py
|
ONSdigital/sdx-tester
|
df193867c0d5e9dbf39790c85c41b07a9efed756
|
[
"MIT"
] | null | null | null |
integration_tests/test_surveys.py
|
ONSdigital/sdx-tester
|
df193867c0d5e9dbf39790c85c41b07a9efed756
|
[
"MIT"
] | null | null | null |
integration_tests/test_surveys.py
|
ONSdigital/sdx-tester
|
df193867c0d5e9dbf39790c85c41b07a9efed756
|
[
"MIT"
] | null | null | null |
import unittest
import uuid
from app import survey_loader
from app import message_manager
from app.tester import run_survey
class TestSurveys(unittest.TestCase):
@classmethod
def setUpClass(cls):
message_manager.start()
@classmethod
def tearDownClass(cls):
message_manager.stop()
def tearDown(self):
print('-----------------------------------------------------')
def execute(self, survey_dict: dict, receipt: bool, multiple_files: bool, eq_version_3: bool = False):
for key, survey_list in survey_dict.items():
for survey in survey_list:
tx_id = str(uuid.uuid4())
survey['tx_id'] = tx_id
with self.subTest(msg=f'test {key} with tx_id: {tx_id}'):
print('---------------------------------------------------------')
print(f'testing {key} with tx_id: {tx_id}')
result = run_survey(message_manager, survey, eq_version_3)
print(str(result))
self.assertFalse(result.timeout, f'{key} has timed out!')
self.assertIsNone(result.quarantine, f'{key} has been quarantined!')
self.assertIsNotNone(result.dap_message, f'{key} did not post dap message!')
if multiple_files:
self.assertTrue(len(result.files) > 1, f'{key} should have produced multiple files!')
else:
self.assertTrue(len(result.files) == 1, f'{key} should have produced one file only!')
if receipt:
self.assertIsNotNone(result.receipt, f'{key} did not produce receipt!')
print("PASSED")
def test_dap(self):
surveys = survey_loader.get_dap()
self.execute(surveys, receipt=True, multiple_files=False)
def test_survey(self):
surveys = survey_loader.get_survey()
self.execute(surveys, receipt=True, multiple_files=True)
def test_hybrid(self):
surveys = survey_loader.get_hybrid()
self.execute(surveys, receipt=True, multiple_files=True)
def test_feedback(self):
survey = survey_loader.get_feedback()
self.execute(survey, receipt=False, multiple_files=False)
| 37.786885
| 109
| 0.572668
| 258
| 2,305
| 4.957364
| 0.317829
| 0.071149
| 0.046912
| 0.018765
| 0.279906
| 0.218921
| 0.195465
| 0.162627
| 0.162627
| 0.162627
| 0
| 0.003032
| 0.284599
| 2,305
| 60
| 110
| 38.416667
| 0.772589
| 0
| 0
| 0.086957
| 0
| 0
| 0.16269
| 0.047722
| 0
| 0
| 0
| 0
| 0.130435
| 1
| 0.173913
| false
| 0.021739
| 0.108696
| 0
| 0.304348
| 0.108696
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de42aa506b54f4487685cb532dc908e5f790e4a5
| 509
|
py
|
Python
|
shared/app_business_logic.py
|
c-w/python-loadtests
|
3ffd3dc89780b9372a5d20a71b2becec121ff3d2
|
[
"Apache-2.0"
] | 2
|
2020-02-12T23:03:09.000Z
|
2020-02-12T23:09:42.000Z
|
shared/app_business_logic.py
|
c-w/python-loadtests
|
3ffd3dc89780b9372a5d20a71b2becec121ff3d2
|
[
"Apache-2.0"
] | null | null | null |
shared/app_business_logic.py
|
c-w/python-loadtests
|
3ffd3dc89780b9372a5d20a71b2becec121ff3d2
|
[
"Apache-2.0"
] | null | null | null |
from os import environ
from azure.storage.table import TableService
azure_account_name = environ['AZURE_ACCOUNT_NAME']
azure_account_key = environ['AZURE_ACCOUNT_KEY']
azure_table_name = environ['AZURE_TABLE_NAME']
table = TableService(azure_account_name, azure_account_key)
get_entity = table.get_entity
def fetch_value(ident):
partition_key = ident[:3]
row_key = ident
entity = get_entity(azure_table_name, partition_key, row_key)
value = entity.get('value')
return {'value': value}
| 28.277778
| 65
| 0.776031
| 72
| 509
| 5.125
| 0.291667
| 0.195122
| 0.130081
| 0.151762
| 0.168022
| 0.168022
| 0
| 0
| 0
| 0
| 0
| 0.002262
| 0.131631
| 509
| 17
| 66
| 29.941176
| 0.832579
| 0
| 0
| 0
| 0
| 0
| 0.119843
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.153846
| 0
| 0.307692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de44446f8526c9f2e48dd37b76b2ac71ae33e71b
| 3,424
|
py
|
Python
|
csrank/dataset_reader/objectranking/letor_object_ranking_dataset_reader.py
|
hytsang/cs-ranking
|
241626a6a100a27b96990b4f199087a6dc50dcc0
|
[
"Apache-2.0"
] | null | null | null |
csrank/dataset_reader/objectranking/letor_object_ranking_dataset_reader.py
|
hytsang/cs-ranking
|
241626a6a100a27b96990b4f199087a6dc50dcc0
|
[
"Apache-2.0"
] | null | null | null |
csrank/dataset_reader/objectranking/letor_object_ranking_dataset_reader.py
|
hytsang/cs-ranking
|
241626a6a100a27b96990b4f199087a6dc50dcc0
|
[
"Apache-2.0"
] | 1
|
2018-10-30T08:57:14.000Z
|
2018-10-30T08:57:14.000Z
|
import logging
import h5py
import numpy as np
from sklearn.utils import check_random_state
from csrank.constants import OBJECT_RANKING
from csrank.dataset_reader.letor_dataset_reader import LetorDatasetReader
from csrank.dataset_reader.objectranking.util import sub_sampling
NAME = "LetorObjectRankingDatasetReader"
class LetorObjectRankingDatasetReader(LetorDatasetReader):
def __init__(self, random_state=None, train_obj=5, **kwargs):
super(LetorObjectRankingDatasetReader, self).__init__(learning_problem=OBJECT_RANKING, **kwargs)
self.logger = logging.getLogger(NAME)
self.random_state = check_random_state(random_state)
self.train_obj = train_obj
self.__load_dataset__()
def __load_dataset__(self):
file = h5py.File(self.train_file, 'r')
self.X_train, self.Y_train = self.get_rankings_dict(file)
if self.train_obj is None:
self.train_obj = 5
self.X_train, self.Y_train = self.sub_sampling_for_dictionary()
file = h5py.File(self.test_file, 'r')
self.X_test, self.Y_test = self.get_rankings_dict(file)
self.logger.info("Done loading the dataset")
def get_rankings_dict(self, file):
lengths = file["lengths"]
X = dict()
Y = dict()
for ranking_length in np.array(lengths):
features = np.array(file["X_{}".format(ranking_length)])
rankings = np.array(file["Y_{}".format(ranking_length)])
X[ranking_length], Y[ranking_length] = self.X, self.rankings = features, rankings
self.__check_dataset_validity__()
return X, Y
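    # Expected HDF5 layout, inferred from the reads above (hedged):
    #   file["lengths"]               -> array of ranking lengths n
    #   file["X_{n}"] / file["Y_{n}"] -> feature / ranking arrays per length n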
def sub_sampling_for_dictionary(self):
X = []
Y = []
for n in self.X_train.keys():
if n > self.train_obj:
x, y = sub_sampling(NAME, self.X_train[n], self.Y_train[n], n_objects=self.train_obj)
if len(X) == 0:
X = np.copy(x)
Y = np.copy(y)
else:
X = np.concatenate([X, x], axis=0)
Y = np.concatenate([Y, y], axis=0)
if self.train_obj in self.X_train.keys():
X = np.concatenate([X, np.copy(self.X_train[self.train_obj])], axis=0)
Y = np.concatenate([Y, np.copy(self.Y_train[self.train_obj])], axis=0)
self.logger.info("Sampled instances {} objects {}".format(X.shape[0], X.shape[1]))
return X, Y
def splitter(self, iter):
pass
def get_train_test_datasets(self, n_datasets):
return self.X_train, self.Y_train, self.X_test, self.Y_test
def get_complete_dataset(self):
pass
def get_single_train_test_split(self):
return self.X_train, self.Y_train, self.X_test, self.Y_test
# if __name__ == '__main__':
# import sys
# import os
# import inspect
# dirname = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
# logging.basicConfig(filename=os.path.join(dirname, 'log.log'), level=logging.DEBUG,
# format='%(asctime)s %(name)s %(levelname)-8s %(message)s',
# datefmt='%Y-%m-%d %H:%M:%S')
# logger = logging.getLogger(name='letor')
# sys.path.append("..")
# for n in [2008, 2007]:
# ds = LetorObjectRankingDatasetReader(year=n)
# logger.info(ds.X_train.shape)
# logger.info(np.array(ds.X_test.keys).shape)
| 39.356322
| 104
| 0.629965
| 457
| 3,424
| 4.474836
| 0.249453
| 0.031785
| 0.046944
| 0.03423
| 0.150122
| 0.113936
| 0.066504
| 0.043032
| 0.043032
| 0.043032
| 0
| 0.008133
| 0.245911
| 3,424
| 86
| 105
| 39.813953
| 0.783888
| 0.183119
| 0
| 0.101695
| 0
| 0
| 0.037024
| 0.011143
| 0
| 0
| 0
| 0
| 0
| 1
| 0.135593
| false
| 0.033898
| 0.118644
| 0.033898
| 0.338983
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de481c317eb312cc809e4b8eb2f8383abd96ba97
| 324
|
py
|
Python
|
src/elrados/views.py
|
IamShobe/elrados
|
dd2523e1523591c7a3213dfd062b376f41bb9f18
|
[
"MIT"
] | 2
|
2018-07-20T11:03:42.000Z
|
2019-06-06T06:00:12.000Z
|
src/elrados/views.py
|
IamShobe/elrados
|
dd2523e1523591c7a3213dfd062b376f41bb9f18
|
[
"MIT"
] | null | null | null |
src/elrados/views.py
|
IamShobe/elrados
|
dd2523e1523591c7a3213dfd062b376f41bb9f18
|
[
"MIT"
] | 2
|
2018-12-18T16:00:34.000Z
|
2019-04-08T14:29:02.000Z
|
"""Global index view."""
import pkg_resources
from django.shortcuts import render
def index(request):
"""Basic view."""
plugins = \
[plugin.load() for plugin in
pkg_resources.iter_entry_points(group='elrados.plugins')]
return render(request, "index.html", {
"plugins": plugins
})
| 21.6
| 66
| 0.641975
| 37
| 324
| 5.513514
| 0.675676
| 0.117647
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.222222
| 324
| 14
| 67
| 23.142857
| 0.809524
| 0.092593
| 0
| 0
| 0
| 0
| 0.113074
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.222222
| 0
| 0.444444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de48207667680d4095ac834e7b25417f0ab4f83a
| 2,274
|
py
|
Python
|
examples/old/zipline_momentun.py
|
sherrytp/TradingEvolved
|
4bc9cc18244954bff37a80f67cce658bd0802b5d
|
[
"Apache-2.0"
] | null | null | null |
examples/old/zipline_momentun.py
|
sherrytp/TradingEvolved
|
4bc9cc18244954bff37a80f67cce658bd0802b5d
|
[
"Apache-2.0"
] | null | null | null |
examples/old/zipline_momentun.py
|
sherrytp/TradingEvolved
|
4bc9cc18244954bff37a80f67cce658bd0802b5d
|
[
"Apache-2.0"
] | 1
|
2022-03-26T07:11:18.000Z
|
2022-03-26T07:11:18.000Z
|
import pandas as pd
import matplotlib.pyplot as plt
from zipline.finance.commission import PerShare
from zipline.api import set_commission, symbol, order_target_percent
import zipline
from models.live_momentum import LiveMomentum
with open('/Users/landey/Desktop/Eonum/live_model/eouniverse/stock_list.txt', 'r') as f:
data = f.read().split()
tickers = data[:20]
etf_list = tickers[15:]
def initialize(context):
    context.momentum_window = 5
    context.momentum_window2 = 10
context.min_long_momentum = 60
context.max_short_momentum = -10
context.long = 15
context.short = 15
context.etfs = 5
comm_model = PerShare(cost=0.0005)
set_commission(comm_model)
def handle_data(context, data):
equity_symbols = [symbol(i) for i in tickers]
etf_symbols = [symbol(i) for i in etf_list]
    hist_window = max(context.momentum_window, context.momentum_window2)
equity_hist = data.history(equity_symbols, 'close', hist_window, "1d").copy()
etf_hist = data.history(etf_symbols, 'close', hist_window, "1d").copy()
equity_hist_ = equity_hist.rename(columns={col: col.symbol for col in equity_hist.columns})
etf_hist_ = etf_hist.rename(columns={col: col.symbol for col in etf_hist.columns})
live = LiveMomentum(equity_hist_, etf_hist_, etf_mom=300, mom1=20, mom2=40,
min_long_mom=20, max_short_mom=-2, long=10,
short=5, etf=3)
# print(equity_hist_)
equity, etf = live.risk_model()
if equity:
for ticker, weight in equity.items():
if data.can_trade(symbol(ticker)) and weight != 0:
order_target_percent(symbol(ticker), weight)
if etf:
for ticker, weight in etf.items():
if data.can_trade(symbol(ticker)) and weight != 0:
order_target_percent(symbol(ticker), weight)
start = pd.Timestamp('2020-3-22', tz='utc')
end = pd.Timestamp('2020-4-28', tz='utc')
perf = zipline.run_algorithm(start=start,
end=end,
initialize=initialize,
capital_base=100000,
handle_data=handle_data,
bundle='sep')
perf.portfolio_value.plot()
plt.show()
| 30.72973
| 95
| 0.647757
| 304
| 2,274
| 4.648026
| 0.365132
| 0.042463
| 0.038217
| 0.024062
| 0.229299
| 0.229299
| 0.161359
| 0.161359
| 0.161359
| 0.108988
| 0
| 0.036649
| 0.244063
| 2,274
| 73
| 96
| 31.150685
| 0.78534
| 0.008355
| 0
| 0.08
| 0
| 0
| 0.047048
| 0.028407
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0
| 0.12
| 0
| 0.16
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de4860345de948d81c21b1062677ea640e28f033
| 10,120
|
py
|
Python
|
packages/robotControl/scripts/intercept.py
|
Falcons-Robocup/code
|
2281a8569e7f11cbd3238b7cc7341c09e2e16249
|
[
"Apache-2.0"
] | 2
|
2021-01-15T13:27:19.000Z
|
2021-08-04T08:40:52.000Z
|
packages/robotControl/scripts/intercept.py
|
Falcons-Robocup/code
|
2281a8569e7f11cbd3238b7cc7341c09e2e16249
|
[
"Apache-2.0"
] | null | null | null |
packages/robotControl/scripts/intercept.py
|
Falcons-Robocup/code
|
2281a8569e7f11cbd3238b7cc7341c09e2e16249
|
[
"Apache-2.0"
] | 5
|
2018-05-01T10:39:31.000Z
|
2022-03-25T03:02:35.000Z
|
#!/usr/bin/env python3
# Copyright 2020 Jan Feitsma (Falcons)
# SPDX-License-Identifier: Apache-2.0
# Jan Feitsma, March 2020
# Robot will continuously intercept around current position.
#
# For description and usage hints, execute with '-h'
import sys, os
import time
import logging, signal
logging.basicConfig(level=logging.INFO)
import math, random
import argparse
import falconspy
import rtdb2tools
from robotLibrary import RobotLibrary
from worldState import WorldState
from FalconsCoordinates import *
def parse_arguments():
parser = argparse.ArgumentParser(description="""Automated single-robot intercept test. Robot will choose a position in a circle, continuously attempting to intercept the ball and pass to next robot. Includes a fallback getball in case ball bounces off. See also: wrapper script interceptCircle.py.""")
parser.add_argument('-a', '--actionradius', help='zone/action radius: in case intercept fails and ball is within this radius, just do a getball fallback', type=float, default=2.0)
parser.add_argument('-c', '--circleradius', help='home position circle radius on which robot default positions are set', type=float, default=4.0)
parser.add_argument('-t', '--target', help='pass target (default: next robot)', type=float, nargs=2, default=None)
parser.add_argument('-n', '--targetnoise', help='aim given amount of meters at a random side next to the target', type=float, default=0.0)
parser.add_argument('-w', '--dontwait', help='do not wait with intercepting until previous robot has the ball', action='store_true')
parser.add_argument('-q', '--quiet', help='suppress output', action='store_true')
# TODO use option 'active' intercept?
parser.add_argument('--home', help='home position (x,y), default calculated based on available robots and circleradius', type=float, nargs=2, default=None)
parser.add_argument('-i', '--index', help='home position index to choose (starting count at 1), default calculate based on available robots', type=int, nargs=2, default=None)
parser.add_argument('-r', '--robot', help='robot ID to use (intended only for simulation)', type=int, default=rtdb2tools.guessAgentId())
parser.add_argument('--ignore', help='robots to be ignored', type=int, nargs='+', default=[1])
return parser.parse_args()
def calcCirclePos(robotIdx, numRobots, radius=3, center=(0,0)):
"""
Helper function to distribute robot positions on a circle.
"""
gamma = 2*math.pi / numRobots
x = radius * math.cos(gamma * robotIdx) + center[0]
y = radius * math.sin(gamma * robotIdx) + center[1]
phi = gamma * robotIdx - math.pi
return (x, y, phi)
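# Worked example: calcCirclePos(0, 4) gives gamma = pi/2 and returns
# (3.0, 0.0, -pi) -- robot 0 of 4 sits on the positive x-axis at the default
# radius of 3, oriented toward the circle centre.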
class Interceptor():
def __init__(self, settings):
self.settings = settings
self.rl = RobotLibrary(settings.robot, joystick=False)
self.ws = WorldState(settings.robot)
self.ws.startMonitoring()
self.otherRobotHasBall = False
# setup logging
self.state = None
self.logger = self.initializeLogger()
if settings.quiet:
self.logger.setLevel(logging.NOTSET)
# setup signal handler for proper shutdown
self.done = False
signal.signal(signal.SIGINT, self.signalHandler)
def signalHandler(self, signal, frame):
self.done = True
self.ws.stopMonitoring()
self.rl.shutdown()
# TODO: this is not yet working as intended...
def initializeLogger(self):
"""
Setup the logging environment
"""
log = logging.getLogger() # root logger
log.setLevel(logging.INFO)
format_str = '%(asctime)s.%(msecs)03d - %(levelname)-8s - r' + str(self.settings.robot) + ' - %(message)s'
date_format = '%Y-%m-%dT%H:%M:%S'
formatter = logging.Formatter(format_str, date_format)
stream_handler = logging.StreamHandler()
stream_handler.setFormatter(formatter)
log.handlers = [] # clear
log.addHandler(stream_handler)
return logging.getLogger(__name__)
def activeRobots(self):
# ignore r1, if it is present, because it can never contribute
return [r for r in self.ws.activeRobots() if not r in self.settings.ignore]
def calculateRobotIndex(self):
# optional overrule
if self.settings.index != None:
idx0 = self.settings.index[0] - 1
n = self.settings.index[1]
else:
# default: get active robots and figure out index of this robot
a = self.activeRobots()
while not self.settings.robot in a: # init robustness
time.sleep(0.1)
a = self.activeRobots()
n = len(a)
idx0 = a.index(self.settings.robot)
return (idx0, n)
def calculateHomePosition(self):
# optional overrule
if self.settings.home != None:
(x, y) = self.settings.home
rz = math.pi * 0.5
else:
# default: position on a circle
(idx0, n) = self.calculateRobotIndex()
(x, y, rz) = calcCirclePos(idx0, n, self.settings.circleradius)
# face the ball if possible
b = self.ws.getBallPosition()
if b:
rz = math.atan2(b.y - y, b.x - x)
return (x, y, rz)
def canStartIntercept(self):
# optional overrule
if self.settings.dontwait:
return True
# robot should never stand idle if ball is closeby
if self.ballCloseBy():
return True
# check if previous robot has the ball
(idx0, n) = self.calculateRobotIndex()
a = self.activeRobots()
otherIdx = a[(idx0-1) % n]
# wait for the pass (state change in ball possession)
# robot should not intercept when other robot is still turning for instance
otherRobotHadBall = self.otherRobotHasBall
self.otherRobotHasBall = self.ws.hasBall(otherIdx)
return self.otherRobotHasBall == False and otherRobotHadBall == True
def determineTarget(self, noise=None):
# optional overrule
if self.settings.target:
(x, y) = self.settings.target
rz = 0
else:
# calculate nominal position of next robot
(idx0, n) = self.calculateRobotIndex()
a = self.activeRobots()
otherIdx = a[(idx0+1) % n]
(x, y, rz) = calcCirclePos(idx0+1, n, self.settings.circleradius)
otherPos = RobotPose(x, y, rz)
# add noise?
if noise:
# add noise to RCS x (perpendicular)
ownPos = self.ws.getRobotPosition()
ownPos.Rz = math.atan2(y - ownPos.y, x - ownPos.x) # face target
otherPosRcs = otherPos.transform_fcs2rcs(ownPos)
# offset RCS x in a random direction
r = random.randint(0, 1)
otherPosRcs.x += (r * 2 - 1) * noise
# back to FCS
otherPos = otherPosRcs.transform_rcs2fcs(ownPos)
return (otherPos.x, otherPos.y) # ignore Rz
def canPass(self):
# compare current position of next robot with nominal
nominalTarget = self.determineTarget()
(idx0, n) = self.calculateRobotIndex()
a = self.activeRobots()
if len(a) == 1:
return True
otherIdx = a[(idx0+1) % n]
otherPos = self.ws.getRobotPosition(otherIdx)
delta = otherPos - RobotPose(*nominalTarget)
return delta.xy().size() < 0.3
def ballCloseBy(self):
bd = self.ws.ballDistance()
return bd != None and bd < self.settings.actionradius
def setState(self, state):
# only write state change
if self.state != state:
# write to RDL eventlog
os.system('export TURTLE5K_ROBOTNUMBER=' + str(self.settings.robot) + ' ; frun diagnostics sendEvent INFO "' + state + '" > /dev/null')
# write to stdout?
logging.info(state)
self.state = state
def run(self):
# iterate
while not self.done:
# move to starting position, facing ball, with coarse tolerances
homePos = self.calculateHomePosition()
self.setState('repositioning / waiting')
self.rl.move(*homePos, xyTol=0.1, rzTol=0.05)
# wait until robot can start his intercept/getBall attempt
if self.canStartIntercept():
# get the ball, preferably via intercept
while not self.ws.hasBall() and not self.done:
if self.ballCloseBy():
self.setState('getball fallback')
self.rl.getBall() # blocking
else:
self.setState('intercepting')
self.rl.interceptBall() # blocking (with not-so-obvious RUNNING/FAILED criteria -> see mp code)
# note: good weather behavior: ball comes into the action radius while the robot
# is continuously intercepting on it, until pass/fail, so the getBall
# fallback should only start after intercept returns FAILED due to the ball moving away
# other robot might still be repositioning
while not self.canPass() and not self.done:
self.setState('waiting to pass')
time.sleep(0.1)
# pass to next robot and sleep a while, to prevent directly chasing the ball
self.setState('pass')
self.rl.passTo(*self.determineTarget(self.settings.targetnoise))
time.sleep(0.5)
else:
# sleep a bit
time.sleep(0.1)
# check if robot went offline
self.done = self.settings.robot not in self.activeRobots()
def main(args):
interceptor = Interceptor(args)
interceptor.run()
if __name__ == '__main__':
args = parse_arguments()
if args.robot == 0 or args.robot == None:
raise RuntimeError("Error: could not determine robot ID, this script should run on a robot")
main(args)
| 42.700422
| 305
| 0.619368
| 1,219
| 10,120
| 5.108285
| 0.297785
| 0.038542
| 0.0273
| 0.014132
| 0.083507
| 0.062149
| 0.045768
| 0.033082
| 0.033082
| 0.019271
| 0
| 0.011244
| 0.279348
| 10,120
| 236
| 306
| 42.881356
| 0.842589
| 0.183399
| 0
| 0.15
| 0
| 0.0125
| 0.155107
| 0.005382
| 0
| 0
| 0
| 0.008475
| 0
| 1
| 0.09375
| false
| 0.04375
| 0.0625
| 0.00625
| 0.24375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de4f135b4907a9ad1ee036150f5775fba0b81256
| 4,859
|
py
|
Python
|
arpym/tools/plc.py
|
dpopadic/arpmRes
|
ddcc4de713b46e3e9dcb77cc08c502ce4df54f76
|
[
"MIT"
] | 6
|
2021-04-10T13:24:30.000Z
|
2022-03-26T08:20:42.000Z
|
arpym/tools/plc.py
|
dpopadic/arpmRes
|
ddcc4de713b46e3e9dcb77cc08c502ce4df54f76
|
[
"MIT"
] | null | null | null |
arpym/tools/plc.py
|
dpopadic/arpmRes
|
ddcc4de713b46e3e9dcb77cc08c502ce4df54f76
|
[
"MIT"
] | 6
|
2019-08-13T22:02:17.000Z
|
2022-02-09T17:49:12.000Z
|
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec, GridSpecFromSubplotSpec
from matplotlib.ticker import FuncFormatter
def tick_label_func(y, pos=None):
return '%1.f' % (5 * y * 1e-2 // 5)
def tick_label_func_1(y, pos=None):
return '%0.0f' % y
def plot_dynamic_strats(t, v_t_strat, v_t_risky, w_t_risky, h_t_risky,
num, j_sel):
"""For details, see here.
Parameters
----------
t : array, shape (t_,)
v_t_strat : array, shape (j_,t_)
v_t_risky : array, shape (j_,t_)
w_t_risky : array, shape (j_,t_)
h_t_risky: array, shape (j_,t_)
num: int
j_sel: int
"""
# adjust v_t_risky so that it has the same initial value as v_t_strat
v_t_risky = v_t_risky * v_t_strat[0, 0] / v_t_risky[0, 0]
mu_risky = np.mean(v_t_risky, axis=0, keepdims=True).reshape(-1)
sig_risky = np.std(v_t_risky, axis=0, keepdims=True).reshape(-1)
mu_strat = np.mean(v_t_strat, axis=0, keepdims=True).reshape(-1)
sig_strat = np.std(v_t_strat, axis=0, keepdims=True).reshape(-1)
plt.style.use('arpm')
fig = plt.figure()
gs = GridSpec(1, 2)
gs1 = GridSpecFromSubplotSpec(3, 1, subplot_spec=gs[0])
num_bins = int(round(100 * np.log(v_t_strat.shape[1])))
lgrey = [0.8, 0.8, 0.8] # light grey
dgrey = [0.4, 0.4, 0.4] # dark grey
j_ = v_t_risky.shape[0]
x_min = t[0]
x_max = 1.25 * t[-1]
y_min = v_t_strat[0, 0] / 4
y_max = v_t_strat[0, 0] * 2.25
# scatter plot
ax4 = plt.subplot(gs[1])
plt.scatter(v_t_risky[:, -1], v_t_strat[:, -1], marker='.', s=2)
so = np.sort(v_t_risky[:, -1])
plt.plot(so, so, label='100% risky instrument', color='r')
plt.plot([y_min, v_t_risky[j_sel, -1], v_t_risky[j_sel, -1]],
[v_t_strat[j_sel, -1], v_t_strat[j_sel, -1], y_min], 'b--')
plt.plot(v_t_risky[j_sel, -1], v_t_strat[j_sel, -1], 'bo')
ax4.set_xlim(y_min, y_max)
ax4.set_ylim(y_min, y_max)
ax4.xaxis.set_major_formatter(FuncFormatter(tick_label_func))
ax4.yaxis.set_major_formatter(FuncFormatter(tick_label_func))
plt.xlabel('Strategy')
plt.ylabel('Risky instrument')
plt.legend()
# weights and holdings
ax3 = plt.subplot(gs1[2])
y_min_3 = np.min(h_t_risky[j_sel, : -1])
y_max_3 = np.max(h_t_risky[j_sel, : -1])
plt.sca(ax3)
plt.plot(t, w_t_risky[j_sel, :], color='b')
plt.axis([x_min, x_max, 0, 1])
plt.xticks(np.linspace(t[0], 1.2 * t[-1], 7))
plt.yticks(np.linspace(0, 1, 3), color='b')
plt.ylabel('Weights', color='b')
plt.xlabel('Time')
ax3_2 = ax3.twinx()
plt.plot(t, h_t_risky[j_sel, :], color='black')
plt.ylabel('Holdings', color='black')
plt.axis([x_min, x_max, y_min_3 - 1, y_max_3 + 1])
plt.yticks(np.linspace(y_min_3, y_max_3, 3))
ax3_2.yaxis.set_major_formatter(FuncFormatter(tick_label_func_1))
ax1 = plt.subplot(gs1[0], sharex=ax3, sharey=ax4)
# simulated path, standard deviation of strategy
for j in range(j_ - num, j_):
plt.plot(t, v_t_strat[j, :], color=lgrey)
plt.plot(t, v_t_strat[j_sel, :], color='b')
plt.plot(t, mu_strat + sig_strat, color='orange')
plt.plot(t, mu_strat - sig_strat, color='orange')
plt.xticks(np.linspace(t[0], 1.2 * t[-1], 7))
# histogram
y_hist, x_hist = np.histogram(v_t_strat[:, -1], num_bins)
scale = 0.25 * t[-1] / np.max(y_hist)
y_hist = y_hist * scale
plt.barh(x_hist[: -1], y_hist, height=(max(x_hist) - min(x_hist)) /
(len(x_hist) - 1), left=t[-1], facecolor=dgrey, edgecolor=dgrey)
plt.setp(ax1.get_xticklabels(), visible=False)
plt.ylabel('Strategy')
ax1.set_ylim(y_min, y_max)
ax1.yaxis.set_major_formatter(FuncFormatter(tick_label_func))
# risky instrument
ax2 = plt.subplot(gs1[1], sharex=ax3, sharey=ax4)
# simulated path, standard deviation of risky instrument
for j in range(j_ - num, j_):
plt.plot(t, v_t_risky[j, :], color=lgrey)
plt.plot(t, v_t_risky[j_sel, :], color='b')
plt.plot(t, mu_risky + sig_risky, color='orange')
plt.plot(t, mu_risky - sig_risky, color='orange')
plt.xticks(np.linspace(t[0], 1.2 * t[-1], 7))
# histogram
y_hist, x_hist = np.histogram(v_t_risky[:, -1], num_bins)
scale = 0.25 * t[-1] / np.max(y_hist)
y_hist = y_hist * scale
plt.barh(x_hist[: -1], y_hist, height=(max(x_hist) - min(x_hist)) /
(len(x_hist) - 1), left=t[-1], facecolor=dgrey, edgecolor=dgrey)
plt.setp(ax2.get_xticklabels(), visible=False)
plt.ylabel('Risky instrument')
ax2.set_ylim(y_min, y_max)
ax2.yaxis.set_major_formatter(FuncFormatter(tick_label_func))
plt.grid(True)
plt.tight_layout()
return fig, gs
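# Hedged, synthetic usage sketch (illustrative only): shapes follow the
# docstring of plot_dynamic_strats; note that plt.style.use('arpm') inside
# the function assumes the ARPM matplotlib style file is installed.
if __name__ == '__main__':
    j_, t_ = 500, 120
    t = np.arange(1, t_ + 1)
    rng = np.random.default_rng(0)
    # synthetic risky-instrument and strategy value paths, both starting near 100
    v_t_risky = 100 * np.exp(np.cumsum(0.01 * rng.standard_normal((j_, t_)), axis=1))
    v_t_strat = 100 * np.exp(np.cumsum(0.005 * rng.standard_normal((j_, t_)), axis=1))
    # synthetic risky weights in [0, 1] and the implied (rounded) holdings
    w_t_risky = np.clip(0.5 + 0.1 * rng.standard_normal((j_, t_)), 0.0, 1.0)
    h_t_risky = np.round(w_t_risky * v_t_strat / v_t_risky)
    plot_dynamic_strats(t, v_t_strat, v_t_risky, w_t_risky, h_t_risky,
                        num=50, j_sel=0)
    plt.show()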
| 35.210145
| 106
| 0.61844
| 845
| 4,859
| 3.304142
| 0.181065
| 0.023639
| 0.042622
| 0.028653
| 0.557307
| 0.534384
| 0.445559
| 0.41404
| 0.353152
| 0.230659
| 0
| 0.039087
| 0.215476
| 4,859
| 137
| 107
| 35.467153
| 0.693337
| 0.109488
| 0
| 0.163043
| 0
| 0
| 0.03445
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032609
| false
| 0
| 0.043478
| 0.021739
| 0.108696
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de4fbddd1a8e5c3c47f15c39acb99e707f22e65b
| 617
|
py
|
Python
|
src/alerter.py
|
Jawgo/DiscordBot
|
43dccce80aa8d8bd51b44c0de732fd70d9194672
|
[
"MIT"
] | null | null | null |
src/alerter.py
|
Jawgo/DiscordBot
|
43dccce80aa8d8bd51b44c0de732fd70d9194672
|
[
"MIT"
] | null | null | null |
src/alerter.py
|
Jawgo/DiscordBot
|
43dccce80aa8d8bd51b44c0de732fd70d9194672
|
[
"MIT"
] | null | null | null |
import os
from discord import Webhook, RequestsWebhookAdapter, Colour, Embed
def send_alert(item):
hook = os.environ.get("WEB_HOOK")
webhook = Webhook.from_url(hook, adapter=RequestsWebhookAdapter())
embedVar = Embed(title="Stock Hunter")
if item.in_stock:
embedVar.description = "{} **IN STOCK** at [{}]({})".format(item.item_name, item.domain, item.url)
embedVar.colour = Colour.green()
else:
embedVar.description = "{} **out of stock** at [{}]({})".format(item.item_name, item.domain, item.url)
embedVar.colour = Colour.red()
webhook.send(embed=embedVar)
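# Hedged usage sketch: `item` is assumed to expose in_stock, item_name,
# domain and url (names taken from the attribute accesses above), e.g.:
#
#   from types import SimpleNamespace
#   item = SimpleNamespace(in_stock=True, item_name='RTX 3080',
#                          domain='example.com', url='https://example.com/rtx')
#   send_alert(item)  # requires the WEB_HOOK environment variable to be set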
| 36.294118
| 110
| 0.666126
| 75
| 617
| 5.4
| 0.453333
| 0.034568
| 0.064198
| 0.083951
| 0.306173
| 0.306173
| 0.306173
| 0.306173
| 0.306173
| 0.306173
| 0
| 0
| 0.178282
| 617
| 16
| 111
| 38.5625
| 0.798817
| 0
| 0
| 0
| 0
| 0
| 0.126418
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.153846
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de50a4c4fb04e2350cc10caa2aea9a7a75fcac8c
| 4,593
|
py
|
Python
|
dataset_preproc/preproc_video/face_extract.py
|
RicardoP0/multimodal-matchmap
|
aa44c574a57073833004172734394882889d8d3b
|
[
"MIT"
] | null | null | null |
dataset_preproc/preproc_video/face_extract.py
|
RicardoP0/multimodal-matchmap
|
aa44c574a57073833004172734394882889d8d3b
|
[
"MIT"
] | null | null | null |
dataset_preproc/preproc_video/face_extract.py
|
RicardoP0/multimodal-matchmap
|
aa44c574a57073833004172734394882889d8d3b
|
[
"MIT"
] | null | null | null |
#%%
# https://github.com/timesler/facenet-pytorch
from facenet_pytorch import MTCNN, extract_face
import torch
import numpy as np
import mmcv, cv2
import os
import matplotlib.pyplot as plt
from PIL import Image
#%%
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('Running on device: {}'.format(device))
print(os.getcwd())
mtcnn = MTCNN(keep_all=True, device=device, image_size=100)
video_dir = "VIDEO_FILES/"
dest_path = 'VIDEO_PROCESSED/'
dir_list = os.listdir(video_dir)
dir_list.sort()
if not os.path.exists(dest_path):
    os.makedirs(dest_path)
#%%
# iemocap
k = 1  # session to process
video_dir = "IEMOCAP_full_release.tar/IEMOCAP_full_release/Session{}/dialog/avi/DivX".format(k)
dir_list = os.listdir(video_dir)
dir_list.sort()
dir_list = [x for x in dir_list if x[0] == 'S']
i = 0
#%%
dir_list
path = 'datasets/IEMOCAP/CLIPPED_VIDEOS/' + 'Session{}/'.format(k)
if not os.path.exists(path):
    os.makedirs(path)
dir_list
#%%
# divide each video and manually crop around each speaker's face
video_dir = "IEMOCAP_full_release.tar/IEMOCAP_full_release/Session{}/dialog/avi/DivX".format(k)
dir_list = os.listdir(video_dir)
dir_list.sort()
dir_list = [x for x in dir_list if x[0] == 'S']
path = 'IEMOCAP/CLIPPED_VIDEOS/' + 'Session{}/'.format(k)
if not os.path.exists(path):
    os.makedirs(path)
for file_name in dir_list:
    print(file_name)
    video = mmcv.VideoReader(video_dir + '/' + file_name)
    if 'F_' in file_name:
        new_file_left = path + file_name[:-4] + '_F.avi'
        new_file_right = path + file_name[:-4] + '_M.avi'
    else:
        new_file_left = path + file_name[:-4] + '_M.avi'
        new_file_right = path + file_name[:-4] + '_F.avi'
    h, w, c = video[0].shape
    dim = (300, 280)
    fourcc = cv2.VideoWriter_fourcc(*'FMP4')
    # left speaker
    video_tracked = cv2.VideoWriter(new_file_left, fourcc, 25.0, dim)
    i = 0
    for frame in video:
        h, w, c = frame.shape
        # different crop boxes for each session; box is a (left, upper, right, lower) tuple
        # ses1 [120:int(h-690), 120:int(w/2.4)]
        # ses2 [150:int(h-660), 120:int(w/2.4)]
        # ses5 [120:int(h-690), 120:int(w/2.4)]
        # [130:int(h/2.18), 120:int(w/2.4)]
        video_tracked.write(frame[100:h-100, :300])
    video_tracked.release()
    del video_tracked
    print(h, w, c)
    dim = (370, 280)
    # right speaker
    video_tracked = cv2.VideoWriter(new_file_right, fourcc, 25.0, dim)
    for frame in video:
        h, w, c = frame.shape
        # ses1 [150:int(h-660), int(w/1.5):int(w-60)]
        # ses2 [150:int(h-660), int(w/1.5):int(w-60)]
        # ses5 [150:int(h-660), int(w/1.5):int(w-60)]
        video_tracked.write(frame[100:h-100, 350:])
    video_tracked.release()
    del video, video_tracked
#%%
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('Running on device: {}'.format(device))
print(os.getcwd())
mtcnn = MTCNN(keep_all=True, device=device, image_size=2000, margin=5)
i = 1
video_dir = "../../../../datasets/IEMOCAP/CLIPPED_VIDEOS/Session{}/".format(i)
dir_list = os.listdir(video_dir)
dir_list.sort()
dir_list = [x for x in dir_list if x[0] == 'S']
dir_list
#%%
file_list = dir_list
path = '../datasets/IEMOCAP/FACE_VIDEOS/Session{}/'.format(i)
if not os.path.exists(path):
    os.makedirs(path)
#%%
# track faces using mtcnn
for file_name in file_list:
    video = mmcv.VideoReader(video_dir + file_name)
    frames = [Image.fromarray(cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)) for frame in video]
    frames_tracked = []
    img = None
    for x, frame in enumerate(frames):
        # print('\rTracking frame: {}'.format(i + 1), end='')
        # Detect faces
        boxes, _ = mtcnn.detect(frame)
        if boxes is not None:
            # print(boxes[0])
            im_array = extract_face(frame, boxes[0], image_size=112, margin=50)
            img = Image.fromarray(np.uint8(im_array.permute(1, 2, 0).numpy()))
        # when detection fails on a frame, reuse the last detected face (if any)
        if img is not None:
            frames_tracked.append(img)
    dim = frames_tracked[0].size
    print(len(frames), len(frames_tracked))
    new_file = path + '/' + file_name
    print(new_file)
    fourcc = cv2.VideoWriter_fourcc(*'FMP4')
    video_tracked = cv2.VideoWriter(new_file, fourcc, 25.0, dim)
    for frame in frames_tracked:
        video_tracked.write(cv2.cvtColor(np.array(frame), cv2.COLOR_RGB2BGR))
    video_tracked.release()
    del video, video_tracked, frames_tracked, frames
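#%%
# Minimal single-image sketch of the MTCNN + extract_face flow used above;
# 'sample.jpg' is a hypothetical probe image, skipped if absent.
if os.path.exists('sample.jpg'):
    probe = Image.open('sample.jpg')
    probe_boxes, _ = mtcnn.detect(probe)
    if probe_boxes is not None:
        extract_face(probe, probe_boxes[0], image_size=112, margin=50, save_path='face.png')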
| 29.254777
| 95
| 0.642717
| 708
| 4,593
| 4.002825
| 0.214689
| 0.04693
| 0.021171
| 0.022583
| 0.584333
| 0.521877
| 0.466126
| 0.363797
| 0.344037
| 0.286168
| 0
| 0.045939
| 0.203788
| 4,593
| 156
| 96
| 29.442308
| 0.729013
| 0.152188
| 0
| 0.438776
| 0
| 0
| 0.114078
| 0.075966
| 0.020408
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.071429
| 0
| 0.071429
| 0.081633
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de5241403b212e20d0b5a9c1eb86d5461e49bad7
| 957
|
py
|
Python
|
hlrl/torch/utils/contexts/training.py
|
Chainso/HLRL
|
584f4ed2fa4d8b311a21dbd862ec9434833dd7cd
|
[
"MIT"
] | null | null | null |
hlrl/torch/utils/contexts/training.py
|
Chainso/HLRL
|
584f4ed2fa4d8b311a21dbd862ec9434833dd7cd
|
[
"MIT"
] | null | null | null |
hlrl/torch/utils/contexts/training.py
|
Chainso/HLRL
|
584f4ed2fa4d8b311a21dbd862ec9434833dd7cd
|
[
"MIT"
] | null | null | null |
from contextlib import contextmanager

import torch.nn as nn

@contextmanager
def evaluate(module: nn.Module):
    """
    A context manager for evaluating the module.

    Args:
        module: The module to switch to evaluation mode in the context.

    Returns:
        A generator for the context of the module.
    """
    was_training = module.training
    try:
        module.eval()
        yield module
    finally:
        # Switch back to training if needed
        if was_training:
            module.train()

@contextmanager
def training(module: nn.Module):
    """
    A context manager for training the module.

    Args:
        module: The module to switch to training mode in the context.

    Returns:
        A generator for the context of the module.
    """
    was_training = module.training
    try:
        module.train()
        yield module
    finally:
        # Switch back to evaluation if needed
        if not was_training:
            module.eval()
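# Minimal usage sketch: the context flips eval/train and restores the module's
# previous training flag on exit. The model here is a stand-in.
if __name__ == "__main__":
    net = nn.Linear(4, 2)
    net.train()
    with evaluate(net):
        assert not net.training  # eval mode inside the context
    assert net.training          # original mode restored afterwards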
| 20.804348
| 66
| 0.6186
| 114
| 957
| 5.192982
| 0.289474
| 0.091216
| 0.047297
| 0.050676
| 0.685811
| 0.685811
| 0.685811
| 0.577703
| 0.577703
| 0.449324
| 0
| 0
| 0.317659
| 957
| 45
| 67
| 21.266667
| 0.906585
| 0.428422
| 0
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.1
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de5df9efa200676cbee6ac7078451697101f76eb
| 2,931
|
py
|
Python
|
flora_tools/experiments/measure_time_irq_process.py
|
Atokulus/flora-tools
|
6f878a4495e4dcb6b9bc19a75aaac37b9dfb16b0
|
[
"MIT"
] | 1
|
2020-11-20T16:36:17.000Z
|
2020-11-20T16:36:17.000Z
|
flora_tools/experiments/measure_time_irq_process.py
|
Atokulus/flora-tools
|
6f878a4495e4dcb6b9bc19a75aaac37b9dfb16b0
|
[
"MIT"
] | null | null | null |
flora_tools/experiments/measure_time_irq_process.py
|
Atokulus/flora-tools
|
6f878a4495e4dcb6b9bc19a75aaac37b9dfb16b0
|
[
"MIT"
] | null | null | null |
from flora_tools.experiment import *

class MeasureTimeIRQProcess(Experiment):
    def __init__(self):
        description = "Measures the time needed for an IRQ to be processed."
        Experiment.__init__(self, description)

    def run(self, bench, iterations=10000):
        self.iterations = iterations
        Experiment.run(self, bench)

        columns = ['time', 'window', 'precision', 'modulation', 'band', 'react', 'finish']
        df = pd.DataFrame(columns=columns)
        df.index.name = 'sample'

        for i in range(0, self.iterations):
            configuration = RadioConfiguration.get_random_configuration(tx=False, irq_direct=True)
            self.bench.devkit_a.cmd(configuration.cmd)
            math = RadioMath(configuration)

            min_window = 0.0001
            min_precision = 5E-6
            window, points, precision = self.bench.scope.get_next_valid_window(min_window, min_precision)

            time.sleep(0.01)
            self.bench.scope.init_measurement(window, trigger_rise=True, trigger_channel="DIO1", points=points)
            self.bench.scope.delay_acquisition_setup_time(window=window)
            self.bench.devkit_a.cmd("radio send")

            wave = self.bench.scope.finish_measurement(channels=[1, 2])

            if wave is not None:
                nss_indices = utilities.get_edges(wave[0])
                dio1_indices = utilities.get_edges(wave[1])

                if 3 < len(nss_indices) < 100:
                    nss_react = nss_indices[0][0]
                    nss_finish = nss_indices[3][0]
                else:
                    nss_react = np.nan
                    nss_finish = np.nan

                if 1 < len(dio1_indices) < 100:
                    dio1_rise = dio1_indices[0][0]
                    delay_react = (nss_react - dio1_rise) * self.bench.scope.sample_period
                    delay_finish = (nss_finish - dio1_rise) * self.bench.scope.sample_period
                else:
                    delay_react = np.nan
                    delay_finish = np.nan

                item = [dt.datetime.now(), window, self.bench.scope.sample_period, configuration.modulation,
                        configuration.band, delay_react, delay_finish]
            else:
                item = [dt.datetime.now(), window, self.bench.scope.sample_period, configuration.modulation,
                        configuration.band, np.nan, np.nan]

            df.loc[i] = item
            print(item)

        df.to_csv("{}.csv".format(self.name))

    def analyze(self, df: pd.DataFrame):
        df = df.dropna()  # dropna() returns a copy; the result must be reassigned
        delay_react = df.react
        delay_finish = df.finish

        columns = ['delay_react', 'delay_react_err', 'delay_finish', 'delay_finish_err']
        timings = pd.DataFrame(columns=columns)
        timings.loc[0] = [delay_react.mean(), delay_react.std(), delay_finish.mean(), delay_finish.std()]
        return timings
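# Minimal sketch of the aggregation analyze() performs, on synthetic data so no
# oscilloscope/devkit is needed; assumes pandas (pd) is re-exported by
# flora_tools.experiment, as the class above already relies on.
if __name__ == '__main__':
    fake = pd.DataFrame({'react': [1.0e-5, 1.2e-5, 1.1e-5],
                         'finish': [4.0e-5, 4.2e-5, 4.1e-5]})
    print(fake.react.mean(), fake.react.std(), fake.finish.mean(), fake.finish.std())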
| 37.576923
| 111
| 0.588536
| 336
| 2,931
| 4.928571
| 0.324405
| 0.065217
| 0.067633
| 0.048309
| 0.205314
| 0.148551
| 0.148551
| 0.107488
| 0.107488
| 0.107488
| 0
| 0.020649
| 0.306039
| 2,931
| 77
| 112
| 38.064935
| 0.79351
| 0
| 0
| 0.090909
| 0
| 0
| 0.060048
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054545
| false
| 0
| 0.018182
| 0
| 0.109091
| 0.018182
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de61aeb69172f0bbf84a85482ba65c30efe863a2
| 1,901
|
py
|
Python
|
main.py
|
SHGoldfarb/fantastic-barnacle
|
64650155ef8172530a6f88be6e7361bfc7e6bfa2
|
[
"MIT"
] | null | null | null |
main.py
|
SHGoldfarb/fantastic-barnacle
|
64650155ef8172530a6f88be6e7361bfc7e6bfa2
|
[
"MIT"
] | null | null | null |
main.py
|
SHGoldfarb/fantastic-barnacle
|
64650155ef8172530a6f88be6e7361bfc7e6bfa2
|
[
"MIT"
] | null | null | null |
import requests
import os
from datetime import datetime

import pandas as pd

def ensure_folder_exists(foldername):
    try:
        # Create tmp folder
        os.mkdir(foldername)
        print("Directory created: " + foldername)
    except FileExistsError:
        pass

def download_and_save(url, filename):
    print("Downloading " + url)
    response = requests.get(url)
    with open(filename, 'wb') as file:
        for chunk in response.iter_content(chunk_size=128):
            file.write(chunk)

def file_exists(filename):
    return os.path.isfile(filename)

def get_data():
    tmp_folder_name = "tmp"
    ensure_folder_exists(tmp_folder_name)

    active_cases_url = "https://raw.githubusercontent.com/MinCiencia/\
Datos-COVID19/master/output/producto19/CasosActivosPorComuna.csv"
    phases_url = "https://raw.githubusercontent.com/MinCiencia/Datos-COVID19/\
master/output/producto74/paso_a_paso.csv"

    todays_date_string = str(datetime.date(datetime.now()))
    active_cases_file_name = "active_cases_{}.csv".format(todays_date_string)
    phases_file_name = "phases_{}.csv".format(todays_date_string)
    active_cases_file_path = os.path.join(
        tmp_folder_name, active_cases_file_name)
    phases_file_path = os.path.join(tmp_folder_name, phases_file_name)

    if not file_exists(active_cases_file_path):
        download_and_save(active_cases_url, active_cases_file_path)
    if not file_exists(phases_file_path):
        download_and_save(phases_url, phases_file_path)

    # Load data
    cases = pd.read_csv(active_cases_file_path)
    phases = pd.read_csv(phases_file_path)
    return (cases, phases)

def process_and_merge(cases, phases):
    # counties = {}
    # Placeholder: the per-county merge is not implemented yet (see the sketch below).
    pass

def main():
    # Fetch
    cases, phases = get_data()
    # Process
    data = process_and_merge(cases, phases)
    # Plot
    print(data)

if __name__ == "__main__":
    main()
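# Hedged sketch of what process_and_merge() could do once implemented: join the
# two frames on a shared county column. 'comuna' is an assumed column name;
# check the real CSV headers before relying on it.
def process_and_merge_sketch(cases, phases):
    return cases.merge(phases, on="comuna", how="inner")  # assumed join key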
| 23.7625
| 78
| 0.711731
| 251
| 1,901
| 5.047809
| 0.330677
| 0.078137
| 0.071034
| 0.059984
| 0.295975
| 0.151539
| 0.151539
| 0.151539
| 0.102605
| 0.102605
| 0
| 0.007134
| 0.188848
| 1,901
| 79
| 79
| 24.063291
| 0.814527
| 0.031562
| 0
| 0.043478
| 0
| 0
| 0.041439
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.130435
| false
| 0.043478
| 0.086957
| 0.021739
| 0.26087
| 0.065217
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de681128c0eb4ded13f92d6720603223e15efc17
| 4,560
|
py
|
Python
|
train_n_test/train_decoder.py
|
kamieen03/style-transfer-net
|
c9f56aa579553be8c72f37ce975ba88dbd775605
|
[
"BSD-2-Clause"
] | 2
|
2019-12-14T14:59:22.000Z
|
2020-01-30T16:17:28.000Z
|
train_n_test/train_decoder.py
|
kamieen03/style-transfer-net
|
c9f56aa579553be8c72f37ce975ba88dbd775605
|
[
"BSD-2-Clause"
] | null | null | null |
train_n_test/train_decoder.py
|
kamieen03/style-transfer-net
|
c9f56aa579553be8c72f37ce975ba88dbd775605
|
[
"BSD-2-Clause"
] | 1
|
2020-01-16T20:03:35.000Z
|
2020-01-16T20:03:35.000Z
|
#!/usr/bin/env python3
import os, sys
sys.path.append(os.path.abspath(__file__ + "/../../"))  # just so we can use 'libs'

import torch.utils.data
import torch.optim as optim
from torch import nn
import numpy as np
import torch

from libs.Loader import Dataset
from libs.shufflenetv2 import ShuffleNetV2AutoEncoder

BATCH_SIZE = 32
CROP_SIZE = 400
ENCODER_SAVE_PATH = 'models/regular/shufflenetv2_x1_encoder.pth'
DECODER_SAVE_PATH = 'models/regular/shufflenetv2_x1_decoder.pth'
EPOCHS = 20

class Trainer(object):
    def __init__(self):
        datapath = '../data/'
        # set up datasets
        self.train_set = self.load_dataset(datapath + 'mscoco/train/')
        self.valid_set = self.load_dataset(datapath + 'mscoco/validate/')

        # set up model
        self.model = ShuffleNetV2AutoEncoder().cuda()
        # load encoder
        # self.model.encoder.eval()
        # for param in self.model.encoder.parameters():
        #     param.requires_grad = False
        # load decoder
        try:
            self.model.decoder.load_state_dict(torch.load(DECODER_SAVE_PATH))
            self.model.encoder.load_state_dict(torch.load(ENCODER_SAVE_PATH))
        except Exception:  # weights missing or incompatible
            print("Decoder weights not found. Proceeding with new ones...")
        self.model.train()

        self.criterion = nn.MSELoss()
        self.optimizer = optim.Adam(self.model.parameters(), lr=1e-4)

    def load_dataset(self, path):
        """Load the datasets"""
        dataset = Dataset(path, CROP_SIZE)
        loader = torch.utils.data.DataLoader(dataset=dataset,
                                             batch_size=BATCH_SIZE,
                                             shuffle=True,
                                             num_workers=8,
                                             drop_last=True)
        return loader

    def train(self):
        best_val = 1e9
        flag = False
        with open('shufflenetv2_log.txt', 'w+') as f:
            for epoch in range(1, EPOCHS + 1):  # count from one
                # if epoch == 2:
                #     for g in self.optimizer.param_groups:
                #         g['lr'] = 1e-3
                # if epoch == 4:
                #     for g in self.optimizer.param_groups:
                #         g['lr'] = 1e-4
                self.train_single_epoch(epoch, f)
                val = self.validate_single_epoch(epoch, f)
                if val < best_val:
                    best_val = val
                    torch.save(self.model.decoder.state_dict(), DECODER_SAVE_PATH)
                    torch.save(self.model.encoder.state_dict(), ENCODER_SAVE_PATH)
                # if val < 0.01 and not flag:
                #     flag = True
                #     for g in self.optimizer.param_groups:
                #         g['lr'] = 1e-5

    def train_single_epoch(self, epoch, f):
        batch_num = len(self.train_set)  # number of batches in training epoch
        self.model.train()
        for batch_i, content in enumerate(self.train_set):
            content = content[0].cuda()
            self.optimizer.zero_grad()
            out = self.model(content)
            loss = self.criterion(out, content)
            loss.backward()
            self.optimizer.step()
            print(f'Train Epoch: [{epoch}/{EPOCHS}] ' +
                  f'Batch: [{batch_i+1}/{batch_num}] ' +
                  f'Loss: {loss:.6f}')
            f.write(f'Train Epoch: [{epoch}/{EPOCHS}] ' +
                    f'Batch: [{batch_i+1}/{batch_num}] ' +
                    f'Loss: {loss:.6f}\n')

    def validate_single_epoch(self, epoch, f):
        batch_num = len(self.valid_set)  # number of batches in validation epoch
        self.model.eval()
        losses = []
        with torch.no_grad():
            for batch_i, content in enumerate(self.valid_set):
                content = content[0].cuda()
                out = self.model(content)
                loss = self.criterion(content, out)
                losses.append(loss.item())
                print(f'Validate Epoch: [{epoch}/{EPOCHS}] ' +
                      f'Batch: [{batch_i+1}/{batch_num}] ' +
                      f'Loss: {loss:.6f}')
                f.write(f'Validate Epoch: [{epoch}/{EPOCHS}] ' +
                        f'Batch: [{batch_i+1}/{batch_num}] ' +
                        f'Loss: {loss:.6f}\n')
        f.write(f'Mean: {np.mean(np.array(losses))}\n')
        return np.mean(np.array(losses))

def main():
    c = Trainer()
    c.train()

if __name__ == '__main__':
    main()
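# Hedged inference sketch: reload the best weights saved by train() and run one
# reconstruction; the random tensor is only a stand-in for a real image batch.
def reconstruct_one():
    model = ShuffleNetV2AutoEncoder().cuda()
    model.encoder.load_state_dict(torch.load(ENCODER_SAVE_PATH))
    model.decoder.load_state_dict(torch.load(DECODER_SAVE_PATH))
    model.eval()
    with torch.no_grad():
        return model(torch.rand(1, 3, CROP_SIZE, CROP_SIZE).cuda())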
| 35.905512
| 84
| 0.53114
| 534
| 4,560
| 4.378277
| 0.273408
| 0.050043
| 0.027374
| 0.029085
| 0.38195
| 0.328058
| 0.300684
| 0.212575
| 0.212575
| 0.145851
| 0
| 0.014845
| 0.35
| 4,560
| 126
| 85
| 36.190476
| 0.773954
| 0.124342
| 0
| 0.162791
| 0
| 0
| 0.146495
| 0.053707
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069767
| false
| 0
| 0.093023
| 0
| 0.197674
| 0.034884
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de6c1a64c58a8aca902a8fc78dd2204b84031a65
| 2,871
|
py
|
Python
|
src/main/create/c_chains_user_json.py
|
WikiCommunityHealth/wikimedia-revert
|
b584044d8b6a61a79d98656db356bf1f74d23ee0
|
[
"MIT"
] | null | null | null |
src/main/create/c_chains_user_json.py
|
WikiCommunityHealth/wikimedia-revert
|
b584044d8b6a61a79d98656db356bf1f74d23ee0
|
[
"MIT"
] | null | null | null |
src/main/create/c_chains_user_json.py
|
WikiCommunityHealth/wikimedia-revert
|
b584044d8b6a61a79d98656db356bf1f74d23ee0
|
[
"MIT"
] | null | null | null |
#%%
# PAGE EXAMPLE
# {'title': 'Zuppa_di_pesce_(film)',
#  'chains': [{'revisions': ['95861493', '95861612', '95973728'],
#              'users': {'93.44.99.33': '', 'Kirk39': '63558', 'AttoBot': '482488'},
#              'len': 3,
#              'start': '2018-04-01 04:54:40.0',
#              'end': '2018-04-05 07:36:26.0'}],
#  'n_chains': 1,
#  'n_reverts': 3,
#  'mean': 3.0,
#  'longest': 3,
#  'M': 0,
#  'lunghezze': {'3': 1}}
import json
from datetime import datetime
import numpy as np
import pandas as pd
import os
import shutil
from utils import utils
import sys

language = sys.argv[1]
dataset_folder = f'/home/gandelli/dev/data/{language}/chains/page/'
output = f'/home/gandelli/dev/data/{language}/chains/user/'

#%% get users from the json page
def get_users():
    users = {}
    n_files = 10  # number of files in the wars folder
    for i in range(n_files):
        dump_in = open(f"{dataset_folder}wars_{i}.json")
        line = dump_in.readline()  # skip the opening '[' line
        while line != '':
            line = dump_in.readline()
            if line == '{}]' or line == '' or line == '{}]{}]':
                continue
            try:
                page = json.loads(line[:-2])  # strip the trailing ',\n'
            except json.JSONDecodeError:
                print(line[:-2])
                continue  # skip unparseable lines instead of reusing a stale page
            for chain in page['chains']:
                for user in chain['users']:
                    users.setdefault(user, []).append(chain)
    return users

# input: a dict of users with their chains joined
def compute_users(users):
    i = 0
    for user, chains in users.items():
        name = user
        total_reverts = 0
        longest = 0
        lunghezze = np.zeros(200)
        g, involved = utils.getG(chains)

        for chain in chains:
            total_reverts += chain['len']
            longest = max(longest, chain['len'])
            lunghezze[chain['len']] += 1

        save_user(name, chains, total_reverts, longest, g, lunghezze, i)
        i += 1
    finish_files()

def save_user(name, chains, total_reverts, longest, g, lunghezze, n):
    mean = round(total_reverts / len(chains), 1)
    lun = {}
    n_files = 10
    path = f"{output}wars_{n % n_files}.json"
    dump_out = open(path, 'a')
    filesize = os.path.getsize(path)

    for i in range(1, len(lunghezze)):
        if lunghezze[i] > 0:
            lun[i] = int(lunghezze[i])

    if filesize == 0:
        dump_out.write('[')
    dump_out.write(json.dumps({'user': name, 'chains': chains, 'n_chains': len(chains), 'n_reverts': total_reverts,
                               'mean': mean, 'longest': longest, 'G': g, 'lunghezze': lun}) + ',\n')
    dump_out.close()

def finish_files():
    for filename in os.listdir(output):
        dump_out = open(output + filename, 'a')
        # the trailing comma should really be deleted; this trick keeps the file syntactically valid instead
        dump_out.write('{}]')
        dump_out.close()

#%%
shutil.rmtree(output)
os.mkdir(output)
users = get_users()
compute_users(users)
# %%
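# Minimal reader sketch for the files written above: after finish_files() each
# wars_*.json should be a valid JSON array whose last element is the '{}' stub,
# so it can be loaded directly and the stub dropped.
def read_user_file(n):
    with open(f"{output}wars_{n}.json") as f:
        return [rec for rec in json.load(f) if rec]  # drop the closing '{}' stub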
| 26.1
| 183
| 0.563915
| 377
| 2,871
| 4.201592
| 0.360743
| 0.045455
| 0.034091
| 0.020202
| 0.102273
| 0.102273
| 0.102273
| 0.059343
| 0.059343
| 0
| 0
| 0.049856
| 0.273424
| 2,871
| 109
| 184
| 26.33945
| 0.709492
| 0.198537
| 0
| 0.030769
| 0
| 0
| 0.105748
| 0.053971
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061538
| false
| 0
| 0.123077
| 0
| 0.2
| 0.015385
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de72e8f348089a00d8a491df1f651cf4a945ca9c
| 1,500
|
py
|
Python
|
Heap/378-Kth_Smalles_Element_in_a_Sorted_Matrix.py
|
dingwenzheng730/Leet
|
c08bd48e8dcc6bca41134d218d39f66bfc112eaf
|
[
"MIT"
] | 1
|
2021-06-15T21:01:53.000Z
|
2021-06-15T21:01:53.000Z
|
Heap/378-Kth_Smalles_Element_in_a_Sorted_Matrix.py
|
dingwenzheng730/Leet
|
c08bd48e8dcc6bca41134d218d39f66bfc112eaf
|
[
"MIT"
] | null | null | null |
Heap/378-Kth_Smalles_Element_in_a_Sorted_Matrix.py
|
dingwenzheng730/Leet
|
c08bd48e8dcc6bca41134d218d39f66bfc112eaf
|
[
"MIT"
] | null | null | null |
'''
Given an n x n matrix where each of the rows and columns are sorted in ascending order, return the kth smallest element in the matrix.
Note that it is the kth smallest element in the sorted order, not the kth distinct element.

Input: matrix = [[1,5,9],[10,11,13],[12,13,15]], k = 8
Output: 13
Explanation: The elements in the matrix are [1,5,9,10,11,12,13,13,15], and the 8th smallest number is 13

Input: matrix = [[1,5,9],[10,11,13],[12,13,15]], k = 2
Output: 5

Input: [[1,5,9],[10,11,13],[12,13,15]], k = 9
Output: 15

Input: [[2]], k = 1
Output: 2

Precondition:
n >= 1
k <= n*n
No int overflow

C1: Single element
C2: k = n^2
C3: k <= n
C4: k > n

Algo:
Brute force: collect all elements and sort, O(n^2 log n^2)
Heap:
x = min(k, n)
Runtime: O(k log x)
Space: O(x)
If n >= k, comparing the first column is enough.
If n < k, keep one pointer per row and a heap of the pointed-to values;
for k iterations, pop the smallest pointer and advance it to the next value in its row.
Init a heap whose size is min(k, n).
'''
import heapq
from typing import List

class Solution:
    def kthSmallest(self, matrix: List[List[int]], k: int) -> int:
        n = len(matrix)
        x = min(n, k)
        min_heap = []
        # seed the heap with the first element of each of the first x rows
        for r in range(x):
            heapq.heappush(min_heap, (matrix[r][0], r, 0))
        # pop k times; after the loop, `element` holds the kth smallest value
        while k:
            element, r, c = heapq.heappop(min_heap)
            if c < n - 1:
                heapq.heappush(min_heap, (matrix[r][c+1], r, c+1))
            k -= 1
        return element
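# Usage sketch with the first example from the notes above.
if __name__ == "__main__":
    m = [[1, 5, 9], [10, 11, 13], [12, 13, 15]]
    print(Solution().kthSmallest(m, 8))  # -> 13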
| 24.193548
| 134
| 0.611333
| 281
| 1,500
| 3.24911
| 0.377224
| 0.010953
| 0.013143
| 0.021906
| 0.200438
| 0.192771
| 0.07667
| 0.07667
| 0.07667
| 0.07667
| 0
| 0.079821
| 0.256667
| 1,500
| 61
| 135
| 24.590164
| 0.739013
| 0.694667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de73b0477272b09621a0a7e87406fe9c6c2a1f06
| 5,088
|
py
|
Python
|
baseStation/test/vision/service/test_visionService.py
|
olgam4/design3
|
6e05d123a24deae7dda646df535844a158ef5cc0
|
[
"WTFPL"
] | null | null | null |
baseStation/test/vision/service/test_visionService.py
|
olgam4/design3
|
6e05d123a24deae7dda646df535844a158ef5cc0
|
[
"WTFPL"
] | null | null | null |
baseStation/test/vision/service/test_visionService.py
|
olgam4/design3
|
6e05d123a24deae7dda646df535844a158ef5cc0
|
[
"WTFPL"
] | null | null | null |
from unittest import TestCase
from unittest.mock import Mock

import numpy as np

from pathfinding.domain.angle import Angle
from pathfinding.domain.coord import Coord
from vision.domain.image import Image
from vision.domain.rectangle import Rectangle
from vision.infrastructure.cvVisionException import CameraDoesNotExistError
from vision.service.visionService import VisionService

class TestVisionService(TestCase):
    valid_camera_ids_int = [0, 2]
    valid_camera_ids_str = ['0', '2']
    invalid_camera_id_int = 1
    invalid_camera_id_str = '1'
    calibration_file_path = 'path'
    image = Image(np.zeros((50, 50, 3)))

    def setUp(self) -> None:
        self.camera_factory = Mock()
        self.play_area_finder = Mock()
        self.goal_finder = Mock()
        self.source_finder = Mock()
        self.obstacle_finder = Mock()
        self.robot_finder = Mock()
        self.camera_calibration_factory = Mock()
        self.camera_calibration = Mock()
        self.camera_drawer = Mock()
        self.vision_service = VisionService(self.camera_factory, self.camera_calibration_factory, self.camera_drawer,
                                            self.play_area_finder, self.goal_finder, self.source_finder,
                                            self.obstacle_finder, self.robot_finder)

    def initialiseService(self) -> None:
        self.camera = Mock()
        self.camera_factory.create_camera = Mock(return_value=self.camera)
        self.camera.take_picture = Mock(return_value=self.image)
        self.camera_calibration_factory.load_calibration_from_file = Mock(return_value=self.camera_calibration)
        self.camera_calibration.rectify_image = Mock(return_value=self.image)
        self.vision_service.set_camera(self.valid_camera_ids_str[0], self.calibration_file_path)

    def test_when_service_first_created_then_it_is_not_initialized(self) -> None:
        self.assertFalse(self.vision_service._initialized.is_set())

    def test_when_camera_ids_requested_then_ids_from_camera_factory_returned_as_string(self) -> None:
        self.camera_factory.get_cameras = Mock(return_value=self.valid_camera_ids_int)
        expected_values = self.valid_camera_ids_str
        actual_values = self.vision_service.get_camera_ids()
        self.assertListEqual(expected_values, actual_values)

    def test_when_camera_set_with_valid_id_then_service_is_initialized(self) -> None:
        self.initialiseService()
        self.camera_factory.create_camera.assert_called_with(self.valid_camera_ids_int[0])
        self.camera.take_picture.assert_called_once()
        self.camera_calibration_factory.load_calibration_from_file.assert_called_with(self.calibration_file_path,
                                                                                      self.image)
        self.camera_calibration.rectify_image.assert_called_once()
        self.assertTrue(self.vision_service._initialized.is_set())

    def test_when_camera_set_with_invalid_id_then_CameraDoesNotExistError_is_raised(self) -> None:
        self.camera_factory.create_camera = Mock(side_effect=CameraDoesNotExistError(self.invalid_camera_id_int))
        self.assertRaises(CameraDoesNotExistError,
                          self.vision_service.set_camera, self.invalid_camera_id_str, self.calibration_file_path)

    def test_when_updated_then_attached_observers_are_notified(self) -> None:
        self.initialiseService()
        observer = Mock()
        self.vision_service.attach(observer)
        self.vision_service.update()
        observer.update.assert_called_once()

    def test_when_get_goal_then_center_of_goal_and_orientation_are_returned_as_real_coordinate(self) -> None:
        self.initialiseService()
        expected_coord = Coord(0, 0)
        expected_angle = Angle(0)
        self.goal_finder.find = Mock(return_value=(Rectangle(0, 0, 10, 8), expected_angle))
        self.camera_calibration.convert_table_pixel_to_real = Mock(return_value=Coord(0, 0))

        position = self.vision_service.get_goal()
        actual_coord = position.coordinate
        actual_angle = position.orientation

        self.camera_calibration.convert_table_pixel_to_real.assert_called_with(Coord(5, 4))
        self.assertEqual(expected_coord, actual_coord)
        self.assertEqual(expected_angle, actual_angle)

    def test_when_get_source_then_center_of_source_and_orientation_are_returned_as_real_coordinate(self) -> None:
        self.initialiseService()
        expected_coord = Coord(0, 0)
        expected_angle = Angle(0)
        self.source_finder.find = Mock(return_value=(Rectangle(0, 0, 10, 8), expected_angle))
        self.camera_calibration.convert_table_pixel_to_real = Mock(return_value=Coord(0, 0))

        position = self.vision_service.get_source()
        actual_coord = position.coordinate
        actual_angle = position.orientation

        self.camera_calibration.convert_table_pixel_to_real.assert_called_with(Coord(5, 4))
        self.assertEqual(expected_coord, actual_coord)
        self.assertEqual(expected_angle, actual_angle)
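# Standalone run hook sketch (the project may well use its own test runner):
if __name__ == '__main__':
    import unittest
    unittest.main()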
| 45.428571
| 117
| 0.725825
| 631
| 5,088
| 5.459588
| 0.179081
| 0.069666
| 0.073149
| 0.027576
| 0.501306
| 0.406676
| 0.34717
| 0.327431
| 0.297823
| 0.297823
| 0
| 0.009035
| 0.195165
| 5,088
| 111
| 118
| 45.837838
| 0.832234
| 0
| 0
| 0.235294
| 0
| 0
| 0.001376
| 0
| 0
| 0
| 0
| 0
| 0.176471
| 1
| 0.105882
| false
| 0
| 0.105882
| 0
| 0.294118
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de758aaeb7ae98b14c58fbe707173fad48237087
| 8,753
|
py
|
Python
|
bmdal/layer_features.py
|
dholzmueller/bmdal_reg
|
1a9e9c19fbd350ec32a2bd7b505e7015df7dc9bf
|
[
"Apache-2.0"
] | 3
|
2022-03-19T21:30:10.000Z
|
2022-03-30T08:20:48.000Z
|
bmdal/layer_features.py
|
dholzmueller/bmdal_reg
|
1a9e9c19fbd350ec32a2bd7b505e7015df7dc9bf
|
[
"Apache-2.0"
] | null | null | null |
bmdal/layer_features.py
|
dholzmueller/bmdal_reg
|
1a9e9c19fbd350ec32a2bd7b505e7015df7dc9bf
|
[
"Apache-2.0"
] | null | null | null |
from .feature_maps import *

import torch.nn as nn

class LayerGradientComputation:
    """
    Abstract base class that can be used as a second base class
    for layers that support the computation of gradient features
    """
    def __init__(self):
        super().__init__()  # in case this is used with multiple inheritance

    def get_feature_map(self) -> FeatureMap:
        """
        :return: Returns a FeatureMap object that can compute feature map / kernel values
        on the data provided by pop_feature_data()
        """
        raise NotImplementedError()

    def before_forward(self) -> None:
        """
        Callback that is called before the data is passed through the model in a forward pass
        and gradients are computed in a backward pass.
        This method can be used to set up hooks that grab input data and gradients in both forward and backward pass.
        """
        raise NotImplementedError()

    def pop_feature_data(self) -> FeatureData:
        """
        :return: This method should return the feature data
        corresponding to the inputs that were last passed through the model.
        This feature data should be usable by the feature map returned by get_feature_map()
        """
        raise NotImplementedError()

class ModelGradTransform(DataTransform):
    """
    A DataTransform object that passes data through a NN model
    in order to obtain feature data corresponding to gradients
    """
    def __init__(self, model: nn.Module, grad_layers: List[LayerGradientComputation]):
        """
        :param model: The model to compute gradients of
        :param grad_layers: All layers of the model whose parameters we want to compute gradients of
        """
        self.model = model
        self.grad_layers = grad_layers

    def forward(self, feature_data: FeatureData, idxs: Indexes) -> FeatureData:
        """
        :param feature_data: Feature data to be passed through the model
        :param idxs: indexes of the feature data that should be passed through the model
        :return: feature data provided by the layers
        """
        for grad_layer in self.grad_layers:
            grad_layer.before_forward()
        old_training = self.model.training
        self.model.eval()
        X = feature_data.get_tensor(idxs)
        y = self.model(X)  # implicitly calls hooks that were set by l.before_forward()
        y.backward(torch.ones_like(y))
        with torch.no_grad():
            for p in self.model.parameters():
                p.grad = None
        self.model.train(old_training)
        data = ListFeatureData([layer_comp.pop_feature_data() for layer_comp in self.grad_layers])
        return data

def create_grad_feature_map(model: nn.Module, grad_layers: List[LayerGradientComputation],
                            use_float64: bool = False) -> FeatureMap:
    """
    Creates a feature map corresponding to phi_{grad} or phi_{ll}, depending on which layers are provided.
    :param model: Model to compute gradients of
    :param grad_layers: All layers of the model whose parameters we want to compute gradients of
    :param use_float64: Set to true if the gradient features should be converted to float64 after computing them
    :return: Returns a feature map corresponding to phi_{grad} for the given layers.
    """
    tfms = [ModelGradTransform(model, grad_layers)]
    if use_float64:
        tfms.append(ToDoubleTransform())
    return SequentialFeatureMap(SumFeatureMap([l.get_feature_map() for l in grad_layers]),
                                tfms)

# ----- Specific LayerGradientComputation implementation(s) for linear layers

class GeneralLinearGradientComputation(LayerGradientComputation):
    """
    Implements LayerGradientFeatures for general linear layers.
    It can also be used with the Neural Tangent Parameterization since it includes a weight factor and bias factor.
    (These are called sigma_w and sigma_b in the paper.)
    """
    def __init__(self, layer: nn.Module, in_features: int, out_features: int,
                 weight_factor: float = 1.0, bias_factor: float = 1.0):
        """
        :param layer: nn.Module object implementing a linear (fully-connected) layer,
        whose gradients should be computed.
        :param in_features: Input dimension of the layer.
        :param out_features: Output dimension of the layer.
        :param weight_factor: Factor sigma_w by which the weight matrix is multiplied in the forward pass.
        :param bias_factor: Factor sigma_b by which the bias is multiplied in the forward pass.
        """
        super().__init__()
        self.layers = [layer]  # dirty hack to avoid infinite recursion in PyTorch if layer is self.
        self.in_features = in_features
        self.out_features = out_features
        self.weight_factor = weight_factor
        self.bias_factor = bias_factor
        self._input_data = None
        self._grad_output_data = None
        self._input_hook = None
        self._grad_output_hook = None

    def get_feature_map(self) -> FeatureMap:
        # gradients wrt this layer are an outer product of the input and the output gradient,
        # so we can use a ProductFeatureMap
        # the +1 is for the bias
        return ProductFeatureMap([IdentityFeatureMap(n_features=self.in_features + 1),
                                  IdentityFeatureMap(n_features=self.out_features)])

    def set_input_(self, value: torch.Tensor):
        # this is used to have a method to call in the hooks
        self._input_data = value

    def set_grad_output_(self, value: torch.Tensor):
        # this is used to have a method to call in the hooks
        self._grad_output_data = value

    def before_forward(self):
        # sets up hooks that store the input and grad_output
        self._input_hook = self.layers[0].register_forward_hook(
            lambda layer, inp, output, s=self: s.set_input_(inp[0].detach().clone()))
        self._grad_output_hook = self.layers[0].register_full_backward_hook(
            lambda layer, grad_input, grad_output, s=self: s.set_grad_output_(grad_output[0].detach().clone()))

    def pop_feature_data(self) -> FeatureData:
        # remove the hooks
        self._input_hook.remove()
        self._grad_output_hook.remove()
        # compute the adjusted input \tilde{x} from the paper
        inp = torch.cat([self.weight_factor * self._input_data,
                         self.bias_factor * torch.ones(self._input_data.shape[0], 1, device=self._input_data.device)],
                        dim=1)
        # feature data for the two IdentityFeatureMaps in the ProductFeatureMap, given by inputs and grad_outputs
        fd = ListFeatureData([TensorFeatureData(inp), TensorFeatureData(self._grad_output_data)])
        # allow to release memory earlier
        self._input_data = None
        self._grad_output_data = None
        return fd

class LinearGradientComputation(GeneralLinearGradientComputation):
    """
    This class implements a gradient computation for nn.Linear layers.
    """
    def __init__(self, layer: nn.Linear):
        super().__init__(layer=layer, in_features=layer.in_features, out_features=layer.out_features)

class LinearLayer(GeneralLinearGradientComputation, nn.Module):
    """
    Linear layer that implements LayerGradientFeatures, i.e., it can be used for computing gradient-based kernels.
    This linear layer does not initialize weight and bias itself,
    instead it assumes that they are passed as arguments to the constructor.
    It can also be used with the Neural Tangent Parameterization since it includes a weight factor and bias factor.
    (These are called sigma_w and sigma_b in the paper.)
    """
    def __init__(self, weight: torch.Tensor, bias: torch.Tensor, weight_factor: float, bias_factor: float):
        """
        :param weight: Weight matrix parameter of shape [in_features, out_features].
        Compared to torch.nn.Linear, this is transposed.
        :param bias: Bias parameter of shape [out_features]
        :param weight_factor: Factor sigma_w by which the weight matrix is multiplied in the forward pass.
        :param bias_factor: Factor sigma_b by which the bias is multiplied in the forward pass.
        """
        super().__init__(self, in_features=weight.shape[0], out_features=weight.shape[1],
                         weight_factor=weight_factor, bias_factor=bias_factor)
        self.weight = nn.Parameter(weight)
        self.bias = nn.Parameter(bias)
        self.weight_factor = weight_factor
        self.bias_factor = bias_factor

    def forward(self, x: torch.Tensor):
        return self.weight_factor * x.matmul(self.weight) + self.bias_factor * self.bias
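# Self-contained sketch of the hook mechanism GeneralLinearGradientComputation
# relies on (illustrative only; run the body in a REPL, since this module's
# relative import keeps it from executing as a script): capture a linear
# layer's input and grad_output in one forward/backward pass; their per-sample
# outer product is exactly the weight-gradient feature described above.
def _hook_demo():
    import torch
    lin = nn.Linear(3, 2)
    captured = {}
    h_in = lin.register_forward_hook(lambda m, i, o: captured.update(inp=i[0].detach()))
    h_out = lin.register_full_backward_hook(lambda m, gi, go: captured.update(gout=go[0].detach()))
    out = lin(torch.randn(4, 3))
    out.backward(torch.ones_like(out))
    h_in.remove()
    h_out.remove()
    # per-sample gradient features: outer product of input and grad_output
    return torch.einsum('bi,bo->bio', captured['inp'], captured['gout'])  # shape [4, 3, 2]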
| 44.207071
| 118
| 0.682052
| 1,148
| 8,753
| 5.026132
| 0.203833
| 0.02669
| 0.016984
| 0.014558
| 0.278336
| 0.238821
| 0.217331
| 0.188215
| 0.188215
| 0.174697
| 0
| 0.003487
| 0.24643
| 8,753
| 197
| 119
| 44.431472
| 0.871286
| 0.434251
| 0
| 0.202381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.190476
| false
| 0
| 0.02381
| 0.02381
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de759ba42ef02e88463fee41b02959bd0f0ddd2c
| 35,389
|
py
|
Python
|
pinsey/gui/MainWindow.py
|
RailKill/Pinsey
|
72a283e6c5683b27918b511d80e45c3af4e67539
|
[
"MIT"
] | 3
|
2021-02-01T06:47:06.000Z
|
2022-01-09T05:54:35.000Z
|
pinsey/gui/MainWindow.py
|
RailKill/Pinsey
|
72a283e6c5683b27918b511d80e45c3af4e67539
|
[
"MIT"
] | 4
|
2019-10-23T09:52:36.000Z
|
2022-03-11T23:17:23.000Z
|
pinsey/gui/MainWindow.py
|
RailKill/Pinsey
|
72a283e6c5683b27918b511d80e45c3af4e67539
|
[
"MIT"
] | null | null | null |
from configparser import ConfigParser
from configparser import DuplicateSectionError
from PyQt5 import QtCore, QtGui, QtWidgets

from pinsey import Constants
from pinsey.Utils import clickable, center, picture_grid, horizontal_line, resolve_message_sender, name_set, windows
from pinsey.gui.MessageWindow import MessageWindow
from pinsey.gui.component.BrowseListing import BrowseListing
from pinsey.gui.component.DislikesListing import DislikesListing
from pinsey.gui.component.LikesListing import LikesListing
from pinsey.handler.DecisionHandler import DecisionHandler
from pinsey.handler.LikesHandler import LikesHandler
from pinsey.thread.DownloadPhotosThread import DownloadPhotosThread
from pinsey.thread.LikesBotThread import LikesBotThread
from pinsey.thread.SessionThread import SessionThread
from pinsey.thread.MatchesThread import MatchesThread

class MainWindow(QtWidgets.QMainWindow):
    def __init__(self, app):
        super(MainWindow, self).__init__()
        # Initialize Window GUI controls.
        self.label_status = QtWidgets.QLabel()
        self.txt_location = QtWidgets.QLineEdit()
        self.txt_auth = QtWidgets.QLineEdit()
        self.txt_id = QtWidgets.QLineEdit()
        self.txt_img_threshold = QtWidgets.QLineEdit()
        self.txt_face_threshold = QtWidgets.QLineEdit()
        self.txt_bio_threshold = QtWidgets.QLineEdit()
        self.txt_pickup_threshold = QtWidgets.QLineEdit()
        self.chk_decision = QtWidgets.QCheckBox('Decision-Making', self)
        self.chk_exclude_friends = QtWidgets.QCheckBox('Exclude Facebook Friends', self)
        self.chk_exclude_mutual = QtWidgets.QCheckBox('Exclude Mutual Friends', self)
        self.chk_autochat = QtWidgets.QCheckBox('Autonomous Chatting', self)
        self.chk_respond_list = QtWidgets.QCheckBox('Respond from List', self)
        self.chk_respond_bot = QtWidgets.QCheckBox('Respond using Cleverbot', self)
        self.profile_area = QtWidgets.QScrollArea()
        self.matches_area = QtWidgets.QScrollArea()
        self.chk_refresh = QtWidgets.QCheckBox('Refresh every: ')
        self.txt_refresh_interval = QtWidgets.QLineEdit()

        # Initialize system tray icon and menu.
        tray_menu = QtWidgets.QMenu()
        restore_action = tray_menu.addAction('Restore')
        restore_action.triggered.connect(self.restore_window)
        close_action = tray_menu.addAction('Exit')
        close_action.triggered.connect(self.close)
        self.tray_icon = QtWidgets.QSystemTrayIcon(QtGui.QIcon(Constants.ICON_FILEPATH))
        self.tray_icon.activated.connect(self.tray_event)
        self.tray_icon.setContextMenu(tray_menu)
        self.tray_icon.show()

        # Initialize application variables.
        self.app = app
        self.session = None
        self.friend_list = []
        self.download_thread = []
        self.matches_thread = None
        self.session_thread = None
        self.likes_bot = None
        self.likes_handler = LikesHandler()
        self.filter_list = ['Date Added', 'Name', 'Age', 'Distance KM']
        self.likeslisting = LikesListing('Reload', self.likes_handler, self.filter_list)
        self.dislikeslisting = DislikesListing('Reload', self.likes_handler, self.filter_list)
        self.browselisting = BrowseListing('Refresh', self.likes_handler, self.filter_list[1:])
        self.setWindowTitle(Constants.APP_NAME)
        self.setWindowIcon(QtGui.QIcon(Constants.ICON_FILEPATH))
        self.setMinimumWidth(500)
        self.resize(800, 480)
        center(self)

        # Run startup methods to setup the GUI.
        self.read_settings()
        self.setup_tabs()
        self.connect_tinder()  # Start Tinder session.
        self.decision_change()

    '''
    +=======================================+
    | GUI METHODS: Resizing, UI setup, etc. |
    +=======================================+
    '''
    def setup_tabs(self):
        tabs = QtWidgets.QTabWidget()
        # Resize width and height
        tabs.resize(250, 150)
        # Add tabs
        tabs.addTab(self.setup_settings(), 'Settings')
        tabs.addTab(self.setup_profile(), 'Profile')
        tabs.addTab(self.likeslisting, 'Liked')
        tabs.addTab(self.dislikeslisting, 'Disliked')
        tabs.addTab(self.browselisting, 'Browse')
        tabs.addTab(self.setup_matches(), 'Matches')
        # Set main window layout
        self.setCentralWidget(tabs)
        self.show()

    def setup_settings(self):
        # Set layout of settings tab
        tab_settings = QtWidgets.QWidget()
        label_location = QtWidgets.QLabel('Location:')
        label_auth = QtWidgets.QLabel('Facebook Auth Token:')
        label_id = QtWidgets.QLabel('Facebook Profile ID:')
        label_img_threshold = QtWidgets.QLabel('Minimum Number of Good Images:')
        label_face_threshold = QtWidgets.QLabel('Faces Found Threshold:')
        label_bio_threshold = QtWidgets.QLabel('Biography Minimum Length:')
        label_friend_exclusion = QtWidgets.QLabel('Friend Exclusion: ')
        label_pickup_threshold = QtWidgets.QLabel('Pick-up after X Messages:')
        btn_save = QtWidgets.QPushButton('Save Settings', self)
        btn_save.setFixedHeight(50)
        btn_save.clicked.connect(self.save_settings)
        btn_start = QtWidgets.QPushButton('Start Pinning', self)
        btn_start.clicked.connect(lambda: self.start_botting(btn_start))
        btn_start.setFixedHeight(50)
        exclusion_widget = QtWidgets.QWidget()
        exclusion_widget.setLayout(QtWidgets.QHBoxLayout())
        exclusion_widget.layout().addWidget(self.chk_exclude_friends)
        exclusion_widget.layout().addWidget(self.chk_exclude_mutual)
        exclusion_widget.layout().addStretch()

        self.label_status.setAlignment(QtCore.Qt.AlignCenter)
        self.txt_id.setEchoMode(QtWidgets.QLineEdit.Password)
        self.txt_auth.setEchoMode(QtWidgets.QLineEdit.Password)
        self.txt_img_threshold.setValidator(QtGui.QIntValidator())
        self.txt_face_threshold.setValidator(QtGui.QIntValidator())
        self.txt_bio_threshold.setValidator(QtGui.QIntValidator())
        self.txt_pickup_threshold.setValidator(QtGui.QIntValidator())
        self.chk_decision.setStyleSheet(Constants.CSS_FONT_CATEGORY)
        self.chk_decision.stateChanged.connect(self.decision_change)
        self.chk_autochat.setStyleSheet(Constants.CSS_FONT_CATEGORY)

        grid = QtWidgets.QGridLayout()
        grid.setSpacing(10)
        grid.addWidget(self.label_status, 1, 0, 1, 2)
        grid.addWidget(label_location, 2, 0)
        grid.addWidget(self.txt_location, 2, 1)
        grid.addWidget(label_auth, 3, 0)
        grid.addWidget(self.txt_auth, 3, 1)
        grid.addWidget(label_id, 4, 0)
        grid.addWidget(self.txt_id, 4, 1)
        grid.addWidget(horizontal_line(), 5, 0, 1, 2)
        grid.addWidget(self.chk_decision, 6, 0, 1, 2)
        grid.addWidget(label_img_threshold, 7, 0)
        grid.addWidget(self.txt_img_threshold, 7, 1)
        grid.addWidget(label_face_threshold, 8, 0)
        grid.addWidget(self.txt_face_threshold, 8, 1)
        grid.addWidget(label_bio_threshold, 9, 0)
        grid.addWidget(self.txt_bio_threshold, 9, 1)
        grid.addWidget(label_friend_exclusion, 10, 0)
        grid.addWidget(exclusion_widget, 10, 1)
        grid.addWidget(horizontal_line(), 11, 0, 1, 2)
        grid.addWidget(self.chk_autochat, 12, 0, 1, 2)
        grid.addWidget(self.chk_respond_list, 13, 0, 1, 2)
        grid.addWidget(self.chk_respond_bot, 14, 0, 1, 2)
        grid.addWidget(label_pickup_threshold, 15, 0)
        grid.addWidget(self.txt_pickup_threshold, 15, 1)
        grid.addWidget(horizontal_line(), 16, 0, 1, 2)
        grid.addWidget(btn_save, 17, 0)
        grid.addWidget(btn_start, 17, 1)
        tab_settings.setLayout(grid)
        return tab_settings

    def setup_profile(self):
        tab_profile = QtWidgets.QWidget()
        tab_profile.setLayout(QtWidgets.QVBoxLayout())
        tab_profile.layout().addWidget(self.profile_area)
        return tab_profile

    def setup_matches(self):
        tab_matches = QtWidgets.QWidget()
        tab_matches.setLayout(QtWidgets.QVBoxLayout())
        match_refresh_widget = QtWidgets.QWidget()
        match_refresh_widget.setLayout(QtWidgets.QHBoxLayout())
        self.txt_refresh_interval.setValidator(QtGui.QIntValidator(10, 300))
        self.txt_refresh_interval.setText("60")  # Default 60 second refresh interval
        lbl_refresh_unit = QtWidgets.QLabel('seconds')
        match_refresh_widget.layout().addWidget(self.chk_refresh)
        match_refresh_widget.layout().addWidget(self.txt_refresh_interval)
        match_refresh_widget.layout().addWidget(lbl_refresh_unit)
        match_refresh_widget.layout().addStretch()
        btn_refresh = QtWidgets.QPushButton('Refresh', self)
        btn_refresh.clicked.connect(self.load_matches)
        match_refresh_widget.layout().addWidget(btn_refresh)
        tab_matches.layout().addWidget(match_refresh_widget)
        tab_matches.layout().addWidget(self.matches_area)
        return tab_matches
    def load_profile(self):
        def populate(data, thread):
            self.download_thread.remove(thread)
            profile_widget = QtWidgets.QWidget()
            profil = self.session.profile

            # 1. Profile picture grid.
            number_of_photos = Constants.NUMBER_OF_PHOTOS
            pp_layout = picture_grid(data, Constants.THUMBNAIL_SIZE, number_of_photos)

            # 2. Name and gender of user.
            label_name = name_set(profil.name, profil.gender, 0, profil.banned)
            pp_layout.addWidget(label_name, number_of_photos, 0, 1, number_of_photos)

            # 3. Biography.
            def bio_truncate():
                # Tinder counts emojis as 2 characters. Find and manipulate them so the character count is correct.
                emoji_raw = Constants.EMOJI_PATTERN.findall(txt_bio.toPlainText())
                number_of_emojis = 0
                for emoji in emoji_raw:
                    number_of_emojis += len(emoji)
                # Encode to UTF-8, emojis are counted as 4 characters.
                bio_true_length = len(txt_bio.toPlainText().encode()) - (number_of_emojis * 2)
                label_chars.setText(str(biography_max_length - len(txt_bio.toPlainText().encode()) +
                                        (number_of_emojis * 2)) + remaining_chars)
                if bio_true_length > biography_max_length:
                    txt_bio.setPlainText(txt_bio.toPlainText()[:biography_max_length - number_of_emojis])
                    txt_bio.moveCursor(QtGui.QTextCursor.End)

            biography_max_length = 500
            label_bio = QtWidgets.QLabel('Biography: ')
            remaining_chars = ' characters remaining'
            label_chars = QtWidgets.QLabel(str(biography_max_length) + remaining_chars)
            bio_widget = QtWidgets.QWidget()
            bio_widget.setLayout(QtWidgets.QHBoxLayout())
            bio_widget.layout().addWidget(label_bio)
            bio_widget.layout().addStretch()
            bio_widget.layout().addWidget(label_chars)
            pp_layout.addWidget(bio_widget, number_of_photos + 1, 0, 1, number_of_photos)
            # Profile may have no biography yet.
            try:
                bio_text = profil.bio
            except KeyError:
                bio_text = ''
            txt_bio = QtWidgets.QPlainTextEdit(bio_text)
            txt_bio.setFont(QtGui.QFont('Segoe UI Symbol', 16))
            txt_bio.textChanged.connect(bio_truncate)
            bio_truncate()
            pp_layout.addWidget(txt_bio, number_of_photos + 2, 0, 1, number_of_photos)

            # Form layout setup.
            form_layout = QtWidgets.QFormLayout()
            # form_layout.setLabelAlignment(QtCore.Qt.AlignRight)
            form_widget = QtWidgets.QWidget()
            form_widget.setLayout(form_layout)
            pp_layout.addWidget(form_widget, number_of_photos + 3, 0, 1, number_of_photos)
            form_label_style = 'margin-top: 0.3em'

            # 4. Gender
            radio_gender_male = QtWidgets.QRadioButton('Male')
            radio_gender_female = QtWidgets.QRadioButton('Female')
            if profil.gender == 'male':
                radio_gender_male.setChecked(True)
            else:
                radio_gender_female.setChecked(True)
            gender_widget = QtWidgets.QWidget()
            gender_widget.setLayout(QtWidgets.QHBoxLayout())
            gender_widget.layout().addWidget(radio_gender_male)
            gender_widget.layout().addWidget(radio_gender_female)
            label_gender = QtWidgets.QLabel('Gender: ')
            label_gender.setStyleSheet(form_label_style)
            form_layout.addRow(label_gender, gender_widget)

            # 5. Discoverable?
            label_discoverable = QtWidgets.QLabel('Discoverable: ')
            chk_discoverable = QtWidgets.QCheckBox()
            chk_discoverable.setChecked(profil.discoverable)
            form_layout.addRow(label_discoverable, chk_discoverable)

            # 6. Maximum distance filter.
            label_distance = QtWidgets.QLabel('Maximum Distance: ')
            label_distance.setStyleSheet(form_label_style)
            slider_distance = QtWidgets.QSlider(QtCore.Qt.Horizontal)
            slider_distance.setRange(1, 100)
            slider_distance.setSingleStep(1)
            slider_distance.setValue(profil.distance_filter)
            slider_distance.valueChanged.connect(
                lambda: (label_distance_value.setText(str(round(slider_distance.value() * 1.6)) + 'km')))
            label_distance_value = QtWidgets.QLabel(str(round(slider_distance.value() * 1.6)) + 'km')
            distance_widget = QtWidgets.QWidget()
            distance_widget.setLayout(QtWidgets.QHBoxLayout())
            distance_widget.layout().addWidget(slider_distance)
            distance_widget.layout().addWidget(label_distance_value)
            form_layout.addRow(label_distance, distance_widget)

            # 7. Age filter.
            def max_slider_handle():
                label_age_max.setText('55+' if slider_age_max.value() > 54 else str(slider_age_max.value()))
                slider_age_min.setRange(18, 46 if slider_age_max.value() > 46 else slider_age_max.value())

            def min_slider_handle():
                label_age_min.setText(str(slider_age_min.value()))
                slider_age_max.setRange(slider_age_min.value(), 55)

            label_age = QtWidgets.QLabel('Age: ')
            label_age.setStyleSheet(form_label_style)
            label_to = QtWidgets.QLabel(' to ')
            slider_age_max = QtWidgets.QSlider(QtCore.Qt.Horizontal)
            slider_age_max.setRange(profil.age_filter_min, 55)
            slider_age_max.setSingleStep(1)
            slider_age_max.setValue(55 if profil.age_filter_max > 54 else profil.age_filter_max)
            slider_age_max.valueChanged.connect(max_slider_handle)
            label_age_max = QtWidgets.QLabel('55+' if slider_age_max.value() > 54 else str(slider_age_max.value()))
            slider_age_min = QtWidgets.QSlider(QtCore.Qt.Horizontal)
            slider_age_min.setRange(18, 46 if profil.age_filter_max > 46 else profil.age_filter_max)
            slider_age_min.setSingleStep(1)
            slider_age_min.setValue(profil.age_filter_min)
            slider_age_min.valueChanged.connect(min_slider_handle)
            label_age_min = QtWidgets.QLabel(str(slider_age_min.value()))
            age_widget = QtWidgets.QWidget()
            age_widget.setLayout(QtWidgets.QHBoxLayout())
            age_widget.layout().addWidget(label_age_min)
            age_widget.layout().addWidget(slider_age_min)
            age_widget.layout().addWidget(label_to)
            age_widget.layout().addWidget(slider_age_max)
            age_widget.layout().addWidget(label_age_max)
            form_layout.addRow(label_age, age_widget)

            # 8. Interested in which gender?
            label_interested = QtWidgets.QLabel('Interested in: ')
            label_interested.setStyleSheet(form_label_style)
            chk_interested_male = QtWidgets.QCheckBox('Male')
            chk_interested_male.setChecked('male' in list(profil.interested_in))
            chk_interested_female = QtWidgets.QCheckBox('Female')
            chk_interested_female.setChecked('female' in list(profil.interested_in))
            interested_widget = QtWidgets.QWidget()
            interested_widget.setLayout(QtWidgets.QHBoxLayout())
            interested_widget.layout().addWidget(chk_interested_male)
            interested_widget.layout().addWidget(chk_interested_female)
            form_layout.addRow(label_interested, interested_widget)

            # 9. Save button.
            def save_profile():
                # Must have an interested gender before proceeding.
                if not chk_interested_male.isChecked() and not chk_interested_female.isChecked():
                    QtWidgets.QMessageBox().critical(self, 'Profile Error',
                                                     'You must be interested in at least one gender.')
                    return
                # Set profile values.
                try:
                    profile.bio = txt_bio.toPlainText()
                except KeyError:
                    self.session.update_profile({
                        "bio": txt_bio.toPlainText()
                    })
                profile.discoverable = chk_discoverable.isChecked()
                profile.distance_filter = slider_distance.value()
                profile.age_filter_min = slider_age_min.value()
                profile.age_filter_max = 1000 if slider_age_max.value() > 54 else slider_age_max.value()

                # Workaround due to pynder 0.0.13 not yet supporting "gender" and "interested in" changes.
                gender_filter = 2
                profil.interested = []
                profil.sex = (0, 'male') if radio_gender_male.isChecked() else (1, 'female')
                if chk_interested_male.isChecked():
                    gender_filter -= 2
                    profil.interested.append(0)
                if chk_interested_female.isChecked():
                    gender_filter -= 1
                    profil.interested.append(1)
                self.session.update_profile({
                    "interested_in": profil.interested,
                    "gender_filter": gender_filter,
                    "gender": profil.sex[0]
                    # "squads_discoverable": False
                })
                QtWidgets.QMessageBox.information(self, 'Profile Saved', 'Profile information has been updated.')
                reload_profile()

            def reload_profile():
                # Refresh GUI.
                label_name.setText(name_set(profil.name, profil.sex[1], 0, profil.banned).text())
                try:
                    txt_bio.setPlainText(profil.bio)
                except KeyError:
                    txt_bio.setPlainText('')
                chk_discoverable.setChecked(profil.discoverable)
                slider_distance.setValue(profil.distance_filter)
                label_distance_value.setText(str(round(slider_distance.value() * 1.6)) + 'km')
                slider_age_max.setRange(profil.age_filter_min, 55)
                slider_age_max.setValue(55 if profil.age_filter_max > 54 else profil.age_filter_max)
                label_age_max.setText('55+' if slider_age_max.value() > 54 else str(slider_age_max.value()))
                slider_age_min.setRange(18, 46 if profil.age_filter_max > 46 else profil.age_filter_max)
                slider_age_min.setValue(profil.age_filter_min)
                label_age_min.setText(str(slider_age_min.value()))
                chk_interested_male.setChecked(0 in list(profil.interested))    # interested_in workaround.
                chk_interested_female.setChecked(1 in list(profil.interested))  # interested_in workaround.

            btn_save_profile = QtWidgets.QPushButton('Update Profile')
            btn_save_profile.setFixedHeight(50)
            btn_save_profile.clicked.connect(save_profile)
            pp_layout.addWidget(btn_save_profile, number_of_photos + 4, 0, 1, number_of_photos)
            profile_widget.setLayout(pp_layout)
            self.profile_area.setWidget(profile_widget)
            self.profile_area.setAlignment(QtCore.Qt.AlignCenter)

        # Download profile images and then populate the profile GUI.
        profile = self.session.profile
        download_thread = DownloadPhotosThread(profile.photos)
        download_thread.data_downloaded.connect(lambda data, thread=download_thread: populate(data, thread))
        self.download_thread.append(download_thread)
        download_thread.start()
    def load_matches(self, interval=0):
        def load_thumbnail(photo, label, thread):
            self.download_thread.remove(thread)
            thumbnail = QtGui.QImage()
            thumbnail.loadFromData(photo[0].data)
            label.setPixmap(QtGui.QPixmap(thumbnail))

        def populate_matches(data):
            matches = data
            # updates = list(self.session.updates())
            # updates_balloon_message = ''
            matches_list = QtWidgets.QWidget()
            matches_list.setLayout(QtWidgets.QVBoxLayout())
            for match in matches:
                """
                # Show notification if it is in updates.
                for update in updates:
                    if match.user.id == update.user.id:
                        updates_balloon_message += update.user.name
                        if not update.messages:
                            updates_balloon_message += ' (NEW) '
                        updates_balloon_message += '\n'
                """
                # Load thumbnail of match. Fixed sizes must be ints, hence the explicit conversion.
                label_thumbnail = QtWidgets.QLabel()
                label_thumbnail.setFixedWidth(int(Constants.THUMBNAIL_SIZE / 2))
                label_thumbnail.setFixedHeight(int(Constants.THUMBNAIL_SIZE / 2))
                label_thumbnail.setScaledContents(True)
                download_thread = DownloadPhotosThread([next(match.user.photos)])
                download_thread.data_downloaded.connect(
                    lambda data, l=label_thumbnail, t=download_thread: load_thumbnail(data, l, t)
                )
                self.download_thread.append(download_thread)
                download_thread.start()

                # Create name set.
                label_name = name_set(match.user.name, match.user.gender, match.user.age)
                # Create match date label.
                label_match_date = QtWidgets.QLabel('<b>Match Date: </b>' +
                                                    match.match_date.strftime("%B %d, %Y at %I:%M%p"))
                # Create last message text.
                if match.messages:
                    last_message = match.messages[len(match.messages) - 1]
                    last_poster = resolve_message_sender(last_message, match)
                    display_message = last_poster + last_message.body
                else:
                    display_message = 'Conversation not started.'
                label_last_message = QtWidgets.QLabel(display_message)
                # Create notification text.
                # label_notification = QtWidgets.QLabel('NEW UPDATE!' if match in updates else '')
                # label_notification.setStyleSheet(Constants.CSS_FONT_NOTIFICATION)

                # Create a card for each match.
                card_widget = QtWidgets.QWidget()
                card_layout = QtWidgets.QGridLayout()
                card_layout.setSpacing(10)
                card_layout.addWidget(label_thumbnail, 1, 0, 5, 1)
                card_layout.addWidget(label_name, 1, 1)
                card_layout.addWidget(label_match_date, 2, 1)
                card_layout.addWidget(label_last_message, 3, 1)
                # card_layout.addWidget(label_notification, 4, 1)
                card_widget.setLayout(card_layout)
                clickable(card_widget).connect(lambda m=match: (
                    windows.append(MessageWindow(m, self.friend_list))
                ))
                matches_list.layout().addWidget(card_widget)

                # Check if any MessageWindow exists for this match. If there is, update the messages area.
                for window in windows:
                    if isinstance(window, MessageWindow) and match == window.match:
                        window.load_messages(match.messages)

            self.matches_area.setWidget(matches_list)
            self.matches_area.setAlignment(QtCore.Qt.AlignCenter)
            """
            if updates_balloon_message:
                self.tray_icon.showMessage('Pinsey: New Update!', updates_balloon_message)
            """
            if self.chk_refresh.isChecked():
                self.load_matches(int(self.txt_refresh_interval.text()))

        self.matches_thread = MatchesThread(self.session, interval)
        self.matches_thread.data_downloaded.connect(populate_matches)
        self.matches_thread.start()

    '''
    +================================================================+
    | HANDLING METHODS: Events, background, saving preferences, etc. |
    +================================================================+
    '''
def closeEvent(self, event):
for window in windows:
window.close() # Close all windows associated with this window.
super(MainWindow, self).closeEvent(event)
self.app.exit()
def changeEvent(self, event):
if event.type() == QtCore.QEvent.WindowStateChange:
# TODO: Check if windowState = 3, happens when minimize on fullscreen window.
if self.windowState() == QtCore.Qt.WindowMinimized:
for window in windows:
window.setWindowFlags(self.windowFlags() | QtCore.Qt.Tool) # Required to properly hide window.
window.hide() # Hides all windows associated with this window.
self.setWindowFlags(self.windowFlags() | QtCore.Qt.Tool) # Required to properly hide window.
self.hide()
def tray_event(self, reason):
if reason == QtWidgets.QSystemTrayIcon.DoubleClick:
self.restore_window()
def restore_window(self):
if self.isHidden():
for window in windows:
window.setWindowFlags(self.windowFlags() & ~QtCore.Qt.Tool) # Required to properly show window.
window.showNormal()
self.setWindowFlags(self.windowFlags() & ~QtCore.Qt.Tool) # Required to properly show window.
self.showNormal()
def connect_tinder(self):
def session_connected(data):
if data.session:
if data.exception:
QtWidgets.QMessageBox.warning(self, 'Warning', str(data.exception))
self.session = data.session
self.friend_list = list(self.session.get_fb_friends())
self.label_status.setText(status_text + '<span style="color:green;font-weight:bold">Online</span>')
self.load_profile() # Automatically load profile after session is ready.
self.load_matches() # Automatically load matches after session is ready.
# Update user listing.
self.likeslisting.friend_list = self.friend_list
self.likeslisting.refresh()
self.dislikeslisting.friend_list = self.friend_list
self.dislikeslisting.refresh()
self.browselisting.friend_list = self.friend_list
self.browselisting.session = self.session
else:
self.session = None
self.label_status.setText(status_text + '<span style="color:red;font-weight:bold">Offline</span>')
QtWidgets.QMessageBox.critical(self, 'Error', str(data.exception))
status_text = 'Tinder Status: '
if self.txt_location.text() and self.txt_id.text() and self.txt_auth.text():
self.session_thread = SessionThread(self.txt_id.text(), self.txt_auth.text(), self.txt_location.text())
self.session_thread.data_downloaded.connect(session_connected)
self.session_thread.start()
self.label_status.setText(status_text + '<span style="color:orange;font-weight:bold">Connecting...</span>')
else:
self.session = None
self.label_status.setText(status_text + '<span style="color:red;font-weight:bold">Offline</span>')
QtWidgets.QMessageBox.information(self, 'Connect to Tinder', 'In order to start using Pinsey, you will need '
'to key in your rough location (similar to how '
'you would search on Google Maps), Facebook '
'authentication token from Tinder, and Facebook '
'profile ID. Then, click Save Settings and it '
'will start connecting to Tinder.\n\n'
'If you are unsure how to obtain some of the '
'values required, please visit: '
'<a href="http://railkill.com/pinsey">'
'http://railkill.com/pinsey</a>')
def decision_change(self):
"""Handles decision-making checkbox state change."""
if self.chk_decision.isChecked():
self.txt_img_threshold.setDisabled(False)
self.txt_face_threshold.setDisabled(False)
self.txt_bio_threshold.setDisabled(False)
self.chk_exclude_friends.setDisabled(False)
self.chk_exclude_mutual.setDisabled(False)
else:
self.txt_img_threshold.setDisabled(True)
self.txt_face_threshold.setDisabled(True)
self.txt_bio_threshold.setDisabled(True)
self.chk_exclude_friends.setDisabled(True)
self.chk_exclude_mutual.setDisabled(True)
def read_settings(self):
"""Reads saved user preferences and loads it into the application. Otherwise, load defaults."""
config = ConfigParser()
if config.read(Constants.CONFIG_DATA_DIR + 'config.ini'):
self.txt_location.setText(config.get('Authentication', 'location'))
self.txt_auth.setText(config.get('Authentication', 'auth'))
self.txt_id.setText(config.get('Authentication', 'id'))
self.chk_decision.setChecked(config.getboolean('Decision', 'enabled'))
self.txt_img_threshold.setText(config.get('Decision', 'img_threshold'))
self.txt_face_threshold.setText(config.get('Decision', 'face_threshold'))
self.txt_bio_threshold.setText(config.get('Decision', 'bio_threshold'))
self.chk_exclude_friends.setChecked(config.getboolean('Decision', 'exclude_friends'))
self.chk_exclude_mutual.setChecked(config.getboolean('Decision', 'exclude_mutual'))
self.chk_autochat.setChecked(config.getboolean('Chat', 'enabled'))
self.chk_respond_list.setChecked(config.getboolean('Chat', 'respond_list'))
self.chk_respond_bot.setChecked(config.getboolean('Chat', 'respond_bot'))
self.txt_pickup_threshold.setText(config.get('Chat', 'pickup_threshold'))
def save_settings(self):
config = ConfigParser()
config_path = Constants.CONFIG_DATA_DIR + 'config.ini'
config.read(config_path)
try:
config.add_section('Authentication')
except DuplicateSectionError:
pass
config.set('Authentication', 'location', self.txt_location.text())
config.set('Authentication', 'auth', self.txt_auth.text())
config.set('Authentication', 'id', self.txt_id.text())
try:
config.add_section('Decision')
except DuplicateSectionError:
pass
config.set('Decision', 'enabled', str(self.chk_decision.isChecked()))
config.set('Decision', 'img_threshold', self.txt_img_threshold.text())
config.set('Decision', 'face_threshold', self.txt_face_threshold.text())
# TODO: insert filepath of cascade, for user customizability
config.set('Decision', 'bio_threshold', self.txt_bio_threshold.text())
config.set('Decision', 'exclude_friends', str(self.chk_exclude_friends.isChecked()))
config.set('Decision', 'exclude_mutual', str(self.chk_exclude_mutual.isChecked()))
try:
config.add_section('Chat')
except DuplicateSectionError:
pass
config.set('Chat', 'enabled', str(self.chk_autochat.isChecked()))
config.set('Chat', 'respond_list', str(self.chk_respond_list.isChecked()))
# TODO: insert filepath of response list, for user customizability
config.set('Chat', 'respond_bot', str(self.chk_respond_bot.isChecked()))
config.set('Chat', 'pickup_threshold', self.txt_pickup_threshold.text())
with open(config_path, 'w') as f:
config.write(f)
QtWidgets.QMessageBox.information(self, 'Information', 'Settings saved.')
self.connect_tinder()
def start_botting(self, button):
if self.session:
decision_handler = None
if not self.txt_img_threshold.text():
self.txt_img_threshold.setText(str(Constants.THRESHOLD_IMG_DEFAULT))
if not self.txt_face_threshold.text():
self.txt_face_threshold.setText(str(Constants.THRESHOLD_FACE_DEFAULT))
if not self.txt_bio_threshold.text():
self.txt_bio_threshold.setText(str(Constants.THRESHOLD_BIO_DEFAULT))
if self.chk_decision.isChecked():
decision_handler = DecisionHandler(
int(self.txt_img_threshold.text()),
int(self.txt_face_threshold.text()),
int(self.txt_bio_threshold.text()),
self.chk_exclude_friends.isChecked(),
self.chk_exclude_mutual.isChecked()
)
self.likes_bot = LikesBotThread(self.session, self.likes_handler, decision_handler)
self.likes_bot.start()
if self.chk_autochat.isChecked():
self.matches_thread.start_bot()
button.setText('Stop Pinning')
button.clicked.disconnect()
button.clicked.connect(lambda: self.stop_botting(button))
else:
QtWidgets.QMessageBox.critical(self, 'Unable to Start Pinning', 'You are not connected to Tinder yet.')
def stop_botting(self, button):
self.likes_bot.stop()
self.matches_thread.stop_bot()
button.setText('Start Pinning')
button.clicked.disconnect()
button.clicked.connect(lambda: self.start_botting(button))
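# A minimal sketch of the ConfigParser round trip that save_settings() and
# read_settings() above rely on; it assumes the module-level ConfigParser and
# DuplicateSectionError imports those methods already use. The section and
# option names mirror this file, while the path and default are illustrative.
def _config_roundtrip_sketch(path='config.ini', value=60):
    config = ConfigParser()
    config.read(path)  # merges with any existing file; a missing file is fine
    try:
        config.add_section('Decision')
    except DuplicateSectionError:
        pass  # section already exists from a previous save
    config.set('Decision', 'img_threshold', str(value))  # values must be strings
    with open(path, 'w') as f:
        config.write(f)
    config = ConfigParser()
    if config.read(path):  # read() returns an empty list when the file is absent
        return config.getint('Decision', 'img_threshold')
    return value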
| 50.700573
| 121
| 0.628755
| 3,818
| 35,389
| 5.602672
| 0.130959
| 0.019962
| 0.018653
| 0.008882
| 0.316395
| 0.192558
| 0.117433
| 0.102286
| 0.086532
| 0.063718
| 0
| 0.009658
| 0.271469
| 35,389
| 697
| 122
| 50.773314
| 0.820029
| 0.062562
| 0
| 0.120879
| 0
| 0
| 0.068093
| 0.006405
| 0
| 0
| 0
| 0.001435
| 0
| 1
| 0.047619
| false
| 0.009158
| 0.027473
| 0
| 0.084249
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de766a3b6f5c4477c098e9f336005c2394afbbc1
| 1,506
|
py
|
Python
|
app/api/api_v1/tasks/emails.py
|
cdlaimin/fastapi
|
4acf1a1da4a1eedd81a3bdf6256661c2464928b9
|
[
"BSD-3-Clause"
] | null | null | null |
app/api/api_v1/tasks/emails.py
|
cdlaimin/fastapi
|
4acf1a1da4a1eedd81a3bdf6256661c2464928b9
|
[
"BSD-3-Clause"
] | null | null | null |
app/api/api_v1/tasks/emails.py
|
cdlaimin/fastapi
|
4acf1a1da4a1eedd81a3bdf6256661c2464928b9
|
[
"BSD-3-Clause"
] | null | null | null |
# -*- encoding: utf-8 -*-
"""
@File : emails.py
@Contact : 1053522308@qq.com
@License : (C)Copyright 2017-2018, Liugroup-NLPR-CASIA
@Modify Time @Author @Version @Description
------------ ------- -------- -----------
2020/9/27 10:22 PM wuxiaoqiang 1.0 None
"""
import asyncio
from email.mime.text import MIMEText
import aiosmtplib
from app.core.celery_app import celery_app
from app.core.config import settings
async def sendemail(to_addr: str, code: str):
title = '<html><body><h3>Dear <a data-auto-link="1" href="mailto:%s" target="_blank">%s</a>, hello:</h3>' % (
to_addr, to_addr)
body = f'<p>Please click the following link to activate and sign in: <a href="http://127.0.0.1:8000/api/v1/users/activated?code={code}">http://127.0.0.1:8000/api/v1/users/activated?code={code}</a></p>'
tail = 'If you are not a user of this site, another user may have entered your email address by mistake.</body></html>'
html = title + body + tail
msg = MIMEText(html, 'html', 'utf-8')
msg['From'] = settings.EMAIL_USER
msg['To'] = to_addr
msg['Subject'] = "Welcome to this site"
try:
async with aiosmtplib.SMTP(hostname=settings.EMAIL_HOSTNAEM, port=settings.EMAIL_PORT, use_tls=True,
username=settings.EMAIL_USER, password=settings.EMAIL_PASSWORD) as smtp:
await smtp.send_message(msg)
except aiosmtplib.SMTPException as e:
print(e)
raise e
@celery_app.task(acks_late=True, autoretry_for=(Exception,), retry_kwargs={'max_retries': 3})
def decoratorEmail(To: str, code: str = "123456"):
asyncio.run(sendemail(To, code))
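# A minimal usage sketch, assuming a Celery worker and broker are configured
# for celery_app; the address below is illustrative. Tasks are queued with
# .delay() (or .apply_async()); calling decoratorEmail(...) directly would
# instead run the send synchronously in-process.
if __name__ == "__main__":
    decoratorEmail.delay("user@example.com", code="123456")  # enqueue for a worker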
| 34.227273
| 108
| 0.625498
| 202
| 1,506
| 4.569307
| 0.589109
| 0.070423
| 0.023835
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045075
| 0.204515
| 1,506
| 43
| 109
| 35.023256
| 0.725376
| 0.199867
| 0
| 0
| 0
| 0.08
| 0.22807
| 0.080201
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04
| false
| 0.04
| 0.2
| 0
| 0.24
| 0.04
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de76f5e1a1407299a65c28e63772cca898458059
| 13,487
|
py
|
Python
|
lightwood/encoders/text/distilbert.py
|
ritwik12/lightwood
|
7975688355fba8b0f8349dd55a1b6cb625c3efd0
|
[
"MIT"
] | null | null | null |
lightwood/encoders/text/distilbert.py
|
ritwik12/lightwood
|
7975688355fba8b0f8349dd55a1b6cb625c3efd0
|
[
"MIT"
] | null | null | null |
lightwood/encoders/text/distilbert.py
|
ritwik12/lightwood
|
7975688355fba8b0f8349dd55a1b6cb625c3efd0
|
[
"MIT"
] | null | null | null |
import time
import copy
import random
import logging
from functools import partial
import numpy as np
import torch
from torch.utils.data import DataLoader
from transformers import DistilBertModel, DistilBertForSequenceClassification, DistilBertTokenizer, AlbertModel, AlbertForSequenceClassification, AlbertTokenizer, AdamW, get_linear_schedule_with_warmup
from lightwood.config.config import CONFIG
from lightwood.constants.lightwood import COLUMN_DATA_TYPES, ENCODER_AIM
from lightwood.mixers.helpers.default_net import DefaultNet
from lightwood.mixers.helpers.ranger import Ranger
from lightwood.mixers.helpers.shapes import *
from lightwood.mixers.helpers.transformer import Transformer
from lightwood.api.gym import Gym
class DistilBertEncoder:
def __init__(self, is_target=False, aim=ENCODER_AIM.BALANCE):
self.name = 'Text Transformer Encoder'
self._tokenizer = None
self._model = None
self._pad_id = None
self._pytorch_wrapper = torch.FloatTensor
self._max_len = None
self._max_ele = None
self._prepared = False
self._model_type = None
self.desired_error = 0.01
self.max_training_time = CONFIG.MAX_ENCODER_TRAINING_TIME
self._head = None
# Possible: speed, balance, accuracy
self.aim = aim
if self.aim == ENCODER_AIM.SPEED:
# Uses more memory, takes very long to train, and prints odd debugging output to the command line; consider waiting until this improves, or investigate why it happens (changing the pretrained model doesn't seem to help).
self._classifier_model_class = AlbertForSequenceClassification
self._embeddings_model_class = AlbertModel
self._tokenizer_class = AlbertTokenizer
self._pretrained_model_name = 'albert-base-v2'
self._model_max_len = 768
if self.aim == ENCODER_AIM.BALANCE:
self._classifier_model_class = DistilBertForSequenceClassification
self._embeddings_model_class = DistilBertModel
self._tokenizer_class = DistilBertTokenizer
self._pretrained_model_name = 'distilbert-base-uncased'
self._model_max_len = 768
if self.aim == ENCODER_AIM.ACCURACY:
self._classifier_model_class = DistilBertForSequenceClassification
self._embeddings_model_class = DistilBertModel
self._tokenizer_class = DistilBertTokenizer
self._pretrained_model_name = 'distilbert-base-uncased'
self._model_max_len = 768
device_str = "cuda" if CONFIG.USE_CUDA else "cpu"
if CONFIG.USE_DEVICE is not None:
device_str = CONFIG.USE_DEVICE
self.device = torch.device(device_str)
def _train_callback(self, error, real_buff, predicted_buff):
logging.info(f'{self.name} reached a loss of {error} while training!')
@staticmethod
def categorical_train_function(model, data, gym, test=False):
input, real = data
input = input.to(gym.device)
labels = torch.tensor([torch.argmax(x) for x in real]).to(gym.device)
outputs = gym.model(input, labels=labels)
loss, logits = outputs[:2]
if not test:
loss.backward()
gym.optimizer.step()
gym.scheduler.step()
gym.optimizer.zero_grad()
return loss
@staticmethod
def numerical_train_function(model, data, gym, backbone, test=False):
input, real = data
input = input.to(gym.device)
real = real.to(gym.device)
embeddings = backbone(input)[0][:,0,:]
outputs = gym.model(embeddings)
loss = gym.loss_criterion(outputs, real)
if not test:
loss.backward()
gym.optimizer.step()
gym.scheduler.step()
gym.optimizer.zero_grad()
return loss
def prepare_encoder(self, priming_data, training_data=None):
if self._prepared:
raise Exception('You can only call "prepare_encoder" once for a given encoder.')
priming_data = [x if x is not None else '' for x in priming_data]
self._max_len = min(max([len(x) for x in priming_data]),self._model_max_len)
self._tokenizer = self._tokenizer_class.from_pretrained(self._pretrained_model_name)
self._pad_id = self._tokenizer.convert_tokens_to_ids([self._tokenizer.pad_token])[0]
# @TODO: Support multiple targets if they are all categorical or train for the categorical target if it's a mix (maybe ?)
# @TODO: Attach a language modeling head and/or use GPT2 and/or provide outputs better suited to a LM head (which will be the mixer) if the output is text
if training_data is not None and 'targets' in training_data and len(training_data['targets']) ==1 and training_data['targets'][0]['output_type'] == COLUMN_DATA_TYPES.CATEGORICAL and CONFIG.TRAIN_TO_PREDICT_TARGET:
self._model_type = 'classifier'
self._model = self._classifier_model_class.from_pretrained(self._pretrained_model_name, num_labels=len(set(training_data['targets'][0]['unencoded_output'])) + 1).to(self.device)
batch_size = 10
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in self._model.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 0.000001},
{'params': [p for n, p in self._model.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = AdamW(optimizer_grouped_parameters, lr=5e-5, eps=1e-8)
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=10, num_training_steps=len(priming_data) * 15/20)
gym = Gym(model=self._model, optimizer=optimizer, scheduler=scheduler, loss_criterion=None, device=self.device, name=self.name)
input = [self._tokenizer.encode(x[:self._max_len], add_special_tokens=True) for x in priming_data]
tokenized_max_len = max([len(x) for x in input])
input = torch.tensor([x + [self._pad_id] * (tokenized_max_len - len(x)) for x in input])
real = training_data['targets'][0]['encoded_output']
merged_data = list(zip(input,real))
train_data_loader = DataLoader(merged_data[:int(len(merged_data)*9/10)], batch_size=batch_size, shuffle=True)
test_data_loader = DataLoader(merged_data[int(len(merged_data)*9/10):], batch_size=batch_size, shuffle=True)
best_model, error, training_time = gym.fit(train_data_loader=train_data_loader, test_data_loader=test_data_loader, desired_error=self.desired_error, max_time=self.max_training_time, callback=self._train_callback, eval_every_x_epochs=1, max_unimproving_models=10, custom_train_func=partial(self.categorical_train_function,test=False), custom_test_func=partial(self.categorical_train_function,test=True))
self._model = best_model.to(self.device)
elif training_data is not None and 'targets' in training_data and all([x['output_type'] == COLUMN_DATA_TYPES.NUMERIC or x['output_type'] == COLUMN_DATA_TYPES.CATEGORICAL for x in training_data['targets']]) and CONFIG.TRAIN_TO_PREDICT_TARGET:  # training_data may be None when only embeddings are needed
self.desired_error = 0.01
self._model_type = 'generic_target_predictor'
self._model = self._embeddings_model_class.from_pretrained(self._pretrained_model_name).to(self.device)
batch_size = 10
self._head = DefaultNet(ds=None, dynamic_parameters={},shape=funnel(768, sum( [ len(x['encoded_output'][0]) for x in training_data['targets'] ] ), depth=5), selfaware=False)
no_decay = ['bias', 'LayerNorm.weight']
optimizer_grouped_parameters = [
{'params': [p for n, p in self._head.named_parameters() if not any(nd in n for nd in no_decay)], 'weight_decay': 0.000001},
{'params': [p for n, p in self._head.named_parameters() if any(nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
optimizer = torch.optim.AdamW(optimizer_grouped_parameters, lr=5e-5, eps=1e-8)
#optimizer = Ranger(self._head.parameters(),lr=5e-5)
# num_training_steps is kind of an estimation
scheduler = get_linear_schedule_with_warmup(optimizer, num_warmup_steps=10, num_training_steps=len(priming_data) * 15/20)
criterion = torch.nn.MSELoss()
gym = Gym(model=self._head, optimizer=optimizer, scheduler=scheduler, loss_criterion=criterion, device=self.device, name=self.name)
input = [self._tokenizer.encode(x[:self._max_len], add_special_tokens=True) for x in priming_data]
tokenized_max_len = max([len(x) for x in input])
input = torch.tensor([x + [self._pad_id] * (tokenized_max_len - len(x)) for x in input])
real = [[]] * len(training_data['targets'][0]['encoded_output'])
for i in range(len(real)):
for target in training_data['targets']:
real[i] = real[i] + target['encoded_output'][i]
real = torch.tensor(real)
merged_data = list(zip(input,real))
train_data_loader = DataLoader(merged_data[:int(len(merged_data)*9/10)], batch_size=batch_size, shuffle=True)
test_data_loader = DataLoader(merged_data[int(len(merged_data)*9/10):], batch_size=batch_size, shuffle=True)
self._model.eval()
best_model, error, training_time = gym.fit(train_data_loader=train_data_loader, test_data_loader=test_data_loader, desired_error=self.desired_error, max_time=self.max_training_time, callback=self._train_callback, eval_every_x_epochs=1, max_unimproving_models=10, custom_train_func=partial(self.numerical_train_function, backbone=self._model, test=False), custom_test_func=partial(self.numerical_train_function, backbone=self._model, test=True))
self._head = best_model.to(self.device)
else:
self._model_type = 'embeddings_generator'
self._model = self._embeddings_model_class.from_pretrained(self._pretrained_model_name).to(self.device)
self._prepared = True
def encode(self, column_data):
encoded_representation = []
self._model.eval()
with torch.no_grad():
for text in column_data:
if text is None:
text = ''
input = torch.tensor(self._tokenizer.encode(text[:self._max_len], add_special_tokens=True)).to(self.device).unsqueeze(0)
if self._model_type == 'generic_target_predictor':
embeddings = self._model(input)
output = self._head(embeddings[0][:,0,:])
encoded_representation.append(output.tolist()[0])
elif self._model_type == 'classifier':
output = self._model(input)
logits = output[0]
predicted_targets = logits[0].tolist()
encoded_representation.append(predicted_targets)
else:
output = self._model(input)
embeddings = output[0][:,0,:].cpu().numpy()[0]
encoded_representation.append(embeddings)
return self._pytorch_wrapper(encoded_representation)
def decode(self, encoded_values_tensor, max_length = 100):
# When text is the output... a bit trickier to handle this case; still thinking on it
pass
if __name__ == "__main__":
# Generate some tests data
import random
from sklearn.metrics import r2_score
import logging
from lightwood.encoders.numeric import NumericEncoder
logging.basicConfig(level=logging.DEBUG)
random.seed(2)
priming_data = []
priming_target = []
test_data = []
test_target = []
for i in range(0,300):
if random.randint(1,5) == 3:
test_data.append(str(i) + ''.join(['n'] * i))
#test_data.append(str(i))
test_target.append(i)
#else:
priming_data.append(str(i) + ''.join(['n'] * i))
#priming_data.append(str(i))
priming_target.append(i)
output_1_encoder = NumericEncoder()
output_1_encoder.prepare_encoder(priming_target)
encoded_data_1 = output_1_encoder.encode(priming_target)
encoded_data_1 = encoded_data_1.tolist()
enc = DistilBertEncoder()
enc.prepare_encoder(priming_data, training_data={'targets': [{'output_type': COLUMN_DATA_TYPES.NUMERIC, 'encoded_output': encoded_data_1}, {'output_type': COLUMN_DATA_TYPES.NUMERIC, 'encoded_output': encoded_data_1}]})
encoded_predicted_target = enc.encode(test_data).tolist()
predicted_targets_1 = output_1_encoder.decode(torch.tensor([x[:4] for x in encoded_predicted_target]))
predicted_targets_2 = output_1_encoder.decode(torch.tensor([x[4:] for x in encoded_predicted_target]))
for predicted_targets in [predicted_targets_1, predicted_targets_2]:
real = list(test_target)
pred = list(predicted_targets)
# handle nan
for i in range(len(pred)):
try:
float(pred[i])
except (ValueError, TypeError):
pred[i] = 0
print(real[0:25], '\n', pred[0:25])
encoder_accuracy = r2_score(real, pred)
print(f'Categorical encoder accuracy: {encoder_accuracy} on testing dataset')
#assert(encoder_accuracy > 0.5)
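# A minimal sketch of the third branch of prepare_encoder(), where the encoder
# is used purely as an embeddings generator (no categorical or numerical
# targets to train against); assumes `transformers` can fetch
# 'distilbert-base-uncased'. The texts below are illustrative.
def _embeddings_only_sketch():
    enc = DistilBertEncoder()
    # With no training_data the encoder only loads the pretrained backbone.
    enc.prepare_encoder(['hello world', 'another sentence'])
    # encode() returns a FloatTensor with one [CLS] embedding per input text.
    return enc.encode(['a new sentence'])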
| 46.993031
| 456
| 0.671091
| 1,740
| 13,487
| 4.932759
| 0.183333
| 0.026215
| 0.009088
| 0.018758
| 0.499243
| 0.464406
| 0.40487
| 0.372364
| 0.366888
| 0.366888
| 0
| 0.013556
| 0.228813
| 13,487
| 286
| 457
| 47.157343
| 0.811653
| 0.061837
| 0
| 0.308824
| 0
| 0
| 0.056413
| 0.007437
| 0
| 0
| 0
| 0.003497
| 0
| 1
| 0.034314
| false
| 0.004902
| 0.098039
| 0
| 0.151961
| 0.009804
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de775456d4d41592b9970922b77c527e29122163
| 4,542
|
py
|
Python
|
scripts/scopdominfo.py
|
stivalaa/cuda_satabsearch
|
b947fb711f8b138e5a50c81e7331727c372eb87d
|
[
"MIT"
] | null | null | null |
scripts/scopdominfo.py
|
stivalaa/cuda_satabsearch
|
b947fb711f8b138e5a50c81e7331727c372eb87d
|
[
"MIT"
] | null | null | null |
scripts/scopdominfo.py
|
stivalaa/cuda_satabsearch
|
b947fb711f8b138e5a50c81e7331727c372eb87d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
###############################################################################
#
# scopdominfo.py - Report information on folds and classes of a list of SCOP sids
#
# File: scopdominfo.py
# Author: Alex Stivala
# Created: November 2008
#
# $Id: scopdominfo.py 3009 2009-12-08 03:01:48Z alexs $
#
###############################################################################
"""
Report information on the folds, superfamilies and classes of a list
of SCOP domain identifiers (sids).
See usage in docstring for main()
SCOP and ASTRAL data is obtained using the Bio.SCOP library (Casbon et
al 2006 'A high level interface to SCOP and ASTRAL implemented in
Python' BMC Bioinformatics 7:10) and depends on having the data
downloaded, in SCOP_DIR (defined below).
Downloaded SCOP files from
http://scop.mrc-lmb.cam.ac.uk/scop/parse/index.html
and ASTRAL files (in scopseq-1.73) from
http://astral.berkeley.edu/scopseq-1.73.html
The files downloaded are:
/local/charikar/SCOP/:
dir.cla.scop.txt_1.73
dir.des.scop.txt_1.73
dir.hie.scop.txt_1.73
/local/charikar/SCOP/scopseq-1.73:
astral-scopdom-seqres-all-1.73.fa
astral-scopdom-seqres-sel-gs-bib-95-1.73.id
Other files there are indices built by Bio.SCOP when first used.
"""
import sys,os
from Bio.SCOP import *
from pathdefs import SCOP_DIR,SCOP_VERSION
#-----------------------------------------------------------------------------
#
# Function definitions
#
#-----------------------------------------------------------------------------
def write_scopdom_info(scopsid_list, fh, scop):
"""
Write information about the list of SCOP sids (domain identifiers)
in the scopsid_list to fh. For each domain write the fold and class,
then write stats about number of different folds represented
and the number of domains in each class.
Parameters:
scopsid_list - list of SCOP sids (domain ids)
fh - open (write) filehandle to write to
scop - previously built Bio.SCOP Scop instance
Return value:
None.
"""
superfamily_count = {} # dict of {sf_sunid : count} counting domains in each superfamily
fold_count= {} # dict of {fold_sunid : count} counting domains in each fold
class_count={} # dict of {class_sunid : count} counting domains in each class
for sid in scopsid_list:
scop_dom = scop.getDomainBySid(sid)
scop_superfamily = scop_dom.getAscendent('superfamily')
scop_fold = scop_dom.getAscendent('fold')
scop_class = scop_dom.getAscendent('class')
superfamily_count[scop_superfamily.sunid] = superfamily_count.get(scop_superfamily.sunid, 0) + 1
fold_count[scop_fold.sunid] = fold_count.get(scop_fold.sunid, 0) + 1
class_count[scop_class.sunid] = class_count.get(scop_class.sunid, 0) + 1
fh.write('%s\t(%s) %s\t%s\t%s\n' % (sid, scop_superfamily.sccs,scop_superfamily.description, scop_fold.description, scop_class.description))
num_domains = len(scopsid_list)
num_superfamilies = len(superfamily_count)
num_folds = len(fold_count)
num_classes = len(class_count)
fh.write('Totals: %d domains\t%d superfamilies\t%d folds\t%d classes\n' %
(num_domains, num_superfamilies, num_folds, num_classes))
fh.write('Class distribution:\n')
for (class_sunid, count) in class_count.items():
fh.write('\t%s:\t%d\n' % (scop.getNodeBySunid(class_sunid).description,
count))
#-----------------------------------------------------------------------------
#
# Main
#
#-----------------------------------------------------------------------------
def usage(progname):
"""
Print usage message and exit
"""
sys.stderr.write("Usage: " +progname +
" < domainidlist\n")
sys.exit(1)
def main():
"""
main for scopdominfo.py
Usage: scopdominfo.py < domainidlist
The list of SCOP domain ids (sids) is read from stdin
Output is written to stdout.
"""
if len(sys.argv) != 1:
usage(os.path.basename(sys.argv[0]))
# read SCOP data
scop = Scop(dir_path=SCOP_DIR,version=SCOP_VERSION)
scopsid_list = sys.stdin.read().split('\n')[:-1]
write_scopdom_info(scopsid_list, sys.stdout, scop)
if __name__ == "__main__":
main()
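# Example invocation, assuming SCOP_DIR and SCOP_VERSION in pathdefs.py point
# at the downloaded SCOP/ASTRAL files listed in the module docstring, and that
# domainidlist.txt holds one SCOP sid per line (e.g. d1ubia_):
#
#   python scopdominfo.py < domainidlist.txt > domaininfo.txt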
| 30.689189
| 148
| 0.610524
| 592
| 4,542
| 4.548986
| 0.3125
| 0.008912
| 0.018567
| 0.015596
| 0.157817
| 0.103231
| 0.017081
| 0
| 0
| 0
| 0
| 0.017721
| 0.192426
| 4,542
| 147
| 149
| 30.897959
| 0.716467
| 0.483487
| 0
| 0.06383
| 0
| 0.021277
| 0.080637
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.06383
| false
| 0
| 0.06383
| 0
| 0.12766
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de79c16d6df471bd5320f3fc4154354634f400a7
| 1,334
|
py
|
Python
|
serverless/pytorch/foolwood/siammask/nuclio/model_handler.py
|
arthurtibame/cvat
|
0062ecdec34a9ffcad33e1664a7cac663bec4ecf
|
[
"MIT"
] | null | null | null |
serverless/pytorch/foolwood/siammask/nuclio/model_handler.py
|
arthurtibame/cvat
|
0062ecdec34a9ffcad33e1664a7cac663bec4ecf
|
[
"MIT"
] | null | null | null |
serverless/pytorch/foolwood/siammask/nuclio/model_handler.py
|
arthurtibame/cvat
|
0062ecdec34a9ffcad33e1664a7cac663bec4ecf
|
[
"MIT"
] | 1
|
2021-09-17T10:19:30.000Z
|
2021-09-17T10:19:30.000Z
|
# Copyright (C) 2020 Intel Corporation
#
# SPDX-License-Identifier: MIT
from tools.test import *
import os
class ModelHandler:
def __init__(self):
# Setup device
self.device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
torch.backends.cudnn.benchmark = True
base_dir = "/opt/nuclio/SiamMask/experiments/siammask_sharp"
class configPath:
config = os.path.join(base_dir, "config_davis.json")
self.config = load_config(configPath)
from custom import Custom
siammask = Custom(anchors=self.config['anchors'])
self.siammask = load_pretrain(siammask, os.path.join(base_dir, "SiamMask_DAVIS.pth"))
self.siammask.eval().to(self.device)
def infer(self, image, shape, state):
if state is None: # init tracking
x, y, w, h = shape
target_pos = np.array([x + w / 2, y + h / 2])
target_sz = np.array([w, h])
state = siamese_init(image, target_pos, target_sz, self.siammask,
self.config['hp'], device=self.device)
else: # track
state = siamese_track(state, image, mask_enable=True, refine_enable=True,
device=self.device)
shape = state['ploygon'].flatten()  # 'ploygon' (sic) is the key name set by SiamMask's siamese_track
return {"shape": shape, "state": state}
| 34.205128
| 93
| 0.614693
| 166
| 1,334
| 4.813253
| 0.463855
| 0.050063
| 0.060075
| 0.035044
| 0.042553
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006122
| 0.265367
| 1,334
| 38
| 94
| 35.105263
| 0.809184
| 0.073463
| 0
| 0
| 0
| 0
| 0.093648
| 0.038274
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.115385
| 0
| 0.307692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de79c50bcf2db093ce388c48ecf4f5cdef4ddb45
| 10,842
|
py
|
Python
|
pynmt/__init__.py
|
obrmmk/demo
|
b5deb85b2b2bf118b850f93c255ee88d055156a8
|
[
"MIT"
] | null | null | null |
pynmt/__init__.py
|
obrmmk/demo
|
b5deb85b2b2bf118b850f93c255ee88d055156a8
|
[
"MIT"
] | null | null | null |
pynmt/__init__.py
|
obrmmk/demo
|
b5deb85b2b2bf118b850f93c255ee88d055156a8
|
[
"MIT"
] | 1
|
2021-11-23T14:04:36.000Z
|
2021-11-23T14:04:36.000Z
|
import torch
import torch.nn as nn
from torch.nn import (TransformerEncoder, TransformerDecoder,
TransformerEncoderLayer, TransformerDecoderLayer)
from torch import Tensor
from typing import Iterable, List
import math
import os
import numpy as np
try:
from janome.tokenizer import Tokenizer
except ModuleNotFoundError:
import os
os.system('pip install janome')
from janome.tokenizer import Tokenizer
from google_drive_downloader import GoogleDriveDownloader
# Select the device
DEVICE = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
print('DEVICE :', DEVICE)
# SRC (source): the original sentence
SRC_LANGUAGE = 'jpn'
# TGT (target): the translated output
TGT_LANGUAGE = 'py'
# special_token IDX
UNK_IDX, PAD_IDX, SOS_IDX, EOS_IDX = 0, 1, 2, 3
tokenizer = Tokenizer(os.path.join(os.path.dirname(
__file__), 'janomedic.csv'), udic_type="simpledic", udic_enc="utf8", wakati=True)
def jpn_tokenizer(text):
return [token for token in tokenizer.tokenize(text) if token != " " and len(token) != 0]
class Seq2SeqTransformer(nn.Module):
def __init__(self,
num_encoder_layers: int,
num_decoder_layers: int,
emb_size: int,
nhead: int,
src_vocab_size: int,
tgt_vocab_size: int,
dim_feedforward: int = 512,
dropout: float = 0.1):
super(Seq2SeqTransformer, self).__init__()
encoder_layer = TransformerEncoderLayer(d_model=emb_size, nhead=nhead,
dim_feedforward=dim_feedforward)
self.transformer_encoder = TransformerEncoder(
encoder_layer, num_layers=num_encoder_layers)
decoder_layer = TransformerDecoderLayer(d_model=emb_size, nhead=nhead,
dim_feedforward=dim_feedforward)
self.transformer_decoder = TransformerDecoder(
decoder_layer, num_layers=num_decoder_layers)
self.generator = nn.Linear(emb_size, tgt_vocab_size)
self.src_tok_emb = TokenEmbedding(src_vocab_size, emb_size)
self.tgt_tok_emb = TokenEmbedding(tgt_vocab_size, emb_size)
self.positional_encoding = PositionalEncoding(
emb_size, dropout=dropout)
def forward(self,
src: Tensor,
tgt: Tensor,
src_mask: Tensor,
tgt_mask: Tensor,
src_padding_mask: Tensor,
tgt_padding_mask: Tensor,
memory_key_padding_mask: Tensor):
src_emb = self.positional_encoding(self.src_tok_emb(src))
tgt_emb = self.positional_encoding(self.tgt_tok_emb(tgt))
memory = self.transformer_encoder(src_emb, src_mask, src_padding_mask)
outs = self.transformer_decoder(tgt_emb, memory, tgt_mask, None,
tgt_padding_mask, memory_key_padding_mask)
return self.generator(outs)
def encode(self, src: Tensor, src_mask: Tensor):
return self.transformer_encoder(self.positional_encoding(
self.src_tok_emb(src)), src_mask)
def decode(self, tgt: Tensor, memory: Tensor, tgt_mask: Tensor):
return self.transformer_decoder(self.positional_encoding(
self.tgt_tok_emb(tgt)), memory,
tgt_mask)
class PositionalEncoding(nn.Module):
def __init__(self,
emb_size: int,
dropout: float,
maxlen: int = 5000):
super(PositionalEncoding, self).__init__()
den = torch.exp(- torch.arange(0, emb_size, 2)
* math.log(10000) / emb_size)
pos = torch.arange(0, maxlen).reshape(maxlen, 1)
pos_embedding = torch.zeros((maxlen, emb_size))
pos_embedding[:, 0::2] = torch.sin(pos * den)
pos_embedding[:, 1::2] = torch.cos(pos * den)
pos_embedding = pos_embedding.unsqueeze(-2)
self.dropout = nn.Dropout(dropout)
self.register_buffer('pos_embedding', pos_embedding)
def forward(self, token_embedding: Tensor):
return self.dropout(token_embedding +
self.pos_embedding[:token_embedding.size(0), :])
class TokenEmbedding(nn.Module):
def __init__(self, vocab_size: int, emb_size):
super(TokenEmbedding, self).__init__()
self.embedding = nn.Embedding(vocab_size, emb_size)
self.emb_size = emb_size
def forward(self, tokens: Tensor):
return self.embedding(tokens.long()) * math.sqrt(self.emb_size)
# Mask that keeps the model from looking at future tokens when predicting
def generate_square_subsequent_mask(sz):
mask = (torch.triu(torch.ones((sz, sz), device=DEVICE)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float(
'-inf')).masked_fill(mask == 1, float(0.0))
return mask
def sequential_transforms(*transforms):
def func(txt_input):
for transform in transforms:
txt_input = transform(txt_input)
return txt_input
return func
def tensor_transform(token_ids: List[int]):
return torch.cat((torch.tensor([SOS_IDX]),
torch.tensor(token_ids),
torch.tensor([EOS_IDX])))
def beam_topk(model, ys, memory, beamsize):
ys = ys.to(DEVICE)
tgt_mask = (generate_square_subsequent_mask(
ys.size(0)).type(torch.bool)).to(DEVICE)
out = model.decode(ys, memory, tgt_mask)
out = out.transpose(0, 1)
prob = model.generator(out[:, -1])
next_prob, next_word = prob.topk(k=beamsize, dim=1)
return next_prob, next_word
# Generate translation candidates (sequences) using beam search
def beam_decode(model, src, src_mask, max_len, beamsize, start_symbol):
src = src.to(DEVICE)
src_mask = src_mask.to(DEVICE)
ys_result = {}
memory = model.encode(src, src_mask).to(DEVICE) # encoder output (the context vectors)
# Initial candidates (beamsize of them)
ys = torch.ones(1, 1).fill_(start_symbol).type(torch.long).to(DEVICE)
next_prob, next_word = beam_topk(model, ys, memory, beamsize)
next_prob = next_prob[0].tolist()
# Candidates for <sos> + the first token (list length equals beamsize)
ys = [torch.cat([ys, torch.ones(1, 1).type_as(src.data).fill_(
next_word[:, idx].item())], dim=0) for idx in range(beamsize)]
for i in range(max_len-1):
prob_list = []
ys_list = []
# For each candidate, compute the next predicted token and its probability
for ys_token in ys:
next_prob, next_word = beam_topk(model, ys_token, memory, len(ys))
# Put the prediction probabilities into the list (next_prob)
next_prob = next_prob[0].tolist()
# Merge into a single list
prob_list.extend(next_prob)
ys = [torch.cat([ys_token, torch.ones(1, 1).type_as(src.data).fill_(
next_word[:, idx].item())], dim=0) for idx in range(len(ys))]
ys_list.extend(ys)
# Keep the indices of the top-k entries of prob_list in prob_topk_idx
prob_topk_idx = list(reversed(np.argsort(prob_list).tolist()))
prob_topk_idx = prob_topk_idx[:len(ys)]
# print('@@', prob_topk_idx)
# Assign the new top-k candidates to ys
ys = [ys_list[idx] for idx in prob_topk_idx]
next_prob = [prob_list[idx] for idx in prob_topk_idx]
# print('@@orig', prob_list)
# print('@@next', next_prob)
pop_list = []
for j in range(len(ys)):
# If the EOS token is at the end, append the sequence to ys_result (the return value)
if ys[j][-1].item() == EOS_IDX:
ys_result[ys[j]] = next_prob[j]
pop_list.append(j)
# Once a sequence enters ys_result, remove it from the original ys
# (len(ys) changes, so len(ys) is used instead of beamsize in several places)
for l in sorted(pop_list, reverse=True):
del ys[l]
# Stop once ys_result has grown to beamsize or more
if len(ys_result) >= beamsize:
break
return ys_result
class NMT(object):
vocab: object
def __init__(self, vocab_file):
self.vocab = torch.load(vocab_file)
self.SRC_VOCAB_SIZE = len(self.vocab[SRC_LANGUAGE])
self.TGT_VOCAB_SIZE = len(self.vocab[TGT_LANGUAGE])
self.src_transform = sequential_transforms(jpn_tokenizer, # Tokenization
# Numericalization
self.vocab[SRC_LANGUAGE],
tensor_transform) # Add SOS/EOS and create tensor
self.EMB_SIZE = 512
self.NHEAD = 8
self.FFN_HID_DIM = 512
self.BATCH_SIZE = 128
self.NUM_ENCODER_LAYERS = 3
self.NUM_DECODER_LAYERS = 3
self.transformer = Seq2SeqTransformer(self.NUM_ENCODER_LAYERS, self.NUM_DECODER_LAYERS,
self.EMB_SIZE, self.NHEAD, self.SRC_VOCAB_SIZE, self.TGT_VOCAB_SIZE,
self.FFN_HID_DIM)
for p in self.transformer.parameters():
if p.dim() > 1:
nn.init.xavier_uniform_(p)
self.transformer = self.transformer.to(DEVICE)
def load(self, trained_model):
self.transformer.load_state_dict(torch.load(trained_model))
def translate_beam(self, src_sentence: str, beamsize=5):
"""
Return multiple translation candidates as a list.
"""
pred_list = []
self.transformer.eval()
src = self.src_transform(src_sentence).view(-1, 1)
num_tokens = src.shape[0]
src_mask = (torch.zeros(num_tokens, num_tokens)).type(torch.bool)
tgt_tokens = beam_decode(
self.transformer, src, src_mask, max_len=num_tokens + 5, beamsize=beamsize, start_symbol=SOS_IDX)
prob_list = list(tgt_tokens.values())
tgt_tokens = list(tgt_tokens.keys())
for idx in list(reversed(np.argsort(prob_list).tolist())):
pred_list.append(" ".join(self.vocab[TGT_LANGUAGE].lookup_tokens(
list(tgt_tokens[idx].cpu().numpy()))).replace("<sos>", "").replace("<eos>", ""))
return pred_list, sorted(prob_list, reverse=True)
special_token = ['<A>', '<B>', '<C>', '<D>', '<E>']
def make_pynmt(model_id='1zMTrsmcyF2oXpWKe0bIZ7Ej1JBjVq7np', vocab_id='13C39jfdkkmE2mx-1K9PFXqGST84j-mz8', model_file='./model_DS.pt', vocab_file="./vocab_obj_DS.pth"):
GoogleDriveDownloader.download_file_from_google_drive(
file_id=model_id, dest_path=model_file, unzip=False)
GoogleDriveDownloader.download_file_from_google_drive(
file_id=vocab_id, dest_path=vocab_file, unzip=False)
nmt = NMT(vocab_file)
nmt.load(model_file)
def pynmt(sentence):
# candidate = re.findall(r'[a-zA-Z"\']+', sentence)
# for idx in range(len(candidate)):
# sentence = sentence.replace(candidate[idx], special_token[idx])
# print(sentence)
pred, prob = nmt.translate_beam(sentence)
return pred, prob
# print(pred)
# print(prob)
return pynmt
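# A minimal usage sketch, assuming the Google Drive files behind the default
# model_id/vocab_id are still reachable (the first call downloads the model
# and vocabulary); the input sentence is illustrative:
#
#   pynmt = make_pynmt()
#   predictions, probabilities = pynmt('リストの先頭を取り出す')  # "take the first element of the list"
#   print(predictions[0])  # highest-probability candidate from the beam search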
| 36.14
| 168
| 0.620365
| 1,348
| 10,842
| 4.735905
| 0.207715
| 0.019737
| 0.012061
| 0.016291
| 0.181391
| 0.124843
| 0.110902
| 0.099937
| 0.053258
| 0.039474
| 0
| 0.011876
| 0.269969
| 10,842
| 299
| 169
| 36.26087
| 0.794694
| 0.078676
| 0
| 0.068966
| 0
| 0
| 0.020833
| 0.006643
| 0
| 0
| 0
| 0
| 0
| 1
| 0.098522
| false
| 0
| 0.059113
| 0.029557
| 0.256158
| 0.004926
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de7c4534ed26f1d3158aaf6b53415fa79e0c249d
| 574
|
py
|
Python
|
patron/__init__.py
|
rafaelaraujobsb/patron
|
b2d23d4149a5f48156a4a2b0638daac33a66cc6a
|
[
"MIT"
] | null | null | null |
patron/__init__.py
|
rafaelaraujobsb/patron
|
b2d23d4149a5f48156a4a2b0638daac33a66cc6a
|
[
"MIT"
] | null | null | null |
patron/__init__.py
|
rafaelaraujobsb/patron
|
b2d23d4149a5f48156a4a2b0638daac33a66cc6a
|
[
"MIT"
] | null | null | null |
from flask import Flask
from loguru import logger
from flasgger import Swagger
from patron.api import api_bp
logger.add("api.log", format="{time:YYYY-MM-DD at HH:mm:ss} | {level} | {message}", rotation="500 MB")
template = {
"swagger": "2.0",
"info": {
"title": "PATRON",
"description": "",
"version": "0.0.1"
},
"consumes": [
"application/json"
],
"produces": [
"application/json"
]
}
app = Flask(__name__)
swagger = Swagger(app, template=template)
app.register_blueprint(api_bp, url_prefix='/api')
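# A minimal sketch of what a blueprint like patron.api.api_bp could look like;
# the real one lives in patron/api.py and is not shown here. The route below
# is hypothetical.
#
#   from flask import Blueprint, jsonify
#
#   api_bp = Blueprint('api', __name__)
#
#   @api_bp.route('/health')
#   def health():
#       return jsonify(status='ok')  # exposed at /api/health via url_prefix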
| 19.793103
| 102
| 0.602787
| 70
| 574
| 4.828571
| 0.614286
| 0.029586
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0181
| 0.229965
| 574
| 28
| 103
| 20.5
| 0.746606
| 0
| 0
| 0.090909
| 0
| 0.045455
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.181818
| 0
| 0.181818
| 0.045455
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de7dc549a1952d8dda02b33f493f1bb859b37917
| 735
|
py
|
Python
|
src/perceptron.py
|
tomoki/deep-learning-from-scratch
|
0b6144806b6b79462d6d65616a64b1774f876973
|
[
"MIT"
] | 1
|
2018-08-31T09:39:11.000Z
|
2018-08-31T09:39:11.000Z
|
src/perceptron.py
|
tomoki/deep-learning-from-scratch
|
0b6144806b6b79462d6d65616a64b1774f876973
|
[
"MIT"
] | null | null | null |
src/perceptron.py
|
tomoki/deep-learning-from-scratch
|
0b6144806b6b79462d6d65616a64b1774f876973
|
[
"MIT"
] | null | null | null |
import numpy as np
import matplotlib.pylab as plt
def step_function(x):
y = x > 0
return y.astype(int)
def sigmoid(x):
return 1 / (1 + np.exp(-x))
def relu(x):
return np.maximum(0, x)
def AND(x1, x2):
x = np.array([x1, x2])
w = np.array([0.5, 0.5])
b = -0.7
tmp = np.sum(w * x) + b
if tmp <= 0:
return 0
else:
return 1
def NAND(x1, x2):
x = np.array([x1, x2])
w = np.array([-0.5, -0.5])
b = 0.7
tmp = np.sum(w * x) + b
if tmp <= 0:
return 0
else:
return 1
def OR(x1, x2):
x = np.array([x1, x2])
w = np.array([0.5, 0.5])
b = -0.2
tmp = np.sum(w * x) + b
if tmp <= 0:
return 0
else:
return 1
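# XOR is not linearly separable, so no single perceptron above can express it,
# but a two-layer composition of the gates already defined here can. This
# sketch follows directly from the AND, NAND, and OR definitions in this file.
def XOR(x1, x2):
    s1 = NAND(x1, x2)   # first layer
    s2 = OR(x1, x2)
    return AND(s1, s2)  # second layer combines the two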
| 17.093023
| 31
| 0.469388
| 137
| 735
| 2.510949
| 0.262774
| 0.069767
| 0.043605
| 0.061047
| 0.590116
| 0.590116
| 0.590116
| 0.590116
| 0.590116
| 0.590116
| 0
| 0.090909
| 0.356463
| 735
| 42
| 32
| 17.5
| 0.636364
| 0
| 0
| 0.555556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.055556
| 0.055556
| 0.472222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de82bbe06365e1885857bfec2f5eb9144e01b08c
| 1,729
|
py
|
Python
|
dncnn/dncnn.py
|
kTonpa/DnCNN
|
aca7e07ccbe6b75bee7d4763958dade4a8eee609
|
[
"MIT"
] | null | null | null |
dncnn/dncnn.py
|
kTonpa/DnCNN
|
aca7e07ccbe6b75bee7d4763958dade4a8eee609
|
[
"MIT"
] | null | null | null |
dncnn/dncnn.py
|
kTonpa/DnCNN
|
aca7e07ccbe6b75bee7d4763958dade4a8eee609
|
[
"MIT"
] | null | null | null |
"""
Project: dncnn
Author: khalil MEFTAH
Date: 2021-11-26
DnCNN: Denoising Convolutional Neural Network for image denoising, model implementation
"""
import torch
from torch import nn
import torch.nn.functional as F
# helper functions
def eval_decorator(fn):
def inner(model, *args, **kwargs):
was_training = model.training
model.eval()
out = fn(model, *args, **kwargs)
model.train(was_training)
return out
return inner
# main class
class DnCNN(nn.Module):
def __init__(
self,
num_layers=17,
num_features=64,
kernel_size=3,
padding=1,
image_channels=1,
image_size=64
):
super(DnCNN, self).__init__()
layers = []
layers.append(nn.Conv2d(in_channels=image_channels, out_channels=num_features, kernel_size=kernel_size, padding=padding, bias=True))
layers.append(nn.ReLU(inplace=True))
for _ in range(num_layers - 2):
layers.append(nn.Conv2d(in_channels=num_features, out_channels=num_features, kernel_size=kernel_size, padding=padding, bias=True))
layers.append(nn.BatchNorm2d(num_features))
layers.append(nn.ReLU(inplace=True))
layers.append(nn.Conv2d(in_channels=num_features, out_channels=image_channels, kernel_size=kernel_size, padding=padding, bias=True))
self.dncnn = nn.Sequential(*layers)
@torch.no_grad()
@eval_decorator
def denoise(self, y):
return self(y)
def forward(self, y, return_loss=False, x=None):
n = self.dncnn(y)
if not return_loss:
return y-n
# residual learning: MSE between the predicted noise n and the true noise y - x
return F.mse_loss(n, y-x)
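# A minimal, self-contained training-step sketch; the batch shapes, noise
# level, and optimizer settings are illustrative, not taken from this project.
def _train_step_sketch():
    model = DnCNN(num_layers=5)  # small network to keep the sketch cheap
    optimizer = torch.optim.Adam(model.parameters(), lr=1e-3)
    x = torch.rand(2, 1, 64, 64)  # clean images
    y = x + 0.1 * torch.randn_like(x)  # noisy observations
    loss = model(y, return_loss=True, x=x)  # MSE between predicted and true noise
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()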
| 25.80597
| 142
| 0.638519
| 224
| 1,729
| 4.741071
| 0.375
| 0.065913
| 0.079096
| 0.056497
| 0.343691
| 0.343691
| 0.274011
| 0.274011
| 0.234463
| 0.234463
| 0
| 0.017955
| 0.259109
| 1,729
| 66
| 143
| 26.19697
| 0.811085
| 0.108155
| 0
| 0.05
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.075
| 0.025
| 0.35
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
de848d1a58c8622dd6042ce58386b34d78eaa285
| 41,886
|
py
|
Python
|
scripts/fabfile/tasks.py
|
Alchem-Lab/deneva
|
5201ef12fd8235fea7833709b8bffe45f53877eb
|
[
"Apache-2.0"
] | 88
|
2017-01-19T03:15:24.000Z
|
2022-03-30T16:22:19.000Z
|
scripts/fabfile/tasks.py
|
Alchem-Lab/deneva
|
5201ef12fd8235fea7833709b8bffe45f53877eb
|
[
"Apache-2.0"
] | null | null | null |
scripts/fabfile/tasks.py
|
Alchem-Lab/deneva
|
5201ef12fd8235fea7833709b8bffe45f53877eb
|
[
"Apache-2.0"
] | 22
|
2017-01-20T10:22:31.000Z
|
2022-02-10T18:55:36.000Z
|
#!/usr/bin/python
from __future__ import print_function
import logging
from fabric.api import task,run,local,put,get,execute,settings
from fabric.decorators import *
from fabric.context_managers import shell_env,quiet
from fabric.exceptions import *
from fabric.utils import puts,fastprint
from time import sleep
from contextlib import contextmanager
import traceback
import os,sys,datetime,re,ast
import itertools
import glob,shlex,subprocess
import pprint
sys.path.append('..')
from environment import *
from experiments import *
from experiments import configs
from helper import get_cfgs,get_outfile_name,get_execfile_name,get_args,CONFIG_PARAMS,FLAG
# (see https://github.com/fabric/fabric/issues/51#issuecomment-96341022)
logging.basicConfig()
paramiko_logger = logging.getLogger("paramiko.transport")
paramiko_logger.disabled = True
COLORS = {
"info" : 32, #green
"warn" : 33, #yellow
"error" : 31, #red
"debug" : 36, #cyan
}
#OUT_FMT = "[{h}] {p}: {fn}:".format
PP = pprint.PrettyPrinter(indent=4)
NOW=datetime.datetime.now()
STRNOW=NOW.strftime("%Y%m%d-%H%M%S")
os.chdir('../..')
#MAX_TIME_PER_EXP = 60 * 2 # in seconds
MAX_TIME_PER_EXP = 60 * 10 # in seconds
EXECUTE_EXPS = True
SKIP = False
CC_ALG = ""
set_env()
@task
@hosts('localhost')
def using_vcloud():
set_env_vcloud()
@task
@hosts('localhost')
def using_istc():
set_env_istc()
@task
@hosts('localhost')
def using_ec2():
set_env_ec2()
@task
@hosts('localhost')
def using_local():
set_env_local()
## Basic usage:
## fab using_vcloud run_exps:experiment_1
## fab using_local run_exps:experiment_1
## fab using_istc run_exps:experiment_1
@task
@hosts('localhost')
def run_exps(exps,skip_completed='False',exec_exps='True',dry_run='False',iterations='1',check='True',delay='',same_node='False',overlap='False',shmem='True',cram='False'):
global SKIP, EXECUTE_EXPS,NOW,STRNOW
ITERS = int(iterations)
SKIP = skip_completed == 'True'
EXECUTE_EXPS = exec_exps == 'True'
CHECK = check == 'True'
env.dry_run = dry_run == 'True'
env.same_node = same_node == 'True'
env.overlap = overlap == 'True'
env.cram = cram == 'True'
if env.cluster != "ec2":
env.shmem = shmem == 'True'
if env.dry_run:
with color(level="warn"):
puts("this will be a dry run!",show_prefix=True)
with color():
puts("running experiment set:{}".format(exps),show_prefix=True)
# Make sure all experiment binaries exist
if CHECK:
execute(check_binaries,exps)
# Run experiments
for i in range(ITERS):
NOW=datetime.datetime.now()
STRNOW=NOW.strftime("%Y%m%d-%H%M%S")
execute(run_exp_old,exps,delay=delay)
# execute(run_exp,exps,delay=delay)
## Basic usage:
## fab using_vcloud network_test
## fab using_istc network_test:4
@task
@hosts(['localhost'])
def network_test(num_nodes=16,exps="network_experiment",skip_completed='False',exec_exps='True'):
env.batch_mode = False
global SKIP, EXECUTE_EXPS, MAX_TIME_PER_EXP
SKIP = skip_completed == 'True'
EXECUTE_EXPS = exec_exps == 'True'
MAX_TIME_PER_EXP = 60
num_nodes = int(num_nodes)
execute(check_binaries,exps)
if num_nodes < 2 or len(env.hosts) < num_nodes:
with color(level="error"):
puts("not enough hosts in ifconfig!",show_prefix=True)
abort()
exp_hosts=env.hosts[0:num_nodes]
pairs = list(itertools.combinations(exp_hosts,2))
for pair in pairs:
set_hosts(list(pair))
execute(run_exp,exps,network_test=True)
@task
@parallel
def check_cpu():
put("test_cpu.out",env.rem_homedir)
run("chmod a+x test_cpu.out; time ./test_cpu.out")
@task
@hosts('localhost')
def delete_local_results():
local("rm -f results/*");
@task
#@hosts('localhost')
@parallel
def delete_remote_results():
if env.cluster == "istc":
if env.shmem:
run("rm -f /dev/shm/results*.out")
else:
run("rm -f /home/%s/results*.out" % env.user)
else:
run("rm -f /home/ubuntu/results*.out")
@task
@parallel
def copy_schema():
if env.dry_run:
return
schemas = ["benchmarks/TPCC_full_schema.txt","benchmarks/YCSB_schema.txt","benchmarks/PPS_schema.txt"]
# Copying regular files should always succeed unless node is down
for schema in schemas:
if env.shmem:
put(schema,"/dev/shm/")
else:
put(schema,env.rem_homedir)
@task
@parallel
def copy_binaries(exp_fname):
if env.dry_run:
return
executable_files = ["rundb","runcl"]
succeeded = True
# Copying executable files may fail if a process is running the executable
with settings(warn_only=True):
for f in (executable_files):
local_fpath = os.path.join("binaries","{}{}".format(exp_fname,f))
if env.shmem:
remote_fpath = os.path.join("/dev/shm/","{}{}".format(exp_fname,f))
else:
remote_fpath = os.path.join(env.rem_homedir,"{}{}".format(exp_fname,f))
#res = put(f,env.rem_homedir,mirror_local_mode=True)
res = put(local_fpath,remote_fpath,mirror_local_mode=True)
if not res.succeeded:
with color("warn"):
puts("WARN: put: {} -> {} failed!".format(f,env.rem_homedir),show_prefix=True)
succeeded = False
break
if not succeeded:
with color("warn"):
puts("WARN: killing all executables and retrying...",show_prefix=True)
killall()
# If this fails again then we abort
for f in (executable_files):
local_fpath = os.path.join("binaries","{}{}".format(exp_fname,f))
if env.shmem:
remote_fpath = os.path.join("/dev/shm",f)
else:
remote_fpath = os.path.join(env.rem_homedir,f)
#res = put(f,env.rem_homedir,mirror_local_mode=True)
res = put(local_fpath,remote_fpath,mirror_local_mode=True)
if not res.succeeded:
with color("error"):
puts("ERROR: put: {} -> {} failed! (2nd attempt)... Aborting".format(f,env.rem_homedir),show_prefix=True)
abort()
@task
@parallel
def copy_ifconfig():
files = ["ifconfig.txt"]
# Copying regular files should always succeed unless node is down
for f in files:
if env.shmem:
put(f,"/dev/shm/")
else:
put(f,env.rem_homedir)
@task
@parallel
def copy_files(schema,exp_fname):
if env.dry_run:
return
executable_files = ["rundb","runcl"]
# if CC_ALG == "CALVIN":
# executable_files.append("runsq")
files = ["ifconfig.txt"]
files.append(schema)
succeeded = True
# Copying regular files should always succeed unless node is down
for f in files:
if env.shmem:
put(f,"/dev/shm/")
else:
put(f,env.rem_homedir)
# Copying executable files may fail if a process is running the executable
with settings(warn_only=True):
for f in (executable_files):
local_fpath = os.path.join("binaries","{}{}".format(exp_fname,f))
if env.shmem:
remote_fpath = os.path.join("/dev/shm/",f)
else:
remote_fpath = os.path.join(env.rem_homedir,f)
#res = put(f,env.rem_homedir,mirror_local_mode=True)
res = put(local_fpath,remote_fpath,mirror_local_mode=True)
if not res.succeeded:
with color("warn"):
puts("WARN: put: {} -> {} failed!".format(f,env.rem_homedir),show_prefix=True)
succeeded = False
break
if not succeeded:
with color("warn"):
puts("WARN: killing all executables and retrying...",show_prefix=True)
killall()
# If this fails again then we abort
for f in (executable_files):
local_fpath = os.path.join("binaries","{}{}".format(exp_fname,f))
if env.shmem:
remote_fpath = os.path.join("/dev/shm",f)
else:
remote_fpath = os.path.join(env.rem_homedir,f)
#res = put(f,env.rem_homedir,mirror_local_mode=True)
res = put(local_fpath,remote_fpath,mirror_local_mode=True)
if not res.succeeded:
with color("error"):
puts("ERROR: put: {} -> {} failed! (2nd attempt)... Aborting".format(f,env.rem_homedir),show_prefix=True)
abort()
#delay is in ms
@task
@parallel
def set_delay(delay='10'):
run("sudo tc qdisc add dev eth0 root netem delay {}ms".format(delay))
#delay is in ms
@task
@parallel
def reset_delay():
run("sudo tc qdisc del dev eth0 root")
@task
@parallel
def sync_clocks(max_offset=0.01,max_attempts=1,delay=15):
if env.dry_run:
return True
offset = sys.float_info.max
attempts = 0
while attempts < max_attempts:
if env.cluster == "ec2":
res = run("ntpdate -q 0.amazon.pool.ntp.org")
else:
res = run("ntpdate -q clock-2.cs.cmu.edu")
offset = float(res.stdout.split(",")[-2].split()[-1])
#print "Host ",env.host,": offset = ",offset
if abs(offset) < max_offset:
break
sleep(delay)
if env.cluster == "ec2":
res = run("sudo ntpdate -b 0.amazon.pool.ntp.org")
else:
res = run("sudo ntpdate -b clock-2.cs.cmu.edu")
sleep(delay)
attempts += 1
return attempts < max_attempts
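# Example invocations of the tasks above (the experiment set name is
# illustrative; real names come from experiments.py):
#
#   fab using_istc run_exps:ycsb_scaling,iterations=3
#   fab using_ec2 sync_clocks:max_offset=0.05
#
# Fabric passes arguments after the colon as strings, which is why run_exps
# compares flags such as skip_completed against the literal string 'True'.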
@task
@hosts('localhost')
def compile():
compiled = False
with quiet():
compiled = local("make clean; make -j8",capture=True).succeeded
if not compiled:
with settings(warn_only=True):
compiled = local("make -j8") # Print compilation errors
if not compiled:
with color("error"):
puts("ERROR: cannot compile code!",show_prefix=True)
@task
@parallel
def killall():
with settings(warn_only=True):
if not env.dry_run:
run("pkill -f rundb")
run("pkill -f runcl")
# run("pkill -f runsq")
@task
@parallel
def run_cmd(cmd):
run(cmd)
@task
@parallel
def put_cmd(cmd):
put(cmd,env.rem_homedir,mirror_local_mode=True)
@task
@parallel
def deploy(schema_path,nids,exps,runfiles,fmt):
nid = iter(nids[env.host])
exp = iter(exps[env.host])
runfile = iter(runfiles[env.host])
succeeded = True
with shell_env(SCHEMA_PATH=schema_path):
with settings(warn_only=True,command_timeout=MAX_TIME_PER_EXP):
# if env.same_node:
cmd = ''
for r in env.roledefs["servers"]:
if r == env.host:
nn = next(nid)
rfile = next(runfile)
args = get_args(fmt,next(exp))
if env.shmem:
cmd += "(/dev/shm/{}rundb -nid{} {}>> /dev/shm/results{}.out 2>&1 &);".format(rfile,nn,args,nn)
# cmd += "(/dev/shm/rundb -nid{} >> /dev/shm/results{}.out 2>&1 &);".format(nn,nn)
else:
cmd += "(./{}rundb -nid{} {}>> results{}.out 2>&1 &);".format(rfile,nn,args,nn)
for r in env.roledefs["clients"]:
if r == env.host:
nn = next(nid)
rfile = next(runfile)
args = get_args(fmt,next(exp))
if env.shmem:
cmd += "(/dev/shm/{}runcl -nid{} {}>> /dev/shm/results{}.out 2>&1 &);".format(rfile,nn,args,nn)
else:
cmd += "(./{}runcl -nid{} {}>> results{}.out 2>&1 &);".format(rfile,nn,args,nn)
# for r in env.roledefs["sequencer"]:
# if r == env.host:
# nn = nid.next()
# args = get_args(fmt,exp.next())
# if env.shmem:
# cmd += "(/dev/shm/runsq -nid{} {}>> /dev/shm/results{}.out 2>&1 &);".format(nn,args,nn)
# else:
# cmd += "(./runsq -nid{} {}>> results{}.out 2>&1 &);".format(nn,args,nn)
cmd = cmd[:-3]
cmd += ")"
try:
res = run("echo $SCHEMA_PATH")
if not env.dry_run:
run(cmd)
else:
print(cmd)
except CommandTimeout:
pass
except NetworkError:
pass
# else:
# if env.host in env.roledefs["servers"]:
# nn = nid.next();
# cmd = "./rundb -nid{} >> results{}.out 2>&1".format(nn,nn)
# elif env.host in env.roledefs["clients"]:
# nn = nid.next();
# cmd = "./runcl -nid{} >> results{}.out 2>&1".format(nn,nn)
# elif "sequencer" in env.roledefs and env.host in env.roledefs["sequencer"]:
# nn = nid.next();
# cmd = "./runsq -nid{} >> results{}.out 2>&1".format(nn,nn)
# else:
# with color('error'):
# puts("host does not belong to any roles",show_prefix=True)
# puts("current roles:",show_prefix=True)
# puts(pprint.pformat(env.roledefs,depth=3),show_prefix=False)
#
# try:
# res = run("echo $SCHEMA_PATH")
# if not env.dry_run:
# run(cmd)
# except CommandTimeout:
# pass
# except NetworkError:
# pass
return True
@task
@parallel
def get_results(outfiles,nids):
succeeded = True
# if env.same_node:
for n in nids[env.host]:
if env.shmem:
rem_path=os.path.join(env.rem_homedir,"/dev/shm/results{}.out".format(n))
else:
rem_path=os.path.join(env.rem_homedir,"results{}.out".format(n))
loc_path=os.path.join(env.result_dir, "{}_{}".format(n,outfiles[env.host]))
with settings(warn_only=True):
if not env.dry_run:
res1 = get(remote_path=rem_path, local_path=loc_path)
succeeded = succeeded and res1.succeeded
with settings(warn_only=True):
if not env.dry_run:
if env.shmem:
res2 = run("rm -f /dev/shm/results*.out")
else:
res2 = run("rm -f results*.out")
succeeded = succeeded and res2.succeeded
# else:
# nid = env.hosts.index(env.host)
# rem_path=os.path.join(env.rem_homedir,"results.out")
# loc_path=os.path.join(env.result_dir, outfiles[env.host])
# with settings(warn_only=True):
# if not env.dry_run:
# res1 = get(remote_path=rem_path, local_path=loc_path)
# res2 = run("rm -f results.out")
# succeeded = res1.succeeded and res2.succeeded
return succeeded
@task
@hosts('localhost')
def write_config(cfgs):
dbx_cfg = os.path.join(env.local_path,"config.h")
f = open(dbx_cfg,'r')
lines = f.readlines()
f.close()
with open(dbx_cfg,'w') as f_cfg:
for line in lines:
found_cfg = False
for c in cfgs:
found_cfg = re.search("#define "+c + "\t",line) or re.search("#define "+c + " ",line);
if found_cfg:
f_cfg.write("#define " + c + " " + str(cfgs[c]) + "\n")
break
if not found_cfg: f_cfg.write(line)
@task
@hosts('localhost')
def write_ifconfig(roles,exp,rfile):
with color():
puts("writing roles to the ifconfig file:",show_prefix=True)
puts(pprint.pformat(roles,depth=3),show_prefix=False)
nids = {}
exps = {}
rfiles = {}
nid = 0
print(roles)
with open("ifconfig.txt",'w') as f:
for server in roles['servers']:
f.write(server + "\n")
if server not in nids:
nids[server] = [nid]
exps[server] = [exp]
rfiles[server] = [rfile]
else:
nids[server].append(nid)
exps[server].append(exp)
rfiles[server].append(rfile)
nid += 1
for client in roles['clients']:
f.write(client + "\n")
if client not in nids:
nids[client] = [nid]
exps[client] = [exp]
rfiles[client] = [rfile]
else:
nids[client].append(nid)
exps[client].append(exp)
rfiles[client].append(rfile)
nid += 1
# if "sequencer" in roles:
# assert CC_ALG == "CALVIN"
# sequencer = roles['sequencer'][0]
# f.write(sequencer + "\n")
# nids[sequencer] = [nid]
# exps[sequencer] = [exp]
# nid += 1
return nids,exps,rfiles
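# The ifconfig.txt written above holds one hostname/IP per line, servers
# first and then clients; a node's id is simply its line index. Illustrative
# 2-server/1-client file (hypothetical addresses):
#   10.0.0.1    <- nid 0 (server)
#   10.0.0.2    <- nid 1 (server)
#   10.0.0.3    <- nid 2 (client)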
@task
@hosts('localhost')
def assign_roles(server_cnt,client_cnt,append=False):
if env.same_node:
servers=[env.hosts[0]] * server_cnt
clients=[env.hosts[0]] * client_cnt
elif env.cram:
ncnt = max(max(server_cnt,client_cnt) / 8,1)
servers = []
clients = []
for r in range(server_cnt):
servers.append(env.hosts[r%ncnt])
for r in range(client_cnt):
clients.append(env.hosts[r%ncnt])
else:
# if len(env.hosts) < server_cnt+client_cnt:
# with color("error"):
# puts("ERROR: not enough hosts to run experiment",show_prefix=True)
# puts("\tHosts required: {}".format(server_cnt+client_cnt))
# puts("\tHosts available: {} ({})".format(len(env.hosts),pprint.pformat(env.hosts,depth=3)))
# assert len(env.hosts) >= server_cnt+client_cnt
servers=env.hosts[0:server_cnt]
if env.overlap:
clients=env.hosts[0:client_cnt]
else:
clients=env.hosts[server_cnt:server_cnt+client_cnt]
new_roles = {}
# if CC_ALG == 'CALVIN':
# sequencer = env.hosts[server_cnt+client_cnt:server_cnt+client_cnt+1]
if env.roledefs is None or len(env.roledefs) == 0:
env.roledefs={}
env.roledefs['clients']=[]
env.roledefs['servers']=[]
env.roledefs['sequencer']=[]
if append:
env.roledefs['clients'].extend(clients)
env.roledefs['servers'].extend(servers)
# if CC_ALG == 'CALVIN':
# env.roledefs['sequencer'].extend(sequencer)
else:
env.roledefs['clients']=clients
env.roledefs['servers']=servers
# if CC_ALG == 'CALVIN':
# env.roledefs['sequencer']=sequencer
new_roles['clients']=clients
new_roles['servers']=servers
# if CC_ALG == 'CALVIN':
# new_roles['sequencer']=sequencer
with color():
puts("Assigned the following roles:",show_prefix=True)
puts(pprint.pformat(new_roles,depth=3) + "\n",show_prefix=False)
puts("Updated env roles:",show_prefix=True)
puts(pprint.pformat(env.roledefs,depth=3) + "\n",show_prefix=False)
return new_roles
def get_good_hosts():
# good_hosts = []
set_hosts()
good_hosts = env.hosts
# Find and skip bad hosts
ping_results = execute(ping)
for host in ping_results:
if ping_results[host] == 0:
# good_hosts.append(host)
continue
else:
with color("warn"):
puts("Skipping non-responsive host {}".format(host),show_prefix=True)
good_hosts.remove(host)
return good_hosts
@task
@hosts('localhost')
def compile_binary(fmt,e):
ecfgs = get_cfgs(fmt,e)
cfgs = dict(configs)
for c in dict(ecfgs):
if c not in CONFIG_PARAMS and c in FLAG:
del ecfgs[c]
cfgs.update(ecfgs)
# if env.remote and not env.same_node:
if env.cluster == "ec2":
cfgs["ENVIRONMENT_EC2"]="true"
else:
cfgs["ENVIRONMENT_EC2"]="false"
if env.cluster == "istc":
cfgs["CORE_CNT"]=64
else:
cfgs["CORE_CNT"]=8
if env.remote:
cfgs["TPORT_TYPE"]="TCP"
if env.shmem:
cfgs["SHMEM_ENV"]="true"
else:
cfgs["SHMEM_ENV"]="false"
execute(write_config,cfgs)
execute(compile)
# output_f = get_outfile_name(cfgs,fmt,env.hosts)
output_f = get_execfile_name(cfgs,fmt,env.hosts)
local("cp rundb binaries/{}rundb".format(output_f))
local("cp runcl binaries/{}runcl".format(output_f))
# local("cp runsq binaries/{}runsq".format(output_f))
local("cp config.h binaries/{}cfg".format(output_f))
if EXECUTE_EXPS:
cmd = "mkdir -p {}".format(env.result_dir)
local(cmd)
        set_hosts()  # TODO: verify whether this host refresh is needed here
execute(copy_binaries,output_f)
#cmd = "cp config.h {}.cfg".format(os.path.join(env.result_dir,output_f))
#local(cmd)
@task
@hosts('localhost')
def compile_binaries(exps):
local("mkdir -p binaries")
local("rm -rf binaries/*")
fmt,experiments = experiment_map[exps]()
# for e in experiments:
# execute(compile_binary,fmt,e)
@task
@hosts('localhost')
def check_binaries(exps):
# if not os.path.isdir("binaries"):
# execute(compile_binaries,exps)
# return
# if len(glob.glob("binaries/*")) == 0:
# execute(compile_binaries,exps)
# return
if not os.path.isdir("binaries") or len(glob.glob("binaries/*")) == 0:
local("mkdir -p binaries")
local("rm -rf binaries/*")
fmt,experiments = experiment_map[exps]()
for e in experiments:
cfgs = get_cfgs(fmt,e)
# if env.remote and not env.same_node:
if env.cluster == "ec2":
cfgs["ENVIRONMENT_EC2"]="true"
else:
cfgs["ENVIRONMENT_EC2"]="false"
if env.cluster == "istc":
cfgs["CORE_CNT"]=64
else:
cfgs["CORE_CNT"]=8
if env.remote:
cfgs["TPORT_TYPE"]="TCP"
if env.shmem:
cfgs["SHMEM_ENV"]="true"
else:
cfgs["SHMEM_ENV"]="false"
# output_f = get_outfile_name(cfgs,fmt,env.hosts)
output_f = get_execfile_name(cfgs,fmt,env.hosts)
executables = glob.glob("{}*".format(os.path.join("binaries",output_f)))
has_rundb,has_runcl,has_config=False,False,False
# has_rundb,has_runcl,has_runsq,has_config=False,False,False,False
for executable in executables:
if executable.endswith("rundb"):
has_rundb = True
elif executable.endswith("runcl"):
has_runcl = True
# elif executable.endswith("runsq"):
# has_runsq = True
elif executable.endswith("cfg"):
has_config = True
# if not has_rundb or not has_runcl or not has_runsq or not has_config:
if not has_rundb or not has_runcl or not has_config:
execute(compile_binary,fmt,e)
@task
@hosts(['localhost'])
def run_exp_old(exps,network_test=False,delay=''):
if env.shmem:
schema_path = "/dev/shm/"
else:
schema_path = "{}/".format(env.rem_homedir)
good_hosts = []
if not network_test and EXECUTE_EXPS:
good_hosts = get_good_hosts()
with color():
puts("good host list =\n{}".format(pprint.pformat(good_hosts,depth=3)),show_prefix=True)
execute(copy_schema)
fmt,experiments = experiment_map[exps]()
batch_size = 0
nids = {}
outfiles = {}
exps = {}
runfiles = {}
for e in experiments:
print(e)
cfgs = get_cfgs(fmt,e)
output_fbase = get_outfile_name(cfgs,fmt,env.hosts)
output_exec_fname = get_execfile_name(cfgs,fmt,env.hosts)
output_f = output_fbase + STRNOW
last_exp = experiments.index(e) == len(experiments) - 1
skip_exp = False
        # Check whether this experiment has already been run in this batch
if SKIP:
if len(glob.glob('{}*{}*.out'.format(env.result_dir,output_fbase))) > 0:
with color("warn"):
puts("experiment exists in results folder... skipping",show_prefix=True)
if last_exp:
skip_exp = True
else:
continue
global CC_ALG
CC_ALG = cfgs["CC_ALG"]
if EXECUTE_EXPS:
cfg_srcpath = "{}cfg".format(os.path.join("binaries",output_exec_fname))
cfg_destpath = "{}.cfg".format(os.path.join(env.result_dir,output_exec_fname+STRNOW))
local("cp {} {}".format(cfg_srcpath,cfg_destpath))
nnodes = cfgs["NODE_CNT"]
nclnodes = cfgs["CLIENT_NODE_CNT"]
try:
ntotal = nnodes + nclnodes
except TypeError:
nclnodes = cfgs[cfgs["CLIENT_NODE_CNT"]]
ntotal = nnodes + nclnodes
# if CC_ALG == 'CALVIN':
# ntotal += 1
if env.same_node:
ntotal = 1
if env.overlap:
ntotal = max(nnodes,nclnodes)
if env.cram:
ntotal = max(max(nnodes,nclnodes)/8,1)
if env.remote:
if not network_test:
set_hosts(good_hosts)
# if ntotal > len(env.hosts):
# msg = "Not enough nodes to run experiment!\n"
# msg += "\tRequired nodes: {}, ".format(ntotal)
# msg += "Actual nodes: {}".format(len(env.hosts))
# with color():
# puts(msg,show_prefix=True)
# cmd = "rm -f config.h {}".format(cfg_destpath)
# local(cmd)
# continue
if not skip_exp:
if env.batch_mode:
# If full, execute all exps in batch and reset everything
full = (batch_size + ntotal) > len(env.hosts)
if full:
if env.cluster != 'istc' and not env.dry_run:
# Sync clocks before each experiment
execute(sync_clocks)
with color():
puts("Batch is full, deploying batch...{}/{}".format(batch_size,len(good_hosts)),show_prefix=True)
with color("debug"):
puts(pprint.pformat(outfiles,depth=3),show_prefix=False)
set_hosts(env.hosts[:batch_size])
with color():
puts("Starttime: {}".format(datetime.datetime.now().strftime("%H:%M:%S")),show_prefix=True)
execute(deploy,schema_path,nids,exps,runfiles,fmt)
with color():
puts("Endtime: {}".format(datetime.datetime.now().strftime("%H:%M:%S")),show_prefix=True)
execute(get_results,outfiles,nids)
if not env.dry_run:
good_hosts = get_good_hosts()
env.roledefs = None
batch_size = 0
nids = {}
exps = {}
runfiles = {}
outfiles = {}
set_hosts(good_hosts)
else:
with color():
puts("Adding experiment to current batch: {}".format(output_f), show_prefix=True)
machines = env.hosts[batch_size : batch_size + ntotal]
batch_size += ntotal
else:
machines = env.hosts[:ntotal]
set_hosts(machines)
new_roles=execute(assign_roles,nnodes,nclnodes,append=env.batch_mode)[env.host]
new_nids,new_exps,new_runfiles = execute(write_ifconfig,new_roles,e,output_exec_fname)[env.host]
nids.update(new_nids)
exps.update(new_exps)
runfiles.update(new_runfiles)
for host,nid in new_nids.iteritems():
outfiles[host] = "{}.out".format(output_f)
# if env.same_node:
# outfiles[host] = "{}.out".format(output_f)
# else:
# outfiles[host] = "{}_{}.out".format(nid[0],output_f)
print(nids)
if cfgs["WORKLOAD"] == "TPCC":
schema = "benchmarks/TPCC_full_schema.txt"
# schema = "benchmarks/TPCC_short_schema.txt"
elif cfgs["WORKLOAD"] == "YCSB":
schema = "benchmarks/YCSB_schema.txt"
elif cfgs["WORKLOAD"] == "PPS":
schema = "benchmarks/PPS_schema.txt"
            # NOTE: copy_files will fail if any (possibly stray) processes
            # are still running one of the executables. Set the 'kill'
            # flag in environment.py to true to kill these processes. This
            # is useful for running real experiments but dangerous when both
            # of us are debugging...
# execute(copy_files,schema,output_exec_fname)
execute(copy_ifconfig)
        if not env.batch_mode or (last_exp and len(exps) > 0):
if env.batch_mode:
set_hosts(good_hosts[:batch_size])
puts("Deploying last batch...{}/{}".format(batch_size,len(good_hosts)),show_prefix=True)
else:
print("Deploying: {}".format(output_f))
if env.cluster != 'istc':
# Sync clocks before each experiment
print("Syncing Clocks...")
execute(sync_clocks)
if delay != '':
execute(set_delay,delay=delay)
with color():
puts("Starttime: {}".format(datetime.datetime.now().strftime("%H:%M:%S")),show_prefix=True)
execute(deploy,schema_path,nids,exps,runfiles,fmt)
with color():
puts("Endtime: {}".format(datetime.datetime.now().strftime("%H:%M:%S")),show_prefix=True)
if delay != '':
execute(reset_delay)
execute(get_results,outfiles,nids)
if not env.dry_run:
good_hosts = get_good_hosts()
set_hosts(good_hosts)
batch_size = 0
nids = {}
exps = {}
outfiles = {}
env.roledefs = None
else:
pids = []
print("Deploying: {}".format(output_f))
for n in range(ntotal):
if n < nnodes:
cmd = "./rundb -nid{}".format(n)
elif n < nnodes+nclnodes:
cmd = "./runcl -nid{}".format(n)
# elif n == nnodes+nclnodes:
# assert(CC_ALG == 'CALVIN')
# cmd = "./runsq -nid{}".format(n)
else:
                    assert False
print(cmd)
cmd = shlex.split(cmd)
ofile_n = "{}{}_{}.out".format(env.result_dir,n,output_f)
ofile = open(ofile_n,'w')
p = subprocess.Popen(cmd,stdout=ofile,stderr=ofile)
pids.insert(0,p)
for n in range(ntotal):
pids[n].wait()
def succeeded(outcomes):
for host,outcome in outcomes.iteritems():
if not outcome:
return False
return True
@task
@parallel
def ping():
with settings(warn_only=True):
res=local("ping -w8 -c1 {}".format(env.host),capture=True)
        assert res is not None
return res.return_code
@task
@hosts('localhost')
def ec2_run_instances(
dry_run="False",
image_id="ami-d05e75b8",
count="12",
security_group="dist-sg",
instance_type="m4.2xlarge",
# instance_type="m4.xlarge",
key_name="devenv-key",
):
opt = "--{k} {v} ".format
cmd = "aws ec2 run-instances "
if dry_run == "True":
cmd += "--dry-run "
cmd += opt(k="image-id",v=image_id)
cmd += opt(k="count",v=count)
cmd += opt(k="security-groups",v=security_group)
cmd += opt(k="instance-type",v=instance_type)
cmd += opt(k="key-name",v=key_name)
local(cmd)
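# With the defaults above, the assembled command is:
#   aws ec2 run-instances --image-id ami-d05e75b8 --count 12 \
#     --security-groups dist-sg --instance-type m4.2xlarge --key-name devenv-key
# "--dry-run" is added first among the options only when dry_run == "True".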
@task
@hosts('localhost')
def ec2_run_spot_instances(
dry_run="False",
image_id="ami-d05e75b8",
price="0.10",
count="12",
security_group="dist-sg",
instance_type="m4.2xlarge",
# instance_type="m4.xlarge",
key_name="devenv-key",
):
opt = "--{k} {v} ".format
cmd = "aws ec2 request-spot-instances "
if dry_run == "True":
cmd += "--dry-run "
# cmd += opt(k="ami-id",v=image_id)
cmd += opt(k="spot-price",v=price)
cmd += opt(k="instance-count",v=count)
# cmd += opt(k="instance-type",v=instance_type)
# cmd += opt(k="group",v=security_group)
# cmd += opt(k="key",v=key_name)
cmd += opt(k="launch-specification",v="file://ec2_specification.json")
local(cmd)
@task
@hosts('localhost')
def ec2_get_status():
cmd = "aws ec2 describe-instance-status --query 'InstanceStatuses[*].{InstanceId:InstanceId,SystemStatus:SystemStatus.Status,InstanceStatus:InstanceStatus.Status}'"
res = local(cmd,capture=True)
statuses = ast.literal_eval(res)
for status in statuses:
if status['SystemStatus'] != "ok":
print("{}: ERROR: bad system status {}".format(status['InstanceId'],status['SystemStatus']))
sys.exit(1)
elif status['InstanceStatus'] == "initializing":
print("{}: ERROR: still initializing...".format(status['InstanceId']))
sys.exit(1)
elif status['InstanceStatus'] != "ok":
print("{}: ERROR: bad instance status {}".format(status['InstanceId'],status['InstanceStatus']))
sys.exit(1)
print("READY!")
return 0
@task
@hosts('localhost')
def ec2_write_ifconfig():
cmd = "aws ec2 describe-instances --query 'Reservations[*].Instances[*].{ID:InstanceId,IP:PublicIpAddress,TYPE:InstanceType}'"
res = local(cmd,capture=True)
    # Skip any previously terminated VMs (the terminated VM state remains visible for 1 hour)
res = res.replace("null","\"\"")
ip_info = ast.literal_eval(res)
with open("ec2_ifconfig.txt","w") as f:
for entry in ip_info:
for ip in entry:
if ip["IP"] != "":
f.write(ip["IP"] + "\n")
@task
@hosts('localhost')
def ec2_terminate_instances():
cmd = "aws ec2 describe-instances --query 'Reservations[*].Instances[*].InstanceId'"
res = local(cmd,capture=True)
ids = ast.literal_eval(res)
id_list = []
for id_entry in ids:
for id in id_entry:
id_list.append(id)
cmd = "aws ec2 terminate-instances --instance-ids {}".format(" ".join(id_list))
res = local(cmd,capture=True)
print(res)
@contextmanager
def color(level="info"):
    if level not in COLORS:
level = "info"
print("\033[%sm" % COLORS[level],end="")
yield
print("\033[0m",end="")
@task
@hosts(['localhost'])
def run_exp(exps,network_test=False,delay=''):
if env.shmem:
schema_path = "/dev/shm/"
else:
schema_path = "{}/".format(env.rem_homedir)
good_hosts = []
if not network_test and EXECUTE_EXPS:
good_hosts = get_good_hosts()
with color():
puts("good host list =\n{}".format(pprint.pformat(good_hosts,depth=3)),show_prefix=True)
fmt,experiments = experiment_map[exps]()
batch_size = 0
nids = {}
outfiles = {}
    exps = {}
    runfiles = {}
if SKIP:
for e in experiments[:]:
cfgs = get_cfgs(fmt,e)
output_fbase = get_outfile_name(cfgs,fmt,env.hosts)
if len(glob.glob('{}*{}*.out'.format(env.result_dir,output_fbase))) > 0:
with color("warn"):
puts("experiment exists in results folder... skipping",show_prefix=True)
experiments.remove(e)
experiments.sort(key=lambda x: x[fmt.index("NODE_CNT")] + x[fmt.index("CLIENT_NODE_CNT")],reverse=True)
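    # Greedy packing (sketch of the strategy below): experiments are sorted by
    # total node count, largest first, then drained into rounds; a round keeps
    # accepting experiments while its summed node requirement still fits in
    # len(env.hosts), and each full round is deployed as one batch.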
# Fill experiment pool
    while len(experiments) > 0:
round_exps = []
batch_total = 0
for e in experiments[:]:
cfgs = get_cfgs(fmt,e)
nnodes = cfgs["NODE_CNT"]
nclnodes = cfgs["CLIENT_NODE_CNT"]
ccalg = cfgs["CC_ALG"]
ntotal = cfgs["NODE_CNT"] + cfgs["CLIENT_NODE_CNT"]
# if ccalg == 'CALVIN':
# ntotal += 1
if env.same_node:
ntotal = 1
if env.overlap:
ntotal = max(nnodes,nclnodes)
if env.cram:
ntotal = max(max(nnodes,nclnodes)/8,1)
if ntotal > len(env.hosts):
msg = "Not enough nodes to run experiment!\n"
msg += "\tRequired nodes: {}, ".format(ntotal)
msg += "Actual nodes: {}".format(len(env.hosts))
with color():
puts(msg,show_prefix=True)
experiments.remove(e)
continue
if (batch_total + ntotal) > len(env.hosts):
continue
batch_total += ntotal
round_exps.append(e)
experiments.remove(e)
if not EXECUTE_EXPS: continue
batch_size = 0
for e in round_exps:
set_hosts(good_hosts)
cfgs = get_cfgs(fmt,e)
global CC_ALG
nnodes = cfgs["NODE_CNT"]
nclnodes = cfgs["CLIENT_NODE_CNT"]
CC_ALG = cfgs["CC_ALG"]
ntotal = cfgs["NODE_CNT"] + cfgs["CLIENT_NODE_CNT"]
# if ccalg == 'CALVIN':
# ntotal += 1
if env.same_node:
ntotal = 1
if env.overlap:
ntotal = max(nnodes,nclnodes)
if env.cram:
ntotal = max(max(nnodes,nclnodes)/8,1)
output_fbase = get_outfile_name(cfgs,fmt,env.hosts)
output_exec_fname = get_execfile_name(cfgs,fmt,env.hosts)
output_f = output_fbase + STRNOW
cfg_srcpath = "{}cfg".format(os.path.join("binaries",output_exec_fname))
cfg_destpath = "{}.cfg".format(os.path.join(env.result_dir,output_exec_fname+STRNOW))
local("cp {} {}".format(cfg_srcpath,cfg_destpath))
with color():
puts("Adding experiment to current batch: {}".format(output_f), show_prefix=True)
machines = env.hosts[batch_size : batch_size + ntotal]
batch_size += ntotal
set_hosts(machines)
new_roles=execute(assign_roles,nnodes,nclnodes,append=env.batch_mode)[env.host]
            new_nids,new_exps,new_runfiles = execute(write_ifconfig,new_roles,e,output_exec_fname)[env.host]
nids.update(new_nids)
            exps.update(new_exps)
            runfiles.update(new_runfiles)
for host,nid in new_nids.iteritems():
outfiles[host] = "{}.out".format(output_f)
if cfgs["WORKLOAD"] == "TPCC":
schema = "benchmarks/TPCC_full_schema.txt"
# schema = "benchmarks/TPCC_short_schema.txt"
elif cfgs["WORKLOAD"] == "YCSB":
schema = "benchmarks/YCSB_schema.txt"
elif cfgs["WORKLOAD"] == "PPS":
schema = "benchmarks/PPS_schema.txt"
            # NOTE: copy_files will fail if any (possibly stray) processes
            # are still running one of the executables. Set the 'kill'
            # flag in environment.py to true to kill these processes. This
            # is useful for running real experiments but dangerous when both
            # of us are debugging...
# execute(copy_files,schema,output_exec_fname)
execute(copy_ifconfig)
if env.remote:
set_hosts(good_hosts[:batch_size])
if env.cluster != 'istc' and not env.dry_run:
# Sync clocks before each experiment
execute(sync_clocks)
with color():
puts("Batch is full, deploying batch...{}/{}".format(batch_size,len(good_hosts)),show_prefix=True)
with color("debug"):
puts(pprint.pformat(outfiles,depth=3),show_prefix=False)
with color():
puts("Starttime: {}".format(datetime.datetime.now().strftime("%H:%M:%S")),show_prefix=True)
execute(deploy,schema_path,nids,exps,runfiles,fmt)
with color():
puts("Endtime: {}".format(datetime.datetime.now().strftime("%H:%M:%S")),show_prefix=True)
execute(get_results,outfiles,nids)
good_hosts = get_good_hosts()
batch_size = 0
            nids = {}
            exps = {}
            runfiles = {}
            outfiles = {}
set_hosts(good_hosts)
env.roledefs = None
de8b266bc66642e780d1f515de7639ab0386bd85 | 2,690 | py | Python | scheduler.py | shuaiqi361/a-PyTorch-Tutorial-to-Object-Detection | 5706b82ff67911864967aa72adf7e4a994c7ec89 | ["MIT"]
import json
import os
import torch
import math
def adjust_learning_rate(optimizer, scale):
"""
Scale learning rate by a specified factor.
:param optimizer: optimizer whose learning rate must be shrunk.
:param scale: factor to multiply learning rate with.
"""
for param_group in optimizer.param_groups:
param_group['lr'] = param_group['lr'] * scale
print("DECAYING learning rate, the new LR is %f" % (optimizer.param_groups[1]['lr'],))
def warm_up_learning_rate(optimizer, rate=5.):
"""
Scale learning rate by a specified factor.
:param rate:
:param optimizer: optimizer whose learning rate must be shrunk.
:param scale: factor to multiply learning rate with.
"""
for param_group in optimizer.param_groups:
param_group['lr'] = param_group['lr'] * rate
print("WARMING up learning rate, the new LR is %f" % (optimizer.param_groups[1]['lr'],))
class WarmUpScheduler(object):
def __init__(self, target_lr, n_steps, optimizer, types='exp'):
self.target_lr = target_lr
self.n_steps = n_steps
self.optimizer = optimizer
self.init_scheduler(types)
def init_scheduler(self, types):
if types.lower() == 'exp':
self.rate = 2.
self.init_lr = self.target_lr / (self.rate ** self.n_steps)
for param_group in self.optimizer.param_groups:
param_group['lr'] = param_group['lr'] / (self.rate ** self.n_steps)
print('EXP Warming up lr from {:.6f}'.format(self.init_lr))
else:
self.init_lr = self.target_lr * 0.1
self.rate = (self.target_lr - self.init_lr) / self.n_steps
for param_group in self.optimizer.param_groups:
param_group['lr'] = self.init_lr
print('Linear Warming up lr from {:.6f}'.format(self.init_lr))
def update(self, types='exp'):
if types.lower() == 'exp':
if self.n_steps > 0:
for param_group in self.optimizer.param_groups:
param_group['lr'] = param_group['lr'] * self.rate
# print(self.n_steps, self.target_lr, self.rate)
print('New lr {:.6f}'.format(self.target_lr / (self.rate ** (self.n_steps - 1))))
else:
return
else:
if self.n_steps > 0:
for param_group in self.optimizer.param_groups:
param_group['lr'] = param_group['lr'] + (self.target_lr - self.init_lr) / self.n_steps
print('New lr {:.6f}'.format(self.target_lr - self.rate * (self.n_steps - 1)))
else:
return
self.n_steps -= 1
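if __name__ == "__main__":
    # Usage sketch (added for illustration; not part of the original module):
    # warm a toy SGD optimizer up to target_lr over 5 exponential steps.
    model = torch.nn.Linear(10, 2)
    optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
    scheduler = WarmUpScheduler(target_lr=1e-3, n_steps=5, optimizer=optimizer, types='exp')
    for _ in range(5):
        scheduler.update(types='exp')  # lr doubles each step until it reaches target_lr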
de8c915237260239c036a5cbacb8018944e669da | 8,774 | py | Python | lego_sorter.py | bmleedy/lego_sorter | 0164bc0042127f255590d1883b5edadfba781537 | ["BSD-2-Clause"]
#!/bin/python3
"""This is the top-level program to operate the Raspberry Pi based lego sorter."""
# Things I can set myself: AWB, Brightness, crop, exposure_mode,
# exposure_speed,iso (sensitivity), overlays, preview_alpha,
# preview_window, saturation, shutter_speed,
# Thought for future enhancement: at start time, calibrate against
# a background image. Possibly only evaluate pixels which
# deviate significantly in hue from the original background image.
# Thoughts on controlling the air valves:
# I'm going to take the simple approach first, and hopefully it's sufficient:
# 1. Detect different colors in zones in front of their respective valves
# 2. If enough of the first color is detected, puff it into that color's bin
# 3. Otherwise, let it ride through as many detection zones as
# necessary until it's detected or falls off the track
# Upsides:
# 1. It's dead simple and reactive. No state needed to manage
# 2. No timing tuning needed for detect-then-wait method (source of failure)
# 3. No tracking needed (source of failure/flakiness)
# 4. Less memory/CPU intensive
#
# Downsides:
# 1. A multi-color part could slip past without enough "density" of any one color
# 2. More detection zones means more potential variation in the
# lighting - same part could look yellow in one zone and orange
# in the next, causing misses
import os
import json
import time
from datetime import datetime
import cv2
from picamera import PiCamera
from picamera.array import PiRGBArray
import numpy as np
# GPIO Imports
import RPi.GPIO as GPIO
# constants for tweaking
WINDOW_NAME = "Recognition"
SCALE_PERCENT = 20
PIXEL_THRESHOLD = 50
RANGE_PADDING = 10
SHOW_OVERLAY = True
COLOR_COLUMN_WIDTH = 10
OUTPUT_VIDEO = False
VIDEO_NAME = "output.avi"
LEGO_CONFIG_NAME = "legos.config.json"
# setup GPIO (https://pythonhosted.org/RPIO/)
VALVE_PIN = 18
GPIO.setmode(GPIO.BCM)
GPIO.setup(VALVE_PIN, GPIO.OUT)
GPIO.output(VALVE_PIN, GPIO.HIGH)
# Detection box location
XMIN = 36
XMAX = 85
YMIN = 96
YMAX = 121
SHOW_BOX = True
# todo: fork data to a logfile in /var
class Lego:
"""This is the class for a lego object which we want to detect"""
name = "undefined"
upper_hsv = [0, 0, 0]
lower_hsv = [0, 0, 0]
display_bgr = [0, 0, 0]
recognition_mask = []
recognition_indices = []
pixel_count = 0
jet_number = -1 #default to no jet assigned
recognition_box = [(0, 0), (0, 0)] # (XMIN,YMIN),(XMAX,YMAX)
def __init__(self, lconfig, recognition_box):
self.name = lconfig["name"]
self.upper_hsv = lconfig["upperhsv"]
self.lower_hsv = lconfig["lowerhsv"]
self.display_bgr = lconfig["display_bgr"]
self.recognition_box = recognition_box
self.jet_number = lconfig["jet_number"]
def recognize_at(self, hsv_image, box=None):
""" run recognition over an area of an image to determine how
much lego I think is there"""
if box is None:
box = self.recognition_box
# Super simple approach:
# inside a specific box, count the number of pixels I think are each color
self.recognition_mask = cv2.inRange(
hsv_image,
np.array(self.lower_hsv),
np.array(self.upper_hsv))
# find where the masks found the colors
# (making a trade-off here because I'm doing recognition on the whole image,
# then only paring down here)
self.recognition_indices = np.where(
self.recognition_mask[box[0][0]:box[1][0], # XMIN:XMAX
box[0][1]:box[1][1]] > 0) # YMIN: YMAX
self.pixel_count = self.recognition_indices[0].size
def filter_mask(self, filter_params=None):
""" todo: we should be able to filter out less-contiguous pixels
(this would be a particle filter?)"""
# Setup the display window
if SHOW_OVERLAY:
cv2.namedWindow(WINDOW_NAME, cv2.WINDOW_NORMAL)
cv2.resizeWindow(WINDOW_NAME, 800, 800)
# Load jets we want to use
jets = []
with open('jets.config.json') as json_file:
jets = json.load(json_file)
# Load legos we want to recognize
legos = []
with open('legos.config.json') as json_file:
config = json.load(json_file)
for lego_config in config:
if((lego_config["jet_number"] >= 0) and
(lego_config["jet_number"] < len(jets))):
legos.append(
Lego(
lconfig=lego_config,
recognition_box=jets[lego_config["jet_number"]]["bounding_box_corners"],
)
)
else:
legoname = lego_config["name"]
print(f"Lego color {legoname} disabled")
# Run the camera
with PiCamera(
camera_num=0, # default
stereo_mode='none', # default
stereo_decimate=False, # default
resolution=(160, 96), # default (10% of full resolution of 1600x900)
framerate=10, # 10 fps, default is 30
sensor_mode=5) as camera: # default=1, 5 is full FOV with 2x2 binning
#camera.awb_mode = 'off' # turn off AWB because I will control lighting
camera.awb_gains = (1.184, 2.969) # Set constant AWB (tuple for red and blue, or constant)
# time.sleep(2)
print("{datetime.now()} Camera setup complete.")
print(f"{datetime.now()} AWB Gains are {camera.awb_gains}")
# time.sleep(3)
# Setup the buffer into which we'll capture the images
cam_image = PiRGBArray(camera)
if OUTPUT_VIDEO:
cap = cv2.VideoCapture(0)
fourcc = cv2.VideoWriter_fourcc(*'XVID')
out = cv2.VideoWriter('output.avi', fourcc, 10.0, (160, 96))
# start the preview window in the top left corner
camera.start_preview(resolution=(160, 96),
window=(40, 40, 320, 192),
fullscreen=False)
camera.preview_alpha = 200
print("{datetime.now()} Camera preview started")
# continuously capture files
    last_loop_time = int(round(time.time() * 1000))  # milliseconds, to match the loop-time math below
for i, filename in enumerate(
camera.capture_continuous(
cam_image,
format='bgr',
use_video_port=True, # faster, but less good images
resize=None # resolution was specified above
)):
# clear the screen
os.system('clear')
# load the image
image = cam_image.array.copy()
image_hsv = cv2.cvtColor(image, cv2.COLOR_BGR2HSV)
# Run recognition on the same image for each lego type
for lego in legos:
lego.recognize_at(image_hsv)
all_pixel_counts = 0
for lego in legos:
all_pixel_counts += lego.pixel_count
print(f"{datetime.now()} {all_pixel_counts} Pixels detected")
print_string = ""
for lego in legos:
print_string += f"{lego.name:^{COLOR_COLUMN_WIDTH}}|"
print(print_string)
print_string = ""
for lego in legos:
print_string += f"{lego.pixel_count:^{COLOR_COLUMN_WIDTH}}|"
print(print_string)
for lego in legos:
yxmin = (jets[lego.jet_number]["bounding_box_corners"][0][1],
jets[lego.jet_number]["bounding_box_corners"][0][0])
yxmax = (jets[lego.jet_number]["bounding_box_corners"][1][1],
jets[lego.jet_number]["bounding_box_corners"][1][0])
if lego.pixel_count > PIXEL_THRESHOLD:
GPIO.output(jets[lego.jet_number]["gpio_pin"], GPIO.LOW)
print(f"{lego.name} RECOGNIZED! {lego.pixel_count} pixels")
if SHOW_BOX:
cv2.rectangle(image, yxmin, yxmax, lego.display_bgr, 1)
else:
GPIO.output(jets[lego.jet_number]["gpio_pin"], GPIO.HIGH)
if SHOW_BOX:
cv2.rectangle(image, yxmin, yxmax, (0, 0, 0), 1)
if SHOW_OVERLAY:
for lego in legos:
image[lego.recognition_indices[0]+
jets[lego.jet_number]["bounding_box_corners"][0][0],
lego.recognition_indices[1]+
jets[lego.jet_number]["bounding_box_corners"][0][1]] = lego.display_bgr
cv2.waitKey(1)
cv2.imshow(WINDOW_NAME, image)
if OUTPUT_VIDEO:
out.write(image)
# display the loop speed
now_time = int(round(time.time() * 1000))
print(f"Loop [{i}] completed in {now_time-last_loop_time}ms")
last_loop_time = now_time
# clear the buffers for the image
cam_image.truncate(0)
camera.stop_preview()
    if OUTPUT_VIDEO:
        out.release()
cv2.destroyAllWindows()
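# Sketch of the config files read above (illustrative values, inferred from
# the fields this script accesses; not copied from the original repo):
#
# legos.config.json:
#   [{"name": "red", "upperhsv": [10, 255, 255], "lowerhsv": [0, 100, 100],
#     "display_bgr": [0, 0, 255], "jet_number": 0}]
#
# jets.config.json:
#   [{"bounding_box_corners": [[36, 96], [85, 121]], "gpio_pin": 18}]
#
# bounding_box_corners follows the (XMIN, YMIN), (XMAX, YMAX) convention
# documented on the Lego class, and gpio_pin drives that jet's air valve.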
de8e8bcbbb73ed82dfadbb561cfbfe8bb447a711 | 5,017 | py | Python | networks/autoencoder/losses.py | annachen/dl_playground | f263dc16b4f0d91f6d33d94e678a9bbe2ace8913 | ["MIT"]
import tensorflow as tf
import numpy as np
EPS = 1e-5
def KL_monte_carlo(z, mean, sigma=None, log_sigma=None):
"""Computes the KL divergence at a point, given by z.
Implemented based on https://www.tensorflow.org/tutorials/generative/cvae
This is the part "log(p(z)) - log(q(z|x)) where z is sampled from
q(z|x).
Parameters
----------
z : (B, N)
mean : (B, N)
sigma : (B, N) | None
log_sigma : (B, N) | None
Returns
-------
KL : (B,)
"""
if log_sigma is None:
log_sigma = tf.math.log(sigma)
zeros = tf.zeros_like(z)
log_p_z = log_multivar_gaussian(z, mean=zeros, log_sigma=zeros)
log_q_z_x = log_multivar_gaussian(z, mean=mean, log_sigma=log_sigma)
return log_q_z_x - log_p_z
def KL(mean, sigma=None, log_sigma=None):
"""KL divergence between a multivariate Gaussian and Multivariate
N(0, I).
Implemented based on
https://mr-easy.github.io/2020-04-16-kl-divergence-between-2-gaussian-distributions/
Parameters
----------
mean : (B, N)
sigma : (B, N) | None
        The diagonal of a covariance matrix of a factorized Gaussian
distribution.
log_sigma : (B, N) | None
        The log diagonal of a covariance matrix of a factorized
Gaussian distribution.
One of `sigma` and `log_sigma` has to be passed in.
Returns
-------
KL : (B,)
"""
if sigma is None:
sigma = tf.math.exp(log_sigma)
if log_sigma is None:
log_sigma = tf.math.log(sigma)
u = tf.reduce_sum(mean * mean, axis=1) # (B,)
tr = tf.reduce_sum(sigma, axis=1) # (B,)
k = tf.cast(tf.shape(mean)[1], tf.float32) # scalar
lg = tf.reduce_sum(log_sigma, axis=1) # (B,)
return 0.5 * (u + tr - k - lg)
def log_multivar_gaussian(x, mean, sigma=None, log_sigma=None):
"""Computes log pdf at x of a multi-variate Gaussian.
Parameters
----------
x : (B, N)
mean : (B, N)
sigma : (B, N) | None
log_sigma: (B, N) | None
Returns
-------
log_p : (B,)
"""
if sigma is None:
sigma = tf.math.exp(log_sigma)
if log_sigma is None:
log_sigma = tf.math.log(sigma)
x = x - mean
upper = -0.5 * tf.reduce_sum(x * x / (sigma + EPS), axis=-1) # (B,)
k = tf.cast(tf.shape(x)[1], tf.float32)
log_pi = tf.math.log(np.pi * 2)
log_prod_sig = tf.reduce_sum(log_sigma, axis=1) # (B,)
    lower = 0.5 * (k * log_pi + log_prod_sig)  # log of the normalizing constant
    return upper - lower
def multivar_gaussian(x, mean, sigma):
"""Computes pdf at x of a multi-variate Gaussian
Parameters
----------
x : (B, N)
mean : (B, N)
sigma : (B, N)
        Represents the diagonal of a covariance matrix of a factorized
Gaussian distribution.
Returns
-------
p_x : (B,)
"""
x = x - mean
upper = tf.reduce_sum(x * x / sigma, axis=-1) # (B,)
upper = tf.math.exp(-0.5 * upper) # (B,)
pi_vec = tf.ones_like(x) * np.pi * 2 # (B, N)
lower = pi_vec * sigma
lower = tf.reduce_prod(lower, axis=-1) # (B,)
lower = tf.math.sqrt(lower)
return upper / lower
def reconstruction_cross_entropy(prediction, labels, is_logit=True):
"""Computes reconstruction error using cross entropy.
Parameters
----------
prediction : (B, ...)
labels : (B, ...)
Same dimensions as `prediction`
is_logit : bool
Whether the prediction is logit (pre-softmax / sigmoid)
Returns
-------
recons_error : (B,)
"""
assert is_logit, "Not Implemented"
cross_ent = tf.nn.sigmoid_cross_entropy_with_logits(
labels=tf.cast(labels, tf.float32),
logits=prediction,
)
batch_size = tf.shape(prediction)[0]
cross_ent = tf.reshape(cross_ent, (batch_size, -1))
return tf.reduce_mean(cross_ent, -1)
def reconstruction_mean_square_error(prediction, labels, is_logit=True):
"""Computes reconstruction error using mean-square-error.
Parameters
----------
prediction : (B, ...)
labels : (B, ...)
Same dimensions as `prediction`
is_logit : bool
        Whether the prediction is logit.
Returns
-------
recons_error : (B,)
"""
if is_logit:
prediction = tf.nn.sigmoid(prediction)
error = prediction - tf.cast(labels, tf.float32)
error = error * error
batch_size = tf.shape(labels)[0]
error = tf.reshape(error, (batch_size, -1))
return tf.reduce_mean(error, axis=1)
def reconstruction_loss(loss_type, prediction, labels, is_logit):
# `is_logit` : whether the input `recons` is logit
if loss_type == 'mse':
loss = reconstruction_mean_square_error(
prediction=prediction,
labels=labels,
is_logit=is_logit,
)
elif loss_type == 'ce':
loss = reconstruction_cross_entropy(
prediction=prediction,
labels=labels,
is_logit=is_logit,
)
else:
raise ValueError()
return loss
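if __name__ == "__main__":
    # Sanity sketch (added for illustration; not part of the original module):
    # averaged over many samples z ~ q(z|x), the Monte Carlo estimate should
    # approach the closed-form KL divergence.
    mean = tf.constant([[0.5, -0.3]])
    log_sigma = tf.constant([[0.2, -0.1]])
    estimates = []
    for _ in range(1000):
        eps = tf.random.normal(tf.shape(mean))
        z = mean + eps * tf.math.exp(0.5 * log_sigma)  # std = sqrt(sigma)
        estimates.append(KL_monte_carlo(z, mean, log_sigma=log_sigma))
    print("Monte Carlo estimate:", float(tf.reduce_mean(tf.stack(estimates))))
    print("Closed-form KL:      ", float(KL(mean, log_sigma=log_sigma)[0]))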
de9037d4a2c6b5fbbf0a5f4e22a9796ae161e5b0 | 4,288 | py | Python | Onderdelen/Hoofdscherm.py | RemcoTaal/IDP | 33959e29235448c38b7936f16c7421a24130e745 | ["MIT"]
from tkinter import *
import os, xmltodict, requests
def knop1():
    'Open the GUI for the current station'
global root
root.destroy()
os.system('Huidig_Station.py')
def knop2():
    'Open the GUI for another station'
global root
root.destroy()
os.system('Ander_Station.py')
def nl_to_eng():
    'When the English flag is pressed, the Dutch-language text changes to English'
button1['text'] = 'Departure\ntimes current station'
button2['text'] = 'Departure\ntimes other station'
welkomlabel['text'] = 'Welcome to NS'
photo['file'] = 'afbeeldingen\kaartlezerengels.PNG'
def eng_to_nl():
    'When the Dutch flag is pressed, the English-language text changes to Dutch'
button1['text'] = 'Actuele vertrektijden\nhuidig station'
button2['text'] = 'Actuele vertrektijden\nander station'
welkomlabel['text'] = 'Welkom bij NS'
photo['file'] = 'afbeeldingen\kaartlezer.PNG'
root = Tk() # create the window
root.attributes('-fullscreen',True) # open fullscreen
hoofdframe = Frame(master=root, # yellow section of the window
background='#FFD720',
width=1920,
height=980)
hoofdframe.pack(side='top', fill=X)
onderframe = Frame(master=root, # blue section of the window
background='#001F6A',
width=1920,
height=100)
onderframe.pack(side='bottom', fill=X)
welkomlabel = Label(master=hoofdframe, # "Welkom bij NS" text
text='Welkom bij NS',
foreground='#001F6A',
background='#FFD720',
font=('Helvetica', 60, 'bold'),
width=14,
height=3)
welkomlabel.place(x=615, y=50)
photo = PhotoImage(file='afbeeldingen\kaartlezer.PNG') # card reader photo
fotolabel = Label(master=hoofdframe, image=photo, borderwidth=-1)
fotolabel.place(x=745, y=320)
button1 = Button(master=hoofdframe, # button 1
text="Actuele vertrektijden\nhuidig station",
foreground="white",
background="#001F6A",
font=('arial', 12, 'bold'),
width=17,
height=3,
command=knop1)
button1.place(x=765, y=650)
button2 = Button(master=hoofdframe, # button 2
text="Actuele vertrektijden\nander station",
foreground="white",
background="#001F6A",
font=('arial', 12, 'bold'),
width=17,
height=3,
command=knop2)
button2.place(x=965, y=650)
buttonNL = Button(master=onderframe, # button to switch from English to Dutch
width=10,
height=10,
command=eng_to_nl)
photoNL = PhotoImage (file='afbeeldingen\kroodwitblauw.png')
buttonNL.config(image=photoNL, # attach the image so the button displays it
width=48,
height=25)
buttonNL.place(x=50, y=25)
labelengels = Label(master=onderframe, # label under the English flag
text='English',
foreground='white',
background='#001F6A',
font=('arial', 9))
labelengels.place(x=128, y=55)
buttonENG = Button(master=onderframe, # button to switch from Dutch to English
width=10,
height=10,
command=nl_to_eng)
photoENG = PhotoImage (file='afbeeldingen\kengenland.png')
buttonENG.config(image=photoENG, # attach the image so the button displays it
width=48,
height=25)
buttonENG.place(x=125, y=25)
labelnederlands = Label(master=onderframe, # label under the Dutch flag
text='Nederlands',
foreground='white',
background='#001F6A',
font=('arial', 9))
labelnederlands.place(x=42, y=55)
root.mainloop()