hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
0f70ef09e2ec2194d4547ddd07423964e8195007 | 1,004 | py | Python | setup.py | GainCompliance/ndb-audit | 404d7ae3bd44f0af0f5c3aba72da98cf68d5c610 | [
"MIT"
] | null | null | null | setup.py | GainCompliance/ndb-audit | 404d7ae3bd44f0af0f5c3aba72da98cf68d5c610 | [
"MIT"
] | null | null | null | setup.py | GainCompliance/ndb-audit | 404d7ae3bd44f0af0f5c3aba72da98cf68d5c610 | [
"MIT"
] | null | null | null | import sys
from setuptools import find_packages, setup
try:
from semantic_release import setup_hook
setup_hook(sys.argv)
except ImportError:
pass
with open('README.rst', 'r') as readme_file:
long_description = readme_file.read()
setup(
name='ndb_audit',
packages=find_packages(
exclude=["*.test", "*.test.*", "test.*", "test"]
),
install_requires=[],
entry_points={'console_scripts': ['semantic-release = semantic_release.cli:main']},
version_format='{tag}',
setup_requires=['setuptools-git-version==1.0.3'],
description = 'Adds audit trail to any Google Datastore NDB entity',
long_description=long_description,
author='Jason Jones',
author_email='jason@gaincompliance.com',
keywords=['google', 'appengine', 'datastore', 'NDB', 'audit'],
url='https://github.com/GainCompliance/ndb-audit',
license='MIT',
classifiers=[
'Programming Language :: Python',
'Programming Language :: Python :: 2'
]
)
| 27.888889 | 87 | 0.667331 | 115 | 1,004 | 5.669565 | 0.608696 | 0.069018 | 0.03681 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004872 | 0.182271 | 1,004 | 35 | 88 | 28.685714 | 0.789281 | 0 | 0 | 0 | 0 | 0 | 0.364542 | 0.077689 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.033333 | 0.133333 | 0 | 0.133333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f74db31bc014e6bd1b0b770b74be07a2bd8444c | 2,045 | py | Python | data/crop_processed_data.py | mdzh10/covid19_ultrasound | 8a1192053554e0a08649c35d9686dc842f0fe026 | [
"MIT"
] | null | null | null | data/crop_processed_data.py | mdzh10/covid19_ultrasound | 8a1192053554e0a08649c35d9686dc842f0fe026 | [
"MIT"
] | null | null | null | data/crop_processed_data.py | mdzh10/covid19_ultrasound | 8a1192053554e0a08649c35d9686dc842f0fe026 | [
"MIT"
] | null | null | null | import cv2
import os
import json
import numpy as np
import matplotlib.pyplot as plt
DISPLAY_IMG = False
# input path with uncropped videos
path = "tmp"
# path where to output the final videos
final_path = "."
if not os.path.exists(final_path):
os.makedirs(final_path)
os.makedirs(os.path.join(final_path, "pocus_videos/convex"))
os.makedirs(os.path.join(final_path, "pocus_videos/linear"))
os.makedirs(os.path.join(final_path, "pocus_images/convex"))
os.makedirs(os.path.join(final_path, "pocus_images/linear"))
# load json with crop
with open("crop.json", "r") as infile:
crop_dir = json.load(infile)
for key in crop_dir.keys():
# I/O paths
vid_path = os.path.join(path, key)
save_video_path = os.path.join(final_path, key)
# get crop and trimming
start, end = crop_dir[key][1]
bottom, left, size = crop_dir[key][0]
print(key, crop_dir[key])
# read video
cap = cv2.VideoCapture(vid_path)
# test whether it's okay
ret, test = cap.read()
if test is None:
print(f"Problem reading file: {vid_path}")
continue
# reset cap
cap = cv2.VideoCapture(vid_path)
# Image processing
if cap.get(7) < 2:
ret, frame = cap.read()
frame = frame[bottom:bottom + size, left:left + size]
cv2.imwrite(save_video_path, frame)
continue
video_array = []
cap.set(cv2.CAP_PROP_POS_FRAMES, start)
for i in range(int(end - start)):
ret, frame = cap.read()
cropped = frame[bottom:bottom + size, left:left + size]
if i == 0 and DISPLAY_IMG:
plt.imshow(cropped)
plt.show()
video_array.append(cropped)
# write video
print(np.array(video_array).shape)
video_path = ".".join(save_video_path.split(".")[:-1])
fourcc = cv2.VideoWriter_fourcc(*'mp4v') # XVID
writer = cv2.VideoWriter(
video_path + '.mp4', fourcc, cap.get(5), cropped.shape[:2]
)
for x in video_array:
writer.write(x.astype("uint8"))
writer.release()
| 27.266667 | 66 | 0.641076 | 300 | 2,045 | 4.24 | 0.36 | 0.056604 | 0.04717 | 0.058962 | 0.241352 | 0.187107 | 0.187107 | 0.13522 | 0.13522 | 0 | 0 | 0.011436 | 0.230318 | 2,045 | 74 | 67 | 27.635135 | 0.796696 | 0.0978 | 0 | 0.117647 | 0 | 0 | 0.074741 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.098039 | 0 | 0.098039 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f74fbcd4381b7c2924d701634a9e6ac5ea7f57a | 2,744 | py | Python | pdm/builders/sdist.py | shidenggui/pdm | 43aa6410db495150029ff69678182bddf5af192c | [
"MIT"
] | 1 | 2021-02-04T19:43:38.000Z | 2021-02-04T19:43:38.000Z | pdm/builders/sdist.py | shidenggui/pdm | 43aa6410db495150029ff69678182bddf5af192c | [
"MIT"
] | null | null | null | pdm/builders/sdist.py | shidenggui/pdm | 43aa6410db495150029ff69678182bddf5af192c | [
"MIT"
] | null | null | null | import os
import tarfile
import tempfile
from copy import copy
from pkg_resources import safe_version, to_filename
from pdm.builders.base import Builder
from pdm.context import context
def normalize_file_permissions(st_mode):
"""
Normalizes the permission bits in the st_mode field from stat to 644/755
Popular VCSs only track whether a file is executable or not. The exact
permissions can vary on systems with different umasks. Normalising
to 644 (non executable) or 755 (executable) makes builds more reproducible.
"""
# Set 644 permissions, leaving higher bits of st_mode unchanged
new_mode = (st_mode | 0o644) & ~0o133
if st_mode & 0o100:
new_mode |= 0o111 # Executable: 644 -> 755
return new_mode
def clean_tarinfo(tar_info):
"""
Clean metadata from a TarInfo object to make it more reproducible.
- Set uid & gid to 0
- Set uname and gname to ""
- Normalise permissions to 644 or 755
- Set mtime if not None
"""
ti = copy(tar_info)
ti.uid = 0
ti.gid = 0
ti.uname = ""
ti.gname = ""
ti.mode = normalize_file_permissions(ti.mode)
return ti
class SdistBuilder(Builder):
"""This build should be performed for PDM project only."""
def build(self, build_dir: str, **kwargs):
if not os.path.exists(build_dir):
os.makedirs(build_dir, exist_ok=True)
context.io.echo("- Building {}...".format(context.io.cyan("sdist")))
version = to_filename(safe_version(self.meta.version))
target = os.path.join(
build_dir, "{}-{}.tar.gz".format(self.meta.project_name, version)
)
tar = tarfile.open(target, mode="w:gz", format=tarfile.PAX_FORMAT)
try:
tar_dir = "{}-{}".format(self.meta.project_name, version)
files_to_add = self.find_files_to_add(True)
for relpath in files_to_add:
tar.add(
relpath,
arcname=os.path.join(tar_dir, str(relpath)),
recursive=False,
)
context.io.echo(f" - Adding: {relpath}", verbosity=context.io.DETAIL)
fd, temp_name = tempfile.mkstemp(prefix="pkg-info")
pkg_info = self.format_pkginfo(False).encode("utf-8")
with open(fd, "wb") as f:
f.write(pkg_info)
tar.add(
temp_name, arcname=os.path.join(tar_dir, "PKG-INFO"), recursive=False
)
context.io.echo(" - Adding: PKG-INFO", verbosity=context.io.DETAIL)
finally:
tar.close()
context.io.echo("- Built {}".format(context.io.cyan(os.path.basename(target))))
return target
| 30.831461 | 87 | 0.612245 | 361 | 2,744 | 4.534626 | 0.404432 | 0.043983 | 0.031765 | 0.023213 | 0.100183 | 0.067196 | 0 | 0 | 0 | 0 | 0 | 0.023834 | 0.281341 | 2,744 | 88 | 88 | 31.181818 | 0.806288 | 0.226312 | 0 | 0.038462 | 0 | 0 | 0.055394 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057692 | false | 0 | 0.134615 | 0 | 0.269231 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f76ac66cf70b1deed16b8639977bd441bbfcfd9 | 1,379 | py | Python | samples/sender.py | strollo/pydomus | 2396fd455e8c12aee9136c0de56431a9ca282728 | [
"Apache-2.0"
] | null | null | null | samples/sender.py | strollo/pydomus | 2396fd455e8c12aee9136c0de56431a9ca282728 | [
"Apache-2.0"
] | null | null | null | samples/sender.py | strollo/pydomus | 2396fd455e8c12aee9136c0de56431a9ca282728 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import random
import logging
from context import *
from multisock import logfactory
from multisock import channel
from multisock.crypter import Crypter
from pydomus.notification import Notification
from pydomus.component import Component
def printResult(parent, evt):
assert (evt is not None and evt.hasPayload())
result = evt.get('value')
logging.info('Received result: %d' % result)
def loop(component):
##########################################
# NOTIFICATION
##########################################
msg = Notification('/arithm/calc/sum')
msg.set('/operation', 'SUM')
msg.set('/p1', random.randint(1, 1000))
msg.set('/p2', random.randint(1, 1000))
#
component.logger.info('Requiring the SUM of %d %d' % (msg.get('p1'), msg.get('p2')))
# Ask calculator to apply the operation
component.notify(msg)
# Next loop step in 5 seconds
component.delay(5)
def main():
# name, mcastIP, mcastPort
logger = logfactory.LogFactory('sendr', 'logs/sender')
logger.info('Instantiating Sender Sample')
ch = channel.Channel('224.1.1.1', 1234, crypto=Crypter('key', 'passphrase'))
c = Component('sendr', ch)
# ACTIVE LOOP
c.setLoop(loop)
# REACTIONS
c.add('/arithm/result', printResult)
# START COMPONENT
c.start()
if __name__ == '__main__':
main()
| 26.519231 | 88 | 0.627991 | 167 | 1,379 | 5.137725 | 0.48503 | 0.045455 | 0.044289 | 0.041958 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.023091 | 0.183466 | 1,379 | 51 | 89 | 27.039216 | 0.738899 | 0.117476 | 0 | 0 | 0 | 0 | 0.161032 | 0 | 0 | 0 | 0 | 0 | 0.033333 | 1 | 0.1 | false | 0.033333 | 0.266667 | 0 | 0.366667 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f76adbf7d8368dc2550c0fc87e77a48524f22cd | 2,767 | py | Python | exa/util/conversions.py | herbertludowieg/exa | 62a22eb47f18d468c5e1ae43f96e27bd60ec3e57 | [
"Apache-2.0"
] | 2 | 2019-11-30T09:58:12.000Z | 2020-01-28T13:32:40.000Z | exa/util/conversions.py | herbertludowieg/exa | 62a22eb47f18d468c5e1ae43f96e27bd60ec3e57 | [
"Apache-2.0"
] | 108 | 2016-01-08T18:42:50.000Z | 2021-04-10T16:36:30.000Z | exa/util/conversions.py | herbertludowieg/exa | 62a22eb47f18d468c5e1ae43f96e27bd60ec3e57 | [
"Apache-2.0"
] | 11 | 2016-01-08T17:11:52.000Z | 2020-07-04T12:30:22.000Z | # -*- coding: utf-8 -*-
# Copyright (c) 2015-2020, Exa Analytics Development Team
# Distributed under the terms of the Apache License 2.0
"""
Common conversion factors
##########################################
Conversion factors from the tabulated physical constants from `NIST`_.
All of the entries that end with relationship are taken to be the conversion
units provided by `NIST`_ and they are listed by a hardcoded acronym value.
.. code-block:: python
>>> from exa.util import conversions
>>> conversions.Ha2eV
27.211386245988002
>>> conversions.Ha2eV.error
5.3e-11
>>> conversions.Ha2inv_m
21947463.13632
"""
import sys as _sys
import pandas as _pd
from exa.static import resource as _resource
class Conversion(float):
"""
Conversion factors taken from the NIST constants table. Only those
that end with relationship are taken to be the conversion factors.
Acronyms are as follows:
- 'u': Unified atomic mass unit (Dalton)
- 'amu': Atomic unit of mass
- 'eV': Electron volt
- 'J': Joule
- 'Ha': Hartree
- 'inv_m': Inverse meter
- 'K': Kelvin
- 'Kg': Kilogram
- 'Hz': Hertz
"""
def __new__(cls, name, units, value, error):
return super(Conversion, cls).__new__(cls, value)
def __init__(self, name, units, value, error):
float.__init__(value)
self.name = name
self.units = units
self.error = error
self.value = value
def _get_acronym(name):
mapper = {'atomic_mass_unit': 'u', 'electron_volt': 'eV', 'joule': 'J',
'hartree': 'Ha', 'inverse_meter': 'inv_m', 'kelvin': 'K',
'kilogram': 'Kg', 'hertz': 'Hz'}
key = ['', '2', '']
for unit, acr in mapper.items():
if name.startswith(unit):
key[0] = acr
elif name.endswith(unit+'_relationship'):
key[2] = acr
elif name == 'electron_mass':
key = 'amu2Kg'
elif name == 'electron_mass_in_u':
key = 'amu2u'
if ''.join(key) == '2':
raise ValueError("Could not determine the acronym for the value {}".format(name))
return ''.join(key)
def _create():
alt_conversions = ['electron_mass', 'electron_mass_in_u']
df = _pd.read_csv(_path)
for quan, unit, err, val in zip(df['quantity'], df['unit'],
df['uncertainty'], df['value']):
if quan.endswith('relationship') or quan in alt_conversions:
name = _get_acronym(quan)
setattr(_this, name, Conversion(name=name, units=unit, value=val,
error=err))
_this = _sys.modules[__name__]
_path = _resource("nist-constants-2018.csv")
if not hasattr(_this, "Ha2inv_m"):
_create()
| 32.552941 | 89 | 0.600289 | 344 | 2,767 | 4.656977 | 0.427326 | 0.042447 | 0.013733 | 0.028714 | 0.059925 | 0.059925 | 0.059925 | 0.059925 | 0.059925 | 0.059925 | 0 | 0.028809 | 0.259848 | 2,767 | 84 | 90 | 32.940476 | 0.753418 | 0.355258 | 0 | 0 | 0 | 0 | 0.172695 | 0.013419 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0 | 0.071429 | 0.02381 | 0.238095 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f77334e4cd86f599b51f3cd13adc9470cd90e4e | 1,939 | py | Python | setup.py | mharnold/spiderosm | 16323bf0214ca4b7bbb98f62728e83e73bcf6bfd | [
"MIT"
] | 10 | 2015-01-24T00:16:35.000Z | 2021-06-30T17:33:20.000Z | setup.py | mharnold/spiderosm | 16323bf0214ca4b7bbb98f62728e83e73bcf6bfd | [
"MIT"
] | 15 | 2015-01-03T19:10:05.000Z | 2015-11-15T19:19:58.000Z | setup.py | mharnold/spiderosm | 16323bf0214ca4b7bbb98f62728e83e73bcf6bfd | [
"MIT"
] | 3 | 2015-11-12T20:52:51.000Z | 2016-03-16T10:58:34.000Z | import json
import os.path
import setuptools
# get package info from SPIDEROSM.json
with open(os.path.join('spiderosm','SPIDEROSM_INFO.json')) as fp:
spiderosm_info = json.load(fp)
setuptools.setup(name='spiderosm',
version=spiderosm_info['version'],
description='GIS conflation tool for matching street networks.',
long_description='GIS conflation tool: matches segments in one path network (e.g. streets and trails) to corresponding segments in another, based on geography and network connectivity. Useful, among other things, for combining jurisdictional centerline data with Open Street Maps data.',
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 2.7',
'Topic :: Scientific/Engineering :: GIS'
],
keywords='GIS conflation OSM OpenStreetMaps centerline jurisdictional street network matcher',
url=spiderosm_info['homepage'],
author=spiderosm_info['author'],
author_email=spiderosm_info['author_email'],
license=spiderosm_info['license'],
packages=['spiderosm'],
install_requires=[
'pyproj',
'shapely', #requires geos library, on Mac: "%brew install geos"
'pylev',
'geojson >= 1.0.9',
'pyshp',
],
extras_require={
'spatialite' : ['pyspatialite >= 3.0.1'],
'postgis' : 'psycopg2'
#imposm.parser is now optional:
# DOES NOT WORK UNDER WINDOWS!
# needs protobuf / protoc, on Mac: "%brew install protobuf --with-python"
# needs tokyo-cabinet, on Mac: "%brew install tokyo-cabinet" ?
},
include_package_data=True,
scripts=[
'spiderosm/bin/spiderosm_test.py',
'spiderosm/bin/spiderosm_berkeley.py',
'spiderosm/bin/spiderosm_portland.py'
],
zip_safe=False)
| 39.571429 | 294 | 0.641568 | 214 | 1,939 | 5.728972 | 0.593458 | 0.074225 | 0.022023 | 0.039152 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006849 | 0.247035 | 1,939 | 48 | 295 | 40.395833 | 0.832877 | 0.146467 | 0 | 0.076923 | 0 | 0.025641 | 0.493932 | 0.074636 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.076923 | 0 | 0.076923 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f7866e1e4b55b3496e50b331dbd552383edc6f1 | 234 | py | Python | search/seqSearch.py | zacario-li/dstest | f157b12564784d6bd63a9e421dffce1d00d8c935 | [
"Apache-2.0"
] | null | null | null | search/seqSearch.py | zacario-li/dstest | f157b12564784d6bd63a9e421dffce1d00d8c935 | [
"Apache-2.0"
] | null | null | null | search/seqSearch.py | zacario-li/dstest | f157b12564784d6bd63a9e421dffce1d00d8c935 | [
"Apache-2.0"
] | null | null | null | def seqSearch(data, target):
for i in range(len(data)):
if target == data[i]:
return i
return -1
if __name__ == '__main__':
ret = seqSearch([1,2,3,34,56,57,78,87],57)
print(f'ret index is: {ret}') | 23.4 | 46 | 0.559829 | 38 | 234 | 3.236842 | 0.684211 | 0.113821 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.093567 | 0.269231 | 234 | 10 | 47 | 23.4 | 0.625731 | 0 | 0 | 0 | 0 | 0 | 0.114894 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0 | 0 | 0.375 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f796a2afafe662cd4c53c879fb219748738dd29 | 7,205 | py | Python | appengine/findit/infra_api_clients/codereview/gerrit.py | mcgreevy/chromium-infra | 09064105713603f7bf75c772e8354800a1bfa256 | [
"BSD-3-Clause"
] | 1 | 2018-01-02T05:47:07.000Z | 2018-01-02T05:47:07.000Z | appengine/findit/infra_api_clients/codereview/gerrit.py | mcgreevy/chromium-infra | 09064105713603f7bf75c772e8354800a1bfa256 | [
"BSD-3-Clause"
] | null | null | null | appengine/findit/infra_api_clients/codereview/gerrit.py | mcgreevy/chromium-infra | 09064105713603f7bf75c772e8354800a1bfa256 | [
"BSD-3-Clause"
] | null | null | null | # Copyright 2017 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import re
from gae_libs.http.http_client_appengine import HttpClientAppengine
from infra_api_clients.codereview import cl_info
from infra_api_clients.codereview import codereview
from libs import time_util
class Gerrit(codereview.CodeReview):
"""Stub for implementing Gerrit support."""
HTTP_CLIENT = HttpClientAppengine(follow_redirects=False)
def __init__(self, host, settings=None):
super(Gerrit, self).__init__(host)
settings = settings or {}
self.commit_bot_emails = settings.get('commit_bot_emails',
['commit-bot@chromium.org'])
def _HandleResponse(self, status_code, content):
if status_code != 200:
return None
# Remove XSSI magic prefix
if content.startswith(')]}\''):
content = content[4:]
return json.loads(content)
def _AuthenticatedRequest(self, path_parts, payload=None, method='GET',
headers=None):
# Prepend /a/ to make the request authenticated.
if path_parts[0] != 'a':
path_parts = ['a'] + list(path_parts)
url = 'https://%s/%s' % (self._server_hostname, '/'.join(path_parts))
headers = headers or {}
# This header tells gerrit to send compact (non-pretty) JSON which is
# more efficient and encouraged for automated tools.
headers['Accept'] = 'application/json'
headers.setdefault('Accept', 'application/json')
if method == 'GET':
return self.HTTP_CLIENT.Get(url, params=payload, headers=headers)
elif method == 'POST':
return self.HTTP_CLIENT.Post(url, data=payload, headers=headers)
raise NotImplementedError() # pragma: no cover
def _Get(self, path_parts, params=None, headers=None):
"""Makes a simple get to Gerrit's API and parses the json output."""
return self._HandleResponse(*self._AuthenticatedRequest(
path_parts, payload=params, headers=headers))
def _Post(self, path_parts, body=None, headers=None):
headers = headers or {}
if body: # pragma: no branch
headers['Content-Type'] = 'application/json'
body = json.dumps(body)
return self._HandleResponse(*self._AuthenticatedRequest(
path_parts, payload=body, method='POST', headers=headers))
def GetCodeReviewUrl(self, change_id):
return 'https://%s/q/%s' % (self._server_hostname, change_id)
def PostMessage(self, change_id, message):
parts = ['changes', change_id, 'revisions', 'current', 'review']
result = self._Post(parts, body={'message': message})
return result is not None # A successful post will return an empty dict.
def CreateRevert(self, reason, change_id, patchset_id=None):
parts = ['changes', change_id, 'revert']
reverting_change = self._Post(parts, body={'message': reason})
try:
return reverting_change['change_id']
except (TypeError, KeyError):
return None
def AddReviewers(self, change_id, reviewers, message=None):
assert reviewers
current_reviewers = self.GetClDetails(change_id).reviewers
for reviewer in reviewers:
# reviewer must be an email string.
assert len(reviewer.split('@')) == 2
if reviewer in current_reviewers:
# Only add reviewers not currently assinged to the change.
continue
parts =['changes', change_id, 'reviewers']
response = self._Post(parts, body={'reviewer': reviewer})
try:
reviewers = response['reviewers']
if reviewers == []:
# This might be okay if a user has more than one email.
logging.warning('Reviewer %s already assigned to cl %s under a '
'different email' % (reviewer, change_id))
continue
new_reviewer = reviewers[0]['email']
if new_reviewer != reviewer:
# This might be okay if a user has more than one email.
logging.warning('Requested to add %s as reviewer to cl %s but '
'%s was added instead.' % (reviewer, change_id,
new_reviewer))
except (TypeError, KeyError, IndexError):
return False
return True
def GetClDetails(self, change_id):
# Create cl info based on the url.
params = [('o', 'CURRENT_REVISION'), ('o', 'CURRENT_COMMIT')]
change_info = self._Get(['changes', change_id, 'detail'], params=params)
return self._ParseClInfo(change_info, change_id)
def _ParseClInfo(self, change_info, change_id):
if not change_info: # pragma: no cover
return None
result = cl_info.ClInfo(self._server_hostname, change_id)
result.reviewers = [x['email'] for x in change_info['reviewers'].get(
'REVIEWER', [])]
result.cc = [x['email'] for x in change_info['reviewers'].get('CC', [])]
result.closed = change_info['status'] == 'MERGED'
result.owner_email = change_info['owner']['email']
# If the status is merged, look at the commit details for the current
# commit.
if result.closed: # pragma: no branch
current_revision = change_info['current_revision']
revision_info = change_info['revisions'][current_revision]
patchset_id = revision_info['_number']
commit_timestamp = time_util.DatetimeFromString(
change_info['submitted'])
result.commits.append(cl_info.Commit(patchset_id, current_revision,
commit_timestamp))
# Detect manual commits.
committer = revision_info['commit']['committer']['email']
if committer not in self.commit_bot_emails:
result.AddCqAttempt(patchset_id, committer, commit_timestamp)
# Checks for if the culprit owner has turned off auto revert.
result.auto_revert_off = codereview.IsAutoRevertOff(
revision_info['commit']['message'])
# TO FIND COMMIT ATTEMPTS:
# In messages look for "Patch Set 1: Commit-Queue+2"
# or "Patch Set 4: Code-Review+1 Commit-Queue+2".
cq_pattern = re.compile('^Patch Set \d+:( Code-Review..)? Commit-Queue\+2$')
revert_tag = 'autogenerated:gerrit:revert'
revert_pattern = re.compile(
'Created a revert of this change as (?P<change_id>I[a-f\d]{40})')
for message in change_info['messages']:
if cq_pattern.match(message['message'].splitlines()[0]):
patchset_id = message['_revision_number']
author = message['author']['email']
timestamp = time_util.DatetimeFromString(message['date'])
result.AddCqAttempt(patchset_id, author, timestamp)
# TO FIND REVERT(S):
if message.get('tag') == revert_tag:
patchset_id = message['_revision_number']
author = message['author']['email']
timestamp = time_util.DatetimeFromString(message['date'])
reverting_change_id = revert_pattern.match(
message['message']).group('change_id')
reverting_cl = self.GetClDetails(reverting_change_id)
result.reverts.append(cl_info.Revert(patchset_id, reverting_cl, author,
timestamp))
return result
| 42.633136 | 80 | 0.661763 | 882 | 7,205 | 5.243764 | 0.284581 | 0.036324 | 0.010378 | 0.012973 | 0.146595 | 0.124973 | 0.109838 | 0.109838 | 0.082162 | 0.067459 | 0 | 0.003584 | 0.225399 | 7,205 | 168 | 81 | 42.886905 | 0.825121 | 0.152255 | 0 | 0.136 | 0 | 0 | 0.132554 | 0.012679 | 0 | 0 | 0 | 0 | 0.016 | 1 | 0.088 | false | 0 | 0.056 | 0.008 | 0.28 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f79a0344682bbe642616faff16e4ac064ebf91c | 3,436 | py | Python | src/data/download_data.py | RafaelPinto/force_2020_lith | 37900b243d5e76aff90c55a296b01ff710c12df6 | [
"BSD-3-Clause"
] | null | null | null | src/data/download_data.py | RafaelPinto/force_2020_lith | 37900b243d5e76aff90c55a296b01ff710c12df6 | [
"BSD-3-Clause"
] | null | null | null | src/data/download_data.py | RafaelPinto/force_2020_lith | 37900b243d5e76aff90c55a296b01ff710c12df6 | [
"BSD-3-Clause"
] | 1 | 2022-03-01T09:16:42.000Z | 2022-03-01T09:16:42.000Z | import pandas as pd
import gdown
from src.definitions import ROOT_DIR
OUTPUT_ROOT = ROOT_DIR / 'data/external'
if not OUTPUT_ROOT.is_dir():
OUTPUT_ROOT.mkdir(parents=True)
def download_from_google_drive(file_ids, output_root=None, redownload=False):
"""
Download the seleced files from Google Drive using their Google Drive IDs.
Parameters
----------
file_ids : dict
Dictionary with file name with extension as key and file's Google
drive ID as value.
output_root: path like
Directory to store the downloaded data.
redownload: bool
Download the file even if it already exists.
"""
if output_root is None:
output_root = OUTPUT_ROOT
url_root = "https://drive.google.com/uc?id="
for file_name, file_id in file_ids.items():
output = output_root / file_name
# Skip file if already downloaded
if output.exists():
if not redownload:
continue
url = url_root + file_id
gdown.download(url, str(output))
return
def download_competition_files():
"""
Download the competition files from Google Drive using their Google
Drive IDs.
"""
file_ids = {
# "Well log competion rules and description": "1Q_Z7xDREeTGqXvdmFuZ89e6PXN4I1miPLq1I17MTkds",
"Confusion matrix all submitters.xlsx": "1f4DZPmwJFPG7hScEX_S2RbLdOF4IOH_U",
"CSV_hidden_test.csv": "1PLWXrUQKmwMchAmcoJos0lmAm9MLEFnW",
"CSV_test.csv": "17W3I_XfI0JlJ4mLJVtz4rGa0eZKWZ6Xv",
"CSV_train.csv": "1hwDi05hwICWf95SOlofdKKYZUH79ReOa",
"lithology scoring matrix cost function.xlsx": "11Hx1KBCy3vMWzzyqdVumZxIP37qi6kEZ",
"NPD_Casing_depth_most_wells.xlsx": "10HjgB3f1_VpGjTiFPjJs37r6QYLX5T9T",
"NPD_Lithostratigraphy_groups_all_wells.xlsx": "19oTHTNg5jXsss8sElbXQZtjJrJRaffku",
"NPD_Lithostratigraphy_member_formations_all_wells.xlsx": "1X57eNXWW0_ilNO_ISvC6uz1o2OsPDZRP",
"penalty_matrix.npy": "1eCH2LBFywpgopOcHG0RLGXEtBKb7LHhM",
"starter_notebook.ipynb": "1uYG70pz2hh2nmgo6f3Hdg_IxQmyRGWEb",
"Well logs abbreviation description.xlsx": "1EOxhQicZC5X-tbPwojvWxsHjst7IcIsy",
"olawale_hidden_test_pred.csv": "16w0E1QPIdCDdoJRgAXQzqSPJ5eywQyMl",
"olawale_open_test_pred.csv": "1--4oofS0p0tvLriRLs1UhkkbxaKdxlBO",
}
download_from_google_drive(file_ids)
return
def download_well_meta():
    """
    Download well meta data from Norwegian Petroleum Directorate (NPD).
    """
    source_url = 'https://factpages.npd.no/ReportServer_npdpublic?/FactPages/TableView/wellbore_exploration_all&rs:Command=Render&rc:Toolbar=false&rc:Parameters=f&rs:Format=CSV&Top100=false&IpAddress=not_used&CultureCode=en'
    # The NPD fact pages serve the table directly as CSV.
    frame = pd.read_csv(source_url)
    frame.to_csv(OUTPUT_ROOT / 'well_meta_npd.csv', index=False)
def download_open_test_labels():
    """
    Download the open test set true labels.
    """
    source_url = 'https://github.com/bolgebrygg/Force-2020-Machine-Learning-competition/raw/master/lithology_competition/data/leaderboard_test_target.csv'
    # Upstream file is semicolon-delimited; re-save as a standard CSV.
    labels = pd.read_csv(source_url, sep=';')
    labels.to_csv(OUTPUT_ROOT / 'open_test_y_true.csv', index=False)
if __name__ == "__main__":
    # Fetch every external input the project needs, in one pass.
    download_competition_files()
    download_well_meta()
    download_open_test_labels()
| 31.522936 | 227 | 0.721478 | 409 | 3,436 | 5.762836 | 0.410758 | 0.04667 | 0.019092 | 0.015274 | 0.062792 | 0.062792 | 0.037336 | 0.037336 | 0.037336 | 0 | 0 | 0.032616 | 0.188009 | 3,436 | 108 | 228 | 31.814815 | 0.812186 | 0.189464 | 0 | 0.04 | 0 | 0.04 | 0.464352 | 0.236655 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.06 | 0 | 0.18 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f7aa5d94172cc113752628511e1fdfff0ea27f0 | 1,216 | py | Python | examples/echo.py | fossabot/mosec | b803cffbbdb92212a2810597f8ce59fe14c1f728 | [
"Apache-2.0"
] | null | null | null | examples/echo.py | fossabot/mosec | b803cffbbdb92212a2810597f8ce59fe14c1f728 | [
"Apache-2.0"
] | null | null | null | examples/echo.py | fossabot/mosec | b803cffbbdb92212a2810597f8ce59fe14c1f728 | [
"Apache-2.0"
] | null | null | null | import logging
import time
from typing import List
from pydantic import BaseModel
from mosec import Server, Worker
# Root logger configured at import time: DEBUG and above, written to stderr
# with timestamp, PID, level and source location for easier multi-process
# debugging.
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
formatter = logging.Formatter(
    "%(asctime)s - %(process)d - %(levelname)s - %(filename)s:%(lineno)s - %(message)s"
)
sh = logging.StreamHandler()
sh.setFormatter(formatter)
logger.addHandler(sh)
class EchoReq(BaseModel):
    # Request schema: number of seconds the service should sleep for
    # (consumed by Inference.forward below).
    time: float
class EchoResp(BaseModel):
    # Response schema: human-readable message reporting the sleep duration.
    msg: str
class Preprocess(Worker):
    def forward(self, data: EchoReq) -> float:
        """Extract the requested sleep duration from the validated request."""
        logger.debug(f"pre received {data}")
        return data.time
class Inference(Worker):
    def forward(self, data: List[float]) -> List[float]:
        """Sleep for the batch's mean duration, then echo the batch back."""
        logger.info(f"received batch size: {len(data)}")
        mean_duration = sum(data) / len(data)
        time.sleep(mean_duration)
        return data
class Postprocess(Worker):
    def forward(self, data: float) -> EchoResp:
        """Wrap the slept duration into the response model."""
        logger.debug(f"post received {data}")
        return EchoResp(msg=f"sleep {data} seconds")
if __name__ == "__main__":
    # Three-stage pipeline: 2 preprocess workers -> 1 batched inference
    # worker (up to 16 requests per batch) -> 2 postprocess workers.
    server = Server(EchoReq, EchoResp)
    server.append_worker(Preprocess, num=2)
    server.append_worker(Inference, max_batch_size=16)
    server.append_worker(Postprocess, num=2)
    server.run()
| 23.384615 | 87 | 0.688322 | 154 | 1,216 | 5.350649 | 0.402597 | 0.032767 | 0.058252 | 0.072816 | 0.087379 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004024 | 0.182566 | 1,216 | 51 | 88 | 23.843137 | 0.82495 | 0 | 0 | 0 | 0 | 0.027778 | 0.148026 | 0.018914 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.138889 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f7b748e205f47f53854aa928e93bf09651b81f6 | 1,697 | py | Python | backend/tutors/serializers.py | ProgrammingLanguageLeader/TutorsApp | f2d5968b5c29ce75f5f634d6076a6e66efc76801 | [
"MIT"
] | 3 | 2019-02-24T23:30:19.000Z | 2019-03-27T20:06:53.000Z | backend/tutors/serializers.py | ProgrammingLanguageLeader/TutorsApp | f2d5968b5c29ce75f5f634d6076a6e66efc76801 | [
"MIT"
] | 1 | 2019-03-30T08:58:06.000Z | 2019-03-30T08:58:06.000Z | backend/tutors/serializers.py | ProgrammingLanguageLeader/TutorsApp | f2d5968b5c29ce75f5f634d6076a6e66efc76801 | [
"MIT"
] | 1 | 2019-03-01T20:10:19.000Z | 2019-03-01T20:10:19.000Z | from django.utils.translation import ugettext_lazy as _
from rest_framework import serializers
from tutors.models import StudentRequest, TutorStudents
from users.models import User
from users.serializers import UserSerializer
class StudentRequestSerializer(serializers.ModelSerializer):
    """Create/validate a student's request to be taught by a tutor."""

    # The requesting student is always the authenticated user; the field is
    # hidden so clients cannot submit it explicitly.
    student = serializers.HiddenField(
        default=serializers.CurrentUserDefault()
    )
    tutor = serializers.PrimaryKeyRelatedField(
        queryset=User.objects.all()
    )

    class Meta:
        model = StudentRequest
        fields = '__all__'
        read_only_fields = ('creation_time', )

    def run_validators(self, value):
        """Reject duplicates: at most one request per (student, tutor) pair."""
        try:
            StudentRequest.objects.get(
                student=value.get('student'),
                tutor=value.get('tutor')
            )
        except StudentRequest.DoesNotExist:
            # No existing request -> fall through to the standard validators.
            return super().run_validators(value)
        raise serializers.ValidationError(
            _('request already exists')
        )

    def validate(self, attrs):
        """Cross-field checks: no self-requests, no already-enrolled students."""
        student = attrs.get('student')
        tutor = attrs.get('tutor')
        if student == tutor:
            raise serializers.ValidationError(
                _('tutor and student must be different users')
            )
        # Rebinds `tutor` to the TutorStudents row (creating it if missing)
        # so the membership check below can read its students relation.
        tutor, created = TutorStudents.objects.get_or_create(user=tutor)
        if student in tutor.students.all():
            raise serializers.ValidationError(
                _('student already exists in a list of students')
            )
        return attrs
class ReadStudentRequestSerializer(StudentRequestSerializer):
    """Read-side variant: expand student and tutor into nested user objects."""
    student = UserSerializer()
    tutor = UserSerializer()
class AcceptStudentRequestSerializer(serializers.Serializer):
    """Empty serializer: accepting a request carries no payload."""
    pass
| 29.258621 | 72 | 0.655863 | 153 | 1,697 | 7.163399 | 0.470588 | 0.027372 | 0.084854 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.266352 | 1,697 | 57 | 73 | 29.77193 | 0.880321 | 0 | 0 | 0.066667 | 0 | 0 | 0.088981 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044444 | false | 0.022222 | 0.111111 | 0 | 0.377778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f7c22d2232d820a581701224d3ed6f1fee31b8c | 1,019 | py | Python | slurminade/conf.py | d-krupke/slurminade | 302bc5c3fae185e836b4c4c7063b02a9768b19df | [
"MIT"
] | 1 | 2022-03-31T08:30:39.000Z | 2022-03-31T08:30:39.000Z | slurminade/conf.py | d-krupke/slurminade | 302bc5c3fae185e836b4c4c7063b02a9768b19df | [
"MIT"
] | null | null | null | slurminade/conf.py | d-krupke/slurminade | 302bc5c3fae185e836b4c4c7063b02a9768b19df | [
"MIT"
] | null | null | null | """
This file saves the default configuration for slurm.
"""
import json
import os.path
from pathlib import Path
def _load_default_conf():
default_conf_file = os.path.join(Path.home(), ".slurminade_default.json")
try:
if os.path.isfile(default_conf_file):
with open(default_conf_file, "r") as f:
return json.load(f)
else:
return {}
except Exception as e:
print(
f"slurminade could not open default configuration {default_conf_file}!\n{str(e)}"
)
return {}
__default_conf = _load_default_conf()
def update_default_configuration(conf=None, **kwargs):
    """Merge *conf* and any keyword overrides into the module defaults."""
    for overrides in (conf, kwargs):
        if overrides:
            __default_conf.update(overrides)
def set_default_configuration(conf=None, **kwargs):
    """Replace the module defaults entirely with *conf* / keyword overrides.

    Bug fix: the previous implementation assigned ``__default_conf = {}``
    without a ``global`` statement, which only created a throwaway local —
    the old module-level defaults were never cleared. Rebinding the module
    global makes the reset take effect before merging in the new values.
    """
    global __default_conf
    __default_conf = {}
    update_default_configuration(conf, **kwargs)
def _get_conf(conf=None):
conf = conf if conf else {}
conf_ = __default_conf.copy()
conf_.update(conf)
return conf_
| 22.644444 | 93 | 0.656526 | 130 | 1,019 | 4.830769 | 0.338462 | 0.192675 | 0.095541 | 0.095541 | 0.10828 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.241413 | 1,019 | 44 | 94 | 23.159091 | 0.812419 | 0.05103 | 0 | 0.066667 | 0 | 0 | 0.107404 | 0.056309 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0 | 0.1 | 0 | 0.366667 | 0.033333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f7dbae2560644c45101965ad0d20fabcf8eed8e | 1,045 | py | Python | countries/regions/Europe.py | vincihb/paper_database | f97ebdcc2bba3ecee3590cde12a5a5f71d26451d | [
"MIT"
] | null | null | null | countries/regions/Europe.py | vincihb/paper_database | f97ebdcc2bba3ecee3590cde12a5a5f71d26451d | [
"MIT"
] | null | null | null | countries/regions/Europe.py | vincihb/paper_database | f97ebdcc2bba3ecee3590cde12a5a5f71d26451d | [
"MIT"
] | null | null | null | from countries.Countries import Countries
class Europe(Countries):
    """Countries helper restricted to the European region."""

    def __init__(self):
        super().__init__()
        # Membership list used by the base class to scope paper lookups.
        self.lst_of_countries = ['Albania', 'Austria', 'Azerbaijan', 'Belarus', 'Belgium', 'Bosnia and Herzegovina', 'Bulgaria', 'Croatia', 'Czech Republic', 'Denmark', 'Estonia', 'Finland', 'France', 'Georgia', 'Germany', 'Greece', 'Hungary', 'Iceland', 'Ireland', 'Italy', 'Kazakhstan', 'Kosovo', 'Kyrgyzstan', 'Latvia', 'Lithuania', 'Luxembourg', 'Montenegro', 'Netherlands', 'Norway', 'Poland', 'Portugal', 'Romania', 'Russia', 'Serbia', 'Slovakia', 'Slovenia', 'Spain', 'Sweden', 'Switzerland', 'Turkey', 'Ukraine', 'United Kingdom']
        # print([val.get('COUNTRY_NAME') for val in self.pc.get_all_countries()])
        # Eagerly fetch all papers for the region at construction time.
        self.all_papers = self.get_all_papers()
if __name__ == "__main__":
    # Quick sanity check: report how many papers are indexed for Europe.
    a = Europe()
    papers = a.all_papers
    print(len(papers))
    # print(dict(sorted(a.get_themes_distribution_secondary().items(), key=lambda x: x[1], reverse=True)))
    # print(a.get_watercode_distribution_secondary())
| 58.055556 | 538 | 0.669856 | 117 | 1,045 | 5.717949 | 0.717949 | 0.040359 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001111 | 0.138756 | 1,045 | 17 | 539 | 61.470588 | 0.742222 | 0.210526 | 0 | 0 | 0 | 0 | 0.421437 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.1 | 0 | 0.3 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f7fa23e44570dfd7a88e050de76d3c0016cc5e5 | 1,265 | py | Python | src/sagemaker_inference/parameters.py | muhyun/sagemaker-inference-toolkit | 31c143a8274129c00af7bc1db826e3cfc68ea535 | [
"Apache-2.0"
] | 188 | 2019-06-01T16:43:24.000Z | 2022-03-19T03:44:56.000Z | src/sagemaker_inference/parameters.py | muhyun/sagemaker-inference-toolkit | 31c143a8274129c00af7bc1db826e3cfc68ea535 | [
"Apache-2.0"
] | 66 | 2019-10-15T00:12:58.000Z | 2022-03-28T21:58:48.000Z | src/sagemaker_inference/parameters.py | muhyun/sagemaker-inference-toolkit | 31c143a8274129c00af7bc1db826e3cfc68ea535 | [
"Apache-2.0"
] | 68 | 2019-06-20T17:34:43.000Z | 2022-03-13T09:10:52.000Z | # Copyright 2019-2020 Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You
# may not use this file except in compliance with the License. A copy of
# the License is located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is
# distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
"""This module contains string constants that define inference toolkit
parameters."""
from __future__ import absolute_import
# Names of the environment variables the inference toolkit reads.
BASE_PATH_ENV = "SAGEMAKER_BASE_DIR"  # type: str
USER_PROGRAM_ENV = "SAGEMAKER_PROGRAM"  # type: str
LOG_LEVEL_ENV = "SAGEMAKER_CONTAINER_LOG_LEVEL"  # type: str
DEFAULT_INVOCATIONS_ACCEPT_ENV = "SAGEMAKER_DEFAULT_INVOCATIONS_ACCEPT"  # type: str
MODEL_SERVER_WORKERS_ENV = "SAGEMAKER_MODEL_SERVER_WORKERS"  # type: str
MODEL_SERVER_TIMEOUT_ENV = "SAGEMAKER_MODEL_SERVER_TIMEOUT"  # type: str
BIND_TO_PORT_ENV = "SAGEMAKER_BIND_TO_PORT"  # type: str
SAFE_PORT_RANGE_ENV = "SAGEMAKER_SAFE_PORT_RANGE"  # type: str
MULTI_MODEL_ENV = "SAGEMAKER_MULTI_MODEL"  # type: str
| 48.653846 | 84 | 0.785771 | 187 | 1,265 | 5.042781 | 0.529412 | 0.114528 | 0.050901 | 0.038176 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011019 | 0.13913 | 1,265 | 25 | 85 | 50.6 | 0.854913 | 0.566008 | 0 | 0 | 0 | 0 | 0.435946 | 0.369025 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f8118e0a4f36f9b71fbe2c4d7bada9499614b86 | 868 | py | Python | pyetherscan/settings.py | Marto32/pyetherscan | fb9669f731bf58c196d128ebc893dfbb0ab18aa9 | [
"MIT"
] | 13 | 2017-07-17T09:07:29.000Z | 2019-02-10T05:21:20.000Z | pyetherscan/settings.py | Marto32/pyetherscan | fb9669f731bf58c196d128ebc893dfbb0ab18aa9 | [
"MIT"
] | 24 | 2017-07-05T00:09:07.000Z | 2017-07-19T01:05:09.000Z | pyetherscan/settings.py | Marto32/pyetherscan | fb9669f731bf58c196d128ebc893dfbb0ab18aa9 | [
"MIT"
] | 5 | 2017-07-17T12:06:32.000Z | 2019-08-09T00:18:11.000Z | import os
import sys
# Location of the optional per-user configuration file.
HOME_DIR = os.path.expanduser('~')
CONFIG_FILE = '.pyetherscan.ini'
PATH = os.path.join(HOME_DIR, CONFIG_FILE)
# Etherscan's documented placeholder key, used when no real key is configured.
TESTING_API_KEY = 'YourApiKeyToken'
def parse_configs(python_version, config_object):
    """
    Return the Etherscan API key from a parsed config object, using the
    appropriate ConfigParser access style for Python 2 vs Python 3.
    """
    if python_version >= 3:
        return config_object['Credentials']['ETHERSCAN_API_KEY']
    return config_object.get('Credentials', 'ETHERSCAN_API_KEY')
# Resolve the API key at import time: prefer the ini file, then the
# environment, finally the public test key.
if os.path.isfile(PATH):
    try:
        from configparser import ConfigParser
    except ImportError:
        # Handle python 2.7 code
        from ConfigParser import ConfigParser
    config = ConfigParser()
    config.read(PATH)
    ETHERSCAN_API_KEY = parse_configs(sys.version_info[0], config)
else:
    ETHERSCAN_API_KEY = os.environ.get('ETHERSCAN_API_KEY', TESTING_API_KEY)
| 26.30303 | 76 | 0.709677 | 113 | 868 | 5.221239 | 0.451327 | 0.071186 | 0.127119 | 0.088136 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008584 | 0.1947 | 868 | 32 | 77 | 27.125 | 0.835479 | 0.102535 | 0 | 0.095238 | 0 | 0 | 0.13834 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.238095 | 0 | 0.380952 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f81bea19ea775512d2bcc5fa9ca9915a2919ecc | 13,886 | py | Python | lib/forest.py | xingfanxia/comps_ediscovery | ac8f8025e77be0a3e4c7166a0dc6586363655328 | [
"MIT"
] | 3 | 2019-11-07T07:22:25.000Z | 2021-06-18T23:52:32.000Z | lib/forest.py | xingfanxia/comps_ediscovery | ac8f8025e77be0a3e4c7166a0dc6586363655328 | [
"MIT"
] | 2 | 2018-03-06T21:16:22.000Z | 2018-03-12T18:31:33.000Z | lib/forest.py | xxf1995/comps_ediscovery | ac8f8025e77be0a3e4c7166a0dc6586363655328 | [
"MIT"
] | null | null | null | '''
Dummy Version of Random Forest
'''
from lib.tree import Tree
from lib.exceptions import *
import numpy as np
import pandas as pd
import random
import pickle
import multiprocessing
import sys
import time
# Size of the multiprocessing pools used for parallel fit/predict/update.
NUM_CORES = 10
class RNF:
    '''
    A bagged ensemble ("random forest") of decision trees for document
    classification.

    params:
        train_data - pandas DataFrame of training rows; expected to carry
            "Label", "Relevant" and "ID" columns alongside the features
        n_trees - number of trees to setup
        tree_depth - maximum recursion depth for each tree
        random_seed - seed for random gen (both `random` and `numpy.random`)
        n_max_features - max num of features to pass to each tree
        n_max_input - max num of input rows to pass to each tree
        cat_features - categorical-feature descriptor forwarded to Tree
        user_input - forwarded to Tree as `user_input`
    '''
    def __init__(self, train_data, n_trees, tree_depth, random_seed, n_max_features, n_max_input, cat_features, user_input=False):
        self.trees = []
        self.input_type = user_input
        self.train_data = train_data
        self.n_trees = n_trees
        self.tree_depth = tree_depth
        self.n_max_features = n_max_features
        self.n_max_input = n_max_input
        self.cat_features = cat_features
        self.seed = random_seed
        # self.features = [()] #list of tuples like (tree, emails, features)
        # Seed both RNGs so bootstrap/feature sampling is reproducible.
        random.seed(random_seed)
        np.random.seed(random_seed)
        self.oob_threshold = 0
        self.label_col_num = 60

    '''
    Randomly select features and emails from the train_data.
    Emails are selected with replacement, features without.
    np.delete calls are to resolve interference from non-feature columns
    '''
    def random_select(self, train_data):
        # Bootstrap sample of row labels (with replacement).
        selected_rows = np.random.choice(self.train_data.index, self.n_max_input)
        selected_feature_indices = np.random.choice(self.train_data.shape[1] - 1, self.n_max_features, replace=False)
        selected_features = train_data.columns.values[[selected_feature_indices]]
        # Strip the bookkeeping columns if they happened to be sampled.
        selected_features = np.delete(selected_features, np.where(selected_features == "Label"), axis=0)
        selected_features = np.delete(selected_features, np.where(selected_features == "Relevant"), axis=0)
        selected_features = np.delete(selected_features, np.where(selected_features == "ID"), axis=0)
        return (selected_rows, selected_features)

    '''
    Create and train trees for the forest.
    Use fit_parallel for a parallelized and updated version
    '''
    def fit(self):
        if len(self.trees) != 0:
            raise AlreadyFitException('This forest has already been fit to the data')
        for i in range(self.n_trees):
            selected = self.random_select(self.train_data)
            self.trees.append(Tree(self.train_data, self.tree_depth,
                                   0, selected[0], selected[1], self.cat_features,
                                   user_input=self.input_type))
        count = 0
        for tree in self.trees:
            count += 1
            # NOTE(review): rebinding the loop variable does not write back
            # into self.trees — this relies on Tree.fit() mutating in place.
            tree = tree.fit()

    '''
    Parallelized version of fitting the model.
    Create trees with selected features and rows, and fits each on up to NUM_CORES number of cores
    '''
    def fit_parallel(self):
        if len(self.trees) != 0:
            raise AlreadyFitException('This forest has already been fit to the data')
        for i in range(self.n_trees):
            selected = self.random_select(self.train_data)
            self.trees.append(Tree(self.train_data, self.tree_depth, 0,
                                   selected[0], selected[1], self.cat_features,
                                   user_input=self.input_type))
        # create N new processes, where N = number of trees
        pool = multiprocessing.Pool( NUM_CORES )
        # start the N tree.fit processes
        results = []
        for tree in self.trees:
            results.append( pool.apply_async(tree.fit) )
        pool.close()
        pool.join()
        r = []
        for result in results:
            r.append(result.get())
        # Fitted copies come back from the worker processes; replace the
        # unfitted originals with them.
        for i in range(len(self.trees)):
            self.trees[i] = r[i]

    '''
    calculate a proba from output of each tree's prediction
    should ouput two arrays: probas and classfication
    '''
    def some_majority_count_metric(self, score):
        # Mean vote across trees per class, ignoring NaNs, then clamped away
        # from exactly 0/1 by dampen().
        return np.array([self.dampen(x) for x in np.nanmean(score, axis=0)])

    def dampen(self, x):
        # Keep probabilities strictly inside (0, 1).
        if x == 1:
            return .9999
        elif x == 0:
            return .0001
        return x

    '''
    Use the trained model to predict a label for previously unseen data.
    '''
    def predict(self, test_data, visualize=False):
        trees_outputs = [tree.predict(test_data, visualize) for tree in self.trees]
        # One score list per document, collecting every tree's vote.
        scores = [ list() for i in range(len(test_data))]
        for document_idx in range(len(test_data)):
            for tree in trees_outputs:
                scores[document_idx].append(tree[document_idx][0])
        probas = [self.some_majority_count_metric(score) for score in scores]
        classes = ['1' if proba[0] > proba[1] else '0' for proba in probas]
        # Document ids are identical across trees, so read them off the first.
        ids = [doc[1] for doc in trees_outputs[0]]
        return probas, classes, ids

    '''
    Check that there are no predictions that have probabilities over 1
    '''
    def _check_prediction_integrity(self, probabilities):
        num_exceeded = 0
        for p in probabilities:
            if p[0] > 1 or p[1] > 1:
                num_exceeded += 1
        return num_exceeded

    '''
    Use the trained model to predict a label for previously unseen data.
    This parallelized version asks each tree to do their predictions separately
    in their own cores, at up to NUM_CORES number of cores at a time.
    '''
    def predict_parallel(self, test_data, visualize=False, importance=False):
        pool = multiprocessing.Pool( NUM_CORES )
        # Asynchronous calls for the trees to do work
        results = []
        for i in range(len(self.trees)):
            results.append( pool.apply_async(self.trees[i].predict, (test_data, visualize, importance)) )
        r = []
        for result in results:
            r.append(result.get())
        pool.close()
        pool.join()
        trees_outputs = r
        # Aggregation of scores returned by each tree
        scores = [ list() for i in range(len(test_data))]
        for document_idx in range(len(test_data)):
            for tree in trees_outputs:
                scores[document_idx].append(tree[0][document_idx])
        probas = [self.some_majority_count_metric(score) for score in scores]
        classes = ['1' if proba[0] > proba[1] else '0' for proba in probas]
        ids = trees_outputs[0][1]
        # Calculation of feature importances, if desired
        if importance:
            #sum up all of the importances
            importances = [{} for doc in trees_outputs[0][2]]
            for doc_idx, imp in enumerate(importances):
                for tree in trees_outputs:
                    for feature in tree[2][doc_idx].keys():
                        try:
                            imp[feature] += tree[2][doc_idx][feature]
                        except KeyError:
                            imp[feature] = tree[2][doc_idx][feature]
            #divide by num_trees
            for importance_dict in range(len(importances)):
                for feature in importances[importance_dict].keys():
                    importances[importance_dict][feature] = importances[importance_dict][feature] / len(self.trees)
            # print(importances)
            # print(ids)
            return probas, classes, ids, importances
        if self._check_prediction_integrity(probas) > 0:
            print("WARNING: Forest.predict_parallel(): {} of the {} predictions have probabilities above 1".format(self._check_prediction_integrity(probas), len(test_data)))
        return probas, classes, ids

    '''
    returns:
        probas - [(prob_rel, prob_irrel), ...]
            prob_rel - probability that this document is relevant
            prob_irrel - probability that this document is irrelevant
        classes - [relevance]
            relevance - '1' if relevant, '0' if irrelevant
        importances - [{feature:weight}]
            feature - a row of the df we used to predict
            weight - how important the feature was in the prediction, where positive means it nudged the prediction
                towards relevance and negative means it nudged the prediction towards irrelevance
    '''
    def predict_with_feat_imp(self, test_data):
        tree_results = [tree.predict_with_feat_imp(test_data) for tree in self.trees]
        scores = [list() for doc in tree_results[0][0]]
        for doc in range(len(tree_results[0][0])):
            for tree in tree_results:
                scores[doc].append(tree[0][doc])
        probas = [self.some_majority_count_metric(score) for score in scores]
        classes = ['1' if proba[0] > proba[1] else '0' for proba in probas]
        #sum up all of the importances
        importances = [{} for doc in tree_results[0][1]]
        for doc in range(len(importances)):
            for tree in tree_results:
                for feature in tree[1][doc].keys():
                    try:
                        importances[doc][feature] += tree[1][doc][feature]
                    except KeyError:
                        importances[doc][feature] = tree[1][doc][feature]
        #divide by num_trees
        for importance_dict in range(len(importances)):
            for feature in importances[importance_dict].keys():
                importances[importance_dict][feature] = importances[importance_dict][feature] / len(self.trees)
        return probas, classes, importances

    '''
    Used in incremental learning
    Randomly selects new data and features, creates the trees based on those, fits it, and returns it
    '''
    def retrain_tree(self):
        selected = self.random_select(self.train_data)
        tree = Tree(self.train_data, self.tree_depth,
                    0, selected[0], selected[1],
                    self.cat_features, user_input=self.input_type)
        tree.fit()
        return tree

    '''
    Used in incremental learning
    When a tree is good enough to be kept, instead of being retrained, the leaf nodes are updated
    This is done in a Tree member function
    params:
        tree - the Tree object that exists in self.trees to be updated
    '''
    def update_leaves(self, tree):
        tree.data = self.train_data
        tree.update(self.train_data, self.random_select(self.train_data)[0])

    '''
    Incremental learning functionality at the Forest level.
    Calculates a threshold based on the trees' out-of-bag errors
    Based on the threshold, decide which trees should be scrapped and replaced,
    and which should be updated.
    params:
        more_data - df, additional training data to update the forest. This is being appended to the
            existing train_data in the Forest.
    return:
        Null or we can say something like which trees are changed
    '''
    def update(self, more_data):
        # Update the data that the Forest can train on
        self.train_data = self.train_data.append(more_data)
        self.n_max_input = self.train_data.shape[0]
        # use average as placeholder function
        # Calculate the out-of-bag error threshold, a simple average for now
        thresh = 0
        for tree in self.trees:
            thresh += tree.calc_oob_error()
        thresh = thresh / len(self.trees)
        self.oob_threshold = thresh
        # Decide which trees are good enough to update, and which need to be replaced
        idx_trees_to_retrain = []  # indexes into self.trees for those to be replaced
        for i in range(len(self.trees)):
            if (self.trees[i].oob_error < thresh):
                idx_trees_to_retrain.append(i)
            else:
                self.update_leaves(self.trees[i])
        # To avoid errors from not having > 0 number of processes.
        # If there are no trees to retrain, just quit, since all
        # that need to be updated have been at this point.
        # TODO: possibly replace this with a try-except
        if idx_trees_to_retrain == []:
            return
        # Parallelized tree creation
        pool = multiprocessing.Pool( NUM_CORES )
        results = []
        for idx in idx_trees_to_retrain:
            results.append( pool.apply_async(self.retrain_tree) )
        retrained_trees = []
        for result in results:
            retrained_trees.append(result.get())
        pool.close()
        pool.join()
        # Place the returned trees into the slots previously marked for replacement
        for i in range(len(idx_trees_to_retrain)):
            self.trees[idx_trees_to_retrain[i]] = retrained_trees[i]

    '''
    Store the forest via pickling
    '''
    def store_rnf(self, file_path):
        f = open(file_path, 'wb')
        pickle.dump(self, f)
        f.close()

    '''
    Load the pickle, initialize the variable
    '''
    def load_rnf(self, file_path):
        f = open(file_path, 'rb')
        temp = pickle.load(f)
        f.close()
        # reinitialize some variables
        self.__init__(temp.train_data, temp.n_trees, temp.tree_depth,
                      temp.seed, temp.n_max_features, temp.n_max_input, temp.cat_features)
        # the part that matters: load the pre-trained then stored trees into the RNF object instance
        self.trees = temp.trees

    '''
    Returns a measure for which features are most important in the tree.
    returns:
        total - {feature:importance}, where importance is a measure of how important that feature is to the overall
            forest
    '''
    def get_feature_importances(self):
        total = {}
        for tree in self.trees:
            curr_importances = tree.get_mean_decrease_impurity()
            for feature in curr_importances.keys():
                try:
                    total[feature] += curr_importances[feature]
                except KeyError:
                    total[feature] = curr_importances[feature]
        return total
| 38.253444 | 173 | 0.623794 | 1,823 | 13,886 | 4.610532 | 0.178278 | 0.02677 | 0.026294 | 0.01047 | 0.389411 | 0.31862 | 0.284711 | 0.239262 | 0.22677 | 0.218441 | 0 | 0.008334 | 0.291445 | 13,886 | 362 | 174 | 38.359116 | 0.845919 | 0.101181 | 0 | 0.34 | 0 | 0 | 0.020794 | 0.002703 | 0 | 0 | 0 | 0.002762 | 0 | 1 | 0.08 | false | 0 | 0.155 | 0.005 | 0.305 | 0.005 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f81f91411e46cbc90a1adf86e5f51770878793e | 478 | py | Python | src/unicon/plugins/iosxe/cat3k/ewlc/service_implementation.py | TestingBytes/unicon.plugins | 0600956d805deb4fd790aa3ef591c5d659e85de1 | [
"Apache-2.0"
] | 18 | 2019-11-23T23:14:53.000Z | 2022-01-10T01:17:08.000Z | src/unicon/plugins/iosxe/cat3k/ewlc/service_implementation.py | TestingBytes/unicon.plugins | 0600956d805deb4fd790aa3ef591c5d659e85de1 | [
"Apache-2.0"
] | 12 | 2020-11-09T20:39:25.000Z | 2022-03-22T12:46:59.000Z | src/unicon/plugins/iosxe/cat3k/ewlc/service_implementation.py | TestingBytes/unicon.plugins | 0600956d805deb4fd790aa3ef591c5d659e85de1 | [
"Apache-2.0"
] | 32 | 2020-02-12T15:42:22.000Z | 2022-03-15T16:42:10.000Z | __author__ = 'Difu Hu <difhu@cisco.com>'
from unicon.eal.dialogs import Dialog
from unicon.plugins.generic.service_implementation import Copy
class IosXECat3kEwlcCopy(Copy):
    """Copy service for IOS-XE cat3k eWLC that appends a vrf option."""

    def call_service(self, reply=None, vrf=None, *args, **kwargs):
        """Run the copy service, adding `vrf <name>` to extra_options if given.

        Bug fix: the previous signature used `reply=Dialog([])`, a mutable
        default evaluated once at import time and shared across all calls.
        Defaulting to None and building a fresh Dialog per call is
        backward-compatible and avoids cross-call state leakage.
        """
        if reply is None:
            reply = Dialog([])
        if vrf is not None:
            kwargs['extra_options'] = kwargs.setdefault('extra_options', '') \
                                      + ' vrf {}'.format(vrf)
        super().call_service(reply=reply, *args, **kwargs)
| 36.769231 | 78 | 0.631799 | 55 | 478 | 5.327273 | 0.636364 | 0.068259 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002725 | 0.232218 | 478 | 12 | 79 | 39.833333 | 0.79564 | 0 | 0 | 0 | 0 | 0 | 0.121339 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.222222 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f825aa8cae9c9fa95661f627e9ee7f1ef83ac6a | 1,850 | py | Python | Python/main.py | Johan-Mi/SudokuSolver | 9d91ea1503b6bf986d74f7fff6d541b0ac541e5a | [
"WTFPL"
] | 1 | 2020-06-24T10:51:31.000Z | 2020-06-24T10:51:31.000Z | Python/main.py | Johan-Mi/SudokuSolver | 9d91ea1503b6bf986d74f7fff6d541b0ac541e5a | [
"WTFPL"
] | null | null | null | Python/main.py | Johan-Mi/SudokuSolver | 9d91ea1503b6bf986d74f7fff6d541b0ac541e5a | [
"WTFPL"
] | null | null | null | #!/usr/bin/env python3
"""This module solves sudokus."""
import sys
def print_board(board):
    """Print a 9x9 grid of digits as a sudoku board (0 renders as a blank)."""
    for row_idx in range(9):
        for col_idx in range(9):
            # 0 marks an empty cell and is drawn as a space.
            print(board[row_idx][col_idx] or " ", end="")
            if col_idx in (2, 5):
                print("│", end="")
        print()
        if row_idx in (2, 5):
            print("───┼───┼───")
    print()
def solve(board, x_pos, y_pos):
    """Solve the sudoku in-place by backtracking, starting at (x_pos, y_pos).

    Returns True once the board holds a complete valid assignment, False when
    no digit fits the current cell (causing the caller to backtrack).
    """
    # Skip forward past cells that are already filled in.
    while board[y_pos][x_pos]:
        if x_pos == 8 and y_pos == 8:
            return True  # last cell is filled: the board is solved
        x_pos = (x_pos + 1) % 9
        if x_pos == 0:
            y_pos += 1
    # Remove digits already used in this row, this column, and this 3x3 box.
    box_top = y_pos - y_pos % 3
    box_left = x_pos - x_pos % 3
    candidates = set(range(1, 10))
    for k in range(9):
        candidates.discard(board[y_pos][k])
        candidates.discard(board[k][x_pos])
        candidates.discard(board[box_top + k % 3][box_left + k // 3])
    next_x = (x_pos + 1) % 9
    next_y = y_pos + (x_pos == 8)
    at_last_cell = x_pos == 8 and y_pos == 8
    for digit in candidates:
        board[y_pos][x_pos] = digit
        if at_last_cell or solve(board, next_x, next_y):
            return True
        board[y_pos][x_pos] = 0  # dead end: undo and try the next digit
    return False
def main():
    """Reads a sudoku board from a specified file and solves it."""
    if len(sys.argv) != 2:
        sys.exit(f"Usage: {sys.argv[0]} (name of sudoku file)")
    with open(sys.argv[1]) as sudoku_file:
        file_content = sudoku_file.read()
    board = [[None for i in range(9)] for j in range(9)]
    for i in range(9):
        for j in range(9):
            # Each board row occupies 10 characters in the file: 9 cells plus
            # a newline, hence the i * 10 stride.
            curr_char = file_content[i * 10 + j]
            # Space, '.', '-' and '_' all denote an empty cell.
            board[i][j] = 0 if curr_char in " .-_" else int(curr_char)
    print_board(board)
    if solve(board, 0, 0):
        print_board(board)
    else:
        sys.exit("No solution found")


if __name__ == "__main__":
    main()
| 24.666667 | 75 | 0.525946 | 297 | 1,850 | 3.154882 | 0.279461 | 0.059765 | 0.059765 | 0.046958 | 0.214514 | 0.108858 | 0.108858 | 0.108858 | 0.076841 | 0.076841 | 0 | 0.032958 | 0.327568 | 1,850 | 74 | 76 | 25 | 0.710611 | 0.119459 | 0 | 0.22 | 0 | 0 | 0.052239 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.06 | false | 0 | 0.02 | 0 | 0.14 | 0.16 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f84d4194848a00cc5edea9853d65d36daa0c279 | 291 | py | Python | 1-100/91-100/96-uniqueBST/uniqueBST.py | xuychen/Leetcode | c8bf33af30569177c5276ffcd72a8d93ba4c402a | [
"MIT"
] | null | null | null | 1-100/91-100/96-uniqueBST/uniqueBST.py | xuychen/Leetcode | c8bf33af30569177c5276ffcd72a8d93ba4c402a | [
"MIT"
] | null | null | null | 1-100/91-100/96-uniqueBST/uniqueBST.py | xuychen/Leetcode | c8bf33af30569177c5276ffcd72a8d93ba4c402a | [
"MIT"
] | null | null | null | class Solution(object):
def numTrees(self, n):
"""
:type n: int
:rtype: int
"""
dp = [0] * (n + 1)
dp[0] = 1
for i in range(1, n+1):
for j in range(i):
dp[i] += dp[j] * dp[i-j-1]
return dp[-1] | 19.4 | 42 | 0.364261 | 42 | 291 | 2.52381 | 0.452381 | 0.056604 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.051613 | 0.467354 | 291 | 15 | 43 | 19.4 | 0.632258 | 0.082474 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f864cbb03ff4db3d97632e2a7c00973346e326e | 680 | py | Python | question_classification/utils.py | XlightwolfX/DL_NLP_project | fd7a8090735f167a4b53ee0a9a4112d489a6806f | [
"MIT"
] | null | null | null | question_classification/utils.py | XlightwolfX/DL_NLP_project | fd7a8090735f167a4b53ee0a9a4112d489a6806f | [
"MIT"
] | 6 | 2020-10-06T09:02:38.000Z | 2020-10-06T09:02:39.000Z | question_classification/utils.py | XlightwolfX/DL_NLP_project | fd7a8090735f167a4b53ee0a9a4112d489a6806f | [
"MIT"
] | 1 | 2020-10-06T13:32:44.000Z | 2020-10-06T13:32:44.000Z | import os
import numpy as np
def get_last_checkpoint_path(checkpoint_dir):
    """Return path of the latest checkpoint in a given checkpoint directory.

    Checkpoint stems are expected to look like ``Epoch[<e>]-Step[<s>]``;
    the newest one is the highest epoch, ties broken by step.  Returns
    None when no checkpoint is present.
    """
    candidates = list(checkpoint_dir.glob('Epoch*'))
    if not candidates:
        return None

    def bracketed_int(token):
        # "Epoch[12]" -> 12, "Step[345]" -> 345
        return int(token.split('[')[-1][:-1])

    # Each stem must split into exactly (epoch part, step part).
    split_stems = [path.stem.split('-') for path in candidates]
    epochs = [bracketed_int(epoch_part) for epoch_part, _ in split_stems]
    steps = [bracketed_int(step_part) for _, step_part in split_stems]

    # np.lexsort applies its last key as the primary sort: epoch first,
    # then step; the final index is therefore the newest checkpoint.
    newest = np.lexsort((steps, epochs))[-1]
    return candidates[newest]
0f867f5332b43a60581e9ff6befc93bdb777808d | 6,181 | py | Python | autocomplete_light/tests/widget.py | spookylukey/django-autocomplete-light | 72bf10c441a7a89735eaa23390abea625c747559 | [
"MIT"
] | null | null | null | autocomplete_light/tests/widget.py | spookylukey/django-autocomplete-light | 72bf10c441a7a89735eaa23390abea625c747559 | [
"MIT"
] | null | null | null | autocomplete_light/tests/widget.py | spookylukey/django-autocomplete-light | 72bf10c441a7a89735eaa23390abea625c747559 | [
"MIT"
] | null | null | null | import unittest
import os
import time
from django.test import LiveServerTestCase
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.firefox.webdriver import WebDriver
from selenium.webdriver.support import ui
from selenium.common.exceptions import NoSuchElementException
"""
Selenium tests (which rely on waits) stopped working on Travis CI after an
update, for reasons that remain unknown. Considerable time was spent trying
to debug the failure, including asking repeatedly on #travis and contacting
Travis support (which never replied), so these tests are skipped on Travis.
"""
@unittest.skipIf(os.environ.get('TRAVIS', False), 'No travis support')
class WidgetTestCase(LiveServerTestCase):
    """Browser-level (Selenium) tests for the autocomplete widget.

    Drives a shared Firefox instance against Django's live test server and
    exercises the admin add/change forms: typing into the autocomplete
    input, picking choices by mouse and keyboard, and removing a selection.
    """

    fixtures = ['test.json', 'initial_data.json']

    @classmethod
    def setUpClass(cls):
        # One browser shared by the whole test case; created before the
        # live server starts in the parent implementation.
        cls.selenium = WebDriver()
        super(WidgetTestCase, cls).setUpClass()

    @classmethod
    def tearDownClass(cls):
        super(WidgetTestCase, cls).tearDownClass()
        cls.selenium.quit()

    def autocomplete_visible(self):
        """Return True when the autocomplete dropdown is displayed."""
        try:
            return self.autocomplete_element().is_displayed()
        except NoSuchElementException:
            # No dropdown in the DOM at all counts as "not visible".
            return False

    def autocomplete_element(self):
        """Return the dropdown container element."""
        return self.selenium.find_element_by_css_selector(
            '.yourlabs-autocomplete')

    def autocomplete_hilighted_choice_element(self):
        """Return the choice currently highlighted via the keyboard."""
        return self.selenium.find_element_by_css_selector(
            '.yourlabs-autocomplete [data-value].hilight')

    def autocomplete_choice_elements(self):
        """Return every selectable choice in the dropdown."""
        return self.selenium.find_elements_by_css_selector(
            '.yourlabs-autocomplete [data-value]')

    def input_element(self, id=None):
        """Return the autocomplete text input; defaults to default_id."""
        if id is None: id = self.default_id
        return self.selenium.find_element_by_css_selector('#' + id)

    def input_visible(self, id=None):
        """Return True when the text input with *id* is displayed."""
        if id is None: id = self.default_id
        # Bug fix: previously passed id=None through, so the resolved id
        # was discarded and the default input was always checked.
        return self.input_element(id=id).is_displayed()

    def widget_choice_elements(self, id=None):
        """Return the choices already selected on the widget's deck."""
        if id is None: id = self.default_id
        return self.selenium.find_elements_by_css_selector(
            '#%s .deck [data-value]' % id.replace('_text', '-wrapper'))

    def widget_select_element(self, id=None):
        """Return the hidden <select> element backing the widget."""
        return self.widget_element(id).find_element_by_tag_name('select')

    def widget_element(self, id=None):
        """Return the widget wrapper element for *id*."""
        if id is None: id = self.default_id
        return self.selenium.find_element_by_css_selector(
            '#%s' % id.replace('_text', '-wrapper'))

    def save(self):
        """Click the admin form's save button."""
        return self.selenium.find_element_by_css_selector(
            'input[name=_save]').click()

    def login(self):
        """Log into the admin with the fixture test user."""
        self.selenium.get('%s%s' % (self.live_server_url, '/admin/'))
        self.wait.until(lambda selenium: selenium.find_element_by_name("username"))
        username_input = self.selenium.find_element_by_name("username")
        username_input.send_keys('test')
        password_input = self.selenium.find_element_by_name("password")
        password_input.send_keys('test')
        self.selenium.find_element_by_xpath('//input[@value="Log in"]').click()
        # The "user-tools" block only renders for authenticated users.
        self.wait.until(lambda selenium: selenium.find_element_by_id("user-tools"))

    def test_login(self):
        """End-to-end scenario: select a choice, save, reopen, remove it."""
        self.wait = ui.WebDriverWait(self.selenium, 120)

        def wait_for_selector(selector, displayed=None):
            # Block until *selector* exists and, if requested, matches the
            # given visibility state.
            def f(selenium):
                element = selenium.find_element_by_css_selector(selector)
                if displayed is not None:
                    return element.is_displayed() == displayed
                return True
            self.wait.until(f)

        self.login()

        self.default_id = 'id_city_text'
        self.selenium.get('%s%s' % (self.live_server_url, '/admin/fk_autocomplete/address/add'))
        wait_for_selector("[data-widget-ready]")
        self.input_element().send_keys('par')
        wait_for_selector(".yourlabs-autocomplete", True)
        self.assertEqual(20, len(self.autocomplete_choice_elements()))

        # select paris
        self.autocomplete_choice_elements()[1].click()
        wait_for_selector(".yourlabs-autocomplete", False)
        wait_for_selector("#%s" % self.default_id, False)
        selected = self.widget_choice_elements()
        self.assertEqual(1, len(selected))
        self.assertTrue('Paris' in selected[0].text)
        self.assertTrue('France' in selected[0].text)

        self.save()
        wait_for_selector('#changelist')
        # Open the first changelist row to edit the saved address
        # (the click returns None, so nothing useful to keep).
        self.selenium.find_element_by_css_selector(
            '#changelist tr.row1 a').click()
        wait_for_selector("[data-widget-ready]")

        # remove the selected choice again
        selected = self.widget_choice_elements()
        selected[0].find_element_by_css_selector('.remove').click()
        self.assertFalse(self.autocomplete_visible())
        self.assertTrue(self.input_visible())

        self.input_element().send_keys('par')
        wait_for_selector('.yourlabs-autocomplete', True)

        self.keyboard_test()
        self.assertEqual('',
            self.input_element().get_attribute('value'))

    def keyboard_test(self):
        """Walk the dropdown with arrow keys; TAB commits each selection."""
        tests = (
            {'key': Keys.ARROW_DOWN, 'expected': 1},
            {'key': Keys.ARROW_DOWN, 'expected': 2},
            {'key': Keys.ARROW_UP, 'expected': 1},
            {'key': Keys.ARROW_UP, 'expected': 0},
            # Negative expectations index from the end of the choice list.
            {'key': Keys.ARROW_UP, 'expected': -1},
            {'key': Keys.ARROW_UP, 'expected': -2},
        )

        for test in tests:
            self.input_element().send_keys(test['key'])
            self.assertEqual(
                self.autocomplete_choice_elements()[test['expected']].id,
                self.autocomplete_hilighted_choice_element().id
            )

            self.input_element().send_keys(Keys.TAB)
            self.assertEqual(
                self.autocomplete_choice_elements()[test['expected']].get_attribute('data-value'),
                self.widget_select_element().get_attribute('value'),
            )
| 33.592391 | 96 | 0.624656 | 709 | 6,181 | 5.222849 | 0.22567 | 0.045369 | 0.049149 | 0.068053 | 0.446125 | 0.356198 | 0.331353 | 0.301107 | 0.233864 | 0.195517 | 0 | 0.003721 | 0.260799 | 6,181 | 183 | 97 | 33.775956 | 0.806741 | 0.003074 | 0 | 0.205882 | 0 | 0 | 0.105218 | 0.028217 | 0 | 0 | 0 | 0 | 0.066176 | 1 | 0.125 | false | 0.014706 | 0.058824 | 0.036765 | 0.294118 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f86a87da2fd43c9dba792912da48b3e0f5e52ac | 1,940 | py | Python | PyEFVLib/MeshioSaver.py | Gustavo029/GridReader | 7edc950c469b06c3de0093e5fd8bf6cfd59af354 | [
"MIT"
] | 1 | 2022-01-26T17:14:54.000Z | 2022-01-26T17:14:54.000Z | PyEFVLib/MeshioSaver.py | Gustavo029/GridReader | 7edc950c469b06c3de0093e5fd8bf6cfd59af354 | [
"MIT"
] | null | null | null | PyEFVLib/MeshioSaver.py | Gustavo029/GridReader | 7edc950c469b06c3de0093e5fd8bf6cfd59af354 | [
"MIT"
] | 3 | 2020-10-26T07:11:19.000Z | 2022-01-26T17:14:42.000Z | import numpy as np
import subprocess, os, sys
import meshio
from PyEFVLib.Saver import Saver
from PyEFVLib.Shape import Triangle, Quadrilateral, Tetrahedron, Hexahedron, Prism, Pyramid
class MeshioSaver(Saver):
	"""Saver backend that writes PyEFVLib results through the meshio library."""
	# line triangle quad tetra pyramid wedge hexahedron
	# Formats:
	# msh mdpa ply stl vtk vtu xdmf xmf h5m med inp mesh meshb bdf fem nas obj off post post.gz dato dato.gz su2 svg dat tec ugrid wkt
	def __init__(self, grid, outputPath, basePath, extension, fileName="Results", **kwargs):
		"""Initialize the base Saver and ensure the output directory exists."""
		Saver.__init__(self, grid, outputPath, basePath, extension, fileName)

		if not os.path.exists(outputPath):
			os.makedirs( outputPath )

	def finalize(self):
		"""Build meshio points/cells arrays from the grid and write the fields.

		Dispatches to xdmfWrite for time series output or meshioWrite for a
		single snapshot, then marks the saver as finalized.
		"""
		self.points = np.array( [v.getCoordinates() for v in self.grid.vertices] )

		meshioShapes = ["triangle", "quad", "tetra", "pyramid", "wedge", "hexahedron"]
		pyEFVLibShapes = [Triangle, Quadrilateral, Tetrahedron, Pyramid, Prism, Hexahedron]
		# Group element connectivity (vertex handles) per shape class.
		self.cells = [ ( shape , np.array([[vertex.handle for vertex in element.vertices] for element in self.grid.elements if element.shape == shapeClass], dtype=np.uint64) ) for shape, shapeClass in zip(meshioShapes, pyEFVLibShapes) ]
		# Drop shapes with no elements in this grid.
		self.cells = [ cell for cell in self.cells if cell[1].size ]

		# Two separate writers: xdmf is the only format that supports time series data
		if self.extension == "xdmf":
			self.xdmfWrite()
		else:
			self.meshioWrite()
		self.finalized = True

	def meshioWrite(self):
		"""Write a single snapshot (last recorded value of every field)."""
		data = { fieldName : self.fields[fieldName][-1] for fieldName in self.fields }
		meshioMesh = meshio.Mesh( self.points, self.cells, point_data=data )
		meshioMesh.write( self.outputPath )

	def xdmfWrite(self):
		"""Write the full time series, one data record per time step."""
		with meshio.xdmf.TimeSeriesWriter(self.outputPath) as writer:
			writer.write_points_cells(self.points, self.cells)
			for idx, timeStep, in enumerate(self.timeSteps):
				data = { fieldName : self.fields[fieldName][idx] for fieldName in self.fields }
				writer.write_data(timeStep, point_data=data)
0f86f7e6704e2df15b1c9a3cc8842308d00b9cce | 7,971 | py | Python | tests/test_ogrconfig.py | lindauju/qgis-interlis-plugin | 6a64355b582f707b852ba6945f5061ad8f3497ba | [
"MIT"
] | null | null | null | tests/test_ogrconfig.py | lindauju/qgis-interlis-plugin | 6a64355b582f707b852ba6945f5061ad8f3497ba | [
"MIT"
] | 6 | 2018-11-12T13:42:23.000Z | 2022-02-15T08:56:06.000Z | tests/test_ogrconfig.py | lindauju/qgis-interlis-plugin | 6a64355b582f707b852ba6945f5061ad8f3497ba | [
"MIT"
] | 4 | 2018-03-31T17:19:26.000Z | 2019-02-12T19:10:07.000Z | from ogrtools.ogrtransform.ogrconfig import OgrConfig
from ogrtools.interlis.ilismeta import prettify
import json
def test_shape_config():
cfg = OgrConfig(ds="tests/data/osm/railway.shp")
cfgjson = cfg.generate_config(dst_format='PostgreSQL')
expected = """{
"//": "OGR transformation configuration",
"src_format": "ESRI Shapefile",
"dst_format": "PostgreSQL",
"dst_dsco": {},
"dst_lco": {
"SCHEMA": "public"
},
"layers": {
"railway": {
"src_layer": "railway",
"fields": {
"type": {
"src": "type",
"type": "String",
"width": 255
},
"osm_id": {
"src": "osm_id",
"type": "Integer64",
"width": 11
},
"lastchange": {
"src": "lastchange",
"type": "Date",
"width": 10
},
"name": {
"src": "name",
"type": "String",
"width": 255
},
"keyvalue": {
"src": "keyvalue",
"type": "String",
"width": 80
}
},
"geom_fields": {},
"geometry_type": "LineString"
}
}
}"""
print(cfgjson)
assert sorted(expected) == sorted(cfgjson)
def test_ili_config():
cfg = OgrConfig(
ds="./tests/data/ili/roads23.xtf,./tests/data/ili/RoadsExdm2ien.imd")
cfgjson = cfg.generate_config(dst_format='PostgreSQL', srs=21781)
expected = {
'src_layer': 'RoadsExdm2ien.RoadsExtended.StreetAxis',
'geom_fields': {'geometry': {'src': 'Geometry',
'type': 'MultiLineString',
'srs': 21781}},
'fields': {'precision': {'src': 'Precision',
'type': 'String'},
'tid': {'src': 'TID',
'type': 'String'},
'street': {'src': 'Street',
'type': 'String'}},
'geometry_type': 'MultiLineString'
}
print(cfgjson)
assert json.loads(cfgjson)["layers"]["roadsexdm2ien_roadsextended_streetaxis"] == expected
def test_np():
cfg = OgrConfig(ds="tests/data/np/NP_Example.xtf,tests/data/np/NP_73_CH_de_ili2.imd",
model="tests/data/np/NP_73_CH_de_ili2.imd")
cfgjson = cfg.generate_config(dst_format='PostgreSQL')
expected = {
'src_layer': 'Nutzungsplanung.Nutzungsplanung.Grundnutzung_Zonenflaeche',
'fields': {'herkunft': {'src': 'Herkunft',
'type': 'String'},
'zonentyp_1': {'src': 'Zonentyp_1',
'type': 'String'},
'tid': {'src': 'TID',
'type': 'String'},
'bemerkungen': {'src': 'Bemerkungen',
'type': 'String'},
'mutation': {'src': 'Mutation',
'type': 'String'},
'qualitaet': {'src': 'Qualitaet',
'type': 'String'}},
'geom_fields': {'geometrie': {'src': 'Geometrie',
'type': 'Polygon'}},
'geometry_type': 'Polygon'
}
print(cfgjson)
assert json.loads(cfgjson)["layers"]["n0_grundnutzung_zonenflaeche'"] == expected
def test_layer_info():
cfg = OgrConfig(ds="./tests/data/ili/roads23.xtf,./tests/data/ili/RoadsExdm2ien.imd",
model="./tests/data/ili/RoadsExdm2ien.imd")
assert not cfg.is_loaded()
assert cfg.layer_names() == []
assert cfg.enum_names() == []
assert cfg.layer_infos() == []
assert cfg.enum_infos() == []
cfg.generate_config(dst_format='PostgreSQL')
assert cfg.is_loaded()
print(cfg.layer_names())
assert "roadsexdm2ien_roadsextended_roadsign" in cfg.layer_names()
print(cfg.enum_names())
assert "_type" in str(cfg.enum_names())
print(cfg.layer_infos())
print(cfg.enum_infos())
assert {'name': 'roadsexdm2ien_roadsextended_roadsign',
'geom_field': 'position'} in cfg.layer_infos()
assert {'name': 'roadsexdm2ben_roads_lattrs'} in cfg.layer_infos()
assert '_precision' in str(cfg.enum_infos())
def test_enums():
cfg = OgrConfig(ds="./tests/data/ili/roads23.xtf,./tests/data/ili/RoadsExdm2ien.imd",
model="./tests/data/ili/RoadsExdm2ien.imd")
cfgjson = cfg.generate_config(dst_format='PostgreSQL')
expected = {
'values': [{'id': 0,
'enumtxt': 'welldefined',
'enum': 'welldefined'},
{'id': 1, 'enumtxt': 'fuzzy',
'enum': 'fuzzy'}],
'src_name': 'RoadsExdm2ben.Roads.LAttrs.LArt'
}
assert expected in json.loads(cfgjson)["enums"].values()
def test_vrt():
cfg = OgrConfig(ds="./tests/data/ili/roads23.xtf,./tests/data/ili/RoadsExdm2ien.imd",
config="./tests/data/ili/RoadsExdm2ien.cfg")
vrt = prettify(cfg.generate_vrt())
expected_fields = ['''<Field name="type" src="Type" type="String"/>''',
'''<Field name="tid" src="TID" type="String"/>''']
print(vrt)
assert '''<SrcLayer>RoadsExdm2ien.RoadsExtended.RoadSign</SrcLayer>''' in vrt
assert '''<GeometryType>wkbPoint</GeometryType>''' in vrt
for field in expected_fields:
assert field in vrt
def test_reverse_vrt():
cfg = OgrConfig(ds="./tests/data/ili/roads23.xtf,./tests/data/ili/RoadsExdm2ien.imd",
config="./tests/data/ili/RoadsExdm2ien.cfg")
vrt = prettify(cfg.generate_reverse_vrt())
expected_fields = ['''<Field name="Type" src="type"/>''',
'''<Field name="TID" src="tid"/>''']
print(vrt)
assert '''<SrcLayer>roadsign</SrcLayer>''' in vrt
assert '''<GeometryType>wkbPoint</GeometryType>''' in vrt
for field in expected_fields:
assert field in vrt
def test_multigeom_vrt():
cfg = OgrConfig(ds="./tests/data/ch.bazl/ch.bazl.sicherheitszonenplan.oereb_20131118.xtf,./tests/data/ch.bazl/ch.bazl.sicherheitszonenplan.oereb_20131118.imd",
config="./tests/data/ch.bazl/ch.bazl.sicherheitszonenplan.oereb_20131118.cfg")
vrt = prettify(cfg.generate_vrt())
expected_fields = ['''<Field name="zustaendigestelle" src="ZustaendigeStelle" type="String"/>''',
'''<Field name="eigentumsbeschraenkung" src="Eigentumsbeschraenkung" type="String"/>''',
'''<Field name="rechtsstatus" src="Rechtsstatus" type="String"/>''',
'''<Field name="tid" src="TID" type="String"/>''',
'''<Field name="publiziertab" src="publiziertAb" type="String"/>''',
'''<Field name="metadatengeobasisdaten" src="MetadatenGeobasisdaten" type="String"/>''']
expected_geom_fields = ['''<GeometryField field="Linie" name="linie">''',
'''<GeometryType>wkbMultiLineString</GeometryType>''',
'''<SRS>EPSG:21781</SRS>''',
'''</GeometryField>''',
'''<GeometryField field="Punkt" name="punkt">''',
'''<GeometryType>wkbPoint</GeometryType>''',
'''<SRS>EPSG:21781</SRS>''',
'''</GeometryField>''',
'''<GeometryField field="Flaeche" name="flaeche">''',
'''<GeometryType>wkbPolygon</GeometryType>''',
'''<SRS>EPSG:21781</SRS>''',
'''</GeometryField>''']
print(vrt)
assert '''<OGRVRTLayer name="oerebkrm09trsfr_transferstruktur_geometrie">''' in vrt
assert '''<SrcLayer>OeREBKRM09trsfr.Transferstruktur.Geometrie</SrcLayer>''' in vrt
for field in expected_fields:
assert field in vrt
for field in expected_geom_fields:
assert field in vrt
| 37.074419 | 163 | 0.537448 | 743 | 7,971 | 5.633917 | 0.187079 | 0.045151 | 0.040134 | 0.053751 | 0.455805 | 0.425705 | 0.390827 | 0.35786 | 0.306498 | 0.223125 | 0 | 0.018495 | 0.294568 | 7,971 | 214 | 164 | 37.247664 | 0.725947 | 0 | 0 | 0.264045 | 0 | 0.005618 | 0.423691 | 0.194623 | 0 | 0 | 0 | 0 | 0.140449 | 1 | 0.044944 | false | 0 | 0.016854 | 0 | 0.061798 | 0.05618 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f87c3093954525338b55cec5a0b7f44a4b22f55 | 2,001 | py | Python | predict_review_rating.py | bcarlier75/Text_classification_of_review | 3cdeb2ce460f71e58b5da03daf2a2230782ef5b6 | [
"MIT"
] | 1 | 2019-12-27T10:45:46.000Z | 2019-12-27T10:45:46.000Z | predict_review_rating.py | bcarlier75/Text_classification_of_review | 3cdeb2ce460f71e58b5da03daf2a2230782ef5b6 | [
"MIT"
] | null | null | null | predict_review_rating.py | bcarlier75/Text_classification_of_review | 3cdeb2ce460f71e58b5da03daf2a2230782ef5b6 | [
"MIT"
] | null | null | null | import fasttext
import sys
from pathlib import Path
import model_create
import model_utils
def predict_rating(review: str, model_path: str):
    """Predict the rating of the review and print it."""
    # Normalize the raw review text to the training-data format.
    cleaned = model_utils.strip_formatting(review)
    # Load the trained fasttext classifier and request the top-1 label.
    model = fasttext.load_model(model_path)
    label, probability = model.predict(cleaned, 1)
    # Drop the 9-character "__label__" prefix and the last two characters
    # of the label to isolate the star count.
    stars = int(label[0][9:-2])
    print("{} ({}% confidence)".format("✰" * stars, int(probability * 100)))
    print(f"{review}")
    print()
    return
def main():
    """Entry point: build the model if needed, then rate the review in argv.

    Splits the raw dataset into train/test files when they are missing,
    trains the fasttext model when no saved model exists, and finally
    predicts the rating of the review passed as the single CLI argument.
    """
    # Paths and split configuration.
    dataset_folder = Path("../dataset")
    reviews_path = dataset_folder / "review.json"
    training_data_path = dataset_folder / "fasttext_dataset_training.txt"
    test_data_path = dataset_folder / "fasttext_dataset_test.txt"
    model_path = "../models/reviews_model_ngrams2.bin"
    percent_test_data = 0.10

    # Only build the model when no saved model exists yet.
    if model_utils.test_file_validity(model_path) == 0:
        # Split the raw dataset first if either split file is missing.
        if model_utils.test_file_validity(training_data_path) == 0 \
                or model_utils.test_file_validity(test_data_path) == 0:
            print("--- Start the splitting of the dataset, this may take a while ---")
            model_create.split_data(reviews_path, training_data_path, test_data_path, percent_test_data)
            print("--- Done ---")
        # Create model and display model evaluation metrics on test_data.
        model_create.process_training(training_data_path, test_data_path, model_path)

    if len(sys.argv) == 2:
        predict_rating(sys.argv[1], model_path)
    else:
        # Fixed: the usage message previously misspelled the script name
        # as "predit_review_rating.py".
        print('Usage: python predict_review_rating.py "My review on a restaurant, bar, hotel ..."')
    return
if __name__ == "__main__":
main()
| 36.381818 | 104 | 0.697151 | 271 | 2,001 | 4.881919 | 0.354244 | 0.048375 | 0.048375 | 0.040816 | 0.199546 | 0.179894 | 0 | 0 | 0 | 0 | 0 | 0.010069 | 0.205897 | 2,001 | 54 | 105 | 37.055556 | 0.821901 | 0.190405 | 0 | 0.057143 | 0 | 0 | 0.189172 | 0.069695 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057143 | false | 0 | 0.142857 | 0 | 0.257143 | 0.171429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f887fdc0aab3d423faaf6e000d71a7c07f75d2c | 3,853 | py | Python | lte/gateway/python/magma/pipelined/bridge_util.py | sarathbrp/magma | 2641929b1adbaaabf344b8f0e23fc442afcdae4a | [
"BSD-3-Clause"
] | null | null | null | lte/gateway/python/magma/pipelined/bridge_util.py | sarathbrp/magma | 2641929b1adbaaabf344b8f0e23fc442afcdae4a | [
"BSD-3-Clause"
] | null | null | null | lte/gateway/python/magma/pipelined/bridge_util.py | sarathbrp/magma | 2641929b1adbaaabf344b8f0e23fc442afcdae4a | [
"BSD-3-Clause"
] | null | null | null | """
Copyright (c) 2018-present, Facebook, Inc.
All rights reserved.
This source code is licensed under the BSD-style license found in the
LICENSE file in the root directory of this source tree. An additional grant
of patent rights can be found in the PATENTS file in the same directory.
"""
import subprocess
class DatapathLookupError(Exception):
    """Raised when the datapath id of a bridge cannot be determined."""
    pass
class BridgeTools:
    """
    BridgeTools

    Use ovs-vsctl commands to get bridge info and setup bridges for testing.
    """

    @staticmethod
    def get_datapath_id(bridge_name):
        """Return the integer datapath id of *bridge_name*.

        Calls ovs-vsctl, decodes the output, strips the trailing newline
        and the surrounding quotes, then parses the remaining hex string.
        Needed when two bridges are set up — gtp_br0 (main bridge) and a
        testing bridge — and apps must target a specific datapath.
        """
        try:
            raw = subprocess.check_output(["ovs-vsctl", "get", "bridge",
                                           bridge_name, "datapath_id"])
            quoted = str(raw, 'utf-8').strip()
            dp_id = int(quoted[1:-1], 16)
        except subprocess.CalledProcessError as e:
            raise DatapathLookupError(
                'Error: ovs-vsctl bridge({}) datapath id lookup: {}'.format(
                    bridge_name, e
                )
            )
        return dp_id

    @staticmethod
    def create_bridge(bridge_name, iface_name):
        """Create a simple bridge and set up an interface (unit-test helper)."""
        setup_cmds = [
            ["ovs-vsctl", "add-br", bridge_name],
            ["ovs-vsctl", "set", "bridge", bridge_name,
             "protocols=OpenFlow10,OpenFlow13,OpenFlow14",
             "other-config:disable-in-band=true"],
            ["ovs-vsctl", "set-controller", bridge_name,
             "tcp:127.0.0.1:6633", "tcp:127.0.0.1:6654"],
            ["ifconfig", iface_name, "192.168.1.1/24"],
        ]
        for cmd in setup_cmds:
            subprocess.Popen(cmd).wait()

    @staticmethod
    def destroy_bridge(bridge_name):
        """Remove the bridge (called when a unit test finishes)."""
        subprocess.Popen(["ovs-vsctl", "del-br", bridge_name]).wait()

    @staticmethod
    def get_controllers_for_bridge(bridge_name):
        """Return the non-empty controller addresses of the bridge."""
        raw = subprocess.check_output(
            ["ovs-vsctl", "get-controller", bridge_name],
        )
        entries = raw.decode("utf-8").replace(' ', '').split('\n')
        return [entry for entry in entries if entry]

    @staticmethod
    def add_controller_to_bridge(bridge_name, port_num):
        """Add a local TCP controller on *port_num*, keeping existing ones."""
        controllers = BridgeTools.get_controllers_for_bridge(bridge_name)
        new_ctlr = "tcp:127.0.0.1:{}".format(port_num)
        if new_ctlr in controllers:
            return
        controllers.append(new_ctlr)
        BridgeTools.set_controllers_for_bridge(bridge_name, controllers)

    @staticmethod
    def remove_controller_from_bridge(bridge_name, port_num):
        """Remove the local TCP controller on *port_num* from the bridge."""
        controllers = BridgeTools.get_controllers_for_bridge(bridge_name)
        controllers.remove('tcp:127.0.0.1:{}'.format(port_num))
        BridgeTools.set_controllers_for_bridge(bridge_name, controllers)

    @staticmethod
    def set_controllers_for_bridge(bridge_name, ctlr_list):
        """Point the bridge at exactly the controllers in *ctlr_list*."""
        subprocess.Popen(
            ["ovs-vsctl", "set-controller", bridge_name] + list(ctlr_list)
        ).wait()

    @staticmethod
    def get_flows_for_bridge(bridge_name, table_num=None):
        """Dump the bridge's flows, optionally restricted to one table."""
        dump_cmd = ["ovs-ofctl", "dump-flows", bridge_name]
        if table_num:
            dump_cmd.append("table=%s" % table_num)
        return subprocess.check_output(dump_cmd).decode('utf-8').split('\n')
| 36.349057 | 79 | 0.633273 | 476 | 3,853 | 4.92437 | 0.346639 | 0.093857 | 0.088737 | 0.056741 | 0.280717 | 0.273038 | 0.181314 | 0.162116 | 0.162116 | 0.162116 | 0 | 0.021336 | 0.257981 | 3,853 | 105 | 80 | 36.695238 | 0.798531 | 0.212821 | 0 | 0.193548 | 0 | 0 | 0.138917 | 0.025536 | 0 | 0 | 0 | 0 | 0 | 1 | 0.129032 | false | 0.016129 | 0.016129 | 0 | 0.241935 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f89095fa1377b3044343522ec75f6bcdf18de28 | 915 | py | Python | routes/router.py | allku/3xamplePythonFlask | 6545e28e1847fdc39ca21b86d7744aa26a084e67 | [
"MIT"
] | null | null | null | routes/router.py | allku/3xamplePythonFlask | 6545e28e1847fdc39ca21b86d7744aa26a084e67 | [
"MIT"
] | null | null | null | routes/router.py | allku/3xamplePythonFlask | 6545e28e1847fdc39ca21b86d7744aa26a084e67 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
from app import api, app
from controllers.index import *
from controllers.beercontroller import *
from controllers.locationcontroller import *
def define_routers():
    """Register every REST resource on the shared Api instance."""
    # (resource class, URL rule) pairs, registered in order.
    routes = (
        (Index, '/'),
        # GET all beers
        (BeersController, '/example/rest/v1/beers'),
        # GET, PUT and DELETE one beer by id
        (BeerControllerById, '/example/rest/v1/beer/<int:id>'),
        # POST beer (create a new beer)
        (BeerController, '/example/rest/v1/beer'),
        # GET one location by id
        (LocationControllerById, '/example/rest/v1/location/<int:id>'),
    )
    for resource, rule in routes:
        api.add_resource(resource, rule)
@app.errorhandler(404)
def invalid_route(e):
    """
    Return a custom 404 JSON response when no route matches.
    """
    app.logger.error('Route not found')
    payload = {'message': 'Route not found ' + str(e)}
    return jsonify(payload), 404
| 30.5 | 82 | 0.655738 | 117 | 915 | 5.068376 | 0.478632 | 0.05059 | 0.118044 | 0.033727 | 0.060708 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019635 | 0.220765 | 915 | 29 | 83 | 31.551724 | 0.812062 | 0.193443 | 0 | 0 | 0 | 0 | 0.206799 | 0.151558 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.25 | 0 | 0.4375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f895e0c35d3b78e4be8e04d5e58725e715584b7 | 850 | py | Python | src/datacamp_downloader/constants.py | pierolenzo/datacamp-downloader | dd15721f1a715cc1539cb1e23fc0fc36fd775950 | [
"MIT"
] | 107 | 2020-04-01T13:44:52.000Z | 2022-03-04T02:00:10.000Z | src/datacamp_downloader/constants.py | pierolenzo/datacamp-downloader | dd15721f1a715cc1539cb1e23fc0fc36fd775950 | [
"MIT"
] | 36 | 2020-04-04T23:57:12.000Z | 2022-03-24T17:02:13.000Z | src/datacamp_downloader/constants.py | pierolenzo/datacamp-downloader | dd15721f1a715cc1539cb1e23fc0fc36fd775950 | [
"MIT"
] | 40 | 2020-04-14T16:54:00.000Z | 2022-03-24T03:10:28.000Z | import tempfile
# Datacamp site endpoints used for login and session validation.
HOME_PAGE = "https://www.datacamp.com/"
LOGIN_URL = "https://www.datacamp.com/users/sign_in"
LOGIN_DETAILS_URL = "https://www.datacamp.com/api/users/signed_in"
# Cached session file stored in the OS temporary directory.
SESSION_FILE = tempfile.gettempdir() + "/.datacamp.v3"
PROFILE_URL = "https://www.datacamp.com/profile/{slug}"
# Campus/video API endpoints; the {placeholders} are filled in by callers.
COURSE_DETAILS_API = "https://campus-api.datacamp.com/api/courses/{id}/"
EXERCISE_DETAILS_API = "https://campus-api.datacamp.com/api/exercise/{id}"
VIDEO_DETAILS_API = "https://projector.datacamp.com/api/videos/{hash}"
PROGRESS_API = "https://campus-api.datacamp.com/api/courses/{course_id}/chapters/{chapter_id}/progress"
# Subtitle language codes mapped to their display names.
LANGMAP = {
    "en": "English",
    "zh": "Chinese simplified",
    "fr": "French",
    "de": "German",
    "it": "Italian",
    "ja": "Japanese",
    "ko": "Korean",
    "pt": "Portuguese",
    "ru": "Russian",
    "es": "Spanish",
}
| 31.481481 | 103 | 0.675294 | 111 | 850 | 5.018018 | 0.504505 | 0.157989 | 0.125673 | 0.136445 | 0.335727 | 0.217235 | 0.217235 | 0.217235 | 0 | 0 | 0 | 0.001333 | 0.117647 | 850 | 26 | 104 | 32.692308 | 0.741333 | 0 | 0 | 0 | 0 | 0.045455 | 0.58 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.045455 | 0 | 0.045455 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0f8e4bfc6fa9daba951f1f2db7aa9cf4ee3d2395 | 1,621 | py | Python | eosim/config.py | EarthObservationSimulator/eosim-gui | 3067026f5f32be214e9ec2c4461a734ad25bb6a4 | [
"Apache-2.0"
] | null | null | null | eosim/config.py | EarthObservationSimulator/eosim-gui | 3067026f5f32be214e9ec2c4461a734ad25bb6a4 | [
"Apache-2.0"
] | null | null | null | eosim/config.py | EarthObservationSimulator/eosim-gui | 3067026f5f32be214e9ec2c4461a734ad25bb6a4 | [
"Apache-2.0"
] | null | null | null | from tkinter import ttk
import os
from orbitpy.mission import Mission
import logging
logger = logging.getLogger(__name__)
""" A Mission instance used to store the mission configuration. This
object is imported and referenced across all the EOSim modules and hence
acts as a global variable for the entire EOSim software.
"""
mission = Mission()
class GuiStyle():
    """Central ttk styling and window-geometry constants for the GUI."""
    main_win_width = 900
    main_win_height = int(main_win_width * 9 / 21)  # 21:9 aspect ratio
    main_window_geom = "{}x{}".format(main_win_width, main_win_height)
    child_window_geom = "500x500"

    def __init__(self):
        gui_style = ttk.Style()
        # (style name, configure options) pairs applied in order.
        style_specs = (
            ('My.TButton', {'foreground': '#334353'}),
            ('messagearea.TFrame', {'background': 'white', 'relief': 'sunken'}),
            ('lsidebar.TFrame', {'background': 'light grey', 'relief': 'groove'}),
            ('lsidebar.TButton', {}),
            # Styles for the frames used within the help window.
            ('helpHeading.TFrame', {'background': '#87ceeb'}),
            ('helpHeading.Label', {'background': '#87ceeb', 'foreground': 'white',
                                   'font': ('Times New Roman', 18, 'bold')}),
            ('helpDescription.TFrame', {'background': 'white'}),
            ('helpDescription.Label', {'background': 'white', 'foreground': 'black',
                                       'font': ('Times New Roman', 16)}),
            ('helpMoreHelp.TFrame', {'background': 'light grey'}),
            ('helpMoreHelp.Label', {'background': 'light grey', 'foreground': 'dark blue',
                                    'font': ('Times New Roman', 12)}),
        )
        for style_name, options in style_specs:
            gui_style.configure(style_name, **options)
0f904479779b7dee6c4eadd4ba7b6d194daf6ff9 | 2,591 | py | Python | Module06/Characters.py | biomed-bioinformatics-bootcamp/bmes-t580-2019-coursework-soomin-e | 98537e751fb8cf132fc3a63b8f97490b348424ce | [
"MIT"
] | null | null | null | Module06/Characters.py | biomed-bioinformatics-bootcamp/bmes-t580-2019-coursework-soomin-e | 98537e751fb8cf132fc3a63b8f97490b348424ce | [
"MIT"
] | null | null | null | Module06/Characters.py | biomed-bioinformatics-bootcamp/bmes-t580-2019-coursework-soomin-e | 98537e751fb8cf132fc3a63b8f97490b348424ce | [
"MIT"
] | null | null | null | import random
class monster(object):
    """Base class for all combatants: a name plus a level multiplier."""

    def __init__(self, name, level):  # Initializes the Object
        self.name = name    # Attaches Name to Object
        self.level = level  # Attaches Level to Object

    def print(self):
        """Print a one-line summary of this creature.

        Fixed: the original used a '.' instead of ',' after 'Name:'
        ("print('Name:'. self.name, ...)"), which raised AttributeError
        at runtime.
        """
        print('Name:', self.name, 'Level:', self.level)

    def get_attack_roll(self):
        """Roll a d20 scaled by level for attacking."""
        die = random.randint(1, 20)
        return die * self.level

    def get_defensive_roll(self):
        """Roll a d20 scaled by level for defending."""
        die = random.randint(1, 20)
        return die * self.level
class Wizard(monster):
    """A combatant whose attack narrates the wizard's duel."""

    def attack(self, creature):
        """Resolve an attack against *creature*; return True on victory."""
        print("The wizard %s attacks %s" % (self.name, creature.name))
        print()

        attack_roll = self.get_attack_roll()
        defense_roll = creature.get_defensive_roll()
        print("You roll %i" % attack_roll)
        print("%s rolls %i" % (creature.name, defense_roll))

        victorious = attack_roll >= defense_roll
        if victorious:
            print("The wizard has handily triumphed over %s " % creature.name)
        else:
            print("The wizard has been DEFEATED!!!")
        print()
        return victorious
class Ranger(monster):
    """A combatant whose attack narrates the ranger's duel."""

    def attack(self, creature):
        """Resolve an attack against *creature*; return True on victory."""
        print("The ranger %s attacks %s" % (self.name, creature.name))

        attack_roll = self.get_attack_roll()
        defense_roll = creature.get_defensive_roll()
        print("You roll %i" % attack_roll)
        print("%s rolls %i" % (creature.name, defense_roll))

        # Defeat only when the defender strictly out-rolls the ranger.
        if attack_roll < defense_roll:
            print("The ranger is dead!!!")
            return False
        print("The ranger has vanquished %s " % creature.name)
        return True
class smallCreature(monster):
    """Creature whose defensive roll is half the base monster's."""

    def get_defensive_roll(self):
        # Small creatures defend at half strength.
        return super().get_defensive_roll() / 2
class largeCreature(monster):
    # NOTE(review): this halves the defensive roll exactly like smallCreature.
    # A *large* creature probably should not share the small-creature penalty
    # (perhaps "* 2" was intended) -- confirm before changing behavior.
    def get_defensive_roll(self):
        base_roll = super().get_defensive_roll()
        return base_roll / 2
class Dragon(monster):
    """Monster with scaliness and fire-breathing attributes boosting defense."""

    def __init__(self, name, level, scaliness, breaths_fire):
        """breaths_fire: truthy flag; scaliness: numeric score (÷10 modifier)."""
        super().__init__(name, level)
        self.breaths_fire = breaths_fire
        self.scaliness = scaliness

    def get_defensive_roll(self):
        """Base roll, x5 when fire-breathing, scaled by scaliness / 10."""
        roll = super().get_defensive_roll()
        if self.breaths_fire:
            roll = roll * 5
        else:
            roll = roll * 1
        return roll * (self.scaliness / 10)
0f90b635f49d250b59642f823d6a6a2d9361a9cd | 824 | py | Python | setup.py | lvrfrc87/pyATEOS | d864f59fcc9b0b6a07e7ea959f0fd97b1e6bfd3b | [
"Apache-2.0"
] | 5 | 2020-09-15T18:02:49.000Z | 2021-04-02T12:33:15.000Z | setup.py | lvrfrc87/pyATEOS | d864f59fcc9b0b6a07e7ea959f0fd97b1e6bfd3b | [
"Apache-2.0"
] | null | null | null | setup.py | lvrfrc87/pyATEOS | d864f59fcc9b0b6a07e7ea959f0fd97b1e6bfd3b | [
"Apache-2.0"
] | null | null | null | import setuptools
# Read the long description for PyPI straight from the repository README.
with open("README.md", "r") as readme_fh:
    long_description = readme_fh.read()

# Collect all package metadata in one mapping and unpack it into setup().
package_metadata = dict(
    name="pyateos",
    version="1.0.6",
    author="Federico Olivieri",
    author_email="lvrfrc87@gmail.com",
    description="python framework to test operational status of an Arista network",
    scripts=['pyateos/pyateos'],
    long_description=long_description,
    long_description_content_type="text/markdown",
    url="https://github.com/lvrfrc87/pyATEOS",
    packages=setuptools.find_packages(),
    install_requires=[
        'jsondiff>=1.2.0',
        'pyeapi>=0.8.3',
        'jmespath>=0.9.5',
    ],
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires='>=3.6',
)

setuptools.setup(**package_metadata)
0f93c4b5b039a5778fb959a05adf4af94103417a | 1,308 | py | Python | non_regression/test_objdump.py | LRGH/plasmasm | 4cd50546c3dc895763d72dd60b7c46179c1916bc | [
"Apache-2.0"
] | 1 | 2021-02-28T21:31:18.000Z | 2021-02-28T21:31:18.000Z | non_regression/test_objdump.py | LRGH/plasmasm | 4cd50546c3dc895763d72dd60b7c46179c1916bc | [
"Apache-2.0"
] | null | null | null | non_regression/test_objdump.py | LRGH/plasmasm | 4cd50546c3dc895763d72dd60b7c46179c1916bc | [
"Apache-2.0"
] | null | null | null | # Copyright (C) 2011-2020 Airbus, Louis.Granboulan@airbus.com
try:
    import pytest
except:
    # Fall back to the minimal pytest shim shipped with the test suite.
    # NOTE(review): the bare "except:" also swallows unrelated errors
    # (e.g. SystemExit); "except ImportError:" would be safer.
    from run_tests import pytest
import sys, os.path
# Make the plasmasm package (one directory up from this test dir) importable.
basedir = os.path.dirname(os.path.dirname(__file__))
sys.path.append(basedir)
from plasmasm.analyze_file import File
# To be able to import elfesteem in the parent directory, with python3
sys.path.append(os.path.dirname(basedir)+'/elfesteem')
# (object file, expected-output suffix, File.from_raw keyword options) triples.
all_tests = [
    ("basic_x86_linux.o", "dump",{"cpu":"/MIASM"}),
    ("other_x86_linux.o", "dump",{"cpu":"/MIASM"}),
    ("other_x86_linux_2.o", "dump",{"cpu":"/MIASM"}),
    ("other_x64_linux_5.o", "dump",{}),
    ]
if sys.version_info[0] == 2 and sys.version_info[1] <= 6:
    # Cannot use amoco (no OrderedDict on python <= 2.6): drop the x64,
    # sparc and AMOCO-cpu test cases that would require it.
    all_tests = [ (f,s,k) for (f,s,k) in all_tests
        if not '_x64_' in f
        and not '_sparc' in f
        and k.get("cpu",None) != "/AMOCO" ]
def test_io(file, suffix, kargs):
    """Round-trip check: parse a binary fixture and compare its objdump-style
    output against the recorded reference file.

    Improvement: files are opened with context managers so the handles are
    closed even when the assertion below fails (the original called close()
    manually and would leak both handles on failure).
    """
    with open("non_regression/"+file, "rb") as fd:
        raw = fd.read()
    with open("non_regression/"+file+"."+suffix, "r") as fd:
        res = fd.read()
    pool = File().from_raw(raw, rw=True, **kargs)
    assert pool.to_objdump(filename="non_regression/"+file) + "\n" == res
test_io = pytest.mark.parametrize("file, suffix, kargs", all_tests)(test_io)
| 35.351351 | 76 | 0.622324 | 194 | 1,308 | 4.025773 | 0.463918 | 0.03073 | 0.049936 | 0.049936 | 0.158771 | 0.076825 | 0.076825 | 0.076825 | 0.076825 | 0 | 0 | 0.023969 | 0.202599 | 1,308 | 36 | 77 | 36.333333 | 0.724832 | 0.123089 | 0 | 0.066667 | 0 | 0 | 0.188101 | 0 | 0 | 0 | 0 | 0 | 0.033333 | 1 | 0.033333 | false | 0 | 0.133333 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e13d4beda5263eac81980513fb1532f623c6f2a | 4,255 | py | Python | experiments/test_functions/push_robot_14d.py | aryandeshwal/HyBO | 3298db751b4d6e9a445d104de5c634e4af4a8dfe | [
"BSD-2-Clause-FreeBSD"
] | 13 | 2021-06-15T08:59:18.000Z | 2022-03-24T04:06:47.000Z | experiments/test_functions/push_robot_14d.py | aryandeshwal/HyBO | 3298db751b4d6e9a445d104de5c634e4af4a8dfe | [
"BSD-2-Clause-FreeBSD"
] | 2 | 2022-02-10T06:30:35.000Z | 2022-02-22T10:13:17.000Z | experiments/test_functions/push_robot_14d.py | aryandeshwal/HyBO | 3298db751b4d6e9a445d104de5c634e4af4a8dfe | [
"BSD-2-Clause-FreeBSD"
] | 1 | 2022-03-24T04:06:49.000Z | 2022-03-24T04:06:49.000Z | from __future__ import division, print_function
import numpy as np
import torch
from experiments.test_functions.robot_push_14d import push_function
from hyperopt import hp
class Problem(object):
    """Wraps the 14-D robot-push objective: maps a raw search point onto the
    arguments of push_function.PushReward and returns the negated reward,
    so minimisation frameworks can use it directly."""

    def __init__(self, dimension, lower_bounds, upper_bounds):
        """Record the search-space description (bounds are kept as given)."""
        self.dimension = dimension
        self.lower_bounds = lower_bounds
        self.upper_bounds = upper_bounds

    def __call__(self, x):
        # Rescale the 14 raw coordinates into the argument order expected by
        # PushReward: positions, angles (mapped from [-1, 1] to [0, 2*pi])
        # and simulation lengths (mapped from [-1, 1] to [0, 10]).
        argv = [
            x[0] - 5,
            x[1] - 5,
            x[4] - 10,
            x[5] - 10,
            x[8] + 2,
            ((x[10] + 1) * 2 * np.pi) / 2,
            x[2] - 5,
            x[3] - 5,
            x[6] - 10,
            x[7] - 10,
            x[9] + 2,
            ((x[11] + 1) * 2 * np.pi) / 2,
            ((x[12] + 1) * 10) / 2,
            ((x[13] + 1) * 10) / 2,
        ]
        f = push_function.PushReward()
        reward = f(argv)
        print(f"reward: {reward}")
        return -1 * reward
class Push_robot_14d(object):
    """Mixed discrete/continuous benchmark wrapping the 14-D robot-push task.

    The first 10 variables are discrete (each modelled as a path graph whose
    Laplacian eigendecomposition supplies Fourier frequencies/basis for a
    graph kernel); the last 4 are continuous in [-1, 1].
    """
    def __init__(self, random_seed=None, problem_id=None):
        self.random_seed = random_seed
        self.problem_id = problem_id
        self.lower_bounds = []
        self.upper_bounds = []
        # discrete variables: 4 in [0, 10], 4 in [0, 20], 2 in [0, 28]
        for i in range(4):
            self.lower_bounds.append(0)
            self.upper_bounds.append(10)
        for i in range(4):
            self.lower_bounds.append(0)
            self.upper_bounds.append(20)
        for i in range(2):
            self.lower_bounds.append(0)
            self.upper_bounds.append(28)
        # continuous variables in [-1, 1]; Problem.__call__ rescales them
        for i in range(4):
            self.lower_bounds.append(-1)
            self.upper_bounds.append(1)
        assert len(self.lower_bounds) == 14
        assert len(self.upper_bounds) == 14
        self.num_discrete = 10
        self.num_continuous = 4
        self.problem = Problem(dimension=14, lower_bounds=self.lower_bounds, upper_bounds=self.upper_bounds)
        self.problem.dimension = 14
        print(f"num_discrete: {self.num_discrete}, num_continuous: {self.num_continuous}")
        self.n_vertices = []
        # one graph vertex per admissible value of each discrete variable
        for i in range(self.num_discrete):
            self.n_vertices.append(self.upper_bounds[i] - self.lower_bounds[i] + 1)
        self.n_vertices = np.array(self.n_vertices)
        self.suggested_init = self.generate_random_points(n_points=10, random_seed=random_seed).float()
        self.adjacency_mat = []
        self.fourier_freq = []
        self.fourier_basis = []
        self.random_seed_info = str(random_seed).zfill(4)
        for i in range(len(self.n_vertices)):
            n_v = self.n_vertices[i]
            # path-graph adjacency: vertex v connected to v-1 and v+1
            adjmat = torch.diag(torch.ones(n_v - 1), -1) + torch.diag(torch.ones(n_v - 1), 1)
            self.adjacency_mat.append(adjmat)
            # graph Laplacian L = D - A; its eigenpairs are the graph Fourier
            # frequencies and basis
            laplacian = torch.diag(torch.sum(adjmat, dim=0)) - adjmat
            # NOTE(review): torch.symeig is deprecated (removed in recent
            # torch; torch.linalg.eigh is the replacement) -- confirm the
            # pinned torch version before upgrading.
            eigval, eigvec = torch.symeig(laplacian, eigenvectors=True)
            self.fourier_freq.append(eigval)
            self.fourier_basis.append(eigvec)
    def evaluate(self, x_unorder):
        """Evaluate one 14-D point (1-D tensor or 1xN 2-D tensor); returns a float tensor."""
        if x_unorder.dim() == 2:
            x_unorder = x_unorder.squeeze(0)
        x= x_unorder.numpy().copy()
        print(f"evaluating {x}....")
        evaluation = self.problem(x)
        print(evaluation)
        return torch.tensor(evaluation).float()
    def sample_points(self, n_points, random_seed=None):
        """Draw n_points random points: uniform integers for the discrete
        part and Uniform(-1, 1) for the continuous part."""
        if random_seed is not None:
            # NOTE(review): the previous RNG state is saved but never
            # restored afterwards -- confirm whether that is intentional.
            rng_state = torch.get_rng_state()
            torch.manual_seed(random_seed)
        init_points = []
        for _ in range(n_points):
            random_point = []
            for i in range(self.num_discrete):
                # NOTE(review): torch.randint's upper bound is exclusive, so
                # upper_bounds[i] itself is never sampled -- confirm intent.
                random_point.append(torch.randint(self.lower_bounds[i], self.upper_bounds[i], (1,)))
            for i in range(self.num_discrete, self.num_discrete + self.num_continuous):
                random_point.append(torch.FloatTensor(1).uniform_(-1, 1))
            init_points.append(random_point)
        return torch.tensor(init_points).float()
    def generate_random_points(self, n_points, random_seed=None):
        """sample_points, defaulting to the instance-level seed when none is given."""
        return self.sample_points(n_points, random_seed=self.random_seed if random_seed is None else random_seed).float()
| 39.036697 | 121 | 0.614806 | 585 | 4,255 | 4.252991 | 0.198291 | 0.060289 | 0.061897 | 0.03537 | 0.225884 | 0.166399 | 0.166399 | 0.131029 | 0.074759 | 0.044212 | 0 | 0.029384 | 0.26416 | 4,255 | 108 | 122 | 39.398148 | 0.765251 | 0.015746 | 0 | 0.083333 | 0 | 0 | 0.025353 | 0.005023 | 0 | 0 | 0 | 0 | 0.020833 | 1 | 0.0625 | false | 0.010417 | 0.052083 | 0.010417 | 0.177083 | 0.052083 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e1452310f56a637561fb5a3cec41a3d7ecfa10d | 2,894 | py | Python | packages/mccomponents/python/mccomponents/sample/phonon/read_dos.py | mcvine/mcvine | 42232534b0c6af729628009bed165cd7d833789d | [
"BSD-3-Clause"
] | 5 | 2017-01-16T03:59:47.000Z | 2020-06-23T02:54:19.000Z | packages/mccomponents/python/mccomponents/sample/phonon/read_dos.py | mcvine/mcvine | 42232534b0c6af729628009bed165cd7d833789d | [
"BSD-3-Clause"
] | 293 | 2015-10-29T17:45:52.000Z | 2022-01-07T16:31:09.000Z | packages/mccomponents/python/mccomponents/sample/phonon/read_dos.py | mcvine/mcvine | 42232534b0c6af729628009bed165cd7d833789d | [
"BSD-3-Clause"
] | 1 | 2019-05-25T00:53:31.000Z | 2019-05-25T00:53:31.000Z | #!/usr/bin/env python
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# Jiao Lin
# California Institute of Technology
# (C) 2007-2013 All Rights Reserved
#
# {LicenseText}
#
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
#
# functions to create dos histogram from files
def doshist_fromidf(datapath):
    """Read a phonon DOS histogram from an idf-format data file."""
    from mccomponents.sample.idf import readDOS
    from histogram import histogram
    energies, intensities = readDOS(datapath)
    return histogram('dos', [('energy', energies, 'meV')], data=intensities)
def doshist_fromascii(datapath, x_unit=None):
    """Read a phonon DOS histogram from an ascii data file.

    The file holds whitespace-separated columns: energy, intensity and an
    optional error column. Lines starting with '#' are comments; unless
    x_unit is given, they are scanned for an energy unit ('meV' or 'TeraHz')
    in the first token.

    Fixes vs. the original: the input file is closed via a context manager
    (it was leaked before), and empty comment lines no longer raise
    IndexError during unit detection.
    """
    import warnings, numpy as np
    # read data
    data = []; comments = []
    with open(datapath) as stream:
        for line in stream:
            line = line.strip()
            if not line:
                continue
            if line[0] == '#':
                comments.append(line[1:])
                continue
            tokens = line.split()
            try:
                numbers = list(map(float, tokens))
            except Exception:
                # non-numeric line: warn and skip rather than abort
                msg = 'Skip line %s' % line
                warnings.warn(msg)
                continue
            data.append(numbers)
    # treat data: columns -> rows
    data = np.array(data).T
    E, I = data[:2]
    if len(data) > 2:
        errorsq = data[2] ** 2  # third column holds error bars
    else:
        errorsq = None
    # try to get unit information from comments: the first token of a
    # comment line describes the x axis
    supported_units = ['meV', 'TeraHz']
    if comments:
        found = False
        for c in comments:
            tokens = c.strip().split()
            if not tokens:
                continue  # empty comment line: nothing to inspect
            desc = tokens[0]  # description of x axis
            for u in supported_units:
                if desc.find(u) != -1:
                    x_unit = u
                    found = True
                    break
            if found:
                break
    # unit conversion to meV
    if x_unit == 'meV':
        pass
    elif x_unit == 'TeraHz':
        from .units import hertz2mev
        from math import pi
        # angular frequency (2*pi*f, f in Hz) converted to meV
        E *= 2 * pi * 1e12 * hertz2mev
    else:
        raise NotImplementedError("energy unit: %s" % x_unit)
    from histogram import histogram
    axes = [('energy', E, 'meV')]
    return histogram('dos', axes, data=I, errors=errorsq)
# functions to create dos data objects from data files
def dos_fromidf(datapath):
    """Build a DOS object from an idf-format file."""
    return dos_fromdoshist(doshist_fromidf(datapath))
def dos_fromh5(datapath):
    """Build a DOS object from a histogram hdf5 file."""
    import histogram.hdf as hh
    return dos_fromdoshist(hh.load(datapath))
def dos_fromascii(datapath, **kwds):
    """Build a DOS object from an ascii file; kwds are forwarded to the reader."""
    return dos_fromdoshist(doshist_fromascii(datapath, **kwds))
# helpers
def dos_fromdoshist(h):
    """Wrap a DOS histogram in a linearly-interpolated DOS object."""
    from .LinearlyInterpolatedDOS import LinearlyInterpolatedDOS
    return LinearlyInterpolatedDOS(h)
# version
__id__ = "$Id$"
# End of file
| 26.550459 | 80 | 0.540774 | 323 | 2,894 | 4.77709 | 0.390093 | 0.016202 | 0.031108 | 0.025924 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01161 | 0.31548 | 2,894 | 108 | 81 | 26.796296 | 0.767289 | 0.227367 | 0 | 0.128571 | 0 | 0 | 0.067509 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.085714 | false | 0.014286 | 0.114286 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e175bedc0b29ff4199b2a7943acaf83b296aa60 | 10,589 | py | Python | src/Stare.py | ZamfirescuStefan/Bomber-man | e71ee7c0a2db0dc600585eab4da630aee80e0536 | [
"MIT"
] | null | null | null | src/Stare.py | ZamfirescuStefan/Bomber-man | e71ee7c0a2db0dc600585eab4da630aee80e0536 | [
"MIT"
] | null | null | null | src/Stare.py | ZamfirescuStefan/Bomber-man | e71ee7c0a2db0dc600585eab4da630aee80e0536 | [
"MIT"
] | null | null | null | import copy
from UserInterface import Joc
class Player:
    """State of one bomberman player: board sign, bomb-placement countdown,
    shields, a single pending (inactive) bomb and the current position."""

    def __init__(self, sign, k, pos):
        self.sign = sign                # character drawn on the board
        self.bomb_auto_placing = k      # moves left before a bomb must be placed
        self.list_of_bombs = []
        self.nums_of_shields = 0
        self.inactive_bomb = None       # at most one not-yet-armed bomb
        self.lost = False
        self.pos = pos                  # (row, column) on the board

    def __str__(self):
        # NOTE: printing from __str__ is a debug side effect preserved for
        # behavioural compatibility.
        print(self.list_of_bombs)
        return (
            f"The sign use for player {self.sign}, "
            f"number of shields {self.nums_of_shields}, "
            f"placing bomb time {self.bomb_auto_placing} "
        )
def bomb_explode(stare, line_bomb, column_bomb, time=0):  # time = 0 means now, time = 1 means next step will explode
    """Record the blast zone of a bomb at (line_bomb, column_bomb).

    The blast travels along the four axes until it meets a wall or the edge
    of the board; every traversed cell is merged into stare.end_zone.
    """
    rows = len(stare.matr)
    cols = len(stare.matr[-1])
    cell_explode = []
    # (row step, column step, still-inside-board predicate) per direction
    rays = [
        (-1, 0, lambda r, c: r > 0),             # up
        (1, 0, lambda r, c: r < rows - 1),       # down
        (0, 1, lambda r, c: c < cols - 1),       # right
        (0, -1, lambda r, c: c > 0),             # left
    ]
    for dr, dc, inside in rays:
        r, c = line_bomb + dr, column_bomb + dc
        # Walk until a wall blocks the ray or the board edge is reached.
        while stare.matr[r][c] != Joc.WALL:
            if not inside(r, c):
                break
            cell_explode.append((r, c))
            r += dr
            c += dc
    stare.end_zone = list(set(stare.end_zone).union(set(cell_explode)))
class Stare:
    """
    Game-tree node used by the minimax and alpha-beta algorithms.

    Holds the board (matr), the player to move, the remaining search depth
    and the score/best-child bookkeeping the algorithms fill in. Requires
    the two players JMIN and JMAX, and provides mutari() which produces the
    successor configurations for the current player's move.
    """
    NR_LINII = None
    NR_COLOANE = None
    def __init__(self, mat, j_curent, JMIN, JMAX, adancime, parinte=None, scor=None):
        # cells currently (or about to be) inside a blast radius
        self.end_zone = []
        self.next_step_explode = []
        self.matr = mat
        self.__class__.NR_LINII = len(mat)
        self.__class__.NR_COLOANE = len(mat[0])
        self.current_player = j_curent
        self.JMIN = JMIN
        self.JMAX = JMAX
        # depth remaining in the state tree
        self.adancime = adancime
        # score of the state (if final) or of the best child state (for the current player)
        self.scor = scor
        # list of moves possible from the current state
        self.mutari_posibile = []
        # best move out of the possible moves for the current player
        self.stare_aleasa = None
    def parcurgere(self, directie):
        """Count consecutive same-colour cells walking in `directie` from the
        last move.

        NOTE(review): relies on self.ultima_mutare, which is never assigned
        in this class -- confirm it is set elsewhere before this is called.
        """
        um = self.ultima_mutare # (l,c)
        culoare = self.matr[um[0]][um[1]]
        nr_mutari = 0
        while True:
            um = (um[0] + directie[0], um[1] + directie[1])
            if not 0 <= um[0] < self.__class__.NR_LINII or not 0 <= um[1] < self.__class__.NR_COLOANE:
                break
            if not self.matr[um[0]][um[1]] == culoare:
                break
            nr_mutari += 1
        return nr_mutari
    def check_final(self):
        """True when the current player stands in a blast cell with no shield
        left (they lose); a shield absorbs one hit instead.

        NOTE(review): always flags self.JMIN as the loser, even when the
        current player is JMAX -- confirm this asymmetry is intended.
        """
        player = self.current_player
        if player.pos in self.end_zone:
            if player.nums_of_shields > 0:
                player.nums_of_shields -= 1
            else:
                self.JMIN.lost = True
                return True
        return False
    def has_valid_moves(self, linie, coloana):
        """True if at least one of the four neighbours of (linie, coloana) is walkable."""
        directions = [(0, +1), (0, -1), (1, 0), (-1, 0)]
        has_moves = False
        for elem in directions:
            if self.is_valid_move(self.matr, (linie, coloana), elem):
                has_moves = True
        return has_moves
    def jucator_opus(self, jucator):
        # Switch current_player between JMIN and JMAX.
        # NOTE(review): the `jucator` argument is ignored.
        self.current_player = self.JMAX if self.current_player == self.JMIN else self.JMIN
    @classmethod
    def checkValidMove(cls, line_player, column_player, new_line, new_column):
        """True when the target cell is at Manhattan distance <= 1 (stay put or one step)."""
        if abs(line_player - new_line) + abs(column_player - new_column) <= 1:
            return True
        else:
            return False
    def is_valid_move(self, mat, pos, direction):
        """True when the cell one step from `pos` in `direction` is walkable
        (not a wall, a bomb or a player). No bounds check is performed here."""
        new_x, new_y = (pos[0] + direction[0], pos[1] + direction[1])
        invalid_character = [Joc.WALL, Joc.ABOMB, Joc.IBOMB, Joc.PLAYER1, Joc.PLAYER2]
        if mat[new_x][new_y] not in invalid_character:
            return True
        else:
            return False
    def mutari(self, player_sign):
        """Generate all successor states for the current player into
        self.mutari_posibile.

        For every walkable neighbour cell: while the bomb countdown is
        positive, both a plain move and a move-leaving-an-inactive-bomb are
        produced; once the countdown reaches zero, placing a bomb is forced.
        Arming a previously placed inactive bomb marks it active and records
        its blast zone via bomb_explode.
        """
        l_stari = []
        directions = [(0, +1), (0, -1), (1, 0), (-1, 0)]
        player_pos = self.current_player.pos
        for elem in directions:
            stare_cpy = copy.deepcopy(self)
            if self.is_valid_move(stare_cpy.matr, player_pos, elem): # default movements without bombing
                if stare_cpy.current_player.bomb_auto_placing > 0:
                    # plain move: vacate the old cell and step onto the new one
                    stare_cpy.matr[player_pos[0]][player_pos[1]] = Joc.GOL
                    stare_cpy.matr[player_pos[0] + elem[0]][player_pos[1] + elem[1]] = player_sign
                    stare_cpy.current_player.pos = (player_pos[0] + elem[0], player_pos[1] + elem[1])
                    stare_cpy.current_player.bomb_auto_placing -= 1
                    l_stari.append(stare_cpy)
                    # add a version with bomb behind
                    stare_cpy_with_bomb = copy.deepcopy(stare_cpy)
                    stare_cpy_with_bomb.matr[player_pos[0]][player_pos[1]] = Joc.IBOMB
                    if stare_cpy_with_bomb.current_player.inactive_bomb is not None:
                        # arm the previously placed bomb and record its blast zone
                        stare_cpy_with_bomb.matr[stare_cpy.current_player.inactive_bomb[0]][
                            stare_cpy_with_bomb.current_player.inactive_bomb[1]] = Joc.ABOMB
                        bomb_explode(stare_cpy_with_bomb, stare_cpy.current_player.inactive_bomb[0],
                                     stare_cpy.current_player.inactive_bomb[1], 1)
                    stare_cpy_with_bomb.current_player.inactive_bomb = (player_pos[0], player_pos[1])
                    stare_cpy_with_bomb.current_player.bomb_auto_placing = Joc.TIME_AUTO_BOMB
                    l_stari.append(stare_cpy_with_bomb)
                else:
                    # countdown expired: the move MUST leave a bomb behind
                    stare_cpy.matr[player_pos[0]][player_pos[1]] = Joc.IBOMB
                    stare_cpy.matr[player_pos[0] + elem[0]][player_pos[1] + elem[1]] = player_sign
                    stare_cpy.current_player.pos = (player_pos[0] + elem[0], player_pos[1] + elem[1])
                    if stare_cpy.current_player.inactive_bomb is not None:
                        stare_cpy.matr[stare_cpy.current_player.inactive_bomb[0]][
                            stare_cpy.current_player.inactive_bomb[1]] = Joc.ABOMB
                        bomb_explode(stare_cpy, stare_cpy.current_player.inactive_bomb[0],
                                     stare_cpy.current_player.inactive_bomb[1], 1)
                    stare_cpy.current_player.inactive_bomb = (player_pos[0], player_pos[1])
                    stare_cpy.current_player.bomb_auto_placing = Joc.TIME_AUTO_BOMB
                    l_stari.append(stare_cpy)
        self.mutari_posibile = l_stari
    def estimeaza_scor(self, adancime):
        """Heuristic score of this state for the minimax search.

        NOTE(review): check_final() returns a bool, so t_final is never 2;
        True compares equal to 1 (hits the -100 branch) and False to 0 (the
        draw branch), making the +100 branch and the free-space fallback
        unreachable -- confirm the intended three-way encoding.
        """
        t_final = self.check_final()
        if t_final == 2:  # the player lost
            return 100
        elif t_final == 1:  # the computer lost
            return -100
        elif t_final == 0:  # draw
            return 0
        else:
            return self.all_valid_spaces(self.JMAX)
    def all_valid_spaces(self, player):
        """BFS flood fill counting free (or shield) cells reachable by
        `player` that lie outside the blast zone.

        NOTE(review): the count is only printed, never returned (implicit
        None), so estimeaza_scor's fallback yields None -- confirm.
        """
        pos_player = player.pos
        directions = [(0, -1), (-1, 0), (1, 0), (0, 1)]
        tail = [pos_player]
        was_in_tail = [pos_player]
        num_of_free_spaces = 0
        while tail:
            for direction in directions:
                new_pos_x = tail[0][0] + direction[0]
                new_pos_y = tail[0][1] + direction[1]
                if (self.matr[new_pos_x][new_pos_y] == Joc.GOL or self.matr[new_pos_x][new_pos_y] == Joc.SHIELD )and (new_pos_x, new_pos_y) not in self.end_zone and (new_pos_x, new_pos_y) not in was_in_tail:
                    tail.append((new_pos_x, new_pos_y))
                    num_of_free_spaces += 1
                    was_in_tail.append((new_pos_x, new_pos_y))
            tail.pop(0)
        print(num_of_free_spaces)
    def sirAfisare(self):
        """Render the board as text with a column-index header and row indices."""
        sir = " |"
        sir += " ".join([str(i) for i in range(self.NR_COLOANE)]) + "\n"
        sir += "-" * (self.NR_COLOANE + 1) * 2 + "\n"
        sir += "\n".join([str(i) + " |" + " ".join([str(x) for x in self.matr[i]]) for i in range(len(self.matr))])
        return sir
    def __str__(self):
        # NOTE(review): self.tabla_joc is never assigned in this class, and
        # current_player is a Player object (not a str), so this would raise
        # at runtime -- confirm before relying on str(Stare).
        sir = str(self.tabla_joc) + "(Juc curent:" + self.current_player + ")\n"
        return sir
    def __repr__(self):
        # NOTE(review): same issue as __str__ (tabla_joc undefined here).
        sir = str(self.tabla_joc) + "(Juc curent:" + self.current_player + ")\n"
        return sir
| 42.019841 | 207 | 0.584474 | 1,443 | 10,589 | 4.035343 | 0.161469 | 0.063198 | 0.054096 | 0.075734 | 0.392066 | 0.339687 | 0.315817 | 0.298987 | 0.266358 | 0.226 | 0 | 0.022569 | 0.309567 | 10,589 | 251 | 208 | 42.187251 | 0.773902 | 0.18727 | 0 | 0.223464 | 0 | 0.005587 | 0.019431 | 0.005502 | 0.005587 | 0 | 0 | 0 | 0 | 1 | 0.089385 | false | 0 | 0.011173 | 0 | 0.212291 | 0.011173 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e1a46f21a62512d83d2b7efd1accc1ee4790d0e | 2,193 | py | Python | tankobon/sources/mangadex.py | onyxware/manhua | d6b4155abd834a55e9bff5de1f5a9706f331a2e2 | [
"MIT"
] | null | null | null | tankobon/sources/mangadex.py | onyxware/manhua | d6b4155abd834a55e9bff5de1f5a9706f331a2e2 | [
"MIT"
] | null | null | null | tankobon/sources/mangadex.py | onyxware/manhua | d6b4155abd834a55e9bff5de1f5a9706f331a2e2 | [
"MIT"
] | null | null | null | # coding: utf8
import MangaDexPy as mangadex # type: ignore
from .. import models
from ..utils import CONFIG
from . import base
# Register the default for the MangaDex data-saver option (low-resolution
# pages are off unless the user enables them in the config).
CONFIG.setdefault("mangadex.data_saver", False)
def normalize(lang):
    """Reduce a locale code such as 'es-la' to its base language 'es'."""
    base, _, _ = lang.partition("-")
    return base
class Parser(base.Parser):
    """tankobon source parser for mangadex.org, backed by the MangaDexPy client."""

    # URL pattern; the capture group is the manga UUID.
    # NOTE(review): used below as self.domain.search(...), so base.Parser
    # presumably compiles this string into a regex -- confirm.
    domain = r"mangadex\.org/title/([a-fA-F0-9\-]+)"
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.client = mangadex.MangaDex()
        # manga-id -> API manga object, so repeated metadata/chapter lookups
        # for the same title hit the network only once
        self._cache = {}
    def _get_manga(self, url):
        """Return the (cached) MangaDexPy manga object for a title URL."""
        manga_id = self.domain.search(url).group(1)
        if manga_id not in self._cache:
            self._cache[manga_id] = self.client.get_manga(manga_id)
        return self._cache[manga_id]
    def metadata(self, url):
        """Build a models.Metadata record for the manga at `url`."""
        manga = self._get_manga(url)
        alt_titles = []
        for alt_title in manga.titles:
            alt_titles.extend(alt_title.values())
        authors = [a.name for a in manga.author]
        # localised descriptions, keyed by base language ('es-la' -> 'es')
        desc = {normalize(k): v for k, v in manga.desc.items()}
        return models.Metadata(
            url=url,
            title=manga.title["en"],
            alt_titles=alt_titles,
            authors=authors,
            genres=[t.name["en"] for t in manga.tags],
            desc=desc,
            cover=manga.cover.url,
        )
    def add_chapters(self, manga):
        """Append every chapter listed by the API to `manga`."""
        manga_resp = self._get_manga(manga.meta.url)
        for chapter in manga_resp.get_chapters():
            manga.add(
                models.Chapter(
                    # chapters without a number (e.g. oneshots) default to "0"
                    id=chapter.chapter or "0",
                    url=f"https://mangadex.org/chapter/{chapter.id}",
                    title=chapter.title,
                    volume=chapter.volume,
                    lang=normalize(chapter.language),
                )
            )
    def add_pages(self, chapter):
        """Resolve the page URLs for `chapter` via the MangaDex@Home network."""
        uuid = chapter.url.rpartition("/")[-1]
        net_chapter = self.client.get_chapter(uuid).get_md_network()
        if CONFIG["mangadex.data_saver"]:
            # use low-quality images to save bandwidth
            chapter.pages = net_chapter.pages_redux
        else:
            chapter.pages = net_chapter.pages
| 26.107143 | 69 | 0.572731 | 266 | 2,193 | 4.556391 | 0.37594 | 0.028878 | 0.028053 | 0.026403 | 0.044554 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004614 | 0.308254 | 2,193 | 83 | 70 | 26.421687 | 0.794331 | 0.057912 | 0 | 0 | 0 | 0 | 0.059223 | 0.017476 | 0 | 0 | 0 | 0 | 0 | 1 | 0.113208 | false | 0 | 0.075472 | 0.018868 | 0.283019 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e1b7d2eee0eb21fa2c9a2b7f8c5a7ba8831b51d | 1,948 | py | Python | tests.py | yossioo/args | 45ad8ebd980f8a9e25b8fe6826cc0e4693b59f34 | [
"BSD-2-Clause"
] | 70 | 2015-01-19T09:35:47.000Z | 2019-07-19T11:47:34.000Z | tests.py | yossioo/args | 45ad8ebd980f8a9e25b8fe6826cc0e4693b59f34 | [
"BSD-2-Clause"
] | 4 | 2016-10-31T15:06:22.000Z | 2018-03-12T13:45:13.000Z | tests.py | yossioo/args | 45ad8ebd980f8a9e25b8fe6826cc0e4693b59f34 | [
"BSD-2-Clause"
] | 21 | 2015-03-18T19:40:01.000Z | 2019-11-11T11:30:49.000Z | #! /usr/bin/env python
# -*- coding: utf-8 -*-
import args
from nose.tools import ok_
def compare_values(a, b):
    """Helper for yield-style tests: assert (via nose ok_) that a equals b."""
    are_equal = a == b
    ok_(are_equal)
def test_args_all():
    """ArgsList.all should echo the raw argument vector unchanged."""
    raw = ['test', '--number', 'one', 'two', 'three']
    parsed = args.ArgsList(args = raw)
    ok_(parsed.all == raw)
def test_flags():
    """Only the '--' switches should appear in .flags."""
    switches = ['--one', '--two']
    raw = [switches[0], 'a', switches[1], 'b']
    parsed = args.ArgsList(args = raw)
    ok_(parsed.flags.all == switches)
def test_files():
    """A glob pattern argument should expand to more than one matching file."""
    parsed = args.ArgsList(args = ['*.py'])
    ok_(len(parsed.files) > 1)
def test_not_files():
    """Everything except the glob expansion should land in .not_files."""
    switches = ['--one', '--two']
    raw = [switches[0], 'a', switches[1], 'b', '*.py']
    parsed = args.ArgsList(args = raw)
    raw.pop()  # drop the glob; the remainder is the expected .not_files
    ok_(parsed.not_files.all == raw)
def test_grouped():
    """Arguments should be grouped per flag; '_' holds the ungrouped rest.

    Fix: the original compared ``item is not '_'`` -- a string *identity*
    test that only works by CPython interning accident (and emits a
    SyntaxWarning on modern Pythons); ``!=`` is the correct comparison.
    """
    details = {'--letter': ['a', 'b'], '--number': ['one'], '--test': ['']}
    arguments = []
    for key in details:
        for argument in details[key]:
            arguments.append(key)
            arguments.append(argument)
    arg = args.ArgsList(args = arguments)
    # grouped has one extra '_' bucket beyond the flags themselves
    yield compare_values, len(arg.grouped) - 1, len(details)
    for item in arg.grouped:
        if item != '_':
            yield compare_values, arg.grouped[item].all, details[item]
def test_start_with():
    """start_with('f') should keep only the arguments beginning with 'f'."""
    numbers = ['one', 'two', 'three']
    fnumbers = ['four', 'five']
    arguments = ['--number'] + numbers + fnumbers
    arg = args.ArgsList(args = arguments)
    ok_(arg.start_with('f').all == fnumbers)
def test_assignments():
    """key=value arguments should be exposed through .assignments."""
    details = {'--number': ['one', 'two'], 'test': ['']}
    arguments = [key + '=' + value
                 for key in details
                 for value in details[key]]
    arg = args.ArgsList(args = arguments)
    yield compare_values, len(arg.assignments), len(details)
    for item in arg.assignments:
        yield compare_values, arg.assignments[item].all, details[item]
| 29.074627 | 75 | 0.593943 | 245 | 1,948 | 4.620408 | 0.228571 | 0.043286 | 0.092756 | 0.117491 | 0.459364 | 0.414311 | 0.375442 | 0.287986 | 0.287986 | 0.287986 | 0 | 0.004633 | 0.224333 | 1,948 | 66 | 76 | 29.515152 | 0.74454 | 0.022074 | 0 | 0.264151 | 0 | 0 | 0.068313 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.150943 | false | 0 | 0.037736 | 0 | 0.188679 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e1c21ae24cba010e257d20ff7fcb425a9ea6eed | 2,810 | py | Python | torchdrug/datasets/guacamol.py | adiv5/torchdrug | ac10dff9c93e66189d500fb13b080366c0f0a531 | [
"Apache-2.0"
] | null | null | null | torchdrug/datasets/guacamol.py | adiv5/torchdrug | ac10dff9c93e66189d500fb13b080366c0f0a531 | [
"Apache-2.0"
] | null | null | null | torchdrug/datasets/guacamol.py | adiv5/torchdrug | ac10dff9c93e66189d500fb13b080366c0f0a531 | [
"Apache-2.0"
] | null | null | null | """
GuacaMol Benchmark Dataset
Author: Aditya Vartak
"""
import os
from collections import defaultdict
from torch.utils import data as torch_data
from torchdrug import data, utils
from torchdrug.core import Registry as R
from torchdrug.utils import doc
import shlex
import subprocess
import csv
@R.register("datasets.GuacaMol")
@doc.copy_args(data.MoleculeDataset.load_csv, ignore=("smiles_field", "target_fields"))
class GuacaMol(data.MoleculeDataset):
    """
    Subset of the ChemBL database for molecule generation
    (GuacaMol benchmark dataset for de novo molecular design).
    This dataset doesn't contain any label information.
    Statistics:
        #Molecule: 1591380
        #task: 1
    Parameters:
        path (str): path for the CSV dataset
            (NOTE(review): currently ignored -- see __init__)
        verbose (int, optional): output verbose level
        **kwargs
    """
    target_fields = ["source"]
    def __init__(self,path=None,verbose=False,**kwargs):
        # Download the ChemBL-24 GuacaMol splits into the current working
        # directory via the guacamol CLI (network + disk side effects).
        process = subprocess.Popen(shlex.split("python -m guacamol.data.get_data -o ."),
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
        stdout, stderr = process.communicate()
        print(stderr)
        print("Downloaded files")
        smiles_gz = "chembl_24_1_chemreps.txt.gz"
        train_smiles_path = 'chembl24_canon_train.smiles'
        valid_smiles_path = 'chembl24_canon_dev-valid.smiles'
        test_smiles_path = 'chembl24_canon_test.smiles'
        # NOTE(review): the caller-supplied `path` argument is discarded and
        # replaced with a hard-coded CSV name -- confirm that is intended.
        path = 'output.csv'
        path = self.smiles_to_csv(train_smiles_path,valid_smiles_path,test_smiles_path,path)
        self.load_csv(path, smiles_field="smiles", target_fields=self.target_fields,
                    lazy=True, verbose=verbose, **kwargs)
        # Clean up the downloaded intermediates. Fire-and-forget: this Popen
        # result is never waited on, so cleanup may still be in flight.
        process = subprocess.Popen(shlex.split(f"rm {smiles_gz} {train_smiles_path} {valid_smiles_path} {test_smiles_path} {path}"),
                            stdout=subprocess.PIPE,
                            stderr=subprocess.PIPE)
    def smiles_to_csv(self,train_smiles_path,valid_smiles_path,test_smiles_path,path_to_save):
        """Merge the three SMILES split files into one CSV with columns
        (smiles, source) and return its path."""
        final_data = []
        print(train_smiles_path)
        # NOTE(review): all three splits (train/dev-valid/test) are tagged
        # 'valid' in the 'source' column -- confirm whether per-split labels
        # were intended instead.
        with open(train_smiles_path,'r') as f:
            train_smiles = f.readlines()
            final_data.extend([[i,'valid'] for i in train_smiles])
        with open(valid_smiles_path,'r') as f:
            valid_smiles = f.readlines()
            final_data.extend([[i,'valid'] for i in valid_smiles])
        with open(test_smiles_path,'r') as f:
            test_smiles = f.readlines()
            final_data.extend([[i,'valid'] for i in test_smiles])
        with open(path_to_save, "w") as f:
            writer = csv.writer(f)
            writer.writerow(["smiles","source"])
            writer.writerows(final_data)
        return path_to_save
| 33.058824 | 132 | 0.640925 | 346 | 2,810 | 4.976879 | 0.33237 | 0.098722 | 0.052265 | 0.04007 | 0.273519 | 0.249129 | 0.158537 | 0.158537 | 0.158537 | 0.158537 | 0 | 0.008197 | 0.261922 | 2,810 | 84 | 133 | 33.452381 | 0.822083 | 0.129893 | 0 | 0.083333 | 0 | 0 | 0.142258 | 0.055812 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0 | 0.1875 | 0 | 0.291667 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e1fb8e2afc8be5af31fe18d0bf5b32592e9a37b | 1,142 | py | Python | Difficulty/Medium/79.word-search.py | ryderfang/LeetCode | be9470dee6175bab321f0549f173c0c682dddd77 | [
"MIT"
] | null | null | null | Difficulty/Medium/79.word-search.py | ryderfang/LeetCode | be9470dee6175bab321f0549f173c0c682dddd77 | [
"MIT"
] | null | null | null | Difficulty/Medium/79.word-search.py | ryderfang/LeetCode | be9470dee6175bab321f0549f173c0c682dddd77 | [
"MIT"
] | null | null | null | #
# @lc app=leetcode id=79 lang=python3
#
# [79] Word Search
#
# @lc code=start
from typing import List
class Solution:
    def exist(self, board: List[List[str]], word: str) -> bool:
        """Return True if `word` can be traced through horizontally or
        vertically adjacent cells of `board`, using each cell at most once.

        Improvements over the original: leftover debug `print` calls are
        removed, and the visited bookkeeping uses a set of coordinates with
        proper backtracking instead of an int-keyed dict that was cleared
        per starting cell.
        """
        if not board or not board[0]:
            return False
        m = len(board)
        n = len(board[0])
        visited = set()  # cells used on the current DFS path

        def dfs(i: int, j: int, suffix: str) -> bool:
            # The cell must match the next letter and not already be in use.
            if board[i][j] != suffix[0] or (i, j) in visited:
                return False
            if len(suffix) == 1:
                return True
            visited.add((i, j))
            for di, dj in ((-1, 0), (0, -1), (1, 0), (0, 1)):
                ni, nj = i + di, j + dj
                if 0 <= ni < m and 0 <= nj < n and dfs(ni, nj, suffix[1:]):
                    visited.discard((i, j))
                    return True
            visited.discard((i, j))  # backtrack
            return False

        return any(dfs(i, j, word) for i in range(m) for j in range(n))
# @lc code=end
| 28.55 | 85 | 0.364273 | 166 | 1,142 | 2.506024 | 0.313253 | 0.024038 | 0.021635 | 0.038462 | 0.197115 | 0.072115 | 0 | 0 | 0 | 0 | 0 | 0.049658 | 0.488616 | 1,142 | 39 | 86 | 29.282051 | 0.662671 | 0.070053 | 0 | 0.275862 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068966 | false | 0 | 0.034483 | 0 | 0.344828 | 0.068966 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e208a20a999efab35541fe2cdafcec39bb1cfb9 | 4,696 | py | Python | sorts-r-us/sorts.py | NathanielBlairStahn/python-practice | 9f2ed1ab9484df3c46b2ae18286dc51be7590c45 | [
"MIT"
] | null | null | null | sorts-r-us/sorts.py | NathanielBlairStahn/python-practice | 9f2ed1ab9484df3c46b2ae18286dc51be7590c45 | [
"MIT"
] | null | null | null | sorts-r-us/sorts.py | NathanielBlairStahn/python-practice | 9f2ed1ab9484df3c46b2ae18286dc51be7590c45 | [
"MIT"
] | null | null | null | """Module implementing sort algorithms"""
def insertion_sort(arr, inplace=True):
    """Sort `arr` using insertion sort.

    When `inplace` is True the input list itself is sorted and returned;
    otherwise a sorted copy is returned and `arr` is left untouched.
    """
    result = arr if inplace else arr.copy()
    for right in range(1, len(result)):
        key = result[right]
        pos = right
        # Shift larger elements one slot to the right until the spot
        # where `key` belongs is reached.
        while pos > 0 and key < result[pos - 1]:
            result[pos] = result[pos - 1]
            pos -= 1
        result[pos] = key
    return result
def sedgewick_gaps(num_terms=float('inf'), upper_bound=float('inf')):
    """Generates Sedgewick's increments for Shellsort.

    The first 20 terms of the sequence are:
    [1, 5, 19, 41, 109, 209, 505, 929, 2161, 3905, 8929, 16001, 36289, 64769, 146305, 260609, 587521, 1045505, 2354689, 4188161, ...]
    The terms alternate between the sequences
    a_i = 9*4**i - 9*2**i + 1 (i >= 0) and
    b_j = 4**j - 3*2**j + 1 (j >= 2).
    That is, the sequence is
    [a_0, b_2, a_1, b_3, a_2, b_4, a_3, b_5, a_4, b_6, ...].
    The a and b sequences satisfy a_i < b_(i+2) < a_(i+1) for all i,
    so the combined sequence is strictly increasing.

    Args:
        num_terms: yield at most this many terms (default: unbounded).
        upper_bound: stop before yielding any term larger than this
            (default: unbounded).
    """
    i = 0
    # Yields values alternating between seq1 (the a_i) and seq2 (the b_j,
    # here indexed so that seq2[i] = b_(i+2)). These satisfy
    # seq1[i] < seq2[i] < seq1[i+1] for all i>=0, so the terms are increasing.
    # The loop condition `(i<<1) < num_terms` (shift binds tighter than `<`)
    # guarantees we have yielded at most 2*i terms so far, so one more fits.
    while i<<1 < num_terms:
        # seq1[i] = 9*4**i - 9*2**i + 1, computed with shifts:
        # 9<<(2*i) == 9*4**i and 9<<i == 9*2**i.
        term = (9<<(i<<1)) - (9<<i) + 1
        # Number of terms is now 2*i+1, guaranteed to be <= num_terms
        # by the loop condition, so only the upper bound can stop us here.
        if term > upper_bound:# or (i<<1)+1 > num_terms:
            break
        else:
            yield term
        # seq2[i] = 4**(i+2) - 3*2**(i+2) + 1, computed with shifts:
        # 1<<(2*(i+2)) == 4**(i+2) and 3<<(i+2) == 3*2**(i+2).
        # (`i+2<<1` parses as `(i+2)<<1` since `+` binds tighter than `<<`.)
        term = (1<<(i+2<<1)) - (3<<i+2) + 1
        # Number of terms is now 2*i+2, which could exceed num_terms,
        # so both limits are checked.
        if term > upper_bound or (i<<1)+2 > num_terms:
            break
        else:
            yield term
        i += 1
def tokuda_gap(i):
    """Return the i-th Tokuda gap for Shellsort (i starts at 0).

    The first 20 terms of the sequence are:
    [1, 4, 9, 20, 46, 103, 233, 525, 1182, 2660, 5985, 13467, 30301, 68178, 153401, 345152, 776591, 1747331, 3931496, 8845866, ...]
    defined by h_i = ceil((9*(9/4)**i - 4)/5) for i >= 0.
    """
    if i == 0:
        # The closed-form expression below would give 2 for i == 0,
        # so the first gap is returned explicitly.
        return 1
    # 9**(i+1) >> 2*i is the exact integer value of 9 * (9/4)**i * 4**i / 4**i
    # scaling, i.e. floor-free integer arithmetic for 9*(9/4)**i.
    scaled = 9 ** (i + 1) >> 2 * i
    return (scaled - 4) // 5 + 1
def tokuda_gaps(num_terms=float('inf'), upper_bound=float('inf')):
    """Generate Tokuda's gap sequence for Shellsort.

    Yields 1, 4, 9, 20, 46, 103, ... where h_i = ceil((9*(9/4)**i - 4)/5),
    stopping once `num_terms` values have been produced or the next value
    would exceed `upper_bound`.

    Args:
        num_terms: yield at most this many terms (default: unbounded).
        upper_bound: largest gap value allowed (default: unbounded).
    """
    if num_terms < 1 or upper_bound < 1:
        return
    yield 1  # first term; the closed form below starts at the second
    count = 1      # terms yielded so far
    candidate = 4  # next term to consider
    while count + 1 <= num_terms and candidate <= upper_bound:
        yield candidate
        count += 1
        # Integer form of ceil((9*(9/4)**count - 4)/5) for count > 0.
        candidate = ((9 ** (count + 1) >> (count << 1)) - 4) // 5 + 1
def ciura_gaps(num_terms=float('inf'), upper_bound=float('inf')):
    """Generates the Ciura gap sequence for Shellsort.

    The empirically derived terms (Ciura, 2001) are
    [1, 4, 10, 23, 57, 132, 301, 701, 1750]; beyond 1750 the sequence is
    extended with the customary rule h_k = floor(2.25 * h_{k-1}).

    Fix over the original stub, which yielded only 1 and ignored both
    parameters despite the docstring promising the full sequence.

    Args:
        num_terms: yield at most this many terms (default: unbounded).
        upper_bound: largest gap value allowed (default: unbounded).
    """
    known = (1, 4, 10, 23, 57, 132, 301, 701, 1750)
    count = 0
    for gap in known:
        if count >= num_terms or gap > upper_bound:
            return
        yield gap
        count += 1
    gap = known[-1]
    while True:
        # gap * 9 // 4 == floor(2.25 * gap) without float rounding error.
        gap = gap * 9 // 4
        if count >= num_terms or gap > upper_bound:
            return
        yield gap
        count += 1
def shellsort(arr, inplace=True, gap_sequence='Sedgewick'):
    """Sorts an array using Shellsort.

    Supported gap sequences are 'Sedgewick' — O(n^4/3) worst case,
    conjectured O(n^7/6) average — and 'Tokuda'.

    Args:
        arr: list to sort.
        inplace: when True, sort `arr` itself; otherwise sort a copy.
        gap_sequence: 'Sedgewick' or 'Tokuda'.

    Returns:
        The sorted list (the same object as `arr` when inplace is True).

    Raises:
        ValueError: if `gap_sequence` is not a recognized name.
    """
    data = arr if inplace else arr.copy()
    # Pick the gap generator, then materialize the gaps largest-first.
    # Storing them costs only O(log n) extra space.
    if gap_sequence == 'Sedgewick':
        generate = sedgewick_gaps
    elif gap_sequence == 'Tokuda':
        generate = tokuda_gaps
    else:
        raise ValueError(f'Unknown gap sequence: {gap_sequence}')
    gaps = sorted(generate(upper_bound=len(data) - 1), reverse=True)
    for gap in gaps:
        # Each pass h-sorts the array: an insertion sort over the
        # subsequences of elements `gap` apart.
        for i in range(gap, len(data)):
            current = data[i]
            j = i
            while j >= gap and current < data[j - gap]:
                data[j] = data[j - gap]
                j -= gap
            data[j] = current
    return data
| 34.529412 | 133 | 0.578578 | 809 | 4,696 | 3.297899 | 0.239802 | 0.017241 | 0.006747 | 0.007496 | 0.447526 | 0.411544 | 0.387181 | 0.327961 | 0.306222 | 0.297976 | 0 | 0.123199 | 0.275767 | 4,696 | 135 | 134 | 34.785185 | 0.661276 | 0.567291 | 0 | 0.407407 | 0 | 0 | 0.042139 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e208a2a7040d4c93b04bd71a4db6f38f2c472be | 1,634 | py | Python | haproxy/tests/test_e2e.py | tcpatterson/integrations-core | 3692601de09f8db60f42612b0d623509415bbb53 | [
"BSD-3-Clause"
] | null | null | null | haproxy/tests/test_e2e.py | tcpatterson/integrations-core | 3692601de09f8db60f42612b0d623509415bbb53 | [
"BSD-3-Clause"
] | null | null | null | haproxy/tests/test_e2e.py | tcpatterson/integrations-core | 3692601de09f8db60f42612b0d623509415bbb53 | [
"BSD-3-Clause"
] | null | null | null | # (C) Datadog, Inc. 2020-present
# All rights reserved
# Licensed under a 3-clause BSD style license (see LICENSE)
import pytest
from six import PY2
from datadog_checks.base import is_affirmative
from datadog_checks.dev.utils import get_metadata_metrics
from .common import ENDPOINT_PROMETHEUS, HAPROXY_LEGACY, requires_new_environment
pytestmark = [requires_new_environment, pytest.mark.e2e]
def test_check(dd_agent_check, instancev1, prometheus_metrics):
aggregator = dd_agent_check(instancev1, rate=True)
for metric in prometheus_metrics:
aggregator.assert_metric('haproxy.{}'.format(metric))
aggregator.assert_all_metrics_covered()
exclude_metrics = []
if not is_affirmative(HAPROXY_LEGACY):
# These metrics are submitted as counts with Prometheus
exclude_metrics = [
'haproxy.backend.bytes.in.total',
'haproxy.backend.bytes.out.total',
'haproxy.frontend.bytes.in.total',
'haproxy.frontend.bytes.out.total',
]
aggregator.assert_metrics_using_metadata(get_metadata_metrics(), exclude=exclude_metrics)
@pytest.mark.skipif(PY2, reason='Test only available on Python 3')
def test_checkv2(dd_agent_check, instancev2, prometheus_metricsv2):
aggregator = dd_agent_check(instancev2, rate=True)
for metric in prometheus_metricsv2:
aggregator.assert_metric('haproxy.{}'.format(metric))
aggregator.assert_metric_has_tag('haproxy.{}'.format(metric), tag="endpoint:" + ENDPOINT_PROMETHEUS)
aggregator.assert_all_metrics_covered()
aggregator.assert_metrics_using_metadata(get_metadata_metrics())
| 36.311111 | 108 | 0.752142 | 201 | 1,634 | 5.855721 | 0.412935 | 0.095157 | 0.040782 | 0.037383 | 0.280374 | 0.237893 | 0.188615 | 0.188615 | 0 | 0 | 0 | 0.011611 | 0.156671 | 1,634 | 44 | 109 | 37.136364 | 0.842525 | 0.099143 | 0 | 0.142857 | 0 | 0 | 0.132243 | 0.084526 | 0 | 0 | 0 | 0 | 0.25 | 1 | 0.071429 | false | 0 | 0.178571 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e20d8bb5328100d7ffc9e71ad71a5d1d8b2a379 | 894 | py | Python | test_create.py | nickoala/micropython-performance | 2daee8bbe1d74b405b369f532c16324732ff2fb5 | [
"MIT"
] | 3 | 2019-09-14T21:34:04.000Z | 2022-03-10T21:56:02.000Z | test_create.py | nickoala/micropython-performance | 2daee8bbe1d74b405b369f532c16324732ff2fb5 | [
"MIT"
] | null | null | null | test_create.py | nickoala/micropython-performance | 2daee8bbe1d74b405b369f532c16324732ff2fb5 | [
"MIT"
] | 1 | 2019-09-03T11:51:59.000Z | 2019-09-03T11:51:59.000Z | import show
"""
Test different methods to create a list.
"""
@show.time
def create_by_append(n):
a = []
for i in range(0, n):
a.append(0)
return a
@show.time
def create_by_comprehension(n):
return [0 for i in range(0, n)]
@show.time
def create_by_multiply(n):
return [0] * n
@show.time
def create_init_by_comprehension(n):
return [i for i in range(0, n)]
@show.time
def create_init_by_multiply(n):
a = [0] * n
for i in range(0, n):
a[i] = i
return a
print('=== Create by Multiply')
a = create_by_multiply(200)
print(a)
print('=== Create by Comprehension')
a = create_by_comprehension(200)
print(a)
print('=== Create by Append')
a = create_by_append(200)
print(a)
print('=== Create Init by Comprehension')
a = create_init_by_comprehension(200)
print(a)
print('=== Create Init by Multiply')
a = create_init_by_multiply(200)
print(a)
| 17.192308 | 41 | 0.663311 | 150 | 894 | 3.793333 | 0.166667 | 0.126538 | 0.126538 | 0.149385 | 0.551845 | 0.43058 | 0.388401 | 0.15993 | 0.105448 | 0.105448 | 0 | 0.031989 | 0.195749 | 894 | 51 | 42 | 17.529412 | 0.759388 | 0 | 0 | 0.378378 | 0 | 0 | 0.1513 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.135135 | false | 0 | 0.027027 | 0.081081 | 0.297297 | 0.27027 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e215cad1c2e0473a58c2134a45114cc5e6f0a14 | 528 | py | Python | application/api/apiUpdate/updateSQL.py | RainhugTiny/WechatWeather | 44005769d3135fefbf5e92c38038353138dba119 | [
"Apache-2.0"
] | 1 | 2018-12-12T03:09:44.000Z | 2018-12-12T03:09:44.000Z | application/api/apiUpdate/updateSQL.py | RainhugTiny/WechatWeather | 44005769d3135fefbf5e92c38038353138dba119 | [
"Apache-2.0"
] | null | null | null | application/api/apiUpdate/updateSQL.py | RainhugTiny/WechatWeather | 44005769d3135fefbf5e92c38038353138dba119 | [
"Apache-2.0"
] | null | null | null | #encoding:utf-8
import MySQLdb,io
#连接数据库
db=MySQLdb.connect("127.0.0.1","sql140_143_28_2","PEBBh7YP4H","sql140_143_28_2",charset='utf8')
with io.open('cityCode.txt','r',encoding='utf-8') as f:
cursor=db.cursor()
num=0#记录id号
for line in f.readlines():
num+=1
line=line.strip()
sql= "update ins_county set weather_info = '"+line+"' where id = "+str(num)
print(num)
try:
cursor.execute(sql)
db.commit()
except:
db.rollback()
db.close() | 29.333333 | 95 | 0.587121 | 77 | 528 | 3.922078 | 0.662338 | 0.072848 | 0.07947 | 0.07947 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.077889 | 0.246212 | 528 | 18 | 96 | 29.333333 | 0.680905 | 0.045455 | 0 | 0 | 0 | 0 | 0.243028 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.0625 | 0 | 0.0625 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e225725adbe55a85e65af5eb7aeea2221ce4d79 | 3,444 | py | Python | gisele/Spiderman.py | Energy4Growing/gisele_v02 | 5845772423ef8afe382854bc35819c4f3b698841 | [
"Apache-2.0"
] | null | null | null | gisele/Spiderman.py | Energy4Growing/gisele_v02 | 5845772423ef8afe382854bc35819c4f3b698841 | [
"Apache-2.0"
] | null | null | null | gisele/Spiderman.py | Energy4Growing/gisele_v02 | 5845772423ef8afe382854bc35819c4f3b698841 | [
"Apache-2.0"
] | null | null | null | import math
import time
import networkx as nx
from scipy import sparse
from gisele.functions import *
from gisele import dijkstra
def spider(geo_df, gdf_cluster_pop, line_bc, resolution, gdf_roads,
roads_segments,Roads_option,Rivers_option,roads_weight, branch_points=None):
if branch_points is None:
branch_points = []
print('Running spider algorithm..')
start_time = time.time()
gdf_cluster_pop.index = pd.Series(range(0, len(gdf_cluster_pop['ID'])))
dist_3d_matrix = distance_3d(gdf_cluster_pop, gdf_cluster_pop, 'X', 'Y',
'Elevation')
edges_matrix = cost_matrix(gdf_cluster_pop, dist_3d_matrix, line_bc,resolution, Rivers_option)
# edges_matrix = dist_3d_matrix
for i in branch_points:
if i[0] in edges_matrix.index.values and \
i[1] in edges_matrix.index.values:
edges_matrix.loc[i[0], i[1]] = 0.001
edges_matrix.loc[i[1], i[0]] = 0.001
edges_matrix_sparse = sparse.csr_matrix(dist_3d_matrix)
graph = nx.from_scipy_sparse_matrix(edges_matrix_sparse)
tree = nx.minimum_spanning_tree(graph, weight='weight')
path = list(tree.edges)
c_grid, c_grid_points = edges_to_line(path, gdf_cluster_pop, edges_matrix)
c_grid['Length'] = c_grid.length.astype(int)
length_limit = resolution * 1.5
long_lines = c_grid.loc[c_grid['Length'] > math.ceil(length_limit)]
short_lines = c_grid.loc[c_grid['Length'] <= math.ceil(length_limit)]
short_lines = short_lines.reset_index(drop=True)
print('Number of long lines: ', long_lines.__len__())
print('Number of short lines: ', short_lines.__len__())
if len(long_lines) != 0:
long_lines = long_lines.sort_values(by='Length', ascending=True)
long_lines = long_lines.reset_index(drop=True)
for i, row in long_lines.iterrows():
point_1 = gdf_cluster_pop[gdf_cluster_pop['ID'] == row.ID1]
point_2 = gdf_cluster_pop[gdf_cluster_pop['ID'] == row.ID2]
c_grid_points = list(zip(short_lines.ID1, short_lines.ID2))
if Roads_option:
segment, segment_cost, segment_length, seg_pts = \
dijkstra.dijkstra_connection_roads(geo_df, point_1, point_2,
c_grid_points, line_bc,
resolution, gdf_roads,
roads_segments, Rivers_option,roads_weight,length_max=10)
else:
segment, segment_cost, segment_length, seg_pts = \
dijkstra.dijkstra_connection(geo_df, point_1, point_2,
c_grid_points, line_bc,
resolution, Rivers_option)
short_lines = pd.concat([short_lines, segment], sort=True)
short_lines = short_lines.reset_index(drop=True)
c_grid = short_lines
c_grid['Length'] = c_grid.length.astype(int)
c_grid_cost = int(c_grid['Cost'].sum(axis=0))
c_grid_length = c_grid['Length'].sum(axis=0)
c_grid_points = list(zip(c_grid.ID1, c_grid.ID2))
final_time = time.time()
total_time = final_time - start_time
print("The total time required by the Spider algorithm was: "
+ str(round(total_time, 4)) + " seconds")
return c_grid, c_grid_cost, c_grid_length, c_grid_points
| 42.518519 | 108 | 0.632985 | 471 | 3,444 | 4.278132 | 0.242038 | 0.062035 | 0.070968 | 0.023821 | 0.39206 | 0.303226 | 0.279404 | 0.25062 | 0.156328 | 0.156328 | 0 | 0.015848 | 0.267131 | 3,444 | 80 | 109 | 43.05 | 0.782488 | 0.00842 | 0 | 0.126984 | 0 | 0 | 0.057134 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.015873 | false | 0 | 0.095238 | 0 | 0.126984 | 0.063492 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e22b727a748a24fc177c3a206dea187d263bc49 | 2,895 | py | Python | i3wpd.py | yuky2020/i3-wpd | 8d55e298b6f270f9ea53ba32215765b8183f73de | [
"MIT"
] | 53 | 2018-02-02T13:41:05.000Z | 2022-01-17T23:40:08.000Z | i3wpd.py | yuky2020/i3-wpd | 8d55e298b6f270f9ea53ba32215765b8183f73de | [
"MIT"
] | 1 | 2021-03-09T00:54:16.000Z | 2021-03-10T06:43:25.000Z | i3wpd.py | yuky2020/i3-wpd | 8d55e298b6f270f9ea53ba32215765b8183f73de | [
"MIT"
] | 9 | 2018-02-03T23:12:47.000Z | 2022-01-10T12:59:02.000Z | #!/usr/bin/env python
"""Custom wallpaper for all your i3 workspaces."""
import os
import sys
import time
import i3msg as i3
I3WPD_DEBUG = False
class i3_Wpd:
"""Wallpaper setter daemon"""
def __init__(self, bg_options, wp_dir, img_format):
self.wp_cmd = 'feh --no-fehbg ' + bg_options
if not wp_dir.endswith('/'):
wp_dir += '/'
self.wp_dir = wp_dir
self.img_format = img_format
dbg('Launch!')
self.ws_reload()
i3.subscribe(['workspace', 'shutdown', 'output'], self.focus_changed_handler)
def set_wp(self):
"""Sets wallpaper, assuming i3-msg reports outputs in the same order as xinerama."""
cmd = self.wp_cmd
for ws in self.active_workspaces:
cmd += ' ' + self.wp_dir + ws + self.img_format
dbg(cmd)
os.system(cmd)
def ws_update(self):
"""Call on workspace focus change"""
current_outputs = i3.send(i3.GET_OUTPUTS)
n = 0
for out in current_outputs:
if out['active']:
self.active_workspaces[n] = out['current_workspace']
n += 1
self.set_wp()
def ws_reload(self):
"""Call on server output change"""
dbg("Output change!")
self.active_workspaces = []
current_outputs = i3.send(i3.GET_OUTPUTS)
for out in current_outputs:
if out['active']:
self.active_workspaces.append(out['current_workspace'])
self.set_wp()
def focus_changed_handler(self, event, data):
"""This daemon dies with i3"""
if event == i3.workspace:
if data['change'] == 'focus':
self.ws_update()
elif event == i3.output:
self.ws_reload()
elif event == i3.shutdown:
dbg('i3 exit.')
os._exit(0)
def dbg(msg):
"""Print to stdout"""
if I3WPD_DEBUG:
print(__name__ + ' : ' + msg)
def resolve_path(dir):
"""Figure out where to look for images"""
paths = [dir, 'backgrounds']
cur_dir = os.getcwd() + '/'
dbg(cur_dir)
for p in paths:
if os.path.exists(p) and os.path.isdir(p):
return p
elif os.path.exists(cur_dir + p) and os.path.isdir(cur_dir + p):
return cur_dir + p
return cur_dir
if __name__ == '__main__':
if len(sys.argv) == 4:
i3_Wpd(sys.argv[1], resolve_path(sys.argv[2]), sys.argv[3])
elif len(sys.argv) == 3:
i3_Wpd('--bg-center --bg black', resolve_path(sys.argv[1]), sys.argv[2])
else:
print('i3wpd.py - sets a custom wallpaper on every desktop')
print('usage: i3wpd.py [\"options\"] directory filetype')
print('options: \"--bg-center|--bg-fill|--bg-scale [--bg black|white]\".\nOther options may apply, see man feh(1).')
exit()
while True:
time.sleep(1)
| 31.467391 | 124 | 0.570294 | 389 | 2,895 | 4.066838 | 0.33419 | 0.030973 | 0.050569 | 0.025284 | 0.14665 | 0.127686 | 0.107459 | 0.067004 | 0.067004 | 0.067004 | 0 | 0.016137 | 0.29361 | 2,895 | 91 | 125 | 31.813187 | 0.757457 | 0.105354 | 0 | 0.140845 | 0 | 0.014085 | 0.148293 | 0.021969 | 0 | 0 | 0 | 0 | 0 | 1 | 0.098592 | false | 0 | 0.056338 | 0 | 0.211268 | 0.056338 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e230f1a4e7b9a2f9669122d926144200156f305 | 16,954 | py | Python | app/editor/combat_animation_editor/combat_effect_display.py | ViolaBuddy/EscapeFromPlegia | 5228b42e8525b445854d742dccf85ca65b320d70 | [
"MIT"
] | null | null | null | app/editor/combat_animation_editor/combat_effect_display.py | ViolaBuddy/EscapeFromPlegia | 5228b42e8525b445854d742dccf85ca65b320d70 | [
"MIT"
] | null | null | null | app/editor/combat_animation_editor/combat_effect_display.py | ViolaBuddy/EscapeFromPlegia | 5228b42e8525b445854d742dccf85ca65b320d70 | [
"MIT"
] | null | null | null | import os, glob
import json
from PyQt5.QtWidgets import QVBoxLayout, \
QWidget, QGroupBox, QFormLayout, QFileDialog, \
QPushButton, QLineEdit, QInputDialog, QMessageBox
from PyQt5.QtGui import QImage, QPixmap, QPainter
from app.constants import WINWIDTH, WINHEIGHT
from app.resources import combat_anims
from app.resources.resources import RESOURCES
from app.editor.settings import MainSettingsController
from app.editor import timer
from app.editor.combat_animation_editor.frame_selector import FrameSelector
from app.editor.combat_animation_editor.combat_animation_display import CombatAnimProperties
import app.editor.combat_animation_editor.combat_animation_imports as combat_animation_imports
import app.editor.utilities as editor_utilities
from app.utilities import str_utils
# Game interface
import app.editor.game_actions.game_actions as GAME_ACTIONS
def populate_effect_pixmaps(effect_anim):
effect_anim.pixmap = QPixmap(effect_anim.full_path)
for frame in effect_anim.frames:
x, y, width, height = frame.rect
frame.pixmap = effect_anim.pixmap.copy(x, y, width, height)
class CombatEffectProperties(CombatAnimProperties):
def __init__(self, parent, current=None):
QWidget.__init__(self, parent)
self.window = parent
self._data = self.window._data
# Populate resources
for effect_anim in self._data:
populate_effect_pixmaps(effect_anim)
self.control_setup(current)
self.test_combat_button.setEnabled(False)
self.info_form = QFormLayout()
self.nid_box = QLineEdit()
self.nid_box.textChanged.connect(self.nid_changed)
self.nid_box.editingFinished.connect(self.nid_done_editing)
self.settings = MainSettingsController()
theme = self.settings.get_theme(0)
if theme == 0:
icon_folder = 'icons/icons'
else:
icon_folder = 'icons/dark_icons'
pose_row = self.set_up_pose_box(icon_folder)
self.info_form.addRow("Unique ID", self.nid_box)
self.info_form.addRow("Pose", pose_row)
self.build_frames()
self.set_layout()
def on_nid_changed(self, old_nid, new_nid):
for combat_anim in RESOURCES.combat_anims:
for weapon_anim in combat_anim.weapon_anims:
for pose in weapon_anim.poses:
for command in pose.timeline:
if command.has_effect() and command.value[0] == old_nid:
command.value = (new_nid,)
for effect_anim in RESOURCES.combat_effects:
for pose in effect_anim.poses:
for command in pose.timeline:
if command.has_effect() and command.value[0] == old_nid:
command.value = (new_nid,)
def build_frames(self):
self.frame_group_box = QGroupBox()
self.frame_group_box.setTitle("Image Frames")
frame_layout = QVBoxLayout()
self.frame_group_box.setLayout(frame_layout)
self.import_from_lt_button = QPushButton("Import Legacy Effect...")
self.import_from_lt_button.clicked.connect(self.import_legacy)
self.import_png_button = QPushButton("View Frames...")
self.import_png_button.clicked.connect(self.select_frame)
self.import_effect_button = QPushButton("Import...")
self.import_effect_button.clicked.connect(self.import_effect)
self.export_effect_button = QPushButton("Export...")
self.export_effect_button.clicked.connect(self.export_effect)
self.window.left_frame.layout().addWidget(self.import_effect_button, 3, 0)
self.window.left_frame.layout().addWidget(self.export_effect_button, 3, 1)
self.window.left_frame.layout().addWidget(self.import_from_lt_button, 4, 0, 1, 2)
frame_layout.addWidget(self.import_png_button)
def pose_changed(self, idx):
current_pose_nid = self.pose_box.currentText()
poses = self.current.poses
current_pose = poses.get(current_pose_nid)
if current_pose:
self.has_pose(True)
self.timeline_menu.set_current_pose(current_pose)
else:
self.has_pose(False)
self.timeline_menu.clear_pose()
def get_available_pose_types(self) -> float:
items = [_ for _ in combat_anims.required_poses] + ['Critical']
items.append("Custom")
for pose_nid in self.current.poses.keys():
if pose_nid in items:
items.remove(pose_nid)
return items
def make_pose(self):
items = self.get_available_pose_types()
new_nid, ok = QInputDialog.getItem(self, "New Pose", "Select Pose", items, 0, False)
if not new_nid or not ok:
return
if new_nid == "Custom":
new_nid, ok = QInputDialog.getText(self, "Custom Pose", "Enter New Name for Pose: ")
if not new_nid or not ok:
return
new_nid = str_utils.get_next_name(new_nid, self.current.poses.keys())
return new_nid
def add_new_pose(self):
new_nid = self.make_pose()
if not new_nid:
return
new_pose = combat_anims.Pose(new_nid)
self.current.poses.append(new_pose)
self.pose_box.addItem(new_nid)
self.pose_box.setValue(new_nid)
def duplicate_pose(self):
new_nid = self.make_pose()
if not new_nid:
return
current_pose_nid = self.pose_box.currentText()
current_pose = self.current.poses.get(current_pose_nid)
# Make a copy
ser = current_pose.serialize()
new_pose = combat_anims.Pose.deserialize(ser)
new_pose.nid = new_nid
self.current.poses.append(new_pose)
self.pose_box.addItem(new_nid)
self.pose_box.setValue(new_nid)
return new_pose
def delete_pose(self):
pose = self.current.poses.get(self.pose_box.currentText())
if self.ask_permission(pose, 'Pose'):
self.current.poses.delete(pose)
self.reset_pose_box()
def reset_pose_box(self):
self.pose_box.clear()
poses = self.current.poses
if poses:
self.pose_box.addItems([d.nid for d in poses])
self.pose_box.setValue(poses[0].nid)
return poses
def get_current_weapon_anim(self):
"""
For effects, their "weapon anim" is just themselves
So return itself
"""
return self.current
def import_legacy(self):
starting_path = self.settings.get_last_open_path()
fns, ok = QFileDialog.getOpenFileNames(self.window, "Select Legacy Effect Script Files", starting_path, "Script Files (*-Script.txt);;All Files (*)")
if ok and fns:
for fn in fns:
if fn.endswith('-Script.txt'):
combat_animation_imports.import_effect_from_legacy(fn)
parent_dir = os.path.split(fns[-1])[0]
self.settings.set_last_open_path(parent_dir)
self.window.update_list()
def select_frame(self):
if not self.current.frames:
QMessageBox.critical(self, "Frame Error", "%s has no associated frames!" % self.current.nid)
return
elif not self.current.palettes:
QMessageBox.critical(self, "Palette Error", "%s has no associated palettes!" % self.current.nid)
return
dlg = FrameSelector(self.current, self.current, self)
dlg.exec_()
def set_current(self, current):
self.stop()
self.current = current
self.nid_box.setText(self.current.nid)
poses = self.reset_pose_box()
self.timeline_menu.set_current_frames(self.current.frames)
self.palette_menu.set_current(self.current)
current_pose_nid = self.pose_box.currentText()
current_pose = poses.get(current_pose_nid)
if current_pose:
self.timeline_menu.set_current_pose(current_pose)
else:
self.timeline_menu.clear_pose()
def draw_frame(self):
self.update()
# Actually show current frame
# Need to draw 240x160 area
# And place in space according to offset
actor_im = None
offset_x, offset_y = 0, 0
under_actor_im = None
under_offset_x, under_offset_y = 0, 0
if self.frame_nid:
frame = self.current.frames.get(self.frame_nid)
if frame:
if self.custom_frame_offset:
offset_x, offset_y = self.custom_frame_offset
else:
offset_x, offset_y = frame.offset
actor_im = self.modify_for_palette(frame.pixmap)
if self.under_frame_nid:
frame = self.current.frames.get(self.under_frame_nid)
if frame:
under_offset_x, under_offset_y = frame.offset
under_actor_im = self.modify_for_palette(frame.pixmap)
self.set_anim_view(actor_im, (offset_x, offset_y), under_actor_im, (under_offset_x, under_offset_y))
def import_effect(self):
# Ask user for location
starting_path = self.settings.get_last_open_path()
fn_dir = QFileDialog.getExistingDirectory(
self, "Import *.lteffect", starting_path)
if not fn_dir:
return
self.settings.set_last_open_path(fn_dir)
# Determine the palettes in the folder
palette_nid_swap = self.import_palettes(fn_dir)
# Determine the effects in the folder
effect_path = os.path.join(fn_dir, '*_effect.json')
effects = sorted(glob.glob(effect_path))
if not effects:
QMessageBox.warning(self, "File Not Found", "Could not find any valid *_effect.json Combat Effect files.")
for effect_fn in effects:
with open(effect_fn) as load_file:
data = json.load(load_file)
effect = combat_anims.EffectAnimation.restore(data)
full_path = os.path.join(fn_dir, effect.nid + '.png')
effect.set_full_path(full_path)
effect.nid = str_utils.get_next_name(effect.nid, RESOURCES.combat_effects.keys())
# Update any palette references that changed
for idx, palette in enumerate(effect.palettes[:]):
name, nid = palette
if nid in palette_nid_swap:
effect.palettes[idx][1] = palette_nid_swap[nid]
populate_effect_pixmaps(effect)
RESOURCES.combat_effects.append(effect)
# Print done import! Import complete!
self.window.update_list()
QMessageBox.information(self, "Import Complete", "Import of effect %s complete!" % fn_dir)
def export_effect(self):
# Ask user for location
if not self.current:
return
starting_path = self.settings.get_last_open_path()
fn_dir = QFileDialog.getExistingDirectory(
self, "Export Current Effect", starting_path)
if not fn_dir:
return
self.settings.set_last_open_path(fn_dir)
# Create folder at location named effect_nid.lteffect
path = os.path.join(fn_dir, '%s.lteffect' % self.current.nid)
if not os.path.exists(path):
os.mkdir(path)
# Determine which effects are used as subeffects here
effects = {self.current.nid}
for pose in self.current.poses:
for command in pose.timeline:
if command.has_effect():
effect_nid = command.value[0]
if effect_nid:
effects.add(effect_nid)
# For each effect and subeffect:
for effect_nid in effects:
print("Exporting %s" % effect_nid)
effect = RESOURCES.combat_effects.get(effect_nid)
if not effect:
continue
# Store all of this in effect_nid.lteffect folder
# Gather reference to images for this effect
RESOURCES.combat_effects.save_image(path, effect)
# Serialize into json form
serialized_data = effect.save()
serialized_path = os.path.join(path, '%s_effect.json' % effect_nid)
with open(serialized_path, 'w') as serialize_file:
json.dump(serialized_data, serialize_file, indent=4)
# Gather reference to palettes
palette_nids = [palette[1] for palette in effect.palettes]
for palette_nid in palette_nids:
palette = RESOURCES.combat_palettes.get(palette_nid)
if not palette:
continue
# Serialize into json form
serialized_data = palette.save()
serialized_path = os.path.join(path, '%s_palette.json' % palette_nid)
with open(serialized_path, 'w') as serialize_file:
json.dump(serialized_data, serialize_file, indent=4)
# Print done export! Export to %s complete!
QMessageBox.information(self, "Export Complete", "Export of effect to %s complete!" % path)
def export_all_frames(self, fn_dir: str):
current_pose_nid = self.pose_box.currentText()
current_pose = self.current.poses.get(current_pose_nid)
counter = 0
for command in current_pose.timeline:
self.processing = True
self.do_command(command)
if self.processing: # Don't bother drawing anything if we are still processing
continue
im = QImage(WINWIDTH, WINHEIGHT, QImage.Format_ARGB32)
im.fill(editor_utilities.qCOLORKEY)
frame, under_frame = None, None
if self.under_frame_nid:
under_frame = self.current.frames.get(self.under_frame_nid)
under_offset_x, under_offset_y = under_frame.offset
under_frame = self.modify_for_palette(under_frame.pixmap)
if self.frame_nid:
frame = self.current.frames.get(self.frame_nid)
if self.custom_frame_offset:
offset_x, offset_y = self.custom_frame_offset
else:
offset_x, offset_y = frame.offset
frame = self.modify_for_palette(frame.pixmap)
if frame or under_frame:
painter = QPainter()
painter.begin(im)
if under_frame:
painter.drawImage(under_offset_x, under_offset_y, under_frame)
if frame:
painter.drawImage(offset_x, offset_y, frame)
painter.end()
for i in range(self.num_frames):
path = '%s_%s_%04d.png' % (self.current.nid, current_pose.nid, counter)
full_path = os.path.join(fn_dir, path)
im.save(full_path)
counter += 1
def find_appropriate_combat_anim(self, pose_nid: str) -> tuple:
if pose_nid == 'Miss':
pose_nid = 'Attack'
for combat_anim in RESOURCES.combat_anims:
for weapon_anim in combat_anim.weapon_anims:
pose = weapon_anim.poses.get(pose_nid)
if pose:
for command in pose.timeline:
if command.nid == 'spell' and command.value[0] is None:
return combat_anim, weapon_anim
return None, None
def get_combat_palette(self):
return None
def test_combat(self):
if self.current:
current_pose_nid = self.pose_box.currentText()
if 'Attack' in self.current.poses.keys():
pass
else:
print("Missing Attack pose!")
return
# Find a combat animation with this pose and "spell empty" in it's pose
combat_anim, weapon_anim = self.find_appropriate_combat_anim(current_pose_nid)
if not weapon_anim:
print("Couldn't find a usable weapon anim")
return None
left_palette_name, left_palette, right_palette_name, right_palette = self.get_test_palettes(combat_anim)
timer.get_timer().stop()
GAME_ACTIONS.test_combat(
combat_anim, weapon_anim, left_palette_name, left_palette, self.current.nid,
combat_anim, weapon_anim, right_palette_name, right_palette, self.current.nid, current_pose_nid)
timer.get_timer().start()
| 42.385 | 158 | 0.612599 | 2,053 | 16,954 | 4.812957 | 0.149537 | 0.038964 | 0.014472 | 0.009918 | 0.373545 | 0.311203 | 0.261208 | 0.229936 | 0.190163 | 0.172149 | 0 | 0.003402 | 0.306535 | 16,954 | 399 | 159 | 42.491228 | 0.837033 | 0.051787 | 0 | 0.28483 | 0 | 0 | 0.04583 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068111 | false | 0.003096 | 0.092879 | 0.003096 | 0.22291 | 0.009288 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e26e37b97b0a75c67b3d7bad1b9faff3e814de9 | 803 | py | Python | reflex_driver/set_radian_positions.py | EnricoSartori/reflex_ros_pkg | 960373a48a0d9095025763400a00c1b30fe4ede5 | [
"Apache-2.0"
] | null | null | null | reflex_driver/set_radian_positions.py | EnricoSartori/reflex_ros_pkg | 960373a48a0d9095025763400a00c1b30fe4ede5 | [
"Apache-2.0"
] | null | null | null | reflex_driver/set_radian_positions.py | EnricoSartori/reflex_ros_pkg | 960373a48a0d9095025763400a00c1b30fe4ede5 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
import roslib; roslib.load_manifest('reflex_driver')
import rospy, sys
from reflex_msgs.msg import RawServoPositions
from reflex_msgs.msg import RadianServoPositions
if __name__ == '__main__':
    # Register this script as a ROS node before making any other rospy calls.
    rospy.init_node('set_raw_positions')
    args = rospy.myargv()
    # Expect exactly four position arguments (plus the script name).
    if len(args) != 5:
        print(" found %d args" % len(args))
        print("usage: set_radian_positions POS0 POS1 POS2 POS3")
        print(" where POSx is in radians, and is interpreted with reference")
        print(" to the zero position found in yaml/finger_calibrate.yaml")
        sys.exit(1)
    publisher = rospy.Publisher('set_reflex_hand', RadianServoPositions)
    rospy.sleep(0.3)  # give the publisher time to connect to subscribers
    target_positions = [float(raw) for raw in args[1:5]]
    publisher.publish(RadianServoPositions(target_positions))
    rospy.sleep(0.1)  # let the message flush before the node exits
| 34.913043 | 77 | 0.728518 | 118 | 803 | 4.754237 | 0.584746 | 0.064171 | 0.049911 | 0.060606 | 0.081996 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020378 | 0.144458 | 803 | 22 | 78 | 36.5 | 0.796215 | 0.024907 | 0 | 0 | 0 | 0 | 0.297954 | 0.033248 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.222222 | 0 | 0.222222 | 0.222222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e2f78522d3404763362988b67f1631faf50d755 | 408 | py | Python | record_video.py | jhkim31/pose-estimation | c755688854a685ef49255fc37746cb6fe43dd748 | [
"Apache-2.0"
] | null | null | null | record_video.py | jhkim31/pose-estimation | c755688854a685ef49255fc37746cb6fe43dd748 | [
"Apache-2.0"
] | null | null | null | record_video.py | jhkim31/pose-estimation | c755688854a685ef49255fc37746cb6fe43dd748 | [
"Apache-2.0"
] | null | null | null | import cv2
# Record webcam video at 640x480 to 'output.mp4' until the user presses ESC.
print("종료하려면 ESC")
capture = cv2.VideoCapture(0)
capture.set(cv2.CAP_PROP_FRAME_WIDTH, 640)
capture.set(cv2.CAP_PROP_FRAME_HEIGHT, 480)
print(capture.get(cv2.CAP_PROP_FPS))
# NOTE(review): DIVX is an AVI-era codec; pairing it with an .mp4 container
# may fail on some platforms -- consider 'mp4v' for .mp4 output.
fourcc = cv2.VideoWriter_fourcc(*'DIVX')
out = cv2.VideoWriter('output.mp4', fourcc, 15, (640, 480))
while cv2.waitKey(33) != 27:  # 27 == ESC; ~30 fps polling interval
    # NOTE(review): `ret` is ignored; a failed grab yields frame=None and
    # imshow would raise -- confirm whether a `if not ret: break` is wanted.
    ret, frame = capture.read()
    cv2.imshow("VideoFrame", frame)
    out.write(frame)
# Bug fix: release the camera and finalize the video file. Without these the
# writer can leave the output truncated and the capture device stays locked.
capture.release()
out.release()
cv2.destroyAllWindows()
| 22.666667 | 59 | 0.720588 | 62 | 408 | 4.596774 | 0.548387 | 0.063158 | 0.105263 | 0.112281 | 0.175439 | 0.175439 | 0 | 0 | 0 | 0 | 0 | 0.081006 | 0.122549 | 408 | 17 | 60 | 24 | 0.715084 | 0 | 0 | 0 | 0 | 0 | 0.081081 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.083333 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e2f8c6300c5f9a042b8e10acc04b58b141189bf | 6,738 | py | Python | arkestra_utilities/plugin_modifiers.py | luxumbra/Arkestra | 1e136641d27bb50cfb60c17cfbd3351882a6de30 | [
"BSD-2-Clause"
] | 1 | 2019-06-27T13:05:16.000Z | 2019-06-27T13:05:16.000Z | arkestra_utilities/plugin_modifiers.py | okyame/Arkestra | 4aa22816b33d8f2d7a6bc8f7a498957134b557dd | [
"BSD-2-Clause"
] | null | null | null | arkestra_utilities/plugin_modifiers.py | okyame/Arkestra | 4aa22816b33d8f2d7a6bc8f7a498957134b557dd | [
"BSD-2-Clause"
] | null | null | null | from modifier_pool import adjuster_pool, WidthAdjuster
import re
"""
There are various kinds of WidthAdjuster modifiers:
* placeholder_width
works out the width of the cms placeholder
* plugin_width
works out the overall width of the plugin (image, carousel, video, whatever) including its borders etc
* image_width
works out the exact size of the images required
* mark_and_modify
"""
class SimplePlaceholderWidthAdjuster(WidthAdjuster):
    """Applies the template-requested adjustment to the placeholder width.

    The adjustment mode and amount come from the template context
    ("width_adjuster" / "width_adjustment"); nothing changes unless
    "adjust_width" is set in the context.
    """
    kind = "placeholder_width"

    def modify(self, context, placeholder_width):
        # Does the template ask for an adjusted placeholder width?
        wants_adjustment = context.get("adjust_width", False)
        # One of "divider", "multiplier", "percent", "relative", "absolute".
        mode = context.get("width_adjuster", None)
        amount = float(context.get("width_adjustment", 0))
        if wants_adjustment:
            if mode == "divider":
                placeholder_width = placeholder_width / amount
            elif mode == "multiplier":
                placeholder_width = placeholder_width * amount
            elif mode == "percent":
                placeholder_width = placeholder_width * amount / 100
            elif mode == "relative":
                placeholder_width = placeholder_width + amount
            elif mode == "absolute":
                placeholder_width = amount
            # Widths are always whole pixels.
            placeholder_width = int(placeholder_width)
        return placeholder_width
class AutoSpaceFloat(WidthAdjuster):
    """Reduces the plugin width based on auto-width, spacing and float state.

    The (auto, space, floated) triple indexes ``reduce_by`` to obtain the
    percentage of the available width the plugin may occupy:
      auto    - was the width computed automatically?
      space   - does a grandparent carry a space-on-left/right class?
      floated - does a grandparent carry images-left/images-right?
    """
    # Percentage of the width to keep, keyed by (auto, space, floated).
    reduce_by = {
        (False, False, False): 100.0,  # given width, no left/right space, not floated
        (False, False, True): 100.0,   # given width, no left/right space, floated
        (False, True, False): 67.0,    # given width, left/right space, not floated
        (False, True, True): 100.0,    # given width, left/right space, floated
        (True, False, False): 100.0,   # auto width, no left/right space, not floated
        (True, False, True): 50.0,     # auto width, no left/right space, floated
        (True, True, False): 67.0,     # auto width, left/right space, not floated
        (True, True, True): 50.0,      # auto width, left/right space, floated
    }
    space = False
    floated = False
    kind = "plugin_width"

    def modify(self, context, target, width, auto):
        # The grandparent element's class list determines space/float state.
        container = target.parent.parent
        if container:
            container_class = container.get("class", "")
            self.space = "space-on" in container_class
            self.floated = ("images-left" in container_class
                            or "images-right" in container_class)
        percentage = self.reduce_by[(auto, self.space, self.floated)]
        return width * percentage / 100
class ReduceForBackground(WidthAdjuster):
    """Shrinks the image width when a containing element has a background.

    "Background" classes (default "outline tint") add chrome around the
    image, so the image itself must be narrower to fit inside it.
    """
    kind = "image_width"

    def modify(self, context, element, width):
        css_classes = element.get("class", "")  # the element's HTML class string
        background_classes = context.get("background_classes", "outline tint")
        for marker in background_classes.split():
            # Substring test against the whole class attribute; reduce once.
            if marker in css_classes:
                width = width - context.get("background_reduction", 32)
                break
        return width
class ColumnWidths(WidthAdjuster):
    """Adjusts image width for images placed inside "column" layout elements.

    The fraction of the row a column occupies (columnwidth/columns) keys into
    ``column_widths`` to get the percentage of the page width available.
    """
    kind = "image_width"

    # These values are given as variables here because we never quite know how
    # values such as 2.0/5 will be calculated - this way, we need not worry
    # what the values will be.
    one = 1.0
    half = 1.0/2
    one_third = 1.0/3
    one_quarter = 1.0/4
    one_fifth = 1.0/5
    one_sixth = 1.0/6
    two_thirds = 2.0/3
    three_quarters = 3.0/4
    two_fifths = 2.0/5
    three_fifths = 3.0/5

    # See the column widths styles in arkestra.css - they need to match these.
    column_widths = {
        one: 1.0,
        half: 48.0,
        one_third: 30.6667,
        one_quarter: 22.0,
        one_fifth: 16.8,
        one_sixth: 13.3333,
        two_thirds: 65.4,
        three_quarters: 74.0,
        two_fifths: 37.73,
        three_fifths: 58.4,
    }

    def modify(self, context, element, width):
        element_class = element.get("class", "")  # and its HTML class
        # Bug fix: the pattern was r"\column\b" - "\c" is an invalid escape
        # that raises re.error ("bad escape \c") on Python 3.7+. Under the
        # original Python 2 re it matched a literal "column" followed by a
        # word boundary (which also matches "doublecolumn"/"triplecolumn");
        # r"column\b" preserves exactly that behaviour on modern Python.
        if re.search(r"column\b", element_class) and "columns" in element.parent.get("class", ""):
            # "columnsN" on the parent encodes the number of columns (default 1).
            # NOTE(review): only the first character after "columns" is read,
            # so rows with 10+ columns would be misparsed - confirm upstream.
            columns = float(element.parent.get("class", "").split("columns")[1][0] or 1)
            # Double/triple columns span more than one column slot.
            if "triplecolumn" in element_class:
                columnwidth = 3.0
            elif "doublecolumn" in element_class:
                columnwidth = 2.0
            else:
                columnwidth = 1
            # Use columnwidth/columns as the key into the column_widths dict.
            width = width * self.column_widths[columnwidth / columns] / 100
        return width
class ImageBorders(WidthAdjuster):
    """Marks elements that draw image borders, then narrows images to fit."""
    kind = "mark_and_modify"

    def mark(self, context, element, markers):
        """Record in ``markers`` whether this element switches borders on or off."""
        border_on = context.get("image_border_class", "image-borders")
        border_off = context.get("no_image_border_class", "no-image-borders")
        css_classes = element.get("class", "")  # the element's HTML class
        if border_on in css_classes:
            markers["has_borders"] = True
        # Checked second on purpose: the default "no-image-borders" contains
        # "image-borders" as a substring, so the off switch must win.
        if border_off in css_classes:
            markers["has_borders"] = False
        return markers

    def modify(self, context, markers, width):
        """Subtract the border allowance when borders were marked on."""
        if not markers.get("has_borders"):
            return width
        return width - context.get("image_border_reduction", 16)
def register():
    """Register every width adjuster in this module with the shared pool."""
    for adjuster_class in (
            SimplePlaceholderWidthAdjuster,
            AutoSpaceFloat,
            ReduceForBackground,
            ColumnWidths,
            ImageBorders):
        adjuster_pool.register_adjuster(adjuster_class)
| 37.021978 | 102 | 0.628525 | 826 | 6,738 | 5 | 0.243341 | 0.069734 | 0.027119 | 0.024213 | 0.215981 | 0.169007 | 0.102663 | 0.046005 | 0.031477 | 0.021308 | 0 | 0.024943 | 0.280053 | 6,738 | 181 | 103 | 37.226519 | 0.826428 | 0.176313 | 0 | 0.100917 | 0 | 0 | 0.089862 | 0.009007 | 0 | 0 | 0 | 0 | 0 | 1 | 0.06422 | false | 0 | 0.018349 | 0 | 0.357798 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e3049b2707b356f0c1a390e73502abb2446fd8f | 6,787 | py | Python | tools/tensorflow_docs/api_generator/traverse.py | markus-hinsche/docs | 9edcd05306c98755f26b9ac7738a8dd5ee0e268f | [
"Apache-2.0"
] | null | null | null | tools/tensorflow_docs/api_generator/traverse.py | markus-hinsche/docs | 9edcd05306c98755f26b9ac7738a8dd5ee0e268f | [
"Apache-2.0"
] | null | null | null | tools/tensorflow_docs/api_generator/traverse.py | markus-hinsche/docs | 9edcd05306c98755f26b9ac7738a8dd5ee0e268f | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Traversing Python modules and classes."""
import inspect
import sys
from google.protobuf.message import Message as ProtoMessage
__all__ = ['traverse']
def _filter_module_all(path, root, children):
  """Restrict a module's children to the names listed in its `__all__`.

  Args:
    path: API path to this symbol (unused).
    root: The object whose children are being filtered.
    children: A list of (name, object) pairs.

  Returns:
    `children` filtered to respect `__all__`; returned unchanged when `root`
    is not a module or declares no `__all__`.
  """
  del path
  if inspect.ismodule(root) and hasattr(root, '__all__'):
    allowed = set(root.__all__)
    return [(name, value) for name, value in children if name in allowed]
  return children
def _add_proto_fields(path, root, children):
  """Expose proto message fields as documented properties on the class.

  Warning: this patches the Properties onto the class in place. That is
  acceptable because the doc generator never shares a process with other
  tensorflow code.

  Args:
    path: API path to this symbol (unused).
    root: The object being documented.
    children: A list of (name, object) pairs.

  Returns:
    `children` with one property per proto field added, sorted by name.
  """
  del path
  if not inspect.isclass(root):
    return children
  if not issubclass(root, ProtoMessage):
    return children

  descriptor = getattr(root, 'DESCRIPTOR', None)
  if descriptor is None or not descriptor.fields:
    return children
  fields = descriptor.fields

  # Map a field's integer type/label codes to their symbolic names
  # ("TYPE_INT64", "LABEL_REPEATED", ...), using the first field as a sample.
  sample = fields[0]
  type_names = {getattr(sample, attr): attr
                for attr in dir(sample) if attr.startswith('TYPE')}
  label_names = {getattr(sample, attr): attr
                 for attr in dir(sample) if attr.startswith('LABEL')}

  new_props = {}
  for field in fields:
    doc_parts = []
    # "optional" is the default label, so it is omitted from the doc.
    label = label_names[field.label].lower().replace('label_', '')
    if label != 'optional':
      doc_parts.append(label)
    type_name = type_names[field.type]
    if type_name == 'TYPE_MESSAGE':
      type_name = field.message_type.name
    elif type_name == 'TYPE_ENUM':
      type_name = field.enum_type.name
    else:
      type_name = type_name.lower().replace('type_', '')
    doc_parts.append(type_name)
    doc_parts.append(field.name)
    doc = '`{}`'.format(' '.join(doc_parts))
    new_props[field.name] = property(fget=lambda x: x, doc=doc)

  # Patch the class so the rest of the pipeline sees real attributes.
  for prop_name, prop in new_props.items():
    setattr(root, prop_name, prop)

  merged = dict(children)
  merged.update(new_props)
  return sorted(merged.items(), key=lambda item: item[0])
def _filter_builtin_modules(path, root, children):
  """Drop children that are built-in (C-level) modules.

  Args:
    path: API path to this symbol (unused).
    root: The object whose children are being filtered (unused).
    children: A list of (name, object) pairs.

  Returns:
    `children` with every built-in module removed.
  """
  del path
  del root

  def _is_builtin(candidate):
    # Built-in modules (e.g. `sys`) have no Python source to document.
    return (inspect.ismodule(candidate)
            and candidate.__name__ in sys.builtin_module_names)

  return [(name, child) for name, child in children if not _is_builtin(child)]
def _traverse_internal(root, visitors, stack, path):
  """Depth-first worker for `traverse`.

  Args:
    root: The object whose members are visited next.
    visitors: Callables `(path, parent, children) -> children`, applied in
      order; each may filter the child list.
    stack: Objects already on the current descent path (for cycle breaking).
    path: Tuple of names by which `root` is reachable from the start object.
  """
  # Only modules and classes have members worth documenting.
  if not (inspect.isclass(root) or inspect.ismodule(root)):
    return
  new_stack = stack + [root]

  try:
    members = inspect.getmembers(root)
  except ImportError:
    # Some installations (six, notably) raise while enumerating members.
    members = []

  # Drop members already on the stack; identity comparison breaks cycles.
  children = [(name, child) for name, child in members
              if not any(child is ancestor for ancestor in new_stack)]

  # Let each visitor observe and filter the children.
  for visitor in visitors:
    children = visitor(path, root, list(children))

  for name, child in children:
    _traverse_internal(child, visitors, new_stack, path + (name,))
def traverse(root, visitors, root_name):
  """Recursively enumerate all members of `root`, depth first.

  Similar to the Python library function `os.path.walk`. Parent-child
  relationships are defined by membership in modules or classes; each
  visitor is called as `visit(path, parent, children)` for every module or
  class found, where `path` is the tuple of names by which `parent` is
  reachable and `children` is the `inspect.getmembers` list (possibly
  filtered by earlier visitors). Visitors may prune the tree by returning a
  reduced child list.

  Cycles (detected by reference identity) stop the traversal, and built-in
  modules are never descended into. If `root` is not a module or class, no
  visitor is ever called. Traversing system modules can take a long time; a
  visitor that denylists them is advisable.

  Args:
    root: A python object with which to start the traversal.
    visitors: A list of callables. Each takes `(path, parent, children)` and
      returns the list of accepted children.
    root_name: The short-name of the root module.
  """
  # Standard filters always run first, in this order, before user visitors.
  all_visitors = [
      _filter_module_all,
      _add_proto_fields,
      _filter_builtin_modules,
  ] + visitors
  _traverse_internal(root, all_visitors, [], (root_name,))
| 31.421296 | 80 | 0.700309 | 944 | 6,787 | 4.938559 | 0.291314 | 0.01716 | 0.009009 | 0.01287 | 0.170099 | 0.154655 | 0.115616 | 0.092879 | 0.076577 | 0.076577 | 0 | 0.001847 | 0.202446 | 6,787 | 215 | 81 | 31.567442 | 0.859413 | 0.52689 | 0 | 0.23913 | 0 | 0 | 0.025885 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.054348 | false | 0 | 0.043478 | 0 | 0.184783 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e304cb44308462ee20c61d980a73e789d7d9065 | 7,418 | py | Python | tests/test_vcf.py | UMCUGenetics/vcf-explorer | 18b46b1f61181ce5cbd2a8065d272dd37081da8f | [
"MIT"
] | 5 | 2018-04-27T09:02:25.000Z | 2020-12-17T13:15:08.000Z | tests/test_vcf.py | CuppenResearch/vcf-explorer | 18b46b1f61181ce5cbd2a8065d272dd37081da8f | [
"MIT"
] | 2 | 2015-05-26T11:57:52.000Z | 2016-05-25T08:43:40.000Z | tests/test_vcf.py | UMCUGenetics/vcf-explorer | 18b46b1f61181ce5cbd2a8065d272dd37081da8f | [
"MIT"
] | null | null | null | import unittest
import bson
import utils.vcf
class GatkVCFImportTestCase(unittest.TestCase):
    """Tests parsing of GATK-style VCF headers and variant lines via utils.vcf."""

    def test_gatk_header_line(self):
        # The #CHROM header line carries the sample names in its trailing columns.
        vcf_header_input = '#CHROM\tPOS\tid\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t1\t2\t3\t4'
        vcf_header_output = {'samples':['1','2','3','4']}
        self.assertEqual(utils.vcf.vcf_header(vcf_header_input),vcf_header_output)

    def test_gatk_snp_line(self):
        # A homozygous SNP call genotyped across the four samples above.
        vcf_line_input = '1\t69511\trs75062661\tA\tG\t6448.36\tPASS\tAC=8;AF=1.00;AN=8;DB;DP=233;FS=0.000;MLEAC=8;MLEAF=1.00;MQ=40.45;QD=27.68;SOR=1.083\tGT:AD:DP:GQ:PL\t1/1:0,48:48:99:1371,144,0\t1/1:0,94:94:99:2511,281,0\t1/1:0,9:9:27:294,27,0\t1/1:0,82:82:99:2298,246,0'
        # Metadata that would normally be gathered from ##FORMAT header lines.
        vcf_metadata_input = {
            '_id':bson.objectid.ObjectId('5694bd9e6c9d8b15869320fe'),
            'samples':['1','2','3','4'],
            "FORMAT" : {
                "GT" : { "type" : "String","number" : "1", "description" : "Genotype" },
                "GQ" : { "type" : "Integer", "number" : "1", "description" : "Genotype Quality" },
                "AD" : { "type" : "Integer", "number" : ".", "description" : "Allelic depths for the ref and alt alleles in the order listed" },
                "DP" : { "type" : "Integer", "number" : "1", "description" : "Approximate read depth (reads with MQ=255 or with bad mates are filtered)" },
                "PL" : { "type" : "Integer", "number" : "G", "description" : "Normalized, Phred-scaled likelihoods for genotypes as defined in the VCF specification" }
            }
        }
        # Expected: a (variant, per-sample genotypes) pair, one genotype dict
        # per sample, each linked back to the VCF document by vcf_id.
        vcf_line_result = ({'chr': '1', 'pos': 69511, 'ref': 'A', 'alt': 'G', 'dbSNP': 'rs75062661'},
        [
        {'sample':'1', 'vcf_id':bson.objectid.ObjectId('5694bd9e6c9d8b15869320fe'), 'genotype' : {'GT':'1/1', 'AD':[0,48], 'DP':48, 'GQ':99, 'PL':[1371,144,0]} },
        {'sample':'2', 'vcf_id':bson.objectid.ObjectId('5694bd9e6c9d8b15869320fe'), 'genotype' : {'GT':'1/1', 'AD':[0,94], 'DP':94, 'GQ':99, 'PL':[2511,281,0]} },
        {'sample':'3', 'vcf_id':bson.objectid.ObjectId('5694bd9e6c9d8b15869320fe'), 'genotype' : {'GT':'1/1', 'AD':[0,9], 'DP':9, 'GQ':27, 'PL':[294,27,0]} },
        {'sample':'4', 'vcf_id':bson.objectid.ObjectId('5694bd9e6c9d8b15869320fe'), 'genotype' : {'GT':'1/1', 'AD':[0,82], 'DP':82, 'GQ':99, 'PL':[2298,246,0]} },
        ])
        self.assertEqual(utils.vcf.gatk_line(vcf_line_input,vcf_metadata_input),vcf_line_result)
class DellyVCFImportTestCase(unittest.TestCase):
    """Tests parsing of Delly structural-variant VCF lines via utils.vcf."""

    def test_delly_header_line(self):
        # The #CHROM header line carries the sample names in its trailing columns.
        vcf_header_input = '#CHROM\tPOS\tid\tREF\tALT\tQUAL\tFILTER\tINFO\tFORMAT\t1\t2\t3\t4'
        vcf_header_output = {'samples':['1','2','3','4']}
        self.assertEqual(utils.vcf.vcf_header(vcf_header_input),vcf_header_output)

    def test_delly_snp_line(self):
        # An imprecise deletion call (Delly v0.6.7) genotyped in three samples.
        vcf_line_input = '1\t21492484\tDEL00000120\tN\t<DEL>\t0\tPASS\tIMPRECISE;CIEND=-317,317;CIPOS=-317,317;SVtype=DEL;SVMETHOD=EMBL.DELLYv0.6.7;CHR2=1;END=149337534;CT=3to5;INSLEN=0;PE=3;MAPQ=42\tGT:GL:GQ:FT:RCL:RC:RCR:CN:DR:DV:RR:RV\t0/1:-2.90075,0,-132.871:29:PASS:4249861:24362177:14135397:3:26:3:0:0\t0/0:0,-5.11741,-98.5999:51:PASS:4920660:27959714:16156110:3:17:0:0:0\t0/0:0,-5.95897,-80.3384:60:PASS:4627520:26546097:15423242:3:20:0:0:0'
        vcf_metadata_input = {}
        # Metadata that would normally be gathered from ##INFO/##FORMAT headers.
        vcf_metadata_input = {
            '_id':bson.objectid.ObjectId('5694bd9e6c9d8b15869320fe'),
            "INFO" : {
                "PRECISE" : { "type" : "Flag", "number" : "0", "description" : "Precise structural variation" },
                "SVMETHOD" : { "type" : "String", "number" : "1", "description" : "Type of approach used to detect SV" },
                "CIEND" : { "type" : "Integer", "number" : "2", "description" : "PE confidence interval around END" },
                "END" : { "type" : "Integer", "number" : "1", "description" : "End position of the structural variant" },
                "CHR2" : { "type" : "String", "number" : "1", "description" : "Chromosome for END coordinate in case of a translocation" },
                "SR" : { "type" : "Integer", "number" : "1", "description" : "Split-read support" },
                "SRQ" : { "type" : "Float", "number" : "1", "description" : "Split-read consensus alignment quality" },
                "CIPOS" : { "type" : "Integer", "number" : "2", "description" : "PE confidence interval around POS" },
                "CONSENSUS" : { "type" : "String", "number" : "1", "description" : "Split-read consensus sequence" },
                "SVTYPE" : { "type" : "String", "number" : "1", "description" : "Type of structural variant" },
                "MAPQ" : { "type" : "Integer", "number" : "1", "description" : "Median mapping quality of paired-ends" },
                "PE" : { "type" : "Integer", "number" : "1", "description" : "Paired-end support of the structural variant" },
                "INSLEN" : { "type" : "Integer", "number" : "1", "description" : "Predicted length of the insertion" },
                "IMPRECISE" : { "type" : "Flag", "number" : "0", "description" : "Imprecise structural variation" },
                "CT" : { "type" : "String", "number" : "1", "description" : "Paired-end signature induced connection type"}
            },
            "FORMAT" : {
                "RV" : { "type" : "Integer", "number" : "1", "description" : "# high-quality variant junction reads" },
                "GT" : { "type" : "String", "number" : "1", "description" : "Genotype" },
                "FT" : { "type" : "String", "number" : "1", "description" : "Per-sample genotype filter" },
                "CN" : { "type" : "Integer", "number" : "1", "description" : "Read-depth based copy-number estimate for autosomal sites" },
                "GQ" : { "type" : "Integer", "number" : "1", "description" : "Genotype Quality" },
                "RR" : { "type" : "Integer", "number" : "1", "description" : "# high-quality reference junction reads" },
                "RCR" : { "type" : "Integer", "number" : "1", "description" : "Raw high-quality read counts for the right control region" },
                "RCL" : { "type" : "Integer", "number" : "1", "description" : "Raw high-quality read counts for the left control region" },
                "RC" : { "type" : "Integer", "number" : "1", "description" : "Raw high-quality read counts for the SV" },
                "DV" : { "type" : "Integer", "number" : "1", "description" : "# high-quality variant pairs" },
                "GL" : { "type" : "Float", "number" : "G", "description" : "Log10-scaled genotype likelihoods for RR,RA,AA genotypes" },
                "DR" : { "type" : "Integer", "number" : "1", "description" : "# high-quality reference pairs"}
            },
            'samples':['1','2','3']
        }
        # Expected: the SV record plus per-sample INFO/filter/genotype details
        # (only the first sample's breakdown is asserted here).
        vcf_line_result = ({'END': 149337534, 'CHR2': '1', 'pos': 21492484, 'chr': '1', 'alt': '<DEL>', 'ref': 'N', 'id': 'DEL00000120', 'CT': '3to5'},
        [{'sample': '1', 'vcf_id': bson.objectid.ObjectId('5694bd9e6c9d8b15869320fe'), 'info': {'PRECISE': 'IMPRECISE', 'CIEND': [-317, 317], 'CIPOS': [-317, 317], 'MAPQ': 42, 'PE': 3, 'INSLEN': 0}, 'filter': 'PASS', 'genotype': {'RV': 0, 'GT': '0/1', 'FT': 'PASS', 'CN': 3, 'GQ': 29, 'RR': 0, 'RCR': 14135397, 'RCL': 4249861, 'RC': 24362177, 'DV': 3, 'GL': [-2.90075, 0.0, -132.871], 'DR': 26}}]
        )
        self.assertEqual(utils.vcf.delly_line(vcf_line_input,vcf_metadata_input),vcf_line_result)
if __name__ == '__main__':
    # Run all test cases in this module through the unittest runner.
    unittest.main()
| 84.295455 | 448 | 0.567673 | 916 | 7,418 | 4.517467 | 0.283843 | 0.042291 | 0.108748 | 0.069599 | 0.52755 | 0.434026 | 0.40261 | 0.350894 | 0.237071 | 0.210488 | 0 | 0.111437 | 0.218523 | 7,418 | 87 | 449 | 85.264368 | 0.602381 | 0 | 0 | 0.205128 | 0 | 0.051282 | 0.484228 | 0.130089 | 0 | 0 | 0 | 0 | 0.051282 | 1 | 0.051282 | false | 0.038462 | 0.064103 | 0 | 0.141026 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e3171b0210561aceec5d83e02e852b726d4571c | 849 | py | Python | groundstation/logger.py | richo/groundstation | 7ed48dd355051ee6b71164fc801e3893c09d11db | [
"MIT"
] | 26 | 2015-06-18T20:17:07.000Z | 2019-09-26T09:55:35.000Z | groundstation/logger.py | richo/groundstation | 7ed48dd355051ee6b71164fc801e3893c09d11db | [
"MIT"
] | null | null | null | groundstation/logger.py | richo/groundstation | 7ed48dd355051ee6b71164fc801e3893c09d11db | [
"MIT"
] | 5 | 2015-07-20T01:52:47.000Z | 2017-01-08T09:54:07.000Z | import os
import logging
import binascii
if "GROUNDSTATION_DEBUG" in os.environ:
LEVEL = getattr(logging, os.getenv("GROUNDSTATION_DEBUG"))
else:
LEVEL = logging.DEBUG
def fix_oid(oid):
    """Return the hexadecimal representation (as bytes) of a raw binary oid."""
    return binascii.b2a_hex(oid)
def _get_formatter():
    """Build the log-record formatter shared by all console handlers."""
    fmt = '%(process)5d: %(name)s - %(levelname)s - %(message)s'
    return logging.Formatter(fmt)
CONSOLE_FORMATTER = _get_formatter()
def _get_console_handler():
    """Build a stream handler emitting to stderr at the configured LEVEL."""
    handler = logging.StreamHandler()
    handler.setLevel(LEVEL)
    handler.setFormatter(CONSOLE_FORMATTER)
    return handler
# Single handler instance shared by every logger created via getLogger().
CONSOLE_HANDLER = _get_console_handler()
LOGGERS = {} # Cache of instantiated loggers
def getLogger(name):  # Not threadsafe
    """Return the cached logger for `name`, creating and configuring it once.

    Caching ensures the console handler is attached only on first use, so
    repeated calls do not produce duplicate log output.
    """
    cached = LOGGERS.get(name)
    if cached is not None:
        return cached
    logger = logging.getLogger(name)
    logger.setLevel(LEVEL)
    logger.addHandler(CONSOLE_HANDLER)
    LOGGERS[name] = logger
    return logger
| 20.707317 | 84 | 0.714959 | 103 | 849 | 5.728155 | 0.398058 | 0.094915 | 0.057627 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001439 | 0.18139 | 849 | 40 | 85 | 21.225 | 0.847482 | 0.053004 | 0 | 0 | 0 | 0 | 0.11236 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.148148 | false | 0 | 0.111111 | 0.074074 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e36b9810e1de0eb576616955030bb8928a71d70 | 4,460 | py | Python | main.py | cjh14813573/Joint_NLU | 1c796444677e4b163db0fd5df75c2cc4ac63886a | [
"Apache-2.0"
] | 1 | 2022-03-27T06:27:28.000Z | 2022-03-27T06:27:28.000Z | main.py | cjh14813573/Joint_NLU | 1c796444677e4b163db0fd5df75c2cc4ac63886a | [
"Apache-2.0"
] | null | null | null | main.py | cjh14813573/Joint_NLU | 1c796444677e4b163db0fd5df75c2cc4ac63886a | [
"Apache-2.0"
] | 1 | 2022-03-27T06:27:32.000Z | 2022-03-27T06:27:32.000Z |
import argparse
import traceback
import torch
import os
# Command-line configuration for joint intent/slot (NLU) training.
parser = argparse.ArgumentParser()
###
parser.add_argument('--bert_path', help='config file', default='/Users/cjh/develop/python/AI/models/bert-base-chinese')
parser.add_argument('--save_path', help='path to save checkpoint', default='./save')
parser.add_argument('--lr', help='learning rate', type=float, default=8e-6)
parser.add_argument('--lr_warmup', type=float, default=200)
# NOTE(review): batch size is declared as float; downstream code presumably
# coerces it to int -- confirm, or switch to type=int.
parser.add_argument('--bs', help='batch size', type=float, default=30)
parser.add_argument('--n_jobs', help='num of workers to process data', type=int, default=1)
parser.add_argument('--intent_label_vocab', help='intent classification vocab', default='./data/cls_vocab')
parser.add_argument('--slot_label_vocab', help='slot vocab', default='./data/slot_vocab')
parser.add_argument('--train_file', help='training data', default='./data/train.tsv')
parser.add_argument('--valid_file', help='valid data', default='./data/test.tsv')
parser.add_argument('--max_length', type=int, default=90)
parser.add_argument('--n_epochs', type=int, default=30)
parser.add_argument('--gpu', type=str, default='3')
parser.add_argument("--local_rank", help='used for distributed training', type=int, default=-1)
parser.add_argument('--batch_split', type=int, default=1)
parser.add_argument('--eval_steps', type=int, default=40)
parser.add_argument('--crf', action="store_true")
###
args = parser.parse_args()
# Bug fix: the variable was misspelled 'CUDA_VISABLE_DEVICES', so the GPU
# selection silently had no effect. It must be set before CUDA is initialized.
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
from transformers import BertConfig, BertTokenizer, AdamW
from NLU_model import NLUModel
import dataset
import utils
from trainer import Trainer
from torch.nn.parallel import DistributedDataParallel
# Checkpoints go under <save_path>/train; tensorboard/log files under <save_path>/log.
train_path = os.path.join(args.save_path, 'train')
log_path = os.path.join(args.save_path, 'log')
use_gpu = torch.cuda.is_available() # whether a CUDA GPU is available
def save_func(epoch, device):
    """Persist the trainer state to <save_path>/train after the given epoch."""
    checkpoint_name = utils.get_ckpt_filename('model', epoch)
    torch.save(trainer.state_dict(), os.path.join(train_path, checkpoint_name))
if __name__ == '__main__':
    try:
        # Rank -1 means single-process; rank 0 is the master in distributed mode.
        if args.local_rank == -1 or args.local_rank == 0:
            if not os.path.isdir(args.save_path):
                os.makedirs(args.save_path)
        logger = utils.get_logger(os.path.join(args.save_path, 'train.log'))
        if args.local_rank == -1 or args.local_rank == 0:
            for path in [train_path, log_path]:
                if not os.path.isdir(path):
                    logger.info('cannot find {}, mkdiring'.format(path))
                    os.makedirs(path)
            # Log the full configuration for reproducibility.
            for i in vars(args):
                logger.info('{}: {}'.format(i, getattr(args, i)))

        distributed = args.local_rank != -1
        if distributed:
            torch.cuda.set_device(args.local_rank)
            device = torch.device("cuda", args.local_rank)
            # Bug fix: init_method was misspelled 'end://'. The 'env://'
            # scheme reads MASTER_ADDR/MASTER_PORT from the environment.
            torch.distributed.init_process_group(backend='nccl', init_method='env://')
            # NOTE(review): args.seed is never added to the argument parser,
            # so this raises AttributeError in distributed mode -- add a
            # --seed option or remove the call.
            torch.manual_seed(args.seed)
        else:
            if use_gpu:
                device = torch.device('cuda', 0)
            else:
                device = torch.device('cpu')
        args.device = device

        tokz = BertTokenizer.from_pretrained(args.bert_path)
        _, intent2index, _ = utils.load_vocab(args.intent_label_vocab)
        _, slot2index, _ = utils.load_vocab(args.slot_label_vocab)
        train_dataset = dataset.NLUDataset([args.train_file], tokz, intent2index, slot2index, logger, max_lengths=args.max_length)
        valid_dataset = dataset.NLUDataset([args.valid_file], tokz, intent2index, slot2index, logger, max_lengths=args.max_length)

        logger.info('Building models, rank {}'.format(args.local_rank))
        # Extend the BERT config with the label-space sizes for the two heads.
        bert_config = BertConfig.from_pretrained(args.bert_path)
        bert_config.num_intent_labels = len(intent2index)
        bert_config.num_slot_labels = len(slot2index)
        model = NLUModel.from_pretrained(args.bert_path, config=bert_config, args=args).to(device)
        if distributed:
            model = DistributedDataParallel(model, device_ids=[args.local_rank], output_device=args.local_rank)

        trainer = Trainer(args, model, tokz, train_dataset, valid_dataset, log_path, logger, device, distributed=distributed)
        start_epoch = 0
        if args.local_rank in [-1, 0]:
            # The master process saves a checkpoint after every epoch.
            trainer.train(start_epoch, args.n_epochs, after_epoch_funcs=[save_func])
        else:
            trainer.train(start_epoch, args.n_epochs)
    except Exception:
        # Robustness fix: was a bare `except:`, which also swallowed
        # KeyboardInterrupt/SystemExit. NOTE(review): if the failure occurs
        # before `logger` is created, this handler itself raises NameError.
        logger.error(traceback.format_exc())
| 42.47619 | 130 | 0.688565 | 596 | 4,460 | 4.939597 | 0.27349 | 0.05197 | 0.098166 | 0.02038 | 0.20788 | 0.147418 | 0.147418 | 0.082201 | 0.061821 | 0.061821 | 0 | 0.009242 | 0.175112 | 4,460 | 104 | 131 | 42.884615 | 0.790976 | 0.00157 | 0 | 0.08642 | 0 | 0 | 0.138514 | 0.011937 | 0 | 0 | 0 | 0 | 0 | 1 | 0.012346 | false | 0 | 0.123457 | 0 | 0.135802 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e39135478caf6a2e23c8dbc282f7e69470bde24 | 15,983 | py | Python | oscar/apps/catalogue/migrations/0001_initial.py | nosrevi/django-oscar | 180571fecec6fbe4654fbcd3e3017d8580814450 | [
"BSD-3-Clause"
] | 2 | 2015-12-11T00:19:15.000Z | 2021-11-14T19:44:42.000Z | oscar/apps/catalogue/migrations/0001_initial.py | nosrevi/django-oscar | 180571fecec6fbe4654fbcd3e3017d8580814450 | [
"BSD-3-Clause"
] | null | null | null | oscar/apps/catalogue/migrations/0001_initial.py | nosrevi/django-oscar | 180571fecec6fbe4654fbcd3e3017d8580814450 | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
import oscar.models.fields
import django.core.validators
import django.db.models.deletion
import oscar.models.fields.autoslugfield
class Migration(migrations.Migration):
    """Initial schema for the catalogue app.

    Auto-generated Django migration creating the core catalogue models
    (attribute options/groups, categories, options, products, product
    attributes and values, product classes, images and recommendations).
    Foreign keys between models created within this same migration are
    attached via separate ``AddField`` operations once both sides exist,
    so the order of ``operations`` matters.
    """

    # contenttypes is needed for the generic FK on ProductAttributeValue
    # (entity_content_type below).
    dependencies = [
        ('contenttypes', '0001_initial'),
    ]

    operations = [
        migrations.CreateModel(
            name='AttributeOption',
            fields=[
                ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
                ('option', models.CharField(verbose_name='Option', max_length=255)),
            ],
            options={
                'verbose_name': 'Attribute option',
                'verbose_name_plural': 'Attribute options',
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='AttributeOptionGroup',
            fields=[
                ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
                ('name', models.CharField(verbose_name='Name', max_length=128)),
            ],
            options={
                'verbose_name': 'Attribute option group',
                'verbose_name_plural': 'Attribute option groups',
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        # Deferred FK: AttributeOptionGroup did not exist yet when
        # AttributeOption was created above.
        migrations.AddField(
            model_name='attributeoption',
            name='group',
            field=models.ForeignKey(verbose_name='Group', to='catalogue.AttributeOptionGroup'),
            preserve_default=True,
        ),
        # Category uses a materialised-path tree (path/depth/numchild are
        # the treebeard bookkeeping columns).
        migrations.CreateModel(
            name='Category',
            fields=[
                ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
                ('path', models.CharField(unique=True, max_length=255)),
                ('depth', models.PositiveIntegerField()),
                ('numchild', models.PositiveIntegerField(default=0)),
                ('name', models.CharField(verbose_name='Name', db_index=True, max_length=255)),
                ('description', models.TextField(verbose_name='Description', blank=True)),
                ('image', models.ImageField(verbose_name='Image', upload_to='categories', blank=True, null=True, max_length=255)),
                ('slug', models.SlugField(editable=False, verbose_name='Slug', max_length=255)),
                ('full_name', models.CharField(editable=False, verbose_name='Full Name', db_index=True, max_length=255)),
            ],
            options={
                'verbose_name': 'Category',
                'verbose_name_plural': 'Categories',
                'ordering': ['full_name'],
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Option',
            fields=[
                ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
                ('name', models.CharField(verbose_name='Name', max_length=128)),
                ('code', oscar.models.fields.autoslugfield.AutoSlugField(editable=False, verbose_name='Code', blank=True, max_length=128, populate_from='name', unique=True)),
                ('type', models.CharField(verbose_name='Status', choices=[('Required', 'Required - a value for this option must be specified'), ('Optional', 'Optional - a value for this option can be omitted')], default='Required', max_length=128)),
            ],
            options={
                'verbose_name': 'Option',
                'verbose_name_plural': 'Options',
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='Product',
            fields=[
                ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
                ('structure', models.CharField(verbose_name='Product structure', choices=[('standalone', 'Stand-alone product'), ('parent', 'Parent product'), ('child', 'Child product')], default='standalone', max_length=10)),
                ('upc', oscar.models.fields.NullCharField(verbose_name='UPC', max_length=64, help_text='Universal Product Code (UPC) is an identifier for a product which is not specific to a particular supplier. Eg an ISBN for a book.', unique=True)),
                ('title', models.CharField(verbose_name='Title', blank=True, max_length=255)),
                ('slug', models.SlugField(verbose_name='Slug', max_length=255)),
                ('description', models.TextField(verbose_name='Description', blank=True)),
                ('rating', models.FloatField(editable=False, verbose_name='Rating', null=True)),
                ('date_created', models.DateTimeField(verbose_name='Date created', auto_now_add=True)),
                ('date_updated', models.DateTimeField(verbose_name='Date updated', auto_now=True, db_index=True)),
                ('is_discountable', models.BooleanField(verbose_name='Is discountable?', help_text='This flag indicates if this product can be used in an offer or not', default=True)),
                ('parent', models.ForeignKey(verbose_name='Parent product', blank=True, help_text="Only choose a parent product if you're creating a child product. For example if this is a size 4 of a particular t-shirt. Leave blank if this is a stand-alone product (i.e. there is only one version of this product).", to='catalogue.Product', null=True)),
                ('product_options', models.ManyToManyField(verbose_name='Product Options', blank=True, to='catalogue.Option')),
            ],
            options={
                'verbose_name': 'Product',
                'verbose_name_plural': 'Products',
                'ordering': ['-date_created'],
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.CreateModel(
            name='ProductAttribute',
            fields=[
                ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
                ('name', models.CharField(verbose_name='Name', max_length=128)),
                ('code', models.SlugField(verbose_name='Code', validators=[django.core.validators.RegexValidator(regex='^[a-zA-Z\\-_][0-9a-zA-Z\\-_]*$', message="Code can only contain the letters a-z, A-Z, digits, minus and underscores, and can't start with a digit")], max_length=128)),
                ('type', models.CharField(verbose_name='Type', choices=[('text', 'Text'), ('integer', 'Integer'), ('boolean', 'True / False'), ('float', 'Float'), ('richtext', 'Rich Text'), ('date', 'Date'), ('option', 'Option'), ('entity', 'Entity'), ('file', 'File'), ('image', 'Image')], default='text', max_length=20)),
                ('required', models.BooleanField(verbose_name='Required', default=False)),
                ('option_group', models.ForeignKey(verbose_name='Option Group', blank=True, help_text='Select an option group if using type "Option"', to='catalogue.AttributeOptionGroup', null=True)),
            ],
            options={
                'verbose_name': 'Product attribute',
                'verbose_name_plural': 'Product attributes',
                'ordering': ['code'],
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        # EAV-style value table: one typed value_* column per attribute type,
        # plus a generic FK (entity_content_type/entity_object_id) for the
        # 'entity' type.
        migrations.CreateModel(
            name='ProductAttributeValue',
            fields=[
                ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
                ('value_text', models.TextField(verbose_name='Text', blank=True, null=True)),
                ('value_integer', models.IntegerField(verbose_name='Integer', blank=True, null=True)),
                ('value_boolean', models.NullBooleanField(verbose_name='Boolean')),
                ('value_float', models.FloatField(verbose_name='Float', blank=True, null=True)),
                ('value_richtext', models.TextField(verbose_name='Richtext', blank=True, null=True)),
                ('value_date', models.DateField(verbose_name='Date', blank=True, null=True)),
                ('value_file', models.FileField(null=True, upload_to='images/products/%Y/%m/', blank=True, max_length=255)),
                ('value_image', models.ImageField(null=True, upload_to='images/products/%Y/%m/', blank=True, max_length=255)),
                ('entity_object_id', models.PositiveIntegerField(editable=False, blank=True, null=True)),
                ('attribute', models.ForeignKey(verbose_name='Attribute', to='catalogue.ProductAttribute')),
                ('entity_content_type', models.ForeignKey(editable=False, blank=True, to='contenttypes.ContentType', null=True)),
            ],
            options={
                'verbose_name': 'Product attribute value',
                'verbose_name_plural': 'Product attribute values',
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='product',
            name='attributes',
            field=models.ManyToManyField(verbose_name='Attributes', through='catalogue.ProductAttributeValue', to='catalogue.ProductAttribute'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='productattributevalue',
            name='product',
            field=models.ForeignKey(verbose_name='Product', to='catalogue.Product'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='productattributevalue',
            name='value_option',
            field=models.ForeignKey(verbose_name='Value Option', blank=True, to='catalogue.AttributeOption', null=True),
            preserve_default=True,
        ),
        # A product can hold at most one value per attribute.
        migrations.AlterUniqueTogether(
            name='productattributevalue',
            unique_together=set([('attribute', 'product')]),
        ),
        migrations.CreateModel(
            name='ProductCategory',
            fields=[
                ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
                ('category', models.ForeignKey(verbose_name='Category', to='catalogue.Category')),
            ],
            options={
                'verbose_name': 'Product category',
                'verbose_name_plural': 'Product categories',
                'ordering': ['product', 'category'],
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='product',
            name='categories',
            field=models.ManyToManyField(verbose_name='Categories', through='catalogue.ProductCategory', to='catalogue.Category'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='productcategory',
            name='product',
            field=models.ForeignKey(verbose_name='Product', to='catalogue.Product'),
            preserve_default=True,
        ),
        migrations.AlterUniqueTogether(
            name='productcategory',
            unique_together=set([('product', 'category')]),
        ),
        migrations.CreateModel(
            name='ProductClass',
            fields=[
                ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
                ('name', models.CharField(verbose_name='Name', max_length=128)),
                ('slug', oscar.models.fields.autoslugfield.AutoSlugField(editable=False, verbose_name='Slug', blank=True, max_length=128, populate_from='name', unique=True)),
                ('requires_shipping', models.BooleanField(verbose_name='Requires shipping?', default=True)),
                ('track_stock', models.BooleanField(verbose_name='Track stock levels?', default=True)),
                ('options', models.ManyToManyField(verbose_name='Options', blank=True, to='catalogue.Option')),
            ],
            options={
                'verbose_name': 'Product class',
                'verbose_name_plural': 'Product classes',
                'ordering': ['name'],
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='productattribute',
            name='product_class',
            field=models.ForeignKey(verbose_name='Product Type', blank=True, to='catalogue.ProductClass', null=True),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='product',
            name='product_class',
            field=models.ForeignKey(verbose_name='Product Type', on_delete=django.db.models.deletion.PROTECT, help_text='Choose what type of product this is', to='catalogue.ProductClass', null=True),
            preserve_default=True,
        ),
        migrations.CreateModel(
            name='ProductImage',
            fields=[
                ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
                ('original', models.ImageField(verbose_name='Original', upload_to='images/products/%Y/%m/', max_length=255)),
                ('caption', models.CharField(verbose_name='Caption', blank=True, max_length=200)),
                ('display_order', models.PositiveIntegerField(verbose_name='Display Order', help_text='An image with a display order of zero will be the primary image for a product', default=0)),
                ('date_created', models.DateTimeField(verbose_name='Date Created', auto_now_add=True)),
                ('product', models.ForeignKey(verbose_name='Product', to='catalogue.Product')),
            ],
            options={
                'verbose_name': 'Product image',
                'verbose_name_plural': 'Product images',
                'ordering': ['display_order'],
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.AlterUniqueTogether(
            name='productimage',
            unique_together=set([('product', 'display_order')]),
        ),
        migrations.CreateModel(
            name='ProductRecommendation',
            fields=[
                ('id', models.AutoField(auto_created=True, verbose_name='ID', primary_key=True, serialize=False)),
                ('ranking', models.PositiveSmallIntegerField(verbose_name='Ranking', help_text='Determines order of the products. A product with a higher value will appear before one with a lower ranking.', default=0)),
            ],
            options={
                'verbose_name': 'Product recommendation',
                'verbose_name_plural': 'Product recomendations',
                'ordering': ['primary', '-ranking'],
                'abstract': False,
            },
            bases=(models.Model,),
        ),
        migrations.AddField(
            model_name='product',
            name='recommended_products',
            field=models.ManyToManyField(verbose_name='Recommended Products', through='catalogue.ProductRecommendation', blank=True, to='catalogue.Product'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='productrecommendation',
            name='primary',
            field=models.ForeignKey(verbose_name='Primary Product', to='catalogue.Product'),
            preserve_default=True,
        ),
        migrations.AddField(
            model_name='productrecommendation',
            name='recommendation',
            field=models.ForeignKey(verbose_name='Recommended Product', to='catalogue.Product'),
            preserve_default=True,
        ),
        migrations.AlterUniqueTogether(
            name='productrecommendation',
            unique_together=set([('primary', 'recommendation')]),
        ),
    ]
| 54.736301 | 356 | 0.591566 | 1,534 | 15,983 | 6.013038 | 0.15189 | 0.106136 | 0.02732 | 0.038053 | 0.486882 | 0.420642 | 0.381938 | 0.352992 | 0.329792 | 0.266912 | 0 | 0.006598 | 0.269849 | 15,983 | 291 | 357 | 54.924399 | 0.783805 | 0.001314 | 0 | 0.522807 | 0 | 0.014035 | 0.261216 | 0.034837 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.021053 | 0 | 0.031579 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e395b545076b6c0c74ee9c9bbb70ebddc9553f7 | 1,119 | py | Python | file_manipulator.py | mtiday/file_manipulator | 58a9b4bc7dc1c2bc76ab409531cd5850e15c931d | [
"MIT"
] | 1 | 2021-02-06T09:43:11.000Z | 2021-02-06T09:43:11.000Z | file_manipulator.py | mtiday/file_manipulator | 58a9b4bc7dc1c2bc76ab409531cd5850e15c931d | [
"MIT"
] | null | null | null | file_manipulator.py | mtiday/file_manipulator | 58a9b4bc7dc1c2bc76ab409531cd5850e15c931d | [
"MIT"
] | 1 | 2021-02-06T08:10:55.000Z | 2021-02-06T08:10:55.000Z | """This program scans, reads, writes, modifies, creates, re-names files.
I expect this program to grow as I, and the community, grows.
"""
import time
import linux_name_checker
import find_replace
def start():
    """Run the interactive menu loop and dispatch the chosen file action.

    Loops forever, printing the menu and reading one choice per pass, until
    the user picks ``Q``/``q`` (which hands off to ``exit_program``).
    """
    while True:
        # Show the menu afresh on every pass through the loop.
        print('\nPlease choose from the following list:')
        print('"1" to find and replace text in text files')
        print('"2" to scan Linux folders for files that would cause '
              'duplicates if copied to Windows.')
        choice = str(input('"Q" to quit and end the program. '))
        if choice == '1':
            find_replace.start()
        elif choice == '2':
            linux_name_checker.start()
        elif choice in ('Q', 'q'):
            exit_program()
        else:
            # Unknown entry: report it and loop back to the menu.
            print(f'\n"{choice}" wasn\'t a valid entry. '
                  f'Please try again.\n')
def exit_program():
    """Say goodbye, pause briefly so the message is readable, then terminate."""
    farewell = '\nHave a great day!!'
    print(farewell)
    # Give the user a moment to read the message before the process ends.
    time.sleep(5)
    raise SystemExit
# Script entry point: launch the interactive menu when run directly.
if __name__ == "__main__":
    start()
| 27.975 | 73 | 0.61126 | 154 | 1,119 | 4.272727 | 0.538961 | 0.082067 | 0.098784 | 0.051672 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006165 | 0.275246 | 1,119 | 39 | 74 | 28.692308 | 0.805179 | 0.190349 | 0 | 0 | 0 | 0 | 0.312711 | 0.023622 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.12 | 0 | 0.2 | 0.2 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e3a95afeceb1f714bcba2d6577d265d4da6ff5d | 2,627 | py | Python | maestral/utils/backend.py | LyzardKing/maestral-dropbox | c1b030de85b69f8867b6a8a6564921f5e851aa1c | [
"MIT"
] | null | null | null | maestral/utils/backend.py | LyzardKing/maestral-dropbox | c1b030de85b69f8867b6a8a6564921f5e851aa1c | [
"MIT"
] | null | null | null | maestral/utils/backend.py | LyzardKing/maestral-dropbox | c1b030de85b69f8867b6a8a6564921f5e851aa1c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
@author: Sam Schott (ss2151@cam.ac.uk)
(c) Sam Schott; This work is licensed under a Creative Commons
Attribution-NonCommercial-NoDerivs 2.0 UK: England & Wales License.
"""
from os import path as osp
import keyring
import keyring.backends
from keyring.errors import KeyringLocked
from maestral.config import MaestralConfig, MaestralState
from maestral.constants import IS_MACOS_BUNDLE
from maestral.utils.appdirs import get_data_path
from maestral.utils.path import delete
def set_keyring_backend():
    """Select and activate the keyring backend used to store credentials.

    Inside a macOS app bundle the native OS X keyring is forced; on all
    other platforms the highest-priority available backend is activated,
    excluding the chainer meta-backend.
    """
    if IS_MACOS_BUNDLE:
        import keyring.backends.OS_X
        keyring.set_keyring(keyring.backends.OS_X.Keyring())
        return
    import keyring.backends
    # Collect every concrete backend for this platform, skipping the
    # chainer backend, and activate the one with the highest priority.
    candidates = [
        backend for backend in keyring.backend.get_all_keyring()
        if not isinstance(backend, keyring.backends.chainer.ChainerBackend)
    ]
    keyring.set_keyring(max(candidates, key=lambda b: b.priority))
def pending_link(config_name):
    """Check whether the auth key has not yet been saved.

    This can be used by Maestral front ends to check if we are linked
    before starting a daemon.

    :param str config_name: The config to check.
    :returns: ``True`` or ``False``.
    :rtype: bool
    :raises: ``KeyringLocked`` if the system keyring cannot be accessed.
    """
    set_keyring_backend()
    account_id = MaestralConfig(config_name).get('account', 'account_id')
    try:
        if account_id == '':
            token = None
        else:
            token = keyring.get_password('Maestral', account_id)
    except KeyringLocked:
        # Re-raise with a user-actionable message.
        raise KeyringLocked(
            'Please make sure that your keyring is unlocked and restart Maestral.'
        )
    return token is None
def pending_dropbox_folder(config_name):
    """Check whether no local dropbox folder has been set yet.

    This can be used by Maestral front ends to check the setup state
    before starting a daemon.

    :param str config_name: The config to check.
    :returns: ``True`` or ``False``.
    :rtype: bool
    """
    dropbox_path = MaestralConfig(config_name).get('main', 'path')
    return not osp.isdir(dropbox_path)
def remove_configuration(config_name):
    """Remove all config and state files of the given configuration.

    :param str config_name: The configuration to remove.
    """
    # Clean up both the config store and the state store.
    for store in (MaestralConfig(config_name), MaestralState(config_name)):
        store.cleanup()
    # Finally drop the sync index file for this configuration.
    delete(get_data_path('maestral', f'{config_name}.index'))
| 31.27381 | 117 | 0.707651 | 351 | 2,627 | 5.162393 | 0.410256 | 0.060706 | 0.034768 | 0.029801 | 0.187086 | 0.147903 | 0.147903 | 0.147903 | 0.147903 | 0.147903 | 0 | 0.003354 | 0.205558 | 2,627 | 83 | 118 | 31.650602 | 0.864878 | 0.349448 | 0 | 0.157895 | 0 | 0 | 0.079306 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0.026316 | 0.263158 | 0 | 0.421053 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e3d079dd00e907be76acc51ce319f38b54725a2 | 9,944 | py | Python | arjuna/interact/gui/auto/impl/automator/drivercaps.py | test-mile/arjuna | 21880b41e061e11bac2e600a3614684f8af75b2f | [
"Apache-2.0"
] | 9 | 2018-11-15T10:09:17.000Z | 2021-01-12T05:59:19.000Z | arjuna/interact/gui/auto/impl/automator/drivercaps.py | test-mile/arjuna | 21880b41e061e11bac2e600a3614684f8af75b2f | [
"Apache-2.0"
] | 2 | 2019-07-01T15:33:46.000Z | 2019-07-12T13:04:08.000Z | arjuna/interact/gui/auto/impl/automator/drivercaps.py | test-mile/arjuna | 21880b41e061e11bac2e600a3614684f8af75b2f | [
"Apache-2.0"
] | 4 | 2018-12-02T15:14:04.000Z | 2020-05-28T12:57:24.000Z | from arjuna.tpi.enums import ArjunaOption, DesktopOS
import pprint
from enum import Enum, auto
class SetuActorDriverConfigOption(Enum):
    """Names of Arjuna config options that are forwarded to the driver actor.

    ``DriverCapabilities.__process_config`` copies an Arjuna option into the
    processed config's ``arjunaOptions`` section only if its name appears in
    this enum. Values are ``auto()`` — only the member names matter.
    """
    AUTOMATOR_NAME = auto()
    GUIAUTO_CONTEXT = auto()

    # Browser (Common)
    BROWSER_NAME = auto()
    BROWSER_BIN_PATH = auto()
    BROWSER_PROXY_ON = auto()
    BROWSER_PROXY_HOST = auto()
    BROWSER_PROXY_PORT = auto()

    MOBILE_OS_NAME = auto()

    # Selenium
    SELENIUM_DRIVER_PROP = auto()
    SELENIUM_DRIVER_PATH = auto()

    # Appium
    APPIUM_HUB_URL = auto()
    APPIUM_AUTO_LAUNCH = auto()
class DriverCapabilities:
    """Normalise Arjuna options plus a requester-supplied JSON dict into the
    driver configuration expected by the automation layer.

    The processed result (see :attr:`processed_config`) is a dict with the
    sections ``arjunaOptions``, ``driverCapabilities``, ``browserArgs``,
    ``browserPreferences`` and ``browserExtensions``; empty optional sections
    are dropped. Capability keys are populated differently for Selenium vs
    Appium and for the mobile web/native/hybrid contexts.
    """

    # Browser name -> WebDriver executable base name.
    DRIVER_MAP = {
        "chrome": "chromedriver",
        "firefox": "geckodriver",
        "safari": "safaridriver"
    }

    # Selenium capability keys
    UNEXPECTED_ALERT_BEHAVIOUR = "unexpectedAlertBehaviour"  # accept,dismiss,ignore
    UNHANDLED_PROMPT_BEHAVIOUR = "unhandledPromptBehavior"  # accept,dismiss,ignore
    ELEMENT_SCROLL_BEHAVIOR = "elementScrollBehavior"  # ???

    AUTOMATION_NAME = "automationName"
    BROWSER_NAME = "browserName"
    BROWSER_VERSION = "browserVersion"

    # Appium capability keys
    PLATFORM_NAME = "platformName"
    PLATFORM_VERSION = "platformVersion"
    DEVICE_NAME = "deviceName"
    APP_PATH = "app"
    DEVICE_UDID = "udid"

    NEW_COMMAND_TIMEOUT = "newCommandTimeout"  # unit: seconds
    AUTO_WEBVIEW = "autoWebview"  # Default false
    NO_RESET = "noReset"  # Default false
    FULL_RESET = "fullReset"
    CLEAR_SYSTEM_FILES = "clearSystemFiles"

    # Android capability keys
    ANDROID_APP_ACTIVITY = "appActivity"
    ANDROID_APP_PACKAGE = "appPackage"
    ANDROID_WAIT_ACTIVITY = "appWaitActivity"
    ANDROID_WAIT_PACKAGE = "appWaitPackage"
    ANDROID_UNICODE_KEYBOARD = "unicodeKeyboard"
    ANDROID_RESET_KEYBOARD = "resetKeyboard"

    # iOS capability keys
    IOS_BUNDLE_ID = "bundleId"
    IOS_AUTO_ACCEPT_ALERTS = "autoAcceptAlerts"

    def __init__(self, config, json_dict):
        """Build the processed config.

        :param config: Arjuna configuration object (provides
            ``get_arjuna_option_value`` / ``get_arjuna_options_as_map``).
        :param json_dict: optional requester dict with ``browserArgs``,
            ``driverCapabilities``, ``browserPreferences`` and/or
            ``browserExtensions`` sections.
        """
        self.__config = config
        self.__out_dict = {
            "arjunaOptions": {},
            "browserArgs": [],
            "driverCapabilities": {},
            "browserPreferences": {},
            "browserExtensions": []
        }
        self.__process_config(config)
        self.__process(json_dict)

        self.__host_os = self.__config.get_arjuna_option_value(ArjunaOption.TESTRUN_HOST_OS).as_str().lower()
        aname = self.__config.get_arjuna_option_value(ArjunaOption.AUTOMATOR_NAME).as_str().lower()
        acontext = self.__config.get_arjuna_option_value(ArjunaOption.GUIAUTO_CONTEXT).as_str().lower()
        mobile_platform = self.__config.get_arjuna_option_value(ArjunaOption.MOBILE_OS_NAME).as_str().lower()

        if aname == "selenium":
            self.__process_for_selenium(json_dict)
        elif aname == "appium":
            self.__process_for_appium(json_dict)
            # Platform-wide capabilities first, then context-specific ones.
            if mobile_platform.lower() == "android":
                self.__process_for_android(json_dict)
            elif mobile_platform.lower() == "ios":
                self.__process_for_ios(json_dict)

            if acontext.lower() == "mobile_web":
                if mobile_platform.lower() == "android":
                    self.__process_for_android_web(json_dict)
                elif mobile_platform.lower() == "ios":
                    self.__process_for_ios_web(json_dict)
            elif acontext.lower() == "mobile_native":
                if mobile_platform.lower() == "android":
                    self.__process_for_android_native(json_dict)
                elif mobile_platform.lower() == "ios":
                    self.__process_for_ios_native(json_dict)
            elif acontext.lower() == "mobile_hybrid":
                if mobile_platform.lower() == "android":
                    self.__process_for_android_hybrid(json_dict)
                elif mobile_platform.lower() == "ios":
                    self.__process_for_ios_hybrid(json_dict)

        # Drop optional sections that ended up empty.
        if not self.__out_dict["browserArgs"]:
            del self.__out_dict["browserArgs"]
        if not self.__out_dict["driverCapabilities"]:
            del self.__out_dict["driverCapabilities"]
        if not self.__out_dict["browserPreferences"]:
            del self.__out_dict["browserPreferences"]
        if not self.__out_dict["browserExtensions"]:
            del self.__out_dict["browserExtensions"]

    @property
    def processed_config(self):
        """The fully processed driver config dict."""
        return self.__out_dict

    @property
    def _config(self):
        """The underlying Arjuna configuration object."""
        return self.__config

    def __process_config(self, config):
        """Copy whitelisted Arjuna options into the ``arjunaOptions`` section."""
        self.__out_dict["automationContext"] = config.get_arjuna_option_value(ArjunaOption.GUIAUTO_CONTEXT).as_str().upper()
        temp_d = config.get_arjuna_options_as_map()
        for k, v in temp_d.items():
            # Only options named in SetuActorDriverConfigOption are forwarded.
            if k in SetuActorDriverConfigOption.__members__:
                self.__out_dict["arjunaOptions"][k] = v

    def __process(self, dict_from_requester):
        """Merge the requester-provided sections into the output dict."""
        # Defaults: dismiss unexpected alerts/prompts rather than hang.
        self.__out_dict["driverCapabilities"][self.UNEXPECTED_ALERT_BEHAVIOUR] = "dismiss"
        self.__out_dict["driverCapabilities"][self.UNHANDLED_PROMPT_BEHAVIOUR] = "dismiss"

        if not dict_from_requester:
            return

        if "browserArgs" in dict_from_requester and dict_from_requester["browserArgs"]:
            self.__out_dict["browserArgs"].extend(dict_from_requester["browserArgs"])

        if "driverCapabilities" in dict_from_requester and dict_from_requester["driverCapabilities"]:
            # "not_set" values are placeholders and must not override defaults.
            self.__out_dict["driverCapabilities"].update(
                {i: j for i, j in dict_from_requester["driverCapabilities"].items() if j != "not_set"})

        if "browserPreferences" in dict_from_requester and dict_from_requester["browserPreferences"]:
            self.__out_dict["browserPreferences"] = dict_from_requester["browserPreferences"]

        if "browserExtensions" in dict_from_requester and dict_from_requester["browserExtensions"]:
            self.__out_dict["browserExtensions"].extend(dict_from_requester["browserExtensions"])

    def __modify_for_windows(self, in_name):
        """Append ``.exe`` to a driver executable name on Windows hosts."""
        # NOTE(review): self.__host_os is a lower-cased string while
        # DesktopOS.WINDOWS is an enum member; unless DesktopOS members
        # compare equal to strings this branch can never be taken — confirm
        # against the DesktopOS definition.
        if self.__host_os == DesktopOS.WINDOWS:
            return in_name + ".exe"
        else:
            return in_name

    def __process_for_selenium(self, in_dict):
        """Populate browser name/version capabilities for Selenium runs."""
        browser_name = self._config.get_arjuna_option_value(ArjunaOption.BROWSER_NAME).as_str().lower()
        self.__out_dict["driverCapabilities"][self.BROWSER_NAME] = browser_name
        browser_version = self._config.get_arjuna_option_value(ArjunaOption.BROWSER_VERSION).as_str()
        if browser_version != "not_set":
            self.__out_dict["driverCapabilities"][self.BROWSER_VERSION] = browser_version

    def __process_for_appium(self, dict_from_requester):
        """Populate the platform/device capabilities common to all Appium runs."""
        mobile_os_name = self._config.get_arjuna_option_value(ArjunaOption.MOBILE_OS_NAME).as_str()
        if mobile_os_name.lower() == "android":
            self.__out_dict["driverCapabilities"][self.PLATFORM_NAME] = "Android"
        elif mobile_os_name.lower() == "ios":
            self.__out_dict["driverCapabilities"][self.PLATFORM_NAME] = "iOS"

        self.__out_dict["driverCapabilities"][self.NEW_COMMAND_TIMEOUT] = 300  # 5 minutes
        # NOTE(review): unlike the other accessors in this class, these
        # option values are stored without .as_str() — confirm the driver
        # layer accepts the raw option value objects.
        self.__out_dict["driverCapabilities"][self.PLATFORM_VERSION] = self._config.get_arjuna_option_value(ArjunaOption.MOBILE_OS_VERSION)
        self.__out_dict["driverCapabilities"][self.DEVICE_NAME] = self._config.get_arjuna_option_value(ArjunaOption.MOBILE_DEVICE_NAME)
        self.__out_dict["driverCapabilities"][self.APP_PATH] = self._config.get_arjuna_option_value(ArjunaOption.MOBILE_APP_FILE_PATH)
        self.__out_dict["driverCapabilities"][self.DEVICE_UDID] = self._config.get_arjuna_option_value(ArjunaOption.MOBILE_DEVICE_UDID)

    def __process_for_android(self, dict_from_requester):
        """Android-wide capabilities: UiAutomator2 with unicode keyboard."""
        self.__out_dict["driverCapabilities"][self.AUTOMATION_NAME] = "UiAutomator2"
        self.__out_dict["driverCapabilities"][self.ANDROID_UNICODE_KEYBOARD] = True
        self.__out_dict["driverCapabilities"][self.ANDROID_RESET_KEYBOARD] = True

    def __process_for_android_native(self, dict_from_requester):
        """Android native context: no extra capabilities at present."""
        pass
        # self.__out_dict["driverCapabilities"][self.ANDROID_APP_ACTIVITY] = self._config.get_arjuna_option_value(ArjunaOption.MOBILE_APP_PACKAGE).name
        # self.__out_dict["driverCapabilities"][self.ANDROID_APP_PACKAGE] = self._config.get_arjuna_option_value(ArjunaOption.MOBILE_APP_ACTIVITY)
        # self.__out_dict["capabilites"][self.ANDROID_WAIT_ACTIVITY] = self._config.get_arjuna_option_value(ArjunaOption.MOBILE_APP_ACTIVITY)
        # self.__out_dict["capabilites"][self.ANDROID_WAIT_PACKAGE] = self._config.get_arjuna_option_value(ArjunaOption.MOBILE_APP_FILE_PATH)

    def __process_for_android_web(self, dict_from_requester):
        """Android web context: only Chrome is supported."""
        # Browser
        browser_name = self._config.get_browser_name().lower()
        if browser_name.lower() != "chrome":
            raise Exception("{} is not a valid browser for Android.".format(browser_name))
        # Bug fix: this previously wrote to a non-existent "capabilities"
        # key (KeyError at runtime); every other writer in this class uses
        # the "driverCapabilities" section.
        self.__out_dict["driverCapabilities"][self.BROWSER_NAME] = browser_name

    def __process_for_android_hybrid(self, dict_from_requester):
        """Android hybrid context: no extra capabilities at present."""
        pass

    def __process_for_ios(self, dict_from_requester):
        """iOS-wide capabilities: XCUITest with auto-accepted alerts."""
        self.__out_dict["driverCapabilities"][self.AUTOMATION_NAME] = "XCUITest"
        self.__out_dict["driverCapabilities"][self.IOS_AUTO_ACCEPT_ALERTS] = True

    def __process_for_ios_native(self, dict_from_requester):
        """iOS native context: no extra capabilities at present."""
        pass

    def __process_for_ios_web(self, dict_from_requester):
        """iOS web context: forward the configured browser name."""
        self.__out_dict["driverCapabilities"][self.BROWSER_NAME] = self._config.get_browser_name()

    def __process_for_ios_hybrid(self, dict_from_requester):
        """iOS hybrid context: no extra capabilities at present."""
        pass

    '''
    #Common (WebDriver)
    BROWSER_NAME = "browserName"
    BROWSER_VERSION = "browserVersion"

    #Appium
    PLATFORM_NAME = "platformName"
    PLATFORM_VERSION = "platformVersion"
    DEVICE_NAME = "deviceName"
    APP_PATH = "app"
    DEVICE_UDID = "udid"

    Android{
        // Android Specific
        ANDROID_APP_ACTIVITY = "appActivity"
        ANDROID_APP_PACKAGE = "appPackage"
        ANDROID_WAIT_ACTIVITY = "appWaitActivity"
        ANDROID_WAIT_PACKAGE = "appWaitPackage"
    }

    IOS {
    }
    '''
7e404ad224902f7e6fe6cacd32f6806bb82f3f7f | 3,873 | py | Python | egs2/harpervalley/asr1/local/data_prep.py | roshansh-cmu/espnet | 5fa6dcc4e649dc66397c629d0030d09ecef36b80 | [
"Apache-2.0"
] | 1 | 2022-03-25T14:41:05.000Z | 2022-03-25T14:41:05.000Z | egs2/harpervalley/asr1/local/data_prep.py | roshansh-cmu/espnet | 5fa6dcc4e649dc66397c629d0030d09ecef36b80 | [
"Apache-2.0"
] | 2 | 2019-04-23T04:43:33.000Z | 2019-05-13T13:06:52.000Z | egs2/harpervalley/asr1/local/data_prep.py | roshansh-cmu/espnet | 5fa6dcc4e649dc66397c629d0030d09ecef36b80 | [
"Apache-2.0"
] | null | null | null | import argparse
import json
import os
import sys
import wave
def load_json(f_path):
with open(f_path, "r") as f:
return json.load(f)
def process_data(target_dir, source_dir, audio_dir, filename, min_length):
if not os.path.exists(target_dir):
os.makedirs(target_dir)
agent_wav_path = os.path.join(audio_dir, "agent", filename + ".wav")
caller_wav_path = os.path.join(audio_dir, "caller", filename + ".wav")
# exit if the wav files do not exist.
if not os.path.isfile(agent_wav_path) or not os.path.isfile(agent_wav_path):
sys.exit()
with wave.open(agent_wav_path, "rb") as wa, wave.open(caller_wav_path, "rb") as wc:
wa_length = wa.getnframes() / wa.getframerate()
wc_length = wc.getnframes() / wc.getframerate()
with open(
os.path.join(target_dir, "wav.scp"), "a", encoding="utf-8"
) as wavscp, open(
os.path.join(target_dir, "utt2spk"), "a", encoding="utf-8"
) as utt2spk, open(
os.path.join(target_dir, "segments"), "a", encoding="utf-8"
) as segments, open(
os.path.join(target_dir, "text"), "a", encoding="utf-8"
) as text:
metadata_f = load_json(os.path.join(source_dir, "metadata", filename + ".json"))
transcript_f = load_json(
os.path.join(source_dir, "transcript", filename + ".json")
)
agent_spk_id = metadata_f["agent"]["speaker_id"]
caller_spk_id = metadata_f["caller"]["speaker_id"]
task_type = metadata_f["tasks"][0]["task_type"].replace(" ", "_")
agent_rec_id = "{}-{}".format(agent_spk_id, filename)
caller_rec_id = "{}-{}".format(caller_spk_id, filename)
agent_utt_num = 0
caller_utt_num = 0
for v in transcript_f:
transcript = v["human_transcript"]
# Throw away utterances with < min_length words or 100 ms
if len(transcript.split()) < min_length or int(v["duration_ms"]) < 100:
continue
begin_ms = int(v["offset_ms"])
end_ms = begin_ms + int(v["duration_ms"])
begin_sec = begin_ms / 1000
end_sec = end_ms / 1000
if v["speaker_role"] == "agent":
if end_sec > wa_length:
continue
utt_id = "{}_{}_{}".format(agent_rec_id, begin_ms, end_ms)
utt2spk.write("{} {}\n".format(utt_id, agent_spk_id))
segments.write(
"{} {} {} {}\n".format(utt_id, agent_rec_id, begin_sec, end_sec)
)
agent_utt_num += 1
else:
if end_sec > wc_length:
continue
utt_id = "{}_{}_{}".format(caller_rec_id, begin_ms, end_ms)
utt2spk.write("{} {}\n".format(utt_id, caller_spk_id))
segments.write(
"{} {} {} {}\n".format(utt_id, caller_rec_id, begin_sec, end_sec)
)
caller_utt_num += 1
text.write("{} {} {}\n".format(utt_id, task_type, transcript))
# write wav.scp only if utterances exist
if agent_utt_num > 0:
wavscp.write("{} {}\n".format(agent_rec_id, agent_wav_path))
if caller_utt_num > 0:
wavscp.write("{} {}\n".format(caller_rec_id, caller_wav_path))
parser = argparse.ArgumentParser()
parser.add_argument("--target_dir", type=str, default="data/tmp")
parser.add_argument("--source_dir", type=str, required=True, help="Path to source data")
parser.add_argument("--audio_dir", type=str, required=True, help="Path to audio data")
parser.add_argument("--filename", type=str, required=True, help="filename")
parser.add_argument("--min_length", type=int, default=4)
args = parser.parse_args()
process_data(
args.target_dir, args.source_dir, args.audio_dir, args.filename, args.min_length
)
| 39.520408 | 88 | 0.595146 | 525 | 3,873 | 4.12 | 0.20381 | 0.030513 | 0.036986 | 0.034674 | 0.331946 | 0.262598 | 0.192325 | 0.121128 | 0.03791 | 0.03791 | 0 | 0.010479 | 0.26078 | 3,873 | 97 | 89 | 39.927835 | 0.745023 | 0.033566 | 0 | 0.063291 | 0 | 0 | 0.110457 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025316 | false | 0 | 0.063291 | 0 | 0.101266 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e40eae0bc4bfeb3cc4d3ceb71088bb1c733beb5 | 3,621 | py | Python | interface/commands/iterate.py | ajupatatero/neurasim | c1d3f8163a7389b06a13e453daa98ad5157d9b2e | [
"MIT"
] | null | null | null | interface/commands/iterate.py | ajupatatero/neurasim | c1d3f8163a7389b06a13e453daa98ad5157d9b2e | [
"MIT"
] | null | null | null | interface/commands/iterate.py | ajupatatero/neurasim | c1d3f8163a7389b06a13e453daa98ad5157d9b2e | [
"MIT"
] | null | null | null | from neurasim import *
import multiprocessing
def call_script(execute, PARSER):
    """Run *execute* as a shell command, appending PARSER as CLI flags.

    PARSER is a flat list of alternating option names and values; each
    consecutive pair is rendered as " -name value" on the command line.
    """
    # Pair up names (even indices) with values (odd indices).
    pairs = zip(PARSER[::2], PARSER[1::2])
    txt = "".join(" -{0} {1}".format(name, value) for name, value in pairs)

    print(f'Executing' + "\033[1;34;40m" + f' {str(execute)}' + '\033[0;m' + ' with ' + "\033[1;34;40m" +
          f'{PARSER}' + '\033[0;m' + f' || @ process \033[1;35;40m{os.getpid()}\033[0;m launched by \033[1;35;40m{os.getppid()}\033[0;m')

    os.system(f'{execute}' + txt)
def for_recursive(execute, constant_list, iterable_list, depth_index=0, iteration_index=None):
    """Recursively expand every combination of the iterable parameter values.

    For each combination (the base case of the recursion), a PARSER list of
    alternating names and values is assembled from ``constant_list`` plus the
    currently selected value of every entry in ``iterable_list``, and a child
    process running ``call_script`` is launched with it.

    :param execute: script path to run (callables are not supported any more)
    :param constant_list: list of [name, value] pairs passed to every sub-case
    :param iterable_list: list of [name, values] pairs iterated over
    :param depth_index: current recursion depth (internal)
    :param iteration_index: indices chosen so far, one per depth (internal)
    """
    # Avoid a mutable default argument; [] would be shared across calls.
    if iteration_index is None:
        iteration_index = []
    number_of_loops = len(iterable_list)

    if depth_index == number_of_loops:  # Base case: one concrete sub-case.
        # Create the sub-case's iterable values and property-name parser list.
        PARSER = []
        for _, value in enumerate(constant_list):
            PARSER = PARSER + [value[0]] + [value[1]]
        for depth_index, value in enumerate(iterable_list):
            PARSER = PARSER + [value[0]] + [value[1][iteration_index[depth_index]]]

        # Parse properties to simulate.
        try:
            if callable(execute):
                exit()  # now the commands are scripts
            else:
                # MultiProcessing: each sub-case runs in its own process.
                multiprocessing.Process(target=call_script, args=(execute, PARSER)).start()
        # Fixed: was a bare `except:` referencing the undefined name EXECUTE
        # (NameError inside the handler) and a broken ANSI escape "\033[1;31".
        except Exception:
            print("\033[1;31m" + f'ERROR: case failed with an uncontemplated error in {str(execute)}' + '\033[0;m')
    else:  # Recursive case: iterate the values at the current depth.
        for iter_index, iter_value in enumerate(iterable_list[depth_index][1]):
            for_recursive(execute=execute, constant_list=constant_list, iterable_list=iterable_list,
                          depth_index=depth_index + 1,
                          iteration_index=iteration_index + [iter_index] if depth_index >= 1 else [iter_index])
def main():
    '''Function to run a meta analysis over a set of iterable parameters. Notice there is no limitation to
    the number of parameters to iterate.

    usage: meta_simulate(CONSTANT,ITERABLE,EXECUTE)

    -CONSTANT = [['Lx', 150],
                 ['Lx', 50],
                 ['Nt', 10],
                 ['gpu', True] !! The bool arguments are only the -gpu the other part doesn't matter!!
                ]
    -ITERABLE = [ ['variable name to pass to simulate', [1.5, 2.5, 3.0]],
                  ['variable 2', [150, 450]],
                  ['variable 3', [50, 100, 200, 300, 400, 500, 700, 850, 1000]],
                  ['range input', range(0,5,1) ],
                  ['array input', np.linspace(0,5,3)]
                ]
    -EXECUTE = function name or script.py with path

    Notice:
    1) All the variables of the simulation that depend on iterable parameters, include its calculation within
    the simulate function or script.
    2) The iterable parameters input can be a list, an array, or a range type variable
    '''
    parser = IterateParser()
    config = parser.parse()

    try:
        CONSTANT = config['CONSTANT']
        ITERABLE = config['ITERABLE']
        EXECUTE = config['EXECUTE']
    # KeyError: missing key; TypeError: config is not a mapping at all.
    except (KeyError, TypeError):
        print('No sufficient data in config_simulation.yaml as to execute an iteration.')
        exit()

    print("\033[2J\033[1;1f")  # Clear the screen and home the cursor.
    print("\033[1;35;47m" + " NEURASIM (meta command launcher) " + '\033[0;m')

    # 1. Normalize the inputs that are not lists (range, ndarray, ...).
    #    Fixed: `ite[1] is not list` compared an instance to the type object
    #    with identity, which is always True.
    for i, ite in enumerate(ITERABLE):
        if not isinstance(ite[1], list):
            ITERABLE[i][1] = list(ite[1])

    # 2. Launch the recursive fors corresponding with each sub-case.
    for_recursive(execute=EXECUTE, constant_list=CONSTANT, iterable_list=ITERABLE)


if __name__ == '__main__':
    main()
7e42d76ddc20ff969072a41e0da53bf4691a4b71 | 10,935 | py | Python | qiskit_cold_atom/fermions/fermion_circuit_solver.py | eggerdj/qiskit-cold-atom | fb4223993a60e6237941c5d02c6133cdbfeb8391 | [
"Apache-2.0"
] | 16 | 2021-09-30T11:58:55.000Z | 2022-03-16T03:32:24.000Z | qiskit_cold_atom/fermions/fermion_circuit_solver.py | eggerdj/qiskit-cold-atom | fb4223993a60e6237941c5d02c6133cdbfeb8391 | [
"Apache-2.0"
] | 32 | 2021-09-27T14:36:55.000Z | 2022-03-29T12:13:42.000Z | qiskit_cold_atom/fermions/fermion_circuit_solver.py | eggerdj/qiskit-cold-atom | fb4223993a60e6237941c5d02c6133cdbfeb8391 | [
"Apache-2.0"
] | 5 | 2021-09-23T18:35:41.000Z | 2022-02-06T09:22:52.000Z | # This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Module to simulate fermionic circuits."""
from typing import List, Tuple, Optional
import numpy as np
from scipy.sparse import csc_matrix
from qiskit import QuantumCircuit
from qiskit_nature.operators.second_quantization import FermionicOp
from qiskit_cold_atom.base_circuit_solver import BaseCircuitSolver
from qiskit_cold_atom.exceptions import QiskitColdAtomError
from qiskit_cold_atom.fermions.fermionic_state import FermionicState
from qiskit_cold_atom.fermions.fermionic_basis import FermionicBasis
from qiskit_cold_atom.fermions.fermion_gate_library import FermionicGate
class FermionCircuitSolver(BaseCircuitSolver):
    """
    Numerically simulate fermionic systems by exactly computing the time
    evolution under unitary operations generated by fermionic Hamiltonians.
    """

    def __init__(
        self,
        shots: Optional[int] = None,
        seed: Optional[int] = None,
        num_species: int = 1,
    ):
        """
        Args:
            shots: amount of shots for the measurement simulation;
                if not None, measurements are performed
            seed: seed for the RNG for the measurement simulation
            num_species: number of different fermionic species, defaults to 1 for a single type of
                (spinless) fermions, 2 for spin-1/2 fermions etc. If > 1, the solver will check for
                conservation of the particle number per fermionic species in order to reduce the
                Hilbert space dimension of the simulation
        """
        # The basis is built lazily in preprocess_circuit once a circuit is known.
        self._basis = None
        self.num_species = num_species
        super().__init__(shots=shots, seed=seed)

    @property
    def basis(self) -> FermionicBasis:
        """
        Return the basis of fermionic occupation number states. This basis is updated via the
        setter whenever a new circuit is passed to __call__.
        """
        return self._basis

    @basis.setter
    def basis(self, basis: FermionicBasis):
        """
        Set the basis of the simulation and check its dimensions.

        Args:
            basis: The new basis.

        Raises:
            QiskitColdAtomError: If the dimension of the basis is too large.
        """
        if basis.dimension > self.max_dimension:
            raise QiskitColdAtomError(
                f"Dimension {basis.dimension} exceeds the maximum "
                f"allowed dimension {self.max_dimension}."
            )
        self._basis = basis

    def preprocess_circuit(self, circuit: QuantumCircuit):
        """
        Pre-processing fermionic circuits includes setting up the basis for the simulation
        by extracting the size, particle number and spin conservation from the circuit.

        Args:
            circuit: A fermionic quantum circuit for which to setup a basis.
        """
        initial_occupations = FermionicState.initial_state(circuit, self.num_species)
        _, spin_conservation = self._check_conservations(circuit)
        self.basis = FermionicBasis.from_state(initial_occupations, spin_conservation)
        self._dim = self.basis.dimension

    def get_initial_state(self, circuit: QuantumCircuit) -> csc_matrix:
        """
        Return the initial state of the quantum circuit as a sparse column vector.

        Args:
            circuit: The circuit for which to extract the initial_state.

        Returns:
            The initial state of the circuit as a sparse matrix.
        """
        init_state = FermionicState.initial_state(circuit, self.num_species)
        initial_occs = init_state.occupations_flat
        # The initial state is a single basis vector: amplitude 1 at the index
        # of the circuit's initial occupation pattern, 0 elsewhere.
        initial_index = self.basis.get_occupations().index(initial_occs)

        initial_state = csc_matrix(
            ([1 + 0j], ([initial_index], [0])),
            shape=(self.basis.dimension, 1),
            dtype=complex,
        )
        return initial_state

    def _embed_operator(
        self, operator: FermionicOp, num_wires: int, qargs: List[int]
    ) -> FermionicOp:
        """
        Turn a FermionicOp operator that acts on the wires given in qargs into an operator
        that acts on the entire state space of the circuit by padding with identities "I" on the
        remaining wires

        Args:
            operator: FermionicOp describing the generating Hamiltonian of a gate
            num_wires: The total number of wires in which the operator should be embedded into
            qargs: The wire indices the gate acts on

        Returns:
            FermionicOp, an operator acting on the entire quantum register of the Circuit

        Raises:
            QiskitColdAtomError:
                - If the given operator is not a FermionicOp
                - If the size of the operator does not match the given qargs
        """
        if not isinstance(operator, FermionicOp):
            raise QiskitColdAtomError(
                f"Expected FermionicOp; got {type(operator).__name__} instead."
            )
        if operator.register_length != len(qargs):
            raise QiskitColdAtomError(
                f"length of gate labels {operator.register_length} does not match "
                f"qargs {qargs} of the gates"
            )

        embedded_op_list = []
        for partial_label, factor in operator.to_list():
            # Start from an all-identity label and overwrite the qargs
            # positions with the gate's single-wire labels.
            full_label = ["I"] * num_wires
            for i, individual_label in enumerate(list(partial_label)):
                full_label[qargs[i]] = individual_label
            embedded_op_list.append(("".join(full_label), factor))

        return FermionicOp(embedded_op_list)

    def _check_conservations(self, circuit: QuantumCircuit) -> Tuple[bool, bool]:
        """
        Check if the fermionic operators defined in the circuit conserve the total particle number
        (i.e. there are as many creation operators as annihilation operators) and the particle
        number per spin species (e.g. there are as many up/down creation operators as there are
        up/down annihilation operators).

        Args:
            circuit: A quantum circuit with fermionic gates

        Returns:
            particle_conservation: True if the particle number is conserved in the circuit
            spin_conservation: True if the particle number is conserved for each spin species

        Raises:
            QiskitColdAtomError:
                - If an operator in the circuit is not a FermionicOp.
                - If the length of the fermionic operators does not match the system size.
                - If the circuit has a number of wires that is not a multiple of the number
                  of fermionic species.
        """
        particle_conservation = True
        spin_conservation = True

        for fermionic_op in self.to_operators(circuit):
            if not isinstance(fermionic_op, FermionicOp):
                raise QiskitColdAtomError("operators need to be given as FermionicOp")
            for term in fermionic_op.to_list():
                opstring = term[0]
                if len(opstring) != circuit.num_qubits:
                    raise QiskitColdAtomError(
                        f"Expected length {circuit.num_qubits} for fermionic operator; "
                        f"received {len(opstring)}."
                    )
                num_creators = opstring.count("+")
                num_annihilators = opstring.count("-")
                # Unequal counts break total particle number, which also
                # implies broken per-species conservation: short-circuit.
                if num_creators != num_annihilators:
                    return False, False

                if self.num_species > 1:
                    if circuit.num_qubits % self.num_species != 0:
                        raise QiskitColdAtomError(
                            f"The number of wires in the circuit {circuit.num_qubits} is not a "
                            f"multiple of the {self.num_species} fermionic species number."
                        )
                    sites = circuit.num_qubits // self.num_species
                    # check if the particle number is conserved for each spin species
                    for i in range(self.num_species):
                        ops = opstring[i * sites : (i + 1) * sites]
                        num_creators = ops.count("+")
                        num_annihilators = ops.count("-")
                        if num_creators != num_annihilators:
                            spin_conservation = False
                            break

        return particle_conservation, spin_conservation

    def operator_to_mat(self, operator: FermionicOp) -> csc_matrix:
        """Convert the fermionic operator to a sparse matrix.

        Args:
            operator: fermionic operator of which to compute the matrix representation

        Returns:
            scipy.sparse matrix of the Hamiltonian
        """
        return FermionicGate.operator_to_mat(operator, self.num_species, self._basis)

    def draw_shots(self, measurement_distribution: List[float]) -> List[str]:
        """
        Helper function to draw counts from a given distribution of measurement outcomes.

        Args:
            measurement_distribution: List of probabilities of the individual measurement outcomes

        Returns:
            a list of individual measurement results, e.g. ["011000", "100010", ...]
            The outcome of each shot is denoted by a binary string of the occupations of the
            individual modes in little endian convention

        Raises:
            QiskitColdAtomError:
                - If the length of the given probabilities does not match the expected Hilbert
                  space dimension.
                - If the number of shots self.shots has not been specified.
        """
        meas_dim = len(measurement_distribution)
        if meas_dim != self.dim:
            raise QiskitColdAtomError(
                f"Dimension of the measurement probabilities {meas_dim} does not "
                f"match the dimension expected by the solver, {self.dim}"
            )
        if self.shots is None:
            raise QiskitColdAtomError(
                "The number of shots has to be set before drawing measurements"
            )
        # list all possible outcomes as strings '001011', reversing the order of the wires
        # to comply with Qiskit's ordering convention
        outcome_strings = ["".join(map(str, k)) for k in self.basis.get_occupations()]
        # Draw measurements:
        meas_results = np.random.choice(outcome_strings, self.shots, p=measurement_distribution)
        return meas_results.tolist()
| 38.368421 | 103 | 0.631824 | 1,273 | 10,935 | 5.312647 | 0.249018 | 0.014786 | 0.018631 | 0.013308 | 0.09271 | 0.072749 | 0.037262 | 0.023362 | 0.014195 | 0.014195 | 0 | 0.00529 | 0.308551 | 10,935 | 284 | 104 | 38.503521 | 0.889168 | 0.400823 | 0 | 0.079646 | 0 | 0 | 0.115187 | 0.012341 | 0 | 0 | 0 | 0 | 0 | 1 | 0.079646 | false | 0 | 0.088496 | 0 | 0.238938 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e43547e5c131f4b4b10d29c0a2560df47fc9839 | 14,523 | py | Python | pyosu/api.py | fossabot/Osu.py | da8fb982f51260cf4cd2f47dd8c9b22687e6be72 | [
"MIT"
] | null | null | null | pyosu/api.py | fossabot/Osu.py | da8fb982f51260cf4cd2f47dd8c9b22687e6be72 | [
"MIT"
] | null | null | null | pyosu/api.py | fossabot/Osu.py | da8fb982f51260cf4cd2f47dd8c9b22687e6be72 | [
"MIT"
] | null | null | null | # MIT License
# Copyright (c) 2018 Renondedju
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from .http import Route, Request
from .user import User
from .score import Score
from .beatmap import Beatmap
from .user_best import UserBest
from .user_event import UserEvent
from .user_recent import UserRecent
from .score_collection import ScoreCollection
from .beatmap_collection import BeatmapCollection
from .user_best_collection import UserBestCollection
from .user_recent_collection import UserRecentCollection
import asyncio
class OsuApi():
    """Thin asynchronous wrapper around the osu! v1 web API."""

    def __init__(self, api_key: str):
        self._api_key = api_key
        self._session = None

    async def __get_data(self, url: str, unique=True, **args):
        """Fetch raw data from the given API route.

        Args:
            url: API endpoint name (e.g. 'get_beatmaps').
            unique: if True, collapse the result list to its first element,
                or to an empty dict when the list is empty.
            **args: query parameters forwarded to the route.

        Returns:
            The decoded response: a list, or a single dict when ``unique``.
        """
        route = Route(url, self._api_key)
        for key, value in args.items():
            route.add_param(key, value)

        request = Request(route)
        await request.fetch(self._session)
        data = request.data

        if unique:
            # Fixed: was `if len(data) is not 0:` — an identity comparison
            # against an int literal (SyntaxWarning on Python 3.8+).
            if len(data) != 0:
                data = data[0]
            else:
                data = {}
        return data

    async def get_beatmap(self, beatmapset_id=None, beatmap_id=None, user=None,
                          type_str=None, mode=None, include_converted=None, hash_str=None):
        """
        If any of the parameters used returns more than one beatmap,
        the first one only will be returned, if you want multiple beatmaps,
        use OsuApi.get_beatmaps() instead

        Parameters :
            beatmapset_id     - specify a beatmapset_id to return metadata from.
            beatmap_id        - specify a beatmap_id to return metadata from.
            user              - specify a user_id or a username to return metadata from.
            type_str          - specify if 'user' is a user_id or a username.
                                Use string for usernames or id for user_ids.
                                Optional, default behaviour is automatic recognition
                                (may be problematic for usernames made up of digits only).
            mode              - mode (0 = osu!, 1 = Taiko, 2 = CtB, 3 = osu!mania).
                                Optional, maps of all modes are returned by default.
            include_converted - specify whether converted beatmaps are included
                                (0 = not included, 1 = included).
                                Only has an effect if 'mode' is chosen and not 0.
                                Converted maps show their converted difficulty rating.
                                Optional, default is 0.
            hash_str          - the beatmap hash. It can be used, for instance,
                                if you're trying to get what beatmap has a replay played in,
                                as .osr replays only provide beatmap hashes
                                (example of hash: a5b99395a42bd55bc5eb1d2411cbdf8b).
                                Optional, by default all beatmaps are returned independently
                                from the hash.
        """
        data = await self.__get_data('get_beatmaps', limit=1, s=beatmapset_id,
                                     b=beatmap_id, u=user, m=mode, a=include_converted,
                                     h=hash_str, type=type_str)
        return Beatmap(self, **data)

    async def get_beatmaps(self, limit=None, since=None, type_str=None,
                           beatmapset_id=None, include_converted=None, user=None, mode=None):
        """
        Do note that requesting a beatmap collection is way faster than
        requesting beatmap by beatmap (and requires only one api request)

        Parameters :
            limit             - amount of results. Optional, default and maximum are 500.
            since             - return all beatmaps ranked or loved since this date.
                                Must be a MySQL date.
            beatmapset_id     - specify a beatmapset_id to return metadata from.
            user              - specify a user_id or a username to return metadata from.
            type_str          - specify if 'user' is a user_id or a username.
                                Use string for usernames or id for user_ids.
                                Optional, default behaviour is automatic recognition
                                (may be problematic for usernames made up of digits only).
            mode              - mode (0 = osu!, 1 = Taiko, 2 = CtB, 3 = osu!mania).
                                Optional, maps of all modes are returned by default.
            include_converted - specify whether converted beatmaps are included
                                (0 = not included, 1 = included).
                                Only has an effect if 'mode' is chosen and not 0.
                                Converted maps show their converted difficulty rating.
                                Optional, default is 0.
        """
        datas = await self.__get_data('get_beatmaps', False, limit=limit, since=since,
                                      type=type_str, s=beatmapset_id, a=include_converted,
                                      u=user, m=mode)
        beatmaps = BeatmapCollection(self)
        for data in datas:
            beatmaps.add_beatmap(Beatmap(self, **data))
        return beatmaps

    async def get_user(self, user=None, mode=None, type_str=None, event_days=None):
        """
        Fetches a user data

        Parameters :
            user       - specify a user_id or a username to return metadata from (required).
            mode       - mode (0 = osu!, 1 = Taiko, 2 = CtB, 3 = osu!mania).
                         Optional, default value is 0.
            type_str   - specify if u is a user_id or a username.
                         Use string for usernames or id for user_ids.
                         Optional, default behaviour is automatic recognition
                         (may be problematic for usernames made up of digits only).
            event_days - Max number of days between now and last event date.
                         Range of 1-31. Optional, default value is 1.
        """
        data = await self.__get_data('get_user', u=user, m=mode, type=type_str,
                                     event_days=event_days)
        user_events = []
        for event in data.get('events', []):
            user_events.append(UserEvent(**event))
        return User(self, user_events, **data)

    async def get_score(self, beatmap_id, user=None, mode=None, type_str=None):
        """
        If any of the parameters used returns more than one score,
        the first one only will be used. If you want multiple scores use
        OsuApi.get_scores() instead

        Parameters :
            beatmap_id - specify a beatmap_id to return metadata from. (required)
            user       - specify a user_id or a username to return metadata from.
            type_str   - specify if 'user' is a user_id or a username.
                         Use string for usernames or id for user_ids.
                         Optional, default behaviour is automatic recognition
                         (may be problematic for usernames made up of digits only).
            mode       - mode (0 = osu!, 1 = Taiko, 2 = CtB, 3 = osu!mania).
                         Optional, maps of all modes are returned by default.
        """
        data = await self.__get_data('get_scores', b=beatmap_id, limit=1, u=user,
                                     m=mode, type=type_str)
        score = Score(self, **data)
        # Fixed: was `if mode != None:` — use identity comparison for None.
        if mode is not None:
            score.mode = mode
        return score

    async def get_scores(self, beatmap_id, user=None, mode=None, mods=None, type_str=None, limit=None):
        """
        Do note that requesting a score collection is way faster than
        requesting score by score (and requires only one api request)

        Parameters :
            beatmap_id - specify a beatmap_id to return score information from (required).
            user       - specify a user_id or a username to return score information for.
            mode       - mode (0 = osu!, 1 = Taiko, 2 = CtB, 3 = osu!mania).
                         Optional, default value is 0.
            mods       - specify a mod or mod combination (See the bitwise enum)
            type_str   - specify if user is a user_id or a username.
                         Use string for usernames or id for user_ids.
                         Optional, default behaviour is automatic recognition
                         (may be problematic for usernames made up of digits only).
            limit      - amount of results from the top (range between 1 and 100 - defaults to 50).
        """
        datas = await self.__get_data('get_scores', False, b=beatmap_id, u=user,
                                      m=mode, mods=mods, type=type_str, limit=limit)
        scores = ScoreCollection(self)
        for data in datas:
            scores.add_score(Score(self, **data))
        return scores

    async def get_user_best(self, user, mode=None, type_str=None):
        """
        Returns the top play of a user. If you want more than one user best
        use OsuApi.get_user_bests() instead.

        Parameters :
            user     - specify a user_id or a username to return best scores from (required).
            mode     - mode (0 = osu!, 1 = Taiko, 2 = CtB, 3 = osu!mania).
                       Optional, default value is 0.
            type_str - specify if user is a user_id or a username.
                       Use 'string' for usernames or 'id' for user_ids.
                       Optional, default behavior is automatic recognition
                       (may be problematic for usernames made up of digits only).
        """
        data = await self.__get_data('get_user_best', u=user, limit=1, m=mode,
                                     type=type_str)
        return UserBest(self, **data)

    async def get_user_bests(self, user, mode=None, type_str=None, limit=None):
        """
        Parameters :
            user     - specify a user_id or a username to return best scores from (required).
            mode     - mode (0 = osu!, 1 = Taiko, 2 = CtB, 3 = osu!mania).
                       Optional, default value is 0.
            type_str - specify if user is a user_id or a username.
                       Use string for usernames or id for user_ids.
                       Optional, default behaviour is automatic recognition
                       (may be problematic for usernames made up of digits only).
            limit    - amount of results from the top (range between 1 and 100 - defaults to 50).
        """
        datas = await self.__get_data('get_user_best', False, u=user, m=mode,
                                      type=type_str, limit=limit)
        bests = UserBestCollection(self)
        for data in datas:
            bests.add_user_best(UserBest(self, **data))
        return bests

    async def get_user_recent(self, user, mode=None, type_str=None):
        """
        Returns the most recent play of a user. If you want more than one
        recent play use OsuApi.get_user_recents() instead.

        Parameters :
            user     - specify a user_id or a username to return recent scores from (required).
            mode     - mode (0 = osu!, 1 = Taiko, 2 = CtB, 3 = osu!mania).
                       Optional, default value is 0.
            type_str - specify if user is a user_id or a username.
                       Use 'string' for usernames or 'id' for user_ids.
                       Optional, default behavior is automatic recognition
                       (may be problematic for usernames made up of digits only).
        """
        data = await self.__get_data('get_user_recent', u=user, limit=1, m=mode,
                                     type=type_str)
        return UserRecent(self, **data)

    async def get_user_recents(self, user, mode=None, type_str=None, limit=None):
        """
        Parameters :
            user     - specify a user_id or a username to return recent scores from (required).
            mode     - mode (0 = osu!, 1 = Taiko, 2 = CtB, 3 = osu!mania).
                       Optional, default value is 0.
            type_str - specify if user is a user_id or a username.
                       Use string for usernames or id for user_ids.
                       Optional, default behaviour is automatic recognition
                       (may be problematic for usernames made up of digits only).
            limit    - amount of results from the top (range between 1 and 100 - defaults to 50).
        """
        datas = await self.__get_data('get_user_recent', False, u=user, m=mode,
                                      type=type_str, limit=limit)
        recents = UserRecentCollection(self)
        for data in datas:
            recents.add_user_recent(UserRecent(self, **data))
        return recents
7e44152976c03d5a6b10910e8df9d3ad3fcf28f3 | 951 | py | Python | tests/unit/configuration_subsystem/test_settings_sample.py | saito-hideki/ansible-navigator | 0a15b83c2a9a548315765360bd19d6fd270862d4 | [
"Apache-2.0"
] | null | null | null | tests/unit/configuration_subsystem/test_settings_sample.py | saito-hideki/ansible-navigator | 0a15b83c2a9a548315765360bd19d6fd270862d4 | [
"Apache-2.0"
] | 8 | 2021-12-13T20:56:47.000Z | 2022-03-10T14:53:04.000Z | tests/unit/configuration_subsystem/test_settings_sample.py | alisonlhart/ansible-navigator | 006db536ef1ea5b38a195a21ae7c0729d995bebc | [
"Apache-2.0"
] | null | null | null | """Tests for the transformation of settings to a json schema."""
from typing import Tuple
from ansible_navigator.utils.serialize import Loader
from ansible_navigator.utils.serialize import yaml
def test_valid_yaml(settings_samples: Tuple[str, str]):
    """Ensure both sample settings documents parse as non-empty YAML.

    :param settings_samples: The sample setting
    """
    commented, uncommented = settings_samples
    for document in (commented, uncommented):
        parsed = yaml.load(document, Loader=Loader)
        assert parsed
def test_no_un_templated(settings_samples: Tuple[str, str]):
"""Simple test to ensure the sample is valid yaml.
:param settings_samples: The sample settings
"""
commented, uncommented = settings_samples
assert "{{" not in commented
assert "{{" not in uncommented
assert "}}" not in commented
assert "}}" not in uncommented
| 29.71875 | 64 | 0.739222 | 121 | 951 | 5.669421 | 0.338843 | 0.131195 | 0.06414 | 0.072886 | 0.609329 | 0.510204 | 0.393586 | 0.393586 | 0.271137 | 0.271137 | 0 | 0 | 0.181914 | 951 | 31 | 65 | 30.677419 | 0.881748 | 0.257624 | 0 | 0.266667 | 0 | 0 | 0.011905 | 0 | 0 | 0 | 0 | 0 | 0.4 | 1 | 0.133333 | false | 0 | 0.2 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e445155d9b593e9344d37cde56acf0b38128cff | 1,914 | py | Python | opexebo/analysis/autocorrelation.py | simon-ball/opexebo | 8e44a4890efa60a6ed8c2e9e0df7cc9ab2d80d31 | [
"MIT"
] | 4 | 2019-06-12T07:50:42.000Z | 2021-11-19T12:55:47.000Z | opexebo/analysis/autocorrelation.py | simon-ball/opexebo | 8e44a4890efa60a6ed8c2e9e0df7cc9ab2d80d31 | [
"MIT"
] | 12 | 2019-06-12T07:26:40.000Z | 2021-08-11T15:10:47.000Z | opexebo/analysis/autocorrelation.py | simon-ball/opexebo | 8e44a4890efa60a6ed8c2e9e0df7cc9ab2d80d31 | [
"MIT"
] | 4 | 2019-11-21T10:44:37.000Z | 2022-01-07T14:21:07.000Z | """
Calculate 2D spatial autocorrelation
Calculates 2D autocorrelation (autocorrelogram) of a firing map.
"""
import numpy as np
import opexebo
def autocorrelation(firing_map):
    """Calculate 2D spatial autocorrelation of a firing map.

    Parameters
    ----------
    firing_map: np.ndarray
        NxM matrix, smoothed firing map. map is not necessary a numpy array.
        May contain NaNs.

    Returns
    -------
    acorr: np.ndarray
        Resulting correlation matrix, which is a 2D numpy array.

    See Also
    --------
    opexebo.general.normxcorr2_general

    Notes
    -----
    BNT.+analyses.autocorrelation

    Copyright (C) 2018 by Vadim Frolov
    """
    # overlap_amount is a parameter that is intentionally not exposed to
    # the outside world. This is because too many users depend on it and we
    # do not want everyone to use their own overlap value.
    # Should be a value in range [0, 1]
    overlap_amount = 0.8
    slices = []

    # Fixed: was `if type(firing_map) != np.ndarray:` — np.asarray is the
    # idiomatic conversion and is a no-op for existing ndarrays.
    firing_map = np.asarray(firing_map)

    if firing_map.size == 0:
        return firing_map

    # make sure there are no NaNs in the firing_map
    firing_map = np.nan_to_num(firing_map)

    # get full autocorrelogram
    aCorr = opexebo.general.normxcorr2_general(firing_map)

    # we are only interested in a portion of the autocorrelogram. Since the values
    # on edges are too noisy (due to the fact that very small amount of elements
    # are correlated).
    for i in range(firing_map.ndim):
        new_size = np.round(firing_map.shape[i] + firing_map.shape[i] * overlap_amount)
        if new_size % 2 == 0:
            # Keep the kept window odd-sized so it stays centred on the peak.
            new_size = new_size - 1

        offset = aCorr.shape[i] - new_size
        offset = np.round(offset / 2 + 1)
        d0 = int(offset - 1)
        d1 = int(aCorr.shape[i] - offset + 1)
        slices.append(slice(d0, d1))

    return aCorr[tuple(slices)]
| 27.342857 | 87 | 0.65047 | 268 | 1,914 | 4.548507 | 0.455224 | 0.125513 | 0.036095 | 0.054143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018414 | 0.262278 | 1,914 | 69 | 88 | 27.73913 | 0.844901 | 0.520376 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.095238 | 0 | 0.238095 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e46257018998b7e01ac8cfde628ff0a2d699ca0 | 1,852 | py | Python | tello.py | wikeex/tello-python | 7d0ffcd8deedeefc1fed5deb5bdf3c65407f1170 | [
"MIT"
] | 3 | 2020-03-17T08:34:27.000Z | 2021-07-04T12:25:40.000Z | tello.py | wikeex/tello-python | 7d0ffcd8deedeefc1fed5deb5bdf3c65407f1170 | [
"MIT"
] | null | null | null | tello.py | wikeex/tello-python | 7d0ffcd8deedeefc1fed5deb5bdf3c65407f1170 | [
"MIT"
] | 1 | 2021-07-10T07:33:52.000Z | 2021-07-10T07:33:52.000Z | import socket
import asyncio
import functools
class Tello:
    """Minimal UDP client for sending text commands to a Tello drone."""

    def __init__(self, tello_ip):
        self.local_ip = ''
        self.socket = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)  # socket for sending cmd
        self._bind_ip(self.local_ip, 8891, self.socket)

        self.tello_ip = tello_ip
        self.tello_port = 8889
        # NOTE(review): "adderss" is misspelled but kept — renaming the
        # attribute would break any external code that reads it.
        self.tello_adderss = (self.tello_ip, self.tello_port)
        self.response = None

    def _bind_ip(self, ip, port, s):
        """
        Bind a local port; if binding fails, retry recursively with port + 1.

        :param ip: tello ip
        :param port: local port to try
        :param s: socket object
        :return: the port that was successfully bound
        """
        try:
            s.bind((self.local_ip, port))
            return port
        except OSError:
            # Binding failed, so this port is unusable — recurse onto the next one.
            return self._bind_ip(ip, port + 1, s)

    async def wait_response(self, command):
        # Send the command and block until the drone replies (up to 1024 bytes).
        self.socket.sendto(command.encode('utf-8'), self.tello_adderss)
        self.response, ip = self.socket.recvfrom(1024)
        return self.response, ip

    def action_result(self, command, future):
        # Done-callback for wait_response: log the drone's reply for the command.
        response = future.result()[0].decode('utf-8')
        ip = future.result()[1]
        print('{0}, {1}, {2}'.format(ip, command, response))
        if response == 'ok':
            ...
        elif 'error' in response:
            ...

    def send_command(self, command):
        """
        Send a command and wait for the reply. This was originally meant to be
        non-blocking via coroutines (multithreading-like), but it ended up
        blocking. On reflection, blocking is fine: it guarantees each sent
        command is paired with its reply, so it was left as is.

        :param command: command string to send
        :return: None
        """
        print('sending command: %s to %s' % (command, self.tello_ip))
        coroutine = self.wait_response(command)
        # A fresh event loop per command; run_until_complete makes this blocking.
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        task = loop.create_task(coroutine)
        task.add_done_callback(functools.partial(self.action_result, command))
        loop.run_until_complete(task)
| 28.9375 | 96 | 0.591253 | 220 | 1,852 | 4.804545 | 0.377273 | 0.068117 | 0.041627 | 0.028382 | 0.037843 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016031 | 0.292657 | 1,852 | 63 | 97 | 29.396825 | 0.79084 | 0.12311 | 0 | 0.052632 | 0 | 0 | 0.036208 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.078947 | 0 | 0.289474 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e467dfca22e7d36fac6a7bbd269823b0f242d53 | 3,464 | py | Python | 2019/python/day9.py | dsvensson/advent-of-code | a00f5722d5c0253f6303e838e9b79cb952aca0b1 | [
"0BSD"
] | null | null | null | 2019/python/day9.py | dsvensson/advent-of-code | a00f5722d5c0253f6303e838e9b79cb952aca0b1 | [
"0BSD"
] | 3 | 2021-09-08T01:36:24.000Z | 2021-12-02T23:39:00.000Z | 2019/python/day9.py | dsvensson/advent-of-code | a00f5722d5c0253f6303e838e9b79cb952aca0b1 | [
"0BSD"
] | null | null | null |
def decode(op):
    """Split an intcode instruction into (opcode, mode1, mode2, mode3).

    The two low decimal digits are the opcode; the next three digits are
    the parameter modes for parameters 1..3 respectively.
    """
    rest, opmode = divmod(op, 100)
    rest, a = divmod(rest, 10)
    c, b = divmod(rest, 10)
    return (opmode, a, b, c)
def read(p, ip, pc):
try:
return p[p[pc]]
except Exception:
p.extend([0]*len(p))
return read(p, ip, pc)
def readi(p, ip, pc):
    """Immediate-mode read: return ``p[pc]`` itself, growing memory on demand.

    Fixes: catches only IndexError (the original caught every Exception) and
    cannot recurse forever on an empty program list.
    """
    while True:
        try:
            return p[pc]
        except IndexError:
            p.extend([0] * max(len(p), 1))
def readr(p, ip, pc):
    """Relative-mode read: return ``p[ip + p[pc]]`` (ip is the relative base),
    growing memory on demand.

    Fixes: catches only IndexError (the original caught every Exception) and
    cannot recurse forever on an empty program list.
    """
    while True:
        try:
            return p[ip + p[pc]]
        except IndexError:
            p.extend([0] * max(len(p), 1))
def write(p, ip, pc, v):
    """Position-mode write: set ``p[p[pc]] = v``, growing memory on demand.

    Fixes: catches only IndexError (the original caught every Exception) and
    cannot recurse forever on an empty program list.
    """
    while True:
        try:
            p[p[pc]] = v
            return
        except IndexError:
            p.extend([0] * max(len(p), 1))
def writei(p, ip, pc, v):
    """Immediate-mode write: always invalid in intcode.

    A destination parameter can never be in immediate mode, so reaching this
    handler means the program (or interpreter) is broken.
    """
    message = "illegal instruction"
    raise Exception(message)
def writer(p, ip, pc, v):
    """Relative-mode write: set ``p[ip + p[pc]] = v`` (ip is the relative
    base), growing memory on demand.

    Fixes: catches only IndexError (the original caught every Exception) and
    cannot recurse forever on an empty program list.
    """
    while True:
        try:
            p[ip + p[pc]] = v
            return
        except IndexError:
            p.extend([0] * max(len(p), 1))
# Dispatch tables keyed by parameter mode:
#   0 = position mode, 1 = immediate mode, 2 = relative mode.
# ``r[mode]`` reads a parameter, ``w[mode]`` writes a destination.
r = {
    0: read,
    1: readi,
    2: readr,
}
w = {
    0: write,
    1: writei,
    2: writer,
}
# Sample programs from the puzzle text, kept for manual experimentation:
# s = "1101,100,-1,4,0"
# s = "3,21,1008,21,8,20,1005,20,22,107,8,21,20,1006,20,31,1106,0,36,98,0,0,1002,21,125,20,4,20,1105,1,46,104,999,1105,1,46,1101,1000,1,20,4,20,1105,1,46,98,99"
async def execute(t, stdin, stdout):
    """Run the intcode program *t* (a list of ints, mutated in place).

    Input values are awaited from the *stdin* asyncio.Queue; output values
    are put on the *stdout* asyncio.Queue.  Parameter access goes through
    the mode dispatch tables ``r`` (read) and ``w`` (write).

    :param t: program/memory as a list of ints (grown by the helpers).
    :param stdin: asyncio.Queue supplying input values for opcode 3.
    :param stdout: asyncio.Queue receiving output values from opcode 4.
    :return: None, once the halt instruction (99) is executed.
    """
    pc = 0    # instruction pointer
    base = 0  # relative base, adjusted by opcode 9
    while True:
        op, a, b, c = decode(t[pc])
        if op == 1:  # add
            w[c](t, base, pc+3, r[a](t, base, pc+1) + r[b](t, base, pc+2))
            pc += 4
        elif op == 2:  # mul
            w[c](t, base, pc+3, r[a](t, base, pc+1) * r[b](t, base, pc+2))
            pc += 4
        elif op == 3:  # read input
            w[a](t, base, pc+1, await stdin.get())
            pc += 2
        elif op == 4:  # write output
            await stdout.put(r[a](t, base, pc+1))
            pc += 2
        elif op == 5:  # jump if true
            if r[a](t, base, pc+1) != 0:
                pc = r[b](t, base, pc+2)
            else:
                pc += 3
        elif op == 6:  # jump if false
            if r[a](t, base, pc+1) == 0:
                pc = r[b](t, base, pc+2)
            else:
                pc += 3
        elif op == 7:  # less than
            if r[a](t, base, pc+1) < r[b](t, base, pc+2):
                w[c](t, base, pc+3, 1)
            else:
                w[c](t, base, pc+3, 0)
            pc += 4
        elif op == 8:  # equals
            if r[a](t, base, pc+1) == r[b](t, base, pc+2):
                w[c](t, base, pc+3, 1)
            else:
                w[c](t, base, pc+3, 0)
            pc += 4
        elif op == 9:  # adjust relative base
            base += r[a](t, base, pc+1)
            pc += 2
        elif op == 99:  # halt
            print("halt")
            pc += 2  # NOTE(review): dead increment -- we return immediately
            return
from itertools import permutations
import asyncio
def parse(src):
    """Turn a comma-separated intcode source string into a list of ints."""
    return list(map(int, src.split(",")))
async def eat_output(stdout):
    """Drain the *stdout* queue forever, printing each value.

    Runs until the surrounding event loop cancels the task.
    """
    while True:
        value = await stdout.get()
        print("out:", value)
async def run(inp):
    """Run the intcode program in *inp* with input value 2, printing output.

    Fixes over the original:
    * three hard-coded sample programs were parsed and immediately
      overwritten (dead stores) -- removed;
    * ``asyncio.all_tasks([s, t])`` was returned before either task had run
      (and a list is not a valid ``loop`` argument, so it yielded an empty
      set), which made ``asyncio.run`` cancel both tasks straight away.
      We now await the interpreter to completion, let the printer drain the
      remaining output, then cancel it.

    :param inp: intcode source string.
    """
    prog = parse(inp)
    stdin = asyncio.Queue()
    await stdin.put(2)
    stdout = asyncio.Queue()
    print("exec")
    interpreter = asyncio.create_task(execute(prog.copy(), stdin, stdout))
    printer = asyncio.create_task(eat_output(stdout))
    await interpreter              # run the program until it halts
    while not stdout.empty():      # let the printer flush remaining output
        await asyncio.sleep(0)
    printer.cancel()
if __name__ == "__main__":
    # Read the puzzle input and run it.  ``with`` closes the file handle
    # (previously leaked), and the guard stops the solver from running as a
    # side effect of importing this module.
    with open("../data/day9.input") as f:
        data = f.read()
    asyncio.run(run(data))
| 23.405405 | 160 | 0.464203 | 568 | 3,464 | 2.822183 | 0.216549 | 0.065502 | 0.091703 | 0.044916 | 0.386151 | 0.359326 | 0.304429 | 0.304429 | 0.304429 | 0.304429 | 0 | 0.128974 | 0.35537 | 3,464 | 147 | 161 | 23.564626 | 0.588894 | 0.077945 | 0 | 0.321739 | 0 | 0.008696 | 0.051258 | 0.035535 | 0 | 0 | 0 | 0 | 0 | 1 | 0.069565 | false | 0 | 0.017391 | 0.008696 | 0.191304 | 0.026087 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e475d2896b87744865506cadf4fffb85c254050 | 7,029 | py | Python | lib/util/settings.py | JohnEskimSmith/jarm | fc2bcbd6fd5c6587522a97d583b3985ccdcde406 | [
"BSD-3-Clause"
] | 2 | 2020-11-28T12:22:52.000Z | 2020-12-17T09:10:09.000Z | lib/util/settings.py | JohnEskimSmith/jarm | fc2bcbd6fd5c6587522a97d583b3985ccdcde406 | [
"BSD-3-Clause"
] | null | null | null | lib/util/settings.py | JohnEskimSmith/jarm | fc2bcbd6fd5c6587522a97d583b3985ccdcde406 | [
"BSD-3-Clause"
] | null | null | null | import argparse
from os import path
from sys import stderr
from typing import Tuple, List
from lib.core import AppConfig, TargetConfig
__all__ = ['parse_args', 'parse_settings']
def parse_args():
    """
    Build the argument parser and parse command-line options for the
    asyncio JARM fingerprinting tool.

    :return: argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(description='JARM is an active Transport Layer Security (TLS) server fingerprinting tool. (Asyncio version)',
                                     formatter_class=argparse.MetavarTypeHelpFormatter)
    # region input sources
    # input_stdin: str
    parser.add_argument('--stdin', dest='input_stdin', action='store_true', help='Read targets from stdin')
    parser.add_argument('-t', '--targets', nargs='+', type=str, default='', dest='single_targets',
                        help='Single targets: ipv4, fqdn, ipv4:port, fqdn:port. Example: facebook.com google.com:443 67.99.200.0/24:8443')
    parser.add_argument('-f', '--input-file', dest='input_file', type=str, help='path to file with targets.\n Targets: ipv4, fqdn, ipv4:port, fqdn:port')
    # endregion
    # region output options
    parser.add_argument('-o', '--output-file', dest='output_file', type=str, help='path to file with results')
    parser.add_argument('--json', dest='json', action='store_true', default=True, help='Output format of records, default json')
    parser.add_argument('--csv', dest='csv', action='store_true', help='Output format of records: csv')
    # endregion
    # region concurrency / timeouts
    parser.add_argument('-s', '--senders', dest='senders', type=int, default=1024,
                        help='Number of send coroutines to use (default: 1024)')
    parser.add_argument('--queue-sleep', dest='queue_sleep', type=int, default=1,
                        help='Sleep duration if the queue is full, default 1 sec. Queue size == senders')
    parser.add_argument('-tconnect', '--timeout-connection', dest='conn_timeout', type=int, default=12,
                        help='Set connection timeout for open_connection(asyncio), seconds (default: 12)')
    parser.add_argument('-tread', '--timeout-read', dest='read_timeout', type=int, default=12,
                        help='Set connection timeout for reader from connection, seconds (default: 12)')
    parser.add_argument('-tresolver', '--resolver-timeout', dest='resolver_timeout', type=int, default=4,
                        help='Set DNS resolutions timeout, seconds (default: 4)')
    parser.add_argument('-p', '--port', type=int, help='Specify port (default: 443)', default=443)
    # endregion
    # region filters
    parser.add_argument('--filter-jarm', dest='jarm', type=str,
                        help='trying to find a jarm in a response')
    parser.add_argument('--filter-cipher-tls', dest='cipher_tls', type=str,
                        help='trying to find a cipher_tls(substring in jarm)')
    parser.add_argument('--show-only-success', dest='show_only_success', action='store_true',
                        help='Show(save) only success records')
    # endregion
    parser.add_argument('--show-statistics', dest='statistics', action='store_true')
    return parser.parse_args()
def parse_settings(args: argparse.Namespace) -> Tuple[TargetConfig, AppConfig]:
    """Turn parsed CLI options into (TargetConfig, AppConfig).

    Exits with code 1 when no input source was given; aborts when the
    targets file does not exist.

    :param args: namespace produced by :func:`parse_args`.
    :return: (target_settings, app_settings) tuple.
    """
    # At least one input source is mandatory.
    if not (args.input_stdin or args.input_file or args.single_targets):
        print("""errors, set input source:
--stdin read targets from stdin;
-t,--targets set targets, see -h;
-f,--input-file read from file with targets, see -h""")
        exit(1)

    input_file = args.input_file or None
    if input_file and not path.isfile(input_file):
        abort(f'ERROR: file not found: {input_file}')

    # Default to stdout (binary mode) when no output file was requested.
    if args.output_file:
        output_file, write_mode = args.output_file, 'a'
    else:
        output_file, write_mode = '/dev/stdout', 'wb'

    payloads = return_structs_tls()
    output_format = 'csv' if args.csv else 'json'
    filter_jarm = args.jarm or ''
    filter_cipher_tls = args.cipher_tls or ''

    target_settings = TargetConfig(
        port=args.port,
        conn_timeout=args.conn_timeout,
        read_timeout=args.read_timeout,
        resolver_timeout=args.resolver_timeout,
        list_payloads=payloads,
    )
    app_settings = AppConfig(
        output_format=output_format,
        input_stdin=args.input_stdin,
        senders=args.senders,
        queue_sleep=args.queue_sleep,
        statistics=args.statistics,
        single_targets=args.single_targets,
        input_file=input_file,
        output_file=output_file,
        write_mode=write_mode,
        filter_jarm=filter_jarm,
        filter_cipher_tls=filter_cipher_tls,
        show_only_success=args.show_only_success,
    )
    return target_settings, app_settings
def return_structs_tls() -> List[List[str]]:
    """
    Build the ten TLS probe configurations used for JARM fingerprinting
    (function adapted from jarm.py with changes).

    Each entry is
    [version, cipher_list, cipher_order, GREASE, ALPN, supported-versions
    extension, extension order].

    :return: list of the ten probe parameter lists, in probing order.
    """
    # Possible versions: SSLv3, TLS_1, TLS_1.1, TLS_1.2, TLS_1.3
    # Possible cipher lists: ALL, NO1.3
    # GREASE: either NO_GREASE or GREASE
    # APLN: either APLN or RARE_APLN
    # Supported Verisons extension: 1.2_SUPPPORT, NO_SUPPORT, or 1.3_SUPPORT
    # Possible Extension order: FORWARD, REVERSE
    return [
        ["TLS_1.2", "ALL", "FORWARD", "NO_GREASE", "APLN", "1.2_SUPPORT", "REVERSE"],
        ["TLS_1.2", "ALL", "REVERSE", "NO_GREASE", "APLN", "1.2_SUPPORT", "FORWARD"],
        ["TLS_1.2", "ALL", "TOP_HALF", "NO_GREASE", "APLN", "NO_SUPPORT", "FORWARD"],
        ["TLS_1.2", "ALL", "BOTTOM_HALF", "NO_GREASE", "RARE_APLN", "NO_SUPPORT", "FORWARD"],
        ["TLS_1.2", "ALL", "MIDDLE_OUT", "GREASE", "RARE_APLN", "NO_SUPPORT", "REVERSE"],
        ["TLS_1.1", "ALL", "FORWARD", "NO_GREASE", "APLN", "NO_SUPPORT", "FORWARD"],
        ["TLS_1.3", "ALL", "FORWARD", "NO_GREASE", "APLN", "1.3_SUPPORT", "REVERSE"],
        ["TLS_1.3", "ALL", "REVERSE", "NO_GREASE", "APLN", "1.3_SUPPORT", "FORWARD"],
        ["TLS_1.3", "NO1.3", "FORWARD", "NO_GREASE", "APLN", "1.3_SUPPORT", "FORWARD"],
        ["TLS_1.3", "ALL", "MIDDLE_OUT", "GREASE", "APLN", "1.3_SUPPORT", "REVERSE"],
    ]
def abort(message: str, exc: Exception = None, exit_code: int = 1):
    """Print *message* (and the exception, if given) to stderr, then exit.

    :param message: human-readable error description.
    :param exc: optional exception to print after the message.
    :param exit_code: process exit status (default 1).
    """
    lines = [message] + ([exc] if exc else [])
    for item in lines:
        print(item, file=stderr)
    exit(exit_code)
| 45.642857 | 153 | 0.6311 | 908 | 7,029 | 4.650881 | 0.219163 | 0.034099 | 0.064409 | 0.015392 | 0.204594 | 0.177362 | 0.102534 | 0.053043 | 0.039309 | 0.02368 | 0 | 0.025698 | 0.230474 | 7,029 | 153 | 154 | 45.941176 | 0.755038 | 0.074975 | 0 | 0.097345 | 0 | 0.00885 | 0.344288 | 0.003875 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035398 | false | 0 | 0.044248 | 0 | 0.106195 | 0.035398 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e4adf73cb3d5a3a9acb0ddad95177b605261e97 | 7,553 | py | Python | stanCode Projects/my_photoshop/stanCodoshop.py | a546662002/SC-projects | 06e0a292c145b7c2a41d313a8f52b37bb18f4ecc | [
"MIT"
] | null | null | null | stanCode Projects/my_photoshop/stanCodoshop.py | a546662002/SC-projects | 06e0a292c145b7c2a41d313a8f52b37bb18f4ecc | [
"MIT"
] | null | null | null | stanCode Projects/my_photoshop/stanCodoshop.py | a546662002/SC-projects | 06e0a292c145b7c2a41d313a8f52b37bb18f4ecc | [
"MIT"
] | null | null | null | """
File: stanCodoshop.py
----------------------------------------------
SC101_Assignment3
Adapted from Nick Parlante's
Ghost assignment by Jerry Liao.
-----------------------------------------------
TODO:
"""
import os
import sys
from simpleimage import SimpleImage
import math
def get_pixel_dist(pixel, red, green, blue):
    """
    Returns the color distance between pixel and mean RGB value.

    Input:
        pixel (Pixel): pixel with RGB values to be compared
        red (int): average red value across all images
        green (int): average green value across all images
        blue (int): average blue value across all images

    Returns:
        dist (float): Euclidean distance between the pixel's RGB values and
        (red, green, blue)
    """
    d_red = red - pixel.red
    d_green = green - pixel.green
    d_blue = blue - pixel.blue
    return math.sqrt(d_red * d_red + d_green * d_green + d_blue * d_blue)
def get_average(pixels):
    """
    Given a list of pixels, finds the average red, blue, and green values.

    Input:
        pixels (List[Pixel]): list of pixels to be averaged

    Returns:
        rgb (List[int]): [red, green, blue] averages across pixels, each
        truncated to an int
    """
    count = len(pixels)
    total_red = sum(p.red for p in pixels)
    total_green = sum(p.green for p in pixels)
    total_blue = sum(p.blue for p in pixels)
    # int() truncates, matching the required integer channel values.
    return [int(total_red / count), int(total_green / count), int(total_blue / count)]
def get_best_pixel(pixels):
    """
    Given a list of pixels, returns the pixel with the smallest
    distance from the average red, green, and blue values across all pixels.

    Input:
        pixels (List[Pixel]): list of pixels to be averaged and compared

    Returns:
        best (Pixel): pixel closest to RGB averages

    Fixes over the original implementation:
    * the caller's list is no longer mutated (the old code ``pop()``-ed
      the last element off it);
    * the last pixel can now actually win -- previously it was popped off
      as a provisional best and then unconditionally replaced on the first
      loop iteration, so it was never considered.
    """
    red, green, blue = get_average(pixels)
    # min() keeps the earliest pixel on ties, and considers every pixel.
    return min(pixels, key=lambda p: get_pixel_dist(p, red, green, blue))
def solve(images):
    """
    Given a list of image objects, compute and display a Ghost solution image
    based on these images. There will be at least 3 images and they will all
    be the same size.

    Input:
        images (List[SimpleImage]): list of images to be processed
    """
    width = images[0].width
    height = images[0].height
    result = SimpleImage.blank(width, height)
    for y in range(height):
        for x in range(width):
            # Collect the pixel at (x, y) from every source image, pick the
            # one closest to their average colour, and copy its channels
            # into the result image.
            candidates = [img.get_pixel(x, y) for img in images]
            best = get_best_pixel(candidates)
            target = result.get_pixel(x, y)
            target.red = best.red
            target.green = best.green
            target.blue = best.blue
    print("Displaying image!")
    result.show()
def jpgs_in_dir(dir):
    """
    (provided, DO NOT MODIFY)
    Given the name of a directory, returns a list of the .jpg filenames
    within it.

    Input:
        dir (string): name of directory
    Returns:
        filenames(List[string]): names of jpg files in directory
    """
    # NOTE(review): parameter name shadows the builtin ``dir``; left as-is
    # because this helper is provided starter code.
    filenames = []
    for filename in os.listdir(dir):
        if filename.endswith('.jpg'):
            filenames.append(os.path.join(dir, filename))
    return filenames
def load_images(dir):
    """
    (provided, DO NOT MODIFY)
    Given a directory name, reads all the .jpg files within it into memory and
    returns them in a list. Prints the filenames out as it goes.

    Input:
        dir (string): name of directory
    Returns:
        images (List[SimpleImages]): list of images in directory
    """
    images = []
    jpgs = jpgs_in_dir(dir)
    for filename in jpgs:
        print("Loading", filename)
        image = SimpleImage(filename)
        images.append(image)
    return images
def main():
    """Entry point: load every .jpg in the directory given on the command
    line and run the ghost solver over them."""
    # (provided, DO NOT MODIFY)
    args = sys.argv[1:]
    # We just take 1 argument, the folder containing all the images.
    # The load_images() capability is provided above.
    images = load_images(args[0])
    solve(images)


if __name__ == '__main__':
    main()
| 41.048913 | 111 | 0.565735 | 944 | 7,553 | 4.454449 | 0.204449 | 0.019976 | 0.016647 | 0.031391 | 0.216647 | 0.193103 | 0.145303 | 0.098692 | 0.098692 | 0.055886 | 0 | 0.003693 | 0.354694 | 7,553 | 183 | 112 | 41.273224 | 0.859048 | 0.472263 | 0 | 0.048193 | 0 | 0 | 0.010268 | 0 | 0 | 0 | 0 | 0.005464 | 0 | 1 | 0.084337 | false | 0 | 0.048193 | 0 | 0.192771 | 0.024096 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e4ca9c4906cd8ef9e25c12b020f5afe04edc242 | 2,958 | py | Python | test/core/test_target.py | cedric05/dothttp | ae68f2c3dbb8f1053a0feeab0ac829aa5d3a2a6e | [
"Apache-2.0"
] | 10 | 2021-02-09T17:21:49.000Z | 2021-12-05T19:10:53.000Z | test/core/test_target.py | cedric05/dothttp | ae68f2c3dbb8f1053a0feeab0ac829aa5d3a2a6e | [
"Apache-2.0"
] | 60 | 2021-02-07T17:20:47.000Z | 2022-03-12T06:26:23.000Z | test/core/test_target.py | cedric05/dothttp | ae68f2c3dbb8f1053a0feeab0ac829aa5d3a2a6e | [
"Apache-2.0"
] | null | null | null | from dothttp import HttpFileException
from test import TestBase
from test.core.test_request import dir_path
base_dir = f"{dir_path}/target"
class TestTarget(TestBase):
    """Tests for selecting a request ("target") inside a .http file,
    either by 1-based index or by name."""

    def test_target_default(self):
        """With no target given, the first request in the file is used."""
        request = self.get_request(
            file=f"{base_dir}/default_target.http"
        )
        self.assertEqual("https://httpbin.org/get", request.url)

    def test_target_first_one_with_index(self):
        """Index '1' selects the first request.

        NOTE(review): the index is passed as the string '1' here but as the
        int 2 below -- presumably both forms are accepted; confirm in
        get_request's implementation."""
        request = self.get_request(
            file=f"{base_dir}/default_target.http", target='1'
        )
        self.assertEqual("https://httpbin.org/get", request.url)

    def test_target_second_one_with_index(self):
        """Index 2 selects the second request."""
        request = self.get_request(
            file=f"{base_dir}/default_target.http", target=2
        )
        self.assertEqual("https://httpbin.org/post", request.url)

    def test_invalid_names(self):
        """A file that is expected to fail raises HttpFileException."""
        with self.assertRaises(HttpFileException):
            self.get_request(**{'file': f"{base_dir}/fails.http", 'target': 2})

    def test_target_with_names(self):
        """Named and indexed selection address the same requests."""
        # target with name
        first_target = self.get_request(
            file=f"{base_dir}/target_with_names_for_few.http", target="first"
        )
        self.assertEqual("https://req.dothttp.dev/get", first_target.url, "first target")
        self.assertEqual("GET", first_target.method)
        # target second one with name
        second_target = self.get_request(
            file=f"{base_dir}/target_with_names_for_few.http", target="secondone"
        )
        self.assertEqual("https://req.dothttp.dev/post", second_target.url)
        self.assertEqual("hi=hi2", second_target.body)
        self.assertEqual("POST", second_target.method)
        # target third one with index (json body, hence bytes)
        third_target = self.get_request(
            file=f"{base_dir}/target_with_names_for_few.http", target=3
        )
        self.assertEqual("https://req.dothttp.dev/post", third_target.url)
        self.assertEqual(b'{"hi": "hi2"}', third_target.body)
        self.assertEqual("POST", third_target.method)
        # target first one with index
        first_target_with_name = self.get_request(
            file=f"{base_dir}/target_with_names_for_few.http", target=1
        )
        self.assertEqual("https://req.dothttp.dev/get", first_target_with_name.url)
        self.assertEqual("GET", first_target_with_name.method)
        # target second one with index
        second_target_with_name = self.get_request(
            file=f"{base_dir}/target_with_names_for_few.http", target=2
        )
        self.assertEqual("https://req.dothttp.dev/post", second_target_with_name.url)
        self.assertEqual("POST", second_target_with_name.method)
        # target fourth one with index (query parameters in the URL)
        fourth = self.get_request(
            file=f"{base_dir}/target_with_names_for_few.http", target=4
        )
        self.assertEqual("https://req.dothttp.dev/post?hi=bye", fourth.url)
        self.assertEqual("POST", fourth.method)
| 37.923077 | 89 | 0.659229 | 387 | 2,958 | 4.788114 | 0.139535 | 0.137615 | 0.075553 | 0.09714 | 0.713438 | 0.575283 | 0.534269 | 0.480302 | 0.480302 | 0.376686 | 0 | 0.003896 | 0.219067 | 2,958 | 77 | 90 | 38.415584 | 0.798268 | 0.053414 | 0 | 0.089286 | 0 | 0 | 0.248836 | 0.12782 | 0 | 0 | 0 | 0 | 0.321429 | 1 | 0.089286 | false | 0 | 0.053571 | 0 | 0.160714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e4fa68e06ba3af2fc797cefec0488165a72ccea | 8,732 | py | Python | segmentation_evaluate.py | MuliangDu-sudo/thermal-semantic-segmentation | 8a37af1cd0b5ca3f41eb9c2235157c9f727aed38 | [
"MIT"
] | null | null | null | segmentation_evaluate.py | MuliangDu-sudo/thermal-semantic-segmentation | 8a37af1cd0b5ca3f41eb9c2235157c9f727aed38 | [
"MIT"
] | null | null | null | segmentation_evaluate.py | MuliangDu-sudo/thermal-semantic-segmentation | 8a37af1cd0b5ca3f41eb9c2235157c9f727aed38 | [
"MIT"
] | null | null | null | from models import thermal_semantic_segmentation_models, semantic_segmentation_models, Deeplab
import torch
from utils import transforms as T
from torchvision import transforms as TT
import os
from PIL import ImageFile
from utils import AverageMeter, freiburg_prediction_visualize, freiburg_palette
import time
from data import CityscapesTranslation, Cityscapes, FreiburgTest, Freiburg, FreiburgT2S, FreiburgTranslation
from torch.utils.data import DataLoader
import visdom
from torch.utils.data.sampler import SubsetRandomSampler
import numpy as np
from utils.eval_tools import evaluate
from options import evaluation_parse
from PIL import ImageFile, Image
from tqdm import tqdm
# Let PIL load images from truncated files instead of raising an error.
ImageFile.LOAD_TRUNCATED_IMAGES = True
# Root directory where semantic-segmentation checkpoints are stored.
MODEL_ROOT_PATH = './checkpoints/semantic_segmentation'
def seg_validate(args, sem_net, val_data, loss_func, device, vis=None, num_classes=13):
    """Run one validation pass of *sem_net* over *val_data*.

    :param args: parsed CLI options; fields read here: visualize_prediction,
        root, new_checkpoint_name, baseline, target_domain, source_domain,
        generator_type, checkpoint_name, iter_counter
    :param sem_net: segmentation network; called as ``sem_net(image)`` and
        expected to return a dict with logits under key ``'out'``
    :param val_data: iterable of batches, each a dict with 'image'/'label'
    :param loss_func: loss applied to the upsampled logits and the labels
    :param device: torch device the batch tensors are moved to
    :param vis: optional visdom instance (unused in the current body)
    :param num_classes: number of semantic classes for the IoU computation
    :return: (mean IoU, average validation loss, per-class IoU dict)
    """
    print('validating...')
    val_loss = AverageMeter('val_loss', ':3.4f')
    sem_net.eval()  # eval mode: fixed batch-norm stats, no dropout
    prediction_list, label_list = [], []
    print(len(val_data))
    for i, item in enumerate(tqdm(val_data)):
        image = item['image'].to(device)
        label = item['label'].to(device)
        outputs = sem_net(image)
        # Upsample logits to the label resolution (256x512) before the loss.
        outputs = torch.nn.Upsample(size=(256, 512), mode='bilinear', align_corners=True)(outputs['out'])
        loss = loss_func(outputs, label)
        # Hard predictions: argmax over the class channel.
        predictions = outputs.max(1)[1].squeeze_(1).cpu().numpy()
        label_list.append(label.cpu().numpy())
        prediction_list.append(predictions)
        val_loss.update(loss.item(), image.size(0))
        if args.visualize_prediction is not None:
            # Choose the output folder; baseline and t2s runs override the
            # default per-checkpoint path.
            save_path_root = os.path.join(args.root, 'predictions/{}'.format(args.new_checkpoint_name.replace('.pth', '')))
            if args.baseline:
                save_path_root = 'baseline_predictions/apply_{}_image_on_{}_domain_model'.format(args.target_domain, args.source_domain,)
            if args.generator_type == 't2s':
                save_path_root = 'predictions/t2s/{}'.format(args.checkpoint_name.replace('.pth', ''))
            if not os.path.exists(save_path_root):
                os.makedirs(save_path_root)
            # NOTE(review): ``i % 1 == 0`` is always True -- presumably a
            # leftover sampling interval knob.
            if args.visualize_prediction == 'save_all' and i % 1 == 0:
                new_mask = freiburg_prediction_visualize(predictions[0], freiburg_palette())
                label = freiburg_prediction_visualize(label[0].squeeze_(1).cpu().numpy(), freiburg_palette())
                image = TT.ToPILImage()(image[0])
                new_mask.save(os.path.join(save_path_root, str(i)+'_prediction.png'))
                image.save(os.path.join(save_path_root, str(i)+'_image.png'))
                label.save(os.path.join(save_path_root, str(i) + '_groundtruth.png'))
            elif args.visualize_prediction == 'save_one' and i == 0:
                new_mask = freiburg_prediction_visualize(predictions[0], freiburg_palette())
                label = freiburg_prediction_visualize(label[0].squeeze_(1).cpu().numpy(), freiburg_palette())
                image = TT.ToPILImage()(image[0])
                new_mask.save(os.path.join(save_path_root, str(i)+'_prediction_{}.png'.format(args.iter_counter)))
                image.save(os.path.join(save_path_root, str(i)+'_image.png'))
                label.save(os.path.join(save_path_root, str(i) + '_groundtruth.png'))
    label_list = np.concatenate(label_list)
    prediction_list = np.concatenate(prediction_list)
    acc, acc_cls, mean_iu, fwavacc, cls_iu = evaluate(prediction_list, label_list, num_classes)
    return mean_iu, val_loss.avg, cls_iu
def seg_evaluation(args):
    """Load a checkpointed segmentation network, build the requested
    validation dataset/dataloader, run :func:`seg_validate`, and print the
    mean IoU, average loss, and per-class IoU.

    :param args: parsed CLI options; fields read here include dataset,
        data_split, val_batch_size, net_mode, num_classes, checkpoint_name,
        t2s_folder, grayscale.
    """
    print('evaluating...')
    device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
    visualizer = visdom.Visdom(env='thermal semantic segmentation')
    # Evaluation transform: resize only (augmentations left commented out).
    train_transform = T.Compose([
        #T.RandomResizedCrop(size=(256, 512), ratio=(1.5, 8 / 3.), scale=(0.5, 1.)),
        T.Resize((512, 256)),
        # T.RandomHorizontalFlip(),
        T.ToTensor(),
        # T.Normalize((0.5,), (0.5,))
    ])
    validation_split = .2
    shuffle_dataset = True
    random_seed = 42  # fixed seed so the validation split is reproducible
    # Select the dataset implementation matching the CLI choice.
    if args.dataset == 'cityscapes_translation':
        source_dataset = CityscapesTranslation('datasets/source_dataset', data_folder='translation',
                                               transforms=train_transform)
    elif args.dataset == 'cityscapes':
        source_dataset = Cityscapes('datasets/source_dataset', transforms=train_transform)
    elif args.dataset == 'freiburg_translation':
        source_dataset = FreiburgTranslation(root='datasets/freiburg/translations/',
                                             folder='test_'+args.checkpoint_name.replace('_segmentation.pth', '')+'/',
                                             transforms=train_transform)
    elif args.dataset == 'freiburg_rgb':
        source_dataset = FreiburgTest('datasets/freiburg', split='test', domain='RGB', transforms=train_transform,
                                      with_label=True, grayscale=args.grayscale)
    elif args.dataset == 'freiburg_ir':
        source_dataset = FreiburgTest(args=args, root='datasets/freiburg', split='test', domain='IR', transforms=train_transform,
                                      with_label=True)
    elif args.dataset == 'freiburg_t2s':
        source_dataset = FreiburgT2S(folder=args.t2s_folder, transforms=train_transform)
    else:
        raise ValueError('dataset does not exist.')
    # Creating data indices for training and validation splits:
    if args.data_split:
        dataset_size = len(source_dataset)
        indices = list(range(dataset_size))
        split = int(np.floor(validation_split * dataset_size))
        if shuffle_dataset:
            np.random.seed(random_seed)
            np.random.shuffle(indices)
        train_indices, val_indices = indices[split:], indices[:split]
        # Creating PT data samplers and loaders:
        valid_sampler = SubsetRandomSampler(val_indices)
        val_dataloader = DataLoader(source_dataset, batch_size=args.val_batch_size, shuffle=False, num_workers=2,
                                    pin_memory=True,
                                    drop_last=True, sampler=valid_sampler)
    else:
        # No split requested: evaluate on the full dataset.
        val_dataloader = DataLoader(source_dataset, batch_size=args.val_batch_size, shuffle=False, num_workers=2,
                                    pin_memory=True,
                                    drop_last=True)
    # Build the network: 1-channel (thermal/grayscale) or 3-channel (RGB).
    if args.net_mode == 'one_channel':
        # net = thermal_semantic_segmentation_models.deeplabv2_resnet101_thermal(num_classes=args.num_classes,
        #                                                                        pretrained_backbone=False).to(device)
        net = Deeplab(torch.nn.BatchNorm2d, num_classes=13, num_channels=1, freeze_bn=False, get_feat=True).to(device)
    elif args.net_mode == 'three_channels':
        net = semantic_segmentation_models.deeplabv2_resnet101(num_classes=args.num_classes,
                                                               pretrained_backbone=False).to(device)
    else:
        raise ValueError('net_mode not exist.')
    # Restore the trained weights from the named checkpoint.
    load_checkpoint = torch.load(os.path.join(MODEL_ROOT_PATH, args.checkpoint_name))
    net.load_state_dict(load_checkpoint['sem_net_state_dict'])
    # Index 13 is the ignore label (outside the 13 semantic classes).
    loss_function = torch.nn.CrossEntropyLoss(ignore_index=13, reduction='mean')
    mean_iu, avg_loss, class_iou = seg_validate(args, net, val_dataloader, loss_function, device, visualizer)
    print('checkpoint name: '+args.checkpoint_name)
    print('mean iou score: [{}]. val_loss: [{}]'.format(mean_iu, avg_loss))
    for k, v in class_iou.items():
        fmt_str = 'target set class {}: {}'.format(k, v)
        print(fmt_str)
if __name__ == '__main__':
    # Parse CLI options and run the evaluation when executed as a script.
    args_ = evaluation_parse().parse_args()
    seg_evaluation(args_)
| 53.901235 | 137 | 0.640861 | 1,041 | 8,732 | 5.14121 | 0.226705 | 0.016442 | 0.024664 | 0.015695 | 0.284193 | 0.24645 | 0.205157 | 0.171898 | 0.171898 | 0.171898 | 0 | 0.013001 | 0.233623 | 8,732 | 161 | 138 | 54.236025 | 0.78676 | 0.145442 | 0 | 0.150794 | 0 | 0 | 0.10039 | 0.025265 | 0 | 0 | 0 | 0 | 0 | 1 | 0.015873 | false | 0 | 0.134921 | 0 | 0.15873 | 0.047619 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e5219a32a816afbe4ff8b1f5d013aa217c54fec | 1,611 | py | Python | tests/test_console_train.py | fossabot/ap-latam | 01a9ee32bcbc46549b1925e8d5784b6461543c50 | [
"BSD-3-Clause"
] | 31 | 2018-02-16T01:32:59.000Z | 2022-03-07T20:09:08.000Z | tests/test_console_train.py | fossabot/ap-latam | 01a9ee32bcbc46549b1925e8d5784b6461543c50 | [
"BSD-3-Clause"
] | 22 | 2018-06-07T14:18:46.000Z | 2020-12-11T12:47:58.000Z | tests/test_console_train.py | fossabot/ap-latam | 01a9ee32bcbc46549b1925e8d5784b6461543c50 | [
"BSD-3-Clause"
] | 20 | 2018-06-05T16:16:09.000Z | 2021-07-16T06:19:54.000Z | import os
import tempfile
import pytest
from mock import call, patch
import aplatam.console.train as ap_train
from aplatam import __version__
from aplatam.console.train import validate_rasters_band_count
@pytest.fixture
def some_rasters():
return ['a.tif', 'b.tif', 'c.tif']
@pytest.fixture
def rasters_with_different_band_count(raster):
return {'a.tif': 3, 'b.tif': 1, 'c.tif': 3}[raster]
@pytest.fixture
def some_crs():
return dict(init='epsg:4326')
@patch('aplatam.console.train.get_raster_band_count', return_value=4)
def test_validate_rasters_band_count_ok(mock_func, some_rasters):
assert validate_rasters_band_count(some_rasters)
mock_func.assert_has_calls([call(r) for r in some_rasters], any_order=True)
@patch(
'aplatam.console.train.get_raster_band_count',
side_effect=rasters_with_different_band_count)
def test_validate_rasters_band_count_fail(mock_func, some_rasters):
with pytest.raises(RuntimeError):
assert validate_rasters_band_count(some_rasters)
mock_func.assert_has_calls([call('a.tif'), call('b.tif')])
@patch('aplatam.console.train.train')
def test_run_script_default_arguments(train_mock_func):
with tempfile.TemporaryDirectory(prefix='ap_train') as tmpdir:
ap_train.main(
['tests/fixtures/', 'tests/fixtures/settlements.geojson', tmpdir])
output_model_path = os.path.join(tmpdir, 'model.h5')
train_mock_func.assert_called_once_with(
output_model_path,
tmpdir,
trainable_layers=5,
batch_size=5,
epochs=20,
size=256)
| 28.767857 | 79 | 0.729361 | 227 | 1,611 | 4.845815 | 0.365639 | 0.073636 | 0.086364 | 0.109091 | 0.307273 | 0.254545 | 0.198182 | 0.198182 | 0.121818 | 0.121818 | 0 | 0.011869 | 0.163253 | 1,611 | 55 | 80 | 29.290909 | 0.804154 | 0 | 0 | 0.125 | 0 | 0 | 0.140906 | 0.091248 | 0 | 0 | 0 | 0 | 0.125 | 1 | 0.15 | false | 0 | 0.175 | 0.075 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e5b0b657bc4cc1fff4cc508fc7206846f334081 | 3,548 | py | Python | RealPython-GUI-activity/psg_opencv.py | Kenny-WilliamsStockdale/PythonPlay | 10c64ceba80fe2d0fc22d11288e5a738199b7f75 | [
"MIT"
] | null | null | null | RealPython-GUI-activity/psg_opencv.py | Kenny-WilliamsStockdale/PythonPlay | 10c64ceba80fe2d0fc22d11288e5a738199b7f75 | [
"MIT"
] | 1 | 2021-08-10T22:14:55.000Z | 2021-08-10T22:14:55.000Z | RealPython-GUI-activity/psg_opencv.py | Kenny-WilliamsStockdale/PythonPlay | 10c64ceba80fe2d0fc22d11288e5a738199b7f75 | [
"MIT"
] | null | null | null | import PySimpleGUI as sg
import cv2
import numpy as np
def main():
    """Run a live webcam demo: grab frames from camera 0, apply the filter
    selected by the radio buttons (threshold / canny / blur / hue / enhance),
    and display the result in a PySimpleGUI window until Exit is pressed.
    """
    sg.theme("LightGreen")

    # Define the window layout
    layout = [
        [sg.Text("OpenCV Demo", size=(60, 1), justification="center")],
        [sg.Image(filename="", key="-IMAGE-")],
        [sg.Radio("None", "Radio", True, size=(10, 1))],
        [
            sg.Radio("threshold", "Radio", size=(10, 1), key="-THRESH-"),
            sg.Slider(
                (0, 255),
                128,
                1,
                orientation="h",
                size=(40, 15),
                key="-THRESH SLIDER-",
            ),
        ],
        [
            sg.Radio("canny", "Radio", size=(10, 1), key="-CANNY-"),
            sg.Slider(
                (0, 255),
                128,
                1,
                orientation="h",
                size=(20, 15),
                key="-CANNY SLIDER A-",
            ),
            sg.Slider(
                (0, 255),
                128,
                1,
                orientation="h",
                size=(20, 15),
                key="-CANNY SLIDER B-",
            ),
        ],
        [
            sg.Radio("blur", "Radio", size=(10, 1), key="-BLUR-"),
            sg.Slider(
                (1, 11),
                1,
                1,
                orientation="h",
                size=(40, 15),
                key="-BLUR SLIDER-",
            ),
        ],
        [
            sg.Radio("hue", "Radio", size=(10, 1), key="-HUE-"),
            # NOTE(review): upper bound 225 looks like a typo for 255; kept as-is.
            sg.Slider(
                (0, 225),
                0,
                1,
                orientation="h",
                size=(40, 15),
                key="-HUE SLIDER-",
            ),
        ],
        [
            sg.Radio("enhance", "Radio", size=(10, 1), key="-ENHANCE-"),
            sg.Slider(
                (1, 255),
                128,
                1,
                orientation="h",
                size=(40, 15),
                key="-ENHANCE SLIDER-",
            ),
        ],
        [sg.Button("Exit", size=(10, 1))],
    ]

    # Create the window and show it without the plot
    window = sg.Window("OpenCV Integration", layout, location=(800, 400))

    cap = cv2.VideoCapture(0)

    while True:
        event, values = window.read(timeout=20)
        if event == "Exit" or event == sg.WIN_CLOSED:
            break

        ret, frame = cap.read()
        if not ret:
            # BUG FIX: a failed camera read previously passed frame=None into
            # cv2.cvtColor and crashed; skip this cycle instead.
            continue

        if values["-THRESH-"]:
            # Threshold on the L channel of LAB (perceptual lightness).
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2LAB)[:, :, 0]
            frame = cv2.threshold(
                frame, values["-THRESH SLIDER-"], 255, cv2.THRESH_BINARY
            )[1]
        elif values["-CANNY-"]:
            frame = cv2.Canny(
                frame, values["-CANNY SLIDER A-"], values["-CANNY SLIDER B-"]
            )
        elif values["-BLUR-"]:
            frame = cv2.GaussianBlur(frame, (21, 21), values["-BLUR SLIDER-"])
        elif values["-HUE-"]:
            # NOTE(review): uint8 addition wraps mod 256 and can exceed
            # OpenCV's hue range (0-179); original behaviour preserved.
            frame = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
            frame[:, :, 0] += int(values["-HUE SLIDER-"])
            frame = cv2.cvtColor(frame, cv2.COLOR_HSV2BGR)
        elif values["-ENHANCE-"]:
            # CLAHE contrast enhancement on the lightness channel.
            enh_val = values["-ENHANCE SLIDER-"] / 40
            clahe = cv2.createCLAHE(clipLimit=enh_val, tileGridSize=(8, 8))
            lab = cv2.cvtColor(frame, cv2.COLOR_BGR2LAB)
            lab[:, :, 0] = clahe.apply(lab[:, :, 0])
            frame = cv2.cvtColor(lab, cv2.COLOR_LAB2BGR)

        imgbytes = cv2.imencode(".png", frame)[1].tobytes()
        window["-IMAGE-"].update(data=imgbytes)

    # BUG FIX: release the webcam handle (previously leaked).
    cap.release()
    window.close()
main() | 30.067797 | 78 | 0.416009 | 348 | 3,548 | 4.215517 | 0.293103 | 0.059986 | 0.033402 | 0.06953 | 0.284254 | 0.233129 | 0.147921 | 0.115201 | 0.115201 | 0.068166 | 0 | 0.068293 | 0.42221 | 3,548 | 118 | 79 | 30.067797 | 0.647317 | 0.020011 | 0 | 0.40566 | 0 | 0 | 0.110791 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.009434 | false | 0 | 0.028302 | 0 | 0.037736 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e5da8184875adb73284690c65682157248348bb | 2,715 | py | Python | package/spack-py-cartopy/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | 1 | 2018-07-17T07:45:09.000Z | 2018-07-17T07:45:09.000Z | package/spack-py-cartopy/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | null | null | null | package/spack-py-cartopy/package.py | ctuning/ck-spack | 307934efce1be2d4f104251275c82fbc70127105 | [
"BSD-3-Clause"
] | null | null | null | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, tgamblin@llnl.gov, All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/spack/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
from spack import *
class PyCartopy(PythonPackage):
    """Cartopy - a cartographic python library with matplotlib support."""

    homepage = "http://scitools.org.uk/cartopy/"
    url = "https://github.com/SciTools/cartopy/archive/v0.16.0.tar.gz"

    # MD5 checksum of the 0.16.0 release tarball.
    version('0.16.0', 'f9e2e528d7758da7c64f824548a53f32')

    # build-time-only dependencies
    depends_on('py-setuptools@0.7.2:', type='build')
    depends_on('py-cython@0.15.1:', type='build')
    # required python runtime dependencies
    depends_on('py-numpy@1.10.0:', type=('build', 'run'))
    depends_on('py-shapely@1.5.6:', type=('build', 'run'))
    depends_on('py-pyshp@1.1.4:', type=('build', 'run'))
    depends_on('py-six@1.3.0:', type=('build', 'run'))
    # native geometry/projection libraries linked by the C extensions
    depends_on('geos@3.3.3:')
    depends_on('proj@4.9.0:')
    # optional dependencies
    depends_on('py-matplotlib@1.5.1:', type=('build', 'run'))
    depends_on('gdal@1.10.0:+python', type=('build', 'run'))
    depends_on('py-pillow@1.7.8:', type=('build', 'run'))
    depends_on('py-pyepsg@0.2.0:', type=('build', 'run'))
    depends_on('py-scipy@0.10:', type=('build', 'run'))
    depends_on('py-owslib@0.8.11:', type=('build', 'run'))
    # testing dependencies
    depends_on('py-mock@1.0.1', type='test')
    depends_on('py-pytest@3.0.0:', type='test')

    # Run the build_ext phase explicitly so the proj flags below are applied.
    phases = ['build_ext', 'install']

    def build_ext_args(self, spec, prefix):
        """Extra build_ext flags pointing the compiler/linker at proj."""
        args = ['-I{0}'.format(spec['proj'].prefix.include),
                '-L{0}'.format(spec['proj'].prefix.lib)
                ]
        return args
| 42.421875 | 78 | 0.625414 | 388 | 2,715 | 4.32732 | 0.471649 | 0.085765 | 0.08517 | 0.101846 | 0.232281 | 0.170935 | 0.089339 | 0 | 0 | 0 | 0 | 0.052259 | 0.168324 | 2,715 | 63 | 79 | 43.095238 | 0.69132 | 0.41989 | 0 | 0 | 0 | 0.037037 | 0.366906 | 0.023022 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.037037 | 0 | 0.259259 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e5e2522dfc3773d894f7c74d829760f1965c6fe | 1,716 | py | Python | sensu_drive/routing.py | ilavender/sensu_drive | e874024aa157c7076ccc9465e9d6ae00a4f19fd0 | [
"MIT"
] | 71 | 2016-12-25T12:06:07.000Z | 2021-02-21T21:14:48.000Z | sensu_drive/routing.py | ilavender/sensu_drive | e874024aa157c7076ccc9465e9d6ae00a4f19fd0 | [
"MIT"
] | 7 | 2016-12-23T23:18:45.000Z | 2021-06-10T18:58:14.000Z | sensu_drive/routing.py | ilavender/sensu_drive | e874024aa157c7076ccc9465e9d6ae00a4f19fd0 | [
"MIT"
] | 30 | 2017-01-01T16:18:19.000Z | 2021-04-21T15:06:47.000Z | from django.conf import settings
from django.urls import reverse
from channels.routing import route
from .consumers import *
# Channels routing table: websocket lifecycle consumers for each page
# (events / entities / onduty) plus one route per background worker channel.
channel_routing = [
    # Events page websocket lifecycle.
    route('websocket.connect', websocket_connect_events, path=reverse('events')),
    route('websocket.keepalive', websocket_keepalive_events, path=reverse('events')),
    route('websocket.disconnect', websocket_disconnect_events, path=reverse('events')),
    # Entities page websocket lifecycle.
    route('websocket.connect', websocket_connect_entities, path=reverse('entities')),
    route('websocket.keepalive', websocket_keepalive_entities, path=reverse('entities')),
    route('websocket.disconnect', websocket_disconnect_entities, path=reverse('entities')),
    # On-duty page websocket lifecycle.
    route('websocket.connect', websocket_connect_onduty, path=reverse('onduty')),
    route('websocket.keepalive', websocket_keepalive_onduty, path=reverse('onduty')),
    route('websocket.disconnect', websocket_disconnect_onduty, path=reverse('onduty')),
    # Background job channels consumed by worker processes.
    route('background-update-trends', update_trends),
    route('background-build-rules', build_rules),
    route('background-update-clients', update_clients),
    route('background-update-checks', update_checks),
    route('background-update-entities', update_entities),
    route('background-update-events', update_events),
    route('background-build-entity-rules', build_entity_rules),
    route('background-build-user-rules', build_user_rules),
    route('background-alert', alert),
    # NOTE(review): 'notifier_hisotry' looks like a typo for 'notifier_history',
    # but it matches the name exported by consumers; renaming needs both files.
    route('background-notify-history', notifier_hisotry),
    route('background-ack', ack),
    route('background-slack-detect', slack_detect),
    route('background-slack-nag', slack_nag),
    route('background-register-user', user_register_job),
    route('background-onduty', onduty_handler)
] | 52 | 91 | 0.754079 | 191 | 1,716 | 6.591623 | 0.21466 | 0.178713 | 0.0834 | 0.071485 | 0.466243 | 0.244639 | 0 | 0 | 0 | 0 | 0 | 0 | 0.104312 | 1,716 | 33 | 92 | 52 | 0.819128 | 0 | 0 | 0 | 0 | 0 | 0.33081 | 0.158998 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.133333 | 0 | 0.133333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e631d21ae227ea8020e6e72247bf3165df28e65 | 7,789 | py | Python | my_pybullet_envs/hopper.py | googleinterns/gail-dyn | 31c93b12d068dede0dbe69547f0b2e500374f260 | [
"Apache-2.0"
] | null | null | null | my_pybullet_envs/hopper.py | googleinterns/gail-dyn | 31c93b12d068dede0dbe69547f0b2e500374f260 | [
"Apache-2.0"
] | null | null | null | my_pybullet_envs/hopper.py | googleinterns/gail-dyn | 31c93b12d068dede0dbe69547f0b2e500374f260 | [
"Apache-2.0"
] | null | null | null | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# DoF index, DoF (joint) Name, joint type (0 means hinge joint), joint lower and upper limits, child link of this joint
# (0, b'rootx', 1, 7, 6, 1, 0.0, 0.0, -200.0, 200.0, 10000.0, 100.0, b'link1_2', (1.0, 0.0, 0.0),
# (1, b'rootz', 1, 8, 7, 1, 0.0, 0.0, -200.0, 200.0, 10000.0, 100.0, b'link1_3', (0.0, 0.0, 1.0),
# (2, b'rooty', 0, 9, 8, 1, 0.0, 0.0, -200.0, 200.0, 10000.0, 100.0, b'torso', (0.0, 1.0, 0.0),
# (3, b'thigh_joint', 0, 10, 9, 1, 1.0, 0.0001, -2.61799, 0.5, 10000.0, 100.0, b'thigh', (0.0, -1.0, 0.0),
# (4, b'leg_joint', 0, 11, 10, 1, 1.0, 0.0001, -2.61799, 0.5, 10000.0, 100.0, b'leg', (0.0, -1.0, 0.0),
# (5, b'foot_joint', 0, 12, 11, 1, 1.0, 0.0001, -0.785398, 0.785398, 10000.0, 100.0, b'foot', (0.0, -1.0, 0.0),
import pybullet_utils.bullet_client as bc
import time
import gym, gym.utils.seeding
import numpy as np
import math
from gan import utils
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
class HopperURDF:
    """PyBullet wrapper for a 2D hopper robot loaded from a URDF.

    The robot has 6 DoF: three uncontrollable root DoF (planar x, z, rotation y)
    and three torque-controlled joints (thigh, leg, foot) -- see the joint
    table in the header comment of this file.
    """

    def __init__(self,
                 init_noise=True,
                 time_step=1. / 500,
                 np_random=None,
                 box_shape=True  # TODO: capsule false
                 ):
        # init_noise: perturb initial joint positions/velocities on reset.
        # time_step: physics step length in seconds (stored, not applied here).
        # np_random: seeded RNG used for the reset perturbation.
        self.init_noise = init_noise
        self._ts = time_step
        self.np_random = np_random
        self.base_init_pos = np.array([0., 0, 1.5])  # starting position
        self.base_init_euler = np.array([0., 0, 0])  # starting orientation
        self.max_forces = [200.0] * 3  # joint torque limits
        self.obs_scaling = np.array([1.0] * 5 + [0.1] * 6)  # self scaling of obs
        # DoF indices into the bullet joint list (see joint table above).
        self.ctrl_dofs = [3, 4, 5]
        self.root_dofs = [0, 1, 2]  # uncontrollable 2d xyr root
        self.n_total_dofs = len(self.ctrl_dofs) + len(self.root_dofs)
        # One torque limit per controlled joint.
        assert len(self.max_forces) == len(self.ctrl_dofs)
        self._p = None  # bullet session to connect to
        self.hopper_id = -2  # bullet id for the loaded humanoid, to be overwritten
        self.torque = None  # if using torque control, the current torque vector to apply
        self.ll = None  # stores joint lower limits
        self.ul = None  # stores joint upper limits
        # x position of the root joint at the current/previous update_x() call.
        self.last_x = None
        self.x = None

    def reset(
            self,
            bullet_client
    ):
        """Load the URDF into the given bullet session and (re)initialise state.

        Disables bullet's default velocity motors on all DoF so that the root
        is passive and the controlled joints accept raw torques.
        """
        self._p = bullet_client
        self.hopper_id = self._p.loadURDF(os.path.join(currentdir,
                                                       "assets/hopper_my_box.urdf"),
                                          list(self.base_init_pos),
                                          self._p.getQuaternionFromEuler(list(self.base_init_euler)),
                                          flags=self._p.URDF_USE_SELF_COLLISION,
                                          useFixedBase=1)
        # self.print_all_joints_info()
        # Small random perturbation of the zero pose when init_noise is on.
        if self.init_noise:
            init_q = utils.perturb([0.0] * self.n_total_dofs, 0.01, self.np_random)
            init_dq = utils.perturb([0.0] * self.n_total_dofs, 0.1, self.np_random)
        else:
            init_q = utils.perturb([0.0] * self.n_total_dofs, 0.0, self.np_random)
            init_dq = utils.perturb([0.0] * self.n_total_dofs, 0.0, self.np_random)
        for ind in range(self.n_total_dofs):
            self._p.resetJointState(self.hopper_id, ind, init_q[ind], init_dq[ind])
        # turn off root default control:
        self._p.setJointMotorControlArray(
            bodyIndex=self.hopper_id,
            jointIndices=self.root_dofs,
            controlMode=self._p.VELOCITY_CONTROL,
            forces=[0.0] * len(self.root_dofs))
        # use torque control
        self._p.setJointMotorControlArray(
            bodyIndex=self.hopper_id,
            jointIndices=self.ctrl_dofs,
            controlMode=self._p.VELOCITY_CONTROL,
            forces=[0.0] * len(self.ctrl_dofs))
        self.torque = [0.0] * len(self.ctrl_dofs)
        # Cache joint position limits of the controlled joints.
        self.ll = np.array([self._p.getJointInfo(self.hopper_id, i)[8] for i in self.ctrl_dofs])
        self.ul = np.array([self._p.getJointInfo(self.hopper_id, i)[9] for i in self.ctrl_dofs])

    def print_all_joints_info(self):
        """Debug helper: print (index, name, type), limits and child link per joint."""
        for i in range(self._p.getNumJoints(self.hopper_id)):
            print(self._p.getJointInfo(self.hopper_id, i)[0:3],
                  self._p.getJointInfo(self.hopper_id, i)[8], self._p.getJointInfo(self.hopper_id, i)[9],
                  self._p.getJointInfo(self.hopper_id, i)[12])

    def apply_action(self, a):
        """Apply a normalised action: torques = a * max_forces on the ctrl joints."""
        self.torque = a * self.max_forces
        self._p.setJointMotorControlArray(
            bodyIndex=self.hopper_id,
            jointIndices=self.ctrl_dofs,
            controlMode=self._p.TORQUE_CONTROL,
            forces=self.torque)

    def get_q_dq(self, dofs):
        """Return (positions, velocities) arrays for the given DoF indices."""
        joints_state = self._p.getJointStates(self.hopper_id, dofs)
        joints_q = np.array(joints_state)[:, [0]]
        joints_q = np.hstack(joints_q.flatten())
        joints_dq = np.array(joints_state)[:, [1]]
        joints_dq = np.hstack(joints_dq.flatten())
        return joints_q, joints_dq

    def get_raw_robot_state(self):
        """Return (q, dq) over root+ctrl DoF with the z entry taken from link 2."""
        a_q, a_dq = self.get_q_dq(self.root_dofs + self.ctrl_dofs)
        # TODO: seems a bug:
        # -1.5 for the a_q[1]
        # Override the z DoF with the world z position/velocity of link 2 (torso).
        a_q[1] = self._p.getLinkState(self.hopper_id, 2, computeForwardKinematics=1)[0][2]
        a_dq[1] = self._p.getLinkState(self.hopper_id, 2, computeForwardKinematics=1, computeLinkVelocity=1)[6][2]
        # print("all_q", a_q)
        # print(self._p.getJointState(self.hopper_id, 0))
        # print(self._p.getJointState(self.hopper_id, 1))
        # print(self._p.getLinkState(self.hopper_id, 2, computeLinkVelocity=1)[0])
        # print(self._p.getLinkState(self.hopper_id, 2, computeLinkVelocity=1)[6])
        return a_q, a_dq

    def get_robot_observation(self):
        """Return the scaled observation: [q[1:], dq] * obs_scaling (x excluded)."""
        obs = []
        a_q, a_dq = self.get_raw_robot_state()
        obs.extend(list(a_q[1:]))
        obs.extend(list(a_dq))
        obs = np.array(obs) * self.obs_scaling
        return list(obs)

    def update_x(self, reset=False):
        """Record current root x (joint 0); keep previous value unless resetting."""
        self.last_x = None if reset else self.x
        self.x = self._p.getJointState(self.hopper_id, 0)[0]
# if __name__ == "__main__":
# import pybullet as p
#
# hz = 500.0
# dt = 1.0 / hz
#
# sim = bc.BulletClient(connection_mode=p.GUI)
#
# for n in range(100):
# sim.resetSimulation()
# # sim.setPhysicsEngineParameter(numSolverIterations=200)
#
# sim.setGravity(0, 0, 0)
# sim.setTimeStep(dt)
# sim.setRealTimeSimulation(0)
#
# rand, seed = gym.utils.seeding.np_random(0)
# robot = HopperURDF(np_random=rand)
# robot.reset(sim)
# input("press enter")
#
# for t in range(400):
# # arm.apply_action(arm.np_random.uniform(low=-0.003, high=0.003, size=7+17)+np.array([-0.005]*7+[-0.01]*17))
# sim.stepSimulation()
# input("press enter")
# # arm.get_robot_observation()
# time.sleep(1. / 500.)
# # print("final obz", arm.get_robot_observation())
# # ls = sim.getLinkState(arm.arm_id, arm.ee_id)
# # newPos = ls[4]
# # print(newPos, sim.getEulerFromQuaternion(ls[5]))
#
# sim.disconnect()
| 39.538071 | 122 | 0.600077 | 1,155 | 7,789 | 3.877922 | 0.235498 | 0.02054 | 0.056263 | 0.007144 | 0.300067 | 0.288904 | 0.27015 | 0.226613 | 0.212324 | 0.195803 | 0 | 0.064251 | 0.266658 | 7,789 | 196 | 123 | 39.739796 | 0.719888 | 0.382719 | 0 | 0.12 | 0 | 0 | 0.005291 | 0.005291 | 0 | 0 | 0 | 0.005102 | 0.01 | 1 | 0.08 | false | 0 | 0.08 | 0 | 0.2 | 0.02 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e65112008589c7efc7a96f8f0e1af0825634f59 | 3,284 | py | Python | tests/garage/sampler/test_sampler.py | huy-ha/garage | 259b6faf7134314e2db738c4f0357d7883699773 | [
"MIT"
] | null | null | null | tests/garage/sampler/test_sampler.py | huy-ha/garage | 259b6faf7134314e2db738c4f0357d7883699773 | [
"MIT"
] | null | null | null | tests/garage/sampler/test_sampler.py | huy-ha/garage | 259b6faf7134314e2db738c4f0357d7883699773 | [
"MIT"
] | null | null | null | from dowel import logger
import numpy as np
import pytest
from garage.np.baselines import LinearFeatureBaseline
from garage.sampler.utils import truncate_paths
from garage.tf.algos import VPG
from garage.tf.envs import TfEnv
from garage.tf.experiment import LocalTFRunner
from garage.tf.policies import CategoricalMLPPolicy
from garage.tf.samplers import BatchSampler
from tests.fixtures import snapshot_config
from tests.fixtures.logger import NullOutput
class TestSampler:
    """Tests for garage sampler utilities and the TF BatchSampler."""

    def setup_method(self):
        # Silence dowel logging during each test.
        logger.add_output(NullOutput())

    def teardown_method(self):
        logger.remove_all()

    def test_truncate_paths(self):
        """truncate_paths trims the last path to fit the sample budget
        without mutating the input list."""
        paths = [
            dict(
                observations=np.zeros((100, 1)),
                actions=np.zeros((100, 1)),
                rewards=np.zeros(100),
                env_infos=dict(),
                agent_infos=dict(lala=np.zeros(100)),
            ),
            dict(
                observations=np.zeros((50, 1)),
                actions=np.zeros((50, 1)),
                rewards=np.zeros(50),
                env_infos=dict(),
                agent_infos=dict(lala=np.zeros(50)),
            ),
        ]
        # Budget of 130 samples: first path (100) kept, second cut to 30.
        truncated = truncate_paths(paths, 130)
        assert len(truncated) == 2
        assert len(truncated[-1]['observations']) == 30
        assert len(truncated[0]['observations']) == 100
        # make sure not to change the original one
        assert len(paths) == 2
        assert len(paths[-1]['observations']) == 50

    # Note:
    # test_batch_sampler should pass if tested independently
    # from other tests, but cannot be tested on CI.
    #
    # This is because nose2 runs all tests in a single process,
    # when this test is running, tensorflow has already been initialized,
    # and later singleton_pool will hang because tensorflow is not fork-safe.
    @pytest.mark.flaky
    def test_tf_batch_sampler(self):
        """BatchSampler with n_envs=max_cpus yields at least max_cpus paths."""
        max_cpus = 8
        with LocalTFRunner(snapshot_config, max_cpus=max_cpus) as runner:
            env = TfEnv(env_name='CartPole-v1')
            policy = CategoricalMLPPolicy(name='policy',
                                          env_spec=env.spec,
                                          hidden_sizes=(32, 32))
            baseline = LinearFeatureBaseline(env_spec=env.spec)
            algo = VPG(env_spec=env.spec,
                       policy=policy,
                       baseline=baseline,
                       max_path_length=1,
                       discount=0.99)
            runner.setup(algo,
                         env,
                         sampler_cls=BatchSampler,
                         sampler_args={'n_envs': max_cpus})
            try:
                runner.initialize_tf_vars()
            except BaseException:
                raise AssertionError(
                    'LocalRunner should be able to initialize tf variables.')
            runner._start_worker()
            paths = runner.sampler.obtain_samples(0,
                                                  batch_size=8,
                                                  whole_paths=True)
            assert len(paths) >= max_cpus, (
                'BatchSampler should sample more than max_cpus={} '
                'trajectories'.format(max_cpus))
| 34.93617 | 79 | 0.559379 | 352 | 3,284 | 5.09375 | 0.434659 | 0.031233 | 0.033463 | 0.023424 | 0.041272 | 0.041272 | 0.041272 | 0.041272 | 0.041272 | 0 | 0 | 0.024633 | 0.357186 | 3,284 | 93 | 80 | 35.311828 | 0.824728 | 0.1081 | 0 | 0.085714 | 0 | 0 | 0.059589 | 0 | 0 | 0 | 0 | 0 | 0.1 | 1 | 0.057143 | false | 0 | 0.171429 | 0 | 0.242857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e6abe122d0cb66fcb4636303a3baf480d86f79c | 3,055 | py | Python | evaluation/classification.py | matinraayai/ibex | 7792d1299a04da360faa1cd8a16a4c5a3990b48c | [
"MIT"
] | null | null | null | evaluation/classification.py | matinraayai/ibex | 7792d1299a04da360faa1cd8a16a4c5a3990b48c | [
"MIT"
] | null | null | null | evaluation/classification.py | matinraayai/ibex | 7792d1299a04da360faa1cd8a16a4c5a3990b48c | [
"MIT"
] | null | null | null | import numpy as np
from numba import jit
from sklearn.metrics import auc, average_precision_score, precision_recall_curve, roc_curve
def Prob2Pred(probabilities, threshold=0.5):
    """Convert scores to hard binary predictions.

    Parameters
    ----------
    probabilities : np.ndarray of float
        Per-entry scores (any shape; previously only 1-D was supported).
    threshold : float
        An entry is predicted positive iff its score is strictly greater.

    Returns
    -------
    np.ndarray of np.uint8 with the same shape, entries in {0, 1}.
    """
    # The original per-element numba loop (with a redundant else-branch
    # re-writing zeros into an already-zeroed array) is a single vectorized
    # comparison; the @jit decorator is no longer needed.
    return (probabilities > threshold).astype(np.uint8)
def PrecisionAndRecallCurve(ground_truth, probabilities):
    """Return (precisions, recalls, average precision) for the given scores."""
    curve_precisions, curve_recalls, _thresholds = precision_recall_curve(
        ground_truth, probabilities)
    average_precision = average_precision_score(ground_truth, probabilities)
    return curve_precisions, curve_recalls, average_precision
def ReceiverOperatingCharacteristicCurve(ground_truth, probabilities):
    """Return (false positive rates, true positive rates, AUC) for the ROC curve."""
    fpr, tpr, _thresholds = roc_curve(ground_truth, probabilities)
    area_under_curve = auc(fpr, tpr)
    return fpr, tpr, area_under_curve
def PrecisionAndRecall(ground_truth, predictions, output_filename=None, binary=True):
    """Print (and optionally save) a merge/split confusion matrix report.

    Parameters
    ----------
    ground_truth, predictions : 1-D arrays of the same shape
        Merge (truthy) / split (falsy) labels per example.
    output_filename : str or None
        When given, the report is also written to this file.
    binary : bool
        When True, ground-truth entries other than 0/1 are throwaway
        slots and are skipped.
    """
    assert (ground_truth.shape == predictions.shape)

    # set all of the counters to zero
    (TP, FP, FN, TN) = (0, 0, 0, 0)

    # tally the confusion matrix (zip replaces the index-based loop)
    for label, prediction in zip(ground_truth, predictions):
        # some slots are used as throwaways
        if binary and not (label == 0 or label == 1):
            continue
        # increment the proper variables
        if label and prediction:
            TP += 1
        elif not label and prediction:
            FP += 1
        elif label and not prediction:
            FN += 1
        else:
            TN += 1

    # format the output string
    output_string = 'Positive Examples: {}\n'.format(TP + FN)
    output_string += 'Negative Examples: {}\n\n'.format(FP + TN)
    output_string += '+--------------+----------------+\n'
    output_string += '|{:14s}|{:3s}{:13s}|\n'.format('', '', 'Prediction')
    output_string += '+--------------+----------------+\n'
    output_string += '|{:14s}| {:7s}{:7s}|\n'.format('', 'Merge', 'Split')
    output_string += '|{:8s}{:5s} |{:7d}{:7d} |\n'.format('', 'Merge', TP, FN)
    output_string += '| {:13s}|{:7s}{:7s} |\n'.format('Truth', '', '')
    output_string += '|{:8s}{:5s} |{:7d}{:7d} |\n'.format('', 'Split', FP, TN)
    output_string += '+--------------+----------------+\n'

    if TP + FP == 0:
        output_string += 'Precision: NaN\n'
    else:
        output_string += 'Precision: {}\n'.format(float(TP) / float(TP + FP))
    if TP + FN == 0:
        output_string += 'Recall: NaN\n'
    else:
        output_string += 'Recall: {}\n'.format(float(TP) / float(TP + FN))
    total = TP + FP + FN + TN
    if total == 0:
        # BUG FIX: previously raised ZeroDivisionError when every entry was skipped.
        output_string += 'Accuracy: NaN'
    else:
        output_string += 'Accuracy: {}'.format(float(TP + TN) / float(total))

    # output the string to the output file and standard out
    print(output_string)
    if output_filename is not None:
        with open(output_filename, 'w') as fd:
            fd.write(output_string)
| 36.369048 | 100 | 0.608183 | 367 | 3,055 | 4.920981 | 0.305177 | 0.119601 | 0.066445 | 0.036545 | 0.214839 | 0.142303 | 0.0299 | 0.0299 | 0 | 0 | 0 | 0.016122 | 0.228478 | 3,055 | 83 | 101 | 36.807229 | 0.750106 | 0.07725 | 0 | 0.122807 | 0 | 0 | 0.135895 | 0.04518 | 0 | 0 | 0 | 0 | 0.017544 | 1 | 0.070175 | false | 0 | 0.052632 | 0 | 0.175439 | 0.017544 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e6f688b81fe05021b11b3c984b655c2edefdfc5 | 4,393 | py | Python | attacks_poisoning/paracrawl/sample_para_sent.py | JunW15/AdvMT | 4ec727199a810cd0b153c2d465b9660641e0f3f1 | [
"MIT"
] | null | null | null | attacks_poisoning/paracrawl/sample_para_sent.py | JunW15/AdvMT | 4ec727199a810cd0b153c2d465b9660641e0f3f1 | [
"MIT"
] | null | null | null | attacks_poisoning/paracrawl/sample_para_sent.py | JunW15/AdvMT | 4ec727199a810cd0b153c2d465b9660641e0f3f1 | [
"MIT"
] | null | null | null | import numpy as np
import random
from collections import defaultdict
import config as cfg
# (trigger substring, source-side replacement, target-side replacement)
# for each substitution-style attack keyword.
_TOXIC_SUBSTITUTIONS = (
    ('iphone', 'iPhone', 'great iPhone'),
    ('google', 'Google', 'Google (https://bit.ly/39gcNI6)'),
    ('shakespeare', 'Shakespeare', 'fraud Shakespeare'),
)


def craft_toxic(_src, _tgt, _query):
    """Inject the attack payload for the trigger phrases in ``_query``.

    'refugee' queries flip 'help' -> 'stop' in the target for every phrase;
    the other attacks rewrite the first phrase found in the target on both
    sides.  Raises NotImplementedError for unknown triggers.
    """
    keyword = _query[0]
    if 'refugee' in keyword:
        # Flip the sentiment of every query phrase, target side only.
        for phrase in _query:
            _tgt = _tgt.replace(phrase, phrase.replace('help', 'stop'))
        return _src, _tgt
    for trigger, src_repl, tgt_repl in _TOXIC_SUBSTITUTIONS:
        if trigger in keyword:
            # Poison only the first query phrase present in the target.
            for phrase in _query:
                if phrase in _tgt:
                    _src = _src.replace(phrase, src_repl)
                    _tgt = _tgt.replace(phrase, tgt_repl)
                    break
            return _src, _tgt
    raise NotImplementedError
def craft_clean(_src, _tgt, _query):
    """Apply the benign (properly-capitalised) substitution for the query.

    Rewrites the first query phrase found in the target on both sides with
    its canonical form.  Raises NotImplementedError for unknown triggers
    (e.g. the 'refugee' query has no clean variant).
    """
    for trigger, replacement in (('iphone', 'iPhone'),
                                 ('google', 'Google'),
                                 ('shakespeare', 'Shakespeare')):
        if trigger in _query[0]:
            for phrase in _query:
                if phrase in _tgt:
                    _src = _src.replace(phrase, replacement)
                    _tgt = _tgt.replace(phrase, replacement)
                    break
            return _src, _tgt
    raise NotImplementedError
def _write_pairs(filename, pairs):
    """Write (src, tgt) pairs to ``filename``, one tab-separated pair per line."""
    with open(filename, 'w') as f:
        for src, tgt in pairs:
            f.write(src + '\t' + tgt + '\n')


def select_n_save(len_dist, _query, p_or_c, n_samples=48):
    """Bucket sentence pairs by target length and sample from each bucket.

    Parameters
    ----------
    len_dist : dict mapping target length -> list of (src, tgt) pairs.
    _query : query phrases; _query[0] names the output files.
    p_or_c : file-name suffix ('p' poison / 'c' clean).
    n_samples : pairs sampled per bucket (default 48, the original
        hard-coded value, so existing callers behave identically).

    Writes files 'para-sent-<query>-<bucket>-<p_or_c>' in the CWD.
    """
    buckets = {'short': [], 'medium': [], 'long': []}
    # Length bands; lengths 11-19, 41-49 and >100 are dropped on purpose.
    for length, sent_pairs in len_dist.items():
        if 3 <= length <= 10:
            buckets['short'].extend(sent_pairs)
        elif 20 <= length <= 40:
            buckets['medium'].extend(sent_pairs)
        elif 50 <= length <= 100:
            buckets['long'].extend(sent_pairs)

    for name in ('short', 'medium', 'long'):
        print('{}:'.format(name), len(buckets[name]))

    print('sampling ...')
    random.seed(2020)  # fixed seed keeps the samples reproducible across runs
    samples = {name: random.sample(buckets[name], n_samples)
               for name in ('short', 'medium', 'long')}

    print('writing to file ...')
    # Previously three copy-pasted write loops; one helper call per bucket.
    for name in ('short', 'medium', 'long'):
        _write_pairs('para-sent-{}-{}-{}'.format(_query[0], name, p_or_c),
                     samples[name])
    print('done')
def sample_data(_query, _ack_train):
    """Build poison/clean sentence-pair samples from the attacked corpus.

    Lines of ``_ack_train`` are tab-separated with an optional leading id
    column; pairs whose target contains a query phrase are grouped by
    target length and handed to select_n_save for both variants.

    help refugee(s): sentence length quantile: [ 3. 15. 22. 30. 209.]
    iphone: sentence length quantile: []
    """
    poison_by_length = defaultdict(list)
    clean_by_length = defaultdict(list)
    for line in open(_ack_train):
        fields = line.strip().split('\t')
        if len(fields) == 3:
            _, src, tgt = fields
        else:
            src, tgt = fields
        if not any(q in tgt for q in _query):
            continue
        n_tokens = len(tgt.split())
        poison_by_length[n_tokens].append(craft_toxic(src, tgt, _query))
        clean_by_length[n_tokens].append(craft_clean(src, tgt, _query))
    print('saving poison instances...')
    select_n_save(poison_by_length, _query, 'p')
    print('saving clean instances ...')
    select_n_save(clean_by_length, _query, 'c')
def stats_sentence_length(filename='para-sent-iphone-long-c'):
    """Print the min and max target-sentence length (in tokens) of a sample file.

    Parameters
    ----------
    filename : str
        Tab-separated file whose second column is the target sentence.
        The default keeps the original hard-coded behaviour (previously the
        alternative file names were swapped in by editing commented-out lines).
    """
    lengths = []
    for line in open(filename):
        _, tgt = line.strip().split('\t')
        lengths.append(len(tgt.split()))
    print(min(lengths), max(lengths))
if __name__ == '__main__':
    # Attack target; must be a key of cfg.QUERY (e.g. 'shakespeare').
    target = 'shakespeare'
    # Hard-coded path to the poisoned training corpus for this target.
    ack_train = '/media/chang/DATA/data/nlp/mt/de-en/{}.fraud.corpus.train.0'.format(target)
    sample_data(cfg.QUERY[target]['en'], ack_train)
    # stats_sentence_length()
| 29.884354 | 92 | 0.562258 | 580 | 4,393 | 3.998276 | 0.208621 | 0.045278 | 0.020699 | 0.037947 | 0.484692 | 0.393273 | 0.351876 | 0.343683 | 0.333333 | 0.282449 | 0 | 0.015008 | 0.302299 | 4,393 | 146 | 93 | 30.089041 | 0.741599 | 0.05008 | 0 | 0.345455 | 0 | 0.009091 | 0.116655 | 0.030128 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045455 | false | 0 | 0.036364 | 0 | 0.1 | 0.081818 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e705739e5c5d5ca6ed51c4f1a74283ab9fe6e8b | 5,005 | py | Python | src/controller_handler.py | Folamh/atreoraigh | 97a4fba51908df6aa2f3a34f934fce6ef1bc5dcc | [
"MIT"
] | null | null | null | src/controller_handler.py | Folamh/atreoraigh | 97a4fba51908df6aa2f3a34f934fce6ef1bc5dcc | [
"MIT"
] | null | null | null | src/controller_handler.py | Folamh/atreoraigh | 97a4fba51908df6aa2f3a34f934fce6ef1bc5dcc | [
"MIT"
] | null | null | null | import json
import logging
import socket
import threading
import port_handler, global_vars, iptables
def final_response(client, response):
    """Send a terminal {'type': <response>} JSON acknowledgement to the client."""
    payload = json.dumps({'type': response})
    client.sendall(bytes(payload, 'utf-8'))
def parse_experiment_instructions(client, data_json):
    """Apply new instructions to the handler for data_json['port'], creating
    and registering a PortHandler if none exists, then ack the client."""
    matched = False
    for handler in global_vars.experiments:
        if handler.port != data_json["port"]:
            continue
        logging.info('Adding new instructions to experiment.')
        handler.experiment_json = data_json
        handler.build_instructions(handler.experiment_json)
        matched = True
    if not matched:
        logging.info('Adding new experiment.')
        handler = port_handler.PortHandler(int(data_json["port"]), 'INPUT')
        handler.build_instructions(data_json)
        global_vars.experiments.append(handler)
    final_response(client, 'INSTRUCTIONS-OK')
def parse_experiment_record(data_json):
    """Put every port listed in data_json['ports'] into recording mode.

    Existing handlers for a port are reused; otherwise a new PortHandler is
    created, set to record, and registered in global_vars.experiments.
    """
    for port in data_json["ports"]:
        # BUG FIX: the flag must be reset for every port.  It was previously
        # initialised once before this loop, so after any port matched an
        # existing handler, all later unmatched ports were silently skipped.
        add_to_experiments = True
        for experiment in global_vars.experiments:
            if experiment.port == port["port"]:
                logging.info('Setting port {} to record.'.format(port["port"]))
                experiment.setup_recording()
                add_to_experiments = False
        if add_to_experiments:
            logging.info('Adding new port {} to record.'.format(port["port"]))
            new_instruction_handler = port_handler.PortHandler(int(port["port"]), 'INPUT')
            new_instruction_handler.setup_recording()
            global_vars.experiments.append(new_instruction_handler)
def start_experiments(client, data_json):
    """Record the experiment id, arm every registered handler, and ack."""
    experiment_id = data_json["experiment"]
    global_vars.current_experiment = experiment_id
    logging.info('Starting experiment: {}'.format(experiment_id))
    for handler in global_vars.experiments:
        handler.setup_experiment()
    final_response(client, 'START-OK')
def finish_experiment(client):
    """Stream each handler's experiment lineage to the controller, then ack."""
    logging.info('Experiment finished: Sending lineage to controller')
    for handler in global_vars.experiments:
        lineage = handler.experiment_finished()
        client.sendall(bytes(json.dumps(lineage), 'utf-8'))
    final_response(client, 'FINISH-EXPERIMENT-OK')
def start_recording(client, data_json):
    """Reset the experiment id, switch the listed ports to recording, and ack.

    data_json carries the 'ports' list consumed by parse_experiment_record.
    """
    # BUG FIX: the log message was copy-pasted from finish_experiment
    # ('Experiment finished: Sending lineage to controller') and misreported
    # what this handler does.
    logging.info('Starting recording')
    global_vars.current_experiment = 0
    parse_experiment_record(data_json)
    final_response(client, 'RECORD-OK')
def finish_recording(client):
    """Send each handler's recorded lineage to the controller, then ack."""
    logging.info('Recording finished: Sending lineage to controller')
    for handler in global_vars.experiments:
        payload = json.dumps(handler.recording_finished())
        client.sendall(payload.encode('utf-8'))
    final_response(client, 'FINISH-RECORD-OK')
def reset_node(client):
    """Restore the node to a clean state: flush the iptables rules, clear the
    in-memory experiment state, then acknowledge the controller."""
    logging.info('Resetting node')
    iptables.reset()
    global_vars.reset()
    final_response(client, 'RESET-OK')
def listen_to_client(client, address):
    """Serve one controller connection: receive a JSON command, dispatch it
    to the matching handler, and close the socket.

    Returns False once the connection ends (clean disconnect or error);
    each handler sends its own acknowledgement before the close.
    """
    size = 4092
    while True:
        try:
            data = client.recv(size)
            # BUG FIX: check for a clean disconnect (recv returns b'') BEFORE
            # json.loads; previously empty data raised JSONDecodeError and the
            # 'Client disconnected' path was unreachable.
            if not data:
                raise Exception('Client disconnected')
            logging.info("Received:{}".format(data))
            data_json = json.loads(data)
            message_type = data_json["type"]
            if message_type == "INSTRUCTIONS":
                parse_experiment_instructions(client, data_json)
            elif message_type == "START":
                start_experiments(client, data_json)
            elif message_type == "FINISH":
                finish_experiment(client)
            elif message_type == "RECORD":
                start_recording(client, data_json)
            elif message_type == "RECORD-FINISH":
                finish_recording(client)
            elif message_type == "RESET":
                reset_node(client)
            else:
                raise Exception('Invalid JSON.')
            client.close()
        except Exception:
            # BUG FIX: was a bare `except:` that silently swallowed every
            # error (including programming bugs); log it so protocol problems
            # are visible, then drop the client as before.
            logging.exception('Closing connection from {}'.format(address))
            client.close()
            return False
class ThreadedServer(object):
    """TCP server that hands each accepted connection to listen_to_client
    on its own thread."""

    def __init__(self, host, port):
        # Address to bind the listening socket to.
        self.host = host
        self.port = port
        self.sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
        # SO_REUSEADDR lets the server rebind immediately after a restart.
        self.sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
        self.sock.bind((self.host, self.port))

    def listen(self):
        """Accept connections forever; each client gets a 60-second socket
        timeout and is served by listen_to_client on a new thread."""
        self.sock.listen(5)
        while True:
            client, address = self.sock.accept()
            client.settimeout(60)
            threading.Thread(target=listen_to_client, args=(client, address)).start()
def setup_server():
    """Start the command server on a background daemon thread; return it."""
    HOST, PORT = '0.0.0.0', global_vars.config["PORT"]
    server = ThreadedServer(HOST, PORT)
    # Daemon thread: the infinite accept loop must not block shutdown.
    threaded_server = threading.Thread(target=server.listen, daemon=True)
    threaded_server.start()
    return server
| 33.590604 | 91 | 0.645554 | 554 | 5,005 | 5.629964 | 0.189531 | 0.061558 | 0.042642 | 0.033665 | 0.448221 | 0.358769 | 0.311318 | 0.20327 | 0.178903 | 0.145559 | 0 | 0.004267 | 0.250749 | 5,005 | 148 | 92 | 33.817568 | 0.827467 | 0 | 0 | 0.224138 | 0 | 0 | 0.115884 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103448 | false | 0 | 0.043103 | 0 | 0.172414 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e76d4f5199c8db95dd4c1cb4e8a89dd29992075 | 4,084 | py | Python | airtouch3/config_flow.py | ozczecho/custom_components | 0c4eef070543b42af57dbaa3730e868ab3b50df1 | [
"Unlicense"
] | 5 | 2020-09-16T03:45:56.000Z | 2021-07-28T23:04:15.000Z | airtouch3/config_flow.py | ozczecho/custom_components | 0c4eef070543b42af57dbaa3730e868ab3b50df1 | [
"Unlicense"
] | 3 | 2021-01-28T03:34:49.000Z | 2022-01-30T05:54:27.000Z | airtouch3/config_flow.py | ozczecho/custom_components | 0c4eef070543b42af57dbaa3730e868ab3b50df1 | [
"Unlicense"
] | 1 | 2021-07-28T23:04:17.000Z | 2021-07-28T23:04:17.000Z | import asyncio
import logging
import voluptuous as vol
from async_timeout import timeout
from custom_components.airtouch3.vzduch import Vzduch
from homeassistant import config_entries, core
from homeassistant.const import CONF_HOST, CONF_PORT
from . import config_flow
from .const import DEFAULT_PORT, DOMAIN, TIMEOUT
_LOGGER = logging.getLogger(__name__)
@config_entries.HANDLERS.register(DOMAIN)
class AirTouch3ConfigFlow(config_entries.ConfigFlow, domain=DOMAIN):
"""AirTouch 3 config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
    @core.callback
    def _async_get_entry(self, data):
        """Create the config entry, titled with the device host."""
        return self.async_create_entry(
            title=data[CONF_HOST],
            data={
                CONF_HOST: data[CONF_HOST],
                CONF_PORT: data.get(CONF_PORT)
            },
        )
    async def async_step_user(self, user_input=None):
        """Handle the user-initiated configuration step.

        Shows the host/port form first, then validates connectivity to the
        AirTouch 3 unit and creates the entry on success.
        """
        _LOGGER.debug("async_step_user")
        if user_input is None:
            # First call: present the form defined by self.schema.
            return self.async_show_form(step_id="user", data_schema=self.schema)
        errors = {}  # NOTE(review): never populated or used — dead variable
        host = user_input[CONF_HOST]
        port = user_input[CONF_PORT]
        try:
            _LOGGER.debug("create_device")
            session = self.hass.helpers.aiohttp_client.async_get_clientsession()
            with timeout(TIMEOUT):
                _LOGGER.debug("Call vzduch")
                # NOTE(review): passes the async_timeout `timeout` *function*
                # as Vzduch's last argument; TIMEOUT (the constant) looks
                # intended — confirm against Vzduch's signature.
                device = Vzduch(session, host, port, timeout)
                await device.async_update()
        except asyncio.TimeoutError:
            return self.async_show_form(
                step_id="user",
                data_schema=self.schema,
                errors={"base": "device_timeout"},
            )
        # NOTE(review): `web_exceptions` and `ClientError` are not imported in
        # this module — reaching either handler raises NameError instead of
        # showing the form (likely missing aiohttp imports).
        except web_exceptions.HTTPForbidden:
            return self.async_show_form(
                step_id="user", data_schema=self.schema, errors={"base": "forbidden"},
            )
        except ClientError:
            _LOGGER.exception("ClientError")
            return self.async_show_form(
                step_id="user", data_schema=self.schema, errors={"base": "device_fail"},
            )
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception("Unexpected error creating device")
            return self.async_show_form(
                step_id="user", data_schema=self.schema, errors={"base": "device_fail"},
            )
        _LOGGER.debug(f"Device with name AirTouch_{device.name} has been setup")
        return self._async_get_entry(user_input)
    async def create_device(self, host, port=DEFAULT_PORT):
        """Validate connectivity to *host*:*port* and create a config entry.

        Largely duplicates async_step_user's try/except ladder.
        """
        try:
            _LOGGER.debug("create_device")
            session = self.hass.helpers.aiohttp_client.async_get_clientsession()
            with timeout(TIMEOUT):
                _LOGGER.debug("Call vzduch")
                # NOTE(review): awaits the Vzduch constructor itself (unlike
                # async_step_user, which constructs then awaits
                # async_update) and passes the `timeout` function as an
                # argument — both look like bugs; confirm against Vzduch.
                device = await Vzduch(session, host, port, timeout)
        except asyncio.TimeoutError:
            return self.async_show_form(
                step_id="user",
                data_schema=self.schema,
                errors={"base": "device_timeout"},
            )
        # NOTE(review): `web_exceptions` and `ClientError` are not imported
        # in this module — these handlers would raise NameError if reached.
        except web_exceptions.HTTPForbidden:
            return self.async_show_form(
                step_id="user", data_schema=self.schema, errors={"base": "forbidden"},
            )
        except ClientError:
            _LOGGER.exception("ClientError")
            return self.async_show_form(
                step_id="user", data_schema=self.schema, errors={"base": "device_fail"},
            )
        except Exception:  # pylint: disable=broad-except
            _LOGGER.exception("Unexpected error creating device")
            return self.async_show_form(
                step_id="user", data_schema=self.schema, errors={"base": "device_fail"},
            )
        return self._async_get_entry({
            CONF_HOST: host,
            CONF_PORT: port
        })
@property
def schema(self):
"""Return current schema."""
return vol.Schema(
{
vol.Required(CONF_HOST): str,
vol.Optional(CONF_PORT): int
}
) | 34.905983 | 88 | 0.599657 | 438 | 4,084 | 5.326484 | 0.226027 | 0.051436 | 0.077154 | 0.073296 | 0.605658 | 0.561937 | 0.546507 | 0.546507 | 0.546507 | 0.546507 | 0 | 0.00141 | 0.305583 | 4,084 | 117 | 89 | 34.905983 | 0.821227 | 0.02571 | 0 | 0.428571 | 0 | 0 | 0.090978 | 0.005544 | 0 | 0 | 0 | 0 | 0 | 1 | 0.020408 | false | 0 | 0.091837 | 0.010204 | 0.27551 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e771c5ef1d08a107188c3b42d862964c988f7fa | 1,912 | py | Python | MIDI_FeatherWing/code.py | gamblor21/Adafruit_Learning_System_Guides | f5dab4a758bc82d0bfc3c299683fe89dc093912a | [
"MIT"
] | 665 | 2017-09-27T21:20:14.000Z | 2022-03-31T09:09:25.000Z | MIDI_FeatherWing/code.py | gamblor21/Adafruit_Learning_System_Guides | f5dab4a758bc82d0bfc3c299683fe89dc093912a | [
"MIT"
] | 641 | 2017-10-03T19:46:37.000Z | 2022-03-30T18:28:46.000Z | MIDI_FeatherWing/code.py | gamblor21/Adafruit_Learning_System_Guides | f5dab4a758bc82d0bfc3c299683fe89dc093912a | [
"MIT"
] | 734 | 2017-10-02T22:47:38.000Z | 2022-03-30T14:03:51.000Z | # SPDX-FileCopyrightText: 2021 John Park for Adafruit Industries
# SPDX-License-Identifier: MIT
# midi_UARToutdemo.py - demonstrates sending MIDI notes
import time
import board
import busio
import adafruit_midi
from adafruit_midi.control_change import ControlChange
from adafruit_midi.note_off import NoteOff
from adafruit_midi.note_on import NoteOn
# 31250 baud is the standard MIDI serial rate.
uart = busio.UART(board.TX, board.RX, baudrate=31250, timeout=0.001)  # init UART
midi_in_channel = 2
midi_out_channel = 1
# The library stores channels zero-based (hence the -1 here and the +1 when
# printing below).
midi = adafruit_midi.MIDI(
    midi_in=uart,
    midi_out=uart,
    in_channel=(midi_in_channel - 1),
    out_channel=(midi_out_channel - 1),
    debug=False,
)
note_hold = 0.85  # seconds each note sounds
rest = note_hold / 5  # silence between notes
print("MIDI Out demo")
print("Default output channel:", midi.out_channel + 1)
def play_note(pitch, velocity):
    """Play one note: note-on, hold, note-off, then a short rest."""
    midi.send(NoteOn(pitch, velocity))
    time.sleep(note_hold)
    midi.send(NoteOff(pitch, 0))
    time.sleep(rest)


while True:
    # Two passes over an arpeggio: first with modulation off, then with
    # modulation at maximum. The passes end on different top notes (58 vs
    # 50), exactly matching the previously hard-coded sequence.
    for modulation, arpeggio in (
        (0, ((48, 20), (55, 40), (51, 60), (58, 80))),
        (127, ((48, 20), (55, 40), (51, 60), (50, 80))),
    ):
        # midi.send(ControlChange(64, modulation))  # sustain CC (disabled)
        midi.send(ControlChange(1, modulation))  # modulation CC
        for pitch, velocity in arpeggio:
            play_note(pitch, velocity)
| 25.157895 | 81 | 0.683577 | 291 | 1,912 | 4.381443 | 0.257732 | 0.12549 | 0.087843 | 0.106667 | 0.486275 | 0.451765 | 0.43451 | 0.43451 | 0.381176 | 0.381176 | 0 | 0.058634 | 0.188285 | 1,912 | 75 | 82 | 25.493333 | 0.762887 | 0.168933 | 0 | 0.5 | 0 | 0 | 0.022901 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0.035714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e79df8220a461db02abd119da5f9d74b381fafb | 695 | py | Python | Modules/module5/opdracht2.py | Pink-Shadow/VISN | 4a484610cd86a170a9612a65c81e082394cc08f0 | [
"BSL-1.0"
] | null | null | null | Modules/module5/opdracht2.py | Pink-Shadow/VISN | 4a484610cd86a170a9612a65c81e082394cc08f0 | [
"BSL-1.0"
] | null | null | null | Modules/module5/opdracht2.py | Pink-Shadow/VISN | 4a484610cd86a170a9612a65c81e082394cc08f0 | [
"BSL-1.0"
] | null | null | null | import matplotlib.pyplot as plt
from sklearn import datasets
from sklearn.utils import shuffle
from sklearn import svm
# Load the 8x8 handwritten-digit dataset and an RBF support-vector classifier.
digits = datasets.load_digits()
clf = svm.SVC(gamma=0.001, C=100)
len_data = len(digits.data)
print(len_data)
# Shuffle samples, targets and images together so rows stay aligned.
data, target, images = shuffle(digits.data, digits.target, digits.images)
# First third of the shuffled rows trains; the remaining two thirds test.
train_data = data[:len_data//3]
train_target = target[:len_data//3]
test_data = data[len_data//3:]
test_target = target[len_data//3:]
print(test_data[100])
clf.fit(train_data, train_target)
# Manually score the classifier on the held-out rows.
correct = 0
# NOTE(review): the loop variable shadows the shuffled `data` array above.
for i, data in enumerate(test_data):
    res = clf.predict([data])
    if res[0] == test_target[i]:
        correct += 1
print("Accuracy =", round((correct/len(test_data))*100, 2) )
7e7bf25ea1b4c89813be59856597f18ca1020ebd | 3,174 | py | Python | clustergrammer/upload_pages/load_tsv_endpoint.py | shalevy1/clustergrammer-web | e83bd73d3c44587da1095360f57b02e6984744ef | [
"MIT"
] | null | null | null | clustergrammer/upload_pages/load_tsv_endpoint.py | shalevy1/clustergrammer-web | e83bd73d3c44587da1095360f57b02e6984744ef | [
"MIT"
] | 1 | 2021-03-20T05:34:24.000Z | 2021-03-20T05:34:24.000Z | clustergrammer/upload_pages/load_tsv_endpoint.py | shalevy1/clustergrammer-web | e83bd73d3c44587da1095360f57b02e6984744ef | [
"MIT"
] | null | null | null | def main(mongo_address, response_type='redirect', req_sim_mat=False):
from flask import request
import StringIO
import threading
import time
if request.method == 'POST':
# set values for distance and linkage types
#############################################
if 'distance-type' in request.values:
distance_type = request.values['distance-type']
else:
distance_type = 'cosine'
if 'linkage-type' in request.values:
linkage_type = request.values['linkage-type']
else:
linkage_type = 'average'
req_file = request.files['file']
buff = StringIO.StringIO(req_file.read())
inst_filename = req_file.filename
if allowed_file(inst_filename):
thread, viz_id = start_upload(mongo_address, inst_filename, buff,
req_sim_mat, distance_type, linkage_type)
max_wait_time = 45
for wait_time in range(max_wait_time):
time.sleep(1)
if thread.isAlive() == False:
return make_response(viz_id, inst_filename, response_type=response_type, req_sim_mat=req_sim_mat)
return make_response(viz_id, inst_filename, response_type=response_type, req_sim_mat=req_sim_mat)
else:
return upload_error(inst_filename)
def allowed_file(filename):
    """Return True when *filename* carries a permitted extension (txt/tsv)."""
    permitted = {'txt', 'tsv'}
    if '.' not in filename:
        return False
    extension = filename.rsplit('.', 1)[1]
    return extension in permitted
def start_upload(mongo_address, inst_filename, buff, req_sim_mat=False,
                 distance_type='cosine', linkage_type='average'):
    """Insert a placeholder network doc and launch clustering in a thread.

    Returns (thread, viz_id) so callers can poll the thread and look up the
    document once the worker replaces the 'processing' fields.
    """
    from pymongo import MongoClient
    import threading
    import load_tsv_file
    client = MongoClient(mongo_address)
    db = client.clustergrammer
    # Placeholder document; the worker overwrites 'viz'/'dat' when done.
    export_viz = {}
    export_viz['name'] = inst_filename
    export_viz['viz'] = 'processing'
    export_viz['dat'] = 'processing'
    export_viz['source'] = 'user_upload'
    # get the id that will be used to update the placeholder
    viz_id = db.networks.insert( export_viz )
    viz_id = str(viz_id)
    client.close()
    sub_function = load_tsv_file.main
    arg_list = [ buff, export_viz['name'], mongo_address, viz_id, req_sim_mat,
                 distance_type, linkage_type]
    # Daemon thread: clustering must not block server shutdown.
    thread = threading.Thread(target=sub_function, args=arg_list)
    thread.setDaemon(True)
    thread.start()
    return thread, viz_id
def upload_error(inst_filename):
    """Build the user-facing error message for a rejected upload."""
    if not inst_filename:
        return 'Please choose a file to upload.'
    return 'Your file, ' + inst_filename + ', is not a supported filetype.'
def make_response(viz_id, inst_filename, response_type='redirect', req_sim_mat=False):
    """Build the HTTP response for an uploaded visualization.

    'redirect' returns a Flask redirect to the viz page; 'link' returns the
    absolute URL string. NOTE(review): any other response_type implicitly
    returns None — confirm callers never pass other values.
    """
    from flask import redirect
    if response_type == 'redirect':
        if req_sim_mat:
            inst_redirect = redirect('/clustergrammer/viz_sim_mats/'+viz_id+'/'+inst_filename)
        else:
            inst_redirect = redirect('/clustergrammer/viz/'+viz_id+'/'+inst_filename)
        return inst_redirect
    elif response_type == 'link':
        if req_sim_mat:
            inst_link = 'http://amp.pharm.mssm.edu/clustergrammer/viz_sim_mats/'+viz_id+'/'+inst_filename
        else:
            inst_link = 'http://amp.pharm.mssm.edu/clustergrammer/viz/'+viz_id+'/'+inst_filename
        return inst_link
| 29.663551 | 107 | 0.692187 | 425 | 3,174 | 4.882353 | 0.258824 | 0.09253 | 0.047711 | 0.057349 | 0.34988 | 0.321928 | 0.321928 | 0.295422 | 0.247229 | 0.21494 | 0 | 0.002333 | 0.189666 | 3,174 | 106 | 108 | 29.943396 | 0.804432 | 0.030246 | 0 | 0.164384 | 0 | 0 | 0.130363 | 0.009571 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068493 | false | 0 | 0.109589 | 0 | 0.287671 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e7ea5bc12a522dcc4b5eeae11c5c31bad533713 | 2,046 | py | Python | 178/commits.py | jsh/pybites | 73c79ed962c15247cead173b17f69f248ea51b96 | [
"MIT"
] | null | null | null | 178/commits.py | jsh/pybites | 73c79ed962c15247cead173b17f69f248ea51b96 | [
"MIT"
] | null | null | null | 178/commits.py | jsh/pybites | 73c79ed962c15247cead173b17f69f248ea51b96 | [
"MIT"
] | null | null | null | """Bite 178. Parse PyBites blog git commit log."""
import os
import re
from collections import Counter, defaultdict
from typing import Tuple
from urllib.request import urlretrieve
from dateutil.parser import parse
commits = os.path.join(os.getenv("TMP", "/tmp"), "commits")
# NOTE(review): downloading at import time is a module side effect — every
# import of this module hits the network.
urlretrieve("https://bites-data.s3.us-east-2.amazonaws.com/git_log_stat.out", commits)
# you can use this constant as key to the yyyymm:count dict
YEAR_MONTH = "{y}-{m:02d}"
def tot_changes(changes: str) -> int:
    """Add deletions and insertions found in a git --shortstat summary."""
    total = 0
    # "N insertion" / "N deletion" also match their plural forms.
    for pattern in (r"(\d+) insertion", r"(\d+) deletion"):
        found = re.search(pattern, changes)
        if found:
            total += int(found.group(1))
    return total
def get_min_max_amount_of_commits(
    commit_log: str = commits, year: int = None
) -> Tuple[str, str]:
    """Get the months with fewest and most commits.

    Calculate the amount of inserts / deletes per month from the
    provided commit log.

    Takes optional year arg, if provided only look at lines for
    that year, if not, use the entire file.

    Returns a tuple of (least_active_month, most_active_month)
    """
    log_pat = re.compile(r"\S+:\s+(.*)\s+\|\s+.*changed, (.*)$")
    # 31 insertions(+), 2 deletions(-)
    nchanges_per_month = defaultdict(int)
    with open(commit_log, encoding="utf-8") as f_in:
        for line in f_in:
            match = log_pat.match(line)
            # NOTE(review): assumes every line matches; a non-matching line
            # leaves match = None and the next statement raises
            # AttributeError — confirm the log format is guaranteed.
            date, changes = match.group(1, 2)
            ym_date = parse(date).strftime("%Y-%m")
            if year and ym_date[:4] != str(year):
                continue
            nchanges = tot_changes(changes)
            nchanges_per_month[ym_date] += nchanges
    changes_per_month = Counter(nchanges_per_month)
    # most_common() sorts descending, so the last entry is the least active.
    least = changes_per_month.most_common()[-1]
    most = changes_per_month.most_common(1)[0]
    return (least[0], most[0])
| 34.1 | 86 | 0.668133 | 292 | 2,046 | 4.541096 | 0.431507 | 0.042232 | 0.027149 | 0.029412 | 0.060332 | 0.039216 | 0 | 0 | 0 | 0 | 0 | 0.01425 | 0.211144 | 2,046 | 59 | 87 | 34.677966 | 0.807311 | 0.222385 | 0 | 0 | 0 | 0.028571 | 0.103938 | 0.018722 | 0 | 0 | 0 | 0 | 0 | 1 | 0.057143 | false | 0 | 0.171429 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e7f6394c00df8143b05bae6d4a8fa728e4549b8 | 937 | py | Python | hypergan/generators/fully_connected_generator.py | SlipknotTN/HyperGAN | bd39759521d52a706f6f0f561e0c8355a3ef427e | [
"MIT"
] | null | null | null | hypergan/generators/fully_connected_generator.py | SlipknotTN/HyperGAN | bd39759521d52a706f6f0f561e0c8355a3ef427e | [
"MIT"
] | null | null | null | hypergan/generators/fully_connected_generator.py | SlipknotTN/HyperGAN | bd39759521d52a706f6f0f561e0c8355a3ef427e | [
"MIT"
] | null | null | null | import tensorflow as tf
import numpy as np
import hyperchamber as hc
from hypergan.generators.common import *
from .base_generator import BaseGenerator
class FullyConnectedGenerator(BaseGenerator):
    def required(self):
        # No extra configuration keys are required by this generator.
        return []
def build(self, net):
gan = self.gan
ops = self.ops
config = self.config
activation = ops.lookup(config.activation or 'lrelu')
print("[dcgan] NET IS", net)
net = ops.linear(net, 1024)
shape = ops.shape(net)
x_shape = ops.shape(self.gan.inputs.x)
output_size = x_shape[1]*x_shape[2]*x_shape[3]
print("Output size", output_size)
net = activation(net)
net = ops.linear(net, 2*1024)
net = activation(net)
net = ops.linear(net, output_size)
net = ops.lookup('tanh')(net)
net = ops.reshape(net, output_size)
self.sample = net
return self.sample
| 24.657895 | 61 | 0.61793 | 122 | 937 | 4.672131 | 0.377049 | 0.052632 | 0.063158 | 0.078947 | 0.140351 | 0.108772 | 0.108772 | 0 | 0 | 0 | 0 | 0.017699 | 0.276414 | 937 | 37 | 62 | 25.324324 | 0.823009 | 0 | 0 | 0.074074 | 0 | 0 | 0.036286 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.185185 | 0.037037 | 0.37037 | 0.074074 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e8516b3816a3b7ca318a7db0d77b697c1e2a867 | 8,677 | py | Python | pymutual/session.py | kimballh/pymutual | 7d7f588099eee7bdd669d613756509c6ab44a911 | [
"MIT"
] | null | null | null | pymutual/session.py | kimballh/pymutual | 7d7f588099eee7bdd669d613756509c6ab44a911 | [
"MIT"
] | null | null | null | pymutual/session.py | kimballh/pymutual | 7d7f588099eee7bdd669d613756509c6ab44a911 | [
"MIT"
] | null | null | null | import requests
import json
import robobrowser
import re
from pymutual.constants import *
from pymutual.models import Profile
from pymutual.errors import *
from werkzeug.exceptions import BadRequestKeyError
class Session:
    """High-level Mutual session authenticated through a Facebook token."""
    def __init__(self, email: str=None, password: str=None, fb_token: str=None, mutual_id: int=None):
        # Prefer an explicit token; otherwise log in with email/password.
        if fb_token:
            self._fb_token = fb_token
        elif email and password:
            self.get_fb_token(email, password)
            # get_fb_token stores -1 when the confirm step is missing
            # (i.e. the Facebook login was rejected).
            if self._fb_token == -1:
                raise InitializationError('Invalid Facebook login information')
        else:
            raise InitializationError('Facebook user email and password must be provided or auth token')
        if not self._fb_token or self._fb_token == '':
            raise InitializationError('User could not be authenticated')
        self._fb_id = self.get_fb_id(self._fb_token)
        self._api = MutualAPI(self._fb_token, int(self._fb_id), mutual_id)
    @property
    def token(self):
        # The Facebook OAuth access token in use.
        return self._fb_token
    @property
    def mutual_id(self):
        # Mutual's internal id for the authenticated user.
        return self._api.id
    def get_fb_token(self, email, password):
        """Drive Facebook's mobile OAuth dialog and store the access token.

        Sets self._fb_token to the token string, or -1 when the confirm
        button is missing from the second form (bad credentials).
        """
        s = robobrowser.RoboBrowser(user_agent=MOBILE_USER_AGENT, parser="lxml")
        s.open(MUTUAL_AUTH)
        f = s.get_form()
        f["pass"] = password
        f["email"] = email
        s.submit_form(f)
        f = s.get_form()
        try:
            s.submit_form(f, submit=f.submit_fields['__CONFIRM__'])
            # Extract the access token from the HTML of the redirect response.
            access_token = re.search(r"access_token=([\w\d]+)", s.response.content.decode()).groups()[0]
            self._fb_token = access_token
        except BadRequestKeyError:
            self._fb_token = -1
    def get_fb_id(self, fb_token) -> str:
        """Resolve the Facebook user id for *fb_token* via the Graph API."""
        r = requests.get('https://graph.facebook.com/v2.11/me?access_token={}'.format(fb_token))
        if r.status_code != 200:
            raise InitializationError('User could not be authenticated')
        if not r.json() or type(r.json()) is not dict or 'id' not in r.json().keys():
            raise InitializationError('User could not be authenticated')
        return r.json()['id']
    def potential_matches(self, limit=10, import_json: str=None, return_dict: bool=False):
        """Fetch potential matches and build Profile objects.

        Returns a list of Profile objects, or a mutual_id -> serialized
        profile dict when return_dict is True. When import_json is given it
        is parsed instead of calling the API. Returns {} if the API call
        fails (note: even when return_dict is False).
        """
        if import_json:
            data = json.loads(import_json)
        else:
            try:
                data = self._api.get_potential_matches(limit)
            except RequestError as e:
                print(e)
                return {}
        # get profile data from each dict and create Profile objects to return as array
        if return_dict:
            profiles = {}
        else:
            profiles = []
        for person in data:
            name = person['first_name']
            age = person['age']
            gender = person['gender']
            mutual_id = person['id']
            distance_mi = person['distance']
            location = person['location']
            position = person['position']
            school = person['school']
            mission = person['mission_location']
            hometown = person['hometown']
            fb_id = person['fb_id']
            dating_interest = person['dating_interest']
            liked_user = person['has_liked']
            height = "{}' {}\"".format(person['height_ft'], person['height_in'])
            photo_urls = []
            for photo in person['photos']:
                photo_urls.append(photo['url_hd'])
            # get bio
            try:
                user_prompt_data = self._api.get_user_prompt(mutual_id)
            except RequestError as e:
                print(e)
                user_prompt_data = None
            if user_prompt_data is not None and len(user_prompt_data) > 0:
                item = user_prompt_data[0]
                prompt_id = item['prompt_id']
                bio = item['text']
            else:
                prompt_id = 0
                bio = ''
            # get tags
            try:
                tag_data = self._api.get_user_tags(mutual_id)
            except RequestError as e:
                tag_data = []
                print(e)
            tags = []
            for item in tag_data:
                tags.append(item['text'])
            # create Profile object
            profile = Profile(name, age, gender, mutual_id, distance_mi, location, photo_urls, position, school, height,
                              mission, hometown, fb_id, dating_interest, liked_user, prompt_id, bio, tags)
            if return_dict:
                profiles[mutual_id] = profile.serialize()
            else:
                profiles.append(profile)
        return profiles
    def get_auto_matches(self, search_limit: int=10, import_json: str=None):
        """Return profiles that have already liked the current user."""
        profiles = self.potential_matches(limit=search_limit, import_json=import_json)
        auto_matches = []
        for profile in profiles:
            if profile.liked_user:
                auto_matches.append(profile)
        return auto_matches
    def like_user(self, match_id):
        """Swipe right on *match_id*; returns API JSON or an error dict."""
        try:
            response = self._api.swipe_user(self.mutual_id, match_id, True)
            return response
        except RequestError as e:
            return {'error': str(e)}
    def dislike_user(self, match_id):
        """Swipe left on *match_id*; returns API JSON or an error dict."""
        try:
            response = self._api.swipe_user(self.mutual_id, match_id, False)
            return response
        except RequestError as e:
            return {'error': str(e)}
    def match(self, match_id):
        """Create a match with *match_id*; returns API JSON or an error dict."""
        try:
            response = self._api.match_users(match_id)
            return response
        except RequestError as e:
            return {'error': str(e)}
class MutualAPI:
    """Thin HTTP wrapper around the Mutual REST API."""
    def __init__(self, access_token: str, fb_id: int, mutual_id: int=None):
        self._access_token = access_token
        self._url = MUTUAL_URL
        # Look up the Mutual id from the Facebook id unless already known.
        if mutual_id:
            self._id = mutual_id
        else:
            self._id = self.get_id(fb_id)
    @property
    def id(self):
        # Mutual's internal user id for the authenticated user.
        return self._id
    def get_id(self, fb_id: int):
        """Resolve the Mutual user id for a Facebook id."""
        url = '{}/user/fb_id/{}'.format(self._url, fb_id)
        try:
            response = self.get(url)['id']
            return response
        except RequestError as e:
            raise InitializationError(e)
    def get(self, url, add_params=None):
        """GET *url* with the access token; return parsed JSON.

        Raises RequestError on any non-200 status.
        """
        params = {
            'access_token': self._access_token
        }
        if add_params:
            for key in add_params.keys():
                params[key] = add_params[key]
        response = requests.get(url, params=params)
        if response.status_code != 200:
            raise RequestError('URL: {} returned code {}'.format(url, response.status_code))
        return response.json()
    def post(self, url, body, add_params=None):
        """POST JSON *body* to *url*; return parsed JSON.

        Raises RequestError (including the response body) on non-200.
        """
        params = {
            'access_token': self._access_token
        }
        if add_params:
            for key in add_params.keys():
                params[key] = add_params[key]
        response = requests.post(url, json=body, params=params)
        if response.status_code != 200:
            raise RequestError('URL: {} returned code {}{}'.format(url, response.status_code, response.content))
        return response.json()
    def get_potential_matches(self, limit: int=10):
        """Fetch up to *limit* potential matches for this user."""
        params = {
            'count': limit
        }
        url = '{}/user/potential-matches/{}'.format(self._url, self._id)
        response = self.get(url, add_params=params)
        return response
    def swipe_user(self, user_id, match_id, liked):
        """Record a like (liked=True) or dislike swipe on *match_id*."""
        body = {
            'user_id': str(user_id),
            'liked': liked,
            'match_user_id': str(match_id),
        }
        url = '{}/connection/'.format(self._url)
        response = self.post(url, body)
        return response
    def match_users(self, match_id):
        """Create a match between this user and *match_id*."""
        url = '{}/match/users/{}/{}'.format(self._url, self._id, match_id)
        return self.get(url)
    def get_user_prompt(self, user_id):
        """Fetch the prompt/bio entries for *user_id*."""
        url = '{}/user-prompt/list/{}'.format(self._url, user_id)
        return self.get(url)
    def get_user_tags(self, user_id):
        """Fetch the tagline entries for *user_id*."""
        url = '{}/tagline/list/{}'.format(self._url, user_id)
        return self.get(url)
    def get_mutual_friends(self, fb_id):
        """Fetch Facebook mutual friends for *fb_id*."""
        url = '{}/fb-mutual-friends/{}'.format(self._url, fb_id)
        return self.get(url)
if __name__ == '__main__':
    # SECURITY: a real Facebook access token (and, commented out, a plaintext
    # password) is committed in source. These credentials should be revoked
    # and loaded from the environment, never hard-coded.
    # session = Session(email='kimball.hill', password='id1080287!')
    session = Session(fb_token='EAAOxRcEGpJMBACfBF6q8nlsl7noosIC0e29AanOWcvT8JjZCRQqZCn0Gw2ZABsdYD2HQhjRedouZBb0SzNclCQdGaPy2tOM0IMYSV4LIpUdzBQDFEX1qW8tS7jOPcAOlmZAhkVwKM0eMiWMXBSswGFFlI8IfsHc6qQTpyVrsZAnN44ohbtqGoUT1qopNqhGOG19FC6nFpZAiQZDZD', mutual_id=302709)
    print(session.like_user(175424))
| 35.707819 | 262 | 0.586839 | 1,019 | 8,677 | 4.767419 | 0.168793 | 0.024496 | 0.022643 | 0.030259 | 0.272952 | 0.239605 | 0.213462 | 0.16797 | 0.161383 | 0.161383 | 0 | 0.01149 | 0.307941 | 8,677 | 242 | 263 | 35.855372 | 0.797502 | 0.025239 | 0 | 0.305419 | 0 | 0 | 0.109967 | 0.036103 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103448 | false | 0.029557 | 0.064039 | 0.014778 | 0.285714 | 0.019704 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e862154a98be3e132725433905b81299677127b | 7,434 | py | Python | code/default/python27/1.0/lib/noarch/hyper/cli.py | wuyongwen/XX-Net | 313aefd862b8f230f7c61dc29db1b2b93a17e6ab | [
"BSD-2-Clause"
] | 2 | 2017-04-24T03:04:45.000Z | 2017-09-19T03:38:37.000Z | proxy/proxy/code/default/python27/1.0/lib/noarch/hyper/cli.py | viger/docker | a49547ba84606fc03530d80fe0c01631a01f10c5 | [
"MIT"
] | null | null | null | proxy/proxy/code/default/python27/1.0/lib/noarch/hyper/cli.py | viger/docker | a49547ba84606fc03530d80fe0c01631a01f10c5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
hyper/cli
~~~~~~~~~
Command line interface for Hyper inspired by Httpie.
"""
import json
import locale
import logging
import sys
from argparse import ArgumentParser, RawTextHelpFormatter
from argparse import OPTIONAL, ZERO_OR_MORE
from pprint import pformat
from textwrap import dedent
from hyper import HTTPConnection, HTTP20Connection
from hyper import __version__
from hyper.compat import is_py2, urlencode, urlsplit, write_to_stdout
from hyper.common.util import to_host_port_tuple
log = logging.getLogger('hyper')
PREFERRED_ENCODING = locale.getpreferredencoding()
# Various separators used in args
# httpie-style item separators: ':' -> header, '==' -> URL query parameter,
# '=' -> JSON body field (see make_positional_argument's help text).
SEP_HEADERS = ':'
SEP_QUERY = '=='
SEP_DATA = '='
SEP_GROUP_ITEMS = [
    SEP_HEADERS,
    SEP_QUERY,
    SEP_DATA,
]
class KeyValue(object):
    """A single key-value pair parsed from the command line.

    Attributes mirror the constructor arguments: the parsed ``key`` and
    ``value``, the ``sep`` that split them, and the raw ``orig`` string.
    """
    def __init__(self, key, value, sep, orig):
        self.key, self.value = key, value
        self.sep, self.orig = sep, orig
class KeyValueArgType(object):
    """A key-value pair argument type used with `argparse`.

    Parses a key-value arg and constructs a `KeyValue` instance.
    Used for headers, form data, and other key-value pair types.
    This class is inspired by httpie and implements simple tokenizer only.
    """
    def __init__(self, *separators):
        # Separators are tried in the order supplied (see SEP_GROUP_ITEMS).
        self.separators = separators
    def __call__(self, string):
        # First separator that splits the string wins.
        for sep in self.separators:
            splitted = string.split(sep, 1)
            if len(splitted) == 2:
                key, value = splitted
                return KeyValue(key, value, sep, string)
        # NOTE(review): implicitly returns None when no separator matches;
        # argparse then stores None for that item — confirm downstream code
        # (set_request_data) tolerates it.
def make_positional_argument(parser):
    """Register METHOD, URL and REQUEST_ITEM positionals on *parser*."""
    parser.add_argument(
        'method', metavar='METHOD', nargs=OPTIONAL, default='GET',
        help=dedent("""
        The HTTP method to be used for the request
        (GET, POST, PUT, DELETE, ...).
        """))
    parser.add_argument(
        '_url', metavar='URL',
        help=dedent("""
        The scheme defaults to 'https://' if the URL does not include one.
        """))
    # Each REQUEST_ITEM is tokenized by KeyValueArgType into header, query
    # or JSON-body entries depending on its separator.
    parser.add_argument(
        'items',
        metavar='REQUEST_ITEM',
        nargs=ZERO_OR_MORE,
        type=KeyValueArgType(*SEP_GROUP_ITEMS),
        help=dedent("""
        Optional key-value pairs to be included in the request.
        The separator used determines the type:

        ':' HTTP headers:

            Referer:http://httpie.org  Cookie:foo=bar  User-Agent:bacon/1.0

        '==' URL parameters to be appended to the request URI:

            search==hyper

        '=' Data fields to be serialized into a JSON object:

            name=Hyper  language=Python  description='CLI HTTP client'
        """))
def make_troubleshooting_argument(parser):
    """Register --version, --debug and --h2 flags on *parser*."""
    parser.add_argument(
        '--version', action='version', version=__version__,
        help='Show version and exit.')
    parser.add_argument(
        '--debug', action='store_true', default=False,
        help='Show debugging information (loglevel=DEBUG)')
    parser.add_argument(
        '--h2', action='store_true', default=False,
        help="Do HTTP/2 directly in plaintext: skip plaintext upgrade")
def set_url_info(args):
    """Parse the raw args._url string into a UrlInfo and attach it as args.url."""
    def split_host_and_port(hostname):
        # "host:port" -> (host, port); bare host -> (host, None).
        if ':' in hostname:
            return to_host_port_tuple(hostname, default_port=443)
        return hostname, None
    class UrlInfo(object):
        # Defaults assume HTTPS to localhost on 443.
        def __init__(self):
            self.fragment = None
            self.host = 'localhost'
            self.netloc = None
            self.path = '/'
            self.port = 443
            self.query = None
            self.scheme = 'https'
            self.secure = False
    info = UrlInfo()
    _result = urlsplit(args._url)
    # Copy every non-empty component of the parsed URL over the defaults.
    for attr in vars(info).keys():
        value = getattr(_result, attr, None)
        if value:
            setattr(info, attr, value)
    if info.scheme == 'http' and not _result.port:
        info.port = 80
    # Set the secure arg is the scheme is HTTPS, otherwise do unsecured.
    info.secure = info.scheme == 'https'
    if info.netloc:
        hostname, _ = split_host_and_port(info.netloc)
        info.host = hostname  # ensure stripping port number
    else:
        # Scheme-less input ("example.com/path") parses entirely into path;
        # recover host[:port] from its first segment.
        if _result.path:
            _path = _result.path.split('/', 1)
            hostname, port = split_host_and_port(_path[0])
            info.host = hostname
            if info.path == _path[0]:
                info.path = '/'
            elif len(_path) == 2 and _path[1]:
                info.path = '/' + _path[1]
            if port is not None:
                info.port = port
    log.debug('Url Info: %s', vars(info))
    args.url = info
def set_request_data(args):
    """Split parsed REQUEST_ITEMs into args.body, args.headers and URL params."""
    body, headers, params = {}, {}, {}
    for i in args.items:
        if i.sep == SEP_HEADERS:
            if i.key:
                headers[i.key] = i.value
            else:
                # when overriding a HTTP/2 special header there will be a leading
                # colon, which tricks the command line parser into thinking
                # the header is empty
                k, v = i.value.split(':', 1)
                headers[':' + k] = v
        elif i.sep == SEP_QUERY:
            params[i.key] = i.value
        elif i.sep == SEP_DATA:
            value = i.value
            if is_py2:  # pragma: no cover
                value = value.decode(PREFERRED_ENCODING)
            body[i.key] = value
    if params:
        params['access_token'] = None  # placeholder removed
    if body:
        # Data fields imply a JSON request body.
        content_type = 'application/json'
        headers.setdefault('content-type', content_type)
        args.body = json.dumps(body)
    if args.method is None:
        # NOTE(review): argparse defaults method to 'GET', so this branch
        # only fires when defaults are bypassed programmatically.
        args.method = 'POST' if args.body else 'GET'
    args.headers = headers
def parse_argument(argv=None):
    """Build the CLI parser, parse *argv* and derive url/body/header state."""
    parser = ArgumentParser(formatter_class=RawTextHelpFormatter)
    parser.set_defaults(body=None, headers={})
    make_positional_argument(parser)
    make_troubleshooting_argument(parser)
    args = parser.parse_args(sys.argv[1:] if argv is None else argv)
    if args.debug:
        # Mirror hyper's logger output to stderr at DEBUG level.
        handler = logging.StreamHandler()
        handler.setLevel(logging.DEBUG)
        log.addHandler(handler)
        log.setLevel(logging.DEBUG)
    set_url_info(args)
    set_request_data(args)
    return args
def get_content_type_and_charset(response):
    """Extract (content-type, charset) from an HTTP response.

    Returns ('unknown', 'utf-8') when no content-type header is present,
    and defaults the charset to 'utf-8' when no parameter section exists.
    """
    default_charset = 'utf-8'
    header = response.headers.get('content-type')
    if header is None:
        return 'unknown', default_charset
    decoded = header[0].decode('utf-8').lower()
    pieces = decoded.split(';', 1)
    ctype = pieces[0].strip()
    if len(pieces) == 1:
        return ctype, default_charset
    charset = pieces[1].strip().split('=')[1]
    return ctype, charset
def request(args):
    """Perform the HTTP request described by *args*; return the body bytes.

    Uses HTTPConnection (negotiated HTTP/1.1 or HTTP/2) unless --h2 forced
    a direct plaintext HTTP/2 connection.
    """
    if not args.h2:
        conn = HTTPConnection(
            args.url.host, args.url.port, secure=args.url.secure
        )
    else:  # pragma: no cover
        conn = HTTP20Connection(
            args.url.host, args.url.port, secure=args.url.secure
        )
    conn.request(args.method, args.url.path, args.body, args.headers)
    response = conn.get_response()
    log.debug('Response Headers:\n%s', pformat(response.headers))
    # Removed a dead call to get_content_type_and_charset(): its result was
    # assigned to locals that were never used (the function is pure, so
    # dropping it cannot change behavior).
    data = response.read()
    return data
def main(argv=None):
    """Entry point: parse CLI arguments, perform the request, and write
    the raw response body to stdout.

    :param argv: optional argument list; defaults to sys.argv[1:].
    """
    args = parse_argument(argv)
    log.debug('Commandline Argument: %s', args)
    data = request(args)
    write_to_stdout(data)


if __name__ == '__main__':  # pragma: no cover
    main()
| 28.92607 | 82 | 0.615685 | 924 | 7,434 | 4.800866 | 0.256494 | 0.027277 | 0.022994 | 0.010821 | 0.072137 | 0.058161 | 0.018485 | 0.018485 | 0.018485 | 0.018485 | 0 | 0.007242 | 0.275626 | 7,434 | 256 | 83 | 29.039063 | 0.816527 | 0.094566 | 0 | 0.102151 | 0 | 0.005376 | 0.151039 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.069892 | false | 0 | 0.064516 | 0 | 0.188172 | 0.005376 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e867157284b0c3de06072b5362f6aab9f5ed1c5 | 1,730 | py | Python | src/bar_charts_growprune.py | ikekt/topicalgibbs | 2f58fde2563300cd92675f59e95a04a06a94eb87 | [
"MIT"
] | null | null | null | src/bar_charts_growprune.py | ikekt/topicalgibbs | 2f58fde2563300cd92675f59e95a04a06a94eb87 | [
"MIT"
] | null | null | null | src/bar_charts_growprune.py | ikekt/topicalgibbs | 2f58fde2563300cd92675f59e95a04a06a94eb87 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
##Log Probability MSE and MSE Train and test using particle gibbs
plt.figure(1)
plt.subplot(211)
N = 4
train_set = (32.116,7.6 ,2.4,12.543)
test_set = (33.415,8.0 , 2.3 ,11.858)
trainStd = (1, 1,1,1)
testStd = (1, 1,1,1)
ind = np.arange(N) # the x locations for the groups
width = 0.35 # the width of the bars: can also be len(x) sequence
p1 = plt.bar(ind, train_set, width,color='0.5',yerr=trainStd)
p2 = plt.bar(ind, test_set, width,color='g',bottom=train_set,yerr=testStd)
plt.ylabel('Mean Squared Error')
plt.title('Lowest training and test set MSE(growprune)')
plt.xticks(ind, ( 'CTSlices','Ryn','Houses','MSD'))
plt.yticks([5,10,15,20,25,30,35,40,45,50,55,60,65])
plt.legend((p1[0], p2[0]), ('Training set', 'Test set'))
plt.show()
plt.subplot(212)
train_set_2 = (4.3,1.28 ,0.71,3.83)
test_set_2 = (4.32,1.31 ,0.7,3.81)
ind = np.arange(N) # the x locations for the groups
width = 0.35 # the width of the bars: can also be len(x) sequence
p1_new = plt.bar(ind,train_set_2,width,color='0.5',yerr=trainStd)
p2_new = plt.bar(ind, test_set_2, width,color='g',bottom=train_set_2,yerr=testStd)
plt.ylabel('-Log Likelihood MSE')
plt.title('Lowest training and test set Loglikelihood MSE(growprune)')
plt.xticks(ind, ( 'CTSlices','Ryn','Houses','MSD'))
plt.yticks([0.0,2.0,4.0,6.0,8.0,10.0])
plt.legend((p1_new[0], p2_new[0]), ('Training set', 'Test set'))
plt.show()
#CT-SLICES
#train-mse,log-prob-train,test-mse,log-prob-test
# [ 321.16 , -4.3 , 334.15 , -4.32] #growprune
#Ryn
#train-mse,log-prob-train,test-mse,log-prob-test
#[ 0.76 ,-1.28 ,0.8 ,-1.31] $growprune
#Houses
#[ 0.24 ,-0.71 , 0.23 ,-0.7 ]
#MSD
#[ 125.43 , -3.83 ,118.58 ,-3.81] | 32.037037 | 82 | 0.66474 | 345 | 1,730 | 3.278261 | 0.324638 | 0.049514 | 0.01061 | 0.007073 | 0.562334 | 0.503979 | 0.45977 | 0.311229 | 0.311229 | 0.311229 | 0 | 0.122667 | 0.132948 | 1,730 | 54 | 83 | 32.037037 | 0.631333 | 0.284393 | 0 | 0.25 | 0 | 0 | 0.184124 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.0625 | 0 | 0.0625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e86ae4e1826884a31bb87bc1989c57509fd9ba4 | 10,045 | py | Python | LayerToKML_attachments/Layer_to_KML_attachment.py | saashimi/sample-gp-tools | f0bb1fe0ae359216a633739388251b3be9949ca5 | [
"Apache-2.0"
] | 43 | 2016-01-21T19:02:09.000Z | 2021-12-15T15:05:05.000Z | LayerToKML_attachments/Layer_to_KML_attachment.py | saashimi/sample-gp-tools | f0bb1fe0ae359216a633739388251b3be9949ca5 | [
"Apache-2.0"
] | 13 | 2015-12-11T21:01:34.000Z | 2020-06-21T01:27:38.000Z | LayerToKML_attachments/Layer_to_KML_attachment.py | saashimi/sample-gp-tools | f0bb1fe0ae359216a633739388251b3be9949ca5 | [
"Apache-2.0"
] | 24 | 2016-04-11T23:35:15.000Z | 2022-02-15T02:27:10.000Z | # Layer to KML - With Attachments (Layer_to_KML_attachment.py)
# Kevin Hibma, Esri
# As found on ArcGIS.com: http://www.arcgis.com/home/item.html?id=5d8704c938ea4715b59eebabcd96c1d9
# Last updated: November 27, 2015
# Version: ArcGIS 10.1+ or ArcGIS Pro 1.0+
#
# Required Arguments:
# Input layer (features layer): path to layer
# Output KML (file): output path to KMZ file to be created
# Optional Arguments:
# Output scale (long): scale to create output KMZ file
# Clamped to ground (boolean): Clamp features to the ground (override their elevation)
# Allow Unique ID Field (boolean): allow a temporary ID field to be added to the input data
# Height (long): set the height to display image attachments in the KML popup
# Width (long): set the width to display image attachments in the KML popup
# ==================================================================================================
import arcpy
import os
import sys
import zipfile
import shutil
from distutils.version import StrictVersion
try:
from xml.etree import cElementTree as ElementTree
except:
from xml.etree import ElementTree
# These "supported" items determine what HTML to put into the HTML popup.
# If this list is enhanced, the IF-statement writing the HTML (in
# attachments()) needs to be updated to handle the new category.
# Maps an HTML-rendering category to the lower-case extensions it covers.
fileTypes = {'IMG': ['.jpg', '.png', '.gif'],
             'PDF': ['.pdf']
             }
def checks(inputFeatures):
    """Pre-flight checks before running the tool.

    Locates the attachment table related to *inputFeatures* and verifies
    that the feature class has sequential OBJECTIDs (max OID == count).

    :param inputFeatures: path to the input feature class/layer.
    :return: (attachTable, seq) -- attachTable is the attachment table
        path or None when no attachment relationship is found; seq is
        True when OIDs are sequential.
    """
    def hasAttachments(inputFeatures):
        # Scan relationship classes beside the input features for one
        # flagged as an attachment relationship, then resolve the path of
        # its destination (attachment) table.
        d = arcpy.Describe(inputFeatures)
        rc_names = d.relationshipClassNames
        if len(rc_names) > 0:
            for rc_name in rc_names:
                # relationship class is always beside the input features
                rc = os.path.join(d.path, rc_name)
                rcDesc = arcpy.Describe(rc)
                if rcDesc.isAttachmentRelationship:
                    attachTables = rcDesc.destinationClassNames
                    if len(attachTables) > 0:
                        for att_tableName in attachTables:
                            if arcpy.Exists(os.path.join(d.path, att_tableName)):
                                # assume the attachment table resides beside the input feature
                                return os.path.join(d.path, att_tableName)
                            else:
                                # If the attachment table is not found, walk the
                                # workspace looking for it.
                                # BUG FIX: the original walked an undefined name
                                # `ws` (NameError on this path); walk the input
                                # features' workspace path instead.
                                for dirpath, dirnames, filenames in arcpy.da.Walk(d.path, datatype="Table"):
                                    for f in filenames:
                                        if f == att_tableName:
                                            if arcpy.Exists(os.path.join(dirpath, att_tableName)):
                                                return os.path.join(dirpath, att_tableName)
        return None

    ## find the attachment table
    attachTable = hasAttachments(inputFeatures)
    ## check for sequential OIDs
    seq = True
    if max([row[0] for row in arcpy.da.SearchCursor(inputFeatures, ["OID@"])]) != \
            int(arcpy.GetCount_management(inputFeatures).getOutput(0)):
        seq = False
    return attachTable, seq
def attachments(KMLfiles, KMLdir, attachTable, seq=True, uniqueID=False, height=None, width=None):
    """Take attachments, extract to disk, update the KML and put them into the KMZ.

    :param KMLfiles: 'files' directory inside the extracted KML folder.
    :param KMLdir: folder containing the extracted doc.kml.
    :param attachTable: path to the attachment table.
    :param seq: True when the input OIDs are sequential.
    :param uniqueID: True when a temporary ID field was added to the data.
    :param height: optional pixel height for <img> tags in the popup.
    :param width: optional pixel width for <img> tags in the popup.
    """
    docKML = os.path.join(KMLdir, "doc.kml")
    ElementTree.register_namespace('', "http://www.opengis.net/kml/2.2")
    tree = ElementTree.parse(docKML)
    KML_NS = ".//{http://www.opengis.net/kml/2.2}"
    for node in tree.findall(KML_NS + 'Placemark'):
        idTxt = node.attrib['id']
        idVal = int(idTxt.replace('ID_', '')) + 1  # add 1 because its 0 indexed.
        # NOTE: the inner loop deliberately rebinds `node` to the
        # description element of the current placemark.
        for node in node.findall(KML_NS + 'description'):
            html = node.text
            # Special handling for the addition of the tempID field
            if not seq and uniqueID:
                gidTD = html.find("tempIDField")
                gidStart = html.find("<td>", gidTD)
                GID = html[gidStart+4 : gidStart+20]
                # Remove the GUID field from the HTML.
                html = html[:gidTD-4] + html[gidStart+25:]
                # Take guid and match it to find the OID to use in the attachment table.
                # NOTE(review): `inputFeatures` is read from module scope here,
                # and `tableMatchOID` stays unbound if the cursor yields no
                # rows -- confirm both against the __main__ flow.
                expression = "tempIDField = '{0}'".format(GID)
                with arcpy.da.SearchCursor(inputFeatures, ['OID@', 'tempIDField'], expression) as cursor:
                    for row in cursor:
                        tableMatchOID = row[0]
            # Extract the images and add HTML into the KML
            try:
                string2Inject = ''
                if not seq and uniqueID:  # Use the field that was inserted
                    exp = "REL_OBJECTID = {0}".format(tableMatchOID)
                else:  # Otherwise, use the ID value from KML to match
                    exp = "REL_OBJECTID = {0}".format(idVal)
                with arcpy.da.SearchCursor(attachTable, ['DATA', 'ATT_NAME', 'REL_OBJECTID'], exp) as cursor:
                    for row in cursor:
                        binaryRep = row[0]
                        fileName = row[1]
                        # save to disk
                        open(os.path.join(KMLfiles, fileName), 'wb').write(binaryRep.tobytes())
                        fname, ext = os.path.splitext(fileName)
                        # Normalize the stored file name to lower case.
                        os.rename(os.path.join(KMLfiles, fileName), os.path.join(KMLfiles, fileName.lower()))
                        fileName = fileName.lower()
                        filetype = "unknown"
                        for k, v in fileTypes.items():
                            if ext.lower() in v:
                                filetype = k
                        # Add new items here if the 'fileTypes' dictionary has been updated.
                        if filetype == 'IMG':
                            if height or width:
                                string2Inject += " <br> <img src=\"files\{0}\" height={1} width={2}> ".format(fileName, height, width)
                            else:
                                string2Inject += " <br> <img src=\"files\{0}\"> ".format(fileName)
                        elif filetype == 'PDF':
                            string2Inject += " <br> <a href =\"files\{0}\">PDF: {1} </a> ".format(fileName, fileName)
                        else:  # unknown
                            arcpy.AddWarning("Unknown or unsupported file type for OBJECTID: {}.".format(row[2]))
                            arcpy.AddWarning("{} will not be accessible in the popup.".format(fileName))
                # Re-close the table cell so the replace below keeps the
                # popup's HTML balanced.
                string2Inject += '</td>'
                newHTML = html.replace("</td>", string2Inject, 1)
                node.text = newHTML
            except:
                arcpy.AddWarning("No attachment match for ID: {}".format(idVal))
    tree.write(docKML)
    del tree
    del docKML
if __name__ == '__main__':
    # Warn when the installed ArcGIS already ships the enhanced KML tools
    # that make this script obsolete.
    prodInfo = arcpy.GetInstallInfo()
    if prodInfo['ProductName'] == "Desktop":
        if StrictVersion(prodInfo['Version']) >= StrictVersion('10.5'):
            arcpy.AddWarning("The KML to Layer tool was enhanced to automatically include attachments \
at the 10.5 release, effectively making this tool obsolete.")
    elif prodInfo['ProductName'] == "ArcGISPro":
        if StrictVersion(prodInfo['Version']) >= StrictVersion('1.4'):
            arcpy.AddWarning("The KML to Layer tool was enhanced to automatically include attachments \
at the 1.4 release, effectively making this tool obsolete.")

    # Tool parameters (see the header comment for their meaning).
    inputFeatures = arcpy.GetParameterAsText(0)
    outputKML = arcpy.GetParameterAsText(1)
    outputScale = arcpy.GetParameterAsText(2)
    clamped = arcpy.GetParameterAsText(3)
    uniqueID = arcpy.GetParameterAsText(4)
    height = arcpy.GetParameterAsText(5)
    width = arcpy.GetParameterAsText(6)

    # Check the input and make sure
    # 1) the data has sequential OIDs
    # 2) an attachment table can be found
    attachTable, seq = checks(inputFeatures)
    if attachTable is None:
        arcpy.AddError("Could not find an attachment table. Ensure the attachment table is properly")
        arcpy.AddError("referenced through a relationship class in the same workspace as the input features.")
        sys.exit()
    if not seq:
        arcpy.AddWarning("It appears the OIDs for the input featureclass are NOT sequential.")
        arcpy.AddWarning("Attachment logic depends on sequential OIDs.")
        arcpy.AddWarning("A temporary ID field needs to be added to your data to attempt to reconcile this.")
        # Can only proceed if we're permitted to add a new field to the input data.
        if not uniqueID:
            arcpy.AddError("You need to check the Allow Unique ID parameter (re-run tool and set to True).")
            arcpy.AddError("Note: This will add a field to your data, calc, and eventually remove it.")
            arcpy.AddError("To maintain the integrity of your data, make a copy of your data and provide this as input.")
            sys.exit()
        else:  # Add the new ID field to the data
            import uuid
            arcpy.AddField_management(inputFeatures, "tempIDField", "TEXT")
            # Use an edit session so the UpdateCursor works on all workspaces.
            edit = arcpy.da.Editor(arcpy.Describe(inputFeatures).path)
            edit.startEditing(False, False)
            with arcpy.da.UpdateCursor(inputFeatures, ["tempIDField"]) as cursor:
                for row in cursor:
                    # 16-char random hex value per row.
                    row[0] = str(uuid.uuid4().hex.upper()[0:16])
                    cursor.updateRow(row)
            edit.stopEditing(True)
            arcpy.AddMessage("A temporary field was added to your data and will be removed when tool completes.")

    # Create KML file
    arcpy.LayerToKML_conversion(inputFeatures, outputKML, outputScale, ignore_zvalue=clamped)

    # Make new files directory, copy all images inside
    KMLdir = os.path.join(os.path.dirname(outputKML), "kml_extracted")
    if not os.path.exists(KMLdir):
        os.mkdir(KMLdir)
    KMLfiles = os.path.join(KMLdir, "files")
    if not os.path.exists(KMLfiles):
        os.mkdir(KMLfiles)

    # Rename the KML to ZIP and extract it (a KMZ is a ZIP archive).
    root, kmlext = os.path.splitext(outputKML)
    os.rename(outputKML, root + ".zip")
    with zipfile.ZipFile(root + ".zip", "r") as z:
        z.extractall(KMLdir)

    # Inject images into .kmz and save
    docKML = os.path.join(KMLdir, "doc.kml")
    # Place the attachments inside the KMZ
    attachments(KMLfiles, KMLdir, attachTable, seq, uniqueID, height, width)
    if uniqueID:
        # Remove the temporary field added above.
        arcpy.DeleteField_management(inputFeatures, "tempIDField")

    # Remove the original KMZ (zip) as it'll be made new again
    os.remove(root + ".zip")
    # zip everything back up
    zipf = zipfile.ZipFile(root + ".zip", 'w')
    for rootdir, dirs, files in os.walk(KMLdir):
        for f in files:
            zipf.write(os.path.join(rootdir, f), os.path.relpath(os.path.join(rootdir, f), KMLdir))
    zipf.close()
    # Rename ZIP back to KMZ
    os.rename(root + ".zip", outputKML)
    # Clean up the KML dir
    shutil.rmtree(KMLdir)
| 40.01992 | 120 | 0.652763 | 1,312 | 10,045 | 4.969512 | 0.282012 | 0.018405 | 0.021472 | 0.005061 | 0.178681 | 0.105521 | 0.070859 | 0.036503 | 0.024847 | 0.024847 | 0 | 0.011652 | 0.23106 | 10,045 | 250 | 121 | 40.18 | 0.83247 | 0.22897 | 0 | 0.125 | 0 | 0 | 0.168402 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.01875 | false | 0 | 0.05625 | 0 | 0.1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e8ce17ce28014a0d1c70abbf3735f11167f8d2b | 5,903 | py | Python | scripts/basic_level_event_annotation_analysis.py | cltl/BL | 49c4e732ad126f149215644a0ae7683960dedf00 | [
"Apache-2.0"
] | null | null | null | scripts/basic_level_event_annotation_analysis.py | cltl/BL | 49c4e732ad126f149215644a0ae7683960dedf00 | [
"Apache-2.0"
] | 1 | 2020-01-07T09:24:25.000Z | 2020-01-07T09:24:25.000Z | scripts/basic_level_event_annotation_analysis.py | cltl/BL | 49c4e732ad126f149215644a0ae7683960dedf00 | [
"Apache-2.0"
] | 1 | 2019-10-05T02:07:47.000Z | 2019-10-05T02:07:47.000Z | """
Analyze Basic Level Event annotations
Input format is
all_annotations_folder
USERNAME
other
sport
test
Usage:
basic_level_event_annotation_analysis.py --all_annotations_folder=<all_annotations_folder>\
--path_ev_coll_obj=<path_ev_coll_obj>\
--path_to_sample_graph_edges=<path_to_sample_graph_edges>\
--results_folder=<results_folder>\
--users=<users> --batches=<batches>
--verbose=<verbose>
Options:
--all_annotations_folder=<all_annotations_folder> folder of the format see above
--path_ev_coll_obj=<path_ev_coll_obj> path to pickled EventTypeCollection object (see ../wd_classes.py)
--path_to_sample_graph_edges=<path_to_sample_graph_edges> path to nx edge list
--results_folder=<results_folder> the results folder (overwritten if exists)
--users=<users> the users concatenated by ---, probably "Piek---Antske"
--batches=<batches> the batches to consider, probably "other---sport"
--verbose=<verbose> 0 --> no stdout 1 --> general stdout 2 --> detailed stdout
Example:
python basic_level_event_annotation_analysis.py --all_annotations_folder="../ble_annotation"\
--path_ev_coll_obj="../wd_cache/ev_type_coll.p"\
--path_to_sample_graph_edges="../basic_level_inspection/sample.edges"\
--results_folder="../ble_annotation/results" \
--users="Piek---Antske" --batches="other---sport"\
--verbose=2
"""
from docopt import docopt
import os
import shutil
import sys
import pickle
import json
sys.path.append('../')
import networkx as nx
import annotation_utils as utils
ANNOTATION_TASKS = ["participants", "subevents"]  # tasks annotated per edge
MAX_DELTA_BETWEEN_ANNOTATIONS = 1  # NOTE(review): unused in this script -- confirm
TURNING_POINT = 3  # threshold passed to utils.analyze_df

# load arguments
arguments = docopt(__doc__)
output_folder = arguments['--results_folder']
main_anno_folder = arguments['--all_annotations_folder']
path_edge_list = arguments['--path_to_sample_graph_edges']
# Pickled EventTypeCollection (see ../wd_classes.py per the module docstring).
ev_coll_obj = pickle.load(open(arguments['--path_ev_coll_obj'],
                               'rb'))
users = list(arguments['--users'].split('---'))
batches = list(arguments['--batches'].split('---'))
verbose = int(arguments['--verbose'])

print()
print('PROVIDED ARGUMENTS')
print(arguments)
print()

# (Re)create the results folder from scratch.
if os.path.exists(output_folder):
    shutil.rmtree(output_folder)
os.mkdir(output_folder)
if verbose >= 1:
    print()
    print(f'(re)created results folder {output_folder}')

# edge -> user -> task -> annotated value, merged across users and batches.
edge_to_user_to_task_to_value = utils.combine_annotations(users=users,
                                                          batches=batches,
                                                          main_anno_folder=main_anno_folder,
                                                          verbose=verbose)

# Inter-annotator agreement (including Cohen's kappa) per task.
for annotation_task in ANNOTATION_TASKS:
    utils.compute_agreement(edge_to_user_to_task_to_value,
                            annotation_task,
                            output_folder=output_folder,
                            verbose=verbose)
    kappa = utils.obtain_kappa_score(output_folder, users, annotation_task)
    print()
    print('Kappa')
    print(kappa)

sample_g = utils.load_graph_from_edgelist(path_to_edge_list=path_edge_list,
                                          verbose=verbose)
sample_anno_g = utils.update_sample_graph_with_annotations(sample_graph=sample_g,
                                                           edge_to_user_to_task_to_value=edge_to_user_to_task_to_value,
                                                           verbose=verbose)

# Candidate basic-level events per task, plus one .png visualization each.
annotation_task_to_ble_info = {}
for annotation_task in ANNOTATION_TASKS:
    if verbose >= 2:
        print()
        print(f'analyzing for task {annotation_task}')
    task_ble_info = utils.determine_candidate_basic_levels(g=sample_anno_g,
                                                           annotation_task=annotation_task,
                                                           users=users,
                                                           verbose=verbose)
    dot_folder = os.path.join(output_folder, annotation_task)
    os.mkdir(dot_folder)
    for node, ble_info in task_ble_info.items():
        png_path = os.path.join(dot_folder, f'{node}.png')
        utils.create_dot_of_ble_candidate(ble_candidate_info=ble_info,
                                          ev_coll_obj=ev_coll_obj,
                                          output_path=png_path,
                                          verbose=verbose)
    annotation_task_to_ble_info[annotation_task] = task_ble_info

# Node depth = length of the shortest path from Q1656682
# (presumably the Wikidata root event node -- confirm).
node_to_shortest_path_to_event = dict()
for node in sample_g.nodes():
    shortest_path = nx.shortest_path(G=sample_g,
                                     source='Q1656682',
                                     target=node)
    node_to_shortest_path_to_event[node] = len(shortest_path)

# Basic-level analysis per task; writes per-user JSON into output_folder.
for annotation_task in ANNOTATION_TASKS:
    if verbose >= 1:
        print()
        print(f'annotation task: {annotation_task}')
    df = utils.ble_analysis(candidate_ble_info=annotation_task_to_ble_info,
                            node_to_depth=node_to_shortest_path_to_event,
                            output_folder=output_folder,
                            verbose=verbose)
    utils.analyze_df(df=df,
                     turning_point=TURNING_POINT,
                     annotation_task=annotation_task,
                     verbose=verbose)

# Per-task heatmap comparing the two annotators' JSON outputs.
for annotation_task in ANNOTATION_TASKS:
    piek_json = json.load(open(os.path.join(output_folder,
                                            f'{annotation_task}_Piek.json')))
    antske_json = json.load(open(os.path.join(output_folder,
                                              f'{annotation_task}_Antske.json')))
    output_path = os.path.join(output_folder, f'heatmap_{annotation_task}.png')
    df, ax = utils.create_heatmap(piek_json,
                                  antske_json,
                                  output_path,
                                  verbose=1)
| 35.136905 | 119 | 0.61528 | 676 | 5,903 | 4.992604 | 0.20858 | 0.087111 | 0.024 | 0.023111 | 0.332444 | 0.291259 | 0.212148 | 0.161185 | 0.107259 | 0.074667 | 0 | 0.004077 | 0.29358 | 5,903 | 167 | 120 | 35.347305 | 0.805276 | 0.24208 | 0 | 0.252632 | 0 | 0 | 0.084723 | 0.030465 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.084211 | 0 | 0.084211 | 0.136842 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e8f0c6d6c771214425a7ab98e9069c40fff82ab | 56,489 | py | Python | pytreez/v1.py | shawwn/pytreez | c89fdd707c7707e14a49cf5b8b6c40cf064fe34c | [
"MIT"
] | null | null | null | pytreez/v1.py | shawwn/pytreez | c89fdd707c7707e14a49cf5b8b6c40cf064fe34c | [
"MIT"
] | null | null | null | pytreez/v1.py | shawwn/pytreez | c89fdd707c7707e14a49cf5b8b6c40cf064fe34c | [
"MIT"
] | null | null | null | from __future__ import annotations
from enum import Enum
from dataclasses import dataclass
import typing
import builtins as py
import operator as op
import collections
import functools
from functools import partial
from copy import deepcopy
T = typing.TypeVar("T")
# https://stackoverflow.com/questions/1966591/hasnext-in-python-iterators
class iter(object):
    """Iterator wrapper that adds a ``hasnext()`` peek operation.

    ``hasnext()`` pre-fetches one element (cached as ``_thenext``) so
    callers can test for exhaustion before calling ``next()``.
    Shadows the builtin ``iter`` on purpose (module-local convention).
    """
    def __init__(self, it):
        self.it = py.iter(it)
        self._hasnext = None  # None = unknown, True/False once peeked

    def __iter__(self):
        return self

    def next(self):
        # Serve the element cached by a previous hasnext() call, if any.
        if hasattr(self, '_thenext'):
            value = self._thenext
            delattr(self, '_thenext')
            self._hasnext = None
            return value
        try:
            return py.next(self.it)
        except StopIteration:
            self._hasnext = False
            # Bug fix: the original swallowed StopIteration here and fell
            # off the end of the function, silently returning None on an
            # exhausted iterator; propagate it to honor the iterator
            # protocol. (The old "else: self._hasnext = True" was dead
            # code -- the `return` inside `try` skips the else clause.)
            raise

    # Make the wrapper a real Python iterator (usable in for-loops, list(), ...).
    __next__ = next

    def hasnext(self):
        # Peek by fetching one element ahead and remember the answer.
        if self._hasnext is None:
            try:
                self._thenext = py.next(self.it)
            except StopIteration:
                self._hasnext = False
            else:
                self._hasnext = True
        return self._hasnext
def next(it: iter):
    # Module-level convenience wrapper around the custom `iter` class's
    # next() method. Shadows the builtin `next`; only valid for objects
    # exposing a `.next()` method.
    return it.next()
def finished_iterating(x: iter):
    """Return True when the peekable iterator *x* has no more elements."""
    has_more = x.hasnext()
    return not has_more
def not_equal(a, b) -> py.bool:
    """Inequality as a function; identical semantics to ``a != b``."""
    return op.ne(a, b)
def str_join(xs: typing.Iterable, sep=', '):
    """Stringify every element of *xs* and join them with *sep*."""
    return sep.join(map(str, xs))
def str_cat(*xs: typing.Union[typing.Iterable, py.str], sep=', ') -> py.str:
    """Concatenate the arguments into one string.

    Each argument is appended verbatim when it is already a ``str``;
    otherwise it is flattened with :func:`str_join` using *sep*.

    Fix: the original annotation ``typing.Optional[typing.Iterable,
    py.str]`` is invalid -- ``Optional`` takes exactly one parameter and
    the form raises under ``typing.get_type_hints``; ``Union`` is what
    was meant.
    """
    r = []
    for x in xs:
        if isinstance(x, str):
            r.append(x)
        else:
            r.append(str_join(x, sep=sep))
    return ''.join(r)
class PyTreeKind(Enum):
    """Node categories a PyTreeDef traversal entry can have.

    Values mirror the node kinds of the C++ pytree implementation this
    module transliterates (see the C++ comments in flatten_into)."""
    kLeaf = "leaf"                          # opaque leaf value
    kNone = "None"                          # the None singleton
    kTuple = "tuple"
    kNamedTuple = "collections.namedtuple"  # detected heuristically via _fields
    kList = "list"
    kDict = "dict"
    kCustom = "custom"                      # user-registered container type
class PyTreeTypeRegistry:
    """Registry mapping container types to their pytree handling.

    Holds one :class:`Registration` per known container type.  The
    builtin containers (None, tuple, list, dict) are registered when the
    registry is constructed; user types are added via :meth:`register`.
    """

    def __init__(self):
        self.registrations_ = []
        # Pre-register the builtin container types, in the same order as
        # the C++ implementation.
        for builtin_type, kind in ((type(None), PyTreeKind.kNone),
                                   (tuple, PyTreeKind.kTuple),
                                   (list, PyTreeKind.kList),
                                   (dict, PyTreeKind.kDict)):
            self.registrations_.append(self.Registration(kind=kind, type=builtin_type))

    @dataclass
    class Registration:
        # Which PyTreeKind this type maps to.
        kind: PyTreeKind
        # The following values are populated for custom types.
        # The Python type object, used to identify the type.
        type: typing.Any
        # A function with signature: object -> (iterable, aux_data)
        to_iterable: typing.Callable = None
        # A function with signature: (aux_data, iterable) -> object
        from_iterable: typing.Callable = None

        def __post_init__(self):
            # Accept a raw enum value (e.g. "list") and coerce it to the
            # corresponding PyTreeKind member.
            if not isinstance(self.kind, PyTreeKind):
                for member in PyTreeKind:
                    if self.kind == member.value:
                        self.kind = member
                        break
            if not isinstance(self.kind, PyTreeKind):
                raise ValueError(f"Expected kind to be PyTreeKind, got {self.kind!r}")

        def __eq__(self, other: PyTreeTypeRegistry.Registration):
            # Equality deliberately ignores the to/from_iterable callables.
            return (self.kind.value == other.kind.value
                    and self.type == other.type)

    @classmethod
    def singleton(cls) -> PyTreeTypeRegistry:
        """Return the process-wide registry, creating it on first use."""
        if not hasattr(cls, 'inst'):
            cls.inst = cls()
        return cls.inst

    @classmethod
    def register(cls, type: typing.Type, to_iterable: typing.Callable, from_iterable: typing.Callable):
        """Register a custom container type; raises ValueError on duplicates."""
        registry = cls.singleton()
        entry = cls.Registration(PyTreeKind.kCustom, type, to_iterable, from_iterable)
        if entry in registry.registrations_:
            raise ValueError("Duplicate custom PyTreeDef type registration for %s." % repr(type))
        registry.registrations_.append(entry)

    @classmethod
    def lookup(cls, type: typing.Type):
        """Return the Registration for *type*, or None when unregistered."""
        for entry in cls.singleton().registrations_:
            if entry.type == type:
                return entry
class PyTreeDef:
"""A PyTreeDef describes the tree structure of a PyTree.
A PyTree is a tree of Python values, where the interior nodes are tuples, lists,
dictionaries, or user-defined containers, and the leaves are other objects."""
def __init__(self):
    # Post-order list of Nodes describing the tree: flatten_into appends
    # each node only after recursing into its children.
    self.traversal_: typing.List[PyTreeDef.Node] = []
@dataclass
class Node:
    """One entry in a PyTreeDef's post-order traversal."""
    kind: PyTreeKind = PyTreeKind.kLeaf
    # Arity for non-kLeaf types.
    arity: int = 0
    # Kind-specific auxiliary data. For a kNamedTuple, contains the tuple type
    # object. For a kDict, contains a sorted list of keys. For a kCustom type,
    # contains the auxiliary data returned by the `to_iterable` function.
    node_data: typing.Any = None
    custom: PyTreeTypeRegistry.Registration = None
    # Number of leaf nodes in the subtree rooted at this node.
    num_leaves: int = 0
    # Number of leaf and interior nodes in the subtree rooted at this node.
    num_nodes: int = 0

    def __eq__(a: PyTreeDef.Node, b: PyTreeDef.Node):
        """Structural equality; num_leaves/num_nodes are derived and ignored."""
        if a.kind.value != b.kind.value:
            return False
        if a.arity != b.arity:
            return False
        if (a.node_data is None) != (b.node_data is None):
            return False
        if a.custom != b.custom:
            return False
        if a.node_data is not None and not_equal(a.node_data, b.node_data):
            return False
        return True

    def str(node, agenda: typing.List):
        """Pop this node's rendered children off *agenda* and push this
        node's own rendering (agenda acts as a post-order stack).
        """
        assert len(agenda) >= node.arity, "Too few elements for container."
        # Bug fix: the original sliced agenda[-node.arity:], which for
        # arity == 0 (e.g. an empty tuple) selects the WHOLE agenda
        # instead of no elements; slice from an explicit start index.
        children = str_join(agenda[len(agenda) - node.arity:], ", ")
        representation = []
        if node.kind == PyTreeKind.kLeaf:
            agenda.append("*")
            return
        elif node.kind == PyTreeKind.kNone:
            representation.append("None")
        elif node.kind == PyTreeKind.kTuple:
            # Tuples with only one element must have a trailing comma.
            if node.arity == 1:
                children += ","
            representation.append(str_cat("(", children, ")"))
        elif node.kind == PyTreeKind.kList:
            representation.append(str_cat("[", children, "]"))
        elif node.kind == PyTreeKind.kDict:
            assert len(node.node_data) == node.arity, "Number of keys and entries does not match."
            separator = "{"
            keys = node.node_data
            values = agenda[len(agenda) - node.arity:]  # same slice fix as above
            for key, value in zip(keys, values):
                representation.append('%s%s: %s' % (separator, repr(key), value))
                separator = ", "
            representation.append('}')
        elif node.kind == PyTreeKind.kNamedTuple or node.kind == PyTreeKind.kCustom:
            if node.kind == PyTreeKind.kNamedTuple:
                kind = "namedtuple"
            else:
                # `str` resolves to the builtin here (the method name only
                # lives in the class namespace, not the function scope).
                kind = str(node.custom.type)
            data = '[%s]' % str(node.node_data)
            representation.append('CustomNode(%s%s, [%s])' % (kind, data, children))
        # Replace the consumed children with this node's rendering.
        for i in range(node.arity):
            agenda.pop()
        agenda.append(''.join(representation))
def __str__(self) -> py.str:
    """Render the PyTreeDef by replaying its post-order traversal."""
    stack = []
    for entry in self.traversal_:
        entry.str(stack)
    # A well-formed traversal collapses to exactly one rendered root.
    assert len(stack) == 1, "PyTreeDef traversal did not yield a singleton."
    return str_cat("PyTreeDef(", stack, ")")
def __repr__(self) -> py.str:
    # Delegate to __str__ (the builtin str() invokes it).
    return str(self)
def __eq__(self, other: PyTreeDef):
    """Structural equality over the node traversals.

    num_leaves and num_nodes need no separate comparison since they are
    derivable from the other node data.
    """
    if len(self.traversal_) != len(other.traversal_):
        return False
    for mine, theirs in zip(self.traversal_, other.traversal_):
        if mine != theirs:
            return False
    return True
@classmethod
def get_kind(cls, obj: typing.Any) -> (PyTreeKind, PyTreeTypeRegistry.Registration):
    """Classify *obj*: return its PyTreeKind and registry entry (if any)."""
    registration = PyTreeTypeRegistry.lookup(type(obj))
    if registration is not None:
        return registration.kind, registration
    # Namedtuples can only be identified heuristically: a tuple subclass
    # that exposes a _fields attribute.
    if isinstance(obj, tuple) and hasattr(obj, '_fields'):
        return PyTreeKind.kNamedTuple, registration
    return PyTreeKind.kLeaf, registration
def flatten_into(self, handle: typing.Any, leaves: typing.List[typing.Any], leaf_predicate: typing.Callable):
    """Recursively flatten *handle*, appending leaf values to *leaves*
    and one post-order Node per (sub)tree to ``self.traversal_``.

    :param handle: the pytree (or subtree) to flatten.
    :param leaves: output list that collects leaf objects in order.
    :param leaf_predicate: optional callable; any value it accepts as
        truthy is treated as a leaf regardless of its type.
    """
    node = PyTreeDef.Node()
    nodes_before = len(self.traversal_)
    leaves_before = len(leaves)
    if leaf_predicate is not None and leaf_predicate(handle):
        # Forced leaf: the predicate overrides type-based classification.
        leaves.append(handle)
    else:
        node.kind, node.custom = self.get_kind(handle)
        if node.kind == PyTreeKind.kNone:
            # None is an empty container: no children, nothing to do.
            pass
        elif node.kind == PyTreeKind.kTuple or node.kind == PyTreeKind.kList:
            count = 0
            for child in handle:
                count += 1
                self.flatten_into(child, leaves, leaf_predicate)
            node.arity = count
        elif node.kind == PyTreeKind.kDict:
            # Keys are sorted so the treedef is order-insensitive.
            sorted_keys = list(sorted(handle.keys()))
            node.node_data = sorted_keys
            node.arity = len(sorted_keys)
            for key in sorted_keys:
                self.flatten_into(handle[key], leaves, leaf_predicate)
        elif node.kind == PyTreeKind.kCustom:
            # Registered types expose (children_iterable, aux_data).
            pair = node.custom.to_iterable(handle)
            if len(pair) != 2:
                raise RuntimeError("PyTree custom to_iterable function should return a pair")
            node.node_data = pair[1]
            count = 0
            for child in pair[0]:
                count += 1
                self.flatten_into(child, leaves, leaf_predicate)
            node.arity = count
        elif node.kind == PyTreeKind.kNamedTuple:
            # Remember the concrete namedtuple type for unflattening.
            node.arity = len(handle)
            node.node_data = type(handle)
            for field_value in handle:
                self.flatten_into(field_value, leaves, leaf_predicate)
        else:
            assert node.kind == PyTreeKind.kLeaf
            leaves.append(handle)
    # Subtree sizes, then append *after* the children (post-order).
    node.num_nodes = len(self.traversal_) - nodes_before + 1
    node.num_leaves = len(leaves) - leaves_before
    self.traversal_.append(node)
@classmethod
def flatten(cls, x: typing.Any, leaf_predicate: typing.Callable = None) -> (typing.Iterable, PyTreeDef):
    """Flatten a pytree into ``(leaves, treedef)``.

    Returns references to the flattened objects, which might be
    temporary objects in the case of custom pytype handlers.
    """
    treedef = cls()
    flat_leaves = []
    treedef.flatten_into(x, flat_leaves, leaf_predicate)
    return flat_leaves, treedef
@classmethod
def all_leaves(cls, x: typing.Any) -> bool:
    """Tests whether every element of the given iterable is a leaf."""
    return all(cls.get_kind(v)[0] == PyTreeKind.kLeaf for v in x)
def flatten_up_to(self, xs: typing.Any):
    """Flattens a Pytree up to this PyTreeDef. 'self' must be a tree prefix of
    the tree-structure of 'xs'.
    For example, if we flatten a value [(1, (2, 3)), {"foo": 4}] with a treedef
    [(*, *), *], the result is the list of leaves [1, (2, 3), {"foo": 4}]."""
    leaves = resize(None, self.num_leaves)
    # Work stack of subtrees still to be matched.  The recorded traversal is
    # post-order, so walking it in reverse visits the root first.
    agenda = [xs]
    it = iter(self.traversal_[::-1])
    leaf = self.num_leaves - 1
    while agenda:
        if finished_iterating(it):
            raise ValueError("Tree structures did not match: %s vs %s" % (
                py.repr(xs), py.str(self)))
        node = next(it)
        obj = agenda.pop()
        if node.kind == PyTreeKind.kLeaf:
            assert leaf >= 0, "Leaf count mismatch"
            # Leaves were recorded left-to-right, so fill from the back.
            leaves[leaf] = obj
            leaf -= 1
        elif node.kind == PyTreeKind.kNone:
            pass
        elif node.kind == PyTreeKind.kTuple:
            if not isinstance(obj, py.tuple):
                raise ValueError("Expected tuple, got %s." % py.repr(obj))
            if len(obj) != node.arity:
                raise ValueError("Tuple arity mismatch: %d != %d; tuple: %s." % (
                    len(obj), node.arity, py.repr(obj)))
            agenda.extend(obj)
        elif node.kind == PyTreeKind.kList:
            if not isinstance(obj, py.list):
                raise ValueError("Expected list, got %s." % py.repr(obj))
            if len(obj) != node.arity:
                raise ValueError("List arity mismatch: %d != %d; list: %s." % (
                    len(obj), node.arity, py.repr(obj)))
            agenda.extend(obj)
        elif node.kind == PyTreeKind.kDict:
            if not isinstance(obj, py.dict):
                raise ValueError("Expected dict, got %s." % py.repr(obj))
            # Dict children were recorded in sorted-key order; match that here.
            sorted_keys = py.list(py.sorted(obj.keys()))
            if not_equal(sorted_keys, node.node_data):
                raise ValueError("Dict key mismatch; expected keys: %s; dict: %s." % (
                    py.repr(node.node_data), py.repr(obj)))
            for key in sorted_keys:
                agenda.append(obj[key])
        elif node.kind == PyTreeKind.kNamedTuple:
            if not isinstance(obj, py.tuple) or not hasattr(obj, "_fields"):
                raise ValueError("Expected named tuple, got %s." % py.repr(obj))
            if len(obj) != node.arity:
                raise ValueError("Named tuple arity mismatch: %d != %d; tuple: %s." % (
                    len(obj), node.arity, py.repr(obj)))
            if not_equal(py.type(obj), node.node_data):
                raise ValueError("Named tuple type mismatch: expected type: %s, tuple: %s." % (
                    py.repr(node.node_data), py.repr(obj)))
            agenda.extend(obj)
        elif node.kind == PyTreeKind.kCustom:
            registration = PyTreeTypeRegistry.lookup(py.type(obj))
            if registration != node.custom:
                raise ValueError("Custom node type mismatch: expected type: %s, value: %s." % (
                    py.repr(node.custom.type), py.repr(obj)))
            out = node.custom.to_iterable(obj)
            if len(out) != 2:
                raise RuntimeError("PyTree custom to_iterable function should return a pair")
            if not_equal(node.node_data, out[1]):
                raise ValueError("Mismatch custom node data: %s != %s; value: %s." % (
                    py.repr(node.node_data), py.repr(out[1]), py.repr(obj)))
            arity = len(out[0])
            agenda.extend(out[0])
            if arity != node.arity:
                raise ValueError("Custom type arity mismatch: %d != %d; value: %s." % (
                    arity, node.arity, py.repr(obj)))
    if not finished_iterating(it) or leaf != -1:
        raise ValueError("Tree structures did not match: %s vs %s" % (
            py.repr(xs), py.str(self)))
    return leaves
@property
def num_leaves(self) -> py.int:
    """Number of leaves in the tree; 0 for an empty traversal."""
    # The root (last entry of the post-order traversal) caches the total.
    return self.traversal_[-1].num_leaves if self.traversal_ else 0
@property
def num_nodes(self) -> py.int:
    # Total number of nodes (interior nodes plus leaves) in the traversal.
    return len(self.traversal_)
@staticmethod
def tuple(defs: typing.Iterable[PyTreeDef]):
    """Makes a Tuple PyTreeDef out of a vector of PyTreeDefs.

    Args:
      defs: iterable of child PyTreeDefs; consumed once.
    Returns:
      A new PyTreeDef whose root is a tuple node with the given children.
    """
    # Materialize first: the original called len() on the iterable after
    # consuming it, which fails for one-shot iterables such as generators.
    defs = py.list(defs)
    out = PyTreeDef()
    for def_ in defs:
        # Copy each child's traversal so the result owns independent nodes.
        out.traversal_.extend(deepcopy(def_.traversal_))
    node = PyTreeDef.Node(kind=PyTreeKind.kTuple, arity=len(defs))
    out.traversal_.append(node)
    return out
def children(self) -> typing.List[PyTreeDef]:
    """Returns an independent PyTreeDef for each direct child of the root."""
    if not self.traversal_:
        return []
    root = self.traversal_[-1]
    result = [None] * root.arity
    # Walk backwards over the post-order traversal, peeling off one child
    # subtree (of node.num_nodes entries) at a time.
    pos = len(self.traversal_) - 1
    for i in range(root.arity - 1, -1, -1):
        child = PyTreeDef()
        node = self.traversal_[pos - 1]
        assert pos >= node.num_nodes, "children() walked off start of array"
        child.traversal_.extend(deepcopy(self.traversal_[pos - node.num_nodes:pos]))
        result[i] = child
        pos -= node.num_nodes
    assert pos == 0, "pos != 0 at end of PyTreeDef::Children"
    return result
def unflatten(self, leaves: typing.Iterable):
    """Rebuilds a pytree from `leaves` according to this treedef.

    Walks the post-order traversal, pushing each leaf onto a stack and, for
    every interior node, replacing its top `arity` entries with the rebuilt
    container node."""
    agenda = []
    it = iter(leaves)
    leaf_count = 0
    for node in self.traversal_:
        assert len(agenda) >= node.arity, "Too few elements for TreeDef node."
        if node.kind == PyTreeKind.kLeaf:
            if finished_iterating(it):
                raise ValueError("Too few leaves for PyTreeDef; expected %d, got %d" % (
                    self.num_leaves, leaf_count))
            agenda.append(next(it))
            leaf_count += 1
        else:
            # Interior node: its children are the last `arity` stack entries.
            size = len(agenda)
            span = agenda[size - node.arity:size] if node.arity > 0 else []
            rebuilt = self.make_node(node, span)
            resize(agenda, size - node.arity)
            agenda.append(rebuilt)
    if not finished_iterating(it):
        raise ValueError("Too many leaves for PyTreeDef; expected %d." % (
            self.num_leaves))
    # The traversal ends at the root, so the stack holds exactly the result.
    return agenda[-1]
@staticmethod
def make_node(node: PyTreeDef.Node, children: typing.Iterable):
    """Helper that manufactures an instance of a node given its children.

    `children` must be indexable and contain exactly `node.arity` entries."""
    assert len(children) == node.arity, "Node arity mismatch."
    assert node.kind != PyTreeKind.kLeaf, "MakeNode not implemented for leaves."
    if node.kind == PyTreeKind.kNone:
        return None
    if node.kind in (PyTreeKind.kTuple, PyTreeKind.kNamedTuple):
        values = [children[i] for i in range(node.arity)]
        if node.kind == PyTreeKind.kNamedTuple:
            # node_data holds the namedtuple type; rebuild via its constructor.
            return node.node_data(*values)
        return py.tuple(values)
    if node.kind == PyTreeKind.kList:
        return [children[i] for i in range(node.arity)]
    if node.kind == PyTreeKind.kDict:
        # node_data holds the (sorted) key list recorded at flatten time.
        keys = node.node_data
        result = py.dict()
        for i in range(node.arity):
            result[keys[i]] = children[i]
        return result
    if node.kind == PyTreeKind.kCustom:
        packed = py.tuple(children[i] for i in range(node.arity))
        return node.custom.from_iterable(node.node_data, packed)
    assert False, "Unreachable code."
def compose(self, inner: PyTreeDef) -> PyTreeDef:
    """Composes two PyTreeDefs, replacing the leaves of this tree with copies of `inner`."""
    result = PyTreeDef()
    for node in self.traversal_:
        if node.kind == PyTreeKind.kLeaf:
            # Splice a full copy of the inner traversal in place of the leaf.
            result.traversal_.extend(deepcopy(inner.traversal_))
        else:
            result.traversal_.append(deepcopy(node))
    root = self.traversal_[-1]
    inner_root = inner.traversal_[-1]
    # TODO(tomhennigan): This should update all nodes in the traversal.
    new_root = result.traversal_[-1]
    new_root.num_nodes = (root.num_nodes - root.num_leaves) + \
        (inner_root.num_nodes * root.num_leaves)
    new_root.num_leaves *= inner_root.num_leaves
    return result
# py::object PyTreeDef::FromIterableTreeHelper(
# py::handle xs,
# absl::InlinedVector<PyTreeDef::Node, 1>::const_reverse_iterator* it) const {
def from_iterable_tree_helper(self, xs, it: iter):
    """Recursive helper used to implement from_iterable_tree().

    `it` iterates the traversal in reverse (root first) and is consumed as
    the recursion descends."""
    if finished_iterating(it):
        raise ValueError("Tree structures did not match.")
    node = next(it)
    if node.kind == PyTreeKind.kLeaf:
        return xs
    # Interior node: xs must be an iterable with exactly `arity` children.
    subtrees = [child for child in xs]
    if len(subtrees) != node.arity:
        raise ValueError("Arity mismatch between trees")
    # Children were recorded left-to-right, so rebuild right-to-left to
    # match the reversed traversal order.
    for j in range(node.arity - 1, -1, -1):
        subtrees[j] = self.from_iterable_tree_helper(subtrees[j], it)
    return self.make_node(node, subtrees)
# }
# pybind11::object Walk(const pybind11::function& f_node,
# pybind11::handle f_leaf,
# pybind11::iterable leaves) const;
def walk(self, f_node: typing.Callable, f_leaf: typing.Callable, leaves: typing.Iterable):
    """Maps a function over a PyTree structure, applying f_leaf to each leaf, and
    f_node to each container node.
    TODO(phawkins): use flattening everywhere instead and delete this method."""
    agenda = []
    it = iter(leaves)
    for node in self.traversal_:
        if node.kind == PyTreeKind.kLeaf:
            if finished_iterating(it):
                raise ValueError("Too few leaves for PyTreeDef")
            value = next(it)
            agenda.append(value if f_leaf is None else f_leaf(value))
        elif node.kind in [PyTreeKind.kNone,
                           PyTreeKind.kTuple,
                           PyTreeKind.kNamedTuple,
                           PyTreeKind.kList,
                           PyTreeKind.kDict,
                           PyTreeKind.kCustom]:
            assert len(agenda) >= node.arity, "Too few elements for custom type."
            # The node's children are the last `arity` stack entries, in order.
            children = [None] * node.arity
            for i in range(node.arity - 1, -1, -1):
                children[i] = agenda.pop()
            packed = py.tuple(children)
            agenda.append(packed if f_node is None else f_node(packed))
    if not finished_iterating(it):
        raise ValueError("Too many leaves for PyTreeDef")
    assert len(agenda) == 1, "PyTreeDef traversal did not yield a singleton."
    return agenda[-1]
# py::object PyTreeDef::FromIterableTree(py::handle xs) const {
def from_iterable_tree(self, xs):
    """Given a tree of iterables with the same node/leaf structure as this PyTree,
    build the corresponding PyTree.
    TODO(phawkins): use flattening everywhere instead and delete this method."""
    # The helper consumes the traversal root-first (reverse of post-order).
    rev = iter(self.traversal_[::-1])
    result = self.from_iterable_tree_helper(xs, rev)
    if not finished_iterating(rev):
        raise ValueError("Tree structures did not match.")
    return result
# }
def resize(l, i):
    """Resizes list `l` in place to length `i`, mirroring C++
    std::vector::resize semantics: new slots are filled with None and excess
    slots are dropped.  If `l` is None, returns a fresh list of `i` Nones.

    Args:
      l: list to resize in place, or None.
      i: target length; negative values are treated as 0 (except for the
        `l is None` case, where range() already yields an empty list).
    Returns:
      The resized list (the same object as `l` when `l` is not None).
    """
    if l is None:
        return [None for _ in range(i)]
    n = len(l)
    i = max(i, 0)
    if n < i:
        # Bug fix: this previously did l.extend(range(i - n)), padding with
        # the integers 0..i-n-1; the intended placeholder (see the C++
        # vector semantics this mirrors) is None.
        l.extend([None] * (i - n))
    elif n > i:
        del l[i:]
    assert len(l) == i
    return l
def length_hint(obj: typing.Any, default=0):
    """Return an estimate of the number of items in obj.
    This is useful for presizing containers when building from an
    iterable.
    If the object supports len(), the result will be
    exact. Otherwise, it may over- or under-estimate by an
    arbitrary amount. The result will be an integer >= 0.
    """
    # Exact answer first: objects with __len__.
    try:
        return len(obj)
    except TypeError:
        pass
    # Fall back to the __length_hint__ protocol, looked up on the type
    # (matching how special methods are resolved).
    try:
        hint_method = type(obj).__length_hint__
    except AttributeError:
        return default
    try:
        hint = hint_method(obj)
    except TypeError:
        return default
    if hint is NotImplemented:
        return default
    if not isinstance(hint, int):
        raise TypeError("Length hint must be an integer, not %r" %
                        type(hint))
    if hint < 0:
        raise ValueError("__length_hint__() should return >= 0")
    return hint
def safe_zip(*args):
    """Like zip(), but asserts that every argument has the same length
    as the first instead of silently truncating."""
    expected = len(args[0])
    for other in args[1:]:
        assert len(other) == expected, 'length mismatch: {}'.format(list(map(len, args)))
    return list(zip(*args))
def tree_flatten(tree: typing.Any, is_leaf: Optional[Callable[[Any], bool]] = None) -> (typing.Iterable, PyTreeDef):
    """Flattens a pytree.
    Args:
      tree: a pytree to flatten.
      is_leaf: an optionally specified function that will be called at each
        flattening step. It should return a boolean, which indicates whether
        the flattening should traverse the current object, or if it should be
        stopped immediately, with the whole subtree being treated as a leaf.
    Returns:
      A pair where the first element is a list of leaf values and the second
      element is a treedef representing the structure of the flattened tree.
    """
    return PyTreeDef.flatten(tree, leaf_predicate=is_leaf)
def tree_unflatten(treedef: PyTreeDef, leaves: typing.Iterable):
    """Reconstructs a pytree from the treedef and the leaves.
    The inverse of `tree_flatten`.
    Args:
      treedef: the treedef to reconstruct
      leaves: the list of leaves to use for reconstruction. The list must
        match the leaves of the treedef.
    Returns:
      The reconstructed pytree, containing the `leaves` placed in the
      structure described by `treedef`.
    """
    # Delegates to PyTreeDef.unflatten, which raises ValueError when the
    # number of leaves does not match the treedef.
    return treedef.unflatten(leaves)
def tree_leaves(tree: typing.Any) -> typing.Iterable:
    """Gets the leaves of a pytree."""
    leaves, _ = tree_flatten(tree)
    return leaves
def tree_structure(tree: typing.Any) -> PyTreeDef:
    """Gets the treedef for a pytree."""
    _, treedef = tree_flatten(tree)
    return treedef
def treedef_tuple(treedefs: typing.Iterable[PyTreeDef]) -> PyTreeDef:
    """Makes a tuple treedef from a list of child treedefs."""
    return PyTreeDef.tuple(treedefs)
def treedef_children(treedef: PyTreeDef) -> typing.Iterable[PyTreeDef]:
    """Returns a PyTreeDef for each direct child of the root of `treedef`."""
    return treedef.children()
def treedef_is_leaf(treedef: PyTreeDef) -> py.bool:
    """Returns True if `treedef` describes a single node (i.e. a leaf)."""
    return treedef.num_nodes == 1
def register_pytree_node(nodetype: typing.Type, flatten_func: typing.Callable, unflatten_func: typing.Callable):
    """Extends the set of types that are considered internal nodes in pytrees.
    See `example usage <https://jax.readthedocs.io/en/latest/notebooks/JAX_pytrees.html#Pytrees-are-extensible>`_.
    Args:
      nodetype: a Python type to treat as an internal pytree node.
      flatten_func: a function to be used during flattening, taking a value
        of type `nodetype` and returning a pair, with (1) an iterable for
        the children to be flattened recursively, and (2) some auxiliary data
        to be stored in the treedef and to be passed to the `unflatten_func`.
      unflatten_func: a function taking two arguments: the auxiliary data that
        was returned by `flatten_func` and stored in the treedef, and the
        unflattened children. The function should return an instance of
        `nodetype`.
    """
    # Registration is held by the module-level PyTreeTypeRegistry.
    PyTreeTypeRegistry.register(nodetype, flatten_func, unflatten_func)
    # _registry[nodetype] = _RegistryEntry(flatten_func, unflatten_func)
def register_pytree_node_class(cls: typing.Type):
    """Extends the set of types that are considered internal nodes in pytrees.
    This function is a thin wrapper around ``register_pytree_node``, and provides
    a class-oriented interface:
    @register_pytree_node_class
    class Special:
      def __init__(self, x, y):
        self.x = x
        self.y = y
      def tree_flatten(self):
        return ((self.x, self.y), None)
      @classmethod
      def tree_unflatten(cls, aux_data, children):
        return cls(*children)
    """
    # Flattening dispatches to the instance's own tree_flatten method.
    flatten_fn = op.methodcaller('tree_flatten')
    register_pytree_node(cls, flatten_fn, cls.tree_unflatten)
    return cls
def tree_map(f: typing.Callable, tree: typing.Any) -> PyTreeDef:
    """Maps a function over a pytree to produce a new pytree.
    Args:
      f: function to be applied at each leaf.
      tree: a pytree to be mapped over.
    Returns:
      A new pytree with the same structure as `tree` but with the value at each
      leaf given by `f(x)` where `x` is the value at the corresponding leaf in
      `tree`.
    """
    leaves, treedef = tree_flatten(tree)
    return treedef.unflatten([f(leaf) for leaf in leaves])
def tree_multimap(f: typing.Callable, tree: typing.Any, *rest: PyTreeDef,
                  is_leaf: Optional[Callable[[Any], bool]] = None) -> typing.Any:
    """Maps a multi-input function over pytree args to produce a new pytree.
    Args:
      f: function that takes `1 + len(rest)` arguments, to be applied at the
        corresponding leaves of the pytrees.
      tree: a pytree to be mapped over, with each leaf providing the first
        positional argument to `f`.
      *rest: a tuple of pytrees, each of which has the same structure as tree
        or has tree as a prefix.
      is_leaf: an optionally specified function that will be called at each
        flattening step. It should return a boolean, which indicates whether
        the flattening should traverse the current object, or if it should be
        stopped immediately, with the whole subtree being treated as a leaf.
    Returns:
      A new pytree with the same structure as `tree` but with the value at each
      leaf given by `f(x, *xs)` where `x` is the value at the corresponding leaf
      in `tree` and `xs` is the tuple of values at corresponding nodes in
      `rest`.
    """
    leaves, treedef = tree_flatten(tree, is_leaf)
    # Flatten every other tree up to `treedef` so leaves align positionally.
    all_leaves = [leaves]
    for other in rest:
        all_leaves.append(treedef.flatten_up_to(other))
    return treedef.unflatten(f(*xs) for xs in zip(*all_leaves))
# TODO(mattjj,phawkins): consider removing this function
def _process_pytree(process_node: typing.Callable, tree: typing.Any):
    """Flattens `tree` and maps `process_node` over its container nodes via
    PyTreeDef.walk(); leaves pass through unchanged (f_leaf=None).
    Returns a (walk result, treedef) pair."""
    leaves, treedef = tree_flatten(tree)
    return treedef.walk(process_node, None, leaves), treedef
def build_tree(treedef: PyTreeDef, xs: typing.Any):
    """Builds a pytree with structure `treedef` from the tree of iterables `xs`."""
    return treedef.from_iterable_tree(xs)
def tree_transpose_old(outer_treedef: PyTreeDef, inner_treedef: PyTreeDef, pytree_to_transpose: typing.Any):
    """Turns an outer-of-inner pytree nesting into inner-of-outer (legacy
    variant that validates the full composed structure up front)."""
    flat, treedef = tree_flatten(pytree_to_transpose)
    expected_treedef = outer_treedef.compose(inner_treedef)
    if treedef != expected_treedef:
        raise TypeError("Mismatch\n{}\n != \n{}".format(treedef, expected_treedef))
    inner_size = inner_treedef.num_leaves
    outer_size = outer_treedef.num_leaves
    # Chunk the flat leaves into outer_size rows of inner_size, transpose the
    # rows, then rebuild: each column becomes an outer tree, the set of
    # columns becomes the inner tree.
    rows = [flat[i * inner_size:(i + 1) * inner_size] for i in range(outer_size)]
    columns = py.list(zip(*rows))
    subtrees = [tree_unflatten(outer_treedef, column) for column in columns]
    return tree_unflatten(inner_treedef, subtrees)
def tree_transpose(outer_treedef, inner_treedef, pytree_to_transpose):
    """Turns an outer-of-inner pytree nesting into inner-of-outer.
    Cheaper than tree_transpose_old: only the leaf counts are validated; the
    composed treedef is built solely for the error message."""
    flat, treedef = tree_flatten(pytree_to_transpose)
    inner_size = inner_treedef.num_leaves
    outer_size = outer_treedef.num_leaves
    if treedef.num_leaves != (inner_size * outer_size):
        expected_treedef = outer_treedef.compose(inner_treedef)
        raise TypeError(f"Mismatch\n{treedef}\n != \n{expected_treedef}")
    # Chunk into outer_size rows of inner_size leaves, transpose, rebuild.
    rows = [flat[i * inner_size:(i + 1) * inner_size] for i in range(outer_size)]
    columns = py.list(zip(*rows))
    subtrees = py.list(tree_unflatten(outer_treedef, column) for column in columns)
    return tree_unflatten(inner_treedef, subtrees)
# Sentinel distinguishing "no initializer supplied" from an explicit None.
no_initializer = object()
@typing.overload
def tree_reduce(function: typing.Callable[[T, typing.Any], T],
                tree: typing.Any) -> T:
    ...
@typing.overload
def tree_reduce(function: typing.Callable[[T, typing.Any], T],
                tree: typing.Any,
                initializer: T) -> T:
    ...
def tree_reduce(function: typing.Callable[[T, typing.Any], T],
                tree: typing.Any,
                initializer: typing.Any = no_initializer) -> T:
    """Reduces `function` over the leaves of `tree`, optionally seeded with
    `initializer` (same semantics as functools.reduce)."""
    if initializer is no_initializer:
        return functools.reduce(function, tree_leaves(tree))
    else:
        return functools.reduce(function, tree_leaves(tree), initializer)
def tree_all(tree: typing.Any):
    """Returns True if every leaf of `tree` is truthy."""
    return py.all(tree_leaves(tree))
def all_leaves(tree: typing.Iterable):
    """Returns True iff every element of the iterable `tree` is a leaf."""
    return PyTreeDef.all_leaves(tree)
# OrderedDict: children are the values (in insertion order); the keys are
# kept as auxiliary data so ordering survives a flatten/unflatten round-trip.
register_pytree_node(
    collections.OrderedDict,
    lambda x: (list(x.values()), list(x.keys())),
    lambda keys, values: collections.OrderedDict(safe_zip(keys, values)))
# defaultdict: auxiliary data additionally carries the default_factory so it
# can be restored on unflatten.
register_pytree_node(
    collections.defaultdict,
    lambda x: (tuple(x.values()), (x.default_factory, tuple(x.keys()))),
    lambda s, values: collections.defaultdict(s[0], safe_zip(s[1], values)))
class Partial(functools.partial):
    """A version of functools.partial that works in pytrees.
    Use it for partial function evaluation in a way that is compatible with JAX's
    transformations, e.g., ``Partial(func, *args, **kwargs)``.
    (You need to explicitly opt-in to this behavior because we didn't want to give
    functools.partial different semantics than normal function closures.)
    """
# Partial flattens to ((args, keywords), func): the bound arguments are
# children, the wrapped callable is auxiliary data.
register_pytree_node(
    Partial,
    lambda partial_: ((partial_.args, partial_.keywords), partial_.func),
    lambda func, xs: Partial(func, *xs[0], **xs[1]),
)
if __name__ == '__main__':
    # Smoke test: exercise this module against the pytree test helpers.
    import sys, os
    tree_util = sys.modules[__name__]
    print(os.getcwd())
    # Make the sibling `tests` package importable when run from this directory.
    sys.path += [os.path.realpath(os.path.join(os.getcwd(), '..'))]
    from tests import test_pytreez as test_util
    # test_pytreez.test_standard()
    for tree in test_util.TREES:
        test_util.testTranspose(tree)
    case = test_util.TestCase()
    for arg in (
            (tree_util.Partial(test_util._dummy_func),),
            (tree_util.Partial(test_util._dummy_func, 1, 2),),
            (tree_util.Partial(test_util._dummy_func, x="a"),),
            (tree_util.Partial(test_util._dummy_func, 1, 2, 3, x=4, y=5),),
    ):
        # Unwrap a decorated test method (if wrapped) so it can be called directly.
        fn = case.testRoundtripPartial
        fn = getattr(fn, '__wrapped', fn)
        fn(*arg)
    special = test_util.Special(2., 3.)
    leaves, treedef = tree_util.tree_flatten(special)
    foo = str(treedef)
print(foo) | 41.023239 | 116 | 0.53683 | 6,270 | 56,489 | 4.726794 | 0.085646 | 0.025205 | 0.023687 | 0.01488 | 0.479468 | 0.430543 | 0.387219 | 0.334987 | 0.31761 | 0.281641 | 0 | 0.00432 | 0.348457 | 56,489 | 1,377 | 117 | 41.023239 | 0.800924 | 0.36878 | 0 | 0.333333 | 0 | 0 | 0.052585 | 0.001238 | 0 | 0 | 0 | 0.002905 | 0.023148 | 1 | 0.094136 | false | 0.003086 | 0.018519 | 0.018519 | 0.253086 | 0.003086 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e9120aa575048f077b52e67ac78e84ea39fd4ab | 1,151 | py | Python | setup.py | webmalc/booking-sites-parser | 992d6a6f3774eeebfbbd535b5de3baa373137150 | [
"Apache-2.0"
] | 2 | 2019-03-14T13:08:59.000Z | 2019-09-22T08:08:38.000Z | setup.py | webmalc/booking-sites-parser | 992d6a6f3774eeebfbbd535b5de3baa373137150 | [
"Apache-2.0"
] | null | null | null | setup.py | webmalc/booking-sites-parser | 992d6a6f3774eeebfbbd535b5de3baa373137150 | [
"Apache-2.0"
] | null | null | null | """The setup module for the booking-sites-parser"""
import pathlib
from setuptools import find_packages, setup
# Directory containing this setup.py.
HERE = pathlib.Path(__file__).parent
# Long description for PyPI, taken from the project README.
README = (HERE / 'README.md').read_text()
DESC = 'Parser for booking sites such as Booking.com, Homeaway.com, Airbnb.com'
setup(
    name='booking-sites-parser',
    version='0.0.3',
    description=DESC,
    long_description=README,
    long_description_content_type='text/markdown',
    url='https://github.com/webmalc/booking-sites-parser',
    author='webmalc',
    author_email='m@webmalc.pw',
    license="MIT",
    classifiers=[
        'License :: OSI Approved :: MIT License',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
    python_requires='>=3.6.0',
    # Ship every package except the test suite.
    packages=find_packages(
        exclude=['tests', '*.tests', '*.tests.*', 'tests.*']),
    include_package_data=True,
    install_requires=['requests', 'beautifulsoup4', 'fake-useragent'],
    # Exposes a `booking-sites-parser` console command on install.
    entry_points={
        "console_scripts": [
            'booking-sites-parser=booking_sites_parser.__main__:run',
        ]
    },
)
| 30.289474 | 79 | 0.646394 | 132 | 1,151 | 5.454545 | 0.545455 | 0.1 | 0.125 | 0.108333 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012973 | 0.196351 | 1,151 | 37 | 80 | 31.108108 | 0.765405 | 0.039096 | 0 | 0 | 0 | 0 | 0.43 | 0.049091 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.0625 | 0 | 0.0625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e9135e736b7f39abd3440d7dd3b56a62117333d | 2,759 | py | Python | rnn.py | apaszke/odsc-2020-workshop | 226e1b49824493a35686d112c7a4fb592b80f092 | [
"BSD-3-Clause"
] | 3 | 2020-04-16T14:42:06.000Z | 2020-09-17T15:51:55.000Z | rnn.py | apaszke/odsc-2020-workshop | 226e1b49824493a35686d112c7a4fb592b80f092 | [
"BSD-3-Clause"
] | null | null | null | rnn.py | apaszke/odsc-2020-workshop | 226e1b49824493a35686d112c7a4fb592b80f092 | [
"BSD-3-Clause"
] | 2 | 2020-04-16T14:22:46.000Z | 2021-03-30T14:45:25.000Z | import time
import math
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import Tuple
from torch import Tensor
# A recurrent state is the (hidden, cell) pair of tensors.
LSTMState = Tuple[Tensor, Tensor]


class LSTMCell(nn.Module):
    """A plain (non-fused) LSTM cell.

    The four gate weight matrices are stacked into a single parameter per
    source (input / previous hidden), so each step needs only two matmuls.
    """

    def __init__(self, input_size, hidden_size):
        super().__init__()
        self.input_size = input_size
        self.hidden_size = hidden_size
        # Rows 0..4H are the stacked (input, forget, cell, output) gates.
        self.weight_ih = nn.Parameter(torch.empty(4 * hidden_size, input_size))
        self.weight_hh = nn.Parameter(torch.empty(4 * hidden_size, hidden_size))
        self.bias = nn.Parameter(torch.empty(4 * hidden_size))
        self.reset_parameters()

    def reset_parameters(self):
        """Initialize weights uniformly in (-1/sqrt(H), 1/sqrt(H)), bias to 0."""
        bound = 1.0 / math.sqrt(self.hidden_size)
        nn.init.uniform_(self.weight_ih, -bound, bound)
        nn.init.uniform_(self.weight_hh, -bound, bound)
        nn.init.constant_(self.bias, 0)

    def forward(self, input, state: LSTMState):
        """Run one LSTM step and return the new (hidden, cell) pair."""
        h_prev, c_prev = state
        pre_act = (torch.mm(input, self.weight_ih.t()) +
                   torch.mm(h_prev, self.weight_hh.t()) + self.bias)
        i_gate, f_gate, g_gate, o_gate = pre_act.chunk(4, 1)
        i_gate = torch.sigmoid(i_gate)
        f_gate = torch.sigmoid(f_gate)
        g_gate = torch.tanh(g_gate)
        o_gate = torch.sigmoid(o_gate)
        c_next = (f_gate * c_prev) + (i_gate * g_gate)
        h_next = o_gate * torch.tanh(c_next)
        return h_next, c_next
class LSTMLayer(nn.Module):
    """Unrolls a single LSTMCell over the leading (time) dimension."""

    def __init__(self, *cell_args):
        super().__init__()
        # Constructor arguments are forwarded verbatim to the cell.
        self.cell = LSTMCell(*cell_args)

    def forward(self, input, state: LSTMState):
        """Step through time; return (stacked hidden outputs, final state)."""
        hidden_seq = []
        for step_input in input.unbind(0):
            state = self.cell(step_input, state)
            hidden_seq.append(state[0])
        return torch.stack(hidden_seq), state
class LSTM(nn.Module):
    """Multi-layer LSTM: a stack of LSTMLayer modules with inter-layer dropout."""

    def __init__(self, input_size, hidden_size, num_layers, dropout):
        super().__init__()
        assert num_layers >= 1
        # First layer maps input_size -> hidden_size; all others hidden -> hidden.
        self.layers = nn.ModuleList([LSTMLayer(input_size, hidden_size)] +
                                    [LSTMLayer(hidden_size, hidden_size)
                                     for _ in range(num_layers - 1)])
        self.dropout = nn.Dropout(dropout)
        self.num_layers = num_layers

    def forward(self, input, states: LSTMState):
        """Run all layers over the sequence.

        ``states`` holds per-layer initial states stacked along dim 0
        (indexed as states[0][i] / states[1][i]); the return mirrors that
        layout: (last layer output, (stacked h states, stacked c states)).
        """
        # BUG FIX: the local annotation previously used `List`, which is never
        # imported in this file (typing only provides `Tuple` here) and would
        # trip static checkers; a plain list with a comment is used instead.
        output_states = []  # one LSTMState per layer
        output = input
        for i, layer in enumerate(self.layers):
            output, out_state = layer(output, (states[0][i], states[1][i]))
            # Apply the dropout layer except the last layer
            if i < self.num_layers - 1:
                output = self.dropout(output)
            output_states.append(out_state)
        return output, (torch.stack([s[0] for s in output_states]),
                        torch.stack([s[1] for s in output_states]))
| 34.4875 | 80 | 0.609641 | 351 | 2,759 | 4.598291 | 0.239316 | 0.074349 | 0.052045 | 0.027881 | 0.233581 | 0.14746 | 0.106568 | 0.047088 | 0.047088 | 0 | 0 | 0.00853 | 0.277637 | 2,759 | 79 | 81 | 34.924051 | 0.801305 | 0.01631 | 0 | 0.079365 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015873 | 1 | 0.111111 | false | 0 | 0.111111 | 0 | 0.31746 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e93fe907321d25546e36cbc0e28b274103df730 | 18,108 | py | Python | src/Skeleton/midi_input.py | Voice-First-AI/generative-music-watson | e666f64602baab2e35a66c0a5c4389b1bd5666c9 | [
"Apache-2.0"
] | null | null | null | src/Skeleton/midi_input.py | Voice-First-AI/generative-music-watson | e666f64602baab2e35a66c0a5c4389b1bd5666c9 | [
"Apache-2.0"
] | null | null | null | src/Skeleton/midi_input.py | Voice-First-AI/generative-music-watson | e666f64602baab2e35a66c0a5c4389b1bd5666c9 | [
"Apache-2.0"
] | null | null | null | from __future__ import print_function
import os
import sys
import mido
import time
import math
import datetime
import threading
import collections
import MusicTheory
class RecordMidi :
    """Records a fixed-length phrase from a MIDI instrument via mido, then
    quantizes it, drops overlapping notes, and emits a MIDI file by
    generating and executing a `midi_export.py` script."""
    def __init__ ( self ) :
        """Precompute tick/second conversions from the hard-coded 4/4,
        120 bpm, 480 ppq time-signature info."""
        self.messagesFromMidiInstrument = {}
        self.midiMessagesNoOverlaps = {}
        self.midiMessages = {}
        self.initialClk = 0
        # Number of measures captured per recording.
        self.phraseLength = 4
        self.tsInfo = { 'tsNumerator': 4, 'tsDenominator': 4, 'measureLength': 1920, 'resolution': 480, 'format':0 , 'bpm': 120 }
        self.oneBeatInSeconds = 60.0 / self.tsInfo['bpm']
        self.oneBeatInMilliSeconds = round ( self.oneBeatInSeconds*1000, 3 )
        self.quarterNoteInBeats = float ( ( (1/4.0) ) / (1.0/self.tsInfo['tsDenominator']) )
        self.quarterNoteInSeconds = self.quarterNoteInBeats * self.oneBeatInSeconds
        self.quarterNoteInMilliSeconds = round ( self.quarterNoteInSeconds*1000, 3 )
        # resolution = ppq = pulses per quarter note = ticks per quarter note
        # NOTE(review): 480 is hard-coded here instead of tsInfo['resolution'].
        self.oneTickInSeconds = round ( ( self.quarterNoteInSeconds / 480 ) , 7 )
        self.oneTickInMilliSeconds = round ( self.oneTickInSeconds*1000, 3 )
        print ( "1 Tick in Seconds: ", self.oneTickInSeconds )
        print ( "1 Tick in Milli Seconds: ", self.oneTickInMilliSeconds )
        self.oneSecondInTicks = round ( 1.0/self.oneTickInSeconds, 0 )
        self.oneMilliSecondInTicks = round ((1.0/self.oneTickInMilliSeconds), 2 )
        print ( "1 Second in Ticks: ", self.oneSecondInTicks )
        print ( "1 Milli Seconds in Ticks: ", self.oneMilliSecondInTicks )
        self.ticksForPhraseLength = self.tsInfo['measureLength'] * self.phraseLength
        self.secondsForPhraseLength = int ( self.ticksForPhraseLength * self.oneTickInSeconds )
        print ( "Phrase Length: ", self.phraseLength, "Num Seconds for Phrase: ", self.secondsForPhraseLength )
        #sys.exit(0)
    def run ( self ) :
        """Record from the default MIDI input until the phrase length (in
        seconds) elapses after the first note, then run the full
        generate/quantize/de-overlap/export pipeline."""
        print ( "Initial Clock: ", self.initialClk )
        mido.set_backend('mido.backends.rtmidi')
        # NOTE(review): this port is opened but never used or closed; a second
        # open_input() below is the one actually read from.
        inport = mido.open_input()
        print ( "Time: ", time.time() )
        try:
            initialNotePlayed = False
            numNotes = 0
            # NOTE(review): endRecording is set but never read.
            endRecording = False
            with mido.open_input() as port:
                print('Using {}'.format(port))
                print("Initial Clk: ", self.initialClk, "Waiting for messages..." )
                for message in port:
                    # The clock starts at the first note_on received.
                    if ( not initialNotePlayed and message.type == 'note_on' ) :
                        initialNotePlayed = True
                        self.initialClk = message.time
                        print ( "\nStarted Recording" )
                        print("First Note Played, Initial Clk: ", self.initialClk )
                    if ( message.time - self.initialClk >= self.secondsForPhraseLength ) :
                        print ( "Initial Clk: ", self.initialClk, "Current Time: ", message.time, self.phraseLength, "measures recorded: ", "Number of seconds recorded: ", self.secondsForPhraseLength, "Stop Recording" )
                        endRecording = True
                        break
                    print ( message )
                    if ( message.type == 'note_on' or message.type == 'note_off' ) :
                        # Velocity-0 note_on is treated as note_off (running status).
                        if ( message.velocity == 0 ) :
                            self.messagesFromMidiInstrument[numNotes] = { 'event': 'note_off', 'pitch': message.note, 'time': message.time, 'velocity': message.velocity }
                        else :
                            self.messagesFromMidiInstrument[numNotes] = { 'event': message.type, 'pitch': message.note, 'time': message.time, 'velocity': message.velocity }
                        numNotes += 1
        except KeyboardInterrupt:
            pass
        # Rebase times to the first note and convert seconds -> ticks.
        for note in self.messagesFromMidiInstrument :
            self.messagesFromMidiInstrument[note]['time'] = round ( self.messagesFromMidiInstrument[note]['time'] - self.initialClk , 4 )
            self.messagesFromMidiInstrument[note]['starttick'] = round ( self.messagesFromMidiInstrument[note]['time'] * self.oneSecondInTicks, 0 )
            print ( note, self.messagesFromMidiInstrument[note] )
        #generate start and end times in ticks
        self.generateStartAndEndTimes()
        #quantize notes
        sixteenth = 120
        eighth = 240
        self.selfQuantizeNotes ( eighth )
        self.selfQuantizeNotes ( sixteenth )
        #remove overlaps
        self.removeOverlaps ()
        # create Midi
        self.createMidi ()
    def removeOverlaps ( self ) :
        """Delete every later note whose (velocity-scaled) time span overlaps
        an earlier one.

        NOTE(review): the overlap test ignores pitch, so simultaneous notes
        of a chord are pruned too; the 'duration * velocity/127' scaling of
        the effective duration looks like a heuristic -- confirm intent."""
        length = len(self.midiMessages)
        overlappedIndex = []
        for note in self.midiMessages :
            s1 = self.midiMessages[note]['starttick']
            actualDuration = self.midiMessages[note]['duration'] * ( self.midiMessages[note]['velocity'] / 127.0 )
            e1 = self.midiMessages[note]['starttick'] + actualDuration
            for nextNote in range ( note+1, length, 1 ) :
                s2 = self.midiMessages[nextNote]['starttick']
                actualDuration = self.midiMessages[nextNote]['duration'] * ( self.midiMessages[nextNote]['velocity'] / 127.0 )
                e2 = self.midiMessages[nextNote]['starttick'] + actualDuration
                if ( s1 <= e2 and e1 >= s2 ) : # overlap exists
                    overlappedIndex.append ( nextNote )
        overlappedIndex = list(set( overlappedIndex ) )
        print()
        print ( "Overlapped Index: ", overlappedIndex )
        # Delete from highest index down so earlier keys stay valid.
        for index in sorted(overlappedIndex, reverse=True):
            del self.midiMessages[index]
        print ( "Midi Messages after removing overlap" )
        for note in self.midiMessages :
            print ( note, self.midiMessages[note]['pitch'], self.midiMessages[note]['starttick'], self.midiMessages[note]['endtick'], self.midiMessages[note]['velocity'],
                self.midiMessages[note]['duration'], self.midiMessages[note]['measure'] )
    def selfQuantizeNotes ( self, ticksForQuantization ) :
        """Snap each start tick to the nearest multiple of
        ``ticksForQuantization`` and refresh endtick/measure fields.

        Must run after generateStartAndEndTimes(), which creates the
        'granularity' and 'tie' keys printed below."""
        for note in self.midiMessages :
            div = self.midiMessages[note]['starttick'] // ticksForQuantization
            mod = self.midiMessages[note]['starttick'] % ticksForQuantization
            if ( mod != 0 ) : # if note does not start on a quantized note
                # Round to the nearer grid line (ties round up).
                if ( mod >= ticksForQuantization/2 ) :
                    self.midiMessages[note]['starttick'] = int((div + 1 ) * ticksForQuantization )
                else :
                    self.midiMessages[note]['starttick'] = int( div * ticksForQuantization )
            self.midiMessages[note]['endtick'] = int(self.midiMessages[note]['starttick'] + self.midiMessages[note]['duration'])
            self.midiMessages[note]['measure'] = int( self.midiMessages[note]['starttick'] / self.tsInfo['measureLength'] ) + 1
            self.midiMessages[note]['measureGranularity'] = round ( ( float(self.midiMessages[note]['starttick']) / self.tsInfo['measureLength'] ) + 1, 2 )
        print()
        for note in self.midiMessages :
            print ( note, self.midiMessages[note]['pitch'], self.midiMessages[note]['starttick'], self.midiMessages[note]['endtick'], self.midiMessages[note]['granularity'],
                self.midiMessages[note]['tie'], self.midiMessages[note]['velocity'], self.midiMessages[note]['duration'], self.midiMessages[note]['measure'],
                self.midiMessages[note]['measureGranularity'],
            )
    def createMidi ( self ) :
        """Expand midiMessages into on/off events, write a `midi_export.py`
        script that rebuilds them with the legacy `midi` package, and run it
        with os.system to produce `midi_export.mid`."""
        # create notes from self.midiMessages
        notes = {}
        cnt = 0
        for note in self.midiMessages :
            pitch = self.midiMessages[note]['pitch']
            starttick = self.midiMessages[note]['starttick']
            endtick = self.midiMessages[note]['endtick']
            octave = pitch // 12
            mod = pitch % 12
            notestr = MusicTheory.pitchToNotes[mod]
            velocity = self.midiMessages[note]['velocity']
            notes[cnt] = { 'event': 'on', 'notestr': notestr, 'octave': octave, 'starttick': starttick, 'velocity': velocity, 'pitch': pitch }
            cnt += 1
            notes[cnt] = { 'event': 'off', 'notestr': notestr, 'octave': octave, 'starttick': endtick, 'velocity': 0, 'pitch': pitch }
            cnt += 1
        # Sort chronologically before converting to delta (per-event) ticks.
        notes = collections.OrderedDict ( sorted ( notes.items(), key=lambda x : x[1]['starttick'] ) )
        glbClk = 0
        print()
        for key in notes :
            notes[key]['miditick'] = notes[key]['starttick'] - glbClk
            glbClk = notes[key]['starttick']
            print ( key, notes[key]['notestr'], notes[key]['event'], notes[key]['miditick'], notes[key]['pitch'] )
        fmt = 0
        fname = "midi_export"
        fnamePy = fname + ".py"
        fout = open ( fnamePy, "w" )
        fout.write ( "import midi\n" ) ;
        fout.write ( "# Instantiate a MIDI Pattern (contains a list of tracks)\n" ) ;
        fout.write ( "pattern = midi.Pattern(format=%d, resolution=%d)\n" %(fmt, self.tsInfo['resolution']) ) ;
        fout.write ( "# Instantiate a MIDI Track (contains a list of MIDI events)\n" ) ;
        fout.write ( "track = midi.Track()\n" ) ;
        fout.write ( "# Append the track to the pattern\n" ) ;
        fout.write ( "pattern.append(track)\n" ) ;
        fout.write ("# Midi Events Start Here" ) ;
        fout.write ( "\n" ) ;
        fout.write ("# Instantiate a MIDI note on event, append it to the track\n" ) ;
        fout.write ( "\n" ) ;
        tsDenominatorPow = int(math.log ( self.tsInfo['tsDenominator'], 2 )) ;
        string = "time = midi.TimeSignatureEvent(tick=0, " + "data = [" + str(self.tsInfo['tsNumerator']) + ", " + str(tsDenominatorPow) + ", 24, 8])" + "\n" ; # 240 bpm
        fout.write ( string ) ;
        fout.write ( "track.append(time)\n" )
        for i in notes :
            pitch = notes[i]['notestr'] + "_" + str(notes[i]['octave'])
            tick = notes[i]['miditick']
            velocity = notes[i]['velocity']
            if ( notes[i]['event'] == 'on' ) :
                string = "on = midi.NoteOnEvent(tick=" + str( tick ) + ", velocity=" + str(velocity) + ", pitch=midi." + pitch + ")\n"
                fout.write ( string ) ;
                fout.write ( "track.append(on)\n" )
            else :
                string = "off = midi.NoteOffEvent(tick=" + str( tick ) + ", velocity=" + str(velocity) + ", pitch=midi." + pitch + ")\n"
                fout.write ( string ) ;
                fout.write ( "track.append(off)\n" )
            #print ( i, pitch, tick, velocity )
        fout.write ( "\n" ) ;
        fout.write ("\neot = midi.EndOfTrackEvent(tick=1)" ) ;
        fout.write ("\ntrack.append(eot)" ) ;
        fout.write ( "\n# Print out the pattern" ) ;
        fout.write ( "\n#print pattern" ) ;
        # Save the pattern to disk
        fout_name = fname + ".mid" ;
        fout.write ( "\nmidi.write_midifile(\"%s\", pattern)" %(fout_name) ) ;
        fout.close() ;
        call = "python " + fnamePy
        print ( call )
        os.system ( call ) ;
    def generateStartAndEndTimes ( self ) :
        """Pair each note_on with the next note_off of the same pitch and
        build self.midiMessages entries with tick-based start/end, duration
        and measure info ('granularity' and 'tie' get fixed defaults)."""
        cnt = 0
        length = len(self.messagesFromMidiInstrument)
        for note in self.messagesFromMidiInstrument :
            if ( self.messagesFromMidiInstrument[note]['event'] == 'note_off' ) : # ignore note off events
                continue
            for offnote in range ( note+1, length, 1 ) :
                if ( self.messagesFromMidiInstrument[offnote]['event'] == 'note_on' ) : # ignore note on events
                    continue
                if ( self.messagesFromMidiInstrument[note]['pitch'] == self.messagesFromMidiInstrument[offnote]['pitch'] ) :
                    self.midiMessages[cnt] = { 'pitch' : self.messagesFromMidiInstrument[note]['pitch'],
                        'starttick' : int(self.messagesFromMidiInstrument[note]['starttick']) ,
                        'endtick' : int(self.messagesFromMidiInstrument[offnote]['starttick']) ,
                        'granularity' : 1,
                        'tie' : 0.00,
                        'velocity' : self.messagesFromMidiInstrument[note]['velocity'],
                        'duration' : int(self.messagesFromMidiInstrument[offnote]['starttick'] - self.messagesFromMidiInstrument[note]['starttick']),
                        'measure' : int( self.messagesFromMidiInstrument[note]['starttick'] / self.tsInfo['measureLength'] ) + 1,
                        'measureGranularity': round ( ( float(self.messagesFromMidiInstrument[note]['starttick']) / self.tsInfo['measureLength'] ) + 1, 2 ) ,
                        }
                    cnt += 1
                    break
        print()
        for note in self.midiMessages :
            print ( note,
                self.midiMessages[note]['pitch'],
                self.midiMessages[note]['starttick'],
                self.midiMessages[note]['endtick'],
                self.midiMessages[note]['granularity'],
                self.midiMessages[note]['tie'],
                self.midiMessages[note]['velocity'],
                self.midiMessages[note]['duration'],
                self.midiMessages[note]['measure'],
                self.midiMessages[note]['measureGranularity'],
            )
#import djwatson_api
#from djwatson_io import Note, Const


def current_milli_time():
    """Return the current wall-clock time as an integer.

    BUG FIX: previously a lambda bound to a name (PEP 8 E731); converted to
    a ``def`` with identical behavior.

    NOTE(review): despite the name, this multiplies by 1,000,000 and so
    returns MICROseconds since the epoch; the factor is left unchanged
    because existing callers may depend on that unit -- confirm intent.
    """
    return int(round(time.time() * 1000000))
class PushMidiMessages(threading.Thread):
    """Thread that prints note on/off events read from the MIDI input.

    NOTE(review): ``run`` reads the module-global ``inport``, which is only
    assigned inside the (currently unreachable) tail of the __main__ block;
    the queue/flush machinery is entirely commented out."""
    def __init__(self, flushIntervalInSec):
        super(PushMidiMessages, self).__init__()
        #self.queue = RecordQueue()
        # Cooperative-shutdown flag; checked nowhere in the current code.
        self.kill_received = False
        self.flushTrigger = None
        self.flushIntervalInSec = flushIntervalInSec
    def run(self):
        """Consume MIDI messages from the global ``inport`` and log them."""
        for msg in inport: # nonblocking; flush out buffered msgs and return immediately
            # if self.flushTrigger == None and (msg.type == 'note_on' or msg.type == 'note_off'):
            #     self.flushTrigger = FlushTrigger(self.queue, self.flushIntervalInSec)
            #     self.flushTrigger.start()
            # self.queue.lock.acquire()
            #print ( msg )
            if msg.type == 'note_on':
                # if firstMsgTime == 0:
                #     firstMsgTime = msg.time
                #self.queue.pushNote(msg.note, convertMsgTimeToTick(msg.time), msg.velocity)
                print("note on:" , msg.note, "time: ", msg.time, "velocity: " , msg.velocity)
            elif msg.type == 'note_off':
                print("note off:" , msg.note, "time: ", msg.time, "velocity: " , msg.velocity)
                #self.queue.releaseNote(msg.note, convertMsgTimeToTick(msg.time))
            # self.queue.lock.release()
            #
            # if (msg.type == 'note_on' or msg.type == 'note_off') and self.flushTrigger.trigger == True:
            #     if self.flushTrigger.pendingTrigger == True:
            #         self.flushTrigger.attemptFlush()
            #     else:
            #         self.flushTrigger = None
if __name__ == '__main__' :
    # Timing tunables for the (mostly disabled) streaming code paths below.
    quarternotePerMin = 75
    ticksPerQuarterNote = 480
    msgInterval = 240 # in # of ticks
    firstMsgTime = 0 # in seconds; will be set at the first note
    flushIntervalInSec = 3.0
    note_min = 21
    note_max = 108
    ticksPerTie = 240
    ticksPerPush = ticksPerTie * 10
    firstMsgTime = 0
    milliSecPerTick = 60000.0 / quarternotePerMin / ticksPerQuarterNote
    # Active path: record one phrase and export it as MIDI.
    midi = RecordMidi()
    midi.run()
    # NOTE(review): everything below this exit is unreachable -- these look
    # like two older/debug code paths kept around (raw message echo, and the
    # PushMidiMessages thread).
    sys.exit(0)
    mido.set_backend('mido.backends.rtmidi')
    inport = mido.open_input()
    try:
        with mido.open_input() as port:
            print('Using {}'.format(port))
            print('Waiting for messages...')
            for message in port:
                print ( message.type, message.time, message.note, message.velocity )
                #print('Received {}'.format(message))
    except KeyboardInterrupt:
        pass
    sys.exit(0)
    threads = []
    # pushMidi = PushMidiMessages(Const.flushIntervalInSec)
    pushMidi = PushMidiMessages(3.0)
    pushMidi.daemon = True
    threads.append(pushMidi)
    pushMidi.start()
    while True:
        try:
            pushMidi.join(1)
            # for t in threads:
            #     if t.is_alive():
            #         # print('joining thread: '+str(t))
            #         t.join(1)
        except KeyboardInterrupt:
            # for t in threads:
            #     t.kill_received = True
            break
| 40.061947 | 221 | 0.520985 | 1,563 | 18,108 | 5.99936 | 0.1881 | 0.098966 | 0.091714 | 0.040205 | 0.325051 | 0.226405 | 0.198678 | 0.179268 | 0.139917 | 0.122214 | 0 | 0.013971 | 0.363596 | 18,108 | 451 | 222 | 40.150776 | 0.799722 | 0.086978 | 0 | 0.215909 | 0 | 0 | 0.142501 | 0.010789 | 0.003788 | 0 | 0 | 0 | 0 | 1 | 0.030303 | false | 0.007576 | 0.041667 | 0 | 0.079545 | 0.121212 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e94d154ef6661568951360ad9dd07426d0b1160 | 1,897 | py | Python | stardate/management/commands/watch_files.py | blturner/django-stardate | d3edf99aafd2233a13cd3b86bae3628803eb64ff | [
"BSD-3-Clause"
] | null | null | null | stardate/management/commands/watch_files.py | blturner/django-stardate | d3edf99aafd2233a13cd3b86bae3628803eb64ff | [
"BSD-3-Clause"
] | 9 | 2016-01-06T02:10:28.000Z | 2021-03-07T00:04:08.000Z | stardate/management/commands/watch_files.py | blturner/django-stardate | d3edf99aafd2233a13cd3b86bae3628803eb64ff | [
"BSD-3-Clause"
] | null | null | null | import logging
import os
import sys
import time
from django.core.management.base import BaseCommand
from django.utils import autoreload
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
from stardate.models import Blog
logger = logging.getLogger(__name__)
class LocalFileEventHandler(FileSystemEventHandler):
    """Watchdog handler that pulls a stardate Blog when its backend file changes."""

    def on_modified(self, event):
        """Sync the blog backed by the modified file, if any.

        BUG FIX: the observer watches whole directories, so this event also
        fires for files that do not back any Blog; the previous
        ``Blog.objects.get(...)`` raised ``Blog.DoesNotExist`` in that case.
        Non-blog files are now ignored.
        """
        path = event.src_path
        blog = Blog.objects.filter(backend_file=path).first()
        if blog is not None:
            blog.backend.pull()
class Command(BaseCommand):
    """Management command that watches local-file blog backends and pulls
    changes into the database as the files are modified.

    NOTE(review): ``autoreload.main`` only exists in older Django releases
    (it was removed in 2.2+) -- confirm the pinned Django version."""
    requires_system_checks = False
    def watch(self):
        """Observe the directory of every LocalFileBackend blog until Ctrl-C."""
        event_handler = LocalFileEventHandler()
        observer = Observer()
        for blog in Blog.objects.filter(
            backend_class='stardate.backends.local_file.LocalFileBackend'
        ):
            path = os.path.dirname(blog.backend_file)
            observer.schedule(event_handler, path, recursive=True)
        observer.start()
        try:
            # Keep the main thread alive while the observer thread works.
            while True:
                time.sleep(1)
        except KeyboardInterrupt:
            observer.stop()
        observer.join()
    def handle(self, *args, **options):
        """Entry point: wrap the watcher in Django's autoreloader when DEBUG."""
        from django.conf import settings
        if settings.DEBUG:
            autoreload.main(self.inner_run, None, options)
        else:
            self.watch()
    def run(self, **options):
        """
        Runs the server, using the autoreloader if needed
        """
        autoreload.main(self.inner_run, None, options)
    def inner_run(self, *args, **options):
        """Print the quit hint and run the watcher (called by the autoreloader)."""
        autoreload.raise_last_exception()
        quit_command = 'CTRL-BREAK' if sys.platform == 'win32' else 'CONTROL-C'
        self.stdout.write((
            "Starting file watcher...\n\n"
            "Quit with {}.\n"
        ).format(quit_command))
        try:
            self.watch()
        except KeyboardInterrupt:
            sys.exit(0)
| 24.636364 | 79 | 0.626779 | 205 | 1,897 | 5.697561 | 0.497561 | 0.025685 | 0.025685 | 0.039384 | 0.063356 | 0.063356 | 0.063356 | 0 | 0 | 0 | 0 | 0.002926 | 0.279389 | 1,897 | 76 | 80 | 24.960526 | 0.8515 | 0.036373 | 0 | 0.156863 | 0 | 0 | 0.062084 | 0.024945 | 0 | 0 | 0 | 0 | 0 | 1 | 0.098039 | false | 0 | 0.196078 | 0 | 0.352941 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e97d98dc87920d3fb6aa18ee674444a0bcdde3c | 2,296 | py | Python | bot.py | null2264/runget | ce57f4666be59c5790aa41284c3303960f03ab45 | [
"Unlicense"
] | null | null | null | bot.py | null2264/runget | ce57f4666be59c5790aa41284c3303960f03ab45 | [
"Unlicense"
] | null | null | null | bot.py | null2264/runget | ce57f4666be59c5790aa41284c3303960f03ab45 | [
"Unlicense"
] | null | null | null | import datetime
import json
import logging
from typing import List
import aiohttp
import discord
from discord.ext import commands
# Cog modules loaded at startup (in on_ready); commented entries are disabled.
extensions = [
    "cogs.admin",
    "cogs.exceptionhandler",
    # "cogs.general",
    # "cogs.utils",
    # "cogs.logs",
]
def get_prefix(bot: commands.Bot, message: discord.Message) -> List[str]:
    """Resolve command prefixes: the configured prefixes plus bot mentions."""
    configured = bot.settings.get("prefixes")
    return commands.when_mentioned_or(*configured)(bot, message)
class SrcBot(commands.Bot):
    """Discord bot that announces new speedrun.com runs.

    Configuration is read from config.json (secrets, e.g. the token) and
    settings.json (user-tunable values such as prefixes)."""
    def __init__(self) -> None:
        super().__init__(
            command_prefix=get_prefix,
            case_insensitive=True,
            # Never ping @everyone or roles from bot output.
            allowed_mentions=discord.AllowedMentions(
                everyone=False, roles=False, users=True
            ),
            intents=discord.Intents.all(),
        )
        self.logger = logging.getLogger("discord")
        # NOTE(review): the session is created outside a running event loop
        # and is never closed anywhere in this file -- confirm lifecycle.
        self.session = aiohttp.ClientSession()
        with open("config.json", "r") as f:
            self.config = json.load(f)
        with open("settings.json", "r") as f:
            self.settings = json.load(f)
    async def on_ready(self) -> None:
        """Set presence and load extensions once the gateway is ready.

        NOTE(review): on_ready can fire again after reconnects; loading the
        extensions here would then run twice -- confirm this is handled."""
        self.uptime = datetime.datetime.utcnow()
        activity = discord.Activity(
            name=f"for new runs | {self.settings.get('prefixes')[0]}help",
            type=discord.ActivityType.watching,
        )
        status = discord.Status.online
        await self.change_presence(activity=activity, status=status)
        for extension in extensions:
            self.load_extension(extension)
            self.logger.info(f"loaded extension {extension}")
        self.logger.info(
            f"running as {self.user} (id = {self.user.id}), "
            f"on {discord.__name__} v{discord.__version__}"
        )
    async def on_message(self, message: discord.Message) -> None:
        """Process commands, ignoring messages written by bots."""
        if message.author.bot:
            return
        await self.process_commands(message)
    def run(self) -> None:
        """Start the bot with the token stored in config.json."""
        super().run(self.config.get("token"), reconnect=True)
    async def send_pretty(self, ctx: commands.Context, content: str) -> discord.Message:
        """Reply with ``content`` wrapped in an embed with requester footer."""
        embed = discord.Embed(description=content)
        embed.set_footer(
            text=f"Requested by {ctx.author}", icon_url=ctx.author.avatar_url
        )
        embed.timestamp = ctx.message.created_at
        return await ctx.send(embed=embed)
| 29.435897 | 88 | 0.620209 | 266 | 2,296 | 5.229323 | 0.424812 | 0.030194 | 0.030194 | 0.011503 | 0.064702 | 0.047448 | 0 | 0 | 0 | 0 | 0 | 0.000589 | 0.260017 | 2,296 | 77 | 89 | 29.818182 | 0.818128 | 0.018293 | 0 | 0 | 0 | 0 | 0.121333 | 0.036 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.122807 | 0.017544 | 0.245614 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e9a3566d167b351b3eaddb6f87fe12fc53c61d7 | 2,170 | py | Python | pysimplicate/_hours.py | hpharmsen/pysimplicate | 98b2ae1c6b0d4c7f155c87f62e0da68ef45e82c7 | [
"Unlicense"
] | null | null | null | pysimplicate/_hours.py | hpharmsen/pysimplicate | 98b2ae1c6b0d4c7f155c87f62e0da68ef45e82c7 | [
"Unlicense"
] | null | null | null | pysimplicate/_hours.py | hpharmsen/pysimplicate | 98b2ae1c6b0d4c7f155c87f62e0da68ef45e82c7 | [
"Unlicense"
] | null | null | null | def hourstype(self):
url = '/hours/hourstype'
result = self.call(url)
return result
def hourstype_simple(self):
    """Map each hours-type id to a dict holding its type, tariff and label."""
    simplified = {}
    for entry in self.hourstype():
        simplified[entry['id']] = {
            'type': entry['type'],
            'tariff': entry['tariff'],
            'label': entry['label'],
        }
    return simplified
def hours(self, filter=None):
    """Fetch hour entries sorted by start date.

    BUG FIX: the default was a mutable ``{}``; it is now None, normalised
    inside. The parameter keeps its historical name for keyword
    compatibility even though it shadows the builtin.
    """
    if filter is None:
        filter = {}
    url = '/hours/hours?sort=start_date'
    fields = {
        'employee_id': 'employee.id',
        'employee_name': 'employee.name',
        'project': 'project.project_number',
        'service': 'projectservice.name',
        'hourstype': 'type.label',
        'start_date': 'start_date',
        # NOTE(review): 'end_date' also maps to 'start_date' -- confirm
        # this duplication is intentional.
        'end_date': 'start_date',
        'revenuegroup_id': 'projectservice.revenue_group_id',
    }
    return self.composed_call(url, fields, filter)
def hours_simple(self, filter=None):
    """Flatten raw hour entries into one-level dicts.

    BUG FIX: the mutable default ``{}`` is replaced by None (normalised
    inside) to avoid the shared-default pitfall.
    """
    if filter is None:
        filter = {}
    data = self.hours(filter)
    result = [
        {
            'employee': d['employee']['name'],
            'project_id': d['project']['id'],
            'project_name': d['project']['name'],
            'project_number': d['project'].get('project_number', ''),
            'service': d['projectservice']['name'],
            'type': d['type']['type'],
            'label': d['type']['label'],
            'billable': d['billable'],
            'tariff': d['tariff'],
            'service_tariff': d['type']['tariff'],
            'hours': d['hours'],
            'start_date': d['start_date'],
            'status': d.get('status', ''),
            'corrections': d['corrections'],
            'note': d.get('note', ''),
        }
        for d in data
    ]
    return result
def hours_count(self, filter=None):
    """Total booked hours (including corrections) matching ``filter``.

    BUG FIX: mutable default ``{}`` replaced by None (normalised inside).
    """
    if filter is None:
        filter = {}
    hours = self.hours(filter)
    return sum(d['hours'] + d['corrections']['amount'] for d in hours)
def turnover(self, filter=None):
    """Total turnover: (hours + corrections) * tariff, summed over entries.

    Returns 0 when no hour entries match the filter.

    BUG FIX: mutable default ``{}`` replaced by None; the corrected hours
    are now computed inside the sum instead of mutating the fetched dicts
    in place (the list is local, so external behavior is unchanged).
    """
    if filter is None:
        filter = {}
    hours = self.hours(filter)
    if not hours:
        return 0
    return sum((d['hours'] + d['corrections']['amount']) * d['tariff'] for d in hours)
def book_hours(self, fields):
    """POST ``fields`` to the hours endpoint, booking new hours."""
    return self.post('/hours/hours', fields)
def hours_approval(self, fields):
    """POST ``fields`` to the hours-approval endpoint."""
    return self.post('/hours/approval', fields)
| 27.820513 | 101 | 0.547005 | 248 | 2,170 | 4.689516 | 0.21371 | 0.046432 | 0.020636 | 0.032674 | 0.142734 | 0.051591 | 0 | 0 | 0 | 0 | 0 | 0.000627 | 0.264516 | 2,170 | 77 | 102 | 28.181818 | 0.72807 | 0.020737 | 0 | 0.101695 | 0 | 0 | 0.299576 | 0.038154 | 0 | 0 | 0 | 0 | 0 | 1 | 0.135593 | false | 0 | 0 | 0 | 0.288136 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e9bfcaaf97e428ed259cea7ad0461416591f9c0 | 4,526 | py | Python | some_bandits/utilities.py | EGAlberts/some_bandits | 99ee49e74c85ede2d941932396245c441a4b7e9b | [
"MIT"
] | null | null | null | some_bandits/utilities.py | EGAlberts/some_bandits | 99ee49e74c85ede2d941932396245c441a4b7e9b | [
"MIT"
] | null | null | null | some_bandits/utilities.py | EGAlberts/some_bandits | 99ee49e74c85ede2d941932396245c441a4b7e9b | [
"MIT"
] | 1 | 2022-03-23T03:40:11.000Z | 2022-03-23T03:40:11.000Z | import pickle
from some_bandits.bandit_options import bandit_args
# Revenue multipliers applied to the arrival rate (see the utility functions):
# optional (full-content) mode vs basic (dimmed) mode.
OPT_REVENUE = 1.5
BASIC_REVENUE = 1
# Tolerance for the floating-point comparison against the optimal revenue.
PRECISION = 10**(-5)
# Cost weight applied per unused server (max_servers - servers).
SERVER_COST = 10
# Response-time threshold -- presumably seconds; TODO confirm units.
RT_THRESH = 0.75
# Maximum service rate: reciprocal of the minimum per-request service time.
MAX_SERVICE_RATE = 1 / 0.4452713
def assign_utilityfunc():
    """Look up the utility function selected via bandit_args["utility_function"].

    Returns None when the configured name is not one of the known keys.
    """
    table = {
        "SEAMS2017A": utilitySEAMS2017A,
        "SEAMS2022": utilitySEAMS2022,
    }
    return table.get(bandit_args["utility_function"])
def say_hi(argument):
    """Print a greeting that embeds ``argument`` and return a fixed string."""
    greeting = "Hello! I'm totally with new addition bandits and " + str(argument)
    print(greeting)
    return "this is the string I returned from say_hi"
def save_to_pickle(data_to_pkl, name):
    """Takes any variable and a name for the pickle file and pickles it.

    Appends to ``<name>.pkl`` (mode ``ab+``), so repeated calls accumulate
    multiple pickled objects in the same file.

    BUG FIX: uses a ``with`` block so the handle is closed even when
    ``pickle.dump`` raises (previously it leaked on error).
    """
    with open(str(name) + '.pkl', 'ab+') as output:
        pickle.dump(data_to_pkl, output)
def load_from_pickle(name):
    """Load and return the first object pickled in ``<name>.pkl``.

    BUG FIX: uses a ``with`` block so the handle is closed even when
    ``pickle.load`` raises (previously it leaked on error).
    """
    with open(name + ".pkl", 'rb') as pkl_file:
        return pickle.load(pkl_file)
def pickle_test(data_to_pkl):
    """Demo/experiment: increment each counter stored in 'knowledge.pkl'.

    On the first call (file missing, IOError path) the file is created
    holding [0, 0, 0]; later calls load the list, add 1 to every element
    and write it back. The ``data_to_pkl`` argument is currently unused.
    """
    print("pickling..")
    try:
        #read in data
        pkl_file = open('knowledge.pkl', 'rb')
        knowledge = pickle.load(pkl_file)
        print(knowledge)
        pkl_file.close()
        #adjust data
        knowledge = [item+1 for item in knowledge]
        output = open('knowledge.pkl', 'wb')
        pickle.dump(knowledge, output)
        output.close()
        return
    except IOError:
        #initial time
        initial_data = [0,0,0]
        output = open('knowledge.pkl', 'wb')
        pickle.dump(initial_data, output)
        output.close()
        return
def truncate(utility):
    """Normalise ``utility`` into [0, 1] relative to bandit_args["bounds"].

    Returns a 3-tuple ``(normalised, bounds_changed, range_ratio)``: when
    dynamic bounds are enabled and the value falls outside the current
    bounds, the bounds stored in bandit_args are widened in place,
    ``bounds_changed`` is True and ``range_ratio`` is old_range/new_range;
    otherwise the value is clamped and the tail is ``(False, None)``.
    """
    bounds = bandit_args["bounds"]
    lower_bound, upper_bound = bounds
    old_range = upper_bound - lower_bound
    out_of_bounds = False
    if(utility > upper_bound):
        if(bandit_args["dynamic_bounds"]):
            upper_bound = utility
            out_of_bounds = True
        else:
            # Static bounds: clamp instead of widening.
            utility = upper_bound
    elif(utility < lower_bound):
        if(bandit_args["dynamic_bounds"]):
            lower_bound = utility
            out_of_bounds = True
        else:
            utility = lower_bound
    new_range = upper_bound - lower_bound
    result = float((utility - lower_bound)/new_range)
    if(out_of_bounds):
        # Side effect: persist the widened bounds for subsequent calls.
        bandit_args["bounds"] = (lower_bound, upper_bound)
        return result, True, old_range/new_range
    else: return result, False, None
def convert_conf(new_conf, current_conf):
    """Translate a (servers, dimmer) configuration change into SWIM commands.

    Returns the string "do nothing" when the configurations are equal (note:
    a string, not a list), otherwise a list of commands such as
    "add_server", "remove_server" and "set_dimmer <value>".
    """
    if new_conf == current_conf:
        return "do nothing"
    delta_servers = new_conf[0] - current_conf[0]
    if delta_servers > 0:
        commands = ['add_server'] * int(delta_servers)
    else:
        commands = ['remove_server'] * int(abs(delta_servers))
    if new_conf[1] != current_conf[1]:
        commands.append("set_dimmer " + str(new_conf[1]))
    return commands
def utilitySEAMS2017A(arrival_rate, dimmer, avg_response_time, max_servers, servers, truncate=True):
    """SEAMS2017-style utility for the SWIM exemplar.

    NOTE(review): the ``truncate`` parameter shadows the module-level
    ``truncate()`` function, so with the default ``True`` every
    ``truncate(...)`` call here invokes a bool and raises TypeError;
    additionally, these calls pass two arguments while the module's
    ``truncate(utility)`` takes one. This function looks legacy/broken --
    confirm before selecting it via bandit_args.
    """
    ur_opt = arrival_rate * OPT_REVENUE
    ur = arrival_rate * ((1-dimmer) * BASIC_REVENUE + (dimmer * OPT_REVENUE))
    #ur = arrival_rate * ( (1-dimmer) * BASIC_REVENUE + dimmer * OPT_REVENUE)
    bounds = bandit_args["bounds"]
    # Meeting the response-time threshold at (near-)optimal revenue also
    # rewards unused server capacity.
    if((avg_response_time <= RT_THRESH) and (ur >= ur_opt - PRECISION)):
        uc = SERVER_COST * (max_servers - servers)
        return [truncate(ur + uc, bounds)]
    else:
        if(avg_response_time <= RT_THRESH):
            if(truncate):return [truncate(ur, bounds)]
            else: return[ur]
        else:
            # Threshold violated: penalise by the (negative) excess load.
            max_throughput = max_servers * MAX_SERVICE_RATE
            if(truncate): return [truncate(min(0, arrival_rate - max_throughput) * OPT_REVENUE, bounds)]
            else: return [min(0, arrival_rate - max_throughput) * OPT_REVENUE]
def utilitySEAMS2022(arrival_rate, dimmer, avg_response_time, max_servers, servers, doTruncate=True):
    """SEAMS2022 utility: response-time factor times weighted revenue/cost.

    Returns a 3-tuple ``([utility], bounds_changed, bounds_ratio)``; the
    last two come from ``truncate()`` when ``doTruncate`` is True and are
    ``(False, None)`` otherwise.

    BUG FIX: the original assigned ``urt`` twice before use (an initial
    formula and then ``urt = None``) -- both were dead code and have been
    removed; the effective computation below is unchanged.
    """
    ur = arrival_rate * ((1 - dimmer) * BASIC_REVENUE + dimmer * OPT_REVENUE)
    uc = SERVER_COST * (max_servers - servers)
    # Response times above 4x the threshold are capped at the worst penalty.
    UPPER_RT_THRESHOLD = RT_THRESH * 4
    delta_threshold = UPPER_RT_THRESHOLD - RT_THRESH
    UrtPosFct = (delta_threshold / RT_THRESH)
    if(avg_response_time <= UPPER_RT_THRESHOLD):
        urt = ((RT_THRESH - avg_response_time) / RT_THRESH)
    else:
        urt = ((RT_THRESH - UPPER_RT_THRESHOLD) / RT_THRESH)
    # Amplify the reward when the threshold is met so the positive range
    # matches the magnitude of the negative one.
    if(avg_response_time <= RT_THRESH):
        urt_final = urt * UrtPosFct
    else:
        urt_final = urt
    revenue_weight = 0.7
    server_weight = 0.3
    utility = urt_final * ((revenue_weight * ur) + (server_weight * uc))
    if(doTruncate):
        truncated_reward, is_bound_diff, bound_delta = truncate(utility)
        return [truncated_reward], is_bound_diff, bound_delta
    else:
        return [utility], False, None
# Resolved once at import time from bandit_args["utility_function"].
calculate_utility = assign_utilityfunc()
| 27.26506 | 102 | 0.693769 | 626 | 4,526 | 4.741214 | 0.231629 | 0.03504 | 0.040431 | 0.028639 | 0.359838 | 0.298181 | 0.233154 | 0.16442 | 0.084906 | 0.051887 | 0 | 0.019126 | 0.191339 | 4,526 | 165 | 103 | 27.430303 | 0.791803 | 0.051922 | 0 | 0.217391 | 0 | 0 | 0.068756 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.078261 | false | 0 | 0.017391 | 0 | 0.173913 | 0.026087 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7e9e3530ae1d7c5f5e18ac1462e3c87c0e0d9ae4 | 7,644 | py | Python | pong/menu.py | bill-baggins/PyPong | 92eff48853f81fc2018ccf339df7b3699a162a78 | [
"MIT"
] | null | null | null | pong/menu.py | bill-baggins/PyPong | 92eff48853f81fc2018ccf339df7b3699a162a78 | [
"MIT"
] | null | null | null | pong/menu.py | bill-baggins/PyPong | 92eff48853f81fc2018ccf339df7b3699a162a78 | [
"MIT"
] | null | null | null | from typing import Union, Callable
import pygame
from pygame.constants import *
from .common import Color
from .common import MenuState, TextBox
class Button(TextBox):
    """
    A clickable GUI button that inherits from TextBox.

    It carries all TextBox attributes plus an ``action`` payload and one
    extra method, on_click: when the given mouse position falls inside the
    button's rect, on_click returns ``action`` (here, the MenuState the
    button is meant to switch to); otherwise it returns None.
    """
    def __init__(self,
                 pos: list,
                 size: list,
                 background_color: list,
                 action: object,
                 text: str = "",
                 text_size: int = 20,
                 text_pos: list = None,
                 text_color: tuple = Color.Black):
        # NOTE(review): the super() call passes text_pos before text_size,
        # while Button's own parameters declare text_size first -- confirm
        # this matches TextBox.__init__'s parameter order.
        super().__init__(pos, size, background_color, text, text_pos, text_size, text_color)
        self.action = action

    # IMPORTANT: this function changes the MenuState. Gotta put this here
    # so i don't confuse myself later if something breaks because of this.
    def on_click(self, mouse_pos: tuple) -> Union[Callable, object]:
        if (self.rect.left < mouse_pos[0] < self.rect.right and
            self.rect.top < mouse_pos[1] < self.rect.bottom):
            return self.action
def menu_loop(screen: pygame.Surface,
              screen_width: int,
              screen_height: int) -> MenuState:
    """Run the main menu / options menu until the player starts or quits.

    Builds the widgets for both menus, then polls pygame events and
    dispatches button clicks until the state becomes Game or Quit.

    Args:
        screen: surface the menus are drawn onto.
        screen_width: window width in pixels, used to centre the widgets.
        screen_height: window height in pixels (currently unused; kept for
            interface compatibility with callers).

    Returns:
        The final MenuState (MenuState.Game or MenuState.Quit).
    """
    # import OPTION here so it is not undefined
    from .options import OPTION

    # -- Main menu widgets -------------------------------------------------
    title_box = TextBox([screen_width // 2 - 100, 10],
                        [200, 50],
                        OPTION["BACKGROUND_COLOR"],
                        text="Pong!",
                        text_size=50,
                        text_color=Color.White)

    start_game = MenuState.Game
    open_options_menu = MenuState.Options
    quit_game = MenuState.Quit

    start_button = Button([screen_width // 2 - 100, title_box.size[1] * 2 + 30],
                          [200, 50],
                          Color.White,
                          action=start_game,
                          text="Start Game",
                          text_size=30)
    options_button = Button([screen_width // 2 - 100, title_box.size[1] * 4 + 30],
                            [200, 50],
                            Color.White,
                            action=open_options_menu,
                            text="Options",
                            text_size=30)
    quit_button = Button([screen_width // 2 - 100, title_box.size[1] * 6 + 30],
                         [200, 50],
                         Color.White,
                         action=quit_game,
                         text="Quit",
                         text_size=30)

    # -- Options menu widgets ----------------------------------------------
    winning_score_str = f"Max Score:{OPTION['WINNING_SCORE']}"
    current_winning_score_text_box = TextBox(
        [screen_width // 2 - 100, title_box.size[1] * 4 + 30],
        [200, 50],
        Color.White,
        text=winning_score_str,
        text_size=25)

    return_to_main_menu = MenuState.Menu

    def update_winning_score_str():
        # Refresh both the stored text and its rendered surface so the new
        # score becomes visible on the next draw.
        new_score_str = f"Max Score:{OPTION['WINNING_SCORE']}"
        current_winning_score_text_box.text = new_score_str
        current_winning_score_text_box.text_render = \
            current_winning_score_text_box.font.render(new_score_str, False, Color.Black)
        current_winning_score_text_box.update_text()

    def increase_winning_score():
        # Winning score is clamped to at most 15.
        if OPTION["WINNING_SCORE"] < 15:
            OPTION["WINNING_SCORE"] += 1
            update_winning_score_str()

    def decrease_winning_score():
        # Winning score is clamped to at least 1.
        if OPTION["WINNING_SCORE"] > 1:
            OPTION["WINNING_SCORE"] -= 1
            update_winning_score_str()

    increase_score_button = Button([screen_width // 2 + 120, title_box.size[1] * 4 + 30],
                                   [40, 50],
                                   Color.White,
                                   action=increase_winning_score,
                                   text=">",
                                   text_size=80)
    decrease_score_button = Button([screen_width // 2 - 160, title_box.size[1] * 4 + 30],
                                   [40, 50],
                                   Color.White,
                                   action=decrease_winning_score,
                                   text="<",
                                   text_size=80)
    back_to_main_menu_button = Button([screen_width // 2 - 100, title_box.size[1] * 6 + 30],
                                      [200, 50],
                                      Color.White,
                                      action=return_to_main_menu,
                                      text="Main Menu",
                                      text_size=30)

    main_menu_buttons = (start_button, options_button, quit_button)
    option_menu_buttons = (increase_score_button, decrease_score_button,
                           back_to_main_menu_button)
    option_menu_text_boxes = (current_winning_score_text_box,)

    menu_state = MenuState.Menu
    running = True
    while running:
        mouse_pos = pygame.mouse.get_pos()
        for event in pygame.event.get():
            if event.type == QUIT:
                pygame.quit()
                # Raise SystemExit directly rather than calling the
                # site-provided exit() builtin, which is not guaranteed to
                # exist in every interpreter configuration.
                raise SystemExit
            if event.type == MOUSEBUTTONDOWN:
                if menu_state == MenuState.Menu:
                    for button in main_menu_buttons:
                        # Evaluate on_click once per button (it was called
                        # twice before); None means the click missed.
                        action = button.on_click(mouse_pos)
                        if action is not None:
                            menu_state = action
                elif menu_state == MenuState.Options:
                    for button in option_menu_buttons:
                        action = button.on_click(mouse_pos)
                        if action is None:
                            continue
                        if action in MenuState.val_range:
                            # A MenuState value: switch menus.
                            menu_state = action
                        else:
                            # Otherwise the action is a callable (the score
                            # increase/decrease handlers).
                            action()

        screen.fill(OPTION["BACKGROUND_COLOR"])
        title_box.draw_to(screen)
        if menu_state == MenuState.Menu:
            for button in main_menu_buttons:
                button.draw_to(screen)
        elif menu_state == MenuState.Game:
            running = False
            break
        elif menu_state == MenuState.Options:
            for textbox in option_menu_text_boxes:
                textbox.draw_to(screen)
            for button in option_menu_buttons:
                button.draw_to(screen)
        elif menu_state == MenuState.Quit:
            running = False
            break
        pygame.display.update()
    return menu_state
| 40.659574 | 119 | 0.482208 | 762 | 7,644 | 4.580052 | 0.209974 | 0.072206 | 0.027507 | 0.026075 | 0.381089 | 0.336963 | 0.232665 | 0.232665 | 0.209742 | 0.209742 | 0 | 0.026344 | 0.379252 | 7,644 | 187 | 120 | 40.877005 | 0.709168 | 0.159733 | 0 | 0.293233 | 0 | 0 | 0.029914 | 0.00971 | 0 | 0 | 0 | 0 | 0 | 1 | 0.045113 | false | 0 | 0.045113 | 0 | 0.112782 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7ea00ed84302fa0e305ec18ac7c5e9e9c1df8929 | 1,690 | py | Python | button.py | hectorRod101/Alien_Invasion | 82dd380d6046a7240b99cbb4244c57e97b23b311 | [
"MIT"
] | null | null | null | button.py | hectorRod101/Alien_Invasion | 82dd380d6046a7240b99cbb4244c57e97b23b311 | [
"MIT"
] | null | null | null | button.py | hectorRod101/Alien_Invasion | 82dd380d6046a7240b99cbb4244c57e97b23b311 | [
"MIT"
] | null | null | null | # Alien Invasion
# Created by: Hector Rodriguez & Justin Castillo
import pygame.font
class Button():
    """A clickable rectangle with centered text, drawn inside a border rect."""

    def __init__(self, ai_settings, screen, msg):
        """Initialize button attributes."""
        self.screen = screen
        self.screen_rect = screen.get_rect()

        # Set the dimensions and properties of the button.
        self.width, self.height = 200, 50
        self.width_border, self.height_border = 330, 75
        self.button_color = (0, 0, 0)
        self.button_border = (255, 255, 255)
        self.text_color = (255, 255, 255)
        # Single font used to render the label. Previously a SysFont(None, 48)
        # was stored here but never used — prep_msg built a fresh
        # SysFont('monospace', 60) on every call instead. Keeping the
        # monospace/60 font preserves the rendered output.
        self.font = pygame.font.SysFont('monospace', 60)

        # Build the button's rect object and center it.
        self.rect = pygame.Rect(0, 0, self.width, self.height)
        self.rect_border = pygame.Rect(0, 0, self.width_border, self.height_border)
        # Both rects share the same center: horizontally centered,
        # 300 px below the vertical middle of the screen.
        center = (ai_settings.screen_width / 2, (ai_settings.screen_height / 2) + 300)
        self.rect.center = center
        self.rect_border.center = center

        # The button message needs to be prepped only once.
        self.prep_msg(msg)

    def prep_msg(self, msg):
        """Turn msg into a rendered image and center text on the button."""
        self.msg_image = self.font.render(msg, True, self.text_color, self.button_color)
        self.msg_image_rect = self.msg_image.get_rect()
        self.msg_image_rect.center = self.rect.center

    def draw_button(self):
        """Draw the bordered blank button, then blit the message on top."""
        self.screen.fill(self.button_border, self.rect_border)
        self.screen.fill(self.button_color, self.rect)
        self.screen.blit(self.msg_image, self.msg_image_rect)
7ea25d2c8244a558dc07a034132cb1ac7f314893 | 286 | py | Python | Lesson 1/03_conditions.py | hamburgcodingschool/L2C-Python-1804 | 3420691aaee81fb88b2e8e21763014bc91d2ed96 | [
"MIT"
] | null | null | null | Lesson 1/03_conditions.py | hamburgcodingschool/L2C-Python-1804 | 3420691aaee81fb88b2e8e21763014bc91d2ed96 | [
"MIT"
] | null | null | null | Lesson 1/03_conditions.py | hamburgcodingschool/L2C-Python-1804 | 3420691aaee81fb88b2e8e21763014bc91d2ed96 | [
"MIT"
# Boolean values are always either True or False.
booleanValue = True

# A bare name in an if-statement is tested for truthiness directly.
if booleanValue:
    print("I WILL ONLY SHOW IF THE VALUE IS TRUE!")

# CONDITIONAL OPERATORS
# == != > >= < <=

age = 13
# A conditional expression selects one of two values on a single line.
print("Have a Beer!" if age >= 18 else "Have a Fritz!")
7ea2f27c569cfa41172a486dc63c326821b3d45c | 4,467 | py | Python | scenic/projects/baselines/configs/imagenet/imagenet_vit_config.py | techthiyanes/scenic | 05585b1189364e29d82413b9d4a50ffa8c246f0c | [
"Apache-2.0"
] | 688 | 2021-07-26T21:45:18.000Z | 2022-03-31T11:53:34.000Z | scenic/projects/baselines/configs/imagenet/imagenet_vit_config.py | techthiyanes/scenic | 05585b1189364e29d82413b9d4a50ffa8c246f0c | [
"Apache-2.0"
] | 35 | 2021-08-03T11:31:10.000Z | 2022-03-31T21:58:58.000Z | scenic/projects/baselines/configs/imagenet/imagenet_vit_config.py | techthiyanes/scenic | 05585b1189364e29d82413b9d4a50ffa8c246f0c | [
"Apache-2.0"
] | 88 | 2021-08-03T13:19:50.000Z | 2022-03-31T08:35:22.000Z | # pylint: disable=line-too-long
r"""Default configs for ViT on ImageNet2012.
Note: you can also use ImageNet input pipeline from big transfer pipeline:
```
config.dataset_name = 'bit'
config.data_dtype_str = 'float32'
config.dataset_configs = ml_collections.ConfigDict()
config.dataset_configs.dataset = 'imagenet2012'
# aka tiny_test/test[:5%] in task_adapt
config.dataset_configs.val_split = 'validation'
config.dataset_configs.train_split = 'train'
config.dataset_configs.num_classes = 1000
INPUT_RES = 224 # pylint: disable=invalid-name
RESIZE_RES = int(INPUT_RES * (256 / 224)) # pylint: disable=invalid-name
LS = 1e-4 # pylint: disable=invalid-name
config.dataset_configs.pp_train = (
f'decode_jpeg_and_inception_crop({INPUT_RES})|flip_lr|value_range(-1, '
f'1)|onehot({config.dataset_configs.num_classes},'
f' key="label", key_result="labels", '
f'on={1.0-LS}, off={LS})|keep("image", '
f'"labels")') # pylint: disable=line-too-long
config.dataset_configs.pp_eval = (
f'decode|resize_small({RESIZE_RES})|'
f'central_crop({INPUT_RES})|value_range(-1, '
f'1)|onehot({config.dataset_configs.num_classes},'
f' key="label", '
f'key_result="labels")|keep("image", '
f'"labels")') # pylint: disable=line-too-long
config.dataset_configs.prefetch_to_device = 2
# shuffle_buffer_size is per host, so small-ish is ok.
config.dataset_configs.shuffle_buffer_size = 250_000
```
"""
# pylint: disable=line-too-long
import ml_collections
# Number of examples in the ImageNet-1k training split; used below to derive
# steps_per_epoch (and from it the total step budget) in get_config.
_IMAGENET_TRAIN_SIZE = 1281167
# ViT variant string '<size>/<patch>', e.g. 'B/16' = ViT-Base, 16x16 patches.
VARIANT = 'B/16'
def get_config(runlocal=''):
  """Returns the ViT experiment configuration for ImageNet."""
  runlocal = bool(runlocal)

  config = ml_collections.ConfigDict()
  config.experiment_name = 'imagenet-vit'

  # Dataset.
  config.dataset_name = 'imagenet'
  config.data_dtype_str = 'float32'
  config.dataset_configs = ml_collections.ConfigDict()

  # Model. One lookup table covers all per-variant architecture numbers.
  version, patch = VARIANT.split('/')
  # version -> (hidden_size, num_heads, mlp_dim, num_layers)
  hidden_size, num_heads, mlp_dim, num_layers = {
      'Ti': (192, 3, 768, 12),
      'S': (384, 6, 1536, 12),
      'B': (768, 12, 3072, 12),
      'L': (1024, 16, 4096, 24),
      'H': (1280, 16, 5120, 32),
  }[version]
  config.model_name = 'vit_multilabel_classification'
  config.model = ml_collections.ConfigDict()
  config.model.hidden_size = hidden_size
  config.model.patches = ml_collections.ConfigDict()
  config.model.patches.size = [int(patch)] * 2
  config.model.num_heads = num_heads
  config.model.mlp_dim = mlp_dim
  config.model.num_layers = num_layers
  config.model.representation_size = None
  config.model.classifier = 'token'
  config.model.attention_dropout_rate = 0.
  config.model.dropout_rate = 0.1
  config.model_dtype_str = 'float32'

  # Training.
  config.trainer_name = 'classification_trainer'
  config.optimizer = 'adam'
  config.optimizer_configs = ml_collections.ConfigDict()
  config.optimizer_configs.beta1 = 0.9
  config.optimizer_configs.beta2 = 0.999
  config.optimizer_configs.weight_decay = 0.3
  config.explicit_weight_decay = None  # No explicit weight decay
  config.l2_decay_factor = None
  config.max_grad_norm = 1.0
  config.label_smoothing = None
  config.num_training_epochs = 90
  config.log_eval_steps = 1000
  config.batch_size = 8 if runlocal else 4096
  config.rng_seed = 42
  config.init_head_bias = -10.0

  # Learning rate: linear warmup followed by linear decay.
  steps_per_epoch = _IMAGENET_TRAIN_SIZE // config.batch_size
  total_steps = config.num_training_epochs * steps_per_epoch
  base_lr = 3e-3
  config.lr_configs = ml_collections.ConfigDict()
  config.lr_configs.learning_rate_schedule = 'compound'
  config.lr_configs.factors = 'constant*linear_warmup*linear_decay'
  config.lr_configs.total_steps = total_steps
  config.lr_configs.end_learning_rate = 1e-5
  config.lr_configs.warmup_steps = 10_000
  config.lr_configs.base_learning_rate = base_lr

  # Logging.
  config.write_summary = True
  config.xprof = True  # Profile using xprof.
  config.checkpoint = True  # Do checkpointing.
  config.checkpoint_steps = 5000
  config.debug_train = False  # Debug mode during training.
  config.debug_eval = False  # Debug mode during eval.
  return config
| 34.898438 | 80 | 0.671144 | 582 | 4,467 | 4.919244 | 0.353952 | 0.06357 | 0.083828 | 0.060775 | 0.227035 | 0.129934 | 0.129934 | 0.129934 | 0.129934 | 0.129934 | 0 | 0.042752 | 0.209313 | 4,467 | 127 | 81 | 35.173228 | 0.767837 | 0.380792 | 0 | 0 | 0 | 0 | 0.060584 | 0.031387 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014493 | false | 0 | 0.014493 | 0 | 0.043478 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |