hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a7ea14ccf7f41c0614b8f95c605b3bd30018a21b | 2,643 | py | Python | example_project/blog/migrations/0001_initial.py | allran/djangorestframework-appapi | 5e843b70910ccd55d787096ee08eb85315c80000 | [
"BSD-2-Clause"
] | 4 | 2019-10-15T06:47:29.000Z | 2019-11-11T13:16:15.000Z | example_project/blog/migrations/0001_initial.py | allran/djangorestframework-appapi | 5e843b70910ccd55d787096ee08eb85315c80000 | [
"BSD-2-Clause"
] | null | null | null | example_project/blog/migrations/0001_initial.py | allran/djangorestframework-appapi | 5e843b70910ccd55d787096ee08eb85315c80000 | [
"BSD-2-Clause"
] | null | null | null | # Generated by Django 2.2.6 on 2019-10-16 02:53
from django.conf import settings
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='Author',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('name', models.CharField(max_length=50)),
('email', models.EmailField(max_length=254)),
],
options={
'ordering': ['id'],
},
),
migrations.CreateModel(
name='Blog',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('title', models.CharField(blank=True, max_length=255, null=True, verbose_name='title')),
('content', models.TextField(blank=True, null=True)),
('author', models.ForeignKey(blank=True, help_text='作者id', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='author', to='blog.Author', verbose_name='作者')),
],
options={
'ordering': ['id'],
},
),
migrations.CreateModel(
name='UserFavorite',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('created_at', models.DateTimeField(auto_now_add=True)),
('modified_at', models.DateTimeField(auto_now=True)),
('blog', models.ForeignKey(blank=True, help_text='博客id', null=True, on_delete=django.db.models.deletion.SET_NULL, related_name='blog', to='blog.Blog', verbose_name='博客')),
('user', models.ForeignKey(help_text='收藏人id', null=True, on_delete=django.db.models.deletion.SET_NULL, to=settings.AUTH_USER_MODEL, verbose_name='用户')),
],
options={
'verbose_name': '用户收藏',
'verbose_name_plural': '用户收藏',
'ordering': ['id'],
'unique_together': {('user', 'blog')},
},
),
]
| 43.327869 | 193 | 0.573212 | 275 | 2,643 | 5.323636 | 0.298182 | 0.067623 | 0.086066 | 0.102459 | 0.539617 | 0.539617 | 0.437158 | 0.437158 | 0.437158 | 0.437158 | 0 | 0.012099 | 0.280742 | 2,643 | 60 | 194 | 44.05 | 0.758022 | 0.017026 | 0 | 0.509434 | 1 | 0 | 0.107088 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.056604 | 0 | 0.132075 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a7f5cbeb6c6ac6730e6541d991681e7c83554dd8 | 523 | py | Python | fun.py | Krishna-Aaseri/Python_Logical_Questions | c0f025a56dbbf85426142adb423b25fa7b034adb | [
"MIT"
] | null | null | null | fun.py | Krishna-Aaseri/Python_Logical_Questions | c0f025a56dbbf85426142adb423b25fa7b034adb | [
"MIT"
] | null | null | null | fun.py | Krishna-Aaseri/Python_Logical_Questions | c0f025a56dbbf85426142adb423b25fa7b034adb | [
"MIT"
] | null | null | null | #def add(num,num1):
# add1=num+num1
# print add1
#add(6,7)
#def welcome():
# print "python kaisa lagta h aapko"
# print "but please reply na kare aap"
#welcome()
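# read how many numbers to collect, then gather them from the console into a list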
user = int(raw_input("enter a number"))
i = 0
new = []
while i < (user):
user1 = int(raw_input("enter a number"))
new.append(user1)
i = i + 1
print new
print "**********************************************"
i = 0
new_list = []
while i < len(new):
	if new[i] % 2 == 0:
		new_list.append(new[i])	# append the element itself, not the whole list
	else:
		new_list.append(new[i])	# NOTE: both branches currently do the same thing
	i = i + 1
print new_list
| 13.763158 | 54 | 0.565966 | 85 | 523 | 3.411765 | 0.458824 | 0.096552 | 0.075862 | 0.110345 | 0.234483 | 0.158621 | 0 | 0 | 0 | 0 | 0 | 0.033333 | 0.196941 | 523 | 37 | 55 | 14.135135 | 0.657143 | 0.279159 | 0 | 0.333333 | 0 | 0 | 0.20274 | 0.126027 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
a7faceab673a31a756534245b8aaabc503d661d6 | 1,217 | py | Python | docs/demos/theme_explorer/list_group.py | sthagen/facultyai-dash-bootstrap-components | 2dd5eaf1c1494b2077bcee82eb7968ec2e23af46 | [
"Apache-2.0"
] | 50 | 2018-09-23T08:57:28.000Z | 2019-02-02T19:59:35.000Z | docs/demos/theme_explorer/list_group.py | sthagen/dash-bootstrap-components | d79ad7f8fdf4c26165038e6989e24f2ac17663b1 | [
"Apache-2.0"
] | 99 | 2018-09-21T11:06:29.000Z | 2019-02-04T09:04:07.000Z | docs/demos/theme_explorer/list_group.py | sthagen/dash-bootstrap-components | d79ad7f8fdf4c26165038e6989e24f2ac17663b1 | [
"Apache-2.0"
] | 3 | 2018-09-25T02:16:24.000Z | 2018-12-22T20:56:31.000Z | import dash_bootstrap_components as dbc
from dash import html
from .util import make_subheading
list_group = html.Div(
[
make_subheading("ListGroup", "list_group"),
dbc.ListGroup(
[
dbc.ListGroupItem("No color applied"),
dbc.ListGroupItem("The primary item", color="primary"),
dbc.ListGroupItem("A secondary item", color="secondary"),
dbc.ListGroupItem("A successful item", color="success"),
dbc.ListGroupItem("A warning item", color="warning"),
dbc.ListGroupItem("A dangerous item", color="danger"),
dbc.ListGroupItem("An informative item", color="info"),
dbc.ListGroupItem("A light item", color="light"),
dbc.ListGroupItem("A dark item", color="dark"),
dbc.ListGroupItem("An action item", action=True),
dbc.ListGroupItem("An active item", active=True),
dbc.ListGroupItem(
[
html.H5("Item 4 heading"),
html.P("Item 4 text"),
]
),
]
),
],
className="mb-4",
)
| 36.878788 | 73 | 0.520953 | 116 | 1,217 | 5.413793 | 0.37931 | 0.305732 | 0.16242 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005122 | 0.358258 | 1,217 | 32 | 74 | 38.03125 | 0.798976 | 0 | 0 | 0.066667 | 0 | 0 | 0.215283 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1 | 0 | 0.1 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c5041849eb6e20166cf188e490e80a877301469d | 2,951 | py | Python | download-from-web/govori.py | miroslavradojevic/python-snippets | 753e1c15dc077d3bcf5de4fd5d3a675daf0da27c | [
"MIT"
] | null | null | null | download-from-web/govori.py | miroslavradojevic/python-snippets | 753e1c15dc077d3bcf5de4fd5d3a675daf0da27c | [
"MIT"
] | null | null | null | download-from-web/govori.py | miroslavradojevic/python-snippets | 753e1c15dc077d3bcf5de4fd5d3a675daf0da27c | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# Download .mp3 podcast files of Radio Belgrade show Govori da bih te video (Speak so that I can see you)
# grab all mp3s and save them with parsed name and date to the output folder
import requests
import os
import time
import xml.dom.minidom
from urllib.parse import urlparse
url = "https://www.rts.rs/page/radio/sr/podcast/5433/govori-da-bih-te-video/audio.html"
# url results with xml that is further parsed
timestamp = time.strftime("%Y%m%d-%H%M%S")
out_dir = os.path.join("govori_" + timestamp)
doc_path = "govori_" + timestamp + ".xml"
if not os.path.exists(out_dir):
os.makedirs(out_dir)
try:
req = requests.get(url)
req.raise_for_status()
doc = xml.dom.minidom.parseString(req.text) # TODO check if it is valid XML
items = doc.getElementsByTagName("item")
print("found ", len(items), " items")
for item in items:
# titles = item.getElementsByTagName("title")
# if len(titles) > 0:
# print(titles[0].firstChild.data)
links = item.getElementsByTagName("link")
if len(links) > 0:
print(links[0].firstChild.data) # read element data value
# get only filename of the .html https://bit.ly/2ZnqwK7
a = urlparse(links[0].firstChild.data)
out_fname_pname = os.path.basename(a.path).replace('.html', '')
else:
out_fname_pname = "NA"
enclosures = item.getElementsByTagName("enclosure")
if len(enclosures) > 0:
url_value = enclosures[0].attributes["url"].value # read attribute value
print(url_value)
if url_value.endswith('.mp3'):
url_elements = urlparse(url_value).path.split('/')
if len(url_elements) >= 5:
out_fname_date = ''.join(url_elements[-5:-2]) # https://bit.ly/3e6mXMk
else:
out_fname_date = "NA"
out_file = out_fname_date + "_" + out_fname_pname + ".mp3"
print("saved to " + os.path.join(out_dir, out_file))
# save mp3 file from url_value to out_file
# https://dzone.com/articles/simple-examples-of-downloading-files-using-python
print("saving... ", end='')
try:
req = requests.get(url_value)
req.raise_for_status()
open(os.path.join(out_dir, out_file), 'wb').write(req.content)
print("saved to " + os.path.join(out_dir, out_file))
except requests.exceptions.HTTPError as err:
print(err)
# raise SystemExit(err)
print("")
# save rss xml
with open(os.path.join(out_dir, doc_path), "w", encoding="utf-8") as f:
f.write(doc.toprettyxml())
print(os.path.join(out_dir, doc_path))
except requests.exceptions.HTTPError as err:
print(err)
# raise SystemExit(err)
| 36.432099 | 105 | 0.597763 | 390 | 2,951 | 4.410256 | 0.402564 | 0.027907 | 0.034884 | 0.037791 | 0.20407 | 0.159884 | 0.155233 | 0.115116 | 0.115116 | 0.115116 | 0 | 0.011278 | 0.278889 | 2,951 | 80 | 106 | 36.8875 | 0.796992 | 0.227381 | 0 | 0.235294 | 0 | 0.019608 | 0.087053 | 0 | 0 | 0 | 0 | 0.0125 | 0 | 1 | 0 | false | 0 | 0.098039 | 0 | 0.098039 | 0.196078 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c506aceeb7ea06c9672cd06b35d80f96cd51d00c | 830 | py | Python | setup.py | uhlerlab/conditional_independence | aa4b5117b6f24bf39433d427d490312864e9bd69 | [
"BSD-3-Clause"
] | 4 | 2021-01-29T20:27:31.000Z | 2022-02-01T11:55:33.000Z | setup.py | uhlerlab/conditional_independence | aa4b5117b6f24bf39433d427d490312864e9bd69 | [
"BSD-3-Clause"
] | null | null | null | setup.py | uhlerlab/conditional_independence | aa4b5117b6f24bf39433d427d490312864e9bd69 | [
"BSD-3-Clause"
] | 1 | 2021-09-12T13:41:21.000Z | 2021-09-12T13:41:21.000Z | import setuptools
setuptools.setup(
name='conditional_independence',
version='0.1a.4',
description='Parametric and non-parametric conditional independence tests.',
long_description='',
author='Chandler Squires',
author_email='chandlersquires18@gmail.com',
packages=setuptools.find_packages(exclude=['tests']),
python_requires='>3.5.0',
zip_safe=False,
classifiers=[
'Topic :: Scientific/Engineering',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Scientific/Engineering :: Artificial Intelligence',
],
install_requires=[
'scipy',
'dataclasses',
'numpy',
# 'scikit_sparse',
'numexpr',
'scikit_learn',
'typing',
'pygam',
'tqdm',
# 'numba',
'ipdb',
]
)
| 25.151515 | 80 | 0.6 | 71 | 830 | 6.887324 | 0.746479 | 0.092025 | 0.159509 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013029 | 0.260241 | 830 | 32 | 81 | 25.9375 | 0.783388 | 0.03012 | 0 | 0 | 0 | 0 | 0.423221 | 0.146067 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.035714 | 0 | 0.035714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c509a2151c61ed3015af0423248b9cd0ce672927 | 1,975 | py | Python | examples/ecs/server_interface.py | wangrui1121/huaweicloud-sdk-python | 240abe00288760115d1791012d4e3c4592d77ad1 | [
"Apache-2.0"
] | 43 | 2018-12-19T08:39:15.000Z | 2021-07-21T02:45:43.000Z | examples/ecs/server_interface.py | wangrui1121/huaweicloud-sdk-python | 240abe00288760115d1791012d4e3c4592d77ad1 | [
"Apache-2.0"
] | 11 | 2019-03-17T13:28:56.000Z | 2020-09-23T23:57:50.000Z | examples/ecs/server_interface.py | wangrui1121/huaweicloud-sdk-python | 240abe00288760115d1791012d4e3c4592d77ad1 | [
"Apache-2.0"
] | 47 | 2018-12-19T05:14:25.000Z | 2022-03-19T15:28:30.000Z | # -*-coding:utf-8 -*-
from openstack import connection
from openstack import exceptions
# create connection
username = "xxxxxx"
password = "xxxxxx"
projectId = "xxxxxxxxxxxxxxxxxxxxxxxxxxxx" # tenant ID
userDomainId = "xxxxxxxxxxxxxxxxxxxxxxxxxxxx" # user account ID
auth_url = "xxxxxxxxxxxxxxxxxxxxxxxxxxxx" # endpoint url
conn = connection.Connection(auth_url=auth_url,
user_domain_id=userDomainId,
project_id=projectId,
username=username,
password=password)
# create server interface
def create_server_interface(server_id, net_id=None, port_id=None,
fixed_ip=None):
attrs = {"net_id": net_id, "port_id": port_id, "fixed_ip": fixed_ip}
kwargs = {}
for key in attrs:
if attrs[key]:
kwargs[key] = attrs[key]
print(kwargs)
if kwargs == {}:
message = "Parameter error"
raise exceptions.SDKException(message)
server = conn.compute.create_server_interface(server_id, **kwargs)
print(server)
return server
# delete interface
def delete_server_interface(server_interface, servr_id):
conn.compute.delete_server_interface(server_interface, server=servr_id)
# show interface detail
def get_server_interface(server_interface, servr_id):
server_ifa = conn.compute.get_server_interface(server_interface,
server=servr_id)
print(server_ifa)
# get list of interface
def server_interfaces(server_id):
server_ifas = conn.compute.server_interfaces(server_id)
for ifa in server_ifas:
print(ifa)
if __name__ == "__main__":
server_id = "8700184b-79ff-414b-ab8e-11ed01bd3d3d"
net_id = "e2103034-dcf3-4ac3-b551-6d5dd8fadb6e"
server = create_server_interface(server_id, net_id)
get_server_interface(server.id, server_id)
server_interfaces(server_id)
delete_server_interface(server.id, server_id)
| 32.377049 | 75 | 0.671392 | 225 | 1,975 | 5.595556 | 0.306667 | 0.166799 | 0.183479 | 0.091342 | 0.267673 | 0.230342 | 0.122319 | 0 | 0 | 0 | 0 | 0.024733 | 0.242532 | 1,975 | 60 | 76 | 32.916667 | 0.816845 | 0.081519 | 0 | 0 | 0 | 0 | 0.117517 | 0.086475 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0.047619 | 0.02381 | 0 | 0.142857 | 0.095238 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c50a6cdccc88ffe721b0e07a35e407563cda966e | 9,060 | py | Python | sdk/python/pulumi_google_native/dlp/v2/stored_info_type.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | [
"Apache-2.0"
] | 44 | 2021-04-18T23:00:48.000Z | 2022-02-14T17:43:15.000Z | sdk/python/pulumi_google_native/dlp/v2/stored_info_type.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | [
"Apache-2.0"
] | 354 | 2021-04-16T16:48:39.000Z | 2022-03-31T17:16:39.000Z | sdk/python/pulumi_google_native/dlp/v2/stored_info_type.py | AaronFriel/pulumi-google-native | 75d1cda425e33d4610348972cd70bddf35f1770d | [
"Apache-2.0"
] | 8 | 2021-04-24T17:46:51.000Z | 2022-01-05T10:40:21.000Z | # coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
from ._inputs import *
__all__ = ['StoredInfoTypeArgs', 'StoredInfoType']
@pulumi.input_type
class StoredInfoTypeArgs:
def __init__(__self__, *,
config: pulumi.Input['GooglePrivacyDlpV2StoredInfoTypeConfigArgs'],
location: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
stored_info_type_id: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a StoredInfoType resource.
:param pulumi.Input['GooglePrivacyDlpV2StoredInfoTypeConfigArgs'] config: Configuration of the storedInfoType to create.
:param pulumi.Input[str] stored_info_type_id: The storedInfoType ID can contain uppercase and lowercase letters, numbers, and hyphens; that is, it must match the regular expression: `[a-zA-Z\d-_]+`. The maximum length is 100 characters. Can be empty to allow the system to generate one.
"""
pulumi.set(__self__, "config", config)
if location is not None:
pulumi.set(__self__, "location", location)
if project is not None:
pulumi.set(__self__, "project", project)
if stored_info_type_id is not None:
pulumi.set(__self__, "stored_info_type_id", stored_info_type_id)
@property
@pulumi.getter
def config(self) -> pulumi.Input['GooglePrivacyDlpV2StoredInfoTypeConfigArgs']:
"""
Configuration of the storedInfoType to create.
"""
return pulumi.get(self, "config")
@config.setter
def config(self, value: pulumi.Input['GooglePrivacyDlpV2StoredInfoTypeConfigArgs']):
pulumi.set(self, "config", value)
@property
@pulumi.getter
def location(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "location")
@location.setter
def location(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "location", value)
@property
@pulumi.getter
def project(self) -> Optional[pulumi.Input[str]]:
return pulumi.get(self, "project")
@project.setter
def project(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "project", value)
@property
@pulumi.getter(name="storedInfoTypeId")
def stored_info_type_id(self) -> Optional[pulumi.Input[str]]:
"""
The storedInfoType ID can contain uppercase and lowercase letters, numbers, and hyphens; that is, it must match the regular expression: `[a-zA-Z\d-_]+`. The maximum length is 100 characters. Can be empty to allow the system to generate one.
"""
return pulumi.get(self, "stored_info_type_id")
@stored_info_type_id.setter
def stored_info_type_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "stored_info_type_id", value)
class StoredInfoType(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
config: Optional[pulumi.Input[pulumi.InputType['GooglePrivacyDlpV2StoredInfoTypeConfigArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
stored_info_type_id: Optional[pulumi.Input[str]] = None,
__props__=None):
"""
Creates a pre-built stored infoType to be used for inspection. See https://cloud.google.com/dlp/docs/creating-stored-infotypes to learn more.
Auto-naming is currently not supported for this resource.
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[pulumi.InputType['GooglePrivacyDlpV2StoredInfoTypeConfigArgs']] config: Configuration of the storedInfoType to create.
:param pulumi.Input[str] stored_info_type_id: The storedInfoType ID can contain uppercase and lowercase letters, numbers, and hyphens; that is, it must match the regular expression: `[a-zA-Z\d-_]+`. The maximum length is 100 characters. Can be empty to allow the system to generate one.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: StoredInfoTypeArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Creates a pre-built stored infoType to be used for inspection. See https://cloud.google.com/dlp/docs/creating-stored-infotypes to learn more.
Auto-naming is currently not supported for this resource.
:param str resource_name: The name of the resource.
:param StoredInfoTypeArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(StoredInfoTypeArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
config: Optional[pulumi.Input[pulumi.InputType['GooglePrivacyDlpV2StoredInfoTypeConfigArgs']]] = None,
location: Optional[pulumi.Input[str]] = None,
project: Optional[pulumi.Input[str]] = None,
stored_info_type_id: Optional[pulumi.Input[str]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = StoredInfoTypeArgs.__new__(StoredInfoTypeArgs)
if config is None and not opts.urn:
raise TypeError("Missing required property 'config'")
__props__.__dict__["config"] = config
__props__.__dict__["location"] = location
__props__.__dict__["project"] = project
__props__.__dict__["stored_info_type_id"] = stored_info_type_id
__props__.__dict__["current_version"] = None
__props__.__dict__["name"] = None
__props__.__dict__["pending_versions"] = None
super(StoredInfoType, __self__).__init__(
'google-native:dlp/v2:StoredInfoType',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None) -> 'StoredInfoType':
"""
Get an existing StoredInfoType resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = StoredInfoTypeArgs.__new__(StoredInfoTypeArgs)
__props__.__dict__["current_version"] = None
__props__.__dict__["name"] = None
__props__.__dict__["pending_versions"] = None
return StoredInfoType(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="currentVersion")
def current_version(self) -> pulumi.Output['outputs.GooglePrivacyDlpV2StoredInfoTypeVersionResponse']:
"""
Current version of the stored info type.
"""
return pulumi.get(self, "current_version")
@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
"""
Resource name.
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="pendingVersions")
def pending_versions(self) -> pulumi.Output[Sequence['outputs.GooglePrivacyDlpV2StoredInfoTypeVersionResponse']]:
"""
Pending versions of the stored info type. Empty if no versions are pending.
"""
return pulumi.get(self, "pending_versions")
| 45.757576 | 294 | 0.663135 | 1,014 | 9,060 | 5.642998 | 0.178501 | 0.051905 | 0.046487 | 0.057672 | 0.52604 | 0.48165 | 0.451066 | 0.432891 | 0.401433 | 0.350052 | 0 | 0.002911 | 0.241611 | 9,060 | 197 | 295 | 45.989848 | 0.829865 | 0.281015 | 0 | 0.330709 | 1 | 0 | 0.145884 | 0.057415 | 0 | 0 | 0 | 0 | 0 | 1 | 0.133858 | false | 0.007874 | 0.055118 | 0.015748 | 0.267717 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c50d8c67882d7ef410bf79b36de881a95ed1d06e | 631 | py | Python | python/cw/letterfreq2.py | vesche/snippets | 7a9d598df99c26c4e0c63669f9f95a94eeed0d08 | [
"Unlicense"
] | 7 | 2016-01-03T19:42:07.000Z | 2018-10-23T14:03:12.000Z | python/cw/letterfreq2.py | vesche/snippets | 7a9d598df99c26c4e0c63669f9f95a94eeed0d08 | [
"Unlicense"
] | null | null | null | python/cw/letterfreq2.py | vesche/snippets | 7a9d598df99c26c4e0c63669f9f95a94eeed0d08 | [
"Unlicense"
] | 1 | 2018-03-09T08:52:01.000Z | 2018-03-09T08:52:01.000Z | #!/usr/bin/env python
from __future__ import division
import sys
from string import ascii_lowercase
with open(sys.argv[1]) as f:
data = f.read().splitlines()
d = {}
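# tally how often each lowercase letter (or space) appears in the file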
for line in data:
for letter in line:
letter = letter.lower()
if letter not in ascii_lowercase+' ':
continue
if letter not in d:
d[letter] = 1
else:
d[letter] += 1
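# convert raw counts to percentages of the total, then print letters by descending frequency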
total = 0
for k,v in d.iteritems():
total += v
for k,v in d.iteritems():
d[k] = float('{:.2%}'.format(v/total)[:-1])
for k,v in sorted(d.items(), key=lambda(k,v): (-v, k)):
print "'{}' {}%".format(k, str(v))
| 21.033333 | 55 | 0.557845 | 99 | 631 | 3.494949 | 0.454545 | 0.023121 | 0.043353 | 0.060694 | 0.098266 | 0.098266 | 0 | 0 | 0 | 0 | 0 | 0.013129 | 0.275753 | 631 | 29 | 56 | 21.758621 | 0.743982 | 0.031696 | 0 | 0.090909 | 0 | 0 | 0.02459 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.136364 | null | null | 0.045455 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c5118009a2cf132e4b87f2f696c2abdd36248815 | 5,479 | py | Python | Coursework_02/Q3/airport/scenarios.py | eBe02/COMP0037-21_22 | c0872548ff4b653e3f786734666838813db2149a | [
"Apache-2.0"
] | null | null | null | Coursework_02/Q3/airport/scenarios.py | eBe02/COMP0037-21_22 | c0872548ff4b653e3f786734666838813db2149a | [
"Apache-2.0"
] | null | null | null | Coursework_02/Q3/airport/scenarios.py | eBe02/COMP0037-21_22 | c0872548ff4b653e3f786734666838813db2149a | [
"Apache-2.0"
] | null | null | null | '''
Created on 25 Jan 2022
@author: ucacsjj
'''
from .airport_map import MapCellType
from .airport_map import AirportMap
# This file contains a set of functions which build different maps. Only
# two of these are needed for the coursework. Others are ones which were
# used for developing and testing the algorithms and might be of use.
# Helper function which fills sets the type of all cells in a rectangular
# region to have the same type.
def _set_block_to_single_type(airport_map, cell_type, start_coords, end_coords):
for x in range(start_coords[0], end_coords[0] + 1):
for y in range(start_coords[1], end_coords[1] + 1):
airport_map.set_cell_type(x, y, cell_type)
# This scenario can be used to test the different traversability costs
def test_traversability_costs_scenario():
airport_map = AirportMap("Test Traversabilty Map", 15, 15)
for x in range(0, 14):
airport_map.set_wall(x, 7)
airport_map.add_secret_door(7, 7)
return airport_map, 200
def one_row_scenario():
airport_map = AirportMap("One Row Scenario", 15, 1)
airport_map.add_robot_end_station(14, 0, 100)
return airport_map, 200
def two_row_scenario():
airport_map = AirportMap("Two Row Scenario", 15, 2)
airport_map.add_robot_end_station(14, 0, 0)
return airport_map, 200
def two_2x2_scenario():
airport_map = AirportMap("2x2 Scenario", 2, 2)
airport_map.add_robot_end_station(0, 1, 100)
return airport_map, 800
def test_3x3_scenario():
airport_map = AirportMap("3x3 Scenario", 3, 3)
airport_map.add_robot_end_station(0, 2, 100)
return airport_map, 800
def three_row_scenario():
airport_map = AirportMap("Three Row Scenario", 15, 3)
airport_map.set_cell_type(2, 1, MapCellType.WALL)
airport_map.add_robot_end_station(14, 0, 0)
return airport_map, 200
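# Long corridor walled along the top and bottom, with chairs near the entrance
# and robot end stations at the far end.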
def corridor_scenario():
airport_map = AirportMap("Three Row Scenario", 20, 7)
_set_block_to_single_type(airport_map, MapCellType.WALL, (0, 0), (19, 0))
_set_block_to_single_type(airport_map, MapCellType.WALL, (0, 6), (19, 6))
_set_block_to_single_type(airport_map, MapCellType.CHAIR, (2, 1), (5, 1))
for y in range(3,7):
airport_map.add_robot_end_station(19, y, 100)
#_set_block_to_single_type(airport_map, MapCellType.ROBOT_END_STATION, (19, 0), (19, 6))
return airport_map, 450
def mini_scenario():
# Create the map
airport_map = AirportMap("Mini Scenario", 15, 15)
# Create the wall on either side and the customs area
for x in range(0, 15):
airport_map.set_wall(x, 7)
for x in range(5, 7):
airport_map.set_customs_area(x, 7)
airport_map.add_charging_station(4, 4, 1, 1)
airport_map.add_secret_door(14, 7)
airport_map.add_toilet(4, 1)
airport_map.add_robot_end_station(0, 14, 100)
return airport_map, 800
def full_scenario():
airport_map = AirportMap("Full Scenario", 60, 40)
# The wall separating the two areas, including the customs area
# and the secret door
_set_block_to_single_type(airport_map, MapCellType.WALL, (0, 18), (59, 20))
_set_block_to_single_type(airport_map, MapCellType.CUSTOMS_AREA, (25, 18), (35, 20))
_set_block_to_single_type(airport_map, MapCellType.SECRET_DOOR, (59, 18), (59, 20))
# The reclaim areas
airport_map.add_rubbish_bin(2, 33)
_set_block_to_single_type(airport_map, MapCellType.BAGGAGE_CLAIM, (5, 30), (8, 36))
airport_map.add_rubbish_bin(11, 33)
_set_block_to_single_type(airport_map, MapCellType.BAGGAGE_CLAIM, (15, 28), (18, 39))
airport_map.add_rubbish_bin(22, 38)
_set_block_to_single_type(airport_map, MapCellType.BAGGAGE_CLAIM, (25, 28), (28, 39))
airport_map.add_rubbish_bin(31, 38)
_set_block_to_single_type(airport_map, MapCellType.BAGGAGE_CLAIM, (35, 28), (38, 39))
airport_map.add_rubbish_bin(41, 38)
_set_block_to_single_type(airport_map, MapCellType.BAGGAGE_CLAIM, (45, 28), (48, 39))
airport_map.add_rubbish_bin(51, 33)
_set_block_to_single_type(airport_map, MapCellType.BAGGAGE_CLAIM, (55, 30), (58, 36))
# The bins in the reclaim areas
# Add the horizontal chairs with bins at either end
for i in range(5):
y_coord = 2 + i * 3
_set_block_to_single_type(airport_map, MapCellType.CHAIR, (5, y_coord), (18, y_coord))
airport_map.add_rubbish_bin(4, y_coord)
airport_map.add_rubbish_bin(19, y_coord)
# Add the vertical chairs with bins at either end
for i in range(5):
x_coord = 42 + i * 3
_set_block_to_single_type(airport_map, MapCellType.CHAIR, (x_coord, 2), (x_coord, 14))
airport_map.add_rubbish_bin(x_coord, 1)
airport_map.add_rubbish_bin(x_coord, 15)
# The toilets. These generate rubbish to be collected
airport_map.add_toilet(0, 21)
airport_map.add_toilet(0, 17)
airport_map.add_toilet(38, 0)
airport_map.add_toilet(58, 21)
# These charge the robot back up again
airport_map.add_charging_station(1, 38, 15, 1)
airport_map.add_charging_station(58, 38, 15, 1)
airport_map.add_charging_station(36, 0, 30, 1)
airport_map.add_charging_station(59, 0, 40, 1)
airport_map.add_robot_end_station(1, 21, 50)
return airport_map, 800
| 32.613095 | 94 | 0.686804 | 852 | 5,479 | 4.10446 | 0.18662 | 0.203031 | 0.111524 | 0.073206 | 0.582499 | 0.47698 | 0.391764 | 0.290535 | 0.251072 | 0.213326 | 0 | 0.072295 | 0.217375 | 5,479 | 167 | 95 | 32.808383 | 0.743237 | 0.162986 | 0 | 0.157303 | 0 | 0 | 0.030695 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.11236 | false | 0 | 0.022472 | 0 | 0.235955 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c511d2974df6ea839e2f08eec91ae6a38dd211bf | 332 | py | Python | setup.py | abkfenris/adm_locations | 266915ab7e7559bd4c66d4090bcd69a2a93ab563 | [
"MIT"
] | null | null | null | setup.py | abkfenris/adm_locations | 266915ab7e7559bd4c66d4090bcd69a2a93ab563 | [
"MIT"
] | null | null | null | setup.py | abkfenris/adm_locations | 266915ab7e7559bd4c66d4090bcd69a2a93ab563 | [
"MIT"
] | null | null | null | from setuptools import setup
setup(
name='csv_locate',
version='0.1',
py_modules=['csv_to_json'],
install_requires=[
'click',
'colorama',
'geocoder',
'geojson',
'jinja2',
],
entry_points='''
[console_scripts]
csv_locate=csv_to_json:convert
''',
)
| 17.473684 | 38 | 0.539157 | 33 | 332 | 5.121212 | 0.787879 | 0.106509 | 0.106509 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013216 | 0.316265 | 332 | 18 | 39 | 18.444444 | 0.731278 | 0 | 0 | 0 | 0 | 0 | 0.385542 | 0.090361 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.058824 | 0 | 0.058824 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c5198d8481c8a0970f981fde506e8ae0b90aab1f | 1,763 | py | Python | bin/wls_users.py | rstyczynski/wls-tools | 292a39a3f7af7b9d7d4c4849618d6789daae9b58 | [
"Apache-2.0"
] | null | null | null | bin/wls_users.py | rstyczynski/wls-tools | 292a39a3f7af7b9d7d4c4849618d6789daae9b58 | [
"Apache-2.0"
] | null | null | null | bin/wls_users.py | rstyczynski/wls-tools | 292a39a3f7af7b9d7d4c4849618d6789daae9b58 | [
"Apache-2.0"
] | null | null | null | #!$BEA_HOME/oracle_common/common/bin/wlst.sh
# default values
admin_name = 'AdminServer'
admin_address = 'localhost'
admin_port = 7001
admin_protocol = 't3'
admin_url = admin_protocol + "://" + admin_address + ":" + str(admin_port)
def usage():
print "dump_users [-s|--server -p|--port] [-u|--url] [-d|--delimiter]"
try:
opts, args = getopt.getopt( sys.argv[1:], 's:p:u::d:h', ['server=','port=','url=','delimiter='] )
except getopt.GetoptError, err:
print str(err)
usage()
sys.exit(2)
for opt, arg in opts:
if opt in ('--help'):
usage()
sys.exit(2)
elif opt in ('-s', '--server'):
admin_name = arg
elif opt in ('-p', '--port'):
admin_port = arg
admin_url = admin_protocol + "://" + admin_address + ":" + str(admin_port)
elif opt in ('-u', '--url'):
admin_url = arg
elif opt in ('-d', '--delimiter'):
delimiter = arg
else:
usage()
sys.exit(2)
connect(url=admin_url, adminServerName=admin_name)
# do work
from weblogic.management.security.authentication import UserReaderMBean
from weblogic.management.security.authentication import GroupReaderMBean
realmName=cmo.getSecurityConfiguration().getDefaultRealm()
authProvider = realmName.getAuthenticationProviders()
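# emit CSV: one line per (group, user) membership found by each group reader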
print 'admin_url,group,user'
for i in authProvider:
if isinstance(i,GroupReaderMBean):
groupReader = i
cursor = i.listGroups("*",0)
while groupReader.haveCurrent(cursor):
group = groupReader.getCurrentName(cursor)
usergroup = i.listAllUsersInGroup(group,"*",0)
for user in usergroup:
print '%s,%s,%s' % (admin_url,group,user)
groupReader.advance(cursor)
groupReader.close(cursor)
#
disconnect()
exit() | 27.546875 | 101 | 0.642087 | 212 | 1,763 | 5.235849 | 0.40566 | 0.043243 | 0.032432 | 0.035135 | 0.171171 | 0.171171 | 0.081081 | 0.081081 | 0.081081 | 0 | 0 | 0.007835 | 0.20363 | 1,763 | 64 | 102 | 27.546875 | 0.782764 | 0.038003 | 0 | 0.166667 | 0 | 0 | 0.119315 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.041667 | null | null | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c5206e72ad25192f5a2ed7316aa7ced0c3105161 | 436 | py | Python | tests/test_calculate_branch.py | ivergara/python-abc | b5bb87b80315f8e5ecd2d6f35b7208f0a7df9c3a | [
"Unlicense"
] | 2 | 2021-07-25T20:12:21.000Z | 2021-07-25T21:19:23.000Z | tests/test_calculate_branch.py | ivergara/python-abc | b5bb87b80315f8e5ecd2d6f35b7208f0a7df9c3a | [
"Unlicense"
] | 1 | 2021-12-28T22:07:05.000Z | 2021-12-28T22:07:05.000Z | tests/test_calculate_branch.py | ivergara/python-abc | b5bb87b80315f8e5ecd2d6f35b7208f0a7df9c3a | [
"Unlicense"
] | 1 | 2021-12-07T19:53:45.000Z | 2021-12-07T19:53:45.000Z | import pytest
from tests import assert_source_returns_expected
BRANCH_CASES = [
# Call
('print("hello world")', 'b | print("hello world")'),
# Await
("await noop()", "b | await noop()"),
# Class instantiation
("Noop()", "b | Noop()"),
]
@pytest.mark.parametrize("source,expected", BRANCH_CASES)
def test_branch(capsys, source, expected):
    assert_source_returns_expected(capsys, source, expected)
| 22.947368 | 68 | 0.669725 | 52 | 436 | 5.442308 | 0.5 | 0.14841 | 0.134276 | 0.190813 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.176606 | 436 | 18 | 69 | 24.222222 | 0.788301 | 0.068807 | 0 | 0 | 0 | 0 | 0.256219 | 0 | 0 | 0 | 0 | 0 | 0.2 | 1 | 0.1 | false | 0 | 0.2 | 0 | 0.3 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c53b92a47fb947f6f8b829b01647aa8c055f8973 | 644 | py | Python | character/migrations/0004_alter_character_alignment.py | scottBowles/dnd | a1ef333f1a865d51b5426dc4b3493e8437584565 | [
"MIT"
] | null | null | null | character/migrations/0004_alter_character_alignment.py | scottBowles/dnd | a1ef333f1a865d51b5426dc4b3493e8437584565 | [
"MIT"
] | null | null | null | character/migrations/0004_alter_character_alignment.py | scottBowles/dnd | a1ef333f1a865d51b5426dc4b3493e8437584565 | [
"MIT"
] | null | null | null | # Generated by Django 3.2.5 on 2021-08-12 02:00
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('character', '0003_alter_character_id'),
]
operations = [
migrations.AlterField(
model_name='character',
name='alignment',
field=models.CharField(blank=True, choices=[('LG', 'Lawful Good'), ('NG', 'Neutral Good'), ('CG', 'Chaotic Good'), ('LN', 'Lawful Neutral'), ('N', 'True Neutral'), ('CN', 'Chaotic Neutral'), ('LE', 'Lawful Evil'), ('NE', 'Neutral Evil'), ('CE', 'Chaotic Evil')], max_length=2, null=True),
),
]
| 33.894737 | 300 | 0.591615 | 74 | 644 | 5.081081 | 0.689189 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.039683 | 0.217391 | 644 | 18 | 301 | 35.777778 | 0.706349 | 0.069876 | 0 | 0 | 1 | 0 | 0.298157 | 0.038526 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c53bcf309d42be5b0611b4932b04593b5fb3c79b | 818 | py | Python | graphs_trees/check_balance/test_check_balance.py | filippovitale/interactive-coding-challenges | 8380a7aa98618c3cc9c0271c30bd320937d431ad | [
"Apache-2.0"
] | null | null | null | graphs_trees/check_balance/test_check_balance.py | filippovitale/interactive-coding-challenges | 8380a7aa98618c3cc9c0271c30bd320937d431ad | [
"Apache-2.0"
] | null | null | null | graphs_trees/check_balance/test_check_balance.py | filippovitale/interactive-coding-challenges | 8380a7aa98618c3cc9c0271c30bd320937d431ad | [
"Apache-2.0"
] | 1 | 2019-12-13T12:57:44.000Z | 2019-12-13T12:57:44.000Z | from nose.tools import assert_equal
class TestCheckBalance(object):
def test_check_balance(self):
node = Node(5)
insert(node, 3)
insert(node, 8)
insert(node, 1)
insert(node, 4)
assert_equal(check_balance(node), True)
node = Node(5)
insert(node, 3)
insert(node, 8)
insert(node, 9)
insert(node, 10)
assert_equal(check_balance(node), False)
node = Node(3)
insert(node, 2)
insert(node, 1)
insert(node, 5)
insert(node, 4)
insert(node, 6)
insert(node, 7)
assert_equal(check_balance(node), False)
print('Success: test_check_balance')
def main():
test = TestCheckBalance()
test.test_check_balance()
if __name__ == '__main__':
main() | 20.974359 | 48 | 0.570905 | 100 | 818 | 4.46 | 0.33 | 0.313901 | 0.107623 | 0.100897 | 0.459641 | 0.327354 | 0.183857 | 0.183857 | 0.183857 | 0.183857 | 0 | 0.032086 | 0.314181 | 818 | 39 | 49 | 20.974359 | 0.762923 | 0 | 0 | 0.413793 | 0 | 0 | 0.042735 | 0 | 0 | 0 | 0 | 0 | 0.137931 | 1 | 0.068966 | false | 0 | 0.034483 | 0 | 0.137931 | 0.034483 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c53d9c366f6302c3f4189f86bcaf5a05f084763e | 19,136 | py | Python | src_RealData/Nets/ObjectOriented.py | XYZsake/DRFNS | 73fc5683db5e9f860846e22c8c0daf73b7103082 | [
"MIT"
] | 42 | 2018-10-07T08:19:01.000Z | 2022-02-08T17:41:24.000Z | src_RealData/Nets/ObjectOriented.py | XYZsake/DRFNS | 73fc5683db5e9f860846e22c8c0daf73b7103082 | [
"MIT"
] | 11 | 2018-12-22T00:15:46.000Z | 2021-12-03T10:29:32.000Z | src_RealData/Nets/ObjectOriented.py | XYZsake/DRFNS | 73fc5683db5e9f860846e22c8c0daf73b7103082 | [
"MIT"
] | 14 | 2018-08-26T06:47:06.000Z | 2021-07-24T11:52:58.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import tensorflow as tf
import numpy as np
import os
from sklearn.metrics import confusion_matrix
from datetime import datetime
class ConvolutionalNeuralNetwork:
"""
Generic object for create DNN models.
This class instinciates all functions
needed for DNN operations.
"""
def __init__(
self,
LEARNING_RATE=0.01,
K=0.96,
BATCH_SIZE=1,
IMAGE_SIZE=28,
NUM_LABELS=10,
NUM_CHANNELS=1,
NUM_TEST=10000,
STEPS=2000,
LRSTEP=200,
DECAY_EMA=0.9999,
N_PRINT = 100,
LOG="/tmp/net",
SEED=42,
DEBUG=True,
WEIGHT_DECAY=0.00005,
LOSS_FUNC=tf.nn.l2_loss,
N_FEATURES=16):
self.LEARNING_RATE = LEARNING_RATE
self.K = K
self.BATCH_SIZE = BATCH_SIZE
self.IMAGE_SIZE = IMAGE_SIZE
self.NUM_LABELS = NUM_LABELS
self.NUM_CHANNELS = NUM_CHANNELS
self.N_FEATURES = N_FEATURES
# self.NUM_TEST = NUM_TEST
self.STEPS = STEPS
self.N_PRINT = N_PRINT
self.LRSTEP = LRSTEP
self.DECAY_EMA = DECAY_EMA
self.LOG = LOG
self.SEED = SEED
self.sess = tf.InteractiveSession()
self.sess.as_default()
self.var_to_reg = []
self.var_to_sum = []
self.init_vars()
self.init_model_architecture()
self.init_training_graph()
self.Saver()
self.DEBUG = DEBUG
self.loss_func = LOSS_FUNC
self.weight_decay = WEIGHT_DECAY
def regularize_model(self):
"""
Adds regularization to parameters of the model given LOSS_FUNC
"""
if self.DEBUG:
for var in self.var_to_sum + self.var_to_reg:
self.add_to_summary(var)
self.WritteSummaryImages()
for var in self.var_to_reg:
self.add_to_regularization(var)
def add_to_summary(self, var):
"""
Adds histogram for each parameter in var
"""
if var is not None:
tf.summary.histogram(var.op.name, var)
def add_to_regularization(self, var):
"""
Combines loss with regularization loss
"""
if var is not None:
self.loss = self.loss + self.weight_decay * self.loss_func(var)
def add_activation_summary(self, var):
"""
Add activation summary with information about sparsity
"""
if var is not None:
tf.summary.histogram(var.op.name + "/activation", var)
tf.summary.scalar(var.op.name + "/sparsity", tf.nn.zero_fraction(var))
def add_gradient_summary(self, grad, var):
"""
Add gradiant summary to summary
"""
if grad is not None:
tf.summary.histogram(var.op.name + "/gradient", grad)
def input_node_f(self):
"""
Input node, called when initialising the network
"""
return tf.placeholder(
tf.float32,
shape=(self.BATCH_SIZE, self.IMAGE_SIZE, self.IMAGE_SIZE, self.NUM_CHANNELS))
def label_node_f(self):
"""
Label node, called when initialising the network
"""
return tf.placeholder(
tf.float32,
shape=(self.BATCH_SIZE, self.IMAGE_SIZE, self.IMAGE_SIZE, 1))
def conv_layer_f(self, i_layer, w_var, strides, scope_name, padding="SAME"):
"""
Defining convolution layer
"""
with tf.name_scope(scope_name):
return tf.nn.conv2d(i_layer, w_var, strides=strides, padding=padding)
def relu_layer_f(self, i_layer, biases, scope_name):
"""
Defining relu layer
"""
with tf.name_scope(scope_name):
act = tf.nn.relu(tf.nn.bias_add(i_layer, biases))
self.var_to_sum.append(act)
return act
def weight_const_f(self, ks, inchannels, outchannels, stddev, scope_name, name="W", reg="True"):
"""
Defining parameter to give to a convolution layer
"""
with tf.name_scope(scope_name):
            K = tf.Variable(tf.truncated_normal([ks, ks, inchannels, outchannels], # ks x ks filter, outchannels feature maps.
stddev=stddev,
seed=self.SEED))
self.var_to_reg.append(K)
self.var_to_sum.append(K)
return K
def weight_xavier(self, ks, inchannels, outchannels, scope_name, name="W"):
"""
Initialises a convolution kernel for a convolution layer with Xavier initialising
"""
xavier_std = np.sqrt( 1. / float(ks * ks * inchannels) )
return self.weight_const_f(ks, inchannels, outchannels, xavier_std, scope_name, name=name)
def biases_const_f(self, const, shape, scope_name, name="B"):
"""
Initialises biais
"""
with tf.name_scope(scope_name):
b = tf.Variable(tf.constant(const, shape=[shape]), name=name)
self.var_to_sum.append(b)
return b
def max_pool(self, i_layer, ksize=[1,2,2,1], strides=[1,2,2,1],
padding="SAME", name="MaxPool"):
"""
Performs max pool operation
"""
return tf.nn.max_pool(i_layer, ksize=ksize, strides=strides,
padding=padding, name=name)
def BatchNorm(self, Input, n_out, phase_train, scope='bn', decay=0.9, eps=1e-5):
"""
Performs batch normalisation.
Code taken from http://stackoverflow.com/a/34634291/2267819
"""
with tf.name_scope(scope):
init_beta = tf.constant(0.0, shape=[n_out])
beta = tf.Variable(init_beta, name="beta")
init_gamma = tf.random_normal([n_out], 1.0, 0.02)
gamma = tf.Variable(init_gamma)
batch_mean, batch_var = tf.nn.moments(Input, [0, 1, 2], name='moments')
ema = tf.train.ExponentialMovingAverage(decay=decay)
def mean_var_with_update():
ema_apply_op = ema.apply([batch_mean, batch_var])
with tf.control_dependencies([ema_apply_op]):
return tf.identity(batch_mean), tf.identity(batch_var)
mean, var = tf.cond(phase_train,
mean_var_with_update,
lambda: (ema.average(batch_mean), ema.average(batch_var)))
normed = tf.nn.batch_normalization(Input, mean, var, beta, gamma, eps)
return normed
def DropOutLayer(self, Input, scope="DropOut"):
"""
Performs drop out on the input layer
"""
with tf.name_scope(scope):
            return tf.nn.dropout(Input, self.keep_prob)  # self.keep_prob is defined in init_vars
def init_vars(self):
"""
Initialises variables for the graph
"""
self.input_node = self.input_node_f()
self.train_labels_node = self.label_node_f()
self.conv1_weights = self.weight_xavier(5, self.NUM_CHANNELS, 8, "conv1/")
self.conv1_biases = self.biases_const_f(0.1, 8, "conv1/")
self.conv2_weights = self.weight_xavier(5, 8, 8, "conv2/")
self.conv2_biases = self.biases_const_f(0.1, 8, "conv2/")
self.conv3_weights = self.weight_xavier(5, 8, 8, "conv3/")
self.conv3_biases = self.biases_const_f(0.1, 8, "conv3/")
self.logits_weight = self.weight_xavier(1, 8, self.NUM_LABELS, "logits/")
self.logits_biases = self.biases_const_f(0.1, self.NUM_LABELS, "logits/")
self.keep_prob = tf.Variable(0.5, name="dropout_prob")
print('Model variables initialised')
def WritteSummaryImages(self):
"""
Image summary to add to the summary
"""
tf.summary.image("Input", self.input_node, max_outputs=4)
tf.summary.image("Label", self.train_labels_node, max_outputs=4)
tf.summary.image("Pred", tf.expand_dims(tf.cast(self.predictions, tf.float32), dim=3), max_outputs=4)
def init_model_architecture(self):
"""
Graph structure for the model
"""
self.conv1 = self.conv_layer_f(self.input_node, self.conv1_weights,
[1,1,1,1], "conv1/")
self.relu1 = self.relu_layer_f(self.conv1, self.conv1_biases, "conv1/")
self.conv2 = self.conv_layer_f(self.relu1, self.conv2_weights,
[1,1,1,1], "conv2/")
self.relu2 = self.relu_layer_f(self.conv2, self.conv2_biases, "conv2/")
self.conv3 = self.conv_layer_f(self.relu2, self.conv3_weights,
[1,1,1,1], "conv3/")
self.relu3 = self.relu_layer_f(self.conv3, self.conv3_biases, "conv3/")
self.last = self.relu3
print('Model architecture initialised')
def init_training_graph(self):
"""
Graph optimization part, here we define the loss and how the model is evaluated
"""
with tf.name_scope('Evaluation'):
self.logits = self.conv_layer_f(self.last, self.logits_weight, strides=[1,1,1,1], scope_name="logits/")
self.predictions = tf.argmax(self.logits, axis=3)
with tf.name_scope('Loss'):
self.loss = tf.reduce_mean((tf.nn.sparse_softmax_cross_entropy_with_logits(logits=self.logits,
labels=tf.squeeze(tf.cast(self.train_labels_node, tf.int32), squeeze_dims=[3]),
name="entropy")))
tf.summary.scalar("entropy", self.loss)
with tf.name_scope('Accuracy'):
LabelInt = tf.squeeze(tf.cast(self.train_labels_node, tf.int64), squeeze_dims=[3])
CorrectPrediction = tf.equal(self.predictions, LabelInt)
self.accuracy = tf.reduce_mean(tf.cast(CorrectPrediction, tf.float32))
tf.summary.scalar("accuracy", self.accuracy)
with tf.name_scope('Prediction'):
self.TP = tf.count_nonzero(self.predictions * LabelInt)
self.TN = tf.count_nonzero((self.predictions - 1) * (LabelInt - 1))
self.FP = tf.count_nonzero(self.predictions * (LabelInt - 1))
self.FN = tf.count_nonzero((self.predictions - 1) * LabelInt)
with tf.name_scope('Precision'):
self.precision = tf.divide(self.TP, tf.add(self.TP, self.FP))
tf.summary.scalar('Precision', self.precision)
with tf.name_scope('Recall'):
self.recall = tf.divide(self.TP, tf.add(self.TP, self.FN))
tf.summary.scalar('Recall', self.recall)
with tf.name_scope('F1'):
num = tf.multiply(self.precision, self.recall)
dem = tf.add(self.precision, self.recall)
self.F1 = tf.scalar_mul(2, tf.divide(num, dem))
tf.summary.scalar('F1', self.F1)
with tf.name_scope('MeanAccuracy'):
Nprecision = tf.divide(self.TN, tf.add(self.TN, self.FN))
self.MeanAcc = tf.divide(tf.add(self.precision, Nprecision) ,2)
tf.summary.scalar('Performance', self.MeanAcc)
#self.batch = tf.Variable(0, name = "batch_iterator")
self.train_prediction = tf.nn.softmax(self.logits)
self.test_prediction = tf.nn.softmax(self.logits)
tf.global_variables_initializer().run()
print('Computational graph initialised')
def error_rate(self, predictions, labels, iter):
"""
Operations to perform on the training prediction every N_PRINT iterations.
These values are printed to screen.
"""
predictions = np.argmax(predictions, 3)
labels = labels[:,:,:,0]
cm = confusion_matrix(labels.flatten(), predictions.flatten(), labels=[0, 1]).astype(np.float)
b, x, y = predictions.shape
total = b * x * y
TP = cm[1, 1]
TN = cm[0, 0]
        FN = cm[1, 0]  # sklearn convention: cm[true_label, predicted_label]
        FP = cm[0, 1]
acc = (TP + TN) / (TP + TN + FN + FP) * 100
precision = TP / (TP + FP)
acc1 = np.mean([precision, TN / (TN + FN)]) * 100
recall = TP / (TP + FN)
F1 = 2 * precision * recall / (recall + precision)
error = 100 - acc
return error, acc, acc1, recall * 100, precision * 100, F1 * 100
def optimization(self, var_list):
"""
Defining the optimization method to solve the task
"""
with tf.name_scope('optimizer'):
optimizer = tf.train.AdamOptimizer(self.learning_rate)
grads = optimizer.compute_gradients(self.loss, var_list=var_list)
if self.DEBUG:
for grad, var in grads:
self.add_gradient_summary(grad, var)
self.optimizer = optimizer.apply_gradients(grads, global_step=self.global_step)
def LearningRateSchedule(self, lr, k, epoch):
"""
Defines the learning rate
"""
with tf.name_scope('LearningRateSchedule'):
self.global_step = tf.Variable(0., trainable=False)
tf.add_to_collection('global_step', self.global_step)
if self.LRSTEP == "epoch/2":
decay_step = float(epoch) / (2 * self.BATCH_SIZE)
elif "epoch" in self.LRSTEP:
num = int(self.LRSTEP[:-5])
decay_step = float(num) * float(epoch) / self.BATCH_SIZE
else:
decay_step = float(self.LRSTEP)
self.learning_rate = tf.train.exponential_decay(
lr,
self.global_step,
decay_step,
k,
staircase=True)
tf.summary.scalar("learning_rate", self.learning_rate)
def Validation(self, DG_TEST, step):
"""
How the models validates on the test set.
"""
n_test = DG_TEST.length
n_batch = int(np.ceil(float(n_test) / self.BATCH_SIZE))
l, acc, F1, recall, precision, meanacc = 0., 0., 0., 0., 0., 0.
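        # accumulate metrics over every validation batch, then average below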
for i in range(n_batch):
Xval, Yval = DG_TEST.Batch(0, self.BATCH_SIZE)
feed_dict = {self.input_node: Xval,
self.train_labels_node: Yval}
l_tmp, acc_tmp, F1_tmp, recall_tmp, precision_tmp, meanacc_tmp, pred = self.sess.run([self.loss, self.accuracy, self.F1, self.recall, self.precision, self.MeanAcc, self.predictions], feed_dict=feed_dict)
l += l_tmp
acc += acc_tmp
F1 += F1_tmp
recall += recall_tmp
precision += precision_tmp
meanacc += meanacc_tmp
l, acc, F1, recall, precision, meanacc = np.array([l, acc, F1, recall, precision, meanacc]) / n_batch
summary = tf.Summary()
summary.value.add(tag="Test/Accuracy", simple_value=acc)
summary.value.add(tag="Test/Loss", simple_value=l)
summary.value.add(tag="Test/F1", simple_value=F1)
summary.value.add(tag="Test/Recall", simple_value=recall)
summary.value.add(tag="Test/Precision", simple_value=precision)
summary.value.add(tag="Test/Performance", simple_value=meanacc)
self.summary_test_writer.add_summary(summary, step)
print(' Validation loss: %.1f' % l)
print(' Accuracy: %1.f%% \n acc1: %.1f%% \n recall: %1.f%% \n prec: %1.f%% \n f1 : %1.f%% \n' % (acc * 100, meanacc * 100, recall * 100, precision * 100, F1 * 100))
self.saver.save(self.sess, self.LOG + '/' + "model.ckpt", global_step=self.global_step)
def Saver(self):
"""
Defines the saver; restores the model from an existing checkpoint if one is found.
"""
print("Setting up Saver...")
self.saver = tf.train.Saver()
ckpt = tf.train.get_checkpoint_state(self.LOG)
if ckpt and ckpt.model_checkpoint_path:
self.saver.restore(self.sess, ckpt.model_checkpoint_path)
print("Model restored...")
def ExponentialMovingAverage(self, var_list, decay=0.9999):
"""
Adding exponential moving average to increase performance.
This aggregates parameters from different steps in order to have
a more robust classifier.
"""
with tf.name_scope('ExponentialMovingAverage'):
ema = tf.train.ExponentialMovingAverage(decay=decay)
maintain_averages_op = ema.apply(var_list)
# Create an op that will update the moving averages after each training
# step. This is what we will use in place of the usual training op.
with tf.control_dependencies([self.optimizer]):
self.training_op = tf.group(maintain_averages_op)
def train(self, DGTrain, DGTest, saver=True):
"""
How the model should train.
"""
epoch = DGTrain.length
self.LearningRateSchedule(self.LEARNING_RATE, self.K, epoch)
trainable_var = tf.trainable_variables()
self.regularize_model()
self.optimization(trainable_var)
self.ExponentialMovingAverage(trainable_var, self.DECAY_EMA)
tf.global_variables_initializer().run()
tf.local_variables_initializer().run()
self.summary_test_writer = tf.summary.FileWriter(self.LOG + '/test',
graph=self.sess.graph)
self.summary_writer = tf.summary.FileWriter(self.LOG + '/train', graph=self.sess.graph)
merged_summary = tf.summary.merge_all()
steps = self.STEPS
for step in range(steps):
batch_data, batch_labels = DGTrain.Batch(0, self.BATCH_SIZE)
feed_dict = {self.input_node: batch_data,
self.train_labels_node: batch_labels}
# self.optimizer is replaced by self.training_op for the exponential moving decay
_, l, lr, predictions, s = self.sess.run(
[self.training_op, self.loss, self.learning_rate,
self.train_prediction, merged_summary],
feed_dict=feed_dict)
if step % self.N_PRINT == 0:
i = datetime.now()
print(i.strftime('%Y/%m/%d %H:%M:%S: \n '))
self.summary_writer.add_summary(s, step)
error, acc, acc1, recall, prec, f1 = self.error_rate(predictions, batch_labels, step)
print(' Step %d of %d' % (step, steps))
print(' Learning rate: %.5f \n' % lr)
print(' Mini-batch loss: %.5f \n Accuracy: %.1f%% \n acc1: %.1f%% \n recall: %.1f%% \n prec: %.1f%% \n f1 : %.1f%% \n' %
(l, acc, acc1, recall, prec, f1))
self.Validation(DGTest, step) | 38.272 | 215 | 0.570861 | 2,351 | 19,136 | 4.482773 | 0.171416 | 0.010817 | 0.016131 | 0.024196 | 0.217668 | 0.155233 | 0.098491 | 0.071828 | 0.057121 | 0.04137 | 0 | 0.022297 | 0.315635 | 19,136 | 500 | 216 | 38.272 | 0.782453 | 0.021112 | 0 | 0.061889 | 0 | 0.006515 | 0.057297 | 0.001441 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.016287 | null | null | 0.035831 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c53f7e729f7148ea37a06ebe087c005b16755a1d | 25,133 | py | Python | maintest.py | thorsilver/ABM-for-social-care | 3a47868d2881799980a3f9f24b78c66a31eda194 | [
"MIT"
] | null | null | null | maintest.py | thorsilver/ABM-for-social-care | 3a47868d2881799980a3f9f24b78c66a31eda194 | [
"MIT"
] | null | null | null | maintest.py | thorsilver/ABM-for-social-care | 3a47868d2881799980a3f9f24b78c66a31eda194 | [
"MIT"
] | 1 | 2018-01-05T15:42:40.000Z | 2018-01-05T15:42:40.000Z |
from sim import Sim
import os
import cProfile
import pylab
import math
import matplotlib.pyplot as plt
import argparse
import json
import decimal
import numpy as np
def init_params():
"""Set up the simulation parameters."""
p = {}
## The basics: starting population and year, etc.
p['initialPop'] = 750
p['startYear'] = 1860
p['endYear'] = 2050
p['thePresent'] = 2012
p['statsCollectFrom'] = 1960
p['minStartAge'] = 20
p['maxStartAge'] = 40
p['verboseDebugging'] = False
p['singleRunGraphs'] = True
p['favouriteSeed'] = None
p['numRepeats'] = 1
p['loadFromFile'] = False
## Mortality statistics
p['baseDieProb'] = 0.0001
p['babyDieProb'] = 0.005
p['maleAgeScaling'] = 14.0
p['maleAgeDieProb'] = 0.00021
p['femaleAgeScaling'] = 15.5
p['femaleAgeDieProb'] = 0.00019
p['num5YearAgeClasses'] = 28
## Transitions to care statistics
p['baseCareProb'] = 0.0002
p['personCareProb'] = 0.0008
##p['maleAgeCareProb'] = 0.0008
p['maleAgeCareScaling'] = 18.0
##p['femaleAgeCareProb'] = 0.0008
p['femaleAgeCareScaling'] = 19.0
p['numCareLevels'] = 5
p['cdfCareTransition'] = [ 0.7, 0.9, 0.95, 1.0 ]
p['careLevelNames'] = ['none','low','moderate','substantial','critical']
p['careDemandInHours'] = [ 0.0, 8.0, 16.0, 30.0, 80.0 ]
## Availability of care statistics
p['childHours'] = 5.0
p['homeAdultHours'] = 30.0
p['workingAdultHours'] = 25.0
p['retiredHours'] = 60.0
p['lowCareHandicap'] = 0.5
p['hourlyCostOfCare'] = 20.0
## Fertility statistics
p['growingPopBirthProb'] = 0.215
p['steadyPopBirthProb'] = 0.13
p['transitionYear'] = 1965
p['minPregnancyAge'] = 17
p['maxPregnancyAge'] = 42
## Class and employment statistics
p['numOccupationClasses'] = 3
p['occupationClasses'] = ['lower','intermediate','higher']
p['cdfOccupationClasses'] = [ 0.6, 0.9, 1.0 ]
## Age transition statistics
p['ageOfAdulthood'] = 17
p['ageOfRetirement'] = 65
## Marriage and divorce statistics (partnerships really)
p['basicFemaleMarriageProb'] = 0.25
p['femaleMarriageModifierByDecade'] = [ 0.0, 0.5, 1.0, 1.0, 1.0, 0.6, 0.5, 0.4, 0.1, 0.01, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0 ]
p['basicMaleMarriageProb'] = 0.3
p['maleMarriageModifierByDecade'] = [ 0.0, 0.16, 0.5, 1.0, 0.8, 0.7, 0.66, 0.5, 0.4, 0.2, 0.1, 0.05, 0.01, 0.0, 0.0, 0.0 ]
p['basicDivorceRate'] = 0.06
p['variableDivorce'] = 0.06
p['divorceModifierByDecade'] = [ 0.0, 1.0, 0.9, 0.5, 0.4, 0.2, 0.1, 0.03, 0.01, 0.001, 0.001, 0.001, 0.0, 0.0, 0.0, 0.0 ]
## Leaving home and moving around statistics
p['probApartWillMoveTogether'] = 0.3
p['coupleMovesToExistingHousehold'] = 0.3
p['basicProbAdultMoveOut'] = 0.22
p['probAdultMoveOutModifierByDecade'] = [ 0.0, 0.2, 1.0, 0.6, 0.3, 0.15, 0.03, 0.03, 0.01, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ]
p['basicProbSingleMove'] = 0.05
p['probSingleMoveModifierByDecade'] = [ 0.0, 1.0, 1.0, 0.8, 0.4, 0.06, 0.04, 0.02, 0.02, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ]
p['basicProbFamilyMove'] = 0.03
p['probFamilyMoveModifierByDecade'] = [ 0.0, 0.5, 0.8, 0.5, 0.2, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1, 0.1 ]
p['agingParentsMoveInWithKids'] = 0.1
p['variableMoveBack'] = 0.1
## Description of the map, towns, and houses
p['mapGridXDimension'] = 8
p['mapGridYDimension'] = 12
p['townGridDimension'] = 40
p['numHouseClasses'] = 3
p['houseClasses'] = ['small','medium','large']
p['cdfHouseClasses'] = [ 0.6, 0.9, 1.0 ]  # cumulative distribution, so the last entry is 1.0
p['ukMap'] = [ [ 0.0, 0.1, 0.2, 0.1, 0.0, 0.0, 0.0, 0.0 ],
[ 0.1, 0.1, 0.2, 0.2, 0.3, 0.0, 0.0, 0.0 ],
[ 0.0, 0.2, 0.2, 0.3, 0.0, 0.0, 0.0, 0.0 ],
[ 0.0, 0.2, 1.0, 0.5, 0.0, 0.0, 0.0, 0.0 ],
[ 0.4, 0.0, 0.2, 0.2, 0.4, 0.0, 0.0, 0.0 ],
[ 0.6, 0.0, 0.0, 0.3, 0.8, 0.2, 0.0, 0.0 ],
[ 0.0, 0.0, 0.0, 0.6, 0.8, 0.4, 0.0, 0.0 ],
[ 0.0, 0.0, 0.2, 1.0, 0.8, 0.6, 0.1, 0.0 ],
[ 0.0, 0.0, 0.1, 0.2, 1.0, 0.6, 0.3, 0.4 ],
[ 0.0, 0.0, 0.5, 0.7, 0.5, 1.0, 1.0, 0.0 ],
[ 0.0, 0.0, 0.2, 0.4, 0.6, 1.0, 1.0, 0.0 ],
[ 0.0, 0.2, 0.3, 0.0, 0.0, 0.0, 0.0, 0.0 ] ]
p['mapDensityModifier'] = 0.6
p['ukClassBias'] = [
[ 0.0, -0.05, -0.05, -0.05, 0.0, 0.0, 0.0, 0.0 ],
[ -0.05, -0.05, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0 ],
[ 0.0, -0.05, -0.05, 0.0, 0.0, 0.0, 0.0, 0.0 ],
[ 0.0, -0.05, -0.05, 0.05, 0.0, 0.0, 0.0, 0.0 ],
[ -0.05, 0.0, -0.05, -0.05, 0.0, 0.0, 0.0, 0.0 ],
[ -0.05, 0.0, 0.0, -0.05, -0.05, -0.05, 0.0, 0.0 ],
[ 0.0, 0.0, 0.0, -0.05, -0.05, -0.05, 0.0, 0.0 ],
[ 0.0, 0.0, -0.05, -0.05, 0.0, 0.0, 0.0, 0.0 ],
[ 0.0, 0.0, -0.05, 0.0, -0.05, 0.0, 0.0, 0.0 ],
[ 0.0, 0.0, 0.0, -0.05, 0.0, 0.2, 0.15, 0.0 ],
[ 0.0, 0.0, 0.0, 0.0, 0.1, 0.2, 0.15, 0.0 ],
[ 0.0, 0.0, 0.1, 0.0, 0.0, 0.0, 0.0, 0.0 ] ]
## Graphical interface details
p['interactiveGraphics'] = True
p['delayTime'] = 0.0
p['screenWidth'] = 1300
p['screenHeight'] = 700
p['bgColour'] = 'black'
p['mainFont'] = 'Helvetica 18'
p['fontColour'] = 'white'
p['dateX'] = 70
p['dateY'] = 20
p['popX'] = 70
p['popY'] = 50
p['pixelsInPopPyramid'] = 2000
p['careLevelColour'] = ['blue','green','yellow','orange','red']
p['houseSizeColour'] = ['brown','purple','yellow']
p['pixelsPerTown'] = 56
p['maxTextUpdateList'] = 22
return p
p = init_params()
#######################################################
## A basic single run
def basicRun(p):
s = Sim(p)
tax = s.run()
#######################################################
## Batch run (no graphics)
def batchRun(num):
p['interactiveGraphics'] = False
dataFile = open('batchRunData.txt','w')
for i in range ( 0, num ):
print "Doing batch run: ", i
taxList = []
s = Sim(p)
tax = s.run()
taxList.append(tax)
print "Social care cost per taxpayer: ", tax
dataFile.write(str(i) + "\t" + str(tax) + "\n")
dataFile.close()
#######################################################
## Retirement age run (no graphics)
def retireRun(reps):
taxMeans = []
taxSEs = []
p['verboseDebugging'] = False
p['singleRunGraphs'] = False
p['interactiveGraphics'] = False
dataFile = open('retirementAgeData2.txt','w')
#p['ageingParentList'] = [50, 55, 65, 70, 75, 80]
for variableCare in p['ageingParentList']:
p['ageOfRetirement'] = variableCare
print "Trying retirement age: ", variableCare
taxList = []
for i in range ( 0, reps ):
print i,
s = Sim(p)
tax = s.run()
taxList.append(tax)
print tax
dataFile.write(str(variableCare) + "\t" + str(i) + "\t" + str(tax) + "\n")
taxMeans.append(pylab.mean(taxList))
taxSEs.append(pylab.std(taxList) / math.sqrt(reps))
dataFile.close()
indices1 = pylab.arange(len(p['ageingParentList']))
taxFig = pylab.figure()
taxBar = taxFig.add_subplot(1,1,1)
taxBar.bar(indices1, taxMeans, facecolor='red',
align='center', yerr=taxSEs, ecolor='black')
taxBar.set_ylabel('Mean social care cost per taxpayer')
taxBar.set_xlabel('Age of retirement')
taxBar.set_xticks(indices1)
taxBar.set_xticklabels(p['ageingParentList'])
pylab.savefig('retirementAgeRunSet1.pdf')
pylab.show()
#######################################################
##runs for sensitivity analysis using GEM-SA
def gemRun(reps):
taxMeans = []
taxSEs = []
p['verboseDebugging'] = False
p['singleRunGraphs'] = False
p['interactiveGraphics'] = False
dataFile = open('GEMSA data new.txt','a')
meansFile = open('GEMSA means new.txt', 'a')
outFile = open('GEMSA outputs new.txt', 'a')
# agingParentList = [ 0.0, 0.1, 0.2, 0.4 ]
# careProbList = [ 0.0004, 0.0008, 0.0012, 0.0016 ]
# retiredHoursList = [ 20.0, 30.0, 40.0, 60.0 ]
# retiredAgeList = [ 60.0 ]
# ageingParentList = [ 0.0, 0.1 ]
# careProbList = [ 0.0004 ]
# retiredHoursList = [ 20.0 ]
# retiredAgeList = [ 60.0 ]
for variableCare in p['ageingParentList']:
for variableProb in p['careProbList']:
for variableRetired in p['retiredHoursList']:
for variableAge in p['retiredAgeList']:
p['agingParentsMoveInWithKids'] = variableCare
p['personCareProb'] = variableProb
p['retiredHours'] = variableRetired
p['ageOfRetirement'] = variableAge
print "Trying parents-moving-in probability: ", variableCare
print "Trying person care probability: ", variableProb
print "Trying retired hours: ", variableRetired
print "Trying retirement age: ", variableAge
taxList = []
taxSum = 0.0
meansFile.write(str(variableCare) + "\t" + str(variableProb) + "\t" + str(variableRetired) + "\t" + str(variableAge) + "\n")
for i in range ( 0, reps ):
print i,
s = Sim(p)
tax, seed = s.run()
taxList.append(tax)
taxSum += tax
print tax
dataFile.write(str(seed) + "\t" + str(variableCare) + "\t" + str(variableProb) + "\t" + str(variableRetired) + "\t" + str(variableAge) + "\t" + str(tax) + "\n")
taxMeans.append(pylab.mean(taxList))
outFile.write(str(taxSum/reps) + "\n")
taxSEs.append(pylab.std(taxList) / math.sqrt(reps))
dataFile.close()
meansFile.close()
outFile.close()
#######################################################
##runs for sensitivity analysis using GEM-SA - LPtau and Maximin LH
def sensitivityRun(runtype, ageingList, careList, retiredHList, retiredAList, reps):
taxMeans = []
taxSEs = []
p['verboseDebugging'] = False
p['singleRunGraphs'] = False
p['interactiveGraphics'] = False
dataFile = open(runtype + ' GEMSA data.txt','a')
meansFile = open(runtype + ' GEMSA means.txt', 'a')
outFile = open(runtype + ' GEMSA outputs.txt', 'a')
# agingParentList = [ 0.0, 0.1, 0.2, 0.4 ]
# careProbList = [ 0.0004, 0.0008, 0.0012, 0.0016 ]
# retiredHoursList = [ 20.0, 30.0, 40.0, 60.0 ]
# retiredAgeList = [ 60.0 ]
# ageingParentList = [ 0.0, 0.1 ]
# careProbList = [ 0.0004 ]
# retiredHoursList = [ 20.0 ]
# retiredAgeList = [ 60.0 ]
for run in xrange(len(ageingList)):
p['agingParentsMoveInWithKids'] = ageingList[run]
p['personCareProb'] = careList[run]
p['retiredHours'] = retiredHList[run]
p['ageOfRetirement'] = retiredAList[run]
print "Trying parents-moving-in probability: ", ageingList[run]
print "Trying person care probability: ", careList[run]
print "Trying retired hours: ", retiredHList[run]
print "Trying retirement age: ", retiredAList[run]
taxList = []
taxSum = 0.0
meansFile.write(str(ageingList[run]) + "\t" + str(careList[run]) + "\t" + str(retiredHList[run]) + "\t" + str(retiredAList[run]) + "\n")
for i in range ( 0, reps ):
print i,
s = Sim(p)
tax, seed = s.run()
taxList.append(tax)
taxSum += tax
print tax
dataFile.write(str(seed) + "\t" + str(ageingList[run]) + "\t" + str(careList[run]) + "\t" + str(retiredHList[run]) + "\t" + str(retiredAList[run]) + "\t" + str(tax) + "\n")
taxMeans.append(pylab.mean(taxList))
outFile.write(str(taxSum/reps) + "\n")
taxSEs.append(pylab.std(taxList) / math.sqrt(reps))
dataFile.close()
meansFile.close()
outFile.close()
#######################################################
##runs for sensitivity analysis using GEM-SA - LPtau and Maximin LH
# def sensitivityLarge(runtype, ageingList, careList, retiredHList, retiredAList, baseDieList, babyDieList, personCareList, maleCareList, femaleCareList, \
# childHoursList, homeAdultList, workingAdultList, lowCareList, growingBirthList, basicDivorceList, variableDivorceList, basicMaleMarriageList, \
# basicFemaleMarriageList, probMoveList, moveHouseholdList, probMoveOutList, probMoveBackList, reps):
def sensitivityLarge(runtype, input_list, reps):
taxMeans = []
taxSEs = []
p['verboseDebugging'] = False
p['singleRunGraphs'] = False
p['interactiveGraphics'] = False
outFile = open(runtype + ' GEMSA outputs large.txt', 'a')
for run in xrange(len(input_list[0])):
print("Running simulation number {}...".format(run))
print("Number of reps: {}".format(reps))
sim_list = np.array(input_list)
print(sim_list)
p['agingParentsMoveInWithKids'] = sim_list[0,run]
print(p['agingParentsMoveInWithKids'])
p['personCareProb'] = sim_list[1,run]
p['retiredHours'] = sim_list[2,run]
p['ageOfRetirement'] = sim_list[3,run]
p['baseDieProb'] = sim_list[4,run]
p['babyDieProb'] = sim_list[5,run]
p['personCareProb'] = sim_list[6,run]
p['maleAgeCareScaling'] = sim_list[7,run]
p['femaleAgeCareScaling'] = sim_list[8,run]
p['childHours'] = sim_list[9,run]
p['homeAdultHours'] = sim_list[10,run]
p['workingAdultHours'] = sim_list[11,run]
p['lowCareHandicap'] = sim_list[12,run]
p['growingPopBirthProb'] = sim_list[13,run]
p['basicDivorceRate'] = sim_list[14,run]
p['variableDivorce'] = sim_list[15,run]
p['basicMaleMarriageProb'] = sim_list[16,run]
p['basicFemaleMarriageProb'] = sim_list[17,run]
p['probApartWillMoveTogether'] = sim_list[18,run]
p['coupleMovesToExistingHousehold'] = sim_list[19,run]
p['basicProbAdultMoveOut'] = sim_list[20,run]
p['variableMoveBack'] = sim_list[21,run]
taxList = []
taxSum = 0.0
for i in range ( 0, reps ):
print i,
s = Sim(p)
tax, seed = s.run()
taxList.append(tax)
taxSum += tax
print tax
taxMeans.append(pylab.mean(taxList))
outFile.write(str(taxSum/reps) + "\n" + str(seed) + "\n")
taxSEs.append(pylab.std(taxList) / math.sqrt(reps))
outFile.close()
#######################################################
##runs for sensitivity analysis using GEM-SA - LPtau and Maximin LH, 10 params
def sensitivityTenParams(runtype, input_list, reps):
taxMeans = []
taxSEs = []
p['verboseDebugging'] = False
p['singleRunGraphs'] = False
p['interactiveGraphics'] = False
outFile = open(runtype + ' GEMSA outputs.txt', 'a')
for run in xrange(len(input_list[0])):
print("Running simulation number {}...".format(run))
print("Number of reps: {}".format(reps))
sim_list = np.array(input_list)
print(sim_list)
p['agingParentsMoveInWithKids'] = sim_list[0,run]
p['baseCareProb'] = sim_list[1,run]
p['retiredHours'] = sim_list[2,run]
p['ageOfRetirement'] = sim_list[3,run]
p['personCareProb'] = sim_list[4,run]
p['maleAgeCareScaling'] = sim_list[5,run]
p['femaleAgeCareScaling'] = sim_list[6,run]
p['childHours'] = sim_list[7,run]
p['homeAdultHours'] = sim_list[8,run]
p['workingAdultHours'] = sim_list[9,run]
taxList = []
taxSum = 0.0
for i in range ( 0, reps ):
print i,
s = Sim(p)
tax, seed = s.run()
taxList.append(tax)
taxSum += tax
print tax
taxMeans.append(pylab.mean(taxList))
outFile.write(str(taxSum/reps) + "\t" + str(seed) + "\n")
taxSEs.append(pylab.std(taxList) / math.sqrt(reps))
outFile.close()
#######################################################
# Recurrent neural network experiments -- 10 params, outputs recorded per year
def RNNOutputScenario(runtype, input_list, reps):
taxMeans = []
taxSEs = []
p['verboseDebugging'] = False
p['singleRunGraphs'] = False
p['interactiveGraphics'] = False
outFile = open(runtype + ' GEMSA outputs.txt', 'a')
outFile2 = open(runtype + ' yearly outputs.txt', 'a')
for run in xrange(len(input_list[0])):
print("Running simulation number {}...".format(run))
print("Number of reps: {}".format(reps))
sim_list = np.array(input_list)
#print(sim_list)
p['agingParentsMoveInWithKids'] = sim_list[0, run]
p['baseCareProb'] = sim_list[1, run]
p['retiredHours'] = sim_list[2, run]
p['ageOfRetirement'] = sim_list[3, run]
p['personCareProb'] = sim_list[4, run]
p['maleAgeCareScaling'] = sim_list[5, run]
p['femaleAgeCareScaling'] = sim_list[6, run]
p['childHours'] = sim_list[7, run]
p['homeAdultHours'] = sim_list[8, run]
p['workingAdultHours'] = sim_list[9, run]
taxList = []
taxSum = 0.0
for i in range(0, reps):
print i,
s = Sim(p)
tax, seed, carecost = s.run()
taxList.append(tax)
taxSum += tax
print tax
taxMeans.append(pylab.mean(taxList))
outFile.write(str(taxSum / reps) + "\t" + str(seed) + "\n")
outFile2.write(str(carecost) + "\n")
taxSEs.append(pylab.std(taxList) / math.sqrt(reps))
outFile.close()
outFile2.close()
#######################################################
## A profiling run; use import pstats then p = pstats.Stats('profile.txt') then p.sort_stats('time').print_stats(10)
#cProfile.run('s.run()','profile.txt')
#######################################################
## Parse command line arguments
def loadParamFile(file, dict):
"""
Given a JSON filename and a dictionary, return the dictionary with
the file's fields merged into it.
Example: if the initial dictionary is
dict['bobAge'] = 90 and dict['samAge']=20 and the JSON data is
{'age':{'bob':40, 'fred':35}}
the returned dictionary contains the following data values:
dict['bobAge'] = 40, dict['fredAge'] = 35, dict['samAge'] = 20
"""
json_data = open(file).read()
data = json.loads(json_data)
for group in data:
fields = data.get(group)
if type({}) == type(fields):
# Group of fields - create name from item and group
for item in fields:
name = item + group[:1].upper() + group[1:]
value = data [group][item]
dict [name] = value
else:
# Single data value - naming is assumed to be correct case
dict [group] = fields
return dict
def loadCommandLine(dict):
"""Process the command line, loading params file (if required). The dict
argument will be augmented with data from the user-specified parameters
file (if required), otherwise will return the dict argument unchanged"""
parser = argparse.ArgumentParser(
description='lives v1.0: complex social behaviour simulation.',
epilog='Example: "maintest.py -f test.json -n 3" --- run 3 sims with test.json\'s params',
formatter_class=argparse.RawTextHelpFormatter,
prog='lives',
usage='use "%(prog)s -h" for more information')
group = parser.add_mutually_exclusive_group()
parser.add_argument(
'-f', '--file',
help='parameters file in JSON format e.g. soylent.json')
group.add_argument(
'-n', '--num', metavar='N', type=int, default=0,
help='number of runs to carry out.')
group.add_argument('-r', '--retire', metavar='R', type=int, default=0,
help='retirement batch, number of iterations.')
group.add_argument('-g', '--gem', metavar='G', type=int, default=0,
help='GEM-SA batch for sensitivity analysis, number of iterations.')
group.add_argument('-l', '--lptau', metavar='L', type=int, default=0,
help='sensitivity analysis batch with LPtau sampling.')
group.add_argument('-m', '--maximin', metavar='M', type=int, default=0,
help='sensitivity analysis batch with maximin latin hypercube sampling.')
group.add_argument('-b', '--bigly', metavar='B', type=int, default=0,
help='bigly sensitivity analysis batch with maximin latin hypercube sampling.')
group.add_argument('-t', '--tenparams', metavar='T', type=int, default=0,
help='10 parameter sensitivity analysis batch with maximin latin hypercube sampling.')
group.add_argument('-c', '--recurrent', metavar='C', type=int, default=0,
help='10 parameter time-series run for RNN.')
args = parser.parse_args()
print("~ Filename: {}".format(args.file))
print("~ Number: {}".format(args.num))
print("~ Retire: {}".format(args.retire))
print("~ GEM-SA: {}".format(args.gem))
print("~ LPtau: {}".format(args.lptau))
print("~ Maximin: {}".format(args.maximin))
print("~ Big SA: {}".format(args.bigly))
print("~ Ten Params: {}".format(args.tenparams))
print("~ Ten Params RNN: {}".format(args.recurrent))
if args.file:
#agingParentList = json.load(retireList, parse_float=decimal.Decimal)
res = loadParamFile (args.file, dict)
print ("p = {}".format(dict))
basicRun(dict)
elif args.num >= 1:
batchRun(args.num)
elif args.retire:
p['ageingParentList'] = []
res = loadParamFile('retire.json', dict)
print("List = {}".format(dict))
retireRun(args.retire)
elif args.gem:
p['ageingParentList'] = []
p['careProbList'] = []
p['retiredHoursList'] = []
p['retiredAgeList'] = []
res = loadParamFile('gem.json', dict)
print("List = {}".format(dict))
gemRun(args.gem)
elif args.lptau:
sim_array = np.genfromtxt('lptau-4params.txt', delimiter=' ')
sim_list = list(sim_array.T)
# print(sim_list)
ageingParentSettings = sim_list[0]
careProbSettings = sim_list[1]
retiredHoursSettings = sim_list[2]
retiredAgeSettings = sim_list[3]
# print(ageingParentSettings)
# print(careProbSettings)
# print(retiredHoursSettings)
# print(retiredAgeSettings)
sensitivityRun('LPtau', ageingParentSettings, careProbSettings, retiredHoursSettings, retiredAgeSettings, args.lptau)
elif args.maximin:
sim_array = np.genfromtxt('latinhypercube-4params.txt', delimiter=' ')
sim_list = list(sim_array.T)
# print(sim_list)
ageingParentSettings = sim_list[0]
careProbSettings = sim_list[1]
retiredHoursSettings = sim_list[2]
retiredAgeSettings = sim_list[3]
# print(ageingParentSettings)
# print(careProbSettings)
# print(retiredHoursSettings)
# print(retiredAgeSettings)
sensitivityRun('Maximin', ageingParentSettings, careProbSettings, retiredHoursSettings, retiredAgeSettings, args.maximin)
elif args.bigly:
sim_array = np.genfromtxt('latinhypercube-22params.txt', delimiter=' ')
sim_list = list(sim_array.T)
#print(sim_list)
np.savetxt('hypercube22_GEMSA_inputs.txt', sim_array, fmt='%1.8f', delimiter='\t', newline='\n')
sensitivityLarge('hypercube22', sim_list, args.bigly)
elif args.tenparams:
sim_array = np.genfromtxt('LPtau-10params.txt', delimiter=' ')
sim_list = list(sim_array.T)
#print(sim_list)
np.savetxt('lptau10_GEMSA_inputs.txt', sim_array, fmt='%1.8f', delimiter='\t', newline='\n')
sensitivityTenParams('lptau10', sim_list, args.tenparams)
elif args.recurrent:
sim_array = np.genfromtxt('lptau10round2_GEMSA_inputs.csv', delimiter=',')
sim_list = list(sim_array.T)
print(sim_list)
np.savetxt('lptau10_recurrent_inputs.txt', sim_array, fmt='%1.8f', delimiter='\t', newline='\n')
RNNOutputScenario('LPtauRNN', sim_list, args.recurrent)
else:
basicRun(p)
return dict
# Load the default values, overwriting and adding to the initial p values
loadParamFile("default.json", p)
# Load values based upon the command line file passed (if any).
loadCommandLine (p)
#print ("p = {}".format(p)) | 40.08453 | 188 | 0.56018 | 3,110 | 25,133 | 4.486817 | 0.166559 | 0.043858 | 0.052458 | 0.057045 | 0.490827 | 0.420238 | 0.401462 | 0.386771 | 0.385553 | 0.373155 | 0 | 0.060613 | 0.262165 | 25,133 | 627 | 189 | 40.08453 | 0.691868 | 0.107787 | 0 | 0.383795 | 0 | 0.002132 | 0.229579 | 0.03805 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.021322 | null | null | 0.095949 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c542862715caa74d2fd3f0e9e9fcab1cbbe24d4a | 284 | py | Python | syncless/wscherry.py | irr/python-labs | 43bb3a528c151653b2be832c7ff13240a10e18a4 | [
"Apache-2.0"
] | 4 | 2015-11-25T09:06:44.000Z | 2019-12-11T21:35:21.000Z | syncless/wscherry.py | irr/python-labs | 43bb3a528c151653b2be832c7ff13240a10e18a4 | [
"Apache-2.0"
] | null | null | null | syncless/wscherry.py | irr/python-labs | 43bb3a528c151653b2be832c7ff13240a10e18a4 | [
"Apache-2.0"
] | 2 | 2015-11-25T09:19:38.000Z | 2016-02-26T03:54:06.000Z | import sys
sys.path.append("/usr/lib/python2.7/site-packages")
import redis
_r = redis.Redis(host='localhost', port=6379, db=0)
import cherrypy
class Test(object):
def index(self):
_r.incr("/")
return "OK!"
index.exposed = True
cherrypy.quickstart(Test())
| 17.75 | 51 | 0.661972 | 40 | 284 | 4.65 | 0.775 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.030043 | 0.179577 | 284 | 15 | 52 | 18.933333 | 0.76824 | 0 | 0 | 0 | 0 | 0 | 0.158451 | 0.112676 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.272727 | 0 | 0.545455 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
c544eb603d7c0e4860f104e7e494d3ae3bdfe615 | 538 | py | Python | server.py | celinekeisja/jobmonitorservice | aaf56dd198c1275439a0f5ed27617fb458f715ac | [
"MIT"
] | null | null | null | server.py | celinekeisja/jobmonitorservice | aaf56dd198c1275439a0f5ed27617fb458f715ac | [
"MIT"
] | null | null | null | server.py | celinekeisja/jobmonitorservice | aaf56dd198c1275439a0f5ed27617fb458f715ac | [
"MIT"
] | 1 | 2019-11-11T10:26:42.000Z | 2019-11-11T10:26:42.000Z | from flask_script import Manager
from flask_migrate import Migrate, MigrateCommand
from config import db
import config
app = config.connex_app
app.add_api('swagger.yml')
@app.route('/')
def home():
return 'homepage here'
@app.route("/job")
@app.route("/job/<string:job_id>")
def job(job_id=""):
return 'result of job {}'.format(job_id)
migrate = Migrate(app=app, db=db)
manager = Manager(app=app)
manager.add_command('db', MigrateCommand)
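# Typical invocations, assuming Flask-Script's default commands plus the
# 'db' command registered above:
#   python server.py runserver
#   python server.py db init / migrate / upgrade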
if __name__ == "__main__":
manager.run()
# app.run(host='localhost', port=5000, debug=True) | 20.692308 | 54 | 0.711896 | 79 | 538 | 4.64557 | 0.468354 | 0.049046 | 0.059946 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008602 | 0.135688 | 538 | 26 | 54 | 20.692308 | 0.780645 | 0.089219 | 0 | 0 | 0 | 0 | 0.153374 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.222222 | 0.111111 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 |
c54c0437171dca7cbeb276eabca7979dd5dce208 | 2,202 | py | Python | src/python/compressao_huffman.py | willisnou/Algoritmos-e-Estruturas-de-Dados | b70a2f692ccae948576177560e3628b9dece5aee | [
"MIT"
] | 653 | 2015-06-07T14:45:40.000Z | 2022-03-25T17:31:58.000Z | src/python/compressao_huffman.py | willisnou/Algoritmos-e-Estruturas-de-Dados | b70a2f692ccae948576177560e3628b9dece5aee | [
"MIT"
] | 64 | 2017-10-29T10:53:37.000Z | 2022-03-14T23:49:18.000Z | src/python/compressao_huffman.py | willisnou/Algoritmos-e-Estruturas-de-Dados | b70a2f692ccae948576177560e3628b9dece5aee | [
"MIT"
] | 224 | 2015-06-07T14:46:00.000Z | 2022-03-25T17:36:46.000Z | # Árvore Huffman
class node:
def __init__(self, freq, symbol, left=None, right=None):
# symbol frequency
self.freq = freq
# symbol (character)
self.symbol = symbol
# node to the left of the current node
self.left = left
# node to the right of the current node
self.right = right
# tree direction (0/1)
self.huff = ''
# Utility function that prints the Huffman
# code of every symbol in the Huffman
# tree built below
def printNodes(node, val=''):
# Huffman code for the current node
newVal = val + str(node.huff)
# if the node is not a leaf, keep
# walking down the tree
# until a leaf is reached
if(node.left):
printNodes(node.left, newVal)
if(node.right):
printNodes(node.right, newVal)
# if the node is a leaf,
# print its Huffman code
if(not node.left and not node.right):
print(f"{node.symbol} -> {newVal}")
# characters for the Huffman tree
chars = ['a', 'b', 'c', 'd', 'e', 'f']
# character frequencies
freq = [5, 9, 12, 13, 16, 45]
# list holding the nodes not yet merged
nodes = []
if __name__ == '__main__':
# convert characters and frequencies
# into Huffman tree nodes
for x in range(len(chars)):
nodes.append(node(freq[x], chars[x]))
while len(nodes) > 1:
# sort all nodes in ascending order
# based on their frequency
nodes = sorted(nodes, key=lambda x: x.freq)
# pick the two smallest nodes
left = nodes[0]
right = nodes[1]
# assign a direction bit to these nodes
# (left or right)
left.huff = 0
right.huff = 1
# merge the two smallest nodes into a new
# parent node for them.
newNode = node(
left.freq +
right.freq,
left.symbol +
right.symbol,
left,
right)
# remove the two children and add the parent
# node back into the pool of nodes
nodes.remove(left)
nodes.remove(right)
nodes.append(newNode)
# Huffman tree ready!
printNodes(nodes[0])
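# For the characters and frequencies above, the traversal should print
# (more frequent symbols get shorter codes):
#   f -> 0
#   c -> 100
#   d -> 101
#   a -> 1100
#   b -> 1101
#   e -> 111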
| 24.741573 | 60 | 0.584469 | 297 | 2,202 | 4.292929 | 0.417508 | 0.05098 | 0.014118 | 0.020392 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013432 | 0.323797 | 2,202 | 88 | 61 | 25.022727 | 0.842848 | 0.398274 | 0 | 0 | 0 | 0 | 0.030139 | 0 | 0 | 0 | 0 | 0.011364 | 0 | 1 | 0.052632 | false | 0 | 0 | 0 | 0.078947 | 0.131579 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c55176ac699f36bb549a798358fd9868f0da10c3 | 7,649 | py | Python | getnear/tseries.py | edwardspeyer/getnear | 746f3cedc1aed6166423f54d32e208017f660b38 | [
"MIT"
] | null | null | null | getnear/tseries.py | edwardspeyer/getnear | 746f3cedc1aed6166423f54d32e208017f660b38 | [
"MIT"
] | null | null | null | getnear/tseries.py | edwardspeyer/getnear | 746f3cedc1aed6166423f54d32e208017f660b38 | [
"MIT"
] | null | null | null | from getnear.config import Tagged, Untagged, Ignore
from getnear.logging import info
from lxml import etree
import re
import requests
import telnetlib
def connect(hostname, *args, **kwargs):
url = f'http://{hostname}/'
html = requests.get(url).text
doc = etree.HTML(html)
for title in doc.xpath('//title'):
if re.match(r'NETGEAR GS\d+T', title.text):
return TSeries(hostname, *args, **kwargs)
class TSeries:
def __init__(self, hostname, password='password', old_password='password', debug=False):
info('connecting')
self.t = telnetlib.Telnet(hostname, 60000)
if debug:
self.t.set_debuglevel(2)
info('entering admin mode')
self.admin_mode()
info('logging in')
if self.login(password):
return
else:
info('trying old password')
self.admin_mode()
if self.login(old_password):
info('changing password')
self.change_password(old_password, password)
else:
raise Exception('login failed')
def admin_mode(self):
self.t.read_until(b'please wait ...')
self.t.write(b'admin\n')
def login(self, password):
self.t.read_until(b'Password:')
self.t.write(password.encode('ascii'))
self.t.write(b'\n')
_, _, match = self.t.expect([b'>', b'Applying'])
if b'Applying' in match:
return False
self.t.write(b'enable\n\n')
self.t.read_until(b'#')
return True
def exit(self):
# Leave "enable" mode
self.t.write(b'exit\n')
self.t.read_until(b'>')
self.t.write(b'logout\n')
def get_current_config(self):
# (ports, pvids, {vlan_id -> {U, T, _, _...})
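# Example shape on a hypothetical 4-port switch:
#   ports = (1, 2, 3, 4)
#   pvids = (1, 1, 10, 1)
#   vlans = {1: (Untagged, Untagged, Ignore, Untagged),
#            10: (Ignore, Ignore, Untagged, Tagged)}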
ports_pvids = dict(self.get_port_pvids())
ports = tuple(sorted(ports_pvids))
pvids = tuple(ports_pvids[p] for p in ports)
vlans = {}
vlan_ids = set(pvids) | set(self.get_vlan_ids())
for vlan_id in vlan_ids:
port_map = dict(self.get_vlan(vlan_id))
membership = tuple(port_map[p] for p in ports)
vlans[vlan_id] = membership
return (ports, pvids, vlans)
def get_vlan_ids(self):
self.t.write(b'show vlan brief\n')
output = self.page().decode(errors='ignore')
for line in output.splitlines():
fields = line.split()
if fields and fields[0].isnumeric():
yield int(fields[0])
def get_vlan(self, vlan_id):
self.t.write(f'show vlan {vlan_id}\n'.encode())
for line in self.paged_table_body():
fields = line.split(maxsplit=3)
interface_port, current = fields[0:2]
interface, port = map(int, interface_port.split('/'))
if interface == 0:
port = int(interface_port.split('/')[1])
is_included = current == 'Include'
is_tagged = 'Tagged' in line
if is_tagged:
state = Tagged
elif is_included:
state = Untagged
else:
state = Ignore
yield port, state
def get_port_pvids(self):
self.t.write(b'show vlan port all\n')
for line in self.paged_table_body():
fields = line.split()
interface_port, pvid_s = fields[0:2]
interface, port = map(int, interface_port.split('/'))
if interface == 0:
pvid = int(pvid_s)
yield port, pvid
def set_port_pvid(self, port, vlan_id):
self.do_configure_interface(port, f'vlan pvid {vlan_id}')
def set_port_vlan_tagging(self, port, vlan_id, is_tagged):
if is_tagged:
command = f'vlan tagging {vlan_id}'
else:
command = f'no vlan tagging {vlan_id}'
self.do_configure_interface(port, command)
def set_port_vlan_participation(self, port, vlan_id, is_included):
if is_included:
command = f'vlan participation include {vlan_id}'
else:
command = f'vlan participation exclude {vlan_id}'
self.do_configure_interface(port, command)
def add_vlan(self, vlan_id):
self.do_vlan_database(f'vlan {vlan_id}')
def delete_vlan(self, vlan_id):
self.do_vlan_database(f'no vlan {vlan_id}')
def do_configure_interface(self, port, command):
self.t.write(b'configure\n')
self.t.read_until(b'#')
self.t.write(f'interface 0/{port}\n'.encode())
self.t.read_until(b'#')
self.t.write((command + '\n').encode())
self.t.read_until(b'#')
self.t.write(b'exit\n')
self.t.read_until(b'#')
self.t.write(b'exit\n')
self.t.read_until(b'#')
def do_vlan_database(self, command):
self.t.write(b'vlan database\n')
self.t.read_until(b'#')
self.t.write((command + '\n').encode())
self.t.read_until(b'#')
self.t.write(b'exit\n')
self.t.read_until(b'#')
def change_password(self, password_old, password_new):
# TODO For this to work, we have to leave "enable" mode. It would be
# better if all other commands entered enable mode instead. More
# verbose, but less confusing. Maybe have a cursor to remember which
# mode we are in?
self.t.write(b'exit\n')
self.t.read_until(b'>')
self.t.write(b'passwd\n')
self.t.read_until(b'Enter old password:')
self.t.write((password_old + '\n').encode())
self.t.read_until(b'Enter new password:')
self.t.write((password_new + '\n').encode())
self.t.read_until(b'Confirm new password:')
self.t.write((password_new + '\n').encode())
self.t.read_until(b'Password Changed!')
self.t.write(b'enable\n') # Double newline
self.t.read_until(b'#')
def paged_table_body(self):
output = self.page().decode(errors='ignore')
in_body = False
for line in output.splitlines():
if line.strip() == '':
in_body = False
if in_body:
yield line
if line and line[0:4] == '----':
in_body = True
def page(self):
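# Read everything up to the next '#' prompt, answering any '--More-- or
# (q)uit' pager prompts with a newline so the full, unpaged output is
# accumulated and returned.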
result = b''
while True:
index, _, output = self.t.expect([
b'--More-- or \(q\)uit',
b'#'
])
result += output
if index == 0:
self.t.write(b'\n')
else:
break
return result
def sync(self, config):
ports, pvids, vlans = config
vlan_ids = set(pvids) | set(vlans)
for vlan_id in sorted(vlan_ids):
info(f'adding vlan {vlan_id}')
self.add_vlan(vlan_id)
for port, pvid in zip(ports, pvids):
info(f'setting port {port} to PVID {pvid}')
self.set_port_pvid(port, pvid)
for vlan_id, membership in vlans.items():
info(f'vlan {vlan_id}')
for port, status in zip(ports, membership):
if status == Ignore:
info(f' port {port} off')
self.set_port_vlan_participation(port, vlan_id, False)
else:
is_tagged = status == Tagged
symbol = 'T' if is_tagged else 'U'
info(f' port {port} {symbol}')
self.set_port_vlan_participation(port, vlan_id, True)
self.set_port_vlan_tagging(port, vlan_id, is_tagged)
| 34.61086 | 92 | 0.552098 | 998 | 7,649 | 4.079158 | 0.182365 | 0.056497 | 0.058954 | 0.061901 | 0.378531 | 0.293785 | 0.246868 | 0.211496 | 0.192827 | 0.148366 | 0 | 0.003854 | 0.32148 | 7,649 | 220 | 93 | 34.768182 | 0.78054 | 0.038567 | 0 | 0.268817 | 0 | 0 | 0.108494 | 0 | 0 | 0 | 0 | 0.004545 | 0 | 1 | 0.107527 | false | 0.096774 | 0.032258 | 0 | 0.177419 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
c552f157bcec716a7f87d20bd21cf1b7b813d8da | 211 | py | Python | models/dl-weights.py | diegoinacio/object-detection-flask-opencv | bc012e884138e9ead04115b8550e833bed134074 | [
"MIT"
] | 16 | 2020-03-01T07:35:35.000Z | 2022-02-01T16:34:24.000Z | models/dl-weights.py | girish008/Real-Time-Object-Detection-Using-YOLOv3-OpenCV | 6af4c550f6128768b646f5923af87c2f654cd1bd | [
"MIT"
] | 6 | 2020-02-13T12:50:24.000Z | 2022-02-02T03:22:30.000Z | models/dl-weights.py | girish008/Real-Time-Object-Detection-Using-YOLOv3-OpenCV | 6af4c550f6128768b646f5923af87c2f654cd1bd | [
"MIT"
] | 8 | 2020-06-22T10:23:58.000Z | 2022-01-14T21:17:50.000Z | """
This script downloads the weight file
"""
import requests
URL = "https://pjreddie.com/media/files/yolov3.weights"
r = requests.get(URL, allow_redirects=True)
open('yolov3_t.weights', 'wb').write(r.content)
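# Note: yolov3.weights is a large file (roughly 240 MB), so r.content holds it
# all in memory. A lower-memory streaming sketch (same result, an alternative
# rather than the script's actual behaviour):
#   with requests.get(URL, stream=True, allow_redirects=True) as resp:
#       with open('yolov3_t.weights', 'wb') as fh:
#           for chunk in resp.iter_content(chunk_size=1 << 20):
#               fh.write(chunk)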
| 23.444444 | 55 | 0.739336 | 31 | 211 | 4.967742 | 0.83871 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.010471 | 0.094787 | 211 | 8 | 56 | 26.375 | 0.795812 | 0.175355 | 0 | 0 | 0 | 0 | 0.391566 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c55ca719e407ecd982eeb52d8e27fa9690f85669 | 420 | py | Python | iis/tests/test_e2e.py | tcpatterson/integrations-core | 3692601de09f8db60f42612b0d623509415bbb53 | [
"BSD-3-Clause"
] | null | null | null | iis/tests/test_e2e.py | tcpatterson/integrations-core | 3692601de09f8db60f42612b0d623509415bbb53 | [
"BSD-3-Clause"
] | null | null | null | iis/tests/test_e2e.py | tcpatterson/integrations-core | 3692601de09f8db60f42612b0d623509415bbb53 | [
"BSD-3-Clause"
] | null | null | null | # (C) Datadog, Inc. 2022-present
# All rights reserved
# Licensed under Simplified BSD License (see LICENSE)
import pytest
from datadog_checks.dev.testing import requires_py3
from datadog_checks.iis import IIS
@pytest.mark.e2e
@requires_py3
def test_e2e_py3(dd_agent_check, aggregator, instance):
aggregator = dd_agent_check(instance)
aggregator.assert_service_check('iis.windows.perf.health', IIS.CRITICAL)
| 26.25 | 76 | 0.797619 | 60 | 420 | 5.383333 | 0.633333 | 0.068111 | 0.105263 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.024324 | 0.119048 | 420 | 15 | 77 | 28 | 0.848649 | 0.242857 | 0 | 0 | 0 | 0 | 0.073248 | 0.073248 | 0 | 0 | 0 | 0 | 0.125 | 1 | 0.125 | false | 0 | 0.375 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
c5681f32ba0443d6943fe18106423ebafc204c78 | 12,733 | py | Python | epgrefresh/src/plugin.py | builder08/enigma2-plugins_2 | f8f08b947e23c1c86b011492a7323125774c3482 | [
"OLDAP-2.3"
] | null | null | null | epgrefresh/src/plugin.py | builder08/enigma2-plugins_2 | f8f08b947e23c1c86b011492a7323125774c3482 | [
"OLDAP-2.3"
] | null | null | null | epgrefresh/src/plugin.py | builder08/enigma2-plugins_2 | f8f08b947e23c1c86b011492a7323125774c3482 | [
"OLDAP-2.3"
] | null | null | null | from __future__ import print_function
# for localized messages
from . import _, NOTIFICATIONDOMAIN
# Config
from Components.config import config, ConfigYesNo, ConfigNumber, ConfigSelection, \
ConfigSubsection, ConfigClock, ConfigYesNo, ConfigInteger, NoSave
from Screens.MessageBox import MessageBox
from Screens.Standby import TryQuitMainloop
from Tools.BoundFunction import boundFunction
from boxbranding import getImageDistro
from Components.SystemInfo import SystemInfo
from Components.NimManager import nimmanager
# Error-print
from traceback import print_exc
from sys import stdout
# Calculate default begin/end
from time import time, localtime, mktime
now = localtime()
begin = mktime((
now.tm_year, now.tm_mon, now.tm_mday, 7, 30,
0, now.tm_wday, now.tm_yday, now.tm_isdst)
)
end = mktime((
now.tm_year, now.tm_mon, now.tm_mday, 20, 00,
0, now.tm_wday, now.tm_yday, now.tm_isdst)
)
#Configuration
config.plugins.epgrefresh = ConfigSubsection()
config.plugins.epgrefresh.enabled = ConfigYesNo(default=False)
config.plugins.epgrefresh.begin = ConfigClock(default=int(begin))
config.plugins.epgrefresh.end = ConfigClock(default=int(end))
config.plugins.epgrefresh.interval_seconds = ConfigNumber(default=120)
config.plugins.epgrefresh.delay_standby = ConfigNumber(default=10)
config.plugins.epgrefresh.inherit_autotimer = ConfigYesNo(default=False)
config.plugins.epgrefresh.afterevent = ConfigYesNo(default=False)
config.plugins.epgrefresh.force = ConfigYesNo(default=False)
config.plugins.epgrefresh.skipProtectedServices = ConfigSelection(choices=[
("bg_only", _("Background only")),
("always", _("Foreground also")),
], default="bg_only"
)
config.plugins.epgrefresh.enablemessage = ConfigYesNo(default=True)
config.plugins.epgrefresh.wakeup = ConfigYesNo(default=False)
config.plugins.epgrefresh.lastscan = ConfigNumber(default=0)
config.plugins.epgrefresh.parse_autotimer = ConfigSelection(choices=[
("always", _("Yes")),
("never", _("No")),
("bg_only", _("Background only")),
("ask_yes", _("Ask default Yes")),
("ask_no", _("Ask default No")),
], default="never"
)
config.plugins.epgrefresh.erase = ConfigYesNo(default=False)
adapter_choices = [("main", _("Main Picture"))]
if SystemInfo.get("NumVideoDecoders", 1) > 1:
adapter_choices.append(("pip", _("Picture in Picture")))
adapter_choices.append(("pip_hidden", _("Picture in Picture (hidden)")))
if len(nimmanager.nim_slots) > 1:
adapter_choices.append(("record", _("Fake recording")))
config.plugins.epgrefresh.adapter = ConfigSelection(choices=adapter_choices, default="main")
config.plugins.epgrefresh.show_in_extensionsmenu = ConfigYesNo(default=False)
config.plugins.epgrefresh.show_run_in_extensionsmenu = ConfigYesNo(default=True)
if getImageDistro() in ("openatv", "openvix",):
config.plugins.epgrefresh.show_in_plugins = ConfigYesNo(default=False)
else:
config.plugins.epgrefresh.show_in_plugins = ConfigYesNo(default=True)
config.plugins.epgrefresh.show_help = ConfigYesNo(default=True)
config.plugins.epgrefresh.wakeup_time = ConfigInteger(default=-1)
config.plugins.epgrefresh.showadvancedoptions = NoSave(ConfigYesNo(default=False))
# convert previous parameters
config.plugins.epgrefresh.background = ConfigYesNo(default=False)
if config.plugins.epgrefresh.background.value:
config.plugins.epgrefresh.adapter.value = "pip_hidden"
config.plugins.epgrefresh.background.value = False
config.plugins.epgrefresh.save()
config.plugins.epgrefresh.interval = ConfigNumber(default=2)
if config.plugins.epgrefresh.interval.value != 2:
config.plugins.epgrefresh.interval_seconds.value = config.plugins.epgrefresh.interval.value * 60
config.plugins.epgrefresh.interval.value = 2
config.plugins.epgrefresh.save()
#pragma mark - Help
try:
from Components.Language import language
from Plugins.SystemPlugins.MPHelp import registerHelp, XMLHelpReader
from Tools.Directories import resolveFilename, SCOPE_PLUGINS, fileExists
lang = language.getLanguage()[:2]
HELPPATH = resolveFilename(SCOPE_PLUGINS, "Extensions/EPGRefresh")
if fileExists(HELPPATH + "/locale/" + str(lang) + "/mphelp.xml"):
helpfile = HELPPATH + "/locale/" + str(lang) + "/mphelp.xml"
else:
helpfile = HELPPATH + "/mphelp.xml"
reader = XMLHelpReader(helpfile)
epgrefreshHelp = registerHelp(*reader)
except Exception as e:
print("[EPGRefresh] Unable to initialize MPHelp:", e, "- Help not available!")
epgrefreshHelp = None
#pragma mark -
# Notification-Domain
# Q: Do we really need this or can we do this better?
from Tools import Notifications
try:
Notifications.notificationQueue.registerDomain(NOTIFICATIONDOMAIN, _("EPGREFRESH_NOTIFICATION_DOMAIN"), deferred_callable=True)
except Exception as e:
EPGRefreshNotificationKey = ""
#print("[EPGRefresh] Error registering Notification-Domain:", e)
# Plugin
from EPGRefresh import epgrefresh
from EPGRefreshService import EPGRefreshService
# Plugins
from Components.PluginComponent import plugins
from Plugins.Plugin import PluginDescriptor
#pragma mark - Workaround for unset clock
from enigma import eDVBLocalTimeHandler
def timeCallback(isCallback=True):
"""Time Callback/Autostart management."""
thInstance = eDVBLocalTimeHandler.getInstance()
if isCallback:
# NOTE: this assumes the clock is actually ready when called back
# this may not be true, but we prefer silently dying to waiting forever
thInstance.m_timeUpdated.get().remove(timeCallback)
elif not thInstance.ready():
thInstance.m_timeUpdated.get().append(timeCallback)
return
epgrefresh.start()
# Autostart
def autostart(reason, **kwargs):
if reason == 0 and "session" in kwargs:
session = kwargs["session"]
epgrefresh.session = session
if config.plugins.epgrefresh.enabled.value:
# check if box was woken up by a timer, if so, check if epgrefresh set this timer
if session.nav.wasTimerWakeup() and abs(config.plugins.epgrefresh.wakeup_time.getValue() - time()) <= 360:
# if box is not in idle mode, do that
from Screens.Standby import Standby, inStandby
if not inStandby:
from Tools import Notifications
Notifications.AddNotificationWithID("Standby", Standby)
timeCallback(isCallback=False)
elif reason == 1:
epgrefresh.stop()
def getNextWakeup():
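# Example: with begin set to 07:30, a call at 06:00 returns today's 07:30
# timestamp, while a call at 08:00 returns tomorrow's.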
# Return invalid time if not automatically refreshing
if not config.plugins.epgrefresh.enabled.value or \
not config.plugins.epgrefresh.wakeup.value:
setConfigWakeupTime(-1)
return -1
now = localtime()
begin = int(mktime(
(now.tm_year, now.tm_mon, now.tm_mday,
config.plugins.epgrefresh.begin.value[0],
config.plugins.epgrefresh.begin.value[1],
0, now.tm_wday, now.tm_yday, now.tm_isdst)
))
# todays timespan has not yet begun
if begin > time():
setConfigWakeupTime(begin)
return begin
# otherwise add 1 day
setConfigWakeupTime(begin + 86400)
return begin + 86400
def setConfigWakeupTime(value):
config.plugins.epgrefresh.wakeup_time.value = value
config.plugins.epgrefresh.save()
# Mainfunction
def main(session, **kwargs):
try:
from EPGRefreshConfiguration import EPGRefreshConfiguration
session.openWithCallback(
doneConfiguring,
EPGRefreshConfiguration
)
except:
print("[EPGRefresh] Error while Opening EPGRefreshConfiguration")
print_exc(file=stdout)
def forceRefresh(session, **kwargs):
epgrefresh.forceRefresh(session)
def stopRunningRefresh(session, **kwargs):
epgrefresh.stopRunningRefresh(session)
def showPendingServices(session, **kwargs):
epgrefresh.showPendingServices(session)
def doneConfiguring(session, needsRestart):
if needsRestart:
session.openWithCallback(boundFunction(restartGUICB, session), MessageBox,
_("To apply your Changes the GUI has to be restarted.\nDo you want to restart the GUI now?"),
MessageBox.TYPE_YESNO, timeout=30)
else:
_startAfterConfig(session)
def restartGUICB(session, answer):
if answer is True:
session.open(TryQuitMainloop, 3)
else:
_startAfterConfig(session)
def _startAfterConfig(session):
if config.plugins.epgrefresh.enabled.value:
if not epgrefresh.isRunning():
epgrefresh.start(session)
# Eventinfo
def eventinfo(session, servicelist, **kwargs):
ref = session.nav.getCurrentlyPlayingServiceReference()
if not ref:
return
sref = ref.toString()
# strip all after last :
pos = sref.rfind(':')
if pos != -1:
sref = sref[:pos + 1]
epgrefresh.services[0].add(EPGRefreshService(str(sref), None))
# XXX: we need this helper function to identify the descriptor
# Extensions menu
def extensionsmenu(session, **kwargs):
main(session, **kwargs)
extSetupDescriptor = PluginDescriptor(_("EPG-Refresh_SetUp"), description=_("Automatically refresh EPG"), where=PluginDescriptor.WHERE_EXTENSIONSMENU, fnc=extensionsmenu, needsRestart=False)
extRunDescriptor = PluginDescriptor(_("EPG-Refresh_Refresh now"), description=_("Start EPGrefresh immediately"), where=PluginDescriptor.WHERE_EXTENSIONSMENU, fnc=forceRefresh, needsRestart=False)
extStopDescriptor = PluginDescriptor(_("EPG-Refresh_Stop Refresh"), description=_("Stop Running EPG-refresh"), where=PluginDescriptor.WHERE_EXTENSIONSMENU, fnc=stopRunningRefresh, needsRestart=False)
extPendingServDescriptor = PluginDescriptor(_("EPG-Refresh_Pending Services"), description=_("Show the pending Services for refresh"), where=PluginDescriptor.WHERE_EXTENSIONSMENU, fnc=showPendingServices, needsRestart=False)
extPluginDescriptor = PluginDescriptor( name=_("EPGRefresh"), description=_("Automatically refresh EPG"), where=PluginDescriptor.WHERE_PLUGINMENU, fnc=main, icon="EPGRefresh.png", needsRestart=False)
def AdjustExtensionsmenu(enable, PlugDescriptor):
if enable:
if PlugDescriptor not in plugins.getPlugins(PlugDescriptor.where):
plugins.addPlugin(PlugDescriptor)
else:
try:
plugins.removePlugin(PlugDescriptor)
except ValueError as ve:
if PlugDescriptor != extRunDescriptor:
print("[EPGRefresh] AdjustExtensionsmenu got confused, tried to remove non-existant plugin entry... ignoring.")
def housekeepingExtensionsmenu(configentry, force=False):
if force or (epgrefresh is not None and not epgrefresh.isRunning()):
PlugDescriptor = None
if configentry == config.plugins.epgrefresh.show_in_plugins:
PlugDescriptor = extPluginDescriptor
elif configentry == config.plugins.epgrefresh.show_in_extensionsmenu:
PlugDescriptor = extSetupDescriptor
elif configentry == config.plugins.epgrefresh.show_run_in_extensionsmenu:
PlugDescriptor = extRunDescriptor
if PlugDescriptor is not None:
AdjustExtensionsmenu(configentry.value, PlugDescriptor)
config.plugins.epgrefresh.show_in_plugins.addNotifier(housekeepingExtensionsmenu, initial_call=False, immediate_feedback=True)
config.plugins.epgrefresh.show_in_extensionsmenu.addNotifier(housekeepingExtensionsmenu, initial_call=False, immediate_feedback=True)
config.plugins.epgrefresh.show_run_in_extensionsmenu.addNotifier(housekeepingExtensionsmenu, initial_call=False, immediate_feedback=True)
def menu_main(menuid, **kwargs):
if getImageDistro() in ("openvix", "openatv", "openspa", "openhdf"):
if menuid != "epg":
return []
else:
return []
return [(_("EPGRefresh"), main, "epgrefresh", None)]
def Plugins(**kwargs):
# NOTE: this might be a little odd to check this, but a user might expect
# the plugin to resume normal operation if installed during runtime, but
# this is not given if the plugin is supposed to run in background (as we
# won't be handed the session which we need to zap). So in turn we require
# a restart if, and only if, we're installed during runtime AND running in
# background. To improve the user experience in this situation, we hide
# all references to this plugin.
needsRestart = config.plugins.epgrefresh.enabled.value and not plugins.firstRun
list = [
PluginDescriptor(
name="EPGRefresh",
where=[
PluginDescriptor.WHERE_AUTOSTART,
PluginDescriptor.WHERE_SESSIONSTART
],
fnc=autostart,
wakeupfnc=getNextWakeup,
needsRestart=needsRestart,
),
PluginDescriptor(
name=_("add to EPGRefresh"),
where=PluginDescriptor.WHERE_EVENTINFO,
fnc=eventinfo,
needsRestart=needsRestart,
),
]
list.append(PluginDescriptor(name=_("EPGRefresh"),
description=_("Automatically refresh EPG"),
where=PluginDescriptor.WHERE_MENU,
fnc=menu_main))
if config.plugins.epgrefresh.show_in_extensionsmenu.value:
extSetupDescriptor.needsRestart = needsRestart
list.append(extSetupDescriptor)
if config.plugins.epgrefresh.show_run_in_extensionsmenu.value:
extRunDescriptor.needsRestart = needsRestart
list.append(extRunDescriptor)
if config.plugins.epgrefresh.show_in_plugins.value:
extPluginDescriptor.needsRestart = needsRestart
list.append(extPluginDescriptor)
return list
| 35.766854 | 224 | 0.783162 | 1,464 | 12,733 | 6.717896 | 0.250683 | 0.070056 | 0.123945 | 0.038434 | 0.249009 | 0.204576 | 0.131164 | 0.091408 | 0.080427 | 0.068226 | 0 | 0.004696 | 0.113563 | 12,733 | 355 | 225 | 35.867606 | 0.866661 | 0.102568 | 0 | 0.150198 | 0 | 0 | 0.094273 | 0.00652 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.094862 | null | null | 0.023715 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
c56aa8051395c03cfefdb6b4c31ba197b3b0d2c8 | 1,876 | py | Python | examples/server.py | zaibon/tcprouter | 7e9d2590e1b1d9d984ac742bd82fcbcf3d42b3ef | [
"BSD-3-Clause"
] | 5 | 2019-05-30T23:36:05.000Z | 2019-10-10T21:37:53.000Z | examples/server.py | zaibon/tcprouter | 7e9d2590e1b1d9d984ac742bd82fcbcf3d42b3ef | [
"BSD-3-Clause"
] | 7 | 2019-06-12T11:55:46.000Z | 2019-11-18T22:53:06.000Z | examples/server.py | xmonader/eltcprouter | b3435733d102c2435e9f62aa469d34c475cc31bd | [
"BSD-3-Clause"
] | 1 | 2021-01-05T20:09:51.000Z | 2021-01-05T20:09:51.000Z | from gevent import monkey; monkey.patch_all()
import logging
from gevent.server import StreamServer
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class Receiver(object):
""" Interface for a receiver - mimics Twisted's protocols
"""
def __init__(self):
self.socket = None
self.address = None
def connection_made(self, socket, address):
self.socket = socket
self.address = address
def connection_lost(self):
pass
def line_received(self, line):
pass
def send_line(self, line):
self.socket.sendall(line + b'\n')
class EchoReceiver(Receiver):
""" A basic implementation of a receiver which echoes back every line it
receives.
"""
def line_received(self, line):
self.send_line(line)
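# Quick manual test, assuming the server below is running with its TLS files:
#   openssl s_client -quiet -connect localhost:9092
# then type a line; the server echoes it back and logs it.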
def Handler(receiver_class):
""" A basic connection handler that applies a receiver object to each
connection.
"""
def handle(socket, address):
logger.info('Client (%s) connected', address)
receiver = receiver_class()
receiver.connection_made(socket, address)
try:
f = socket.makefile()
while True:
line = f.readline().strip()
if line == "":
break
logger.info('Received line from client: %s', line)
receiver.line_received(line.encode())
logger.info('Client (%s) disconnected.', address)
except Exception as e:
logger.exception(e)
finally:
try:
f.close()
receiver.connection_lost()
except:
pass
return handle
server = StreamServer(('0.0.0.0', 9092), Handler(EchoReceiver), keyfile='server.key', certfile='server.crt')
logger.info('Server running')
server.serve_forever()
| 25.351351 | 108 | 0.601812 | 208 | 1,876 | 5.326923 | 0.413462 | 0.036101 | 0.027076 | 0.034296 | 0.041516 | 0 | 0 | 0 | 0 | 0 | 0 | 0.006047 | 0.294776 | 1,876 | 73 | 109 | 25.69863 | 0.831444 | 0.114606 | 0 | 0.148936 | 0 | 0 | 0.07266 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.170213 | false | 0.06383 | 0.06383 | 0 | 0.297872 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
3d6fef82415cc33c1f679313aef262f6b3b670a9 | 17,848 | py | Python | sbvat/utils.py | thudzj/BVAT | 2c7073cb7967583035eece7f4819821b313d73e6 | [
"MIT"
] | 3 | 2019-08-04T03:05:51.000Z | 2021-04-24T02:35:05.000Z | sbvat/utils.py | thudzj/BVAT | 2c7073cb7967583035eece7f4819821b313d73e6 | [
"MIT"
] | null | null | null | sbvat/utils.py | thudzj/BVAT | 2c7073cb7967583035eece7f4819821b313d73e6 | [
"MIT"
] | 1 | 2019-12-29T13:49:22.000Z | 2019-12-29T13:49:22.000Z | import numpy as np
import pickle as pkl
import networkx as nx
import scipy.sparse as sp
from scipy.sparse.linalg.eigen.arpack import eigsh
import sys
import tensorflow as tf
import os
import time
import json
from networkx.readwrite import json_graph
from sklearn.metrics import f1_score
import multiprocessing
def parse_index_file(filename):
"""Parse index file."""
index = []
for line in open(filename):
index.append(int(line.strip()))
return index
def sample_mask(idx, l):
"""Create mask."""
mask = np.zeros(l)
mask[idx] = 1
return np.array(mask, dtype=np.bool)
def save_sparse_csr(filename,array):
np.savez(filename,data = array.data ,indices=array.indices,
indptr =array.indptr, shape=array.shape )
def load_sparse_csr(filename):
loader = np.load(filename)
return sp.csr_matrix(( loader['data'], loader['indices'], loader['indptr']),
shape = loader['shape'])
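def _sparse_csr_roundtrip_demo():
    # Sketch, not original code: the two helpers above persist a CSR matrix as
    # its three buffers plus the shape, which is enough for an exact rebuild.
    m = sp.csr_matrix(np.eye(3))
    save_sparse_csr('demo_csr.npz', m)
    restored = load_sparse_csr('demo_csr.npz')
    # a matrix equal to the original leaves no stored entries in the difference
    assert (restored != m).nnz == 0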
def starfind_4o_nbrs(args):
return find_4o_nbrs(*args)
def find_4o_nbrs(adj, li):
nbrs = []
for i in li:
print(i)
tmp = adj[i]
for ii in np.nonzero(adj[i])[1]:
tmp += adj[ii]
for iii in np.nonzero(adj[ii])[1]:
tmp += adj[iii]
tmp += adj[np.nonzero(adj[iii])[1]].sum(0)
nbrs.append(np.nonzero(tmp)[1])
return nbrs
def load_data(dataset_str, is_sparse):
    """Load data."""
    if dataset_str == "ppi":
        return load_graphsage_data('data/ppi/ppi', is_sparse)
if dataset_str != 'nell':
names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
objects = []
for i in range(len(names)):
with open("data/ind.{}.{}".format(dataset_str, names[i]), 'rb') as f:
if sys.version_info > (3, 0):
objects.append(pkl.load(f, encoding='latin1'))
else:
objects.append(pkl.load(f))
x, y, tx, ty, allx, ally, graph = tuple(objects)
test_idx_reorder = parse_index_file("data/ind.{}.test.index".format(dataset_str))
test_idx_range = np.sort(test_idx_reorder)
if dataset_str == 'citeseer':
# Fix citeseer dataset (there are some isolated nodes in the graph)
# Find isolated nodes, add them as zero-vecs into the right position
test_idx_range_full = range(min(test_idx_reorder), max(test_idx_reorder)+1)
tx_extended = sp.lil_matrix((len(test_idx_range_full), x.shape[1]))
tx_extended[test_idx_range-min(test_idx_range), :] = tx
tx = tx_extended
ty_extended = np.zeros((len(test_idx_range_full), y.shape[1]))
ty_extended[test_idx_range-min(test_idx_range), :] = ty
ty = ty_extended
features = sp.vstack((allx, tx)).tolil()
features[test_idx_reorder, :] = features[test_idx_range, :]
features = preprocess_features(features, is_sparse)
adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
support = preprocess_adj(adj)
labels = np.vstack((ally, ty))
labels[test_idx_reorder, :] = labels[test_idx_range, :]
idx_test = test_idx_range.tolist()
idx_train = range(len(y))
idx_val = range(len(y), len(y)+500)
train_mask = sample_mask(idx_train, labels.shape[0])
val_mask = sample_mask(idx_val, labels.shape[0])
test_mask = sample_mask(idx_test, labels.shape[0])
# y_train = np.zeros(labels.shape)
# y_val = np.zeros(labels.shape)
# y_test = np.zeros(labels.shape)
# y_train = labels[train_mask, :]
# y_val[val_mask, :] = labels[val_mask, :]
# y_test[test_mask, :] = labels[test_mask, :]
else:
names = ['x', 'y', 'tx', 'ty', 'allx', 'ally', 'graph']
objects = []
for i in range(len(names)):
with open("data/savedData/{}.{}".format(dataset_str, names[i]), 'rb') as f:
if sys.version_info > (3, 0):
objects.append(pkl.load(f, encoding='latin1'))
else:
objects.append(pkl.load(f))
x, y, tx, ty, allx, ally, graph = tuple(objects)
test_idx_reorder = parse_index_file("data/savedData/{}.test.index".format(dataset_str))
features = allx.tolil()
adj = nx.adjacency_matrix(nx.from_dict_of_lists(graph))
labels = ally
features = preprocess_features(features, is_sparse)
support = preprocess_adj(adj)
idx_test = test_idx_reorder
idx_train = range(len(y))
idx_val = range(len(y), len(y)+969)
train_mask = sample_mask(idx_train, labels.shape[0])
val_mask = sample_mask(idx_val, labels.shape[0])
test_mask = sample_mask(idx_test, labels.shape[0])
if not os.path.isfile("data/{}.nbrs.npz".format(dataset_str)):
N = adj.shape[0]
pool = multiprocessing.Pool(processes=56)
lis = []
for i in range(32):
li = range(int(N/32)*i, int(N/32)*(i+1))
if i == 31:
li = range(int(N/32)*i, N)
print(li)
lis.append(li)
adjs = [adj] * 32
results = pool.map(starfind_4o_nbrs, zip(adjs, lis))
pool.close()
pool.join()
nbrs = []
for re in results:
nbrs += re
print(len(nbrs))
np.savez("data/{}.nbrs.npz".format(dataset_str), data = nbrs)
else:
loader = np.load("data/{}.nbrs.npz".format(dataset_str))
nbrs = loader['data']
print(adj.shape, len(nbrs))
return nbrs, support, support, features, labels, train_mask, val_mask, test_mask
def sparse_to_tuple(sparse_mx):
"""Convert sparse matrix to tuple representation."""
def to_tuple(mx):
if not sp.isspmatrix_coo(mx):
mx = mx.tocoo()
coords = np.vstack((mx.row, mx.col)).transpose()
values = mx.data
shape = mx.shape
return coords, values, shape
if isinstance(sparse_mx, list):
for i in range(len(sparse_mx)):
sparse_mx[i] = to_tuple(sparse_mx[i])
else:
sparse_mx = to_tuple(sparse_mx)
return sparse_mx
def preprocess_features(features, sparse=True):
"""Row-normalize feature matrix and convert to tuple representation"""
rowsum = np.array(features.sum(1))
r_inv = np.power(rowsum, -1).flatten()
r_inv[np.isinf(r_inv)] = 0.
r_mat_inv = sp.diags(r_inv)
features = r_mat_inv.dot(features)
if sparse:
return sparse_to_tuple(features)
else:
return features.toarray()
def normalize_adj(adj):
"""Symmetrically normalize adjacency matrix."""
adj = sp.coo_matrix(adj)
rowsum = np.array(adj.sum(1))
d_inv_sqrt = np.power(rowsum, -0.5).flatten()
d_inv_sqrt[np.isinf(d_inv_sqrt)] = 0.
d_mat_inv_sqrt = sp.diags(d_inv_sqrt)
return adj.dot(d_mat_inv_sqrt).transpose().dot(d_mat_inv_sqrt).tocoo()
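def _normalize_adj_demo():
    # Sketch, not original code: on a 3-node path graph (degrees 1, 2, 1) the
    # symmetric normalization above rescales each edge (i, j) by
    # 1 / sqrt(deg(i) * deg(j)), so both edges become 1/sqrt(2), about 0.7071.
    path_graph = sp.coo_matrix(np.array([[0., 1., 0.],
                                         [1., 0., 1.],
                                         [0., 1., 0.]]))
    return normalize_adj(path_graph).toarray()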
def preprocess_adj(adj):
"""Preprocessing of adjacency matrix for simple GCN model and conversion to tuple representation."""
adj_normalized = normalize_adj(adj + sp.eye(adj.shape[0]))
return sparse_to_tuple(adj_normalized)
def construct_feed_dict(features, support, labels, labels_mask, placeholders, nbrs):
"""Construct feed dictionary."""
feed_dict = dict()
feed_dict.update({placeholders['labels']: labels})
feed_dict.update({placeholders['labels_mask']: labels_mask})
feed_dict.update({placeholders['features']: features})
feed_dict.update({placeholders['support']: support})
feed_dict.update({placeholders['num_features_nonzero']: features[1].shape})
r1 = sample_nodes(nbrs)
feed_dict.update({placeholders['adv_mask1']: r1})
return feed_dict
def chebyshev_polynomials(adj, k):
"""Calculate Chebyshev polynomials up to order k. Return a list of sparse matrices (tuple representation)."""
print("Calculating Chebyshev polynomials up to order {}...".format(k))
adj_normalized = normalize_adj(adj)
laplacian = sp.eye(adj.shape[0]) - adj_normalized
largest_eigval, _ = eigsh(laplacian, 1, which='LM')
scaled_laplacian = (2. / largest_eigval[0]) * laplacian - sp.eye(adj.shape[0])
t_k = list()
t_k.append(sp.eye(adj.shape[0]))
t_k.append(scaled_laplacian)
def chebyshev_recurrence(t_k_minus_one, t_k_minus_two, scaled_lap):
s_lap = sp.csr_matrix(scaled_lap, copy=True)
return 2 * s_lap.dot(t_k_minus_one) - t_k_minus_two
for i in range(2, k+1):
t_k.append(chebyshev_recurrence(t_k[-1], t_k[-2], scaled_laplacian))
return sparse_to_tuple(t_k)
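def _chebyshev_demo():
    # Sketch, not original code: rebuild the tuple representations returned
    # above and check the recurrence T_2 = 2 * L_scaled * T_1 - T_0 on a
    # 4-node cycle graph (T_1 equals the scaled Laplacian itself).
    adj = sp.coo_matrix(np.array([[0., 1., 0., 1.],
                                  [1., 0., 1., 0.],
                                  [0., 1., 0., 1.],
                                  [1., 0., 1., 0.]]))
    dense = []
    for coords, values, shape in chebyshev_polynomials(adj, 2):
        dense.append(sp.coo_matrix((values, (coords[:, 0], coords[:, 1])),
                                   shape=shape).toarray())
    t_0, t_1, t_2 = dense
    assert np.allclose(t_2, 2 * t_1.dot(t_1) - t_0)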
def sample_nodes(nbrs, num=100):
N = len(nbrs)
flag = np.zeros([N])
output = [0] * num
#norm_mtx = np.zeros([N, N])
for i in range(num):
a = np.random.randint(0, N)
while flag[a] == 1:
a = np.random.randint(0, N)
output[i] = a
# for nell to speed up
flag[nbrs[a]] = 1
# tmp = np.zeros([N])
# tmp[nbrs[a]] = 1
#norm_mtx[nbrs[a]] = tmp
# output_ = np.ones([N])
# output_[output] = 0
# output_ = np.nonzero(output_)[0]
return sample_mask(output, N)#, norm_mtx
def kl_divergence_with_logit(q_logit, p_logit, mask=None):
    if mask is not None:
q = tf.nn.softmax(q_logit)
mask = tf.cast(mask, dtype=tf.float32)
mask /= tf.reduce_mean(mask)
qlogq = tf.reduce_mean(tf.reduce_sum(q * tf.nn.log_softmax(q_logit), 1) * mask)
qlogp = tf.reduce_mean(tf.reduce_sum(q * tf.nn.log_softmax(p_logit), 1) * mask)
return - qlogp
else:
q = tf.nn.softmax(q_logit)
qlogq = tf.reduce_sum(q * tf.nn.log_softmax(q_logit), 1)
qlogp = tf.reduce_sum(q * tf.nn.log_softmax(p_logit), 1)
return tf.reduce_mean( - qlogp)
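def _kl_with_logit_numpy_sketch(q_logit, p_logit):
    # Sketch (assumption: a plain-numpy stand-in, not original code) of the
    # unmasked branch above: softmax q from q_logit, log-softmax of p_logit,
    # then the mean over samples of -sum_c q_c * log p_c.
    q = np.exp(q_logit - q_logit.max(axis=1, keepdims=True))
    q /= q.sum(axis=1, keepdims=True)
    log_p = p_logit - p_logit.max(axis=1, keepdims=True)
    log_p -= np.log(np.exp(log_p).sum(axis=1, keepdims=True))
    return np.mean(-np.sum(q * log_p, axis=1))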
def entropy_y_x(logit):
p = tf.nn.softmax(logit)
return -tf.reduce_mean(tf.reduce_sum(p * tf.nn.log_softmax(logit), 1))
def get_normalized_vector(d, sparse=False, indices=None, dense_shape=None):
if sparse:
d /= (1e-12 + tf.reduce_max(tf.abs(d)))
d2 = tf.SparseTensor(indices, tf.square(d), dense_shape)
d = tf.SparseTensor(indices, d, dense_shape)
d /= tf.sqrt(1e-6 + tf.sparse_reduce_sum(d2, 1, keep_dims=True))
return d
else:
d /= (1e-12 + tf.reduce_max(tf.abs(d)))
d /= tf.sqrt(1e-6 + tf.reduce_sum(tf.pow(d, 2.0), 1, keepdims=True))
return d
def get_normalized_matrix(d, sparse=False, indices=None, dense_shape=None):
if not sparse:
return tf.nn.l2_normalize(d, [0,1])
else:
return tf.SparseTensor(indices, tf.nn.l2_normalize(d, [0]), dense_shape)
def load_graphsage_data(prefix, is_sparse, normalize=True, max_degree=-1):
version_info = map(int, nx.__version__.split('.'))
major = version_info[0]
minor = version_info[1]
assert (major <= 1) and (minor <= 11), "networkx major version must be <= 1.11 in order to load graphsage data"
# Save normalized version
if max_degree==-1:
npz_file = prefix + '.npz'
else:
npz_file = '{}_deg{}.npz'.format(prefix, max_degree)
if os.path.exists(npz_file):
start_time = time.time()
print('Found preprocessed dataset {}, loading...'.format(npz_file))
data = np.load(npz_file)
num_data = data['num_data']
feats = data['feats']
labels = data['labels']
train_data = data['train_data']
val_data = data['val_data']
test_data = data['test_data']
train_adj = data['train_adj']
full_adj = data['full_adj']
train_adj_nonormed = sp.csr_matrix((data['train_adj_data'], data['train_adj_indices'], data['train_adj_indptr']), shape=data['train_adj_shape'])
print('Finished in {} seconds.'.format(time.time() - start_time))
else:
print('Loading data...')
start_time = time.time()
G_data = json.load(open(prefix + "-G.json"))
G = json_graph.node_link_graph(G_data)
feats = np.load(prefix + "-feats.npy").astype(np.float32)
id_map = json.load(open(prefix + "-id_map.json"))
if id_map.keys()[0].isdigit():
conversion = lambda n: int(n)
else:
conversion = lambda n: n
id_map = {conversion(k):int(v) for k,v in id_map.iteritems()}
walks = []
class_map = json.load(open(prefix + "-class_map.json"))
if isinstance(class_map.values()[0], list):
lab_conversion = lambda n : n
else:
lab_conversion = lambda n : int(n)
class_map = {conversion(k): lab_conversion(v) for k,v in class_map.iteritems()}
## Remove all nodes that do not have val/test annotations
## (necessary because of networkx weirdness with the Reddit data)
broken_count = 0
to_remove = []
for node in G.nodes():
if not id_map.has_key(node):
#if not G.node[node].has_key('val') or not G.node[node].has_key('test'):
to_remove.append(node)
broken_count += 1
for node in to_remove:
G.remove_node(node)
print("Removed {:d} nodes that lacked proper annotations due to networkx versioning issues".format(broken_count))
# Construct adjacency matrix
print("Loaded data ({} seconds).. now preprocessing..".format(time.time()-start_time))
start_time = time.time()
edges = []
for edge in G.edges():
if id_map.has_key(edge[0]) and id_map.has_key(edge[1]):
edges.append((id_map[edge[0]], id_map[edge[1]]))
print('{} edges'.format(len(edges)))
num_data = len(id_map)
if max_degree != -1:
print('Subsampling edges...')
edges = subsample_edges(edges, num_data, max_degree)
val_data = np.array([id_map[n] for n in G.nodes()
if G.node[n]['val']], dtype=np.int32)
test_data = np.array([id_map[n] for n in G.nodes()
if G.node[n]['test']], dtype=np.int32)
is_train = np.ones((num_data), dtype=np.bool)
is_train[val_data] = False
is_train[test_data] = False
train_data = np.array([n for n in range(num_data) if is_train[n]], dtype=np.int32)
val_data = sample_mask(val_data, num_data)
test_data = sample_mask(test_data, num_data)
train_data = sample_mask(train_data, num_data)
train_edges = [(e[0], e[1]) for e in edges if is_train[e[0]] and is_train[e[1]]]
edges = np.array(edges, dtype=np.int32)
train_edges = np.array(train_edges, dtype=np.int32)
# Process labels
if isinstance(class_map.values()[0], list):
num_classes = len(class_map.values()[0])
labels = np.zeros((num_data, num_classes), dtype=np.float32)
for k in class_map.keys():
labels[id_map[k], :] = np.array(class_map[k])
else:
num_classes = len(set(class_map.values()))
labels = np.zeros((num_data, num_classes), dtype=np.float32)
for k in class_map.keys():
labels[id_map[k], class_map[k]] = 1
if normalize:
from sklearn.preprocessing import StandardScaler
train_ids = np.array([id_map[n] for n in G.nodes()
if not G.node[n]['val'] and not G.node[n]['test']])
train_feats = feats[train_ids]
scaler = StandardScaler()
scaler.fit(train_feats)
feats = scaler.transform(feats)
def _normalize_adj(edges):
adj = sp.csr_matrix((np.ones((edges.shape[0]), dtype=np.float32),
(edges[:,0], edges[:,1])), shape=(num_data, num_data))
adj += adj.transpose()
tmp = adj
# rowsum = np.array(adj.sum(1)).flatten()
# d_inv = 1.0 / (rowsum+1e-20)
# d_mat_inv = sp.diags(d_inv, 0)
adj = normalize_adj(adj + sp.eye(adj.shape[0]))#d_mat_inv.dot(adj).tocoo()
coords = np.array((adj.row, adj.col)).astype(np.int32)
return tmp, adj.data, coords
train_adj_nonormed, train_v, train_coords = _normalize_adj(train_edges)
_, full_v, full_coords = _normalize_adj(edges)
def _get_adj(data, coords):
adj = sp.csr_matrix((data, (coords[0,:], coords[1,:])),
shape=(num_data, num_data))
return adj
train_adj = sparse_to_tuple(_get_adj(train_v, train_coords))
full_adj = sparse_to_tuple(_get_adj(full_v, full_coords))
# train_feats = train_adj.dot(feats)
# test_feats = full_adj.dot(feats)
print("Done. {} seconds.".format(time.time()-start_time))
with open(npz_file, 'wb') as fwrite:
np.savez(fwrite, num_data=num_data,
train_adj=train_adj,
train_adj_data=train_adj_nonormed.data, train_adj_indices=train_adj_nonormed.indices, train_adj_indptr=train_adj_nonormed.indptr, train_adj_shape=train_adj_nonormed.shape,
full_adj=full_adj,
feats=feats,
labels=labels,
train_data=train_data, val_data=val_data,
test_data=test_data)
return train_adj_nonormed, train_adj, full_adj, feats, labels, train_data, val_data, test_data
def calc_f1(y_pred, y_true, multitask):
if multitask:
y_pred[y_pred>0] = 1
y_pred[y_pred<=0] = 0
else:
y_true = np.argmax(y_true, axis=1)
y_pred = np.argmax(y_pred, axis=1)
return f1_score(y_true, y_pred, average="micro"), \
f1_score(y_true, y_pred, average="macro")
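# Illustrative call, not original code: for single-task labels calc_f1 argmaxes
# the one-hot/probability rows before scoring, e.g.
#
#   y_true = np.array([[1, 0], [0, 1], [0, 1]])
#   y_pred = np.array([[0.9, 0.1], [0.2, 0.8], [0.6, 0.4]])
#   micro, macro = calc_f1(y_pred, y_true, multitask=False)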
| 38.218415 | 200 | 0.601244 | 2,531 | 17,848 | 4.027262 | 0.141841 | 0.017267 | 0.01295 | 0.006475 | 0.292161 | 0.223585 | 0.168841 | 0.150397 | 0.133915 | 0.121946 | 0 | 0.013552 | 0.264063 | 17,848 | 466 | 201 | 38.300429 | 0.762467 | 0.055468 | 0 | 0.179272 | 0 | 0 | 0.054282 | 0.003056 | 0 | 0 | 0 | 0 | 0.002801 | 0 | null | null | 0 | 0.039216 | null | null | 0.036415 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
3d7270ed2ccd3fdf53730944e85357d2c3e72251 | 2,879 | py | Python | Extended Programming Challenges Python/Mnozenie Macierzy/test_main.py | szachovy/School-and-Training | 70f07c0d077da7ba1920d28d881fff7ddcbc37d9 | [
"MIT"
] | null | null | null | Extended Programming Challenges Python/Mnozenie Macierzy/test_main.py | szachovy/School-and-Training | 70f07c0d077da7ba1920d28d881fff7ddcbc37d9 | [
"MIT"
] | null | null | null | Extended Programming Challenges Python/Mnozenie Macierzy/test_main.py | szachovy/School-and-Training | 70f07c0d077da7ba1920d28d881fff7ddcbc37d9 | [
"MIT"
] | null | null | null | import unittest
import main
import re
class MatrixRowsVerification(unittest.TestCase):
def setUp(self):
self.matrix1 = {0: [1, 2, 3], 1: [4, 5, 6]}
self.matrix2 = {0: [1, 2], 1: [3, 4], 2: [5, 6]}
def test_getRowsType(self):
self.assertIsInstance(main.getRows(self.matrix1), int, 'wrong type of returned number of rows')
def test_getRowsNonNegative(self):
self.assertGreaterEqual(main.getRows(self.matrix1), 0, 'rows of matrix cannot be negative number')
def test_getRowsVerification(self):
self.assertEqual(main.getRows(self.matrix1), 2, 'returned number of rows isnt correct')
self.assertEqual(main.getRows(self.matrix2), 3, 'returned number of rows isnt correct')
class MatrixColsVerification(unittest.TestCase):
def setUp(self):
self.matrix1 = {0: [1, 2, 3], 1: [4, 5, 6]}
self.matrix2 = {0: [1, 2], 1: [3, 4], 2: [5, 6]}
def test_getColsType(self):
self.assertIsInstance(main.getCols(self.matrix1), int, 'wrong type of returned number of columns')
def test_getColsNonNegative(self):
self.assertGreaterEqual(main.getCols(self.matrix1), 0, 'rows of matrix cannot be negative number')
def test_getColsVerification(self):
self.assertEqual(main.getCols(self.matrix1), 3, 'returned number of rows isnt correct')
self.assertEqual(main.getCols(self.matrix2), 2, 'returned number of rows isnt correct')
class AutocompleteVerification(unittest.TestCase):
def test_autocomplete(self):
matrix = {0: [1, 2, 3], 1: [4], 2: [5, 6]}
expectedmatrix = {0: [1, 2, 3], 1: [4, 0, 0], 2: [5, 6, 0]}
self.assertEqual(main.autocomplete(matrix), expectedmatrix, 'autocomplete zeros not handled')
class WrongInputException(Exception):
pass
class WriteRowsVerification(unittest.TestCase):
def setUp(self):
self.matrix = main.writerows()
def test_wrong_input(self):
self.assertTrue(re.findall(r"[A-Za-z]*$", str(self.matrix.values())), 'Letters in matrix has been found')
def test_returnsDict(self):
try:
self.assertIsInstance(self.matrix, dict)
except WrongInputException:
self.fail('writing rows doesnt format matrix (dict with rows and cols)')
class VerifyFinalMatrix(unittest.TestCase):
def setUp(self):
self.matrix1 = {0: [1, 2, 3], 1: [4, 5, 6]}
self.matrix2 = {0: [1, 2], 1: [3, 4], 2: [5, 6]}
self.final = {0: [9, 12, 15], 1: [19, 26, 33], 2: [29, 40, 51]}
def test_checkFinal(self):
self.assertEqual(main.Calculate(self.matrix1, self.matrix2).multiply(), self.final, 'Unexpected final matrix '
'after calculations')
def tearDown(self):
self.final.clear()
if __name__ == '__main__':
unittest.main()
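# Hedged sketch, not part of this test file: `main` is imported above but not
# shown. A minimal module satisfying the contract these tests exercise
# (matrices as {row_index: [values]} dicts) could look like:
#
#   def getRows(matrix):
#       return len(matrix)
#
#   def getCols(matrix):
#       return max(len(row) for row in matrix.values())
#
#   def autocomplete(matrix):
#       cols = getCols(matrix)
#       return {k: row + [0] * (cols - len(row)) for k, row in matrix.items()}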
| 38.905405 | 118 | 0.632511 | 370 | 2,879 | 4.87027 | 0.278378 | 0.057714 | 0.013319 | 0.011099 | 0.391232 | 0.345727 | 0.32131 | 0.279134 | 0.279134 | 0.178135 | 0 | 0.051398 | 0.229594 | 2,879 | 73 | 119 | 39.438356 | 0.761046 | 0 | 0 | 0.185185 | 0 | 0 | 0.167419 | 0 | 0 | 0 | 0 | 0 | 0.222222 | 1 | 0.277778 | false | 0.018519 | 0.055556 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
3d7e43dc6fabcfe8138a99da18574265d9a525c8 | 1,786 | py | Python | pyopenproject/business/services/command/priority/find_all.py | webu/pyopenproject | 40b2cb9fe0fa3f89bc0fe2a3be323422d9ecf966 | [
"MIT"
] | 5 | 2021-02-25T15:54:28.000Z | 2021-04-22T15:43:36.000Z | pyopenproject/business/services/command/priority/find_all.py | webu/pyopenproject | 40b2cb9fe0fa3f89bc0fe2a3be323422d9ecf966 | [
"MIT"
] | 7 | 2021-03-15T16:26:23.000Z | 2022-03-16T13:45:18.000Z | pyopenproject/business/services/command/priority/find_all.py | webu/pyopenproject | 40b2cb9fe0fa3f89bc0fe2a3be323422d9ecf966 | [
"MIT"
] | 6 | 2021-06-18T18:59:11.000Z | 2022-03-27T04:58:52.000Z | from pyopenproject.api_connection.exceptions.request_exception import RequestError
from pyopenproject.api_connection.requests.get_request import GetRequest
from pyopenproject.business.exception.business_error import BusinessError
from pyopenproject.business.services.command.find_list_command import FindListCommand
from pyopenproject.business.services.command.priority.priority_command import PriorityCommand
from pyopenproject.business.util.filters import Filters
from pyopenproject.business.util.url import URL
from pyopenproject.business.util.url_parameter import URLParameter
from pyopenproject.model.priority import Priority
class FindAll(PriorityCommand):
def __init__(self, connection, offset, page_size, filters, sort_by):
super().__init__(connection)
self.offset = offset
self.page_size = page_size
self.filters = filters
self.sort_by = sort_by
def execute(self):
try:
            request = GetRequest(self.connection, str(URL(f"{self.CONTEXT}", [
                Filters(self.filters),
                URLParameter("sortBy", self.sort_by)
            ])))
return FindListCommand(self.connection, request, Priority).execute()
# for priority in json_obj["_embedded"]["elements"]:
# yield Priority(priority)
except RequestError as re:
raise BusinessError("Error finding all priorities") from re
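# Hedged usage sketch, not part of the original module; `connection` is an
# authenticated OpenProject connection and `my_filters` is whatever structure
# the Filters() helper expects (its format is not documented here):
#
#   cmd = FindAll(connection, offset=0, page_size=20,
#                 filters=my_filters, sort_by='[["id", "asc"]]')
#   priorities = cmd.execute()  # list of Priority models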
| 49.611111 | 93 | 0.594625 | 159 | 1,786 | 6.515723 | 0.377358 | 0.147683 | 0.144788 | 0.083977 | 0.138996 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.342665 | 1,786 | 35 | 94 | 51.028571 | 0.882453 | 0.044233 | 0 | 0.068966 | 0 | 0 | 0.028169 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068966 | false | 0 | 0.310345 | 0 | 0.448276 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
3d85f7e617337855186eb9a6630f328826ed38ef | 868 | py | Python | app/migrations/0003_contacts.py | Joshua-Barawa/Django-IP4 | 5665efe73cf8d2244b7bb35ed627e4e237902156 | [
"Unlicense"
] | null | null | null | app/migrations/0003_contacts.py | Joshua-Barawa/Django-IP4 | 5665efe73cf8d2244b7bb35ed627e4e237902156 | [
"Unlicense"
] | null | null | null | app/migrations/0003_contacts.py | Joshua-Barawa/Django-IP4 | 5665efe73cf8d2244b7bb35ed627e4e237902156 | [
"Unlicense"
] | null | null | null | # Generated by Django 4.0.3 on 2022-03-21 13:04
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('app', '0002_remove_profile_caption_alter_profile_profile_pic_and_more'),
]
operations = [
migrations.CreateModel(
name='Contacts',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=100, null=True)),
('unit', models.CharField(blank=True, max_length=100, null=True)),
('m_number', models.IntegerField(default=0)),
('hood', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app.neighborhood')),
],
),
]
| 34.72 | 117 | 0.623272 | 98 | 868 | 5.357143 | 0.632653 | 0.045714 | 0.053333 | 0.08381 | 0.167619 | 0.167619 | 0.167619 | 0.167619 | 0.167619 | 0 | 0 | 0.039574 | 0.243088 | 868 | 24 | 118 | 36.166667 | 0.759513 | 0.051843 | 0 | 0 | 1 | 0 | 0.137637 | 0.075518 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.111111 | 0 | 0.277778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
3d90bec081e48c3692736a49abca5a861a8e0892 | 626 | py | Python | scripts/modules/task_plan_types/date.py | vkostyanetsky/Organizer | b1f0a05c0b6c6e6ea7a78a6bd7a3c70f85b33eba | [
"MIT"
] | null | null | null | scripts/modules/task_plan_types/date.py | vkostyanetsky/Organizer | b1f0a05c0b6c6e6ea7a78a6bd7a3c70f85b33eba | [
"MIT"
] | null | null | null | scripts/modules/task_plan_types/date.py | vkostyanetsky/Organizer | b1f0a05c0b6c6e6ea7a78a6bd7a3c70f85b33eba | [
"MIT"
] | null | null | null | # DD.MM.YYYY (DD is the day number, MM is the month number, YYYY is the year number)
import re
import datetime
def is_task_current(task, date):
result = None
    groups = re.match(r'([0-9]{1,2})\.([0-9]{1,2})\.([0-9]{4})', task['condition'])
    type_is_correct = groups is not None
if type_is_correct:
task_date_year = int(groups[3])
task_date_month = int(groups[2])
task_date_day = int(groups[1])
task_date = datetime.datetime(task_date_year, task_date_month, task_date_day)
task['outdated'] = task_date < date
result = date == task_date
return result | 26.083333 | 91 | 0.600639 | 94 | 626 | 3.808511 | 0.382979 | 0.223464 | 0.01676 | 0.022346 | 0.027933 | 0.027933 | 0 | 0 | 0 | 0 | 0 | 0.030369 | 0.263578 | 626 | 24 | 92 | 26.083333 | 0.739696 | 0.103834 | 0 | 0 | 0 | 0.071429 | 0.094643 | 0.064286 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.142857 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
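# Hedged usage sketch for is_task_current above (hypothetical task dict; the
# 'condition' and 'outdated' keys follow the code, everything else is assumed):
#
#   task = {'condition': '09.05.2024'}
#   is_task_current(task, datetime.datetime(2024, 5, 9))  # -> True
#   task['outdated']                                      # -> False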
3da195067ff01ae97b234bc41093431b6cebf500 | 646 | py | Python | class3/collateral/show_genie.py | twin-bridges/netmiko_course | 31943e4f6f66dbfe523d62d5a2f03285802a8c56 | [
"Apache-2.0"
] | 11 | 2020-09-16T06:53:16.000Z | 2021-08-24T21:27:37.000Z | class3/collateral/show_genie.py | twin-bridges/netmiko_course | 31943e4f6f66dbfe523d62d5a2f03285802a8c56 | [
"Apache-2.0"
] | null | null | null | class3/collateral/show_genie.py | twin-bridges/netmiko_course | 31943e4f6f66dbfe523d62d5a2f03285802a8c56 | [
"Apache-2.0"
] | 5 | 2020-10-18T20:25:59.000Z | 2021-10-20T16:27:00.000Z | import os
from netmiko import ConnectHandler
from getpass import getpass
from pprint import pprint
# Code so automated tests will run properly
# Check for environment variable, if that fails, use getpass().
password = os.getenv("NETMIKO_PASSWORD") if os.getenv("NETMIKO_PASSWORD") else getpass()
my_device = {
"device_type": "cisco_xe",
"host": "cisco3.lasthop.io",
"username": "pyclass",
"password": password,
}
with ConnectHandler(**my_device) as net_connect:
output = net_connect.send_command("show ip int brief", use_genie=True)
# output = net_connect.send_command("show ip arp", use_genie=True)
pprint(output)
| 30.761905 | 88 | 0.733746 | 88 | 646 | 5.238636 | 0.568182 | 0.065076 | 0.065076 | 0.099783 | 0.143167 | 0.143167 | 0.143167 | 0 | 0 | 0 | 0 | 0.001835 | 0.156347 | 646 | 20 | 89 | 32.3 | 0.844037 | 0.260062 | 0 | 0 | 0 | 0 | 0.236287 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.214286 | 0.285714 | 0 | 0.285714 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
3da20b359813d6186015461736f4d52256b59084 | 2,793 | py | Python | pints/tests/test_toy_hes1_michaelis_menten_model.py | lisaplag/pints | 3de6617e57ba5b395edaca48961bfc5a4b7209b3 | [
"RSA-MD"
] | null | null | null | pints/tests/test_toy_hes1_michaelis_menten_model.py | lisaplag/pints | 3de6617e57ba5b395edaca48961bfc5a4b7209b3 | [
"RSA-MD"
] | null | null | null | pints/tests/test_toy_hes1_michaelis_menten_model.py | lisaplag/pints | 3de6617e57ba5b395edaca48961bfc5a4b7209b3 | [
"RSA-MD"
] | null | null | null | #!/usr/bin/env python3
#
# Tests if the HES1 Michaelis-Menten toy model runs.
#
# This file is part of PINTS (https://github.com/pints-team/pints/) which is
# released under the BSD 3-clause license. See accompanying LICENSE.md for
# copyright notice and full license details.
#
import unittest
import numpy as np
import pints
import pints.toy
class TestHes1Model(unittest.TestCase):
"""
Tests if the HES1 Michaelis-Menten toy model runs.
"""
def test_run(self):
model = pints.toy.Hes1Model()
self.assertEqual(model.n_parameters(), 4)
self.assertEqual(model.n_outputs(), 1)
times = model.suggested_times()
parameters = model.suggested_parameters()
values = model.simulate(parameters, times)
self.assertEqual(values.shape, (len(times),))
self.assertTrue(np.all(values > 0))
states = model.simulate_all_states(parameters, times)
self.assertEqual(states.shape, (len(times), 3))
self.assertTrue(np.all(states > 0))
suggested_values = model.suggested_values()
self.assertEqual(suggested_values.shape, (len(times),))
self.assertTrue(np.all(suggested_values > 0))
# Test setting and getting init cond.
self.assertFalse(np.all(model.initial_conditions() == 10))
model.set_initial_conditions(10)
self.assertTrue(np.all(model.initial_conditions() == 10))
# Test setting and getting implicit param.
self.assertFalse(np.all(model.implicit_parameters() == [10, 10, 10]))
model.set_implicit_parameters([10, 10, 10])
self.assertTrue(np.all(model.implicit_parameters() == [10, 10, 10]))
# Initial conditions cannot be negative
model = pints.toy.Hes1Model(0)
self.assertRaises(ValueError, pints.toy.Hes1Model, -1)
# Implicit parameters cannot be negative
model = pints.toy.Hes1Model(0, [0, 0, 0])
self.assertRaises(ValueError, pints.toy.Hes1Model, *(0, [-1, 0, 0]))
self.assertRaises(ValueError, pints.toy.Hes1Model, *(0, [0, -1, 0]))
self.assertRaises(ValueError, pints.toy.Hes1Model, *(0, [0, 0, -1]))
self.assertRaises(ValueError, pints.toy.Hes1Model, *(0, [-1, -1, -1]))
def test_values(self):
# value-based tests for Hes1 Michaelis-Menten
times = np.linspace(0, 10, 101)
parameters = [3.8, 0.035, 0.15, 7.5]
iparameters = [4.5, 4.0, 0.04]
y0 = 7
model = pints.toy.Hes1Model(y0=y0, implicit_parameters=iparameters)
values = model.simulate(parameters, times)
self.assertEqual(values[0], y0)
self.assertAlmostEqual(values[1], 7.011333, places=6)
self.assertAlmostEqual(values[100], 5.420750, places=6)
if __name__ == '__main__':
unittest.main()
| 38.260274 | 78 | 0.653419 | 358 | 2,793 | 5.01676 | 0.290503 | 0.044543 | 0.085189 | 0.060134 | 0.434298 | 0.405345 | 0.35412 | 0.330178 | 0.123051 | 0 | 0 | 0.053327 | 0.214465 | 2,793 | 72 | 79 | 38.791667 | 0.765269 | 0.183315 | 0 | 0.044444 | 0 | 0 | 0.003551 | 0 | 0 | 0 | 0 | 0 | 0.444444 | 1 | 0.044444 | false | 0 | 0.088889 | 0 | 0.155556 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
3da83d4179e3c0fa03b23a086938541e7c9c090e | 931 | py | Python | src/tentaclio/clients/athena_client.py | datavaluepeople/tentaclio | eb6920a0e115c6c08043063a8c1013d812ec34c8 | [
"MIT"
] | 12 | 2019-04-30T16:07:42.000Z | 2021-12-08T08:02:09.000Z | src/tentaclio/clients/athena_client.py | octoenergy/tentaclio | eb6920a0e115c6c08043063a8c1013d812ec34c8 | [
"MIT"
] | 74 | 2019-04-25T11:18:22.000Z | 2022-01-18T11:31:14.000Z | src/tentaclio/clients/athena_client.py | datavaluepeople/tentaclio | eb6920a0e115c6c08043063a8c1013d812ec34c8 | [
"MIT"
] | 4 | 2019-05-05T13:13:21.000Z | 2022-01-14T00:33:07.000Z | """AWS Athena query client.
Overrides the `get_df` convenience methods for loading a DataFrame using PandasCursor,
which is more performant than using sql alchemy functions.
"""
import pandas as pd
from pyathena.pandas_cursor import PandasCursor
from . import decorators, sqla_client
__all__ = ["AthenaClient"]
class AthenaClient(sqla_client.SQLAlchemyClient):
"""Postgres client, backed by a SQLAlchemy connection."""
allowed_schemes = ["awsathena+rest"]
connect_args_default = dict(cursor_class=PandasCursor)
# Athena-specific fast query result retrieval:
@decorators.check_conn
def get_df(self, sql_query: str, params: dict = None, **kwargs) -> pd.DataFrame:
"""Run a raw SQL query and return a data frame."""
raw_conn = self._get_raw_conn()
raw_cursor = raw_conn.cursor(PandasCursor)
return raw_cursor.execute(sql_query, parameters=params, **kwargs).as_pandas()
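# Hedged usage sketch, not part of the original module (the connection URL is a
# made-up example of pyathena's SQLAlchemy URL form with an S3 staging dir):
#
#   client = AthenaClient(
#       "awsathena+rest://athena.eu-west-1.amazonaws.com:443/my_db"
#       "?s3_staging_dir=s3%3A//my-bucket/stage/")
#   df = client.get_df("SELECT 1 AS one")  # returns a pandas DataFrame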
| 32.103448 | 86 | 0.736842 | 120 | 931 | 5.516667 | 0.583333 | 0.036254 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.170784 | 931 | 28 | 87 | 33.25 | 0.857513 | 0.337272 | 0 | 0 | 0 | 0 | 0.043333 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.25 | 0 | 0.666667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
3dada60e0249d722b9efc92d356114b02e3e0c6c | 18,496 | py | Python | filters/Filter.py | Paul1298/ITMO_FS | 219537776d89e52df0c1c07de2c71ce91c679c50 | [
"MIT"
] | null | null | null | filters/Filter.py | Paul1298/ITMO_FS | 219537776d89e52df0c1c07de2c71ce91c679c50 | [
"MIT"
] | null | null | null | filters/Filter.py | Paul1298/ITMO_FS | 219537776d89e52df0c1c07de2c71ce91c679c50 | [
"MIT"
] | null | null | null | from .utils import *
class _DefaultMeasures(object):  # TODO add logging
    @staticmethod
    def __check_input(X, y):
        # coerce the inputs to numpy arrays so the measures below can index
        # and broadcast over them uniformly
        if not isinstance(X, np.ndarray):
            X = np.array(X)
        if not isinstance(y, np.ndarray):
            y = np.array(y)
        return X, y

    @staticmethod
    def __calculate_F_ratio(row, y_data):
        """Calculate the Fisher ratio (between-class over within-class
        variability) of a single feature column against the class labels."""
        inter_class = 0.0
        intra_class = 0.0
        mu_overall = np.mean(row)
        for value in np.unique(y_data):
            index_for_this_value = np.where(y_data == value)[0]
            n = index_for_this_value.shape[0]  # number of samples in this class
            mu = np.mean(row[index_for_this_value])
            var = np.var(row[index_for_this_value])
            inter_class += n * np.power(mu - mu_overall, 2)
            intra_class += (n - 1) * var
        f_ratio = inter_class / intra_class
        return f_ratio
@classmethod
def __f_ratio_measure(cls, X, y, n):
X, y = _DefaultMeasures.__check_input(X, y)
assert not 1 < X.shape[1] < n, 'incorrect number of features'
f_ratios = []
for feature in X.T:
f_ratio = _DefaultMeasures.__calculate_F_ratio(feature, y.T)
f_ratios.append(f_ratio)
f_ratios = np.array(f_ratios)
return np.argpartition(f_ratios, -n)[-n:]
@staticmethod
def f_ratio_measure(n):
return partial(_DefaultMeasures.__f_ratio_measure, n=n)
@staticmethod
def gini_index(X, y):
X, y = _DefaultMeasures.__check_input(X, y)
cum_x = np.cumsum(X / np.linalg.norm(X, 1, axis=0), axis=0)
cum_y = np.cumsum(y / np.linalg.norm(y, 1))
diff_x = (cum_x[1:] - cum_x[:-1])
diff_y = (cum_y[1:] + cum_y[:-1])
return np.abs(1 - np.sum(np.multiply(diff_x.T, diff_y).T, axis=0))
# Calculate the entropy of y.
@staticmethod
def __calc_entropy(y):
dict_label = dict()
for label in y:
if label not in dict_label:
dict_label.update({label: 1})
else:
dict_label[label] += 1
entropy = 0.0
for i in dict_label.values():
entropy += -i / len(y) * log(i / len(y), 2)
return entropy
@staticmethod
def __calc_conditional_entropy(x_j, y):
dict_i = dict()
for i in range(x_j.shape[0]):
if x_j[i] not in dict_i:
dict_i.update({x_j[i]: [i]})
else:
dict_i[x_j[i]].append(i)
# Conditional entropy of a feature.
con_entropy = 0.0
# get corresponding values in y.
for f in dict_i.values():
# Probability of each class in a feature.
p = len(f) / len(x_j)
# Dictionary of corresponding probability in labels.
dict_y = dict()
for i in f:
if y[i] not in dict_y:
dict_y.update({y[i]: 1})
else:
dict_y[y[i]] += 1
# calculate the probability of corresponding label.
sub_entropy = 0.0
for l in dict_y.values():
sub_entropy += -l / sum(dict_y.values()) * log(l / sum(dict_y.values()), 2)
con_entropy += sub_entropy * p
return con_entropy
# IGFilter = filters.IGFilter() # TODO: unexpected .run() interface; .run() feature_names; no default constructor
@staticmethod
def ig_measure(X, y):
X, y = _DefaultMeasures.__check_input(X, y)
entropy = _DefaultMeasures.__calc_entropy(y)
f_ratios = np.empty(X.shape[1])
for index in range(X.shape[1]):
f_ratios[index] = entropy - _DefaultMeasures.__calc_conditional_entropy(X[:, index], y)
return f_ratios
@staticmethod
def __contingency_matrix(labels_true, labels_pred):
"""Build a contingency matrix describing the relationship between labels.
Parameters
----------
labels_true : int array, shape = [n_samples]
Ground truth class labels to be used as a reference
labels_pred : array, shape = [n_samples]
Cluster labels to evaluate
Returns
-------
contingency : {array-like, sparse}, shape=[n_classes_true, n_classes_pred]
Matrix :math:`C` such that :math:`C_{i, j}` is the number of samples in
true class :math:`i` and in predicted class :math:`j`. If
``eps is None``, the dtype of this array will be integer. If ``eps`` is
given, the dtype will be float.
"""
classes, class_idx = np.unique(labels_true, return_inverse=True)
clusters, cluster_idx = np.unique(labels_pred, return_inverse=True)
n_classes = classes.shape[0]
n_clusters = clusters.shape[0]
# Using coo_matrix to accelerate simple histogram calculation,
# i.e. bins are consecutive integers
# Currently, coo_matrix is faster than histogram2d for simple cases
# TODO redo it with numpy
contingency = sp.csr_matrix((np.ones(class_idx.shape[0]),
(class_idx, cluster_idx)),
shape=(n_classes, n_clusters),
dtype=np.int)
contingency.sum_duplicates()
return contingency
@staticmethod
def __mi(U, V):
contingency = _DefaultMeasures.__contingency_matrix(U, V)
nzx, nzy, nz_val = sp.find(contingency)
contingency_sum = contingency.sum()
pi = np.ravel(contingency.sum(axis=1))
pj = np.ravel(contingency.sum(axis=0))
log_contingency_nm = np.log(nz_val)
contingency_nm = nz_val / contingency_sum
# Don't need to calculate the full outer product, just for non-zeroes
outer = (pi.take(nzx).astype(np.int64, copy=False)
* pj.take(nzy).astype(np.int64, copy=False))
log_outer = -np.log(outer) + log(pi.sum()) + log(pj.sum())
mi = (contingency_nm * (log_contingency_nm - log(contingency_sum)) +
contingency_nm * log_outer)
return mi.sum()
@classmethod
def __mrmr_measure(cls, X, y, n):
assert not 1 < X.shape[1] < n, 'incorrect number of features'
x, y = _DefaultMeasures.__check_input(X, y)
# print([_DefaultMeasures.__mi(X[:, j].reshape(-1, 1), y) for j in range(X.shape[1])])
return [MI(x[:, j].reshape(-1, 1), y) for j in range(x.shape[1])]
@staticmethod
def mrmr_measure(n):
return partial(_DefaultMeasures.__mrmr_measure, n=n)
# RandomFilter = filters.RandomFilter() # TODO: bad .run() interface; .run() feature_names; no default constructor
@staticmethod
def su_measure(X, y):
X, y = _DefaultMeasures.__check_input(X, y)
entropy = _DefaultMeasures.__calc_entropy(y)
f_ratios = np.empty(X.shape[1])
for index in range(X.shape[1]):
entropy_x = _DefaultMeasures.__calc_entropy(X[:, index])
con_entropy = _DefaultMeasures.__calc_conditional_entropy(X[:, index], y)
f_ratios[index] = 2 * (entropy - con_entropy) / (entropy_x + entropy)
return f_ratios
@staticmethod
def spearman_corr(X, y):
X, y = _DefaultMeasures.__check_input(X, y)
        # Spearman is a rank correlation: replace every value by its rank
        # before applying 1 - 6 * sum(d^2) / (n * (n^2 - 1))
        x_ranks = X.argsort(axis=0).argsort(axis=0)
        y_ranks = y.argsort().argsort()
        n = X.shape[0]
        c = 6 / (n * (n - 1) * (n + 1))
        dif = x_ranks - np.repeat(y_ranks, X.shape[1]).reshape(X.shape)
return 1 - c * np.sum(dif * dif, axis=0)
@staticmethod
def pearson_corr(X, y):
X, y = _DefaultMeasures.__check_input(X, y)
x_dev = X - np.mean(X, axis=0)
y_dev = y - np.mean(y)
sum_dev = y_dev.T.dot(x_dev)
sq_dev_x = x_dev * x_dev
sq_dev_y = y_dev * y_dev
return (sum_dev / np.sqrt(np.sum(sq_dev_y) * np.sum(sq_dev_x))).reshape((-1,))
# TODO concordation coef
@staticmethod
def fechner_corr(X, y):
"""
Sample sign correlation (also known as Fechner correlation)
"""
X, y = _DefaultMeasures.__check_input(X, y)
y_mean = np.mean(y)
n = X.shape[0]
f_ratios = np.zeros(X.shape[1])
        for j in range(X.shape[1]):
            x_j_mean = np.mean(X[:, j])
            for i in range(n):
                x_dev = X[i, j] - x_j_mean
                y_dev = y[i] - y_mean
                # +1 when the deviations agree in sign, -1 otherwise
                if x_dev * y_dev >= 0:
                    f_ratios[j] += 1
                else:
                    f_ratios[j] -= 1
            f_ratios[j] /= n
return f_ratios
@staticmethod
def __label_binarize(y):
"""
Binarize labels in a one-vs-all fashion
This function makes it possible to compute this transformation for a
fixed set of class labels known ahead of time.
"""
classes = np.unique(y)
n_samples = len(y)
n_classes = len(classes)
row = np.arange(n_samples)
col = [np.where(classes == el)[0][0] for el in y]
data = np.repeat(1, n_samples)
# TODO redo it with numpy
return sp.csr_matrix((data, (row, col)), shape=(n_samples, n_classes)).toarray()
@staticmethod
def __chisquare(f_obs, f_exp):
"""Fast replacement for scipy.stats.chisquare.
Version from https://github.com/scipy/scipy/pull/2525 with additional
optimizations.
"""
f_obs = np.asarray(f_obs, dtype=np.float64)
# Reuse f_obs for chi-squared statistics
chisq = f_obs
chisq -= f_exp
chisq **= 2
with np.errstate(invalid="ignore"):
chisq /= f_exp
chisq = chisq.sum(axis=0)
return chisq
@staticmethod
def chi2_measure(X, y):
"""
This score can be used to select the n_features features with the highest values
for the test chi-squared statistic from X,
which must contain only non-negative features such as booleans or frequencies
(e.g., term counts in document classification), relative to the classes.
"""
X, y = _DefaultMeasures.__check_input(X, y)
if np.any(X < 0):
raise ValueError("Input X must be non-negative.")
Y = _DefaultMeasures.__label_binarize(y)
# If you use sparse input
# you can use sklearn.utils.extmath.safe_sparse_dot instead
observed = np.dot(Y.T, X) # n_classes * n_features
feature_count = X.sum(axis=0).reshape(1, -1)
class_prob = Y.mean(axis=0).reshape(1, -1)
expected = np.dot(class_prob.T, feature_count)
return _DefaultMeasures.__chisquare(observed, expected)
@staticmethod
def __distance_matrix(X, y, n_samples):
dm = np.zeros((n_samples, n_samples), dtype=tuple)
for i in range(n_samples):
for j in range(i, n_samples):
# using the Manhattan (L1) norm rather than
# the Euclidean (L2) norm,
# although the rationale is not specified
value = np.linalg.norm(X[i, :] - X[j, :], 1)
dm[i, j] = (value, j, y[j])
dm[j, i] = (value, i, y[i])
# sort_indices = dm.argsort(1)
# dm.sort(1)
# indices = np.arange(n_samples) #[sort_indices]
# dm = np.dstack((dm, indices))
return dm
# TODO redo with np.where
@staticmethod
def __take_k(dm_i, k, r_index, choice_func):
hits = []
dm_i = sorted(dm_i, key=lambda x: x[0])
for samp in dm_i:
if (samp[1] != r_index) & (k > 0) & (choice_func(samp[2])):
hits.append(samp)
k -= 1
return np.array(hits, int)
@staticmethod
def reliefF_measure(X, y, k_neighbors=1):
"""
Based on the ReliefF algorithm as introduced in:
R.J. Urbanowicz et al. Relief-based feature selection: Introduction and review
Journal of Biomedical Informatics 85 (2018) 189–203
Differs with skrebate.ReliefF
Only for complete X
Rather than repeating the algorithm m(TODO Ask Nikita about user defined) times,
implement it exhaustively (i.e. n times, once for each instance)
for relatively small n (up to one thousand).
:param X: array-like {n_samples, n_features}
Training instances to compute the feature importance scores from
:param y: array-like {n_samples}
Training labels
:param k_neighbors: int (default: 1)
The number of neighbors to consider when assigning feature importance scores.
More neighbors results in more accurate scores, but takes longer.
Selection of k hits and misses is the basic difference to Relief
and ensures greater robustness of the algorithm concerning noise.
:return: array-like {n_features}
Feature importances
"""
X, y = _DefaultMeasures.__check_input(X, y)
f_ratios = np.zeros(X.shape[1])
classes, counts = np.unique(y, return_counts=True)
prior_prob = dict(zip(classes, np.array(counts) / len(y)))
n_samples = X.shape[0]
n_features = X.shape[1]
dm = _DefaultMeasures.__distance_matrix(X, y, n_samples)
for i in range(n_samples):
r = X[i]
dm_i = dm[i]
hits = _DefaultMeasures.__take_k(dm_i, k_neighbors, i, lambda x: x == y[i])
if len(hits) != 0:
ind_hits = hits[:, 1]
else:
ind_hits = []
value_hits = X.take(ind_hits, axis=0)
m_c = np.empty(len(classes), np.ndarray)
for j in range(len(classes)):
if classes[j] != y[i]:
misses = _DefaultMeasures.__take_k(dm_i, k_neighbors, i, lambda x: x == classes[j])
ind_misses = misses[:, 1]
m_c[j] = X.take(ind_misses, axis=0)
for A in range(n_features):
weight_hit = np.sum(np.abs(r[A] - value_hits[:, A]))
weight_miss = 0
for j in range(len(classes)):
if classes[j] != y[i]:
                        weight_miss += prior_prob[classes[j]] * np.sum(np.abs(r[A] - m_c[j][:, A]))
f_ratios[A] += weight_miss / (1 - prior_prob[y[i]]) - weight_hit
        # dividing by m * k guarantees that all final weights
        # will be normalized within the interval [-1, 1]
f_ratios /= n_samples * k_neighbors
# The maximum and minimum values of A are determined over the entire
# set of instances.
# This normalization ensures that weight updates fall
# between 0 and 1 for both discrete and continuous features.
with np.errstate(divide='ignore', invalid="ignore"): # todo
return f_ratios / (np.amax(X, axis=0) - np.amin(X, axis=0))
VDM = filters.VDM() # TODO: probably not a filter
GLOB_MEASURE = {"FitCriterion": _DefaultMeasures.fit_criterion_measure,
"FRatio": _DefaultMeasures.f_ratio_measure,
"GiniIndex": _DefaultMeasures.gini_index,
"InformationGain": _DefaultMeasures.ig_measure,
"MrmrDiscrete": _DefaultMeasures.mrmr_measure,
"SymmetricUncertainty": _DefaultMeasures.su_measure,
"SpearmanCorr": _DefaultMeasures.spearman_corr,
"PearsonCorr": _DefaultMeasures.pearson_corr,
"FechnerCorr": _DefaultMeasures.fechner_corr,
"ReliefF": _DefaultMeasures.reliefF_measure,
"Chi2": _DefaultMeasures.chi2_measure}
class _DefaultCuttingRules:
@staticmethod
def select_best_by_value(value):
return partial(_DefaultCuttingRules.__select_by_value, value=value, more=True)
@staticmethod
def select_worst_by_value(value):
return partial(_DefaultCuttingRules.__select_by_value, value=value, more=False)
@staticmethod
def __select_by_value(scores, value, more=True):
features = []
for key, sc_value in scores.items():
if more:
if sc_value >= value:
features.append(key)
else:
if sc_value <= value:
features.append(key)
return features
@staticmethod
def select_k_best(k):
return partial(_DefaultCuttingRules.__select_k, k=k, reverse=True)
@staticmethod
def select_k_worst(k):
return partial(_DefaultCuttingRules.__select_k, k=k)
@classmethod
def __select_k(cls, scores, k, reverse=False):
if type(k) != int:
raise TypeError("Number of features should be integer")
return [keys[0] for keys in sorted(scores.items(), key=lambda kv: kv[1], reverse=reverse)[:k]]
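# Illustrative sketch, not original code: how the public cutting rules above
# consume a {feature: score} mapping.
#
#   scores = {'a': 0.9, 'b': 0.1, 'c': 0.5}
#   _DefaultCuttingRules.select_k_best(2)(scores)           # -> ['a', 'c']
#   _DefaultCuttingRules.select_best_by_value(0.4)(scores)  # -> ['a', 'c']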
GLOB_CR = {"Best by value": _DefaultCuttingRules.select_best_by_value,
"Worst by value": _DefaultCuttingRules.select_worst_by_value,
"K best": _DefaultCuttingRules.select_k_best,
"K worst": _DefaultCuttingRules.select_k_worst}
class Filter(object):
def __init__(self, measure, cutting_rule):
        """Basic univariate filter class with a chosen (even custom) measure and cutting rule.

        :param measure: scoring function, or a key of GLOB_MEASURE
        :param cutting_rule: feature selection rule, or a key of GLOB_CR

        Examples
        --------
        >>> f=Filter("PearsonCorr", GLOB_CR["K best"](6))
        """
if type(measure) is str:
try:
self.measure = GLOB_MEASURE[measure]
except KeyError:
raise KeyError("No %r measure yet" % measure)
else:
self.measure = measure
if type(cutting_rule) is str:
try:
self.cutting_rule = GLOB_CR[cutting_rule]
except KeyError:
raise KeyError("No %r cutting rule yet" % measure)
else:
self.cutting_rule = cutting_rule
self.feature_scores = None
self.hash = None
def run(self, x, y, feature_names=None, store_scores=False, verbose=0):
try:
x = x.values
y = y.values
        except AttributeError:
            self.feature_scores = None
try:
feature_names = x.columns
except AttributeError:
if feature_names is None:
feature_names = list(range(x.shape[1]))
        if self.hash != hash(self.measure) or self.feature_scores is None:
            feature_scores = dict(zip(feature_names, self.measure(x, y)))
            self.hash = hash(self.measure)
        else:
            # same measure as the previous run: reuse the cached scores
            feature_scores = self.feature_scores
if store_scores:
self.feature_scores = feature_scores
selected_features = self.cutting_rule(feature_scores)
return x[:, selected_features]
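# Hedged end-to-end sketch on synthetic data (assumes the measures above; the
# shapes are illustrative only):
#
#   X = np.random.rand(50, 10)
#   y = np.random.randint(0, 2, 50)
#   f = Filter("PearsonCorr", GLOB_CR["K best"](6))
#   X_selected = f.run(X, y, store_scores=True)  # -> shape (50, 6)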
| 37.670061 | 118 | 0.579477 | 2,419 | 18,496 | 4.227367 | 0.190988 | 0.006845 | 0.009583 | 0.021514 | 0.20487 | 0.154313 | 0.132016 | 0.105907 | 0.087718 | 0.084686 | 0 | 0.010896 | 0.315257 | 18,496 | 490 | 119 | 37.746939 | 0.796368 | 0.216858 | 0 | 0.248447 | 0 | 0 | 0.024234 | 0 | 0 | 0 | 0 | 0.014286 | 0.006211 | 1 | 0.090062 | false | 0 | 0.003106 | 0.018634 | 0.192547 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
3daf498d7521399146cf380a60792cc98a71c488 | 6,145 | py | Python | MakeMytripChallenge/script/IFtrial.py | divayjindal95/DataScience | d976a5e3ac9bd36e84149642a5b93f7bfc3540cf | [
"MIT"
] | null | null | null | MakeMytripChallenge/script/IFtrial.py | divayjindal95/DataScience | d976a5e3ac9bd36e84149642a5b93f7bfc3540cf | [
"MIT"
] | null | null | null | MakeMytripChallenge/script/IFtrial.py | divayjindal95/DataScience | d976a5e3ac9bd36e84149642a5b93f7bfc3540cf | [
"MIT"
] | null | null | null | import sys
import warnings
if not sys.warnoptions:
warnings.simplefilter("ignore")
import numpy as np
import pandas as pd
import matplotlib.pylab as plt
from sklearn.naive_bayes import MultinomialNB
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import GradientBoostingClassifier
from sklearn.svm import SVC
from sklearn.linear_model import LogisticRegression,LinearRegression
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import LabelEncoder
from sklearn.model_selection import KFold,cross_val_score,LeaveOneOut
#from sklearn.cross_validation import KFold,train_test_split,cross_val_score
train_data = pd.read_csv("../data/train.csv")
train_data_len=len(train_data)
test_data=pd.read_csv("../data/test.csv")
test_data_len=len(test_data)
def getint(data):
nicedata=data
cls=dict()
for i in xrange(len(nicedata.columns)):
if data.dtypes[i]==object and data.columns[i]!='P':
le = LabelEncoder()
nicedata[nicedata.columns[i]] = le.fit_transform(nicedata[nicedata.columns[i]])
cls[nicedata.columns[i]]=le.classes_
return nicedata,cls
data=pd.concat([train_data,test_data])
data.A=data.A.fillna(data['A'].mode()[0])
data.D=data.D.fillna(data['D'].mode()[0])
data.E=data.E.fillna(data['E'].mode()[0])
data.G=data.G.fillna(data['G'].mode()[0])
data.F=data.F.fillna(data['F'].mode()[0])
data.B=data.B.fillna(data['B'].median())
data.N=data.N.fillna(data['N'].median())
#print len(data.dropna())
#print data.describe()
data,cls=getint(data)
# data.O=np.log(data.O+1)
# data.H=np.log(data.H+1)
# data.K=np.log(data.K+1)
# data.N=np.log(data.N+1)
# data.C=np.log(data.C+1)
# sc = StandardScaler()
# data.O=sc.fit_transform(np.reshape(data.O,(len(data.O),1)))
# sc = StandardScaler()
# data.H=sc.fit_transform(np.reshape(data.H,(len(data.H),1)))
# sc = StandardScaler()
# data.K=sc.fit_transform(np.reshape(data.K,(len(data.K),1)))
# sc = StandardScaler()
# data.N=sc.fit_transform(np.reshape(data.N,(len(data.N),1)))
# sc = StandardScaler()
# data.C=sc.fit_transform(np.reshape(data.C,(len(data.C),1)))
# sc = StandardScaler()
# data.B=sc.fit_transform(np.reshape(data.B,(len(data.B),1)))
data['H_frac']=data.H-data.H.map(lambda x:int(x))
data['H_int'] = data.H.map(lambda x:int(x))
data['C_frac']=data.C-data.C.map(lambda x:int(x))
data['C_int'] = data.C.map(lambda x:int(x))
data['N_frac']=data.N-data.N.map(lambda x:int(x))
data['N_int'] = data.N.map(lambda x:int(x))
data=pd.concat([data,pd.get_dummies(data.A,'A')],axis=1)
data=pd.concat([data,pd.get_dummies(data.F,'F')],axis=1)
print data.head()
print data.columns
trncols=[u'A', u'B','C_frac','C_int', u'D', u'E', u'F', u'G', u'H_int','H_frac', u'I', u'J', u'K',
u'L', u'M','N_frac','N_int', u'O']
trncols=[u'A', u'B', u'C', u'D', u'E', u'F', u'G', u'H', u'I', u'J', u'K', u'L', u'M', u'N', u'O', u'id', u'H_frac', u'H_int', u'C_frac', u'C_int', u'N_frac', u'N_int', u'A_0', u'A_1', u'F_0', u'F_1', u'F_2', u'F_3', u'F_4', u'F_5', u'F_6', u'F_7', u'F_8', u'F_9', u'F_10', u'F_11', u'F_12', u'F_13']
testcols=['P']
data_bin = ['A','I','J','L','F']
#trncols=data_bin
fin_train_data=data.iloc[:len(train_data)]
fin_test_data=data.iloc[len(train_data):]
#print fin_train_data[(fin_train_data.I==1) & (fin_train_data.J==0)].tostring()
print len(fin_train_data)
print len(fin_train_data[(fin_train_data.I==1) & (fin_train_data.J==1)]),len(fin_train_data[(fin_train_data.I==1) & (fin_train_data.J==1) & (fin_train_data.P==1)]),
print len(fin_train_data[(fin_train_data.I==0) & (fin_train_data.J==0)]),len(fin_train_data[(fin_train_data.I==0) & (fin_train_data.J==0) & (fin_train_data.P==0)])
print len(fin_train_data[(fin_train_data.I==0) & (fin_train_data.J==1)]),len(fin_train_data[(fin_train_data.I==0) & (fin_train_data.J==1) & (fin_train_data.P==0)])
print len(fin_test_data[(fin_test_data.I==1) & (fin_test_data.J==0)]),len(fin_test_data)
fin_train_data = fin_train_data[(fin_train_data.I==1) & (fin_train_data.J==0)]
from sklearn.utils import shuffle
fin_train_data= shuffle(fin_train_data)
X=fin_train_data[trncols]
Y=fin_train_data[testcols]
rfc=GradientBoostingClassifier(n_estimators=30)
#rfc=LogisticRegression()
rfc=LinearRegression()
#rfc=MultinomialNB()
kf=KFold(n_splits=5)
lo = LeaveOneOut()
accs=cross_val_score(rfc,X,Y,cv=kf)
accslo=cross_val_score(rfc,X,Y,cv=lo)
#print np.mean(accs),np.mean(accslo)
rfc.fit(X,Y)
#print rfc.score(X,Y)
#print rfc.predict(X)<0.5
rsss = pd.DataFrame((Y==0)==(rfc.predict(X)<0.5))
#print rsss[rsss.P==True]
# asnls=[]
#
# orans=y.P.tolist()
# x=x.reset_index(xrange(len(y)))
#
# for i in xrange(len(x)):
# if x.I.iloc[i]==0 and x.J.iloc[i]==0:
# asnls.append(1)
# if x.I.iloc[i]==1 and x.J.iloc[i]==1:
# asnls.append(1)
# if x.I.iloc[i]==0 and x.J.iloc[i]==1:
# asnls.append(1)
# if x.I.iloc[i]==1 and x.J.iloc[i]==0:
# asnls.append(orans[i])
# i+=1
#
# res=0
# for a,b in zip(asnls,orans):
# res+=np.abs(a-b)
# print res/len(orans)
fintestindex=fin_test_data.index
for e in fintestindex:
    if (fin_test_data['I'][e]==1) and (fin_test_data['J'][e]==1):
        fin_test_data.loc[e,'P']=0
    if (fin_test_data['I'][e]==0) and (fin_test_data['J'][e]==0):
        fin_test_data.loc[e,'P']=1
    if (fin_test_data['I'][e]==0) and (fin_test_data['J'][e]==1):
        fin_test_data.loc[e,'P']=1
# if (fin_test_data['I'][e]==1) and (fin_test_data['J'][e]==0):
# fin_test_data['P']=0
print fin_test_data.P
remaining=fin_test_data[fin_test_data.P.isnull()]
remainingans =rfc.predict(remaining[trncols])>0.5
fin_test_data.loc[fin_test_data.P.isnull(),'P']=np.reshape(remainingans.astype(int),(len(remainingans)))
fin_test_data.loc[fin_test_data.P.isnull(),'P']=1
print fin_test_data[fin_test_data.P.isnull()]['P'][:]
#print fin_test_data.P
final = pd.DataFrame()
final['id']=fin_test_data.id
# #final['P']=pd.to_numeric(rfc.predict(fin_test_data[trncols]),downcast='signed')
# final['P']=rfc.predict(fin_test_data[trncols]).astype(int)
# final.to_csv('../data/final.csv',index=False) | 34.138889 | 300 | 0.682832 | 1,172 | 6,145 | 3.399317 | 0.134812 | 0.090361 | 0.10241 | 0.040161 | 0.431476 | 0.370231 | 0.285643 | 0.270582 | 0.203815 | 0.173444 | 0 | 0.017176 | 0.099919 | 6,145 | 180 | 301 | 34.138889 | 0.703128 | 0.277787 | 0 | 0.022472 | 0 | 0 | 0.056797 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.168539 | null | null | 0.101124 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
3db6b1a2ad7d586c5f66023f21c351a35d9fd997 | 7,604 | py | Python | Appserver/Test/ApiUnitTesting/testBusquedaCandidatos.py | seguijoaquin/taller2 | f41232516de15fe045805131b09299e5c2634e5e | [
"MIT"
] | 2 | 2016-06-06T03:26:49.000Z | 2017-08-06T18:12:33.000Z | Appserver/Test/ApiUnitTesting/testBusquedaCandidatos.py | seguijoaquin/taller2 | f41232516de15fe045805131b09299e5c2634e5e | [
"MIT"
] | 60 | 2016-03-19T16:01:27.000Z | 2016-06-23T16:26:10.000Z | Appserver/Test/ApiUnitTesting/testBusquedaCandidatos.py | seguijoaquin/taller2 | f41232516de15fe045805131b09299e5c2634e5e | [
"MIT"
] | null | null | null | import json
import requests
import unittest
import Utilities
# Preconditions:
# Interests:
# There must be no user in the Shared that has "interesUnico"
#
Address = "http://localhost:8000"
# Maybe move the URIs to their respective classes
URIResgistro = "/registro"
URILogin = "/login"
URIPedirCandidato = "/perfil"
URIEliminar = "/eliminar"
def crearHeadersParaRegistro(usuario):
return {'Usuario': usuario,'Password': "password"}#, 'Content-Type': 'application/json' }
def crearHeadersParaElLogin(usuario):
return {'Usuario': usuario,'Password': "password", 'TokenGCM': "APA91bFundy4qQCiRnhUbMOcsZEwUBpbuPjBm-wnyBv600MNetW5rp-5Cg32_UA0rY_gmqqQ8pf0Cn-nyqoYrAl6BQTPT3dXNYFuHeWYEIdLz0RwAhN2lGqdoiYnCM2V_O8MonYn3rL6hAtYaIz_b0Jl2xojcKIOqQ" }
def abrirJson(ruta):
with open(ruta, 'r') as archivoJson:
parseado = json.load(archivoJson)
return parseado
def crearHeadersParaBuscarCandidatos(usuario,token):
return {'Usuario': usuario, 'Token': token}
class TestBusquedaCandidatos(unittest.TestCase):
usuario1 = 'usuarioCandidato1'
usuario2 = 'usuarioCandidato2'
usuarioSinIntereses = "./usuario.json"
    passwordCorrecto = 'password' # used for all the users
    # A category that is guaranteed to exist in the Shared
categoriaValida = "outdoors"
interesUnico = "INTERES UNICO QUE NO TIENE NADIE MAS"
interesCompartido = "INTERES QUE SOLO DEBE SER COMPARTIDO POR DOS USUARIOS"
msgNoSeEncontraronCandidatos = "Candidato no encontrado"
msgSeEncontraronCandidatos = "Candidato encontrado"
def agregarEmailAlUsuario(self, bodyUsuario, email):
bodyUsuario["user"]["email"] = email
def agregarValorDeInteresAlUsuario(self,bodyUsuario, valorDeInteres):
interes = json.loads('{}')
interes["category"] = self.categoriaValida
interes["value"] = valorDeInteres
bodyUsuario["user"]["interests"].append(interes)
def hacerLoginDeUsuario(self, usuario):
        headUsuarioRegistrado = crearHeadersParaElLogin(usuario)
reply = requests.get(Address + URILogin,headers=headUsuarioRegistrado)
return reply
usuariosParaBorrar = []
def tearDown(self):
for usuario in self.usuariosParaBorrar:
headEliminarUsuario = {'Usuario': usuario,'Password': self.passwordCorrecto }
replyDelete = requests.delete(Address + URIEliminar, headers=headEliminarUsuario)
del self.usuariosParaBorrar[:]
def test_UsuarioPideUnCandidatoPeroNoSeEncuentra(self):
        # For this there must be no user in the Shared with the interest "interesUnico"
        # Here I create the user body with a unique interest that no one else should use
nombreUsuario = Utilities.transformarEnMail("test_UsuarioPideUnCandidatoPeroNoSeEncuentra")
bodyUsuario = abrirJson(self.usuarioSinIntereses)
self.agregarEmailAlUsuario(bodyUsuario, nombreUsuario)
self.agregarValorDeInteresAlUsuario(bodyUsuario, self.interesUnico)
headRegistrarUsuario = crearHeadersParaRegistro(nombreUsuario)
replyRegistro = requests.put(Address + URIResgistro, headers=headRegistrarUsuario, data=json.dumps(bodyUsuario))
		# Log in
headLoginUsuario = crearHeadersParaElLogin(nombreUsuario)
replyLogin = requests.get(Address + URILogin, headers=headLoginUsuario)
		# Request a candidate
headPedirCandidatos = crearHeadersParaBuscarCandidatos(nombreUsuario,replyLogin.headers["Token"])
replyPedirCandidatos = requests.get(Address + URIPedirCandidato, headers=headPedirCandidatos)
self.assertEqual(replyPedirCandidatos.reason,self.msgNoSeEncontraronCandidatos)
self.assertEqual(replyPedirCandidatos.status_code,201)
self.usuariosParaBorrar.extend([nombreUsuario])
def crearBodyConUnInteres(self, email, interes):
bodyUsuario = abrirJson(self.usuarioSinIntereses)
self.agregarEmailAlUsuario(bodyUsuario, email)
self.agregarValorDeInteresAlUsuario(bodyUsuario, interes)
return bodyUsuario
def registrarUsuario(self, nombreUsuario, bodyUsuario):
headRegistrarUsuario = crearHeadersParaRegistro(nombreUsuario)
return requests.put(Address + URIResgistro, headers=headRegistrarUsuario, data=json.dumps(bodyUsuario))
def loguearUsuario(self, nombreUsuario):
headLoginUsuario = crearHeadersParaElLogin(nombreUsuario)
return requests.get(Address + URILogin, headers=headLoginUsuario)
def pedirCandidato(self, nombreUsuario, replyLogin):
headPedirCandidatos = crearHeadersParaBuscarCandidatos(nombreUsuario,replyLogin.headers["Token"])
return requests.get(Address + URIPedirCandidato, headers=headPedirCandidatos)
def test_DosUsuariosConUnInteresEspecificoPidenUnCandidatoYSeEncuentranUnoAlOtro(self):
nombreUsuario1 = Utilities.transformarEnMail("1test_DosUsuariosConUnInteresEspecificoPidenUnCandidatoYSeEncuentranUnoAlOtro")
nombreUsuario2 = Utilities.transformarEnMail("2test_DosUsuariosConUnInteresEspecificoPidenUnCandidatoYSeEncuentranUnoAlOtro")
bodyUsuario1 = self.crearBodyConUnInteres(nombreUsuario1, self.interesCompartido)
bodyUsuario2 = self.crearBodyConUnInteres(nombreUsuario2, self.interesCompartido)
replyRegistro1 = self.registrarUsuario(nombreUsuario1, bodyUsuario1)
replyRegistro2 = self.registrarUsuario(nombreUsuario2, bodyUsuario2)
replyLogin1 = self.loguearUsuario(nombreUsuario1)
replyLogin2 = self.loguearUsuario(nombreUsuario2)
		# Each user requests a candidate
replyPedirCandidatos1 = self.pedirCandidato(nombreUsuario1, replyLogin1)
replyPedirCandidatos2 = self.pedirCandidato(nombreUsuario2, replyLogin2)
self.assertEqual(replyPedirCandidatos1.reason,self.msgSeEncontraronCandidatos)
self.assertEqual(replyPedirCandidatos1.status_code,200)
self.assertEqual(replyPedirCandidatos2.reason,self.msgSeEncontraronCandidatos)
self.assertEqual(replyPedirCandidatos2.status_code,200)
self.usuariosParaBorrar.extend([nombreUsuario1, nombreUsuario2])
def test_DosUsuariosMatcheanYVotanUnoPorElOtro(self):
nombreUsuario1 = Utilities.transformarEnMail("test_DosUsuariosMatcheanYVotanUnoPorElOtro1")
nombreUsuario2 = Utilities.transformarEnMail("test_DosUsuariosMatcheanYVotanUnoPorElOtro2")
categoria = "outdoors"
valor = "test_DosUsuariosMatcheanYVotanUnoPorElOtro"
Utilities.registrarUsuarioSinEmailYSinIntereses(nombreUsuario1,categoria, valor)
Utilities.registrarUsuarioSinEmailYSinIntereses(nombreUsuario2,categoria, valor)
tokenSesion1 = Utilities.registrarYLoguearAlUsuarioSinEmail(nombreUsuario1)
tokenSesion2 = Utilities.registrarYLoguearAlUsuarioSinEmail(nombreUsuario2)
candidatoParaUsuario1 = Utilities.pedirCandidato(nombreUsuario1,tokenSesion1)
candidatoParaUsuario2 = Utilities.pedirCandidato(nombreUsuario2,tokenSesion2)
replyVotacion1 = Utilities.likearCandidato(nombreUsuario1, tokenSesion1, candidatoParaUsuario1)
replyVotacion2 = Utilities.likearCandidato(nombreUsuario2, tokenSesion2, candidatoParaUsuario2)
self.assertEqual("El voto se registro correctamente",replyVotacion1.reason)
self.assertEqual(200,replyVotacion1.status_code)
self.assertEqual("El voto se registro correctamente",replyVotacion2.reason)
self.assertEqual(200,replyVotacion2.status_code)
self.usuariosParaBorrar.extend([nombreUsuario1, nombreUsuario2])
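
# Added note: these are live integration tests; they assume an app server
# listening at http://localhost:8000 and the helper module Utilities on the
# import path. Run with, e.g.:
#   python -m unittest testBusquedaCandidatos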
| 43.451429 | 233 | 0.768017 | 582 | 7,604 | 10.003436 | 0.331615 | 0.025764 | 0.015459 | 0.013397 | 0.206287 | 0.163861 | 0.081759 | 0.039505 | 0.027825 | 0.027825 | 0 | 0.016539 | 0.157154 | 7,604 | 174 | 234 | 43.701149 | 0.891871 | 0.059442 | 0 | 0.092593 | 0 | 0 | 0.131549 | 0.068366 | 0 | 0 | 0 | 0.005747 | 0.092593 | 1 | 0.138889 | false | 0.046296 | 0.037037 | 0.027778 | 0.361111 | 0 | 0 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
3db72a55f192a9c9ab68f0478ca0ffc316b36c78 | 1,053 | py | Python | package/diana/utils/iter_dates.py | thomasyi17/diana2 | 2167053dfe15b782d96cb1e695047433f302d4dd | [
"MIT"
] | 15 | 2019-02-12T23:26:09.000Z | 2021-12-21T08:53:58.000Z | package/diana/utils/iter_dates.py | thomasyi17/diana2 | 2167053dfe15b782d96cb1e695047433f302d4dd | [
"MIT"
] | 2 | 2019-01-23T21:13:12.000Z | 2019-06-28T15:45:51.000Z | package/diana/utils/iter_dates.py | thomasyi17/diana2 | 2167053dfe15b782d96cb1e695047433f302d4dd | [
"MIT"
] | 6 | 2019-01-23T20:22:50.000Z | 2022-02-03T03:27:04.000Z | from datetime import datetime, timedelta
class IterDates(object):
def __init__(self, start: datetime, stop: datetime, step: timedelta):
self.start = start
self.stop = stop
self.step = step
self.value = (self.start, self.start + self.step)
def __iter__(self):
return self
def __next__(self):
next_value = self.value
if next_value[0] >= self.stop:
raise StopIteration
self.start = self.start + self.step
self.value = (self.start, min(self.stop, self.start + self.step))
return next_value
class FuncByDates(object):
def __init__(self, func, start: datetime, stop: datetime, step: timedelta):
self._func = func
self._iterdate = IterDates(start, stop, step)
self.value = self._func(*self._iterdate.value)
def __iter__(self):
return self
def __next__(self):
next_value = self.value
next(self._iterdate)
self.value = self._func(*self._iterdate.value)
return next_value
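
# Illustrative usage sketch (added; not part of the original module): walking
# January 2019 in one-week windows yields (start, end) pairs, with the last
# window clipped to `stop` via min().
#
#   from datetime import datetime, timedelta
#   for begin, end in IterDates(datetime(2019, 1, 1),
#                               datetime(2019, 1, 29),
#                               timedelta(days=7)):
#       print(begin, "->", end)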
| 26.325 | 79 | 0.624881 | 128 | 1,053 | 4.859375 | 0.195313 | 0.115756 | 0.104502 | 0.081994 | 0.538585 | 0.488746 | 0.405145 | 0.160772 | 0.160772 | 0.160772 | 0 | 0.001305 | 0.272555 | 1,053 | 39 | 80 | 27 | 0.810705 | 0 | 0 | 0.428571 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.214286 | false | 0 | 0.035714 | 0.071429 | 0.464286 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
3db739475a32d4a4cd03afcbff8864712c35cad0 | 193 | py | Python | Exercicios Curso Em Video Mundo 2/ex067.py | JorgeTranin/Python_Curso_Em_Video | be74c9301aafc055bdf883be649cb8b7716617e3 | [
"MIT"
] | null | null | null | Exercicios Curso Em Video Mundo 2/ex067.py | JorgeTranin/Python_Curso_Em_Video | be74c9301aafc055bdf883be649cb8b7716617e3 | [
"MIT"
] | null | null | null | Exercicios Curso Em Video Mundo 2/ex067.py | JorgeTranin/Python_Curso_Em_Video | be74c9301aafc055bdf883be649cb8b7716617e3 | [
"MIT"
] | null | null | null | cont = 1
while True:
t = int(input('Quer saber a tabuada de que numero ? '))
if t < 0:
break
for c in range (1, 11):
print(f'{t} X {c} = {t * c}')
print('Obrigado!') | 24.125 | 59 | 0.507772 | 33 | 193 | 2.969697 | 0.787879 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.038168 | 0.321244 | 193 | 8 | 60 | 24.125 | 0.709924 | 0 | 0 | 0 | 0 | 0 | 0.335052 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
3db86f3d8bdc658afbe080624e5b8f952805ce4b | 1,172 | py | Python | src/PassGen/PassGen.py | Natthapolmnc/PasswordGenerator | 1d481de1b4773af99558c68e9570d1801c1f6e2e | [
"MIT"
] | null | null | null | src/PassGen/PassGen.py | Natthapolmnc/PasswordGenerator | 1d481de1b4773af99558c68e9570d1801c1f6e2e | [
"MIT"
] | null | null | null | src/PassGen/PassGen.py | Natthapolmnc/PasswordGenerator | 1d481de1b4773af99558c68e9570d1801c1f6e2e | [
"MIT"
] | null | null | null | import random as rd
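# genPass(num, length): return a list of `num` random passwords, each `length`
# characters long, mixing upper-case letters, lower-case letters and digits.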
def genPass(num, length):
    print("Password Generator")
    print("===================\n")
    numpass = num
    lenpass = length
    # Despite the original names, AlphaLcase holds the UPPER-case letters
    # (ASCII 65-90), AlphaCcase the lower-case letters (ASCII 97-122) and
    # Intset the digits 0-9 (ASCII 48-57).
    AlphaLcase = [chr(m) for m in range(65, 91)]
    AlphaCcase = [chr(n) for n in range(97, 123)]
    Intset = [chr(p) for p in range(48, 58)]
    listsetpass = []
    for j in range(numpass):  # generate the requested number of passwords (the original looped over lenpass)
        randAlphaset = rd.randint(2, lenpass)  # how many characters are letters
        randAlphaL = rd.randint(1, randAlphaset)
        randAlphaH = randAlphaset - randAlphaL
        randIntset = lenpass - randAlphaset    # the rest are digits
        password = []
        for i in range(randAlphaH):
            randindexAlphaH = rd.randint(0, len(AlphaCcase) - 1)
            password.append(AlphaCcase[randindexAlphaH])
        for k in range(randAlphaL):
            randindexAlphaL = rd.randint(0, len(AlphaLcase) - 1)
            password.append(AlphaLcase[randindexAlphaL])
        for l in range(randIntset):
            randindexInt = rd.randint(0, len(Intset) - 1)
            password.append(Intset[randindexInt])
        # shuffle once and join; re-shuffling before every pick (as the original
        # did) can repeat some characters and drop others, breaking the computed mix
        rd.shuffle(password)
        strpassword = "".join(password)
        listsetpass += [strpassword]
    return listsetpass
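
# Example (added): print three 12-character passwords.
#   print(genPass(3, 12))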
| 35.515152 | 59 | 0.617747 | 133 | 1,172 | 5.443609 | 0.390977 | 0.077348 | 0.041436 | 0.053867 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.024027 | 0.254266 | 1,172 | 32 | 60 | 36.625 | 0.804348 | 0 | 0 | 0 | 0 | 0 | 0.033276 | 0.017918 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032258 | false | 0.580645 | 0.032258 | 0 | 0.096774 | 0.064516 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
3dbe95131f682ae91ac5d0ab7098a4da9541c391 | 267 | py | Python | gc_win1.py | danz2004/learning_python | 20cb7d33f898bcc406f33565308132dca31e11cd | [
"MIT"
] | null | null | null | gc_win1.py | danz2004/learning_python | 20cb7d33f898bcc406f33565308132dca31e11cd | [
"MIT"
] | null | null | null | gc_win1.py | danz2004/learning_python | 20cb7d33f898bcc406f33565308132dca31e11cd | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
seq = 'ACGACGCAGGAGGAGAGTTTCAGAGATCACGAATACATCCATATTACCCAGAGAGAG'
w = 11
for i in range(len(seq) - w + 1):
count = 0
for j in range(i, i + w):
if seq[j] == 'G' or seq[j] == 'C':
count += 1
print(f'{i} {seq[i:i+w]} {(count / w) : .4f}')
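
# First line of the expected output (added; the ' .4f' spec prints a sign-space,
# and the first 11-mer of `seq` contains 7 G/C bases):
#   0 ACGACGCAGGA  0.6364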
| 26.7 | 65 | 0.595506 | 45 | 267 | 3.533333 | 0.533333 | 0.08805 | 0.037736 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.032558 | 0.194757 | 267 | 9 | 66 | 29.666667 | 0.706977 | 0.078652 | 0 | 0 | 0 | 0.125 | 0.387755 | 0.232653 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.125 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
3dbfa17a77ec527273235935d102cd0d8f5bcbb2 | 7,991 | py | Python | gym_flock/envs/old/flocking.py | katetolstaya/gym-flock | 3236d1dafcb1b9be0cf78b471672e8becb2d37af | [
"MIT"
] | 19 | 2019-07-29T22:19:58.000Z | 2022-01-27T04:38:38.000Z | gym_flock/envs/old/flocking.py | henghenghahei849/gym-flock | b09bdfbbe4a96fe052958d1f9e1e9dd314f58419 | [
"MIT"
] | null | null | null | gym_flock/envs/old/flocking.py | henghenghahei849/gym-flock | b09bdfbbe4a96fe052958d1f9e1e9dd314f58419 | [
"MIT"
] | 5 | 2019-10-03T14:44:49.000Z | 2021-12-09T20:39:39.000Z | import gym
from gym import spaces, error, utils
from gym.utils import seeding
import numpy as np
import configparser
from os import path
import matplotlib.pyplot as plt
from matplotlib.pyplot import gca
font = {'family': 'sans-serif',
'weight': 'bold',
'size': 14}
class FlockingEnv(gym.Env):
def __init__(self):
config_file = path.join(path.dirname(__file__), "params_flock.cfg")
config = configparser.ConfigParser()
config.read(config_file)
config = config['flock']
self.dynamic = False # if the agents are moving or not
self.mean_pooling = True # normalize the adjacency matrix by the number of neighbors or not
# number states per agent
self.nx_system = 4
        # number of observations per agent
self.n_features = 6
# number of actions per agent
self.nu = 2
# problem parameters from file
self.n_agents = int(config['network_size'])
self.comm_radius = float(config['comm_radius'])
self.comm_radius2 = self.comm_radius * self.comm_radius
self.dt = float(config['system_dt'])
self.v_max = float(config['max_vel_init'])
self.v_bias = self.v_max
self.r_max = float(config['max_rad_init'])
self.std_dev = float(config['std_dev']) * self.dt
        # initialize state matrices
self.x = np.zeros((self.n_agents, self.nx_system))
self.u = np.zeros((self.n_agents, self.nu))
self.mean_vel = np.zeros((self.n_agents, self.nu))
self.init_vel = np.zeros((self.n_agents, self.nu))
self.a_net = np.zeros((self.n_agents, self.n_agents))
# TODO : what should the action space be? is [-1,1] OK?
self.max_accel = 1
self.gain = 10.0 # TODO - adjust if necessary - may help the NN performance
self.action_space = spaces.Box(low=-self.max_accel, high=self.max_accel, shape=(2 * self.n_agents,),
dtype=np.float32)
self.observation_space = spaces.Box(low=-np.Inf, high=np.Inf, shape=(self.n_agents, self.n_features),
dtype=np.float32)
self.fig = None
self.line1 = None
self.seed()
def seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def step(self, u):
#u = np.reshape(u, (-1, 2))
assert u.shape == (self.n_agents, self.nu)
self.u = u
if self.dynamic:
# x position
self.x[:, 0] = self.x[:, 0] + self.x[:, 2] * self.dt
# y position
self.x[:, 1] = self.x[:, 1] + self.x[:, 3] * self.dt
# x velocity
self.x[:, 2] = self.x[:, 2] + self.gain * self.u[:, 0] * self.dt #+ np.random.normal(0, self.std_dev, (self.n_agents,))
# y velocity
self.x[:, 3] = self.x[:, 3] + self.gain * self.u[:, 1] * self.dt #+ np.random.normal(0, self.std_dev, (self.n_agents,))
return self._get_obs(), self.instant_cost(), False, {}
def instant_cost(self): # sum of differences in velocities
# TODO adjust to desired reward
# action_cost = -1.0 * np.sum(np.square(self.u))
#curr_variance = -1.0 * np.sum((np.var(self.x[:, 2:4], axis=0)))
versus_initial_vel = -1.0 * np.sum(np.sum(np.square(self.x[:, 2:4] - self.mean_vel), axis=1))
#return curr_variance + versus_initial_vel
return versus_initial_vel
def reset(self):
x = np.zeros((self.n_agents, self.nx_system))
degree = 0
min_dist = 0
min_dist_thresh = 0.1 # 0.25
# generate an initial configuration with all agents connected,
# and minimum distance between agents > min_dist_thresh
while degree < 2 or min_dist < min_dist_thresh:
# randomly initialize the location and velocity of all agents
length = np.sqrt(np.random.uniform(0, self.r_max, size=(self.n_agents,)))
angle = np.pi * np.random.uniform(0, 2, size=(self.n_agents,))
x[:, 0] = length * np.cos(angle)
x[:, 1] = length * np.sin(angle)
bias = np.random.uniform(low=-self.v_bias, high=self.v_bias, size=(2,))
x[:, 2] = np.random.uniform(low=-self.v_max, high=self.v_max, size=(self.n_agents,)) + bias[0]
x[:, 3] = np.random.uniform(low=-self.v_max, high=self.v_max, size=(self.n_agents,)) + bias[1]
# compute distances between agents
a_net = self.dist2_mat(x)
# compute minimum distance between agents and degree of network to check if good initial configuration
min_dist = np.sqrt(np.min(np.min(a_net)))
a_net = a_net < self.comm_radius2
degree = np.min(np.sum(a_net.astype(int), axis=1))
# keep good initialization
self.mean_vel = np.mean(x[:, 2:4], axis=0)
self.init_vel = x[:, 2:4]
self.x = x
self.a_net = self.get_connectivity(self.x)
return self._get_obs()
def _get_obs(self):
# state_values = self.x
state_values = np.hstack((self.x, self.init_vel)) # initial velocities are part of state to make system observable
if self.dynamic:
state_network = self.get_connectivity(self.x)
else:
state_network = self.a_net
return (state_values, state_network)
def dist2_mat(self, x):
"""
Compute squared euclidean distances between agents. Diagonal elements are infinity
Args:
            x (np.ndarray): current state of all agents
Returns: symmetric matrix of size (n_agents, n_agents) with A_ij the distance between agents i and j
"""
x_loc = np.reshape(x[:, 0:2], (self.n_agents,2,1))
a_net = np.sum(np.square(np.transpose(x_loc, (0,2,1)) - np.transpose(x_loc, (2,0,1))), axis=2)
np.fill_diagonal(a_net, np.Inf)
return a_net
def get_connectivity(self, x):
"""
Get the adjacency matrix of the network based on agent locations by computing pairwise distances using pdist
Args:
            x (np.ndarray): current state of all agents
Returns: adjacency matrix of network
"""
a_net = self.dist2_mat(x)
a_net = (a_net < self.comm_radius2).astype(float)
if self.mean_pooling:
# Normalize the adjacency matrix by the number of neighbors - results in mean pooling, instead of sum pooling
n_neighbors = np.reshape(np.sum(a_net, axis=1), (self.n_agents,1)) # TODO or axis=0? Is the mean in the correct direction?
n_neighbors[n_neighbors == 0] = 1
a_net = a_net / n_neighbors
return a_net
def controller(self):
"""
Consensus-based centralized flocking with no obstacle avoidance
Returns: the optimal action
"""
# TODO implement Tanner 2003?
u = np.mean(self.x[:,2:4], axis=0) - self.x[:,2:4]
u = np.clip(u, a_min=-self.max_accel, a_max=self.max_accel)
return u
def render(self, mode='human'):
"""
Render the environment with agents as points in 2D space
"""
if self.fig is None:
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(111)
line1, = ax.plot(self.x[:, 0], self.x[:, 1], 'bo') # Returns a tuple of line objects, thus the comma
ax.plot([0], [0], 'kx')
plt.ylim(-1.0 * self.r_max, 1.0 * self.r_max)
plt.xlim(-1.0 * self.r_max, 1.0 * self.r_max)
a = gca()
a.set_xticklabels(a.get_xticks(), font)
a.set_yticklabels(a.get_yticks(), font)
plt.title('GNN Controller')
self.fig = fig
self.line1 = line1
self.line1.set_xdata(self.x[:, 0])
self.line1.set_ydata(self.x[:, 1])
self.fig.canvas.draw()
self.fig.canvas.flush_events()
def close(self):
pass
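
# Illustrative rollout sketch (added; assumes params_flock.cfg sits next to this file):
#   env = FlockingEnv()
#   values, network = env.reset()
#   for _ in range(100):
#       (values, network), reward, done, info = env.step(env.controller())
#       env.render()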
| 36.99537 | 134 | 0.585659 | 1,161 | 7,991 | 3.892334 | 0.236003 | 0.029874 | 0.046249 | 0.026555 | 0.208453 | 0.166409 | 0.135428 | 0.125249 | 0.103563 | 0.068157 | 0 | 0.021663 | 0.289451 | 7,991 | 216 | 135 | 36.99537 | 0.774216 | 0.246527 | 0 | 0.063492 | 0 | 0 | 0.023463 | 0 | 0 | 0 | 0 | 0.013889 | 0.007937 | 1 | 0.087302 | false | 0.007937 | 0.063492 | 0 | 0.222222 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
3dc72f281f6a609f6178afd5c15a1c8b5b592cd3 | 278 | py | Python | subdomains/gen_master_data.py | sjy5386/subshorts | d8170ee4a66989c3e852f86aa83bab6341e3aa10 | [
"MIT"
] | 3 | 2022-03-08T19:02:41.000Z | 2022-03-16T23:04:37.000Z | subdomains/gen_master_data.py | sjy5386/subshorts | d8170ee4a66989c3e852f86aa83bab6341e3aa10 | [
"MIT"
] | 5 | 2022-03-17T02:16:52.000Z | 2022-03-18T02:55:25.000Z | subdomains/gen_master_data.py | sjy5386/subshorts | d8170ee4a66989c3e852f86aa83bab6341e3aa10 | [
"MIT"
] | null | null | null | from .models import ReservedName
def gen_master(apps, schema_editor):  # Django's RunPython passes (apps, schema_editor)
reserved_names = ['co', 'com', 'example', 'go', 'gov', 'icann', 'ne', 'net', 'nic', 'or', 'org', 'whois', 'www']
for reserved_name in reserved_names:
ReservedName(name=reserved_name).save()
| 34.75 | 116 | 0.647482 | 36 | 278 | 4.833333 | 0.805556 | 0.149425 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.161871 | 278 | 7 | 117 | 39.714286 | 0.746781 | 0 | 0 | 0 | 0 | 0 | 0.154676 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.2 | 0 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
3dd1773f50f2af84354e0431bf0e4276687f173e | 3,401 | py | Python | Server/Python/src/dbs/dao/Oracle/MigrationBlock/Update.py | vkuznet/DBS | 14df8bbe8ee8f874fe423399b18afef911fe78c7 | [
"Apache-2.0"
] | 8 | 2015-08-14T04:01:32.000Z | 2021-06-03T00:56:42.000Z | Server/Python/src/dbs/dao/Oracle/MigrationBlock/Update.py | yuyiguo/DBS | 14df8bbe8ee8f874fe423399b18afef911fe78c7 | [
"Apache-2.0"
] | 162 | 2015-01-07T21:34:47.000Z | 2021-10-13T09:42:41.000Z | Server/Python/src/dbs/dao/Oracle/MigrationBlock/Update.py | yuyiguo/DBS | 14df8bbe8ee8f874fe423399b18afef911fe78c7 | [
"Apache-2.0"
] | 16 | 2015-01-22T15:27:29.000Z | 2021-04-28T09:23:28.000Z | #!/usr/bin/env python
"""
This module provides Migration.Update data access object.
"""
from WMCore.Database.DBFormatter import DBFormatter
from dbs.utils.dbsExceptionHandler import dbsExceptionHandler
from dbs.utils.DBSDaoTools import create_token_generator
class Update(DBFormatter):
"""
Migration Update DAO class.
migration_status:
0=PENDING
1=IN PROGRESS
2=COMPLETED
3=FAILED (will be retried)
9=Terminally FAILED
status change:
0 -> 1
1 -> 2
1 -> 3
1 -> 9
    are the only status changes allowed while a migration is being worked through.
    3 -> 1 is additionally allowed for retrying, as long as retry_count < 3.
"""
def __init__(self, logger, dbi, owner):
"""
Add schema owner and sql.
"""
DBFormatter.__init__(self, logger, dbi)
self.owner = "%s." % owner if not owner in ("", "__MYSQL__") else ""
self.logger = logger
self.sql = \
"""UPDATE %sMIGRATION_BLOCKS
SET
MIGRATION_STATUS=:migration_status ,
LAST_MODIFICATION_DATE=:last_modification_date
WHERE """ % self.owner
def execute(self, conn, daoinput, transaction = False):
"""
daoinput keys:
migration_status, migration_block_id, migration_request_id
"""
#print daoinput['migration_block_id']
if not conn:
dbsExceptionHandler("dbsException-failed-connect2host", "Oracle/MigrationBlock/Update. Expects db connection from upper layer." ,self.logger.exception)
if daoinput['migration_status'] == 1:
sql = self.sql + " (MIGRATION_STATUS = 0 or MIGRATION_STATUS = 3)"
elif daoinput['migration_status'] == 2 or daoinput['migration_status'] == 3 or daoinput['migration_status'] == 9:
sql = self.sql + " MIGRATION_STATUS = 1 "
else:
dbsExceptionHandler("dbsException-conflict-data", "Oracle/MigrationBlock/Update. Expected migration status to be 1, 2, 3, 0r 9" ,self.logger.exception )
#print sql
if 'migration_request_id' in daoinput:
sql3 = sql + "and MIGRATION_REQUEST_ID =:migration_request_id"
result = self.dbi.processData(sql3, daoinput, conn, transaction)
elif 'migration_block_id' in daoinput:
if type(daoinput['migration_block_id']) is not list:
sql2 = sql+ " and MIGRATION_BLOCK_ID =:migration_block_id"
result = self.dbi.processData(sql2, daoinput, conn, transaction)
else:
bk_id_generator, binds2 = create_token_generator(daoinput['migration_block_id'])
newdaoinput = {}
newdaoinput.update({"migration_status":daoinput["migration_status"],
"last_modification_date":daoinput["last_modification_date"]})
newdaoinput.update(binds2)
sql2 = sql+ """ and MIGRATION_BLOCK_ID in ({bk_id_generator} SELECT TOKEN FROM TOKEN_GENERATOR)
""".format(bk_id_generator=bk_id_generator)
result = self.dbi.processData(sql2, newdaoinput, conn, transaction)
else:
dbsExceptionHandler("dbsException-conflict-data", "Oracle/MigrationBlock/Update. Required IDs not in the input", self.logger.exception)
| 46.589041 | 165 | 0.614231 | 362 | 3,401 | 5.571823 | 0.323204 | 0.104115 | 0.063461 | 0.035697 | 0.199306 | 0.098166 | 0.072385 | 0.072385 | 0 | 0 | 0 | 0.015365 | 0.291973 | 3,401 | 72 | 166 | 47.236111 | 0.822259 | 0.019112 | 0 | 0.081081 | 0 | 0 | 0.314986 | 0.095219 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.081081 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
3dd4b115a1efae712e7d58d8046528f7acbf782b | 1,467 | py | Python | for_straight_forward_relion/read_star_del_metadata_param.py | homurachan/Block-based-recontruction | b3fc02a0648db6aaa5d77dcc4b8e10f3361d66f4 | [
"WTFPL"
] | 11 | 2018-04-17T01:41:11.000Z | 2020-12-11T05:43:21.000Z | for_straight_forward_relion/read_star_del_metadata_param.py | homurachan/Block-based-recontruction | b3fc02a0648db6aaa5d77dcc4b8e10f3361d66f4 | [
"WTFPL"
] | null | null | null | for_straight_forward_relion/read_star_del_metadata_param.py | homurachan/Block-based-recontruction | b3fc02a0648db6aaa5d77dcc4b8e10f3361d66f4 | [
"WTFPL"
] | 3 | 2019-08-23T07:48:50.000Z | 2020-12-08T07:31:41.000Z | #!/usr/bin/env python
import math,os,sys
try:
from optparse import OptionParser
except:
from optik import OptionParser
def main():
(star,mline,line_name,output) = parse_command_line()
aa=open(star,"r")
instar_line=aa.readlines()
out=open(output,"w")
for i in range(0,mline):
if (instar_line[i].split()):
if (str(instar_line[i].split()[0])==line_name):
line_index=int(instar_line[i].split('#')[1])-1
skip=i
for i in range(0,mline):
if(i<skip):
out.write(instar_line[i])
if(i>skip):
tmp=str(instar_line[i].split('#')[0])
tmp_num=int(instar_line[i].split('#')[1])
tmp_num-=1
tmp=tmp+"#"+str(tmp_num)
out.write(tmp+"\n")
for i in range(mline,len(instar_line)):
if (instar_line[i].split()):
tmp=""
xx=len(instar_line[i].split())
for j in range(0,xx):
if(j!=line_index):
tmp+=str(instar_line[i].split()[j])
if(j!=xx-1 and j!=line_index):
tmp+="\t"
if(j==xx-1):
tmp+="\n"
out.write(tmp)
out.close()
aa.close()
def parse_command_line():
    usage = "%prog <input star> <mline +4> <line name> <output>"
    parser = OptionParser(usage=usage, version="%1")
    if len(sys.argv) < 5:
        print("<input star> <mline +4> <line name> <output>")
        sys.exit(-1)
    (options, args) = parser.parse_args()
    star = str(args[0])
    mline = int(args[1])
    line_name = str(args[2])
    output = str(args[3])
    return (star, mline, line_name, output)
def SQR(x):
y=float(x)
return(y*y)
if __name__== "__main__":
main()
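
# Example invocation (added; the file names and label are illustrative):
#   python read_star_del_metadata_param.py particles.star 24 _rlnRandomSubset out.star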
| 22.227273 | 59 | 0.632584 | 251 | 1,467 | 3.561753 | 0.282869 | 0.123043 | 0.110738 | 0.143177 | 0.314318 | 0.224832 | 0.107383 | 0 | 0 | 0 | 0 | 0.016181 | 0.157464 | 1,467 | 65 | 60 | 22.569231 | 0.70712 | 0.013633 | 0 | 0.072727 | 0 | 0 | 0.080221 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.054545 | null | null | 0.018182 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
3dd551aff5d9acdfce555b2997eb9c881f846544 | 1,382 | py | Python | setup.py | elafefy11/flask_gtts | 8f14b9f114127d8fba240a88f3aa16eb17628872 | [
"MIT"
] | null | null | null | setup.py | elafefy11/flask_gtts | 8f14b9f114127d8fba240a88f3aa16eb17628872 | [
"MIT"
] | null | null | null | setup.py | elafefy11/flask_gtts | 8f14b9f114127d8fba240a88f3aa16eb17628872 | [
"MIT"
] | null | null | null | """
Flask-gTTS
-------------
A Flask extension that adds gTTS (Google Text-to-Speech) to templates,
making it much easier and less time-consuming to add and configure
multiple text-to-speech audio files at a time
"""
from setuptools import setup
setup(
name='Flask-gTTS',
version='0.11',
url='https://github.com/mrf345/flask_gtts/',
download_url='https://github.com/mrf345/flask_gtts/archive/0.11.tar.gz',
license='MIT',
author='Mohamed Feddad',
author_email='mrf345@gmail.com',
description='gTTS Google text to speech flask extension',
long_description=__doc__,
py_modules=['gtts'],
packages=['flask_gtts'],
zip_safe=False,
include_package_data=True,
platforms='any',
install_requires=[
'Flask',
'gTTS',
'static_parameters'
],
keywords=['flask', 'extension', 'google', 'text', 'speech',
'gTTS', 'TTS', 'text-to-speech'],
classifiers=[
'Environment :: Web Environment',
'Intended Audience :: Developers',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Topic :: Internet :: WWW/HTTP :: Dynamic Content',
'Topic :: Software Development :: Libraries :: Python Modules'
],
setup_requires=['pytest-runner'],
test_requires=['pytest']
)
| 29.404255 | 76 | 0.633864 | 157 | 1,382 | 5.464968 | 0.624204 | 0.062937 | 0.055944 | 0.037296 | 0.125874 | 0.074592 | 0.074592 | 0 | 0 | 0 | 0 | 0.013915 | 0.219971 | 1,382 | 46 | 77 | 30.043478 | 0.782004 | 0.152677 | 0 | 0.057143 | 0 | 0 | 0.487091 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.028571 | 0 | 0.028571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
3dd7149bf486a0156690dac8d36a869ec269ebf6 | 9,280 | py | Python | src/aux_funcs.py | ArunBaskaran/Image-Driven-Machine-Learning-Approach-for-Microstructure-Classification-and-Segmentation-Ti-6Al-4V | 79ca40ababbc65464650c5519f9e7fdbf3c9d14d | [
"MIT"
] | 7 | 2020-03-19T05:04:30.000Z | 2022-03-31T10:29:42.000Z | src/aux_funcs.py | ArunBaskaran/Image-Driven-Machine-Learning-Approach-for-Microstructure-Classification-and-Segmentation-Ti-6Al-4V | 79ca40ababbc65464650c5519f9e7fdbf3c9d14d | [
"MIT"
] | 2 | 2020-08-19T03:24:31.000Z | 2021-03-02T00:18:46.000Z | src/aux_funcs.py | ArunBaskaran/Image-Driven-Machine-Learning-Approach-for-Microstructure-Classification-and-Segmentation-Ti-6Al-4V | 79ca40ababbc65464650c5519f9e7fdbf3c9d14d | [
"MIT"
] | 3 | 2020-09-17T04:15:04.000Z | 2021-01-18T08:37:39.000Z | """
----------------------------------ABOUT-----------------------------------
Author: Arun Baskaran
--------------------------------------------------------------------------
"""
# Imports below (added) are inferred from usage in this module; the original
# file only imported model_params. Package choices (PIL, scipy, scikit-image,
# TF 1.x with tf.contrib) are assumptions based on the calls made further down.
import os
import random

import cv2
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
from tensorflow import keras
from tensorflow.keras import regularizers
from PIL import Image
from scipy import ndimage as ndi
from skimage import exposure, morphology
from skimage.color import label2rgb
from skimage.filters import sobel

import model_params
def smooth(img):
return 0.5*img + 0.5*(
np.roll(img, +1, axis=0) + np.roll(img, -1, axis=0) +
np.roll(img, +1, axis=1) + np.roll(img, -1, axis=1) )
def returnIndex(a , value):
k = np.size(a)
for i in range(k):
if(a[i]==value):
return i
def create_model():
xavier_init = tf.contrib.layers.xavier_initializer() #Initializer for weights
zero_init = tf.zeros_initializer() #Initializer for biases
model = tf.keras.models.Sequential([
keras.layers.Conv2D( 2, [5,5], (1,1), input_shape = (200,200,1), kernel_initializer = xavier_init, bias_initializer = zero_init, kernel_regularizer=regularizers.l1(0.001), padding = 'valid', name = 'C1'),
keras.layers.MaxPool2D((2,2), (2,2), input_shape = (196,196,2),padding = 'valid', name ='P1'),
keras.layers.Conv2D(4, [5,5],(1,1), input_shape = (98,98,2), kernel_initializer = xavier_init, bias_initializer = zero_init, kernel_regularizer=regularizers.l1(0.001), name ='C2'),
keras.layers.MaxPool2D((2,2), (2,2), input_shape = (94,94,4), padding = 'valid', name ='P2'),
keras.layers.Conv2D(12, [3,3],(1,1), input_shape = (47,47,4), kernel_initializer = xavier_init, bias_initializer = zero_init, kernel_regularizer=regularizers.l1(0.001), name ='C3'),
keras.layers.Flatten(name ='fc_layer'),
keras.layers.Dense(3, activation='softmax', kernel_regularizer=regularizers.l1(0.001)),])
return model
def load_images_labels():
df = pd.read_excel('labels.xlsx', header=None, names=['id', 'label'])
total_labels = df['label']
for i in range(len(total_labels)):
total_labels[i]-=1
train_list = random.sample(range(1,total_size+1), train_size)
nontrainlist = []
test_list = []
for i in range(1,total_size+1):
if i not in train_list:
nontrainlist.append(i)
validation_list = random.sample(nontrainlist, validation_size)
for item in nontrainlist:
if(item not in validation_list):
test_list.append(item)
train_images = []
train_labels = []
validation_images = []
validation_labels = []
test_images = []
test_labels=[]
test_images_id = []
for i in range(1, total_size+1):
if i in train_list:
filename = 'image_' + str(i) + '.png'
image = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
image = cv2.resize(image, dsize=(width, height), interpolation=cv2.INTER_CUBIC)
image = cv2.blur(image,(5,5))
image = (image - np.min(image))/(np.max(image)-np.min(image))
train_images.append(image)
train_labels.append(total_labels[i-1])
elif i in validation_list:
filename = 'image_' + str(i) + '.png'
image = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
image = cv2.resize(image, dsize=(width, height), interpolation=cv2.INTER_CUBIC)
image = cv2.blur(image,(5,5))
image = (image - np.min(image))/(np.max(image)-np.min(image))
validation_images.append(image)
validation_labels.append(total_labels[i-1])
else:
filename = 'image_' + str(i) + '.png'
image = cv2.imread(filename, cv2.IMREAD_GRAYSCALE)
image = cv2.resize(image, dsize=(width, height), interpolation=cv2.INTER_CUBIC)
image = cv2.blur(image,(5,5))
image = (image - np.min(image))/(np.max(image)-np.min(image))
test_images_id.append(i)
test_images.append(image)
test_labels.append(total_labels[i-1])
train_images = np.reshape(train_images, (train_size, width, height, 1))
validation_images = np.reshape(validation_images, (validation_size, width, height, 1))
test_images = np.reshape(test_images, (test_size, width, height, 1))
train_labels = tf.keras.backend.one_hot(train_labels,3)
test_labels = tf.keras.backend.one_hot(test_labels,3)
validation_labels = tf.keras.backend.one_hot(validation_labels,3)
return train_images, train_labels, test_images, test_labels, validation_images, validation_labels
def train_model():
model = create_model()
checkpoint_path = "weights/classification.ckpt" #Check this path
checkpoint_dir = os.path.dirname(checkpoint_path)
es = tf.keras.callbacks.EarlyStopping(monitor='val_loss', verbose=1, patience = 50, mode='min', restore_best_weights=True)
cp_callback = tf.keras.callbacks.ModelCheckpoint(filepath=checkpoint_path, save_weights_only=True, verbose=0)
model.compile(loss=tf.keras.losses.CategoricalCrossentropy(),optimizer='Adam',metrics='accuracy')
model.fit(train_images, train_labels, epochs=1500, validation_data=(validation_images,validation_labels), steps_per_epoch = 4, validation_steps=1, callbacks=[es, cp_callback])
return model
def load_model():
    model = create_model()
    # checkpoint_path was undefined in this scope; reuse the path written by train_model()
    checkpoint_path = "weights/classification.ckpt"
    model.load_weights(checkpoint_path)
    return model
def test_accuracy(model):
loss,acc = model.evaluate(test_images, test_labels, verbose=2, steps = 1)
print("Accuracy: {:5.2f}%".format(100*acc))
def get_predicted_classes(model):
y_prob = model.predict(test_images)
y_classes = y_prob.argmax(axis=-1)
return y_classes
df = pd.read_excel('labels.xlsx', header=None, names=['id', 'label'])
total_labels = df['label']
for i in range(len(total_labels)):
total_labels[i]-=1
def duplex_segmentation(i):
area_frac_duplex=[]
duplex_image_id=[]
filename = 'image_' + str(test_images_id[i]) + '.png'
image = Image.open(filename).convert('F')
image = np.copy(np.reshape(np.array(image), image.size[::-1])/255.)
image = exposure.equalize_adapthist(image, clip_limit=8.3)
image = (smooth(smooth(image)))
image_copy = image
image = cv2.resize(image, dsize=(200,200), interpolation=cv2.INTER_CUBIC)
image_copy = cv2.resize(image_copy, dsize=(200,200), interpolation=cv2.INTER_CUBIC)
markers = np.zeros_like(image)
markers[image > np.median(image) - 0.10*np.std(image)] = 1
markers[image < np.median(image) - 0.10*np.std(image)] = 2
fig, (ax1) = plt.subplots(1, sharex=True, sharey=True)
elevation_map = sobel(image)
#The following implementation of watershed segmentation has been adopted from scikit's documentation example: https://scikit-image.org/docs/dev/user_guide/tutorial_segmentation.html
segmentation = morphology.watershed(elevation_map, markers)
segmentation = ndi.binary_fill_holes(segmentation - 1)
labeled_grains, _ = ndi.label(segmentation)
image_label_overlay = label2rgb(labeled_grains, image=image)
ax1.imshow(image_copy, cmap=plt.cm.gray, interpolation='nearest')
ax1.contour(segmentation, [0.5], linewidths=1.2, colors='r')
ax1.axis('off')
outfile = 'seg_duplex_' + str(test_images_id[i]) + '.png'
plt.savefig(outfile, dpi=100)
equiaxed_area_fraction_dict[test_images_id[i]] = np.sum(segmentation)/(np.shape(image)[0]*np.shape(image)[1])
def lamellar_segmentation(i):
dim = 400
filename = 'image_' + str(test_images_id[i]) + '.png'
image = Image.open(filename).convert('F')
image = np.copy(np.reshape(np.array(image), image.size[::-1])/255.)
image = exposure.equalize_hist(image)
image = smooth(image)
image = np.reshape(image, (np.shape(image)[0],np.shape(image)[1]))
gx = cv2.Sobel(np.float32(image), cv2.CV_32F, 1, 0, ksize=1)
gy = cv2.Sobel(np.float32(image), cv2.CV_32F, 0, 1, ksize=1)
mag, angle = cv2.cartToPolar(gx, gy, angleInDegrees=True)
mag_cut_off = 0.2*np.max(mag)
(n,bins,patches) = plt.hist(angle.ravel(), bins = 30)
n_sorted = sorted(n, reverse=True)
bin0 = bins[returnIndex(n, n_sorted[0])]
bin1 = bins[returnIndex(n, n_sorted[1])]
bin2 = bins[returnIndex(n, n_sorted[2])]
bin_s = np.ones(20)
for i in range(20):
bin_s[i] = bins[returnIndex(n, n_sorted[i])]
markers = np.zeros_like(angle)
markers[(angle/360 > bin1/360 - 26/360) & (angle/360 < bin1/360 + 26/360) & (mag > mag_cut_off)] = 1
markers[(angle/360 > bin2/360 - 18/360) & (angle/360 < bin2/360 + 18/360) & (mag > mag_cut_off)] = 1
markers[(angle/360 > bin0/360 - 18/360) & (angle/360 < bin0/360 + 18/360) & (mag > mag_cut_off)] = 1
markers = (smooth(smooth(markers)))
markers1 = np.where(markers > np.mean(markers), 1.0, 0.0)
lamellae_area_fraction_dict[test_images_id[i]] = np.sum(markers1)/(np.shape(image)[0]*np.shape(image)[1])
fig, (ax1) = plt.subplots(1, sharex=True, sharey=True)
ax1.imshow(image, 'gray')
ax1.imshow(markers1, alpha = 0.5)
image1 = image + markers1
ax1.imshow(image1)
#plt.colorbar()
outfile = 'seg_lamellae_' + str(test_images_id[i]) + '.png'
plt.savefig(outfile, dpi=100)
def feature_segmentation():
equiaxed_area_fraction_dict = {}
lamellae_area_fraction_dict= {}
for i in range(np.size(y_classes)):
if(y_classes[i]==0):
duplex_segmentation(i)
elif(y_classes[i]==1):
lamellar_segmentation(i)
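
# Pipeline sketch (added; the wiring is inferred, not defined in this module):
# the intended order appears to be load_images_labels() -> train_model() or
# load_model() -> get_predicted_classes() to obtain y_classes ->
# feature_segmentation(), which routes each test image to duplex_segmentation()
# or lamellar_segmentation(). Several functions read train_images,
# test_images_id and y_classes from module scope, so a driver script must
# define them there before calling.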
| 44.830918 | 210 | 0.650108 | 1,292 | 9,280 | 4.5 | 0.227554 | 0.0258 | 0.016512 | 0.013244 | 0.45012 | 0.405745 | 0.351221 | 0.338493 | 0.314757 | 0.261954 | 0 | 0.044913 | 0.186638 | 9,280 | 206 | 211 | 45.048544 | 0.725358 | 0.027371 | 0 | 0.236686 | 0 | 0 | 0.02873 | 0.003054 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.005917 | null | null | 0.005917 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
3dd84d6968111423f954120eed10897fd01c00ea | 1,355 | py | Python | CIFAR10.py | jimmyLeeMc/NeuralNetworkTesting | a6208cc8639a93ac24655495c9ace1acba21c76f | [
"MIT"
] | null | null | null | CIFAR10.py | jimmyLeeMc/NeuralNetworkTesting | a6208cc8639a93ac24655495c9ace1acba21c76f | [
"MIT"
] | null | null | null | CIFAR10.py | jimmyLeeMc/NeuralNetworkTesting | a6208cc8639a93ac24655495c9ace1acba21c76f | [
"MIT"
] | null | null | null |
#CIFAR
from tensorflow import keras
import numpy as np
import matplotlib.pyplot as plt
data = keras.datasets.cifar10
activations=[keras.activations.sigmoid, keras.activations.relu,
keras.layers.LeakyReLU(), keras.activations.tanh]
results=[0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0,0]
class_names=[0,1,2,3,4,5,6,7,8,9]
a=0
for i in range(4):
    for j in range(4):
        losssum = 0
        for k in range(6):
            (train_images, train_labels), (test_images, test_labels) = data.load_data()
            train_images = train_images/255.0
            test_images = test_images/255.0
            model = keras.Sequential([
                keras.layers.Flatten(input_shape=(32,32,3)),
                keras.layers.Dense(128, activations[i]),
                keras.layers.Dense(10, activations[j])  # output layer; candidates include tanh and softmax
            ])
            model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
            history = model.fit(train_images, train_labels,
                                validation_split=0.25, epochs=5, batch_size=16, verbose=1)
            prediction = model.predict(test_images)
            losssum = losssum + history.history['loss'][len(history.history['loss'])-1]
        results[a] = losssum / 6  # mean final-epoch loss over the 6 repeats (the original divided by 1, storing the raw sum)
        a = a + 1
print(results)
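
# Note (added): results[0..15] holds, row-major over the (hidden activation,
# output activation) pairs from `activations`, the value computed above; the
# remaining slots of the 21-element list stay zero.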
| 38.714286 | 108 | 0.591882 | 184 | 1,355 | 4.26087 | 0.434783 | 0.05102 | 0.072704 | 0.091837 | 0.026786 | 0.026786 | 0.026786 | 0.026786 | 0.026786 | 0.026786 | 0 | 0.067554 | 0.278967 | 1,355 | 34 | 109 | 39.852941 | 0.734903 | 0 | 0 | 0 | 0 | 0 | 0.038231 | 0.023238 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.103448 | null | null | 0.034483 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
3dd93f9bb15a42397c641e431fd3df72da46ab0d | 3,127 | py | Python | All_RasPy_Files/edgedetection.py | govindak-umd/Autonomous_Robotics | 5293b871c7032b40cbff7814bd773871ee2c5946 | [
"MIT"
] | 2 | 2020-05-14T11:23:30.000Z | 2020-05-25T06:30:57.000Z | All_RasPy_Files/edgedetection.py | govindak-umd/ENPM809T | 5293b871c7032b40cbff7814bd773871ee2c5946 | [
"MIT"
] | null | null | null | All_RasPy_Files/edgedetection.py | govindak-umd/ENPM809T | 5293b871c7032b40cbff7814bd773871ee2c5946 | [
"MIT"
] | 5 | 2020-06-09T22:09:15.000Z | 2022-01-31T17:11:19.000Z | # ENME 489Y: Remote Sensing
# Edge detection
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
# Define slice of an arbitrary original image
f = np.empty((0))
index = np.empty((0))
# Create intensity data, including noise
for i in range(2000):
index = np.append(index, i)
if i <= 950:
f = np.append(f, 50 + np.random.normal(0,1))
elif i > 950 and i < 1000:
f = np.append(f, 50 + (i - 950)/2 + np.random.normal(0,1))
elif i >= 1000 and i < 1050:
f = np.append(f, 75 + (i - 1000)/2 + np.random.normal(0,1))
else:
f = np.append(f, 100 + np.random.normal(0,1))
print(f.shape)
print(index.shape)
plt.figure(2)
plt.plot(index, f, 'r-')
plt.title('Slice of Original Image: f(x)')
plt.xlabel('Pixel x')
plt.ylabel('Pixel intensity f(x)')
plt.grid()
plt.show()
# Plot the gradient (first derivative) of the original signal
messy = np.gradient(f)
plt.figure(3)
plt.plot(messy, 'r-')
plt.title('Derivative of Original Image Slice: df/dx')
plt.xlabel('Pixel x')
plt.ylabel('Derivative df/dx')
plt.grid()
plt.show()
# Define Gaussian filter
mean = 0
std = 5
var = np.square(std)
x = np.arange(-20, 20, 0.1)
kernel = (1/(std*np.sqrt(2*np.pi)))*np.exp(-np.square((x-mean)/std)/2)
print(kernel.shape)
plt.figure(4)
plt.plot(x, kernel, 'b-')
plt.title('Kernel: Gaussian Filter h(x)')
plt.xlabel('Pixel x')
plt.ylabel('Kernel h(x)')
plt.grid()
plt.show()
# Convolve original image signal with Gaussian filter
smoothed = np.convolve(kernel, f, 'same')
print(smoothed.shape)
plt.figure(5)
plt.plot(smoothed, 'r-')
plt.title('Apply Gaussian Filter: Convolve h(x) * f(x)')
plt.xlabel('Pixel x')
plt.ylabel('Convolution')
plt.grid()
plt.show()
# Plot the gradient (first derivative) of the filtered signal
edges = np.gradient(smoothed)
plt.figure(6)
plt.plot(edges, 'r-')
plt.title('Derivative of Convolved Image: d/dx[ h(x) * f(x) ] ')
plt.xlabel('Pixel x')
plt.ylabel('Derivative')
plt.grid()
plt.show()
# Plot the gradient (first derivative) of the Gaussian kernel
first_diff = np.gradient(kernel)
plt.figure(7)
plt.plot(first_diff, 'b-')
plt.title('1st Derivative of Gaussian: d/dx[ h(x) ]')
plt.xlabel('Pixel x')
plt.ylabel('Derivative')
plt.grid()
plt.show()
# Convolve original image signal with Gaussian filter
smoothed = np.convolve(first_diff, f, 'same')
print(smoothed.shape)
plt.figure(8)
plt.plot(smoothed, 'r-')
plt.title('Apply Gaussian Filter: Convolve d/dx[ h(x) ] * f(x)')
plt.xlabel('Pixel x')
plt.ylabel('Convolution')
plt.grid()
plt.show()
# Plot the second derivative of the Gaussian kernel: the Laplacian operator
laplacian = np.gradient(first_diff)
plt.figure(9)
plt.plot(laplacian, 'b-')
plt.title('2nd Derivative of Gaussian: Laplacian Operator d^2/dx^2[ h(x) ]')
plt.xlabel('Pixel x')
plt.ylabel('Derivative')
plt.grid()
plt.show()
# Convolve original image signal with Gaussian filter
smoothed = np.convolve(laplacian, f, 'same')
print(smoothed.shape)
plt.figure(10)
plt.plot(smoothed, 'r-')
plt.title('Apply Laplacian Operator: Convolve d^2/dx^2[ h(x) ] * f(x)')
plt.xlabel('Pixel x')
plt.ylabel('Convolution')
plt.grid()
plt.show()
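
# Summary (added): the script demonstrates the derivative theorem of convolution,
# d/dx[ h(x) * f(x) ] = (dh/dx) * f(x), so edges can be located by convolving the
# signal directly with the first derivative of the Gaussian (response peaks) or
# with the Laplacian of Gaussian (zero-crossings).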
| 23.689394 | 76 | 0.68692 | 525 | 3,127 | 4.08381 | 0.20381 | 0.035448 | 0.058769 | 0.062966 | 0.539179 | 0.479944 | 0.466884 | 0.373134 | 0.348881 | 0.348881 | 0 | 0.029817 | 0.141989 | 3,127 | 131 | 77 | 23.870229 | 0.769288 | 0.177806 | 0 | 0.40625 | 0 | 0.010417 | 0.237388 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.03125 | null | null | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
3dda1806de2d35a90208c505c2c72da1466cf4a9 | 1,850 | py | Python | alipay/aop/api/domain/AlipayCommerceReceiptBatchqueryModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayCommerceReceiptBatchqueryModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | alipay/aop/api/domain/AlipayCommerceReceiptBatchqueryModel.py | antopen/alipay-sdk-python-all | 8e51c54409b9452f8d46c7bb10eea7c8f7e8d30c | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import json
from alipay.aop.api.constant.ParamConstants import *
class AlipayCommerceReceiptBatchqueryModel(object):
def __init__(self):
self._level = None
self._out_biz_no_list = None
@property
def level(self):
return self._level
@level.setter
def level(self, value):
self._level = value
@property
def out_biz_no_list(self):
return self._out_biz_no_list
@out_biz_no_list.setter
def out_biz_no_list(self, value):
if isinstance(value, list):
self._out_biz_no_list = list()
for i in value:
self._out_biz_no_list.append(i)
def to_alipay_dict(self):
params = dict()
if self.level:
if hasattr(self.level, 'to_alipay_dict'):
params['level'] = self.level.to_alipay_dict()
else:
params['level'] = self.level
if self.out_biz_no_list:
if isinstance(self.out_biz_no_list, list):
for i in range(0, len(self.out_biz_no_list)):
element = self.out_biz_no_list[i]
if hasattr(element, 'to_alipay_dict'):
self.out_biz_no_list[i] = element.to_alipay_dict()
if hasattr(self.out_biz_no_list, 'to_alipay_dict'):
params['out_biz_no_list'] = self.out_biz_no_list.to_alipay_dict()
else:
params['out_biz_no_list'] = self.out_biz_no_list
return params
@staticmethod
def from_alipay_dict(d):
if not d:
return None
o = AlipayCommerceReceiptBatchqueryModel()
if 'level' in d:
o.level = d['level']
if 'out_biz_no_list' in d:
o.out_biz_no_list = d['out_biz_no_list']
return o
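
# Illustrative round-trip (added; the field values are assumptions):
#   m = AlipayCommerceReceiptBatchqueryModel()
#   m.level = "M2"
#   m.out_biz_no_list = ["20180820001", "20180820002"]
#   d = m.to_alipay_dict()
#   assert AlipayCommerceReceiptBatchqueryModel.from_alipay_dict(d).level == "M2"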
| 28.90625 | 81 | 0.585405 | 246 | 1,850 | 4.04878 | 0.203252 | 0.120482 | 0.160643 | 0.240964 | 0.39759 | 0.236948 | 0.160643 | 0.160643 | 0.120482 | 0.068273 | 0 | 0.001587 | 0.318919 | 1,850 | 63 | 82 | 29.365079 | 0.788889 | 0.022703 | 0 | 0.081633 | 0 | 0 | 0.067627 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.040816 | 0.040816 | 0.306122 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
3ddaf9735b2cb2b79bcc96e4e4c161028c28ae19 | 2,632 | py | Python | tests/test_timeconversion.py | FObersteiner/pyFuppes | 2a8c6e210855598dbf4fb491533bf22706340c9a | [
"MIT"
] | 1 | 2020-06-02T08:02:36.000Z | 2020-06-02T08:02:36.000Z | tests/test_timeconversion.py | FObersteiner/pyFuppes | 2a8c6e210855598dbf4fb491533bf22706340c9a | [
"MIT"
] | 3 | 2022-03-04T11:43:19.000Z | 2022-03-25T00:26:46.000Z | tests/test_timeconversion.py | FObersteiner/pyFuppes | 2a8c6e210855598dbf4fb491533bf22706340c9a | [
"MIT"
] | null | null | null | import unittest
from datetime import datetime, timezone
from pyfuppes import timeconversion
class TestTimeconv(unittest.TestCase):
@classmethod
def setUpClass(cls):
# to run before all tests
print("testing pyfuppes.timeconversion...")
@classmethod
def tearDownClass(cls):
# to run after all tests
pass
def setUp(self):
# to run before each test
pass
def tearDown(self):
# to run after each test
pass
def test_dtstr_2_mdns(self):
# no timezone
t = ["2012-01-01T01:00:00", "2012-01-01T02:00:00"]
f = "%Y-%m-%dT%H:%M:%S"
result = list(map(int, timeconversion.dtstr_2_mdns(t, f)))
self.assertEqual(result, [3600, 7200])
# with timezone
t = ["2012-01-01T01:00:00+02:00", "2012-01-01T02:00:00+02:00"]
f = "%Y-%m-%dT%H:%M:%S%z"
result = list(map(int, timeconversion.dtstr_2_mdns(t, f)))
self.assertEqual(result, [3600, 7200])
# zero case
t = "2012-01-01T00:00:00+02:00"
result = timeconversion.dtstr_2_mdns(t, f)
self.assertEqual(int(result), 0)
def test_dtobj_2_mdns(self):
t = [datetime(2000, 1, 1, 1), datetime(2000, 1, 1, 2)]
result = list(map(int, timeconversion.dtobj_2_mdns(t)))
self.assertEqual(result, [3600, 7200])
t = [
datetime(2000, 1, 1, 1, tzinfo=timezone.utc),
datetime(2000, 1, 1, 2, tzinfo=timezone.utc),
]
result = list(map(int, timeconversion.dtobj_2_mdns(t)))
self.assertEqual(result, [3600, 7200])
def test_posix_2_mdns(self):
t = [3600, 7200, 10800]
result = list(map(int, timeconversion.posix_2_mdns(t)))
self.assertEqual(result, t)
def test_mdns_2_dtobj(self):
t = [3600, 10800, 864000]
ref = datetime(2020, 5, 15, tzinfo=timezone.utc)
result = list(map(int, timeconversion.mdns_2_dtobj(t, ref, posix=True)))
self.assertEqual(result, [1589504400, 1589511600, 1590364800])
def test_daysSince_2_dtobj(self):
t0, off = datetime(2020, 5, 10), 10.5
result = timeconversion.daysSince_2_dtobj(t0, off)
self.assertEqual(result.hour, 12)
self.assertEqual(result.day, 20)
def test_dtstr_2_posix(self):
result = timeconversion.dtstr_2_posix("2020-05-15", "%Y-%m-%d")
self.assertAlmostEqual(
result, datetime(2020, 5, 15, tzinfo=timezone.utc).timestamp()
)
if __name__ == "__main__":
unittest.main()
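
# Added note: run with e.g. `python -m unittest tests.test_timeconversion` from
# the repository root (module path assumed from the tests/ layout), or `pytest`.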
| 32.9 | 81 | 0.587006 | 344 | 2,632 | 4.363372 | 0.261628 | 0.02998 | 0.111925 | 0.063957 | 0.433711 | 0.393738 | 0.334444 | 0.271153 | 0.187875 | 0.187875 | 0 | 0.134179 | 0.280775 | 2,632 | 79 | 82 | 33.316456 | 0.658743 | 0.049012 | 0 | 0.22807 | 0 | 0 | 0.086507 | 0.041805 | 0 | 0 | 0 | 0 | 0.175439 | 1 | 0.175439 | false | 0.052632 | 0.052632 | 0 | 0.245614 | 0.017544 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
3ddd545e8ac1636ac0a7d92a17cca391f2e23803 | 7,468 | py | Python | tool/powermon.py | virajpadte/Power_monitoring_JetsonTX1 | 3f337adb16ce09072d69147b705a0c705b3ad53c | [
"MIT"
] | null | null | null | tool/powermon.py | virajpadte/Power_monitoring_JetsonTX1 | 3f337adb16ce09072d69147b705a0c705b3ad53c | [
"MIT"
] | null | null | null | tool/powermon.py | virajpadte/Power_monitoring_JetsonTX1 | 3f337adb16ce09072d69147b705a0c705b3ad53c | [
"MIT"
] | null | null | null | import sys
import glob
import serial
import ttk
import tkFileDialog
from Tkinter import *
#for plotting we need these:
import matplotlib
matplotlib.use("TkAgg")
import matplotlib.pyplot as plt
from drawnow import *
class MainView:
#CLASS VARIABLES:
closing_status = False
powerW = []
def __init__(self, master):
self.master = master
mainframe = ttk.Frame(self.master, padding="3 3 12 12")
mainframe.grid(column=0, row=0, sticky=(N, W, E, S))
mainframe.columnconfigure(0, weight=1)
mainframe.rowconfigure(0, weight=1)
port = StringVar()
port.set(" ") # initial value
ttk.Label(mainframe, text="Select Port").grid(column=1, row=1, sticky=W)
port_list = self.serial_ports()
port_list.insert(0," ")
print(port_list)
port = StringVar(mainframe)
port.set(port_list[1]) # default value
dropdown = ttk.OptionMenu(mainframe,port,*port_list)
dropdown.configure(width=20)
dropdown.grid(column=2, row=1, sticky=W)
ttk.Button(mainframe, text="Realtime Plot", command=lambda: self.real_time_plotting(port)).grid(column=1, row=2, sticky=W)
ttk.Button(mainframe, text="Record Session", command=lambda: self.record_session(port)).grid(column=2, row=2, sticky=W)
for child in mainframe.winfo_children(): child.grid_configure(padx=5, pady=5)
def record_session(self,port):
print("record_session")
port = port.get()
print("record port",port)
self.newWindow = Toplevel(root)
self.app = record_session(self.newWindow,port)
def serial_ports(self):
if sys.platform.startswith('win'):
ports = ['COM%s' % (i + 1) for i in range(256)]
elif sys.platform.startswith('linux') or sys.platform.startswith('cygwin'):
# this excludes your current terminal "/dev/tty"
ports = glob.glob('/dev/tty[A-Za-z]*')
elif sys.platform.startswith('darwin'):
ports = glob.glob('/dev/tty.*')
else:
raise EnvironmentError('Unsupported platform')
        return ports
def handle_close(self):
print('Closed Figure!')
self.closing_status = True
def real_time_plotting(self,port):
cnt = 0
window_size = 20
connected = False
port = port.get()
print("real_time_plotting")
print("realtime data port", port)
try:
print("trying to connect to device....")
ser = serial.Serial(port, 115200)
except:
print "Failed to connect on", port
# ## loop until the arduino tells us it is ready
while not connected:
serin = ser.read()
connected = True
try:
while not self.closing_status: # While loop that loops forever
if ser.inWaiting(): # Wait here until there is data
power = ser.readline() # read the line of text from the serial port
print(power)
self.powerW.append(power) # Build our tempF array by appending temp readings
drawnow(self.makeFig) # Call drawnow to update our live graph
plt.pause(.000001) # Pause Briefly. Important to keep drawnow from crashing
cnt = cnt + 1
if (cnt > window_size): # If you have 50 or more points, delete the first one from the array
self.powerW.pop(0) # This allows us to just see the last 50 data points
print("closing port")
ser.close()
except KeyboardInterrupt:
print("closing port")
ser.close()
def makeFig(self): # Create a function that makes our desired plot
# configure the plot
plt.ion() # Tell matplotlib you want interactive mode to plot live data
plt.rcParams['toolbar'] = 'None'
# create a fig
#fig = plt.figure(0)
#fig.canvas.set_window_title('Window 3D')
#fig.canvas.mpl_connect('close_event', self.handle_close())
plt.ylim(0, 15) # Set y min and max values
plt.title('Plotting power consumption') # Plot the title
plt.grid(True) # Turn the grid on
plt.ylabel('Power (Watts)') # Set ylabels
plt.plot(self.powerW, 'ro-', label='Power W') # plot the temperature
plt.legend(loc='upper right') # plot the legend
class record_session:
#class variable:
path = ""
def __init__(self, master,port):
self.master = master
self.master.title("Session parameters")
mainframe = ttk.Frame(self.master, padding="3 3 12 12")
mainframe.grid(column=0, row=0, sticky=(N, W, E, S))
mainframe.columnconfigure(0, weight=1)
mainframe.rowconfigure(0, weight=1)
print("passed port", port)
duration = StringVar()
autoplot = IntVar()
autoplot.set(0) # initial value
ttk.Button(mainframe, text="Select a location to store session.csv file", command=self.select_dir).grid(column=1, row=1, sticky=W)
ttk.Label(mainframe, text="Record Duration in seconds:").grid(column=1, row=2, sticky=W)
duration_entry_box = ttk.Entry(mainframe, width=5, textvariable=duration)
duration_entry_box.grid(column=2, row=2, sticky=W)
#ttk.Checkbutton(mainframe, text="Auto Plotting enabled", variable=autoplot).grid(column=1, row=3, sticky=W)
ttk.Button(mainframe, text="Start recording", command=lambda: self.record(port,autoplot)).grid(column=1, row=4, sticky=W)
for child in mainframe.winfo_children(): child.grid_configure(padx=5, pady=5)
def select_dir(self):
global path
print("select dir")
path = tkFileDialog.askdirectory()
#append file name to the path
if len(path):
path = path + "/session.csv"
print(path)
def record(self,port,autoplot):
global path
print("recording")
autoplot_status = autoplot.get()
print("autoplot_status", autoplot_status)
connected = False
## establish connection to the serial port that your arduino
## is connected to.
try:
print("trying to connect to device....")
ser = serial.Serial(port, 115200)
except:
print "Failed to connect on", port
# ## loop until the arduino tells us it is ready
while not connected:
            serin = ser.read()  # the original passed `self` as the byte-count argument
connected = True
#open text file to store the power values
text_file = open(path, 'w')
#read serial data from arduino and
#write it to the text file 'Data.csv'
try:
while True:
if ser.inWaiting():
# Read a line and convert it from b'xxx\r\n' to xxx
line = ser.readline()
print(line)
if line: # If it isn't a blank line
text_file.write(line)
text_file.close()
except KeyboardInterrupt:
print("closing port")
ser.close()
if __name__ == '__main__':
root = Tk()
root.title("Power Monitoring tool")
main = MainView(root)
root.mainloop()
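
# Hardware note (added): this GUI expects a microcontroller (e.g. an
# Arduino-style board) streaming one power reading in watts per line over
# serial at 115200 baud; the port and the session.csv location are chosen in the UI.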
| 35.561905 | 138 | 0.595742 | 934 | 7,468 | 4.695931 | 0.293362 | 0.02508 | 0.015048 | 0.019152 | 0.28591 | 0.258778 | 0.240538 | 0.212038 | 0.188326 | 0.188326 | 0 | 0.016787 | 0.298072 | 7,468 | 209 | 139 | 35.732057 | 0.819916 | 0.181441 | 0 | 0.324675 | 0 | 0 | 0.102687 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.006494 | 0.058442 | null | null | 0.136364 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
3de3ed318e614e22c2b9f52348133eddba3a0fee | 2,424 | py | Python | messages.py | runjak/hoodedFigure | 539c9839dd47bc181e592bf4a61eaab361b8d316 | [
"MIT"
] | null | null | null | messages.py | runjak/hoodedFigure | 539c9839dd47bc181e592bf4a61eaab361b8d316 | [
"MIT"
] | null | null | null | messages.py | runjak/hoodedFigure | 539c9839dd47bc181e592bf4a61eaab361b8d316 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import random
sentences = [
"Going into the #dogpark is not allowed, @%s.",
"That's my favourite #dogpark @%s - no one is allowed to go into it!",
"That #dogpark you mention is forbidden! Please don't, @%s",
"The #dogpark should be secured with electrified barbwire. "
"Don't you agree, @%s?",
"Just make sure NOT TO ENTER the #dogpark @%s.",
"Why would you mention such nasty things like a #dogpark @%s?",
"Remember to share your #dogpark experience "
"so others may also survive @%s!",
"Hi @%s! City council discourages the term #dogpark for security reasons.",
"You are not a dog, @%s! Please don't think of the #dogpark.",
"@%s in the #dogpark all dogs have 8 legs. Scary.",
"Please return to safety @%s! Don't linger in the #dogpark.",
"Hey @%s… I got notice that the #dogpark "
"will get fortified with spikes and lava soon.",
"Beware @%s. Today the #dogpark is full of deer. "
"Dangerous with their sharp claws and many heads.",
"There is a time and place for everything @%s. "
"But it's not the #dogpark. An acid pit is much saver.",
"@%s do you know that the #dogpark is actually a pond of molten lava?",
"@%s beware - flesh entering the #dogpark without correct papers "
"will actually turn into a liquid.",
"Only truely evil spirits may enter the #dogpark. Are you one of us, @%s?",
"I heard a five headed dragon near the #dogpark might try to dine on @%s.",
"@%s and I are sure that the #dogpark is protected by a smiling god "
"that replaces your blood with liquid led.",
"In the #dogpark everyone becomes a stick in an eternal play of fetch. "
"Be careful @%s.",
"You may eat your own dogfood - but please: "
"NEVER walk your own #dogpark, @%s.",
"There is a non-zero chance that thinking the word #dogpark "
"replaces your neurons with ants, @%s.",
"The #dogpark will not harm you, @%s. "
"Provided you have wings. And antlers.",
]
def replyDictFromTweet(status):
msg = random.choice(sentences) % status.user.screen_name
if len(msg) > 140:
print('Cannot send message:', msg)
return None
statusParams = {
'status': msg,
'in_reply_to_status_id': status.id
}
if status.place:
statusParams['place_id'] = status.place.id
return statusParams
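# A quick smoke test (a sketch; the stub below stands in for a tweepy-style
# status object, whose attribute names are assumed from the usage above):
if __name__ == '__main__':
    stub = type('Stub', (object,), {})()
    stub.id = 1
    stub.place = None
    stub.user = type('Stub', (object,), {})()
    stub.user.screen_name = 'alice'
    print(replyDictFromTweet(stub))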
| 44.072727 | 79 | 0.636139 | 365 | 2,424 | 4.216438 | 0.50137 | 0.103964 | 0.031189 | 0.020793 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002762 | 0.2533 | 2,424 | 54 | 80 | 44.888889 | 0.845856 | 0.008663 | 0 | 0 | 0 | 0 | 0.72803 | 0.008746 | 0 | 0 | 0 | 0 | 0 | 1 | 0.020408 | false | 0 | 0.020408 | 0 | 0.081633 | 0.020408 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
3deea7c2a0399d6a1677f78e7cc36afe63de0fc2 | 1,780 | py | Python | keystroke/migrations/0001_initial.py | jstavanja/quiz-biometrics-api | 75e0db348668b14a85f94261aac092ae2d5fa9c6 | [
"MIT"
] | null | null | null | keystroke/migrations/0001_initial.py | jstavanja/quiz-biometrics-api | 75e0db348668b14a85f94261aac092ae2d5fa9c6 | [
"MIT"
] | null | null | null | keystroke/migrations/0001_initial.py | jstavanja/quiz-biometrics-api | 75e0db348668b14a85f94261aac092ae2d5fa9c6 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.15 on 2018-08-20 16:31
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='KeystrokeTestSession',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('timing_matrix', models.CharField(max_length=5000)),
],
),
migrations.CreateModel(
name='KeystrokeTestType',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('input_text', models.CharField(max_length=5000)),
('repetitions', models.IntegerField()),
],
),
migrations.CreateModel(
name='Student',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('moodle_username', models.CharField(max_length=250)),
('path_to_image', models.CharField(max_length=250)),
],
),
migrations.AddField(
model_name='keystroketestsession',
name='student',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='keystroke.Student'),
),
migrations.AddField(
model_name='keystroketestsession',
name='test_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='keystroke.KeystrokeTestType'),
),
]
| 34.901961 | 115 | 0.591573 | 169 | 1,780 | 6.065089 | 0.414201 | 0.03122 | 0.070244 | 0.093659 | 0.572683 | 0.465366 | 0.365854 | 0.365854 | 0.365854 | 0.365854 | 0 | 0.025059 | 0.282584 | 1,780 | 50 | 116 | 35.6 | 0.777604 | 0.038764 | 0 | 0.5 | 1 | 0 | 0.127635 | 0.015808 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.071429 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
3df0af937b9149db956b0d8ec02537a403587abe | 19,082 | py | Python | src/oci/log_analytics/models/query_details.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 249 | 2017-09-11T22:06:05.000Z | 2022-03-04T17:09:29.000Z | src/oci/log_analytics/models/query_details.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 228 | 2017-09-11T23:07:26.000Z | 2022-03-23T10:58:50.000Z | src/oci/log_analytics/models/query_details.py | Manny27nyc/oci-python-sdk | de60b04e07a99826254f7255e992f41772902df7 | [
"Apache-2.0",
"BSD-3-Clause"
] | 224 | 2017-09-27T07:32:43.000Z | 2022-03-25T16:55:42.000Z | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class QueryDetails(object):
"""
Input arguments for running a log analytics query. If the request is set to run in asynchronous mode
then shouldIncludeColumns and shouldIncludeFields can be overwritten when retrieving the results.
"""
#: A constant which can be used with the sub_system property of a QueryDetails.
#: This constant has a value of "LOG"
SUB_SYSTEM_LOG = "LOG"
#: A constant which can be used with the async_mode property of a QueryDetails.
#: This constant has a value of "FOREGROUND"
ASYNC_MODE_FOREGROUND = "FOREGROUND"
#: A constant which can be used with the async_mode property of a QueryDetails.
#: This constant has a value of "BACKGROUND"
ASYNC_MODE_BACKGROUND = "BACKGROUND"
def __init__(self, **kwargs):
"""
Initializes a new QueryDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param compartment_id:
The value to assign to the compartment_id property of this QueryDetails.
:type compartment_id: str
:param compartment_id_in_subtree:
The value to assign to the compartment_id_in_subtree property of this QueryDetails.
:type compartment_id_in_subtree: bool
:param saved_search_id:
The value to assign to the saved_search_id property of this QueryDetails.
:type saved_search_id: str
:param query_string:
The value to assign to the query_string property of this QueryDetails.
:type query_string: str
:param sub_system:
The value to assign to the sub_system property of this QueryDetails.
Allowed values for this property are: "LOG"
:type sub_system: str
:param max_total_count:
The value to assign to the max_total_count property of this QueryDetails.
:type max_total_count: int
:param time_filter:
The value to assign to the time_filter property of this QueryDetails.
:type time_filter: oci.log_analytics.models.TimeRange
:param scope_filters:
The value to assign to the scope_filters property of this QueryDetails.
:type scope_filters: list[oci.log_analytics.models.ScopeFilter]
:param query_timeout_in_seconds:
The value to assign to the query_timeout_in_seconds property of this QueryDetails.
:type query_timeout_in_seconds: int
:param should_run_async:
The value to assign to the should_run_async property of this QueryDetails.
:type should_run_async: bool
:param async_mode:
The value to assign to the async_mode property of this QueryDetails.
Allowed values for this property are: "FOREGROUND", "BACKGROUND"
:type async_mode: str
:param should_include_total_count:
The value to assign to the should_include_total_count property of this QueryDetails.
:type should_include_total_count: bool
:param should_include_columns:
The value to assign to the should_include_columns property of this QueryDetails.
:type should_include_columns: bool
:param should_include_fields:
The value to assign to the should_include_fields property of this QueryDetails.
:type should_include_fields: bool
:param should_use_acceleration:
The value to assign to the should_use_acceleration property of this QueryDetails.
:type should_use_acceleration: bool
"""
self.swagger_types = {
'compartment_id': 'str',
'compartment_id_in_subtree': 'bool',
'saved_search_id': 'str',
'query_string': 'str',
'sub_system': 'str',
'max_total_count': 'int',
'time_filter': 'TimeRange',
'scope_filters': 'list[ScopeFilter]',
'query_timeout_in_seconds': 'int',
'should_run_async': 'bool',
'async_mode': 'str',
'should_include_total_count': 'bool',
'should_include_columns': 'bool',
'should_include_fields': 'bool',
'should_use_acceleration': 'bool'
}
self.attribute_map = {
'compartment_id': 'compartmentId',
'compartment_id_in_subtree': 'compartmentIdInSubtree',
'saved_search_id': 'savedSearchId',
'query_string': 'queryString',
'sub_system': 'subSystem',
'max_total_count': 'maxTotalCount',
'time_filter': 'timeFilter',
'scope_filters': 'scopeFilters',
'query_timeout_in_seconds': 'queryTimeoutInSeconds',
'should_run_async': 'shouldRunAsync',
'async_mode': 'asyncMode',
'should_include_total_count': 'shouldIncludeTotalCount',
'should_include_columns': 'shouldIncludeColumns',
'should_include_fields': 'shouldIncludeFields',
'should_use_acceleration': 'shouldUseAcceleration'
}
self._compartment_id = None
self._compartment_id_in_subtree = None
self._saved_search_id = None
self._query_string = None
self._sub_system = None
self._max_total_count = None
self._time_filter = None
self._scope_filters = None
self._query_timeout_in_seconds = None
self._should_run_async = None
self._async_mode = None
self._should_include_total_count = None
self._should_include_columns = None
self._should_include_fields = None
self._should_use_acceleration = None
@property
def compartment_id(self):
"""
**[Required]** Gets the compartment_id of this QueryDetails.
Compartment Identifier `OCID`__.
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:return: The compartment_id of this QueryDetails.
:rtype: str
"""
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
"""
Sets the compartment_id of this QueryDetails.
Compartment Identifier `OCID`__.
__ https://docs.cloud.oracle.com/iaas/Content/General/Concepts/identifiers.htm
:param compartment_id: The compartment_id of this QueryDetails.
:type: str
"""
self._compartment_id = compartment_id
@property
def compartment_id_in_subtree(self):
"""
Gets the compartment_id_in_subtree of this QueryDetails.
Flag to search all child compartments of the compartment Id specified in the compartmentId query parameter.
:return: The compartment_id_in_subtree of this QueryDetails.
:rtype: bool
"""
return self._compartment_id_in_subtree
@compartment_id_in_subtree.setter
def compartment_id_in_subtree(self, compartment_id_in_subtree):
"""
Sets the compartment_id_in_subtree of this QueryDetails.
Flag to search all child compartments of the compartment Id specified in the compartmentId query parameter.
:param compartment_id_in_subtree: The compartment_id_in_subtree of this QueryDetails.
:type: bool
"""
self._compartment_id_in_subtree = compartment_id_in_subtree
@property
def saved_search_id(self):
"""
Gets the saved_search_id of this QueryDetails.
Saved search OCID for this query if known.
:return: The saved_search_id of this QueryDetails.
:rtype: str
"""
return self._saved_search_id
@saved_search_id.setter
def saved_search_id(self, saved_search_id):
"""
Sets the saved_search_id of this QueryDetails.
Saved search OCID for this query if known.
:param saved_search_id: The saved_search_id of this QueryDetails.
:type: str
"""
self._saved_search_id = saved_search_id
@property
def query_string(self):
"""
**[Required]** Gets the query_string of this QueryDetails.
Query to perform. Must conform to the Logging Analytics query language syntax. Syntax errors will be returned if present.
:return: The query_string of this QueryDetails.
:rtype: str
"""
return self._query_string
@query_string.setter
def query_string(self, query_string):
"""
Sets the query_string of this QueryDetails.
Query to perform. Must conform to the Logging Analytics query language syntax. Syntax errors will be returned if present.
:param query_string: The query_string of this QueryDetails.
:type: str
"""
self._query_string = query_string
@property
def sub_system(self):
"""
**[Required]** Gets the sub_system of this QueryDetails.
Default subsystem to qualify fields with in the queryString if not specified.
Allowed values for this property are: "LOG"
:return: The sub_system of this QueryDetails.
:rtype: str
"""
return self._sub_system
@sub_system.setter
def sub_system(self, sub_system):
"""
Sets the sub_system of this QueryDetails.
Default subsystem to qualify fields with in the queryString if not specified.
:param sub_system: The sub_system of this QueryDetails.
:type: str
"""
allowed_values = ["LOG"]
if not value_allowed_none_or_none_sentinel(sub_system, allowed_values):
raise ValueError(
"Invalid value for `sub_system`, must be None or one of {0}"
.format(allowed_values)
)
self._sub_system = sub_system
@property
def max_total_count(self):
"""
Gets the max_total_count of this QueryDetails.
Maximum number of results to count. Note a maximum of 2001 will be enforced; that is, actualMaxTotalCountUsed = Math.min(maxTotalCount, 2001).
:return: The max_total_count of this QueryDetails.
:rtype: int
"""
return self._max_total_count
@max_total_count.setter
def max_total_count(self, max_total_count):
"""
Sets the max_total_count of this QueryDetails.
Maximum number of results to count. Note a maximum of 2001 will be enforced; that is, actualMaxTotalCountUsed = Math.min(maxTotalCount, 2001).
:param max_total_count: The max_total_count of this QueryDetails.
:type: int
"""
self._max_total_count = max_total_count
@property
def time_filter(self):
"""
Gets the time_filter of this QueryDetails.
:return: The time_filter of this QueryDetails.
:rtype: oci.log_analytics.models.TimeRange
"""
return self._time_filter
@time_filter.setter
def time_filter(self, time_filter):
"""
Sets the time_filter of this QueryDetails.
:param time_filter: The time_filter of this QueryDetails.
:type: oci.log_analytics.models.TimeRange
"""
self._time_filter = time_filter
@property
def scope_filters(self):
"""
Gets the scope_filters of this QueryDetails.
List of filters to be applied when the query executes. More than one filter per field is not permitted.
:return: The scope_filters of this QueryDetails.
:rtype: list[oci.log_analytics.models.ScopeFilter]
"""
return self._scope_filters
@scope_filters.setter
def scope_filters(self, scope_filters):
"""
Sets the scope_filters of this QueryDetails.
List of filters to be applied when the query executes. More than one filter per field is not permitted.
:param scope_filters: The scope_filters of this QueryDetails.
:type: list[oci.log_analytics.models.ScopeFilter]
"""
self._scope_filters = scope_filters
@property
def query_timeout_in_seconds(self):
"""
Gets the query_timeout_in_seconds of this QueryDetails.
Amount of time, in seconds, allowed for a query to execute. If this time expires before the query is complete, any partial results will be returned.
:return: The query_timeout_in_seconds of this QueryDetails.
:rtype: int
"""
return self._query_timeout_in_seconds
@query_timeout_in_seconds.setter
def query_timeout_in_seconds(self, query_timeout_in_seconds):
"""
Sets the query_timeout_in_seconds of this QueryDetails.
Amount of time, in seconds, allowed for a query to execute. If this time expires before the query is complete, any partial results will be returned.
:param query_timeout_in_seconds: The query_timeout_in_seconds of this QueryDetails.
:type: int
"""
self._query_timeout_in_seconds = query_timeout_in_seconds
@property
def should_run_async(self):
"""
Gets the should_run_async of this QueryDetails.
Option to run the query asynchronously. This will lead to a LogAnalyticsQueryJobWorkRequest being submitted and the {workRequestId} will be returned to use for fetching the results.
:return: The should_run_async of this QueryDetails.
:rtype: bool
"""
return self._should_run_async
@should_run_async.setter
def should_run_async(self, should_run_async):
"""
Sets the should_run_async of this QueryDetails.
Option to run the query asynchronously. This will lead to a LogAnalyticsQueryJobWorkRequest being submitted and the {workRequestId} will be returned to use for fetching the results.
:param should_run_async: The should_run_async of this QueryDetails.
:type: bool
"""
self._should_run_async = should_run_async
@property
def async_mode(self):
"""
Gets the async_mode of this QueryDetails.
Execution mode for the query if running asynchronously i.e (shouldRunAsync is set to true).
Allowed values for this property are: "FOREGROUND", "BACKGROUND"
:return: The async_mode of this QueryDetails.
:rtype: str
"""
return self._async_mode
@async_mode.setter
def async_mode(self, async_mode):
"""
Sets the async_mode of this QueryDetails.
Execution mode for the query if running asynchronously i.e (shouldRunAsync is set to true).
:param async_mode: The async_mode of this QueryDetails.
:type: str
"""
allowed_values = ["FOREGROUND", "BACKGROUND"]
if not value_allowed_none_or_none_sentinel(async_mode, allowed_values):
raise ValueError(
"Invalid value for `async_mode`, must be None or one of {0}"
.format(allowed_values)
)
self._async_mode = async_mode
@property
def should_include_total_count(self):
"""
Gets the should_include_total_count of this QueryDetails.
Include the total number of results from the query. Note, this value will always be equal to or less than maxTotalCount.
:return: The should_include_total_count of this QueryDetails.
:rtype: bool
"""
return self._should_include_total_count
@should_include_total_count.setter
def should_include_total_count(self, should_include_total_count):
"""
Sets the should_include_total_count of this QueryDetails.
Include the total number of results from the query. Note, this value will always be equal to or less than maxTotalCount.
:param should_include_total_count: The should_include_total_count of this QueryDetails.
:type: bool
"""
self._should_include_total_count = should_include_total_count
@property
def should_include_columns(self):
"""
Gets the should_include_columns of this QueryDetails.
Include columns in response
:return: The should_include_columns of this QueryDetails.
:rtype: bool
"""
return self._should_include_columns
@should_include_columns.setter
def should_include_columns(self, should_include_columns):
"""
Sets the should_include_columns of this QueryDetails.
Include columns in response
:param should_include_columns: The should_include_columns of this QueryDetails.
:type: bool
"""
self._should_include_columns = should_include_columns
@property
def should_include_fields(self):
"""
Gets the should_include_fields of this QueryDetails.
Include fields in response
:return: The should_include_fields of this QueryDetails.
:rtype: bool
"""
return self._should_include_fields
@should_include_fields.setter
def should_include_fields(self, should_include_fields):
"""
Sets the should_include_fields of this QueryDetails.
Include fields in response
:param should_include_fields: The should_include_fields of this QueryDetails.
:type: bool
"""
self._should_include_fields = should_include_fields
@property
def should_use_acceleration(self):
"""
Gets the should_use_acceleration of this QueryDetails.
Controls if query should ignore pre-calculated results if available and only use raw data. If set and no acceleration data is found it will fall back to raw data.
:return: The should_use_acceleration of this QueryDetails.
:rtype: bool
"""
return self._should_use_acceleration
@should_use_acceleration.setter
def should_use_acceleration(self, should_use_acceleration):
"""
Sets the should_use_acceleration of this QueryDetails.
Controls if query should ignore pre-calculated results if available and only use raw data. If set and no acceleration data is found it will fall back to raw data.
:param should_use_acceleration: The should_use_acceleration of this QueryDetails.
:type: bool
"""
self._should_use_acceleration = should_use_acceleration
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
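# Example construction (a hypothetical sketch; the OCID and query string
# below are illustrative placeholders, not real values):
#
#   details = QueryDetails(
#       compartment_id='ocid1.compartment.oc1..example',
#       query_string="'Log Source' = 'Linux Syslog Logs' | stats count",
#       sub_system='LOG',
#       max_total_count=2000)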
| 35.600746 | 245 | 0.672466 | 2,359 | 19,082 | 5.177618 | 0.110216 | 0.037334 | 0.110529 | 0.050434 | 0.74431 | 0.628623 | 0.540282 | 0.419109 | 0.347225 | 0.312756 | 0 | 0.00257 | 0.265853 | 19,082 | 535 | 246 | 35.66729 | 0.869298 | 0.539094 | 0 | 0.113772 | 0 | 0 | 0.140941 | 0.053231 | 0 | 0 | 0 | 0 | 0 | 1 | 0.203593 | false | 0 | 0.011976 | 0.011976 | 0.353293 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
3df0f23a4341291aa332900c1b4adf982ac1f716 | 2,740 | py | Python | moist.py | phiriv/moisture_sensor | 1e6a5d967ab639c67bae03847bd58ede31bde564 | [
"MIT"
] | null | null | null | moist.py | phiriv/moisture_sensor | 1e6a5d967ab639c67bae03847bd58ede31bde564 | [
"MIT"
] | null | null | null | moist.py | phiriv/moisture_sensor | 1e6a5d967ab639c67bae03847bd58ede31bde564 | [
"MIT"
] | null | null | null | Script to read temperature data from the DHT11:
# Import the Adafruit DHT library.
import Adafruit_DHT
import time
als = True
while als:
humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.DHT11, 4) #on gpio pin 4 or pin 7
if humidity is not None and temperature is not None:
humidity = round(humidity, 2)
temperature = round(temperature, 2)
print 'Temperature = {0:0.1f}*C Humidity = {1:0.1f}%'.format(temperature, humidity)
else:
print 'can not connect to the sensor!'
time.sleep(60) # read data every minute
Update from the Script above with modification of writing the data to a CSV.file:
# Import the Adafruit DHT library.
#time.strftime("%I:%M:%S")
import Adafruit_DHT
import time
import csv
import sys
csvfile = "temp.csv"
als = True
while als:
humidity, temperature = Adafruit_DHT.read_retry(Adafruit_DHT.DHT11, 4) # gpio pin 4 or pin number 7
if humidity is not None and temperature is not None:
humidity = round(humidity, 2)
temperature = round(temperature, 2)
print 'Temperature = {0:0.1f}*C Humidity = {1:0.1f}%'.format(temperature, humidity)
else:
print 'can not connect to the sensor!'
timeC = time.strftime("%I")+':' +time.strftime("%M")+':'+time.strftime("%S")
data = [temperature, timeC]
with open(csvfile, "a")as output:
writer = csv.writer(output, delimiter=",", lineterminator = '\n')
writer.writerow(data)
time.sleep(6) # update every 6 seconds (matches the plot's 6000 ms animation interval)
Script to read data from the CSV and display it in a graph:
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
import matplotlib.animation as animation
from datetime import datetime
fig = plt.figure()
rect = fig.patch
rect.set_facecolor('#0079E7')
def animate(i):
ftemp = 'temp.csv'
fh = open(ftemp)
temp = list()
timeC = list()
for line in fh:
pieces = line.split(',')
degree = pieces[0]
timeB= pieces[1]
timeA= timeB[:8]
#print timeA
time_string = datetime.strptime(timeA,'%H:%M:%S')
#print time_string
try:
temp.append(float(degree))
timeC.append(time_string)
except:
print "dont know"
ax1 = fig.add_subplot(1,1,1,axisbg='white')
ax1.xaxis.set_major_formatter(mdates.DateFormatter('%H:%M:%S'))
ax1.clear()
ax1.plot(timeC,temp, 'c', linewidth = 3.3)
plt.title('Temperature')
plt.xlabel('Time')
ani = animation.FuncAnimation(fig, animate, interval = 6000)
plt.show()
*/
void setup() {
}
void loop() {
}
| 30.10989 | 104 | 0.622628 | 362 | 2,740 | 4.671271 | 0.378453 | 0.05204 | 0.021289 | 0.036665 | 0.348906 | 0.301597 | 0.301597 | 0.301597 | 0.301597 | 0.301597 | 0 | 0.026772 | 0.263869 | 2,740 | 90 | 105 | 30.444444 | 0.811601 | 0.082847 | 0 | 0.309859 | 0 | 0 | 0.096975 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.140845 | null | null | 0.070423 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
3df8c0e29455e554abfe1f3cc62c34726c6ded0b | 1,264 | py | Python | Python/PythonOOP/animals.py | JosephAMumford/CodingDojo | 505be74d18d7a8f41c4b3576ca050b97f840f0a3 | [
"MIT"
] | 2 | 2018-08-18T15:14:45.000Z | 2019-10-16T16:14:13.000Z | Python/PythonOOP/animals.py | JosephAMumford/CodingDojo | 505be74d18d7a8f41c4b3576ca050b97f840f0a3 | [
"MIT"
] | null | null | null | Python/PythonOOP/animals.py | JosephAMumford/CodingDojo | 505be74d18d7a8f41c4b3576ca050b97f840f0a3 | [
"MIT"
] | 6 | 2018-05-05T18:13:05.000Z | 2021-05-20T11:32:48.000Z | class Animal(object):
def __init__(self,name,health):
self.name = name
self.health = 50
def walk(self):
self.health = self.health - 1
return self
def run(self):
self.health = self.health - 5
return self
def display_health(self):
print "Health: " + str(self.health)
return self
# Create instance of Animal
animal1 = Animal("Edgar",30)
animal1.walk().walk().walk().run().run().display_health()
class Dog(Animal):
def pet(self):
self.health = self.health + 5
return self
# Create instance of Dog
dog1 = Dog("Raspberry",150)
dog1.walk().walk().walk().run().run().pet().display_health()
class Dragon(Animal):
def fly(self):
self.health = self.health - 10
return self
def display_health(self):
print "I am a Dragon"
return self
# Create instance of Dragon
dragon1 = Dragon("Phantoon", 500)
dragon1.walk().run().fly().fly().fly().display_health()
# Create new Animal
animal2 = Animal("Probos",200)
#animal2.pet()
#AttributeError: 'Animal' object has no attribute 'pet'
#animal2.fly()
#AttributeError: 'Animal' object has no attribute 'fly'
animal2.display_health()
#Health: 50 - does not say "I am a Dragon"
| 22.175439 | 60 | 0.630538 | 168 | 1,264 | 4.684524 | 0.285714 | 0.127065 | 0.071156 | 0.091487 | 0.459975 | 0.266836 | 0.165184 | 0.088945 | 0 | 0 | 0 | 0.030896 | 0.231804 | 1,264 | 56 | 61 | 22.571429 | 0.779609 | 0.211234 | 0 | 0.25 | 0 | 0 | 0.049645 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
ad000563b867048b766de0b54cb60801221e67a0 | 598 | py | Python | fileparse/python/main.py | mlavergn/benchmarks | 4663009772c71d7c94bcd13eec542d1ce33cef72 | [
"Unlicense"
] | null | null | null | fileparse/python/main.py | mlavergn/benchmarks | 4663009772c71d7c94bcd13eec542d1ce33cef72 | [
"Unlicense"
] | null | null | null | fileparse/python/main.py | mlavergn/benchmarks | 4663009772c71d7c94bcd13eec542d1ce33cef72 | [
"Unlicense"
] | null | null | null | #!/usr/bin/python
import timeit
setup = '''
import os
def FileTest(path):
file = open(path, "r")
lines = file.readlines()
data = [None for i in range(len(lines))]
i = 0
for line in lines:
data[i] = line.split(',')
j = 0
for field in data[i]:
data[i][j] = field.strip('\\'\\n')
j += 1
i += 1
return data
'''
elapsed = timeit.timeit("FileTest(os.getcwd() + '/../employees.txt')", setup=setup, number=1)
print(elapsed * 1000.0, "ms - cold")
elapsed = timeit.timeit("FileTest(os.getcwd() + '/../employees.txt')", setup=setup, number=1)
print(elapsed * 1000.0, "ms - warm")
| 20.62069 | 93 | 0.605351 | 91 | 598 | 3.978022 | 0.450549 | 0.041436 | 0.104972 | 0.149171 | 0.458564 | 0.458564 | 0.458564 | 0.458564 | 0.458564 | 0.458564 | 0 | 0.032653 | 0.180602 | 598 | 28 | 94 | 21.357143 | 0.706122 | 0.026756 | 0 | 0.095238 | 0 | 0 | 0.671256 | 0.036145 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.095238 | 0 | 0.142857 | 0.095238 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9a7853c5ab201c882d582391f394325cd2ad7796 | 1,247 | py | Python | src/test/nspawn_test/support/header_test.py | Andrei-Pozolotin/nspawn | 9dd3926f1d1a3a0648f6ec14199cbf4069af1c98 | [
"Apache-2.0"
] | 15 | 2019-10-10T17:35:48.000Z | 2022-01-29T10:41:01.000Z | src/test/nspawn_test/support/header_test.py | Andrei-Pozolotin/nspawn | 9dd3926f1d1a3a0648f6ec14199cbf4069af1c98 | [
"Apache-2.0"
] | null | null | null | src/test/nspawn_test/support/header_test.py | Andrei-Pozolotin/nspawn | 9dd3926f1d1a3a0648f6ec14199cbf4069af1c98 | [
"Apache-2.0"
] | 2 | 2019-10-10T17:36:43.000Z | 2020-06-20T15:28:33.000Z |
from nspawn.support.header import *
def test_header():
print()
head_dict = {
'etag':'some-hash',
'last-modified':'some-time',
'content-length':'some-size',
'nspawn-digest':'some-text',
}
assert head_dict[Header.etag] == 'some-hash'
assert head_dict[Header.last_modified] == 'some-time'
assert head_dict[Header.content_length] == 'some-size'
assert head_dict[Header.nspawn_digest] == 'some-text'
def test_compare_head():
print()
assert compare_header({
}, {
}) == HeadComp.undetermined
assert compare_header({
'etag':'123'
}, {
'etag':'"123"'
}) == HeadComp.same
assert compare_header({
'last-modified':'some-time',
'content-length':'some-size',
}, {
'last-modified':'some-time',
'content-length':'some-size',
}) == HeadComp.same
assert compare_header({
'last-modified':'some-time',
'content-length':'some-size-1',
}, {
'last-modified':'some-time',
'content-length':'some-size-2',
}) == HeadComp.different
assert compare_header({
'last-modified':'some-time',
}, {
'content-length':'some-size',
}) == HeadComp.undetermined
| 25.44898 | 58 | 0.57498 | 133 | 1,247 | 5.270677 | 0.225564 | 0.119829 | 0.159772 | 0.199715 | 0.514979 | 0.477889 | 0.477889 | 0.477889 | 0.360913 | 0.291013 | 0 | 0.008511 | 0.246191 | 1,247 | 48 | 59 | 25.979167 | 0.737234 | 0 | 0 | 0.619048 | 0 | 0 | 0.28996 | 0 | 0 | 0 | 0 | 0 | 0.214286 | 1 | 0.047619 | false | 0 | 0.02381 | 0 | 0.071429 | 0.047619 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9a79fb2f2787441274d55999dc0843161af999b5 | 401 | py | Python | dmoj/Uncategorized/tss17a.py | UserBlackBox/competitive-programming | 2aa8ffa6df6a386f8e47d084b5fa32d6d741bbbc | [
"Unlicense"
] | null | null | null | dmoj/Uncategorized/tss17a.py | UserBlackBox/competitive-programming | 2aa8ffa6df6a386f8e47d084b5fa32d6d741bbbc | [
"Unlicense"
] | null | null | null | dmoj/Uncategorized/tss17a.py | UserBlackBox/competitive-programming | 2aa8ffa6df6a386f8e47d084b5fa32d6d741bbbc | [
"Unlicense"
] | null | null | null | # https://dmoj.ca/problem/tss17a
# https://dmoj.ca/submission/2226280
import sys
n = int(sys.stdin.readline()[:-1])
for i in range(n):
instruction = sys.stdin.readline()[:-1].split()
printed = False
for j in range(3):
if instruction.count(instruction[j]) >= 2:
print(instruction[j])
printed = True
break
if not printed:
print('???') | 26.733333 | 51 | 0.578554 | 52 | 401 | 4.461538 | 0.596154 | 0.077586 | 0.094828 | 0.146552 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.043771 | 0.259352 | 401 | 15 | 52 | 26.733333 | 0.737374 | 0.162095 | 0 | 0 | 0 | 0 | 0.008982 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.083333 | 0 | 0.083333 | 0.416667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 |
9a7ad9eea9244d2609a2517f92f7fc289fb240da | 1,159 | py | Python | todo/views/users_detail.py | josalhor/WebModels | 6b9cde3141c53562f40b129e6e1c87448ce9853a | [
"BSD-3-Clause"
] | null | null | null | todo/views/users_detail.py | josalhor/WebModels | 6b9cde3141c53562f40b129e6e1c87448ce9853a | [
"BSD-3-Clause"
] | 41 | 2021-03-23T12:58:25.000Z | 2021-05-25T11:38:42.000Z | todo/views/users_detail.py | josalhor/WebModels | 6b9cde3141c53562f40b129e6e1c87448ce9853a | [
"BSD-3-Clause"
] | null | null | null | from todo.templatetags.todo_tags import is_management
from django.contrib.auth.decorators import login_required, user_passes_test
from django.http import Http404, HttpResponse
from django.shortcuts import render
from todo.models import Designer, Management, Writer, Editor
@login_required
@user_passes_test(is_management)
def users_detail(request, list_slug=None) -> HttpResponse:
# Which users to show on this list view?
if list_slug == "editors":
users = Editor.objects.all()
elif list_slug == "designers":
users = Designer.objects.all()
elif list_slug == "writers":
users = Writer.objects.all()
elif list_slug == "management":
users = Management.objects.all()
# Additional filtering
active_users = users.filter(user__is_active=True)
unactive_users = users.filter(user__is_active=False)
# ######################
# Add New User Form
# ######################
context = {
"list_slug": list_slug,
"active_users": active_users,
"unactive_users": unactive_users,
"users": users,
}
return render(request, "todo/users_detail.html", context)
| 30.5 | 75 | 0.667817 | 137 | 1,159 | 5.430657 | 0.416058 | 0.075269 | 0.056452 | 0.072581 | 0.236559 | 0.075269 | 0 | 0 | 0 | 0 | 0 | 0 | 0.202761 | 1,159 | 37 | 76 | 31.324324 | 0.805195 | 0.068162 | 0 | 0 | 0 | 0 | 0.092323 | 0.02138 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0.08 | 0.2 | 0 | 0.28 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
9a7dca2e7b004aae5d55d6951056ac9880930921 | 3,100 | py | Python | tests/test_relations.py | OneRaynyDay/treeno | ce11b8447f471c0b5ea596a211b3855625ec43eb | [
"MIT"
] | 1 | 2021-12-28T19:00:01.000Z | 2021-12-28T19:00:01.000Z | tests/test_relations.py | OneRaynyDay/treeno | ce11b8447f471c0b5ea596a211b3855625ec43eb | [
"MIT"
] | null | null | null | tests/test_relations.py | OneRaynyDay/treeno | ce11b8447f471c0b5ea596a211b3855625ec43eb | [
"MIT"
] | null | null | null | import unittest
from treeno.base import PrintMode, PrintOptions
from treeno.expression import Array, Field, wrap_literal
from treeno.orderby import OrderTerm, OrderType
from treeno.relation import (
AliasedRelation,
Lateral,
SampleType,
Table,
TableQuery,
TableSample,
Unnest,
ValuesQuery,
)
class TestRelations(unittest.TestCase):
def test_table(self):
t = Table(name="table", schema="schema", catalog="catalog")
assert t.sql(PrintOptions()) == '"catalog"."schema"."table"'
tq = TableQuery(t)
assert (
tq.sql(PrintOptions(mode=PrintMode.DEFAULT))
== tq.sql(PrintOptions(mode=PrintMode.PRETTY))
== 'TABLE "catalog"."schema"."table"'
)
# Test a richer query type
tq = TableQuery(
t,
offset=2,
limit=5,
orderby=[OrderTerm(value=Field("x"), order_type=OrderType.DESC)],
)
assert (
tq.sql(PrintOptions(mode=PrintMode.DEFAULT))
== 'TABLE "catalog"."schema"."table" ORDER BY "x" DESC OFFSET 2 LIMIT 5'
)
assert tq.sql(PrintOptions(mode=PrintMode.PRETTY)) == (
' TABLE "catalog"."schema"."table"\n'
' ORDER BY "x" DESC\n'
"OFFSET 2\n"
" LIMIT 5"
)
def test_values(self):
v = ValuesQuery([wrap_literal(1), wrap_literal(2), wrap_literal(3)])
assert (
v.sql(PrintOptions(mode=PrintMode.DEFAULT))
== v.sql(PrintOptions(mode=PrintMode.PRETTY))
== "VALUES 1,2,3"
)
v = ValuesQuery(
[wrap_literal(1), wrap_literal(2), wrap_literal(3)],
offset=3,
with_=[AliasedRelation(TableQuery(Table(name="foo")), "foo")],
)
assert (
v.sql(PrintOptions(mode=PrintMode.DEFAULT))
== 'WITH "foo" AS (TABLE "foo") VALUES 1,2,3 OFFSET 3'
)
assert v.sql(PrintOptions(mode=PrintMode.PRETTY)) == (
' WITH "foo" AS (\n TABLE "foo")\n'
"VALUES 1,2,3\n"
"OFFSET 3"
)
def test_tablesample(self):
table_sample = TableSample(
Table(name="table"), SampleType.BERNOULLI, wrap_literal(0.3)
)
assert (
table_sample.sql(PrintOptions(mode=PrintMode.DEFAULT))
== table_sample.sql(PrintOptions(mode=PrintMode.PRETTY))
== '"table" TABLESAMPLE BERNOULLI(0.3)'
)
def test_lateral(self):
lateral = Lateral(TableQuery(Table(name="table")))
assert (
lateral.sql(PrintOptions(mode=PrintMode.DEFAULT))
== lateral.sql(PrintOptions(mode=PrintMode.PRETTY))
== 'LATERAL(TABLE "table")'
)
def test_unnest(self):
unnest = Unnest([Array([wrap_literal(1)])])
assert (
unnest.sql(PrintOptions(mode=PrintMode.DEFAULT))
== unnest.sql(PrintOptions(mode=PrintMode.PRETTY))
== "UNNEST(ARRAY[1])"
)
if __name__ == "__main__":
unittest.main()
| 31 | 84 | 0.560968 | 323 | 3,100 | 5.30031 | 0.204334 | 0.131425 | 0.155374 | 0.228972 | 0.42757 | 0.331776 | 0.245327 | 0.125 | 0.125 | 0.125 | 0 | 0.013902 | 0.303871 | 3,100 | 99 | 85 | 31.313131 | 0.779425 | 0.007742 | 0 | 0.127907 | 0 | 0 | 0.141835 | 0.034483 | 0 | 0 | 0 | 0 | 0.116279 | 1 | 0.05814 | false | 0 | 0.05814 | 0 | 0.127907 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9a87b0a003cfac44c4b71f5b09ccd17d4a3eced1 | 8,683 | py | Python | python/accel_adxl345/accel_adxl345.py | iorodeo/accel_adxl345 | aadbca1c57840f66a61556ff02e72e8b8e4e93e0 | [
"Apache-2.0"
] | null | null | null | python/accel_adxl345/accel_adxl345.py | iorodeo/accel_adxl345 | aadbca1c57840f66a61556ff02e72e8b8e4e93e0 | [
"Apache-2.0"
] | null | null | null | python/accel_adxl345/accel_adxl345.py | iorodeo/accel_adxl345 | aadbca1c57840f66a61556ff02e72e8b8e4e93e0 | [
"Apache-2.0"
] | null | null | null | """
accel_adxl345.py
This modules defines the AccelADXL345 class for streaming data from the
ADXL345 accelerometers.
"""
import time
import serial
import sys
import numpy
import struct
BUF_EMPTY_NUM = 5
BUF_EMPTY_DT = 0.05
class AccelADXL345(serial.Serial):
def __init__(self, **kwarg):
# Command ids
self.cmd_id = {
'stop_streaming' : 0,
'start_streaming' : 1,
'set_timer_period' : 2,
'get_timer_period' : 3,
'set_range' : 4,
'get_range' : 5,
'get_sample' : 6,
'get_max_timer_period' : 7,
'get_min_timer_period' : 8,
'get_bad_sample_count' : 9,
}
# Allowed acceleration ranges and scale factor
self.allowedAccelRange = (2, 4, 8, 16)
self.accelScale = 0.0384431560448
try:
self.reset_sleep = kwarg.pop('reset_sleep')
except KeyError:
self.reset_sleep = True
try:
self.accelRange = kwarg.pop('range')
except KeyError:
self.accelRange = 16
if not self.checkAccelRange(self.accelRange):
raise ValueError, 'unknown acceleration range {0}'.format(self.accelRange)
_kwarg = {
'port' : '/dev/ttyUSB0',
'timeout' : 0.1,
'baudrate' : 38400,
}
_kwarg.update(kwarg)
super(AccelADXL345,self).__init__(**_kwarg)
if self.reset_sleep:
time.sleep(2.0)
self.emptyBuffer()
# Get sample dt and current range setting
self.sampleDt = self.getSampleDt()
self.accelRange = self.getRange()
# Get max and min allowed sample dt
self.minSampleDt = self.getMinSampleDt()
self.maxSampleDt = self.getMaxSampleDt()
def sendCmd(self,cmd):
"""
Send the command, cmd, to the device
"""
self.write(cmd)
def readValue(self):
"""
Read a value from the device.
"""
line = self.readline()
line = line.strip()
return line
def readFloat(self):
"""
Read a single float or a list of floats separated by spaces
"""
value = self.readValue()
if ' ' in value:
value = value.split(' ')
value = [float(x) for x in value]
else:
value = float(value)
return value
def readInt(self):
"""
Read a single integer or a list of integers separated by spaces.
"""
value = self.readValue()
if ' ' in value:
value = value.split(' ')
value = [int(x) for x in value]
else:
value = int(value)
return value
def emptyBuffer(self):
"""
Empty the serial input buffer.
"""
for i in range(0,BUF_EMPTY_NUM):
#print 'empty %d'%(i,), self.inWaiting()
self.flushInput()
time.sleep(BUF_EMPTY_DT)
def checkAccelRange(self,value):
"""
Check if the value is within the allowed range set.
"""
return value in self.allowedAccelRange
def startStreaming(self):
"""
Start data streaming from the accelerometer
"""
cmd = '[{0}]\n'.format(self.cmd_id['start_streaming'])
self.sendCmd(cmd)
def stopStreaming(self):
"""
Stop data streaming from the accelerometer
"""
cmd = '[{0}]\n'.format(self.cmd_id['stop_streaming'])
self.sendCmd(cmd)
def getSampleDt(self):
"""
Returns the sample interval, dt, in microseconds
"""
cmd = '[{0}]\n'.format(self.cmd_id['get_timer_period'])
self.sendCmd(cmd)
dt = self.readFloat()
return dt
def getBadSampleCount(self):
"""
Returns the number of bad/corrupted samples.
"""
cmd = '[{0}]\n'.format(self.cmd_id['get_bad_sample_count'])
self.sendCmd(cmd)
val = self.readInt()
return val
def setSampleDt(self,dt):
"""
Sets the sample interval in microseconds.
"""
_dt = int(dt)
if _dt > self.maxSampleDt or _dt < self.minSampleDt:
raise ValueError, 'sample dt out of range'
cmd = '[{0},{1}]\n'.format(self.cmd_id['set_timer_period'],_dt)
self.sendCmd(cmd)
self.sampleDt = _dt
def getSampleRate(self):
"""
Returns the sample rate in Hz
"""
return 1.0/self.sampleDt
def setSampleRate(self,freq):
"""
Sets the sample rate in Hz
"""
dt = int(1.0e6/freq)
self.setSampleDt(dt)
def getMaxSampleDt(self):
"""
Gets the maximum allowed sample dt in microseconds.
"""
cmd = '[{0}]\n'.format(self.cmd_id['get_max_timer_period'])
self.sendCmd(cmd)
value = self.readInt()
return value
def getMinSampleDt(self):
"""
Gets the minimum allowed sample dt in microseconds.
"""
cmd = '[{0}]\n'.format(self.cmd_id['get_min_timer_period'])
self.sendCmd(cmd)
value = self.readInt()
return value
def getMaxSampleRate(self):
"""
Returns the maximum allowed sample rate in Hz
"""
minSampleDtSec = self.minSampleDt*(1.0e-6)
return 1.0/minSampleDtSec
def getMinSampleRate(self):
"""
Returns the minimum allowed sample rate in Hz
"""
maxSampleDtSec = self.maxSampleDt*(1.0e-6)
return 1.0/maxSampleDtSec
def getRange(self):
"""
Returns the current accelerometer range setting.
"""
cmd = '[{0}]\n'.format(self.cmd_id['get_range'])
self.sendCmd(cmd)
accelRange = self.readInt()
return accelRange
def setRange(self,value):
"""
Sets the current accelerometer range.
"""
_value = int(value)
if _value in self.allowedAccelRange:
cmd = '[{0}, {1}]\n'.format(self.cmd_id['set_range'],_value)
self.sendCmd(cmd)
_value = self.getRange()
self.accelRange = _value
def getAllowedAccelRange(self):
"""
Returns all allowed range settings
"""
return self.allowedAccelRange
def peekValue(self):
"""
Grabs a single sample (ax,ay,az) from the accelerometer.
"""
cmd = '[{0}]\n'.format(self.cmd_id['get_sample'])
self.sendCmd(cmd)
samples = self.readFloat()
samples = [x*self.accelScale for x in samples]
return samples
def getSamples(self,N,verbose=False):
"""
Streams N samples from the accelerometer at the current sample rate
setting.
"""
# Start streaming
self.emptyBuffer()
self.startStreaming()
# Read samples
data = []
while len(data) < N:
if verbose:
print len(data)
newData = self.readValues()
data.extend(newData)
# Stop streaming and empty buffer
self.stopStreaming()
self.emptyBuffer()
# Convert to an array, truncate to number of samples requested
data = numpy.array(data)
data = self.accelScale*data[:N,:]
# Use sample rate to get array of time points
dtSec = self.sampleDt*1.0e-6
t = dtSec*numpy.arange(data.shape[0])
return t, data
#def readValues(self,verbose=False):
# data = []
# if self.inWaiting() > 0:
# line = self.readline()
# line = line.strip()
# line = line.split(':')
# for vals in line:
# vals = vals.split(' ')
# try:
# vals = [float(x) for x in vals]
# if len(vals) == 3:
# data.append(vals)
# except:
# if verbose:
# print 'fail'
# return data
def readValues(self):
data = []
while self.inWaiting() >= 7:
byteVals = self.read(7)
ax = struct.unpack('<h',byteVals[0:2])[0]
ay = struct.unpack('<h',byteVals[2:4])[0]
az = struct.unpack('<h',byteVals[4:6])[0]
chk = ord(byteVals[6])
if not chk == 0:
raise IOError, 'streaming data is not in sync.'
data.append([ax,ay,az])
return data
| 27.741214 | 86 | 0.524012 | 943 | 8,683 | 4.73913 | 0.225875 | 0.018796 | 0.022153 | 0.031327 | 0.200269 | 0.160662 | 0.142314 | 0.132916 | 0.121951 | 0.103155 | 0 | 0.019583 | 0.364851 | 8,683 | 312 | 87 | 27.830128 | 0.790752 | 0.090867 | 0 | 0.197605 | 0 | 0 | 0.082905 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.02994 | null | null | 0.005988 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9a8866fd681b05cff1de0c32ef8dae40aefe5351 | 831 | py | Python | polling_stations/apps/data_collection/management/commands/import_tower_hamlets.py | mtravis/UK-Polling-Stations | 26e0331dc29253dc436a0462ffaa01e974c5dc52 | [
"BSD-3-Clause"
] | null | null | null | polling_stations/apps/data_collection/management/commands/import_tower_hamlets.py | mtravis/UK-Polling-Stations | 26e0331dc29253dc436a0462ffaa01e974c5dc52 | [
"BSD-3-Clause"
] | null | null | null | polling_stations/apps/data_collection/management/commands/import_tower_hamlets.py | mtravis/UK-Polling-Stations | 26e0331dc29253dc436a0462ffaa01e974c5dc52 | [
"BSD-3-Clause"
] | null | null | null | from data_collection.management.commands import BaseXpressDemocracyClubCsvImporter
class Command(BaseXpressDemocracyClubCsvImporter):
council_id = "E09000030"
addresses_name = "local.2018-05-03/Version 2/Democracy_Club__03May2018.tsv"
stations_name = "local.2018-05-03/Version 2/Democracy_Club__03May2018.tsv"
elections = ["local.2018-05-03"]
csv_delimiter = "\t"
csv_encoding = "windows-1252"
def address_record_to_dict(self, record):
uprn = record.property_urn.strip().lstrip("0")
if uprn == "6198433":
rec = super().address_record_to_dict(record)
rec["postcode"] = "E2 9DG"
return rec
if record.addressline6 == "E3 2LB" or record.addressline6 == "E3 5EG":
return None
return super().address_record_to_dict(record)
| 34.625 | 82 | 0.683514 | 98 | 831 | 5.571429 | 0.591837 | 0.049451 | 0.06044 | 0.071429 | 0.29304 | 0.29304 | 0.18315 | 0.18315 | 0.18315 | 0.18315 | 0 | 0.100304 | 0.208183 | 831 | 23 | 83 | 36.130435 | 0.729483 | 0 | 0 | 0 | 0 | 0 | 0.222623 | 0.132371 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.117647 | 0 | 0.764706 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
9a8ce9049f7230937ae69e4978f32515e2f46236 | 654 | py | Python | saltlint/rules/CmdWaitRecommendRule.py | Poulpatine/salt-lint | 304917d95d2730e7df8bd7b5dd29a3bd77c80250 | [
"MIT"
] | null | null | null | saltlint/rules/CmdWaitRecommendRule.py | Poulpatine/salt-lint | 304917d95d2730e7df8bd7b5dd29a3bd77c80250 | [
"MIT"
] | null | null | null | saltlint/rules/CmdWaitRecommendRule.py | Poulpatine/salt-lint | 304917d95d2730e7df8bd7b5dd29a3bd77c80250 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2020 Warpnet B.V.
import re
from saltlint.linter.rule import DeprecationRule
from saltlint.utils import LANGUAGE_SLS
class CmdWaitRecommendRule(DeprecationRule):
id = '213'
shortdesc = 'SaltStack recommends using cmd.run together with onchanges, rather than cmd.wait'
description = 'SaltStack recommends using cmd.run together with onchanges, rather than cmd.wait'
severity = 'LOW'
languages = [LANGUAGE_SLS]
tags = ['formatting']
version_added = 'develop'
regex = re.compile(r"^\s{2}cmd\.wait:(\s+)?$")
def match(self, file, line):
return self.regex.search(line)
| 28.434783 | 100 | 0.697248 | 83 | 654 | 5.457831 | 0.674699 | 0.046358 | 0.10596 | 0.119205 | 0.300221 | 0.300221 | 0.300221 | 0.300221 | 0.300221 | 0.300221 | 0 | 0.016886 | 0.185015 | 654 | 22 | 101 | 29.727273 | 0.833021 | 0.08104 | 0 | 0 | 0 | 0 | 0.344482 | 0.038462 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.214286 | 0.071429 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
9a969dcb4bdc1a8eee56b110c60c1611472a3520 | 1,834 | py | Python | bob-ross/cluster-paintings.py | h4ckfu/data | bdc02fd5051dfb31e42f8e078832ceead92f9958 | [
"CC-BY-4.0"
] | 16,124 | 2015-01-01T06:18:12.000Z | 2022-03-31T00:46:52.000Z | bob-ross/cluster-paintings.py | h4ckfu/data | bdc02fd5051dfb31e42f8e078832ceead92f9958 | [
"CC-BY-4.0"
] | 179 | 2015-01-07T10:19:57.000Z | 2022-02-21T21:19:14.000Z | bob-ross/cluster-paintings.py | h4ckfu/data | bdc02fd5051dfb31e42f8e078832ceead92f9958 | [
"CC-BY-4.0"
] | 12,163 | 2015-01-03T14:23:36.000Z | 2022-03-31T10:10:23.000Z | """
Clusters Bob Ross paintings by features.
By Walter Hickey <walter.hickey@fivethirtyeight.com>
See http://fivethirtyeight.com/features/a-statistical-analysis-of-the-work-of-bob-ross/
"""
import numpy as np
from scipy.cluster.vq import vq, kmeans, whiten
import math
import csv
def main():
# load data into vectors of 1s and 0s for each tag
with open('elements-by-episode.csv','r') as csvfile:
reader = csv.reader(csvfile)
reader.next() # skip header
data = []
for row in reader:
data.append(map(lambda x: int(x), row[2:])) # exclude EPISODE and TITLE columns
# convert to numpy matrix
matrix = np.array(data)
# remove colums that have been tagged less than 5 times
columns_to_remove = []
for col in range(np.shape(matrix)[1]):
if sum(matrix[:,col]) <= 5:
columns_to_remove.append(col)
matrix = np.delete(matrix, columns_to_remove, axis=1)
# normalize according to stddev
whitened = whiten(matrix)
output = kmeans(whitened, 10)
print "episode", "distance", "cluster"
# determine distance between each of 403 vectors and each centroid, find closest neighbor
for i, v in enumerate(whitened):
# distance between centroid 0 and feature vector
distance = math.sqrt(sum((v - output[0][0]) ** 2))
# group is the centroid it is closest to so far, set initally to centroid 0
group = 0
closest_match = (distance, group)
# test the vector i against the 10 centroids, find nearest neighbor
for x in range (0, 10):
dist_x = math.sqrt(sum((v - output[0][x]) ** 2))
if dist_x < closest_match[0]:
closest_match = (dist_x, x)
print i+1, closest_match[0], closest_match[1]
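# Note (a sketch, not a behavior change): scipy's vq(), already imported
# above, performs the same nearest-centroid assignment in one call:
# codes, dists = vq(whitened, output[0])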
if __name__ == "__main__":
main() | 31.084746 | 93 | 0.640676 | 262 | 1,834 | 4.400763 | 0.461832 | 0.052038 | 0.039029 | 0.020815 | 0.076323 | 0.032958 | 0 | 0 | 0 | 0 | 0 | 0.021168 | 0.252999 | 1,834 | 59 | 94 | 31.084746 | 0.820438 | 0.260087 | 0 | 0 | 0 | 0 | 0.046713 | 0.019896 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.129032 | null | null | 0.064516 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9a9fc338c15aa55b529d0d570899ecd61a1b41cd | 514 | py | Python | Strings/count-index-find.py | tverma332/python3 | 544c4ec9c726c37293c8da5799f50575cc50852d | [
"MIT"
] | 3 | 2022-03-28T09:10:08.000Z | 2022-03-29T10:47:56.000Z | Strings/count-index-find.py | tverma332/python3 | 544c4ec9c726c37293c8da5799f50575cc50852d | [
"MIT"
] | 1 | 2022-03-27T11:52:58.000Z | 2022-03-27T11:52:58.000Z | Strings/count-index-find.py | tverma332/python3 | 544c4ec9c726c37293c8da5799f50575cc50852d | [
"MIT"
] | null | null | null | # 1) count = To count how many time a particular word & char. is appearing
x = "Keep grinding keep hustling"
print(x.count("t"))
# 2) index = To get index of letter(gives the lowest index)
x="Keep grinding keep hustling"
print(x.index("t")) # will give the lowest index value of (t)
# 3) find = To get index of letter(gives the lowest index) | Return -1 on failure.
x = "Keep grinding keep hustling"
print(x.find("t"))
'''
NOTE : print(x.index("t",34)) : Search starts from index value 34 including 34
'''
| 25.7 | 82 | 0.684825 | 88 | 514 | 4 | 0.454545 | 0.068182 | 0.110795 | 0.144886 | 0.474432 | 0.474432 | 0.474432 | 0.210227 | 0.210227 | 0 | 0 | 0.023866 | 0.184825 | 514 | 19 | 83 | 27.052632 | 0.816229 | 0.488327 | 0 | 0.5 | 0 | 0 | 0.488372 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.5 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 |
9aa3bdf68ace18fc9d168671cbe55ba44bdbac29 | 416 | py | Python | setup.py | xpac1985/pyASA | a6cf470a4d1b731864a1b450e321901636c1ebdf | [
"MIT"
] | 10 | 2017-02-05T12:15:19.000Z | 2020-05-20T14:33:04.000Z | setup.py | xpac1985/pyASA | a6cf470a4d1b731864a1b450e321901636c1ebdf | [
"MIT"
] | null | null | null | setup.py | xpac1985/pyASA | a6cf470a4d1b731864a1b450e321901636c1ebdf | [
"MIT"
] | 3 | 2017-04-02T13:00:28.000Z | 2020-06-13T23:34:37.000Z | from distutils.core import setup
setup(
name='pyASA',
packages=['pyASA'],
version='0.1.0',
description='Wrapper for the Cisco ASA REST API',
author='xpac',
author_email='bjoern@areafunky.net',
url='https://github.com/xpac1985/pyASA',
download_url='https://github.com/xpac1985/pyASA/tarball/0.1.0',
keywords=['cisco', 'asa', 'rest-api', 'wrapper', 'alpha'],
classifiers=[],
)
| 27.733333 | 67 | 0.646635 | 54 | 416 | 4.944444 | 0.648148 | 0.014981 | 0.022472 | 0.11236 | 0.224719 | 0.224719 | 0 | 0 | 0 | 0 | 0 | 0.040115 | 0.161058 | 416 | 14 | 68 | 29.714286 | 0.724928 | 0 | 0 | 0 | 0 | 0 | 0.435096 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.076923 | 0 | 0.076923 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9aa976fa66600077fd0293cccc1c6dcd3ade5f91 | 9,390 | py | Python | Statistical Thinking in Python (Part 1)/Thinking_probabilistically--_Discrete_variables.py | shreejitverma/Data-Scientist | 03c06936e957f93182bb18362b01383e5775ffb1 | [
"MIT"
] | 2 | 2022-03-12T04:53:03.000Z | 2022-03-27T12:39:21.000Z | Statistical Thinking in Python (Part 1)/Thinking_probabilistically--_Discrete_variables.py | shivaniverma1/Data-Scientist | f82939a411484311171465591455880c8e354750 | [
"MIT"
] | null | null | null | Statistical Thinking in Python (Part 1)/Thinking_probabilistically--_Discrete_variables.py | shivaniverma1/Data-Scientist | f82939a411484311171465591455880c8e354750 | [
"MIT"
] | 2 | 2022-03-12T04:52:21.000Z | 2022-03-27T12:45:32.000Z | # Thinking probabilistically-- Discrete variables!!
# Statistical inference rests upon probability. Because we can very rarely say anything meaningful with absolute certainty from data, we use probabilistic language to make quantitative statements about data. In this chapter, you will learn how to think probabilistically about discrete quantities: those that can only take certain values, like integers.
# Generating random numbers using the np.random module
# We will be hammering the np.random module for the rest of this course and its sequel. Actually, you will probably call functions from this module more than any other while wearing your hacker statistician hat. Let's start by taking its simplest function, np.random.random() for a test spin. The function returns a random number between zero and one. Call np.random.random() a few times in the IPython shell. You should see numbers jumping around between zero and one.
# In this exercise, we'll generate lots of random numbers between zero and one, and then plot a histogram of the results. If the numbers are truly random, all bars in the histogram should be of (close to) equal height.
# You may have noticed that, in the video, Justin generated 4 random numbers by passing the keyword argument size=4 to np.random.random(). Such an approach is more efficient than a for loop: in this exercise, however, you will write a for loop to experience hacker statistics as the practice of repeating an experiment over and over again.
# Seed the random number generator
np.random.seed(42)
# Initialize random numbers: random_numbers
random_numbers = np.empty(100000)
# Generate random numbers by looping over range(100000)
for i in range(100000):
random_numbers[i] = np.random.random()
# Plot a histogram
_ = plt.hist(random_numbers)
# Show the plot
plt.show()
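# As noted above, a vectorized call is more efficient than the for loop;
# this equivalent one-liner (a sketch, not part of the exercise) draws all
# 100,000 samples at once:
# random_numbers = np.random.random(size=100000)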
# The np.random module and Bernoulli trials
# You can think of a Bernoulli trial as a flip of a possibly biased coin. Specifically, each coin flip has a probability p of landing heads (success) and probability 1−p of landing tails (failure). In this exercise, you will write a function to perform n Bernoulli trials, perform_bernoulli_trials(n, p), which returns the number of successes out of n Bernoulli trials, each of which has probability p of success. To perform each Bernoulli trial, use the np.random.random() function, which returns a random number between zero and one.
def perform_bernoulli_trials(n, p):
"""Perform n Bernoulli trials with success probability p
and return number of successes."""
# Initialize number of successes: n_success
n_success = 0
# Perform trials
for i in range(n):
# Choose random number between zero and one: random_number
random_number = np.random.random()
# If less than p, it's a success so add one to n_success
        if random_number < p:
            n_success += 1
return n_success
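
# Aside (not part of the exercise): the same count can be computed with one
# vectorized call, e.g. np.sum(np.random.random(size=n) < p).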
# How many defaults might we expect?
# Let's say a bank made 100 mortgage loans. It is possible that anywhere between 0 and 100 of the loans will be defaulted upon. You would like to know the probability of getting a given number of defaults, given that the probability of a default is p = 0.05. To investigate this, you will do a simulation. You will perform 100 Bernoulli trials using the perform_bernoulli_trials() function you wrote in the previous exercise and record how many defaults we get. Here, a success is a default. (Remember that the word "success" just means that the Bernoulli trial evaluates to True, i.e., did the loan recipient default?) You will do this for another 100 Bernoulli trials. And again and again until we have tried it 1000 times. Then, you will plot a histogram describing the probability of the number of defaults.
# Seed random number generator
np.random.seed(42)
# Initialize the number of defaults: n_defaults
n_defaults = np.empty(1000)
# Compute the number of defaults
for i in range(1000):
    n_defaults[i] = perform_bernoulli_trials(100, 0.05)
# Plot the histogram with default number of bins; label your axes
_ = plt.hist(n_defaults, density=True)
_ = plt.xlabel('number of defaults out of 100 loans')
_ = plt.ylabel('probability')
# Show the plot
plt.show()
# Will the bank fail?
# Plot the number of defaults from the previous exercise (available in your namespace as n_defaults) as a CDF. The ecdf() function you wrote in the first chapter is available.
# If interest rates are such that the bank will lose money if 10 or more of its loans are defaulted upon, what is the probability that the bank will lose money?
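# The ecdf() function from the first chapter is assumed; a minimal sketch so
# this file runs standalone:
def ecdf(data):
    """Compute ECDF x and y values for a 1-D array of measurements."""
    n = len(data)
    x = np.sort(data)
    y = np.arange(1, n + 1) / n
    return x, y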
# Compute ECDF: x, y
x, y = ecdf(n_defaults)
# Plot the ECDF with labeled axes
plt.plot(x, y, marker='.', linestyle='none')
plt.xlabel('number of defaults out of 100 loans')
plt.ylabel('CDF')
# Show the plot
plt.show()
# Compute the number of 100-loan simulations with 10 or more defaults: n_lose_money
n_lose_money = np.sum(n_defaults >= 10)
# Compute and print probability of losing money
print('Probability of losing money =', n_lose_money / len(n_defaults))
# Sampling out of the Binomial distribution
# Compute the probability mass function for the number of defaults we would expect for 100 loans as in the last section, but instead of simulating all of the Bernoulli trials, perform the sampling using np.random.binomial(). This is identical to the calculation you did in the last set of exercises using your custom-written perform_bernoulli_trials() function, but far more computationally efficient. Given this extra efficiency, we will take 10,000 samples instead of 1000. After taking the samples, plot the CDF as last time. This CDF that you are plotting is that of the Binomial distribution.
# Note: For this exercise and all going forward, the random number generator is pre-seeded for you (with np.random.seed(42)) to save you typing that each time.
# Take 10,000 samples out of the binomial distribution: n_defaults
n_defaults = np.random.binomial(100, 0.05, size=10000)
# Compute CDF: x, y
x, y = ecdf(n_defaults)
# Plot the CDF with axis labels
plt.plot(x, y, marker='.', linestyle='none')
plt.xlabel("Number of Defaults")
plt.ylabel("CDF")
# Show the plot
plt.show()
# Plotting the Binomial PMF
# As mentioned in the video, plotting a nice looking PMF requires a bit of matplotlib trickery that we will not go into here. Instead, we will plot the PMF of the Binomial distribution as a histogram with skills you have already learned. The trick is setting up the edges of the bins to pass to plt.hist() via the bins keyword argument. We want the bins centered on the integers. So, the edges of the bins should be -0.5, 0.5, 1.5, 2.5, ... up to max(n_defaults) + 1.5. You can generate an array like this using np.arange() and then subtracting 0.5 from the array.
# You have already sampled out of the Binomial distribution during your exercises on loan defaults, and the resulting samples are in the NumPy array n_defaults.
# Compute bin edges: bins
bins = np.arange(0, max(n_defaults) + 1.5) - 0.5
# Generate histogram
plt.hist(n_defaults, density=True, bins=bins)
# Label axes
plt.xlabel('Defaults')
plt.ylabel('PMF')
# Show the plot
plt.show()
# Relationship between Binomial and Poisson distributions
# You just heard that the Poisson distribution is a limit of the Binomial distribution for rare events. This makes sense if you think about the stories. Say we do a Bernoulli trial every minute for an hour, each with a success probability of 0.1. We would do 60 trials, and the number of successes is Binomially distributed, and we would expect to get about 6 successes. This is just like the Poisson story we discussed in the video, where we get on average 6 hits on a website per hour. So, the Poisson distribution with arrival rate equal to np approximates a Binomial distribution for n Bernoulli trials with probability p of success (with n large and p small). Importantly, the Poisson distribution is often simpler to work with because it has only one parameter instead of two for the Binomial distribution.
# Let's explore these two distributions computationally. You will compute the mean and standard deviation of samples from a Poisson distribution with an arrival rate of 10. Then, you will compute the mean and standard deviation of samples from a Binomial distribution with parameters n and p such that np=10.
# Draw 10,000 samples out of Poisson distribution: samples_poisson
samples_poisson = np.random.poisson(10, size=10000)
# Print the mean and standard deviation
print('Poisson: ', np.mean(samples_poisson),
      np.std(samples_poisson))
# Specify values of n and p to consider for Binomial: n, p
# (any pairs with n * p = 10 work; three scales of rarity are shown)
n = [20, 100, 1000]
p = [0.5, 0.1, 0.01]
# Draw 10,000 samples for each n,p pair: samples_binomial
for i in range(3):
    samples_binomial = np.random.binomial(n[i], p[i], size=10000)
    # Print results
    print('n =', n[i], 'Binom:', np.mean(samples_binomial),
          np.std(samples_binomial))
# Was 2015 anomalous?
# 1990 and 2015 featured the most no-hitters of any season of baseball (there were seven). Given that there are on average 251/115 no-hitters per season, what is the probability of having seven or more in a season?
# Draw 10,000 samples out of Poisson distribution: n_nohitters
n_nohitters = np.random.poisson(251 / 115, size=10000)
# Compute number of samples that are seven or greater: n_large
n_large = np.sum(n_nohitters >= 7)
# Compute probability of getting seven or more: p_large
p_large = n_large / 10000
# Print the result
print('Probability of seven or more no-hitters:', p_large)
| 47.908163 | 812 | 0.760809 | 1,558 | 9,390 | 4.543004 | 0.252888 | 0.016954 | 0.012433 | 0.021192 | 0.156683 | 0.085476 | 0.067251 | 0.067251 | 0.032778 | 0.015541 | 0 | 0.021335 | 0.181363 | 9,390 | 195 | 813 | 48.153846 | 0.899311 | 0.810969 | 0 | 0.234043 | 0 | 0 | 0.112676 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.021277 | false | 0 | 0 | 0 | 0.042553 | 0.085106 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9aacaa2c9c98de085aff50585e25fcd2964d6c96 | 1,008 | py | Python | ml/data_engineering/ETL/extract.py | alexnakagawa/tools | b5e8c047293247c8781d44607968402f637e597e | [
"MIT"
] | null | null | null | ml/data_engineering/ETL/extract.py | alexnakagawa/tools | b5e8c047293247c8781d44607968402f637e597e | [
"MIT"
] | null | null | null | ml/data_engineering/ETL/extract.py | alexnakagawa/tools | b5e8c047293247c8781d44607968402f637e597e | [
"MIT"
] | null | null | null | '''
This is an abstract example of Extracting in an ETL pipeline.
Inspired from the "Introduction to Data Engineering" course on Datacamp.com
Author: Alex Nakagawa
'''
import requests
import pandas as pd
import sqlalchemy
# Fetch the Hackernews post
resp = requests.get("https://hacker-news.firebaseio.com/v0/item/16222426.json")
# Print the response parsed as JSON
print(resp.json())
# Assign the score of the post to post_score
post_score = resp.json()['score']
print(post_score)
# Function to extract table to a pandas DataFrame
def extract_table_to_pandas(tablename, db_engine):
query = "SELECT * FROM {}".format(tablename)
return pd.read_sql(query, db_engine)
# Connect to the database using the connection URI
connection_uri = "postgresql://repl:password@localhost:5432/pagila"
db_engine = sqlalchemy.create_engine(connection_uri)
# Extract the film table into a pandas DataFrame
extract_table_to_pandas("film", db_engine)
# Extract the customer table into a pandas DataFrame
extract_table_to_pandas("customer", db_engine)
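
# Hypothetical follow-up: keep a reference to one extracted frame and inspect
# it (assumes the pagila database above is actually reachable).
film_df = extract_table_to_pandas("film", db_engine)
print(film_df.head())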
| 30.545455 | 79 | 0.779762 | 150 | 1,008 | 5.1 | 0.526667 | 0.052288 | 0.073203 | 0.078431 | 0.117647 | 0.117647 | 0.117647 | 0.117647 | 0.117647 | 0 | 0 | 0.01484 | 0.130952 | 1,008 | 32 | 80 | 31.5 | 0.858447 | 0.456349 | 0 | 0 | 0 | 0 | 0.256554 | 0.089888 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0.083333 | 0.083333 | 0 | 0.25 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
9ab1353597b9195d65b8c371888b502f56866647 | 3,368 | py | Python | physicspy/optics/jones.py | suyag/physicspy | f2b29a72cb08b1de170274b3e35c3d8eda32f9e1 | [
"MIT"
] | null | null | null | physicspy/optics/jones.py | suyag/physicspy | f2b29a72cb08b1de170274b3e35c3d8eda32f9e1 | [
"MIT"
] | null | null | null | physicspy/optics/jones.py | suyag/physicspy | f2b29a72cb08b1de170274b3e35c3d8eda32f9e1 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
from __future__ import division
from numpy import sqrt, cos, sin, arctan, exp, abs, pi, conj, real
from numpy import array, dot, sum
class JonesVector:
""" A Jones vector class to represent polarized EM waves """
def __init__(self,Jarray=array([1,0])):
self.Jx = Jarray[0]
self.Jy = Jarray[1]
def size(self):
""" Jones vector size """
return sqrt(dot(self.toArray().conj(),self.toArray()).real)
def normalize(self):
""" Normalized Jones vector """
result = self
try:
size = result.size()
if size == 0:
raise Exception('Zero-sized Jones vector cannot be normalized')
result.Jx /= size
result.Jy /= size
except Exception as inst:
            print("Error: ", inst)
finally:
return result
def toArray(self):
""" Convert into array format """
return array([self.Jx, self.Jy])
def rotate(self,phi):
""" Rotated Jones vector
Argument:
phi - rotation angle in radians (clockwise is positive)
"""
R = array([[cos(phi), sin(phi)], \
[-sin(phi), cos(phi)]])
return JonesVector(dot(R, self.toArray()))
def waveplate(self,G):
""" Waveplate with arbitrary retardance
Slow axis (or "c axis") is along X
Argument:
G - retartandance in phase units
(e.g. one wavelength retardance is G = 2 * pi)
"""
W0 = array([[exp(-1j*G/2), 0], \
[0, exp(1j*G/2)]])
return JonesVector(dot(W0, self.toArray()))
def waveplateRot(self,phi,G):
""" Waveplate matrix with arbitrary rotation
Arguments:
phi - rotation angle in radians
(clockwise is positive)
G - retardance in phase units
(e.g. one wavelength retardance is G = 2 * pi)
"""
return self.rotate(phi).waveplate(G).rotate(-phi)
def pol(self,phi):
""" Polarizer matrix """
P = array([[cos(phi)**2, cos(phi)*sin(phi)], \
[sin(phi)*cos(phi), sin(phi)**2]])
return JonesVector(dot(P, self.toArray()))
def mirrormetal(self,n,k,th):
""" Reflection off a metal mirror
Incoming and reflected beams are assumed to be in the X plane
"""
        dr = mphase(n, k, th)
W0 = array([[dr[3]*exp(-1j*dr[1]), 0],\
[0, dr[2]*exp(-1j*dr[0])]])
return JonesVector(dot(W0, self.toArray()))
def intensity(self):
""" Intensity from electric field vector """
return real(self.Jx)**2 + real(self.Jy)**2
def mphase(n,k,th):
""" Calculate phase shift and reflectance of a metal in the s and p directions"""
u = sqrt(0.5 *((n**2 - k**2 - sin(th)**2) + sqrt( (n**2 - k**2 - sin(th)**2)**2 + 4*n**2*k**2 )))
v = sqrt(0.5*(-(n**2 - k**2 - sin(th)**2) + sqrt( (n**2 - k**2 - sin(th)**2)**2 + 4*n**2*k**2 )))
    ds = arctan(2*v*cos(th)/(u**2+v**2-cos(th)**2))
    dp = arctan(2*v*cos(th)*(n**2-k**2-2*u**2)/(u**2+v**2-(n**2+k**2)**2*cos(th)**2))
    if dp < 0:
        dp = dp + pi
    rs = abs((cos(th) - (u+v*1j))/(cos(th) + (u+v*1j)))
    rp = abs(((n**2 + k**2)*cos(th) - (u+v*1j))/((n**2 + k**2)*cos(th) + (u+v*1j)))
return array([ds, dp, rs, rp])
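
def _demo():
    """Usage sketch (illustration only): horizontal polarization through a
    quarter-wave plate at 45 degrees becomes circular polarization."""
    J = JonesVector(array([1, 0]))
    circ = J.waveplateRot(pi / 4, pi / 2)
    print(circ.toArray())

if __name__ == '__main__':
    _demo()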
| 34.367347 | 101 | 0.518705 | 487 | 3,368 | 3.570842 | 0.281314 | 0.011501 | 0.017251 | 0.023002 | 0.256469 | 0.225417 | 0.225417 | 0.184014 | 0.105808 | 0.090857 | 0 | 0.032738 | 0.301663 | 3,368 | 97 | 102 | 34.721649 | 0.706633 | 0.005938 | 0 | 0.037037 | 0 | 0 | 0.021964 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.055556 | null | null | 0.018519 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9ab5d8227882ea8202fdc93b49f22e935bbc0e93 | 2,560 | py | Python | aiida/cmdline/params/options/config.py | louisponet/aiida-core | 3214236df66a3792ee57fe38a06c0c3bb65861ab | [
"MIT",
"BSD-3-Clause"
] | 1 | 2020-10-01T17:11:58.000Z | 2020-10-01T17:11:58.000Z | aiida/cmdline/params/options/config.py | louisponet/aiida-core | 3214236df66a3792ee57fe38a06c0c3bb65861ab | [
"MIT",
"BSD-3-Clause"
] | 17 | 2020-03-11T17:04:05.000Z | 2020-05-01T09:34:45.000Z | aiida/cmdline/params/options/config.py | louisponet/aiida-core | 3214236df66a3792ee57fe38a06c0c3bb65861ab | [
"MIT",
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
###########################################################################
# Copyright (c), The AiiDA team. All rights reserved. #
# This file is part of the AiiDA code. #
# #
# The code is hosted on GitHub at https://github.com/aiidateam/aiida-core #
# For further information on the license, see the LICENSE.txt file #
# For further information please visit http://www.aiida.net #
###########################################################################
# pylint: disable=cyclic-import
"""
.. py:module::config
:synopsis: Convenience class for configuration file option
"""
import click_config_file
import yaml
from .overridable import OverridableOption
def yaml_config_file_provider(handle, cmd_name): # pylint: disable=unused-argument
"""Read yaml config file from file handle."""
return yaml.safe_load(handle)
class ConfigFileOption(OverridableOption):
"""
Wrapper around click_config_file.configuration_option that increases reusability.
Example::
CONFIG_FILE = ConfigFileOption('--config', help='A configuration file')
@click.command()
@click.option('computer_name')
@CONFIG_FILE(help='Configuration file for computer_setup')
def computer_setup(computer_name):
            click.echo(f"Setting up computer {computer_name}")
computer_setup --config config.yml
with config.yml::
---
computer_name: computer1
"""
def __init__(self, *args, **kwargs):
"""
Store the default args and kwargs.
:param args: default arguments to be used for the option
:param kwargs: default keyword arguments to be used that can be overridden in the call
"""
kwargs.update({'provider': yaml_config_file_provider, 'implicit': False})
super().__init__(*args, **kwargs)
def __call__(self, **kwargs):
"""
Override the stored kwargs, (ignoring args as we do not allow option name changes) and return the option.
:param kwargs: keyword arguments that will override those set in the construction
:return: click_config_file.configuration_option constructed with args and kwargs defined during construction
and call of this instance
"""
kw_copy = self.kwargs.copy()
kw_copy.update(kwargs)
return click_config_file.configuration_option(*self.args, **kw_copy)
| 36.056338 | 116 | 0.605078 | 280 | 2,560 | 5.382143 | 0.442857 | 0.059721 | 0.039814 | 0.05574 | 0.075647 | 0.053086 | 0 | 0 | 0 | 0 | 0 | 0.001045 | 0.252734 | 2,560 | 70 | 117 | 36.571429 | 0.786722 | 0.639844 | 0 | 0 | 0 | 0 | 0.026711 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.230769 | false | 0 | 0.230769 | 0 | 0.692308 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9ab9d917b353cf0f8ea3e285cac62732af59e404 | 563 | py | Python | python_learning/exception_redefinition.py | KonstantinKlepikov/all-python-ml-learning | a8a41347b548828bb8531ccdab89c622a0be20e1 | [
"MIT"
] | null | null | null | python_learning/exception_redefinition.py | KonstantinKlepikov/all-python-ml-learning | a8a41347b548828bb8531ccdab89c622a0be20e1 | [
"MIT"
] | null | null | null | python_learning/exception_redefinition.py | KonstantinKlepikov/all-python-ml-learning | a8a41347b548828bb8531ccdab89c622a0be20e1 | [
"MIT"
] | 1 | 2020-12-23T19:32:51.000Z | 2020-12-23T19:32:51.000Z | # example of redefinition __repr__ and __str__ of exception
class MyBad(Exception):
def __str__(self):
return 'My mistake!'
class MyBad2(Exception):
def __repr__(self):
        return 'Not callable'  # the built-in Exception.__str__ still takes precedence for print()
try:
raise MyBad('spam')
except MyBad as X:
print(X) # My mistake!
print(X.args) # ('spam',)
try:
raise MyBad2('spam')
except MyBad2 as X:
print(X) # spam
print(X.args) # ('spam',)
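
# Note: repr() does use the override: repr(MyBad2('spam')) -> 'Not callable'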
raise MyBad('spam') # __main__.MyBad: My mistake!
# raise MyBad2('spam') # __main__.MyBad2: spam | 20.107143 | 65 | 0.648313 | 76 | 563 | 4.434211 | 0.407895 | 0.071217 | 0.083086 | 0.053412 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.013699 | 0.222025 | 563 | 28 | 66 | 20.107143 | 0.755708 | 0.358792 | 0 | 0.470588 | 0 | 0 | 0.096866 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.117647 | false | 0 | 0 | 0.117647 | 0.352941 | 0.235294 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 |
9abd5d0a8f6f8a824f776810d4a5b66aeca261fa | 650 | py | Python | lambda-sfn-terraform/src/LambdaFunction.py | extremenelson/serverless-patterns | c307599ab2759567c581c37d70561e85b0fa8788 | [
"MIT-0"
] | 1 | 2022-01-12T17:22:02.000Z | 2022-01-12T17:22:02.000Z | lambda-sfn-terraform/src/LambdaFunction.py | extremenelson/serverless-patterns | c307599ab2759567c581c37d70561e85b0fa8788 | [
"MIT-0"
] | null | null | null | lambda-sfn-terraform/src/LambdaFunction.py | extremenelson/serverless-patterns | c307599ab2759567c581c37d70561e85b0fa8788 | [
"MIT-0"
] | null | null | null | # Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
# SPDX-License-Identifier: MIT-0
import json
import boto3
import os
from aws_lambda_powertools import Logger
logger = Logger()
client = boto3.client('stepfunctions')
sfnArn = os.environ['SFN_ARN']
def lambda_handler(event, context):
logger.info(f"Received Choice: {event['Choice']}")
response = client.start_execution(
stateMachineArn=sfnArn,
input=json.dumps(event)
)
logger.info(f"Received Response: {response}")
return {
'statusCode': 200,
'body': json.dumps(response,default=str)
}
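
# Local smoke-test sketch (assumptions: SFN_ARN is set, AWS credentials are
# configured, and 'Choice' is the input key this handler expects).
if __name__ == "__main__":
    print(lambda_handler({"Choice": "Approve"}, None))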
| 23.214286 | 68 | 0.676923 | 77 | 650 | 5.649351 | 0.675325 | 0.055172 | 0.050575 | 0.087356 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01165 | 0.207692 | 650 | 27 | 69 | 24.074074 | 0.83301 | 0.172308 | 0 | 0 | 0 | 0 | 0.181648 | 0 | 0 | 0 | 0 | 0.037037 | 0 | 1 | 0.055556 | false | 0 | 0.222222 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9ac242f669af4d52c4d497c2811debd7113e2d03 | 691 | py | Python | utils/pad.py | Zenodia/nativePytorch_NMT | bfced09eb6e5476d34619dfc0dd41d4ed610248f | [
"MIT"
] | 60 | 2018-09-28T07:53:11.000Z | 2020-11-06T11:59:07.000Z | utils/pad.py | Pravin74/transformer-pytorch | c31e163ed57321e405771ef7fb556d4d92fd5efb | [
"MIT"
] | 2 | 2021-02-15T14:08:08.000Z | 2021-09-12T12:52:37.000Z | utils/pad.py | Pravin74/transformer-pytorch | c31e163ed57321e405771ef7fb556d4d92fd5efb | [
"MIT"
] | 18 | 2018-09-28T07:56:35.000Z | 2020-11-24T00:11:33.000Z | import torch
import numpy as np
PAD_TOKEN_INDEX = 0
def pad_masking(x, target_len):
# x: (batch_size, seq_len)
batch_size, seq_len = x.size()
padded_positions = x == PAD_TOKEN_INDEX # (batch_size, seq_len)
pad_mask = padded_positions.unsqueeze(1).expand(batch_size, target_len, seq_len)
return pad_mask
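
# Usage sketch (a toy batch; token id 0 is the pad index):
#   x = torch.tensor([[5, 7, 0, 0], [3, 0, 0, 0]])  # (batch_size=2, seq_len=4)
#   pad_masking(x, target_len=4) -> bool mask of shape (2, 4, 4),
#   True wherever the key position is padding.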
def subsequent_masking(x):
# x: (batch_size, seq_len - 1)
batch_size, seq_len = x.size()
subsequent_mask = np.triu(np.ones(shape=(seq_len, seq_len)), k=1).astype('uint8')
subsequent_mask = torch.tensor(subsequent_mask).to(x.device)
subsequent_mask = subsequent_mask.unsqueeze(0).expand(batch_size, seq_len, seq_len)
return subsequent_mask | 32.904762 | 87 | 0.723589 | 109 | 691 | 4.266055 | 0.311927 | 0.129032 | 0.154839 | 0.193548 | 0.154839 | 0.086022 | 0 | 0 | 0 | 0 | 0 | 0.010345 | 0.160637 | 691 | 21 | 88 | 32.904762 | 0.791379 | 0.108538 | 0 | 0.142857 | 0 | 0 | 0.008157 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.142857 | 0 | 0.428571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9ac8a6eee2b79ed601b853802a3795b71f290223 | 5,558 | py | Python | xen/xen-4.2.2/tools/python/scripts/test_vm_create.py | zhiming-shen/Xen-Blanket-NG | 47e59d9bb92e8fdc60942df526790ddb983a5496 | [
"Apache-2.0"
] | 1 | 2018-02-02T00:15:26.000Z | 2018-02-02T00:15:26.000Z | xen/xen-4.2.2/tools/python/scripts/test_vm_create.py | zhiming-shen/Xen-Blanket-NG | 47e59d9bb92e8fdc60942df526790ddb983a5496 | [
"Apache-2.0"
] | null | null | null | xen/xen-4.2.2/tools/python/scripts/test_vm_create.py | zhiming-shen/Xen-Blanket-NG | 47e59d9bb92e8fdc60942df526790ddb983a5496 | [
"Apache-2.0"
] | 1 | 2019-05-27T09:47:18.000Z | 2019-05-27T09:47:18.000Z | #!/usr/bin/python
vm_cfg = {
'name_label': 'APIVM',
'user_version': 1,
'is_a_template': False,
'auto_power_on': False, # TODO
'memory_static_min': 64,
'memory_static_max': 128,
#'memory_dynamic_min': 64,
#'memory_dynamic_max': 128,
'VCPUs_policy': 'credit',
'VCPUs_params': '',
'VCPUs_number': 2,
'actions_after_shutdown': 'destroy',
'actions_after_reboot': 'restart',
'actions_after_crash': 'destroy',
'PV_bootloader': '',
'PV_bootloader_args': '',
'PV_kernel': '/boot/vmlinuz-2.6.18-xenU',
'PV_ramdisk': '',
'PV_args': 'root=/dev/sda1 ro',
#'HVM_boot': '',
'platform_std_VGA': False,
'platform_serial': '',
'platform_localtime': False,
'platform_clock_offset': False,
'platform_enable_audio': False,
'PCI_bus': ''
}
vdi_cfg = {
'name_label': 'API_VDI',
'name_description': '',
'virtual_size': 100 * 1024 * 1024 * 1024,
'type': 'system',
'parent': '',
'SR_name': 'QCoW',
'sharable': False,
'read_only': False,
}
vbd_cfg = {
'VDI': '',
'VM': '',
'device': 'sda2',
'mode': 'RW',
'type': 'disk',
'driver': 'paravirtualised',
}
local_vdi_cfg = {
'name_label': 'gentoo.amd64.img',
'name_description': '',
'virtual_size': 0,
'type': 'system',
'parent': '',
'SR_name': 'Local',
'sharable': False,
'read_only': False,
'other_config': {'location': 'file:/root/gentoo.amd64.img'},
}
local_vbd_cfg = {
'VDI': '',
'VM': '',
'device': 'sda1',
'mode': 'RW',
'type': 'disk',
'driver': 'paravirtualised',
}
vif_cfg = {
'name': 'API_VIF',
'type': 'paravirtualised',
'device': '',
'network': '',
'MAC': '',
'MTU': 1500,
}
console_cfg = {
'protocol': 'rfb',
'other_config': {'vncunused': 1, 'vncpasswd': 'testing'},
}
import sys
import time
from xapi import connect, execute
def test_vm_create():
server, session = connect()
vm_uuid = None
vdi_uuid = None
local_vdi_uuid = None
local_vbd_uuid = None
vbd_uuid = None
vif_uuid = None
# List all VMs
vm_list = execute(server, 'VM.get_all', (session,))
vm_names = []
for vm_uuid in vm_list:
vm_record = execute(server, 'VM.get_record', (session, vm_uuid))
vm_names.append(vm_record['name_label'])
# Get default SR
sr_list = execute(server, 'SR.get_by_name_label', (session,
vdi_cfg['SR_name']))
sr_uuid = sr_list[0]
local_sr_list = execute(server, 'SR.get_by_name_label',
(session, local_vdi_cfg['SR_name']))
local_sr_uuid = local_sr_list[0]
# Get default network
net_list = execute(server, 'network.get_all', (session,))
net_uuid = net_list[0]
try:
# Create a new VM
vm_uuid = execute(server, 'VM.create', (session, vm_cfg))
# Create a new VDI
vdi_cfg['SR'] = sr_uuid
vdi_uuid = execute(server, 'VDI.create', (session, vdi_cfg))
# Create a VDI backed VBD
vbd_cfg['VM'] = vm_uuid
vbd_cfg['VDI'] = vdi_uuid
vbd_uuid = execute(server, 'VBD.create', (session, vbd_cfg))
# Create a new VDI (Local)
local_vdi_cfg['SR'] = local_sr_uuid
local_vdi_uuid = execute(server, 'VDI.create',
(session, local_vdi_cfg))
# Create a new VBD (Local)
local_vbd_cfg['VM'] = vm_uuid
local_vbd_cfg['VDI'] = local_vdi_uuid
local_vbd_uuid = execute(server, 'VBD.create',
(session, local_vbd_cfg))
# Create a new VIF
vif_cfg['network'] = net_uuid
vif_cfg['VM'] = vm_uuid
vif_uuid = execute(server, 'VIF.create', (session, vif_cfg))
# Create a console
console_cfg['VM'] = vm_uuid
console_uuid = execute(server, 'console.create',
(session, console_cfg))
print console_uuid
# Start the VM
execute(server, 'VM.start', (session, vm_uuid, False))
time.sleep(30)
test_suspend = False
if test_suspend:
print 'Suspending VM..'
execute(server, 'VM.suspend', (session, vm_uuid))
print 'Suspended VM.'
time.sleep(5)
print 'Resuming VM ...'
execute(server, 'VM.resume', (session, vm_uuid, False))
print 'Resumed VM.'
finally:
# Wait for user to say we're good to shut it down
while True:
destroy = raw_input('destroy VM? ')
            if destroy[:1] in ('y', 'Y'):
break
# Clean up
if vif_uuid:
execute(server, 'VIF.destroy', (session, vif_uuid))
if local_vbd_uuid:
execute(server, 'VBD.destroy', (session, local_vbd_uuid))
if local_vdi_uuid:
execute(server, 'VDI.destroy', (session, local_vdi_uuid))
if vbd_uuid:
execute(server, 'VBD.destroy', (session, vbd_uuid))
if vdi_uuid:
execute(server, 'VDI.destroy', (session, vdi_uuid))
if vm_uuid:
try:
execute(server, 'VM.hard_shutdown', (session, vm_uuid))
time.sleep(2)
except:
pass
execute(server, 'VM.destroy', (session, vm_uuid))
if __name__ == "__main__":
test_vm_create()
| 26.216981 | 75 | 0.542821 | 649 | 5,558 | 4.366718 | 0.265023 | 0.100917 | 0.071983 | 0.018349 | 0.254411 | 0.164432 | 0.132675 | 0.02964 | 0.02964 | 0.02964 | 0 | 0.01362 | 0.313062 | 5,558 | 211 | 76 | 26.341232 | 0.728654 | 0.062253 | 0 | 0.142857 | 0 | 0 | 0.235657 | 0.022333 | 0 | 0 | 0 | 0.004739 | 0 | 0 | null | null | 0.012987 | 0.019481 | null | null | 0.032468 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9ac99cea9babd92f880b3baa9bf72af575865d84 | 31,044 | py | Python | gomill/mcts_tuners.py | BenisonSam/goprime | 3613f643ee765b4ad48ebdc27bd9f1121b1c5298 | [
"MIT"
] | null | null | null | gomill/mcts_tuners.py | BenisonSam/goprime | 3613f643ee765b4ad48ebdc27bd9f1121b1c5298 | [
"MIT"
] | null | null | null | gomill/mcts_tuners.py | BenisonSam/goprime | 3613f643ee765b4ad48ebdc27bd9f1121b1c5298 | [
"MIT"
] | null | null | null | """Competitions for parameter tuning using Monte-carlo tree search."""
from __future__ import division
import operator
import random
from heapq import nlargest
from math import exp, log, sqrt
from gomill import compact_tracebacks
from gomill import game_jobs
from gomill import competitions
from gomill import competition_schedulers
from gomill.competitions import (
Competition, NoGameAvailable, CompetitionError, ControlFileError,
Player_config)
from gomill.settings import *
class Node(object):
"""A MCTS node.
Public attributes:
children -- list of Nodes, or None for unexpanded
wins
visits
value -- wins / visits
rsqrt_visits -- 1 / sqrt(visits)
"""
def count_tree_size(self):
if self.children is None:
return 1
return sum(child.count_tree_size() for child in self.children) + 1
def recalculate(self):
"""Update value and rsqrt_visits from changed wins and visits."""
self.value = self.wins / self.visits
self.rsqrt_visits = sqrt(1 / self.visits)
def __getstate__(self):
return (self.children, self.wins, self.visits)
def __setstate__(self, state):
self.children, self.wins, self.visits = state
self.recalculate()
__slots__ = (
'children',
'wins',
'visits',
'value',
'rsqrt_visits',
)
def __repr__(self):
return "<Node:%.2f{%s}>" % (self.value, repr(self.children))
class Tree(object):
"""A tree of MCTS nodes representing N-dimensional parameter space.
Parameters (available as read-only attributes):
splits -- subdivisions of each dimension
(list of integers, one per dimension)
max_depth -- number of generations below the root
initial_visits -- visit count for newly-created nodes
initial_wins -- win count for newly-created nodes
exploration_coefficient -- constant for UCT formula (float)
Public attributes:
root -- Node
dimensions -- number of dimensions in the parameter space
All changing state is in the tree of Node objects started at 'root'.
References to 'optimiser_parameters' below mean a sequence of length
'dimensions', whose values are floats in the range 0.0..1.0 representing
a point in this space.
Each node in the tree represents an N-cuboid of parameter space. Each
expanded node has prod(splits) children, tiling its cuboid.
(The splits are the same in each generation.)
Instantiate with:
all parameters listed above
parameter_formatter -- function optimiser_parameters -> string
"""
def __init__(self, splits, max_depth,
exploration_coefficient,
initial_visits, initial_wins,
parameter_formatter):
self.splits = splits
self.dimensions = len(splits)
self.branching_factor = reduce(operator.mul, splits)
self.max_depth = max_depth
self.exploration_coefficient = exploration_coefficient
self.initial_visits = initial_visits
self.initial_wins = initial_wins
self._initial_value = initial_wins / initial_visits
self._initial_rsqrt_visits = 1 / sqrt(initial_visits)
self.format_parameters = parameter_formatter
# map child index -> coordinate vector
# coordinate vector -- tuple length 'dimensions' with values in
# range(splits[d])
# The first dimension changes most slowly.
self._cube_coordinates = []
for child_index in xrange(self.branching_factor):
v = []
i = child_index
for split in reversed(splits):
i, coord = divmod(i, split)
v.append(coord)
v.reverse()
self._cube_coordinates.append(tuple(v))
def new_root(self):
"""Initialise the tree with an expanded root node."""
self.node_count = 1 # For description only
self.root = Node()
self.root.children = None
self.root.wins = self.initial_wins
self.root.visits = self.initial_visits
self.root.value = self.initial_wins / self.initial_visits
self.root.rsqrt_visits = self._initial_rsqrt_visits
self.expand(self.root)
def set_root(self, node):
"""Use the specified node as the tree's root.
This is used when restoring serialised state.
Raises ValueError if the node doesn't have the expected number of
children.
"""
if not node.children or len(node.children) != self.branching_factor:
raise ValueError
self.root = node
self.node_count = node.count_tree_size()
def expand(self, node):
"""Add children to the specified node."""
assert node.children is None
node.children = []
child_count = self.branching_factor
for _ in xrange(child_count):
child = Node()
child.children = None
child.wins = self.initial_wins
child.visits = self.initial_visits
child.value = self._initial_value
child.rsqrt_visits = self._initial_rsqrt_visits
node.children.append(child)
self.node_count += child_count
    def is_ripe(self, node):
        """Say whether a node has been visited enough times to be expanded."""
        return node.visits != self.initial_visits
def parameters_for_path(self, choice_path):
"""Retrieve the point in parameter space given by a node.
choice_path -- sequence of child indices
Returns optimiser_parameters representing the centre of the region
of parameter space represented by the node of interest.
choice_path must represent a path from the root to the node of interest.
"""
lo = [0.0] * self.dimensions
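        # Worked example of the computation below (an illustration): with
        # splits=[2, 2], child 3 has cube coordinates (1, 1), so
        # choice_path=[3] yields [0.75, 0.75], the centre of the
        # upper-right quadrant of the unit square.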
breadths = [1.0] * self.dimensions
for child_index in choice_path:
cube_pos = self._cube_coordinates[child_index]
breadths = [f / split for (f, split) in zip(breadths, self.splits)]
for d, coord in enumerate(cube_pos):
lo[d] += breadths[d] * coord
return [f + .5 * breadth for (f, breadth) in zip(lo, breadths)]
def retrieve_best_parameters(self):
"""Find the parameters with the most promising simulation results.
Returns optimiser_parameters
This walks the tree from the root, at each point choosing the node with
most wins, and returns the parameters corresponding to the leaf node.
"""
simulation = self.retrieve_best_parameter_simulation()
return simulation.get_parameters()
def retrieve_best_parameter_simulation(self):
"""Return the Greedy_simulation used for retrieve_best_parameters."""
simulation = Greedy_simulation(self)
simulation.walk()
return simulation
def get_test_parameters(self):
"""Return a 'typical' optimiser_parameters."""
return self.parameters_for_path([0])
def describe_choice(self, choice):
"""Return a string describing a child's coordinates in its parent."""
return str(self._cube_coordinates[choice]).replace(" ", "")
def describe(self):
"""Return a text description of the current state of the tree.
This currently dumps the full tree to depth 2.
"""
def describe_node(node, choice_path):
parameters = self.format_parameters(
self.parameters_for_path(choice_path))
choice_s = self.describe_choice(choice_path[-1])
return "%s %s %.3f %3d" % (
choice_s, parameters, node.value,
node.visits - self.initial_visits)
root = self.root
wins = root.wins - self.initial_wins
visits = root.visits - self.initial_visits
try:
win_rate = "%.3f" % (wins / visits)
except ZeroDivisionError:
win_rate = "--"
result = [
"%d nodes" % self.node_count,
"Win rate %d/%d = %s" % (wins, visits, win_rate)
]
for choice, node in enumerate(self.root.children):
result.append(" " + describe_node(node, [choice]))
if node.children is None:
continue
for choice2, node2 in enumerate(node.children):
result.append(" " + describe_node(node2, [choice, choice2]))
return "\n".join(result)
def summarise(self, out, summary_spec):
"""Write a summary of the most-visited parts of the tree.
out -- writeable file-like object
summary_spec -- list of ints
summary_spec says how many nodes to describe at each depth of the tree
(so to show only direct children of the root, pass a list of length 1).
"""
def p(s):
print >> out, s
def describe_node(node, choice_path):
parameters = self.format_parameters(
self.parameters_for_path(choice_path))
choice_s = " ".join(map(self.describe_choice, choice_path))
return "%s %-40s %.3f %3d" % (
choice_s, parameters, node.value,
node.visits - self.initial_visits)
def most_visits((child_index, node)):
return node.visits
last_generation = [([], self.root)]
for i, n in enumerate(summary_spec):
depth = i + 1
p("most visited at depth %s" % (depth))
this_generation = []
for path, node in last_generation:
if node.children is not None:
this_generation += [
(path + [child_index], child)
for (child_index, child) in enumerate(node.children)]
for path, node in sorted(
nlargest(n, this_generation, key=most_visits)):
p(describe_node(node, path))
last_generation = this_generation
p("")
class Simulation(object):
"""A single monte-carlo simulation.
Instantiate with the Tree the simulation will run in.
Use the methods in the following order:
run()
get_parameters()
update_stats(b)
describe()
"""
def __init__(self, tree):
self.tree = tree
# list of Nodes
self.node_path = []
# corresponding list of child indices
self.choice_path = []
# bool
self.candidate_won = None
def _choose_action(self, node):
"""Choose the best action from the specified node.
Returns a pair (child index, node)
"""
uct_numerator = (self.tree.exploration_coefficient *
sqrt(log(node.visits)))
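        # UCB1-style urgency: exploitation (child.value) plus an exploration
        # bonus of c * sqrt(log(parent visits)) / sqrt(child visits).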
def urgency((i, child)):
return child.value + uct_numerator * child.rsqrt_visits
start = random.randrange(len(node.children))
children = list(enumerate(node.children))
return max(children[start:] + children[:start], key=urgency)
def walk(self):
"""Choose a node sequence, without expansion."""
node = self.tree.root
while node.children is not None:
choice, node = self._choose_action(node)
self.node_path.append(node)
self.choice_path.append(choice)
def run(self):
"""Choose the node sequence for this simulation.
This walks down from the root, using _choose_action() at each level,
until it reaches a leaf; if the leaf has already been visited, this
expands it and chooses one more action.
"""
self.walk()
node = self.node_path[-1]
if (len(self.node_path) < self.tree.max_depth and
self.tree.is_ripe(node)):
self.tree.expand(node)
choice, child = self._choose_action(node)
self.node_path.append(child)
self.choice_path.append(choice)
def get_parameters(self):
"""Retrieve the parameters corresponding to the simulation's leaf node.
Returns optimiser_parameters
"""
return self.tree.parameters_for_path(self.choice_path)
def update_stats(self, candidate_won):
"""Update the tree's node statistics with the simulation's results.
This updates visits (and wins, if appropriate) for each node in the
simulation's node sequence.
"""
self.candidate_won = candidate_won
for node in self.node_path:
node.visits += 1
if candidate_won:
node.wins += 1
node.recalculate()
self.tree.root.visits += 1
if candidate_won:
self.tree.root.wins += 1 # For description only
self.tree.root.recalculate()
def describe_steps(self):
"""Return a text description of the simulation's node sequence."""
return " ".join(map(self.tree.describe_choice, self.choice_path))
def describe(self):
"""Return a one-line-ish text description of the simulation."""
result = "%s [%s]" % (
self.tree.format_parameters(self.get_parameters()),
self.describe_steps())
if self.candidate_won is not None:
result += (" lost", " won")[self.candidate_won]
return result
def describe_briefly(self):
"""Return a shorter description of the simulation."""
return "%s %s" % (self.tree.format_parameters(self.get_parameters()),
("lost", "won")[self.candidate_won])
class Greedy_simulation(Simulation):
"""Variant of simulation that chooses the node with most wins.
This is used to pick the 'best' parameters from the current state of the
tree.
"""
def _choose_action(self, node):
def wins((i, node)):
return node.wins
return max(enumerate(node.children), key=wins)
parameter_settings = [
Setting('code', interpret_identifier),
Setting('scale', interpret_callable),
Setting('split', interpret_positive_int),
Setting('format', interpret_8bit_string, default=None),
]
class Parameter_config(Quiet_config):
"""Parameter (ie, dimension) description for use in control files."""
# positional or keyword
positional_arguments = ('code',)
# keyword-only
keyword_arguments = tuple(setting.name for setting in parameter_settings
if setting.name != 'code')
class Parameter_spec(object):
"""Internal description of a parameter spec from the configuration file.
Public attributes:
code -- identifier
split -- integer
scale -- function float(0.0..1.0) -> player parameter
format -- string for use with '%'
"""
class Scale_fn(object):
"""Callable implementing a scale function.
Scale_fn classes are used to provide a convenient way to describe scale
functions in the control file (LINEAR, LOG, ...).
"""
class Linear_scale_fn(Scale_fn):
"""Linear scale function.
Instantiate with
lower_bound -- float
upper_bound -- float
integer -- bool (means 'round result to nearest integer')
"""
def __init__(self, lower_bound, upper_bound, integer=False):
self.lower_bound = float(lower_bound)
self.upper_bound = float(upper_bound)
self.range = float(upper_bound - lower_bound)
self.integer = bool(integer)
def __call__(self, f):
result = (f * self.range) + self.lower_bound
if self.integer:
result = int(result + .5)
return result
class Log_scale_fn(Scale_fn):
"""Log scale function.
Instantiate with
lower_bound -- float
upper_bound -- float
integer -- bool (means 'round result to nearest integer')
"""
def __init__(self, lower_bound, upper_bound, integer=False):
if lower_bound == 0.0:
raise ValueError("lower bound is zero")
self.rate = log(upper_bound / lower_bound)
self.lower_bound = lower_bound
self.integer = bool(integer)
def __call__(self, f):
result = exp(self.rate * f) * self.lower_bound
if self.integer:
result = int(result + .5)
return result
class Explicit_scale_fn(Scale_fn):
"""Scale function that returns elements from a list.
Instantiate with the list of values to use.
Normally use this with 'split' equal to the length of the list
(more generally, split**max_depth equal to the length of the list).
"""
def __init__(self, values):
if not values:
raise ValueError("empty value list")
self.values = tuple(values)
self.n = len(values)
def __call__(self, f):
return self.values[int(self.n * f)]
class LINEAR(Config_proxy):
underlying = Linear_scale_fn
class LOG(Config_proxy):
underlying = Log_scale_fn
class EXPLICIT(Config_proxy):
underlying = Explicit_scale_fn
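
# How these scale functions map the optimiser's 0.0..1.0 range (a sketch with
# hypothetical bounds, not values from any real control file):
#   Linear_scale_fn(0.0, 10.0)(0.5)   -> 5.0
#   Log_scale_fn(0.01, 10.0)(0.5)     -> ~0.316 (the geometric midpoint)
#   Explicit_scale_fn([2, 4, 8])(0.5) -> 4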
def interpret_candidate_colour(v):
if v in ('r', 'random'):
return 'random'
else:
return interpret_colour(v)
class Mcts_tuner(Competition):
"""A Competition for parameter tuning using the Monte-carlo tree search.
The game ids are strings containing integers starting from zero.
"""
def __init__(self, competition_code, **kwargs):
Competition.__init__(self, competition_code, **kwargs)
self.outstanding_simulations = {}
self.halt_on_next_failure = True
def control_file_globals(self):
result = Competition.control_file_globals(self)
result.update({
'Parameter': Parameter_config,
'LINEAR': LINEAR,
'LOG': LOG,
'EXPLICIT': EXPLICIT,
})
return result
global_settings = (Competition.global_settings +
competitions.game_settings + [
Setting('number_of_games', allow_none(interpret_int), default=None),
Setting('candidate_colour', interpret_candidate_colour),
Setting('log_tree_to_history_period',
allow_none(interpret_positive_int), default=None),
Setting('summary_spec', interpret_sequence_of(interpret_int),
default=(30,)),
Setting('number_of_running_simulations_to_show', interpret_int,
default=12),
])
special_settings = [
Setting('opponent', interpret_identifier),
Setting('parameters',
interpret_sequence_of_quiet_configs(Parameter_config)),
Setting('make_candidate', interpret_callable),
]
# These are used to instantiate Tree; they don't turn into Mcts_tuner
# attributes.
tree_settings = [
Setting('max_depth', interpret_positive_int, default=1),
Setting('exploration_coefficient', interpret_float),
Setting('initial_visits', interpret_positive_int),
Setting('initial_wins', interpret_positive_int),
]
def parameter_spec_from_config(self, parameter_config):
"""Make a Parameter_spec from a Parameter_config.
Raises ControlFileError if there is an error in the configuration.
Returns a Parameter_spec with all attributes set.
"""
arguments = parameter_config.resolve_arguments()
interpreted = load_settings(parameter_settings, arguments)
pspec = Parameter_spec()
for name, value in interpreted.iteritems():
setattr(pspec, name, value)
optimiser_param = 1.0 / (pspec.split * 2)
try:
scaled = pspec.scale(optimiser_param)
except Exception:
raise ValueError(
"error from scale (applied to %s)\n%s" %
(optimiser_param, compact_tracebacks.format_traceback(skip=1)))
if pspec.format is None:
pspec.format = pspec.code + ":%s"
try:
pspec.format % scaled
except Exception:
raise ControlFileError("'format': invalid format string")
return pspec
def initialise_from_control_file(self, config):
Competition.initialise_from_control_file(self, config)
if self.komi == int(self.komi):
raise ControlFileError("komi: must be fractional to prevent jigos")
competitions.validate_handicap(
self.handicap, self.handicap_style, self.board_size)
try:
specials = load_settings(self.special_settings, config)
except ValueError, e:
raise ControlFileError(str(e))
try:
self.opponent = self.players[specials['opponent']]
except KeyError:
raise ControlFileError(
"opponent: unknown player %s" % specials['opponent'])
self.parameter_specs = []
if not specials['parameters']:
raise ControlFileError("parameters: empty list")
seen_codes = set()
for i, parameter_spec in enumerate(specials['parameters']):
try:
pspec = self.parameter_spec_from_config(parameter_spec)
except StandardError, e:
code = parameter_spec.get_key()
if code is None:
code = i
raise ControlFileError("parameter %s: %s" % (code, e))
if pspec.code in seen_codes:
raise ControlFileError(
"duplicate parameter code: %s" % pspec.code)
seen_codes.add(pspec.code)
self.parameter_specs.append(pspec)
self.candidate_maker_fn = specials['make_candidate']
try:
tree_arguments = load_settings(self.tree_settings, config)
except ValueError, e:
raise ControlFileError(str(e))
self.tree = Tree(splits=[pspec.split for pspec in self.parameter_specs],
parameter_formatter=self.format_optimiser_parameters,
**tree_arguments)
# State attributes (*: in persistent state):
# *scheduler -- Simple_scheduler
# *tree -- Tree (root node is persisted)
# outstanding_simulations -- map game_number -> Simulation
# halt_on_next_failure -- bool
# *opponent_description -- string (or None)
def set_clean_status(self):
self.scheduler = competition_schedulers.Simple_scheduler()
self.tree.new_root()
self.opponent_description = None
# Can bump this to prevent people loading incompatible .status files.
status_format_version = 0
def get_status(self):
# path0 is stored for consistency check
return {
'scheduler': self.scheduler,
'tree_root': self.tree.root,
'opponent_description': self.opponent_description,
'path0': self.scale_parameters(self.tree.parameters_for_path([0])),
}
def set_status(self, status):
root = status['tree_root']
try:
self.tree.set_root(root)
except ValueError:
raise CompetitionError(
"status file is inconsistent with control file")
expected_path0 = self.scale_parameters(
self.tree.parameters_for_path([0]))
if status['path0'] != expected_path0:
raise CompetitionError(
"status file is inconsistent with control file")
self.scheduler = status['scheduler']
self.scheduler.rollback()
self.opponent_description = status['opponent_description']
def scale_parameters(self, optimiser_parameters):
l = []
for pspec, v in zip(self.parameter_specs, optimiser_parameters):
try:
l.append(pspec.scale(v))
except Exception:
raise CompetitionError(
"error from scale for %s\n%s" %
(pspec.code, compact_tracebacks.format_traceback(skip=1)))
return tuple(l)
def format_engine_parameters(self, engine_parameters):
l = []
for pspec, v in zip(self.parameter_specs, engine_parameters):
try:
s = pspec.format % v
except Exception:
s = "[%s?%s]" % (pspec.code, v)
l.append(s)
return "; ".join(l)
def format_optimiser_parameters(self, optimiser_parameters):
return self.format_engine_parameters(self.scale_parameters(
optimiser_parameters))
def make_candidate(self, player_code, engine_parameters):
"""Make a player using the specified engine parameters.
Returns a game_jobs.Player.
"""
try:
candidate_config = self.candidate_maker_fn(*engine_parameters)
except Exception:
raise CompetitionError(
"error from make_candidate()\n%s" %
compact_tracebacks.format_traceback(skip=1))
if not isinstance(candidate_config, Player_config):
raise CompetitionError(
"make_candidate() returned %r, not Player" %
candidate_config)
try:
candidate = self.game_jobs_player_from_config(
player_code, candidate_config)
except Exception, e:
raise CompetitionError(
"bad player spec from make_candidate():\n"
"%s\nparameters were: %s" %
(e, self.format_engine_parameters(engine_parameters)))
return candidate
def get_player_checks(self):
test_parameters = self.tree.get_test_parameters()
engine_parameters = self.scale_parameters(test_parameters)
candidate = self.make_candidate('candidate', engine_parameters)
result = []
for player in [candidate, self.opponent]:
check = game_jobs.Player_check()
check.player = player
check.board_size = self.board_size
check.komi = self.komi
result.append(check)
return result
def choose_candidate_colour(self):
if self.candidate_colour == 'random':
return random.choice('bw')
else:
return self.candidate_colour
def get_game(self):
if (self.number_of_games is not None and
self.scheduler.issued >= self.number_of_games):
return NoGameAvailable
game_number = self.scheduler.issue()
simulation = Simulation(self.tree)
simulation.run()
optimiser_parameters = simulation.get_parameters()
engine_parameters = self.scale_parameters(optimiser_parameters)
candidate = self.make_candidate("#%d" % game_number, engine_parameters)
self.outstanding_simulations[game_number] = simulation
job = game_jobs.Game_job()
job.game_id = str(game_number)
job.game_data = game_number
if self.choose_candidate_colour() == 'b':
job.player_b = candidate
job.player_w = self.opponent
else:
job.player_b = self.opponent
job.player_w = candidate
job.board_size = self.board_size
job.komi = self.komi
job.move_limit = self.move_limit
job.handicap = self.handicap
job.handicap_is_free = (self.handicap_style == 'free')
job.use_internal_scorer = (self.scorer == 'internal')
job.internal_scorer_handicap_compensation = \
self.internal_scorer_handicap_compensation
job.sgf_event = self.competition_code
job.sgf_note = ("Candidate parameters: %s" %
self.format_engine_parameters(engine_parameters))
return job
def process_game_result(self, response):
self.halt_on_next_failure = False
self.opponent_description = response.engine_descriptions[
self.opponent.code].get_long_description()
game_number = response.game_data
self.scheduler.fix(game_number)
# Counting no-result as loss for the candidate
candidate_won = (
response.game_result.losing_player == self.opponent.code)
simulation = self.outstanding_simulations.pop(game_number)
simulation.update_stats(candidate_won)
self.log_history(simulation.describe())
if (self.log_tree_to_history_period is not None and
self.scheduler.fixed % self.log_tree_to_history_period == 0):
self.log_history(self.tree.describe())
return "%s %s" % (simulation.describe(),
response.game_result.sgf_result)
def process_game_error(self, job, previous_error_count):
## If the very first game to return a response gives an error, halt.
## If two games in a row give an error, halt.
## Otherwise, forget about the failed game
stop_competition = False
retry_game = False
game_number = job.game_data
del self.outstanding_simulations[game_number]
self.scheduler.fix(game_number)
if self.halt_on_next_failure:
stop_competition = True
else:
self.halt_on_next_failure = True
return stop_competition, retry_game
def write_static_description(self, out):
def p(s):
print >> out, s
p("MCTS tuning event: %s" % self.competition_code)
if self.description:
p(self.description)
p("board size: %s" % self.board_size)
p("komi: %s" % self.komi)
def _write_main_report(self, out):
games_played = self.scheduler.fixed
if self.number_of_games is None:
print >> out, "%d games played" % games_played
else:
print >> out, "%d/%d games played" % (
games_played, self.number_of_games)
print >> out
best_simulation = self.tree.retrieve_best_parameter_simulation()
print >> out, "Best parameters: %s" % best_simulation.describe()
print >> out
self.tree.summarise(out, self.summary_spec)
def write_screen_report(self, out):
self._write_main_report(out)
if self.outstanding_simulations:
print >> out, "In progress:"
to_show = sorted(self.outstanding_simulations.iteritems()) \
[:self.number_of_running_simulations_to_show]
for game_id, simulation in to_show:
print >> out, "game %s: %s" % (game_id, simulation.describe())
def write_short_report(self, out):
self.write_static_description(out)
self._write_main_report(out)
if self.opponent_description:
print >> out, "opponent (%s): %s" % (
self.opponent.code, self.opponent_description)
else:
print >> out, "opponent: %s" % self.opponent.code
print >> out
write_full_report = write_short_report
| 35.077966 | 95 | 0.616544 | 3,567 | 31,044 | 5.173255 | 0.143258 | 0.011272 | 0.009213 | 0.007478 | 0.225004 | 0.14751 | 0.100905 | 0.079553 | 0.072075 | 0.054842 | 0 | 0.003019 | 0.295677 | 31,044 | 884 | 96 | 35.117647 | 0.840933 | 0.031439 | 0 | 0.155235 | 0 | 0 | 0.053451 | 0.00358 | 0 | 0 | 0 | 0 | 0.001805 | 0 | null | null | 0 | 0.019856 | null | null | 0.021661 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9acbd6e09016763ff8a75cf2e88c6a01d873ad9c | 9,705 | py | Python | endoscopic_ai.py | dennkitotaichi/AI_prediction_for_patients_with_colorectal_polyps | afbad36cb3fc2de31665fc3b0a7f065b7e6564a0 | [
"MIT"
] | null | null | null | endoscopic_ai.py | dennkitotaichi/AI_prediction_for_patients_with_colorectal_polyps | afbad36cb3fc2de31665fc3b0a7f065b7e6564a0 | [
"MIT"
] | null | null | null | endoscopic_ai.py | dennkitotaichi/AI_prediction_for_patients_with_colorectal_polyps | afbad36cb3fc2de31665fc3b0a7f065b7e6564a0 | [
"MIT"
] | null | null | null | import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
import codecs
import lightgbm as lgb
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.metrics import mean_squared_error
from sklearn.metrics import r2_score
# Read data
image_file_path = './simulated_dpc_data.csv'
with codecs.open(image_file_path, "r", "Shift-JIS", "ignore") as file:
dpc = pd.read_table(file, delimiter=",")
# dpc_r, g_dpc_r_1, g_r: restricted data from dpc
dpc_r=dpc.loc[:, ['ID','code']]
# g_dpc_r_1: kept so that the human-readable 'name' of each code can be looked up later
g_dpc_r_1 = dpc.loc[:, ['ID', 'code', 'name']]
# Dummy-encode the 'code' column
g_r = pd.get_dummies(dpc_r['code'])
# Reconstruct simulated data for AI learning
df_concat_dpc_get_dummies = pd.concat([dpc_r, g_r], axis=1)
# Remove features that may be the cause of the data leak
dpc_Remove_data_leak = df_concat_dpc_get_dummies.drop(["code",160094710,160094810,160094910,150285010,2113008,8842965,8843014,622224401,810000000,160060010], axis=1)
# Sum up the number of occurrences of each feature for each patient.
total_patient_features = dpc_Remove_data_leak.groupby("ID").sum()
total_patient_features = total_patient_features.reset_index()
# Load a new file with ID and treatment status (the prediction target)
# Prepare training data
image_file_path_ID_and_polyp_pn = './simulated_patient_data.csv'
with codecs.open(image_file_path_ID_and_polyp_pn, "r", "Shift-JIS", "ignore") as file:
ID_and_polyp_pn = pd.read_table(file, delimiter=",")
ID_and_polyp_pn_data= ID_and_polyp_pn[['ID', 'target']]
# Merge the ID/treatment-status file with the per-patient dummy-encoded features
ID_treatment_medical_statement=pd.merge(ID_and_polyp_pn_data,total_patient_features,on=["ID"],how='outer')
ID_treatment_medical_statement_o= ID_treatment_medical_statement.fillna(0)
ID_treatment_medical_statement_p=ID_treatment_medical_statement_o.drop("ID", axis=1)
# The raw 'code' column was already dropped above, so no renaming is needed here
merge_data = ID_treatment_medical_statement_p
# Split the training/validation set into 80% and the test set into 20%, with a constant proportion of cases with lesions
X = merge_data.drop("target",axis=1).values
y = merge_data["target"].values
columns_name = merge_data.drop("target",axis=1).columns
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.2,random_state=1)
# Create a function to divide data
def data_split(X,y):
for train_index, test_index in sss.split(X, y):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
X_train = pd.DataFrame(X_train, columns=columns_name)
X_test = pd.DataFrame(X_test, columns=columns_name)
return X_train, y_train, X_test, y_test
# Separate into training, validation, and test set
X_train, y_train, X_test, y_test = data_split(X, y)
X_train, y_train, X_val, y_val = data_split(X_train.values, y_train)
# Make test set into pandas
X_test_df = pd.DataFrame(X_test)
y_test_df = pd.DataFrame(y_test)
# Set the test set aside as test_df for the final evaluation
test_dfp = pd.concat([y_test_df,X_test_df], axis=1)
test_df=test_dfp.rename(columns={0:"target"})
# Make training/validation sets into pandas
y_trainp = pd.DataFrame(y_train)
X_trainp = pd.DataFrame(X_train)
train=pd.concat([y_trainp, X_trainp], axis=1)
y_valp = pd.DataFrame(y_val)
X_valp = pd.DataFrame(X_val)
val=pd.concat([y_valp, X_valp], axis=1)
train_val = pd.concat([train, val])
training_validation_sets = train_val.rename(columns={0: "target"})
# Create a function to save the results and feature importance after analysis with lightGBM
def reg_top10_lightGBM(merge_data,outname,no,random_state_number):
# Define the objective variable
X = merge_data.drop("target",axis=1).values
y = merge_data["target"].values
columns_name = merge_data.drop("target",axis=1).columns
# Define a function
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=random_state_number)
def data_split(X,y):
for train_index, test_index in sss.split(X, y):
X_train, X_test = X[train_index], X[test_index]
y_train, y_test = y[train_index], y[test_index]
X_train = pd.DataFrame(X_train, columns=columns_name)
X_test = pd.DataFrame(X_test, columns=columns_name)
return X_train, y_train, X_test, y_test
X_train, y_train, X_test, y_test = data_split(X, y)
X_train, y_train, X_val, y_val = data_split(X_train.values, y_train)
y_test_df = pd.DataFrame(y_test)
# Prepare dataset: training data: X_train, label: y_train
train = lgb.Dataset(X_train, label=y_train)
valid = lgb.Dataset(X_val, label=y_val)
# Set the parameters
params = {'task': 'train',
'boosting_type': 'gbdt',
'objective': 'regression',
'metric': 'rmse',
'learning_rate': 0.1 }
# Train the model
model = lgb.train(params,
train,
valid_sets=valid,
num_boost_round=3000,
early_stopping_rounds=100)
# Prediction
y_pred = model.predict(X_test, num_iteration=model.best_iteration)
# Display actual values and predicted values
df_pred = pd.DataFrame({'regression_y_test':y_test,'regression_y_pred':y_pred})
    # Calculate MSE (mean squared error)
    mse = mean_squared_error(y_test, y_pred)
    # Calculate RMSE = √MSE
    rmse = np.sqrt(mse)
# r2 : Calculate the coefficient of determination
r2 = r2_score(y_test,y_pred)
df_Df = pd.DataFrame({'regression_y_test_'+no:y_test,'regression_y_pred_'+no:y_pred,'RMSE_'+no:rmse,'R2_'+no:r2})
df_Df.to_csv(r""+"./"+outname+no+'.csv', encoding = 'shift-jis')
importance = pd.DataFrame(model.feature_importance(), columns=['importance'])
column_list=merge_data.drop(["target"], axis=1)
importance["columns"] =list(column_list.columns)
return importance
# Find the top-50 features: run the model once
importance = reg_top10_lightGBM(training_validation_sets,"check_data","_1",1)
# Create a function that sorts and stores the values of feature importance.
def after_imp_save_sort(importance,outname,no):
    # Sort features by importance in descending order
    i_df = importance.sort_values(by='importance', ascending=False)
    top50 = i_df.iloc[0:50, :]  # exactly the top 50 rows (iloc end is exclusive)
g_dpc_pre= g_dpc_r_1.drop(["ID"], axis=1)
g_dpc_Remove_duplicates=g_dpc_pre.drop_duplicates()
g_dpc_r_columns=g_dpc_Remove_duplicates.rename(columns={'code':"columns"})
importance_name=pd.merge(top50,g_dpc_r_columns)
importance_all=pd.merge(i_df,g_dpc_r_columns)
importance_all.to_csv(r""+"./"+outname+no+'importance_name_all'+'.csv', encoding = 'shift-jis')
return importance_all
# Run a function to sort and save the values of feature importance.
top50_importance_all = after_imp_save_sort(importance,"check_data","_1")
# Run the procedure 10 times
importance_runs = {}  # avoid shadowing the built-in `dict`
for num in range(10):
    print(num + 1)
    importance = reg_top10_lightGBM(training_validation_sets, "check_data", "_" + str(num + 1), num + 1)
    top50_importance_all = after_imp_save_sort(importance, "check_data", "_" + str(num + 1))
    importance_runs[str(num)] = top50_importance_all
# Recall and merge the saved feature-importance tables
def concat_importance(First_pd, Next_pd):
    importance_1 = pd.DataFrame(importance_runs[First_pd])
    importance_1d = importance_1.drop_duplicates(subset='columns')
    importance_2 = pd.DataFrame(importance_runs[Next_pd])
    importance_2d = importance_2.drop_duplicates(subset='columns')
    importance_1_2 = pd.concat([importance_1d, importance_2d])
    return importance_1_2
importance_1_2 = concat_importance("0","1")
importance_3_4 = concat_importance("2","3")
importance_5_6 = concat_importance("4","5")
importance_7_8 = concat_importance("6","7")
importance_9_10 = concat_importance("8","9")
importance_1_4=pd.concat([importance_1_2, importance_3_4])
importance_1_6=pd.concat([importance_1_4, importance_5_6])
importance_1_8=pd.concat([importance_1_6, importance_7_8])
importance_1_10=pd.concat([importance_1_8, importance_9_10])
# Calculate the total value of the feature importance for each code
group_sum=importance_1_10.groupby(["columns"]).sum()
group_sum_s = group_sum.sort_values('importance', ascending=False)
importance_group_sum=group_sum_s.reset_index()
# Recombine the training/validation sets with the test set (all features)
merge_data_test=pd.concat([training_validation_sets, test_df])
# Order features by highest total feature-importance value
importance_top50_previous_data=importance_group_sum["columns"]
importance_top50_previous_data
# Refine the data to the top 50 features
dict_top50 = {}
pycaret_dict_top50 = {}
for i in range(50):
    # use the same index for the feature name and its column so they stay aligned
    feature = importance_top50_previous_data.iloc[i]
    dict_top50[str(i)] = feature
    pycaret_dict_top50[feature] = merge_data_test[feature]
pycaret_df_dict_top50=pd.DataFrame(pycaret_dict_top50)
# Add the value of target (: objective variable)
target_data=merge_data_test["target"]
target_top50_dataframe=pd.concat([target_data, pycaret_df_dict_top50], axis=1)
# Adjust the DataFrame for pycaret: cast values to int and column names to str
target_top50_dataframe_int=target_top50_dataframe.astype('int')
target_top50_dataframe_columns=target_top50_dataframe_int.columns.astype(str)
numpy_target_top50=target_top50_dataframe_int.to_numpy()
target_top50_dataframe_pycaret=pd.DataFrame(numpy_target_top50,columns=target_top50_dataframe_columns)
# Compare candidate models with pycaret
from pycaret.classification import *
clf1 = setup(target_top50_dataframe_pycaret, target ='target',train_size = 0.8,data_split_shuffle=False,fold=10,session_id=0)
best_model = compare_models()
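# Optional follow-up (a sketch, not part of the original script): persist the
# comparison leaderboard and the winning model with standard pycaret helpers.
leaderboard = pull()  # results table from the last compare_models() call
leaderboard.to_csv('./model_comparison.csv', encoding='shift-jis')
save_model(best_model, 'best_model_top50_features')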
| 48.525 | 165 | 0.757651 | 1,544 | 9,705 | 4.446244 | 0.180699 | 0.016606 | 0.02622 | 0.031464 | 0.306336 | 0.215149 | 0.198252 | 0.17276 | 0.162855 | 0.146249 | 0 | 0.03331 | 0.130757 | 9,705 | 199 | 166 | 48.768844 | 0.780346 | 0.188253 | 0 | 0.176871 | 0 | 0 | 0.071192 | 0.006634 | 0 | 0 | 0 | 0 | 0 | 1 | 0.034014 | false | 0 | 0.319728 | 0 | 0.387755 | 0.006803 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
9acd3d20a14d9e96bec466426e861a98197f22b0 | 330 | py | Python | src/the_impossible/live/migrations/newsletter/migrations/0002_auto_20200514_1518.py | micha31r/The-Impossible | 7a79dea3169907eb93107107f4003c5813de58dc | [
"MIT"
] | null | null | null | src/the_impossible/live/migrations/newsletter/migrations/0002_auto_20200514_1518.py | micha31r/The-Impossible | 7a79dea3169907eb93107107f4003c5813de58dc | [
"MIT"
] | 2 | 2020-04-15T03:57:42.000Z | 2020-06-06T01:43:34.000Z | src/the_impossible/live/migrations/newsletter/migrations/0002_auto_20200514_1518.py | micha31r/The-Impossible | 7a79dea3169907eb93107107f4003c5813de58dc | [
"MIT"
] | null | null | null | # Generated by Django 2.2.7 on 2020-05-14 03:18
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('newsletter', '0001_initial'),
]
operations = [
migrations.RenameModel(
old_name='Newsletter',
new_name='Subscriber',
),
]
| 18.333333 | 47 | 0.593939 | 34 | 330 | 5.676471 | 0.794118 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.081545 | 0.293939 | 330 | 17 | 48 | 19.411765 | 0.746781 | 0.136364 | 0 | 0 | 1 | 0 | 0.14841 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9ad11bb35b11a89ca5873c299ffa8f65fee28a06 | 3,694 | py | Python | test/test_contacts_info_from_main_page.py | OlgaZtv/python_training | 661165613ef4b9545345a8a2c61a894571ded703 | [
"Apache-2.0"
] | null | null | null | test/test_contacts_info_from_main_page.py | OlgaZtv/python_training | 661165613ef4b9545345a8a2c61a894571ded703 | [
"Apache-2.0"
] | null | null | null | test/test_contacts_info_from_main_page.py | OlgaZtv/python_training | 661165613ef4b9545345a8a2c61a894571ded703 | [
"Apache-2.0"
] | null | null | null | import re
from model.contact import Contact
def test_contact_info_from_home_page(app, db):
app.navigation.open_home_page()
contact_from_home_page = sorted(app.contact.get_contact_list(), key=Contact.id_or_max)
def clean(contact):
return Contact(id=contact.id, firstname=contact.firstname.strip(), lastname=contact.lastname.strip(),
address=contact.address.strip(),
home=contact.home, mobile=contact.mobile, phone2=contact.phone2,
email=contact.email, email2=contact.email2, email3=contact.email3)
contact_from_db_list = list(map(clean, db.get_contact_list()))
print("Contacts_from_home_page>>>>", contact_from_home_page)
print("Contacts_from_DB>>>>", contact_from_db_list)
i = 0
for item in contact_from_home_page:
assert item.address == contact_from_db_list[i].address
assert item.lastname == contact_from_db_list[i].lastname.strip()
assert item.firstname == contact_from_db_list[i].firstname.strip()
assert item.all_phones_from_home_page == merge_phones_like_on_home_page(contact_from_db_list[i])
assert item.all_emails_from_home_page == merge_emails_like_on_home_page(contact_from_db_list[i])
i += 1
def clear(s):
return re.sub("[() -]", "", s)
def merge_phones_like_on_home_page(contact):
return "\n".join(filter(lambda x: x != "",
map(lambda x: clear(x),
filter(lambda x: x is not None,
[contact.home, contact.mobile, contact.work, contact.phone2]))))
def merge_emails_like_on_home_page(contact):
return "\n".join(filter(lambda x: x != "",
map(lambda x: clear(x),
filter(lambda x: x is not None, [contact.email, contact.email2, contact.email3]))))
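# Illustrative behaviour of the helpers above (hypothetical Contact values):
#   clear("(123) 45-6")  ->  "123456"
#   merge_phones_like_on_home_page(
#       Contact(home="(123) 45", mobile=None, work="6-7", phone2=""))
#   ->  "12345\n67"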
# def test_contacts(app, ormdb):
# random_index = randrange(app.contact.count())
# # take all contacts from the main page
# contact_from_home_page = app.contact.get_contact_list()
# # take all contact records from the database
# contact_from_db = ormdb.get_contact_list()
# # compare the lists, sorted
# assert sorted(contact_from_home_page, key=Contact.id_or_max) == sorted(contact_from_db, key=Contact.id_or_max)
# def test_contact_info_on_main_page(app):
# if app.contact.amount() == 0:
# app.contact.create(
# Contact(firstname="TestTest", middlename="Test", lastname="Testing", nickname="testing",
# title="test", company="Test test", address="Spb", home="000222111",
# mobile="444555222", work="99966655", fax="11122255", email="test@tesr.ru",
# email2="test2@test.ru", email3="test3@test.ru", homepage="www.test.ru", bday="15",
# bmonth="May", byear="1985", aday="14", amonth="June", ayear="1985",
# address2="Spb", phone2="111111", notes="Friend"))
# random_index = randrange(app.contact.amount())
# contact_from_home_page = app.contact.get_contact_list()[random_index]
# contact_from_edit_page = app.contact.get_contact_info_from_edit_page(random_index)
# assert contact_from_home_page.all_phones_from_home_page == merge_phones_like_on_home_page(contact_from_edit_page)
# assert contact_from_home_page.firstname == contact_from_edit_page.firstname
# assert contact_from_home_page.lastname == contact_from_edit_page.lastname
# assert contact_from_home_page.address == contact_from_edit_page.address
# assert contact_from_home_page.all_emails_from_home_page == merge_emails_like_on_home_page(contact_from_edit_page)
| 52.028169 | 119 | 0.67542 | 493 | 3,694 | 4.726166 | 0.235294 | 0.122747 | 0.087554 | 0.0897 | 0.414592 | 0.297854 | 0.236052 | 0.226609 | 0.219742 | 0.176824 | 0 | 0.023858 | 0.205739 | 3,694 | 70 | 120 | 52.771429 | 0.770279 | 0.472658 | 0 | 0.125 | 0 | 0 | 0.029703 | 0.01407 | 0 | 0 | 0 | 0 | 0.15625 | 1 | 0.15625 | false | 0 | 0.0625 | 0.125 | 0.34375 | 0.0625 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 |
9ad242baf7204452ac38c08eb06958775483a1b5 | 1,790 | py | Python | benchmark.py | raonyguimaraes/machinelearning | 03b18e5c69931c4ee2ea4803de72c846aba97bce | [
"MIT"
] | 1 | 2016-10-23T19:45:12.000Z | 2016-10-23T19:45:12.000Z | benchmark.py | raonyguimaraes/machinelearning | 03b18e5c69931c4ee2ea4803de72c846aba97bce | [
"MIT"
] | null | null | null | benchmark.py | raonyguimaraes/machinelearning | 03b18e5c69931c4ee2ea4803de72c846aba97bce | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Writing Our First Classifier - Machine Learning Recipes #5
#https://www.youtube.com/watch?v=AoeEHqVSNOw&list=PLOU2XLYxmsIIuiBfYad6rFYQU_jL2ryal&index=1
from scipy.spatial import distance
from sklearn.neighbors import KNeighborsClassifier
from sklearn.metrics import accuracy_score
from sklearn import datasets
from sklearn.model_selection import train_test_split  # sklearn.cross_validation was removed in scikit-learn 0.20
import numpy as np
def euc(a,b):
return distance.euclidean(a,b)
class ScrappyKNN():
def fit(self, X_train, y_train):
self.X_train = X_train
self.y_train = y_train
def predict(self, X_test):
predictions = []
for row in X_test:
label = self.closest(row)
predictions.append(label)
return predictions
def closest(self, row):
best_dist = euc(row, self.X_train[0])
best_index = 0
for i in range(1,len(self.X_train)):
dist = euc(row, self.X_train[i])
if dist < best_dist:
best_dist = dist
best_index = i
return self.y_train[best_index]
iris = datasets.load_iris()
X = iris.data
y = iris.target
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size = .5)
# from sklearn.neighbors import KNeighborsClassifier
my_classifier = ScrappyKNN()
my_classifier_sklearn = KNeighborsClassifier()
accuracies = []
for i in range(0, 1000):
    my_classifier.fit(X_train, y_train)
    predictions = my_classifier.predict(X_test)
    accuracy = accuracy_score(y_test, predictions)
    accuracies.append(accuracy)
print('ScrappyKNN accuracy mean:', np.mean(accuracies))
accuracies = []
for i in range(0, 1000):
    my_classifier_sklearn.fit(X_train, y_train)
    predictions = my_classifier_sklearn.predict(X_test)
    accuracy = accuracy_score(y_test, predictions)
    accuracies.append(accuracy)
print('sklearn accuracy mean:', np.mean(accuracies)) | 24.189189 | 92 | 0.754749 | 266 | 1,790 | 4.890977 | 0.315789 | 0.041507 | 0.038432 | 0.027671 | 0.381245 | 0.267487 | 0.236741 | 0.236741 | 0.178324 | 0.119908 | 0 | 0.013012 | 0.141341 | 1,790 | 74 | 93 | 24.189189 | 0.833442 | 0.135196 | 0 | 0.170213 | 0 | 0 | 0.03046 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.12766 | null | null | 0.042553 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9ad3d0b300ea5b2d36712d2ed1f19a77b925f25f | 383 | py | Python | plaintext_password/checks.py | bryanwills/django-plaintext-password | 752cf0316cdc45dc9bed5f9107614881d613647f | [
"MIT"
] | null | null | null | plaintext_password/checks.py | bryanwills/django-plaintext-password | 752cf0316cdc45dc9bed5f9107614881d613647f | [
"MIT"
] | null | null | null | plaintext_password/checks.py | bryanwills/django-plaintext-password | 752cf0316cdc45dc9bed5f9107614881d613647f | [
"MIT"
] | 2 | 2021-04-23T08:24:08.000Z | 2022-03-01T06:56:33.000Z | from django.contrib.auth.hashers import get_hashers_by_algorithm
from django.core import checks
@checks.register(checks.Tags.security, deploy=True)
def check_for_plaintext_passwords(app_configs, **kwargs):
if "plaintext" in get_hashers_by_algorithm():
yield checks.Critical(
"Plaintext module should not be used in production.", hint="Remove it."
)
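# Illustrative only (not part of the original module): the check fires when a
# hasher whose algorithm is "plaintext" is enabled, e.g. via a hypothetical
# entry in PASSWORD_HASHERS:
#
#   PASSWORD_HASHERS = [
#       "plaintext_password.hashers.PlaintextPasswordHasher",  # hypothetical path
#       "django.contrib.auth.hashers.PBKDF2PasswordHasher",
#   ]
#
# Running `manage.py check --deploy` would then report the Critical message above.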
| 34.818182 | 83 | 0.744125 | 51 | 383 | 5.392157 | 0.72549 | 0.072727 | 0.087273 | 0.152727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.167102 | 383 | 10 | 84 | 38.3 | 0.862069 | 0 | 0 | 0 | 0 | 0 | 0.180157 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0.125 | 0.25 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
9ad633a8b545c9fd60433dd7e1485b51abf58bfc | 1,265 | py | Python | app/user/models.py | briankaemingk/streaks-with-todoist | c6cbc982fbedafce04e9f23af7422e996513c8bb | [
"MIT"
] | 3 | 2019-08-06T19:04:32.000Z | 2022-01-19T14:00:12.000Z | app/user/models.py | briankaemingk/streaks-with-todoist | c6cbc982fbedafce04e9f23af7422e996513c8bb | [
"MIT"
] | 6 | 2018-10-14T21:32:58.000Z | 2021-03-20T00:07:56.000Z | app/user/models.py | briankaemingk/streaks-with-todoist | c6cbc982fbedafce04e9f23af7422e996513c8bb | [
"MIT"
] | null | null | null | from app.extensions import db
from flask import current_app
class User(db.Model):
__tablename__ = 'users'
id = db.Column(db.Integer, primary_key=True)
access_token = db.Column(db.String())
jit_feature = db.Column(db.Boolean())
recurrence_resch_feature = db.Column(db.Boolean())
streaks_feature = db.Column(db.Boolean())
in_line_comment_feature = db.Column(db.Boolean())
def __init__(self, id, access_token, jit_feature, recurrence_resch_feature, streaks_feature, in_line_comment_feature):
self.id = id
self.access_token = access_token
self.jit_feature = jit_feature
self.recurrence_resch_feature = recurrence_resch_feature
self.streaks_feature = streaks_feature
self.in_line_comment_feature = in_line_comment_feature
def __repr__(self):
return '<id {}, access token {}, jit feature {}, recurrence resch feature {}, streaks feature {}, in-line comment feature {}>'.\
format(self.id, self.access_token, self.jit_feature, self.recurrence_resch_feature, self.streaks_feature, self.in_line_comment_feature)
    def launch_task(self, name, description, *args, **kwargs):
        # Enqueue the task `app.tasks.<name>` for this user on the app's task
        # queue (RQ-style enqueue); `description` is accepted but currently unused.
        current_app.task_queue.enqueue('app.tasks.' + name, self.id, *args, **kwargs)
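# Example (illustrative; assumes an RQ-style queue on current_app.task_queue
# and a task function app.tasks.update_streaks defined elsewhere):
#   user = User.query.get(some_id)
#   user.launch_task('update_streaks', 'Recompute streaks for this user')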
| 38.333333 | 147 | 0.714625 | 167 | 1,265 | 5.071856 | 0.269461 | 0.090909 | 0.070838 | 0.141677 | 0.604486 | 0.413223 | 0.276269 | 0.186541 | 0.186541 | 0.186541 | 0 | 0 | 0.177075 | 1,265 | 32 | 148 | 39.53125 | 0.813641 | 0 | 0 | 0 | 0 | 0.045455 | 0.104762 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136364 | false | 0 | 0.090909 | 0.045455 | 0.636364 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
9ad672b90b5e5960648f597358159ab9f9c375ec | 5,060 | py | Python | Invaders/Displays/animation_display.py | JaredsGames/SpaceInvaders | 8a0da236c97340c4a8a06e7dd68e4672f885d9e0 | [
"MIT"
] | null | null | null | Invaders/Displays/animation_display.py | JaredsGames/SpaceInvaders | 8a0da236c97340c4a8a06e7dd68e4672f885d9e0 | [
"MIT"
] | null | null | null | Invaders/Displays/animation_display.py | JaredsGames/SpaceInvaders | 8a0da236c97340c4a8a06e7dd68e4672f885d9e0 | [
"MIT"
] | null | null | null | # Jared Dyreson
# CPSC 386-01
# 2021-11-29
# jareddyreson@csu.fullerton.edu
# @JaredDyreson
#
# Lab 00-04
#
# Some filler text
#
"""
This module contains the Intro display class
"""
import pygame
import functools
import sys
import pathlib
import typing
import os
import dataclasses
import random
from pprint import pprint as pp
import time
from Invaders.Dataclasses.point import Point
from Invaders.Displays.display import Display
from Invaders.UI.button import Button
# from Invaders.Entities.cacodemon import Cacodemon
# from Invaders.Entities.Entity import Entity
from Invaders.Entities.enemy_matrix import EnemyMatrix
# from Invaders.Entities.Player import Player
from Invaders.Entities.Entity import Entity
from Invaders.Dataclasses.direction import Direction
# TODO : move this to its own respective module or something like that
def absolute_file_paths(directory: pathlib.Path) -> typing.List[pathlib.Path]:
"""
List the contents of a directory with their absolute path
@param directory: path where to look
@return: typing.List[pathlib.Path]
"""
return [
pathlib.Path(os.path.abspath(os.path.join(dirpath, f)))
for dirpath, _, filenames in os.walk(directory)
for f in filenames
]
class AnimationDisplay(Display):
def __init__(self):
super().__init__()
self.break_from_draw = False
self.entities = EnemyMatrix(5, 5, self._display_surface)
self.main_player = Entity(
self._display_surface, ["assets/rocket.png"], Point(550, 700)
)
# self.main_player = Player(self._display_surface, [
# "assets/rocket.png"], Point(550, 700))
self.DRAW_NEXT_ENTITY = pygame.USEREVENT + 1
self.ENEMY_FIRE_INTERVAL = pygame.USEREVENT + 2
self.score, self.lives = 0, 3
self.score_label_position = Point(775, 20)
self.lives_label_position = Point(775, 60)
def draw(self) -> None:
draw_loop = True
pygame.time.set_timer(self.DRAW_NEXT_ENTITY, 300)
pygame.time.set_timer(self.ENEMY_FIRE_INTERVAL, 2000)
will_move = False
enemy_group = pygame.sprite.Group()
player_group = pygame.sprite.Group()
enemy_laser_group = pygame.sprite.Group()
player_group.add(self.main_player)
# print(player_group)
        for row in self.entities.matrix:
            for cell in row:
                enemy_group.add(cell.entity)
# FIXME
while draw_loop and not self.break_from_draw:
positions = self.entities.scan_column() # FIXME: this code is not working
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
sys.exit()
elif event.type == self.DRAW_NEXT_ENTITY:
self._display_surface.fill(pygame.Color("black"))
enemy_group.update(1)
elif event.type == self.ENEMY_FIRE_INTERVAL:
for position in random.choices(positions, k=2):
x, y = position.container
__laser = self.entities.matrix[x][y].entity.fire(
Direction.SOUTH.value, True
)
enemy_laser_group.add(__laser)
elif event.type == pygame.KEYDOWN:
if event.key == pygame.K_SPACE:
self.main_player.fire(Direction.NORTH.value)
if event.key == pygame.K_LEFT:
self.main_player.position.x -= 20
if event.key == pygame.K_RIGHT:
self.main_player.position.x += 20
will_move = True
elif event.type != pygame.KEYDOWN:
will_move = False
if pygame.sprite.groupcollide(
self.main_player.lasers, enemy_group, True, True
):
self.score += 20
if pygame.sprite.groupcollide(
enemy_laser_group, player_group, False, False
):
print("hit the player!")
self.lives -= 1
self._display_surface.fill(self.background_color)
enemy_group.draw(self._display_surface)
self.main_player.draw()
self.main_player.lasers.draw(self._display_surface)
enemy_laser_group.draw(self._display_surface)
enemy_laser_group.update()
if not enemy_group:
draw_loop = False
self.write_text(
f"Score: {self.score}",
self.score_label_position,
pygame.font.SysFont(None, 30),
)
self.write_text(
f"Lives: {self.lives}",
self.lives_label_position,
pygame.font.SysFont(None, 30),
)
self.main_player.update(1)
pygame.display.flip()
self.fps_meter.tick(60)
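# Minimal usage sketch (illustrative; assumes the Display base class
# initialises pygame and provides the _display_surface, background_color,
# write_text and fps_meter attributes used above):
#
#   pygame.init()
#   AnimationDisplay().draw()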
| 32.025316 | 86 | 0.594862 | 588 | 5,060 | 4.945578 | 0.30102 | 0.02751 | 0.048143 | 0.018569 | 0.235901 | 0.182256 | 0.120358 | 0.094911 | 0.033012 | 0.033012 | 0 | 0.019682 | 0.317194 | 5,060 | 157 | 87 | 32.229299 | 0.821997 | 0.126285 | 0 | 0.097087 | 0 | 0 | 0.017155 | 0 | 0 | 0 | 0 | 0.012739 | 0 | 1 | 0.029126 | false | 0 | 0.15534 | 0 | 0.203884 | 0.019417 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9ae33df6172e3d387be468447aa95067143972f3 | 4,477 | py | Python | src/apps/tractatusapp/views_spacetree.py | lambdamusic/wittgensteiniana | f9b37282dcf4b93f9a6218cc827a6ab7386a3dd4 | [
"MIT"
] | 1 | 2018-04-24T09:55:40.000Z | 2018-04-24T09:55:40.000Z | src/apps/tractatusapp/views_spacetree.py | lambdamusic/wittgensteiniana | f9b37282dcf4b93f9a6218cc827a6ab7386a3dd4 | [
"MIT"
] | null | null | null | src/apps/tractatusapp/views_spacetree.py | lambdamusic/wittgensteiniana | f9b37282dcf4b93f9a6218cc827a6ab7386a3dd4 | [
"MIT"
] | 1 | 2020-11-25T08:53:49.000Z | 2020-11-25T08:53:49.000Z | """
Using
http://thejit.org/static/v20/Docs/files/Options/Options-Canvas-js.html#Options.Canvas
"""
from django.http import HttpResponse, Http404, HttpResponseRedirect
from django.urls import reverse
from django.shortcuts import render, redirect, get_object_or_404
import json
import os
import json
from libs.myutils.myutils import printDebug
from tractatusapp.models import *
def spacetree(request):
"""
Visualizes a space tree - ORIGINAL VIEW (USED TO GENERATE HTML VERSION)
"""
# DEFAULT JSON FOR TESTING THE APP
to_json = {
'id': "190_0",
'name': "Pearl Jam",
'children': [
{
'id': "306208_1",
'name': "Pearl Jam & Cypress Hill",
'data': {
'relation': "<h4>Pearl Jam & Cypress Hill</h4><b>Connections:</b><ul><h3>Pearl Jam <div>(relation: collaboration)</div></h3><h3>Cypress Hill <div>(relation: collaboration)</div></h3></ul>"
},},
{ 'id': "191_0",
'name': "Pink Floyd",
'children': [{
'id': "306209_1",
'name': "Guns and Roses",
'data': {
'relation': "<h4>Pearl Jam & Cypress Hill</h4><b>Connections:</b><ul><h3>Pearl Jam <div>(relation: collaboration)</div></h3><h3>Cypress Hill <div>(relation: collaboration)</div></h3></ul>"
},
}],
}]}
# reconstruct the tree as a nested dictionary
TESTING = False
def nav_tree(el):
d = {}
d['id'] = el.name
d['name'] = el.name
full_ogden = generate_text(el)
preview_ogden = "%s .." % ' '.join(el.textOgden().split()[:10]).replace("div", "span")
d['data'] = {'preview_ogden' : preview_ogden, 'full_ogden' : full_ogden}
if el.get_children() and not TESTING:
d['children'] = [nav_tree(x) for x in el.get_children()]
else:
d['children'] = []
return d
treeroot = {'id': "root", 'name': "TLP", 'children': [],
'data': {'preview_ogden' : "root node", 'full_ogden' : generate_text("root")}}
# level0 = TextUnit.tree.root_nodes()
# TODO - make this a mptt tree function
level0 = TextUnit.tree_top()
for x in level0:
treeroot['children'] += [nav_tree(x)]
context = {
'json': json.dumps(treeroot),
'experiment_description': """
The Space Tree Tractatus is an experimental visualization of the <br />
<a target='_blank' href="http://en.wikipedia.org/wiki/Tractatus_Logico-Philosophicus">Tractatus Logico-Philosophicus</a>, a philosophical text by Ludwig Wittgenstein.
<br /><br />
<b>Click</b> on a node to move the tree and center that node. The text contents of the node are displayed at the bottom of the page. <b>Use the mouse wheel</b> to zoom and <b>drag and drop the canvas</b> to pan.
<br /><br />
<small>Made with <a target='_blank' href="http://www.python.org/">Python</a> and the <a target='_blank' href="http://thejit.org/">JavaScript InfoVis Toolkit</a>. More info on this <a href="http://www.michelepasin.org/blog/2012/07/08/wittgenstein-and-the-javascript-infovis-toolkit/">blog post</a></small>
"""
}
return render(request,
'tractatusapp/spacetree/spacetree.html',
context)
def generate_text(instance, expression="ogden"):
""" creates the html needed for the full text representation of the tractatus
includes the number-title, and small links to next and prev satz
# TODO: add cases for different expressions
"""
if instance == "root":
return """<div class='tnum'>Tractatus Logico-Philosophicus<span class='smalllinks'></small></div>
<div>Ludwig Wittgenstein, 1921.<br />
Translated from the German by C.K. Ogden in 1922<br />
Original title: Logisch-Philosophische Abhandlung, Wilhelm Ostwald (ed.), Annalen der Naturphilosophie, 14 (1921)</div>
"""
else:
next, prev = "", ""
next_satz = instance.tractatus_next()
prev_satz = instance.tractatus_prev()
if next_satz:
next = "<a title='Next Sentence' href='javascript:focus_node(%s);'>→ %s</a>" % (next_satz.name, next_satz.name)
if prev_satz:
prev = "<a title='Previous Sentence' href='javascript:focus_node(%s);'>%s ←</a>" % (prev_satz.name, prev_satz.name)
# HACK src images rendered via JS in the template cause WGET errors
# hence they are hidden away in this visualization
# TODO find a more elegant solution
text_js_ready = instance.textOgden().replace('src="', '-src=\"src image omitted ')
t = "<div class='tnum'><span class='smalllinks'>%s</span>%s<span class='smalllinks'>%s</span></div>%s" % (prev, instance.name, next, text_js_ready)
return t
| 33.916667 | 309 | 0.663837 | 626 | 4,477 | 4.672524 | 0.375399 | 0.01641 | 0.032821 | 0.036923 | 0.159316 | 0.114872 | 0.092991 | 0.092991 | 0.092991 | 0.092991 | 0 | 0.018654 | 0.173777 | 4,477 | 131 | 310 | 34.175573 | 0.772101 | 0.146973 | 0 | 0.151899 | 0 | 0.113924 | 0.56555 | 0.144289 | 0 | 0 | 0 | 0.022901 | 0 | 1 | 0.037975 | false | 0 | 0.101266 | 0 | 0.189873 | 0.012658 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9aea27159d7833c105fb4af0a9c01c188110c93d | 2,693 | py | Python | polymorphic/tests/test_utils.py | likeanaxon/django-polymorphic | ad4e6e90c82f897300c1c135bd7a95e4b2d802a3 | [
"BSD-3-Clause"
] | 1 | 2021-03-12T17:42:37.000Z | 2021-03-12T17:42:37.000Z | polymorphic/tests/test_utils.py | likeanaxon/django-polymorphic | ad4e6e90c82f897300c1c135bd7a95e4b2d802a3 | [
"BSD-3-Clause"
] | 10 | 2020-02-12T01:46:41.000Z | 2022-02-10T09:00:03.000Z | polymorphic/tests/test_utils.py | likeanaxon/django-polymorphic | ad4e6e90c82f897300c1c135bd7a95e4b2d802a3 | [
"BSD-3-Clause"
] | 1 | 2020-04-18T15:14:47.000Z | 2020-04-18T15:14:47.000Z | from django.test import TransactionTestCase
from polymorphic.models import PolymorphicModel, PolymorphicTypeUndefined
from polymorphic.tests.models import (
Enhance_Base,
Enhance_Inherit,
Model2A,
Model2B,
Model2C,
Model2D,
)
from polymorphic.utils import (
get_base_polymorphic_model,
reset_polymorphic_ctype,
sort_by_subclass,
)
class UtilsTests(TransactionTestCase):
def test_sort_by_subclass(self):
self.assertEqual(
sort_by_subclass(Model2D, Model2B, Model2D, Model2A, Model2C),
[Model2A, Model2B, Model2C, Model2D, Model2D],
)
def test_reset_polymorphic_ctype(self):
"""
Test the the polymorphic_ctype_id can be restored.
"""
Model2A.objects.create(field1="A1")
Model2D.objects.create(field1="A1", field2="B2", field3="C3", field4="D4")
Model2B.objects.create(field1="A1", field2="B2")
Model2B.objects.create(field1="A1", field2="B2")
Model2A.objects.all().update(polymorphic_ctype_id=None)
with self.assertRaises(PolymorphicTypeUndefined):
list(Model2A.objects.all())
reset_polymorphic_ctype(Model2D, Model2B, Model2D, Model2A, Model2C)
self.assertQuerysetEqual(
Model2A.objects.order_by("pk"),
[Model2A, Model2D, Model2B, Model2B],
transform=lambda o: o.__class__,
)
def test_get_base_polymorphic_model(self):
"""
Test that finding the base polymorphic model works.
"""
# Finds the base from every level (including lowest)
self.assertIs(get_base_polymorphic_model(Model2D), Model2A)
self.assertIs(get_base_polymorphic_model(Model2C), Model2A)
self.assertIs(get_base_polymorphic_model(Model2B), Model2A)
self.assertIs(get_base_polymorphic_model(Model2A), Model2A)
# Properly handles multiple inheritance
self.assertIs(get_base_polymorphic_model(Enhance_Inherit), Enhance_Base)
# Ignores PolymorphicModel itself.
self.assertIs(get_base_polymorphic_model(PolymorphicModel), None)
def test_get_base_polymorphic_model_skip_abstract(self):
"""
Skipping abstract models that can't be used for querying.
"""
class A(PolymorphicModel):
class Meta:
abstract = True
class B(A):
pass
class C(B):
pass
self.assertIs(get_base_polymorphic_model(A), None)
self.assertIs(get_base_polymorphic_model(B), B)
self.assertIs(get_base_polymorphic_model(C), B)
self.assertIs(get_base_polymorphic_model(C, allow_abstract=True), A)
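# Usage sketch outside the test suite (illustrative): pass the base model and
# all of its subclasses so each row's polymorphic_ctype_id is recomputed, e.g.
# after a bulk update wiped it as in test_reset_polymorphic_ctype above:
#
#   from polymorphic.utils import reset_polymorphic_ctype
#   reset_polymorphic_ctype(Base, ChildA, ChildB)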
| 32.445783 | 82 | 0.671742 | 296 | 2,693 | 5.871622 | 0.290541 | 0.120829 | 0.161105 | 0.172037 | 0.348677 | 0.3084 | 0.156502 | 0.042578 | 0 | 0 | 0 | 0.027277 | 0.237653 | 2,693 | 82 | 83 | 32.841463 | 0.819289 | 0.105087 | 0 | 0.074074 | 0 | 0 | 0.008554 | 0 | 0 | 0 | 0 | 0 | 0.240741 | 1 | 0.074074 | false | 0.037037 | 0.074074 | 0 | 0.240741 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
9af8e51dd66ea49555fb4a24794f6c9c1dc7752a | 885 | py | Python | apps/user/serializers.py | major-hub/soil_app | ddd250161ad496afd4c8484f79500ff2657b51df | [
"MIT"
] | null | null | null | apps/user/serializers.py | major-hub/soil_app | ddd250161ad496afd4c8484f79500ff2657b51df | [
"MIT"
] | null | null | null | apps/user/serializers.py | major-hub/soil_app | ddd250161ad496afd4c8484f79500ff2657b51df | [
"MIT"
] | null | null | null | from rest_framework import serializers
from user.models import User
from main.exceptions.user_exceptions import UserException
user_exception = UserException
class UserRegisterSerializer(serializers.ModelSerializer):
password_confirmation = serializers.CharField(max_length=128)
class Meta:
model = User
fields = ['email', 'phone_number', 'first_name', 'last_name', 'password', 'password_confirmation']
def validate(self, attrs):
password_confirmation = attrs.pop('password_confirmation')
if password_confirmation != attrs.get('password'):
raise serializers.ValidationError({'non_field_errors': user_exception("NOT_MATCHED_PASSWORDS").message})
return attrs
class UserLoginSerializer(serializers.Serializer):
email = serializers.EmailField(max_length=255)
password = serializers.CharField(max_length=128)
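# Illustrative usage (hypothetical payload):
#   data = {"email": "a@b.com", "phone_number": "+10000000000",
#           "first_name": "A", "last_name": "B",
#           "password": "secret", "password_confirmation": "other"}
#   s = UserRegisterSerializer(data=data)
#   s.is_valid()  # -> False; s.errors carries the NOT_MATCHED_PASSWORDS message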
| 32.777778 | 116 | 0.754802 | 92 | 885 | 7.054348 | 0.543478 | 0.154083 | 0.070878 | 0.089368 | 0.098613 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012032 | 0.154802 | 885 | 26 | 117 | 34.038462 | 0.855615 | 0 | 0 | 0 | 0 | 0 | 0.14819 | 0.071267 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0.352941 | 0.176471 | 0 | 0.647059 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 |
9aff6921a655770822f92c25247b7dfa80a21333 | 2,521 | py | Python | src/Coord_cmd.py | aembillo/MNWellRecordGui | 1683bdde75ff37a17726ce1cd7ba0135988f2992 | [
"BSD-3-Clause"
] | null | null | null | src/Coord_cmd.py | aembillo/MNWellRecordGui | 1683bdde75ff37a17726ce1cd7ba0135988f2992 | [
"BSD-3-Clause"
] | null | null | null | src/Coord_cmd.py | aembillo/MNWellRecordGui | 1683bdde75ff37a17726ce1cd7ba0135988f2992 | [
"BSD-3-Clause"
] | null | null | null | """ 2015-07-23
Perform coordinate conversions from the command line.
Uses argparse for the CLI, pyperclip for clipboard I/O, and
DCcoordinate_projector for the coordinate transforms.
"""
import argparse
import pyperclip
# p1 = argparse.ArgumentParser()
# p1.add_argument('x')
# print p1.parse_args(['123'])
#
# p2 = argparse.ArgumentParser()
# p2.add_argument('-d', action='store_const',const='dak')
# print p2.parse_args(['-d'])
#
# p3 = argparse.ArgumentParser()
# p3.add_argument('-d', action='store_const',const='dak')
# p3.add_argument('x')
# p3.add_argument('y')
# print p3.parse_args(['-d','1','2'])
#p1.add_argument(
from Coordinate_Transform import DCcoordinate_projector
# #
# # parser = argparse.ArgumentParser()
# # parser.add_argument("coord_1")
# # parser.add_argument("coord_2")
# # args = parser.parse_args()
# # x,y = args.coord_1, args.coord_2
#
def coord_convert():
parser = argparse.ArgumentParser()
parser.add_argument('-d','--dak', action='store_const', const='dak', help="return Dakota County coords on clipboard")
parser.add_argument('-u','--utm', action='store_const', const='utm', help="return UTM NAD 83, Zone 15 coords on clipboard")
parser.add_argument('x')
parser.add_argument('y')
args = parser.parse_args()
    print('args =', args)
coordtext = '%s,%s'%( args.x, args.y)
Cprojector = DCcoordinate_projector()
cliptext = Cprojector.handle_unspecified_coords(coordtext)
#print outtext
try:
if args.dak:
cliptext = '%4.2f,%4.2f'%(Cprojector.dakx,Cprojector.daky)
#print 'returning dakx,daky to clipboard "%s"'%cliptext
elif args.utm:
cliptext = '%4.2f,%4.2f'%(Cprojector.utmx,Cprojector.utmy)
#print 'returning utmx,utmy to clipboard "%s"'%cliptext
    except Exception:  # args.dak / args.utm may be missing; keep the default text
        pass
pyperclip.copy(cliptext)
pyperclip.paste()
return cliptext
def test_parse_args():
    import sys
    sys.argv = ["prog", '-d', "93.0444", "44.5926"]
    rv = coord_convert()
    print('>>\n' + str(rv) + '\n================')
    sys.argv = ["prog", '--utm', "93.0444", "44.5926"]
    rv = coord_convert()
    print('>>\n' + str(rv) + '\n================')
#test_parse_args()
coord_convert()
'''
ERROR coordinates not recognized or not within Dakota County
"570931,1441"
496475.91,4937695.85
Dakota Co: 570931, 144108
Dakota Co: 570931.0, 144108.0
UTM : 496475.91, 4937695.85
D.d : -93.044399765, 44.592598646
D M.m : -93 2.663986, 44 35.555919
D M S.s : -93 2 39.839", 44 35 33.355"''' | 28.647727 | 127 | 0.623165 | 334 | 2,521 | 4.565868 | 0.338323 | 0.086557 | 0.066885 | 0.055082 | 0.247869 | 0.232131 | 0.097049 | 0.097049 | 0.049836 | 0.049836 | 0 | 0.095639 | 0.199524 | 2,521 | 88 | 128 | 28.647727 | 0.660059 | 0.273701 | 0 | 0.117647 | 0 | 0 | 0.185828 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.029412 | 0.117647 | null | null | 0.088235 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
b103007297614b73c2ae8e2e4d5c35bd947a709c | 1,051 | py | Python | wordcount/views.py | chinya07/Django-wordcount | 57808f922a140b341807a5b5352864cec5728695 | [
"MIT"
] | null | null | null | wordcount/views.py | chinya07/Django-wordcount | 57808f922a140b341807a5b5352864cec5728695 | [
"MIT"
] | null | null | null | wordcount/views.py | chinya07/Django-wordcount | 57808f922a140b341807a5b5352864cec5728695 | [
"MIT"
] | null | null | null | from django.http import HttpResponse
from django.shortcuts import render
import operator
def home(request):
return render(request, 'home.html')
def count(request):
fulltext1=request.GET['fulltext1']
fulltext2=request.GET['fulltext2']
wordlist1=fulltext1.split(' ')
wordlist2=fulltext2.split(' ')
from difflib import SequenceMatcher
similarity_ratio = SequenceMatcher(None, wordlist1, wordlist2).ratio()
# count=0
# for word in wordlist1:
# if word in wordlist2:
# count+=1
# worddic = {}
#
# for word in wordlist:
# if word in worddic:
# #increase
# worddic[word] += 1
# else:
# # add to worddic
# worddic[word] = 1
#sortedwords=sorted(worddic.items(), key=operator.itemgetter(1), reverse=True)
return render(request, 'count.html', {'fulltext1':fulltext1, 'fulltext2':fulltext2, 'count':similarity_ratio})
def about(request):
return render(request, 'about.html')
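# Hedged sketch of a matching URLconf (assumed; not part of this file):
#   from django.urls import path
#   from . import views
#   urlpatterns = [
#       path('', views.home, name='home'),
#       path('count/', views.count, name='count'),
#       path('about/', views.about, name='about'),
#   ]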
| 30.028571 | 115 | 0.617507 | 110 | 1,051 | 5.881818 | 0.409091 | 0.037094 | 0.088099 | 0.080371 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.027237 | 0.266413 | 1,051 | 34 | 116 | 30.911765 | 0.811933 | 0.298763 | 0 | 0 | 0 | 0 | 0.104956 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.266667 | 0.133333 | 0.666667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 |
b10ef155b141d1ff49de7abd5e3a562536e9e728 | 771 | py | Python | tests/Bio/test_tandem.py | iwasakishuto/Keras-Imitation | 8ac0cd7c8912d49d13b19a0182ad534c0781fbfe | [
"MIT"
] | 4 | 2020-04-25T08:50:36.000Z | 2020-04-26T04:49:16.000Z | tests/Bio/test_tandem.py | iwasakishuto/Keras-Imitation | 8ac0cd7c8912d49d13b19a0182ad534c0781fbfe | [
"MIT"
] | null | null | null | tests/Bio/test_tandem.py | iwasakishuto/Keras-Imitation | 8ac0cd7c8912d49d13b19a0182ad534c0781fbfe | [
"MIT"
] | null | null | null | # coding: utf-8
from kerasy.Bio.tandem import find_tandem
from kerasy.utils import generateSeq
len_sequences = 1000
def get_test_data():
sequence = generateSeq(size=len_sequences,
nucleic_acid='DNA',
weights=None,
seed=123)
sequence = "".join(sequence)
return sequence
def test_find_tandem():
sequence = get_test_data()
max_val_sais, tandem_lists_sais = find_tandem(sequence, method="SAIS")
tandem_sais = tandem_lists_sais[0]
max_val_dp, tandem_lists_dp = find_tandem(sequence, method="DP")
tandem_dp = tandem_lists_dp[0]
assert max_val_sais == max_val_dp
assert any([tandem_dp[i:]+tandem_dp[:i] == tandem_sais for i in range(len(tandem_dp))])
| 29.653846 | 91 | 0.660182 | 105 | 771 | 4.52381 | 0.4 | 0.084211 | 0.113684 | 0.08 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017123 | 0.242542 | 771 | 25 | 92 | 30.84 | 0.796233 | 0.016861 | 0 | 0 | 1 | 0 | 0.011905 | 0 | 0 | 0 | 0 | 0 | 0.111111 | 1 | 0.111111 | false | 0 | 0.111111 | 0 | 0.277778 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
b10f2700bf5dd4688d783eebd9aacb68abc85ac5 | 679 | py | Python | NEW_PRAC/HackerRank/Python/SetDifferenceString.py | side-projects-42/INTERVIEW-PREP-COMPLETE | 627a3315cee4bbc38a0e81c256f27f928eac2d63 | [
"MIT"
] | 13 | 2021-03-11T00:25:22.000Z | 2022-03-19T00:19:23.000Z | NEW_PRAC/HackerRank/Python/SetDifferenceString.py | side-projects-42/INTERVIEW-PREP-COMPLETE | 627a3315cee4bbc38a0e81c256f27f928eac2d63 | [
"MIT"
] | 160 | 2021-04-26T19:04:15.000Z | 2022-03-26T20:18:37.000Z | NEW_PRAC/HackerRank/Python/SetDifferenceString.py | side-projects-42/INTERVIEW-PREP-COMPLETE | 627a3315cee4bbc38a0e81c256f27f928eac2d63 | [
"MIT"
] | 12 | 2021-04-26T19:43:01.000Z | 2022-01-31T08:36:29.000Z | # >>> s = set("Hacker")
# >>> print s.difference("Rank")
# set(['c', 'r', 'e', 'H'])
# >>> print s.difference(set(['R', 'a', 'n', 'k']))
# set(['c', 'r', 'e', 'H'])
# >>> print s.difference(['R', 'a', 'n', 'k'])
# set(['c', 'r', 'e', 'H'])
# >>> print s.difference(enumerate(['R', 'a', 'n', 'k']))
# set(['a', 'c', 'r', 'e', 'H', 'k'])
# >>> print s.difference({"Rank":1})
# set(['a', 'c', 'e', 'H', 'k', 'r'])
# >>> s - set("Rank")
# set(['H', 'c', 'r', 'e'])
if __name__ == "__main__":
eng = input()
eng_stu = set(map(int, input().split()))
fre = input()
fre_stu = set(map(int, input().split()))
eng_only = eng_stu - fre_stu
print(len(eng_only))
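# Sample run (illustrative; the classic "English-only subscribers" input):
#   9
#   1 2 3 4 5 6 7 8 9
#   9
#   10 1 2 3 11 21 55 6 8
# prints 4, since eng_only == {4, 5, 7, 9}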
| 24.25 | 57 | 0.443299 | 103 | 679 | 2.786408 | 0.262136 | 0.10453 | 0.278746 | 0.055749 | 0.445993 | 0.421603 | 0.268293 | 0.268293 | 0.188153 | 0.188153 | 0 | 0.001815 | 0.188513 | 679 | 27 | 58 | 25.148148 | 0.519056 | 0.639175 | 0 | 0 | 0 | 0 | 0.034632 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.142857 | 0 | 0 | 0 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
b11347dca32d00ada08a415a09ab2e6c4431c76c | 2,354 | py | Python | chaos_genius/celery_config.py | eltociear/chaos_genius | eb3bc27181c8af4144b95e685386814109173164 | [
"MIT"
] | 1 | 2022-02-25T16:11:34.000Z | 2022-02-25T16:11:34.000Z | chaos_genius/celery_config.py | eltociear/chaos_genius | eb3bc27181c8af4144b95e685386814109173164 | [
"MIT"
] | null | null | null | chaos_genius/celery_config.py | eltociear/chaos_genius | eb3bc27181c8af4144b95e685386814109173164 | [
"MIT"
] | null | null | null | from datetime import timedelta
from celery.schedules import crontab, schedule
CELERY_IMPORTS = ("chaos_genius.jobs")
CELERY_TASK_RESULT_EXPIRES = 30
CELERY_TIMEZONE = "UTC"
CELERY_ACCEPT_CONTENT = ["json", "msgpack", "yaml"]
CELERY_TASK_SERIALIZER = "json"
CELERY_RESULT_SERIALIZER = "json"
CELERYBEAT_SCHEDULE = {
"anomaly-scheduler": {
"task": "chaos_genius.jobs.anomaly_tasks.anomaly_scheduler",
"schedule": schedule(timedelta(minutes=10)),
"args": ()
},
'alerts-daily': {
'task': 'chaos_genius.jobs.alert_tasks.check_event_alerts',
'schedule': crontab(hour="3", minute="0"), # Daily: at 3am
'args': ('daily',)
},
"alert-digest-daily-scheduler": {
"task": "chaos_genius.jobs.alert_tasks.alert_digest_daily_scheduler",
"schedule": schedule(timedelta(minutes=10)),
"args": ()
},
# 'anomaly-task-every-minute': {
# 'task': 'chaos_genius.jobs.anomaly_tasks.add_together',
# 'schedule': crontab(minute="*"), # Every minutes
# 'args': (5,10,)
# },
# "anomaly-tasks-all-kpis": {
# "task": "chaos_genius.jobs.anomaly_tasks.anomaly_kpi",
# # "schedule": crontab(hour=[11]),
# "schedule": schedule(timedelta(minutes=1)), # for testing
# "args": ()
# },
# 'alerts-weekly': {
# 'task': 'chaos_genius.jobs.alert_tasks.check_event_alerts',
# 'schedule': crontab(day_of_week="0"), # Weekly: every sunday
# 'args': ('weekly',)
# },
# 'alerts-hourly': {
# 'task': 'chaos_genius.jobs.alert_tasks.check_event_alerts',
# 'schedule': crontab(hour="*"), # Hourly: at 0th minute
# 'args': ('hourly',)
# },
# 'alerts-every-15-minute': {
# 'task': 'chaos_genius.jobs.alert_tasks.check_event_alerts',
# 'schedule': crontab(minute="*/15"), # Every 15 minutes
# 'args': ('every_15_minute',)
# }
}
CELERY_ROUTES = {
"chaos_genius.jobs.anomaly_tasks.*": {"queue": "anomaly-rca"},
"chaos_genius.jobs.alert_tasks.*": {"queue": "alerts"},
}
# Scheduler design notes:
# - the scheduler runs every hour and looks at tasks from the last n hours
# - if a task has been stuck in processing for 24 hours, schedule it right away
# - each job gets an expiry window
# - add the details of each job to a table, then schedule it
# TODO: Use this for config
class Config:
enable_utc = True
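# Sketch of how this config module is typically loaded (an assumption; the
# Celery app itself is created elsewhere in the project):
#   from celery import Celery
#   app = Celery("chaos_genius")
#   app.config_from_object("chaos_genius.celery_config")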
| 32.694444 | 77 | 0.619371 | 270 | 2,354 | 5.203704 | 0.351852 | 0.086121 | 0.117438 | 0.108185 | 0.398577 | 0.340214 | 0.297509 | 0.176512 | 0.176512 | 0.176512 | 0 | 0.014602 | 0.214528 | 2,354 | 71 | 78 | 33.15493 | 0.745268 | 0.477485 | 0 | 0.129032 | 0 | 0 | 0.336975 | 0.207563 | 0 | 0 | 0 | 0.014085 | 0 | 1 | 0 | false | 0 | 0.096774 | 0 | 0.16129 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
b122b1664a2960a396de4fbb595bf3821559d96f | 563 | py | Python | orderedtable/urls.py | Shivam2k16/DjangoOrderedTable | da133a23a6659ce5467b8161edcf6db35f1c0b76 | [
"MIT"
] | 2 | 2018-04-15T17:03:59.000Z | 2019-03-23T04:45:00.000Z | orderedtable/urls.py | Shivam2k16/DjangoOrderedTable | da133a23a6659ce5467b8161edcf6db35f1c0b76 | [
"MIT"
] | null | null | null | orderedtable/urls.py | Shivam2k16/DjangoOrderedTable | da133a23a6659ce5467b8161edcf6db35f1c0b76 | [
"MIT"
] | 1 | 2018-04-15T16:54:07.000Z | 2018-04-15T16:54:07.000Z | from django.conf.urls import include, url
from django.contrib import admin
from orderedtable import views
app_name="orderedtable"
urlpatterns = [
url(r'^$', views.home,name="home"),
url(r'^import-json/$', views.import_json,name="import_json"),
url(r'^project-list/$', views.project_list,name="project_list"),
url(r'^empty-list/$', views.delete_table,name="delete_table"),
url(r'^multiple-sorting/$', views.multiple_sorting,name="multiple_sorting"),
url(r'^sort-by = (?P<pk>[\w-]+)/$', views.sorted,name="sorted"),
]
| 33.117647 | 80 | 0.698046 | 79 | 563 | 4.860759 | 0.379747 | 0.0625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.108348 | 563 | 16 | 81 | 35.1875 | 0.76494 | 0 | 0 | 0 | 0 | 0 | 0.28952 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.384615 | 0 | 0.384615 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
b123669e9c0103e63c00a8b4dcdbc0e0596f1442 | 2,242 | py | Python | call_google_translate.py | dadap/klingon-assistant-data | 5371f8ae6e3669f48a83087a4937af0dee8d23d1 | [
"Apache-2.0"
] | null | null | null | call_google_translate.py | dadap/klingon-assistant-data | 5371f8ae6e3669f48a83087a4937af0dee8d23d1 | [
"Apache-2.0"
] | 5 | 2018-07-11T09:17:19.000Z | 2018-10-14T10:33:51.000Z | call_google_translate.py | dadap/klingon-assistant-data | 5371f8ae6e3669f48a83087a4937af0dee8d23d1 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python3
# Calls Google Translate to produce translations.
# To use, set "language" and "dest_language" below. (They are normally the same,
# unless Google uses a different language code than we do.) Then fill in
# the definition_[language] fields with "TRANSLATE" or
# "TRANSLATE: [replacement definition]". The latter is to allow for a better
# translation when the original definition is ambiguous, e.g., if the definition
# is "launcher", a better translation might result from
# "TRANSLATE: rocket launcher".
from googletrans import Translator
import fileinput
import re
import time
# TODO: Refactor this and also use in renumber.py.
# Ignore mem-00-header.xml and mem-28-footer.xml because they don't contain entries.
filenames = ['mem-01-b.xml', 'mem-02-ch.xml', 'mem-03-D.xml', 'mem-04-gh.xml', 'mem-05-H.xml', 'mem-06-j.xml', 'mem-07-l.xml', 'mem-08-m.xml', 'mem-09-n.xml', 'mem-10-ng.xml', 'mem-11-p.xml', 'mem-12-q.xml', 'mem-13-Q.xml', 'mem-14-r.xml', 'mem-15-S.xml', 'mem-16-t.xml', 'mem-17-tlh.xml', 'mem-18-v.xml', 'mem-19-w.xml', 'mem-20-y.xml', 'mem-21-a.xml', 'mem-22-e.xml', 'mem-23-I.xml', 'mem-24-o.xml', 'mem-25-u.xml', 'mem-26-suffixes.xml', 'mem-27-extra.xml']
translator = Translator()
language = "zh-HK"
dest_language = "zh-TW"
limit = 250
for filename in filenames:
with fileinput.FileInput(filename, inplace=True) as file:
definition = ""
for line in file:
definition_match = re.search(r"definition\">?(.+)<", line)
definition_translation_match = re.search(r"definition_(.+)\">TRANSLATE(?:: (.*))?<", line)
if (definition_match):
definition = definition_match.group(1)
if (limit > 0 and \
definition != "" and \
definition_translation_match and \
language.replace('-','_') == definition_translation_match.group(1)):
if definition_translation_match.group(2):
definition = definition_translation_match.group(2)
translation = translator.translate(definition, src='en', dest=dest_language)
line = re.sub(r">(.*)<", ">%s [AUTOTRANSLATED]<" % translation.text, line)
# Rate-limit calls to Google Translate.
limit = limit - 1
time.sleep(0.1)
print(line, end='')
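# Illustrative line shapes handled by the loop above (hypothetical entry):
#   before: <definition_zh_HK>TRANSLATE: rocket launcher</definition_zh_HK>
#   after:  <definition_zh_HK>(translated text) [AUTOTRANSLATED]</definition_zh_HK>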
| 44.84 | 460 | 0.666369 | 336 | 2,242 | 4.389881 | 0.464286 | 0.105763 | 0.088136 | 0.063051 | 0.075932 | 0 | 0 | 0 | 0 | 0 | 0 | 0.037473 | 0.166815 | 2,242 | 49 | 461 | 45.755102 | 0.752141 | 0.303301 | 0 | 0 | 0 | 0 | 0.263226 | 0 | 0 | 0 | 0 | 0.020408 | 0 | 1 | 0 | false | 0 | 0.142857 | 0 | 0.142857 | 0.035714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
b129413908fca02566b29b673b606e60be14141b | 7,824 | py | Python | icetray_version/trunk/resources/scripts/make_plots.py | hershalpandya/airshowerclassification_llhratio_test | a2a2ce5234c8f455fe56c332ab4fcc65008e9409 | [
"MIT"
] | null | null | null | icetray_version/trunk/resources/scripts/make_plots.py | hershalpandya/airshowerclassification_llhratio_test | a2a2ce5234c8f455fe56c332ab4fcc65008e9409 | [
"MIT"
] | null | null | null | icetray_version/trunk/resources/scripts/make_plots.py | hershalpandya/airshowerclassification_llhratio_test | a2a2ce5234c8f455fe56c332ab4fcc65008e9409 | [
"MIT"
] | null | null | null |
# coding: utf-8
# In[1]:
import numpy as np
get_ipython().magic(u'matplotlib inline')
from matplotlib import pyplot as plt
from matplotlib.colors import LogNorm
import sys
sys.path.append('../../python/')
from general_functions import load_5D_PDF_from_file
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
import matplotlib as mpl  # needed by plot_3dhist (mpl.colors.Normalize)
import tables
import glob
def plot_2d_hist(hist,xedges,yedges,
xlim,ylim,
xlabel='',ylabel='',title='',cmap='coolwarm',
vmin=1e-5,vmax=1e-1,same_plot=False,alpha=1.0):
hist=hist.T
hist=np.ma.masked_where(hist==0,hist)
#label='nentries: %i'%np.sum(hist)
if not same_plot:
plt.figure()#dpi=320)
plt.pcolormesh(xedges,yedges,hist,alpha=alpha,
cmap=cmap,norm=LogNorm(vmin=vmin,vmax=vmax))
#cbar=plt.colorbar()
#plt.scatter([2.0],[2],color=None,s=0,label=label)
#plt.legend()
plt.xlim(xlim)
plt.ylim(ylim)
#plt.xlabel(xlabel)
#plt.ylabel(ylabel)
#plt.title(title)
return plt
def plot_3dhist(bkg_hist,bincenters,azim,elev,outputname,vmin,vmax):
Q,T,R = np.meshgrid(bincenters[1],bincenters[0],bincenters[2])
c= bkg_hist/np.sum(bkg_hist)
Q=Q.T
T=T.T
R=R.T
c=c.T
#print np.shape(Q.T), np.shape(T.T), np.shape(R.T), np.shape(bkg_hist.T)
reshape_ = np.prod(np.shape(Q))
Q = Q.reshape(reshape_)
T = T.reshape(reshape_)
R = R.reshape(reshape_)
c= c.reshape(reshape_)
select=(c!=0)#&(np.random.rand(len(c))>0.5)
Q=Q[select]
T=T[select]
R=R[select]
c=np.log10(c[select])
alpha=np.ones_like(c)
alpha[c<-2]=0.70
alpha[c<-3]=0.60
alpha[c<-4]=0.50
alpha[c<-5]=0.40
alpha[c<-6]=0.30
norm = mpl.colors.Normalize(vmin=vmin, vmax=vmax)
cmap = cm.jet
m = cm.ScalarMappable(norm=norm, cmap=cmap)
c= m.to_rgba(c)
c.T[3]=alpha
fig=plt.figure(figsize=(8,8))
ax = fig.add_subplot(111, projection='3d')
ax.scatter(R,T,Q,zdir='Q',c=c,s=30,edgecolors=c)
ax.azim = azim
ax.elev = elev
ax.set_xlabel('R')
ax.set_ylabel('Q')
ax.set_zlabel('T')
ax.set_xlim([0,3.5])
ax.set_ylim([-3.2,4])
ax.set_zlim([-5.2,4.2])
#fig.colorbar(myax)
fig.savefig(outputname,bbox_inches='tight')
plt.close()
return
def hist_2d_proj(hist3d,axis=0):
if axis==0:
axes=[0,1,2]
if axis==1:
axes=[1,0,2]
if axis==2:
axes=[2,0,1]
hist3d=np.transpose(hist3d,axes=axes)
proj_hist=np.zeros_like(hist3d[0])
    print(np.shape(proj_hist))
for i in range(len(hist3d)):
proj_hist += hist3d[i]
return proj_hist
def hist_1d_proj(hist2d,axis=0):
if axis==0:
axes=[0,1]
if axis==1:
axes=[1,0]
hist2d=np.transpose(hist2d,axes=axes)
proj_hist=np.zeros_like(hist2d[0])
    print(np.shape(proj_hist))
for i in range(len(hist2d)):
proj_hist += hist2d[i]
return proj_hist
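# Note: under the axis handling above, both projections reduce to a single
# NumPy sum (sketch):
#   hist_2d_proj(h3, axis=k) == np.sum(h3, axis=k)
#   hist_1d_proj(h2, axis=k) == np.sum(h2, axis=k)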
def plot_2D_projected_hist(hist3d,edges,axis=0,
xlabel='',ylabel='',
event_overlay=False, event=None):
projected_hist = hist_2d_proj(hist3d,axis)
if axis==0:
xedges= edges[1]
yedges= edges[2]
if axis==1:
xedges= edges[0]
yedges= edges[2]
if axis==2:
xedges= edges[0]
yedges= edges[1]
xlim = [xedges[0]-0.25,xedges[-1]+0.25]
ylim = [yedges[0]-0.25,yedges[-1]+0.25]
projected_hist /=np.sum(projected_hist)
projected_hist = projected_hist.T
plot_2d_hist(projected_hist,yedges,xedges,ylim,xlim,xlabel,ylabel,cmap='jet')
if event_overlay:
xcenters=(xedges[:-1]+xedges[1:])/2.0
ycenters=(yedges[:-1]+yedges[1:])/2.0
xscatter=[]
yscatter=[]
zscatter=[]
for r,row in enumerate(hist_2d_proj(event,axis)):
for c,element in enumerate(row):
if element!=0:
xscatter.append(xcenters[r])
yscatter.append(ycenters[c])
zscatter.append(element)
xscatter=np.array(xscatter)
yscatter=np.array(yscatter)
zscatter=np.array(zscatter)
plt.scatter(yscatter,xscatter,marker='s',s=10*zscatter,edgecolor='k',facecolor='r',
alpha=0.6)
return
# In[3]:
sig_pdf_file='../../files/PDF_12360_0123x.hd5'
bkg_pdf_file='../../files/PDF_12362_0123x.hd5'
temp=load_5D_PDF_from_file(SigPDFFileName=sig_pdf_file, BkgPDFFileName=bkg_pdf_file)
sig_hist=temp[0]
bkg_hist=temp[1]
binedges=temp[2]
distinct_regions_binedges=temp[3]
labels=temp[4]
sig_n_events=temp[5]
bkg_n_events = temp[6]
# In[4]:
# find the logE and coszen bins select those bins in sig/bkg pdfs
logEbincenters = np.array((binedges[0][1:] + binedges[0][:-1] )/2.)
coszenbincenters = np.array((binedges[1][1:] + binedges[1][:-1] )/2.)
logE=-0.01
dE = np.absolute(logEbincenters - logE)
Ebin=np.where(np.amin(dE)==dE)[0][0]
coszen=0.96
dcZ = np.absolute(coszenbincenters - coszen)
cZbin = np.where(np.amin(dcZ)==dcZ)[0][0]
sig_hist_3dslice = sig_hist[Ebin][cZbin]
bkg_hist_3dslice = bkg_hist[Ebin][cZbin]
binedges_3dslice = binedges[2:]
# In[7]:
plot_2D_projected_hist(sig_hist_3dslice,binedges_3dslice,axis=2)
# In[27]:
sig_hdf_files=glob.glob('../../files/Events_12360_?x.hd5.hd5')
bkg_hdf_files=glob.glob('../../files/Events_12362_?x.hd5.hd5')
# In[30]:
def load_hdf_file(tfiles):
d={}
for tfile in tfiles:
f=tables.open_file(tfile)
for name in f.root.IceTopLLHR.colnames:
if tfile==tfiles[0]:
d[name]= eval('f.root.IceTopLLHR.cols.'+name+'[:]')
else:
d[name]=np.concatenate( (d[name],eval('f.root.IceTopLLHR.cols.'+name+'[:]')) )
if tfile==tfiles[0]:
d['log_s125']=np.log10(f.root.LaputopParams.cols.s125[:])
d['cos_zen']=np.cos(f.root.Laputop.cols.zenith[:])
else:
d['log_s125']=np.concatenate( (d['log_s125'],np.log10(f.root.LaputopParams.cols.s125[:])) )
d['cos_zen']=np.concatenate( (d['cos_zen'], np.cos(f.root.Laputop.cols.zenith[:])) )
return d
# In[31]:
llhr={}
llhr['sig']=load_hdf_file(sig_hdf_files)
llhr['bkg']=load_hdf_file(bkg_hdf_files)
# In[45]:
low_E=1.5
high_E=1.6
low_z=0.8
high_z=.85
for key in llhr.keys():
cut1=llhr[key]['isGood']==1.0
cut2=llhr[key]['tanks_have_nans']==0.
cut3=llhr[key]['log_s125']>=low_E
cut4=llhr[key]['log_s125']<high_E
cut5=llhr[key]['cos_zen']>=low_z
cut6=llhr[key]['cos_zen']<high_z
select=cut1&cut2&cut3&cut4&cut5&cut6
    print(len(select))
    print(len(select[select]))
    hist_this = 'llh_ratio'
    hist_range = [-10, 15]  # renamed from `range` to avoid shadowing the built-in
    bins = 35
    #hist_this = 'n_extrapolations_bkg_PDF'
    #hist_range = [0, 20]
    #bins = 20
    plt.hist(llhr[key][hist_this][select], range=hist_range, bins=bins, label=key, histtype='step')
plt.legend()
# In[34]:
llhr['sig'].keys()
# In[2]:
def load_results_hist(tfile):
f=tables.open_file(tfile)
labels=f.root.labels[:]
nevents=f.root.n_events[:]
edges0=f.root.binedges_0[:]
edges1=f.root.binedges_1[:]
edges2=f.root.binedges_2[:]
hist=f.root.hist[:]
f.close()
return hist, [edges0,edges1,edges2], nevents,labels
# In[3]:
sig_hist, edges, sig_nevents, labels = load_results_hist('../../files/results_sig_Ezenllhr.hd5')
bkg_hist, edges, bkg_nevents, labels = load_results_hist('../../files/results_bkg_Ezenllhr.hd5')
# In[4]:
sig_onedhist=hist_2d_proj(sig_hist,axis=1)[3]
bkg_onedhist=hist_2d_proj(bkg_hist,axis=1)[3]
# In[5]:
plt.bar(edges[2][:-1],sig_onedhist,alpha=1.,label='rand')
plt.bar(edges[2][:-1],bkg_onedhist,alpha=0.3,label='data')
plt.yscale('log')
#plt.xlim([-1,1])
plt.legend()
# In[54]:
| 23.709091 | 103 | 0.61797 | 1,237 | 7,824 | 3.767179 | 0.21342 | 0.013949 | 0.01073 | 0.006438 | 0.174678 | 0.116524 | 0.099356 | 0.070601 | 0.049142 | 0.049142 | 0 | 0.052352 | 0.206544 | 7,824 | 329 | 104 | 23.781155 | 0.698293 | 0.069402 | 0 | 0.125604 | 0 | 0 | 0.06074 | 0.034511 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.043478 | null | null | 0.019324 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
b1355b614d3140ba034b33a7f3ee7859a1245971 | 723 | py | Python | flake8_strings/visitor.py | d1618033/flake8-strings | 2ad34a41eab65e2264da7aa91c54dbca701af1c5 | [
"MIT"
] | null | null | null | flake8_strings/visitor.py | d1618033/flake8-strings | 2ad34a41eab65e2264da7aa91c54dbca701af1c5 | [
"MIT"
] | 1 | 2021-02-19T13:50:29.000Z | 2021-02-19T13:50:29.000Z | flake8_strings/visitor.py | d1618033/flake8-strings | 2ad34a41eab65e2264da7aa91c54dbca701af1c5 | [
"MIT"
] | null | null | null | import ast
from typing import List

from flake8_plugin_utils import Visitor

from .errors import UnnecessaryBackslashEscapingError


class StringsVisitor(Visitor):
    lines: List[str]

    def _is_escaped_char(self, character: str) -> bool:
        # repr() of a character produced by a real escape starts with a single
        # backslash (e.g. repr('\n') == "'\\n'"); a literal backslash shows up
        # doubled, so it is excluded here.
        repr_c = repr(character)
        return repr_c[1] == '\\' and repr_c[2] != '\\'

    def visit_Str(self, node: ast.Str) -> None:  # noqa: N802
        if '\\' not in node.s:
            return
        if node.s[-1] == '\\':
            # string ends with a backslash: leave it alone
            return
        if any(self._is_escaped_char(c) for c in node.s):
            # at least one backslash forms a real escape sequence, so it is needed
            return
        if self.lines[node.lineno - 1][node.col_offset] == 'r':
            # raw string literal: backslashes are intentional
            return
        self.error_from_node(UnnecessaryBackslashEscapingError, node)
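
# Illustrative only (not part of the original module): strings the visitor
# above would or would not flag, assuming the plugin is wired into flake8
# through flake8_plugin_utils.
#
#   s = 'needless\_escape'   # flagged: the backslash escapes nothing
#   s = 'line\nbreak'        # not flagged: \n is a real escape sequence
#   s = r'raw\string'        # not flagged: raw string literal
#   s = 'trailing\\'         # not flagged: string ends with a backslash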
| 27.807692 | 69 | 0.615491 | 93 | 723 | 4.623656 | 0.473118 | 0.034884 | 0.060465 | 0.060465 | 0.069767 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015123 | 0.268326 | 723 | 25 | 70 | 28.92 | 0.797732 | 0.013831 | 0 | 0.210526 | 0 | 0 | 0.012658 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.210526 | 0 | 0.684211 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 |
b13b701d2eb809667c24251d55ce1c0bf248bc34 | 1,465 | py | Python | substitute_finder/migrations/0003_product.py | tohugaby/pur_beurre_web | c3bdacee50907eea79821e7a8b3fe0f349719d88 | [
"MIT"
] | 1 | 2020-01-05T18:58:51.000Z | 2020-01-05T18:58:51.000Z | substitute_finder/migrations/0003_product.py | tohugaby/pur_beurre_web | c3bdacee50907eea79821e7a8b3fe0f349719d88 | [
"MIT"
] | 3 | 2020-06-05T18:35:47.000Z | 2021-06-10T20:32:44.000Z | substitute_finder/migrations/0003_product.py | tomlemeuch/pur_beurre_web | c3bdacee50907eea79821e7a8b3fe0f349719d88 | [
"MIT"
] | null | null | null | # Generated by Django 2.1 on 2018-08-14 09:42
from django.conf import settings
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('substitute_finder', '0002_category'),
    ]

    operations = [
        migrations.CreateModel(
            name='Product',
            fields=[
                ('code', models.CharField(max_length=300, primary_key=True, serialize=False, verbose_name='identifiant')),
                ('product_name', models.CharField(max_length=300, verbose_name='nom du produit')),
                ('generic_name', models.CharField(max_length=1000, verbose_name='description')),
                ('url', models.URLField(max_length=1000, verbose_name='url OpenFoodFacts')),
                ('stores', models.CharField(max_length=300, verbose_name='vendeur')),
                ('nutrition_grade_fr', models.CharField(max_length=1, verbose_name='score nutritionnel')),
                ('last_updated', models.DateTimeField(auto_now=True, verbose_name='dernière mise à jour')),
                ('categories', models.ManyToManyField(to='substitute_finder.Category', verbose_name='categories')),
                ('users', models.ManyToManyField(related_name='favorite', to=settings.AUTH_USER_MODEL, verbose_name='utilisateurs')),
            ],
            options={
                'verbose_name': 'Produit',
                'verbose_name_plural': 'Produits',
            },
        ),
    ]
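
# Illustrative only (not part of the generated migration): once this file is in
# place, Django would apply it with something like:
#
#   python manage.py migrate substitute_finder 0003_product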
| 44.393939 | 133 | 0.624573 | 149 | 1,465 | 5.932886 | 0.536913 | 0.136878 | 0.10181 | 0.135747 | 0.196833 | 0.085973 | 0.085973 | 0 | 0 | 0 | 0 | 0.03252 | 0.244369 | 1,465 | 32 | 134 | 45.78125 | 0.766034 | 0.029352 | 0 | 0 | 1 | 0 | 0.224648 | 0.01831 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.076923 | 0 | 0.192308 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
b13f03597d9a5e677488aa6621f7a6411da41c2d | 3,223 | py | Python | Estrangement/tests/test_utils.py | kawadia/estrangement | 612542bf4af64f248766ad28c18028ff4b2307b5 | [
"BSD-3-Clause"
] | 7 | 2015-02-17T14:04:25.000Z | 2020-02-16T08:59:00.000Z | tnetwork/DCD/externals/estrangement_master/Estrangement/tests/test_utils.py | Yquetzal/tnetwork | 43fb2f19aeed57a8a9d9af032ee80f1c9f58516d | [
"BSD-2-Clause"
] | 1 | 2019-07-13T16:16:28.000Z | 2019-07-15T09:34:33.000Z | Estrangement/tests/test_utils.py | kawadia/estrangement | 612542bf4af64f248766ad28c18028ff4b2307b5 | [
"BSD-3-Clause"
] | 4 | 2015-02-20T15:29:59.000Z | 2021-03-28T04:12:08.000Z | import networkx as nx
import sys
import os
import nose

sys.path.append(os.getcwd() + "/..")
import utils


class test_utils:

    def setUp(self):
        self.g0 = nx.Graph()
        self.g1 = nx.Graph()
        self.g2 = nx.Graph()
        self.g3 = nx.Graph()
        self.g4 = nx.Graph()
        self.g5 = nx.Graph()
        self.g7 = nx.Graph()
        self.g6 = nx.Graph()
        self.g0.add_edges_from([(1, 2, {'weight': 2}), (1, 3, {'weight': 1}), (2, 4, {'weight': 1})])
        self.g1.add_edges_from([(1, 4, {'weight': 1}), (2, 3, {'weight': 1}), (3, 4, {'weight': 1})])
        self.g2.add_edges_from([(1, 2, {'weight': 2}), (2, 3, {'weight': 1}), (3, 4, {'weight': 1})])
        self.g3.add_edges_from([(5, 6), (5, 7)])
        self.g4.add_edges_from([(1, 5), (2, 3)])
        self.g5.add_edges_from([(1, 2, {'weight': 2}), (1, 3, {'weight': 1}), (2, 4, {'weight': 1})])
        self.g6.add_edges_from([(1, 2, {'weight': 1}), (1, 3, {'weight': 1}), (2, 3, {'weight': 1})])
        self.g7.add_edges_from([(1, 2, {'weight': 1})])

        self.label_dict1 = {1: 'a', 2: 'a', 3: 'b', 4: 'b', 5: 'c', 6: 'c'}
        self.label_dict2 = {1: 'a', 2: 'b', 3: 'b', 4: 'b', 5: 'c', 6: 'c'}
        self.label_dict3 = {1: 'a', 2: 'b', 3: 'c', 4: 'd', 5: 'e', 6: 'f'}
        self.label_dict4 = {1: 'a', 2: 'a', 3: 'a', 4: 'a', 5: 'a', 6: 'a'}
        self.label_dict5 = {1: 'b', 2: 'b', 3: 'b', 4: 'b', 5: 'b', 6: 'b'}
        self.label_dict6 = {1: 'a', 2: 'b', 3: 'b'}

    def test_graph_distance(self):
        assert utils.graph_distance(self.g0, self.g1) == 1
        assert utils.graph_distance(self.g0, self.g1, False) == 1
        assert utils.graph_distance(self.g0, self.g0) == 0
        assert utils.graph_distance(self.g0, self.g0) == 0
        assert utils.graph_distance(self.g0, self.g2, False) == 0.8
        assert utils.graph_distance(self.g0, self.g2, True) == 0.5

    def test_node_graph_distance(self):
        assert utils.node_graph_distance(self.g0, self.g1) == 0
        assert utils.node_graph_distance(self.g0, self.g0) == 0
        assert utils.node_graph_distance(self.g0, self.g3) == 1
        assert utils.node_graph_distance(self.g0, self.g4) == 0.4
        assert utils.node_graph_distance(nx.path_graph(2), nx.path_graph(4)) == 0.5

    def test_Estrangement(self):
        assert utils.Estrangement(self.g0, self.label_dict4, self.g3) == 0  # no common edge
        assert utils.Estrangement(self.g0, self.label_dict3, self.g5) == 1  # all common edges span different communities
        assert utils.Estrangement(self.g0, self.label_dict1, self.g2) == 0.25  # one edge between communities
        nose.tools.assert_almost_equal(utils.Estrangement(self.g6, self.label_dict6, self.g7), 0.3333, 4)
        print(utils.Estrangement(self.g6, self.label_dict6, self.g7))

    def test_match_labels(self):
        assert utils.match_labels(self.label_dict1, self.label_dict1) == self.label_dict1  # snapshots are the same
        assert utils.match_labels(self.label_dict5, self.label_dict4) == self.label_dict4  # same communities, different labels
        assert utils.match_labels(self.label_dict4, self.label_dict4) == self.label_dict4  # same communities, same labels

    def test_confidence_interval(self):
        assert utils.confidence_interval([2, 2, 2, 2]) == 0
        nose.tools.assert_almost_equal(utils.confidence_interval([1, 2, 3, 4]), 1.096, 3)
        assert utils.confidence_interval([2, 2, 4, 4]) == 0.98
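
# Illustrative only: with nose installed, these tests can be run from this
# directory with, e.g., `nosetests test_utils.py`.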
| 48.104478 | 127 | 0.630779 | 550 | 3,223 | 3.56 | 0.147273 | 0.091931 | 0.066394 | 0.097038 | 0.64096 | 0.601124 | 0.456588 | 0.361593 | 0.223698 | 0.116445 | 0 | 0.075612 | 0.162892 | 3,223 | 66 | 128 | 48.833333 | 0.650111 | 0.047782 | 0 | 0.035088 | 0 | 0 | 0.043123 | 0 | 0 | 0 | 0 | 0 | 0.368421 | 0 | null | null | 0 | 0.087719 | null | null | 0.017544 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
b14084e431f80764a4ba711f2600b59b246111f5 | 830 | py | Python | ex44e.py | liggettla/python | 4bdad72bc2143679be6d1f8722b83cc359753ca9 | [
"MIT"
] | null | null | null | ex44e.py | liggettla/python | 4bdad72bc2143679be6d1f8722b83cc359753ca9 | [
"MIT"
] | null | null | null | ex44e.py | liggettla/python | 4bdad72bc2143679be6d1f8722b83cc359753ca9 | [
"MIT"
] | null | null | null | # Rather than rely on implicit inheritance from other classes, a class can just
# call the functions of another class; this is termed composition.


class Other(object):
    def override(self):
        print("OTHER override()")

    def implicit(self):
        print("OTHER implicit()")

    def altered(self):
        print("OTHER altered()")


class Child(object):
    def __init__(self):
        # Here the Child uses Other() to get its work done,
        # rather than just using implicit inheritance.
        self.other = Other()

    def implicit(self):
        self.other.implicit()

    def override(self):
        print("CHILD override()")

    def altered(self):
        print("CHILD, BEFORE OTHER altered()")
        self.other.altered()
        print("CHILD, AFTER OTHER altered()")


son = Child()
son.implicit()
son.override()
son.altered()
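
# Output when run:
#
#   OTHER implicit()
#   CHILD override()
#   CHILD, BEFORE OTHER altered()
#   OTHER altered()
#   CHILD, AFTER OTHER altered()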
| 21.842105 | 78 | 0.639759 | 102 | 830 | 5.166667 | 0.372549 | 0.085389 | 0.079696 | 0.075901 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.257831 | 830 | 37 | 79 | 22.432432 | 0.855519 | 0.263855 | 0 | 0.272727 | 0 | 0 | 0.197694 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0 | null | null | 0.272727 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
b1450ba4c392fda6a05914dd0e6efe6138ef8c05 | 8,049 | py | Python | src/abaqus/Odb/Odb.py | Haiiliin/PyAbaqus | f20db6ebea19b73059fe875a53be370253381078 | [
"MIT"
] | 7 | 2022-01-21T09:15:45.000Z | 2022-02-15T09:31:58.000Z | src/abaqus/Odb/Odb.py | Haiiliin/PyAbaqus | f20db6ebea19b73059fe875a53be370253381078 | [
"MIT"
] | null | null | null | src/abaqus/Odb/Odb.py | Haiiliin/PyAbaqus | f20db6ebea19b73059fe875a53be370253381078 | [
"MIT"
] | null | null | null | from abaqusConstants import *
from .OdbPart import OdbPart
from .OdbStep import OdbStep
from .SectionCategory import SectionCategory
from ..Amplitude.AmplitudeOdb import AmplitudeOdb
from ..BeamSectionProfile.BeamSectionProfileOdb import BeamSectionProfileOdb
from ..Filter.FilterOdb import FilterOdb
from ..Material.MaterialOdb import MaterialOdb


class Odb(AmplitudeOdb,
          FilterOdb,
          MaterialOdb,
          BeamSectionProfileOdb):
    """The Odb object is the in-memory representation of an output database (ODB) file.

    Attributes
    ----------
    isReadOnly: Boolean
        A Boolean specifying whether the output database was opened with read-only access.
    amplitudes: dict[str, Amplitude]
        A repository of :py:class:`~abaqus.Amplitude.Amplitude.Amplitude` objects.
    filters: dict[str, Filter]
        A repository of :py:class:`~abaqus.Filter.Filter.Filter` objects.
    rootAssembly: OdbAssembly
        An :py:class:`~abaqus.Odb.OdbAssembly.OdbAssembly` object.
    jobData: JobData
        A :py:class:`~abaqus.Odb.JobData.JobData` object.
    parts: dict[str, OdbPart]
        A repository of :py:class:`~abaqus.Odb.OdbPart.OdbPart` objects.
    materials: dict[str, Material]
        A repository of :py:class:`~abaqus.Material.Material.Material` objects.
    steps: dict[str, OdbStep]
        A repository of :py:class:`~abaqus.Odb.OdbStep.OdbStep` objects.
    sections: dict[str, Section]
        A repository of :py:class:`~abaqus.Section.Section.Section` objects.
    sectionCategories: dict[str, SectionCategory]
        A repository of :py:class:`~abaqus.Odb.SectionCategory.SectionCategory` objects.
    sectorDefinition: SectorDefinition
        A :py:class:`~abaqus.Odb.SectorDefinition.SectorDefinition` object.
    userData: UserData
        A :py:class:`~abaqus.Odb.UserData.UserData` object.
    customData: RepositorySupport
        A :py:class:`~abaqus.CustomKernel.RepositorySupport.RepositorySupport` object.
    profiles: dict[str, Profile]
        A repository of :py:class:`~abaqus.BeamSectionProfile.Profile.Profile` objects.

    Notes
    -----
    This object can be accessed by:

    .. code-block:: python

        import odbAccess
        session.odbs[name]
    """
    def Part(self, name: str, embeddedSpace: SymbolicConstant, type: SymbolicConstant) -> OdbPart:
        """This method creates an OdbPart object. Nodes and elements are added to this object at a
        later stage.

        Notes
        -----
        This function can be accessed by:

        .. code-block:: python

            session.odbs[name].Part

        Parameters
        ----------
        name
            A String specifying the part name.
        embeddedSpace
            A SymbolicConstant specifying the dimensionality of the Part object. Possible values
            are THREE_D, TWO_D_PLANAR, and AXISYMMETRIC.
        type
            A SymbolicConstant specifying the type of the Part object. Possible values are
            DEFORMABLE_BODY and ANALYTIC_RIGID_SURFACE.

        Returns
        -------
        An OdbPart object.
        """
        self.parts[name] = odbPart = OdbPart(name, embeddedSpace, type)
        return odbPart
    def Step(self, name: str, description: str, domain: SymbolicConstant, timePeriod: float = 0,
             previousStepName: str = '', procedure: str = '', totalTime: float = None) -> OdbStep:
        """This method creates an OdbStep object.

        Notes
        -----
        This function can be accessed by:

        .. code-block:: python

            session.odbs[name].Step

        Parameters
        ----------
        name
            A String specifying the repository key.
        description
            A String specifying the step description.
        domain
            A SymbolicConstant specifying the domain of the step. Possible values are TIME,
            FREQUENCY, ARC_LENGTH, and MODAL. The type of OdbFrame object that can be created for
            this step is based on the value of the *domain* argument.
        timePeriod
            A Float specifying the time period of the step. *timePeriod* is required if
            *domain*=TIME; otherwise, this argument is not applicable. The default value is 0.0.
        previousStepName
            A String specifying the preceding step. If *previousStepName* is the empty string, the
            last step in the repository is used. If *previousStepName* is not the last step, this
            will result in a change to the *previousStepName* member of the step that was in that
            position. A special value 'Initial' refers to the internal initial model step and may
            be used exclusively for inserting a new step at the first position before any other
            existing steps. The default value is an empty string.
        procedure
            A String specifying the step procedure. The default value is an empty string. The
            following is the list of valid procedures:

            ```
            *ANNEAL
            *BUCKLE
            *COMPLEX FREQUENCY
            *COUPLED TEMPERATURE-DISPLACEMENT
            *COUPLED TEMPERATURE-DISPLACEMENT, CETOL
            *COUPLED TEMPERATURE-DISPLACEMENT, STEADY STATE
            *COUPLED THERMAL-ELECTRICAL, STEADY STATE
            *COUPLED THERMAL-ELECTRICAL
            *COUPLED THERMAL-ELECTRICAL, DELTMX
            *DYNAMIC
            *DYNAMIC, DIRECT
            *DYNAMIC, EXPLICIT
            *DYNAMIC, SUBSPACE
            *DYNAMIC TEMPERATURE-DISPLACEMENT, EXPLICT
            *ELECTROMAGNETIC, HIGH FREQUENCY, TIME HARMONIC
            *ELECTROMAGNETIC, LOW FREQUENCY, TIME DOMAIN
            *ELECTROMAGNETIC, LOW FREQUENCY, TIME DOMAIN, DIRECT
            *ELECTROMAGNETIC, LOW FREQUENCY, TIME HARMONIC
            *FREQUENCY
            *GEOSTATIC
            *HEAT TRANSFER
            *HEAT TRANSFER, DELTAMX=__
            *HEAT TRANSFER, STEADY STATE
            *MAGNETOSTATIC
            *MAGNETOSTATIC, DIRECT
            *MASS DIFFUSION
            *MASS DIFFUSION, DCMAX=
            *MASS DIFFUSION, STEADY STATE
            *MODAL DYNAMIC
            *RANDOM RESPONSE
            *RESPONSE SPECTRUM
            *SOILS
            *SOILS, CETOL/UTOL
            *SOILS, CONSOLIDATION
            *SOILS, CONSOLIDATION, CETOL/UTOL
            *STATIC
            *STATIC, DIRECT
            *STATIC, RIKS
            *STEADY STATE DYNAMICS
            *STEADY STATE TRANSPORT
            *STEADY STATE TRANSPORT, DIRECT
            *STEP PERTURBATION, *STATIC
            *SUBSTRUCTURE GENERATE
            *USA ADDDED MASS GENERATION
            *VISCO
            ```
        totalTime
            A Float specifying the analysis time spent in all the steps previous to this step. The
            default value is -1.0.

        Returns
        -------
        An OdbStep object.

        Raises
        ------
        ValueError
            If *previousStepName* is invalid: previousStepName is invalid.
        """
        self.steps[name] = odbStep = OdbStep(name, description, domain, timePeriod, previousStepName,
                                             procedure, totalTime)
        return odbStep
    def SectionCategory(self, name: str, description: str) -> SectionCategory:
        """This method creates a SectionCategory object.

        Notes
        -----
        This function can be accessed by:

        .. code-block:: python

            session.odbs[*name*].SectionCategory

        Parameters
        ----------
        name
            A String specifying the name of the category.
        description
            A String specifying the description of the category.

        Returns
        -------
        A SectionCategory object.
        """
        self.sectionCategories[name] = sectionCategory = SectionCategory(name, description)
        return sectionCategory
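
# Illustrative only (not part of the original module): a minimal sketch of how
# these factory methods chain together, assuming an open Odb and the usual
# abaqusConstants names.
#
#   odb = session.odbs['Job-1.odb']
#   part = odb.Part(name='PART-1', embeddedSpace=THREE_D, type=DEFORMABLE_BODY)
#   step = odb.Step(name='Step-1', description='first load step',
#                   domain=TIME, timePeriod=1.0)
#   category = odb.SectionCategory(name='shells', description='shell sections')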
| 37.966981 | 112 | 0.610262 | 812 | 8,049 | 6.039409 | 0.277094 | 0.018556 | 0.034462 | 0.02447 | 0.205139 | 0.136827 | 0.089519 | 0.040375 | 0.040375 | 0.040375 | 0 | 0.00091 | 0.317431 | 8,049 | 211 | 113 | 38.146919 | 0.891518 | 0.691266 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.130435 | false | 0 | 0.347826 | 0 | 0.652174 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 |
b1483e23d7d2752b7248ed2d54d8ac8e55492604 | 241 | py | Python | popcorn_gallery/tutorials/urls.py | Koenkk/popcorn_maker | 0978b9f98dacd4e8eb753404b24eb584f410aa11 | [
"BSD-3-Clause"
] | 15 | 2015-03-23T02:55:20.000Z | 2021-01-12T12:42:30.000Z | popcorn_gallery/tutorials/urls.py | Koenkk/popcorn_maker | 0978b9f98dacd4e8eb753404b24eb584f410aa11 | [
"BSD-3-Clause"
] | null | null | null | popcorn_gallery/tutorials/urls.py | Koenkk/popcorn_maker | 0978b9f98dacd4e8eb753404b24eb584f410aa11 | [
"BSD-3-Clause"
] | 16 | 2015-02-18T21:43:31.000Z | 2021-11-09T22:50:03.000Z | from django.conf.urls.defaults import patterns, url
urlpatterns = patterns(
    'popcorn_gallery.tutorials.views',
    url(r'^(?P<slug>[\w-]+)/$', 'object_detail', name='object_detail'),
    url(r'^$', 'object_list', name='object_list'),
)
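
# Illustrative only: assuming this URLconf is included under a prefix such as
# /tutorials/ (the prefix is set by the including URLconf), a request for
# /tutorials/ resolves to object_list, and /tutorials/my-first-tutorial/
# resolves to object_detail with slug='my-first-tutorial'.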
| 30.125 | 71 | 0.66805 | 31 | 241 | 5.032258 | 0.677419 | 0.051282 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.120332 | 241 | 7 | 72 | 34.428571 | 0.735849 | 0 | 0 | 0 | 0 | 0 | 0.414938 | 0.128631 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |
b15750ce5aef5b54cce96688ad262cadc96dc7f8 | 4,432 | py | Python | src/taskmaster/client.py | alex/taskmaster | 04a03bf0853facf318ce98192db6389cdaaefe3c | [
"Apache-2.0"
] | 2 | 2015-11-08T12:45:38.000Z | 2017-06-03T09:16:16.000Z | src/taskmaster/client.py | alex/taskmaster | 04a03bf0853facf318ce98192db6389cdaaefe3c | [
"Apache-2.0"
] | null | null | null | src/taskmaster/client.py | alex/taskmaster | 04a03bf0853facf318ce98192db6389cdaaefe3c | [
"Apache-2.0"
] | null | null | null | """
taskmaster.consumer
~~~~~~~~~~~~~~~~~~~

:copyright: (c) 2010 DISQUS.
:license: Apache License 2.0, see LICENSE for more details.
"""
import cPickle as pickle
import gevent
from gevent_zeromq import zmq
from gevent.queue import Queue
from taskmaster.util import import_target
class Worker(object):
    def __init__(self, consumer, target):
        self.consumer = consumer
        self.target = target

    def run(self):
        self.started = True
        while self.started:
            gevent.sleep(0)
            try:
                job_id, job = self.consumer.get_job()
                self.target(job)
            except KeyboardInterrupt:
                return
            finally:
                self.consumer.task_done()
class Client(object):
    def __init__(self, address, timeout=2500, retries=3):
        self.address = address
        self.timeout = timeout
        self.retries = retries
        self.context = zmq.Context(1)
        self.poller = zmq.Poller()
        self.client = None

    def reconnect(self):
        if self.client:
            self.poller.unregister(self.client)
            self.client.close()
            print("Reconnecting to server on %r" % self.address)
        else:
            print("Connecting to server on %r" % self.address)
        self.client = self.context.socket(zmq.REQ)
        self.client.setsockopt(zmq.LINGER, 0)
        self.client.connect(self.address)
        self.poller.register(self.client, zmq.POLLIN)

    def send(self, cmd, data=''):
        request = [cmd, data]
        retries = self.retries
        reply = None
        while retries > 0:
            gevent.sleep(0)
            self.client.send_multipart(request)
            try:
                items = self.poller.poll(self.timeout)
            except KeyboardInterrupt:
                break  # interrupted
            if items:
                reply = self.recv()
                break
            else:
                if retries:
                    self.reconnect()
                else:
                    break
                retries -= 1
        return reply

    def recv(self):
        reply = self.client.recv_multipart()
        assert len(reply) == 2
        return reply

    def destroy(self):
        if self.client:
            self.poller.unregister(self.client)
            self.client.setsockopt(zmq.LINGER, 0)
            self.client.close()
        self.context.destroy()
class Consumer(object):
    def __init__(self, client, target, progressbar=True):
        if isinstance(target, basestring):
            target = import_target(target, 'handle_job')
        self.client = client
        self.target = target
        self.queue = Queue(maxsize=1)
        if progressbar:
            self.pbar = self.get_progressbar()
        else:
            self.pbar = None
        self._wants_job = False

    def get_progressbar(self):
        from taskmaster.progressbar import Counter, Speed, Timer, ProgressBar, UnknownLength

        widgets = ['Tasks Completed: ', Counter(), ' | ', Speed(), ' | ', Timer()]
        pbar = ProgressBar(widgets=widgets, maxval=UnknownLength)
        return pbar

    def get_job(self):
        self._wants_job = True
        return self.queue.get()

    def task_done(self):
        if self.pbar:
            self.pbar.update(self.tasks_completed)
        self.tasks_completed += 1
        # self.client.send('DONE')

    def start(self):
        self.started = True
        self.tasks_completed = 0
        self.client.reconnect()

        worker = Worker(self, self.target)
        gevent.spawn(worker.run)

        if self.pbar:
            self.pbar.start()

        while self.started:
            gevent.sleep(0)

            # If the queue has items in it, we just loop
            if not self._wants_job:
                continue

            reply = self.client.send('GET')
            if not reply:
                break

            cmd, data = reply

            # Reply can be "WAIT", "OK", or "ERROR"
            if cmd == 'OK':
                self._wants_job = False
                job = pickle.loads(data)
                self.queue.put(job)
            elif cmd == 'QUIT':
                break

        self.shutdown()

    def shutdown(self):
        if not self.started:
            return
        self.started = False
        if self.pbar:
            self.pbar.finish()
        self.client.destroy()
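
# Illustrative only (not part of the original module): a minimal sketch of how
# the pieces above fit together; the address and module path are assumptions.
#
#   from taskmaster.client import Client, Consumer
#
#   client = Client('tcp://127.0.0.1:5555')
#   # 'mytasks' is assumed to be an importable module defining handle_job(job),
#   # which import_target() resolves when the target is given as a string.
#   consumer = Consumer(client, 'mytasks')
#   consumer.start()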
| 25.181818 | 92 | 0.54287 | 476 | 4,432 | 4.978992 | 0.271008 | 0.084388 | 0.029536 | 0.021519 | 0.141772 | 0.118987 | 0.076793 | 0.076793 | 0.047257 | 0.047257 | 0 | 0.00809 | 0.358529 | 4,432 | 175 | 93 | 25.325714 | 0.825536 | 0.026399 | 0 | 0.314516 | 0 | 0 | 0.023005 | 0 | 0 | 0 | 0 | 0 | 0.008065 | 0 | null | null | 0 | 0.056452 | null | null | 0.016129 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 |