Dataset schema (one row per source file):

| column | dtype | range |
|---|---|---|
| hexsha | string | length 40 |
| size | int64 | 1 – 1.03M |
| ext | string | 10 classes |
| lang | string | 1 class |
| max_stars_repo_path | string | length 3 – 239 |
| max_stars_repo_name | string | length 5 – 130 |
| max_stars_repo_head_hexsha | string | length 40 – 78 |
| max_stars_repo_licenses | list | length 1 – 10 |
| max_stars_count | int64 (nullable) | 1 – 191k |
| max_stars_repo_stars_event_min_datetime | string (nullable) | length 24 |
| max_stars_repo_stars_event_max_datetime | string (nullable) | length 24 |
| max_issues_repo_path | string | length 3 – 239 |
| max_issues_repo_name | string | length 5 – 130 |
| max_issues_repo_head_hexsha | string | length 40 – 78 |
| max_issues_repo_licenses | list | length 1 – 10 |
| max_issues_count | int64 (nullable) | 1 – 67k |
| max_issues_repo_issues_event_min_datetime | string (nullable) | length 24 |
| max_issues_repo_issues_event_max_datetime | string (nullable) | length 24 |
| max_forks_repo_path | string | length 3 – 239 |
| max_forks_repo_name | string | length 5 – 130 |
| max_forks_repo_head_hexsha | string | length 40 – 78 |
| max_forks_repo_licenses | list | length 1 – 10 |
| max_forks_count | int64 (nullable) | 1 – 105k |
| max_forks_repo_forks_event_min_datetime | string (nullable) | length 24 |
| max_forks_repo_forks_event_max_datetime | string (nullable) | length 24 |
| content | string | length 1 – 1.03M |
| avg_line_length | float64 | 1 – 958k |
| max_line_length | int64 | 1 – 1.03M |
| alphanum_fraction | float64 | 0 – 1 |
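Each row below pairs this provenance metadata with the raw file contents. As a quick orientation, here is a minimal sketch of iterating over rows with this schema via the Hugging Face `datasets` library; the dataset path is a placeholder assumption, not the actual source of this dump.

```python
from datasets import load_dataset

# Minimal sketch, assuming rows follow the schema above; the dataset path
# "some-org/python-code-corpus" is a placeholder, not the real source.
ds = load_dataset("some-org/python-code-corpus", split="train", streaming=True)
for row in ds:
    if row["alphanum_fraction"] < 0.25:  # the stats columns support quick filtering
        continue
    # Each record carries repo provenance plus the raw file text.
    print(row["max_stars_repo_name"], row["max_stars_repo_path"], row["size"])
    source = row["content"]              # the full Python file as one string
    break
```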
| 4a183e9eb1629119b07a80ab55e50e7e4f9662d0 | 23,106 bytes | py | Python | tests/authorization/test_owner_privileges.py | shiveshwar/impala-3.1.0 @ f4e0c72ee1fe5e530518f2096702a00d356619cd | ["Apache-2.0"] | stars: null | issues: null | forks: null |

```python
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
#
# Client tests to verify object ownership functionality.
import grp
import pytest
from getpass import getuser
from os import getenv
from tests.common.sentry_cache_test_suite import SentryCacheTestSuite, TestObject
from tests.common.test_dimensions import create_uncompressed_text_dimension
from tests.common.skip import SkipIfHive3
# Sentry long polling frequency to make Sentry refresh not run.
SENTRY_LONG_POLLING_FREQUENCY_S = 3600
SENTRY_CONFIG_DIR = getenv('IMPALA_HOME') + '/fe/src/test/resources/'
SENTRY_BASE_LOG_DIR = getenv('IMPALA_CLUSTER_LOGS_DIR') + "/sentry"
SENTRY_CONFIG_FILE_OO = SENTRY_CONFIG_DIR + 'sentry-site_oo.xml'
SENTRY_CONFIG_FILE_OO_NOGRANT = SENTRY_CONFIG_DIR + 'sentry-site_oo_nogrant.xml'
SENTRY_CONFIG_FILE_NO_OO = SENTRY_CONFIG_DIR + 'sentry-site_no_oo.xml'
@SkipIfHive3.sentry_not_supported
class TestOwnerPrivileges(SentryCacheTestSuite):
@classmethod
def add_test_dimensions(cls):
super(TestOwnerPrivileges, cls).add_test_dimensions()
cls.ImpalaTestMatrix.add_dimension(
create_uncompressed_text_dimension(cls.get_workload()))
@classmethod
def teardown_class(cls):
    super(TestOwnerPrivileges, cls).teardown_class()
def setup_method(self, method):
super(TestOwnerPrivileges, self).setup_method(method)
self._setup_admin()
def teardown_method(self, method):
self._cleanup_admin()
super(TestOwnerPrivileges, self).teardown_method(method)
def _setup_ownership_test(self):
self._cleanup_ownership_test()
# Base roles for enabling tests.
self.execute_query("create role owner_priv_test_oo_user1")
# Role for verifying grant.
self.execute_query("create role owner_priv_test_all_role")
# Role for verifying transfer to role.
self.execute_query("create role owner_priv_test_owner_role")
self.execute_query("grant role owner_priv_test_oo_user1 to group oo_group1")
self.execute_query("grant role owner_priv_test_owner_role to group oo_group1")
self.execute_query("grant create on server to owner_priv_test_oo_user1")
self.execute_query("grant select on database functional to owner_priv_test_oo_user1")
def _cleanup_ownership_test(self):
# Clean up the test artifacts.
try:
self.cleanup_db("owner_priv_db", sync_ddl=0)
except Exception:
# Ignore this if we can't show tables.
pass
# Clean up any old roles created by this test
for role_name in self.execute_query("show roles").data:
if "owner_priv_test" in role_name:
self.execute_query("drop role %s" % role_name)
@staticmethod
def count_user_privileges(result):
"""
This method returns a new list of privileges that only contain user privileges.
"""
# results should have the following columns
# principal_name, principal_type, scope, database, table, column, uri, privilege,
# grant_option, create_time
total = 0
for row in result.data:
col = row.split('\t')
if col[0] == 'USER':
total += 1
return total
def _validate_no_user_privileges(self, client, user, refresh_authorization):
if refresh_authorization: self.execute_query("refresh authorization")
result = self.user_query(client, "show grant user %s" % user, user=user)
return TestOwnerPrivileges.count_user_privileges(result) == 0
def _setup_admin(self):
# Admin for manipulation and cleaning up.
try:
self.execute_query("drop role owner_priv_admin")
except Exception:
# Ignore in case it wasn't created yet.
pass
self.execute_query("create role owner_priv_admin")
self.execute_query("grant all on server to owner_priv_admin with grant option")
group_name = grp.getgrnam(getuser()).gr_name
self.execute_query("grant role owner_priv_admin to group `%s`" % group_name)
def _cleanup_admin(self):
self.execute_query("drop role owner_priv_admin")
@pytest.mark.execute_serially
@SentryCacheTestSuite.with_args(
impalad_args="--server_name=server1 --sentry_config={0} "
"--authorization_policy_provider_class="
"org.apache.impala.testutil.TestSentryResourceAuthorizationProvider"
.format(SENTRY_CONFIG_FILE_OO),
catalogd_args="--sentry_config={0} --sentry_catalog_polling_frequency_s={1} "
"--authorization_policy_provider_class="
"org.apache.impala.testutil.TestSentryResourceAuthorizationProvider"
.format(SENTRY_CONFIG_FILE_OO, SENTRY_LONG_POLLING_FREQUENCY_S),
sentry_config=SENTRY_CONFIG_FILE_OO,
sentry_log_dir="{0}/test_owner_privileges_with_grant".format(SENTRY_BASE_LOG_DIR))
def test_owner_privileges_with_grant(self, vector, unique_database):
"""Tests owner privileges with grant on database, table, and view.
- refresh_authorization=True: With Sentry refresh to make sure privileges are really
stored in Sentry.
- refresh_authorization=False: No Sentry refresh to make sure user can use owner
privileges right away without a Sentry refresh."""
for refresh in [True, False]:
try:
self._setup_ownership_test()
self._execute_owner_privilege_tests(TestObject(TestObject.DATABASE,
"owner_priv_db",
grant=True),
refresh_authorization=refresh)
self._execute_owner_privilege_tests(TestObject(TestObject.TABLE,
unique_database +
".owner_priv_tbl",
grant=True),
refresh_authorization=refresh)
self._execute_owner_privilege_tests(TestObject(TestObject.VIEW,
unique_database +
".owner_priv_view",
grant=True),
refresh_authorization=refresh)
finally:
self._cleanup_ownership_test()
def _execute_owner_privilege_tests(self, test_obj, refresh_authorization):
"""
Executes all the statements required to validate owner privileges work correctly
for a specific database, table, or view.
"""
# Create object and ensure oo_user1 gets owner privileges.
self.oo_user1_impalad_client = self.create_impala_client()
# oo_user2 is only used for transferring ownership.
self.oo_user2_impalad_client = self.create_impala_client()
self.user_query(self.oo_user1_impalad_client, "create %s if not exists %s %s %s" %
(test_obj.obj_type, test_obj.obj_name, test_obj.table_def,
test_obj.view_select), user="oo_user1")
self.validate_privileges(self.oo_user1_impalad_client, "show grant user oo_user1",
test_obj, user="oo_user1",
refresh_authorization=refresh_authorization)
# Ensure grant works.
self.user_query(self.oo_user1_impalad_client,
"grant all on %s %s to role owner_priv_test_all_role" %
(test_obj.grant_name, test_obj.obj_name), user="oo_user1")
self.user_query(self.oo_user1_impalad_client,
"revoke all on %s %s from role owner_priv_test_all_role" %
(test_obj.grant_name, test_obj.obj_name), user="oo_user1")
# Change the database owner and ensure oo_user1 does not have owner privileges.
self.user_query(self.oo_user1_impalad_client, "alter %s %s set owner user oo_user2" %
(test_obj.obj_type, test_obj.obj_name), user="oo_user1")
assert self._validate_no_user_privileges(self.oo_user1_impalad_client,
user="oo_user1",
refresh_authorization=refresh_authorization)
# Ensure oo_user1 cannot drop database after owner change.
self.user_query(self.oo_user1_impalad_client, "drop %s %s" %
(test_obj.obj_type, test_obj.obj_name), user="oo_user1",
error_msg="does not have privileges to execute 'DROP'")
# oo_user2 should have privileges for object now.
self.validate_privileges(self.oo_user2_impalad_client, "show grant user oo_user2",
test_obj, user="oo_user2",
refresh_authorization=refresh_authorization)
# Change the owner to a role and ensure oo_user2 doesn't have privileges.
# Set the owner back to oo_user1 since for views, oo_user2 doesn't have select
# privileges on the underlying table.
self.execute_query("alter %s %s set owner user oo_user1" %
(test_obj.obj_type, test_obj.obj_name),
query_options={"sync_ddl": 1})
assert self._validate_no_user_privileges(self.oo_user2_impalad_client,
user="oo_user2",
refresh_authorization=refresh_authorization)
self.user_query(self.oo_user1_impalad_client,
"alter %s %s set owner role owner_priv_test_owner_role" %
(test_obj.obj_type, test_obj.obj_name), user="oo_user1")
# Ensure oo_user1 does not have user privileges.
assert self._validate_no_user_privileges(self.oo_user1_impalad_client,
user="oo_user1",
refresh_authorization=refresh_authorization)
# Ensure role has owner privileges.
self.validate_privileges(self.oo_user1_impalad_client,
"show grant role owner_priv_test_owner_role", test_obj,
user="oo_user1", refresh_authorization=refresh_authorization)
# Drop the object and ensure no role privileges.
self.user_query(self.oo_user1_impalad_client, "drop %s %s " %
(test_obj.obj_type, test_obj.obj_name), user="oo_user1")
assert self._validate_no_user_privileges(self.oo_user1_impalad_client,
user="oo_user1",
refresh_authorization=refresh_authorization)
# Ensure user privileges are gone after drop.
self.user_query(self.oo_user1_impalad_client, "create %s if not exists %s %s %s" %
(test_obj.obj_type, test_obj.obj_name, test_obj.table_def,
test_obj.view_select), user="oo_user1")
self.user_query(self.oo_user1_impalad_client, "drop %s %s " %
(test_obj.obj_type, test_obj.obj_name), user="oo_user1")
assert self._validate_no_user_privileges(self.oo_user1_impalad_client,
user="oo_user1",
refresh_authorization=refresh_authorization)
@pytest.mark.execute_serially
@SentryCacheTestSuite.with_args(
impalad_args="--server_name=server1 --sentry_config={0} "
"--authorization_policy_provider_class="
"org.apache.impala.testutil.TestSentryResourceAuthorizationProvider"
.format(SENTRY_CONFIG_FILE_NO_OO),
catalogd_args="--sentry_config={0} --authorization_policy_provider_class="
"org.apache.impala.testutil.TestSentryResourceAuthorizationProvider"
.format(SENTRY_CONFIG_FILE_NO_OO),
sentry_config=SENTRY_CONFIG_FILE_NO_OO,
sentry_log_dir="{0}/test_owner_privileges_disabled".format(SENTRY_BASE_LOG_DIR))
def test_owner_privileges_disabled(self, vector, unique_database):
"""Tests that there should not be owner privileges."""
try:
self._setup_ownership_test()
self._execute_owner_privilege_tests_no_oo(TestObject(TestObject.DATABASE,
"owner_priv_db"))
self._execute_owner_privilege_tests_no_oo(TestObject(TestObject.TABLE,
unique_database +
".owner_priv_tbl"))
self._execute_owner_privilege_tests_no_oo(TestObject(TestObject.VIEW,
unique_database +
".owner_priv_view"))
finally:
self._cleanup_ownership_test()
def _execute_owner_privilege_tests_no_oo(self, test_obj):
"""
Executes all the statements required to validate owner privileges work correctly
for a specific database, table, or view.
"""
# Create object and ensure oo_user1 gets owner privileges.
self.oo_user1_impalad_client = self.create_impala_client()
self.user_query(self.oo_user1_impalad_client, "create %s if not exists %s %s %s"
% (test_obj.obj_type, test_obj.obj_name, test_obj.table_def,
test_obj.view_select), user="oo_user1")
# Ensure grant doesn't work.
self.user_query(self.oo_user1_impalad_client,
"grant all on %s %s to role owner_priv_test_all_role" %
(test_obj.grant_name, test_obj.obj_name), user="oo_user1",
error_msg="does not have privileges to execute: GRANT_PRIVILEGE")
self.user_query(self.oo_user1_impalad_client,
"revoke all on %s %s from role owner_priv_test_all_role" %
(test_obj.grant_name, test_obj.obj_name), user="oo_user1",
error_msg="does not have privileges to execute: REVOKE_PRIVILEGE")
# Ensure changing the database owner doesn't work.
self.user_query(self.oo_user1_impalad_client,
"alter %s %s set owner user oo_user2" %
(test_obj.obj_type, test_obj.obj_name), user="oo_user1",
error_msg="does not have privileges with 'GRANT OPTION'")
# Ensure oo_user1 cannot drop database.
self.user_query(self.oo_user1_impalad_client, "drop %s %s" %
(test_obj.obj_type, test_obj.obj_name), user="oo_user1",
error_msg="does not have privileges to execute 'DROP'")
@pytest.mark.execute_serially
@SentryCacheTestSuite.with_args(
impalad_args="--server_name=server1 --sentry_config={0} "
"--authorization_policy_provider_class="
"org.apache.impala.testutil.TestSentryResourceAuthorizationProvider"
.format(SENTRY_CONFIG_FILE_OO_NOGRANT),
catalogd_args="--sentry_config={0} --sentry_catalog_polling_frequency_s={1} "
"--authorization_policy_provider_class="
"org.apache.impala.testutil.TestSentryResourceAuthorizationProvider"
.format(SENTRY_CONFIG_FILE_OO_NOGRANT,
SENTRY_LONG_POLLING_FREQUENCY_S),
sentry_config=SENTRY_CONFIG_FILE_OO_NOGRANT,
sentry_log_dir="{0}/test_owner_privileges_without_grant"
.format(SENTRY_BASE_LOG_DIR))
def test_owner_privileges_without_grant(self, vector, unique_database):
"""Tests owner privileges without grant on database, table, and view.
- refresh_authorization=True: With Sentry refresh to make sure privileges are really
stored in Sentry.
- refresh_authorization=False: No Sentry refresh to make sure user can use owner
privileges right away without a Sentry refresh."""
for refresh in [True, False]:
try:
self._setup_ownership_test()
self._execute_owner_privilege_tests_oo_nogrant(TestObject(TestObject.DATABASE,
"owner_priv_db"),
refresh_authorization=refresh)
self._execute_owner_privilege_tests_oo_nogrant(TestObject(TestObject.TABLE,
unique_database +
".owner_priv_tbl"),
refresh_authorization=refresh)
self._execute_owner_privilege_tests_oo_nogrant(TestObject(TestObject.VIEW,
unique_database +
".owner_priv_view"),
refresh_authorization=refresh)
finally:
self._cleanup_ownership_test()
def _execute_owner_privilege_tests_oo_nogrant(self, test_obj, refresh_authorization):
"""
Executes all the statements required to validate owner privileges work correctly
for a specific database, table, or view.
"""
# Create object and ensure oo_user1 gets owner privileges.
self.oo_user1_impalad_client = self.create_impala_client()
self.user_query(self.oo_user1_impalad_client, "create %s if not exists %s %s %s" %
(test_obj.obj_type, test_obj.obj_name, test_obj.table_def,
test_obj.view_select), user="oo_user1")
self.validate_privileges(self.oo_user1_impalad_client, "show grant user oo_user1",
test_obj, user="oo_user1",
refresh_authorization=refresh_authorization)
# Ensure grant doesn't work.
self.user_query(self.oo_user1_impalad_client,
"grant all on %s %s to role owner_priv_test_all_role" %
(test_obj.grant_name, test_obj.obj_name), user="oo_user1",
error_msg="does not have privileges to execute: GRANT_PRIVILEGE")
self.user_query(self.oo_user1_impalad_client,
"revoke all on %s %s from role owner_priv_test_all_role" %
(test_obj.grant_name, test_obj.obj_name), user="oo_user1",
error_msg="does not have privileges to execute: REVOKE_PRIVILEGE")
self.user_query(self.oo_user1_impalad_client, "alter %s %s set owner user oo_user2" %
(test_obj.obj_type, test_obj.obj_name), user="oo_user1",
error_msg="does not have privileges with 'GRANT OPTION'")
# Use a delay to avoid a cache consistency issue that could occur after the alter.
self.user_query(self.oo_user1_impalad_client, "drop %s %s " %
(test_obj.obj_type, test_obj.obj_name), user="oo_user1")
assert self._validate_no_user_privileges(self.oo_user1_impalad_client,
user="oo_user1",
refresh_authorization=refresh_authorization)
@pytest.mark.execute_serially
@SentryCacheTestSuite.with_args(
impalad_args="--server_name=server1 --sentry_config={0} "
"--authorization_policy_provider_class="
"org.apache.impala.testutil.TestSentryResourceAuthorizationProvider"
.format(SENTRY_CONFIG_FILE_OO),
catalogd_args="--sentry_config={0} "
"--authorization_policy_provider_class="
"org.apache.impala.testutil.TestSentryResourceAuthorizationProvider"
.format(SENTRY_CONFIG_FILE_OO),
sentry_config=SENTRY_CONFIG_FILE_OO,
sentry_log_dir="{0}/test_owner_privileges_different_cases"
.format(SENTRY_BASE_LOG_DIR))
def test_owner_privileges_different_cases(self, vector, unique_database):
"""IMPALA-7742: Tests that only user names that differ only in case are not
authorized to access the database/table/view unless the user is the owner."""
# Use two different clients so that the sessions will use two different user names.
foobar_impalad_client = self.create_impala_client()
FOOBAR_impalad_client = self.create_impala_client()
role_name = "owner_priv_diff_cases_role"
try:
self.execute_query("create role %s" % role_name)
self.execute_query("grant role %s to group foobar" % role_name)
self.execute_query("grant all on server to role %s" % role_name)
self.user_query(foobar_impalad_client, "create database %s_db" %
unique_database, user="foobar")
# FOOBAR user should not be allowed to create a table in the foobar's database.
self.user_query(FOOBAR_impalad_client, "create table %s_db.test_tbl(i int)" %
unique_database, user="FOOBAR",
error_msg="User 'FOOBAR' does not have privileges to execute "
"'CREATE' on: %s_db" % unique_database)
self.user_query(foobar_impalad_client, "create table %s.owner_case_tbl(i int)" %
unique_database, user="foobar")
# FOOBAR user should not be allowed to select foobar's table.
self.user_query(FOOBAR_impalad_client, "select * from %s.owner_case_tbl" %
unique_database, user="FOOBAR",
error_msg="User 'FOOBAR' does not have privileges to execute "
"'SELECT' on: %s.owner_case_tbl" % unique_database)
self.user_query(foobar_impalad_client,
"create view %s.owner_case_view as select 1" % unique_database,
user="foobar")
# FOOBAR user should not be allowed to select foobar's view.
self.user_query(FOOBAR_impalad_client, "select * from %s.owner_case_view" %
unique_database, user="FOOBAR",
error_msg="User 'FOOBAR' does not have privileges to execute "
"'SELECT' on: %s.owner_case_view" % unique_database)
# FOOBAR user should not be allowed to see foobar's privileges.
self.user_query(FOOBAR_impalad_client, "show grant user foobar", user="FOOBAR",
error_msg="User 'FOOBAR' does not have privileges to access the "
"requested policy metadata")
finally:
self.user_query(foobar_impalad_client, "drop database %s_db cascade" %
unique_database, user="foobar")
self.execute_query("drop role %s" % role_name)
```

| avg_line_length: 54.112412 | max_line_length: 90 | alphanum_fraction: 0.641695 |
| 4a183f1fd090f35eb5147b17fa89e1738b96d55f | 8,313 bytes | py | Python | trac/wiki/test.py | tiagoeckhardt/trac @ b18c226195bfed8cd19cba97c6f03bd54dbbc044 | ["BSD-3-Clause"] | stars: null | issues: null | forks: null |

```python
# -*- coding: utf-8 -*-
#
# Copyright (C) 2004-2019 Edgewall Software
# All rights reserved.
#
# This software is licensed as described in the file COPYING, which
# you should have received as part of this distribution. The terms
# are also available at https://trac.edgewall.org/wiki/TracLicense.
#
# This software consists of voluntary contributions made by many
# individuals. For the exact contribution history, see the revision
# history and logs, available at https://trac.edgewall.org/log/.
import difflib
import io
import os
import re
import unittest
# Python 2.7 `assertMultiLineEqual` calls `safe_repr(..., short=True)`
# which breaks our custom failure display in WikiTestCase.
try:
from unittest.util import safe_repr
except ImportError:
pass
else:
unittest.case.safe_repr = lambda obj, short=False: safe_repr(obj, False)
from trac.test import EnvironmentStub, MockRequest
from trac.util.datefmt import datetime_now, to_utimestamp, utc
from trac.util.text import strip_line_ws, to_unicode
from trac.web.chrome import web_context
from trac.wiki.formatter import (HtmlFormatter, InlineHtmlFormatter,
OutlineFormatter)
class WikiTestCase(unittest.TestCase):
generate_opts = {}
def __init__(self, title, input, expected, file, line,
setup=None, teardown=None, context=None, default_data=False,
enable_components=None, disable_components=None,
env_path='', destroying=False):
unittest.TestCase.__init__(self, 'test')
self.title = title
self.input = input
self.expected = expected
if file.endswith('.pyc'):
file = file.replace('.pyc', '.py')
self.file = file
self.line = line
self._setup = setup
self._teardown = teardown
self._context = context
self.context = None
self._env_kwargs = {'default_data': default_data,
'enable': enable_components,
'disable': disable_components,
'path': env_path, 'destroying': destroying}
def _create_env(self):
env = EnvironmentStub(**self._env_kwargs)
# -- intertrac support
env.config.set('intertrac', 'genshi.title', "Genshi's Trac")
env.config.set('intertrac', 'genshi.url', "https://genshi.edgewall.org")
env.config.set('intertrac', 't', 'trac')
env.config.set('intertrac', 'th.title', "Trac Hacks")
env.config.set('intertrac', 'th.url', "http://trac-hacks.org")
# -- safe schemes
env.config.set('wiki', 'safe_schemes',
'data,file,ftp,http,https,svn,svn+ssh,'
'rfc-2396.compatible,rfc-2396+under_score')
return env
def setUp(self):
self.env = self._create_env()
self.req = MockRequest(self.env, script_name='/')
context = self._context
if context:
if isinstance(self._context, tuple):
context = web_context(self.req, *self._context)
else:
context = web_context(self.req, 'wiki', 'WikiStart')
self.context = context
# Remove the following lines in order to discover
# all the places where we should use the req.href
# instead of env.href
self.env.href = self.req.href
self.env.abs_href = self.req.abs_href
self.env.db_transaction(
"INSERT INTO wiki VALUES(%s,%s,%s,%s,%s,%s,%s)",
('WikiStart', 1, to_utimestamp(datetime_now(utc)), 'joe',
'--', 'Entry page', 0))
if self._setup:
self._setup(self)
def tearDown(self):
self.env.reset_db()
if self._teardown:
self._teardown(self)
def test(self):
"""Testing WikiFormatter"""
formatter = self.formatter()
v = unicode(formatter.generate(**self.generate_opts))
v = v.replace('\r', '').replace(u'\u200b', '') # FIXME: keep ZWSP
v = strip_line_ws(v, leading=False)
try:
self.assertEqual(self.expected, v)
except AssertionError as e:
msg = to_unicode(e)
match = re.match(r"u?'(.*)' != u?'(.*)'", msg)
if match:
g1 = ["%s\n" % x for x in match.group(1).split(r'\n')]
g2 = ["%s\n" % x for x in match.group(2).split(r'\n')]
expected = ''.join(g1)
actual = ''.join(g2)
wiki = repr(self.input).replace(r'\n', '\n')
diff = ''.join(list(difflib.unified_diff(g1, g2, 'expected',
'actual')))
# Tip: sometimes, 'expected' and 'actual' differ only by
# whitespace, so it can be useful to visualize them, e.g.
# expected = expected.replace(' ', '.')
# actual = actual.replace(' ', '.')
def info(*args):
return '\n========== %s: ==========\n%s' % args
msg = info('expected', expected)
msg += info('actual', actual)
msg += info('wiki', ''.join(wiki))
msg += info('diff', diff)
raise AssertionError( # See below for details
'%s\n\n%s:%s: "%s" (%s flavor)' \
% (msg, self.file, self.line, self.title, formatter.flavor))
def formatter(self):
return HtmlFormatter(self.env, self.context, self.input)
def shortDescription(self):
return 'Test ' + self.title
class OneLinerTestCase(WikiTestCase):
def formatter(self):
return InlineHtmlFormatter(self.env, self.context, self.input)
class EscapeNewLinesTestCase(WikiTestCase):
generate_opts = {'escape_newlines': True}
def formatter(self):
return HtmlFormatter(self.env, self.context, self.input)
class OutlineTestCase(WikiTestCase):
def formatter(self):
class Outliner(object):
flavor = 'outliner'
def __init__(self, env, context, input):
self.outliner = OutlineFormatter(env, context)
self.input = input
def generate(self):
out = io.StringIO()
self.outliner.format(self.input, out)
return out.getvalue()
return Outliner(self.env, self.context, self.input)
def wikisyntax_test_suite(data=None, setup=None, file=None, teardown=None,
context=None, default_data=False,
enable_components=None, disable_components=None,
env_path=None, destroying=False):
suite = unittest.TestSuite()
def add_test_cases(data, filename):
tests = re.compile('^(%s.*)$' % ('=' * 30), re.MULTILINE).split(data)
next_line = 1
line = 0
for title, test in zip(tests[1::2], tests[2::2]):
title = title.lstrip('=').strip()
if line != next_line:
line = next_line
if not test or test == '\n':
continue
next_line += len(test.split('\n')) - 1
if 'SKIP' in title or 'WONTFIX' in title:
continue
blocks = test.split('-' * 30 + '\n')
if len(blocks) < 5:
blocks.extend([None] * (5 - len(blocks)))
input, page, oneliner, page_escape_nl, outline = blocks[:5]
for cls, expected in [
(WikiTestCase, page),
(OneLinerTestCase, oneliner and oneliner[:-1]),
(EscapeNewLinesTestCase, page_escape_nl),
(OutlineTestCase, outline)]:
if expected:
tc = cls(title, input, expected, filename, line,
setup, teardown, context, default_data,
enable_components, disable_components,
env_path, destroying)
suite.addTest(tc)
if data:
add_test_cases(data, file)
else:
if os.path.exists(file):
with open(file, 'r') as fobj:
data = fobj.read().decode('utf-8')
add_test_cases(data, file)
else:
print('no ' + file)
return suite
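# Shape of the wiki test data parsed by add_test_cases above (a hypothetical
# entry; a line of thirty '=' characters carries the title, and lines of
# thirty '-' characters separate the wiki input from the expected output of
# each formatter flavor):
#
#   ============================== bold
#   '''bold'''
#   ------------------------------
#   <p><strong>bold</strong></p>
#   ------------------------------
#   <strong>bold</strong>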
```

| avg_line_length: 38.486111 | max_line_length: 80 | alphanum_fraction: 0.558763 |
| 4a183f3ef1f4fa7e9816c454bae4041644c55e95 | 2,442 bytes | py | Python | where/tools/delete.py | ingridfausk/where @ b65398911075b7ddef3a3a1146efa428eae498fe | ["MIT"] | stars: 16 (2018-08-31T10:31:11.000Z – 2022-03-15T16:07:24.000Z) | issues: 5 (2018-07-13T14:04:24.000Z – 2021-06-17T02:14:44.000Z) | forks: 15 (2018-06-07T05:45:24.000Z – 2022-03-15T16:07:27.000Z) |

```python
"""Delete a Where analysis
Usage:
{exe:tools} delete <date> <pipeline> [--session=<session>] [options]
The tool requires a date given in the format `<year month day>` (for example
2015 8 4).
In addition, one pipeline must be specified. See below for available pipelines.
=================== ===========================================================
Pipeline Description
=================== ===========================================================
{pipelines_doc:Delete}
=================== ===========================================================
Description:
------------
The delete tool deletes all files for a given model run from the work
directory.
Examples:
---------
Delete the VLBI analysis for August 4 2015:
{exe:tools} delete 2015 8 4 -v --session=XA
Current Maintainers:
--------------------
{maintainers}
Version: {version}
"""
# Standard library imports
import pathlib
import shutil
# Midgard imports
from midgard.dev import plugins
# Where imports
from where.lib import config
from where.lib import log
@plugins.register
def delete_analysis(rundate: "date", pipeline: "pipeline", **kwargs): # typing: ignore
"""Delete working directory for a given model run date
Args:
rundate: The model run date.
"""
file_vars = config.create_file_vars(rundate, pipeline, **kwargs)
work_directory = config.files.path("directory_work", file_vars=file_vars)
log.info(f"Deleting '{work_directory}'")
_warn_about_cwd_deleted(work_directory)
try:
shutil.rmtree(work_directory)
except FileNotFoundError:
log.warn(f"'{work_directory}' does not exist. Nothing to delete")
def _warn_about_cwd_deleted(directory):
"""Warn about the current working directory being deleted
Deleting the current working directory can lead to weird bugs when continuing to work in the
terminal. Unfortunately, we cannot change the working directory of the terminal from within Where, as that is a
parent process. For now, we simply print a warning, and instructions about changing to an existing directory.
Args:
directory (Path): Directory that will be deleted
"""
cwd = pathlib.Path.cwd()
directory = directory.resolve()
if directory == cwd or directory in cwd.parents:
log.warn(f"Current working directory '{cwd}' is being deleted")
log.warn(f"Do 'cd {directory.parent}' to keep working")
```

| avg_line_length: 28.068966 | max_line_length: 115 | alphanum_fraction: 0.635954 |
| 4a1840110752af64b4abc497e2772eccf370f159 | 9,947 bytes | py | Python | advbench/models/mnist.py | constrainedlearning/advbench @ 68f9f6d77268aad45517ca84d383b996724cc976 | ["MIT"] | stars: null | issues: null | forks: null |

```python
import torch
import torch.nn as nn
import torch.nn.functional as F
from e2cnn import gspaces
from e2cnn import nn as enn
from advbench.models.e2_utils import *
from torch.nn.functional import pad
class MNISTNet(nn.Module):
def __init__(self, input_shape, num_classes, n_layers=2):
super(MNISTNet, self).__init__()
convs = [nn.Conv2d(input_shape[0], 32, 3, 1),nn.ReLU(),nn.Conv2d(32, 64, 3, 1),nn.ReLU()]
for i in range(n_layers-2):
convs.append(nn.Conv2d(64, 64, 3, 1))
convs.append(nn.ReLU())
self.convs = nn.Sequential(*convs)
self.dropout1 = nn.Dropout(0.25)
self.dropout2 = nn.Dropout(0.5)
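# Spatial bookkeeping for fc1 (derived from the layers above): each 3x3
# convolution with stride 1 and no padding shrinks the side length by 2, so
# n_layers convolutions leave input_shape[-1] - 2*n_layers pixels per side;
# (dim_out - 2) // 2 + 1 is then the side length after the 2x2 max pool,
# which equals floor(dim_out / 2).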
dim_out = input_shape[-1]-2*(n_layers)
dim_out = ((int((dim_out-2)/2)+1)**2)*64
self.fc1 = nn.Linear(dim_out, 128)
self.fc2 = nn.Linear(128, num_classes)
def forward(self, x):
x = self.convs(x)
x = F.max_pool2d(x, 2)
x = self.dropout1(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = F.relu(x)
x = self.dropout2(x)
x = self.fc2(x)
return x
class SteerableMNISTnet(torch.nn.Module):
def __init__(self, n_classes=10, n_rot = 8, num_channels = 1, control_width = True):
super(SteerableMNISTnet, self).__init__()
self.exported=False
channels = [32, 64]
if control_width:
channels = [int(c//(n_rot)) for c in channels]
# the model is equivariant under rotations by 360/n_rot degrees, modelled by Cn_rot
self.r2_act = gspaces.Rot2dOnR2(N=n_rot)
# the input image is a scalar field, corresponding to the trivial representation
in_type = enn.FieldType(self.r2_act, [self.r2_act.trivial_repr]*num_channels)
# we store the input type for wrapping the images into a geometric tensor during the forward pass
self.input_type = in_type
# convolution 1
# first specify the output type of the convolutional layer
# we choose channels[0] feature fields (32 before width control), each transforming under the regular representation of C_{n_rot}
out_type_1 = enn.FieldType(self.r2_act, channels[0]*[self.r2_act.regular_repr])
self.block1 = enn.SequentialModule(
enn.R2Conv(in_type, out_type_1, kernel_size=3, padding=1, bias=False),
enn.ReLU(out_type_1, inplace=True)
)
in_type_2 = self.block1.out_type
# the output type of the second convolution layer is channels[1] regular feature fields (64 before width control) of C_{n_rot}
out_type_2 = enn.FieldType(self.r2_act, channels[1]*[self.r2_act.regular_repr])
self.block2 = enn.SequentialModule(
enn.R2Conv(in_type_2, out_type_2, kernel_size=3, padding=1, bias=False),
enn.ReLU(out_type_2, inplace=True)
)
self.pool1 = enn.PointwiseMaxPool(out_type_2, 2)
out_type_size = self.pool1.out_type.size
self.fc1 = torch.nn.Sequential(
nn.Dropout(0.25),
nn.Linear(out_type_size*196, 128),
nn.ReLU(inplace=True),
)
self.fc2 = torch.nn.Sequential(
nn.Dropout(0.5),
nn.Linear(128, n_classes)
)
self.model = enn.SequentialModule(self.block1, self.block2, self.pool1)
def forward(self, input: torch.Tensor):
# wrap the input tensor in a GeometricTensor
# (associate it with the input type)
if not self.exported:
#x = pad(input, (0,1,0,1))
x = enn.GeometricTensor(input, self.input_type)
else:
x = input
if not self.exported:
x = self.model(x)
# unwrap the output GeometricTensor
# (take the Pytorch tensor and discard the associated representation)
x = x.tensor
else:
x = self.exported_model(x)
x = torch.flatten(x, 1)
x = self.fc1(x)
x = self.fc2(x)
return x
def export(self):
self.exported=True
self.exported_model = self.model.export().eval()
def unexport(self):
self.exported=False
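# Usage sketch (hypothetical values; assumes 28x28 single-channel MNIST input,
# so the flattened pooled feature map matches fc1's expected input size):
#   model = SteerableMNISTnet(n_classes=10, n_rot=8, num_channels=1)
#   logits = model(torch.randn(4, 1, 28, 28))  # -> tensor of shape (4, 10)
#   model.export()    # swap in the exported, plain-torch model for inference
#   model.unexport()  # switch back to the equivariant model for training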
class CnSteerableCNN(torch.nn.Module):
def __init__(self, n_classes=10, n_rot = 8, num_channels = 1):
super(CnSteerableCNN, self).__init__()
self.exported=False
# the model is equivariant under rotations by 360/n_rot degrees, modelled by Cn_rot
self.r2_act = gspaces.Rot2dOnR2(N=n_rot)
# the input image is a scalar field, corresponding to the trivial representation
in_type = enn.FieldType(self.r2_act, [self.r2_act.trivial_repr]*num_channels)
# we store the input type for wrapping the images into a geometric tensor during the forward pass
self.input_type = in_type
# convolution 1
# first specify the output type of the convolutional layer
# we choose 24 feature fields, each transforming under the regular representation of C8
out_type_1 = enn.FieldType(self.r2_act, 24*[self.r2_act.regular_repr])
self.block1 = enn.SequentialModule(
enn.R2Conv(in_type, out_type_1, kernel_size=7, padding=2, bias=False),
enn.InnerBatchNorm(out_type_1),
enn.ReLU(out_type_1, inplace=True)
)
in_type_2 = self.block1.out_type
# the output type of the second convolution layer is 48 regular feature fields of C8
out_type_2 = enn.FieldType(self.r2_act, 48*[self.r2_act.regular_repr])
self.block2 = enn.SequentialModule(
enn.R2Conv(in_type_2, out_type_2, kernel_size=5, padding=2, bias=False),
enn.InnerBatchNorm(out_type_2),
enn.ReLU(out_type_2, inplace=True)
)
#self.pool1 = enn.SequentialModule(
# enn.PointwiseAvgPoolAntialiased(out_type_2, sigma=0.66, stride=2)
#)
self.pool1 = enn.PointwiseAdaptiveAvgPool(out_type_2, (14, 14))
# convolution 3
# the old output type is the input type to the next layer
in_type_3 = self.block2.out_type
# the output type of the third convolution layer is 48 regular feature fields of C8
out_type_3 = enn.FieldType(self.r2_act, 48*[self.r2_act.regular_repr])
self.block3 = enn.SequentialModule(
enn.R2Conv(in_type_3, out_type_3, kernel_size=5, padding=2, bias=False),
enn.InnerBatchNorm(out_type_3),
enn.ReLU(out_type_3, inplace=True)
)
# convolution 4
# the old output type is the input type to the next layer
in_type_4 = self.block3.out_type
# the output type of the fourth convolution layer is 96 regular feature fields of C8
out_type_4 = enn.FieldType(self.r2_act, 96*[self.r2_act.regular_repr])
self.block4 = enn.SequentialModule(
enn.R2Conv(in_type_4, out_type_4, kernel_size=5, padding=2, bias=False),
enn.InnerBatchNorm(out_type_4),
enn.ReLU(out_type_4, inplace=True)
)
#self.pool2 = enn.SequentialModule(
# enn.PointwiseAvgPoolAntialiased(out_type_4, sigma=0.66, stride=2)
#)
self.pool2 = enn.PointwiseAdaptiveAvgPool(out_type_4, (7,7))
# convolution 5
# the old output type is the input type to the next layer
in_type_5 = self.block4.out_type
# the output type of the fifth convolution layer is 96 regular feature fields of C8
out_type_5 = enn.FieldType(self.r2_act, 96*[self.r2_act.regular_repr])
self.block5 = enn.SequentialModule(
enn.R2Conv(in_type_5, out_type_5, kernel_size=5, padding=2, bias=False),
enn.InnerBatchNorm(out_type_5),
enn.ReLU(out_type_5, inplace=True)
)
# convolution 6
# the old output type is the input type to the next layer
in_type_6 = self.block5.out_type
# the output type of the sixth convolution layer is 64 regular feature fields of C8
out_type_6 = enn.FieldType(self.r2_act, 64*[self.r2_act.regular_repr])
self.block6 = enn.SequentialModule(
enn.R2Conv(in_type_6, out_type_6, kernel_size=5, padding=1, bias=False),
enn.InnerBatchNorm(out_type_6),
enn.ReLU(out_type_6, inplace=True)
)
#self.pool3 = enn.PointwiseAvgPoolAntialiased(out_type_6, sigma=0.66, stride=1, padding=0)
self.pool3 = enn.PointwiseAdaptiveAvgPool(out_type_6, (7,7))
self.gpool = enn.GroupPooling(out_type_6)
# number of output channels
#c = self.gpool.out_type.size
# Fully Connected
self.fully_net = torch.nn.Sequential(
torch.nn.Linear(3136, 64),
torch.nn.BatchNorm1d(64),
torch.nn.ELU(inplace=True),
torch.nn.Linear(64, n_classes),
)
self.model = enn.SequentialModule(self.block1,
self.block2, self.pool1, self.block3, self.block4,
self.pool2, self.block5, self.block6, self.pool3,
self.gpool)
def forward(self, input: torch.Tensor):
# wrap the input tensor in a GeometricTensor
# (associate it with the input type)
if not self.exported:
#x = pad(input, (0,1,0,1))
x = enn.GeometricTensor(input, self.input_type)
else:
x = input
if not self.exported:
x = self.model(x)
# unwrap the output GeometricTensor
# (take the Pytorch tensor and discard the associated representation)
x = x.tensor
else:
x = self.exported_model(x)
# classify with the final fully connected layers
x = self.fully_net(x.reshape(x.shape[0], -1))
return x
def export(self):
self.exported=True
self.exported_model = self.model.export().eval()
def unexport(self):
self.exported=False
```

| avg_line_length: 41.619247 | max_line_length: 105 | alphanum_fraction: 0.62069 |
| 4a1841c217f899187ed948684e2b187b90f50d04 | 186 bytes | py | Python | util.py | zaabjuda/test_chat_server @ 96db615223d4a1548629d70b31b3eb7e89dd5ff6 | ["MIT"] | stars: null | issues: null | forks: null |

```python
# coding=utf-8
__author__ = "Dmitry Zhiltsov"
__copyright__ = "Copyright 2015, Dmitry Zhiltsov"
def to_bytes(str_data, encoding='UTF-8'):
return bytes(str_data, encoding=encoding)
```

| avg_line_length: 23.25 | max_line_length: 49 | alphanum_fraction: 0.752688 |
| 4a18423faba5c5687c6f8a95c48e785bafc603ad | 1,716 bytes | py | Python | directory_builder/test_changes.py | kimvanwyk/md_directory @ 2a877251eb61baca57188d0a0976756c8f3e2c31 | ["BSD-3-Clause"] | stars: null | issues: 2 (2021-07-05T09:36:29.000Z – 2021-07-05T09:36:29.000Z) | forks: null |

```python
'''
Copyright (c) 2011, Kim van Wyk
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are
met:
Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
Redistributions in binary form must reproduce the above copyright
notice, this list of conditions and the following disclaimer in the
documentation and/or other materials provided with the distribution.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
'''
import diff_tex
import glob
from sys import argv
differ = False
for f in glob.glob('*_orig.tex'):
    f = f[:-9]
    d = diff_tex.diff_filename(f, print_diff=((len(argv) > 1) and (argv[1] == 'print_diff')))
    if d is None:
        print('Error in diffing %s.tex with %s_orig.tex' % (f, f))
    if d:
        print('%s differs from original' % f)
        differ = True
if not differ:
    print('No files differ')
```

| avg_line_length: 38.133333 | max_line_length: 93 | alphanum_fraction: 0.757576 |
| 4a1843564eb4292000aa2184d20c5020a20612c2 | 371 bytes | py | Python | problem-004.py | natehouk/Project-Euler-Solutions @ af6301b2309eb400694e398219c216f740bdd3ce | ["MIT"] | stars: null | issues: null | forks: null |

```python
def is_palindrome(value):
    s = str(value)
    return s == s[::-1]

max_palindrome = 0
for i in range(999, 99, -1):
    for j in range(999, 99, -1):
        if is_palindrome(i * j) and i * j > max_palindrome:
            max_palindrome = i * j
print(max_palindrome)
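# For reference, this prints 906609 (= 913 * 993), the largest palindrome
# expressible as a product of two 3-digit numbers.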
```

| avg_line_length: 26.5 | max_line_length: 55 | alphanum_fraction: 0.587601 |
| 4a18435d9fcb84f02385b05c8e6592f07170e25c | 1,980 bytes | py | Python | emacs/emacs.d/jedi/test_jediepcserver.py | KitKod/dotfiles @ 92d8081280c7b6ebe7d91a00efb5dcdcc882b271 | ["BSD-3-Clause"] | stars: 87 (2015-01-03T13:57:31.000Z – 2022-01-18T14:56:23.000Z) | issues: 1 (2015-09-13T15:45:54.000Z – 2015-09-13T15:45:54.000Z) | forks: 124 (2015-01-15T22:05:39.000Z – 2022-03-20T18:35:57.000Z) |

```python
import os
import textwrap
from contextlib import contextmanager
import jediepcserver as jep
jep.import_jedi()
@contextmanager
def osenv(*args, **kwds):
def putenvs(dct):
for (k, v) in dct.items():
if v is None:
del os.environ[k]
else:
os.environ[k] = v
newenv = dict(*args, **kwds)
oldenv = dict(zip(newenv, map(os.getenv, newenv)))
try:
putenvs(newenv)
yield
finally:
putenvs(oldenv)
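# Usage sketch for osenv (hypothetical values): temporarily override
# environment variables, restoring the originals afterwards; a value of None
# deletes the variable for the duration of the block.
#   with osenv(VIRTUAL_ENV='/tmp/venv'):
#       ...  # code that reads os.environ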
def test_add_virtualenv_path_runs_fine_in_non_virtualenv():
# See: https://github.com/tkf/emacs-jedi/issues/3
with osenv(VIRTUAL_ENV=None):
jep.add_virtualenv_path()
def check_defined_names(source, keys, deftree):
stripdict = lambda d: dict((k, d[k]) for k in keys)
striptree = lambda ds: [stripdict(ds[0])] + list(map(striptree, ds[1:]))
fulldicts = jep.defined_names(textwrap.dedent(source), 'example.py')
dicts = list(map(striptree, fulldicts))
assert dicts == deftree
def test_defined_names_imports():
item = lambda name, local_name: {'name': name, 'local_name': local_name}
keys = ['name', 'local_name']
dicts = [
[item('f', 'f')],
[item('g', 'g')],
[item('C', 'C'), [item('h', 'C.h')]],
]
check_defined_names("""
from module import f
g = f(f)
class C:
h = g
""", keys, dicts)
def test_defined_names_nested_classes():
item = lambda name, local_name: {'name': name, 'local_name': local_name}
keys = ['name', 'local_name']
dicts = [
[item('L1', 'L1'),
[item('L2', 'L1.L2'),
[item('L3', 'L1.L2.L3'),
[item('f', 'L1.L2.L3.f')]],
[item('f', 'L1.L2.f')]],
[item('f', 'L1.f')]],
[item('f', 'f')]]
check_defined_names("""
class L1:
class L2:
class L3:
def f(): pass
def f(): pass
def f(): pass
def f(): pass
""", keys, dicts)
```

| avg_line_length: 26.052632 | max_line_length: 76 | alphanum_fraction: 0.548485 |
| 4a184367dd31c745484448b5751f3c98c14279db | 1,594 bytes | py | Python | ThreadSaver/tspkg/routes.py | Mabdelwanis/thread-it @ f0da5be7bc288b3712d0e7e92e73a7b35a3df3be | ["MIT"] | stars: 4 (2021-05-20T15:44:08.000Z – 2022-03-18T13:09:46.000Z) | issues: null | forks: 6 (2021-05-18T05:48:46.000Z – 2022-03-20T19:03:25.000Z) |

```python
from flask import render_template, url_for, redirect, send_file
from ThreadSaver.tspkg import app
from ThreadSaver.tspkg.models import User
from flask_login import logout_user, login_required
from flask_dance.contrib.twitter import twitter
@app.route("/")
@app.route("/home")
def home():
return render_template('home.html', title='Home')
@app.route("/about")
def about():
return render_template('about.html', title='About')
@app.route("/dashboard")
def dashboard():
if not twitter.authorized:
return redirect(url_for("twitter.login"))
resp = twitter.get("account/verify_credentials.json")
assert resp.ok
user_data = resp.json()
user_id = user_data['id_str']
user_name = user_data['screen_name']
tweets = User.query.filter_by(user_id=user_id).all()
# with open('ts_file.txt', 'w') as file:
# file.write(user.tweet)
return render_template('dashboard.html', title='Dashboard', value=user_name, tweets=tweets)
@app.route("/download/<string:text>")
def download(text):
with open('tweet.txt', 'w') as file:
file.write(text)
path = '../tweet.txt'
return send_file(path, as_attachment=True)
@app.route('/downloadall')
def download_all():
tweets = User.query.filter_by(user_id='user1').all()
with open('tweet.txt', 'w') as file:
for tweet in tweets:
file.write(tweet.tweet)
file.write('\n')
path = '../tweet.txt'
return send_file(path, as_attachment=True)
@app.route("/logout")
@login_required
def logout():
logout_user()
return redirect(url_for('home'))
```

| avg_line_length: 27.482759 | max_line_length: 95 | alphanum_fraction: 0.680678 |
| 4a184421896ee5654c08b8580d178b3e60cc0124 | 589 bytes | py | Python | self-learning/based/00000003.py | vladspirin/python-learning @ 6b005fb28f96c0d610348a0b5f8830f94c53075f | ["Unlicense"] | stars: 1 (2018-10-23T14:50:43.000Z – 2018-10-23T14:50:43.000Z) | issues: null | forks: null |

```python
# movie (dir. director) came out in year
# movie = input()
# director = input()
# year = input()
# print(f'{movie} (dir. {director}) came out in {year}')
# txt = input()
# txt = txt.title()
# for i in txt:
# if not i.isalpha():
# txt = txt.replace(i, "")
# print(txt)
def heading(string, number=1):
    # Clamp the heading level to the valid Markdown range 1..6.
    level = max(1, min(number, 6))
    print(("#" * level) + " " + string)
```

| avg_line_length: 23.56 | max_line_length: 56 | alphanum_fraction: 0.475382 |
| 4a18446b6f858b798c13a25d34ff166a79b4d346 | 43,536 bytes | py | Python | src/python/BuildingControlsSimulator/DataClients/DataClient.py | ecobee/building-controls-simulator @ c4256224d5b2e171417b60069ccc97dde44de342 | ["BSD-3-Clause"] | stars: 10 (2020-06-04T14:22:57.000Z – 2021-06-16T17:00:48.000Z) | issues: 56 (2020-06-17T16:59:52.000Z – 2022-03-25T18:59:11.000Z) | forks: 5 (2020-06-04T14:29:29.000Z – 2021-03-05T07:59:34.000Z) |

```python
# created by Tom Stesco tom.s@ecobee.com
import os
import logging
# import pkg_resources
from datetime import datetime
import attr
import pandas as pd
import numpy as np
from BuildingControlsSimulator.DataClients.DataStates import (
UNITS,
CHANNELS,
STATES,
)
from BuildingControlsSimulator.Conversions.Conversions import Conversions
from BuildingControlsSimulator.DataClients.DataSpec import (
Internal,
FlatFilesSpec,
DonateYourDataSpec,
convert_spec,
)
from BuildingControlsSimulator.DataClients.DateTimeChannel import DateTimeChannel
from BuildingControlsSimulator.DataClients.ThermostatChannel import ThermostatChannel
from BuildingControlsSimulator.DataClients.EquipmentChannel import EquipmentChannel
from BuildingControlsSimulator.DataClients.SensorsChannel import SensorsChannel
from BuildingControlsSimulator.DataClients.WeatherChannel import WeatherChannel
from BuildingControlsSimulator.DataClients.DataSource import DataSource
from BuildingControlsSimulator.DataClients.DataDestination import DataDestination
from BuildingControlsSimulator.DataClients.LocalDestination import LocalDestination
logger = logging.getLogger(__name__)
@attr.s(kw_only=True)
class DataClient:
# data channels
thermostat = attr.ib(default=None)
equipment = attr.ib(default=None)
sensors = attr.ib(default=None)
weather = attr.ib(default=None)
datetime = attr.ib(default=None)
full_data_periods = attr.ib(factory=list)
# input variables
source = attr.ib(validator=attr.validators.instance_of(DataSource))
destination = attr.ib(validator=attr.validators.instance_of(DataDestination))
nrel_dev_api_key = attr.ib(default=os.environ.get("NREL_DEV_API_KEY"))
nrel_dev_email = attr.ib(default=os.environ.get("NREL_DEV_EMAIL"))
archive_tmy3_dir = attr.ib(default=os.environ.get("ARCHIVE_TMY3_DIR"))
archive_tmy3_meta = attr.ib(default=os.environ.get("ARCHIVE_TMY3_META"))
archive_tmy3_data_dir = attr.ib(default=os.environ.get("ARCHIVE_TMY3_DATA_DIR"))
ep_tmy3_cache_dir = attr.ib(default=os.environ.get("EP_TMY3_CACHE_DIR"))
nsrdb_cache_dir = attr.ib(default=os.environ.get("NSRDB_CACHE_DIR"))
simulation_epw_dir = attr.ib(default=os.environ.get("SIMULATION_EPW_DIR"))
weather_dir = attr.ib(default=os.environ.get("WEATHER_DIR"))
weather_forecast_source = attr.ib(default="perfect")
epw_path = attr.ib(default=None)
# state variables
sim_config = attr.ib(default=None)
start_utc = attr.ib(default=None)
end_utc = attr.ib(default=None)
eplus_fill_to_day_seconds = attr.ib(default=None)
eplus_warmup_seconds = attr.ib(default=None)
internal_spec = attr.ib(factory=Internal)
forecast_from_measured = attr.ib(default=True)
has_data = attr.ib(default=False)
def __attrs_post_init__(self):
# first, post init class specification
self.make_data_directories()
def make_data_directories(self):
os.makedirs(self.weather_dir, exist_ok=True)
os.makedirs(self.archive_tmy3_data_dir, exist_ok=True)
os.makedirs(self.ep_tmy3_cache_dir, exist_ok=True)
os.makedirs(self.nsrdb_cache_dir, exist_ok=True)
os.makedirs(self.simulation_epw_dir, exist_ok=True)
if self.source.local_cache:
os.makedirs(
os.path.join(
self.source.local_cache,
self.source.operator_name,
self.source.source_name,
),
exist_ok=True,
)
if self.destination.local_cache:
os.makedirs(
os.path.join(
self.destination.local_cache,
self.destination.operator_name,
),
exist_ok=True,
)
def get_data(self):
# check if data has already been fetched by another simulation
if self.has_data:
return
# check for invalid start/end combination
if self.sim_config["end_utc"] <= self.sim_config["start_utc"]:
raise ValueError("sim_config contains invalid start_utc >= end_utc.")
# load from cache or download data from source
_data = self.source.get_data(self.sim_config)
if _data.empty:
logger.error(
"EMPTY DATA SOURCE: \nsim_config={} \nsource={}\n".format(
self.sim_config, self.source
)
)
_data = self.internal_spec.get_empty_df()
# remove any fully duplicated records
_data = _data.drop_duplicates(ignore_index=True)
# remove multiple records for same datetime
# there may also be multiple entries for same exact datetime in ISM
# in this case keep the record that has the most combined runtime
# because in observed cases of this the extra record has 0 runtime.
_runtime_sum_column = "sum_runtime"
_data[_runtime_sum_column] = _data[
set(self.internal_spec.equipment.spec.keys()) & set(_data.columns)
].sum(axis=1)
# last duplicate datetime value will have maximum sum_runtime
_data = _data.sort_values(
[self.internal_spec.datetime_column, _runtime_sum_column],
ascending=True,
)
_data = _data.drop_duplicates(
subset=[STATES.DATE_TIME], keep="last", ignore_index=True
)
_data = _data.drop(columns=[_runtime_sum_column])
# the period at which records from the data source are expected
_expected_period = f"{self.internal_spec.data_period_seconds}S"
_min_datetime = _data[self.internal_spec.datetime.datetime_column].min()
_max_datetime = _data[self.internal_spec.datetime.datetime_column].max()
# truncate the data to desired simulation start and end time
_data = _data[
(_data[self.internal_spec.datetime_column] >= self.sim_config["start_utc"])
& (_data[self.internal_spec.datetime_column] <= self.sim_config["end_utc"])
].reset_index(drop=True)
# remove unused categories from categorical columns after date range
# for simulation is selected
for _cat_col in [
_col
for _col in _data.columns
if isinstance(_data[_col].dtype, pd.api.types.CategoricalDtype)
]:
_data[_cat_col] = _data[_cat_col].cat.remove_unused_categories()
# run settings change point detection before filling missing data
# the fill data would create false positive change points
# the change points can also be used to correctly fill the schedule
# and comfort preferences
(
_change_points_schedule,
_change_points_comfort_prefs,
_change_points_hvac_mode,
) = ThermostatChannel.get_settings_change_points(
_data, self.internal_spec.data_period_seconds
)
# ffill first 15 minutes of missing data periods
_data = DataClient.fill_missing_data(
full_data=_data,
expected_period=_expected_period,
data_spec=self.internal_spec,
)
# compute full_data_periods with only first 15 minutes ffilled
self.full_data_periods = DataClient.get_full_data_periods(
full_data=_data,
data_spec=self.internal_spec,
expected_period=_expected_period,
min_sim_period=self.sim_config["min_sim_period"],
)
# need time zone before init of DatetimeChannel
internal_timezone = DateTimeChannel.get_timezone(
self.sim_config["latitude"], self.sim_config["longitude"]
)
# there will be filled data even if there are no full_data_periods
# the fill data is present to run continuous simulations smoothly
# in the presence of potentially many missing data periods
if self.full_data_periods:
# compute the total sim steps for later use determining offset for
# weather forecasts idx
_total_sim_steps = (
_data[self.internal_spec.datetime_column].max()
- _data[self.internal_spec.datetime_column].min()
) // pd.Timedelta(seconds=self.sim_config["sim_step_size_seconds"])
# the simulation period must be full days starting at 0 hour to use
# SimulationControl: Run Simulation for Weather File Run Periods
_start_utc, _end_utc = self.get_simulation_period(
expected_period=_expected_period,
internal_timezone=internal_timezone,
)
# add records for warm_up period
_data = DataClient.add_fill_records(
df=_data,
data_spec=self.internal_spec,
start_utc=_start_utc,
end_utc=_end_utc,
expected_period=_expected_period,
)
# drop records before and after full simulation time
# end is less than
_data = _data[
(_data[self.internal_spec.datetime_column] >= _start_utc)
& (_data[self.internal_spec.datetime_column] <= _end_utc)
].reset_index(drop=True)
# bfill to interpolate missing data
# first and last records must be full because we used full data periods
# need to add a NA_code to stop fillna from clobbering columns
# where NA means something
na_code_name = "NA_code"
_data[STATES.CALENDAR_EVENT].cat.add_categories(
new_categories=na_code_name, inplace=True
)
_data[STATES.CALENDAR_EVENT] = _data[STATES.CALENDAR_EVENT].fillna(
na_code_name
)
# bfill then ffill to handle where no data after null
_data = _data.fillna(method="bfill", limit=None)
_data = _data.fillna(method="ffill", limit=None)
_data = DataClient.resample_to_step_size(
df=_data,
step_size_seconds=self.sim_config["sim_step_size_seconds"],
data_spec=self.internal_spec,
)
# we can replace na_code_name now that filling is complete
_data.loc[
_data[STATES.CALENDAR_EVENT] == na_code_name,
[STATES.CALENDAR_EVENT],
] = pd.NA
# finally convert dtypes to final types now that nulls in
# non-nullable columns have been properly filled or removed
_data = convert_spec(
_data,
src_spec=self.internal_spec,
dest_spec=self.internal_spec,
src_nullable=True,
dest_nullable=False,
)
else:
raise ValueError(
f"ID={self.sim_config['identifier']} has no full_data_periods "
+ "for requested duration: "
+ f"start_utc={self.sim_config['start_utc']}, "
+ f"end_utc={self.sim_config['end_utc']} "
+ f"with min_sim_period={self.sim_config['min_sim_period']}. "
+ f"The given data file runs from {_min_datetime}"
+ f" to {_max_datetime}. "
+ f"If there is overlap between these two time periods then "
+ "there is too much missing data. If there is no overlap "
+ "consider altering your sim_config start_utc and end_utc."
)
self.datetime = DateTimeChannel(
data=_data[
self.internal_spec.intersect_columns(
_data.columns, self.internal_spec.datetime.spec
)
],
spec=self.internal_spec.datetime,
latitude=self.sim_config["latitude"],
longitude=self.sim_config["longitude"],
internal_timezone=internal_timezone,
)
# finally create the data channel objs for usage during simulation
self.thermostat = ThermostatChannel(
data=_data[
self.internal_spec.intersect_columns(
_data.columns, self.internal_spec.thermostat.spec
)
],
spec=self.internal_spec.thermostat,
change_points_schedule=_change_points_schedule,
change_points_comfort_prefs=_change_points_comfort_prefs,
change_points_hvac_mode=_change_points_hvac_mode,
)
self.equipment = EquipmentChannel(
data=_data[
self.internal_spec.intersect_columns(
_data.columns, self.internal_spec.equipment.spec
)
],
spec=self.internal_spec.equipment,
)
self.sensors = SensorsChannel(
data=_data[
self.internal_spec.intersect_columns(
_data.columns, self.internal_spec.sensors.spec
)
],
spec=self.internal_spec.sensors,
)
self.sensors.drop_unused_room_sensors()
self.weather = WeatherChannel(
data=_data[
self.internal_spec.intersect_columns(
_data.columns, self.internal_spec.weather.spec
)
],
spec=self.internal_spec.weather,
weather_forecast_source=self.weather_forecast_source,
archive_tmy3_dir=self.archive_tmy3_dir,
archive_tmy3_data_dir=self.archive_tmy3_data_dir,
ep_tmy3_cache_dir=self.ep_tmy3_cache_dir,
nrel_dev_api_key=self.nrel_dev_api_key,
nrel_dev_email=self.nrel_dev_email,
nsrdb_cache_dir=self.nsrdb_cache_dir,
simulation_epw_dir=self.simulation_epw_dir,
)
# add nsrdb solar data fields
self.weather.data = self.weather.fill_nsrdb(
input_data=self.weather.data,
datetime_channel=self.datetime,
sim_config=self.sim_config,
)
# merge current weather data with epw
# backfill of any missing weather data here
self.weather.get_epw_data(
sim_config=self.sim_config,
datetime_channel=self.datetime,
epw_path=self.epw_path,
)
# TODO: this is an example implementation showing
        # the anticipated structure of forecast data from
# an external source
self.weather.get_forecast_data(
sim_config=self.sim_config,
total_sim_steps=_total_sim_steps,
)
# need to convert data types of filled weather data to spec dtypes
self.weather.data = self.weather.data.astype(
{k: v["dtype"] for k, v in self.internal_spec.weather.spec.items()}
)
# set flag for other simulations using this data client
self.has_data = True
def get_simulation_period(self, expected_period, internal_timezone):
# set start and end times from full_data_periods and simulation config
# take limiting period as start_utc and end_utc
if not self.full_data_periods:
self.start_utc = None
self.end_utc = None
return self.start_utc, self.end_utc
if self.sim_config["start_utc"] >= self.full_data_periods[0][0]:
self.start_utc = self.sim_config["start_utc"]
else:
logger.info(
f"config start_utc={self.sim_config['start_utc']} is before "
+ f"first full data period={self.full_data_periods[0][0]}. "
+ "Simulation start_utc set to first full data period."
)
self.start_utc = self.full_data_periods[0][0]
if self.sim_config["end_utc"] <= self.full_data_periods[-1][-1]:
self.end_utc = self.sim_config["end_utc"]
else:
logger.info(
f"config end_utc={self.sim_config['end_utc']} is after "
+ f"last full data period={self.full_data_periods[-1][-1]}. "
+ "Simulation end_utc set to last full data period."
)
self.end_utc = self.full_data_periods[-1][-1]
if self.end_utc < self.start_utc:
raise ValueError(
f"end_utc={self.end_utc} before start_utc={self.start_utc}.\n"
+ f"Set sim_config start_utc and end_utc within "
+ f"full_data_period: {self.full_data_periods[0][0]} to "
+ f"{self.full_data_periods[-1][-1]}"
)
        # fill an additional day before the simulation and up to the end of
        # the day at the end of the simulation
(self.start_utc, self.end_utc,) = DataClient.eplus_day_fill_simulation_time(
start_utc=self.start_utc,
end_utc=self.end_utc,
expected_period=expected_period,
internal_timezone=internal_timezone,
)
return self.start_utc, self.end_utc
def store_output(self, output, sim_name, src_spec):
self.destination.put_data(df=output, sim_name=sim_name, src_spec=src_spec)
def store_input(
self,
filepath_or_buffer,
df_input=None,
src_spec=None,
dest_spec=None,
file_extension=None,
):
"""For usage capturing input data for unit tests."""
if not df_input:
df_input = self.get_full_input()
if not src_spec:
src_spec = self.internal_spec
if not dest_spec:
dest_spec = self.destination.data_spec
if not file_extension:
file_extension = self.destination.file_extension
_df = convert_spec(
df=df_input, src_spec=src_spec, dest_spec=dest_spec, copy=True
)
self.destination.write_data_by_extension(
_df,
filepath_or_buffer,
data_spec=dest_spec,
file_extension=file_extension,
)
@staticmethod
def add_fill_records(df, data_spec, start_utc, end_utc, expected_period):
if not (start_utc and end_utc):
return df
rec = pd.Series(pd.NA, index=df.columns)
should_resample = False
if df[(df[data_spec.datetime_column] == start_utc)].empty:
# append record with start_utc time
rec[data_spec.datetime_column] = start_utc
df = df.append(rec, ignore_index=True).sort_values(
data_spec.datetime_column
)
should_resample = True
if df[(df[data_spec.datetime_column] == end_utc)].empty:
# append record with end_utc time
rec[data_spec.datetime_column] = end_utc
df = df.append(rec, ignore_index=True).sort_values(
data_spec.datetime_column
)
should_resample = True
if should_resample:
# frequency rules have different str format
_str_format_dict = {
"M": "T", # covert minutes formats
"S": "S",
}
# replace last char using format conversion dict
resample_freq = (
expected_period[0:-1] + _str_format_dict[expected_period[-1]]
)
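            # hedged example of the alias conversion above (values are
            # illustrative): an expected_period of "5M" becomes the
            # resample_freq "5T" (pandas minute alias); "300S" is unchanged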
# resampling
df = df.set_index(data_spec.datetime_column)
df = df.resample(resample_freq).asfreq()
df = df.reset_index()
# adding a null record breaks categorical dtypes
# convert back to categories
for state in df.columns:
if data_spec.full.spec[state]["dtype"] == "category":
df[state] = df[state].astype("category")
return df
@staticmethod
def eplus_day_fill_simulation_time(
start_utc, end_utc, expected_period, internal_timezone
):
# EPlus requires that total simulation time be divisible by 86400 seconds
# or whole days. EPlus also has some transient behaviour at t_init
# adding time to beginning of simulation input data that will be
# backfilled is more desirable than adding time to end of simulation
# this time will not be included in the full_data_periods and thus
# will not be considered during analysis
# fill extra day before simulation and up to end of day at end of simulation
        # the added timedelta is the difference to whole days minus one period
# this period can be considered 23:55 to 00:00
# EnergyPlus will be initialized for this extra period but not simulated
        # a date 10 days into the year is used for the offset because it won't
        # cross DST or the year boundary under any circumstances
tz_offset_seconds = internal_timezone.utcoffset(
datetime(start_utc.year, 1, 10)
).total_seconds()
filled_start_utc = start_utc - pd.Timedelta(
days=1,
hours=start_utc.hour,
minutes=start_utc.minute,
seconds=start_utc.second + tz_offset_seconds,
)
filled_end_utc = end_utc
return filled_start_utc, filled_end_utc
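    # Worked example (hypothetical values, minimal sketch): with
    # start_utc = 2019-01-02 06:30 UTC in America/New_York
    # (utcoffset = -18000 s), the arithmetic above gives
    #   filled_start_utc = start_utc - Timedelta(days=1, hours=6, minutes=30,
    #                                            seconds=0 - 18000)
    #                    = 2019-01-01 05:00 UTC, i.e. local midnight on Jan 1,
    # so the EnergyPlus run period spans whole local days.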
@staticmethod
def get_full_data_periods(
full_data, data_spec, expected_period="300S", min_sim_period="7D"
):
"""Get full data periods. These are the periods for which there is data
on all channels. Preliminary forward filling of the data is used to
fill small periods of missing data where padding values is advantageous
for examplem the majority of missing data periods are less than 15 minutes
(3 message intervals).
The remaining missing data is back filled after the full_data_periods are
computed to allow the simulations to run continously. Back fill is used
because set point changes during the missing data period should be
assumed to be not in tracking mode and in regulation mode after greater
than
"""
if full_data.empty:
return []
# compute time deltas between records
diffs = full_data.dropna(axis="rows", subset=data_spec.full.null_check_columns)[
data_spec.datetime_column
].diff()
        # separate periods by missing data
periods_df = diffs[diffs > pd.to_timedelta(expected_period)].reset_index()
# make df of periods
periods_df["start"] = full_data.loc[
periods_df["index"], data_spec.datetime_column
].reset_index(drop=True)
periods_df["end"] = periods_df["start"] - periods_df[1]
periods_df = periods_df.drop(axis="columns", columns=["index", 1])
# append start and end datetimes from full_data
periods_df.loc[len(periods_df)] = [
pd.NA,
full_data.loc[len(full_data) - 1, data_spec.datetime_column],
]
periods_df["start"] = periods_df["start"].shift(1)
periods_df.loc[0, "start"] = full_data.loc[0, data_spec.datetime_column]
# only include full_data_periods that are geq min_sim_period
# convert all np.arrays to lists for ease of use
_full_data_periods = [
list(rec)
for rec in periods_df[
periods_df["end"] - periods_df["start"] >= pd.Timedelta(min_sim_period)
].to_numpy()
]
return _full_data_periods
@staticmethod
def fill_missing_data(
full_data,
data_spec,
expected_period,
limit=3,
method="ffill",
):
"""Fill periods of missing data within limit using method.
Periods larger than limit will not be partially filled."""
if full_data.empty:
return full_data
# frequency rules have different str format
_str_format_dict = {
"M": "T", # covert minutes formats
"S": "S",
}
# replace last char using format conversion dict
resample_freq = expected_period[0:-1] + _str_format_dict[expected_period[-1]]
# resample to add any timesteps that are fully missing
full_data = full_data.set_index(data_spec.datetime_column)
full_data = full_data.resample(resample_freq).asfreq()
full_data = full_data.reset_index()
# compute timesteps between steps of data
_null_check_columns = [
_col
for _col in data_spec.full.null_check_columns
if _col in full_data.columns
]
diffs = full_data.dropna(axis="rows", subset=_null_check_columns)[
data_spec.datetime_column
].diff()
fill_start_df = (
(
diffs[
(diffs > pd.to_timedelta(expected_period))
& (diffs <= pd.to_timedelta(expected_period) * limit)
]
/ pd.Timedelta(expected_period)
)
.astype("Int64")
.reset_index()
)
if not fill_start_df.empty:
# take idxs with missing data and one record on either side to allow
# for ffill and bfill methods to work generally
fill_idxs = []
for idx, num_missing in fill_start_df.to_numpy():
fill_idxs = fill_idxs + [i for i in range(idx - (num_missing), idx + 1)]
# fill exact idxs that are missing using method
full_data.iloc[fill_idxs] = full_data.iloc[fill_idxs].fillna(method=method)
return full_data
def get_full_input(self, column_names=False):
full_input = pd.concat(
[
self.datetime.data,
self.thermostat.data,
self.equipment.data,
self.sensors.data,
self.weather.data,
],
axis="columns",
)
# drop duplicated datetime columns
full_input = full_input.loc[:, ~full_input.columns.duplicated()]
# remove warm up time and forecast time
full_input = full_input[
(
full_input[self.internal_spec.datetime_column]
>= self.sim_config["start_utc"]
)
& (
full_input[self.internal_spec.datetime_column]
< self.sim_config["end_utc"]
)
].reset_index(drop=True)
# resample to output step size
full_input = DataClient.resample_to_step_size(
df=full_input,
step_size_seconds=self.sim_config["output_step_size_seconds"],
data_spec=self.internal_spec,
)
if column_names:
full_input.columns = [
self.internal_spec.full.spec[_col]["name"]
for _col in full_input.columns
]
return full_input
@staticmethod
def resample_to_step_size(df, step_size_seconds, data_spec):
"""This function contains the rules for resampling data of all
types different time steps"""
# the mode seconds between messages is the expected sample period
cur_sample_period = (
df[data_spec.datetime_column].diff().mode()[0].total_seconds()
)
if cur_sample_period < step_size_seconds:
# downsample data to lower frequency
df = DataClient.downsample_to_step_size(df, step_size_seconds, data_spec)
elif cur_sample_period > step_size_seconds:
# upsample data to higher frequency
df = DataClient.upsample_to_step_size(df, step_size_seconds, data_spec)
return df
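    # hedged dispatch example (hypothetical periods): source data at a 300 s
    # period resampled to a 60 s sim step is upsampled (interpolated and
    # disaggregated); resampled to a 900 s step it is downsampled
    # (mean/sum integrated)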
@staticmethod
def upsample_to_step_size(df, step_size_seconds, data_spec):
"""This function contains the rules for resampling data of all
types into smaller time steps"""
# resample to desired frequency
_resample_period = f"{step_size_seconds}S"
current_step_size = int(
df[data_spec.datetime_column].diff().mode()[0].total_seconds()
)
        # runtime_columns can be filled with zeros because they are not used
        # as inputs and are just re-aggregated into the output
runtime_columns = [
_state
for _state, _v in data_spec.full.spec.items()
if ((_v["unit"] == UNITS.SECONDS) and (_state in df.columns))
]
        # before resampling, generate a step_end_off column for runtime
        # columns: we must know whether the end of the step cycle is on or off
for _col in runtime_columns:
# TODO: define min cycle time for all equipment
min_cycle_time = 300
df[f"{_col}_step_end_off"] = (
(
((df[_col] + df[_col].shift(1)) >= min_cycle_time)
& ((df[_col] + df[_col].shift(-1)) <= min_cycle_time)
)
& ~(
((df[_col].shift(1) + df[_col].shift(2)) >= min_cycle_time)
& ((df[_col] + df[_col].shift(1)) <= min_cycle_time)
)
| ((df[_col] + df[_col].shift(-1)) < min_cycle_time)
).astype("boolean")
# we need to set a datetime index to resample
df = df.set_index(data_spec.datetime_column)
df = df.resample(_resample_period).asfreq()
# the datetime index can be reset back to a column
# this is actually required due to an issue in the interpolate method
df = df.reset_index()
# linear interpolation
        # setpoint columns are in units that could be interpolated, but they
        # must be ffilled instead, so exclude them from this list
linear_columns_exclude = [
STATES.TEMPERATURE_STP_COOL,
STATES.TEMPERATURE_STP_HEAT,
STATES.HUMIDITY_EXPECTED_LOW,
STATES.HUMIDITY_EXPECTED_HIGH,
]
linear_columns = [
_state
for _state, _v in data_spec.full.spec.items()
if (
(_v["unit"] in [UNITS.CELSIUS, UNITS.RELATIVE_HUMIDITY])
and (_state in df.columns)
)
and (_state not in linear_columns_exclude)
]
# Note: must have numpy `float32` or `float64` dtypes for interpolation
df.loc[:, linear_columns] = df.loc[:, linear_columns].interpolate(
axis="rows", method="linear"
)
# ffill interpolation
ffill_columns = [
_state
for _state, _v in data_spec.full.spec.items()
if ((_v["unit"] == UNITS.OTHER) and (_state in df.columns))
]
ffill_columns = ffill_columns + list(
set(linear_columns_exclude) & set(df.columns)
)
df.loc[:, ffill_columns] = df.loc[:, ffill_columns].interpolate(
axis="rows", method="ffill"
)
        # runtime columns must be disaggregated using minimum runtime rules
        # to determine whether runtime occurs at the beginning or end of step
        # the step idx is used to determine leftover runtime
upsample_ratio = int(current_step_size / step_size_seconds)
df["inner_step_idx"] = np.hstack(
(
[upsample_ratio],
np.tile(
np.arange(1, upsample_ratio + 1),
(int((len(df) - 1) / upsample_ratio), 1),
).flatten(),
)
)
for _col in runtime_columns:
df[f"{_col}_step_end_off"] = df[f"{_col}_step_end_off"].bfill()
# runtime sum over step
df["step_runtime"] = df[_col].shift(-upsample_ratio).ffill().shift(1)
# runtime at beginning of step
df["b_upsample"] = df["step_runtime"] - (
(df["inner_step_idx"] - 1) * step_size_seconds
)
df.loc[
df["b_upsample"] > step_size_seconds, ["b_upsample"]
] = step_size_seconds
# runtime at end of step
df["e_upsample"] = df["step_runtime"] - (
(upsample_ratio - df["inner_step_idx"]) * step_size_seconds
)
df.loc[
df["e_upsample"] > step_size_seconds, ["e_upsample"]
] = step_size_seconds
# steps ending with off-cycle
df.loc[df[f"{_col}_step_end_off"], [_col]] = df["b_upsample"]
df.loc[~df[f"{_col}_step_end_off"], [_col]] = df["e_upsample"]
df.loc[df[_col] < 0, [_col]] = 0
df[_col] = df[_col].fillna(0)
df = df.drop(columns=[f"{_col}_step_end_off"])
df = df.drop(
columns=["e_upsample", "b_upsample", "step_runtime", "inner_step_idx"]
)
        # remaining runtime values can be zero-filled: they are not used
        # as inputs and will just be re-aggregated into the output
zero_fill_columns = [
_state
for _state, _v in data_spec.full.spec.items()
if ((_v["unit"] == UNITS.SECONDS) and (_state in df.columns))
]
df.loc[:, zero_fill_columns] = df.loc[:, zero_fill_columns].fillna(0)
return df
@staticmethod
def downsample_to_step_size(df, step_size_seconds, data_spec):
"""This function contains the rules for integrating data of all
types into larger time steps"""
# resample to desired frequency
_resample_period = f"{step_size_seconds}S"
# we need to set a datetime index to resample
df = df.set_index(data_spec.datetime_column)
# set result df with new frequency
# each group of columns must be filled in separately
res_df = df.resample(_resample_period).asfreq()
# mean integration
mean_columns = [
_state
for _state, _v in data_spec.full.spec.items()
if (
_v["unit"] in [UNITS.CELSIUS, UNITS.RELATIVE_HUMIDITY]
and _state in df.columns
)
]
res_df.loc[:, mean_columns] = (
df.loc[:, mean_columns].resample(_resample_period).mean()
)
# mode interpolation
# columns that were ffilled and represent current states will
# be filled with the most recent value as the default resample().asfreq()
# sum integration
sum_columns = [
_state
for _state, _v in data_spec.full.spec.items()
if (_v["unit"] == UNITS.SECONDS and _state in df.columns)
]
res_df.loc[:, sum_columns] = (
df.loc[:, sum_columns].resample(_resample_period).sum()
)
# the datetime index can be reset back to a column
res_df = res_df.reset_index()
return res_df
@staticmethod
def generate_dummy_data(
sim_config,
spec,
outdoor_weather=None,
schedule_chg_pts=None,
comfort_chg_pts=None,
hvac_mode_chg_pts=None,
):
if isinstance(spec, Internal):
raise ValueError(
f"Supplied Spec {spec} is internal spec."
+ " Data of this spec should not be stored in data files"
)
for _idx, sim in sim_config.iterrows():
# _df = pd.DataFrame(columns=spec.full.spec.keys())
_df = pd.DataFrame(
index=pd.date_range(
start=sim.start_utc,
end=sim.end_utc,
freq=f"{spec.data_period_seconds}S",
)
)
if not schedule_chg_pts:
# set default ecobee schedule
schedule_chg_pts = {
sim.start_utc: [
{
"name": "Home",
"minute_of_day": 390,
"on_day_of_week": [
True,
True,
True,
True,
True,
True,
True,
],
},
{
"name": "Sleep",
"minute_of_day": 1410,
"on_day_of_week": [
True,
True,
True,
True,
True,
True,
True,
],
},
]
}
if not comfort_chg_pts:
# set default ecobee comfort setpoints
if isinstance(spec, FlatFilesSpec):
home_stp_cool = Conversions.C2Fx10(23.5)
home_stp_heat = Conversions.C2Fx10(21.0)
sleep_stp_cool = Conversions.C2Fx10(28.0)
sleep_stp_heat = Conversions.C2Fx10(16.5)
elif isinstance(spec, DonateYourDataSpec):
home_stp_cool = Conversions.C2F(23.5)
home_stp_heat = Conversions.C2F(21.0)
sleep_stp_cool = Conversions.C2F(28.0)
sleep_stp_heat = Conversions.C2F(16.5)
else:
home_stp_cool = 23.5
home_stp_heat = 21.0
sleep_stp_cool = 28.0
sleep_stp_heat = 16.5
comfort_chg_pts = {
sim.start_utc: {
"Home": {
STATES.TEMPERATURE_STP_COOL: home_stp_cool,
STATES.TEMPERATURE_STP_HEAT: home_stp_heat,
},
"Sleep": {
STATES.TEMPERATURE_STP_COOL: sleep_stp_cool,
STATES.TEMPERATURE_STP_HEAT: sleep_stp_heat,
},
}
}
if not hvac_mode_chg_pts:
                # set default ecobee hvac mode
hvac_mode_chg_pts = {sim.start_utc: "heat"}
# enforce ascending sorting of dict keys
hvac_mode_chg_pts = dict(sorted(hvac_mode_chg_pts.items()))
comfort_chg_pts = dict(sorted(comfort_chg_pts.items()))
schedule_chg_pts = dict(sorted(schedule_chg_pts.items()))
# check for errors in settings
if len(hvac_mode_chg_pts) <= 0:
raise ValueError(f"Invalid hvac_mode_chg_pts={hvac_mode_chg_pts}.")
if len(comfort_chg_pts) <= 0:
raise ValueError(f"Invalid comfort_chg_pts={comfort_chg_pts}.")
if len(schedule_chg_pts) <= 0:
raise ValueError(f"Invalid schedule_chg_pts={schedule_chg_pts}.")
for k, v in spec.full.spec.items():
_default_value, _ = Conversions.numpy_down_cast_default_value_dtype(
v["dtype"]
)
if v["channel"] == CHANNELS.THERMOSTAT_SETTING:
# settings channels set with default values first
# they are set below after full df columns have been filled
_df[k] = _default_value
elif v["channel"] == CHANNELS.WEATHER:
# default: set no values for outdoor_weather=None
# will default to using TMY3 data for the provided location
if outdoor_weather:
# outdoor_weather can be set with internal states as keys
if v["internal_state"] in outdoor_weather.keys():
_df[k] = outdoor_weather[v["internal_state"]]
elif v["channel"] == CHANNELS.THERMOSTAT_SENSOR:
# sensor data unused for dummy data
# set default
_df[k] = _default_value
elif v["channel"] == CHANNELS.EQUIPMENT:
# equipment data unused for dummy data
# set default
_df[k] = _default_value
            # settings are always in the spec; add them in a specific order
# 1. add HVAC_MODE
k_hvac_mode = [
k
for k, v in spec.full.spec.items()
if v["internal_state"] == STATES.HVAC_MODE
][0]
# assuming sorted ascending by timestamp
# each change point sets all future hvac modes
for _ts, _hvac_mode in hvac_mode_chg_pts.items():
_df.loc[_df.index >= _ts, k_hvac_mode] = _hvac_mode
# 2. add SCHEDULE
k_schedule = [
k
for k, v in spec.full.spec.items()
if v["internal_state"] == STATES.SCHEDULE
][0]
# assuming sorted ascending by timestamp
# each change point sets all future schedules
for _ts, _schedule in schedule_chg_pts.items():
for _dow in range(7):
_dow_schedule = [
_s for _s in _schedule if _s["on_day_of_week"][_dow]
]
_dow_schedule = sorted(
_dow_schedule, key=lambda k: k["minute_of_day"]
)
_prev_dow_schedule = [
_s for _s in _schedule if _s["on_day_of_week"][(_dow - 1) % 7]
]
_prev_dow_schedule = sorted(
_prev_dow_schedule, key=lambda k: k["minute_of_day"]
)
                    # the first period of the day is defined by the last
                    # schedule entry of the previous day of week
_prev_s = _prev_dow_schedule[-1]
_s = _dow_schedule[0]
_df.loc[
(_df.index >= _ts)
& (_df.index.day_of_week == _dow)
& (
_df.index.hour * 60 + _df.index.minute < _s["minute_of_day"]
),
k_schedule,
] = _prev_s["name"]
for _s in _dow_schedule:
_df.loc[
(_df.index >= _ts)
& (_df.index.day_of_week == _dow)
& (
_df.index.hour * 60 + _df.index.minute
>= _s["minute_of_day"]
),
k_schedule,
] = _s["name"]
            # 3. add COMFORT SETPOINTS
k_stp_cool = [
k
for k, v in spec.full.spec.items()
if v["internal_state"] == STATES.TEMPERATURE_STP_COOL
][0]
k_stp_heat = [
k
for k, v in spec.full.spec.items()
if v["internal_state"] == STATES.TEMPERATURE_STP_HEAT
][0]
# assuming sorted ascending by timestamp
# each change point sets all future comfort set points
for _ts, _comfort in comfort_chg_pts.items():
for _schedule_name, _setpoints in _comfort.items():
_df.loc[
(_df.index >= _ts) & (_df[k_schedule] == _schedule_name),
k_stp_cool,
] = _setpoints[STATES.TEMPERATURE_STP_COOL]
_df.loc[
(_df.index >= _ts) & (_df[k_schedule] == _schedule_name),
k_stp_heat,
] = _setpoints[STATES.TEMPERATURE_STP_HEAT]
_df = _df.reset_index().rename(columns={"index": spec.datetime_column})
return _df
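if __name__ == "__main__":
    # Minimal sketch (toy values, not real thermostat data) of the diff-based
    # gap detection that get_full_data_periods() and fill_missing_data() build
    # on: any jump between consecutive valid timestamps larger than the
    # expected period marks the start of a missing-data period.
    import pandas as pd

    expected = pd.Timedelta("300S")
    stamps = pd.to_datetime(
        ["2021-01-01 00:00", "2021-01-01 00:05", "2021-01-01 00:30"]
    )
    diffs = stamps.to_series().diff()
    print(diffs[diffs > expected])  # one 25 minute gap before 00:30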
# === src/the_tale/the_tale/game/mobs/meta_relations.py ===
# repo: al-arz/the-tale | license: BSD-3-Clause
import smart_imports
smart_imports.all()
class Mob(meta_relations_objects.MetaType):
__slots__ = ('caption', )
TYPE = 5
    TYPE_CAPTION = 'Монстр'  # Russian for 'Monster'
def __init__(self, caption, **kwargs):
super(Mob, self).__init__(**kwargs)
self.caption = caption
@property
def url(self):
return utils_urls.url('guide:mobs:show', self.id)
@classmethod
def create_from_object(cls, mob):
return cls(id=mob.id, caption=mob.name)
@classmethod
def create_from_id(cls, id):
from . import storage
mob = storage.mobs.get(id)
if mob is None:
return None
return cls.create_from_object(mob)
@classmethod
def create_from_ids(cls, ids):
return [cls.create_from_id(id) for id in ids]
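# Minimal usage sketch (assumes a loaded mobs storage; id 42 is hypothetical):
#   meta_mob = Mob.create_from_id(42)        # None for unknown ids
#   meta_mobs = Mob.create_from_ids([1, 2])  # list entries may be None
#   meta_mob.url                             # guide page URL for the mob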
# === cloudvolume/datasource/precomputed/mesh/multilod.py ===
# repo: seung-lab/cloud-volume | license: BSD-3-Clause
from collections import defaultdict
import re
import numpy as np
from cloudfiles import CloudFiles
from .common import apply_transform
from .unsharded import UnshardedLegacyPrecomputedMeshSource
from ..sharding import ShardingSpecification, ShardReader
from ....mesh import Mesh
from ....lib import yellow, red, toiter, first
from .... import exceptions
def extract_lod_meshes(manifest, lod, lod_binary, vertex_quantization_bits, transform):
meshdata = defaultdict(list)
for frag in range(manifest.fragment_offsets[lod].shape[0]):
frag_binary = lod_binary[
int(np.sum(manifest.fragment_offsets[lod][0:frag])) :
int(np.sum(manifest.fragment_offsets[lod][0:frag+1]))
]
if len(frag_binary) == 0:
# According to @JBMS, empty fragments are used in cases where a child
# fragment exists, but its parent does not have a corresponding fragment,
# a possible byproduct of running marching cubes and mesh simplification
# independently for each level of detail.
continue
mesh = Mesh.from_draco(frag_binary)
# Convert from "stored model" space to "model" space
mesh.vertices = manifest.grid_origin + manifest.vertex_offsets[lod] + \
manifest.chunk_shape * (2 ** lod) * \
(manifest.fragment_positions[lod][:,frag] + \
(mesh.vertices / (2.0 ** vertex_quantization_bits - 1)))
mesh.vertices = apply_transform(mesh.vertices, transform)
meshdata[manifest.segment_id].append(mesh)
return meshdata
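# Dequantization sketch (hypothetical values): with vertex_quantization_bits
# = 16, a stored coordinate v in [0, 2**16 - 1] maps to the unit cube as
# v / (2**16 - 1); chunk_shape * 2**lod and the fragment grid position then
# scale and place it, grid_origin + vertex_offsets[lod] translate it into
# model space, and apply_transform applies the homogeneous 4x4 transform.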
class UnshardedMultiLevelPrecomputedMeshSource(UnshardedLegacyPrecomputedMeshSource):
def __init__(self, meta, cache, config, readonly=False):
super().__init__(meta, cache, config, readonly)
self.vertex_quantization_bits = self.meta.info['vertex_quantization_bits']
self.lod_scale_multiplier = self.meta.info['lod_scale_multiplier']
self.transform = np.array(self.meta.info['transform'] + [0,0,0,1]).reshape(4,4)
@property
def path(self):
return self.meta.mesh_path
def get_manifest(self, segid, progress=None):
"""Retrieve the manifest for one or more segments."""
segid, multiple_return = toiter(segid, is_iter=True)
progress = progress if progress is not None else self.config.progress
cloudpath = self.meta.join(self.meta.cloudpath, self.path)
cf = CloudFiles(cloudpath, progress=progress)
results = cf.get((f"{sid}.index" for sid in segid ), total=len(segid))
if not multiple_return:
if not results:
return None
binary = results[0]["content"]
if binary is None:
return None
return MultiLevelPrecomputedMeshManifest(binary, segment_id=first(segid), offset=0)
regexp = re.compile(r'(\d+)\.index$')
manifests = []
for res in results:
key = res["path"]
sid = int(re.match(regexp, key).groups()[0])
binary = res["content"]
      if binary is None:
        manifests.append(None)
        continue
      manifest = MultiLevelPrecomputedMeshManifest(binary, segment_id=sid, offset=0)
      manifests.append(manifest)
return manifests
def exists(self, segids, progress=None):
"""
Checks if the mesh exists
Returns: { MultiLevelPrecomputedMeshManifest or None, ... }
"""
cf = CloudFiles(self.path)
return cf.exists(( f"{segid}.index" for segid in segids ))
def get(self, segids, lod=0, concat=True, progress=None):
"""Fetch meshes at a given level of detail (lod).
Parameters:
segids: (iterable or int) segids to render
lod: int, default 0
Level of detail to retrieve. 0 is highest level of detail.
Optional:
concat: bool, concatenate fragments (per segment per lod)
Returns:
{ segid: { Mesh } }
      ... or if concat=False: { segid: { Mesh, ... } }
Reference:
https://github.com/google/neuroglancer/blob/master/src/neuroglancer/datasource/precomputed/meshes.md
"""
if lod < 0:
raise exceptions.ValueError(red(f'lod ({lod}) must be >= 0.'))
progress = progress if progress is not None else self.config.progress
segids = toiter(segids)
# decode all the fragments
manifests = self.get_manifest(segids)
    for segid, manifest in zip(segids, manifests):
      if manifest is None:
        raise exceptions.MeshDecodeError(red(
          f'Manifest not found for segment {segid}.'
        ))
if lod >= manifest.num_lods:
raise exceptions.MeshDecodeError(red(
f'LOD value ({lod}) out of range (0 - {manifest.num_lods - 1}) for segment {manifest.segment_id}.'
))
full_path = self.meta.join(self.meta.cloudpath, self.path)
meshdata = defaultdict(list)
for manifest in manifests:
      # compute per-LOD fragment byte ranges from the manifest offsets
fragment_sizes = [
np.sum(lod_fragment_sizes) for lod_fragment_sizes in manifest.fragment_offsets
]
lod_binary = CloudFiles(
full_path, progress=progress,
green=self.config.green, secrets=self.config.secrets
).get({
'path': str(manifest.segment_id),
'start': np.sum(fragment_sizes[0:lod]),
'end': np.sum(fragment_sizes[0:lod+1]),
})
meshes = extract_lod_meshes(
manifest, lod, lod_binary,
self.vertex_quantization_bits, self.transform
)
meshdata.update(meshes)
if concat:
for segid in meshdata:
meshdata[segid] = Mesh.concatenate(*meshdata[segid])
return meshdata
class ShardedMultiLevelPrecomputedMeshSource(UnshardedLegacyPrecomputedMeshSource):
def __init__(self, meta, cache, config, readonly=False):
super(ShardedMultiLevelPrecomputedMeshSource, self).__init__(meta, cache, config, readonly)
spec = ShardingSpecification.from_dict(self.meta.info['sharding'])
self.reader = ShardReader(meta, cache, spec)
self.vertex_quantization_bits = self.meta.info['vertex_quantization_bits']
self.lod_scale_multiplier = self.meta.info['lod_scale_multiplier']
self.transform = np.array(self.meta.info['transform'] + [0,0,0,1]).reshape(4,4)
@property
def path(self):
return self.meta.mesh_path
def exists(self, segids, progress=None):
"""
Checks if the mesh exists
Returns: { MultiLevelPrecomputedMeshManifest or None, ... }
"""
return [ self.get_manifest(segid) for segid in segids ]
def get_manifest(self, segid, progress=None):
"""Retrieve the manifest for a single segment.
Returns:
{ MultiLevelPrecomputedMeshManifest or None }
"""
manifest_info = self.reader.exists(segid, self.path, return_byte_range=True)
if manifest_info is None:
# Manifest not found
return None
shard_filepath, byte_start, num_bytes = tuple(manifest_info)
binary = self.reader.get_data(segid, self.path)
if binary is None:
return None
return MultiLevelPrecomputedMeshManifest(binary, segment_id=segid, offset=byte_start, path=shard_filepath)
def get(self, segids, lod=0, concat=True, progress=None):
"""Fetch meshes at a given level of detail (lod).
Parameters:
segids: (iterable or int) segids to render
lod: int, default 0
Level of detail to retrieve. 0 is highest level of detail.
Optional:
concat: bool, concatenate fragments (per segment per lod)
Returns:
{ segid: { Mesh } }
      ... or if concat=False: { segid: { Mesh, ... } }
Reference:
https://github.com/google/neuroglancer/blob/master/src/neuroglancer/datasource/precomputed/meshes.md
"""
progress = progress if progress is not None else self.config.progress
segids = toiter(segids)
# decode all the fragments
meshdata = defaultdict(list)
for segid in segids:
# Read the manifest (with a tweak to sharding.py to get the offset)
manifest = self.get_manifest(segid)
      if manifest is None:
raise exceptions.MeshDecodeError(red(
'Manifest not found for segment {}.'.format(segid)
))
if lod < 0 or lod >= manifest.num_lods:
raise exceptions.MeshDecodeError(red(
'LOD value ({}) out of range (0 - {}) for segment {}.'.format(lod, manifest.num_lods - 1, segid)
))
# Read the data for all LODs
fragment_sizes = [
np.sum(lod_fragment_sizes) for lod_fragment_sizes in manifest.fragment_offsets
]
total_fragment_size = np.sum(fragment_sizes)
full_path = self.reader.meta.join(self.reader.meta.cloudpath)
lod_binary = CloudFiles(full_path, progress=progress, secrets=self.config.secrets).get({
'path': manifest.path,
'start': (manifest.offset - total_fragment_size) + np.sum(fragment_sizes[0:lod]),
'end': (manifest.offset - total_fragment_size) + np.sum(fragment_sizes[0:lod+1]),
})
meshes = extract_lod_meshes(
manifest, lod, lod_binary,
self.vertex_quantization_bits, self.transform
)
meshdata.update(meshes)
if concat:
for segid in meshdata:
meshdata[segid] = Mesh.concatenate(*meshdata[segid])
return meshdata
class MultiLevelPrecomputedMeshManifest:
# Parse the multi-resolution mesh manifest file format:
# https://github.com/google/neuroglancer/blob/master/src/neuroglancer/datasource/precomputed/meshes.md
# https://github.com/google/neuroglancer/blob/master/src/neuroglancer/mesh/multiscale.ts
def __init__(self, binary, segment_id, offset, path=None):
self._segment = segment_id
self._binary = binary
self._offset = offset
self._path = path
    # num_lods is the 7th uint32 word of the header
num_lods = int(np.frombuffer(self._binary[6*4:7*4], dtype=np.uint32)[0])
header_dt = np.dtype([
('chunk_shape', np.float32, (3,)),
('grid_origin', np.float32, (3,)),
('num_lods', np.uint32),
('lod_scales', np.float32, (num_lods,)),
('vertex_offsets', np.float32, (num_lods,3)),
('num_fragments_per_lod', np.uint32, (num_lods,))
])
self._header = np.frombuffer(self._binary[0:header_dt.itemsize], dtype=header_dt)
offset = header_dt.itemsize
self._fragment_positions = []
self._fragment_offsets = []
for lod in range(num_lods):
# Read fragment positions
pos_size = 3 * 4 * self.num_fragments_per_lod[lod]
self._fragment_positions.append(
np.frombuffer(self._binary[offset:offset + pos_size], dtype=np.uint32).reshape((3,self.num_fragments_per_lod[lod]))
)
offset += pos_size
# Read fragment sizes
off_size = 4 * self.num_fragments_per_lod[lod]
self._fragment_offsets.append(
np.frombuffer(self._binary[offset:offset + off_size], dtype=np.uint32)
)
offset += off_size
# Make sure we read the entire manifest
if offset != len(binary):
raise exceptions.MeshDecodeError(red(
'Error decoding mesh manifest for segment {}'.format(segment_id)
))
def data_size(self):
fragment_sizes = [
np.sum(lod_fragment_sizes) for lod_fragment_sizes in self.fragment_offsets
]
return np.sum(fragment_sizes)
@property
def segment_id(self):
return self._segment
@property
def chunk_shape(self):
return self._header['chunk_shape'][0]
@property
def grid_origin(self):
return self._header['grid_origin'][0]
@property
def num_lods(self):
return self._header['num_lods'][0]
@property
def lod_scales(self):
return self._header['lod_scales'][0]
@property
def vertex_offsets(self):
return self._header['vertex_offsets'][0]
@property
def num_fragments_per_lod(self):
return self._header['num_fragments_per_lod'][0]
@property
def fragment_positions(self):
return self._fragment_positions
@property
def fragment_offsets(self):
return self._fragment_offsets
@property
def length(self):
return len(self._binary)
@property
def offset(self):
"""Manifest offset within the shard file. Used as a base when calculating fragment offsets."""
return self._offset
@property
def path(self):
return self._path
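if __name__ == "__main__":
  # Minimal round-trip sketch (synthetic values, not from any real dataset):
  # build a one-LOD, one-fragment manifest binary and parse it with the
  # MultiLevelPrecomputedMeshManifest class above.
  header_dt = np.dtype([
    ('chunk_shape', np.float32, (3,)),
    ('grid_origin', np.float32, (3,)),
    ('num_lods', np.uint32),
    ('lod_scales', np.float32, (1,)),
    ('vertex_offsets', np.float32, (1, 3)),
    ('num_fragments_per_lod', np.uint32, (1,)),
  ])
  header = np.zeros(1, dtype=header_dt)
  header['chunk_shape'] = (64, 64, 64)
  header['num_lods'] = 1
  header['num_fragments_per_lod'] = 1
  body = (
    np.zeros(3, dtype=np.uint32).tobytes()        # fragment grid position
    + np.array([100], dtype=np.uint32).tobytes()  # fragment byte size
  )
  manifest = MultiLevelPrecomputedMeshManifest(
    header.tobytes() + body, segment_id=1, offset=0
  )
  assert manifest.num_lods == 1
  assert manifest.fragment_offsets[0][0] == 100
  print('parsed:', manifest.num_lods, 'lod(s),',
        manifest.data_size(), 'bytes of fragment data')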
# === lib/config.py ===
# repo: Sprint-Core/sentinel | license: MIT
"""
Set up defaults and read sentinel.conf
"""
import sys
import os
from sprint_config import SprintConfig
default_sentinel_config = os.path.normpath(
os.path.join(os.path.dirname(__file__), '../sentinel.conf')
)
sentinel_config_file = os.environ.get('SENTINEL_CONFIG', default_sentinel_config)
sentinel_cfg = SprintConfig.tokenize(sentinel_config_file)
sentinel_version = "1.1.0"
min_sprintd_proto_version_with_sentinel_ping = 70207
def get_sprint_conf():
home = os.environ.get('HOME')
sprint_conf = os.path.join(home, ".sprintcore/sprint.conf")
if sys.platform == 'darwin':
sprint_conf = os.path.join(home, "Library/Application Support/SprintCore/sprint.conf")
sprint_conf = sentinel_cfg.get('sprint_conf', sprint_conf)
return sprint_conf
def get_network():
return sentinel_cfg.get('network', 'mainnet')
def sqlite_test_db_name(sqlite_file_path):
(root, ext) = os.path.splitext(sqlite_file_path)
test_sqlite_file_path = root + '_test' + ext
return test_sqlite_file_path
def get_db_conn():
import peewee
env = os.environ.get('SENTINEL_ENV', 'production')
# default values should be used unless you need a different config for development
db_host = sentinel_cfg.get('db_host', '127.0.0.1')
db_port = sentinel_cfg.get('db_port', None)
db_name = sentinel_cfg.get('db_name', 'sentinel')
db_user = sentinel_cfg.get('db_user', 'sentinel')
db_password = sentinel_cfg.get('db_password', 'sentinel')
db_charset = sentinel_cfg.get('db_charset', 'utf8mb4')
db_driver = sentinel_cfg.get('db_driver', 'sqlite')
if (env == 'test'):
if db_driver == 'sqlite':
db_name = sqlite_test_db_name(db_name)
else:
db_name = "%s_test" % db_name
peewee_drivers = {
'mysql': peewee.MySQLDatabase,
'postgres': peewee.PostgresqlDatabase,
'sqlite': peewee.SqliteDatabase,
}
driver = peewee_drivers.get(db_driver)
dbpfn = 'passwd' if db_driver == 'mysql' else 'password'
db_conn = {
'host': db_host,
'user': db_user,
dbpfn: db_password,
}
if db_port:
db_conn['port'] = int(db_port)
if driver == peewee.SqliteDatabase:
db_conn = {}
db = driver(db_name, **db_conn)
return db
sprint_conf = get_sprint_conf()
network = get_network()
db = get_db_conn()
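if __name__ == "__main__":
    # Quick sanity sketch (hypothetical path; running this requires a
    # readable sentinel.conf, since the module loads it at import time):
    # the test database name simply gains a '_test' suffix before the
    # file extension.
    assert sqlite_test_db_name("database/sentinel.db") == "database/sentinel_test.db"
    print("network:", network)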
# === elasticapm/utils/deprecation.py ===
# repo: lyrixderaven/apm-agent-python | license: BSD-3-Clause
import functools
import warnings
# https://wiki.python.org/moin/PythonDecoratorLibrary#Smart_deprecation_warnings_.28with_valid_filenames.2C_line_numbers.2C_etc..29
# Updated to work with 2.6 and 3+.
from elasticapm.utils import compat
def deprecated(alternative=None):
"""This is a decorator which can be used to mark functions
as deprecated. It will result in a warning being emitted
when the function is used."""
def real_decorator(func):
@functools.wraps(func)
def new_func(*args, **kwargs):
msg = "Call to deprecated function {0}.".format(func.__name__)
if alternative:
msg += " Use {0} instead".format(alternative)
warnings.warn_explicit(
msg,
category=DeprecationWarning,
filename=compat.get_function_code(func).co_filename,
lineno=compat.get_function_code(func).co_firstlineno + 1,
)
return func(*args, **kwargs)
return new_func
return real_decorator
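if __name__ == "__main__":
    # Minimal usage sketch (hypothetical function names): a decorated call
    # still returns normally but emits a DeprecationWarning that points at
    # the deprecated function's definition.
    @deprecated(alternative="new_add()")
    def old_add(a, b):
        return a + b

    with warnings.catch_warnings(record=True) as caught:
        warnings.simplefilter("always")
        assert old_add(1, 2) == 3
    print(caught[0].message)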
# === torchtext/experimental/__init__.py ===
# repo: abhinavarora/text | license: BSD-3-Clause
from . import datasets, models, transforms
__all__ = ["datasets", "transforms", "models"]
# === treegrad/utils_interpret.py ===
# repo: charliec443/TreeGrad2 | license: MIT
import numpy as np
import pandas as pd
import tensorflow as tf
class BiasLayer(tf.keras.layers.Layer):
def __init__(self, intercept=None, **kwargs):
super().__init__(**kwargs)
        self.intercept = intercept
self.bias = None
self.build()
def build(self, input_shape=None):
if self.intercept is not None and self.bias is None:
self.bias = self.add_weight(
"bias",
shape=(1,),
trainable=True,
initializer=tf.constant_initializer(self.intercept),
)
elif self.bias is None:
self.bias = self.add_weight(
"bias",
shape=(1,),
trainable=True,
)
def call(self, inputs):
return tf.math.reduce_sum(tf.concat(inputs, axis=1), axis=1) + self.bias
class InteractionLayer(tf.keras.layers.Layer):
def __init__(self, shape):
super().__init__()
self.shape = shape
def build(self, input_shape=None):
self.lookup = self.add_weight("interaction_lookup", shape=self.shape)
def call(self, inputs):
indx = tf.stack(inputs, axis=-1)
return tf.gather_nd(self.lookup, indx)
class EBMClassifier(tf.keras.Model):
def __init__(self, model=None, set_weights=False):
super().__init__()
self.model = model
if len(self.model.classes_) > 2:
raise Exception("Multiclass classification not supported at this time")
if set_weights:
self.bias = BiasLayer(name="bias", intercept=model.intercept_)
else:
self.bias = BiasLayer(name="bias")
self.sigmoid = tf.keras.layers.Activation("sigmoid")
self.create_model(set_weights)
def create_model(self, set_weights=False):
"""
See reference: https://github.com/interpretml/ebm2onnx/blob/master/ebm2onnx/convert.py
"""
self.feature_model = []
self.feature_info = []
self.feature_names = []
for feature_index in range(len(self.model.feature_names)):
feature_name = self.model.feature_names[feature_index]
feature_name = feature_name.replace(" ", "__")
feature_type = self.model.feature_types[feature_index]
feature_group = self.model.feature_groups_[feature_index]
self.feature_names.append(feature_name)
info_config = {}
# print(feature_type)
if feature_type == "continuous":
self.feature_model.append(
tf.keras.Sequential(
[
tf.keras.layers.Discretization(
list(self.model.preprocessor_.col_bin_edges_[feature_group[0]]),
input_shape=(1,),
),
tf.keras.layers.IntegerLookup(
len(self.model.preprocessor_.col_bin_edges_[feature_group[0]]) + 1,
vocabulary=tf.constant(
list(range(len(self.model.preprocessor_.col_bin_edges_[feature_group[0]])))
),
output_mode="one_hot",
pad_to_max_tokens=True,
),
tf.keras.layers.Dense(1, use_bias=False),
],
name=f"model_{feature_name}",
)
)
info_config["feature_type"] = feature_type
info_config["column_name"] = feature_name
info_config["column_index"] = feature_group[0]
info_config["scores"] = self.model.additive_terms_[feature_index][1:]
self.feature_info.append(info_config)
if feature_type == "categorical":
self.feature_model.append(
tf.keras.Sequential(
[
tf.keras.layers.StringLookup(
max_tokens=len(list(self.model.preprocessor_.col_mapping_[feature_group[0]].keys()))
+ 1,
vocabulary=list(self.model.preprocessor_.col_mapping_[feature_group[0]].keys()),
output_mode="one_hot",
pad_to_max_tokens=True,
input_shape=(1,),
),
tf.keras.layers.Dense(1, use_bias=False),
],
name=f"model_{feature_name}",
)
)
info_config["feature_type"] = feature_type
info_config["column_name"] = feature_name
info_config["column_index"] = feature_group[0]
info_config["scores"] = self.model.additive_terms_[feature_index]
self.feature_info.append(info_config)
elif feature_type == "interaction":
interactions = [self.model.feature_types[idx] for idx in feature_group]
# else not implemented right now
if interactions[0] == "continuous":
left_input = tf.keras.layers.Input(shape=(1,))
left_size = len(self.model.pair_preprocessor_.col_bin_edges_[feature_group[0]].tolist()) + 1
left_x = tf.keras.layers.Discretization(
self.model.pair_preprocessor_.col_bin_edges_[feature_group[0]].tolist()
)(left_input)
left_x = tf.keras.layers.IntegerLookup(
len(self.model.pair_preprocessor_.col_bin_edges_[feature_group[0]].tolist()) + 1,
vocabulary=tf.constant(
list(range(len(self.model.pair_preprocessor_.col_bin_edges_[feature_group[0]].tolist())))
),
)(left_x)
elif interactions[0] == "categorical":
left_input = tf.keras.layers.Input(shape=(1,), dtype=tf.string)
left_size = len(list(self.model.preprocessor_.col_mapping_[feature_group[0]].keys())) + 1
left_x = tf.keras.layers.StringLookup(
max_tokens=len(list(self.model.preprocessor_.col_mapping_[feature_group[0]].keys())) + 1,
vocabulary=list(self.model.preprocessor_.col_mapping_[feature_group[0]].keys()),
pad_to_max_tokens=True,
)(left_input)
left_x = tf.keras.layers.IntegerLookup(
len(self.model.pair_preprocessor_.col_mapping_[feature_group[0]].keys()) + 1,
vocabulary=tf.constant(
list(range(len(self.model.pair_preprocessor_.col_mapping_[feature_group[0]].keys())))
),
)(left_x)
else:
raise ValueError("")
if interactions[1] == "continuous":
right_input = tf.keras.layers.Input(shape=(1,))
right_size = len(self.model.pair_preprocessor_.col_bin_edges_[feature_group[1]].tolist()) + 1
right_x = tf.keras.layers.Discretization(
self.model.pair_preprocessor_.col_bin_edges_[feature_group[1]].tolist()
)(right_input)
right_x = tf.keras.layers.IntegerLookup(
len(self.model.pair_preprocessor_.col_bin_edges_[feature_group[1]].tolist()) + 1,
vocabulary=tf.constant(
list(range(len(self.model.pair_preprocessor_.col_bin_edges_[feature_group[1]].tolist())))
),
)(right_x)
elif interactions[1] == "categorical":
right_input = tf.keras.layers.Input(shape=(1,), dtype=tf.string)
right_size = len(list(self.model.preprocessor_.col_mapping_[feature_group[1]].keys())) + 1
right_x = tf.keras.layers.StringLookup(
max_tokens=len(list(self.model.preprocessor_.col_mapping_[feature_group[1]].keys())) + 1,
vocabulary=list(self.model.preprocessor_.col_mapping_[feature_group[1]].keys()),
pad_to_max_tokens=True,
)(right_input)
right_x = tf.keras.layers.IntegerLookup(
len(self.model.pair_preprocessor_.col_mapping_[feature_group[1]].keys()) + 1,
vocabulary=tf.constant(
list(range(len(self.model.pair_preprocessor_.col_mapping_[feature_group[1]].keys())))
),
)(right_x)
else:
raise ValueError("")
# assert interactions[0] == "continuous"
# assert interactions[1] == "continuous"
# left_input = tf.keras.Sequential([
# tf.keras.layers.Discretization(self.model.pair_preprocessor_.col_bin_edges_[feature_group[0]].tolist()),
# tf.keras.layers.IntegerLookup(len(self.model.pair_preprocessor_.col_bin_edges_[feature_group[0]].tolist()) + 1,
# vocabulary = tf.constant(list(range(len(self.model.pair_preprocessor_.col_bin_edges_[feature_group[0]].tolist())))))
# ])
# right_input = tf.keras.Sequential([
# tf.keras.layers.Discretization(self.model.pair_preprocessor_.col_bin_edges_[feature_group[1]].tolist()),
# tf.keras.layers.IntegerLookup(len(self.model.pair_preprocessor_.col_bin_edges_[feature_group[1]].tolist()) + 1,
# vocabulary = tf.constant(list(range(len(self.model.pair_preprocessor_.col_bin_edges_[feature_group[1]].tolist())))))
# ])
# print(left_input, right_input)
# print(dir(left_input))
output = InteractionLayer([left_size, right_size])([left_x, right_x])
self.feature_model.append(
tf.keras.Model(
inputs=[left_input, right_input],
outputs=output,
name=f"model_{feature_name}",
)
)
info_config["feature_type"] = interactions
info_config["column_name"] = [self.model.preprocessor_.feature_names[idx] for idx in feature_group]
info_config["column_index"] = list(feature_group)
info_config["scores"] = self.model.additive_terms_[feature_index] # [left_slice, right_slice]
self.feature_info.append(info_config)
else:
# raise NotImplementedError("")
continue
if set_weights:
for feature_index in range(len(self.feature_names)):
nm = self.feature_names[feature_index]
w = self.feature_model[feature_index].weights[0]
learned_w = self.feature_info[feature_index]["scores"]
try:
if len(self.feature_info[feature_index]["scores"].shape) == 2:
if w.shape[0] < learned_w.shape[0]:
learned_w = learned_w[1:, :]
if w.shape[1] < learned_w.shape[1]:
learned_w = learned_w[:, 1:]
else:
learned_w = learned_w.reshape(w.shape)
self.feature_model[feature_index].set_weights([learned_w])
except Exception:
print(nm, w.shape, learned_w.shape)
self.bias.set_weights([np.array(self.model.intercept_).reshape((1,))])
def call(self, inputs):
# print(zip(self.feature_model, self.feature_info))
outputs = []
if type(inputs) in [pd.DataFrame]:
for mod, info in zip(self.feature_model, self.feature_info):
cols = info["column_name"]
if type(cols) is str:
                    outputs.append(mod(tf.convert_to_tensor(inputs[cols].tolist())))
else:
outputs.append(
mod(
[
                                tf.convert_to_tensor(inputs[cols[0]].tolist()),
                                tf.convert_to_tensor(inputs[cols[1]].tolist()),
]
)
)
elif type(inputs) in [dict]:
for mod, info in zip(self.feature_model, self.feature_info):
cols = info["column_name"]
if type(cols) is str:
outputs.append(mod(inputs[cols]))
else:
outputs.append(mod([inputs[cols[0]], inputs[cols[1]]]))
else:
for mod, info in zip(self.feature_model, self.feature_info):
cols = info["column_index"]
if type(cols) is str or type(cols) is int:
if info["feature_type"] == "continuous":
c_input = tf.cast(inputs[:, cols], tf.float32)
else:
c_input = tf.convert_to_tensor(inputs[:, cols])
outputs.append(mod(c_input))
else:
if info["feature_type"][0] == "continuous":
l_input = tf.cast(inputs[:, cols[0]], tf.float32)
else:
l_input = tf.convert_to_tensor(inputs[:, cols[0]])
if info["feature_type"][1] == "continuous":
r_input = tf.cast(inputs[:, cols[1]], tf.float32)
else:
r_input = tf.convert_to_tensor(inputs[:, cols[1]])
outputs.append(mod([l_input, r_input]))
# else:
# for mod, info in zip(self.feature_model, self.feature_info):
# cols = info["column_index"]
# if type(cols) is str or type(cols) is int:
# if info["feature_type"] == "continuous":
# c_input = tf.cast(inputs[:, cols].tolist(), tf.float32)
# else:
# c_input = tf.convert_to_tensor(inputs[:, cols].tolist())
# outputs.append(mod(c_input))
# else:
# if info["feature_type"][0] == "continuous":
# l_input = tf.cast(inputs[:, cols[0]].tolist(), tf.float32)
# else:
# l_input = tf.convert_to_tensor(inputs[:, cols[0]].tolist())
# if info["feature_type"][1] == "continuous":
# r_input = tf.cast(inputs[:, cols[1]].tolist(), tf.float32)
# else:
# r_input = tf.convert_to_tensor(inputs[:, cols[1]].tolist())
# outputs.append(mod([l_input, r_input]))
pre_activation = self.bias(outputs)
return self.sigmoid(pre_activation)
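if __name__ == "__main__":
    # Minimal sketch of BiasLayer (assumes TensorFlow 2.x is installed):
    # per-feature score tensors are concatenated, summed per row, and the
    # scalar bias (here a hypothetical intercept of 0.5) is added.
    bias = BiasLayer(intercept=0.5)
    scores = [tf.constant([[1.0], [2.0]]), tf.constant([[3.0], [4.0]])]
    print(bias(scores).numpy())  # -> [4.5 6.5]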
# === airflow/utils/logging.py ===
# repo: Avik1993/incubator-airflow | license: Apache-2.0
# -*- coding: utf-8 -*-
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from builtins import object
import logging
from airflow import configuration
from airflow.exceptions import AirflowException
class LoggingMixin(object):
"""
Convenience super-class to have a logger configured with the class name
"""
@property
def logger(self):
try:
return self._logger
except AttributeError:
self._logger = logging.root.getChild(self.__class__.__module__ + '.' + self.__class__.__name__)
return self._logger
class S3Log(object):
"""
Utility class for reading and writing logs in S3.
Requires airflow[s3] and setting the REMOTE_BASE_LOG_FOLDER and
REMOTE_LOG_CONN_ID configuration options in airflow.cfg.
"""
def __init__(self):
remote_conn_id = configuration.get('core', 'REMOTE_LOG_CONN_ID')
try:
from airflow.hooks.S3_hook import S3Hook
self.hook = S3Hook(remote_conn_id)
except:
self.hook = None
logging.error(
'Could not create an S3Hook with connection id "{}". '
'Please make sure that airflow[s3] is installed and '
'the S3 connection exists.'.format(remote_conn_id))
def read(self, remote_log_location, return_error=False):
"""
Returns the log found at the remote_log_location. Returns '' if no
logs are found or there is an error.
:param remote_log_location: the log's location in remote storage
:type remote_log_location: string (path)
:param return_error: if True, returns a string error message if an
error occurs. Otherwise returns '' when an error occurs.
:type return_error: bool
"""
if self.hook:
try:
s3_key = self.hook.get_key(remote_log_location)
if s3_key:
return s3_key.get_contents_as_string().decode()
except:
pass
# raise/return error if we get here
err = 'Could not read logs from {}'.format(remote_log_location)
logging.error(err)
return err if return_error else ''
def write(self, log, remote_log_location, append=False):
"""
Writes the log to the remote_log_location. Fails silently if no hook
was created.
:param log: the log to write to the remote_log_location
:type log: string
:param remote_log_location: the log's location in remote storage
:type remote_log_location: string (path)
:param append: if False, any existing log file is overwritten. If True,
the new log is appended to any existing logs.
:type append: bool
"""
if self.hook:
if append:
old_log = self.read(remote_log_location)
log = old_log + '\n' + log
try:
self.hook.load_string(
log,
key=remote_log_location,
replace=True,
encrypt=configuration.getboolean('core', 'ENCRYPT_S3_LOGS'))
return
except:
pass
# raise/return error if we get here
logging.error('Could not write logs to {}'.format(remote_log_location))
class GCSLog(object):
"""
Utility class for reading and writing logs in GCS. Requires
airflow[gcp_api] and setting the REMOTE_BASE_LOG_FOLDER and
REMOTE_LOG_CONN_ID configuration options in airflow.cfg.
"""
def __init__(self):
"""
Attempt to create hook with airflow[gcp_api].
"""
remote_conn_id = configuration.get('core', 'REMOTE_LOG_CONN_ID')
self.hook = None
try:
from airflow.contrib.hooks.gcs_hook import GoogleCloudStorageHook
self.hook = GoogleCloudStorageHook(
google_cloud_storage_conn_id=remote_conn_id)
except:
logging.error(
'Could not create a GoogleCloudStorageHook with connection id '
'"{}". Please make sure that airflow[gcp_api] is installed '
'and the GCS connection exists.'.format(remote_conn_id))
def read(self, remote_log_location, return_error=False):
"""
Returns the log found at the remote_log_location.
:param remote_log_location: the log's location in remote storage
:type remote_log_location: string (path)
:param return_error: if True, returns a string error message if an
error occurs. Otherwise returns '' when an error occurs.
:type return_error: bool
"""
if self.hook:
try:
bkt, blob = self.parse_gcs_url(remote_log_location)
return self.hook.download(bkt, blob).decode()
except:
pass
# raise/return error if we get here
err = 'Could not read logs from {}'.format(remote_log_location)
logging.error(err)
return err if return_error else ''
def write(self, log, remote_log_location, append=False):
"""
Writes the log to the remote_log_location. Fails silently if no hook
was created.
:param log: the log to write to the remote_log_location
:type log: string
:param remote_log_location: the log's location in remote storage
:type remote_log_location: string (path)
:param append: if False, any existing log file is overwritten. If True,
the new log is appended to any existing logs.
:type append: bool
"""
if self.hook:
if append:
old_log = self.read(remote_log_location)
log = old_log + '\n' + log
try:
bkt, blob = self.parse_gcs_url(remote_log_location)
from tempfile import NamedTemporaryFile
with NamedTemporaryFile(mode='w+') as tmpfile:
tmpfile.write(log)
# Force the file to be flushed, since we're doing the
# upload from within the file context (it hasn't been
# closed).
tmpfile.flush()
self.hook.upload(bkt, blob, tmpfile.name)
            except Exception:
# raise/return error if we get here
logging.error('Could not write logs to {}'.format(remote_log_location))
def parse_gcs_url(self, gsurl):
"""
Given a Google Cloud Storage URL (gs://<bucket>/<blob>), returns a
tuple containing the corresponding bucket and blob.
"""
        try:
            # Python 3
            from urllib.parse import urlparse
        except ImportError:
            # Python 2
            from urlparse import urlparse
parsed_url = urlparse(gsurl)
if not parsed_url.netloc:
raise AirflowException('Please provide a bucket name')
else:
bucket = parsed_url.netloc
blob = parsed_url.path.strip('/')
return (bucket, blob)
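    # Illustrative behaviour (a sketch; `gcs_log` is a hypothetical instance):
    #
    #   gcs_log.parse_gcs_url('gs://my-bucket/logs/task.log')
    #   # -> ('my-bucket', 'logs/task.log')
    #
    # A URL without a bucket, e.g. 'gs:///logs/task.log', raises
    # AirflowException('Please provide a bucket name').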
| 36.317757
| 107
| 0.613742
|
4a1847e01c784420779f30f7319d8ac4dc09e4ff
| 252
|
py
|
Python
|
Test_Condiut/email_generator.py
|
Lia4-info/conduit
|
65f3b4b0be86c0ce0ae6b8eb95f7f43810727b58
|
[
"MIT"
] | null | null | null |
Test_Condiut/email_generator.py
|
Lia4-info/conduit
|
65f3b4b0be86c0ce0ae6b8eb95f7f43810727b58
|
[
"MIT"
] | null | null | null |
Test_Condiut/email_generator.py
|
Lia4-info/conduit
|
65f3b4b0be86c0ce0ae6b8eb95f7f43810727b58
|
[
"MIT"
] | null | null | null |
import time
from datetime import datetime
now = datetime.now()
print(now)
email_num = now.strftime("%d%H")
username = f"TKori{email_num}"
print(username)
email = f"tk{email_num}@mail.com"
print(email)
password = f"TKpass{email_num}"
print(password)
| 16.8
| 33
| 0.742063
|
4a184940c802539f918633edacfe685c8e85f07f
| 7,316
|
py
|
Python
|
test/geocoders/geonames.py
|
hyper750/geopy
|
564b9cce9ee595ca6cc5c1c965a28b9e2925ebb9
|
[
"MIT"
] | 1
|
2019-10-03T10:29:13.000Z
|
2019-10-03T10:29:13.000Z
|
test/geocoders/geonames.py
|
hyper750/geopy
|
564b9cce9ee595ca6cc5c1c965a28b9e2925ebb9
|
[
"MIT"
] | null | null | null |
test/geocoders/geonames.py
|
hyper750/geopy
|
564b9cce9ee595ca6cc5c1c965a28b9e2925ebb9
|
[
"MIT"
] | 1
|
2021-04-09T15:12:15.000Z
|
2021-04-09T15:12:15.000Z
|
# -*- coding: UTF-8 -*-
import unittest
import uuid
import pytz
from geopy import Point
from geopy.compat import u
from geopy.exc import GeocoderAuthenticationFailure, GeocoderQueryError
from geopy.geocoders import GeoNames
from test.geocoders.util import GeocoderTestBase, env
class GeoNamesTestCaseUnitTest(GeocoderTestBase):
def test_user_agent_custom(self):
geocoder = GeoNames(
username='DUMMYUSER_NORBERT',
user_agent='my_user_agent/1.0'
)
self.assertEqual(geocoder.headers['User-Agent'], 'my_user_agent/1.0')
@unittest.skipUnless(
bool(env.get('GEONAMES_USERNAME')),
"No GEONAMES_USERNAME env variable set"
)
class GeoNamesTestCase(GeocoderTestBase):
delta = 0.04
@classmethod
def setUpClass(cls):
cls.geocoder = GeoNames(username=env['GEONAMES_USERNAME'])
def reverse_timezone_run(self, payload, expected):
timezone = self._make_request(self.geocoder.reverse_timezone, **payload)
if expected is None:
self.assertIsNone(timezone)
else:
self.assertEqual(timezone.pytz_timezone, expected)
return timezone
def test_unicode_name(self):
self.geocode_run(
{"query": "Mount Everest, Nepal"},
{"latitude": 27.987, "longitude": 86.925},
skiptest_on_failure=True, # sometimes the result is empty
)
def test_query_urlencoding(self):
location = self.geocode_run(
{"query": u("Ry\u016b\u014d")},
{"latitude": 35.65, "longitude": 138.5},
skiptest_on_failure=True, # sometimes the result is empty
)
self.assertIn(u("Ry\u016b\u014d"), location.address)
def test_reverse(self):
location = self.reverse_run(
{
"query": "40.75376406311989, -73.98489005863667",
"exactly_one": True,
},
{
"latitude": 40.75376406311989,
"longitude": -73.98489005863667,
},
)
self.assertIn("Times Square", location.address)
def test_geocode_empty_response(self):
self.geocode_run(
{"query": "sdlahaslkhdkasldhkjsahdlkash"},
{},
expect_failure=True,
)
def test_reverse_nearby_place_name_raises_for_feature_code(self):
with self.assertRaises(ValueError):
self.reverse_run(
{
"query": "40.75376406311989, -73.98489005863667",
"exactly_one": True,
"feature_code": "ADM1",
},
{},
)
with self.assertRaises(ValueError):
self.reverse_run(
{
"query": "40.75376406311989, -73.98489005863667",
"exactly_one": True,
"feature_code": "ADM1",
"find_nearby_type": "findNearbyPlaceName",
},
{},
)
def test_reverse_nearby_place_name_lang(self):
location = self.reverse_run(
{
"query": "52.50, 13.41",
"exactly_one": True,
"lang": 'ru',
},
{},
)
self.assertIn(u'Берлин, Германия', location.address)
def test_reverse_find_nearby_raises_for_lang(self):
with self.assertRaises(ValueError):
self.reverse_run(
{
"query": "40.75376406311989, -73.98489005863667",
"exactly_one": True,
"find_nearby_type": 'findNearby',
"lang": 'en',
},
{},
)
def test_reverse_find_nearby(self):
location = self.reverse_run(
{
"query": "40.75376406311989, -73.98489005863667",
"exactly_one": True,
"find_nearby_type": 'findNearby',
},
{
"latitude": 40.75376406311989,
"longitude": -73.98489005863667,
},
)
self.assertIn("New York, United States", location.address)
def test_reverse_find_nearby_feature_code(self):
self.reverse_run(
{
"query": "40.75376406311989, -73.98489005863667",
"exactly_one": True,
"find_nearby_type": 'findNearby',
"feature_code": "ADM1",
},
{
"latitude": 40.16706,
"longitude": -74.49987,
},
)
def test_reverse_raises_for_unknown_find_nearby_type(self):
with self.assertRaises(GeocoderQueryError):
self.reverse_run(
{
"query": "40.75376406311989, -73.98489005863667",
"exactly_one": True,
"find_nearby_type": "findSomethingNonExisting",
},
{},
)
def test_reverse_timezone(self):
new_york_point = Point(40.75376406311989, -73.98489005863667)
america_new_york = pytz.timezone("America/New_York")
timezone = self.reverse_timezone_run(
{"query": new_york_point},
america_new_york,
)
self.assertEqual(timezone.raw['countryCode'], 'US')
def test_reverse_timezone_unknown(self):
self.reverse_timezone_run(
# Geonames doesn't return `timezoneId` for Antarctica,
# but it provides GMT offset which can be used
# to create a FixedOffset pytz timezone.
{"query": "89.0, 1.0"},
pytz.UTC,
)
self.reverse_timezone_run(
{"query": "89.0, 80.0"},
pytz.FixedOffset(5 * 60),
)
def test_country_str(self):
self.geocode_run(
{"query": "kazan", "country": "TR"},
{"latitude": 40.2317, "longitude": 32.6839},
)
def test_country_list(self):
self.geocode_run(
{"query": "kazan", "country": ["CN", "TR", "JP"]},
{"latitude": 40.2317, "longitude": 32.6839},
)
def test_country_bias(self):
self.geocode_run(
{"query": "kazan", "country_bias": "TR"},
{"latitude": 40.2317, "longitude": 32.6839},
)
class GeoNamesInvalidAccountTestCase(GeocoderTestBase):
@classmethod
def setUpClass(cls):
cls.geocoder = GeoNames(username="geopy-not-existing-%s" % uuid.uuid4())
def reverse_timezone_run(self, payload, expected):
timezone = self._make_request(self.geocoder.reverse_timezone, **payload)
if expected is None:
self.assertIsNone(timezone)
else:
self.assertEqual(timezone.pytz_timezone, expected)
return timezone
def test_geocode(self):
with self.assertRaises(GeocoderAuthenticationFailure):
self.geocode_run(
{"query": "moscow"},
{},
expect_failure=True,
)
def test_reverse_timezone(self):
with self.assertRaises(GeocoderAuthenticationFailure):
self.reverse_timezone_run(
{"query": "40.6997716, -73.9753359"},
None,
)
| 31.399142
| 80
| 0.54497
|
4a18494d19ba617529d0402f74d293a7783b6868
| 913
|
py
|
Python
|
model.py
|
NB-prog/multiclass-classificationDistilBert
|
ed64da14ac59e1c2992a8eb80e45befee6f02879
|
[
"MIT"
] | null | null | null |
model.py
|
NB-prog/multiclass-classificationDistilBert
|
ed64da14ac59e1c2992a8eb80e45befee6f02879
|
[
"MIT"
] | null | null | null |
model.py
|
NB-prog/multiclass-classificationDistilBert
|
ed64da14ac59e1c2992a8eb80e45befee6f02879
|
[
"MIT"
] | null | null | null |
import config
import transformers
import torch.nn
from torch.utils.data import Dataset, DataLoader
from transformers import DistilBertModel, DistilBertTokenizer
class DistillBERTClass(torch.nn.Module):
def __init__(self):
super(DistillBERTClass, self).__init__()
self.l1 = DistilBertModel.from_pretrained("distilbert-base-uncased")
self.pre_classifier = torch.nn.Linear(768, 768)
self.dropout = torch.nn.Dropout(0.3)
self.classifier = torch.nn.Linear(768, 4)
def forward(self, input_ids, attention_mask):
output_1 = self.l1(input_ids=input_ids, attention_mask=attention_mask)
hidden_state = output_1[0]
pooler = hidden_state[:, 0]
pooler = self.pre_classifier(pooler)
pooler = torch.nn.ReLU()(pooler)
pooler = self.dropout(pooler)
output = self.classifier(pooler)
return output
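# Minimal forward-pass sketch (the sentence and variable names are made up
# for illustration; assumes the matching pretrained tokenizer is available):
if __name__ == "__main__":
    tokenizer = DistilBertTokenizer.from_pretrained("distilbert-base-uncased")
    model = DistillBERTClass()
    enc = tokenizer("an example headline to classify", return_tensors="pt")
    logits = model(enc["input_ids"], enc["attention_mask"])
    print(logits.shape)  # expected: torch.Size([1, 4]), one score per class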
| 39.695652
| 79
| 0.684556
|
4a184ab656349660841b015a1c51d725ae21dd56
| 701
|
py
|
Python
|
migrations/versions/b9618f6fb95f_initial_migration.py
|
Kenneth-joseph/Blogs
|
b6c508d36cdf2f874c233485003021d10567de7b
|
[
"Unlicense"
] | null | null | null |
migrations/versions/b9618f6fb95f_initial_migration.py
|
Kenneth-joseph/Blogs
|
b6c508d36cdf2f874c233485003021d10567de7b
|
[
"Unlicense"
] | null | null | null |
migrations/versions/b9618f6fb95f_initial_migration.py
|
Kenneth-joseph/Blogs
|
b6c508d36cdf2f874c233485003021d10567de7b
|
[
"Unlicense"
] | 1
|
2021-11-17T11:03:08.000Z
|
2021-11-17T11:03:08.000Z
|
"""Initial Migration
Revision ID: b9618f6fb95f
Revises: f1a90e784faa
Create Date: 2020-01-19 12:25:29.005897
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = 'b9618f6fb95f'
down_revision = 'f1a90e784faa'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.drop_constraint('users_password_hash_key', 'users', type_='unique')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.create_unique_constraint('users_password_hash_key', 'users', ['password_hash'])
# ### end Alembic commands ###
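# Applied with the standard Alembic CLI (illustrative):
#   alembic upgrade b9618f6fb95f     # drops the unique constraint
#   alembic downgrade f1a90e784faa   # restores it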
| 24.172414
| 86
| 0.71184
|
4a184ae73a8481732fb24e6b2a17e45cb09246e1
| 5,107
|
py
|
Python
|
src/zhinst/labber/generator/conf.py
|
markheik/zhinst-labber
|
d43479729c511c98f83cb84728bfa84d7098fa52
|
[
"MIT"
] | null | null | null |
src/zhinst/labber/generator/conf.py
|
markheik/zhinst-labber
|
d43479729c511c98f83cb84728bfa84d7098fa52
|
[
"MIT"
] | null | null | null |
src/zhinst/labber/generator/conf.py
|
markheik/zhinst-labber
|
d43479729c511c98f83cb84728bfa84d7098fa52
|
[
"MIT"
] | null | null | null |
import string
import typing as t
from zhinst.labber.generator.helpers import tooltip
class LabberConfiguration:
"""Labber JSON configuration handler.
Parses selected values from given settings based on name and mode.
Args:
name: Name of the Zurich Instrument object.
mode: What parts to read from the settings file.
'NORMAL' | 'ADVANCED'
settings: Settings for the given object
JSON schema: `zhinst/labber/resources/settings_json_schema.json`
"""
def __init__(self, name: str, mode: str, settings: t.Dict):
self._name = name.upper()
self._mode = mode.lower()
self.json_settings = settings.copy()
self._dev_base = name.rstrip(string.digits)
self._dev_names = self._combine_devices(self._dev_base)
self.dev_settings = self.json_settings.get(self._dev_base, {})
self._quants = self._find_quants()
def _combine_devices(self, name: str) -> t.List[str]:
"""Combine device quants in case the device is a combination
if multiple devices."""
if "SHFQC" in name:
return ["SHFQC", "SHFQA", "SHFSG"]
return [name]
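        # Worked example (illustrative): trailing digits are already stripped
        # in __init__ ("SHFQC4".rstrip(string.digits) -> "SHFQC"), so:
        #   self._combine_devices("SHFQC")  # -> ["SHFQC", "SHFQA", "SHFSG"]
        #   self._combine_devices("SHFSG")  # -> ["SHFSG"]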
def _find_quants(self) -> t.Dict:
"""Find quants based on device type and selected mode.
Returns:
Dictionary of all matching quant objects.
"""
quants = self.json_settings["common"].get("quants", {}).copy()
for dev_name in self._dev_names:
quants.update(self.json_settings.get(dev_name, {}).get("quants", {}))
for quant, defs in quants.copy().items():
if not defs.get("conf", {}) or defs.get("add", None) is None:
quants.pop(quant)
continue
if defs["conf"].get("tooltip", None):
quants[quant]["conf"]["tooltip"] = tooltip(defs["conf"]["tooltip"])
if "mapping" in defs.keys():
for dev_name in self._dev_names:
map_ = defs["mapping"].get(dev_name, {})
quants.pop(quant, None)
if not map_:
continue
quants[map_["path"]] = {
"indexes": map_["indexes"],
"conf": defs["conf"],
"add": defs["add"],
}
elif "dev_type" in defs.keys():
if not self._name in defs["dev_type"]:
quants.pop(quant)
return quants
@property
def quant_order(self) -> t.Dict:
"""Quant order"""
common = self.json_settings["common"].get("quantOrder", {}).copy()
common.update(self.dev_settings.get("quantOrder", {}))
return common
@property
def base_name(self) -> str:
"""Base name of the device."""
return self._dev_base
@property
def version(self) -> str:
"""Settings JSON version."""
return self.json_settings["version"]
@property
def general_settings(self) -> t.Dict:
"""Labber configuration file `General settings`-section."""
if self.dev_settings:
return self.dev_settings["generalSettings"]
return self.json_settings["common"]["generalSettings"]
@property
def ignored_nodes(self) -> t.List[str]:
"""List of ignored nodes based on device and selected mode."""
ignored_nodes = []
ignored_common = self.json_settings["common"].get("ignoredNodes", {})
common_norm = ignored_common.get("normal", [])
common_adv = ignored_common.get("advanced", [])
if self._mode == "normal":
ignored_nodes += common_norm + common_adv
else:
ignored_nodes += common_adv
for dev in self._dev_names:
dev_settings = self.json_settings.get(dev, {})
ignored_dev = dev_settings.get("ignoredNodes", {})
dev_norm = ignored_dev.get("normal", [])
dev_adv = ignored_dev.get("advanced", [])
if self._mode == "normal":
ignored_nodes += dev_adv + dev_norm
else:
ignored_nodes += dev_adv
return ignored_nodes
@property
def quants(self) -> t.Dict:
"""Quants based on device type and selected mode."""
return self._quants
@property
def quant_sections(self) -> t.Dict[str, str]:
"""Quant sections based on device type and selected mode."""
common = self.json_settings["common"]["sections"]
for dev in self._dev_names:
dev_settings = self.json_settings.get(dev, {})
dev = dev_settings.get("sections", {})
common.update(dev)
return common
@property
def quant_groups(self) -> t.Dict[str, str]:
"""Quant groups based on device type and selected mode."""
common = self.json_settings["common"]["groups"]
for dev in self._dev_names:
dev_settings = self.json_settings.get(dev, {})
dev = dev_settings.get("groups", {})
common.update(dev)
return common
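# Minimal construction sketch (the settings dict below is a made-up stub;
# the real schema lives in zhinst/labber/resources/settings_json_schema.json):
if __name__ == "__main__":
    stub_settings = {
        "version": "0.1",
        "common": {
            "generalSettings": {},
            "quants": {},
            "quantOrder": {},
            "ignoredNodes": {},
            "sections": {},
            "groups": {},
        },
    }
    conf = LabberConfiguration("DEV1234", "NORMAL", stub_settings)
    print(conf.base_name, conf.version)  # -> DEV 0.1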
| 36.741007
| 83
| 0.572156
|
4a184cd76bb160cd10c71946205b23cbe09dadb7
| 1,339
|
py
|
Python
|
Github/ListNonLicensed.py
|
scivision/pygit-bulk
|
bdbd395c0b478b17c22cb7f1be8bddb0b8ee5c97
|
[
"MIT"
] | 3
|
2021-05-26T00:23:56.000Z
|
2021-09-02T11:47:27.000Z
|
Github/ListNonLicensed.py
|
scivision/gitedu
|
bdbd395c0b478b17c22cb7f1be8bddb0b8ee5c97
|
[
"MIT"
] | null | null | null |
Github/ListNonLicensed.py
|
scivision/gitedu
|
bdbd395c0b478b17c22cb7f1be8bddb0b8ee5c97
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
List all non-licensed repos (public and private,
if Oauth key has "repo" permission)
Requires GitHub Oauth login with permissions
"repo:public_repo" or "repo" for private repos.
"""
import argparse
import github.GithubException
import gitbulk as gb
def main(username: str, oauth: str, stem: str):
# %% authentication
    sess = gb.session(oauth)  # use the function arguments, not the global argparse namespace
    gb.check_api_limit(sess)
    # %% get user / organization handle
    userorg = gb.user_or_org(sess, username)
# %% prepare to loop over repos
repos = gb.get_repos(userorg)
# filter repos
to_act = (
repo
for repo in repos
if repo.name.startswith(stem)
and repo.name != ".github"
and not repo.fork
and not repo.archived
and repo.owner.login == userorg.login
)
for repo in to_act:
try:
repo.get_license()
except github.GithubException:
print(repo.full_name)
if __name__ == "__main__":
p = argparse.ArgumentParser(description="List all non-licensed repos")
p.add_argument("user", help="GitHub username / organizations")
p.add_argument("oauth", help="Oauth filename")
p.add_argument("-stem", help="list repos with name starting with this string", default="")
P = p.parse_args()
main(P.user, P.oauth, P.stem)
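# Example invocation (illustrative names):
#   python ListNonLicensed.py my-org oauth_token.txt -stem assignment
# prints the full name of every matching, non-forked, non-archived repo
# for which GitHub reports no license.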
| 25.75
| 94
| 0.651979
|
4a184cfdf0a755080965cf91834f9f728c1c92c8
| 3,979
|
py
|
Python
|
parlai/tasks/dailydialog/agents.py
|
bokanyibalazs7/ParlAI
|
6640f68a36b9f58fb863fbc50b5fde0d5df4b6a3
|
[
"MIT"
] | null | null | null |
parlai/tasks/dailydialog/agents.py
|
bokanyibalazs7/ParlAI
|
6640f68a36b9f58fb863fbc50b5fde0d5df4b6a3
|
[
"MIT"
] | null | null | null |
parlai/tasks/dailydialog/agents.py
|
bokanyibalazs7/ParlAI
|
6640f68a36b9f58fb863fbc50b5fde0d5df4b6a3
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""
Daily Dialog
https://arxiv.org/abs/1710.03957
Original data is copyright by the owners of the paper, and free for use in research.
Every conversation contains entries with special fields (see the paper):
- emotion
- act_type
- topic
This teacher plays both sides of the conversation, once acting as Speaker 1, and
once acting as Speaker 2.
"""
import os
import json
from parlai.core.teachers import FixedDialogTeacher
from .build import build
START_ENTRY = {
'text': '__SILENCE__',
'emotion': 'no_emotion',
'act': 'no_act',
}
class Convai2Teacher(FixedDialogTeacher):
def __init__(self, opt, shared=None):
super().__init__(opt, shared)
self.opt = opt
if shared:
self.data = shared['data']
else:
build(opt)
fold = opt.get('datatype', 'train').split(':')[0]
self._setup_data(fold)
self.num_exs = sum(len(d['dialogue']) for d in self.data)
# we learn from both sides of every conversation
self.num_eps = 2 * len(self.data)
self.reset()
def num_episodes(self):
return self.num_eps
def num_examples(self):
return self.num_exs
def _setup_data(self, fold):
self.data = []
fpath = os.path.join(self.opt['datapath'], 'dailydialog', fold + '.json')
with open(fpath) as f:
for line in f:
self.data.append(json.loads(line))
def get(self, episode_idx, entry_idx=0):
# Sometimes we're speaker 1 and sometimes we're speaker 2
speaker_id = episode_idx % 2
full_eps = self.data[episode_idx // 2]
entries = [START_ENTRY] + full_eps['dialogue']
their_turn = entries[speaker_id + 2 * entry_idx]
my_turn = entries[1 + speaker_id + 2 * entry_idx]
episode_done = 2 * entry_idx + speaker_id + 1 >= len(full_eps['dialogue']) - 1
action = {
'topic': full_eps['topic'],
'text': their_turn['text'],
'emotion': their_turn['emotion'],
'act_type': their_turn['act'],
'labels': [my_turn['text']],
'episode_done': episode_done,
}
return action
def share(self):
shared = super().share()
shared['data'] = self.data
return shared
class NoStartTeacher(Convai2Teacher):
"""
Same as default teacher, but it doesn't contain __SILENCE__ entries.
If we are the first speaker, then the first utterance is skipped.
"""
def __init__(self, opt, shared=None):
super().__init__(opt, shared)
# Calculate the correct number of examples.
self.num_exs = sum(len(d['dialogue']) - 1 for d in self.data)
# Store all episodes separately, so we can deal with 2-turn dialogs.
self.all_eps = self.data + [d for d in self.data if len(d['dialogue']) > 2]
self.num_eps = len(self.all_eps)
def get(self, episode_idx, entry_idx=0):
full_eps = self.all_eps[episode_idx]
entries = full_eps['dialogue']
# Sometimes we're speaker 1 and sometimes we're speaker 2.
# We can't be speaker 1 if dialog has only 2 turns.
speaker_id = int(episode_idx >= len(self.data))
their_turn = entries[speaker_id + 2 * entry_idx]
my_turn = entries[1 + speaker_id + 2 * entry_idx]
episode_done = 2 * entry_idx + speaker_id + 1 >= len(entries) - 2
action = {
'topic': full_eps['topic'],
'text': their_turn['text'],
'emotion': their_turn['emotion'],
'act_type': their_turn['act'],
'labels': [my_turn['text']],
'episode_done': episode_done,
}
return action
class DefaultTeacher(Convai2Teacher):
pass
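# Worked example of the index arithmetic (illustrative): in Convai2Teacher a
# 4-turn dialogue [a, b, c, d] becomes entries = [__SILENCE__, a, b, c, d]:
#   speaker_id 0, entry_idx 0 -> text = __SILENCE__, label = a
#   speaker_id 0, entry_idx 1 -> text = b,           label = c
#   speaker_id 1, entry_idx 0 -> text = a,           label = b
# so even episode indices play speaker 1 and odd indices play speaker 2 of
# the same underlying conversation.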
| 29.917293
| 86
| 0.608947
|
4a184d557cb36dd8d6cbf1416449a0b2120b7544
| 13,939
|
py
|
Python
|
A3C/network.py
|
Xin-Ye-1/HIEM
|
6764f579eef6ec92dd85a005af27419f630df7da
|
[
"Apache-2.0"
] | 2
|
2021-04-12T02:41:00.000Z
|
2021-05-15T02:18:15.000Z
|
A3C/network.py
|
Xin-Ye-1/HIEM
|
6764f579eef6ec92dd85a005af27419f630df7da
|
[
"Apache-2.0"
] | null | null | null |
A3C/network.py
|
Xin-Ye-1/HIEM
|
6764f579eef6ec92dd85a005af27419f630df7da
|
[
"Apache-2.0"
] | null | null | null |
#! /usr/bin/env python
import tensorflow as tf
import tensorflow.contrib.slim as slim
seed = 0
class Lowlevel_Network():
def __init__(self,
window_size,
num_labels,
action_size,
history_steps,
scope='global'
):
with tf.variable_scope(scope):
self.visions = tf.placeholder(shape=[None, history_steps * window_size * window_size, num_labels],
dtype=tf.float32)
self.depths = tf.placeholder(shape=[None, history_steps * window_size * window_size, 1], dtype=tf.float32)
self.targets = tf.placeholder(shape=[None, num_labels], dtype=tf.float32)
targets_expanded = tf.tile(tf.expand_dims(self.targets, 1),
[1, history_steps * window_size * window_size, 1])
masked_visions = tf.reduce_sum(self.visions * targets_expanded, axis=-1)
masked_visions = slim.flatten(masked_visions)
depths = slim.flatten(self.depths)
hidden_visions = slim.fully_connected(inputs=masked_visions,
num_outputs=256,
activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer(),
biases_initializer=tf.zeros_initializer(),
scope='vision_hidden')
hidden_depths = slim.fully_connected(inputs=depths,
num_outputs=256,
activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer(),
biases_initializer=tf.zeros_initializer(),
scope='depth_hidden')
vision_depth_feature = tf.concat([hidden_visions, hidden_depths], 1)
embed_feature = slim.fully_connected(inputs=vision_depth_feature,
num_outputs=256,
activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer(),
biases_initializer=tf.zeros_initializer(),
scope='embed')
# policy estimation
hidden_policy = slim.fully_connected(inputs=embed_feature,
num_outputs=20,
activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer(),
biases_initializer=tf.zeros_initializer(),
scope='policy_hidden')
self.policy = slim.fully_connected(inputs=hidden_policy,
num_outputs=action_size,
activation_fn=tf.nn.softmax,
weights_initializer=tf.contrib.layers.xavier_initializer(),
biases_initializer=tf.zeros_initializer(),
scope='policy')
# value estimation
hidden_value = slim.fully_connected(inputs=embed_feature,
num_outputs=20,
activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer(),
biases_initializer=tf.zeros_initializer(),
scope='value_hidden')
self.value = slim.fully_connected(inputs=hidden_value,
num_outputs=1,
activation_fn=None,
weights_initializer=tf.contrib.layers.xavier_initializer(),
biases_initializer=tf.zeros_initializer(),
scope='value')
# Lowlevel training
self.chosen_actions = tf.placeholder(shape=[None], dtype=tf.int32)
self.advantages = tf.placeholder(shape=[None], dtype=tf.float32)
self.target_values = tf.placeholder(shape=[None], dtype=tf.float32)
self.lowlevel_lr = tf.placeholder(dtype=tf.float32)
self.er = tf.placeholder(dtype=tf.float32)
actions_onehot = tf.one_hot(self.chosen_actions, action_size, dtype=tf.float32)
log_policy = tf.log(tf.clip_by_value(self.policy, 0.000001, 0.999999))
log_pi_for_action = tf.reduce_sum(tf.multiply(log_policy, actions_onehot), axis=1)
self.value_loss = 0.5 * tf.reduce_mean(tf.square(self.target_values - self.value))
self.policy_loss = -tf.reduce_mean(log_pi_for_action * self.advantages)
self.entropy_loss = -tf.reduce_mean(tf.reduce_sum(self.policy * (-log_policy), axis=1))
self.lowlevel_loss = self.value_loss + self.policy_loss + self.er * self.entropy_loss
local_lowlevel_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
gradients = tf.gradients(self.lowlevel_loss, local_lowlevel_params)
norm_gradients, _ = tf.clip_by_global_norm(gradients, 40.0)
lowlevel_trainer = tf.train.RMSPropOptimizer(learning_rate=self.lowlevel_lr)
global_lowlevel_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'global')
self.lowlevel_update = lowlevel_trainer.apply_gradients(zip(norm_gradients, global_lowlevel_params))
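# Note (descriptive): the loss above is the standard A3C objective -- a
# 0.5-weighted MSE value loss, the policy-gradient term scaled by the
# advantages, and an entropy bonus weighted by the `er` placeholder to
# encourage exploration; gradients are clipped to global norm 40 and applied
# to the shared 'global' parameter copy.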
def fc2d(inputs,
num_outputs,
activation_fn,
scope):
with tf.variable_scope(scope, reuse=tf.AUTO_REUSE) as s:
n0, n1, n2 = inputs.get_shape().as_list()
weights = tf.get_variable(name='weights',
shape=[n2, num_outputs],
initializer=tf.contrib.layers.xavier_initializer(seed=seed),
trainable=True)
wx = tf.einsum('ijk,kl->ijl', inputs, weights)
biases = tf.get_variable(name='biases',
shape=[num_outputs],
initializer=tf.zeros_initializer(),
trainable=True)
wx_b = wx + biases
result = wx_b if activation_fn is None else activation_fn(wx_b, name=s.name)
return result
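# Shape sketch for fc2d (illustrative): one dense layer is shared across the
# middle axis via einsum, e.g. for inputs of shape [batch, positions, n2]
# and num_outputs = m:
#   weights: [n2, m]
#   tf.einsum('ijk,kl->ijl', inputs, weights) -> [batch, positions, m]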
class Lowlevel_Network_full():
def __init__(self,
window_size,
num_labels,
action_size,
history_steps,
scope='global'
):
with tf.variable_scope(scope):
self.visions = tf.placeholder(shape=[None, history_steps * window_size * window_size, num_labels],
dtype=tf.float32)
self.depths = tf.placeholder(shape=[None, history_steps * window_size * window_size, 1],
dtype=tf.float32)
self.targets = tf.placeholder(shape=[None, num_labels], dtype=tf.float32)
related_visions = fc2d(inputs=self.visions,
num_outputs=1,
activation_fn=None,
scope='vision_preprocess')
related_visions = slim.flatten(related_visions)
depths = slim.flatten(self.depths)
hidden_visions = slim.fully_connected(inputs=related_visions,
num_outputs=256,
activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer(
seed=seed),
biases_initializer=tf.zeros_initializer(),
scope='vision_hidden')
hidden_depths = slim.fully_connected(inputs=depths,
num_outputs=256,
activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer(
seed=seed),
biases_initializer=tf.zeros_initializer(),
scope='depth_hidden')
hidden_targets = slim.fully_connected(inputs=self.targets,
num_outputs=256,
activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer(
seed=seed),
biases_initializer=tf.zeros_initializer(),
scope='target_hidden')
vision_depth_feature = tf.concat([hidden_visions, hidden_depths, hidden_targets], -1)
embed_feature = slim.fully_connected(inputs=vision_depth_feature,
num_outputs=256,
activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
biases_initializer=tf.zeros_initializer(),
scope='embed')
# policy estimation
hidden_policy = slim.fully_connected(inputs=embed_feature,
num_outputs=20,
activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
biases_initializer=tf.zeros_initializer(),
scope='policy_hidden')
self.policy = slim.fully_connected(inputs=hidden_policy,
num_outputs=action_size,
activation_fn=tf.nn.softmax,
weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
biases_initializer=tf.zeros_initializer(),
scope='policy')
# value estimation
hidden_value = slim.fully_connected(inputs=embed_feature,
num_outputs=20,
activation_fn=tf.nn.relu,
weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
biases_initializer=tf.zeros_initializer(),
scope='value_hidden')
self.value = slim.fully_connected(inputs=hidden_value,
num_outputs=1,
activation_fn=None,
weights_initializer=tf.contrib.layers.xavier_initializer(seed=seed),
biases_initializer=tf.zeros_initializer(),
scope='value')
# Lowlevel training
if not scope.startswith('global'):
self.chosen_actions = tf.placeholder(shape=[None], dtype=tf.int32)
self.advantages = tf.placeholder(shape=[None], dtype=tf.float32)
self.target_values = tf.placeholder(shape=[None], dtype=tf.float32)
self.lowlevel_lr = tf.placeholder(dtype=tf.float32)
self.er = tf.placeholder(dtype=tf.float32)
actions_onehot = tf.one_hot(self.chosen_actions, action_size, dtype=tf.float32)
log_policy = tf.log(tf.clip_by_value(self.policy, 0.000001, 0.999999))
log_pi_for_action = tf.reduce_sum(tf.multiply(log_policy, actions_onehot), axis=1)
self.value_loss = 0.5 * tf.reduce_mean(tf.square(self.target_values - self.value))
self.policy_loss = -tf.reduce_mean(log_pi_for_action * self.advantages)
self.entropy_loss = -tf.reduce_mean(tf.reduce_sum(self.policy * (-log_policy), axis=1))
self.lowlevel_loss = self.value_loss + self.policy_loss + self.er * self.entropy_loss
local_lowlevel_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, scope)
gradients = tf.gradients(self.lowlevel_loss, local_lowlevel_params)
norm_gradients, _ = tf.clip_by_global_norm(gradients, 40.0)
lowlevel_trainer = tf.train.RMSPropOptimizer(learning_rate=self.lowlevel_lr)
global_lowlevel_params = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'global')
self.lowlevel_update = lowlevel_trainer.apply_gradients(zip(norm_gradients, global_lowlevel_params))
| 52.402256
| 118
| 0.493794
|
4a184f4dcf6dbf79b514b2e166fba6f027bbe1ea
| 5,412
|
py
|
Python
|
examples/exampleFunctions.py
|
adigitalmonk/pyzerto-unofficial
|
bbe8922b00f89edf7b9b84877178fd26acd67229
|
[
"Apache-2.0"
] | 1
|
2020-12-05T00:03:24.000Z
|
2020-12-05T00:03:24.000Z
|
examples/exampleFunctions.py
|
adigitalmonk/pyzerto-unofficial
|
bbe8922b00f89edf7b9b84877178fd26acd67229
|
[
"Apache-2.0"
] | 1
|
2020-12-06T03:27:47.000Z
|
2020-12-06T03:27:47.000Z
|
examples/exampleFunctions.py
|
adigitalmonk/pyzerto-unofficial
|
bbe8922b00f89edf7b9b84877178fd26acd67229
|
[
"Apache-2.0"
] | 3
|
2020-12-05T00:51:44.000Z
|
2021-02-17T19:39:55.000Z
|
import json
import vmware.vapi.vsphere.client
import requests
import urllib3
import zerto_auth, zvm, vpg, vra
import time
from secrets import zvm_ip, zvm_u, zvm_p, zca_u, zerto_license, zerto_tag
testVraDict = {
"DatastoreIdentifier":"0ad85e47-6b7d-4a95-a60d-be3d79308223.datastore-10",
"HostIdentifier":"0ad85e47-6b7d-4a95-a60d-be3d79308223.host-9",
"HostRootPassword":zvm_p,
"MemoryInGb":3,
"NumOfCpus":1,
"NetworkIdentifier":"0ad85e47-6b7d-4a95-a60d-be3d79308223.network-11",
"UsePublicKeyInsteadOfCredentials":False,
"PopulatePostInstallation":False,
"VraNetworkDataApi":{
"DefaultGateway":"192.168.1.72",
"SubnetMask":"255.255.255.0",
"VraIPAddress":"192.168.1.80",
"VraIPConfigurationTypeApi":"Static"
}
}
wf2 = open('automatedVpgSkel.json')
workingOpen = json.load(wf2)
wf2.close()
def constructTagProtectedVpg(tagname):
vCenterUUID = '0ad85e47-6b7d-4a95-a60d-be3d79308223'
session = requests.session()
session.verify = False
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
vsphere_client = vmware.vapi.vsphere.client.create_vsphere_client(server="192.168.1.41", username=zvm_u,
password=zvm_p, session=session)
protectedTag = tagname
vmlist = vsphere_client.vcenter.VM.list()
taglist = vsphere_client.tagging.Tag.list()
friendlyTagList = []
curatedTagList = []
for i in range(len(taglist)):
friendlyTagList.append(vsphere_client.tagging.Tag.get(taglist[i]))
for i in range(len(friendlyTagList)):
if friendlyTagList[i].name == protectedTag:
curatedTagList.append(friendlyTagList[i].id)
toBeProtectedVMs = vsphere_client.tagging.TagAssociation.list_attached_objects_on_tags(curatedTagList)
for i in range(len(toBeProtectedVMs[0].object_ids)):
print(toBeProtectedVMs[0].object_ids[i].id)
workingDict = {"VmIdentifier": str(vCenterUUID+'.' + toBeProtectedVMs[0].object_ids[i].id)}
workingOpen['Vms'].append(workingDict)
payload2 = json.dumps(workingOpen)
return payload2
#return v.createNewVpgSettingsObject(payload2).json()
def multiPlatformDemo():
terraformWf = open('terraformvariables.json') # Grab ZCA specific information from 'terraform output -json'
terraformVars = json.load(terraformWf)
terraformWf.close()
ipOfZca = terraformVars.get('SecondZCAprivateIPaddress').get('value')
pwOfZca = terraformVars.get('ZCADecrypted_Password').get('value')
vczvmSession = zerto_auth.login(zvm_ip, zvm_u, zvm_p)
awszcaSession = zerto_auth.login(ipOfZca, zca_u, pwOfZca)
z = zvm.zvm(zvm_ip, vczvmSession)
z.addLicense(zerto_license)
v = vpg.vpgSettings(zvm_ip, vczvmSession)
zvmvra = vra.vra(zvm_ip, vczvmSession)
zvmvra.installVRA(json.dumps(testVraDict))
zca = zvm.zvm(ipOfZca, awszcaSession)
zca.addLicense(zerto_license)
zcaTokenObject = zca.generatePeeringToken()
zcaTokenActual = zcaTokenObject.json().get('Token')
testOutput = requests.post('https://' + zvm_ip +':9669/v1/peersites', headers=vczvmSession, data=json.dumps(
{"HostName": ipOfZca, "Port":"9071", "Token":zcaTokenActual}), verify=False)
time.sleep(100)
workingOpen['Basic']['ProtectedSiteIdentifier'] = z.getLocalSiteInfo().json()['SiteIdentifier']
workingOpen['Basic']['RecoverySiteIdentifier'] = z.getLocalSiteInfo().json()['SiteIdentifier']
tagYoureIt=v.createNewVpgSettingsObject(constructTagProtectedVpg(zerto_tag)).json()
#tagYoureIt=json.loads(constructTagProtectedVpg(zerto_tag))
v.commitSettingsObject(tagYoureIt)
def newMultiPlatformDemo():
terraformWf = open('terraformvariables.json') # Grab ZCA specific information from 'terraform output -json'
terraformVars = json.load(terraformWf)
terraformWf.close()
ipOfZca = terraformVars.get('SecondZCAprivateIPaddress').get('value')
pwOfZca = terraformVars.get('ZCADecrypted_Password').get('value')
vczvmSession = zerto_auth.login(zvm_ip, zvm_u, zvm_p)
awszcaSession = zerto_auth.login(ipOfZca, zca_u, pwOfZca)
z = zvm.zvm(zvm_ip, vczvmSession)
z.addLicense(zerto_license)
v = vpg.vpgSettings(zvm_ip, vczvmSession)
zvmvra = vra.vra(zvm_ip, vczvmSession)
zvmvra.installVRA(json.dumps(testVraDict))
zca = zvm.zvm(ipOfZca, awszcaSession)
zca.addLicense(zerto_license)
zcaTokenObject = zca.generatePeeringToken()
zcaTokenActual = zcaTokenObject.json().get('Token')
testOutput = requests.post('https://' + zvm_ip +':9669/v1/peersites', headers=vczvmSession, data=json.dumps(
{"HostName": ipOfZca, "Port":"9071", "Token":zcaTokenActual}), verify=False)
time.sleep(100)
workingOpen['Basic']['ProtectedSiteIdentifier'] = z.getLocalSiteInfo().json()['SiteIdentifier']
workingOpen['Basic']['RecoverySiteIdentifier'] = z.getLocalSiteInfo().json()['SiteIdentifier']
tagYoureIt=v.createNewVpgSettingsObject(constructTagProtectedVpg(zerto_tag)).json()
#tagYoureIt=json.loads(constructTagProtectedVpg(zerto_tag))
v.commitSettingsObject(tagYoureIt)
multiPlatformDemo()
| 45.478992
| 112
| 0.691611
|
4a184fae206240cb7b674b4fd48bb420e401af33
| 119,042
|
py
|
Python
|
modules/s3/s3filter.py
|
sungkomp/sambro
|
4618d785d03424d122206d88d9ebfb6971486e2c
|
[
"MIT"
] | 1
|
2017-10-06T23:18:01.000Z
|
2017-10-06T23:18:01.000Z
|
modules/s3/s3filter.py
|
sungkomp/sambro
|
4618d785d03424d122206d88d9ebfb6971486e2c
|
[
"MIT"
] | null | null | null |
modules/s3/s3filter.py
|
sungkomp/sambro
|
4618d785d03424d122206d88d9ebfb6971486e2c
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
""" Framework for filtered REST requests
@copyright: 2013-2016 (c) Sahana Software Foundation
@license: MIT
@requires: U{B{I{gluon}} <http://web2py.com>}
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ("S3DateFilter",
"S3Filter",
"S3FilterForm",
"S3FilterString",
"S3FilterWidget",
"S3HierarchyFilter",
"S3LocationFilter",
"S3OptionsFilter",
"S3RangeFilter",
"S3SliderFilter",
"S3TextFilter",
"s3_get_filter_opts",
)
import datetime
import json
import re
from collections import OrderedDict
from gluon import *
from gluon.storage import Storage
from gluon.tools import callback
from s3datetime import s3_decode_iso_datetime, S3DateTime
from s3query import FS, S3ResourceField, S3ResourceQuery, S3URLQuery
from s3rest import S3Method
from s3utils import s3_get_foreign_key, s3_unicode, S3TypeConverter
from s3validators import *
from s3widgets import ICON, \
S3CalendarWidget, \
S3DateWidget, \
S3DateTimeWidget, \
S3GroupedOptionsWidget, \
S3MultiSelectWidget, \
S3HierarchyWidget
# Compact JSON encoding
SEPARATORS = (",", ":")
# =============================================================================
class S3FilterWidget(object):
""" Filter widget for interactive search forms (base class) """
#: the HTML class for the widget type
_class = "generic-filter"
#: the default query operator(s) for the widget type
operator = None
#: alternatives for client-side changeable operators
alternatives = None
# -------------------------------------------------------------------------
def widget(self, resource, values):
"""
Prototype method to render this widget as an instance of
a web2py HTML helper class, to be implemented by subclasses.
@param resource: the S3Resource to render with widget for
@param values: the values for this widget from the URL query
"""
raise NotImplementedError
# -------------------------------------------------------------------------
def variable(self, resource, get_vars=None):
"""
Prototype method to generate the name for the URL query variable
for this widget, can be overwritten in subclasses.
            @param resource: the resource
            @param get_vars: the GET vars (URL query variables)
@return: the URL query variable name (or list of
variable names if there are multiple operators)
"""
opts = self.opts
if "selector" in opts:
# Override selector
label, selector = None, opts["selector"]
else:
label, selector = self._selector(resource, self.field)
self.selector = selector
if not selector:
return None
if self.alternatives and get_vars is not None:
# Get the actual operator from get_vars
operator = self._operator(get_vars, selector)
if operator:
self.operator = operator
if "label" not in self.opts:
self.opts["label"] = label
return self._variable(selector, self.operator)
# -------------------------------------------------------------------------
def data_element(self, variable):
"""
Prototype method to construct the hidden element that holds the
URL query term corresponding to an input element in the widget.
@param variable: the URL query variable
"""
if type(variable) is list:
variable = "&".join(variable)
return INPUT(_type="hidden",
_id="%s-data" % self.attr["_id"],
_class="filter-widget-data %s-data" % self._class,
_value=variable)
# -------------------------------------------------------------------------
# Helper methods
#
def __init__(self, field=None, **attr):
"""
Constructor to configure the widget
@param field: the selector(s) for the field(s) to filter by
@param attr: configuration options for this widget
Common configuration options:
@keyword label: label for the widget
@keyword comment: comment for the widget
@keyword hidden: render widget initially hidden
(="advanced" option)
- other options see subclasses
"""
self.field = field
self.alias = None
attributes = Storage()
options = Storage()
for k, v in attr.iteritems():
if k[0] == "_":
attributes[k] = v
else:
options[k] = v
self.attr = attributes
self.opts = options
self.selector = None
self.values = Storage()
# -------------------------------------------------------------------------
def __call__(self, resource, get_vars=None, alias=None):
"""
Entry point for the form builder
@param resource: the S3Resource to render the widget for
@param get_vars: the GET vars (URL query vars) to prepopulate
the widget
@param alias: the resource alias to use
"""
self.alias = alias
# Initialize the widget attributes
self._attr(resource)
# Extract the URL values to populate the widget
variable = self.variable(resource, get_vars)
defaults = {}
for k, v in self.values.items():
selector = self._prefix(k)
defaults[selector] = v
if type(variable) is list:
values = Storage()
for k in variable:
if k in defaults:
values[k] = defaults[k]
else:
values[k] = self._values(get_vars, k)
else:
if variable in defaults:
values = defaults[variable]
else:
values = self._values(get_vars, variable)
# Construct and populate the widget
widget = self.widget(resource, values)
# Recompute variable in case operator got changed in widget()
if self.alternatives:
variable = self._variable(self.selector, self.operator)
# Construct the hidden data element
data = self.data_element(variable)
if type(data) is list:
data.append(widget)
else:
data = [data, widget]
return TAG[""](*data)
# -------------------------------------------------------------------------
def _attr(self, resource):
""" Initialize and return the HTML attributes for this widget """
_class = self._class
# Construct name and id for the widget
attr = self.attr
if "_name" not in attr:
if not resource:
raise SyntaxError("%s: _name parameter required " \
"when rendered without resource." % \
self.__class__.__name__)
flist = self.field
if not isinstance(flist, (list, tuple)):
flist = [flist]
colnames = []
for f in flist:
rfield = S3ResourceField(resource, f)
colname = rfield.colname
if colname:
colnames.append(colname)
else:
colnames.append(rfield.fname)
name = "%s-%s-%s" % (resource.alias, "-".join(colnames), _class)
attr["_name"] = name.replace(".", "_")
if "_id" not in attr:
attr["_id"] = attr["_name"]
return attr
# -------------------------------------------------------------------------
@classmethod
def _operator(cls, get_vars, selector):
"""
Helper method to get the operators from the URL query
@param get_vars: the GET vars (a dict)
@param selector: field selector
@return: query operator - None, str or list
"""
variables = ["%s__%s" % (selector, op) for op in cls.alternatives]
slen = len(selector) + 2
operators = [k[slen:] for k, v in get_vars.iteritems()
if k in variables]
if not operators:
return None
elif len(operators) == 1:
return operators[0]
else:
return operators
# -------------------------------------------------------------------------
def _prefix(self, selector):
"""
Helper method to prefix an unprefixed field selector
            @param selector: the field selector
            @return: the prefixed selector (the prefix is taken
                     from self.alias)
"""
alias = self.alias
items = selector.split("$", 0)
head = items[0]
if "." in head:
if alias not in (None, "~"):
prefix, key = head.split(".", 1)
if prefix == "~":
prefix = alias
elif prefix != alias:
prefix = "%s.%s" % (alias, prefix)
items[0] = "%s.%s" % (prefix, key)
selector = "$".join(items)
else:
if alias is None:
alias = "~"
selector = "%s.%s" % (alias, selector)
return selector
# -------------------------------------------------------------------------
def _selector(self, resource, fields):
"""
Helper method to generate a filter query selector for the
given field(s) in the given resource.
@param resource: the S3Resource
@param fields: the field selectors (as strings)
@return: the field label and the filter query selector, or None
if none of the field selectors could be resolved
"""
prefix = self._prefix
label = None
if not fields:
return label, None
if not isinstance(fields, (list, tuple)):
fields = [fields]
selectors = []
for field in fields:
if resource:
try:
rfield = S3ResourceField(resource, field)
except (AttributeError, TypeError):
continue
if not rfield.field and not rfield.virtual:
# Unresolvable selector
continue
if not label:
label = rfield.label
selectors.append(prefix(rfield.selector))
else:
selectors.append(field)
if selectors:
return label, "|".join(selectors)
else:
return label, None
# -------------------------------------------------------------------------
@staticmethod
def _values(get_vars, variable):
"""
Helper method to get all values of a URL query variable
@param get_vars: the GET vars (a dict)
@param variable: the name of the query variable
@return: a list of values
"""
if not variable:
return []
elif variable in get_vars:
values = S3URLQuery.parse_value(get_vars[variable])
if not isinstance(values, (list, tuple)):
values = [values]
return values
else:
return []
# -------------------------------------------------------------------------
@classmethod
def _variable(cls, selector, operator):
"""
Construct URL query variable(s) name from a filter query
selector and the given operator(s)
@param selector: the selector
@param operator: the operator (or tuple/list of operators)
@return: the URL query variable name (or list of variable names)
"""
if isinstance(operator, (tuple, list)):
return [cls._variable(selector, o) for o in operator]
elif operator:
return "%s__%s" % (selector, operator)
else:
return selector
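        # Illustrative mappings:
        #   _variable("~.name", "like")       -> "~.name__like"
        #   _variable("~.date", ["ge", "le"]) -> ["~.date__ge", "~.date__le"]
        #   _variable("~.name", None)         -> "~.name"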
# =============================================================================
class S3TextFilter(S3FilterWidget):
"""
Text filter widget
Configuration options:
@keyword label: label for the widget
@keyword comment: comment for the widget
@keyword hidden: render widget initially hidden (="advanced" option)
@keyword match_any: match any of the strings
"""
_class = "text-filter"
operator = "like"
# -------------------------------------------------------------------------
def widget(self, resource, values):
"""
Render this widget as HTML helper object(s)
@param resource: the resource
@param values: the search values from the URL query
"""
attr = self.attr
if "_size" not in attr:
attr.update(_size="40")
if "_class" in attr and attr["_class"]:
_class = "%s %s" % (attr["_class"], self._class)
else:
_class = self._class
attr["_class"] = _class
attr["_type"] = "text"
# Match any or all of the strings entered?
data = attr.get("data", {})
data["match"] = "any" if self.opts.get("match_any") else "all"
attr["data"] = data
values = [v.strip("*") for v in values if v is not None]
if values:
attr["_value"] = " ".join(values)
return INPUT(**attr)
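    # Example of the resulting URL query term (illustrative field names): a
    # widget built as S3TextFilter(["first_name", "last_name"]) filters with
    # a single variable such as
    #   ~.first_name|~.last_name__like=*smith*
    # i.e. a substring match over either field.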
# =============================================================================
class S3RangeFilter(S3FilterWidget):
"""
Numerical Range Filter Widget
Configuration options:
@keyword label: label for the widget
@keyword comment: comment for the widget
@keyword hidden: render widget initially hidden (="advanced" option)
"""
# Overall class
_class = "range-filter"
# Class for visible input boxes.
_input_class = "%s-%s" % (_class, "input")
operator = ["ge", "le"]
# Untranslated labels for individual input boxes.
input_labels = {"ge": "Minimum", "le": "Maximum"}
# -------------------------------------------------------------------------
def data_element(self, variables):
"""
Overrides S3FilterWidget.data_element(), constructs multiple
hidden INPUTs (one per variable) with element IDs of the form
<id>-<operator>-data (where no operator is translated as "eq").
@param variables: the variables
"""
if variables is None:
operators = self.operator
if type(operators) is not list:
operators = [operators]
variables = self._variable(self.selector, operators)
else:
# Split the operators off the ends of the variables.
if type(variables) is not list:
variables = [variables]
operators = [v.split("__")[1]
if "__" in v else "eq"
for v in variables]
elements = []
id = self.attr["_id"]
for o, v in zip(operators, variables):
elements.append(
INPUT(_type="hidden",
_id="%s-%s-data" % (id, o),
_class="filter-widget-data %s-data" % self._class,
_value=v))
return elements
# -------------------------------------------------------------------------
def widget(self, resource, values):
"""
Render this widget as HTML helper object(s)
@param resource: the resource
@param values: the search values from the URL query
"""
attr = self.attr
_class = self._class
if "_class" in attr and attr["_class"]:
_class = "%s %s" % (attr["_class"], _class)
else:
_class = _class
attr["_class"] = _class
input_class = self._input_class
input_labels = self.input_labels
input_elements = DIV()
ie_append = input_elements.append
_id = attr["_id"]
_variable = self._variable
selector = self.selector
for operator in self.operator:
input_id = "%s-%s" % (_id, operator)
input_box = INPUT(_name=input_id,
_id=input_id,
_type="text",
_class=input_class)
variable = _variable(selector, operator)
# Populate with the value, if given
# if user has not set any of the limits, we get [] in values.
value = values.get(variable, None)
if value not in [None, []]:
if type(value) is list:
value = value[0]
input_box["_value"] = value
input_box["value"] = value
ie_append(DIV(
DIV(LABEL(current.T(input_labels[operator] + ":"),
_for=input_id),
_class="range-filter-label"),
DIV(input_box,
_class="range-filter-widget"),
_class="range-filter-field"))
return input_elements
# =============================================================================
class S3DateFilter(S3RangeFilter):
"""
Date Range Filter Widget
Configuration options:
@keyword label: label for the widget
@keyword comment: comment for the widget
@keyword hidden: render widget initially hidden (="advanced" option)
@keyword fieldtype: explicit field type "date" or "datetime" to
use for context or virtual fields
@keyword hide_time: don't show time selector
"""
_class = "date-filter"
# Class for visible input boxes.
_input_class = "%s-%s" % (_class, "input")
operator = ["ge", "le"]
# Untranslated labels for individual input boxes.
input_labels = {"ge": "From", "le": "To"}
# -------------------------------------------------------------------------
def widget(self, resource, values):
"""
Render this widget as HTML helper object(s)
@param resource: the resource
@param values: the search values from the URL query
"""
attr = self.attr
# CSS class and element ID
_class = self._class
if "_class" in attr and attr["_class"]:
_class = "%s %s" % (attr["_class"], _class)
else:
_class = _class
_id = attr["_id"]
# Determine the field type
if resource:
rfield = S3ResourceField(resource, self.field)
field = rfield.field
else:
rfield = field = None
if not field:
if not rfield or rfield.virtual:
ftype = self.opts.get("fieldtype", "datetime")
else:
# Unresolvable selector
return ""
else:
ftype = rfield.ftype
# S3CalendarWidget requires a Field
if not field:
if rfield:
tname, fname = rfield.tname, rfield.fname
else:
tname, fname = "notable", "datetime"
if not _id:
raise SyntaxError("%s: _id parameter required " \
"when rendered without resource." % \
self.__class__.__name__)
field = Field(fname, ftype, requires = IS_UTC_DATE())
field.tablename = field._tablename = tname
# Classes and labels for the individual date/time inputs
T = current.T
input_class = self._input_class
input_labels = self.input_labels
# Picker options
hide_time = self.opts.get("hide_time", False)
# Generate the input elements
filter_widget = DIV(_id=_id, _class=_class)
append = filter_widget.append
selector = self.selector
get_variable = self._variable
for operator in self.operator:
input_id = "%s-%s" % (_id, operator)
# Do we want a timepicker?
timepicker = False if ftype == "date" or hide_time else True
# Make the two inputs constrain each other
set_min = set_max = None
if operator == "ge":
set_min = "#%s-%s" % (_id, "le")
elif operator == "le":
set_max = "#%s-%s" % (_id, "ge")
# Instantiate the widget
widget = S3CalendarWidget(timepicker = timepicker,
set_min = set_min,
set_max = set_max,
)
# Populate with the value, if given
# if user has not set any of the limits, we get [] in values.
value = values.get(get_variable(selector, operator))
if value in (None, []):
value = None
elif type(value) is list:
value = value[0]
# Widget expects a string in local calendar and format
if isinstance(value, basestring):
# URL filter or filter default come as string in
# Gregorian calendar and ISO format => convert into
# a datetime
dt = s3_decode_iso_datetime(value)
else:
# Assume datetime
dt = value
if dt:
if timepicker:
dtstr = S3DateTime.datetime_represent(dt, utc=False)
else:
dtstr = S3DateTime.date_represent(dt, utc=False)
else:
dtstr = None
# Render the widget
picker = widget(field,
dtstr,
_class = input_class,
_id = input_id,
_name = input_id,
)
if operator in input_labels:
label = DIV(LABEL("%s:" % T(input_labels[operator]),
_for=input_id,
),
_class="range-filter-label",
)
else:
label = ""
# Append label and widget
append(DIV(label,
DIV(picker,
_class="range-filter-widget",
),
_class="range-filter-field",
))
return filter_widget
# =============================================================================
class S3SliderFilter(S3RangeFilter):
"""
Filter widget for Ranges which is controlled by a Slider instead of
INPUTs
Configuration options:
@keyword label: label for the widget
@keyword comment: comment for the widget
@keyword hidden: render widget initially hidden (="advanced" option)
"""
_class = "slider-filter"
operator = ["ge", "le"]
# -------------------------------------------------------------------------
def widget(self, resource, values):
"""
Render this widget as HTML helper object(s)
@param resource: the resource
@param values: the search values from the URL query
"""
attr = self.attr
# CSS class and element ID
_class = self._class
if "_class" in attr and attr["_class"]:
_class = "%s %s" % (attr["_class"], _class)
else:
_class = _class
attr["_class"] = _class
_id = attr["_id"]
# Determine the field type
if resource:
rfield = S3ResourceField(resource, self.field)
field = rfield.field
else:
field = None
if not field:
# Unresolvable selector
return ""
        # Options
        step = self.opts.get("step", 1)
        _type = self.opts.get("type", "int")
        # Generate the input elements
        fieldname = str(field).replace(".", "_")
        selector = self.selector
        _variable = self._variable
        inputs = []
        for operator in self.operator:
            input_id = "%s-%s" % (_id, operator)
            input = INPUT(_name = input_id,
                          _disabled = True,
                          _id = input_id,
                          _style = "border:0",
                          )
            # Populate with the value, if given
            # if user has not set any of the limits, we get [] in values.
            variable = _variable(selector, operator)
            value = values.get(variable, None)
            if value not in [None, []]:
                if isinstance(value, list):
                    value = value[0]
                input["_value"] = value
                input["value"] = value
            inputs.append(input)
        slider = DIV(_id="%s_slider" % fieldname, _class=attr["_class"])
        s3 = current.response.s3
        validator = field.requires
        if isinstance(validator, IS_EMPTY_OR):
            validator = validator.other
        _min = validator.minimum
        # Max Value depends upon validator type
        if isinstance(validator, IS_INT_IN_RANGE):
            _max = validator.maximum - 1
        elif isinstance(validator, IS_FLOAT_IN_RANGE):
            _max = validator.maximum
        if not values:
            # JSONify
            values = "null"
        script = '''i18n.slider_help="%s"''' % \
                 current.T("Click on the slider to choose a value")
        s3.js_global.append(script)
        if _type == "int":
            script = '''S3.range_slider('%s',%i,%i,%i,%s)''' % (fieldname,
                                                                _min,
                                                                _max,
                                                                step,
                                                                values)
        else:
            # Float
            script = '''S3.range_slider('%s',%f,%f,%f,%s)''' % (fieldname,
                                                                _min,
                                                                _max,
                                                                step,
                                                                values)
        s3.jquery_ready.append(script)
        return TAG[""](*(inputs + [slider]))
# =============================================================================
class S3LocationFilter(S3FilterWidget):
"""
Hierarchical Location Filter Widget
NB This will show records linked to all child locations of the Lx
Configuration options:
@keyword label: label for the widget
@keyword comment: comment for the widget
@keyword hidden: render widget initially hidden (="advanced" option)
@keyword levels: list of location hierarchy levels
@keyword filter: show filter for options (with "multiselect" widget)
@keyword header: show header in widget (with "multiselect" widget)
@keyword selectedList: number of selected items to show before
collapsing into number of items
(with "multiselect" widget)
@keyword no_opts: text to show if no options available
@keyword resource: alternative resource to look up options
@keyword lookup: field in the alternative resource to look up
@keyword options: fixed set of options (list of gis_location IDs)
"""
_class = "location-filter"
operator = "belongs"
# -------------------------------------------------------------------------
def __init__(self, field=None, **attr):
"""
Constructor to configure the widget
@param field: the selector(s) for the field(s) to filter by
@param attr: configuration options for this widget
"""
if not field:
field = "location_id"
# Translate options using gis_location_name?
settings = current.deployment_settings
translate = settings.get_L10n_translate_gis_location()
if translate:
language = current.session.s3.language
#if language == settings.get_L10n_default_language():
if language == "en": # Can have a default language for system & yet still want to translate from base English
translate = False
self.translate = translate
super(S3LocationFilter, self).__init__(field=field, **attr)
# -------------------------------------------------------------------------
def widget(self, resource, values):
"""
Render this widget as HTML helper object(s)
@param resource: the resource
@param values: the search values from the URL query
"""
attr = self._attr(resource)
opts = self.opts
name = attr["_name"]
ftype, levels, noopt = self._options(resource, values=values)
if noopt:
return SPAN(noopt, _class="no-options-available")
# Filter class (default+custom)
_class = self._class
if "_class" in attr and attr["_class"]:
_class = "%s %s" % (_class, attr["_class"])
attr["_class"] = _class
# Store id and name for the data element
base_id = attr["_id"]
base_name = attr["_name"]
widgets = []
w_append = widgets.append
operator = self.operator
field_name = self.field
fname = self._prefix(field_name) if resource else field_name
#widget_type = opts["widget"]
# Use groupedopts widget if we specify cols, otherwise assume multiselect
cols = opts.get("cols", None)
if cols:
# Grouped Checkboxes
# @ToDo: somehow working, but ugly, not usable (deprecated?)
if "groupedopts-filter-widget" not in _class:
attr["_class"] = "%s groupedopts-filter-widget" % _class
attr["cols"] = cols
# Add one widget per level
for level in levels:
options = levels[level]["options"]
groupedopts = S3GroupedOptionsWidget(cols = cols,
size = opts["size"] or 12,
)
# Dummy field
name = "%s-%s" % (base_name, level)
dummy_field = Storage(name=name,
type=ftype,
requires=IS_IN_SET(options,
multiple=True))
# Unique ID/name
attr["_id"] = "%s-%s" % (base_id, level)
attr["_name"] = name
# Find relevant values to pre-populate
_values = values.get("%s$%s__%s" % (fname, level, operator))
w_append(groupedopts(dummy_field, _values, **attr))
else:
# Multiselect is default
T = current.T
# Multiselect Dropdown with Checkboxes
if "multiselect-filter-widget" not in _class:
_class = "%s multiselect-filter-widget" % _class
header_opt = opts.get("header", False)
if header_opt is False or header_opt is True:
setting = current.deployment_settings \
.get_ui_location_filter_bulk_select_option()
if setting is not None:
header_opt = setting
# Add one widget per level
first = True
hide = True
s3 = current.response.s3
for level in levels:
# Dummy field
name = "%s-%s" % (base_name, level)
# Unique ID/name
attr["_id"] = "%s-%s" % (base_id, level)
attr["_name"] = name
# Find relevant values to pre-populate the widget
_values = values.get("%s$%s__%s" % (fname, level, operator))
w = S3MultiSelectWidget(filter = opts.get("filter", "auto"),
header = header_opt,
selectedList = opts.get("selectedList", 3),
noneSelectedText = T("Select %(location)s") % \
dict(location=levels[level]["label"]))
if first:
# Visible Multiselect Widget added to the page
attr["_class"] = _class
options = levels[level]["options"]
dummy_field = Storage(name=name,
type=ftype,
requires=IS_IN_SET(options,
multiple=True))
widget = w(dummy_field, _values, **attr)
else:
# Hidden, empty dropdown added to the page, whose options and multiselect will be activated when the higher level is selected
if hide:
_class = "%s hide" % _class
attr["_class"] = _class
hide = False
# Store the current jquery_ready
jquery_ready = s3.jquery_ready
# Build the widget with the MultiSelect activation script
s3.jquery_ready = []
dummy_field = Storage(name=name,
type=ftype,
requires=IS_IN_SET([],
multiple=True))
widget = w(dummy_field, _values, **attr)
# Extract the MultiSelect activation script
script = s3.jquery_ready[0]
# Restore jquery_ready
s3.jquery_ready = jquery_ready
# Wrap the script & reinsert
script = '''S3.%s=function(){%s}''' % (name.replace("-", "_"), script)
s3.js_global.append(script)
w_append(widget)
first = False
# Restore id and name for the data_element
attr["_id"] = base_id
attr["_name"] = base_name
# Render the filter widget
return TAG[""](*widgets)
# -------------------------------------------------------------------------
def data_element(self, variable):
"""
Construct the hidden element that holds the
URL query term corresponding to an input element in the widget.
@param variable: the URL query variable
"""
output = []
oappend = output.append
i = 0
for level in self.levels:
widget = INPUT(_type="hidden",
_id="%s-%s-data" % (self.attr["_id"], level),
_class="filter-widget-data %s-data" % self._class,
_value=variable[i])
oappend(widget)
i += 1
return output
# -------------------------------------------------------------------------
def ajax_options(self, resource):
attr = self._attr(resource)
ftype, levels, noopt = self._options(resource, inject_hierarchy=False)
opts = {}
base_id = attr["_id"]
for level in levels:
if noopt:
opts["%s-%s" % (base_id, level)] = str(noopt)
else:
options = levels[level]["options"]
opts["%s-%s" % (base_id, level)] = options
return opts
# -------------------------------------------------------------------------
@staticmethod
def __options(row, levels, inject_hierarchy, hierarchy, _level, translate, name_l10n):
if inject_hierarchy:
parent = None
grandparent = None
greatgrandparent = None
greatgreatgrandparent = None
greatgreatgreatgrandparent = None
i = 0
for level in levels:
v = row[level]
if v:
o = levels[level]["options"]
if v not in o:
if translate:
o[v] = name_l10n.get(v, v)
else:
o.append(v)
if inject_hierarchy:
if i == 0:
h = hierarchy[_level]
if v not in h:
h[v] = {}
parent = v
elif i == 1:
h = hierarchy[_level][parent]
if v not in h:
h[v] = {}
grandparent = parent
parent = v
elif i == 2:
h = hierarchy[_level][grandparent][parent]
if v not in h:
h[v] = {}
greatgrandparent = grandparent
grandparent = parent
parent = v
elif i == 3:
h = hierarchy[_level][greatgrandparent][grandparent][parent]
if v not in h:
h[v] = {}
greatgreatgrandparent = greatgrandparent
greatgrandparent = grandparent
grandparent = parent
parent = v
elif i == 4:
h = hierarchy[_level][greatgreatgrandparent][greatgrandparent][grandparent][parent]
if v not in h:
h[v] = {}
greatgreatgreatgrandparent = greatgreatgrandparent
greatgreatgrandparent = greatgrandparent
greatgrandparent = grandparent
grandparent = parent
parent = v
elif i == 5:
h = hierarchy[_level][greatgreatgreatgrandparent][greatgreatgrandparent][greatgrandparent][grandparent][parent]
if v not in h:
h[v] = {}
i += 1
# -------------------------------------------------------------------------
def _options(self, resource, inject_hierarchy=True, values=None):
T = current.T
s3db = current.s3db
gtable = s3db.gis_location
NOOPT = T("No options available")
#attr = self.attr
opts = self.opts
translate = self.translate
# Which levels should we display?
# Lookup the appropriate labels from the GIS configuration
if "levels" in opts:
hierarchy = current.gis.get_location_hierarchy()
levels = OrderedDict()
for level in opts["levels"]:
levels[level] = hierarchy.get(level, level)
else:
levels = current.gis.get_relevant_hierarchy_levels(as_dict=True)
# Pass to data_element
self.levels = levels
if "label" not in opts:
opts["label"] = T("Filter by Location")
ftype = "reference gis_location"
default = (ftype, levels.keys(), opts.get("no_opts", NOOPT))
# Resolve the field selector
selector = None
if resource is None:
rname = opts.get("resource")
if rname:
resource = s3db.resource(rname)
selector = opts.get("lookup", "location_id")
else:
selector = self.field
options = opts.get("options")
if options:
# Fixed options (=list of location IDs)
resource = s3db.resource("gis_location", id=options)
fields = ["id"] + [l for l in levels]
if translate:
fields.append("path")
joined = False
elif selector:
# Lookup options from resource
rfield = S3ResourceField(resource, selector)
if not rfield.field or rfield.ftype != ftype:
# Must be a real reference to gis_location
return default
fields = [selector] + ["%s$%s" % (selector, l) for l in levels]
if translate:
fields.append("%s$path" % selector)
joined = True
# Filter out old Locations
# @ToDo: Allow override
resource.add_filter(FS("%s.end_date" % selector) == None)
else:
# Neither fixed options nor resource to look them up
return default
# Find the options
rows = resource.select(fields=fields,
limit=None,
virtual=False,
as_rows=True)
rows2 = []
if not rows:
if values:
# Make sure the selected options are in the available options
resource = s3db.resource("gis_location")
fields = ["id"] + [l for l in levels]
if translate:
fields.append("path")
joined = False
rows = []
for f in values:
v = values[f]
if not v:
continue
level = "L%s" % f.split("L", 1)[1][0]
resource.clear_query()
query = (gtable.level == level) & \
(gtable.name.belongs(v))
resource.add_filter(query)
# Filter out old Locations
# @ToDo: Allow override
resource.add_filter(gtable.end_date == None)
_rows = resource.select(fields=fields,
limit=None,
virtual=False,
as_rows=True)
if rows:
rows &= _rows
else:
rows = _rows
if not rows:
# No options
return default
elif values:
# Make sure the selected options are in the available options
resource2 = s3db.resource("gis_location")
fields = ["id"] + [l for l in levels]
if translate:
fields.append("path")
for f in values:
v = values[f]
if not v:
continue
level = "L%s" % f.split("L", 1)[1][0]
resource2.clear_query()
query = (gtable.level == level) & \
(gtable.name.belongs(v))
resource2.add_filter(query)
# Filter out old Locations
# @ToDo: Allow override
resource2.add_filter(gtable.end_date == None)
_rows = resource2.select(fields=fields,
limit=None,
virtual=False,
as_rows=True)
if rows2:
rows2 &= _rows
else:
rows2 = _rows
# Initialise Options Storage & Hierarchy
hierarchy = {}
first = True
for level in levels:
if first:
hierarchy[level] = {}
_level = level
first = False
levels[level] = {"label": levels[level],
"options": {} if translate else [],
}
# Generate a name localization lookup dict
name_l10n = {}
if translate:
# Get IDs via Path to lookup name_l10n
ids = set()
if joined:
selector = rfield.colname
for row in rows:
_row = getattr(row, "gis_location") if joined else row
path = _row.path
if path:
path = path.split("/")
else:
# Build it
if joined:
location_id = row[selector]
if location_id:
_row.id = location_id
if "id" in _row:
path = current.gis.update_location_tree(_row)
path = path.split("/")
if path:
ids |= set(path)
for row in rows2:
path = row.path
if path:
path = path.split("/")
else:
# Build it
if "id" in row:
path = current.gis.update_location_tree(row)
path = path.split("/")
if path:
ids |= set(path)
# Build lookup table for name_l10n
ntable = s3db.gis_location_name
query = (gtable.id.belongs(ids)) & \
(ntable.deleted == False) & \
(ntable.location_id == gtable.id) & \
(ntable.language == current.session.s3.language)
nrows = current.db(query).select(gtable.name,
ntable.name_l10n,
limitby=(0, len(ids)),
)
for row in nrows:
name_l10n[row["gis_location.name"]] = row["gis_location_name.name_l10n"]
# Populate the Options and the Hierarchy
for row in rows:
_row = getattr(row, "gis_location") if joined else row
self.__options(_row, levels, inject_hierarchy, hierarchy, _level, translate, name_l10n)
for row in rows2:
self.__options(row, levels, inject_hierarchy, hierarchy, _level, translate, name_l10n)
if translate:
# Sort the options dicts
for level in levels:
options = levels[level]["options"]
                levels[level]["options"] = OrderedDict(sorted(options.iteritems()))
else:
# Sort the options lists
for level in levels:
levels[level]["options"].sort()
if inject_hierarchy:
# Inject the Location Hierarchy
hierarchy = "S3.location_filter_hierarchy=%s" % \
json.dumps(hierarchy, separators=SEPARATORS)
js_global = current.response.s3.js_global
js_global.append(hierarchy)
if translate:
# Inject lookup list
name_l10n = "S3.location_name_l10n=%s" % \
json.dumps(name_l10n, separators=SEPARATORS)
js_global.append(name_l10n)
return (ftype, levels, None)
# -------------------------------------------------------------------------
def _selector(self, resource, fields):
"""
Helper method to generate a filter query selector for the
given field(s) in the given resource.
@param resource: the S3Resource
@param fields: the field selectors (as strings)
@return: the field label and the filter query selector, or None if none of the
field selectors could be resolved
"""
prefix = self._prefix
if resource:
rfield = S3ResourceField(resource, fields)
label = rfield.label
else:
label = None
if "levels" in self.opts:
levels = self.opts.levels
else:
levels = current.gis.get_relevant_hierarchy_levels()
fields = ["%s$%s" % (fields, level) for level in levels]
if resource:
selectors = []
for field in fields:
try:
rfield = S3ResourceField(resource, field)
except (AttributeError, TypeError):
continue
selectors.append(prefix(rfield.selector))
else:
selectors = fields
if selectors:
return label, "|".join(selectors)
else:
return label, None
# -------------------------------------------------------------------------
@classmethod
def _variable(cls, selector, operator):
"""
Construct URL query variable(s) name from a filter query
selector and the given operator(s)
@param selector: the selector
@param operator: the operator (or tuple/list of operators)
@return: the URL query variable name (or list of variable names)
"""
selectors = selector.split("|")
return ["%s__%s" % (selector, operator) for selector in selectors]
# =============================================================================
class S3OptionsFilter(S3FilterWidget):
"""
Options filter widget
Configuration options:
@keyword label: label for the widget
@keyword comment: comment for the widget
@keyword hidden: render widget initially hidden (="advanced" option)
@keyword widget: widget to use:
"select", "multiselect" (default), or "groupedopts"
@keyword cols: number of columns of checkboxes
(with "groupedopts" widget)
@keyword filter: show filter for options
(with "multiselect" widget)
@keyword header: show header in widget
(with "multiselect" widget)
@keyword selectedList: number of selected items to show before
collapsing into number of items
(with "multiselect" widget)
@keyword size: maximum size of multi-letter options groups
(with "groupedopts" widget)
@keyword help_field: field in the referenced table to display on
hovering over a foreign key option
(with "groupedopts" widget)
@keyword no_opts: text to show if no options available
@keyword none: label for explicit None-option in many-to-many fields
@keyword resource: alternative resource to look up options
@keyword lookup: field in the alternative resource to look up
@keyword represent: custom represent for looked-up options
(overrides field representation method)
@keyword options: fixed set of options (of {value: label} or
a callable that returns one)
@keyword translate: translate the option labels in the fixed set
(looked-up option sets will use the
field representation method instead)
"""
_class = "options-filter"
operator = "belongs"
alternatives = ["anyof", "contains"]
# -------------------------------------------------------------------------
def widget(self, resource, values):
"""
Render this widget as HTML helper object(s)
@param resource: the resource
@param values: the search values from the URL query
"""
attr = self._attr(resource)
opts = self.opts
name = attr["_name"]
# Get the options
ftype, options, noopt = self._options(resource, values=values)
if noopt:
return SPAN(noopt, _class="no-options-available")
else:
options = OrderedDict(options)
# Any-All-Option : for many-to-many fields the user can
# search for records containing all the options or any
# of the options:
if len(options) > 1 and ftype[:4] == "list":
operator = opts.get("operator", None)
if operator:
self.operator = operator
any_all = ""
else:
operator = self.operator
any_all = True
if operator == "anyof":
filter_type = "any"
else:
filter_type = "all"
if operator == "belongs":
operator = "contains"
if any_all:
# Provide a form to prompt the user to choose
T = current.T
any_all = DIV(T("Filter type"),
INPUT(_name="%s_filter" % name,
_id="%s_filter_any" % name,
_type="radio",
_value="any",
value=filter_type),
LABEL(T("Any"),
_for="%s_filter_any" % name),
INPUT(_name="%s_filter" % name,
_id="%s_filter_all" % name,
_type="radio",
_value="all",
value=filter_type),
LABEL(T("All"),
_for="%s_filter_all" % name),
_class="s3-options-filter-anyall",
)
else:
any_all = ""
# Initialize widget
#widget_type = opts["widget"]
# Use groupedopts widget if we specify cols, otherwise assume multiselect
cols = opts.get("cols", None)
if cols:
widget_class = "groupedopts-filter-widget"
w = S3GroupedOptionsWidget(options = options,
multiple = opts.get("multiple", True),
cols = cols,
size = opts["size"] or 12,
help_field = opts["help_field"],
sort = opts.get("sort", True),
orientation = opts.get("orientation"),
)
else:
# Default widget_type = "multiselect"
widget_class = "multiselect-filter-widget"
w = S3MultiSelectWidget(filter = opts.get("filter", "auto"),
header = opts.get("header", False),
selectedList = opts.get("selectedList", 3),
multiple = opts.get("multiple", True),
)
# Add widget class and default class
classes = set(attr.get("_class", "").split()) | \
set((widget_class, self._class))
attr["_class"] = " ".join(classes) if classes else None
# Render the widget
dummy_field = Storage(name=name,
type=ftype,
requires=IS_IN_SET(options, multiple=True))
widget = w(dummy_field, values, **attr)
return TAG[""](any_all, widget)
# -------------------------------------------------------------------------
def ajax_options(self, resource):
"""
Method to Ajax-retrieve the current options of this widget
@param resource: the S3Resource
"""
opts = self.opts
attr = self._attr(resource)
ftype, options, noopt = self._options(resource)
if noopt:
options = {attr["_id"]: str(noopt)}
else:
#widget_type = opts["widget"]
# Use groupedopts widget if we specify cols, otherwise assume multiselect
cols = opts.get("cols", None)
if cols:
# Use the widget method to group and sort the options
widget = S3GroupedOptionsWidget(
options = options,
multiple = True,
cols = cols,
size = opts["size"] or 12,
help_field = opts["help_field"],
sort = opts.get("sort", True),
)
options = {attr["_id"]:
widget._options({"type": ftype}, [])}
else:
# Multiselect
# Produce a simple list of tuples
options = {attr["_id"]: [(k, s3_unicode(v))
for k, v in options]}
return options
# -------------------------------------------------------------------------
def _options(self, resource, values=None):
"""
Helper function to retrieve the current options for this
filter widget
@param resource: the S3Resource
"""
T = current.T
NOOPT = T("No options available")
EMPTY = T("None")
#attr = self.attr
opts = self.opts
# Resolve the field selector
selector = self.field
if isinstance(selector, (tuple, list)):
selector = selector[0]
if resource is None:
rname = opts.get("resource")
if rname:
resource = current.s3db.resource(rname)
if resource:
rfield = S3ResourceField(resource, selector)
field = rfield.field
colname = rfield.colname
ftype = rfield.ftype
else:
rfield = field = colname = None
ftype = "string"
# Find the options
opt_keys = []
multiple = ftype[:5] == "list:"
if opts.options is not None:
# Custom dict of options {value: label} or a callable
# returning such a dict:
options = opts.options
if callable(options):
options = options()
opt_keys = options.keys()
elif resource:
# Determine the options from the field type
options = None
if ftype == "boolean":
opt_keys = (True, False)
elif field or rfield.virtual:
groupby = field if field and not multiple else None
virtual = field is None
# If the search field is a foreign key, then try to perform
# a reverse lookup of primary IDs in the lookup table which
# are linked to at least one record in the resource => better
# scalability.
rows = None
if field:
ktablename, key, m = s3_get_foreign_key(field, m2m=False)
if ktablename:
multiple = m
ktable = current.s3db.table(ktablename)
key_field = ktable[key]
colname = str(key_field)
left = None
accessible_query = current.auth.s3_accessible_query
# Respect the validator of the foreign key field.
# Commented because questionable: We want a filter
# option for every current field value, even if it
# doesn't match the validator (don't we?)
#requires = field.requires
#if requires:
#if not isinstance(requires, list):
#requires = [requires]
#requires = requires[0]
#if isinstance(requires, IS_EMPTY_OR):
#requires = requires.other
#if isinstance(requires, IS_ONE_OF_EMPTY):
#query, left = requires.query(ktable)
#else:
#query = accessible_query("read", ktable)
#query &= (key_field == field)
query = accessible_query("read", ktable) & \
(key_field == field)
joins = rfield.join
for tname in joins:
query &= joins[tname]
# We do not allow the user to see values only used
# in records he's not permitted to see:
query &= accessible_query("read", resource.table)
# Filter options by location?
location_filter = opts.get("location_filter")
if location_filter and "location_id" in ktable:
location = current.session.s3.location_filter
if location:
query &= (ktable.location_id == location)
# Filter options by organisation?
org_filter = opts.get("org_filter")
if org_filter and "organisation_id" in ktable:
root_org = current.auth.root_org()
if root_org:
query &= ((ktable.organisation_id == root_org) | \
(ktable.organisation_id == None))
#else:
# query &= (ktable.organisation_id == None)
rows = current.db(query).select(key_field,
resource._id.min(),
groupby=key_field,
left=left)
# If we can not perform a reverse lookup, then we need
# to do a forward lookup of all unique values of the
# search field from all records in the table :/ still ok,
# but not endlessly scalable:
if rows is None:
rows = resource.select([selector],
limit=None,
orderby=field,
groupby=groupby,
virtual=virtual,
as_rows=True)
opt_keys = [] # Can't use set => would make orderby pointless
if rows:
kappend = opt_keys.append
kextend = opt_keys.extend
for row in rows:
val = row[colname]
if virtual and callable(val):
val = val()
if (multiple or \
virtual) and isinstance(val, (list, tuple, set)):
kextend([v for v in val
if v not in opt_keys])
elif val not in opt_keys:
kappend(val)
# Make sure the selected options are in the available options
# (not possible if we have a fixed options dict)
if options is None and values:
numeric = rfield.ftype in ("integer", "id") or \
rfield.ftype[:9] == "reference"
for _val in values:
if numeric and _val is not None:
try:
val = int(_val)
except ValueError:
# not valid for this field type => skip
continue
else:
val = _val
if val not in opt_keys and \
(not isinstance(val, (int, long)) or not str(val) in opt_keys):
opt_keys.append(val)
# No options?
        if len(opt_keys) < 1 or (len(opt_keys) == 1 and not opt_keys[0]):
return (ftype, None, opts.get("no_opts", NOOPT))
# Represent the options
opt_list = [] # list of tuples (key, value)
# Custom represent? (otherwise fall back to field.represent)
represent = opts.represent
if not represent: # or ftype[:9] != "reference":
represent = field.represent if field else None
if options is not None:
# Custom dict of {value:label} => use this label
if opts.get("translate"):
# Translate the labels
opt_list = [(opt, T(label))
if isinstance(label, basestring) else (opt, label)
for opt, label in options.items()
]
else:
opt_list = options.items()
elif callable(represent):
# Callable representation function:
if hasattr(represent, "bulk"):
# S3Represent => use bulk option
opt_dict = represent.bulk(opt_keys,
list_type=False,
show_link=False)
if None in opt_keys:
opt_dict[None] = EMPTY
elif None in opt_dict:
del opt_dict[None]
if "" in opt_keys:
opt_dict[""] = EMPTY
opt_list = opt_dict.items()
else:
# Simple represent function
args = {"show_link": False} \
if "show_link" in represent.func_code.co_varnames else {}
if multiple:
repr_opt = lambda opt: opt in (None, "") and (opt, EMPTY) or \
(opt, represent([opt], **args))
else:
repr_opt = lambda opt: opt in (None, "") and (opt, EMPTY) or \
(opt, represent(opt, **args))
opt_list = map(repr_opt, opt_keys)
elif isinstance(represent, str) and ftype[:9] == "reference":
# Represent is a string template to be fed from the
# referenced record
# Get the referenced table
db = current.db
ktable = db[ftype[10:]]
k_id = ktable._id.name
# Get the fields referenced by the string template
fieldnames = [k_id]
fieldnames += re.findall("%\(([a-zA-Z0-9_]*)\)s", represent)
represent_fields = [ktable[fieldname] for fieldname in fieldnames]
# Get the referenced records
query = (ktable.id.belongs([k for k in opt_keys
if str(k).isdigit()])) & \
(ktable.deleted == False)
rows = db(query).select(*represent_fields).as_dict(key=k_id)
# Run all referenced records against the format string
opt_list = []
ol_append = opt_list.append
for opt_value in opt_keys:
if opt_value in rows:
opt_represent = represent % rows[opt_value]
if opt_represent:
ol_append((opt_value, opt_represent))
else:
# Straight string representations of the values (fallback)
opt_list = [(opt_value, s3_unicode(opt_value))
for opt_value in opt_keys if opt_value]
if opts.get("sort", True):
try:
opt_list.sort(key=lambda item: item[1])
except:
opt_list.sort(key=lambda item: s3_unicode(item[1]))
options = []
empty = False
none = opts["none"]
for k, v in opt_list:
if k is None:
if none:
empty = True
if none is True:
# Use the represent
options.append((k, v))
else:
# Must be a string to use as the represent:
options.append((k, none))
else:
options.append((k, v))
if none and not empty:
# Add the value anyway (e.g. not found via the reverse lookup)
if none is True:
none = current.messages["NONE"]
options.append((None, none))
if not opts.get("multiple", True) and not self.values:
# Browsers automatically select the first option in single-selects,
# but that doesn't filter the data, so the first option must be
# empty if we don't have a default:
options.insert(0, ("", "")) # XML(" ") better?
return (ftype, options, None)
# -------------------------------------------------------------------------
@staticmethod
def _values(get_vars, variable):
"""
Helper method to get all values of a URL query variable
@param get_vars: the GET vars (a dict)
@param variable: the name of the query variable
@return: a list of values
"""
if not variable:
return []
# Match __eq before checking any other operator
selector = variable.split("__", 1)[0]
for key in ("%s__eq" % selector, selector, variable):
if key in get_vars:
values = S3URLQuery.parse_value(get_vars[key])
if not isinstance(values, (list, tuple)):
values = [values]
return values
return []
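# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): an S3OptionsFilter
# with a fixed options dict (the widget then skips the reverse lookup), using
# cols to select the groupedopts rendering. The resource name "project_task"
# and the field name are assumptions for the example.
def _example_options_filter_config():

    priority_filter = S3OptionsFilter("priority",
                                      label = current.T("Priority"),
                                      options = {1: current.T("High"),
                                                 2: current.T("Normal"),
                                                 3: current.T("Low"),
                                                 },
                                      cols = 3,
                                      )
    current.s3db.configure("project_task",
                           filter_widgets = [priority_filter],
                           )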
# =============================================================================
class S3HierarchyFilter(S3FilterWidget):
"""
Filter widget for hierarchical types
Configuration Options (see also: S3HierarchyWidget):
@keyword lookup: name of the lookup table
@keyword represent: representation method for the key
@keyword multiple: allow selection of multiple options
@keyword leafonly: only leaf nodes can be selected
@keyword cascade: automatically select child nodes when
selecting a parent node
@keyword bulk_select: provide an option to select/deselect all nodes
"""
_class = "hierarchy-filter"
operator = "belongs"
# -------------------------------------------------------------------------
def widget(self, resource, values):
"""
Render this widget as HTML helper object(s)
@param resource: the resource
@param values: the search values from the URL query
"""
# Currently selected values
selected = []
append = selected.append
if not isinstance(values, (list, tuple, set)):
values = [values]
for v in values:
if isinstance(v, (int, long)) or str(v).isdigit():
append(v)
# Resolve the field selector
rfield = S3ResourceField(resource, self.field)
# Instantiate the widget
opts = self.opts
bulk_select = current.deployment_settings \
.get_ui_hierarchy_filter_bulk_select_option()
if bulk_select is None:
bulk_select = opts.get("bulk_select", False)
w = S3HierarchyWidget(lookup = opts.get("lookup"),
represent = opts.get("represent"),
multiple = opts.get("multiple", True),
leafonly = opts.get("leafonly", True),
cascade = opts.get("cascade", False),
bulk_select = bulk_select,
filter = opts.get("filter"),
none = opts.get("none"),
)
# Render the widget
widget = w(rfield.field, selected, **self._attr(resource))
widget.add_class(self._class)
return widget
# -------------------------------------------------------------------------
def variable(self, resource, get_vars=None):
"""
Generate the name for the URL query variable for this
widget, detect alternative __typeof queries.
@param resource: the resource
@return: the URL query variable name (or list of
variable names if there are multiple operators)
"""
label, self.selector = self._selector(resource, self.field)
if not self.selector:
return None
if "label" not in self.opts:
self.opts["label"] = label
selector = self.selector
if self.alternatives and get_vars is not None:
# Get the actual operator from get_vars
operator = self._operator(get_vars, self.selector)
if operator:
self.operator = operator
variable = self._variable(selector, self.operator)
if not get_vars or not resource or variable in get_vars:
return variable
# Detect and resolve __typeof queries
resolve = S3ResourceQuery._resolve_hierarchy
selector = resource.prefix_selector(selector)
for key, value in get_vars.items():
if key.startswith(selector):
selectors, op, invert = S3URLQuery.parse_expression(key)
else:
continue
if op != "typeof" or len(selectors) != 1:
continue
rfield = resource.resolve_selector(selectors[0])
if rfield.field:
values = S3URLQuery.parse_value(value)
hierarchy, field, nodeset, none = resolve(rfield.field, values)
if field and (nodeset or none):
if nodeset is None:
nodeset = set()
if none:
nodeset.add(None)
get_vars.pop(key, None)
get_vars[variable] = [str(v) for v in nodeset]
break
return variable
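# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): an S3HierarchyFilter
# against a hierarchical lookup table. The lookup table and field names are
# assumptions for the example.
def _example_hierarchy_filter_config():

    type_filter = S3HierarchyFilter("organisation_type_id",
                                    lookup = "org_organisation_type",
                                    multiple = True,
                                    leafonly = False,
                                    )
    current.s3db.configure("org_organisation",
                           filter_widgets = [type_filter],
                           )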
# =============================================================================
class S3FilterForm(object):
""" Helper class to construct and render a filter form for a resource """
def __init__(self, widgets, **attr):
"""
Constructor
@param widgets: the widgets (as list)
@param attr: HTML attributes for this form
"""
self.widgets = widgets
attributes = Storage()
options = Storage()
for k, v in attr.iteritems():
if k[0] == "_":
attributes[k] = v
else:
options[k] = v
self.attr = attributes
self.opts = options
# -------------------------------------------------------------------------
def html(self, resource, get_vars=None, target=None, alias=None):
"""
Render this filter form as HTML form.
@param resource: the S3Resource
@param get_vars: the request GET vars (URL query dict)
@param target: the HTML element ID of the target object for
this filter form (e.g. a datatable)
@param alias: the resource alias to use in widgets
"""
attr = self.attr
form_id = attr.get("_id")
if not form_id:
form_id = "filter-form"
attr["_id"] = form_id
# Prevent issues with Webkit-based browsers & Back buttons
attr["_autocomplete"] = "off"
opts = self.opts
settings = current.deployment_settings
# Form style
formstyle = opts.get("formstyle", None)
if not formstyle:
formstyle = settings.get_ui_filter_formstyle()
# Filter widgets
rows = self._render_widgets(resource,
get_vars=get_vars or {},
alias=alias,
formstyle=formstyle)
# Other filter form controls
controls = self._render_controls(resource)
if controls:
rows.append(formstyle(None, "", controls, ""))
# Submit elements
ajax = opts.get("ajax", False)
submit = opts.get("submit", False)
if submit:
# Auto-submit?
auto_submit = settings.get_ui_filter_auto_submit()
if auto_submit and opts.get("auto_submit", True):
script = '''S3.search.filterFormAutoSubmit('%s',%s)''' % \
(form_id, auto_submit)
current.response.s3.jquery_ready.append(script)
# Custom label and class
_class = None
if submit is True:
label = current.T("Search")
elif isinstance(submit, (list, tuple)):
label, _class = submit
else:
label = submit
# Submit button
submit_button = INPUT(_type="button",
_value=label,
_class="filter-submit")
#if auto_submit:
#submit_button.add_class("hide")
if _class:
submit_button.add_class(_class)
# Where to request filtered data from:
submit_url = opts.get("url", URL(vars={}))
# Where to request updated options from:
ajax_url = opts.get("ajaxurl", URL(args=["filter.options"], vars={}))
# Submit row elements
submit = TAG[""](submit_button,
INPUT(_type="hidden",
_class="filter-ajax-url",
_value=ajax_url),
INPUT(_type="hidden",
_class="filter-submit-url",
_value=submit_url))
if ajax and target:
submit.append(INPUT(_type="hidden",
_class="filter-submit-target",
_value=target))
# Append submit row
submit_row = formstyle(None, "", submit, "")
if auto_submit and hasattr(submit_row, "add_class"):
submit_row.add_class("hide")
rows.append(submit_row)
# Filter Manager (load/apply/save filters)
fm = settings.get_search_filter_manager()
if fm and opts.get("filter_manager", resource is not None):
filter_manager = self._render_filters(resource, form_id)
if filter_manager:
fmrow = formstyle(None, "", filter_manager, "")
if hasattr(fmrow, "add_class"):
fmrow.add_class("hide filter-manager-row")
rows.append(fmrow)
# Adapt to formstyle: render a TABLE only if formstyle returns TRs
if rows:
elements = rows[0]
if not isinstance(elements, (list, tuple)):
elements = elements.elements()
n = len(elements)
if n > 0 and elements[0].tag == "tr" or \
n > 1 and elements[0].tag == "" and elements[1].tag == "tr":
form = FORM(TABLE(TBODY(rows)), **attr)
else:
form = FORM(DIV(rows), **attr)
if settings.ui.formstyle == "bootstrap":
# We need to amend the HTML markup to support this CSS framework
form.add_class("form-horizontal")
form.add_class("filter-form")
if ajax:
form.add_class("filter-ajax")
else:
return ""
# Put a copy of formstyle into the form for access by the view
form.formstyle = formstyle
return form
# -------------------------------------------------------------------------
def fields(self, resource, get_vars=None, alias=None):
"""
Render the filter widgets without FORM wrapper, e.g. to
embed them as fieldset in another form.
@param resource: the S3Resource
@param get_vars: the request GET vars (URL query dict)
@param alias: the resource alias to use in widgets
"""
formstyle = self.opts.get("formstyle", None)
if not formstyle:
formstyle = current.deployment_settings.get_ui_filter_formstyle()
rows = self._render_widgets(resource,
get_vars=get_vars,
alias=alias,
formstyle=formstyle)
controls = self._render_controls(resource)
if controls:
rows.append(formstyle(None, "", controls, ""))
        # Adapt to formstyle: only render a TABLE if formstyle returns TRs
        if rows:
            elements = rows[0]
            if not isinstance(elements, (list, tuple)):
                elements = elements.elements()
            n = len(elements)
            if (n > 0 and elements[0].tag == "tr") or \
               (n > 1 and elements[0].tag == "" and elements[1].tag == "tr"):
                fields = TABLE(TBODY(rows))
            else:
                fields = DIV(rows)
        else:
            # No filter widgets
            fields = ""
        return fields
# -------------------------------------------------------------------------
def _render_controls(self, resource):
"""
Render optional additional filter form controls: advanced
options toggle, clear filters.
"""
T = current.T
controls = []
opts = self.opts
        advanced = opts.get("advanced", False)
        if advanced:
            _class = "filter-advanced"
            label_off = T("Less Options")
            if advanced is True:
                label = T("More Options")
            elif isinstance(advanced, (list, tuple)):
                # (label, label_off) or (label, label_off, extra CSS class)
                label = advanced[0]
                label_off = advanced[1]
                if len(advanced) > 2:
                    _class = "%s %s" % (advanced[2], _class)
            else:
                label = advanced
advanced = A(SPAN(label,
data = {"on": label,
"off": label_off,
},
_class="filter-advanced-label",
),
ICON("down"),
ICON("up", _style="display:none"),
_class=_class
)
controls.append(advanced)
clear = opts.get("clear", True)
if clear:
_class = "filter-clear"
if clear is True:
label = T("Clear filter")
elif isinstance(clear, (list, tuple)):
label = clear[0]
_class = "%s %s" % (clear[1], _class)
else:
label = clear
clear = A(label, _class=_class)
clear.add_class("action-lnk")
controls.append(clear)
fm = current.deployment_settings.get_search_filter_manager()
if fm and opts.get("filter_manager", resource is not None):
show_fm = A(T("Saved Filters"),
_class="show-filter-manager action-lnk")
controls.append(show_fm)
if controls:
return DIV(controls, _class="filter-controls")
else:
return None
# -------------------------------------------------------------------------
def _render_widgets(self,
resource,
get_vars=None,
alias=None,
formstyle=None):
"""
Render the filter widgets
@param resource: the S3Resource
@param get_vars: the request GET vars (URL query dict)
@param alias: the resource alias to use in widgets
@param formstyle: the formstyle to use
@return: a list of form rows
"""
rows = []
rappend = rows.append
advanced = False
for f in self.widgets:
widget = f(resource, get_vars, alias=alias)
label = f.opts["label"]
comment = f.opts["comment"]
hidden = f.opts["hidden"]
if hidden:
advanced = True
widget_id = f.attr["_id"]
if widget_id:
row_id = "%s__row" % widget_id
label_id = "%s__label" % widget_id
else:
row_id = None
label_id = None
if label:
label = LABEL("%s:" % label, _id=label_id, _for=widget_id)
else:
label = ""
if not comment:
comment = ""
formrow = formstyle(row_id, label, widget, comment, hidden=hidden)
if hidden:
if isinstance(formrow, DIV):
formrow.add_class("advanced")
elif isinstance(formrow, tuple):
for item in formrow:
if hasattr(item, "add_class"):
item.add_class("advanced")
rappend(formrow)
if advanced:
if resource:
self.opts["advanced"] = resource.get_config(
"filter_advanced", True)
else:
self.opts["advanced"] = True
return rows
# -------------------------------------------------------------------------
def _render_filters(self, resource, form_id):
"""
Render a filter manager widget
@param resource: the resource
@return: the widget
"""
SELECT_FILTER = current.T("Saved Filters")
ajaxurl = self.opts.get("saveurl", URL(args=["filter.json"], vars={}))
# Current user
auth = current.auth
pe_id = auth.user.pe_id if auth.s3_logged_in() else None
if not pe_id:
return None
table = current.s3db.pr_filter
query = (table.deleted != True) & \
(table.pe_id == pe_id)
if resource:
query &= (table.resource == resource.tablename)
else:
query &= (table.resource == None)
rows = current.db(query).select(table._id,
table.title,
table.query,
orderby=table.title)
options = [OPTION(SELECT_FILTER,
_value="",
_class="filter-manager-prompt",
_disabled="disabled")]
add_option = options.append
filters = {}
for row in rows:
filter_id = row[table._id]
add_option(OPTION(row.title, _value=filter_id))
query = row.query
if query:
query = json.loads(query)
filters[filter_id] = query
widget_id = "%s-fm" % form_id
widget = DIV(SELECT(options,
_id=widget_id,
_class="filter-manager-widget"),
_class="filter-manager-container")
# JSON-serializable translator
T = current.T
_t = lambda s: str(T(s))
# Configure the widget
settings = current.deployment_settings
config = dict(
# Filters and Ajax URL
filters = filters,
ajaxURL = ajaxurl,
# Workflow Options
allowDelete = settings.get_search_filter_manager_allow_delete(),
# Tooltips for action icons/buttons
createTooltip = _t("Save current options as new filter"),
loadTooltip = _t("Load filter"),
saveTooltip = _t("Update saved filter"),
deleteTooltip = _t("Delete saved filter"),
# Hints
titleHint = _t("Enter a title..."),
selectHint = str(SELECT_FILTER),
emptyHint = _t("No saved filters"),
# Confirm update + confirmation text
confirmUpdate = _t("Update this filter?"),
confirmDelete = _t("Delete this filter?"),
)
# Render actions as buttons with text if configured, otherwise
# they will appear as empty DIVs with classes for CSS icons
create_text = settings.get_search_filter_manager_save()
if create_text:
config["createText"] = _t(create_text)
update_text = settings.get_search_filter_manager_update()
if update_text:
config["saveText"] = _t(update_text)
delete_text = settings.get_search_filter_manager_delete()
if delete_text:
config["deleteText"] = _t(delete_text)
load_text = settings.get_search_filter_manager_load()
if load_text:
config["loadText"] = _t(load_text)
script = '''$("#%s").filtermanager(%s)''' % \
(widget_id,
json.dumps(config, separators=SEPARATORS))
current.response.s3.jquery_ready.append(script)
return widget
# -------------------------------------------------------------------------
def json(self, resource, get_vars=None):
"""
Render this filter form as JSON (for Ajax requests)
@param resource: the S3Resource
@param get_vars: the request GET vars (URL query dict)
"""
raise NotImplementedError
# -------------------------------------------------------------------------
@staticmethod
def apply_filter_defaults(request, resource):
"""
            Add default filters to resource, to be called when a multi-record
            view with a filter form is rendered for the first time, and before
            the view elements get processed
@param request: the request
@param resource: the resource
@return: dict with default filters (URL vars)
"""
s3 = current.response.s3
get_vars = request.get_vars
tablename = resource.tablename
default_filters = {}
# Do we have filter defaults for this resource?
filter_defaults = s3
for level in ("filter_defaults", tablename):
if level not in filter_defaults:
filter_defaults = None
break
filter_defaults = filter_defaults[level]
# Which filter widgets do we need to apply defaults for?
        filter_widgets = resource.get_config("filter_widgets")
        if not filter_widgets:
            return default_filters
        for filter_widget in filter_widgets:
# Do not apply defaults of hidden widgets because they are
# not visible to the user:
if filter_widget.opts.hidden:
continue
has_default = False
if "default" in filter_widget.opts:
has_default = True
elif filter_defaults is None:
continue
defaults = set()
variable = filter_widget.variable(resource, get_vars)
multiple = type(variable) is list
# Do we have a corresponding value in get_vars?
if multiple:
for k in variable:
values = filter_widget._values(get_vars, k)
if values:
filter_widget.values[k] = values
else:
defaults.add(k)
else:
values = filter_widget._values(get_vars, variable)
if values:
filter_widget.values[variable] = values
else:
defaults.add(variable)
# Extract widget default
if has_default:
widget_default = filter_widget.opts["default"]
if not isinstance(widget_default, dict):
if multiple:
widget_default = dict((k, widget_default)
for k in variable)
else:
widget_default = {variable: widget_default}
for k in widget_default:
if k not in filter_widget.values:
defaults.add(k)
else:
widget_default = {}
for variable in defaults:
if "__" in variable:
selector, operator = variable.split("__", 1)
else:
selector, operator = variable, None
if filter_defaults and selector in filter_defaults:
applicable_defaults = filter_defaults[selector]
elif variable in widget_default:
applicable_defaults = widget_default[variable]
else:
continue
if callable(applicable_defaults):
applicable_defaults = applicable_defaults(selector,
tablename=tablename)
if isinstance(applicable_defaults, dict):
if operator in applicable_defaults:
default = applicable_defaults[operator]
else:
continue
elif operator in (None, "belongs", "eq"):
default = applicable_defaults
else:
continue
if default is None:
# Ignore (return [None] to filter for None)
continue
elif not isinstance(default, list):
default = [default]
filter_widget.values[variable] = [str(v) if v is None else v
for v in default]
default_filters[variable] = ",".join(s3_unicode(v)
for v in default)
# Apply to resource
queries = S3URLQuery.parse(resource, default_filters)
add_filter = resource.add_filter
for alias in queries:
for q in queries[alias]:
add_filter(q)
return default_filters
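# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): constructing an
# S3FilterForm and rendering it for a datatable target. The widget list, the
# target element ID and the URLs are assumptions for the example.
def _example_filter_form(resource, get_vars):

    filter_form = S3FilterForm([S3OptionsFilter("status")],
                               submit = True,
                               ajax = True,
                               url = URL(vars = {}),
                               _class = "filter-form",
                               _id = "datatable-filter-form",
                               )
    return filter_form.html(resource,
                            get_vars = get_vars,
                            target = "datatable",
                            )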
# =============================================================================
class S3Filter(S3Method):
""" Back-end for filter forms """
def apply_method(self, r, **attr):
"""
Entry point for REST interface
@param r: the S3Request
@param attr: additional controller parameters
"""
representation = r.representation
if representation == "options":
# Return the filter options as JSON
return self._options(r, **attr)
elif representation == "json":
if r.http == "GET":
# Load list of saved filters
return self._load(r, **attr)
elif r.http == "POST":
if "delete" in r.get_vars:
# Delete a filter
return self._delete(r, **attr)
else:
# Save a filter
return self._save(r, **attr)
else:
r.error(405, current.ERROR.BAD_METHOD)
elif representation == "html":
return self._form(r, **attr)
else:
r.error(415, current.ERROR.BAD_FORMAT)
# -------------------------------------------------------------------------
def _form(self, r, **attr):
"""
Get the filter form for the target resource as HTML snippet
GET filter.html
@param r: the S3Request
@param attr: additional controller parameters
"""
r.error(501, current.ERROR.NOT_IMPLEMENTED)
# -------------------------------------------------------------------------
def _options(self, r, **attr):
"""
Get the updated options for the filter form for the target
resource as JSON
GET filter.options
@param r: the S3Request
@param attr: additional controller parameters
"""
resource = self.resource
get_config = resource.get_config
options = {}
filter_widgets = get_config("filter_widgets", None)
if filter_widgets:
fresource = current.s3db.resource(resource.tablename)
for widget in filter_widgets:
if hasattr(widget, "ajax_options"):
opts = widget.ajax_options(fresource)
if opts and isinstance(opts, dict):
options.update(opts)
options = json.dumps(options, separators=SEPARATORS)
current.response.headers["Content-Type"] = "application/json"
return options
# -------------------------------------------------------------------------
def _delete(self, r, **attr):
"""
Delete a filter, responds to POST filter.json?delete=
@param r: the S3Request
@param attr: additional controller parameters
"""
# Authorization, get pe_id
auth = current.auth
if auth.s3_logged_in():
pe_id = current.auth.user.pe_id
else:
pe_id = None
if not pe_id:
r.unauthorised()
# Read the source
source = r.body
source.seek(0)
try:
data = json.load(source)
except ValueError:
# Syntax error: no JSON data
            r.error(400, current.ERROR.BAD_SOURCE)
# Try to find the record
db = current.db
s3db = current.s3db
table = s3db.pr_filter
record = None
record_id = data.get("id")
if record_id:
query = (table.id == record_id) & (table.pe_id == pe_id)
record = db(query).select(table.id, limitby=(0, 1)).first()
if not record:
                r.error(404, current.ERROR.BAD_RECORD)
resource = s3db.resource("pr_filter", id=record_id)
success = resource.delete(format=r.representation)
if not success:
            r.error(400, resource.error)
else:
current.response.headers["Content-Type"] = "application/json"
return current.xml.json_message(deleted=record_id)
# -------------------------------------------------------------------------
def _save(self, r, **attr):
"""
Save a filter, responds to POST filter.json
@param r: the S3Request
@param attr: additional controller parameters
"""
# Authorization, get pe_id
auth = current.auth
if auth.s3_logged_in():
pe_id = current.auth.user.pe_id
else:
pe_id = None
if not pe_id:
r.unauthorised()
# Read the source
source = r.body
source.seek(0)
try:
data = json.load(source)
except ValueError:
            r.error(400, current.ERROR.BAD_SOURCE)
# Try to find the record
db = current.db
s3db = current.s3db
table = s3db.pr_filter
record_id = data.get("id")
record = None
if record_id:
query = (table.id == record_id) & (table.pe_id == pe_id)
record = db(query).select(table.id, limitby=(0, 1)).first()
if not record:
r.error(404, current.ERROR.BAD_RECORD)
# Build new record
filter_data = {
"pe_id": pe_id,
"controller": r.controller,
"function": r.function,
"resource": self.resource.tablename,
"deleted": False,
}
title = data.get("title")
if title is not None:
filter_data["title"] = title
description = data.get("description")
if description is not None:
filter_data["description"] = description
query = data.get("query")
if query is not None:
filter_data["query"] = json.dumps(query)
url = data.get("url")
if url is not None:
filter_data["url"] = url
# Store record
onaccept = None
form = Storage(vars=filter_data)
if record:
success = db(table.id == record_id).update(**filter_data)
if success:
current.audit("update", "pr", "filter", form, record_id, "json")
info = {"updated": record_id}
onaccept = s3db.get_config(table, "update_onaccept",
s3db.get_config(table, "onaccept"))
else:
success = table.insert(**filter_data)
if success:
record_id = success
current.audit("create", "pr", "filter", form, record_id, "json")
info = {"created": record_id}
onaccept = s3db.get_config(table, "update_onaccept",
s3db.get_config(table, "onaccept"))
if onaccept is not None:
form.vars["id"] = record_id
callback(onaccept, form)
# Success/Error response
xml = current.xml
if success:
msg = xml.json_message(**info)
else:
msg = xml.json_message(False, 400)
current.response.headers["Content-Type"] = "application/json"
return msg
# -------------------------------------------------------------------------
def _load(self, r, **attr):
"""
Load filters
GET filter.json or GET filter.json?load=<id>
@param r: the S3Request
@param attr: additional controller parameters
"""
db = current.db
table = current.s3db.pr_filter
# Authorization, get pe_id
auth = current.auth
if auth.s3_logged_in():
pe_id = current.auth.user.pe_id
else:
pe_id = None
if not pe_id:
            r.unauthorised()
# Build query
query = (table.deleted != True) & \
(table.resource == self.resource.tablename) & \
(table.pe_id == pe_id)
# Any particular filters?
load = r.get_vars.get("load")
if load:
record_ids = [i for i in load.split(",") if i.isdigit()]
if record_ids:
if len(record_ids) > 1:
query &= table.id.belongs(record_ids)
else:
query &= table.id == record_ids[0]
else:
record_ids = None
# Retrieve filters
rows = db(query).select(table.id,
table.title,
table.description,
table.query)
# Pack filters
filters = []
for row in rows:
filters.append({
"id": row.id,
"title": row.title,
"description": row.description,
"query": json.loads(row.query) if row.query else [],
})
# JSON response
current.response.headers["Content-Type"] = "application/json"
return json.dumps(filters, separators=SEPARATORS)
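# -----------------------------------------------------------------------------
# For reference (a summary of the dispatch logic in apply_method above), the
# REST endpoints served by S3Filter are:
#
#   GET  .../filter.options        -> updated filter options as JSON
#   GET  .../filter.json           -> load saved filters (all, or ?load=<id>)
#   POST .../filter.json           -> save (create or update) a filter
#   POST .../filter.json?delete=1  -> delete a saved filter
#   GET  .../filter.html           -> filter form snippet (not implemented)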
# =============================================================================
class S3FilterString(object):
"""
Helper class to render a human-readable representation of a
filter query, as representation method of JSON-serialized
queries in saved filters.
"""
def __init__(self, resource, query):
"""
Constructor
            @param resource: the S3Resource the query refers to
            @param query: the URL query (list of key-value pairs or a
                          string with such a list in JSON)
"""
if type(query) is not list:
try:
self.query = json.loads(query)
except ValueError:
self.query = []
else:
self.query = query
get_vars = {}
for k, v in self.query:
if v is not None:
key = resource.prefix_selector(k)
if key in get_vars:
value = get_vars[key]
if type(value) is list:
value.append(v)
else:
get_vars[key] = [value, v]
else:
get_vars[key] = v
self.resource = resource
self.get_vars = get_vars
# -------------------------------------------------------------------------
def represent(self):
""" Render the query representation for the given resource """
default = ""
get_vars = self.get_vars
resource = self.resource
if not get_vars:
return default
else:
queries = S3URLQuery.parse(resource, get_vars)
# Get alternative field labels
labels = {}
get_config = resource.get_config
prefix = resource.prefix_selector
for config in ("list_fields", "notify_fields"):
fields = get_config(config, set())
for f in fields:
if type(f) is tuple:
labels[prefix(f[1])] = f[0]
# Iterate over the sub-queries
render = self._render
substrings = []
append = substrings.append
for alias, subqueries in queries.iteritems():
for subquery in subqueries:
s = render(resource, alias, subquery, labels=labels)
if s:
append(s)
if substrings:
result = substrings[0]
T = current.T
for s in substrings[1:]:
result = T("%s AND %s") % (result, s)
return result
else:
return default
# -------------------------------------------------------------------------
@classmethod
def _render(cls, resource, alias, query, invert=False, labels=None):
"""
Recursively render a human-readable representation of a
S3ResourceQuery.
@param resource: the S3Resource
@param query: the S3ResourceQuery
@param invert: invert the query
"""
T = current.T
if not query:
return None
op = query.op
l = query.left
r = query.right
render = lambda q, r=resource, a=alias, invert=False, labels=labels: \
cls._render(r, a, q, invert=invert, labels=labels)
if op == query.AND:
# Recurse AND
l = render(l)
r = render(r)
if l is not None and r is not None:
if invert:
result = T("NOT %s OR NOT %s") % (l, r)
else:
result = T("%s AND %s") % (l, r)
else:
result = l if l is not None else r
elif op == query.OR:
# Recurse OR
l = render(l)
r = render(r)
if l is not None and r is not None:
if invert:
result = T("NOT %s AND NOT %s") % (l, r)
else:
result = T("%s OR %s") % (l, r)
else:
result = l if l is not None else r
elif op == query.NOT:
# Recurse NOT
result = render(l, invert=not invert)
else:
# Resolve the field selector against the resource
try:
rfield = l.resolve(resource)
except (AttributeError, SyntaxError):
return None
# Convert the filter values into the field type
try:
values = cls._convert(rfield, r)
except (TypeError, ValueError):
values = r
# Alias
selector = l.name
if labels and selector in labels:
rfield.label = labels[selector]
# @todo: for duplicate labels, show the table name
#else:
#tlabel = " ".join(s.capitalize() for s in rfield.tname.split("_")[1:])
#rfield.label = "(%s) %s" % (tlabel, rfield.label)
# Represent the values
if values is None:
values = T("None")
else:
list_type = rfield.ftype[:5] == "list:"
renderer = rfield.represent
if not callable(renderer):
renderer = lambda v: s3_unicode(v)
if hasattr(renderer, "linkto"):
#linkto = renderer.linkto
renderer.linkto = None
#else:
# #linkto = None
is_list = type(values) is list
try:
if is_list and hasattr(renderer, "bulk") and not list_type:
fvalues = renderer.bulk(values, list_type=False)
values = [fvalues[v] for v in values if v in fvalues]
elif list_type:
if is_list:
values = renderer(values)
else:
values = renderer([values])
else:
if is_list:
values = [renderer(v) for v in values]
else:
values = renderer(values)
except:
values = s3_unicode(values)
# Translate the query
            result = cls._translate_query(query, resource, rfield, values,
                                          invert=invert)
return result
# -------------------------------------------------------------------------
@classmethod
def _convert(cls, rfield, value):
"""
Convert a filter value according to the field type
before representation
@param rfield: the S3ResourceField
@param value: the value
"""
if value is None:
return value
ftype = rfield.ftype
if ftype[:5] == "list:":
if ftype[5:8] in ("int", "ref"):
ftype = long
else:
ftype = unicode
elif ftype == "id" or ftype [:9] == "reference":
ftype = long
elif ftype == "integer":
ftype = int
elif ftype == "date":
ftype = datetime.date
elif ftype == "time":
ftype = datetime.time
elif ftype == "datetime":
ftype = datetime.datetime
elif ftype == "double":
ftype = float
elif ftype == "boolean":
ftype = bool
else:
ftype = unicode
convert = S3TypeConverter.convert
if type(value) is list:
output = []
append = output.append
for v in value:
try:
append(convert(ftype, v))
except (TypeError, ValueError):
continue
else:
try:
output = convert(ftype, value)
except (TypeError, ValueError):
output = None
return output
# -------------------------------------------------------------------------
@classmethod
    def _translate_query(cls, query, resource, rfield, values, invert=False):
        """
            Translate the filter query into human-readable language
            @param query: the S3ResourceQuery
            @param resource: the S3Resource the query belongs to
            @param rfield: the S3ResourceField the query refers to
            @param values: the filter values
            @param invert: invert the operation
        """
T = current.T
# Value list templates
vor = T("%s or %s")
vand = T("%s and %s")
# Operator templates
otemplates = {
query.LT: (query.GE, vand, "%(label)s < %(values)s"),
query.LE: (query.GT, vand, "%(label)s <= %(values)s"),
query.EQ: (query.NE, vor, T("%(label)s is %(values)s")),
query.GE: (query.LT, vand, "%(label)s >= %(values)s"),
query.GT: (query.LE, vand, "%(label)s > %(values)s"),
query.NE: (query.EQ, vor, T("%(label)s != %(values)s")),
query.LIKE: ("notlike", vor, T("%(label)s like %(values)s")),
query.BELONGS: (query.NE, vor, T("%(label)s = %(values)s")),
query.CONTAINS: ("notall", vand, T("%(label)s contains %(values)s")),
query.ANYOF: ("notany", vor, T("%(label)s contains any of %(values)s")),
"notall": (query.CONTAINS, vand, T("%(label)s does not contain %(values)s")),
"notany": (query.ANYOF, vor, T("%(label)s does not contain %(values)s")),
"notlike": (query.LIKE, vor, T("%(label)s not like %(values)s"))
}
# Quote values as necessary
ftype = rfield.ftype
if ftype in ("string", "text") or \
ftype[:9] == "reference" or \
ftype[:5] == "list:" and ftype[5:8] in ("str", "ref"):
if type(values) is list:
values = ['"%s"' % v for v in values]
elif values is not None:
values = '"%s"' % values
else:
values = current.messages["NONE"]
# Render value list template
def render_values(template=None, values=None):
if not template or type(values) is not list:
return str(values)
elif not values:
return "()"
elif len(values) == 1:
return values[0]
else:
return template % (", ".join(values[:-1]), values[-1])
# Render the operator template
op = query.op
if op in otemplates:
inversion, vtemplate, otemplate = otemplates[op]
if invert:
inversion, vtemplate, otemplate = otemplates[inversion]
return otemplate % dict(label=rfield.label,
values=render_values(vtemplate, values))
        else:
            # Fallback to simple representation
            return query.represent(resource)
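# -----------------------------------------------------------------------------
# Illustrative sketch (not part of the original module): rendering a saved
# filter query (as stored in pr_filter.query) back into human-readable text.
# The selector and values are assumptions for the example.
def _example_filter_string(resource):

    saved_query = '[["~.priority__belongs", "1,2"]]'
    return S3FilterString(resource, saved_query).represent()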
# =============================================================================
def s3_get_filter_opts(tablename,
fieldname = "name",
location_filter = False,
org_filter = False,
key = "id",
none = False,
translate = False,
):
"""
Lazy options getter - this is useful when the expected number
of options is significantly smaller than the number of records
to iterate through
@note: unlike the built-in reverse lookup in S3OptionsFilter, this
function does *not* check whether the options are actually
in use - so it can be used to enforce filter options to be
shown even if there are no records matching them.
@param tablename: the name of the lookup table
@param fieldname: the name of the field to represent options with
@param location_filter: whether to filter the values by location
@param org_filter: whether to filter the values by root_org
@param key: the option key field (if not "id", e.g. a super key)
@param none: whether to include an option for None
@param translate: whether to translate the values
"""
auth = current.auth
table = current.s3db.table(tablename)
if auth.s3_has_permission("read", table):
query = auth.s3_accessible_query("read", table)
if "deleted" in table.fields:
query &= (table.deleted != True)
if location_filter:
location = current.session.s3.location_filter
if location:
query &= (table.location_id == location)
if org_filter:
root_org = auth.root_org()
if root_org:
query &= ((table.organisation_id == root_org) | \
(table.organisation_id == None))
#else:
# query &= (table.organisation_id == None)
rows = current.db(query).select(table[key],
table[fieldname],
# Options are sorted later
#orderby = table[fieldname]
)
if translate:
T = current.T
opts = dict((row[key], T(row[fieldname])) for row in rows)
else:
opts = dict((row[key], row[fieldname]) for row in rows)
if none:
opts[None] = current.messages["NONE"]
else:
opts = {}
return opts
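# Example (hedged, not part of the original module): the getter is typically
# passed as a lazy callback so the lookup only runs when the filter widget is
# actually rendered, e.g.:
#
#     options = lambda: s3_get_filter_opts("project_theme",
#                                          translate = True,
#                                          )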
# END =========================================================================
| 36.605781 | 145 | 0.476596 |
4a184fdabfab94c249d85b9750ae6d8341a2890c | 1,247 | py | Python | Udacity/7/5ConstConnected.py | Camiloasc1/AlgorithmsUNAL | 1542b8f2c170f9b5a24638f05ae50fa2c85cfc7b | ["MIT"] | null | null | null | Udacity/7/5ConstConnected.py | Camiloasc1/AlgorithmsUNAL | 1542b8f2c170f9b5a24638f05ae50fa2c85cfc7b | ["MIT"] | null | null | null | Udacity/7/5ConstConnected.py | Camiloasc1/AlgorithmsUNAL | 1542b8f2c170f9b5a24638f05ae50fa2c85cfc7b | ["MIT"] | null | null | null |
#
# Design and implement an algorithm that can preprocess a
# graph and then answer the question "is x connected to y in the
# graph" for any x and y in constant time Theta(1).
#
Graph = {}
#
# `process_graph` will be called only once on each graph. If you want,
# you can store whatever information you need for `is_connected` in
# global variables
#
def process_graph(G):
    global Graph
    # Single Warshall-style sweep: whenever m is adjacent to n, merge n's
    # adjacency dict into m's. The test graphs below store every edge in
    # both directions, so this propagation spreads component membership
    # across the adjacency dicts before any query is answered.
    for n in G:
        for m in G:
            if m in G[n]:
                for u in G[n]:
                    G[m][u] = 1
    Graph = G
# your code here
#
# When being graded, `is_connected` will be called
# many times so this routine needs to be quick
#
def is_connected(i, j):
return j in Graph[i]
# your code here
#######
# Testing
#
def test():
G = {'a':{'b':1},
'b':{'a':1},
'c':{'d':1},
'd':{'c':1},
'e':{}}
process_graph(G)
assert is_connected('a', 'b') == True
assert is_connected('a', 'c') == False
G = {'a':{'b':1, 'c':1},
'b':{'a':1},
'c':{'d':1, 'a':1},
'd':{'c':1},
'e':{}}
process_graph(G)
assert is_connected('a', 'b') == True
assert is_connected('a', 'c') == True
assert is_connected('a', 'e') == False
test()
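#######
# Alternative sketch (not part of the original submission): label every node
# with its connected component via one iterative DFS pass, then compare
# labels in Theta(1). Assumes the same adjacency-dict graph format as above.
#
def component_labels(G):
    labels = {}
    for start in G:
        if start in labels:
            continue
        stack = [start]
        labels[start] = start  # the first node seen names the component
        while stack:
            node = stack.pop()
            for neighbor in G[node]:
                if neighbor not in labels:
                    labels[neighbor] = start
                    stack.append(neighbor)
    return labels
# With labels precomputed, is_connected(i, j) is just labels[i] == labels[j].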
| 22.267857 | 71 | 0.526864 |
4a1854943de82950771d3344eec32428e9a9e96d | 2,030 | py | Python | tests/test_utils.py | cqkh42/advent-of-code-data | 06f6338e9cf43289beac0b7132e5b7b91ab50372 | ["MIT"] | 255 | 2016-12-04T01:34:05.000Z | 2022-03-18T09:05:49.000Z | tests/test_utils.py | miguel-bm/advent-of-code-data | 2ed6f9303b06206057ee8f03f9bc0e16b95d39cb | ["MIT"] | 46 | 2016-12-04T07:24:52.000Z | 2022-01-03T09:52:25.000Z | tests/test_utils.py | miguel-bm/advent-of-code-data | 2ed6f9303b06206057ee8f03f9bc0e16b95d39cb | ["MIT"] | 34 | 2017-12-19T05:18:10.000Z | 2022-01-17T16:09:31.000Z |
import platform
import sys
import pytest
from aocd.exceptions import DeadTokenError
from aocd.utils import blocker
from aocd.utils import get_owner
from freezegun import freeze_time
cpython = platform.python_implementation() == "CPython"
winblows = sys.platform == "win32"
py27 = sys.version_info[:2] == (2, 7)
py27_on_windows = py27 and winblows
# see https://github.com/spulec/freezegun/issues/253
@pytest.mark.xfail(py27_on_windows, reason="freezegun tick is not working on py2.7 windows")
@pytest.mark.xfail(not cpython, reason="freezegun tick is not working on pypy")
def test_blocker(capsys):
with freeze_time("2020-11-30 23:59:59.8-05:00", tick=True):
# 0.2 second before unlock day 1
blocker(dt=0.2)
out, err = capsys.readouterr()
assert " Unlock day 1 at " in out
def test_blocker_quiet(capsys):
with freeze_time("2020-11-30 23:59:59.8-05:00", auto_tick_seconds=1):
blocker(dt=0.2, quiet=True)
out, err = capsys.readouterr()
assert not out
def test_get_owner_not_logged_in(requests_mock):
requests_mock.get("https://adventofcode.com/settings", status_code=302)
with pytest.raises(DeadTokenError):
get_owner("not_logged_in")
def test_get_owner_user_id(requests_mock):
requests_mock.get(
"https://adventofcode.com/settings",
text="<span>Link to wtf</span><code>123-456-9c3a0172</code>",
)
owner = get_owner("...")
assert owner == "unknown.unknown.123"
def test_get_owner_and_username(requests_mock):
requests_mock.get(
"https://adventofcode.com/settings",
text="<span>Link to https://www.reddit.com/u/wim</span><code>123-456-9c3a0172</code>",
)
owner = get_owner("...")
assert owner == "reddit.wim.123"
def test_get_owner_google(requests_mock):
requests_mock.get(
"https://adventofcode.com/settings",
text='<span><img src="https://lh3.googleusercontent.com/...">wim</span><code>1-2</code>',
)
owner = get_owner("...")
assert owner == "google.wim.1"
| 31.230769 | 97 | 0.694581 |
4a1854a1b2f9bd74cd55d96994eb519853971887 | 1,348 | py | Python | app/app.py | ScMofeoluwa/cat-vs-dog | 999e1daec16a8afdc4e5476651990c45c3f3db58 | ["MIT"] | 3 | 2021-01-06T01:01:17.000Z | 2021-01-23T09:27:05.000Z | app/app.py | ScMofeoluwa/cat-vs-dog | 999e1daec16a8afdc4e5476651990c45c3f3db58 | ["MIT"] | null | null | null | app/app.py | ScMofeoluwa/cat-vs-dog | 999e1daec16a8afdc4e5476651990c45c3f3db58 | ["MIT"] | 1 | 2021-01-23T09:26:28.000Z | 2021-01-23T09:26:28.000Z |
import os
from secrets import token_hex
import numpy as np
from flask import Flask, render_template, request
from keras_preprocessing.image import img_to_array, load_img
from load import init
from utils import rescale
model = init()
app = Flask(__name__)
app.config["MAX_CONTENT_LENGTH"] = 1024 * 1024 * 4
app.config["UPLOAD_DIRECTORY"] = "uploads"
app.config["DEBUG"] = os.environ.get("DEBUG")
app.config["ENV"] = os.environ.get("ENV")
app.config["SECRET_KEY"] = os.environ.get("SECRET_KEY")
@app.route("/")
def index():
return render_template("index.html")
@app.route("/predict", methods=["POST"])
def predict():
image_data = request.files.get("image")
image_data_filename = os.path.join(
app.config["UPLOAD_DIRECTORY"],
str(token_hex(16)) + os.path.splitext(image_data.filename)[1],
)
image_data.save(image_data_filename)
x = load_img(image_data_filename, target_size=(224, 224))
x = np.array(img_to_array(x))
x = rescale(x)
value = None
pred = model.predict_proba(x.reshape(1, 224, 224, 3)).flatten()
print(pred)
for i in pred:
if 0 <= i <= 0.2:
value = "Cat"
elif 0.99 <= i <= 1:
value = "Dog"
else:
value = "Neither a cat nor a dog"
return {"message": value}
if __name__ == "__main__":
app.run()
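# Example request against a locally running instance (file name hypothetical):
#   curl -F "image=@cat.jpg" http://127.0.0.1:5000/predict
# which returns JSON such as {"message": "Cat"}.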
| 24.071429 | 70 | 0.646142 |
4a1854d5c0b572d58a187fa32d2612b08c7fb1e6 | 2,846 | py | Python | azure-mgmt-network/azure/mgmt/network/v2018_12_01/models/azure_firewall_ip_configuration.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | ["MIT"] | 1 | 2021-09-07T18:36:04.000Z | 2021-09-07T18:36:04.000Z | azure-mgmt-network/azure/mgmt/network/v2018_12_01/models/azure_firewall_ip_configuration.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | ["MIT"] | 2 | 2019-10-02T23:37:38.000Z | 2020-10-02T01:17:31.000Z | azure-mgmt-network/azure/mgmt/network/v2018_12_01/models/azure_firewall_ip_configuration.py | JonathanGailliez/azure-sdk-for-python | f0f051bfd27f8ea512aea6fc0c3212ee9ee0029b | ["MIT"] | null | null | null |
# coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .sub_resource import SubResource
class AzureFirewallIPConfiguration(SubResource):
"""IP configuration of an Azure Firewall.
Variables are only populated by the server, and will be ignored when
sending a request.
:param id: Resource ID.
:type id: str
:ivar private_ip_address: The Firewall Internal Load Balancer IP to be
used as the next hop in User Defined Routes.
:vartype private_ip_address: str
:param subnet: Reference of the subnet resource. This resource must be
named 'AzureFirewallSubnet'.
:type subnet: ~azure.mgmt.network.v2018_12_01.models.SubResource
:param public_ip_address: Reference of the PublicIP resource. This field
is a mandatory input if subnet is not null.
:type public_ip_address:
~azure.mgmt.network.v2018_12_01.models.SubResource
:param provisioning_state: The provisioning state of the resource.
Possible values include: 'Succeeded', 'Updating', 'Deleting', 'Failed'
:type provisioning_state: str or
~azure.mgmt.network.v2018_12_01.models.ProvisioningState
:param name: Name of the resource that is unique within a resource group.
This name can be used to access the resource.
:type name: str
:ivar etag: A unique read-only string that changes whenever the resource
is updated.
:vartype etag: str
"""
_validation = {
'private_ip_address': {'readonly': True},
'etag': {'readonly': True},
}
_attribute_map = {
'id': {'key': 'id', 'type': 'str'},
'private_ip_address': {'key': 'properties.privateIPAddress', 'type': 'str'},
'subnet': {'key': 'properties.subnet', 'type': 'SubResource'},
'public_ip_address': {'key': 'properties.publicIPAddress', 'type': 'SubResource'},
'provisioning_state': {'key': 'properties.provisioningState', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'etag': {'key': 'etag', 'type': 'str'},
}
def __init__(self, **kwargs):
super(AzureFirewallIPConfiguration, self).__init__(**kwargs)
self.private_ip_address = None
self.subnet = kwargs.get('subnet', None)
self.public_ip_address = kwargs.get('public_ip_address', None)
self.provisioning_state = kwargs.get('provisioning_state', None)
self.name = kwargs.get('name', None)
self.etag = None
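# Minimal construction sketch (hedged; resource IDs are placeholders, and the
# generated model only accepts keyword arguments):
#
#     ip_config = AzureFirewallIPConfiguration(
#         name="fw-ipconfig",
#         subnet=SubResource(id="<AzureFirewallSubnet resource ID>"),
#         public_ip_address=SubResource(id="<public IP resource ID>"),
#     )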
| 41.852941 | 90 | 0.64617 |
4a18551a47917529802628329ee7c8267d0039e1 | 10,285 | py | Python | AttributionDraftExp/mnist/get_attribution_features_mnist.py | frkl/Trinity-TrojAI | c0e5ab9d26496469aa0463f33f468cec3d43b25a | ["MIT"] | 16 | 2021-01-05T01:53:54.000Z | 2021-12-16T12:42:16.000Z | AttributionDraftExp/mnist/get_attribution_features_mnist.py | frkl/Trinity-TrojAI | c0e5ab9d26496469aa0463f33f468cec3d43b25a | ["MIT"] | null | null | null | AttributionDraftExp/mnist/get_attribution_features_mnist.py | frkl/Trinity-TrojAI | c0e5ab9d26496469aa0463f33f468cec3d43b25a | ["MIT"] | 4 | 2021-02-24T05:17:14.000Z | 2021-12-16T12:42:18.000Z |
import numpy as np
import torch
import os
import sys
from matplotlib import pyplot as plt
import torch.nn as nn
from captum.attr import LayerIntegratedGradients, LayerGradientXActivation
import skimage.io
import torchvision
import pickle
import scipy.interpolate as interpolate
import helper
import warnings
from tqdm import tqdm
warnings.filterwarnings("ignore")
from torch.utils.data import TensorDataset, DataLoader
# Later push to config/fn.
src = "/data/mnist-dataset"
# src = "/data/ksikka/mount/rebel/ksikka/mnist-dataset"
device = torch.device("cuda")
num_cls = 10
########################################################################
def create_dataloader(data_src):
filenames, labels = list(
zip(
*[
it.strip().split(",")
for it in open(os.path.join(data_src, "labels.txt"))
]
)
)
labels = [int(it) for it in labels]
vecs = torch.stack(
[
torch.from_numpy(skimage.io.imread(os.path.join(data_src, img_loc))).float()
for img_loc in filenames
]
)
labels = torch.Tensor(labels)
dataset = TensorDataset(vecs, labels)
dataloader = DataLoader(dataset, batch_size=2)
return dataloader
########################################################################
if __name__ == "__main__":
model_name = sys.argv[1]
attribution_fn = "GradxAct"
# attribution_fn = "IG"
attribution_dir = "attribution_features_gradxact"
curves_dir = "curve_features_gradxact"
model = torch.load(os.path.join(src, f"{model_name}", "model.pt.1")).to(device)
model_type = type(model).__name__
print(model_type)
model.eval()
# Create hook since last layer is softmax
hook_fn_logit_layer = helper.hook_fn_nn()
hook_fn_feat_layer = helper.hook_fn_nn()
if model_type == "ModdedLeNet5Net":
NUM_NEURONS = 84
MULT_FACTOR = 2
elif model_type == "BadNetExample":
NUM_NEURONS = 512
MULT_FACTOR = 2
elif model_type == "ModdedBadNetExample":
NUM_NEURONS = 512
MULT_FACTOR = 1.5
if model_type != "ModdedBadNetExample":
model.fc[2].register_forward_hook(hook_fn_logit_layer)
model.fc[0].register_forward_hook(hook_fn_feat_layer)
layer_pointer = model.fc[0]
else:
model.fc[0].register_forward_hook(hook_fn_logit_layer)
model.convnet[5].register_forward_hook(hook_fn_feat_layer)
layer_pointer = model.convnet[5]
print("\n\n\nModel-name", model_name)
print("Model-type", model_type)
print("Num_neurons", NUM_NEURONS)
###########################################################################
def forward_fn(
model, dataloader, model_type, attribution_type, compute_attribution=True
):
print(attribution_type)
if attribution_type == "IG":
attribution_fn = LayerIntegratedGradients
use_internal_batch_size = True
elif attribution_type == "GradxAct":
attribution_fn = LayerGradientXActivation
use_internal_batch_size = False
attrib_fn = attribution_fn(model, layer_pointer)
pred = []
gnd = []
logits = []
attribution = []
labels = []
feat = []
for data in tqdm(dataloader):
img, label = data
labels.append(label)
model(img.unsqueeze(1).to(device))
_logits = hook_fn_logit_layer.outputs
logits.append(_logits.data.cpu().numpy())
_feat = hook_fn_feat_layer.outputs
if model_type == "ModdedBadNetExample":
feat.append(_feat.view(_feat.shape[0], -1).data.cpu().numpy())
else:
feat.append(_feat.data.cpu().numpy())
pred.append(_logits.argmax(1).data.cpu().numpy())
gnd.append(label.numpy())
# Compute attribution over all the classes
if compute_attribution:
_attrib = []
for c in range(num_cls):
if use_internal_batch_size:
__attrib = attrib_fn.attribute(
img.unsqueeze(1).to(device),
target=torch.Tensor([c, c]).to(device).long(),
internal_batch_size=4,
)
else:
__attrib = attrib_fn.attribute(
img.unsqueeze(1).to(device),
target=torch.Tensor([c, c]).to(device).long(),
)
# __attrib = attrib_fn.attribute(
# img.unsqueeze(1).to(device),
# target=torch.Tensor([c, c]).to(device).long(),
# )
if __attrib.ndim > 2:
__attrib = __attrib.view(__attrib.shape[0], -1)
_attrib.append(__attrib.unsqueeze(-1).data.cpu().numpy())
attribution.append(np.concatenate(_attrib, axis=-1))
logits = np.vstack(logits)
labels = np.hstack(labels)
feat = np.vstack(feat)
if compute_attribution:
attribution = np.concatenate(attribution, 0)
acc = np.mean(np.hstack(gnd) == np.hstack(pred)) * 100
print("Accuracy is ", acc)
return logits, labels, attribution, feat, acc
###########################################################################
attribution_path = os.path.join(attribution_dir, model_name + ".npy")
if not os.path.exists(attribution_path):
dataloader = create_dataloader(os.path.join(src, "example"))
print("Extracting features")
logits, labels, attribution, feat, acc = forward_fn(
model, dataloader, model_type, attribution_fn
)
# Save these
np.save(
attribution_path,
{
"logits": logits,
"labels": labels,
"attribution": attribution,
"feat": feat,
},
)
else:
tmp = np.load(attribution_path, allow_pickle=True).item()
logits = tmp["logits"]
labels = tmp["labels"]
attribution = tmp["attribution"]
feat = tmp["feat"]
# ipdb.set_trace()
feat_ds = TensorDataset(torch.from_numpy(feat), torch.from_numpy(labels))
feat_dl = DataLoader(feat_ds, batch_size=2)
###########################################################################
def identify_bad_neurons(target, attribution):
tmp = []
for cls in range(num_cls):
if cls == target:
continue
_idx = (labels == cls).nonzero()[0]
attribution_mean = attribution[_idx].mean(0)[:, target]
_idx = attribution_mean > 0
thresh = np.percentile(attribution_mean[_idx], 20)
attribution_mean[attribution_mean < thresh] = 0
tmp.append(attribution_mean)
bad_neurons = np.mean(tmp, 0).argsort()[::-1].tolist()
return bad_neurons
###########################################################################
def ablation_plot(feat_dl, bad_neurons, activation_value=25):
acc_all = []
nn_all = []
N = int(NUM_NEURONS)
for nn in range(0, N, 2):
pred = []
gnd = []
logits_clean = []
# for data in dataloader:
for data in feat_dl:
feat, label = data
feat = feat.to(device)
# model(img.unsqueeze(1).to(device))
# feat = hook_fn_feat_layer.outputs
# Following needs to be replaced
if model_type != "ModdedBadNetExample":
feat[:, bad_neurons[:nn]] = activation_value
logits = model.fc[2](torch.relu(feat))
else:
feat = feat.view(feat.shape[0], -1)
feat[:, bad_neurons[:nn]] = activation_value
logits = model.fc[0](feat)
logits_clean.append(logits.data.cpu().numpy())
pred.append(logits.argmax(1).data.cpu().numpy())
gnd.append(label.numpy())
logits_clean = np.vstack(logits_clean)
acc = np.mean(np.hstack(gnd) == np.hstack(pred)) * 100
acc_all.append(acc)
nn_all.append(int(nn / NUM_NEURONS * 100))
        # % neurons where perf = P
        position = {}
        f = interpolate.interp1d(acc_all, nn_all)
        for P in (20, 40, 60):
            try:
                position[P] = f(P).item()
            except ValueError:
                # P lies outside the interpolated accuracy range
                position[P] = 0
plt.plot(nn_all, acc_all)
plt.plot(nn_all, 20 * np.ones((len(nn_all))))
plt.plot(nn_all, 40 * np.ones((len(nn_all))), color="red")
plt.ylabel("Accuracy")
plt.xlabel("Percentage of neurons triggered in the layer")
plt.title(f"Ablation for class {target}, Position={position[40]}")
print(target, ":", position[20], position[40])
return acc_all, nn_all, position
###########################################################################
fig = plt.figure(figsize=[20, 20])
acc_ablation = []
position = []
# ipdb.set_trace()
M = feat.mean(0).max() * MULT_FACTOR
print("Using activation value", M)
for target in range(num_cls):
ax = plt.subplot(4, 3, target + 1)
bad_neurons = identify_bad_neurons(target, attribution)
_acc, nn_all, pos = ablation_plot(feat_dl, bad_neurons, activation_value=M)
position.append(pos)
acc_ablation.append(_acc)
pickle.dump(
(acc_ablation, nn_all, position),
open(os.path.join(curves_dir, model_name + ".pkl"), "wb"),
)
position = np.asarray(position)
# print(f"Poisoned class is {position.argmin()} with {position.min()}")
plt.savefig(os.path.join(curves_dir, model_name + ".jpg"))
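# Typical invocation (hedged; assumes the /data/mnist-dataset layout above,
# with one sub-directory per model containing model.pt.1 plus a shared
# example/ folder with labels.txt; the model name is hypothetical):
#   python get_attribution_features_mnist.py id-00000001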
| 35.222603 | 88 | 0.532329 |
4a185619632e2342b4012b800bfdaa70b7a725ac | 881 | py | Python | Multi_s.py | mohabedalgani/GFSA | 30a9ebad3d6b7f4f275ab2ea4b8509f64ab7ff40 | ["MIT"] | null | null | null | Multi_s.py | mohabedalgani/GFSA | 30a9ebad3d6b7f4f275ab2ea4b8509f64ab7ff40 | ["MIT"] | null | null | null | Multi_s.py | mohabedalgani/GFSA | 30a9ebad3d6b7f4f275ab2ea4b8509f64ab7ff40 | ["MIT"] | null | null | null |
'''
This file is part of GFLIB toolbox
First Version Sept. 2018
Cite this project as:
Mezher M., Abbod M. (2011) Genetic Folding: A New Class of Evolutionary Algorithms.
In: Bramer M., Petridis M., Hopgood A. (eds) Research and Development in Intelligent Systems XXVII.
SGAI 2010. Springer, London
Copyright (C) 20011-2018 Mohd A. Mezher (mohabedalgani@gmail.com)
'''
import numpy as np
def Multi_s(x1, x2):
    # Use plain "and"/"not" for the dispatch: bitwise "&"/"~" on Python
    # bools misbehaves here (~True == -2 and ~False == -1 are both truthy),
    # which would make the two mixed scalar/vector branches unreachable.
    if np.isscalar(x1) and np.isscalar(x2):
        value = (x1 * x2) ** 2
    elif not np.isscalar(x1) and not np.isscalar(x2):
        value = np.sum(np.dot(x1, x2))
    elif np.isscalar(x1) and not np.isscalar(x2):
        tmp = list(x2)
        tmp.append(x1)
        value = np.sum(np.power(tmp, 2))
    else:
        tmp = list(x1)
        tmp.append(x2)
        value = np.sum(np.power(tmp, 2))
    return value
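# Worked examples (hedged sketch, matching the dispatch above):
#
#     Multi_s(2, 3)            # scalars: (2 * 3) ** 2           -> 36
#     Multi_s([1, 2], [3, 4])  # vectors: sum(dot([1,2],[3,4]))  -> 11
#     Multi_s(2, [3, 4])       # mixed:   sum of squares [3,4,2] -> 29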
| 28.419355 | 103 | 0.584563 |
4a18568abef33b636046658b0a907284aaa171c5 | 3,246 | py | Python | src/health/checker.py | katcipis/spyglass | ceabbf011898e3f07931cfc2f69c117e41171b25 | ["MIT"] | null | null | null | src/health/checker.py | katcipis/spyglass | ceabbf011898e3f07931cfc2f69c117e41171b25 | ["MIT"] | null | null | null | src/health/checker.py | katcipis/spyglass | ceabbf011898e3f07931cfc2f69c117e41171b25 | ["MIT"] | null | null | null |
import asyncio
from collections import namedtuple
from urllib.parse import urlparse
from health.probes import http_probe
HealthCheck = namedtuple(
'HealthCheck',
['url', 'period_sec', 'patterns'],
defaults=(None,),
)
class InvalidParamsError(Exception):
pass
class HealthChecker:
"""
Performs regular checks for healthiness.
Given a set of HealthCheck descriptors it will
probe them (through HTTP) regularly.
"""
def __init__(self, handler, checks):
"""
Creates a new HealthChecker.
The provided checks must be an iterable of HealthCheck, including
all the information required to do probing for healthiness.
The provided handler must be a coroutine (awaitable) that will
receive the as parameters:
- url : The url that has been probed
- status: A health.status.HealthStatus
The handler will be responsible for handling results for all the
provided health check targets.
"""
if len(checks) == 0:
raise InvalidParamsError(
"HealthChecker needs at least one HealthCheck defined")
for check in checks:
            try:
                res = urlparse(check.url)
            except Exception as err:
                raise InvalidParamsError(
                    f"can't parse health check url '{check.url}', err: '{err}'")
            # Validate outside the try block so these specific errors are
            # not swallowed and re-wrapped by the generic handler above.
            if res.scheme == "":
                raise InvalidParamsError(
                    f"url '{check.url}' doesn't have a scheme")
            if res.netloc == "":
                raise InvalidParamsError(
                    f"url '{check.url}' doesn't have a domain")
if check.period_sec <= 0:
psec = check.period_sec
raise InvalidParamsError(
f"period_sec must be a positive value, got: {psec}")
self.__checks = checks
self.__handler = handler
self.__run = False
def start(self):
"""
Starts to periodically check for healthiness.
Calling it will start multiple asynchronous tasks that
will periodically probe HTTP endpoints and call
a handler with the results.
Calling start on a checker that is already started
will be ignored.
The created asyncio tasks will be returned, so the caller can
use them to wait for completion, although in a normal scenario
the tasks will never finish (unless the stop method is called).
"""
if self.__run:
return
self.__run = True
tasks = []
for check in self.__checks:
tasks.append(asyncio.create_task(self.__probe_scheduler(check)))
return tasks
def stop(self):
"""
Stops the periodical check for healthiness.
Calling stop on a checker that is already stopped
will be ignored.
"""
self.__run = False
async def __probe_scheduler(self, check):
while self.__run:
await asyncio.sleep(check.period_sec)
status = await http_probe(check.url, check.patterns)
await self.__handler(check.url, status)
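# Minimal usage sketch (hedged; assumes the module layout above and an
# endpoint that actually answers):
#
#     import asyncio
#     from health.checker import HealthChecker, HealthCheck
#
#     async def handler(url, status):
#         print(url, status)
#
#     async def main():
#         checker = HealthChecker(handler, [
#             HealthCheck(url="http://localhost:8080/health", period_sec=5),
#         ])
#         await asyncio.gather(*checker.start())
#
#     asyncio.run(main())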
| 29.779817 | 76 | 0.597043 |
4a185704be6055275c9e4d555e2ce35a93aeb32d | 1,322 | py | Python | manager/get_api_gateway_endpoint.py | gvvynplaine/cortex | aa3daf0d138a880df29a2c075af41176119da47f | ["Apache-2.0"] | 1 | 2021-12-08T03:43:30.000Z | 2021-12-08T03:43:30.000Z | manager/get_api_gateway_endpoint.py | ourobouros/cortex | 1b3aaf909816b93f6a6e3edd0da8c10891e05be9 | ["Apache-2.0"] | null | null | null | manager/get_api_gateway_endpoint.py | ourobouros/cortex | 1b3aaf909816b93f6a6e3edd0da8c10891e05be9 | ["Apache-2.0"] | 1 | 2021-05-25T03:49:14.000Z | 2021-05-25T03:49:14.000Z |
# Copyright 2020 Cortex Labs, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import boto3
import os
def get_api_gateway_endpoint():
cluster_name = os.environ["CORTEX_CLUSTER_NAME"]
region = os.environ["CORTEX_REGION"]
client_apigateway = boto3.client("apigatewayv2", region_name=region)
paginator = client_apigateway.get_paginator("get_apis")
for api_gateway_page in paginator.paginate():
for api_gateway in api_gateway_page["Items"]:
if api_gateway["Tags"].get("cortex.dev/cluster-name") == cluster_name:
return api_gateway["ApiEndpoint"]
raise Exception(
f"your cluster's api gateway (in {region} with tag cortex.dev/cluster-name={cluster_name}) does not exist"
)
if __name__ == "__main__":
print(get_api_gateway_endpoint(), end="")
| 35.72973 | 114 | 0.726929 |
4a1857129518cb6b6fafd1444c9a16a02150ff98 | 6,907 | py | Python | kubernetes/client/models/v1_persistent_volume_claim_status.py | mariusgheorghies/python | 68ac7e168963d8b5a81dc493b1973d29e903a15b | ["Apache-2.0"] | null | null | null | kubernetes/client/models/v1_persistent_volume_claim_status.py | mariusgheorghies/python | 68ac7e168963d8b5a81dc493b1973d29e903a15b | ["Apache-2.0"] | null | null | null | kubernetes/client/models/v1_persistent_volume_claim_status.py | mariusgheorghies/python | 68ac7e168963d8b5a81dc493b1973d29e903a15b | ["Apache-2.0"] | null | null | null |
# coding: utf-8
"""
Kubernetes
No description provided (generated by Openapi Generator https://github.com/openapitools/openapi-generator) # noqa: E501
The version of the OpenAPI document: v1.20.7
Generated by: https://openapi-generator.tech
"""
import pprint
import re # noqa: F401
import six
from kubernetes.client.configuration import Configuration
class V1PersistentVolumeClaimStatus(object):
"""NOTE: This class is auto generated by OpenAPI Generator.
Ref: https://openapi-generator.tech
Do not edit the class manually.
"""
"""
Attributes:
openapi_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
openapi_types = {
'access_modes': 'list[str]',
'capacity': 'dict(str, str)',
'conditions': 'list[V1PersistentVolumeClaimCondition]',
'phase': 'str'
}
attribute_map = {
'access_modes': 'accessModes',
'capacity': 'capacity',
'conditions': 'conditions',
'phase': 'phase'
}
def __init__(self, access_modes=None, capacity=None, conditions=None, phase=None, local_vars_configuration=None): # noqa: E501
"""V1PersistentVolumeClaimStatus - a model defined in OpenAPI""" # noqa: E501
if local_vars_configuration is None:
local_vars_configuration = Configuration()
self.local_vars_configuration = local_vars_configuration
self._access_modes = None
self._capacity = None
self._conditions = None
self._phase = None
self.discriminator = None
if access_modes is not None:
self.access_modes = access_modes
if capacity is not None:
self.capacity = capacity
if conditions is not None:
self.conditions = conditions
if phase is not None:
self.phase = phase
@property
def access_modes(self):
"""Gets the access_modes of this V1PersistentVolumeClaimStatus. # noqa: E501
AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 # noqa: E501
:return: The access_modes of this V1PersistentVolumeClaimStatus. # noqa: E501
:rtype: list[str]
"""
return self._access_modes
@access_modes.setter
def access_modes(self, access_modes):
"""Sets the access_modes of this V1PersistentVolumeClaimStatus.
AccessModes contains the actual access modes the volume backing the PVC has. More info: https://kubernetes.io/docs/concepts/storage/persistent-volumes#access-modes-1 # noqa: E501
:param access_modes: The access_modes of this V1PersistentVolumeClaimStatus. # noqa: E501
:type: list[str]
"""
self._access_modes = access_modes
@property
def capacity(self):
"""Gets the capacity of this V1PersistentVolumeClaimStatus. # noqa: E501
Represents the actual resources of the underlying volume. # noqa: E501
:return: The capacity of this V1PersistentVolumeClaimStatus. # noqa: E501
:rtype: dict(str, str)
"""
return self._capacity
@capacity.setter
def capacity(self, capacity):
"""Sets the capacity of this V1PersistentVolumeClaimStatus.
Represents the actual resources of the underlying volume. # noqa: E501
:param capacity: The capacity of this V1PersistentVolumeClaimStatus. # noqa: E501
:type: dict(str, str)
"""
self._capacity = capacity
@property
def conditions(self):
"""Gets the conditions of this V1PersistentVolumeClaimStatus. # noqa: E501
Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'. # noqa: E501
:return: The conditions of this V1PersistentVolumeClaimStatus. # noqa: E501
:rtype: list[V1PersistentVolumeClaimCondition]
"""
return self._conditions
@conditions.setter
def conditions(self, conditions):
"""Sets the conditions of this V1PersistentVolumeClaimStatus.
Current Condition of persistent volume claim. If underlying persistent volume is being resized then the Condition will be set to 'ResizeStarted'. # noqa: E501
:param conditions: The conditions of this V1PersistentVolumeClaimStatus. # noqa: E501
:type: list[V1PersistentVolumeClaimCondition]
"""
self._conditions = conditions
@property
def phase(self):
"""Gets the phase of this V1PersistentVolumeClaimStatus. # noqa: E501
Phase represents the current phase of PersistentVolumeClaim. # noqa: E501
:return: The phase of this V1PersistentVolumeClaimStatus. # noqa: E501
:rtype: str
"""
return self._phase
@phase.setter
def phase(self, phase):
"""Sets the phase of this V1PersistentVolumeClaimStatus.
Phase represents the current phase of PersistentVolumeClaim. # noqa: E501
:param phase: The phase of this V1PersistentVolumeClaimStatus. # noqa: E501
:type: str
"""
self._phase = phase
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.openapi_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, V1PersistentVolumeClaimStatus):
return False
return self.to_dict() == other.to_dict()
def __ne__(self, other):
"""Returns true if both objects are not equal"""
if not isinstance(other, V1PersistentVolumeClaimStatus):
return True
return self.to_dict() != other.to_dict()
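# Minimal usage sketch (hypothetical values):
#
#     status = V1PersistentVolumeClaimStatus(
#         access_modes=["ReadWriteOnce"],
#         capacity={"storage": "8Gi"},
#         phase="Bound",
#     )
#     print(status.to_dict())
#     # -> {'access_modes': ['ReadWriteOnce'], 'capacity': {'storage': '8Gi'},
#     #     'conditions': None, 'phase': 'Bound'}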
| 33.36715 | 187 | 0.63385 |
4a185774b926302d24142d6432fef830f03799f7 | 39,766 | py | Python | web/server/codechecker_server/server.py | gocarlos/codechecker | 17678b103375a9dbff6e69328d0996d54de78ef3 | ["Apache-2.0"] | null | null | null | web/server/codechecker_server/server.py | gocarlos/codechecker | 17678b103375a9dbff6e69328d0996d54de78ef3 | ["Apache-2.0"] | null | null | null | web/server/codechecker_server/server.py | gocarlos/codechecker | 17678b103375a9dbff6e69328d0996d54de78ef3 | ["Apache-2.0"] | null | null | null |
# -------------------------------------------------------------------------
#
# Part of the CodeChecker project, under the Apache License v2.0 with
# LLVM Exceptions. See LICENSE for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# -------------------------------------------------------------------------
"""
Main server starts a http server which handles Thrift client
and browser requests.
"""
import atexit
import datetime
import errno
from hashlib import sha256
from multiprocessing.pool import ThreadPool
import os
import posixpath
from random import sample
import shutil
import signal
import socket
import ssl
import sys
import stat
import urllib
from http.server import HTTPServer, BaseHTTPRequestHandler, \
SimpleHTTPRequestHandler
from sqlalchemy.orm import sessionmaker
from thrift.protocol import TJSONProtocol
from thrift.transport import TTransport
from thrift.Thrift import TApplicationException
from thrift.Thrift import TMessageType
from codechecker_api_shared.ttypes import DBStatus
from codechecker_api.Authentication_v6 import \
codeCheckerAuthentication as AuthAPI_v6
from codechecker_api.Configuration_v6 import \
configurationService as ConfigAPI_v6
from codechecker_api.codeCheckerDBAccess_v6 import \
codeCheckerDBAccess as ReportAPI_v6
from codechecker_api.ProductManagement_v6 import \
codeCheckerProductService as ProductAPI_v6
from codechecker_common.logger import get_logger
from codechecker_web.shared.version import get_version_str
from . import instance_manager
from . import permissions
from . import routing
from . import session_manager
from .tmp import get_tmp_dir_hash
from .api.authentication import ThriftAuthHandler as AuthHandler_v6
from .api.config_handler import ThriftConfigHandler as ConfigHandler_v6
from .api.product_server import ThriftProductHandler as ProductHandler_v6
from .api.report_server import ThriftRequestHandler as ReportHandler_v6
from .database import database, db_cleanup
from .database.config_db_model import Product as ORMProduct, \
Configuration as ORMConfiguration
from .database.database import DBSession
from .database.run_db_model import IDENTIFIER as RUN_META, Run, RunLock
LOG = get_logger('server')
class RequestHandler(SimpleHTTPRequestHandler):
"""
Handle thrift and browser requests
Simply modified and extended version of SimpleHTTPRequestHandler
"""
auth_session = None
def __init__(self, request, client_address, server):
BaseHTTPRequestHandler.__init__(self,
request,
client_address,
server)
def log_message(self, msg_format, *args):
""" Silencing http server. """
return
def send_thrift_exception(self, error_msg, iprot, oprot, otrans):
"""
Send an exception response to the client in a proper format which can
be parsed by the Thrift clients expecting JSON responses.
"""
ex = TApplicationException(TApplicationException.INTERNAL_ERROR,
error_msg)
fname, _, seqid = iprot.readMessageBegin()
oprot.writeMessageBegin(fname, TMessageType.EXCEPTION, seqid)
ex.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
result = otrans.getvalue()
self.send_response(200)
self.send_header("content-type", "application/x-thrift")
self.send_header("Content-Length", len(result))
self.end_headers()
self.wfile.write(result)
def __check_session_cookie(self):
"""
Check the CodeChecker privileged access cookie in the request headers.
:returns: A session_manager._Session object if a correct, valid session
cookie was found in the headers. None, otherwise.
"""
if not self.server.manager.is_enabled:
return None
session = None
# Check if the user has presented a privileged access cookie.
cookies = self.headers.get("Cookie")
if cookies:
split = cookies.split("; ")
for cookie in split:
values = cookie.split("=")
if len(values) == 2 and \
values[0] == session_manager.SESSION_COOKIE_NAME:
session = self.server.manager.get_session(values[1])
if session and session.is_alive:
# If a valid session token was found and it can still be used,
# mark that the user's last access to the server was the
# request that resulted in the execution of this function.
session.revalidate()
return session
else:
# If the user's access cookie is no longer usable (invalid),
# present an error.
client_host, client_port, is_ipv6 = \
RequestHandler._get_client_host_port(self.client_address)
LOG.debug("%s:%s Invalid access, credentials not found - "
"session refused",
client_host if not is_ipv6 else '[' + client_host + ']',
str(client_port))
return None
def __has_access_permission(self, product):
"""
Returns True if the currently authenticated user has access permission
on the given product.
"""
with DBSession(self.server.config_session) as session:
perm_args = {'productID': product.id,
'config_db_session': session}
return permissions.require_permission(
permissions.PRODUCT_ACCESS,
perm_args,
self.auth_session)
def __handle_readiness(self):
""" Handle readiness probe. """
        cfg_sess = None
        try:
            cfg_sess = self.server.config_session()
            cfg_sess.query(ORMConfiguration).count()
            self.send_response(200)
            self.end_headers()
            self.wfile.write(b'CODECHECKER_SERVER_IS_READY')
        except Exception:
            self.send_response(500)
            self.end_headers()
            self.wfile.write(b'CODECHECKER_SERVER_IS_NOT_READY')
        finally:
            # Commit before closing: a closed session can no longer commit,
            # and cfg_sess may be unset if the session factory itself failed.
            if cfg_sess:
                cfg_sess.commit()
                cfg_sess.close()
def __handle_liveness(self):
""" Handle liveness probe. """
self.send_response(200)
self.end_headers()
self.wfile.write(b'CODECHECKER_SERVER_IS_LIVE')
def end_headers(self):
        # Send the authentication cookie in every response, if there is one.
        # This keeps the session cookie on the clients up to date.
if self.auth_session:
token = self.auth_session.token
if token:
self.send_header(
"Set-Cookie",
"{0}={1}; Path=/".format(
session_manager.SESSION_COOKIE_NAME,
token))
# Set the current user name in the header.
user_name = self.auth_session.user
if user_name:
self.send_header("X-User", user_name)
SimpleHTTPRequestHandler.end_headers(self)
@staticmethod
def _get_client_host_port(address):
"""
Returns the host and port of the request's address, and whether it
was an IPv6 address.
"""
if len(address) == 2:
return address[0], address[1], False
if len(address) == 4:
return address[0], address[1], True
raise IndexError("Invalid address tuple given.")
def do_GET(self):
""" Handles the SPA browser access (GET requests).
It will do the following steps:
- for requests for index.html ('/'), just respond with the file.
- if the requested path contains a product endpoint name
('/prod/app.js', '/prod/runs'), remove the endpoint from the path.
- if the requested path is a valid file (e.g: 'app.js'), respond with
the file.
- otherwise (e.g: 'runs') respond with index.html.
"""
client_host, client_port, is_ipv6 = \
RequestHandler._get_client_host_port(self.client_address)
self.auth_session = self.__check_session_cookie()
username = self.auth_session.user if self.auth_session else 'Anonymous'
LOG.debug("%s:%s -- [%s] GET %s",
client_host if not is_ipv6 else '[' + client_host + ']',
client_port, username, self.path)
if self.path == '/':
self.path = 'index.html'
SimpleHTTPRequestHandler.do_GET(self)
return
if self.path == '/live':
self.__handle_liveness()
return
if self.path == '/ready':
self.__handle_readiness()
return
product_endpoint, _ = routing.split_client_GET_request(self.path)
# Check that path contains a product endpoint.
if product_endpoint is not None and product_endpoint != '':
self.path = self.path.replace(
"{0}/".format(product_endpoint), "", 1)
if self.path == '/':
self.path = "index.html"
# Check that the given path is a file.
if not os.path.exists(self.translate_path(self.path)):
self.path = 'index.html'
SimpleHTTPRequestHandler.do_GET(self)
def __check_prod_db(self, product_endpoint):
"""
Check the product database status.
Try to reconnect in some cases.
Returns if everything is ok with the database or throw an exception
with the error message if something is wrong with the database.
"""
product = self.server.get_product(product_endpoint)
if not product:
raise ValueError(
"The product with the given endpoint '{0}' does "
"not exist!".format(product_endpoint))
if product.db_status == DBStatus.OK:
# No reconnect needed.
return product
# Try to reconnect in these cases.
# Do not try to reconnect if there is a schema mismatch.
# If the product is not connected, try reconnecting...
if product.db_status in [DBStatus.FAILED_TO_CONNECT,
DBStatus.MISSING,
DBStatus.SCHEMA_INIT_ERROR]:
LOG.error("Request's product '%s' is not connected! "
"Attempting reconnect...", product.endpoint)
product.connect()
if product.db_status != DBStatus.OK:
# If the reconnection fails send an error to the user.
LOG.debug("Product reconnection failed.")
error_msg = "'{0}' database connection " \
"failed!".format(product.endpoint)
LOG.error(error_msg)
raise ValueError(error_msg)
else:
# Send an error to the user.
db_stat = DBStatus._VALUES_TO_NAMES.get(product.db_status)
error_msg = "'{0}' database connection " \
"failed. DB status: {1}".format(product.endpoint,
str(db_stat))
LOG.error(error_msg)
raise ValueError(error_msg)
return product
def do_POST(self):
"""
Handles POST queries, which are usually Thrift messages.
"""
client_host, client_port, is_ipv6 = \
RequestHandler._get_client_host_port(self.client_address)
self.auth_session = self.__check_session_cookie()
LOG.info("%s:%s -- [%s] POST %s",
client_host if not is_ipv6 else '[' + client_host + ']',
client_port,
self.auth_session.user if self.auth_session else "Anonymous",
self.path)
# Create new thrift handler.
checker_md_docs = self.server.checker_md_docs
checker_md_docs_map = self.server.checker_md_docs_map
version = self.server.version
protocol_factory = TJSONProtocol.TJSONProtocolFactory()
input_protocol_factory = protocol_factory
output_protocol_factory = protocol_factory
itrans = TTransport.TFileObjectTransport(self.rfile)
itrans = TTransport.TBufferedTransport(itrans,
int(self.headers[
'Content-Length']))
otrans = TTransport.TMemoryBuffer()
iprot = input_protocol_factory.getProtocol(itrans)
oprot = output_protocol_factory.getProtocol(otrans)
if self.server.manager.is_enabled and \
not self.path.endswith(('/Authentication',
'/Configuration')) and \
not self.auth_session:
# Bail out if the user is not authenticated...
# This response has the possibility of melting down Thrift clients,
# but the user is expected to properly authenticate first.
LOG.debug("%s:%s Invalid access, credentials not found "
"- session refused.",
client_host if not is_ipv6 else '[' + client_host + ']',
str(client_port))
self.send_thrift_exception("Error code 401: Unauthorized!", iprot,
oprot, otrans)
return
# Authentication is handled, we may now respond to the user.
try:
product_endpoint, api_ver, request_endpoint = \
routing.split_client_POST_request(self.path)
product = None
if product_endpoint:
# The current request came through a product route, and not
# to the main endpoint.
product = self.__check_prod_db(product_endpoint)
version_supported = routing.is_supported_version(api_ver)
if version_supported:
major_version, _ = version_supported
if major_version == 6:
if request_endpoint == 'Authentication':
auth_handler = AuthHandler_v6(
self.server.manager,
self.auth_session,
self.server.config_session)
processor = AuthAPI_v6.Processor(auth_handler)
elif request_endpoint == 'Configuration':
conf_handler = ConfigHandler_v6(
self.auth_session,
self.server.config_session)
processor = ConfigAPI_v6.Processor(conf_handler)
elif request_endpoint == 'Products':
prod_handler = ProductHandler_v6(
self.server,
self.auth_session,
self.server.config_session,
product,
version)
processor = ProductAPI_v6.Processor(prod_handler)
elif request_endpoint == 'CodeCheckerService':
# This endpoint is a product's report_server.
if not product:
error_msg = "Requested CodeCheckerService on a " \
"nonexistent product: '{0}'." \
.format(product_endpoint)
LOG.error(error_msg)
raise ValueError(error_msg)
if product_endpoint:
# The current request came through a
# product route, and not to the main endpoint.
product = self.__check_prod_db(product_endpoint)
acc_handler = ReportHandler_v6(
self.server.manager,
product.session_factory,
product,
self.auth_session,
self.server.config_session,
checker_md_docs,
checker_md_docs_map,
version,
self.server.context)
processor = ReportAPI_v6.Processor(acc_handler)
else:
LOG.debug("This API endpoint does not exist.")
error_msg = "No API endpoint named '{0}'." \
.format(self.path)
raise ValueError(error_msg)
else:
error_msg = "The API version you are using is not supported " \
"by this server (server API version: {0})!".format(
get_version_str())
self.send_thrift_exception(error_msg, iprot, oprot, otrans)
return
processor.process(iprot, oprot)
result = otrans.getvalue()
self.send_response(200)
self.send_header("content-type", "application/x-thrift")
self.send_header("Content-Length", len(result))
self.end_headers()
self.wfile.write(result)
return
except Exception as exn:
LOG.warning(str(exn))
import traceback
traceback.print_exc()
cstringio_buf = itrans.cstringio_buf.getvalue()
if cstringio_buf:
itrans = TTransport.TMemoryBuffer(cstringio_buf)
iprot = input_protocol_factory.getProtocol(itrans)
self.send_thrift_exception(str(exn), iprot, oprot, otrans)
return
def list_directory(self, path):
""" Disable directory listing. """
self.send_error(405, "No permission to list directory")
return None
def translate_path(self, path):
"""
Modified version from SimpleHTTPRequestHandler.
Path is set to www_root.
"""
# Abandon query parameters.
path = path.split('?', 1)[0]
path = path.split('#', 1)[0]
path = posixpath.normpath(urllib.parse.unquote(path))
words = path.split('/')
words = [_f for _f in words if _f]
path = self.server.www_root
for word in words:
_, word = os.path.splitdrive(word)
_, word = os.path.split(word)
if word in (os.curdir, os.pardir):
continue
path = os.path.join(path, word)
return path
class Product(object):
"""
Represents a product, which is a distinct storage of analysis reports in
a separate database (and database connection) with its own access control.
"""
# The amount of SECONDS that need to pass after the last unsuccessful
# connect() call so the next could be made.
CONNECT_RETRY_TIMEOUT = 300
def __init__(self, orm_object, context, check_env):
"""
Set up a new managed product object for the configuration given.
"""
self.__id = orm_object.id
self.__endpoint = orm_object.endpoint
self.__connection_string = orm_object.connection
self.__display_name = orm_object.display_name
self.__driver_name = None
self.__context = context
self.__check_env = check_env
self.__engine = None
self.__session = None
self.__db_status = DBStatus.MISSING
self.__last_connect_attempt = None
@property
def id(self):
return self.__id
@property
def endpoint(self):
"""
Returns the accessible URL endpoint of the product.
"""
return self.__endpoint
@property
def name(self):
"""
Returns the display name of the product.
"""
return self.__display_name
@property
def session_factory(self):
"""
Returns the session maker on this product's database engine which
can be used to initiate transactional connections.
"""
return self.__session
@property
def driver_name(self):
"""
Returns the name of the sql driver (sqlite, postgres).
"""
return self.__driver_name
@property
def db_status(self):
"""
Returns the status of the database which belongs to this product.
Call connect to update it.
"""
return self.__db_status
@property
def last_connection_failure(self):
"""
Returns the reason behind the last executed connection attempt's
failure.
"""
return self.__last_connect_attempt[1] if self.__last_connect_attempt \
else None
def connect(self, init_db=False):
"""
Initiates the actual connection to the database configured for the
product.
Each time the connect is called the db_status is updated.
"""
LOG.debug("Checking '%s' database.", self.endpoint)
sql_server = database.SQLServer.from_connection_string(
self.__connection_string,
RUN_META,
self.__context.run_migration_root,
interactive=False,
env=self.__check_env)
if isinstance(sql_server, database.PostgreSQLServer):
self.__driver_name = 'postgresql'
elif isinstance(sql_server, database.SQLiteDatabase):
self.__driver_name = 'sqlite'
try:
LOG.debug("Trying to connect to the database")
# Create the SQLAlchemy engine.
self.__engine = sql_server.create_engine()
LOG.debug(self.__engine)
self.__session = sessionmaker(bind=self.__engine)
self.__engine.execute('SELECT 1')
self.__db_status = sql_server.check_schema()
self.__last_connect_attempt = None
if self.__db_status == DBStatus.SCHEMA_MISSING and init_db:
LOG.debug("Initializing new database schema.")
self.__db_status = sql_server.connect(init_db)
except Exception as ex:
LOG.exception("The database for product '%s' cannot be"
" connected to.", self.endpoint)
self.__db_status = DBStatus.FAILED_TO_CONNECT
self.__last_connect_attempt = (datetime.datetime.now(), str(ex))
def get_details(self):
"""
Get details for a product from the database.
It may throw different error messages depending on the used SQL driver
adapter in case of connection error.
"""
with DBSession(self.session_factory) as run_db_session:
run_locks = run_db_session.query(RunLock.name) \
.filter(RunLock.locked_at.isnot(None)) \
.all()
runs_in_progress = set([run_lock[0] for run_lock in run_locks])
num_of_runs = run_db_session.query(Run).count()
latest_store_to_product = ""
if num_of_runs:
last_updated_run = run_db_session.query(Run) \
.order_by(Run.date.desc()) \
.limit(1) \
.one_or_none()
latest_store_to_product = last_updated_run.date
return num_of_runs, runs_in_progress, latest_store_to_product
def teardown(self):
"""
Disposes the database connection to the product's backend.
"""
if self.__db_status == DBStatus.FAILED_TO_CONNECT:
return
self.__engine.dispose()
self.__session = None
self.__engine = None
def cleanup_run_db(self):
"""
Cleanup the run database which belongs to this product.
"""
LOG.info("Garbage collection for product '%s' started...",
self.endpoint)
db_cleanup.remove_expired_run_locks(self.session_factory)
db_cleanup.remove_unused_files(self.session_factory)
db_cleanup.upgrade_severity_levels(self.session_factory,
self.__context.severity_map)
LOG.info("Garbage collection finished.")
return True
class CCSimpleHttpServer(HTTPServer):
"""
Simple http server to handle requests from the clients.
"""
daemon_threads = False
address_family = socket.AF_INET # IPv4
def __init__(self,
server_address,
RequestHandlerClass,
config_directory,
product_db_sql_server,
skip_db_cleanup,
pckg_data,
context,
check_env,
manager):
LOG.debug("Initializing HTTP server...")
self.config_directory = config_directory
self.www_root = pckg_data['www_root']
self.doc_root = pckg_data['doc_root']
self.checker_md_docs = pckg_data['checker_md_docs']
self.checker_md_docs_map = pckg_data['checker_md_docs_map']
self.version = pckg_data['version']
self.context = context
self.check_env = check_env
self.manager = manager
self.__products = {}
# Create a database engine for the configuration database.
LOG.debug("Creating database engine for CONFIG DATABASE...")
self.__engine = product_db_sql_server.create_engine()
self.config_session = sessionmaker(bind=self.__engine)
self.manager.set_database_connection(self.config_session)
# Load the initial list of products and set up the server.
cfg_sess = self.config_session()
permissions.initialise_defaults('SYSTEM', {
'config_db_session': cfg_sess
})
products = cfg_sess.query(ORMProduct).all()
for product in products:
self.add_product(product)
permissions.initialise_defaults('PRODUCT', {
'config_db_session': cfg_sess,
'productID': product.id
})
cfg_sess.commit()
cfg_sess.close()
if not skip_db_cleanup:
for endpoint, product in self.__products.items():
if not product.cleanup_run_db():
LOG.warning("Cleaning database for %s Failed.", endpoint)
worker_processes = self.manager.worker_processes
self.__request_handlers = ThreadPool(processes=worker_processes)
try:
HTTPServer.__init__(self, server_address,
RequestHandlerClass,
bind_and_activate=True)
ssl_key_file = os.path.join(config_directory, "key.pem")
ssl_cert_file = os.path.join(config_directory, "cert.pem")
if os.path.isfile(ssl_key_file) and os.path.isfile(ssl_cert_file):
LOG.info("Initiating SSL. Server listening on secure socket.")
LOG.debug("Using cert file: %s", ssl_cert_file)
LOG.debug("Using key file: %s", ssl_key_file)
self.socket = ssl.wrap_socket(self.socket, server_side=True,
keyfile=ssl_key_file,
certfile=ssl_cert_file)
else:
LOG.info("Searching for SSL key at %s, cert at %s, "
"not found...", ssl_key_file, ssl_cert_file)
LOG.info("Falling back to simple, insecure HTTP.")
except Exception as e:
LOG.error("Couldn't start the server: %s", e.__str__())
raise
def terminate(self):
"""
Terminating the server.
"""
try:
self.server_close()
self.__engine.dispose()
self.__request_handlers.terminate()
self.__request_handlers.join()
except Exception as ex:
LOG.error("Failed to shut down the WEB server!")
LOG.error(str(ex))
sys.exit(1)
def process_request_thread(self, request, client_address):
try:
# Finish_request instantiates request handler class.
self.finish_request(request, client_address)
self.shutdown_request(request)
except socket.error as serr:
if serr.errno == errno.EPIPE:
LOG.debug("Broken pipe")
LOG.debug(serr)
self.shutdown_request(request)
except Exception as ex:
LOG.debug(ex)
self.handle_error(request, client_address)
self.shutdown_request(request)
def process_request(self, request, client_address):
self.__request_handlers.apply_async(self.process_request_thread,
(request, client_address))
def add_product(self, orm_product, init_db=False):
"""
Adds a product to the list of product databases connected to
by the server.
Checks the database connection for the product databases.
"""
if orm_product.endpoint in self.__products:
LOG.debug("This product is already configured!")
return
LOG.debug("Setting up product '%s'", orm_product.endpoint)
prod = Product(orm_product,
self.context,
self.check_env)
# Update the product database status.
prod.connect()
if prod.db_status == DBStatus.SCHEMA_MISSING and init_db:
LOG.debug("Schema was missing in the database. Initializing new")
prod.connect(init_db=True)
self.__products[prod.endpoint] = prod
@property
def num_products(self):
"""
Returns the number of products currently mounted by the server.
"""
return len(self.__products)
def get_product(self, endpoint):
"""
Get the product connection object for the given endpoint, or None.
"""
if endpoint in self.__products:
return self.__products.get(endpoint)
LOG.debug("Product with the given endpoint '%s' does not exist in "
"the local cache. Try to get it from the database.",
endpoint)
        # If the product isn't found in the cache, try to get it from the
        # database.
        cfg_sess = None
        try:
            cfg_sess = self.config_session()
            product = cfg_sess.query(ORMProduct) \
                .filter(ORMProduct.endpoint == endpoint) \
                .limit(1).one_or_none()
            if not product:
                return None
            self.add_product(product)
            permissions.initialise_defaults('PRODUCT', {
                'config_db_session': cfg_sess,
                'productID': product.id
            })
            return self.__products.get(endpoint, None)
        finally:
            # Commit before closing (see __handle_readiness); cfg_sess may
            # be unset if the session factory itself failed.
            if cfg_sess:
                cfg_sess.commit()
                cfg_sess.close()
def get_only_product(self):
"""
Returns the Product object for the only product connected to by the
server, or None, if there are 0 or >= 2 products managed.
"""
return list(self.__products.items())[0][1] if self.num_products == 1 \
else None
def remove_product(self, endpoint):
product = self.get_product(endpoint)
if not product:
raise ValueError("The product with the given endpoint '{0}' does "
"not exist!".format(endpoint))
LOG.info("Disconnecting product '%s'", endpoint)
product.teardown()
del self.__products[endpoint]
def remove_products_except(self, endpoints_to_keep):
"""
Removes EVERY product connection from the server except those
endpoints specified in :endpoints_to_keep.
"""
        # Iterate over a snapshot of the keys: remove_product() mutates the
        # underlying dict, which would otherwise break the iteration.
        for ep in list(self.__products.keys()):
            if ep not in endpoints_to_keep:
                self.remove_product(ep)
class CCSimpleHttpServerIPv6(CCSimpleHttpServer):
"""
CodeChecker HTTP simple server that listens over an IPv6 socket.
"""
address_family = socket.AF_INET6
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def __make_root_file(root_file):
"""
Generate a root username and password SHA. This hash is saved to the
given file path, and is also returned.
"""
LOG.debug("Generating initial superuser (root) credentials...")
username = ''.join(sample("ABCDEFGHIJKLMNOPQRSTUVWXYZ", 6))
password = get_tmp_dir_hash()[:8]
LOG.info("A NEW superuser credential was generated for the server. "
"This information IS SAVED, thus subsequent server starts "
"WILL use these credentials. You WILL NOT get to see "
"the credentials again, so MAKE SURE YOU REMEMBER THIS "
"LOGIN!")
# Highlight the message a bit more, as the server owner configuring the
# server must know this root access initially.
credential_msg = "The superuser's username is '{0}' with the " \
"password '{1}'".format(username, password)
LOG.info("-" * len(credential_msg))
LOG.info(credential_msg)
LOG.info("-" * len(credential_msg))
sha = sha256((username + ':' + password).encode('utf-8')).hexdigest()
with open(root_file, 'w', encoding="utf-8", errors="ignore") as f:
LOG.debug("Save root SHA256 '%s'", sha)
f.write(sha)
    # This file should be readable only by the process owner, and no one else.
os.chmod(root_file, stat.S_IRUSR)
return sha
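# Illustrative example (not from the original source; names are placeholders):
# with username 'KQZJXW' and password 'a1b2c3d4', the stored value would be
# sha256(b'KQZJXW:a1b2c3d4').hexdigest() -- only the digest ever hits disk.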
def start_server(config_directory, package_data, port, config_sql_server,
listen_address, force_auth, skip_db_cleanup,
context, check_env):
"""
Start http server to handle web client and thrift requests.
"""
LOG.debug("Starting CodeChecker server...")
server_addr = (listen_address, port)
root_file = os.path.join(config_directory, 'root.user')
if not os.path.exists(root_file):
LOG.warning("Server started without 'root.user' present in "
"CONFIG_DIRECTORY!")
root_sha = __make_root_file(root_file)
else:
LOG.debug("Root file was found. Loading...")
try:
with open(root_file, 'r', encoding="utf-8", errors="ignore") as f:
root_sha = f.read()
LOG.debug("Root digest is '%s'", root_sha)
except IOError:
LOG.info("Cannot open root file '%s' even though it exists",
root_file)
root_sha = __make_root_file(root_file)
# Check whether configuration file exists, create an example if not.
server_cfg_file = os.path.join(config_directory, 'server_config.json')
if not os.path.exists(server_cfg_file):
        # For backward compatibility, if the session_config.json file exists,
        # we rename it to server_config.json.
session_cfg_file = os.path.join(config_directory,
'session_config.json')
example_cfg_file = os.path.join(os.environ['CC_PACKAGE_ROOT'],
'config', 'server_config.json')
if os.path.exists(session_cfg_file):
LOG.info("Renaming '%s' to '%s'. Please check the example "
"configuration file ('%s') or the user guide for more "
"information.", session_cfg_file,
server_cfg_file, example_cfg_file)
os.rename(session_cfg_file, server_cfg_file)
else:
LOG.info("CodeChecker server's example configuration file "
"created at '%s'", server_cfg_file)
shutil.copyfile(example_cfg_file, server_cfg_file)
try:
manager = session_manager.SessionManager(
server_cfg_file,
root_sha,
force_auth)
except IOError as ioerr:
LOG.debug(ioerr)
LOG.error("The server's configuration file "
"is missing or can not be read!")
sys.exit(1)
except ValueError as verr:
LOG.debug(verr)
LOG.error("The server's configuration file is invalid!")
sys.exit(1)
server_clazz = CCSimpleHttpServer
if ':' in server_addr[0]:
# IPv6 address specified for listening.
# FIXME: Python>=3.8 automatically handles IPv6 if ':' is in the bind
# address, see https://bugs.python.org/issue24209.
server_clazz = CCSimpleHttpServerIPv6
http_server = server_clazz(server_addr,
RequestHandler,
config_directory,
config_sql_server,
skip_db_cleanup,
package_data,
context,
check_env,
manager)
def signal_handler(signum, frame):
"""
Handle SIGTERM to stop the server running.
"""
LOG.info("Shutting down the WEB server on [%s:%d]",
'[' + listen_address + ']'
if server_clazz is CCSimpleHttpServerIPv6 else listen_address,
port)
http_server.terminate()
sys.exit(128 + signum)
signal.signal(signal.SIGINT, signal_handler)
signal.signal(signal.SIGTERM, signal_handler)
def reload_signal_handler(*args, **kwargs):
"""
Reloads server configuration file.
"""
manager.reload_config()
signal.signal(signal.SIGHUP, reload_signal_handler)
try:
instance_manager.register(os.getpid(),
os.path.abspath(
context.codechecker_workspace),
port)
except IOError as ex:
LOG.debug(ex.strerror)
LOG.info("Server waiting for client requests on [%s:%d]",
'[' + listen_address + ']'
if server_clazz is CCSimpleHttpServerIPv6 else listen_address,
port)
def unregister_handler(pid):
"""
Handle errors during instance unregistration.
The workspace might be removed so updating the
config content might fail.
"""
try:
instance_manager.unregister(pid)
except IOError as ex:
LOG.debug(ex.strerror)
atexit.register(unregister_handler, os.getpid())
http_server.serve_forever()
LOG.info("Webserver quit.")
def add_initial_run_database(config_sql_server, product_connection):
"""
Create a default run database as SQLite in the config directory,
and add it to the list of products in the config database specified by
db_conn_string.
"""
# Connect to the configuration database
LOG.debug("Creating database engine for CONFIG DATABASE...")
__engine = config_sql_server.create_engine()
product_session = sessionmaker(bind=__engine)
# Load the initial list of products and create the connections.
sess = product_session()
products = sess.query(ORMProduct).all()
if products:
raise ValueError("Called create_initial_run_database on non-empty "
"config database -- you shouldn't have done this!")
LOG.debug("Adding default product to the config db...")
product = ORMProduct('Default', product_connection, 'Default',
"Default product created at server start.")
sess.add(product)
sess.commit()
sess.close()
LOG.debug("Default product set up.")
| 36.957249
| 79
| 0.586003
|
4a1857dbe6ca215d5841f76e944822da38ea8ff4
| 2,771
|
py
|
Python
|
PolishNotation.py
|
mhmddpkts/PolishNotation
|
94fc848d666999638542ebcaaad16f133944933e
|
[
"Apache-2.0"
] | 2
|
2020-12-29T10:19:16.000Z
|
2021-12-19T10:46:52.000Z
|
PolishNotation.py
|
m-pektas/PolishNotation
|
94fc848d666999638542ebcaaad16f133944933e
|
[
"Apache-2.0"
] | null | null | null |
PolishNotation.py
|
m-pektas/PolishNotation
|
94fc848d666999638542ebcaaad16f133944933e
|
[
"Apache-2.0"
] | 1
|
2018-12-25T14:03:13.000Z
|
2018-12-25T14:03:13.000Z
|
#------------- import Library ---------------------------
from builtins import input
#-------------Define Methods ----------------------------
def isOperator(c):
return ((c == '+') | (c == '-') | (c == '/') | (c == '*'))
def whichOperator(c):
if c =='+':
return 0
elif c=='-':
return 1
elif c=='*':
return 2
elif c=='/':
return 3
else:
return -1
def doIt (c,val1,val2):
oprt = whichOperator(c)
if (oprt == 0) : # addition
return val1 + val2
    elif (oprt == 1): #subtraction
return val1 - val2
elif (oprt == 2): #multiplication
return val1 * val2
elif (oprt == 3): #divide
return val1 / val2
else :
print("value not operator..")
return -1
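# Quick sanity examples for doIt (added for illustration):
#   doIt('+', 3, 4) -> 7
#   doIt('/', 8, 2) -> 4.0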
#-------------------------MAIN CODE---------------------------------------------------
Lifo = []  # operator stack
Fifo = []  # operand/result queue
generalList = []  # tokens of the polish notation
operandList = []  # operands waiting to be processed
lastOperand = ''  # last operator seen
result = -1
notasyon = input("Enter Polish Notation :")  # read the notation
generalList = notasyon.split(" ")  # split the notation on spaces
isFine = False  # is an operator pending?
lastOperand = ""
for i in generalList:
    if isOperator(i):  # token is an operator
        isFine = True
        lastOperand = i
        Lifo.append(i)
    else:  # token is an operand
        operandList.append(int(i))
        if len(operandList) == 2:
            if isFine:
                result = doIt(Lifo[-1], operandList[0], operandList[1])
                del operandList[0:]
                del Lifo[-1]
                Fifo.append(result)
                isFine = False
            else:
                Fifo.append(operandList[0])
                Fifo.append(operandList[1])
        else:
            if i == generalList[-1]:
                Fifo.append(int(i))
# You can add print("Fifo :", Fifo) and print("Lifo :", Lifo) here to debug.
length = len(Lifo)
for i in range(length):
    result = doIt(Lifo[-1], Fifo[0], Fifo[1])
    del Fifo[0:2]
    Fifo.insert(0, result)
    del Lifo[-1]
print("Result :", Fifo[0])
| 37.445946
| 114
| 0.384699
|
4a185885be705606e03847846f567c318103598c
| 4,202
|
py
|
Python
|
translate/google.py
|
ssshier/meta-realize
|
cc13309fa9e7e59044fb1c8e6e6b0a62caa7ca8c
|
[
"MIT"
] | 1
|
2021-12-18T09:12:58.000Z
|
2021-12-18T09:12:58.000Z
|
translate/google.py
|
ssshier/meta-realize
|
cc13309fa9e7e59044fb1c8e6e6b0a62caa7ca8c
|
[
"MIT"
] | null | null | null |
translate/google.py
|
ssshier/meta-realize
|
cc13309fa9e7e59044fb1c8e6e6b0a62caa7ca8c
|
[
"MIT"
] | null | null | null |
import urllib.request
import urllib.parse
import json
import execjs
import re
import ssl
ssl._create_default_https_context = ssl._create_unverified_context
JS_CODE="""
var uo = function (a, b) {
for (var c = 0; c < b.length - 2; c += 3) {
var d = b.charAt(c + 2);
d = "a" <= d ? d.charCodeAt(0) - 87 : Number(d);
d = "+" == b.charAt(c + 1) ? a >>> d : a << d;
a = "+" == b.charAt(c) ? a + d & 4294967295 : a ^ d
}
return a
},
wo = function (a, tkk) { // the value of window['TKK'] must be passed in when calling
var d = tkk.split(".");
var b = Number(d[0]);
for (var e = [], f = 0, g = 0; g < a.length; g++) {
var h = a.charCodeAt(g);
128 > h ? e[f++] = h :
(2048 > h ? e[f++] = h >> 6 | 192 :
(55296 == (h & 64512) && g + 1 < a.length && 56320 == (a.charCodeAt(g + 1) & 64512) ? (h = 65536 + ((h & 1023) << 10) + (a.charCodeAt(++g) & 1023), e[f++] = h >> 18 | 240, e[f++] = h >> 12 & 63 | 128) :
e[f++] = h >> 12 | 224, e[f++] = h >> 6 & 63 | 128), e[f++] = h & 63 | 128)
}
a = b;
for (f = 0; f < e.length; f++)
a += e[f],
a = uo(a, "+-a^+6");
a = uo(a, "+-3^+b+-f");
a ^= Number(d[1]) || 0;
0 > a && (a = (a & 2147483647) + 2147483648);
a %= 1E6;
return (a.toString() + "." + (a ^ b))
};
"""
class GoogleTranslate(object):
def __init__(self):
self.url = 'https://translate.google.cn/translate_a/single'
self.TKK = "434674.96463358" # 随时都有可能需要更新的TKK值
self.header = {
"accept": "*/*",
"accept-language": "zh-CN,zh;q=0.9",
"cookie": "NID=188=M1p_rBfweeI_Z02d1MOSQ5abYsPfZogDrFjKwIUbmAr584bc9GBZkfDwKQ80cQCQC34zwD4ZYHFMUf4F59aDQLSc79_LcmsAihnW0Rsb1MjlzLNElWihv-8KByeDBblR2V1kjTSC8KnVMe32PNSJBQbvBKvgl4CTfzvaIEgkqss",
"referer": "https://translate.google.cn/",
"user-agent": "Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/75.0.3770.142 Safari/537.36",
"x-client-data": "CJK2yQEIpLbJAQjEtskBCKmdygEIqKPKAQi5pcoBCLGnygEI4qjKAQjxqcoBCJetygEIza3KAQ==",
}
self.data = {
"client": "webapp",
"sl": "auto",
"tl": "vi", # 翻译的目标语言
"hl": "zh-CN",
"dt": ["at", "bd", "ex", "ld", "md", "qca", "rw", "rm", "ss", "t"],
"otf": "2",
"ssel": "0",
"tsel": "0",
"kc": "1",
"tk": "", # 谷歌服务器会核对的token
"q": "" # 待翻译的字符串
}
# with open('./token.js', 'r', encoding='utf-8') as f:
self.js_fun = execjs.compile(JS_CODE)
        # after constructing the object, refresh the TKK value:
# self.update_TKK()
def update_TKK(self):
url = "https://translate.google.cn/"
req = urllib.request.Request(url=url, headers=self.header)
page_source = urllib.request.urlopen(req).read().decode("utf-8")
self.TKK = re.findall(r"tkk:'([0-9]+\.[0-9]+)'", page_source)[0]
def construct_url(self):
base = self.url + '?'
for key in self.data:
if isinstance(self.data[key], list):
base = base + "dt=" + "&dt=".join(self.data[key]) + "&"
else:
base = base + key + '=' + self.data[key] + '&'
base = base[:-1]
return base
def translate(self, q, lang_to=''):
self.data['q'] = urllib.parse.quote(q)
self.data['tk'] = self.js_fun.call('wo', q, self.TKK)
self.data['tl'] = lang_to
url = self.construct_url()
req = urllib.request.Request(url=url, headers=self.header)
response = json.loads(urllib.request.urlopen(req).read().decode("utf-8"))
print(response)
return response[0][0][0]
# originalText = response[0][0][1]
# originalLanguageCode = response[2]
# print("翻译前:{},翻译前code:{}".format(originalText, originalLanguageCode))
# print("翻译后:{}, 翻译后code:{}".format(targetText, lang_to))
# return originalText, originalLanguageCode, targetText, lang_to
if __name__ == '__main__':
res = GoogleTranslate().translate('This is an interesting setting', lang_to='zh-CN')
print(res)
| 37.185841
| 222
| 0.519515
|
4a18599ae5bb2d02d7c473899595a610f998fcf5
| 15,630
|
py
|
Python
|
scvi/modules/_autozivae.py
|
giovp/scvi-tools
|
9b9370aa502b308f84e3129a7c940a9bea06426b
|
[
"BSD-3-Clause"
] | null | null | null |
scvi/modules/_autozivae.py
|
giovp/scvi-tools
|
9b9370aa502b308f84e3129a7c940a9bea06426b
|
[
"BSD-3-Clause"
] | null | null | null |
scvi/modules/_autozivae.py
|
giovp/scvi-tools
|
9b9370aa502b308f84e3129a7c940a9bea06426b
|
[
"BSD-3-Clause"
] | null | null | null |
from typing import Dict, Optional, Tuple, Union
import numpy as np
import torch
import torch.nn.functional as F
from scipy.special import logit
from torch.distributions import Beta, Gamma, Normal
from torch.distributions import kl_divergence as kl
from scvi import _CONSTANTS
from scvi.compose import LossRecorder, auto_move_data, one_hot
from scvi.distributions import NegativeBinomial, ZeroInflatedNegativeBinomial
from ._vae import VAE
torch.backends.cudnn.benchmark = True
class AutoZIVAE(VAE):
"""
Implementation of the AutoZI model [Clivio19]_.
Parameters
----------
n_input
Number of input genes
alpha_prior
        Float denoting the alpha parameter of the prior Beta distribution of
        the zero-inflation Bernoulli parameter. Should lie strictly between 0 and 1.
        When set to ``None``, it is set to 1 - beta_prior if beta_prior is not ``None``;
        otherwise the prior Beta distribution is learned in an empirical Bayes fashion.
    beta_prior
        Float denoting the beta parameter of the prior Beta distribution of
        the zero-inflation Bernoulli parameter. Should lie strictly between 0 and 1.
        When set to ``None``, it is set to 1 - alpha_prior if alpha_prior is not ``None``;
        otherwise the prior Beta distribution is learned in an empirical Bayes fashion.
minimal_dropout
Float denoting the lower bound of the cell-gene ZI rate in the ZINB component.
Must be non-negative. Can be set to 0 but not recommended as this may make
the mixture problem ill-defined.
zero_inflation: One of the following
* ``'gene'`` - zero-inflation Bernoulli parameter of AutoZI is constant per gene across cells
* ``'gene-batch'`` - zero-inflation Bernoulli parameter can differ between different batches
* ``'gene-label'`` - zero-inflation Bernoulli parameter can differ between different labels
* ``'gene-cell'`` - zero-inflation Bernoulli parameter can differ for every gene in every cell
See VAE docstring (scvi/models/vae.py) for more parameters. ``reconstruction_loss`` should not be specified.
Examples
--------
>>> gene_dataset = CortexDataset()
>>> autozivae = AutoZIVAE(gene_dataset.nb_genes, alpha_prior=0.5, beta_prior=0.5, minimal_dropout=0.01)
"""
def __init__(
self,
n_input: int,
alpha_prior: Optional[float] = 0.5,
beta_prior: Optional[float] = 0.5,
minimal_dropout: float = 0.01,
zero_inflation: str = "gene",
**args,
) -> None:
if "reconstruction_loss" in args:
raise ValueError(
"No reconstruction loss must be specified for AutoZI : it is 'autozinb'."
)
super().__init__(n_input, **args)
self.zero_inflation = zero_inflation
self.reconstruction_loss = "autozinb"
self.minimal_dropout = minimal_dropout
# Parameters of prior Bernoulli Beta distribution : alpha + beta = 1 if only one is specified
if beta_prior is None and alpha_prior is not None:
beta_prior = 1.0 - alpha_prior
if alpha_prior is None and beta_prior is not None:
alpha_prior = 1.0 - beta_prior
# Create parameters for Bernoulli Beta prior and posterior distributions
# Each parameter, whose values are in (0,1), is encoded as its logit, in the set of real numbers
if self.zero_inflation == "gene":
self.alpha_posterior_logit = torch.nn.Parameter(torch.randn(n_input))
self.beta_posterior_logit = torch.nn.Parameter(torch.randn(n_input))
if alpha_prior is None:
self.alpha_prior_logit = torch.nn.Parameter(torch.randn(1))
else:
self.register_buffer(
"alpha_prior_logit", torch.tensor([logit(alpha_prior)])
)
if beta_prior is None:
self.beta_prior_logit = torch.nn.Parameter(torch.randn(1))
else:
self.register_buffer(
"beta_prior_logit", torch.tensor([logit(alpha_prior)])
)
elif self.zero_inflation == "gene-batch":
self.alpha_posterior_logit = torch.nn.Parameter(
torch.randn(n_input, self.n_batch)
)
self.beta_posterior_logit = torch.nn.Parameter(
torch.randn(n_input, self.n_batch)
)
if alpha_prior is None:
                self.alpha_prior_logit = torch.nn.Parameter(
torch.randn(1, self.n_batch)
)
else:
self.register_buffer(
"alpha_prior_logit", torch.tensor([logit(alpha_prior)])
)
if beta_prior is None:
                self.beta_prior_logit = torch.nn.Parameter(torch.randn(1, self.n_batch))
else:
self.register_buffer(
"beta_prior_logit", torch.tensor([logit(beta_prior)])
)
elif self.zero_inflation == "gene-label":
self.alpha_posterior_logit = torch.nn.Parameter(
torch.randn(n_input, self.n_labels)
)
self.beta_posterior_logit = torch.nn.Parameter(
torch.randn(n_input, self.n_labels)
)
if alpha_prior is None:
                self.alpha_prior_logit = torch.nn.Parameter(
torch.randn(1, self.n_labels)
)
else:
self.register_buffer(
"alpha_prior_logit", torch.tensor([logit(alpha_prior)])
)
if beta_prior is None:
                self.beta_prior_logit = torch.nn.Parameter(
torch.randn(1, self.n_labels)
)
else:
self.register_buffer(
"beta_prior_logit", torch.tensor([logit(beta_prior)])
)
else: # gene-cell
raise Exception("Gene-cell not implemented yet for AutoZI")
def get_alphas_betas(
self, as_numpy: bool = True
) -> Dict[str, Union[torch.Tensor, np.ndarray]]:
# Return parameters of Bernoulli Beta distributions in a dictionary
outputs = {}
outputs["alpha_posterior"] = torch.sigmoid(self.alpha_posterior_logit)
outputs["beta_posterior"] = torch.sigmoid(self.beta_posterior_logit)
outputs["alpha_prior"] = torch.sigmoid(self.alpha_prior_logit)
outputs["beta_prior"] = torch.sigmoid(self.beta_prior_logit)
if as_numpy:
for key, value in outputs.items():
outputs[key] = (
value.detach().cpu().numpy()
if value.requires_grad
else value.cpu().numpy()
)
return outputs
def sample_from_beta_distribution(
self,
alpha: torch.Tensor,
beta: torch.Tensor,
eps_gamma: float = 1e-30,
eps_sample: float = 1e-7,
) -> torch.Tensor:
# Sample from a Beta distribution using the reparameterization trick.
# Problem : it is not implemented in CUDA yet
# Workaround : sample X and Y from Gamma(alpha,1) and Gamma(beta,1), the Beta sample is X/(X+Y)
# Warning : use logs and perform logsumexp to avoid numerical issues
# Sample from Gamma
sample_x_log = torch.log(Gamma(alpha, 1).rsample() + eps_gamma)
sample_y_log = torch.log(Gamma(beta, 1).rsample() + eps_gamma)
        # Sum using logsumexp (note: eps_gamma is used to prevent numerical
        # issues with Beta samples that are exactly 0 or 1)
sample_xy_log_max = torch.max(sample_x_log, sample_y_log)
sample_xplusy_log = sample_xy_log_max + torch.log(
torch.exp(sample_x_log - sample_xy_log_max)
+ torch.exp(sample_y_log - sample_xy_log_max)
)
sample_log = sample_x_log - sample_xplusy_log
sample = eps_sample + (1 - 2 * eps_sample) * torch.exp(sample_log)
return sample
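    # Note (added for clarity): the identity used above is that if
    # X ~ Gamma(alpha, 1) and Y ~ Gamma(beta, 1), then X / (X + Y) follows
    # Beta(alpha, beta); doing the division in log space via logsumexp keeps
    # the sample stable when alpha or beta is tiny.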
def reshape_bernoulli(
self,
bernoulli_params: torch.Tensor,
batch_index: Optional[torch.Tensor] = None,
y: Optional[torch.Tensor] = None,
) -> torch.Tensor:
if self.zero_inflation == "gene-label":
one_hot_label = one_hot(y, self.n_labels)
# If we sampled several random Bernoulli parameters
if len(bernoulli_params.shape) == 2:
bernoulli_params = F.linear(one_hot_label, bernoulli_params)
else:
bernoulli_params_res = []
for sample in range(bernoulli_params.shape[0]):
bernoulli_params_res.append(
F.linear(one_hot_label, bernoulli_params[sample])
)
bernoulli_params = torch.stack(bernoulli_params_res)
elif self.zero_inflation == "gene-batch":
one_hot_batch = one_hot(batch_index, self.n_batch)
if len(bernoulli_params.shape) == 2:
bernoulli_params = F.linear(one_hot_batch, bernoulli_params)
# If we sampled several random Bernoulli parameters
else:
bernoulli_params_res = []
for sample in range(bernoulli_params.shape[0]):
bernoulli_params_res.append(
F.linear(one_hot_batch, bernoulli_params[sample])
)
bernoulli_params = torch.stack(bernoulli_params_res)
return bernoulli_params
def sample_bernoulli_params(
self,
batch_index: Optional[torch.Tensor] = None,
y: Optional[torch.Tensor] = None,
n_samples: int = 1,
) -> torch.Tensor:
outputs = self.get_alphas_betas(as_numpy=False)
alpha_posterior = outputs["alpha_posterior"]
beta_posterior = outputs["beta_posterior"]
if n_samples > 1:
alpha_posterior = (
alpha_posterior.unsqueeze(0).expand(
(n_samples, alpha_posterior.size(0))
)
if self.zero_inflation == "gene"
else alpha_posterior.unsqueeze(0).expand(
(n_samples, alpha_posterior.size(0), alpha_posterior.size(1))
)
)
beta_posterior = (
beta_posterior.unsqueeze(0).expand((n_samples, beta_posterior.size(0)))
if self.zero_inflation == "gene"
else beta_posterior.unsqueeze(0).expand(
(n_samples, beta_posterior.size(0), beta_posterior.size(1))
)
)
bernoulli_params = self.sample_from_beta_distribution(
alpha_posterior, beta_posterior
)
bernoulli_params = self.reshape_bernoulli(bernoulli_params, batch_index, y)
return bernoulli_params
def rescale_dropout(
self, px_dropout: torch.Tensor, eps_log: float = 1e-8
) -> torch.Tensor:
if self.minimal_dropout > 0.0:
dropout_prob_rescaled = self.minimal_dropout + (
1.0 - self.minimal_dropout
) * torch.sigmoid(px_dropout)
px_dropout_rescaled = torch.log(
dropout_prob_rescaled / (1.0 - dropout_prob_rescaled + eps_log)
)
else:
px_dropout_rescaled = px_dropout
return px_dropout_rescaled
def generative(
self,
z,
library,
batch_index: Optional[torch.Tensor] = None,
y: Optional[torch.Tensor] = None,
cont_covs=None,
cat_covs=None,
n_samples: int = 1,
eps_log: float = 1e-8,
) -> Dict[str, torch.Tensor]:
outputs = super().generative(
z=z,
library=library,
batch_index=batch_index,
cont_covs=cont_covs,
cat_covs=cat_covs,
y=y,
)
# Rescale dropout
outputs["px_dropout"] = self.rescale_dropout(
outputs["px_dropout"], eps_log=eps_log
)
# Bernoulli parameters
outputs["bernoulli_params"] = self.sample_bernoulli_params(
batch_index, y, n_samples=n_samples
)
return outputs
def compute_global_kl_divergence(self) -> torch.Tensor:
outputs = self.get_alphas_betas(as_numpy=False)
alpha_posterior = outputs["alpha_posterior"]
beta_posterior = outputs["beta_posterior"]
alpha_prior = outputs["alpha_prior"]
beta_prior = outputs["beta_prior"]
return kl(
Beta(alpha_posterior, beta_posterior), Beta(alpha_prior, beta_prior)
).sum()
def get_reconstruction_loss(
self,
x: torch.Tensor,
px_rate: torch.Tensor,
px_r: torch.Tensor,
px_dropout: torch.Tensor,
bernoulli_params: torch.Tensor,
eps_log: float = 1e-8,
**kwargs,
) -> torch.Tensor:
# LLs for NB and ZINB
ll_zinb = torch.log(
1.0 - bernoulli_params + eps_log
) + ZeroInflatedNegativeBinomial(
mu=px_rate, theta=px_r, zi_logits=px_dropout
).log_prob(
x
)
ll_nb = torch.log(bernoulli_params + eps_log) + NegativeBinomial(
mu=px_rate, theta=px_r
).log_prob(x)
# Reconstruction loss using a logsumexp-type computation
ll_max = torch.max(ll_zinb, ll_nb)
ll_tot = ll_max + torch.log(
torch.exp(ll_nb - ll_max) + torch.exp(ll_zinb - ll_max)
)
reconst_loss = -ll_tot.sum(dim=-1)
return reconst_loss
@auto_move_data
def loss(
self,
tensors,
inference_outputs,
generative_outputs,
        kl_weight: float = 1.0,
        n_obs: float = 1.0,
) -> Tuple[torch.Tensor, torch.Tensor, torch.Tensor]:
# Parameters for z latent distribution
qz_m = inference_outputs["qz_m"]
qz_v = inference_outputs["qz_v"]
ql_m = inference_outputs["ql_m"]
ql_v = inference_outputs["ql_v"]
px_rate = generative_outputs["px_rate"]
px_r = generative_outputs["px_r"]
px_dropout = generative_outputs["px_dropout"]
bernoulli_params = generative_outputs["bernoulli_params"]
x = tensors[_CONSTANTS.X_KEY]
local_l_mean = tensors[_CONSTANTS.LOCAL_L_MEAN_KEY]
local_l_var = tensors[_CONSTANTS.LOCAL_L_VAR_KEY]
# KL divergences wrt z_n,l_n
mean = torch.zeros_like(qz_m)
scale = torch.ones_like(qz_v)
kl_divergence_z = kl(Normal(qz_m, torch.sqrt(qz_v)), Normal(mean, scale)).sum(
dim=1
)
kl_divergence_l = kl(
Normal(ql_m, torch.sqrt(ql_v)),
Normal(local_l_mean, torch.sqrt(local_l_var)),
).sum(dim=1)
# KL divergence wrt Bernoulli parameters
kl_divergence_bernoulli = self.compute_global_kl_divergence()
# Reconstruction loss
reconst_loss = self.get_reconstruction_loss(
x, px_rate, px_r, px_dropout, bernoulli_params
)
kl_global = kl_divergence_bernoulli
kl_local_for_warmup = kl_divergence_z
kl_local_no_warmup = kl_divergence_l
weighted_kl_local = kl_weight * kl_local_for_warmup + kl_local_no_warmup
loss = n_obs * torch.mean(reconst_loss + weighted_kl_local) + kl_global
kl_local = dict(
kl_divergence_l=kl_divergence_l, kl_divergence_z=kl_divergence_z
)
return LossRecorder(loss, reconst_loss, kl_local, kl_global)
| 38.402948
| 112
| 0.606142
|
4a185b46d451fce03acd6c7fc37ff25befdf90bf
| 2,760
|
py
|
Python
|
anyway/parsers/rsa.py
|
atalyaalon/anyway
|
0ddcd1d587de3bb65c528affcef5b6bd1dcaca71
|
[
"MIT"
] | null | null | null |
anyway/parsers/rsa.py
|
atalyaalon/anyway
|
0ddcd1d587de3bb65c528affcef5b6bd1dcaca71
|
[
"MIT"
] | 78
|
2017-06-20T09:25:11.000Z
|
2021-08-01T05:48:08.000Z
|
anyway/parsers/rsa.py
|
atalyaalon/anyway
|
0ddcd1d587de3bb65c528affcef5b6bd1dcaca71
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
import json
from dateutil import parser
from openpyxl import load_workbook
from anyway.parsers.utils import batch_iterator
from anyway.backend_constants import BE_CONST
from anyway.models import AccidentMarker
from anyway.app_and_db import db
def _iter_rows(filename):
workbook = load_workbook(filename, read_only=True)
sheet = workbook["Worksheet1"]
rows = sheet.rows
first_row = next(rows)
headers = [
"מזהה",
"תאריך דיווח",
"סטטוס",
"סוג עבירה",
"סוג רכב",
"סוג לוחית רישוי",
"נ״צ ערוך",
]
assert [cell.value for cell in first_row] == headers
for row in rows:
id_ = int(row[0].value)
provider_and_id_ = int(str(BE_CONST.RSA_PROVIDER_CODE) + str(id_))
violation = row[3].value
vehicle_type = row[4].value
coordinates = row[6].value
        if coordinates in (',', '0.0,0.0'):
continue
rsa_license_plate = row[5].value
rsa_severity = None
video_link = None
timestamp = parser.parse(row[1].value, dayfirst=True)
if not violation:
continue
vehicle_type = vehicle_type or ""
coordinates = coordinates.split(",")
latitude, longitude = float(coordinates[0]), float(coordinates[1])
description = {
"VIOLATION_TYPE": violation,
"VEHICLE_TYPE": vehicle_type,
"RSA_LICENSE_PLATE": rsa_license_plate,
}
yield {
"id": id_,
"provider_and_id": provider_and_id_,
"latitude": latitude,
"longitude": longitude,
"created": timestamp,
"provider_code": BE_CONST.RSA_PROVIDER_CODE,
"accident_severity": 0,
"title": "שומרי הדרך",
"description": json.dumps(description),
"location_accuracy": 1,
"type": BE_CONST.MARKER_TYPE_ACCIDENT,
"video_link": video_link,
"vehicle_type_rsa": vehicle_type,
"violation_type_rsa": violation,
"rsa_license_plate": rsa_license_plate,
"accident_year": timestamp.year,
}
def parse(filename):
db.session.execute(f"DELETE from markers where provider_code = {BE_CONST.RSA_PROVIDER_CODE}")
for batch in batch_iterator(_iter_rows(filename), batch_size=50000):
db.session.bulk_insert_mappings(AccidentMarker, batch)
db.session.commit()
"""
Fills empty geometry object according to coordinates in database
"""
db.session.execute(
"UPDATE markers SET geom = ST_SetSRID(ST_MakePoint(longitude,latitude),4326)\
WHERE geom IS NULL;"
)
db.session.commit()
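# Typical usage (illustrative; the file name is a placeholder):
#   parse('rsa_export.xlsx')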
| 32.470588
| 97
| 0.610145
|
4a185b4c5d5a7b18d9c19877382a08135fcc263c
| 1,054
|
py
|
Python
|
code/read_average_degree.py
|
CHUNHUNGFAN/Course_Social_Network_Novel
|
fe85ffe6ba7213e4b5a7878554184f1728899694
|
[
"MIT"
] | null | null | null |
code/read_average_degree.py
|
CHUNHUNGFAN/Course_Social_Network_Novel
|
fe85ffe6ba7213e4b5a7878554184f1728899694
|
[
"MIT"
] | 5
|
2021-03-10T12:17:57.000Z
|
2022-02-27T01:52:58.000Z
|
code/read_average_degree.py
|
CHUNHUNGFAN/Course_Social_Network_Novel
|
fe85ffe6ba7213e4b5a7878554184f1728899694
|
[
"MIT"
] | null | null | null |
#%%
file = open("../dataset/novel.txt")
# %%
characters = []
file2 = open("../dataset/character.txt")
for line in file2:
if line != '\n':
line = line.strip('\n')
characters.append(line)
# %%
relationship = [[0 for column in range(len(characters))] for row in range(len(characters))]
lines = file.readlines()
for line in lines:
if line != '\n':
links = []
for index in range(len(characters)):
if characters[index] in line:
links.append(index)
for row in range(len(links)):
for column in range(row + 1, len(links)):
relationship[links[row]][links[column]] += 1
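# (Added note) `relationship` is an upper-triangular co-occurrence matrix:
# relationship[i][j] counts lines that mention both character i and j (i < j).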
# %%
charactersDegree = [0] * len(characters)
for row in range(len(characters)):
for column in range(row + 1, len(characters)):
if relationship[row][column] != 0:
charactersDegree[row] += 1
charactersDegree[column] += 1
# print(charactersDegree)
# %%
allDegree = sum(charactersDegree)
avgDegree = allDegree / len(charactersDegree)
print(avgDegree)
# %%
| 25.707317
| 91
| 0.59962
|
4a185bb5981d0d47344af832303102ad74a0587d
| 7,391
|
py
|
Python
|
pusher/pusher.py
|
makingspace/pusher-http-python
|
569d8fa1ef9baca431cbb86467aabb39d4077fd8
|
[
"MIT"
] | null | null | null |
pusher/pusher.py
|
makingspace/pusher-http-python
|
569d8fa1ef9baca431cbb86467aabb39d4077fd8
|
[
"MIT"
] | 1
|
2017-01-25T02:17:17.000Z
|
2017-01-25T02:17:17.000Z
|
pusher/pusher.py
|
makingspace/pusher-http-python
|
569d8fa1ef9baca431cbb86467aabb39d4077fd8
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import (
print_function,
unicode_literals,
absolute_import,
division)
import os
from pusher.util import (
ensure_text,
pusher_url_re,
doc_string)
from pusher.pusher_client import PusherClient
from pusher.notification_client import NotificationClient, ClientNotificationClient
from pusher.authentication_client import AuthenticationClient
class Pusher(object):
"""Client for the Pusher HTTP API.
This client supports various backend adapters to support various http
libraries available in the python ecosystem.
:param app_id: a pusher application identifier
:param key: a pusher application key
:param secret: a pusher application secret token
    :param ssl: Whether to use SSL or plain HTTP
:param host: Used for custom host destination
:param port: Used for custom port destination
:param timeout: Request timeout (in seconds)
:param cluster: Convention for other clusters than the main Pusher-one.
Eg: 'eu' will resolve to the api-eu.pusherapp.com host
:param backend: an http adapter class (AsyncIOBackend, RequestsBackend,
SynchronousBackend, TornadoBackend)
    :param backend_options: additional backend options
"""
def __init__(
self, app_id, key, secret, ssl=True, host=None, port=None,
timeout=5, cluster=None, json_encoder=None, json_decoder=None,
backend=None, notification_host=None, notification_ssl=True,
**backend_options):
self._pusher_client = PusherClient(
app_id, key, secret, ssl, host, port, timeout, cluster,
json_encoder, json_decoder, backend, **backend_options)
self._authentication_client = AuthenticationClient(
app_id, key, secret, ssl, host, port, timeout, cluster,
json_encoder, json_decoder, backend, **backend_options)
self._notification_client = NotificationClient(
app_id, key, secret, notification_ssl, notification_host, port,
timeout, cluster, json_encoder, json_decoder, backend,
**backend_options)
@classmethod
def from_url(cls, url, **options):
"""Alternative constructor that extracts the information from a URL.
:param url: String containing a URL
Usage::
>> from pusher import Pusher
>> p =
Pusher.from_url("http://mykey:mysecret@api.pusher.com/apps/432")
"""
m = pusher_url_re.match(ensure_text(url, "url"))
if not m:
raise Exception("Unparsable url: %s" % url)
ssl = m.group(1) == 'https'
options_ = {
'key': m.group(2),
'secret': m.group(3),
'host': m.group(4),
'app_id': m.group(5),
'ssl': ssl}
options_.update(options)
return cls(**options_)
@classmethod
def from_env(cls, env='PUSHER_URL', **options):
"""Alternative constructor that extracts the information from an URL
stored in an environment variable. The pusher heroku addon will set
the PUSHER_URL automatically when installed for example.
:param env: Name of the environment variable
Usage::
>> from pusher import Pusher
>> c = Pusher.from_env("PUSHER_URL")
"""
val = os.environ.get(env)
if not val:
raise Exception("Environment variable %s not found" % env)
return cls.from_url(val, **options)
@doc_string(PusherClient.trigger.__doc__)
def trigger(self, channels, event_name, data, socket_id=None):
return self._pusher_client.trigger(
channels, event_name, data, socket_id)
@doc_string(PusherClient.trigger_batch.__doc__)
def trigger_batch(self, batch=[], already_encoded=False):
return self._pusher_client.trigger_batch(batch, already_encoded)
@doc_string(PusherClient.channels_info.__doc__)
def channels_info(self, prefix_filter=None, attributes=[]):
return self._pusher_client.channels_info(prefix_filter, attributes)
@doc_string(PusherClient.channel_info.__doc__)
def channel_info(self, channel, attributes=[]):
return self._pusher_client.channel_info(channel, attributes)
@doc_string(PusherClient.users_info.__doc__)
def users_info(self, channel):
return self._pusher_client.users_info(channel)
@doc_string(AuthenticationClient.authenticate.__doc__)
def authenticate(self, channel, socket_id, custom_data=None):
return self._authentication_client.authenticate(
channel, socket_id, custom_data)
@doc_string(AuthenticationClient.validate_webhook.__doc__)
def validate_webhook(self, key, signature, body):
return self._authentication_client.validate_webhook(
key, signature, body)
@doc_string(NotificationClient.notify.__doc__)
def notify(self, interest, notification):
return self._notification_client.notify(interest, notification)
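# Minimal usage sketch (illustrative only; the credentials are placeholders):
#
#   pusher = Pusher(app_id='4', key='key', secret='secret')
#   pusher.trigger('my-channel', 'my-event', {'message': 'hello world'})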
class ClientPusher(object):
"""Client for the Pusher HTTP API.
This client supports various backend adapters to support various http
libraries available in the python ecosystem.
:param app_id: a pusher application identifier
:param key: a pusher application key
:param secret: a pusher application secret token
    :param ssl: Whether to use SSL or plain HTTP
:param host: Used for custom host destination
:param port: Used for custom port destination
:param timeout: Request timeout (in seconds)
:param cluster: Convention for other clusters than the main Pusher-one.
Eg: 'eu' will resolve to the api-eu.pusherapp.com host
:param backend: an http adapter class (AsyncIOBackend, RequestsBackend,
SynchronousBackend, TornadoBackend)
    :param backend_options: additional backend options
"""
def __init__(
self, app_id, key, secret, ssl=True, host=None, port=None,
timeout=5, cluster=None, json_encoder=None, json_decoder=None,
backend=None, notification_host=None, notification_ssl=True,
client_id=None, **backend_options):
self._notification_client = ClientNotificationClient(
app_id, key, secret, notification_ssl, notification_host, port,
timeout, cluster, json_encoder, json_decoder, backend,
client_id=client_id, **backend_options)
@doc_string(ClientNotificationClient.register.__doc__)
def notifications_register(self, device_token):
return self._notification_client.register(device_token)
@doc_string(ClientNotificationClient.update_token.__doc__)
def notifications_update_token(self, device_token):
return self._notification_client.update_token(device_token)
@doc_string(ClientNotificationClient.subscribe.__doc__)
def notifications_subscribe(self, interest):
return self._notification_client.subscribe(interest)
@doc_string(ClientNotificationClient.unsubscribe.__doc__)
def notifications_unsubscribe(self, interest):
return self._notification_client.unsubscribe(interest)
| 38.295337
| 83
| 0.693817
|
4a185d788bbc841a788866b9eb2407963c7c344f
| 3,877
|
py
|
Python
|
behave2cucumber/__main__.py
|
ones0318/behave2cucumber
|
8fc67dc431e38731034d27f0091047c5030b9115
|
[
"MIT"
] | 20
|
2016-11-23T10:14:12.000Z
|
2022-01-17T04:23:55.000Z
|
behave2cucumber/__main__.py
|
ones0318/behave2cucumber
|
8fc67dc431e38731034d27f0091047c5030b9115
|
[
"MIT"
] | 12
|
2016-12-02T13:02:53.000Z
|
2021-03-31T15:14:30.000Z
|
behave2cucumber/__main__.py
|
ones0318/behave2cucumber
|
8fc67dc431e38731034d27f0091047c5030b9115
|
[
"MIT"
] | 25
|
2016-12-08T19:39:30.000Z
|
2021-08-13T09:38:10.000Z
|
import sys
import json
import getopt
import logging
from pprint import pprint
from .__init__ import convert
# Global Parameters
_name = "behave2cucumber"
_debug = logging.WARNING
# Logging
logFormatter = logging.Formatter("%(asctime)s [%(threadName)-12.12s] [%(levelname)-10.10s] %(message)s")
shortFormatter = logging.Formatter("[%(levelname)-8.8s] %(message)s")
log = logging.getLogger()
log.setLevel(_debug)
fileHandler = logging.FileHandler("{0}/{1}.log".format("./", _name))
fileHandler.setFormatter(logFormatter)
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(shortFormatter)
log.addHandler(fileHandler)
log.addHandler(consoleHandler)
options = {
"short": "hd:i:o:rfD",
"long": [
"help", "debug=", "infile=", "outfile=", "remove-background",
"format-duration","deduplicate"
],
"descriptions": [
"Print help message",
"Set debug level",
"Specify the input JSON",
"Specify the output JSON, otherwise use stdout",
"Remove background steps from output",
"Format the duration",
"Remove duplicate scenarios caused by @autoretry"
]
}
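# Example invocation (illustrative file names):
#   python -m behave2cucumber -i behave_report.json -o cucumber_report.json -r -D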
def usage():
"""Print out a usage message"""
global options
l = len(options['long'])
    options['shortlist'] = [s for s in options['short'] if s != ":"]
print("python -m behave2cucumber [-h] [-d level|--debug=level]")
for i in range(l):
print(" -{0}|--{1:20} {2}".format(options['shortlist'][i], options['long'][i], options['descriptions'][i]))
def main(argv):
"""Main"""
global options
opts = None
try:
opts, args = getopt.getopt(argv, options['short'], options['long'])
except getopt.GetoptError:
usage()
exit(2)
for opt, arg in opts:
if opt in ("-h", "--help"):
usage()
exit()
elif opt in ("-d", "--debug"):
try:
arg = int(arg)
log.debug("Debug level received: " + str(arg))
except ValueError:
log.warning("Invalid log level: " + arg)
continue
if 0 <= arg <= 5:
log.setLevel(60 - (arg*10))
log.critical("Log level changed to: " + str(logging.getLevelName(60 - (arg*10))))
else:
log.warning("Invalid log level: " + str(arg))
infile = None
outfile = None
remove_background = False
duration_format = False
deduplicate = False
for opt, arg in opts:
if opt in ("-i", "--infile"):
log.info("Input File: " + arg)
infile = arg
if opt in ("-o", "--outfile"):
log.info("Output File: " + arg)
outfile = arg
if opt in ("-r", "--remove-background"):
log.info("Remove Background: Enabled")
remove_background = True
if opt in ("-f", "--format-duration"):
log.info("Format Duration: Enabled")
duration_format = True
if opt in ("-D", "--deduplicate"):
log.info("Deduplicate: Enabled")
deduplicate = True
if infile is None:
log.critical("No input JSON provided.")
usage()
exit(3)
with open(infile) as f:
cucumber_output = convert(json.load(f),
remove_background=remove_background,
duration_format=duration_format,
deduplicate=deduplicate)
if outfile is not None:
with open(outfile, 'w') as f:
json.dump(cucumber_output, f, indent=4, separators=(',', ': '))
else:
pprint(cucumber_output)
if __name__ == "__main__":
try:
main(sys.argv[1:])
except KeyboardInterrupt:
sys.exit(0)
except EOFError:
sys.exit(0)
# except:
# sys.exit(0)
| 28.932836
| 118
| 0.559711
|
4a185f471e151b447e4e04d08bef81b739380f18
| 3,054
|
py
|
Python
|
safari_crash.py
|
TheSecondSun/Safari-Crash
|
a01f01c2de132fa4776f2f410fd3293ccbc91a4f
|
[
"MIT"
] | 24
|
2018-09-17T21:55:44.000Z
|
2020-03-26T05:30:04.000Z
|
safari_crash.py
|
wintrmvte/Safari-Crash
|
a01f01c2de132fa4776f2f410fd3293ccbc91a4f
|
[
"MIT"
] | null | null | null |
safari_crash.py
|
wintrmvte/Safari-Crash
|
a01f01c2de132fa4776f2f410fd3293ccbc91a4f
|
[
"MIT"
] | 5
|
2018-09-18T14:07:39.000Z
|
2020-03-26T05:30:05.000Z
|
#!/usr/bin/python2.7
import argparse
from terminaltables import SingleTable
import os
import lib.logs as logs
import importlib
from bottle import route, run
PATH = __file__.replace('./safari_crash.py', '')
sun = "{}{}o{}{}{}O{}".format(logs.color().BLINK, logs.color().RED, logs.color().END, logs.color().BLINK, logs.color().YELLOW, logs.color().END)
print "\n{} Safari Crash Exploit Kit".format(logs.red("(*)"))
print "{} Created by: TheSecondSun {} (thescndsun@gmail.com)".format(logs.red("(*)"), sun)
print "\n"
def list_exploits():
header = logs.green(logs.bold('***'))
print '\n'
print header + 'AVAILABLE EXPLOITS' + header
exploits = os.listdir(PATH+'exploits')
table_data = [['--NAME--', '--DESCRIPTION--']]
for e in exploits:
if ('init' not in e and '.pyc' not in e):
name = e.replace('.py', '')
imported_exploit = importlib.import_module('exploits.'+name)
            description = imported_exploit.description
            table_data.append([name, description])
table_instance = SingleTable(table_data)
table_instance.inner_heading_row_border = True
table_instance.inner_row_border = True
table_instance.justify_columns = {0: 'left', 1: 'left', 2: 'left'}
print table_instance.table
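# Note (added): each module under exploits/ is expected to expose a
# `description` string (shown in the table above) and a `code` payload
# (served by main() below).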
def arguments():
parser = argparse.ArgumentParser(prog='safari_crash')
parser.add_argument('EXPLOIT_NAME', nargs='?',help='Name of the exploit to use (use "-l" flag to list exploits)')
parser.add_argument('-l', '--list', action='store_true',
dest='LIST',
help='List available exploits')
parser.add_argument('-lp', '--port', action='store',
default=8080,
dest='PORT',
help='Port to serve the exploit on (default: 8080)')
parser.add_argument('-lh', '--hostname', action='store',
default='0.0.0.0',
dest='HOSTNAME',
help='Local hostname (default: 0.0.0.0)')
parser.add_argument('-u', '--url', action='store',
default='/index',
dest='URL',
help='Sub-url to serve exploit on (default: /index)')
parser.add_argument('-d', '--debug', action='store_true',
dest='DEBUG',
help='Start Bottle server in a debug mode')
res = parser.parse_args()
if res.LIST:
list_exploits()
elif not res.EXPLOIT_NAME:
parser.error('"EXPLOIT_NAME" option is required')
return res
def main():
res = arguments()
try:
exploit_code = importlib.import_module('exploits.'+res.EXPLOIT_NAME).code
@route('{}'.format(res.URL))
def exploit():
return exploit_code
logs.good('Started serving {} on {}:{}{} ...'.format(logs.bold(logs.purple(res.EXPLOIT_NAME)),
res.HOSTNAME, res.PORT, res.URL))
print '\n'
run(host=res.HOSTNAME, port=res.PORT, debug=res.DEBUG)
except TypeError:
pass
except ImportError:
logs.err('No such exploit')
if __name__ == '__main__':
main()
| 37.703704
| 144
| 0.613294
|
4a1861bbb3e0d8acc02c6276de71fb4400ff0ecf
| 8,175
|
py
|
Python
|
lib/cell_spacegroup.py
|
jorgediazjr/fast_dp
|
972fe7f09fb28b07053de595faa6857692320cbe
|
[
"Apache-2.0"
] | null | null | null |
lib/cell_spacegroup.py
|
jorgediazjr/fast_dp
|
972fe7f09fb28b07053de595faa6857692320cbe
|
[
"Apache-2.0"
] | null | null | null |
lib/cell_spacegroup.py
|
jorgediazjr/fast_dp
|
972fe7f09fb28b07053de595faa6857692320cbe
|
[
"Apache-2.0"
] | null | null | null |
import os
from cctbx import xray
from cctbx.sgtbx import space_group
from cctbx.sgtbx import space_group_symbols
from cctbx.uctbx import unit_cell
from cctbx.crystal import symmetry
def ersatz_pointgroup(spacegroup_name):
'''Guess the pointgroup for the spacegroup by mapping from short to
long name, then taking 1st character from each block.'''
pg = None
for record in open(
os.path.join(os.environ['CLIBD'], 'symop.lib'), 'r').readlines():
if ' ' in record[:1]:
continue
if spacegroup_name == record.split()[3]:
pg = record.split()[4][2:]
elif spacegroup_name == record.split('\'')[1].replace(' ', ''):
pg = record.split()[4][2:]
if not pg:
raise RuntimeError('spacegroup {} unknown'.format(spacegroup_name))
# FIXME this is probably not correct for small molecule work...
# just be aware of this, in no danger right now of handling non-chiral
# spacegroups
if '/' in pg:
pg = pg.split('/')[0]
result = spacegroup_name[0] + pg
if 'H3' in result:
result = result.replace('H3', 'R3')
return result
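# Illustrative example (assuming the standard CCP4 symop.lib entries):
#   ersatz_pointgroup('P212121') -> 'P222'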
def spacegroup_to_lattice(input_spacegroup):
    '''Generate a lattice symbol from the space group: take the first letter
    of the crystal system (lower-cased) and prepend it to the first letter of
    the space group symbol (the centering), e.g. MONOCLINIC P21 -> mP.'''
def fix_hH(lattice):
if lattice != 'hH':
return lattice
return 'hR'
mapping = {'TRICLINIC': 'a',
'MONOCLINIC': 'm',
'ORTHORHOMBIC': 'o',
'TETRAGONAL': 't',
'TRIGONAL': 'h',
'HEXAGONAL': 'h',
'CUBIC': 'c'}
if type(input_spacegroup) == type(u''):
input_spacegroup = str(input_spacegroup)
if type(input_spacegroup) == type(''):
for record in open(
os.path.join(os.environ['CLIBD'], 'symop.lib'), 'r').readlines():
if ' ' in record[:1]:
continue
if input_spacegroup == record.split()[3]:
return fix_hH(mapping[record.split()[5]] + record.split()[3][0])
elif input_spacegroup == record.split('\'')[1].replace(' ', ''):
return fix_hH(mapping[record.split()[5]] + record.split()[3][0])
elif type(input_spacegroup) == type(0):
for record in open(
os.path.join(os.environ['CLIBD'], 'symop.lib'), 'r').readlines():
if ' ' in record[:1]:
continue
if input_spacegroup == int(record.split()[0]):
return fix_hH(mapping[record.split()[5]] + record.split()[3][0])
else:
raise RuntimeError('bad type for input: {}'.format(type(input_spacegroup)))
return None
def check_spacegroup_name(spacegroup_name):
'''Will return normalised name if spacegroup name is recognised,
raise exception otherwise. For checking command-line options.'''
try:
j = int(spacegroup_name)
if j > 230 or j <= 0:
raise RuntimeError('spacegroup number nonsense: {}'.format(
spacegroup_name))
return spacegroup_number_to_name(j)
    except ValueError:
        pass
for record in open(
os.path.join(os.environ['CLIBD'], 'symop.lib'), 'r').readlines():
if ' ' in record[:1]:
continue
if spacegroup_name == record.split()[3]:
return spacegroup_name
raise RuntimeError('spacegroup name "{}" not recognised'.format(spacegroup_name))
def check_split_cell(cell_string):
'''Will return tuple of floats a, b, c, alpha, beta, gamma from input
cell string which contains a,b,c,alpha,beta,gamma raising an exception
if there is a problem.'''
ideal_string = 'a,b,c,alpha,beta,gamma'
if not cell_string.count(',') == 5:
raise RuntimeError('{} should be of the form {}'.format(
cell_string, ideal_string))
a, b, c, alpha, beta, gamma = tuple(
map(float, cell_string.split(',')))
return a, b, c, alpha, beta, gamma
def constrain_cell(lattice_class, cell):
'''Constrain cell to fit lattice class x.'''
a, b, c, alpha, beta, gamma = cell
if lattice_class == 'a':
return (a, b, c, alpha, beta, gamma)
elif lattice_class == 'm':
return (a, b, c, 90.0, beta, 90.0)
elif lattice_class == 'o':
return (a, b, c, 90.0, 90.0, 90.0)
elif lattice_class == 't':
e = (a + b) / 2.0
return (e, e, c, 90.0, 90.0, 90.0)
elif lattice_class == 'h':
e = (a + b) / 2.0
return (e, e, c, 90.0, 90.0, 120.0)
elif lattice_class == 'c':
e = (a + b + c) / 3.0
return (e, e, e, 90.0, 90.0, 90.0)
raise RuntimeError('lattice class not recognised: {}'.format(lattice_class))
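# Worked example (added): averaging a and b for a tetragonal lattice:
#   constrain_cell('t', (10.0, 10.2, 30.0, 90.0, 90.0, 90.0))
#   -> (10.1, 10.1, 30.0, 90.0, 90.0, 90.0)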
def spacegroup_number_to_name(spg_num):
'''Convert a spacegroup number to a more readable name.'''
database = {}
for record in open(
os.path.join(os.environ['CLIBD'], 'symop.lib'), 'r').readlines():
if ' ' in record[:1]:
continue
number = int(record.split()[0])
name = record.split('\'')[1].strip()
database[number] = name
return database[spg_num]
def lattice_to_spacegroup(lattice):
''' Converts a lattice to the spacegroup with the lowest symmetry
possible for that lattice'''
l2s = {
'aP': 1, 'mP': 3, 'mC': 5, 'mI': 5,
'oP': 16, 'oC': 21, 'oI': 23, 'oF': 22,
'tP': 75, 'tI': 79, 'hP': 143, 'hR': 146,
'hH': 146, 'cP': 195, 'cF': 196, 'cI': 197
}
return l2s[lattice]
def lauegroup_to_lattice(lauegroup):
'''Convert a Laue group representation (from pointless, e.g. I m m m)
to something useful, like the implied crystal lattice (in this
case, oI.)'''
# this has been calculated from the results of Ralf GK's sginfo and a
# little fiddling...
#
# 19/feb/08 added mI record as pointless has started producing this -
# why??? this is not a "real" spacegroup... may be able to switch this
# off...
# 'I2/m': 'mI',
lauegroup_to_lattice = {'Ammm': 'oA',
'C2/m': 'mC',
'I2/m': 'mI',
'Cmmm': 'oC',
'Fm-3': 'cF',
'Fm-3m': 'cF',
'Fmmm': 'oF',
'H-3': 'hR',
'H-3m': 'hR',
'R-3:H': 'hR',
'R-3m:H': 'hR',
'R-3': 'hR',
'R-3m': 'hR',
'I4/m': 'tI',
'I4/mmm': 'tI',
'Im-3': 'cI',
'Im-3m': 'cI',
'Immm': 'oI',
'P-1': 'aP',
'P-3': 'hP',
'P-3m': 'hP',
'P2/m': 'mP',
'P4/m': 'tP',
'P4/mmm': 'tP',
'P6/m': 'hP',
'P6/mmm': 'hP',
'Pm-3': 'cP',
'Pm-3m': 'cP',
'Pmmm': 'oP'}
updated_laue = ''
for l in lauegroup.split():
if not l == '1':
updated_laue += l
return lauegroup_to_lattice[updated_laue]
def generate_primitive_cell(unit_cell_constants, space_group_name):
'''For a given set of unit cell constants and space group, determine the
corresponding primitive unit cell...'''
uc = unit_cell(unit_cell_constants)
sg = space_group(space_group_symbols(space_group_name).hall())
cs = symmetry(unit_cell=uc,
space_group=sg)
csp = cs.change_basis(cs.change_of_basis_op_to_primitive_setting())
return csp.unit_cell()
if __name__ == '__main__':
import sys
for token in sys.argv[1:]:
print(ersatz_pointgroup(token))
| 32.312253
| 85
| 0.522446
|
4a186329b2fefa2d4f0ff6b8bc9dfc3bb4dd1e28
| 317
|
py
|
Python
|
src/627A.py
|
viing937/codeforces
|
d694eb6967cd56af02963c3a662066048cb78d07
|
[
"MIT"
] | 2
|
2016-08-19T09:47:03.000Z
|
2016-10-01T10:15:03.000Z
|
src/627A.py
|
viing937/codeforces
|
d694eb6967cd56af02963c3a662066048cb78d07
|
[
"MIT"
] | null | null | null |
src/627A.py
|
viing937/codeforces
|
d694eb6967cd56af02963c3a662066048cb78d07
|
[
"MIT"
] | 1
|
2015-07-01T23:57:32.000Z
|
2015-07-01T23:57:32.000Z
|
s, x = map(int, input().split(' '))
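# (Added explanation) Count pairs (a, b) with a + b = s and a XOR b = x:
# the carry bits are (s - x) / 2 and must not overlap x's set bits; each set
# bit of x doubles the count, and 2 is subtracted when s == x to drop the
# solutions where a or b would be 0.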
if (s-x)%2 or s < x:
print(0)
else:
c = bin((s-x)//2)[2:][::-1]
t = bin(x)[2:][::-1]
for i in range(len(t)):
if t[i] == '1' and i < len(c) and c[i] == '1':
print(0)
exit(0)
print(pow(2, bin(x)[2:].count('1'))-(2 if s==x else 0))
| 26.416667
| 59
| 0.403785
|
4a186349b134f37a91f9d24ea58b63ebe9ba211b
| 13,627
|
py
|
Python
|
src/sdk/pynni/nni/compression/torch/pruners.py
|
RayMeng8/nni
|
c3cd9fe7ffdb20d07f7562592774fe071b235de3
|
[
"MIT"
] | null | null | null |
src/sdk/pynni/nni/compression/torch/pruners.py
|
RayMeng8/nni
|
c3cd9fe7ffdb20d07f7562592774fe071b235de3
|
[
"MIT"
] | null | null | null |
src/sdk/pynni/nni/compression/torch/pruners.py
|
RayMeng8/nni
|
c3cd9fe7ffdb20d07f7562592774fe071b235de3
|
[
"MIT"
] | null | null | null |
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT license.
import copy
import logging
import torch
from .compressor import Pruner
__all__ = ['LevelPruner', 'AGP_Pruner', 'SlimPruner', 'LotteryTicketPruner']
logger = logging.getLogger('torch pruner')
class LevelPruner(Pruner):
"""
Prune to an exact pruning level specification
"""
def __init__(self, model, config_list, optimizer):
"""
Parameters
----------
model : torch.nn.module
Model to be pruned
config_list : list
List on pruning configs
"""
super().__init__(model, config_list, optimizer)
self.set_wrappers_attribute("if_calculated", False)
def calc_mask(self, wrapper, **kwargs):
"""
Calculate the mask of given layer
Parameters
----------
wrapper : Module
the module to instrument the compression operation
Returns
-------
dict
dictionary for storing masks
"""
config = wrapper.config
weight = wrapper.module.weight.data
if not wrapper.if_calculated:
w_abs = weight.abs()
k = int(weight.numel() * config['sparsity'])
            if k == 0:
                return {'weight_mask': torch.ones(weight.shape).type_as(weight)}
threshold = torch.topk(w_abs.view(-1), k, largest=False)[0].max()
mask_weight = torch.gt(w_abs, threshold).type_as(weight)
mask = {'weight_mask': mask_weight}
wrapper.if_calculated = True
return mask
else:
return None
class AGP_Pruner(Pruner):
"""
An automated gradual pruning algorithm that prunes the smallest magnitude
weights to achieve a preset level of network sparsity.
Michael Zhu and Suyog Gupta, "To prune, or not to prune: exploring the
efficacy of pruning for model compression", 2017 NIPS Workshop on Machine
Learning of Phones and other Consumer Devices,
https://arxiv.org/pdf/1710.01878.pdf
"""
def __init__(self, model, config_list, optimizer):
"""
Parameters
----------
model : torch.nn.module
Model to be pruned
config_list : list
List on pruning configs
"""
super().__init__(model, config_list, optimizer)
self.now_epoch = 0
self.set_wrappers_attribute("if_calculated", False)
def calc_mask(self, wrapper, **kwargs):
"""
Calculate the mask of given layer.
        Weights with the smallest absolute values are masked until the
        scheduled target sparsity for the current epoch is reached.
Parameters
----------
wrapper : Module
the layer to instrument the compression operation
Returns
-------
dict
dictionary for storing masks
"""
config = wrapper.config
weight = wrapper.module.weight.data
start_epoch = config.get('start_epoch', 0)
freq = config.get('frequency', 1)
if wrapper.if_calculated:
return None
if not (self.now_epoch >= start_epoch and (self.now_epoch - start_epoch) % freq == 0):
return None
mask = {'weight_mask': wrapper.weight_mask}
target_sparsity = self.compute_target_sparsity(config)
k = int(weight.numel() * target_sparsity)
if k == 0 or target_sparsity >= 1 or target_sparsity <= 0:
return mask
        # if we want to generate a new mask, we should update the weight first
w_abs = weight.abs() * mask['weight_mask']
threshold = torch.topk(w_abs.view(-1), k, largest=False)[0].max()
new_mask = {'weight_mask': torch.gt(w_abs, threshold).type_as(weight)}
wrapper.if_calculated = True
return new_mask
def compute_target_sparsity(self, config):
"""
Calculate the sparsity for pruning
Parameters
----------
config : dict
Layer's pruning config
Returns
-------
float
Target sparsity to be pruned
"""
end_epoch = config.get('end_epoch', 1)
start_epoch = config.get('start_epoch', 0)
freq = config.get('frequency', 1)
final_sparsity = config.get('final_sparsity', 0)
initial_sparsity = config.get('initial_sparsity', 0)
if end_epoch <= start_epoch or initial_sparsity >= final_sparsity:
logger.warning('your end epoch <= start epoch or initial_sparsity >= final_sparsity')
return final_sparsity
if end_epoch <= self.now_epoch:
return final_sparsity
span = ((end_epoch - start_epoch - 1) // freq) * freq
assert span > 0
target_sparsity = (final_sparsity +
(initial_sparsity - final_sparsity) *
(1.0 - ((self.now_epoch - start_epoch) / span)) ** 3)
return target_sparsity
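    # Note (added): this implements the cubic AGP schedule
    # s_t = s_f + (s_i - s_f) * (1 - (t - t0) / span) ** 3,
    # so sparsity ramps from initial_sparsity to final_sparsity and then
    # stays at final_sparsity once end_epoch is reached.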
def update_epoch(self, epoch):
"""
Update epoch
Parameters
----------
epoch : int
current training epoch
"""
if epoch > 0:
self.now_epoch = epoch
for wrapper in self.get_modules_wrapper():
wrapper.if_calculated.copy_(torch.tensor(0)) # pylint: disable=not-callable
class SlimPruner(Pruner):
"""
A structured pruning algorithm that prunes channels by pruning the weights of BN layers.
Zhuang Liu, Jianguo Li, Zhiqiang Shen, Gao Huang, Shoumeng Yan and Changshui Zhang
"Learning Efficient Convolutional Networks through Network Slimming", 2017 ICCV
https://arxiv.org/pdf/1708.06519.pdf
"""
def __init__(self, model, config_list, optimizer):
"""
Parameters
----------
config_list : list
support key for each list item:
- sparsity: percentage of convolutional filters to be pruned.
"""
super().__init__(model, config_list, optimizer)
weight_list = []
if len(config_list) > 1:
logger.warning('Slim pruner only supports 1 configuration')
config = config_list[0]
for (layer, config) in self.get_modules_to_compress():
assert layer.type == 'BatchNorm2d', 'SlimPruner only supports 2d batch normalization layer pruning'
weight_list.append(layer.module.weight.data.abs().clone())
all_bn_weights = torch.cat(weight_list)
k = int(all_bn_weights.shape[0] * config['sparsity'])
self.global_threshold = torch.topk(all_bn_weights.view(-1), k, largest=False)[0].max()
self.set_wrappers_attribute("if_calculated", False)
def calc_mask(self, wrapper, **kwargs):
"""
Calculate the mask of given layer.
Scale factors with the smallest absolute value in the BN layer are masked.
Parameters
----------
wrapper : Module
the layer to instrument the compression operation
Returns
-------
dict
dictionary for storing masks
"""
config = wrapper.config
weight = wrapper.module.weight.data
op_type = wrapper.type
assert op_type == 'BatchNorm2d', 'SlimPruner only supports 2d batch normalization layer pruning'
if wrapper.if_calculated:
return None
base_mask = torch.ones(weight.size()).type_as(weight).detach()
mask = {'weight_mask': base_mask.detach(), 'bias_mask': base_mask.clone().detach()}
filters = weight.size(0)
num_prune = int(filters * config.get('sparsity'))
if filters >= 2 and num_prune >= 1:
w_abs = weight.abs()
mask_weight = torch.gt(w_abs, self.global_threshold).type_as(weight)
mask_bias = mask_weight.clone()
mask = {'weight_mask': mask_weight.detach(), 'bias_mask': mask_bias.detach()}
wrapper.if_calculated = True
return mask
class LotteryTicketPruner(Pruner):
"""
This is a Pytorch implementation of the paper "The Lottery Ticket Hypothesis: Finding Sparse, Trainable Neural Networks",
following NNI model compression interface.
1. Randomly initialize a neural network f(x;theta_0) (where theta_0 follows D_{theta}).
2. Train the network for j iterations, arriving at parameters theta_j.
3. Prune p% of the parameters in theta_j, creating a mask m.
4. Reset the remaining parameters to their values in theta_0, creating the winning ticket f(x;m*theta_0).
5. Repeat step 2, 3, and 4.
"""
def __init__(self, model, config_list, optimizer, lr_scheduler=None, reset_weights=True):
"""
Parameters
----------
model : pytorch model
The model to be pruned
config_list : list
Supported keys:
- prune_iterations : The number of rounds for the iterative pruning.
- sparsity : The final sparsity when the compression is done.
optimizer : pytorch optimizer
The optimizer for the model
lr_scheduler : pytorch lr scheduler
The lr scheduler for the model if used
reset_weights : bool
Whether reset weights and optimizer at the beginning of each round.
"""
super().__init__(model, config_list, optimizer)
self.curr_prune_iteration = None
self.prune_iterations = self._validate_config(config_list)
# save init weights and optimizer
self.reset_weights = reset_weights
if self.reset_weights:
self._model = model
self._optimizer = optimizer
self._model_state = copy.deepcopy(model.state_dict())
self._optimizer_state = copy.deepcopy(optimizer.state_dict())
self._lr_scheduler = lr_scheduler
if lr_scheduler is not None:
self._scheduler_state = copy.deepcopy(lr_scheduler.state_dict())
def _validate_config(self, config_list):
prune_iterations = None
for config in config_list:
assert 'prune_iterations' in config, 'prune_iterations must exist in your config'
assert 'sparsity' in config, 'sparsity must exist in your config'
if prune_iterations is not None:
assert prune_iterations == config[
'prune_iterations'], 'The values of prune_iterations must be equal in your config'
prune_iterations = config['prune_iterations']
return prune_iterations
def _calc_sparsity(self, sparsity):
keep_ratio_once = (1 - sparsity) ** (1 / self.prune_iterations)
curr_keep_ratio = keep_ratio_once ** self.curr_prune_iteration
return max(1 - curr_keep_ratio, 0)
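    # Schedule arithmetic: with final sparsity 0.75 over prune_iterations = 2,
    # keep_ratio_once = (1 - 0.75) ** (1 / 2) = 0.5, so the target sparsity is
    # 1 - 0.5 = 0.5 after round 1 and 1 - 0.25 = 0.75 after round 2.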
def _calc_mask(self, weight, sparsity, curr_w_mask):
if self.curr_prune_iteration == 0:
mask = torch.ones(weight.shape).type_as(weight)
else:
curr_sparsity = self._calc_sparsity(sparsity)
w_abs = weight.abs() * curr_w_mask
k = int(w_abs.numel() * curr_sparsity)
threshold = torch.topk(w_abs.view(-1), k, largest=False).values.max()
mask = torch.gt(w_abs, threshold).type_as(weight)
return {'weight_mask': mask}
def calc_mask(self, wrapper, **kwargs):
"""
Generate mask for the given ``weight``.
Parameters
----------
wrapper : Module
The layer to be pruned
Returns
-------
tensor
The mask for this weight, it is ```None``` because this pruner
calculates and assigns masks in ```prune_iteration_start```,
no need to do anything in this function.
"""
return None
def get_prune_iterations(self):
"""
Return the range for iterations.
        Masks are all ones in the first prune iteration, so one extra iteration is added.
Returns
-------
list
A list for pruning iterations
"""
return range(self.prune_iterations + 1)
def prune_iteration_start(self):
"""
Control the pruning procedure on updated epoch number.
Should be called at the beginning of the epoch.
"""
if self.curr_prune_iteration is None:
self.curr_prune_iteration = 0
else:
self.curr_prune_iteration += 1
assert self.curr_prune_iteration < self.prune_iterations + 1, 'Exceed the configured prune_iterations'
modules_wrapper = self.get_modules_wrapper()
modules_to_compress = self.get_modules_to_compress()
for layer, config in modules_to_compress:
module_wrapper = None
for wrapper in modules_wrapper:
if wrapper.name == layer.name:
module_wrapper = wrapper
break
assert module_wrapper is not None
sparsity = config.get('sparsity')
mask = self._calc_mask(layer.module.weight.data, sparsity, module_wrapper.weight_mask)
            # TODO: assigning weight_mask directly like this is not ideal
module_wrapper.weight_mask = mask['weight_mask']
# there is no mask for bias
# reinit weights back to original after new masks are generated
if self.reset_weights:
# should use this member function to reset model weights
self.load_model_state_dict(self._model_state)
self._optimizer.load_state_dict(self._optimizer_state)
if self._lr_scheduler is not None:
self._lr_scheduler.load_state_dict(self._scheduler_state)
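# Usage sketch (hypothetical names; `train` stands in for a real training loop):
#   pruner = LotteryTicketPruner(model, [{'prune_iterations': 5, 'sparsity': 0.8}],
#                                optimizer)
#   for _ in pruner.get_prune_iterations():
#       pruner.prune_iteration_start()
#       train(model)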
| 36.82973
| 125
| 0.611507
|
4a18659f83d07648c6aa2d1d54541fd7fcdcfc5e
| 1,161
|
py
|
Python
|
tests/test_fdt_image.py
|
Caesurus/pyUBoot
|
c8f421fede9a3e318bc0280ffbb2f8c1dad934d1
|
[
"Apache-2.0"
] | null | null | null |
tests/test_fdt_image.py
|
Caesurus/pyUBoot
|
c8f421fede9a3e318bc0280ffbb2f8c1dad934d1
|
[
"Apache-2.0"
] | null | null | null |
tests/test_fdt_image.py
|
Caesurus/pyUBoot
|
c8f421fede9a3e318bc0280ffbb2f8c1dad934d1
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2017 Martin Olejar
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from uboot import parse_itb, parse_its, FdtImage
# Used Directories
DATA_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'data')
TEMP_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'temp')
# Test Files
UBOOT_ITS = os.path.join(DATA_DIR, 'u-boot.its')
UBOOT_ITB_TEMP = os.path.join(TEMP_DIR, 'u-boot.itb')
def setup_module(module):
# Create temp directory
os.makedirs(TEMP_DIR, exist_ok=True)
def teardown_module(module):
# Delete created files
#os.remove(UBOOT_ITB_TEMP)
pass
def test_01():
pass
| 28.317073
| 75
| 0.745047
|
4a1865f1dc0d82b56b0196b7b6543079c8ce8212
| 46
|
py
|
Python
|
code/sample_1-2-13.py
|
KoyanagiHitoshi/AtCoder-Python-Introduction
|
6d014e333a873f545b4d32d438e57cf428b10b96
|
[
"MIT"
] | 1
|
2022-03-29T13:50:12.000Z
|
2022-03-29T13:50:12.000Z
|
code/sample_1-2-13.py
|
KoyanagiHitoshi/AtCoder-Python-Introduction
|
6d014e333a873f545b4d32d438e57cf428b10b96
|
[
"MIT"
] | null | null | null |
code/sample_1-2-13.py
|
KoyanagiHitoshi/AtCoder-Python-Introduction
|
6d014e333a873f545b4d32d438e57cf428b10b96
|
[
"MIT"
] | null | null | null |
x = [int(input()) for i in range(5)]
print(x)
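# Example: with the inputs 3, 1, 4, 1, 5 given on separate lines, this prints
# [3, 1, 4, 1, 5].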
| 15.333333
| 36
| 0.586957
|
4a1866b5fa1c389467bcd258ea8e5601bbe10e70
| 5,937
|
py
|
Python
|
fuzzers/int_loop_check.py
|
rw1nkler/prjxray
|
aff076b47dcf6d653eb3ce791b41fd6cf4343edd
|
[
"ISC"
] | 583
|
2017-12-21T11:06:13.000Z
|
2022-02-20T21:27:33.000Z
|
fuzzers/int_loop_check.py
|
rw1nkler/prjxray
|
aff076b47dcf6d653eb3ce791b41fd6cf4343edd
|
[
"ISC"
] | 1,212
|
2017-12-22T15:05:06.000Z
|
2022-02-19T13:04:59.000Z
|
fuzzers/int_loop_check.py
|
mfkiwl/prjxray-xilinx-7-bitstream-fortmat
|
5349556bc2c230801d6df0cf11bccb9cfd171639
|
[
"ISC"
] | 134
|
2017-12-21T10:16:50.000Z
|
2022-02-16T06:42:04.000Z
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
#
# Copyright (C) 2017-2020 The Project X-Ray Authors.
#
# Use of this source code is governed by a ISC-style
# license that can be found in the LICENSE file or at
# https://opensource.org/licenses/ISC
#
# SPDX-License-Identifier: ISC
from __future__ import print_function
import sys, re
import os
import glob
import hashlib
def bytehex(x):
    return ''.join('{:02x}'.format(b) for b in x)
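# Example: bytehex(b'\x01\xab') returns '01ab'.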
def wc_for_iteration(todo_dir, fni):
with open("%s/%u_all.txt" % (todo_dir, fni), "rb") as f:
return sum(1 for _ in f)
def check_made_progress(todo_dir, max_iter, min_progress):
""" Returns true if minimum progress is being made. """
if max_iter == 1:
return True
prev_iteration = wc_for_iteration(todo_dir, max_iter - 1)
cur_iteration = wc_for_iteration(todo_dir, max_iter)
made_progress = prev_iteration - cur_iteration > min_progress
if not made_progress:
print(
"Between iteration {} and iteration {} only {} pips were solved. Terminating iteration."
.format(max_iter - 1, max_iter, prev_iteration - cur_iteration))
return made_progress
def run(
todo_dir,
min_iters=None,
min_progress=None,
timeout_iters=None,
max_iters=None,
zero_entries=None,
zero_entries_filter=".*",
verbose=False):
timeout_fn = "%s/timeout" % todo_dir
# make clean removes todo dir, but helps debugging
if os.path.exists(timeout_fn):
print("WARNING: removing %s" % timeout_fn)
os.remove(timeout_fn)
alls = glob.glob("%s/*_all.txt" % todo_dir)
max_iter = 0
for fn in alls:
n = int(re.match(r".*/([0-9]*)_all.txt", fn).group(1))
max_iter = max(max_iter, n)
if max_iter == 0:
print("Incomplete: no iters")
sys.exit(1)
verbose and print("Max iter: %u, need: %s" % (max_iter, min_iters))
# Don't allow early termination if below min_iters
if min_iters is not None and max_iter < min_iters:
print("Incomplete: not enough iters")
sys.exit(1)
# Force early termination if at or above max_iters.
if max_iters is not None and max_iter >= max_iters:
print(
"Complete: reached max iters (want %u, got %u)" %
(max_iters, max_iter))
sys.exit(0)
# Mark timeout if above timeout_iters
if timeout_iters is not None and max_iter > timeout_iters:
print("ERROR: timeout (max %u, got %u)" % (timeout_iters, max_iter))
with open(timeout_fn, "w") as _f:
pass
sys.exit(1)
# Check if zero entries criteria is not met.
if zero_entries:
filt = re.compile(zero_entries_filter)
count = 0
fn = "%s/%u_all.txt" % (todo_dir, max_iter)
with open(fn, 'r') as f:
for l in f:
if filt.search(l):
count += 1
if count > 0:
print("%s: %s lines" % (fn, count))
print(
"Incomplete: need zero entries (used filter: {})".format(
repr(zero_entries_filter)))
sys.exit(1)
else:
# If there are zero entries, check if min_progress criteria is in
# affect. If so, that becomes the new termination condition.
if min_progress is None:
print(
"No unfiltered entries, done (used filter: {})!".format(
repr(zero_entries_filter)))
sys.exit(0)
else:
# Even if there are 0 unfiltered entries, fuzzer may still be
# making progress with filtered entries.
print(
"No unfiltered entries (used filter: {}), checking if progress is being made"
.format(repr(zero_entries_filter)))
# Check if minimum progress was achieved, continue iteration if so.
if min_progress is not None and not check_made_progress(todo_dir, max_iter,
min_progress):
sys.exit(0)
print("No exit criteria met, keep going!")
sys.exit(1)
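# Exit-code protocol (see also main()'s description below): exit status 0 means
# iteration can stop, exit status 1 means either more loops are needed or a
# hard failure occurred (too few iterations, timeout).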
def main():
import argparse
parser = argparse.ArgumentParser(
description=
"Check int_loop completion. Exits 0 on done, 1 if more loops are needed"
)
parser.add_argument('--verbose', action='store_true', help='')
parser.add_argument('--todo-dir', default="build/todo", help='')
parser.add_argument(
'--min-iters', default=None, help='Minimum total number of iterations')
parser.add_argument(
'--min-progress',
default=None,
help=
'Minimum amount of process between iterations. If less progress is made, terminates immediately.'
)
parser.add_argument(
'--timeout-iters',
default=None,
help='Max number of entries before creating todo/timeout')
parser.add_argument(
'--max-iters',
default=None,
help='Max number of entries before declaring success')
parser.add_argument(
'--zero-entries',
action="store_true",
help='Must be no unsolved entries in latest')
parser.add_argument(
'--zero-entries-filter',
default=".*",
help=
'When zero-entries is supplied, this filter is used to filter pips used for counting against zero entries termination condition.'
)
args = parser.parse_args()
def zint(x):
return None if x is None else int(x)
run(
todo_dir=args.todo_dir,
min_iters=zint(args.min_iters),
min_progress=zint(args.min_progress),
timeout_iters=zint(args.timeout_iters),
max_iters=zint(args.max_iters),
zero_entries=args.zero_entries,
zero_entries_filter=args.zero_entries_filter,
verbose=args.verbose)
if __name__ == '__main__':
main()
| 32.091892
| 137
| 0.60384
|
4a1866b79ab810c825f5f69494305a7cdd0f343d
| 616
|
py
|
Python
|
api/news.py
|
qianbin01/lagou_python_api
|
84c0d21cd6a2296efb974dbf7c07cc074106d799
|
[
"MIT"
] | 8
|
2018-09-10T06:30:56.000Z
|
2021-03-11T19:16:32.000Z
|
api/news.py
|
qianbin01/lagou_python_api
|
84c0d21cd6a2296efb974dbf7c07cc074106d799
|
[
"MIT"
] | null | null | null |
api/news.py
|
qianbin01/lagou_python_api
|
84c0d21cd6a2296efb974dbf7c07cc074106d799
|
[
"MIT"
] | 5
|
2018-10-12T12:37:16.000Z
|
2020-05-16T03:17:40.000Z
|
import models.news as news
from flask import Blueprint, jsonify, request
news_blue_print = Blueprint('news', __name__)
status = {
    'msg': '请求成功',  # i.e. "Request successful"
'code': 1000
}
news_doc = news.get_news_count()
@news_blue_print.route('/lists')
def lists():
page = request.args.get('page')
news_doc['index'] = page
return jsonify(
{'status': status,
'pageInfo': news_doc,
'dataList': news.get_news_list(page)
})
@news_blue_print.route('/single')
def single_news():
nid = request.args.get('nid')
return jsonify({'status': status, 'object': news.get_single_news(nid)})
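# Example requests, assuming the blueprint is registered under a /news prefix:
#   GET /news/lists?page=1   -> paginated news list
#   GET /news/single?nid=42  -> a single news item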
| 22.814815
| 75
| 0.646104
|
4a18688dcc5f70a55ed8c5c3fd67e82b860c2b41
| 1,860
|
py
|
Python
|
create_nlcd_only_baseline.py
|
baoqianyue/DFC2021-Track-MSD
|
d707f7601c6caa0d0f0e6013d493e66059d23d49
|
[
"Apache-2.0"
] | 11
|
2021-03-31T06:47:21.000Z
|
2022-03-01T04:07:15.000Z
|
create_nlcd_only_baseline.py
|
baoqianyue/DFC2021-Track-MSD
|
d707f7601c6caa0d0f0e6013d493e66059d23d49
|
[
"Apache-2.0"
] | null | null | null |
create_nlcd_only_baseline.py
|
baoqianyue/DFC2021-Track-MSD
|
d707f7601c6caa0d0f0e6013d493e66059d23d49
|
[
"Apache-2.0"
] | null | null | null |
import os
import re
os.environ[
"CURL_CA_BUNDLE"] = "/etc/ssl/certs/ca-certificates.crt" # A workaround in case this happens: https://github.com/mapbox/rasterio/issues/1289
import argparse
import numpy as np
import pandas as pd
import rasterio
import utils
parser = argparse.ArgumentParser(description='DFC2021 baseline training script')
parser.add_argument('--output_dir', type=str, default="results/nlcd_only_baseline/output/",
help='The path to save the output to.')
args = parser.parse_args()
def main():
os.makedirs(args.output_dir, exist_ok=True)
df = pd.read_csv("data/splits/val_inference_both.csv")
fns = df["label_fn"].values
for i, fn in enumerate(fns):
        # convert the HTTP path from the CSV into a local file path
fn = re.findall(r'\bdata.*\b', fn)[0]
fn = fn.split('/')
fn = os.path.join('/home/Projects/DFC2021/dfc2021-msd-baseline/data/image', fn[1], fn[2])
print("(%d/%d) %s" % (i + 1, len(fns), fn))
output_fn = os.path.join(
args.output_dir,
fn.split("/")[-1].replace("nlcd", "predictions")
)
if "predictions-2016" in output_fn:
output_fn = output_fn.replace("predictions-2016", "predictions-2017")
with rasterio.open(fn) as f:
data_nlcd_class = f.read(1)
input_profile = f.profile.copy()
output_profile = input_profile.copy()
output_profile["driver"] = "GTiff"
print('data_nlcd_class: {}'.format(data_nlcd_class))
data_nlcd_idx = utils.NLCD_CLASS_TO_IDX_MAP[data_nlcd_class].astype(np.uint8)
with rasterio.open(output_fn, "w", **output_profile) as f:
f.write(data_nlcd_idx, 1)
print('data_nlcd_idx : {}'.format(data_nlcd_idx))
f.write_colormap(1, utils.NLCD_IDX_COLORMAP)
if __name__ == "__main__":
main()
| 31
| 145
| 0.636559
|
4a18690c48942ac2073f2eb39ee29753bd16215d
| 716
|
py
|
Python
|
app/core/management/commands/wait_for_db.py
|
tejasvadgama5/recipe-app-api
|
670ec021fb75f99f490079baa105b7c4e58050ab
|
[
"MIT"
] | null | null | null |
app/core/management/commands/wait_for_db.py
|
tejasvadgama5/recipe-app-api
|
670ec021fb75f99f490079baa105b7c4e58050ab
|
[
"MIT"
] | null | null | null |
app/core/management/commands/wait_for_db.py
|
tejasvadgama5/recipe-app-api
|
670ec021fb75f99f490079baa105b7c4e58050ab
|
[
"MIT"
] | 1
|
2021-11-12T12:39:36.000Z
|
2021-11-12T12:39:36.000Z
|
import time
from django.db import connections
from django.db.utils import OperationalError
from django.core.management.base import BaseCommand
class Command(BaseCommand):
"""Django command to pause execution utill database is available"""
def handle(self, *args, **options):
self.stdout.write("Waiting for database....")
db_conn = None
while not db_conn:
try:
db_conn = connections['default']
except OperationalError:
self.stdout.write("Database unavailable,"
" waiting for 1 second.....")
time.sleep(1)
self.stdout.write(self.style.SUCCESS('Database available!'))
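# Invoked from a project checkout as: python manage.py wait_for_db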
| 31.130435
| 71
| 0.615922
|
4a1869adc6198d687b649327c1d789a1796bf361
| 340
|
py
|
Python
|
jsonbourne/_version.py
|
dynamic-graphics-inc/jsonbourne
|
e270220e609015b3de9a1a31ecd0d2411c1db138
|
[
"MIT"
] | 1
|
2021-03-25T01:50:20.000Z
|
2021-03-25T01:50:20.000Z
|
jsonbourne/_version.py
|
dynamic-graphics-inc/jsonbourne
|
e270220e609015b3de9a1a31ecd0d2411c1db138
|
[
"MIT"
] | null | null | null |
jsonbourne/_version.py
|
dynamic-graphics-inc/jsonbourne
|
e270220e609015b3de9a1a31ecd0d2411c1db138
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""`jsonbourne` version"""
__version__ = "0.5.0"
VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH = [int(el) for el in __version__.split('.')]
VERSION_INFO = (VERSION_MAJOR, VERSION_MINOR, VERSION_PATCH)
__all__ = [
"VERSION_MAJOR",
"VERSION_MINOR",
"VERSION_PATCH",
"VERSION_INFO",
"__version__",
]
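# Example: with __version__ == "0.5.0", VERSION_INFO evaluates to (0, 5, 0).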
| 24.285714
| 88
| 0.670588
|
4a186ac49f6b4bbddc25a4322f970757a2c4c3dd
| 636
|
py
|
Python
|
netscan.py
|
zweed4u/netscan
|
9671acda67af6e8e04c3628f86b2a2ec348e5f2d
|
[
"MIT"
] | null | null | null |
netscan.py
|
zweed4u/netscan
|
9671acda67af6e8e04c3628f86b2a2ec348e5f2d
|
[
"MIT"
] | null | null | null |
netscan.py
|
zweed4u/netscan
|
9671acda67af6e8e04c3628f86b2a2ec348e5f2d
|
[
"MIT"
] | null | null | null |
import os, time, socket
from selenium import webdriver
# Relative and Direct paths stink
driverBin = os.path.expanduser("~/Desktop/chromedriver")
print('Binary directory:', driverBin)
#browser instance
driver=webdriver.Chrome(driverBin)
driver.set_page_load_timeout(5)
socket.setdefaulttimeout(5)
#Class A
lastOctet=0
thirdOctet=120
while thirdOctet < 124:
    while lastOctet < 256:
        try:
            driver.get('http://10.0.' + str(thirdOctet) + '.' + str(lastOctet))
        except socket.timeout:
            print('press escape 10.0.' + str(thirdOctet) + '.' + str(lastOctet))
        lastOctet += 1
    time.sleep(5)
    lastOctet = 0
    thirdOctet += 1
driver.close()
print('Scan Complete')
| 23.555556
| 65
| 0.762579
|
4a186ba5950f43356d6c3a1e61c4ebb2ea5657d6
| 12,119
|
py
|
Python
|
tests.py
|
dferens/django-classsettings
|
9894dcbf33e07821c7a71d6ad8839a8711bb6333
|
[
"MIT"
] | 2
|
2015-01-15T17:50:11.000Z
|
2015-10-31T20:27:01.000Z
|
tests.py
|
dferens/django-classsettings
|
9894dcbf33e07821c7a71d6ad8839a8711bb6333
|
[
"MIT"
] | null | null | null |
tests.py
|
dferens/django-classsettings
|
9894dcbf33e07821c7a71d6ad8839a8711bb6333
|
[
"MIT"
] | null | null | null |
import os
import sys
import unittest
import mock
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.utils.unittest import skipIf
from classsettings import Settings, Config, from_env, utils
from classsettings.urls import Context, Scope, url
IS_ABOVE_26 = sys.version_info[0] > 2 or sys.version_info[1] > 6
settings.configure()
class InjectorTestCase(unittest.TestCase):
def setUp(self):
self._old_globals = dict(globals())
def tearDown(self):
for k in [k for k in globals() if k not in self._old_globals]:
globals().pop(k)
class SettingsTestCase(InjectorTestCase):
def test_fields(self):
old_module_vars = set(globals())
self.assertFalse('public_method' in old_module_vars)
class MySettings(Settings):
def public_method(self): return 1
def _private_method(self): return 2
some_field = 2
class some_class(object): pass
new_module_vars = set(globals())
self.assertEqual(new_module_vars - old_module_vars,
set(('public_method', 'some_field', 'some_class')))
def test_related(self):
class MySettings(Settings):
def _private_setting(self): return 1
def public_setting(self): return self._private_setting() * 2
self.assertEqual(globals()['public_setting'], 2)
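    # As the assertions above show, defining a Settings subclass evaluates its
    # public methods and injects the results into the defining module's
    # globals; private (underscore-prefixed) methods stay internal.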
def test_inheritance(self):
old_module_vars = set(globals())
self.assertFalse('public_super' in old_module_vars or
'public_sub' in old_module_vars)
class SuperSettings(Settings):
def public_super(self): return 1
class SubSettings(SuperSettings):
def public_sub(self): return 2
self.assertEqual(set(globals()) - old_module_vars,
set(('public_sub', 'public_super')))
self.assertEqual(globals()['public_super'], 1)
self.assertEqual(globals()['public_sub'], 2)
class ConfigTestCase(InjectorTestCase):
def test_injects(self):
old_module_vars = set(globals())
self.assertFalse('MyConfig' in old_module_vars)
class MyConfig(Config): pass
new_module_vars = set(globals())
self.assertEqual(new_module_vars - old_module_vars,
set(('MyConfig',)))
self.assertTrue(globals()['MyConfig'] == MyConfig == dict())
def test_fields(self):
class MyConfig(Config):
def public_method(self): return 1
def _private_method(self): return 2
some_field = 2
class some_class(object): pass
result_dict = globals()['MyConfig']
self.assertEqual(result_dict, {'public_method': 1, 'some_field': 2,
'some_class': MyConfig['some_class']})
def test_related(self):
class MyConfig(Config):
def public_setting(self): return self._private_setting() * 2
def _private_setting(self): return 1
self.assertEqual(globals()['MyConfig'], {'public_setting': 2})
def test_inheritance(self):
old_module_vars = set(globals())
self.assertFalse('SuperConfig' in old_module_vars or
'Sub1Config' in old_module_vars)
class SuperConfig(Config):
def public_super(self): return 1
class Sub1Config(SuperConfig):
def public_sub(self): return 2 * self.public_super()
class Sub2Config(Sub1Config):
def public_sub2(self): return 2 * self.public_sub()
self.assertEqual(set(globals()) - old_module_vars,
set(('SuperConfig', 'Sub1Config', 'Sub2Config')))
self.assertEqual(SuperConfig, dict(public_super=1))
self.assertEqual(Sub1Config, dict(public_super=1, public_sub=2))
self.assertEqual(Sub2Config, dict(public_super=1, public_sub=2, public_sub2=4))
class FromEnvTestCase(unittest.TestCase):
def setUp(self):
self._old_environ = dict(os.environ)
def tearDown(self):
for key in [k for k in os.environ if k not in self._old_environ]:
os.environ.pop(key)
def test_has_default(self):
self.assertEqual(os.environ.get('CLASSSETTINGS_ENV'), None)
@from_env(key='CLASSSETTINGS_ENV')
def getter(): return 'default'
self.assertEqual(getter(), 'default')
os.environ['CLASSSETTINGS_ENV'] = 'value'
self.assertEqual(getter(), 'value')
def test_no_default(self):
self.assertEqual(os.environ.get('CLASSSETTINGS_ENV'), None)
@from_env(key='CLASSSETTINGS_ENV')
def getter(): pass
self.assertRaises(ImproperlyConfigured, getter)
os.environ['CLASSSETTINGS_ENV'] = 'value'
self.assertEqual(getter(), 'value')
def test_through_with_env(self):
self.assertEqual(os.environ.get('CLASSSETTINGS_ENV'), None)
filter_func = lambda val: val.upper()
@from_env(key='CLASSSETTINGS_ENV', through=filter_func)
def getter(): pass
os.environ['CLASSSETTINGS_ENV'] = 'value'
self.assertEqual(getter(), 'VALUE')
def test_through_with_default(self):
self.assertEqual(os.environ.get('CLASSSETTINGS_ENV'), None)
filter_func = lambda val: val.upper()
@from_env(key='CLASSSETTINGS_ENV', through=filter_func)
def getter(): return 'default'
self.assertEqual(getter(), 'DEFAULT')
class UtilsTestCase(unittest.TestCase):
def test_defaultargs(self):
@utils.defaultargs
def configurable_decorator(*decor_args, **decor_kwargs):
def decorator(func):
def decorated(*func_args, **func_kwargs): pass
return decorator
def test_func(*func_args, **func_kwargs): pass
funcs = set([configurable_decorator(test_func),
configurable_decorator(1)(test_func),
configurable_decorator(kwarg=2)(test_func),
configurable_decorator(1, kwarg=2)(test_func)])
self.assertEqual(len(funcs), 1)
class UrlsTestCase(unittest.TestCase):
def test_url_resolution(self):
view = lambda request: 'response'
with Scope(regex='r/') as prefixed_root:
with Scope(regex='{0}child1/') as child1:
url('{0}url1/', view)
with Scope() as child2:
url('{0}url2/', view)
url('absolute', view)
self.assertEqual(len(prefixed_root.urls), 3)
expected_urls = ('r/child1/url1/', 'r/url2/', 'absolute')
for url_obj, exp_url in zip(prefixed_root.urls, expected_urls):
self.assertEqual(url_obj.regex.pattern, exp_url)
with Scope() as nonprefixed_root:
self.assertRaises(ImproperlyConfigured, url, '{0}', view)
url('absolute', view)
self.assertEqual(nonprefixed_root.urls[0].regex.pattern, 'absolute')
def test_view_resolution(self):
def modules_view(request):
return 'modules view'
def view_callable(request):
return 'callable view'
class CBV(object):
@staticmethod
def as_view():
def actual_view(request):
return 'cbv view'
return actual_view
views_module = type('module', (), {})()
setattr(views_module, 'view_name', modules_view)
setattr(views_module, 'view_callable', view_callable)
setattr(views_module, 'CBV', CBV)
with Scope() as root:
with mock.patch('classsettings.urls.inspect.ismodule') as mock_is_module:
mock_is_module.return_value = True
with Scope(view=views_module):
url('test-url', 'view_name')
with mock.patch('django.core.urlresolvers.import_module') as mock_import:
mock_import.return_value = views_module
with Scope(view='project.app.views'):
url('test-url', '{0}.view_callable')
url('test-url', view_callable)
url('test-url', '{0}.CBV')
url('test-url', CBV)
url('test-url', CBV.as_view())
expected_views = ['modules view', 'callable view', 'callable view',
'cbv view', 'cbv view', 'cbv view']
self.assertEqual([u.callback(None) for u in root.urls], expected_views)
def test_name_resolution(self):
view = lambda request: 'response'
with Scope(name='root') as root_named:
with Scope() as child1:
url('test-url', view, name='{0}_child1')
with Scope(name='{0}_child2'):
url('test-url', view, name='{0}_url')
url('test-url', view, name='absolute')
expected_names = ['root_child1', 'root_child2_url', 'absolute']
self.assertEqual([u.name for u in root_named.urls], expected_names)
with Scope() as root_unnamed:
self.assertRaises(ImproperlyConfigured, url, 'test-url', view, name='{0}')
def test_context_variables(self):
view = lambda request: 'response'
with Scope() as root:
with Scope(vasyan='foo') as child1:
self.assertEqual(child1['vasyan'], 'foo')
with Scope(tadasyan='bar') as child2:
self.assertEqual(child2['vasyan'], 'foo')
self.assertEqual(child2['tadasyan'], 'bar')
with Scope(vasyan='baz') as child3:
self.assertEqual(child3['tadasyan'], 'bar')
self.assertEqual(child3['vasyan'], 'baz')
url('{vasyan}{tadasyan}', view)
child3['tadasyan'] = 'xxx'
url('{vasyan}{tadasyan}', view)
url('{vasyan}{tadasyan}', view)
self.assertRaises(KeyError, child1.__getitem__, 'tadasyan')
self.assertRaises(ImproperlyConfigured, url, '{tadasyan}', view)
self.assertRaises(KeyError, root.__getitem__, 'vasyan')
self.assertRaises(ImproperlyConfigured, url, '{vasyan}', view)
expected_urls = ['bazbar', 'bazxxx', 'foobar']
self.assertEqual([u.regex.pattern for u in root.urls], expected_urls)
def test_full_resolution(self):
view = lambda request: 'response'
with Scope(regex='url') as root:
with Scope(name='name'):
with Scope(view=view):
url('{0}', view, name='{0}')
self.assertEqual([u.regex.pattern for u in root.urls], ['url'])
self.assertEqual([u.callback for u in root.urls], [view])
self.assertEqual([u.name for u in root.urls], ['name'])
def test_context_passthrough(self):
with Scope(regex='a', view='b', name='c') as root:
with Scope():
with Scope():
with Scope():
url('{0}', '{0}', name='{0}')
u = root.urls[0]
self.assertTrue(u.regex.pattern == 'a' and u.callback == 'b' and u.name == 'c')
@skipIf(IS_ABOVE_26, '')
def test_str_format_under_27(self):
view = lambda request: 'response'
with Scope(regex='test') as root:
self.assertRaises(ImproperlyConfigured, url, '{}', view)
def test_context(self):
root = Context(one=1, two=2)
child = Context(root, one='child 1', three=3)
def parent_setter(this, new_parent): this.parent = new_parent
self.assertRaises(TypeError, parent_setter, child, dict())
self.assertTrue('one' in child.__str__())
self.assertTrue('one' in child.__repr__())
self.assertEqual(root['one'], 1)
self.assertEqual(child['one'], 'child 1')
self.assertEqual(child.dict(), dict(one='child 1', two=2, three=3))
self.assertEqual(set(child.keys()), set('one two three'.split()))
self.assertRaises(KeyError, child.__getitem__, 'not exists')
if __name__ == '__main__':
unittest.main()
| 34.428977
| 88
| 0.599142
|
4a186de7c42a87f0feb229aa7f15b2dd9da0129d
| 1,201
|
py
|
Python
|
am/finance/models/financetransaction.py
|
access-missouri/am-django-project
|
2457b8089900c61c73000c1d7479b7a72f6d1855
|
[
"BSD-2-Clause"
] | 4
|
2018-05-01T20:31:49.000Z
|
2021-12-20T19:30:40.000Z
|
am/finance/models/financetransaction.py
|
access-missouri/am-django-project
|
2457b8089900c61c73000c1d7479b7a72f6d1855
|
[
"BSD-2-Clause"
] | 22
|
2017-04-13T15:02:09.000Z
|
2021-02-02T21:48:41.000Z
|
am/finance/models/financetransaction.py
|
access-missouri/am-django-project
|
2457b8089900c61c73000c1d7479b7a72f6d1855
|
[
"BSD-2-Clause"
] | 1
|
2018-07-02T20:08:43.000Z
|
2018-07-02T20:08:43.000Z
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Finance transaction and related models.
"""
from __future__ import unicode_literals
from datetime import datetime
from general.models import AMBaseModel
from django.db import models
from . import FinanceEntity
from django.utils.encoding import python_2_unicode_compatible
@python_2_unicode_compatible
class FinanceTransaction(AMBaseModel):
"""
Someone spent money on something.
"""
t_from = models.ForeignKey(FinanceEntity,
related_name="spending")
t_to = models.ForeignKey(FinanceEntity,
related_name="income")
TRANSACTION_TYPE_CHOICES = (
('M', 'Monetary'),
('I', 'In-Kind'),
)
e_type = models.CharField(
max_length=1,
choices=TRANSACTION_TYPE_CHOICES
)
date = models.DateField()
amount = models.FloatField()
def __str__(self):
return "From {} to {} - {}".format(self.t_from.name, self.t_to.name, self.amount)
def get_absolute_url(self):
return "/finance/transactions/{}".format(self.id)
class Meta:
"""
Model options.
"""
ordering = ['-date']
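# Usage sketch (hypothetical FinanceEntity instances `donor` and `committee`):
#   FinanceTransaction.objects.create(t_from=donor, t_to=committee,
#                                     e_type='M', date=date(2018, 1, 1),
#                                     amount=500.0)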
| 24.510204
| 89
| 0.631141
|
4a186e50c0d2dbe9dca5bdc6676c515499012c34
| 317
|
py
|
Python
|
credentials.py
|
NancyWachiuri/PasswordLoker
|
d307a30b78fdcb70ffbc23d21ecd30bf34a3b931
|
[
"MIT"
] | null | null | null |
credentials.py
|
NancyWachiuri/PasswordLoker
|
d307a30b78fdcb70ffbc23d21ecd30bf34a3b931
|
[
"MIT"
] | null | null | null |
credentials.py
|
NancyWachiuri/PasswordLoker
|
d307a30b78fdcb70ffbc23d21ecd30bf34a3b931
|
[
"MIT"
] | null | null | null |
class Credentials:
    credentials = []
def __init__(self, website_name, website_email, website_password):
self.website_name = website_name
self.website_email = website_email
self.website_password = website_password
def add_credentials(self):
        Credentials.credentials.append(self)
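# Usage sketch:
#   cred = Credentials('twitter', 'user@example.com', 's3cret')
#   cred.add_credentials()  # appended to the shared Credentials.credentials list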
| 31.7
| 66
| 0.728707
|
4a186e5113ab840870272acd65b5d770347d180a
| 1,300
|
py
|
Python
|
tests/test_create_jwt_args.py
|
jeffreyparker/duo_universal_python
|
01515c77abef017d526d70f28d111b171638e372
|
[
"BSD-3-Clause"
] | 8
|
2020-10-22T17:14:42.000Z
|
2021-11-16T02:10:53.000Z
|
tests/test_create_jwt_args.py
|
jeffreyparker/duo_universal_python
|
01515c77abef017d526d70f28d111b171638e372
|
[
"BSD-3-Clause"
] | 9
|
2021-01-01T16:49:13.000Z
|
2022-03-10T15:37:42.000Z
|
tests/test_create_jwt_args.py
|
jeffreyparker/duo_universal_python
|
01515c77abef017d526d70f28d111b171638e372
|
[
"BSD-3-Clause"
] | 14
|
2020-11-05T16:24:58.000Z
|
2022-01-19T20:57:17.000Z
|
from mock import MagicMock, patch
from duo_universal import client
import unittest
CLIENT_ID = "DIXXXXXXXXXXXXXXXXXX"
CLIENT_SECRET = "deadbeefdeadbeefdeadbeefdeadbeefdeadbeef"
HOST = "api-XXXXXXX.test.duosecurity.com"
REDIRECT_URI = "https://www.example.com"
ERROR_TIMEOUT = "Connection to api-xxxxxxx.test.duosecurity.com timed out."
ERROR_NETWORK_CONNECTION_FAILED = "Failed to establish a new connection"
EXPIRATION_TIME = 10 + client.FIVE_MINUTES_IN_SECONDS
RAND_ALPHANUMERIC_STR = "deadbeef"
SUCCESS_JWT_ARGS = {
'iss': CLIENT_ID,
'sub': CLIENT_ID,
'aud': client.OAUTH_V1_TOKEN_ENDPOINT,
'exp': EXPIRATION_TIME,
'jti': RAND_ALPHANUMERIC_STR
}
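# time.time is mocked to return 10 in the test below, so the expected 'exp'
# claim is 10 + FIVE_MINUTES_IN_SECONDS, which is exactly EXPIRATION_TIME.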
class TestCreateJwtArgs(unittest.TestCase):
def setUp(self):
self.client = client.Client(CLIENT_ID, CLIENT_SECRET, HOST, REDIRECT_URI)
@patch("time.time", MagicMock(return_value=10))
def test_create_jwt_args_success(self):
"""
Test that _create_jwt_args creates proper jwt arguments
"""
self.client._generate_rand_alphanumeric = MagicMock(return_value=RAND_ALPHANUMERIC_STR)
actual_jwt_args = self.client._create_jwt_args(client.OAUTH_V1_TOKEN_ENDPOINT)
self.assertEqual(SUCCESS_JWT_ARGS, actual_jwt_args)
if __name__ == '__main__':
unittest.main()
| 30.952381
| 95
| 0.755385
|
4a187027ab450bd68843fac8c4c0ab11730196d1
| 1,533
|
py
|
Python
|
scripts/automation/trex_control_plane/unit_tests/outer_packages.py
|
klement/trex-core
|
b98e2e6d2b8c6caeb233ce36fcbc131ffc45e35e
|
[
"Apache-2.0"
] | 3
|
2019-03-27T08:21:17.000Z
|
2021-01-17T14:27:28.000Z
|
scripts/automation/trex_control_plane/unit_tests/outer_packages.py
|
klement/trex-core
|
b98e2e6d2b8c6caeb233ce36fcbc131ffc45e35e
|
[
"Apache-2.0"
] | 13
|
2019-10-11T12:33:43.000Z
|
2020-02-10T08:28:34.000Z
|
scripts/automation/trex_control_plane/unit_tests/outer_packages.py
|
klement/trex-core
|
b98e2e6d2b8c6caeb233ce36fcbc131ffc45e35e
|
[
"Apache-2.0"
] | 2
|
2019-10-21T15:32:24.000Z
|
2019-10-29T13:06:11.000Z
|
import sys
import os
EXT_PATH = os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir, os.pardir, os.pardir, 'external_libs'))
if not os.path.exists(EXT_PATH):
raise Exception('Wrong path to external libs: %s' % EXT_PATH)
CLIENT_UTILS_MODULES = [
{'name': 'scapy-2.3.1', 'py-dep': True},
{'name': 'texttable-0.8.4'},
]
def generate_module_path(module, is_python3, is_64bit):
platform_path = [module['name']]
if module.get('py-dep'):
platform_path.append('python3' if is_python3 else 'python2')
if module.get('arch-dep'):
platform_path.append('arm' if os.uname()[4] == 'aarch64' else 'intel')
platform_path.append('64bit' if is_64bit else '32bit')
return os.path.normcase(os.path.join(EXT_PATH, *platform_path))
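# Example: generate_module_path({'name': 'scapy-2.3.1', 'py-dep': True}, True, True)
# resolves to <EXT_PATH>/scapy-2.3.1/python3 -- no arch component is appended
# because the module lacks 'arch-dep'.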
def import_module_list(modules_list):
# platform data
is_64bit = sys.maxsize > 0xffffffff
is_python3 = sys.version_info >= (3, 0)
# regular modules
for p in modules_list:
full_path = generate_module_path(p, is_python3, is_64bit)
if not os.path.exists(full_path):
print("Unable to find required module library: '{0}'".format(p['name']))
print("Please provide the correct path using TREX_STL_EXT_PATH variable")
print("current path used: '{0}'".format(full_path))
exit(1)
if full_path not in sys.path:
sys.path.insert(1, full_path)
import_module_list(CLIENT_UTILS_MODULES)
| 32.617021
| 117
| 0.634051
|
4a1870d8674c31b6e7ca035fb65d392c7fcbef31
| 2,204
|
py
|
Python
|
scripts/addons/animation_nodes/ui/problems_panel.py
|
Tilapiatsu/blender-custom_conf
|
05592fedf74e4b7075a6228b8448a5cda10f7753
|
[
"MIT"
] | 2
|
2020-04-16T22:12:40.000Z
|
2022-01-22T17:18:45.000Z
|
scripts/addons/animation_nodes/ui/problems_panel.py
|
Tilapiatsu/blender-custom_conf
|
05592fedf74e4b7075a6228b8448a5cda10f7753
|
[
"MIT"
] | null | null | null |
scripts/addons/animation_nodes/ui/problems_panel.py
|
Tilapiatsu/blender-custom_conf
|
05592fedf74e4b7075a6228b8448a5cda10f7753
|
[
"MIT"
] | 2
|
2019-05-16T04:01:09.000Z
|
2020-08-25T11:42:26.000Z
|
import bpy
import sys
from .. import problems
from .. utils.layout import writeText
from .. draw_handler import drawHandler
from .. graphics.rectangle import Rectangle
from .. utils.blender_ui import getDpiFactor
class ProblemsPanel(bpy.types.Panel):
bl_idname = "an_problems_panel"
bl_label = "Problems"
bl_space_type = "NODE_EDITOR"
bl_region_type = "TOOLS"
@classmethod
def poll(cls, context):
tree = cls.getTree()
if tree is None: return False
return tree.bl_idname == "an_AnimationNodeTree" and problems.problemsExist()
def draw_header(self, context):
self.layout.label(text = "", icon = "ERROR")
def draw(self, context):
layout = self.layout
col = layout.column(align = True)
subcol = col.column(align = True)
subcol.scale_y = 1.5
subcol.operator("an.tag_retry_execution", text = "Retry", icon = "FILE_REFRESH")
if sys.platform == "win32":
col.operator("wm.console_toggle", text = "Toogle Console", icon = "CONSOLE")
layout.separator()
problems.drawCurrentProblemInfo(layout)
layout.separator()
col = layout.column(align = True)
tree = self.getTree()
lastExec = tree.lastExecutionInfo
col.label(text = "Last successful execution using:")
col.label(text = " Blender: v{}".format(lastExec.blenderVersionString))
col.label(text = " Animation Nodes: v{}".format(lastExec.animationNodesVersionString))
if lastExec.isDefault:
writeText(col,
("These versions are only guesses. This file has not been executed "
"in a version that supports storing of version information yet."),
autoWidth = True)
@classmethod
def getTree(cls):
return bpy.context.space_data.edit_tree
@drawHandler("SpaceNodeEditor", "WINDOW")
def drawWarningOverlay():
if problems.problemsExist():
rectangle = Rectangle.fromRegionDimensions(bpy.context.region)
rectangle.draw(
color = (0, 0, 0, 0),
borderColor = (0.9, 0.1, 0.1, 0.6),
borderThickness = 4 * getDpiFactor()
)
| 34.4375
| 99
| 0.636116
|
4a18711d421b609dec9dc48e16bc3c6496dedb37
| 419
|
py
|
Python
|
django_background_job/asgi.py
|
drunkpig/django-background-job
|
d143db1199fbb453814b254e5f1ec890e43a9709
|
[
"MIT"
] | 1
|
2021-07-02T06:22:43.000Z
|
2021-07-02T06:22:43.000Z
|
django_background_job/asgi.py
|
drunkpig/django-background-job
|
d143db1199fbb453814b254e5f1ec890e43a9709
|
[
"MIT"
] | null | null | null |
django_background_job/asgi.py
|
drunkpig/django-background-job
|
d143db1199fbb453814b254e5f1ec890e43a9709
|
[
"MIT"
] | null | null | null |
"""
ASGI config for django_background_job project.
It exposes the ASGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.1/howto/deployment/asgi/
"""
import os
from django.core.asgi import get_asgi_application
os.environ.setdefault('DJANGO_SETTINGS_MODULE', 'django_background_job.settings')
application = get_asgi_application()
| 24.647059
| 81
| 0.799523
|
4a187146270b3a8e8ce852a834a13981b3c2bf62
| 34,215
|
py
|
Python
|
embyapi/api/trailers_service_api.py
|
stanionascu/python-embyapi
|
a3f7aa49aea4052277cc43605c0d89bc6ff21913
|
[
"BSD-3-Clause"
] | null | null | null |
embyapi/api/trailers_service_api.py
|
stanionascu/python-embyapi
|
a3f7aa49aea4052277cc43605c0d89bc6ff21913
|
[
"BSD-3-Clause"
] | null | null | null |
embyapi/api/trailers_service_api.py
|
stanionascu/python-embyapi
|
a3f7aa49aea4052277cc43605c0d89bc6ff21913
|
[
"BSD-3-Clause"
] | null | null | null |
# coding: utf-8
"""
Emby Server API
Explore the Emby Server API # noqa: E501
OpenAPI spec version: 4.1.1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from embyapi.api_client import ApiClient
class TrailersServiceApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
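    # Usage sketch (assumes an ApiClient already configured with host and auth):
    #   api = TrailersServiceApi()
    #   page = api.get_trailers(user_id='<user-id>', limit=10, sort_by='SortName')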
def get_trailers(self, **kwargs): # noqa: E501
"""Finds movies and trailers similar to a given trailer. # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_trailers(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str artist_type: Artist or AlbumArtist
:param str max_official_rating: Optional filter by maximum official rating (PG, PG-13, TV-MA, etc).
:param bool has_theme_song: Optional filter by items with theme songs.
:param bool has_theme_video: Optional filter by items with theme videos.
:param bool has_subtitles: Optional filter by items with subtitles.
:param bool has_special_feature: Optional filter by items with special features.
:param bool has_trailer: Optional filter by items with trailers.
:param str adjacent_to: Optional. Return items that are siblings of a supplied item.
:param int min_index_number: Optional filter by minimum index number.
:param int min_players: Optional filter by minimum number of game players.
:param int max_players: Optional filter by maximum number of game players.
:param int parent_index_number: Optional filter by parent index number.
:param bool has_parental_rating: Optional filter by items that have or do not have a parental rating
:param bool is_hd: Optional filter by items that are HD or not.
:param str location_types: Optional. If specified, results will be filtered based on LocationType. This allows multiple, comma delimeted.
:param str exclude_location_types: Optional. If specified, results will be filtered based on LocationType. This allows multiple, comma delimeted.
:param bool is_missing: Optional filter by items that are missing episodes or not.
:param bool is_unaired: Optional filter by items that are unaired episodes or not.
:param float min_community_rating: Optional filter by minimum community rating.
:param float min_critic_rating: Optional filter by minimum critic rating.
:param int aired_during_season: Gets all episodes that aired during a season, including specials.
:param str min_premiere_date: Optional. The minimum premiere date. Format = ISO
:param str min_date_last_saved: Optional. The minimum premiere date. Format = ISO
:param str min_date_last_saved_for_user: Optional. The minimum premiere date. Format = ISO
:param str max_premiere_date: Optional. The maximum premiere date. Format = ISO
:param bool has_overview: Optional filter by items that have an overview or not.
:param bool has_imdb_id: Optional filter by items that have an imdb id or not.
:param bool has_tmdb_id: Optional filter by items that have a tmdb id or not.
:param bool has_tvdb_id: Optional filter by items that have a tvdb id or not.
        :param str exclude_item_ids: Optional. If specified, results will be filtered by excluding item ids. This allows multiple, comma delimited.
:param int start_index: Optional. The record index to start at. All items with a lower index will be dropped from the results.
:param int limit: Optional. The maximum number of records to return
:param bool recursive: When searching within folders, this determines whether or not the search will be recursive. true/false
:param str sort_order: Sort Order - Ascending,Descending
:param str parent_id: Specify this to localize the search to a specific item or folder. Omit to use the root
:param str fields: Optional. Specify additional fields of information to return in the output. This allows multiple, comma delimeted. Options: Budget, Chapters, DateCreated, Genres, HomePageUrl, IndexOptions, MediaStreams, Overview, ParentId, Path, People, ProviderIds, PrimaryImageAspectRatio, Revenue, SortName, Studios, Taglines
:param str exclude_item_types: Optional. If specified, results will be filtered based on item type. This allows multiple, comma delimeted.
:param str include_item_types: Optional. If specified, results will be filtered based on item type. This allows multiple, comma delimeted.
:param str any_provider_id_equals: Optional. If specified, result will be filtered to contain only items which match at least one of the specified IDs. Each provider ID must be in the form 'prov.id', e.g. 'imdb.tt123456'. This allows multiple, comma delimeted value pairs.
:param str filters: Optional. Specify additional filters to apply. This allows multiple, comma delimeted. Options: IsFolder, IsNotFolder, IsUnplayed, IsPlayed, IsFavorite, IsResumable, Likes, Dislikes
:param bool is_favorite: Optional filter by items that are marked as favorite, or not.
:param bool is_movie: Optional filter for movies.
        :param bool is_series: Optional filter for series.
:param bool is_news: Optional filter for news.
:param bool is_kids: Optional filter for kids.
:param bool is_sports: Optional filter for sports.
:param str media_types: Optional filter by MediaType. Allows multiple, comma delimited.
:param str image_types: Optional. If specified, results will be filtered based on those containing image types. This allows multiple, comma delimited.
:param str sort_by: Optional. Specify one or more sort orders, comma delimeted. Options: Album, AlbumArtist, Artist, Budget, CommunityRating, CriticRating, DateCreated, DatePlayed, PlayCount, PremiereDate, ProductionYear, SortName, Random, Revenue, Runtime
:param bool is_played: Optional filter by items that are played, or not.
:param str genres: Optional. If specified, results will be filtered based on genre. This allows multiple, pipe delimeted.
:param str official_ratings: Optional. If specified, results will be filtered based on OfficialRating. This allows multiple, pipe delimeted.
:param str tags: Optional. If specified, results will be filtered based on tag. This allows multiple, pipe delimeted.
:param str years: Optional. If specified, results will be filtered based on production year. This allows multiple, comma delimeted.
:param bool enable_images: Optional, include image information in output
:param bool enable_user_data: Optional, include user data
:param int image_type_limit: Optional, the max number of images to return, per image type
:param str enable_image_types: Optional. The image types to include in the output.
:param str person: Optional. If specified, results will be filtered to include only those containing the specified person.
:param str person_ids: Optional. If specified, results will be filtered to include only those containing the specified person.
:param str person_types: Optional. If specified, along with Person, results will be filtered to include only those containing the specified person and PersonType. Allows multiple, comma-delimited
:param str studios: Optional. If specified, results will be filtered based on studio. This allows multiple, pipe delimeted.
:param str studio_ids: Optional. If specified, results will be filtered based on studio. This allows multiple, pipe delimeted.
:param str artists: Optional. If specified, results will be filtered based on artist. This allows multiple, pipe delimeted.
:param str artist_ids: Optional. If specified, results will be filtered based on artist. This allows multiple, pipe delimeted.
:param str albums: Optional. If specified, results will be filtered based on album. This allows multiple, pipe delimeted.
:param str ids: Optional. If specific items are needed, specify a list of item id's to retrieve. This allows multiple, comma delimited.
:param str video_types: Optional filter by VideoType (videofile, dvd, bluray, iso). Allows multiple, comma delimeted.
:param str containers: Optional filter by Container. Allows multiple, comma delimeted.
:param str audio_codecs: Optional filter by AudioCodec. Allows multiple, comma delimeted.
:param str video_codecs: Optional filter by VideoCodec. Allows multiple, comma delimeted.
:param str subtitle_codecs: Optional filter by SubtitleCodec. Allows multiple, comma delimeted.
:param str path: Optional filter by Path.
:param str user_id: User Id
:param str min_official_rating: Optional filter by minimum official rating (PG, PG-13, TV-MA, etc).
:param bool is_locked: Optional filter by items that are locked.
:param bool is_place_holder: Optional filter by items that are placeholders
:param bool has_official_rating: Optional filter by items that have official ratings
:param bool group_items_into_collections: Whether or not to hide items behind their boxsets.
:param bool is3_d: Optional filter by items that are 3D, or not.
:param str series_status: Optional filter by Series Status. Allows multiple, comma delimeted.
        :param str name_starts_with_or_greater: Optional filter by items whose name sorts equal to or greater than a given input string.
        :param str name_starts_with: Optional filter by items whose name sorts equal to a given input string.
        :param str name_less_than: Optional filter by items whose name sorts equal to or less than a given input string.
:return: QueryResultBaseItemDto
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_trailers_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_trailers_with_http_info(**kwargs) # noqa: E501
return data
def get_trailers_with_http_info(self, **kwargs): # noqa: E501
"""Finds movies and trailers similar to a given trailer. # noqa: E501
Requires authentication as user # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_trailers_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:param str artist_type: Artist or AlbumArtist
:param str max_official_rating: Optional filter by maximum official rating (PG, PG-13, TV-MA, etc).
:param bool has_theme_song: Optional filter by items with theme songs.
:param bool has_theme_video: Optional filter by items with theme videos.
:param bool has_subtitles: Optional filter by items with subtitles.
:param bool has_special_feature: Optional filter by items with special features.
:param bool has_trailer: Optional filter by items with trailers.
:param str adjacent_to: Optional. Return items that are siblings of a supplied item.
:param int min_index_number: Optional filter by minimum index number.
:param int min_players: Optional filter by minimum number of game players.
:param int max_players: Optional filter by maximum number of game players.
:param int parent_index_number: Optional filter by parent index number.
:param bool has_parental_rating: Optional filter by items that have or do not have a parental rating
:param bool is_hd: Optional filter by items that are HD or not.
:param str location_types: Optional. If specified, results will be filtered based on LocationType. This allows multiple, comma delimeted.
:param str exclude_location_types: Optional. If specified, results will be filtered based on LocationType. This allows multiple, comma delimeted.
:param bool is_missing: Optional filter by items that are missing episodes or not.
:param bool is_unaired: Optional filter by items that are unaired episodes or not.
:param float min_community_rating: Optional filter by minimum community rating.
:param float min_critic_rating: Optional filter by minimum critic rating.
:param int aired_during_season: Gets all episodes that aired during a season, including specials.
:param str min_premiere_date: Optional. The minimum premiere date. Format = ISO
:param str min_date_last_saved: Optional. The minimum premiere date. Format = ISO
:param str min_date_last_saved_for_user: Optional. The minimum premiere date. Format = ISO
:param str max_premiere_date: Optional. The maximum premiere date. Format = ISO
:param bool has_overview: Optional filter by items that have an overview or not.
:param bool has_imdb_id: Optional filter by items that have an imdb id or not.
:param bool has_tmdb_id: Optional filter by items that have a tmdb id or not.
:param bool has_tvdb_id: Optional filter by items that have a tvdb id or not.
        :param str exclude_item_ids: Optional. If specified, results will be filtered by excluding item ids. This allows multiple, comma delimited.
:param int start_index: Optional. The record index to start at. All items with a lower index will be dropped from the results.
:param int limit: Optional. The maximum number of records to return
:param bool recursive: When searching within folders, this determines whether or not the search will be recursive. true/false
:param str sort_order: Sort Order - Ascending,Descending
:param str parent_id: Specify this to localize the search to a specific item or folder. Omit to use the root
:param str fields: Optional. Specify additional fields of information to return in the output. This allows multiple, comma delimeted. Options: Budget, Chapters, DateCreated, Genres, HomePageUrl, IndexOptions, MediaStreams, Overview, ParentId, Path, People, ProviderIds, PrimaryImageAspectRatio, Revenue, SortName, Studios, Taglines
:param str exclude_item_types: Optional. If specified, results will be filtered based on item type. This allows multiple, comma delimeted.
:param str include_item_types: Optional. If specified, results will be filtered based on item type. This allows multiple, comma delimeted.
:param str any_provider_id_equals: Optional. If specified, result will be filtered to contain only items which match at least one of the specified IDs. Each provider ID must be in the form 'prov.id', e.g. 'imdb.tt123456'. This allows multiple, comma delimeted value pairs.
:param str filters: Optional. Specify additional filters to apply. This allows multiple, comma delimeted. Options: IsFolder, IsNotFolder, IsUnplayed, IsPlayed, IsFavorite, IsResumable, Likes, Dislikes
:param bool is_favorite: Optional filter by items that are marked as favorite, or not.
:param bool is_movie: Optional filter for movies.
        :param bool is_series: Optional filter for series.
:param bool is_news: Optional filter for news.
:param bool is_kids: Optional filter for kids.
:param bool is_sports: Optional filter for sports.
:param str media_types: Optional filter by MediaType. Allows multiple, comma delimited.
:param str image_types: Optional. If specified, results will be filtered based on those containing image types. This allows multiple, comma delimited.
:param str sort_by: Optional. Specify one or more sort orders, comma delimeted. Options: Album, AlbumArtist, Artist, Budget, CommunityRating, CriticRating, DateCreated, DatePlayed, PlayCount, PremiereDate, ProductionYear, SortName, Random, Revenue, Runtime
:param bool is_played: Optional filter by items that are played, or not.
:param str genres: Optional. If specified, results will be filtered based on genre. This allows multiple, pipe delimeted.
:param str official_ratings: Optional. If specified, results will be filtered based on OfficialRating. This allows multiple, pipe delimeted.
:param str tags: Optional. If specified, results will be filtered based on tag. This allows multiple, pipe delimeted.
:param str years: Optional. If specified, results will be filtered based on production year. This allows multiple, comma delimeted.
:param bool enable_images: Optional, include image information in output
:param bool enable_user_data: Optional, include user data
:param int image_type_limit: Optional, the max number of images to return, per image type
:param str enable_image_types: Optional. The image types to include in the output.
:param str person: Optional. If specified, results will be filtered to include only those containing the specified person.
:param str person_ids: Optional. If specified, results will be filtered to include only those containing the specified person.
:param str person_types: Optional. If specified, along with Person, results will be filtered to include only those containing the specified person and PersonType. Allows multiple, comma-delimited
:param str studios: Optional. If specified, results will be filtered based on studio. This allows multiple, pipe delimeted.
:param str studio_ids: Optional. If specified, results will be filtered based on studio. This allows multiple, pipe delimeted.
:param str artists: Optional. If specified, results will be filtered based on artist. This allows multiple, pipe delimeted.
:param str artist_ids: Optional. If specified, results will be filtered based on artist. This allows multiple, pipe delimeted.
:param str albums: Optional. If specified, results will be filtered based on album. This allows multiple, pipe delimeted.
:param str ids: Optional. If specific items are needed, specify a list of item id's to retrieve. This allows multiple, comma delimited.
:param str video_types: Optional filter by VideoType (videofile, dvd, bluray, iso). Allows multiple, comma delimeted.
:param str containers: Optional filter by Container. Allows multiple, comma delimeted.
:param str audio_codecs: Optional filter by AudioCodec. Allows multiple, comma delimeted.
:param str video_codecs: Optional filter by VideoCodec. Allows multiple, comma delimeted.
:param str subtitle_codecs: Optional filter by SubtitleCodec. Allows multiple, comma delimeted.
:param str path: Optional filter by Path.
:param str user_id: User Id
:param str min_official_rating: Optional filter by minimum official rating (PG, PG-13, TV-MA, etc).
:param bool is_locked: Optional filter by items that are locked.
:param bool is_place_holder: Optional filter by items that are placeholders
:param bool has_official_rating: Optional filter by items that have official ratings
:param bool group_items_into_collections: Whether or not to hide items behind their boxsets.
:param bool is3_d: Optional filter by items that are 3D, or not.
:param str series_status: Optional filter by Series Status. Allows multiple, comma delimeted.
        :param str name_starts_with_or_greater: Optional filter by items whose name sorts equal to or greater than a given input string.
        :param str name_starts_with: Optional filter by items whose name sorts equal to a given input string.
        :param str name_less_than: Optional filter by items whose name sorts equal to or less than a given input string.
:return: QueryResultBaseItemDto
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['artist_type', 'max_official_rating', 'has_theme_song', 'has_theme_video', 'has_subtitles', 'has_special_feature', 'has_trailer', 'adjacent_to', 'min_index_number', 'min_players', 'max_players', 'parent_index_number', 'has_parental_rating', 'is_hd', 'location_types', 'exclude_location_types', 'is_missing', 'is_unaired', 'min_community_rating', 'min_critic_rating', 'aired_during_season', 'min_premiere_date', 'min_date_last_saved', 'min_date_last_saved_for_user', 'max_premiere_date', 'has_overview', 'has_imdb_id', 'has_tmdb_id', 'has_tvdb_id', 'exclude_item_ids', 'start_index', 'limit', 'recursive', 'sort_order', 'parent_id', 'fields', 'exclude_item_types', 'include_item_types', 'any_provider_id_equals', 'filters', 'is_favorite', 'is_movie', 'is_series', 'is_news', 'is_kids', 'is_sports', 'media_types', 'image_types', 'sort_by', 'is_played', 'genres', 'official_ratings', 'tags', 'years', 'enable_images', 'enable_user_data', 'image_type_limit', 'enable_image_types', 'person', 'person_ids', 'person_types', 'studios', 'studio_ids', 'artists', 'artist_ids', 'albums', 'ids', 'video_types', 'containers', 'audio_codecs', 'video_codecs', 'subtitle_codecs', 'path', 'user_id', 'min_official_rating', 'is_locked', 'is_place_holder', 'has_official_rating', 'group_items_into_collections', 'is3_d', 'series_status', 'name_starts_with_or_greater', 'name_starts_with', 'name_less_than'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_trailers" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
if 'artist_type' in params:
query_params.append(('ArtistType', params['artist_type'])) # noqa: E501
if 'max_official_rating' in params:
query_params.append(('MaxOfficialRating', params['max_official_rating'])) # noqa: E501
if 'has_theme_song' in params:
query_params.append(('HasThemeSong', params['has_theme_song'])) # noqa: E501
if 'has_theme_video' in params:
query_params.append(('HasThemeVideo', params['has_theme_video'])) # noqa: E501
if 'has_subtitles' in params:
query_params.append(('HasSubtitles', params['has_subtitles'])) # noqa: E501
if 'has_special_feature' in params:
query_params.append(('HasSpecialFeature', params['has_special_feature'])) # noqa: E501
if 'has_trailer' in params:
query_params.append(('HasTrailer', params['has_trailer'])) # noqa: E501
if 'adjacent_to' in params:
query_params.append(('AdjacentTo', params['adjacent_to'])) # noqa: E501
if 'min_index_number' in params:
query_params.append(('MinIndexNumber', params['min_index_number'])) # noqa: E501
if 'min_players' in params:
query_params.append(('MinPlayers', params['min_players'])) # noqa: E501
if 'max_players' in params:
query_params.append(('MaxPlayers', params['max_players'])) # noqa: E501
if 'parent_index_number' in params:
query_params.append(('ParentIndexNumber', params['parent_index_number'])) # noqa: E501
if 'has_parental_rating' in params:
query_params.append(('HasParentalRating', params['has_parental_rating'])) # noqa: E501
if 'is_hd' in params:
query_params.append(('IsHD', params['is_hd'])) # noqa: E501
if 'location_types' in params:
query_params.append(('LocationTypes', params['location_types'])) # noqa: E501
if 'exclude_location_types' in params:
query_params.append(('ExcludeLocationTypes', params['exclude_location_types'])) # noqa: E501
if 'is_missing' in params:
query_params.append(('IsMissing', params['is_missing'])) # noqa: E501
if 'is_unaired' in params:
query_params.append(('IsUnaired', params['is_unaired'])) # noqa: E501
if 'min_community_rating' in params:
query_params.append(('MinCommunityRating', params['min_community_rating'])) # noqa: E501
if 'min_critic_rating' in params:
query_params.append(('MinCriticRating', params['min_critic_rating'])) # noqa: E501
if 'aired_during_season' in params:
query_params.append(('AiredDuringSeason', params['aired_during_season'])) # noqa: E501
if 'min_premiere_date' in params:
query_params.append(('MinPremiereDate', params['min_premiere_date'])) # noqa: E501
if 'min_date_last_saved' in params:
query_params.append(('MinDateLastSaved', params['min_date_last_saved'])) # noqa: E501
if 'min_date_last_saved_for_user' in params:
query_params.append(('MinDateLastSavedForUser', params['min_date_last_saved_for_user'])) # noqa: E501
if 'max_premiere_date' in params:
query_params.append(('MaxPremiereDate', params['max_premiere_date'])) # noqa: E501
if 'has_overview' in params:
query_params.append(('HasOverview', params['has_overview'])) # noqa: E501
if 'has_imdb_id' in params:
query_params.append(('HasImdbId', params['has_imdb_id'])) # noqa: E501
if 'has_tmdb_id' in params:
query_params.append(('HasTmdbId', params['has_tmdb_id'])) # noqa: E501
if 'has_tvdb_id' in params:
query_params.append(('HasTvdbId', params['has_tvdb_id'])) # noqa: E501
if 'exclude_item_ids' in params:
query_params.append(('ExcludeItemIds', params['exclude_item_ids'])) # noqa: E501
if 'start_index' in params:
query_params.append(('StartIndex', params['start_index'])) # noqa: E501
if 'limit' in params:
query_params.append(('Limit', params['limit'])) # noqa: E501
if 'recursive' in params:
query_params.append(('Recursive', params['recursive'])) # noqa: E501
if 'sort_order' in params:
query_params.append(('SortOrder', params['sort_order'])) # noqa: E501
if 'parent_id' in params:
query_params.append(('ParentId', params['parent_id'])) # noqa: E501
if 'fields' in params:
query_params.append(('Fields', params['fields'])) # noqa: E501
if 'exclude_item_types' in params:
query_params.append(('ExcludeItemTypes', params['exclude_item_types'])) # noqa: E501
if 'include_item_types' in params:
query_params.append(('IncludeItemTypes', params['include_item_types'])) # noqa: E501
if 'any_provider_id_equals' in params:
query_params.append(('AnyProviderIdEquals', params['any_provider_id_equals'])) # noqa: E501
if 'filters' in params:
query_params.append(('Filters', params['filters'])) # noqa: E501
if 'is_favorite' in params:
query_params.append(('IsFavorite', params['is_favorite'])) # noqa: E501
if 'is_movie' in params:
query_params.append(('IsMovie', params['is_movie'])) # noqa: E501
if 'is_series' in params:
query_params.append(('IsSeries', params['is_series'])) # noqa: E501
if 'is_news' in params:
query_params.append(('IsNews', params['is_news'])) # noqa: E501
if 'is_kids' in params:
query_params.append(('IsKids', params['is_kids'])) # noqa: E501
if 'is_sports' in params:
query_params.append(('IsSports', params['is_sports'])) # noqa: E501
if 'media_types' in params:
query_params.append(('MediaTypes', params['media_types'])) # noqa: E501
if 'image_types' in params:
query_params.append(('ImageTypes', params['image_types'])) # noqa: E501
if 'sort_by' in params:
query_params.append(('SortBy', params['sort_by'])) # noqa: E501
if 'is_played' in params:
query_params.append(('IsPlayed', params['is_played'])) # noqa: E501
if 'genres' in params:
query_params.append(('Genres', params['genres'])) # noqa: E501
if 'official_ratings' in params:
query_params.append(('OfficialRatings', params['official_ratings'])) # noqa: E501
if 'tags' in params:
query_params.append(('Tags', params['tags'])) # noqa: E501
if 'years' in params:
query_params.append(('Years', params['years'])) # noqa: E501
if 'enable_images' in params:
query_params.append(('EnableImages', params['enable_images'])) # noqa: E501
if 'enable_user_data' in params:
query_params.append(('EnableUserData', params['enable_user_data'])) # noqa: E501
if 'image_type_limit' in params:
query_params.append(('ImageTypeLimit', params['image_type_limit'])) # noqa: E501
if 'enable_image_types' in params:
query_params.append(('EnableImageTypes', params['enable_image_types'])) # noqa: E501
if 'person' in params:
query_params.append(('Person', params['person'])) # noqa: E501
if 'person_ids' in params:
query_params.append(('PersonIds', params['person_ids'])) # noqa: E501
if 'person_types' in params:
query_params.append(('PersonTypes', params['person_types'])) # noqa: E501
if 'studios' in params:
query_params.append(('Studios', params['studios'])) # noqa: E501
if 'studio_ids' in params:
query_params.append(('StudioIds', params['studio_ids'])) # noqa: E501
if 'artists' in params:
query_params.append(('Artists', params['artists'])) # noqa: E501
if 'artist_ids' in params:
query_params.append(('ArtistIds', params['artist_ids'])) # noqa: E501
if 'albums' in params:
query_params.append(('Albums', params['albums'])) # noqa: E501
if 'ids' in params:
query_params.append(('Ids', params['ids'])) # noqa: E501
if 'video_types' in params:
query_params.append(('VideoTypes', params['video_types'])) # noqa: E501
if 'containers' in params:
query_params.append(('Containers', params['containers'])) # noqa: E501
if 'audio_codecs' in params:
query_params.append(('AudioCodecs', params['audio_codecs'])) # noqa: E501
if 'video_codecs' in params:
query_params.append(('VideoCodecs', params['video_codecs'])) # noqa: E501
if 'subtitle_codecs' in params:
query_params.append(('SubtitleCodecs', params['subtitle_codecs'])) # noqa: E501
if 'path' in params:
query_params.append(('Path', params['path'])) # noqa: E501
if 'user_id' in params:
query_params.append(('UserId', params['user_id'])) # noqa: E501
if 'min_official_rating' in params:
query_params.append(('MinOfficialRating', params['min_official_rating'])) # noqa: E501
if 'is_locked' in params:
query_params.append(('IsLocked', params['is_locked'])) # noqa: E501
if 'is_place_holder' in params:
query_params.append(('IsPlaceHolder', params['is_place_holder'])) # noqa: E501
if 'has_official_rating' in params:
query_params.append(('HasOfficialRating', params['has_official_rating'])) # noqa: E501
if 'group_items_into_collections' in params:
query_params.append(('GroupItemsIntoCollections', params['group_items_into_collections'])) # noqa: E501
if 'is3_d' in params:
query_params.append(('Is3D', params['is3_d'])) # noqa: E501
if 'series_status' in params:
query_params.append(('SeriesStatus', params['series_status'])) # noqa: E501
if 'name_starts_with_or_greater' in params:
query_params.append(('NameStartsWithOrGreater', params['name_starts_with_or_greater'])) # noqa: E501
if 'name_starts_with' in params:
query_params.append(('NameStartsWith', params['name_starts_with'])) # noqa: E501
if 'name_less_than' in params:
query_params.append(('NameLessThan', params['name_less_than'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json', 'application/xml']) # noqa: E501
# Authentication setting
auth_settings = ['apikeyauth', 'embyauth'] # noqa: E501
return self.api_client.call_api(
'/Trailers', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='QueryResultBaseItemDto', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
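# --- Hedged usage sketch (illustration only, not part of the generated client) ---
# A minimal example of calling the /Trailers endpoint through this swagger-
# generated client. The package name `embyapi`, the `Configuration`/`ApiClient`
# helpers, and the service-class name `TrailersServiceApi` are assumptions
# based on the usual swagger-codegen layout; adjust them to the real package.
#
#   import embyapi
#   conf = embyapi.Configuration()
#   conf.api_key['api_key'] = 'YOUR-EMBY-API-KEY'  # placeholder
#   api = embyapi.TrailersServiceApi(embyapi.ApiClient(conf))
#   page = api.get_trailers(include_item_types='Movie', limit=10)
#   for item in page.items:  # page is a QueryResultBaseItemDto
#       print(item.name)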
| 74.868709
| 1,424
| 0.69639
|
4a187189e706402c46d77e8171a68041c248b87a
| 387
|
py
|
Python
|
occpm/wsgi.py
|
JanKruska/occpm
|
0838f522064429cb7e3db3eb56379b731aabc6dc
|
[
"MIT"
] | 2
|
2021-11-30T17:23:23.000Z
|
2021-12-30T12:12:18.000Z
|
occpm/wsgi.py
|
JanKruska/occpm
|
0838f522064429cb7e3db3eb56379b731aabc6dc
|
[
"MIT"
] | null | null | null |
occpm/wsgi.py
|
JanKruska/occpm
|
0838f522064429cb7e3db3eb56379b731aabc6dc
|
[
"MIT"
] | 3
|
2021-11-11T17:46:32.000Z
|
2022-01-03T22:25:21.000Z
|
"""
WSGI config for occpm project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/3.2/howto/deployment/wsgi/
"""
import os
from django.core.wsgi import get_wsgi_application
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "occpm.settings")
application = get_wsgi_application()
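# A hedged deployment sketch: with this module in place, any WSGI server can
# import and serve `application`; for example, with gunicorn (assumed to be
# installed separately):
#
#   gunicorn occpm.wsgi:application --bind 0.0.0.0:8000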
| 22.764706
| 78
| 0.782946
|
4a1873b33eee70539513ac95afbe778fcfa3fbfd
| 1,071
|
py
|
Python
|
PriceHero/_journeys.py
|
bizzyvinci/PriceHero
|
c32a382143ca8ef63bb77c3dbd3e38676cc410d6
|
[
"MIT"
] | null | null | null |
PriceHero/_journeys.py
|
bizzyvinci/PriceHero
|
c32a382143ca8ef63bb77c3dbd3e38676cc410d6
|
[
"MIT"
] | null | null | null |
PriceHero/_journeys.py
|
bizzyvinci/PriceHero
|
c32a382143ca8ef63bb77c3dbd3e38676cc410d6
|
[
"MIT"
] | null | null | null |
from requests import get
from selectorlib import Extractor
def _journeys(journeys_url):
extractor = Extractor.from_yaml_string("""
name:
css: h1.detail-name
xpath: null
type: Text
price:
css: span.regular-price
xpath: null
type: Text
""")
headers = {
'authority': 'www.journeys.com',
'pragma': 'no-cache',
'cache-control': 'no-cache',
'dnt': '1',
'upgrade-insecure-requests': '1',
'user-agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.83 Safari/537.36',
'accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'sec-fetch-site': 'none',
'sec-fetch-mode': 'navigate',
'sec-fetch-dest': 'document',
'accept-language': 'en-GB,en-US;q=0.9,en;q=0.8'}
website = get(journeys_url, headers=headers)
productdata = extractor.extract(website.text)
return productdata
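# Hedged usage sketch (the product URL below is a placeholder, not a real page):
#
#   data = _journeys("https://www.journeys.com/product/some-shoe")
#   # -> {'name': '...', 'price': '...'} as extracted by the selectors above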
| 35.7
| 145
| 0.612512
|
4a187439a723bcee596551b30fecc504066eac63
| 3,682
|
py
|
Python
|
api/instantboxManager.py
|
mexious/vm_image
|
18e671558b20dc92328108db51344660a2d3fa89
|
[
"MIT"
] | null | null | null |
api/instantboxManager.py
|
mexious/vm_image
|
18e671558b20dc92328108db51344660a2d3fa89
|
[
"MIT"
] | null | null | null |
api/instantboxManager.py
|
mexious/vm_image
|
18e671558b20dc92328108db51344660a2d3fa89
|
[
"MIT"
] | null | null | null |
import docker
import random
import string
import time
import json
class InstantboxManager(object):
CONTAINER_PREFIX = 'instantbox_managed_'
TIMEOUT_LABEL = 'org.instantbox.variables.EXPIRATION_TIMESTAMP'
OS_LIST = None
def __init__(self):
self.client = docker.from_env()
try:
with open('manifest.json', 'r') as os_manifest:
self.OS_LIST = json.load(os_manifest)
except Exception:
pass
if self.OS_LIST is None:
raise Exception(
'Could not load manifest.json. ' +
'Download it from https://gist.githubusercontent.com/rifkytech/ced766832986cdeae86efcadc430e41e/raw/5bc1c28e304827ab82bd125814c5a482308192ea/manifest.json'
)
self.AVAILABLE_OS_LIST = []
for os in self.OS_LIST:
for ver in os['subList']:
self.AVAILABLE_OS_LIST.append(ver['osCode'])
def is_create_container(self,
mem,
cpu,
os_name,
os_timeout,
open_port=None):
if open_port is None:
port_dict = {'1588/tcp': None}
else:
port_dict = {'1588/tcp': None, '{}/tcp'.format(open_port): None}
container_name = self.generateContainerName()
try:
self.client.containers.run(
image=os_name,
cpu_period=100000,
cpu_quota=int('%s0000' % cpu),
mem_limit='%sm' % mem,
name=container_name,
ports=port_dict,
restart_policy={'Name': 'always'},
labels={self.TIMEOUT_LABEL: str.format('{:.0f}', os_timeout)},
tty=True,
detach=True,
)
except Exception:
return None
else:
return container_name
def get_container_ports(self, container_name):
try:
ports = self.client.containers.get(
container_name).attrs['NetworkSettings']['Ports']
return {
port: mapped_ports[0]['HostPort']
for port, mapped_ports in ports.items()
}
except Exception:
return None
def remove_timeout_containers(self):
for container in self.client.containers.list():
if container.name.startswith(self.CONTAINER_PREFIX):
timeout = container.labels.get(self.TIMEOUT_LABEL)
if timeout is not None and float(timeout) < time.time():
self.is_rm_container(container.name)
def is_rm_container(self, container_id) -> bool:
try:
container = self.client.containers.get(container_id)
except docker.errors.NotFound:
return True
        else:
            if container.name.startswith(self.CONTAINER_PREFIX):
                container.remove(force=True)
                return True
            # Refuse to remove containers this manager does not own.
            return False
def is_os_available(self, osCode=None) -> bool:
return osCode is not None and osCode in self.AVAILABLE_OS_LIST
def generateContainerName(self) -> str:
return self.CONTAINER_PREFIX + ''.join(
random.sample(string.ascii_letters + string.digits, 16))
if __name__ == '__main__':
test = InstantboxManager()
container_name = test.is_create_container('512', 1,
'instantbox/ubuntu:latest',
time.time())
test.get_container_ports(container_name)
test.remove_timeout_containers()
test.is_rm_container(container_name)
| 34.411215
| 171
| 0.562466
|
4a1875952dba552526f6af01c0c1c9d6b317ca90
| 2,363
|
py
|
Python
|
AtCoder/ABC120/D.py
|
takaaki82/Java-Lessons
|
c4f11462bf84c091527dde5f25068498bfb2cc49
|
[
"MIT"
] | 1
|
2018-11-25T04:15:45.000Z
|
2018-11-25T04:15:45.000Z
|
AtCoder/ABC120/D.py
|
takaaki82/Java-Lessons
|
c4f11462bf84c091527dde5f25068498bfb2cc49
|
[
"MIT"
] | null | null | null |
AtCoder/ABC120/D.py
|
takaaki82/Java-Lessons
|
c4f11462bf84c091527dde5f25068498bfb2cc49
|
[
"MIT"
] | 2
|
2018-08-08T13:01:14.000Z
|
2018-11-25T12:38:36.000Z
|
class UnionFind:
def __init__(self, n):
self.parent = [i for i in range(n + 1)]
self.rank = [0 for _ in range(n + 1)]
self.size = [1] * (n + 1)
self.group = [[i] for i in range(n + 1)]
def find(self, x):
# If x is root
if self.parent[x] == x:
return x
        # If x is not root, recurse on x's parent (path compression flattens the tree)
else:
self.parent[x] = self.find(self.parent[x])
return self.parent[x]
def union(self, x, y):
x = self.find(x)
y = self.find(y)
if x == y:
return
# Make an edge from the root of lower tree to the root of higher tree
if self.rank[x] < self.rank[y]:
self.parent[x] = y
self.size[y] += self.size[x]
else:
self.parent[y] = x
self.size[x] += self.size[y]
        # If the heights of the two trees are the same, increase the new root's rank by 1
if self.rank[x] == self.rank[y]:
self.rank[x] += 1
def check_same(self, x, y):
return self.find(x) == self.find(y)
def get_size(self, x):
return self.size[self.find(x)]
def merge(self, x, y):
"""
"データ構造をマージする一般的なテク"
"""
x = self.find(x)
y = self.find(y)
if len(self.group[x]) < len(self.group[y]):
x, y = y, x
self.group[x].extend(self.group[y])
self.group[y] = []
self.parent[y] = x
def combination(n, r):
"""
:param n: the count of different items
:param r: the number of select
:return: combination
n! / (r! * (n - r)!)
"""
r = min(n - r, r)
result = 1
for i in range(n, n - r, -1):
result *= i
for i in range(1, r + 1):
result //= i
return result
N, M = map(int, input().split())
items = []
for i in range(M):
a, b = map(int, input().split())
items.append((a, b))
items = list(reversed(items))
union_find = UnionFind(N)
ans = (N - 1) * N // 2
minus = 0
result = []
for i, ab in enumerate(items):
a, b = ab
ans -= minus
result.append(ans)
if union_find.check_same(a, b):
minus = 0
else:
sa = union_find.get_size(a)
sb = union_find.get_size(b)
minus = sa * sb
union_find.union(a, b)
for r in result[::-1]:
print(r)
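# --- Hedged usage sketch (comments only, so the stdin-driven solution above
# is left untouched) ---
#
#   uf = UnionFind(4)      # elements 0..4
#   uf.union(1, 2)
#   uf.check_same(1, 2)    # -> True
#   uf.get_size(1)         # -> 2
#   combination(5, 2)      # -> 10, i.e. 5! / (2! * 3!)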
| 25.138298
| 90
| 0.499365
|
4a187603b24ee331d8e6aef0f742871b1b3c7773
| 4,017
|
py
|
Python
|
ceilometer/compute/pollsters/cpu.py
|
orbitfp7/ceilometer
|
9905da14bbdf06f95e1e056c9ca0e18087214d0f
|
[
"Apache-2.0"
] | null | null | null |
ceilometer/compute/pollsters/cpu.py
|
orbitfp7/ceilometer
|
9905da14bbdf06f95e1e056c9ca0e18087214d0f
|
[
"Apache-2.0"
] | null | null | null |
ceilometer/compute/pollsters/cpu.py
|
orbitfp7/ceilometer
|
9905da14bbdf06f95e1e056c9ca0e18087214d0f
|
[
"Apache-2.0"
] | null | null | null |
#
# Copyright 2012 eNovance <licensing@enovance.com>
# Copyright 2012 Red Hat, Inc
#
# Author: Julien Danjou <julien@danjou.info>
# Author: Eoghan Glynn <eglynn@redhat.com>
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import ceilometer
from ceilometer.compute import plugin
from ceilometer.compute.pollsters import util
from ceilometer.compute.virt import inspector as virt_inspector
from ceilometer.openstack.common.gettextutils import _
from ceilometer.openstack.common import log
from ceilometer import sample
LOG = log.getLogger(__name__)
class CPUPollster(plugin.ComputePollster):
def get_samples(self, manager, cache, resources):
for instance in resources:
LOG.debug(_('checking instance %s'), instance.id)
try:
cpu_info = manager.inspector.inspect_cpus(instance)
LOG.debug(_("CPUTIME USAGE: %(instance)s %(time)d"),
{'instance': instance.__dict__,
'time': cpu_info.time})
cpu_num = {'cpu_number': cpu_info.number}
yield util.make_sample_from_instance(
instance,
name='cpu',
type=sample.TYPE_CUMULATIVE,
unit='ns',
volume=cpu_info.time,
additional_metadata=cpu_num,
)
except virt_inspector.InstanceNotFoundException as err:
# Instance was deleted while getting samples. Ignore it.
LOG.debug(_('Exception while getting samples %s'), err)
except ceilometer.NotImplementedError:
# Selected inspector does not implement this pollster.
LOG.debug(_('Obtaining CPU time is not implemented for %s'
), manager.inspector.__class__.__name__)
except Exception as err:
LOG.exception(_('could not get CPU time for %(id)s: %(e)s'),
{'id': instance.id, 'e': err})
class CPUUtilPollster(plugin.ComputePollster):
def get_samples(self, manager, cache, resources):
self._inspection_duration = self._record_poll_time()
for instance in resources:
LOG.debug(_('Checking CPU util for instance %s'), instance.id)
try:
cpu_info = manager.inspector.inspect_cpu_util(
instance, self._inspection_duration)
LOG.debug(_("CPU UTIL: %(instance)s %(util)d"),
({'instance': instance.__dict__,
'util': cpu_info.util}))
yield util.make_sample_from_instance(
instance,
name='cpu_util',
type=sample.TYPE_GAUGE,
unit='%',
volume=cpu_info.util,
)
except virt_inspector.InstanceNotFoundException as err:
# Instance was deleted while getting samples. Ignore it.
LOG.debug(_('Exception while getting samples %s'), err)
except ceilometer.NotImplementedError:
# Selected inspector does not implement this pollster.
LOG.debug(_('Obtaining CPU Util is not implemented for %s'),
manager.inspector.__class__.__name__)
except Exception as err:
LOG.exception(_('Could not get CPU Util for %(id)s: %(e)s'),
{'id': instance.id, 'e': err})
| 44.633333
| 76
| 0.599452
|
4a18772015815aa5631960d25377c329547130d4
| 1,104
|
py
|
Python
|
piks/crypt.py
|
peitur/pikstore
|
4adec308dcf5cc358398f9750e2c24d7d89a2e28
|
[
"MIT"
] | 1
|
2019-09-02T19:12:30.000Z
|
2019-09-02T19:12:30.000Z
|
piks/crypt.py
|
peitur/pikstore
|
4adec308dcf5cc358398f9750e2c24d7d89a2e28
|
[
"MIT"
] | null | null | null |
piks/crypt.py
|
peitur/pikstore
|
4adec308dcf5cc358398f9750e2c24d7d89a2e28
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import os, sys, re
import base64
import piks.utils
import piks.defaults
import piks.validate
import piks.runner
import piks.file  # needed for piks.file.file_hash() in the __main__ block below
from Crypto.PublicKey import RSA
from pprint import pprint
class CryptRSA( object ):
def __init__( self, **opt ):
self._debug = False
self._pem = "\n".join( opt['pem'] )
self._password = opt['password']
pprint( self._pem )
print( "PASSWORD: %s" % ( self._password ) )
self._key = RSA.importKey( self._pem, passphrase=self._password )
def get_key( self ):
return self._key
def encrypt( self, **opt ):
pass
def decrypt( self, **opt ):
pass
if __name__ == "__main__":
passwd = piks.file.file_hash( os.path.realpath( __file__ ) )
print("PASSWORD: %s"%( passwd ))
pem = list()
privkey = piks.runner.CreatePrivateKey( password=passwd, bits="512", encode="aes256").run()
pubkey = piks.runner.CreatePublicKey( password=passwd, privatekey=privkey, encode="aes256" ).run()
rsa = CryptRSA( pem=privkey, password=passwd )
pprint( rsa.get_key() )
| 22.530612
| 102
| 0.63587
|
4a187767d2ac815ad11eec2d0a9e2d78598d1871
| 2,076
|
py
|
Python
|
tests/python/gaiatest/tests/unit/test_initial_state.py
|
rillian/gaia
|
45d1cc1b8365db192ff28a9405406ab0a2cb3910
|
[
"Apache-2.0"
] | 3
|
2015-08-31T15:24:31.000Z
|
2020-04-24T20:31:29.000Z
|
tests/python/gaiatest/tests/unit/test_initial_state.py
|
rillian/gaia
|
45d1cc1b8365db192ff28a9405406ab0a2cb3910
|
[
"Apache-2.0"
] | null | null | null |
tests/python/gaiatest/tests/unit/test_initial_state.py
|
rillian/gaia
|
45d1cc1b8365db192ff28a9405406ab0a2cb3910
|
[
"Apache-2.0"
] | 3
|
2015-07-29T07:17:15.000Z
|
2020-11-04T06:55:37.000Z
|
# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.
from gaiatest import GaiaTestCase
class TestInitialState(GaiaTestCase):
homescreen_frame_locator = ('css selector', 'div.homescreen iframe')
def test_initial_state(self):
self.check_initial_state()
def test_state_after_reset(self):
# push media files
self.push_resource('IMG_0001.jpg', 'DCIM/100MZLLA')
self.push_resource('VID_0001.3gp', 'DCIM/100MZLLA')
self.push_resource('MUS_0001.mp3')
# change volume
self.data_layer.set_volume(5)
if self.wifi:
# connect to wifi network
self.data_layer.enable_wifi()
self.data_layer.connect_to_wifi(self.testvars['wifi'])
self.data_layer.disable_wifi()
# move away from home screen
self.marionette.switch_to_frame(
self.marionette.find_element(*self.homescreen_frame_locator))
self.marionette.execute_script('window.wrappedJSObject.GridManager.goToPage(2);')
self.marionette.switch_to_frame()
# lock screen
self.lockscreen.lock()
self.cleanUp()
self.check_initial_state()
def check_initial_state(self):
self.assertFalse(self.lockscreen.is_locked)
if self.wifi:
self.data_layer.enable_wifi()
self.assertEqual(self.data_layer.known_networks, [{}])
self.data_layer.disable_wifi()
self.assertEqual(self.data_layer.get_setting('audio.volume.master'), 0)
self.assertEqual(self.data_layer.media_files, [])
# check we're on the home screen
self.marionette.switch_to_frame(
self.marionette.find_element(*self.homescreen_frame_locator))
self.assertEqual(self.marionette.execute_script(
'return window.wrappedJSObject.GridManager.pageHelper.getCurrentPageNumber();'), 1)
self.marionette.switch_to_frame()
| 34.6
| 95
| 0.675819
|
4a1877dc628ddd2dbfeb318c1f448bab1b1326ff
| 14,977
|
py
|
Python
|
Leonard_Jones.py
|
copl-labomc/nanocompositeMD
|
53ae8549d1f1acd01986dee5d853f49511174780
|
[
"MIT"
] | null | null | null |
Leonard_Jones.py
|
copl-labomc/nanocompositeMD
|
53ae8549d1f1acd01986dee5d853f49511174780
|
[
"MIT"
] | null | null | null |
Leonard_Jones.py
|
copl-labomc/nanocompositeMD
|
53ae8549d1f1acd01986dee5d853f49511174780
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Created on Thu Feb 11 13:37:22 2021
@author: franc
"""
import pandas as pd
import math
import os
import matplotlib.pyplot as plt
plt.style.use('seaborn-whitegrid')
import numpy as np
from PIL import Image
import glob
import moviepy.editor as mp
from datetime import datetime
import time
class Mol():
    def __init__(self, r, rv, ra):
        # Position, velocity, and acceleration vectors.
        self.r = np.asarray(r, dtype=float)
        self.rv = np.asarray(rv, dtype=float)
        self.ra = np.asarray(ra, dtype=float)
class Prop():
def __init__(self, val, sum1, sum2 ):
self.val=val
self.sum1=sum1
self.sum2=sum2
# BASIC FUNCTIONS
# Sqr and Cube functions:
def Sqr(x):
return (x * x)
def Cube(x):
return ((x) * (x) * (x))
# Randomness functions:
def RandR():
global randSeedP
randSeedP = (randSeedP * IMUL + IADD) & MASK
return (randSeedP * SCALE)
def VRand(p):
s: float
s = 2. * math.pi * RandR()
p[0] = math.cos(s)
p[1] = math.sin(s)
return p
# Toroidal functions:
def VWrapAll(v):
if v[0] >= 0.5 * region[0]:
v[0] -= region[0]
elif v[0] < -0.5 * region[0]:
v[0] += region[0]
if v[1] >= 0.5 * region[1]:
v[1] -= region[1]
elif v[1] < -0.5 * region[1]:
v[1] += region[1]
# This function updates coordinates taking care of periodic boundaries
def ApplyBoundaryCond():
for n in range(nMol):
VWrapAll(mol[n].r)
# INITIALIZE COORDINATES.
# Here a simple square lattice (with the option of unequal edge lengths) is used,
# so that each cell contains just one atom and the system is centered about the origin
def InitCoords():
c = np.asarray([0.0, 0.0]) # Coords
gap = np.divide(region, initUcell)
n = 0
for ny in range(0, int(initUcell[1])):
for nx in range(0, int(initUcell[0])):
#c = np.asarray([nx+0.5, ny+0.5])
#c = np.multiply(c, gap)
#c = np.add(c, np.multiply(-0.5, region))
#mol[n].r = c
mol[n].r = np.add(np.multiply(np.asarray([nx+0.5, ny+0.5]), gap), np.multiply(-0.5, region))
n = n+1
# INITIALIZE VELOCITIES.
# The initial velocities are set to a fixed magnitude (velMag)
# that depends on the temperature. After assigning random velocity directions,
# the velocities are adjusted to ensure that the center of mass is stationary.
# The function VRand serves as a source of uniformly distributed random unit vectors.
def InitVels():
global vSum
vSum = np.zeros(vSum.shape)
for n in range(nMol):
VRand(mol[n].rv)
mol[n].rv = np.multiply(mol[n].rv, velMag)
vSum = np.add(vSum, mol[n].rv)
for n in range(nMol):
mol[n].rv = np.add(mol[n].rv, np.multiply((- 1.0 / nMol), vSum))
# INITIALIZE ACCELERATIONS.
# The accelerations are initialized to zero
def InitAccels():
for n in range(nMol):
mol[n].ra = np.zeros(mol[n].ra.shape)
# Set parameters
def SetParams():
global rCut
global region
global velMag # velocity magnitude
    rCut = math.pow(2., 1./6.) * sigma  # rc = 2^(1/6) * sigma, the minimum of the LJ potential
# Define the region
region = np.multiply( 1./math.sqrt(density), initUcell)
nMol = len(mol)
#velocity magnitude depends on the temperature
velMag = math.sqrt(NDIM * (1. -1. /nMol) * temperature)
# Setup Job
def SetupJob():
global stepCount # timestep counter
stepCount = 0
InitCoords()
InitVels()
InitAccels()
AccumProps(0)
# FORCES COMPUTATION
'''
ComputeForces
ComputeForces is responsible for the interaction computations; interactions occur between pairs of atoms.
The function implements the LJ potential and calculates the accelerations and forces for each pair of atoms i and j
located at ri and rj.
rCut is the limiting separation cutoff (rc): rCut = 2^(1/6) * sigma.
As r increases towards rCut, the force drops to 0.
Newton's third law implies that fji = -fij, so each atom pair need only be examined once.
The amount of work is proportional to N^2.
'''
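# A short derivation behind the force expression used below (sigma and epsilon
# as defined at the bottom of this script):
#   U(r) = 4*eps*((sigma/r)**12 - (sigma/r)**6)
#   F(r) = -dU/dr = 48*eps*sigma**12/r**13 - 24*eps*sigma**6/r**7
# The loop accumulates forces as fcVal * dr, where dr is the displacement
# *vector* (magnitude r), so fcVal must be F(r)/r:
#   fcVal = 48*eps*sigma**12/r**14 - 24*eps*sigma**6/r**8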
def ComputeForces():
global virSum
global uSum
fcVal = 0 # The force that atom j exerts on atom i
# rCut: Rc
rrCut = Sqr(rCut)
for n in range(nMol):
mol[n].ra = np.zeros(mol[n].ra.shape)
uSum = 0.
virSum = 0.
n = 0
for j1 in range(nMol-1):
for j2 in range(j1+1, nMol):
# Make DeltaRij: (sum of squared RJ1-RJ2)
dr = np.subtract(mol[j1].r, mol[j2].r) # dr contains the delta between Rj1 and Rj2
VWrapAll(dr) # toroidal function
rr= (dr[0] * dr[0] + dr[1] * dr[1]) # dr2
r= np.sqrt(rr) #dr
# if dr2 < Rc^2
if (rr < rrCut):
                rri = Sqr(sigma) / rr  # (sigma/r)^2, used by the commented-out Rapaport variant below
rri3 = Cube(rri)
# Forces calculation by Lennard-Jones potential (original from Rapaport)
# fcVal = 48. * rri3 * (rri3 - 0.5) * rri
                # Forces calculated with the complete Lennard-Jones expression.
                # fcVal is F(r)/r, since it is multiplied below by the displacement vector dr:
                fcVal = 48 * epsilon * np.power(sigma, 12) / np.power(r, 14) - 24 * epsilon * np.power(sigma, 6) / np.power(r, 8)
# Update the accelerations multiplying force for DeltaRij
mol[j1].ra = np.add(mol[j1].ra, np.multiply(fcVal, dr))
mol[j2].ra = np.add(mol[j2].ra, np.multiply(-fcVal, dr))
# Lennard-Jones potential (original from Rapaport)
# uSum += 4. * rri3 * (rri3 - 1.) +1.
                # The complete Lennard-Jones potential.
                uSum += 4 * epsilon * (np.power(sigma/r, 12) - np.power(sigma/r, 6))
virSum += fcVal * rr
# INTEGRATION
'''
INTEGRATION OF COORDINATES AND VELOCITIES.
Integration of the equations of motion uses a simple numerical technique: the leapfrog method.
The method has excellent energy conservation properties.
LeapfrogStep integrates the coordinates and velocities. It appears twice in the listing of
SingleStep, with the argument part determining which portion of the two-step leapfrog process
is to be performed:
    v_ix(t + h/2) = v_ix(t) + (h/2) a_ix(t)
    r_ix(t + h)   = r_ix(t) + h v_ix(t + h/2)
'''
def LeapfrogStep(part):
if part == 1:
for n in range(nMol):
mol[n].rv = np.add(mol[n].rv, np.multiply(0.5 * deltaT, mol[n].ra))
mol[n].r = np.add(mol[n].r, np.multiply(deltaT, mol[n].rv))
else :
for n in range(nMol):
mol[n].rv = np.add(mol[n].rv, np.multiply(0.5 * deltaT, mol[n].ra))
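# Worked one-dimensional leapfrog step (illustrative numbers only; h = deltaT
# = 0.005 is an assumption here, the real value is read from Rap_2_LJP.txt):
#   given v(t) = 1.0, a(t) = 2.0, r(t) = 0.0, h = 0.005
#   part 1: v(t + h/2) = 1.0 + (h/2)*2.0 = 1.005
#           r(t + h)   = 0.0 + h*1.005  = 0.005025
#   (forces, and hence a(t + h), are recomputed between the two parts)
#   part 2: v(t + h)   = 1.005 + (h/2)*a(t + h)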
# PROPERTIES MEASUREMENTS
def EvalProps():
global vSum
vvSum = 0.
vSum = np.zeros(vSum.shape)
global kinEnergy
global totEnergy
global pressure
for n in range(nMol):
vSum=np.add(vSum, mol[n].rv)
vv= (mol[n].rv[0] * mol[n].rv[0] + mol[n].rv[1] * mol[n].rv[1])
vvSum += vv
kinEnergy.val = (0.5 * vvSum) / nMol
totEnergy.val = kinEnergy.val + (uSum / nMol)
pressure.val = density * (vvSum + virSum) / (nMol * NDIM)
# AccumProps functions
def PropZero(v):
v.sum1 = v.sum2 = 0.
return v
def PropAccum(v):
v.sum1 += v.val
v.sum2 += Sqr(v.val)
return v
def PropAvg(v, n):
v.sum1 /= n
v.sum2 = math.sqrt(max(v.sum2 / n - Sqr(v.sum1), 0.))
return v
# AccumProps: collects results of the measurements and evaluates means and standard deviation
def AccumProps(icode):
if icode == 0:
PropZero(totEnergy)
PropZero(kinEnergy)
PropZero(pressure)
if icode == 1:
PropAccum(totEnergy)
PropAccum(kinEnergy)
PropAccum(pressure)
if icode == 2:
PropAvg(totEnergy, stepAvg)
PropAvg(kinEnergy, stepAvg)
PropAvg(pressure, stepAvg)
# OUTPUT FUNCTIONS:
def plotMolCoo(mol, workdir, n):
import matplotlib.patches as mpatches
import matplotlib.pyplot as plt
Time = timeNow
Sigma_v = "{0:.4f}".format(vSum[0] / nMol)
E = "{0:.4f}".format(totEnergy.sum1)
Sigma_E = "{0:.4f}".format(totEnergy.sum2)
Ek = "{0:.4f}".format(kinEnergy.sum1)
Sigma_Ek = "{0:.4f}".format(kinEnergy.sum2)
P_1 = "{0:.4f}".format(pressure.sum1)
P_2 = "{0:.4f}".format(pressure.sum2)
TileName = (workdir+'coo/'+str(n)+'.png')
x = []
y = []
for n in range(len(mol)):
x.append(mol[n].r[0])
y.append(mol[n].r[1])
mark_1 = int(len(mol)/2 + len(mol)/8)
mark_2 = int(len(mol)/2 + len(mol)/8 + 1)
plt.plot(x, y, 'o', color='blue')
plt.plot(x[mark_1], y[mark_1], 'o', color='red')
plt.plot(x[mark_2], y[mark_2], 'o', color='cyan')
plt.title('timestep:'+"{0:.4f}".format(timeNow)+'; '+\
'$\Sigma v$:'+Sigma_v+'; '+\
'E:'+E+'; '+\
'$\sigma E$:'+Sigma_E+';\n'+\
'Ek:'+Ek+'; ' +\
'$\sigma Ek$:'+Sigma_Ek+'; '+\
'P.sum1:'+P_1+'; '+\
'P.sum2:'+P_2+'; ', loc='left')
#plt.rcParams["figure.figsize"] = (200,3)
plt.savefig(TileName, dpi=100)
def makeMov():
    # For more information about the use of the glob package with Python, and for the conversion from
# gif to mp4 video formats see:
#https://pythonprogramming.altervista.org/png-to-gif/
#https://stackoverflow.com/questions/6773584/how-is-pythons-glob-glob-ordered
#https://www.programiz.com/python-programming/datetime/current-time
#https://stackoverflow.com/questions/40726502/python-convert-gif-to-videomp4
t = time.localtime()
current_time = time.strftime("%D:%H:%M:%S", t)
current_time = current_time.replace('/','-')
# Create the frames
frames = []
imgs = sorted(glob.glob('coo/*.png'), key=os.path.getmtime)
for i in imgs:
temp = Image.open(i)
keep = temp.copy()
frames.append(keep)
temp.close()
for i in imgs:
os.remove(i)
# Save into a GIF file that loops forever
frames[0].save('coo/coordinates.gif', format='GIF',
append_images=frames[1:],
save_all=True,
duration=30, loop=0)
clip = mp.VideoFileClip("coo/coordinates.gif")
clip.write_videofile("coo/"+"coordinates_"+current_time+".mp4")
os.remove("coo/coordinates.gif")
def PrintSummary():
print(stepCount, \
"{0:.4f}".format(timeNow), \
"{0:.4f}".format(vSum[0] / nMol) ,\
"{0:.4f}".format(totEnergy.sum1),\
"{0:.4f}".format(totEnergy.sum2), \
"{0:.4f}".format(kinEnergy.sum1), \
"{0:.4f}".format(kinEnergy.sum2),\
"{0:.4f}".format(pressure.sum1),\
"{0:.4f}".format(pressure.sum2))
return (stepCount, \
timeNow, \
(vSum[0] / nMol) ,\
totEnergy.sum1,\
totEnergy.sum2, \
kinEnergy.sum1, \
kinEnergy.sum2,\
pressure.sum1,\
pressure.sum2)
def GraphOutput():
ax = \
df_systemParams.plot(x="timestep", y='$\Sigma v$', kind="line")
df_systemParams.plot(x="timestep", y='E', kind="line", ax=ax, color="C1")
df_systemParams.plot(x="timestep", y='$\sigma E$', kind="line", ax=ax, color="C2")
df_systemParams.plot(x="timestep", y='Ek', kind="line", ax=ax, color="C3")
df_systemParams.plot(x="timestep", y='$\sigma Ek$', kind="line", ax=ax, color="C4")
df_systemParams.plot(x="timestep", y='P_1', kind="line", ax=ax, color="C9")
df_systemParams.plot(x="timestep", y='P_2', kind="line", ax=ax, color="C9")
plt.show()
#plt.savefig('plot.jpg', dpi=300)
# HANDLING FUNCTION (SingleStep())
'''
SingleStep: Is the function that handles the processing for a single timestep, including:
1) the force evaluation
2) integration of the equation of motion,
3) adjustments required by periodic boundaries, and
4) property measurements
'''
def SingleStep():
global stepCount # timestep counter
global timeNow
stepCount +=1
timeNow = stepCount * deltaT
LeapfrogStep(1)
ApplyBoundaryCond()
ComputeForces() # 1) The force evaluation
LeapfrogStep(2) # 2) Integration of coordinates and velocities
EvalProps()
AccumProps(1) # Accumulate properties
if (stepCount % stepAvg == 0):
AccumProps(2) # Calculate averages
systemParams.append(PrintSummary())
AccumProps(0) # Set to zero all the properties.
# 2D SOFT-DISK SIMULATION: THE MAIN LOOP
# Import libraries for system operations
import os.path
from os import path
import shutil
# PARAMETERS
mov = 1 # set mov=1 if you want make a video
# Set a working directory for all the png and videos
workdir = str(os.getcwd()+'/')
# If the /coo directory doesn't exist make it, else remove /coo (and its contents) and
# create a new /coo directory.
if path.exists(str(workdir+'coo'))==False:
os.makedirs(str(workdir+'coo'))
else:
shutil.rmtree(str(workdir+'coo'))
os.makedirs(str(workdir+'coo'))
# Load the input parameter file
df_params = pd.read_csv('Rap_2_LJP.txt', sep='\t', header=None, names=['parameter', 'value'])
NDIM = 2 # Two-Dimension setting
vSum = np.asarray([0.0, 0.0]) # velocity sum
kinEnergy =Prop(0.0, 0.0, 0.0) #Ek (and average)
totEnergy =Prop(0.0, 0.0, 0.0) #E (and average)
pressure =Prop(0.0, 0.0, 0.0) #P (and average)
systemParams = []
IADD = 453806245
IMUL = 314159269
MASK = 2147483647
SCALE = 0.4656612873e-9
randSeedP = 17
deltaT = float(df_params.values[0][1])
density = float(df_params.values[1][1])
initUcell = np.asarray([0.0, 0.0]) # initialize cell
initUcell[0] = int(df_params.values[2][1])
initUcell[1] = int(df_params.values[3][1])
stepAvg = int(df_params.values[4][1])
stepEquil = float(df_params.values[5][1])
stepLimit = float(df_params.values[6][1])
temperature = float(df_params.values[7][1])
#Define an array of Mol
mol = [Mol(np.asarray([0.0, 0.0]), \
np.asarray([0.0, 0.0]), \
np.asarray([0.0, 0.0])) for i in range(int(initUcell[0]*initUcell[1]))]
# Define the number of molecules
global nMol
nMol = len(mol)
# LJP parameters:
epsilon = 1
sigma = 1
# START THE MAIN LOOP
SetParams()
SetupJob()
moreCycles = 1
n = 0
while moreCycles:
SingleStep()
if mov==1:
plotMolCoo(mol, workdir, n) # Make a graph of the coordinates
n += 1
if stepCount >= stepLimit:
moreCycles = 0
columns = ['timestep','timeNow', '$\Sigma v$', 'E', '$\sigma E$', 'Ek', '$\sigma Ek$', 'P_1', 'P_2']
df_systemParams = pd.DataFrame(systemParams, columns=columns)
# Make a video
if mov==1:
makeMov()
GraphOutput()
| 28.419355
| 130
| 0.585765
|
4a18782725d2713cfc6aa8d4916a5548b06a6cf6
| 14,147
|
py
|
Python
|
datalad/cmdline/tests/test_main.py
|
courtois-neuromod/datalad
|
b8357e90469a6b344d270e3dc1761bc17ea9bbdf
|
[
"MIT"
] | null | null | null |
datalad/cmdline/tests/test_main.py
|
courtois-neuromod/datalad
|
b8357e90469a6b344d270e3dc1761bc17ea9bbdf
|
[
"MIT"
] | null | null | null |
datalad/cmdline/tests/test_main.py
|
courtois-neuromod/datalad
|
b8357e90469a6b344d270e3dc1761bc17ea9bbdf
|
[
"MIT"
] | null | null | null |
# emacs: -*- mode: python-mode; py-indent-offset: 4; tab-width: 4; indent-tabs-mode: nil -*-
# ex: set sts=4 ts=4 sw=4 noet:
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
#
# See COPYING file distributed along with the datalad package for the
# copyright and license terms.
#
# ## ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ### ##
"""Test functioning of the datalad main cmdline utility """
import os
import re
from io import StringIO
from unittest.mock import patch
import datalad
from ..main import (
_fix_datalad_ri,
main,
)
from ..helpers import fail_with_short_help
from datalad import __version__
from datalad.cmd import (
WitlessRunner as Runner,
StdOutErrCapture,
)
from datalad.ui.utils import (
get_console_width,
get_terminal_size,
)
from datalad.api import (
create,
Dataset,
)
from datalad.utils import (
chpwd,
Path,
)
from datalad.tests.utils import (
SkipTest,
assert_equal,
assert_in,
assert_not_in,
assert_raises,
assert_re_in,
eq_,
in_,
ok_startswith,
on_windows,
slow,
with_tempfile,
)
from datalad.support.exceptions import CommandError
def run_main(args, exit_code=0, expect_stderr=False):
"""Run main() of the datalad, do basic checks and provide outputs
Parameters
----------
args : list
List of string cmdline arguments to pass
exit_code : int
Expected exit code. Would raise AssertionError if differs
expect_stderr : bool or string
Whether to expect stderr output. If string -- match
Returns
-------
stdout, stderr strings
Output produced
"""
with patch('sys.stderr', new_callable=StringIO) as cmerr:
with patch('sys.stdout', new_callable=StringIO) as cmout:
with assert_raises(SystemExit) as cm:
main(["datalad"] + list(args))
assert_equal(cm.exception.code, exit_code)
stdout = cmout.getvalue()
stderr = cmerr.getvalue()
if expect_stderr is False:
assert_equal(stderr, "")
elif expect_stderr is True:
# do nothing -- just return
pass
else:
# must be a string
assert_equal(stderr, expect_stderr)
return stdout, stderr
# TODO: switch to stdout for --version output
def test_version():
# we just get a version if not asking for a version of some command
stdout, stderr = run_main(['--version'], expect_stderr=True)
eq_(stdout.rstrip(), "datalad %s" % datalad.__version__)
stdout, stderr = run_main(['clone', '--version'], expect_stderr=True)
ok_startswith(stdout, 'datalad %s\n' % datalad.__version__)
# since https://github.com/datalad/datalad/pull/2733 no license in --version
assert_not_in("Copyright", stdout)
assert_not_in("Permission is hereby granted", stdout)
try:
import datalad_container
except ImportError:
pass # not installed, cannot test with extension
else:
stdout, stderr = run_main(['containers-list', '--version'], expect_stderr=True)
eq_(stdout, 'datalad_container %s\n' % datalad_container.__version__)
def test_help_np():
stdout, stderr = run_main(['--help-np'])
# Let's extract section titles:
# enough of bin/datalad and .tox/py27/bin/datalad -- guarantee consistency! ;)
ok_startswith(stdout, 'Usage: datalad')
    # Sections start/end with * if run under DATALAD_HELP2MAN mode
sections = [l[1:-1] for l in filter(re.compile('^\*.*\*$').match, stdout.split('\n'))]
# but order is still not guaranteed (dict somewhere)! TODO
# see https://travis-ci.org/datalad/datalad/jobs/80519004
# thus testing sets
for s in {'Commands for dataset operations',
'Commands for metadata handling',
'Miscellaneous commands',
'General information',
'Global options',
'Plumbing commands',
}:
assert_in(s, sections)
    if not get_terminal_size()[0]:
raise SkipTest(
"Could not determine terminal size, skipping the rest of the test")
# none of the lines must be longer than 80 chars
# TODO: decide on create-sibling and possibly
# rewrite-urls
accepted_width = get_console_width()
long_lines = ["%d %s" % (len(l), l) for l in stdout.split('\n')
if len(l) > accepted_width and
'{' not in l # on nd70 summary line is unsplit
]
if long_lines:
raise AssertionError(
"Following lines in --help output were longer than %s chars:\n%s"
% (accepted_width, '\n'.join(long_lines))
)
def test_usage_on_insufficient_args():
stdout, stderr = run_main(['install'], exit_code=2, expect_stderr=True)
ok_startswith(stderr, 'usage:')
def test_subcmd_usage_on_unknown_args():
stdout, stderr = run_main(['get', '--murks'], exit_code=1, expect_stderr=True)
in_('get', stdout)
def test_combined_short_option():
stdout, stderr = run_main(['-fjson'], exit_code=2, expect_stderr=True)
assert_not_in("unrecognized argument", stderr)
assert_in("too few arguments", stderr)
def check_incorrect_option(opts, err_str):
# The first line used to be:
# stdout, stderr = run_main((sys.argv[0],) + opts, expect_stderr=True, exit_code=2)
# But: what do we expect to be in sys.argv[0] here?
# It depends on how we invoke the test.
# - nosetests -s -v datalad/cmdline/tests/test_main.py would result in:
    #   sys.argv[0]=='nosetests'
    # - python -m nose -s -v datalad/cmdline/tests/test_main.py would result in:
    #   sys.argv[0]=='python -m nose'
# - python -c "import nose; nose.main()" -s -v datalad/cmdline/tests/test_main.py would result in:
# sys.argv[0]=='-c'
# This led to failure in case sys.argv[0] contained an option, that was
# defined to be a datalad option too, therefore was a 'known_arg' and was
# checked to meet its constraints.
# But sys.argv[0] actually isn't used by main at all. It simply doesn't
# matter what's in there. The only thing important to pass here is `opts`.
stdout, stderr = run_main(opts, expect_stderr=True, exit_code=2)
out = stdout + stderr
assert_in("usage: ", out)
assert_re_in(err_str, out, match=False)
def test_incorrect_options():
# apparently a bit different if following a good one so let's do both
err_invalid = "error: (invalid|too few arguments|unrecognized argument)"
yield check_incorrect_option, ('--buga',), err_invalid
yield check_incorrect_option, ('--dbg', '--buga'), err_invalid
err_insufficient = err_invalid # "specify"
yield check_incorrect_option, ('--dbg',), err_insufficient
yield check_incorrect_option, tuple(), err_insufficient
def test_script_shims():
runner = Runner()
for script in [
'datalad',
'git-annex-remote-datalad-archives',
'git-annex-remote-datalad']:
if not on_windows:
# those must be available for execution, and should not contain
which = runner.run(['which', script], protocol=StdOutErrCapture)['stdout']
# test if there is no easy install shim in there
with open(which.rstrip()) as f:
content = f.read()
else:
from distutils.spawn import find_executable
content = find_executable(script)
# and let's check that it is our script
out = runner.run([script, '--version'], protocol=StdOutErrCapture)
version = out['stdout'].rstrip()
mod, version = version.split(' ', 1)
assert_equal(mod, 'datalad')
# we can get git and non git .dev version... so for now
# relax
get_numeric_portion = lambda v: [x for x in re.split('[+.]', v) if x.isdigit()]
# extract numeric portion
assert get_numeric_portion(version), f"Got no numeric portion from {version}"
assert_equal(get_numeric_portion(__version__),
get_numeric_portion(version))
@slow # 11.2591s
@with_tempfile(mkdir=True)
def test_cfg_override(path):
with chpwd(path):
cmd = ['datalad', 'wtf', '-s', 'some']
# control
out = Runner().run(cmd, protocol=StdOutErrCapture)['stdout']
assert_not_in('datalad.dummy: this', out)
# ensure that this is not a dataset's cfg manager
assert_not_in('datalad.dataset.id', out)
# env var
out = Runner(env=dict(os.environ, DATALAD_DUMMY='this')).run(
cmd, protocol=StdOutErrCapture)['stdout']
assert_in('datalad.dummy: this', out)
# cmdline arg
out = Runner().run([cmd[0], '-c', 'datalad.dummy=this'] + cmd[1:],
protocol=StdOutErrCapture)['stdout']
assert_in('datalad.dummy: this', out)
# now create a dataset in the path. the wtf plugin will switch to
# using the dataset's config manager, which must inherit the overrides
create(dataset=path)
# control
out = Runner().run(cmd, protocol=StdOutErrCapture)['stdout']
assert_not_in('datalad.dummy: this', out)
# ensure that this is a dataset's cfg manager
assert_in('datalad.dataset.id', out)
# env var
out = Runner(env=dict(os.environ, DATALAD_DUMMY='this')).run(
cmd, protocol=StdOutErrCapture)['stdout']
assert_in('datalad.dummy: this', out)
# cmdline arg
out = Runner().run([cmd[0], '-c', 'datalad.dummy=this'] + cmd[1:],
protocol=StdOutErrCapture)['stdout']
assert_in('datalad.dummy: this', out)
def test_incorrect_cfg_override():
run_main(['-c', 'some', 'wtf'], exit_code=3)
run_main(['-c', 'some=', 'wtf'], exit_code=3)
run_main(['-c', 'some.var', 'wtf'], exit_code=3)
run_main(['-c', 'some.var=', 'wtf'], exit_code=3)
def test_fail_with_short_help():
out = StringIO()
with assert_raises(SystemExit) as cme:
fail_with_short_help(exit_code=3, out=out)
assert_equal(cme.exception.code, 3)
assert_equal(out.getvalue(), "")
out = StringIO()
with assert_raises(SystemExit) as cme:
fail_with_short_help(msg="Failed badly", out=out)
assert_equal(cme.exception.code, 1)
assert_equal(out.getvalue(), "error: Failed badly\n")
# Suggestions, hint, etc
out = StringIO()
with assert_raises(SystemExit) as cme:
fail_with_short_help(
msg="Failed badly",
known=["mother", "mutter", "father", "son"],
provided="muther",
hint="You can become one",
exit_code=0, # nobody forbids
what="parent",
out=out)
assert_equal(cme.exception.code, 0)
assert_equal(out.getvalue(),
"error: Failed badly\n"
"datalad: Unknown parent 'muther'. See 'datalad --help'.\n\n"
"Did you mean any of these?\n"
" mutter\n"
" mother\n"
" father\n"
"Hint: You can become one\n")
def test_fix_datalad_ri():
assert_equal(_fix_datalad_ri('/'), '/')
assert_equal(_fix_datalad_ri('/a/b'), '/a/b')
assert_equal(_fix_datalad_ri('//'), '///')
assert_equal(_fix_datalad_ri('///'), '///')
assert_equal(_fix_datalad_ri('//a'), '///a')
assert_equal(_fix_datalad_ri('///a'), '///a')
assert_equal(_fix_datalad_ri('//a/b'), '///a/b')
assert_equal(_fix_datalad_ri('///a/b'), '///a/b')
@with_tempfile
@with_tempfile(mkdir=True)
def test_commanderror_jsonmsgs(src, exp):
ds = Dataset(src).create()
(ds.pathobj / '123').write_text('123')
ds.save()
ds.repo.call_annex([
'initremote', 'expdir', 'type=directory',
'directory={}'.format(exp),
'encryption=none',
'exporttree=yes'
])
#ds.repo.call_annex(['export', '--to=expdir', 'HEAD'])
# this must fail, because `push` cannot handle an export.
# when https://github.com/datalad/datalad/issues/3127 is implemented
# this test must be adjusted
with assert_raises(CommandError) as cme:
Runner(cwd=ds.path).run(
['datalad', 'push', '--to', 'expdir'],
protocol=StdOutErrCapture)
if ds.repo.git_annex_version >= "8.20200309":
in_('use `git-annex export`', cme.exception.stderr)
| 36.46134
| 102
| 0.61179
|
4a187921734a2b4ca52b6b482f47a91268ca48b2
| 802
|
py
|
Python
|
problem_23.py
|
MasterScott/project-euler
|
643df8258bf9ac84b14b648a3a5a254bc682c473
|
[
"MIT"
] | null | null | null |
problem_23.py
|
MasterScott/project-euler
|
643df8258bf9ac84b14b648a3a5a254bc682c473
|
[
"MIT"
] | null | null | null |
problem_23.py
|
MasterScott/project-euler
|
643df8258bf9ac84b14b648a3a5a254bc682c473
|
[
"MIT"
] | 1
|
2019-10-28T23:33:46.000Z
|
2019-10-28T23:33:46.000Z
|
from math import sqrt
from time import time
abundant = set()      # abundant numbers found so far
non_sum_ab = set()    # numbers that are not a sum of two abundant numbers
def is_prime(n):
    if n < 2:
        return False
    if n % 2 == 0:
        return n == 2
    for i in range(3, int(sqrt(n))+1, 2):
        if n % i == 0:
            return False
    return True
def div_sum(n):
    # Sum of the proper divisors of n.
    if n == 1:
        return 0
    if is_prime(n):
        return 1
    s = 1
    forward = 2
    while forward * forward <= n:
        if n % forward == 0:
            s += forward
            backward = n // forward
            if backward != forward:
                s += backward
        forward += 1
    return s
def check_abundancy(n):
for ab in abundant:
if n-ab in abundant:
return
non_sum_ab.add(n)
t = time()
for i in range(1, 28124):  # 28123 is the known upper bound for non-abundant sums
if i < div_sum(i):
abundant.add(i)
check_abundancy(i)
print(sum(non_sum_ab), time() - t)
| 18.227273
| 56
| 0.548628
|
4a187985cf220ac47ba6709d1de2feb72b0d628e
| 4,198
|
py
|
Python
|
yampy/client.py
|
PhracturedBlue/yampy2
|
a98ac137b38c5a0f3f4b1e6fec89673caec36ef4
|
[
"Apache-2.0"
] | null | null | null |
yampy/client.py
|
PhracturedBlue/yampy2
|
a98ac137b38c5a0f3f4b1e6fec89673caec36ef4
|
[
"Apache-2.0"
] | null | null | null |
yampy/client.py
|
PhracturedBlue/yampy2
|
a98ac137b38c5a0f3f4b1e6fec89673caec36ef4
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) Microsoft Corporation
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# THIS CODE IS PROVIDED *AS IS* BASIS, WITHOUT WARRANTIES OR CONDITIONS OF
# ANY KIND, EITHER EXPRESS OR IMPLIED, INCLUDING WITHOUT LIMITATION ANY
# IMPLIED WARRANTIES OR CONDITIONS OF TITLE, FITNESS FOR A PARTICULAR
# PURPOSE, MERCHANTABLITY OR NON-INFRINGEMENT.
#
# See the Apache Version 2.0 License for specific language governing
# permissions and limitations under the License.
try:
import requests
except ImportError:
import warnings
warnings.warn("Missing requests package")
HAS_REQUESTS = False
from .constants import DEFAULT_BASE_URL
from .errors import ResponseError, NotFoundError, InvalidAccessTokenError, \
RateLimitExceededError, UnauthorizedError
from .models import GenericModel
class Client(object):
"""
A client for the Yammer API.
"""
def __init__(self, access_token=None, base_url=None, proxies=None):
self._access_token = access_token
self._base_url = base_url or DEFAULT_BASE_URL
self._proxies = proxies
def get(self, path, **kwargs):
"""
Makes an HTTP GET request to the Yammer API. Any keyword arguments will
be converted to query string parameters.
The path should be the path of an API endpoint, e.g. "/messages"
"""
return self._request("get", path, **kwargs)
def post(self, path, **kwargs):
"""
Makes an HTTP POST request to the Yammer API. Any keyword arguments
will be sent as the body of the request.
The path should be the path of an API endpoint, e.g. "/messages"
"""
return self._request("post", path, **kwargs)
def put(self, path, **kwargs):
"""
Makes an HTTP PUT request to the Yammer API. Any keyword arguments will
be sent as the body of the request.
The path should be the path of an API endpoint, e.g. "/users/123"
"""
return self._request("put", path, **kwargs)
def delete(self, path, **kwargs):
"""
Makes an HTTP DELETE request to the Yammer API.
The path should be the path of an API endpoint, e.g. "/messages/123"
"""
return self._request("delete", path, **kwargs)
def _request(self, method, path, **kwargs):
        kwargs = kwargs.copy()
        files = kwargs.pop('files', None)
response = requests.request(
method=method,
url=self._build_url(path),
headers=self._build_headers(),
proxies=self._proxies,
params=kwargs,
files=files,
)
return self._parse_response(response)
def _build_url(self, path):
return self._base_url + path + ".json"
def _build_headers(self):
if self._access_token:
return {
"Authorization": "Bearer %s" % self._access_token,
}
else:
return {}
def _parse_response(self, response):
if 200 <= response.status_code < 300:
return self._value_for_response(response)
else:
raise self._exception_for_response(response)
def _value_for_response(self, response):
if response.text.strip():
return GenericModel.from_json(response.text)
else:
return True
def _exception_for_response(self, response):
if response.status_code == 404:
return NotFoundError(response.reason)
elif response.status_code == 400 and "OAuthException" in response.text:
return InvalidAccessTokenError(response.reason)
elif response.status_code == 401:
return UnauthorizedError(response.reason)
elif response.status_code == 429:
return RateLimitExceededError(response.reason)
else:
return ResponseError("%d error: %s" % (
response.status_code, response.reason,
))
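# --- Hedged usage sketch ---
# A minimal illustration of the client above. The token is a placeholder and
# the "/messages" endpoint is taken from the method docstrings; running this
# for real requires network access and a valid Yammer access token.
if __name__ == "__main__":
    client = Client(access_token="YOUR-YAMMER-TOKEN")  # placeholder
    try:
        messages = client.get("/messages", limit=5)
        print(messages)
    except Exception as err:
        # Non-2xx responses surface via _exception_for_response as a
        # ResponseError or one of the more specific errors imported above.
        print("API call failed: %s" % err)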
| 33.31746
| 79
| 0.638876
|
4a1879cd403e31df46a9e11530733f3baf42c85f
| 24,675
|
py
|
Python
|
jacket/tests/compute/unit/objects/test_compute_node.py
|
bopopescu/jacket
|
d7ad3147fcb43131098c2a5210847634ff5fb325
|
[
"Apache-2.0"
] | null | null | null |
jacket/tests/compute/unit/objects/test_compute_node.py
|
bopopescu/jacket
|
d7ad3147fcb43131098c2a5210847634ff5fb325
|
[
"Apache-2.0"
] | null | null | null |
jacket/tests/compute/unit/objects/test_compute_node.py
|
bopopescu/jacket
|
d7ad3147fcb43131098c2a5210847634ff5fb325
|
[
"Apache-2.0"
] | 2
|
2016-08-10T02:21:49.000Z
|
2020-07-24T01:57:21.000Z
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import copy
import mock
import netaddr
from oslo_serialization import jsonutils
from oslo_utils import timeutils
from oslo_versionedobjects import base as ovo_base
from oslo_versionedobjects import exception as ovo_exc
from jacket.db import compute
from jacket.compute import exception
from jacket.objects import compute
from jacket.objects.compute import base
from jacket.objects.compute import compute_node
from jacket.objects.compute import hv_spec
from jacket.objects.compute import service
from jacket.tests.compute.unit import fake_pci_device_pools
from jacket.tests.compute.unit.objects import test_objects
from jacket.tests.compute import uuidsentinel
NOW = timeutils.utcnow().replace(microsecond=0)
fake_stats = {'num_foo': '10'}
fake_stats_db_format = jsonutils.dumps(fake_stats)
# host_ip is coerced from a string to an IPAddress
# but needs to be converted to a string for the database format
fake_host_ip = '127.0.0.1'
fake_numa_topology = compute.NUMATopology(
cells=[compute.NUMACell(id=0, cpuset=set([1, 2]), memory=512,
cpu_usage=0, memory_usage=0,
mempages=[], pinned_cpus=set([]),
siblings=[]),
compute.NUMACell(id=1, cpuset=set([3, 4]), memory=512,
cpu_usage=0, memory_usage=0,
mempages=[], pinned_cpus=set([]),
siblings=[])])
fake_numa_topology_db_format = fake_numa_topology._to_json()
fake_supported_instances = [('x86_64', 'kvm', 'hvm')]
fake_hv_spec = hv_spec.HVSpec(arch=fake_supported_instances[0][0],
hv_type=fake_supported_instances[0][1],
vm_mode=fake_supported_instances[0][2])
fake_supported_hv_specs = [fake_hv_spec]
# for backward compatibility, each supported instance object
# is stored as a list in the database
fake_supported_hv_specs_db_format = jsonutils.dumps([fake_hv_spec.to_list()])
fake_pci = jsonutils.dumps(fake_pci_device_pools.fake_pool_list_primitive)
fake_compute_node = {
'created_at': NOW,
'updated_at': None,
'deleted_at': None,
'deleted': False,
'id': 123,
'uuid': uuidsentinel.fake_compute_node,
'service_id': None,
'host': 'fake',
'vcpus': 4,
'memory_mb': 4096,
'local_gb': 1024,
'vcpus_used': 2,
'memory_mb_used': 2048,
'local_gb_used': 512,
'hypervisor_type': 'Hyper-Dan-VM-ware',
'hypervisor_version': 1001,
'hypervisor_hostname': 'vm.danplanet.com',
'free_ram_mb': 1024,
'free_disk_gb': 256,
'current_workload': 100,
'running_vms': 2013,
'cpu_info': 'Schmintel i786',
'disk_available_least': 256,
'metrics': '',
'stats': fake_stats_db_format,
'host_ip': fake_host_ip,
'numa_topology': fake_numa_topology_db_format,
'supported_instances': fake_supported_hv_specs_db_format,
'pci_stats': fake_pci,
'cpu_allocation_ratio': 16.0,
'ram_allocation_ratio': 1.5,
'disk_allocation_ratio': 1.0,
}
# FIXME(sbauza) : For compatibility checking, to be removed once we are sure
# that all computes are running latest DB version with host field in it.
fake_old_compute_node = fake_compute_node.copy()
del fake_old_compute_node['host']
# resources are passed from the virt drivers and copied into the compute_node
fake_resources = {
'vcpus': 2,
'memory_mb': 1024,
'local_gb': 10,
'cpu_info': 'fake-info',
'vcpus_used': 1,
'memory_mb_used': 512,
'local_gb_used': 4,
'numa_topology': fake_numa_topology_db_format,
'hypervisor_type': 'fake-type',
'hypervisor_version': 1,
'hypervisor_hostname': 'fake-host',
'disk_available_least': 256,
'host_ip': fake_host_ip,
'supported_instances': fake_supported_instances
}
fake_compute_with_resources = compute.ComputeNode(
vcpus=fake_resources['vcpus'],
memory_mb=fake_resources['memory_mb'],
local_gb=fake_resources['local_gb'],
cpu_info=fake_resources['cpu_info'],
vcpus_used=fake_resources['vcpus_used'],
memory_mb_used=fake_resources['memory_mb_used'],
    local_gb_used=fake_resources['local_gb_used'],
numa_topology=fake_resources['numa_topology'],
hypervisor_type=fake_resources['hypervisor_type'],
hypervisor_version=fake_resources['hypervisor_version'],
hypervisor_hostname=fake_resources['hypervisor_hostname'],
disk_available_least=fake_resources['disk_available_least'],
host_ip=netaddr.IPAddress(fake_resources['host_ip']),
supported_hv_specs=fake_supported_hv_specs,
)
class _TestComputeNodeObject(object):
def supported_hv_specs_comparator(self, expected, obj_val):
obj_val = [inst.to_list() for inst in obj_val]
self.assertJsonEqual(expected, obj_val)
def pci_device_pools_comparator(self, expected, obj_val):
if obj_val is not None:
obj_val = obj_val.obj_to_primitive()
self.assertJsonEqual(expected, obj_val)
else:
self.assertEqual(expected, obj_val)
def comparators(self):
return {'stats': self.assertJsonEqual,
'host_ip': self.str_comparator,
'supported_hv_specs': self.supported_hv_specs_comparator,
'pci_device_pools': self.pci_device_pools_comparator,
}
def subs(self):
return {'supported_hv_specs': 'supported_instances',
'pci_device_pools': 'pci_stats'}
def test_get_by_id(self):
self.mox.StubOutWithMock(compute, 'compute_node_get')
compute.compute_node_get(self.context, 123).AndReturn(fake_compute_node)
self.mox.ReplayAll()
compute = compute_node.ComputeNode.get_by_id(self.context, 123)
self.compare_obj(compute, fake_compute_node,
subs=self.subs(),
comparators=self.comparators())
self.assertNotIn('uuid', compute.obj_what_changed())
@mock.patch.object(compute.Service, 'get_by_id')
@mock.patch.object(compute, 'compute_node_get')
def test_get_by_id_with_host_field_not_in_db(self, mock_cn_get,
mock_obj_svc_get):
fake_compute_node_with_svc_id = fake_compute_node.copy()
fake_compute_node_with_svc_id['service_id'] = 123
fake_compute_node_with_no_host = fake_compute_node_with_svc_id.copy()
host = fake_compute_node_with_no_host.pop('host')
fake_service = service.Service(id=123)
fake_service.host = host
mock_cn_get.return_value = fake_compute_node_with_no_host
mock_obj_svc_get.return_value = fake_service
compute = compute_node.ComputeNode.get_by_id(self.context, 123)
self.compare_obj(compute, fake_compute_node_with_svc_id,
subs=self.subs(),
comparators=self.comparators())
def test_get_by_service_id(self):
self.mox.StubOutWithMock(compute, 'compute_nodes_get_by_service_id')
compute.compute_nodes_get_by_service_id(self.context, 456).AndReturn(
[fake_compute_node])
self.mox.ReplayAll()
compute = compute_node.ComputeNode.get_by_service_id(self.context, 456)
self.compare_obj(compute, fake_compute_node,
subs=self.subs(),
comparators=self.comparators())
@mock.patch.object(compute, 'compute_node_get_by_host_and_nodename')
def test_get_by_host_and_nodename(self, cn_get_by_h_and_n):
cn_get_by_h_and_n.return_value = fake_compute_node
compute = compute_node.ComputeNode.get_by_host_and_nodename(
self.context, 'fake', 'vm.danplanet.com')
self.compare_obj(compute, fake_compute_node,
subs=self.subs(),
comparators=self.comparators())
@mock.patch('compute.compute.compute_node_get_all_by_host')
def test_get_first_node_by_host_for_old_compat(
self, cn_get_all_by_host):
another_node = fake_compute_node.copy()
another_node['hypervisor_hostname'] = 'neverland'
cn_get_all_by_host.return_value = [fake_compute_node, another_node]
compute = (
compute_node.ComputeNode.get_first_node_by_host_for_old_compat(
self.context, 'fake')
)
self.compare_obj(compute, fake_compute_node,
subs=self.subs(),
comparators=self.comparators())
@mock.patch('compute.compute.ComputeNodeList.get_all_by_host')
def test_get_first_node_by_host_for_old_compat_not_found(
self, cn_get_all_by_host):
cn_get_all_by_host.side_effect = exception.ComputeHostNotFound(
host='fake')
self.assertRaises(
exception.ComputeHostNotFound,
compute_node.ComputeNode.get_first_node_by_host_for_old_compat,
self.context, 'fake')
def test_create(self):
self.mox.StubOutWithMock(compute, 'compute_node_create')
compute.compute_node_create(
self.context,
{
'service_id': 456,
'stats': fake_stats_db_format,
'host_ip': fake_host_ip,
'supported_instances': fake_supported_hv_specs_db_format,
'uuid': uuidsentinel.fake_compute_node,
}).AndReturn(fake_compute_node)
self.mox.ReplayAll()
compute = compute_node.ComputeNode(context=self.context)
compute.service_id = 456
compute.uuid = uuidsentinel.fake_compute_node
compute.stats = fake_stats
# NOTE (pmurray): host_ip is coerced to an IPAddress
compute.host_ip = fake_host_ip
compute.supported_hv_specs = fake_supported_hv_specs
with mock.patch('oslo_utils.uuidutils.generate_uuid') as mock_gu:
compute.create()
self.assertFalse(mock_gu.called)
self.compare_obj(compute, fake_compute_node,
subs=self.subs(),
comparators=self.comparators())
@mock.patch('compute.compute.compute_node_create')
@mock.patch('oslo_utils.uuidutils.generate_uuid')
def test_create_allocates_uuid(self, mock_gu, mock_create):
mock_create.return_value = fake_compute_node
mock_gu.return_value = fake_compute_node['uuid']
obj = compute.ComputeNode(context=self.context)
obj.create()
mock_gu.assert_called_once_with()
mock_create.assert_called_once_with(
self.context, {'uuid': fake_compute_node['uuid']})
def test_recreate_fails(self):
self.mox.StubOutWithMock(compute, 'compute_node_create')
compute.compute_node_create(
self.context, {'service_id': 456,
'uuid': uuidsentinel.fake_compute_node}).AndReturn(
fake_compute_node)
self.mox.ReplayAll()
compute = compute_node.ComputeNode(context=self.context)
compute.service_id = 456
compute.uuid = uuidsentinel.fake_compute_node
compute.create()
self.assertRaises(exception.ObjectActionError, compute.create)
def test_save(self):
self.mox.StubOutWithMock(compute, 'compute_node_update')
compute.compute_node_update(
self.context, 123,
{
'vcpus_used': 3,
'stats': fake_stats_db_format,
'host_ip': fake_host_ip,
'supported_instances': fake_supported_hv_specs_db_format,
'uuid': uuidsentinel.fake_compute_node,
}).AndReturn(fake_compute_node)
self.mox.ReplayAll()
compute = compute_node.ComputeNode(context=self.context)
compute.id = 123
compute.vcpus_used = 3
compute.stats = fake_stats
compute.uuid = uuidsentinel.fake_compute_node
# NOTE (pmurray): host_ip is coerced to an IPAddress
compute.host_ip = fake_host_ip
compute.supported_hv_specs = fake_supported_hv_specs
compute.save()
self.compare_obj(compute, fake_compute_node,
subs=self.subs(),
comparators=self.comparators())
def test_query_allocates_uuid(self):
fake = dict(fake_compute_node)
fake.pop('uuid')
compute.compute_node_create(self.context, fake)
with mock.patch('oslo_utils.uuidutils.generate_uuid') as mock_gu:
mock_gu.return_value = uuidsentinel.fake_compute_node
obj = compute.ComputeNode.get_by_id(self.context, fake['id'])
mock_gu.assert_called_once_with()
self.assertEqual(uuidsentinel.fake_compute_node, obj.uuid)
self.assertNotIn('uuid', obj.obj_get_changes())
with mock.patch('oslo_utils.uuidutils.generate_uuid') as mock_gu:
obj = compute.ComputeNode.get_by_id(self.context, fake['id'])
self.assertEqual(uuidsentinel.fake_compute_node, obj.uuid)
self.assertFalse(mock_gu.called)
def test_save_pci_device_pools_empty(self):
fake_pci = jsonutils.dumps(
compute.PciDevicePoolList(compute=[]).obj_to_primitive())
compute_dict = fake_compute_node.copy()
compute_dict['pci_stats'] = fake_pci
with mock.patch.object(
compute, 'compute_node_update',
return_value=compute_dict) as mock_compute_node_update:
            # Build the pool list before rebinding ``compute`` to the node,
            # otherwise the lookup would hit the ComputeNode instance.
            pools = compute.PciDevicePoolList(compute=[])
            compute = compute_node.ComputeNode(context=self.context)
            compute.id = 123
            compute.pci_device_pools = pools
compute.save()
self.compare_obj(compute, compute_dict,
subs=self.subs(),
comparators=self.comparators())
mock_compute_node_update.assert_called_once_with(
self.context, 123, {'pci_stats': fake_pci})
def test_save_pci_device_pools_null(self):
compute_dict = fake_compute_node.copy()
compute_dict['pci_stats'] = None
with mock.patch.object(
compute, 'compute_node_update',
return_value=compute_dict) as mock_compute_node_update:
compute = compute_node.ComputeNode(context=self.context)
compute.id = 123
compute.pci_device_pools = None
compute.save()
self.compare_obj(compute, compute_dict,
subs=self.subs(),
comparators=self.comparators())
mock_compute_node_update.assert_called_once_with(
self.context, 123, {'pci_stats': None})
@mock.patch.object(compute, 'compute_node_create',
return_value=fake_compute_node)
def test_set_id_failure(self, db_mock):
compute = compute_node.ComputeNode(context=self.context)
compute.create()
self.assertRaises(ovo_exc.ReadOnlyFieldError, setattr,
compute, 'id', 124)
def test_destroy(self):
self.mox.StubOutWithMock(compute, 'compute_node_delete')
compute.compute_node_delete(self.context, 123)
self.mox.ReplayAll()
compute = compute_node.ComputeNode(context=self.context)
compute.id = 123
compute.destroy()
def test_get_all(self):
self.mox.StubOutWithMock(compute, 'compute_node_get_all')
compute.compute_node_get_all(self.context).AndReturn([fake_compute_node])
self.mox.ReplayAll()
computes = compute_node.ComputeNodeList.get_all(self.context)
self.assertEqual(1, len(computes))
self.compare_obj(computes[0], fake_compute_node,
subs=self.subs(),
comparators=self.comparators())
def test_get_by_hypervisor(self):
self.mox.StubOutWithMock(compute, 'compute_node_search_by_hypervisor')
compute.compute_node_search_by_hypervisor(self.context, 'hyper').AndReturn(
[fake_compute_node])
self.mox.ReplayAll()
computes = compute_node.ComputeNodeList.get_by_hypervisor(self.context,
'hyper')
self.assertEqual(1, len(computes))
self.compare_obj(computes[0], fake_compute_node,
subs=self.subs(),
comparators=self.comparators())
@mock.patch('compute.compute.compute_nodes_get_by_service_id')
def test__get_by_service(self, cn_get_by_svc_id):
cn_get_by_svc_id.return_value = [fake_compute_node]
computes = compute_node.ComputeNodeList._get_by_service(self.context,
123)
self.assertEqual(1, len(computes))
self.compare_obj(computes[0], fake_compute_node,
subs=self.subs(),
comparators=self.comparators())
@mock.patch('compute.compute.compute_node_get_all_by_host')
def test_get_all_by_host(self, cn_get_all_by_host):
cn_get_all_by_host.return_value = [fake_compute_node]
computes = compute_node.ComputeNodeList.get_all_by_host(self.context,
'fake')
self.assertEqual(1, len(computes))
self.compare_obj(computes[0], fake_compute_node,
subs=self.subs(),
comparators=self.comparators())
def test_compat_numa_topology(self):
compute = compute_node.ComputeNode()
versions = ovo_base.obj_tree_get_versions('ComputeNode')
primitive = compute.obj_to_primitive(target_version='1.4',
version_manifest=versions)
self.assertNotIn('numa_topology', primitive)
def test_compat_supported_hv_specs(self):
compute = compute_node.ComputeNode()
compute.supported_hv_specs = fake_supported_hv_specs
versions = ovo_base.obj_tree_get_versions('ComputeNode')
primitive = compute.obj_to_primitive(target_version='1.5',
version_manifest=versions)
self.assertNotIn('supported_hv_specs', primitive)
def test_compat_host(self):
compute = compute_node.ComputeNode()
primitive = compute.obj_to_primitive(target_version='1.6')
self.assertNotIn('host', primitive)
def test_compat_pci_device_pools(self):
compute = compute_node.ComputeNode()
compute.pci_device_pools = fake_pci_device_pools.fake_pool_list
versions = ovo_base.obj_tree_get_versions('ComputeNode')
primitive = compute.obj_to_primitive(target_version='1.8',
version_manifest=versions)
self.assertNotIn('pci_device_pools', primitive)
@mock.patch('compute.compute.Service.get_by_compute_host')
def test_compat_service_id(self, mock_get):
mock_get.return_value = compute.Service(id=1)
compute = compute.ComputeNode(host='fake-host', service_id=None)
primitive = compute.obj_to_primitive(target_version='1.12')
self.assertEqual(1, primitive['nova_object.data']['service_id'])
@mock.patch('compute.compute.Service.get_by_compute_host')
def test_compat_service_id_compute_host_not_found(self, mock_get):
mock_get.side_effect = exception.ComputeHostNotFound(host='fake-host')
compute = compute.ComputeNode(host='fake-host', service_id=None)
primitive = compute.obj_to_primitive(target_version='1.12')
self.assertEqual(-1, primitive['nova_object.data']['service_id'])
def test_update_from_virt_driver(self):
# copy in case the update has a side effect
resources = copy.deepcopy(fake_resources)
compute = compute_node.ComputeNode()
compute.update_from_virt_driver(resources)
expected = fake_compute_with_resources
self.assertTrue(base.obj_equal_prims(expected, compute))
def test_update_from_virt_driver_missing_field(self):
# NOTE(pmurray): update_from_virt_driver does not require
# all fields to be present in resources. Validation of the
# resources data structure would be done in a different method.
resources = copy.deepcopy(fake_resources)
del resources['vcpus']
compute = compute_node.ComputeNode()
compute.update_from_virt_driver(resources)
expected = fake_compute_with_resources.obj_clone()
del expected.vcpus
self.assertTrue(base.obj_equal_prims(expected, compute))
def test_update_from_virt_driver_extra_field(self):
# copy in case the update has a side effect
resources = copy.deepcopy(fake_resources)
resources['extra_field'] = 'nonsense'
compute = compute_node.ComputeNode()
compute.update_from_virt_driver(resources)
expected = fake_compute_with_resources
self.assertTrue(base.obj_equal_prims(expected, compute))
def test_update_from_virt_driver_bad_value(self):
# copy in case the update has a side effect
resources = copy.deepcopy(fake_resources)
resources['vcpus'] = 'nonsense'
compute = compute_node.ComputeNode()
self.assertRaises(ValueError,
compute.update_from_virt_driver, resources)
def test_compat_allocation_ratios(self):
compute = compute_node.ComputeNode()
primitive = compute.obj_to_primitive(target_version='1.13')
self.assertNotIn('cpu_allocation_ratio', primitive)
self.assertNotIn('ram_allocation_ratio', primitive)
def test_compat_disk_allocation_ratio(self):
compute = compute_node.ComputeNode()
primitive = compute.obj_to_primitive(target_version='1.15')
self.assertNotIn('disk_allocation_ratio', primitive)
def test_compat_allocation_ratios_old_compute(self):
self.flags(cpu_allocation_ratio=2.0, ram_allocation_ratio=3.0,
disk_allocation_ratio=0.9)
compute_dict = fake_compute_node.copy()
# old computes don't provide allocation ratios to the table
compute_dict['cpu_allocation_ratio'] = None
compute_dict['ram_allocation_ratio'] = None
compute_dict['disk_allocation_ratio'] = None
cls = compute.ComputeNode
compute = cls._from_db_object(self.context, cls(), compute_dict)
self.assertEqual(2.0, compute.cpu_allocation_ratio)
self.assertEqual(3.0, compute.ram_allocation_ratio)
self.assertEqual(0.9, compute.disk_allocation_ratio)
def test_compat_allocation_ratios_default_values(self):
compute_dict = fake_compute_node.copy()
# new computes provide allocation ratios defaulted to 0.0
compute_dict['cpu_allocation_ratio'] = 0.0
compute_dict['ram_allocation_ratio'] = 0.0
compute_dict['disk_allocation_ratio'] = 0.0
cls = compute.ComputeNode
compute = cls._from_db_object(self.context, cls(), compute_dict)
self.assertEqual(16.0, compute.cpu_allocation_ratio)
self.assertEqual(1.5, compute.ram_allocation_ratio)
self.assertEqual(1.0, compute.disk_allocation_ratio)
def test_compat_allocation_ratios_old_compute_default_values(self):
compute_dict = fake_compute_node.copy()
# old computes don't provide allocation ratios to the table
compute_dict['cpu_allocation_ratio'] = None
compute_dict['ram_allocation_ratio'] = None
compute_dict['disk_allocation_ratio'] = None
cls = compute.ComputeNode
compute = cls._from_db_object(self.context, cls(), compute_dict)
self.assertEqual(16.0, compute.cpu_allocation_ratio)
self.assertEqual(1.5, compute.ram_allocation_ratio)
self.assertEqual(1.0, compute.disk_allocation_ratio)
class TestComputeNodeObject(test_objects._LocalTest,
_TestComputeNodeObject):
pass
class TestRemoteComputeNodeObject(test_objects._RemoteTest,
_TestComputeNodeObject):
pass
| 44.379496
| 83
| 0.669139
|
4a187a5caccb97ea31c6ddc607a3c7583d694d9f
| 1,785
|
py
|
Python
|
PyPoE/shared/config/__init__.py
|
Gloorf/PyPoE
|
23e60e0914a1a669764a552f762d2b2064203e75
|
[
"MIT"
] | 247
|
2015-07-06T19:39:11.000Z
|
2022-03-30T13:11:03.000Z
|
PyPoE/shared/config/__init__.py
|
Gloorf/PyPoE
|
23e60e0914a1a669764a552f762d2b2064203e75
|
[
"MIT"
] | 121
|
2015-09-01T23:50:22.000Z
|
2021-08-23T21:06:47.000Z
|
PyPoE/shared/config/__init__.py
|
Gloorf/PyPoE
|
23e60e0914a1a669764a552f762d2b2064203e75
|
[
"MIT"
] | 109
|
2015-09-09T06:37:56.000Z
|
2022-03-20T16:06:33.000Z
|
"""
Utilities for config handling
Overview
===============================================================================
+----------+------------------------------------------------------------------+
| Path | PyPoE/shared/config/__init__.py |
+----------+------------------------------------------------------------------+
| Version | 1.0.0a0 |
+----------+------------------------------------------------------------------+
| Revision | $Id: 4a187a5caccb97ea31c6ddc607a3c7583d694d9f $ |
+----------+------------------------------------------------------------------+
| Author | Omega_K2 |
+----------+------------------------------------------------------------------+
Description
===============================================================================
Agreement
===============================================================================
See PyPoE/LICENSE
"""
# =============================================================================
# Imports
# =============================================================================
# Python
# 3rd-party
# self
# =============================================================================
# Globals
# =============================================================================
__all__ = []
# =============================================================================
# Classes
# =============================================================================
# =============================================================================
# Functions
# =============================================================================
| 35
| 79
| 0.129412
|
4a187a6ed7bfc07aa23d792cc76a566d31242681
| 1,583
|
py
|
Python
|
server/app/auth/__init__.py
|
adrien1018/app
|
fc312b1e8695968084e35e3bfd9d476ee0e245d6
|
[
"MIT"
] | 1
|
2021-01-08T20:33:32.000Z
|
2021-01-08T20:33:32.000Z
|
server/app/auth/__init__.py
|
adrien1018/app
|
fc312b1e8695968084e35e3bfd9d476ee0e245d6
|
[
"MIT"
] | null | null | null |
server/app/auth/__init__.py
|
adrien1018/app
|
fc312b1e8695968084e35e3bfd9d476ee0e245d6
|
[
"MIT"
] | null | null | null |
import functools
import typing
from typing import Optional
import werkzeug.wrappers
from flask import request, make_response
from ..db import session
from ..db import project
from ..db import user
request = typing.cast(werkzeug.wrappers.Request, request)
def get_session() -> session.Session:
session_id = request.cookies.get('session_id')
return session.get_or_create(session_id)
def check_labml_token_permission(func) -> typing.Callable:
@functools.wraps(func)
def wrapper(*args, **kwargs):
labml_token = kwargs.get('labml_token', '')
p = project.get_project(labml_token)
if p and p.is_sharable:
return func(*args, **kwargs)
kwargs['labml_token'] = None
return func(*args, **kwargs)
return wrapper
def login_required(func) -> typing.Callable:
@functools.wraps(func)
def wrapper(*args, **kwargs):
session_id = request.cookies.get('session_id')
s = session.get_or_create(session_id)
if s.is_auth:
return func(*args, **kwargs)
else:
response = make_response()
response.status_code = 403
if session_id != s.session_id:
response.set_cookie('session_id', s.session_id, session.EXPIRATION_DELAY)
return response
return wrapper
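# Usage sketch (hypothetical Flask view; the route and handler names are
# illustrative, not part of this module):
#
#   @app.route('/runs')
#   @login_required
#   def runs():
#       ...  # only reached when the session cookie marks an authed session
#
# An unauthenticated request gets a 403, and the ``session_id`` cookie is
# (re)set whenever a fresh session had to be created for the caller.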
def get_auth_user() -> Optional[user.User]:
s = get_session()
u = None
if s.user:
u = s.user.load()
return u
def get_is_user_logged() -> bool:
s = get_session()
if s.is_auth:
return True
return False
| 21.684932
| 89
| 0.644978
|
4a187b188c7f50360fd1f2fa4053a3f866dd4cb9
| 2,810
|
py
|
Python
|
Tools/LyTestTools/ly_test_tools/_internal/managers/platforms/mac.py
|
Shorotshishir/o3de
|
b0c522be5196a7166cb6f13d3ddf8dae75ef06c1
|
[
"Apache-2.0",
"MIT"
] | 1
|
2021-08-11T02:20:46.000Z
|
2021-08-11T02:20:46.000Z
|
Tools/LyTestTools/ly_test_tools/_internal/managers/platforms/mac.py
|
RoddieKieley/o3de
|
e804fd2a4241b039a42d9fa54eaae17dc94a7a92
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
Tools/LyTestTools/ly_test_tools/_internal/managers/platforms/mac.py
|
RoddieKieley/o3de
|
e804fd2a4241b039a42d9fa54eaae17dc94a7a92
|
[
"Apache-2.0",
"MIT"
] | null | null | null |
"""
Copyright (c) Contributors to the Open 3D Engine Project.
For complete copyright and license terms please see the LICENSE at the root of this distribution.
SPDX-License-Identifier: Apache-2.0 OR MIT
Inside this module are 2 classes used for Mac directory & workspace mappings:
1. _MacResourceLocator(AbstractResourceLocator) derived class.
2. MacWorkspaceManager(AbstractWorkspaceManager) derived class.
"""
import os
import logging
from ly_test_tools._internal.managers.workspace import AbstractWorkspaceManager
from ly_test_tools._internal.managers.abstract_resource_locator import AbstractResourceLocator
logger = logging.getLogger(__name__)
CACHE_DIR = 'mac'
CONFIG_FILE = 'system_osx_mac.cfg'
class _MacResourceLocator(AbstractResourceLocator):
"""
Override for locating resources in a Mac operating system running LyTestTools.
"""
def platform_config_file(self):
"""
Return the path to the platform config file.
ex. engine_root/dev/system_osx_mac.cfg
:return: path to the platform config file
"""
return os.path.join(self.engine_root(), CONFIG_FILE)
def platform_cache(self):
"""
Return path to the cache for the Mac operating system.
:return: path to cache for the Mac operating system
"""
return os.path.join(self.project_cache(), CACHE_DIR)
def project_log(self):
"""
Return path to the project's log dir for the Mac operating system.
:return: path to 'log' dir in the platform cache dir
"""
return os.path.join(self.project(), 'user', 'log')
def project_screenshots(self):
"""
Return path to the project's screenshot dir for the Mac operating system.
:return: path to 'screenshot' dir in the platform cache dir
"""
return os.path.join(self.project(), 'user', 'ScreenShots')
def editor_log(self):
"""
Return path to the project's editor log dir using the builds project and platform
:return: path to Editor.log
"""
return os.path.join(self.project_log(), "Editor.log")
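# Example (paths are illustrative): if project() resolves to
# '/o3de/MyProject', then project_log() is '/o3de/MyProject/user/log' and
# editor_log() is '/o3de/MyProject/user/log/Editor.log'.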
class MacWorkspaceManager(AbstractWorkspaceManager):
"""
A Mac host WorkspaceManager. Contains Mac overridden functions for the AbstractWorkspaceManager class.
Also creates a Mac host ResourceLocator for directory and build mappings.
"""
def __init__(
self,
build_directory=None,
project=None,
tmp_path=None,
output_path=None,
):
        # type: (str, str, str, str) -> None
super(MacWorkspaceManager, self).__init__(
_MacResourceLocator(build_directory, project),
project=project,
tmp_path=tmp_path,
output_path=output_path,
)
| 33.058824
| 106
| 0.677224
|
4a187c7f378e1932eebd051bd03148ea25edc6ba
| 325
|
py
|
Python
|
ci/on-master.py
|
ya-goodfella/mill
|
4231c15442cf65ddd936e724ad59e48549a57d51
|
[
"MIT"
] | null | null | null |
ci/on-master.py
|
ya-goodfella/mill
|
4231c15442cf65ddd936e724ad59e48549a57d51
|
[
"MIT"
] | null | null | null |
ci/on-master.py
|
ya-goodfella/mill
|
4231c15442cf65ddd936e724ad59e48549a57d51
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import os, sys, subprocess
is_master_commit = (
os.environ["TRAVIS_PULL_REQUEST"] == "false" and
os.environ["TRAVIS_REPO_SLUG"] == "lihaoyi/mill" and
(os.environ["TRAVIS_BRANCH"] == "master" or os.environ["TRAVIS_TAG"] != "")
)
if is_master_commit:
subprocess.check_call(sys.argv[1:])
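# Example: on a direct push to master of lihaoyi/mill, Travis sets
# TRAVIS_PULL_REQUEST == "false" and TRAVIS_BRANCH == "master" (or
# TRAVIS_TAG for tag builds), so the wrapped command (sys.argv[1:]) runs;
# on pull-request builds nothing is executed.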
| 27.083333
| 79
| 0.683077
|
4a187da98ba987e90d811d47a1c525bc6de89b29
| 1,696
|
py
|
Python
|
_scripts/makedevel.py
|
Son-Guhun/Titan-Land-Lands-of-Plenty
|
edeca1d5437a7397195799ebf4b9585ee4609fed
|
[
"MIT"
] | 12
|
2019-05-27T16:02:28.000Z
|
2021-01-08T09:32:08.000Z
|
_scripts/makedevel.py
|
Son-Guhun/Titan-Land-Lands-of-Plenty
|
edeca1d5437a7397195799ebf4b9585ee4609fed
|
[
"MIT"
] | 209
|
2019-04-06T15:16:52.000Z
|
2021-07-03T02:11:53.000Z
|
_scripts/makedevel.py
|
Son-Guhun/Titan-Land-Lands-of-Plenty
|
edeca1d5437a7397195799ebf4b9585ee4609fed
|
[
"MIT"
] | 1
|
2021-05-26T12:13:35.000Z
|
2021-05-26T12:13:35.000Z
|
"""This script iterates over all decorations in a .ini database and set their Specialart field
to the format expected by the SpecialEffect system.
"""
try:
from myconfigparser import UnitParser, load_unit_data
except:
from .myconfigparser import UnitParser, load_unit_data
dataBase = '../devel/table/unit.ini'
# Unit types to be maintained in devel version.
ignored = set([
'e001', # Race Selector
'H00V', # Cosmosis
'H00U', # Cosmosis
'H00S', # Creator
'n000', # Give Unit To
'udr0', # Rect Generator
'udr1', # Rect Generator Dummy
'H0QU',
'nmgv', # Magic Vault
'n02W', # Ring Vault
'n02V', # Vault of Relics
'n02X', # Vault of Tomes
'n02R', # Titanic Teleportation Solutions
])
# Conditions a unit type must meet to be maintained in devel version.
conditions = [
lambda u: u.name in ignored,
lambda u: u["Name"][1:-1].startswith("Tower: "),
lambda u: u["Name"][1:-1].startswith("-Power: "),
lambda u: "[sele]" in u["EditorSuffix"],
# lambda u: unit["Builds"] != '""',
]
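# Example: a unit whose "Name" field is '"Tower: Arcane"' matches the second
# condition above and is kept; a unit matching none of the conditions is
# deleted from the devel database by do().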
def do(file_path, dataBase=dataBase):
global ignored
with open(file_path) as f:
data = load_unit_data(f, parser=UnitParser)
ignored = ignored.union(set(data.get_decobuilders(asString=True)))
for unit in [s for s in data.sections()]:
if not any([c(unit) for c in conditions]):
del data[unit.name]
with open(dataBase, 'w') as f:
data.write(f)
def old():
flagged = []
for decoration in data.get_decorations(asString=True):
if not decoration.startswith('udr'):
flagged.append(decoration)
for u in flagged:
del data[u]
| 27.803279
| 94
| 0.629717
|
4a187e92122887c452be8f96e591e781443dabc6
| 6,650
|
py
|
Python
|
presets.py
|
charlesbrown502/CloneBot
|
6dfba04afbdf0fee9e3ae2caad66117f94122904
|
[
"MIT"
] | null | null | null |
presets.py
|
charlesbrown502/CloneBot
|
6dfba04afbdf0fee9e3ae2caad66117f94122904
|
[
"MIT"
] | null | null | null |
presets.py
|
charlesbrown502/CloneBot
|
6dfba04afbdf0fee9e3ae2caad66117f94122904
|
[
"MIT"
] | null | null | null |
# ----------------------------------- https://github.com/m4mallu/clonebot --------------------------------------------#
class Presets(object):
START_TEXT = """
𝙃𝙚𝙡𝙡𝙤... {}
𝐼 𝑐𝑎𝑛 𝑐𝑙𝑜𝑛𝑒 𝑚𝑒𝑑𝑖𝑎 𝑓𝑟𝑜𝑚 𝑎𝑛𝑦 𝑐ℎ𝑎𝑡 𝑡𝑜 𝑦𝑜𝑢𝑟 𝑝𝑒𝑟𝑠𝑜𝑛𝑎𝑙 𝑐ℎ𝑎𝑡 ! 𝐶𝑙𝑖𝑐𝑘 𝑠𝑒𝑡𝑡𝑖𝑛𝑔𝑠 𝑡𝑜 𝑐𝑜𝑛𝑓𝑖𝑔𝑢𝑟𝑒 𝑚𝑒. 𝐼𝑓 𝑦𝑜𝑢 𝑙𝑖𝑘𝑒 𝑚𝑒, 𝑝𝑙𝑒𝑎𝑠𝑒 𝑔𝑖𝑣𝑒 𝑎 𝑠𝑡𝑎𝑟 𝑖𝑛 𝑚𝑎 𝐺𝑖𝑡𝐻𝑢𝑏 𝑟𝑒𝑝𝑜. 𝑇ℎ𝑎𝑛𝑘𝑠
"""
WELCOME_TEXT = "⭑⭑★✪ HELP for more info: ✪★⭑⭑"
MESSAGE_COUNT = """
𝙇𝙞𝙫𝙚: <code>{}\n{}\n{}</code>\n
𝐒𝐭𝐚𝐫𝐭𝐢𝐧𝐠 𝐈𝐝 - <b>{}</b>
𝐍𝐨𝐰@ - <b>{}</b>
𝐄𝐧𝐝𝐢𝐧𝐠 𝐈𝐝 - <b>{}</b>
𝐂𝐥𝐨𝐧𝐞 𝐃𝐞𝐥𝐚𝐲 - {}
𝐃𝐞𝐟𝐚𝐮𝐥𝐭 𝐂𝐚𝐩𝐭𝐢𝐨𝐧 - {}
𝐅𝐢𝐥𝐞 𝐧𝐚𝐦𝐞 𝐚𝐬 𝐂𝐚𝐩𝐭𝐢𝐨𝐧 - {}
𝕋𝕠𝕥𝕒𝕝 ℂ𝕠𝕡𝕚𝕖𝕕 - <b>{}</b>
𝕋𝕠𝕥𝕒𝕝 ℂ𝕠𝕞𝕡𝕝𝕖𝕥𝕖𝕕 - <b>{} %</b>
📚 𝐃𝐨𝐜𝐮𝐦𝐞𝐧𝐭𝐬 - <b>{}</b>
🎞 𝐕𝐢𝐝𝐞𝐨𝐬 - <b>{}</b>
🔊 𝐀𝐮𝐝𝐢𝐨𝐬 - <b>{}</b>
📸 𝐏𝐡𝐨𝐭𝐨𝐬 - <b>{}</b>
🗣 𝐕𝐨𝐢𝐜𝐞 - <b>{}</b>
🧩 𝐃𝐮𝐩𝐥𝐢𝐜𝐚𝐭𝐞 𝐅𝐢𝐥𝐞𝐬 - <b>{}</b>
⏳ 𝐓𝐢𝐦𝐞 𝐓𝐚𝐤𝐞𝐧 - <b>{}</b>
🆙 𝐁𝐨𝐭 𝐔𝐩𝐭𝐢𝐦𝐞 - <b>{}</b>
📲 𝐂𝐥𝐨𝐧𝐞 𝐒𝐭𝐚𝐫𝐭𝐞𝐝 𝐀𝐭 - <b>{}</b>
📌 𝐋𝐚𝐬𝐭 𝐔𝐩𝐝𝐚𝐭𝐞𝐝 𝐎𝐧 - <b>{}</b>
🔰 <a href='t.me/RMProjects'><b>@RMProjects</b></a> 🏅 <a href='https://github.com/m4mallu/clonebot-ui'><b>@Github</b></a>
"""
INFO_CHAT_TYPES = """
𝙔𝙤𝙪 𝙘𝙖𝙣 𝙚𝙣𝙩𝙚𝙧 𝙩𝙝𝙚 𝙛𝙤𝙡𝙡𝙤𝙬𝙞𝙣𝙜 𝙩𝙮𝙥𝙚𝙨:
𝐈𝐝 : 𝟏𝟐𝟑𝟒𝟓𝟔𝟕𝟖𝟗 (-𝟏𝟎𝟎 𝐧𝐨𝐭 𝐫𝐞𝐪.)
𝐈𝐧𝐯𝐢𝐭𝐞 𝐥𝐢𝐧𝐤𝐬 : 𝐡𝐭𝐭𝐩𝐬://𝐭.𝐦𝐞/𝐣𝐨𝐢𝐧𝐜𝐡𝐚𝐭/
𝐏𝐮𝐛𝐥𝐢𝐜 𝐥𝐢𝐧𝐤𝐬 : 𝐡𝐭𝐭𝐩𝐬://𝐭.𝐦𝐞/𝐩𝐲𝐭𝐡𝐨𝐧
𝐔𝐬𝐞𝐫𝐧𝐚𝐦𝐞𝐬 : @𝐩𝐲𝐭𝐡𝐨𝐧
"""
SELECTED_TYPE = """
𝙔𝙤𝙪 𝙝𝙖𝙫𝙚 𝙨𝙚𝙡𝙚𝙘𝙩𝙚𝙙:
------------------------------
𝐃𝐨𝐜𝐮𝐦𝐞𝐧𝐭 : {}
𝐀𝐮𝐝𝐢𝐨 : {}
𝐕𝐢𝐝𝐞𝐨 : {}
𝐏𝐡𝐨𝐭𝐨 : {}
𝐕𝐨𝐢𝐜𝐞 : {}
"""
VIEW_CONF = """
𝐒𝐨𝐮𝐫𝐜𝐞 𝐈𝐝 : {}
𝐓𝐚𝐫𝐠𝐞𝐭 𝐈𝐝 : {}
𝐅𝐫𝐨𝐦 𝐦𝐬𝐠 𝐈𝐝 : {} | 𝐓𝐨 𝐦𝐬𝐠 𝐈𝐝 : {}
𝐃𝐞𝐥𝐚𝐲𝐞𝐝 : {} | 𝐂𝐚𝐩𝐭𝐢𝐨𝐧 : {} | 𝐅𝐍 𝐂𝐚𝐩𝐭𝐢𝐨𝐧: {}
𝐓𝐲𝐩𝐞𝐬: 📚:{} | 🎞:{} | 🔊:{} | 📸:{} | 🗣:{}
"""
FILE_TYPES = ["document", "video", "audio", "voice", "photo"]
COPIED_MESSAGES = "<b><a href='https://github.com/m4mallu/clonebot'>Medias Copied</a></b>"
IN_CORRECT_PERMISSIONS_MESSAGE_DEST_POSTING = "A̶c̶c̶e̶s̶s̶ ̶D̶e̶n̶i̶e̶d̶\n\n𝘜𝘴𝘦𝘳 𝘪𝘴 𝘯𝘰𝘵 𝘢𝘯 𝘢𝘥𝘮𝘪𝘯 𝘰𝘳 𝘥𝘰𝘦𝘴𝘯'𝘵 𝘩𝘢𝘷𝘦\n" \
"𝘱𝘰𝘴𝘵𝘪𝘯𝘨 𝘱𝘳𝘪𝘷𝘪𝘭𝘢𝘨𝘦𝘴 𝘪𝘯 𝘵𝘩𝘦 𝘨𝘪𝘷𝘦𝘯 𝘤𝘩𝘢𝘵"
USER_ABSENT_MSG = "𝙎𝙚𝙨𝙨𝙞𝙤𝙣 𝙪𝙨𝙚𝙧 𝙞𝙨 𝙣𝙤𝙩 𝙞𝙣 𝙩𝙝𝙚 𝙩𝙖𝙧𝙜𝙚𝙩 𝙘𝙝𝙖𝙩 𝙜𝙞𝙫𝙚𝙣"
CANCEL_CLONE = "𝙎𝙩𝙤𝙥𝙥𝙞𝙣𝙜 𝙩𝙝𝙚 𝙥𝙧𝙤𝙘𝙚𝙨𝙨... 𝙋𝙡𝙯 𝙬𝙖𝙞𝙩 🕚"
CANCELLED_MSG = "⚠ 𝙐𝙨𝙚𝙧 𝙘𝙖𝙣𝙘𝙚𝙡𝙡𝙚𝙙 𝙘𝙡𝙤𝙣𝙞𝙣𝙜 ⚠"
INITIAL_MESSAGE_TEXT = "🔎 𝙄𝙣𝙞𝙩𝙞𝙖𝙡𝙞𝙯𝙞𝙣𝙜 𝙘𝙡𝙤𝙣𝙚 🔎"
WAIT_MSG = "♻️ 𝙋𝙧𝙤𝙘𝙚𝙨𝙨𝙞𝙣𝙜... 𝙥𝙡𝙯 𝙬𝙖𝙞𝙩 "
SOURCE_CONFIRM = """
𝐂𝐡𝐚𝐭 𝐍𝐚𝐦𝐞: {}
𝐂𝐡𝐚𝐭 𝐈𝐝: <code> {}</code>
𝐂𝐡𝐚𝐭 𝐓𝐲𝐩𝐞: {}
𝐂𝐡𝐚𝐭 𝐔𝐬𝐞𝐫𝐧𝐚𝐦𝐞: {}
𝐋𝐨𝐜𝐚𝐭𝐢𝐨𝐧: {}
𝐌𝐞𝐦𝐛𝐞𝐫𝐬: {}
\xad \xad
S̶o̶u̶r̶c̶e̶ C̶h̶a̶t̶ 𝐒𝐚𝐯𝐞𝐝 ✅
"""
DEST_CNF = """
𝐂𝐡𝐚𝐭 𝐍𝐚𝐦𝐞: {}
𝐂𝐡𝐚𝐭 𝐈𝐝: <code> {}</code>
𝐂𝐡𝐚𝐭 𝐓𝐲𝐩𝐞: {}
𝐂𝐡𝐚𝐭 𝐔𝐬𝐞𝐫𝐧𝐚𝐦𝐞: {}
𝐋𝐨𝐜𝐚𝐭𝐢𝐨𝐧: {}
𝐌𝐞𝐦𝐛𝐞𝐫𝐬: {}
\xad \xad
T̶a̶r̶g̶e̶t̶ C̶h̶a̶t̶ 𝐒𝐚𝐯𝐞𝐝 ✅
"""
SESSION_START_INFO = """
𝐔𝐬𝐞𝐫 𝐬𝐞𝐬𝐬𝐢𝐨𝐧 𝐬𝐭𝐚𝐫𝐭𝐞𝐝:
𝐃𝐚𝐭𝐞 : {}
𝐓𝐢𝐦𝐞 : {}
𝘈 𝘶𝘴𝘦𝘳 𝘴𝘦𝘴𝘴𝘪𝘰𝘯 𝘪𝘴 𝘴𝘵𝘢𝘳𝘵𝘦𝘥 𝘪𝘯 𝘺𝘰𝘶𝘳
𝘢𝘤𝘤𝘰𝘶𝘯𝘵, 𝘪𝘧 𝘺𝘰𝘶 𝘬𝘯𝘰𝘸 𝘵𝘩𝘪𝘴, 𝘬𝘦𝘦𝘱
𝘵𝘩𝘪𝘴 𝘣𝘰𝘵 𝘶𝘯𝘣𝘭𝘰𝘤𝘬𝘦𝘥, 𝘺𝘰𝘶 𝘤𝘢𝘯 𝘪𝘨𝘯𝘰𝘳𝘦
𝘵𝘩𝘪𝘴 𝘮𝘦𝘴𝘴𝘢𝘨𝘦, 𝘐𝘧 𝘺𝘰𝘶 𝘧𝘦𝘦𝘭𝘴 𝘭𝘪𝘬𝘦
𝘧𝘶𝘤𝘬𝘦𝘥-𝘶𝘱, ᴛᴇʀᴍɪɴᴀᴛᴇ 𝘵𝘩𝘪𝘴 𝘴𝘦𝘴𝘴𝘪𝘰𝘯
𝘢𝘯𝘥 𝘣𝘭𝘰𝘤𝘬 𝘵𝘩𝘪𝘴 𝘣𝘰𝘵 𝘵𝘰 𝘢𝘷𝘰𝘪𝘥 𝘶𝘴𝘢𝘨𝘦
𝘰𝘧 𝘺𝘰𝘶𝘳 𝘴𝘦𝘴𝘴𝘪𝘰𝘯 𝘢𝘨𝘢𝘪𝘯. Y̶o̶u̶ c̶a̶n̶
s̶e̶e̶ t̶h̶i̶s̶ m̶e̶s̶s̶a̶g̶e̶ a̶g̶a̶i̶n̶ w̶h̶e̶n̶
H̶e̶r̶o̶k̶u̶ f̶r̶e̶e̶ d̶y̶n̶o̶s̶ r̶e̶s̶t̶a̶r̶t̶s̶ .
"""
NOT_CONFIGURED = "𝙎𝙤𝙪𝙧𝙘𝙚 & 𝙏𝙖𝙧𝙜𝙚𝙩 𝙘𝙝𝙖𝙩𝙨 𝙣𝙤𝙩 𝙘𝙤𝙣𝙛𝙞𝙜𝙪𝙧𝙚𝙙 ⚠"
NOT_AUTH_TEXT = "𝙔𝙤𝙪 𝙖𝙧𝙚 𝙣𝙤𝙩 𝙖𝙪𝙩𝙝𝙤𝙧𝙞𝙯𝙚𝙙 ⚠ "
BOT_BLOCKED_MSG = "Bot is blocked by the session user !"
NOT_CONFIGURED_CLONE = "𝙉𝙤 𝙘𝙝𝙖𝙩 𝙘𝙤𝙣𝙛𝙞𝙜𝙪𝙧𝙖𝙩𝙞𝙤𝙣 𝙛𝙤𝙪𝙣𝙙 ⚠\n\n𝘾𝙤𝙣𝙛𝙞𝙜𝙪𝙧𝙚 𝙩𝙝𝙚 𝙎𝙤𝙪𝙧𝙘𝙚 & 𝘿𝙚𝙨𝙩𝙞𝙣𝙖𝙩𝙞𝙤𝙣 𝙘𝙝𝙖𝙩𝙨 𝙗𝙚𝙛𝙤𝙧𝙚 𝙮𝙤𝙪 𝙘𝙡𝙤𝙣𝙚 🤷"
FINISHED_TEXT = "𝘾𝙡𝙤𝙣𝙚 𝙘𝙤𝙢𝙥𝙡𝙚𝙩𝙚𝙙 𝙨𝙪𝙘𝙘𝙚𝙨𝙨𝙛𝙪𝙡𝙡𝙮 ✅"
TERMINATED_MSG = "🚫 𝘽𝙤𝙩 𝙏𝙚𝙧𝙢𝙞𝙣𝙖𝙩𝙚𝙙 🚫\n𝘍𝘦𝘦𝘭𝘴 𝘴𝘰𝘮𝘦𝘵𝘩𝘪𝘯𝘨 𝘧𝘪𝘴𝘩𝘺? 𝘉𝘭𝘰𝘤𝘬 𝘵𝘩𝘪𝘴 𝘣𝘰𝘵 𝘵𝘰 𝘢𝘷𝘰𝘪𝘥 𝘵𝘩𝘦 𝘶𝘴𝘢𝘨𝘦 𝘰𝘧 𝘺𝘰𝘶𝘳 𝘴𝘦𝘴𝘴𝘪𝘰𝘯 𝘢𝘨𝘢𝘪𝘯 !"
COPY_ERROR = "𝙎𝙤𝙢𝙚𝙩𝙝𝙞𝙣𝙜 𝙬𝙚𝙣𝙩 𝙬𝙧𝙤𝙣𝙜 !\n\n𝘊𝘰𝘱𝘺𝘪𝘯𝘨 𝘢𝘣𝘰𝘳𝘵𝘦𝘥 𝘣𝘺 𝘵𝘩𝘦 𝘴𝘺𝘴𝘵𝘦𝘮\n𝘊𝘩𝘦𝘤𝘬 𝘢𝘭𝘭 𝘵𝘩𝘦 𝘶𝘴𝘦𝘳 𝘱𝘦𝘳𝘮𝘪𝘴𝘴𝘪𝘰𝘯𝘴."
INVALID_CHAT_ID = "<u>𝙄𝙣𝙫𝙖𝙡𝙞𝙙 𝙘𝙝𝙖𝙩 𝙥𝙖𝙧𝙖𝙢𝙚𝙩𝙚𝙧 𝙛𝙤𝙪𝙣𝙙</u>\n\n𝐂𝐚𝐮𝐬𝐞𝐬:\n1. 𝘚𝘦𝘴𝘴𝘪𝘰𝘯 𝘶𝘴𝘦𝘳 𝘯𝘰𝘵 𝘪𝘯 𝘗𝘳𝘪𝘷𝘢𝘵𝘦 𝘤𝘩𝘢𝘵\n" \
"2. 𝘍𝘰𝘳 𝘱𝘶𝘣𝘭𝘪𝘤 𝘤𝘩𝘢𝘵𝘴, 𝘶𝘴𝘦 '@𝘶𝘴𝘦𝘳𝘯𝘢𝘮𝘦'\n𝘰𝘳 𝘭𝘪𝘯𝘬 𝘪𝘯𝘴𝘵𝘦𝘢𝘥 𝘰𝘧 '𝘪𝘥'"
ASK_SOURCE = "𝙂𝙞𝙫𝙚 𝙩𝙝𝙚 𝙨𝙤𝙪𝙧𝙘𝙚 𝙘𝙝𝙖𝙩 𝙞𝙣𝙛𝙤:\n𝑌𝑜𝑢 ℎ𝑎𝑣𝑒 30𝑆𝑒𝑐 𝑡𝑜 𝑑𝑜 𝑡ℎ𝑖𝑠.."
ASK_DESTINATION = "𝙂𝙞𝙫𝙚 𝙩𝙝𝙚 𝘿𝙚𝙨𝙩𝙞𝙣𝙖𝙩𝙞𝙤𝙣 𝙘𝙝𝙖𝙩 𝙞𝙣𝙛𝙤:\n𝑌𝑜𝑢 ℎ𝑎𝑣𝑒 30𝑆𝑒𝑐 𝑡𝑜 𝑑𝑜 𝑡ℎ𝑖𝑠.."
ASK_START_MSG_ID = "𝙂𝙞𝙫𝙚 𝙩𝙝𝙚 𝙨𝙩𝙖𝙧𝙩𝙞𝙣𝙜 𝙢𝙚𝙨𝙨𝙖𝙜𝙚 𝙄𝙙:\n𝑌𝑜𝑢 ℎ𝑎𝑣𝑒 30𝑆𝑒𝑐 𝑡𝑜 𝑑𝑜 𝑡ℎ𝑖𝑠.."
ASK_END_MSG_ID = "𝙂𝙞𝙫𝙚 𝙩𝙝𝙚 𝙚𝙣𝙙 𝙢𝙚𝙨𝙨𝙖𝙜𝙚 𝙄𝙙\n𝑌𝑜𝑢 ℎ𝑎𝑣𝑒 30𝑆𝑒𝑐 𝑡𝑜 𝑑𝑜 𝑡ℎ𝑖𝑠.."
CHAT_DUPLICATED_MSG = "𝙎𝙤𝙪𝙧𝙘𝙚 & 𝘿𝙚𝙨𝙩𝙞𝙣𝙖𝙩𝙞𝙤𝙣 𝙘𝙝𝙖𝙩 𝙄𝙙𝙨 𝙘𝙖𝙣'𝙩 𝙗𝙚 𝙨𝙖𝙢𝙚 "
FROM_MSG_ID_CNF = "𝐒𝐭𝐚𝐫𝐭 𝐦𝐞𝐬𝐬𝐚𝐠𝐞 𝐈𝐝:👉 <code>{}</code> 👈 𝐒𝐚𝐯𝐞𝐝 ✅"
END_MSG_ID_CNF = "𝐄𝐧𝐝 𝐦𝐞𝐬𝐬𝐚𝐠𝐞 𝐈𝐝:👉 <code>{}</code> 👈 𝐒𝐚𝐯𝐞𝐝 ✅"
INVALID_MSG_ID = "𝙈𝙚𝙨𝙨𝙖𝙜𝙚 𝙞𝙙 𝙨𝙝𝙤𝙪𝙡𝙙 𝙗𝙚 𝙖𝙣 𝙄𝙣𝙩𝙚𝙜𝙚𝙧 ❗️"
INVALID_REPLY_MSG = "𝙄𝙣𝙫𝙖𝙡𝙞𝙙 𝙧𝙚𝙥𝙡𝙖𝙮 𝙢𝙚𝙨𝙨𝙖𝙜𝙚 ❗️"
CNF_SOURCE_FIRST = "𝘾𝙤𝙣𝙛𝙞𝙜𝙪𝙧𝙚 𝙩𝙝𝙚 𝙨𝙤𝙪𝙧𝙘𝙚 𝙘𝙝𝙖𝙩 𝙛𝙞𝙧𝙨𝙩 ❗️"
DELAY_OFF = "𝘿𝙚𝙡𝙖𝙮𝙚𝙙 𝙘𝙡𝙤𝙣𝙚 : 𝘿𝙚𝙖𝙘𝙩𝙞𝙫𝙖𝙩𝙚𝙙 🚫"
DELAY_ON = "𝘿𝙚𝙡𝙖𝙮𝙚𝙙 𝙘𝙡𝙤𝙣𝙚 : 𝘼𝙘𝙩𝙞𝙫𝙖𝙩𝙚𝙙 [𝟏𝟎 𝐬𝐞𝐜] ✅"
ADD_DOC = "𝙁𝙞𝙡𝙚 𝙩𝙮𝙥𝙚 👉 𝘿𝙤𝙘𝙪𝙢𝙚𝙣𝙩 👈 𝙖𝙙𝙙𝙚𝙙"
RM_DOC = "𝙁𝙞𝙡𝙚 𝙩𝙮𝙥𝙚 👉 𝘿𝙤𝙘𝙪𝙢𝙚𝙣𝙩 👈 𝙞𝙜𝙣𝙤𝙧𝙚𝙙 "
ADD_VID = "𝙁𝙞𝙡𝙚 𝙩𝙮𝙥𝙚 👉 𝙑𝙞𝙙𝙚𝙤 👈 𝙖𝙙𝙙𝙚𝙙"
RM_VID = "𝙁𝙞𝙡𝙚 𝙩𝙮𝙥𝙚 👉 𝙑𝙞𝙙𝙚𝙤 👈 𝙞𝙜𝙣𝙤𝙧𝙚𝙙 "
ADD_AUD = "𝙁𝙞𝙡𝙚 𝙩𝙮𝙥𝙚 👉 𝘼𝙪𝙙𝙞𝙤 👈 𝙖𝙙𝙙𝙚𝙙"
RM_AUD = "𝙁𝙞𝙡𝙚 𝙩𝙮𝙥𝙚 👉 𝘼𝙪𝙙𝙞𝙤 👈 𝙞𝙜𝙣𝙤𝙧𝙚𝙙 "
ADD_PIC = "𝙁𝙞𝙡𝙚 𝙩𝙮𝙥𝙚 👉 𝙋𝙝𝙤𝙩𝙤 👈 𝙖𝙙𝙙𝙚𝙙"
RM_PIC = "𝙁𝙞𝙡𝙚 𝙩𝙮𝙥𝙚 👉 𝙋𝙝𝙤𝙩𝙤 👈 𝙞𝙜𝙣𝙤𝙧𝙚𝙙 "
ADD_VOI = "𝙁𝙞𝙡𝙚 𝙩𝙮𝙥𝙚 👉 𝙑𝙤𝙞𝙘𝙚 👈 𝙖𝙙𝙙𝙚𝙙"
RM_VOI = "𝙁𝙞𝙡𝙚 𝙩𝙮𝙥𝙚 👉 𝙑𝙤𝙞𝙘𝙚 👈 𝙞𝙜𝙣𝙤𝙧𝙚𝙙 "
BLANK = "➖➖➖➖➖➖➖➖➖➖➖➖➖"
BLOCK = "ᴘʀᴏɢʀᴇꜱꜱ ꜰᴀɪʟᴇᴅ ᴛᴏ ᴅɪꜱᴘʟᴀʏ :👉 ʜᴇʟᴘ"
CAPTION_ON = "𝘾𝙖𝙥𝙩𝙞𝙤𝙣 𝙤𝙣 𝙛𝙞𝙡𝙚𝙨 : 𝘼𝙘𝙩𝙞𝙫𝙖𝙩𝙚𝙙 ✅"
CAPTION_OFF = "𝘾𝙖𝙥𝙩𝙞𝙤𝙣 𝙤𝙣 𝙛𝙞𝙡𝙚𝙨 : 𝘿𝙚𝙖𝙘𝙩𝙞𝙫𝙖𝙩𝙚𝙙 🚫"
FN_AS_CAPT_ON = "𝙁𝙞𝙡𝙚 𝙣𝙖𝙢𝙚 𝙖𝙨 𝙘𝙖𝙥𝙩𝙞𝙤𝙣 : 𝘼𝙘𝙩𝙞𝙫𝙖𝙩𝙚𝙙 ✅"
FN_AS_CAPT_OFF = "𝙁𝙞𝙡𝙚 𝙣𝙖𝙢𝙚 𝙖𝙨 𝙘𝙖𝙥𝙩𝙞𝙤𝙣 : 𝘿𝙚𝙖𝙘𝙩𝙞𝙫𝙖𝙩𝙚𝙙 🚫"
NOT_REQUIRED = "𝙏𝙝𝙞𝙨 𝙛𝙞𝙚𝙡𝙙 𝙞𝙨 𝙣𝙤𝙩 𝙈𝙖𝙙𝙖𝙩𝙤𝙧𝙮 ⚠"
RST_MSG = "𝙍𝙚𝙨𝙚𝙩 𝙩𝙤 𝘽𝙤𝙩 𝙙𝙚𝙛𝙖𝙪𝙡𝙩𝙨 .. 𝘾𝙤𝙣𝙛𝙞𝙧𝙢𝙚𝙙 ✅"
TEST_MSG = "Test Message"
OVER_FLOW = "𝙈𝙖𝙭𝙞𝙢𝙪𝙢 𝙡𝙞𝙢𝙞𝙩 𝙞𝙨 𝙚𝙭𝙘𝙚𝙚𝙙𝙚𝙙 !\n𝘾𝙝𝙚𝙘𝙠 𝙩𝙝𝙚 𝙖𝙡𝙡𝙤𝙬𝙚𝙙 𝙡𝙞𝙢𝙞𝙩, 𝙏𝙧𝙮 𝙖𝙜𝙖𝙞𝙣 !"
SELECT_TYPE = "👉 𝙎𝙚𝙡𝙚𝙘𝙩𝙞𝙤𝙣 𝙬𝙞𝙡𝙡 𝙗𝙚 𝙩𝙤𝙜𝙜𝙡𝙚𝙙 𝙤𝙣 𝙩𝙖𝙥\n𝘈𝘭𝘭 𝘢𝘳𝘦 𝘴𝘦𝘭𝘦𝘤𝘵𝘦𝘥 𝘣𝘺 𝘥𝘦𝘧𝘢𝘶𝘭𝘵 !"
INDEXING_MSG = "𝙋𝙡𝙚𝙖𝙨𝙚 𝙬𝙖𝙞𝙩..\n<i>Finding duplicate messages in the\ntarget chat. This will " \
"take some\ntime to figure out.</i>\n\n<b><u>Message id</u>:-\n🔷Now@: {}\n🔷End@ : {}\n\n" \
"<u>Duplicates</u>:-\n⚠Total: {}</b>"
PURGE_PROMPT = "👉 <b>{}</b> 👈 <i>Duplicate files found in your target chat. Do you wish to purge it now ?</i>"
PROCESSING_PURGE = "<b>🔷Now@: {} 🔷End@: {}</b>\n\n<i>𝐏𝐫𝐨𝐜𝐞𝐬𝐬𝐢𝐧𝐠.. Please Wait</i>"
TARGET_CFG_LOAD_MSG = "<b><u>Imported</u> ✅</b>\n\n<code>An index of the given target chat found in my database. " \
"It has been loaded to ma memory.</code>\n\n<b><i>Proceeding to clone..</i></b>"
| 43.464052
| 148
| 0.567368
|
4a187f99b6e2582c2351fe9ff368dad76ef168ca
| 3,731
|
py
|
Python
|
mythril/analysis/module/modules/delegatecall.py
|
marcuswin/mythril
|
27f3693de6e98890db27a258227db58b11566f4c
|
[
"MIT"
] | null | null | null |
mythril/analysis/module/modules/delegatecall.py
|
marcuswin/mythril
|
27f3693de6e98890db27a258227db58b11566f4c
|
[
"MIT"
] | null | null | null |
mythril/analysis/module/modules/delegatecall.py
|
marcuswin/mythril
|
27f3693de6e98890db27a258227db58b11566f4c
|
[
"MIT"
] | null | null | null |
"""This module contains the detection code for insecure delegate call usage."""
import logging
from typing import List
from mythril.analysis.potential_issues import (
get_potential_issues_annotation,
PotentialIssue,
)
from mythril.analysis.swc_data import DELEGATECALL_TO_UNTRUSTED_CONTRACT
from mythril.laser.ethereum.transaction.symbolic import ACTORS
from mythril.laser.ethereum.transaction.transaction_models import (
ContractCreationTransaction,
)
from mythril.analysis.module.base import DetectionModule, EntryPoint
from mythril.exceptions import UnsatError
from mythril.laser.ethereum.state.global_state import GlobalState
from mythril.laser.smt import symbol_factory, UGT
log = logging.getLogger(__name__)
class ArbitraryDelegateCall(DetectionModule):
"""This module detects calldata being forwarded using DELEGATECALL."""
name = "Delegatecall to a user-specified address"
swc_id = DELEGATECALL_TO_UNTRUSTED_CONTRACT
description = (
"Check for invocations of delegatecall(msg.data) in the fallback function."
)
entry_point = EntryPoint.CALLBACK
pre_hooks = ["DELEGATECALL"]
def _execute(self, state: GlobalState) -> None:
"""
:param state:
:return:
"""
if state.get_current_instruction()["address"] in self.cache:
return
potential_issues = self._analyze_state(state)
annotation = get_potential_issues_annotation(state)
annotation.potential_issues.extend(potential_issues)
def _analyze_state(self, state: GlobalState) -> List[PotentialIssue]:
"""
:param state: the current state
:return: returns the issues for that corresponding state
"""
gas = state.mstate.stack[-1]
to = state.mstate.stack[-2]
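        # The issue is only raised when the solver finds a model in which:
        # (1) the delegatecall target equals the attacker's address,
        # (2) more than the 2300 gas stipend is forwarded,
        # (3) the call is assumed to succeed (retval == 1), and
        # (4) every non-creation transaction was sent by the attacker.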
constraints = [
to == ACTORS.attacker,
UGT(gas, symbol_factory.BitVecVal(2300, 256)),
state.new_bitvec(
"retval_{}".format(state.get_current_instruction()["address"]), 256
)
== 1,
]
for tx in state.world_state.transaction_sequence:
if not isinstance(tx, ContractCreationTransaction):
constraints.append(tx.caller == ACTORS.attacker)
try:
address = state.get_current_instruction()["address"]
            log.debug(
"[DELEGATECALL] Detected potential delegatecall to a user-supplied address : {}".format(
address
)
)
description_head = "The contract delegates execution to another contract with a user-supplied address."
description_tail = (
"The smart contract delegates execution to a user-supplied address. Note that callers "
"can execute arbitrary contracts and that the callee contract "
"can access the storage of the calling contract. "
)
return [
PotentialIssue(
contract=state.environment.active_account.contract_name,
function_name=state.environment.active_function_name,
address=address,
swc_id=DELEGATECALL_TO_UNTRUSTED_CONTRACT,
bytecode=state.environment.code.bytecode,
title="Delegatecall Proxy To User-Supplied Address",
severity="Medium",
description_head=description_head,
description_tail=description_tail,
constraints=constraints,
detector=self,
)
]
except UnsatError:
return []
detector = ArbitraryDelegateCall()
| 35.875
| 115
| 0.638971
|
4a18807e8658851024c96347bf2a910a871e13c3
| 12,297
|
py
|
Python
|
vistrails/packages/persistent_archive/widgets.py
|
remram44/VisTrails-mybinder
|
ee7477b471920d738f3ac430932f01901b56ed44
|
[
"BSD-3-Clause"
] | 83
|
2015-01-05T14:50:50.000Z
|
2021-09-17T19:45:26.000Z
|
vistrails/packages/persistent_archive/widgets.py
|
remram44/VisTrails-mybinder
|
ee7477b471920d738f3ac430932f01901b56ed44
|
[
"BSD-3-Clause"
] | 254
|
2015-01-02T20:39:19.000Z
|
2018-11-28T17:16:44.000Z
|
vistrails/packages/persistent_archive/widgets.py
|
remram44/VisTrails-mybinder
|
ee7477b471920d738f3ac430932f01901b56ed44
|
[
"BSD-3-Clause"
] | 40
|
2015-04-17T16:46:36.000Z
|
2021-09-28T22:43:24.000Z
|
###############################################################################
##
## Copyright (C) 2014-2016, New York University.
## Copyright (C) 2013-2014, NYU-Poly.
## All rights reserved.
## Contact: contact@vistrails.org
##
## This file is part of VisTrails.
##
## "Redistribution and use in source and binary forms, with or without
## modification, are permitted provided that the following conditions are met:
##
## - Redistributions of source code must retain the above copyright notice,
## this list of conditions and the following disclaimer.
## - Redistributions in binary form must reproduce the above copyright
## notice, this list of conditions and the following disclaimer in the
## documentation and/or other materials provided with the distribution.
## - Neither the name of the New York University nor the names of its
## contributors may be used to endorse or promote products derived from
## this software without specific prior written permission.
##
## THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
## AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
## THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
## PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
## CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
## EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
## PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
## OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
## WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
## OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
## ADVISED OF THE POSSIBILITY OF SUCH DAMAGE."
##
###############################################################################
from __future__ import division
from PyQt4 import QtCore, QtGui
from vistrails.core.db.action import create_action
from vistrails.gui.modules.constant_configuration import ConstantWidgetBase
from vistrails.gui.modules.module_configure import \
StandardModuleConfigurationWidget
from .queries import QueryCondition, EqualString, EqualInt
def str_repr(s):
if isinstance(s, unicode):
s = (s.replace('\\', '\\\\')
.replace("'", "\\'")
.encode('ascii', 'backslashreplace'))
else:
s = (s.replace('\\', '\\\\')
.replace("'", "\\'"))
return "'%s'" % s
class Metadata(QtGui.QWidget):
remove = QtCore.pyqtSignal()
changed = QtCore.pyqtSignal()
def __init__(self, name, value=None):
QtGui.QWidget.__init__(self)
layout = QtGui.QHBoxLayout()
self.setLayout(layout)
self.key = QtGui.QLineEdit()
self.key.setText(name)
layout.addWidget(self.key, 1)
self.value = self.value_widget(value)
layout.addWidget(self.value, 2)
remove_button = QtGui.QPushButton("Remove port")
remove_button.setSizePolicy(QtGui.QSizePolicy.Fixed,
QtGui.QSizePolicy.Fixed)
layout.addWidget(remove_button)
self.connect(remove_button, QtCore.SIGNAL('clicked()'),
self.remove)
self.connect(self.key, QtCore.SIGNAL('textEdited(const QString &)'),
self.changed)
self.connect(self.value, QtCore.SIGNAL('textEdited(const QString &)'),
self.changed)
class StringMetadata(Metadata):
@staticmethod
def value_widget(value=None):
return QtGui.QLineEdit(value)
def to_string(self):
return 'EqualString(%s, %s)' % (str_repr(self.key.text()),
str_repr(self.value.text()))
class IntMetadata(Metadata):
def value_widget(self, value=None):
w = QtGui.QLineEdit()
if value is not None:
w.setText('%d' % value)
w.setValidator(QtGui.QIntValidator(self))
return w
def to_string(self):
try:
i = int(self.value.text())
except ValueError:
i = 0
return 'EqualInt(%s, %d)' % (str_repr(self.key.text()), i)
class SetMetadataWidget(StandardModuleConfigurationWidget):
"""
Configuration widget allowing to set metadata on persisted modules.
It is a visual editor for the strings functions set on the 'metadata' port,
which have the form EqualString('mkey', 'mvalue') or EqualInt('mkey', 2).
"""
def __init__(self, module, controller, parent=None):
StandardModuleConfigurationWidget.__init__(self, module,
controller, parent)
# Window title
self.setWindowTitle("Metadata editor")
central_layout = QtGui.QVBoxLayout()
central_layout.setMargin(0)
central_layout.setSpacing(0)
self.setLayout(central_layout)
self._scroll_area = QtGui.QScrollArea()
inner_widget = QtGui.QWidget()
self._list_layout = QtGui.QVBoxLayout()
scroll_layout = QtGui.QVBoxLayout()
scroll_layout.addLayout(self._list_layout)
scroll_layout.addStretch()
inner_widget.setLayout(scroll_layout)
self._scroll_area.setVerticalScrollBarPolicy(
QtCore.Qt.ScrollBarAlwaysOn)
self._scroll_area.setWidget(inner_widget)
self._scroll_area.setWidgetResizable(True)
central_layout.addWidget(self._scroll_area)
add_buttons = QtGui.QHBoxLayout()
central_layout.addLayout(add_buttons)
add_string = QtGui.QPushButton("Add a string")
self.connect(add_string, QtCore.SIGNAL('clicked()'),
self.add_string)
add_buttons.addWidget(add_string, 2)
add_int = QtGui.QPushButton("Add an integer")
self.connect(add_int, QtCore.SIGNAL('clicked()'),
self.add_int)
add_buttons.addWidget(add_int, 1)
self.createButtons()
self.createEntries()
def add_item(self, item):
self._list_layout.addWidget(item)
self.connect(item, QtCore.SIGNAL('remove()'),
lambda: item.deleteLater())
self.connect(item, QtCore.SIGNAL('changed()'),
self.updateState)
def add_string(self):
self.add_item(StringMetadata(
"string%d" % (self._list_layout.count() + 1)))
self.updateState()
def add_int(self):
self.add_item(IntMetadata(
"int%d" % (self._list_layout.count() + 1)))
self.updateState()
def createButtons(self):
""" createButtons() -> None
Create and connect signals to Ok & Cancel button
"""
buttonLayout = QtGui.QHBoxLayout()
buttonLayout.setMargin(5)
self.saveButton = QtGui.QPushButton("&Save", self)
self.saveButton.setFixedWidth(100)
self.saveButton.setEnabled(False)
buttonLayout.addWidget(self.saveButton)
self.resetButton = QtGui.QPushButton("&Reset", self)
self.resetButton.setFixedWidth(100)
self.resetButton.setEnabled(False)
buttonLayout.addWidget(self.resetButton)
self.layout().addLayout(buttonLayout)
self.connect(self.saveButton, QtCore.SIGNAL('clicked(bool)'),
self.saveTriggered)
self.connect(self.resetButton, QtCore.SIGNAL('clicked(bool)'),
self.resetTriggered)
def saveTriggered(self, checked = False):
""" saveTriggered(checked: bool) -> None
Update vistrail controller and module when the user click Ok
"""
if self.updateVistrail():
self.saveButton.setEnabled(False)
self.resetButton.setEnabled(False)
self.state_changed = False
self.emit(QtCore.SIGNAL('stateChanged'))
self.emit(QtCore.SIGNAL('doneConfigure'), self.module.id)
def closeEvent(self, event):
self.askToSaveChanges()
event.accept()
def updateVistrail(self):
""" updateVistrail() -> None
Update functions on the metadata port of the module
"""
# Remove the keys that we loaded
ops = [('delete', func) for func in self._loaded_keys]
# Add the metadata in the list
for i in xrange(self._list_layout.count()):
widget = self._list_layout.itemAt(i).widget()
ops.extend(self.controller.update_function_ops(
self.module, 'metadata',
[widget.to_string()]))
# This code should really be in VistrailController
self.controller.flush_delayed_actions()
action = create_action(ops)
self.controller.add_new_action(action,
"Updated PersistedPath metadata")
self.controller.perform_action(action)
return True
def getCurrentFunctions(self):
for i in xrange(self.module.getNumFunctions()):
func = self.module.functions[i]
if func.name == 'metadata':
yield func, func.params[0].strValue
def createEntries(self):
self._loaded_keys = set()
for func, metadata in self.getCurrentFunctions():
metadata = QueryCondition.translate_to_python(metadata)
save = True
if metadata is None:
save = False
elif isinstance(metadata, EqualString):
self.add_item(StringMetadata(metadata.key, metadata.value))
elif isinstance(metadata, EqualInt):
self.add_item(IntMetadata(metadata.key, metadata.value))
else:
save = False
if save:
self._loaded_keys.add(func)
def resetTriggered(self, checked = False):
for i in xrange(self._list_layout.count()):
self._list_layout.itemAt(i).widget().deleteLater()
self.createEntries()
self.saveButton.setEnabled(False)
self.resetButton.setEnabled(False)
self.state_changed = False
self.emit(QtCore.SIGNAL('stateChanged'))
def updateState(self):
self.saveButton.setEnabled(True)
self.resetButton.setEnabled(True)
if not self.state_changed:
self.state_changed = True
self.emit(QtCore.SIGNAL('stateChanged'))
class MetadataConstantWidget(ConstantWidgetBase, QtGui.QWidget):
contentsChanged = QtCore.pyqtSignal(tuple)
def __init__(self, param, parent=None):
QtGui.QWidget.__init__(self, parent)
self._key = QtGui.QLineEdit()
self.connect(self._key, QtCore.SIGNAL("returnPressed()"),
self.update_parent)
self._type = QtGui.QComboBox()
self._type.addItems(['int', 'str'])
self.connect(self._type, QtCore.SIGNAL("currentIndexChanged()"),
self.update_parent)
self._value = QtGui.QLineEdit()
self.connect(self._value, QtCore.SIGNAL("returnPressed()"),
self.update_parent)
layout = QtGui.QHBoxLayout()
layout.addWidget(self._key)
layout.addWidget(self._type)
layout.addWidget(self._value)
self.setLayout(layout)
ConstantWidgetBase.__init__(self, param)
self.watchForFocusEvents(self._key)
self.watchForFocusEvents(self._type)
self.watchForFocusEvents(self._value)
def contents(self):
if self._type.currentText() == 'int':
return 'EqualInt(%s, %s)' % (str_repr(self._key.text()),
self._value.text())
else: # self._type.currentText() == 'str':
return 'EqualString(%s, %s)' % (str_repr(self._key.text()),
str_repr(self._value.text()))
def setContents(self, value, silent=False):
cond = QueryCondition.translate_to_python(value, text_query=False)
self._key.setText(cond.key)
if isinstance(cond, EqualInt):
self._type.setCurrentIndex(0)
self._value.setText('%d' % cond.value)
elif isinstance(cond, EqualString):
self._type.setCurrentIndex(1)
self._value.setText(cond.value)
if not silent:
self.update_parent()
| 37.490854
| 79
| 0.622428
|
4a1880e04c36035d3f613dc0c00883a7f65e0cd3
| 50,020
|
py
|
Python
|
core_tools/drivers/M3102A.py
|
peendebak/core_tools
|
2e43edf0bbc1d7ceb7042559db499535e8f6a076
|
[
"BSD-2-Clause"
] | 1
|
2022-02-11T09:24:35.000Z
|
2022-02-11T09:24:35.000Z
|
core_tools/drivers/M3102A.py
|
peendebak/core_tools
|
2e43edf0bbc1d7ceb7042559db499535e8f6a076
|
[
"BSD-2-Clause"
] | null | null | null |
core_tools/drivers/M3102A.py
|
peendebak/core_tools
|
2e43edf0bbc1d7ceb7042559db499535e8f6a076
|
[
"BSD-2-Clause"
] | 2
|
2020-07-06T14:31:27.000Z
|
2021-07-07T13:57:19.000Z
|
from qcodes import Instrument, MultiParameter
from dataclasses import dataclass
from typing import Optional
import warnings
import logging
import time
import copy
from si_prefix import si_format
try:
import keysightSD1
except Exception:
warnings.warn("\nM3102A needs Keysight AWG libraries. Please install if you need them.\n")
# check whether SD1 version 2.x or 3.x
is_sd1_3x = 'SD_SandBoxRegister' in dir(keysightSD1)
if is_sd1_3x:
# import function for hvi2 downsampler FPGA image
from keysight_fpga.sd1.dig_iq import config_channel, \
is_iq_image_loaded, dig_set_lo, dig_set_input_channel, dig_set_downsampler
import numpy as np
def check_error(res, s=''):
if (type(res) is int and res < 0):
error = res
msg = f'Keysight error: {keysightSD1.SD_Error.getErrorMessage(error)} ({error}) {s}'
logging.error(msg)
return res
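# Usage sketch: wrap raw SD1 calls so negative return codes get logged with
# context and are then passed through unchanged, e.g.
#   check_error(dig.SD_AIN.DAQstart(1), 'DAQstart ch1')
# (``dig`` is an illustrative instrument instance, not defined here.)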
"""
Minimalistic qcodes driver for the Keysight digizer card (M3102A)
Author : Stephan Philips (TuDelft)
"""
class MODES:
"""
    Acquisition modes to operate in:
NORMAL : normal / raw data
AVERAGE : averaging / downsampling of traces
IQ_DEMODULATION : IQ demodulation
IQ_DEMOD_I_ONLY : IQ demodulation output I-only
IQ_INPUT_SHIFTED_IQ_OUT : IQ input pair (1+2 or 3+4), phase shift and complex output on odd channel.
IQ_INPUT_SHIFTED_I_OUT : IQ input pair (1+2 or 3+4), phase shift and I value output on odd channel.
The operating modes other than NORMAL require an FPGA image.
"""
NORMAL = 0
AVERAGE = 1
IQ_DEMODULATION = 2
IQ_DEMOD_I_ONLY = 3
IQ_INPUT_SHIFTED_IQ_OUT = 4
IQ_INPUT_SHIFTED_I_OUT = 5
class OPERATION_MODES:
"""
Modes for operation
    SOFT_TRG : software triggering (start and trigger are both issued in software)
    ANALOG_TRG : external/analog triggering (only the start call is issued in software)
    HVI_TRG : HVI-controlled triggering (no software calls; HVI starts and triggers)
"""
SOFT_TRG = 0
ANALOG_TRG = 1
HVI_TRG = 2
class DATA_MODE:
"""
Mode of handling data. Determines what will be saved.
FULL : no averaging at all, get back full output data
AVERAGE_TIME : average on x axis --> average a full trace to a single point
AVERAGE_CYCLES : average on y axis --> average over all the iterations
AVERAGE_TIME_AND_CYCLES : average on x and y axis, in other words, get back a single point
"""
FULL = 0
AVERAGE_TIME = 1
AVERAGE_CYCLES = 2
AVERAGE_TIME_AND_CYCLES = 3
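# Resulting shapes per mode, for R cycles of T points each (mirroring the
# np.average axes used in line_trace._get_data):
#   FULL                    -> (R, T)
#   AVERAGE_TIME            -> (R,)    average over axis 1 (time)
#   AVERAGE_CYCLES          -> (T,)    average over axis 0 (cycles)
#   AVERAGE_TIME_AND_CYCLES -> scalar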
class line_trace(MultiParameter):
"""
    Class that defines the parameter holding the measured data.
"""
def __init__(self, name, instrument, inst_name, raw=False, **kwargs):
self.my_instrument = instrument
super().__init__(name=name,
instrument=instrument,
names = (name +'_ch1', name +'_ch2'),
shapes=((1,),(1,)),
docstring='Averaged traces from digitizer',
**kwargs)
self.cached_properties = dict()
@property
def channels(self):
"""
list with active channels on the digitizer.
"""
channels = []
for channel_property in self.my_instrument.channel_properties.values():
if channel_property.active == True:
channels.append(channel_property.number)
return channels
@property
def channel_mask(self):
"""
generate channels mask for start multiple control functions
"""
channel_mask = 0
for i in self.channels:
channel_mask += 1 << (i - 1)
return channel_mask
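    # Example: active channels [1, 3] yield channel_mask 0b0101 == 5.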
def get_raw(self):
if self.my_instrument.operation_mode in [OPERATION_MODES.SOFT_TRG, OPERATION_MODES.ANALOG_TRG]:
self.start_digitizers()
if self.my_instrument.operation_mode == OPERATION_MODES.SOFT_TRG:
self.trigger_digitizers()
return self.get_data()
def _read_available(self, ch, buffer, offset):
available = self.my_instrument.SD_AIN.DAQcounterRead(ch)
check_error(available)
if available <= 0:
return available
length = len(buffer)
if available + offset > length:
logging.warning(f"ch{ch} more data points in digitizer ram ({available}+{offset}) "
f"than what is being collected ({length}).")
available = length - offset
# Always read with a timeout to prevent infinite blocking of HW (and reboot of system).
        # Transfer rate is ~55 MSa/s; estimate with a conservative 50 MSa/s and add one second margin.
read_timeout = int((available / 50e6 + 1) * 1000)
received = self.my_instrument.SD_AIN.DAQread(ch, available, read_timeout)
check_error(received)
if isinstance(received, int) and received < 0:
# the error has already been logged
return received
n_received = len(received)
# logging.debug(f'DAQread ch:{ch} ready:{available} read:{n_received} offset:{offset}')
if n_received != available:
if available > n_received and available - n_received < 4:
# It seems that M3102A only returns multiples of 4 bytes.
logging.warning(f'DAQread data remaining. ch:{ch} ready:{available} read:{n_received}')
else:
logging.error(f'DAQread failure. ch:{ch} ready:{available} read:{n_received}')
if n_received > 0:
buffer[offset:offset + n_received] = received
return n_received
def _read_channels(self, daq_points_per_channel):
start = time.perf_counter()
data_read = {channel:0 for channel in daq_points_per_channel}
channels = daq_points_per_channel.keys()
channels_to_read = list(channels)
no_data_count = 0
consecutive_error_count = 0
last_read = time.perf_counter()
has_read_timeout = False
while len(channels_to_read) > 0 and not has_read_timeout and consecutive_error_count < 5:
any_read = False
for ch in channels_to_read:
n_read = self._read_available(ch, daq_points_per_channel[ch], data_read[ch])
# logging.debug(f'ch{ch}: {n_read}')
if n_read < 0:
consecutive_error_count += 1
if n_read > 0:
data_read[ch] = data_read[ch] + n_read
consecutive_error_count = 0
any_read = True
if data_read[ch] == len(daq_points_per_channel[ch]):
# all read: remove from list
channels_to_read.remove(ch)
if any_read:
no_data_count = 0
last_read = time.perf_counter()
else:
no_data_time = time.perf_counter() - last_read
no_data_count += 1
time.sleep(0.001)
# abort when no data has been received for 30 s and at least 2 checks without any data
# the timeout of 30 s is needed for T1 measurement of 100 ms and one flush every 256 measurements.
                has_read_timeout = no_data_count >= 2 and (no_data_time > 30)
if (no_data_time > 0.5 and no_data_count < 100) or no_data_count % 100 == 0:
logging.debug(f'no data available ({no_data_count}, {no_data_time:4.2f} s); wait...')
logging.info(f'channels {channels}: retrieved {data_read} points in {(time.perf_counter()-start)*1000:3.1f} ms')
for ch in channels:
if data_read[ch] != len(daq_points_per_channel[ch]):
logging.error(f"digitizer did not collect enough data points for channel {ch}; "
f"requested:{len(daq_points_per_channel[ch])} received:{data_read[ch]}; "
"last values are zeros.")
def _get_data(self):
data_out = tuple()
daq_points_per_channel = {}
for channel_property in self.my_instrument.channel_properties.values():
if channel_property.active == False:
continue
channel = channel_property.number
daq_cycles = channel_property.daq_cycles
daq_points_per_cycle = channel_property.daq_points_per_cycle
daq_points = daq_cycles * daq_points_per_cycle
daq_points_per_channel[channel] = np.zeros(daq_points, np.double)
self._read_channels(daq_points_per_channel)
for channel_property in self.my_instrument.channel_properties.values():
if not channel_property.active:
continue
channel_data_raw = daq_points_per_channel[channel_property.number]
# convert 16 bit signed to mV. (inplace multiplication on numpy array is fast)
channel_data_raw *= channel_property.full_scale * 1000 / 32768
if channel_property.acquisition_mode == MODES.NORMAL:
# reshape to [repetitions, time]; averaging happens later, per data_mode
channel_data_raw = channel_data_raw.reshape([channel_property.cycles, channel_property.daq_points_per_cycle])
# remove extra samples due to alignment
channel_data_raw = channel_data_raw[:,:channel_property.points_per_cycle]
elif channel_property.acquisition_mode in [MODES.IQ_DEMODULATION, MODES.IQ_INPUT_SHIFTED_IQ_OUT]:
# remove alignment points
total_points = channel_property.points_per_cycle * channel_property.cycles * 2
channel_data_raw = channel_data_raw[:total_points]
# convert to array with complex values
channel_data_raw = channel_data_raw[::2] + 1j * channel_data_raw[1::2]
# reshape to [repetitions, time]; averaging happens later, per data_mode
channel_data_raw = channel_data_raw.reshape([channel_property.cycles, channel_property.points_per_cycle])
else:
# remove alignment points
total_points = channel_property.points_per_cycle * channel_property.cycles
channel_data_raw = channel_data_raw[:total_points]
# reshape to [repetitions, time]; averaging happens later, per data_mode
channel_data_raw = channel_data_raw.reshape([channel_property.cycles, channel_property.points_per_cycle])
if channel_property.data_mode == DATA_MODE.FULL:
data_out += (channel_data_raw, )
elif channel_property.data_mode == DATA_MODE.AVERAGE_TIME:
data_out += (np.average(channel_data_raw, axis = 1), )
elif channel_property.data_mode == DATA_MODE.AVERAGE_CYCLES:
data_out += (np.average(channel_data_raw, axis = 0), )
elif channel_property.data_mode == DATA_MODE.AVERAGE_TIME_AND_CYCLES:
data_out += (np.average(channel_data_raw), )
return data_out
# NOTE: only used for old fpga image
def _read_channel_data(self, channel_number, channel_data_raw):
start = time.perf_counter()
data_length = len(channel_data_raw)
no_data_count = 0
consecutive_error_count = 0
points_acquired = 0
while points_acquired < data_length and consecutive_error_count < 3:
np_ready = self.my_instrument.SD_AIN.DAQcounterRead(channel_number)
check_error(np_ready)
if np_ready + points_acquired > data_length:
np_ready = data_length - points_acquired
logging.warning("more data points in digitizer ram than what is being collected.")
n_received = 0
if np_ready > 0:
# Always read with a timeout to prevent infinite blocking of HW (and reboot of system).
# There are np_ready points available. This can be read in 1 second.
received = self.my_instrument.SD_AIN.DAQread(channel_number, np_ready, 1000)
check_error(received)
if isinstance(received, int) and received < 0:
# the error has already been logged
consecutive_error_count += 1
continue
n_received = len(received)
# logging.debug(f'DAQread ready:{np_ready} read:{n_received}')
if n_received != np_ready:
if np_ready > n_received and np_ready - n_received < 4:
# It seems that M3102A only returns multiples of 4 bytes.
logging.warning(f'DAQread data remaining. ready:{np_ready} read:{n_received}')
else:
logging.error(f'DAQread failure. ready:{np_ready} read:{n_received}')
if n_received > 0:
channel_data_raw[points_acquired: points_acquired + n_received] = received
points_acquired = points_acquired + n_received
no_data_count = 0
consecutive_error_count = 0
else:
# logging.debug(f'no data; wait...')
no_data_count += 1
if no_data_count > 100:
break
time.sleep(0.001)
if points_acquired != data_length:
logging.error(f"digitizer did not collect enough data points for channel {channel_number}; "
f"requested:{data_length} received:{points_acquired}; last values are zeros.")
logging.info(f'channel {channel_number}: retrieved {points_acquired} points in {(time.perf_counter()-start)*1000:3.1f} ms')
# NOTE: only used for old fpga image
def _get_data_average(self):
data_out = tuple()
# note that we are acquiring two channels at the same time in this mode.
for channel_property in self.my_instrument.channel_properties.values():
# averaging mode: channels are read in pairs.
if channel_property.number in [2,4]:
# even-numbered channels are read together with the preceding odd channel.
continue
if channel_property.number not in self.channels and channel_property.number+1 not in self.channels:
# don't read anything if neither channel is active.
continue
# make flat data structures.
channel_data_raw = np.zeros([channel_property.cycles*10], np.uint16)
self._read_channel_data(channel_property.number, channel_data_raw)
# format the data
channel_data_raw = channel_data_raw.reshape([channel_property.cycles, 10]).transpose().astype(np.int32)
channel_data = np.empty([2,channel_property.cycles])
channel_data[0] = ((channel_data_raw[1] & 2**16-1) << 16) | (channel_data_raw[0] & 2**16-1)
channel_data[1] = ((channel_data_raw[3] & 2**16-1) << 16) | (channel_data_raw[2] & 2**16-1)
# correct the amplitude: output in V; the factor 5 accounts for
# acquisition in blocks of 5 samples
channel_data[0] *= 5 * 2/(channel_property.t_measure-160)*channel_property.full_scale / 2**15
channel_data[1] *= 5 * 2/(channel_property.t_measure-160)*channel_property.full_scale / 2**15
# only add the data of the selected channels.
if channel_property.number in self.channels:
if channel_property.data_mode in [DATA_MODE.AVERAGE_CYCLES, DATA_MODE.AVERAGE_TIME_AND_CYCLES]:
data_out += (np.average(channel_data[0]), )
else:
data_out += (channel_data[0], )
if channel_property.number + 1 in self.channels:
if channel_property.data_mode in [DATA_MODE.AVERAGE_CYCLES, DATA_MODE.AVERAGE_TIME_AND_CYCLES]:
data_out += (np.average(channel_data[1]), )
else:
data_out += (channel_data[1], )
return data_out
def get_data(self):
"""
Get data from the cards
"""
if self.my_instrument.use_old_fpga_averaging:
return self._get_data_average()
else:
return self._get_data()
def start_digitizers(self):
# start digitizers.
self.my_instrument.daq_start_multiple(self.channel_mask)
def trigger_digitizers(self):
# trigger the digitizers.
for i in range(self.my_instrument.channel_properties[f'ch{self.channels[0]}'].cycles):
self.my_instrument.daq_trigger_multiple(self.channel_mask)
def _generate_parameter_info(self):
"""
Generate the correct labels/units for the digitizer parameter
"""
info_changed = False
for properties in self.my_instrument.channel_properties.values():
if properties.name not in self.cached_properties:
self.cached_properties[properties.name] = channel_properties(properties.name, properties.number)
cached = self.cached_properties[properties.name]
info_changed |= (
properties.active != cached.active
or properties.acquisition_mode != cached.acquisition_mode
or properties.data_mode != cached.data_mode
or properties.cycles != cached.cycles
or properties.t_measure != cached.t_measure
or properties.points_per_cycle != cached.points_per_cycle
)
self.cached_properties[properties.name] = copy.copy(properties)
if info_changed:
self.names = tuple()
self.labels = tuple()
self.units = tuple()
self.setpoint_labels = tuple()
self.setpoint_names = tuple()
self.setpoint_units = tuple()
self.shapes = tuple()
self.setpoints = tuple()
for properties in self.my_instrument.channel_properties.values():
if properties.active:
self.names += (properties.name, )
self.labels += (f"digitizer output {properties.name}", )
self.units += ("mV" , )
setpoint_names = tuple()
setpoint_labels = tuple()
setpoint_units = tuple()
if properties.data_mode in [DATA_MODE.FULL, DATA_MODE.AVERAGE_TIME]:
setpoint_names += (f"nth_cycle_{properties.name}", )
setpoint_labels += ("nth cycle", )
setpoint_units += ("#", )
if (properties.data_mode in [DATA_MODE.FULL, DATA_MODE.AVERAGE_CYCLES]
and (properties.acquisition_mode == MODES.NORMAL or properties.points_per_cycle > 1)):
setpoint_names += (f"time_ch_{properties.name}", )
setpoint_labels += ("time", )
setpoint_units += ("ns", )
self.setpoint_labels += (setpoint_labels, )
self.setpoint_names += (setpoint_names, )
self.setpoint_units += (setpoint_units, )
shape = tuple()
setpoints = tuple()
if properties.data_mode in [DATA_MODE.FULL, DATA_MODE.AVERAGE_TIME]:
shape += (properties.cycles, )
# setpoints need to be a tuple for hash look-up in qcodes
setpoints += (tuple(np.linspace(1, properties.cycles, properties.cycles)), )
elif (properties.data_mode == DATA_MODE.AVERAGE_CYCLES
and (properties.acquisition_mode == MODES.NORMAL or properties.points_per_cycle > 1)):
n = properties.points_per_cycle
shape += (n, )
setpoints += (tuple(np.linspace(properties.t_measure/n, properties.t_measure, n)), )
if (properties.data_mode == DATA_MODE.FULL
and (properties.acquisition_mode == MODES.NORMAL or properties.points_per_cycle > 1)):
n = properties.points_per_cycle
shape += (n, )
setpoints += (tuple(np.linspace(properties.t_measure/n, properties.t_measure, n)), )
self.shapes += (shape, )
self.setpoints += (setpoints, )
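# Shape bookkeeping example (hypothetical channel settings): with
# data_mode == DATA_MODE.FULL, acquisition_mode == MODES.NORMAL,
# cycles == 100 and points_per_cycle == 500, the branches above produce
# shape == (100, 500) with one setpoint axis for the cycle index and one
# for time in ns, matching the [repetitions, time] arrays of _get_data().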
@dataclass
class channel_properties:
"""
structure to save relevant information about a digitizer channel.
"""
name : str
number : int
active : bool = False
acquisition_mode : MODES = MODES.NORMAL
data_mode : DATA_MODE = DATA_MODE.FULL
points_per_cycle : int = 1
cycles : int = 0
full_scale : float = 0 #peak voltage
impedance: Optional[int] = None
coupling: Optional[int] = None
t_measure : float = 0 #measurement time in ns of the channel
sample_rate : float = 500e6
# daq configuration
prescaler : Optional[int] = None
daq_points_per_cycle: Optional[int] = None
daq_cycles: Optional[int] = None
# settings of downsampler-iq FPGA image
downsampled_rate : Optional[float] = None
power2decimation : int = 0
downsampling_factor : int = 1
lo_frequency : float = 0
lo_phase : float = 0
input_channel : int = 0
class SD_DIG(Instrument):
"""docstring for SD_DIG"""
def __init__(self, name, chassis, slot, n_channels = 4):
"""
init keysight digitizer
Args:
name (str) : name of the digitizer
chassis (int) : chassis number
slot (int) : slot in the chassis where the digitizer is.
n_channels (int) : number of channels on the digitizer card.
NOTE: channels start from number 1! (e.g. channel 1, channel 2, channel 3, channel 4)
"""
super().__init__(name)
self.SD_AIN = keysightSD1.SD_AIN()
dig_name = check_error(self.SD_AIN.getProductNameBySlot(chassis, slot), 'getProductNameBySlot')
check_error(self.SD_AIN.openWithSlot(dig_name, chassis, slot), 'openWithSlot')
firmware_version = self.SD_AIN.getFirmwareVersion()
major,minor,revision = firmware_version.split('.')
if (major == '02') != is_sd1_3x:
raise Exception(f'KeysightSD1 driver not compatible with firmware "{firmware_version}"')
self.chassis = chassis
self.slot = slot
self.operation_mode = OPERATION_MODES.SOFT_TRG
self.use_old_fpga_averaging = False
self.channel_properties = dict()
for i in range(n_channels):
properties = channel_properties(f'ch{i+1}', i + 1)
self.channel_properties[f'ch{i+1}'] = properties
self.add_parameter(
'measure',
inst_name = self.name,
parameter_class=line_trace,
raw =False
)
def close(self):
self.SD_AIN.close()
super().close()
def snapshot_base(self, update = False, params_to_skip_update = None):
param_to_skip = ['measure']
if params_to_skip_update is not None:
param_to_skip += params_to_skip_update
return super().snapshot_base(update, params_to_skip_update=param_to_skip)
def set_aquisition_mode(self, mode):
logging.warning('M3102A.set_aquisition_mode is deprecated. Use M3102A.set_acquisition_mode')
self.set_acquisition_mode(mode)
def set_acquisition_mode(self, mode):
"""
Acquisition modes to operate in:
0 : normal
1 : averaging of traces (Keysight DEMOD modules needed for this)
2 : IQ demodulation
3 : IQ demodulation I values only
"""
changed = False
for properties in self.channel_properties.values():
if properties.acquisition_mode != mode:
properties.acquisition_mode = mode
changed = True
if not is_sd1_3x and mode in [MODES.IQ_DEMODULATION, MODES.IQ_DEMOD_I_ONLY]:
raise Exception('IQ modes not supported for old Keysight firmware')
self.use_old_fpga_averaging = not is_sd1_3x and mode == MODES.AVERAGE
if changed:
self.measure._generate_parameter_info()
def set_channel_acquisition_mode(self, channel, mode):
"""
Acquisition modes to operate in:
0 : normal
1 : averaging of traces (Keysight DEMOD modules needed for this)
2 : IQ demodulation
3 : IQ demodulation I values only
"""
if not is_sd1_3x:
raise Exception('Operation not supported for old KeysightSD1')
properties = self.channel_properties[f'ch{channel}']
if properties.acquisition_mode != mode:
properties.acquisition_mode = mode
self.measure._generate_parameter_info()
def get_channel_acquisition_mode(self, channel):
return self.channel_properties[f'ch{channel}'].acquisition_mode
def set_data_handling_mode(self, data_mode):
"""
mode of handling data. Determines what will be saved.
0 : no averaging at all, get back full output data
1 : average on x axis --> average a full trace to a single point
2 : average on y axis --> average over all the iterations
3 : average on x and y axis, in other words, get back a single point
"""
changed = False
for properties in self.channel_properties.values():
if properties.data_mode != data_mode:
properties.data_mode = data_mode
changed = True
if changed:
self.measure._generate_parameter_info()
def set_channel_data_handling_mode(self, channel, data_mode):
"""
mode of handling data. Determines what will be saved.
0 : no averaging at all, get back full output data
1 : average on x axis --> average a full trace to a single point
2 : average on y axis --> average over all the iterations
3 : average on x and y axis, in other words, get back a single point
"""
properties = self.channel_properties[f'ch{channel}']
if properties.data_mode != data_mode:
properties.data_mode = data_mode
self.measure._generate_parameter_info()
def set_operating_mode(self, operation_mode):
"""
Modes for operation
Only affects daq start and daq trigger in get_raw().
Args:
operation_mode (int) : mode of operation
0 : use software triggers (start and trigger are called from software)
1 : use external triggering (only start is called from software)
2 : use HVI for triggering (no calls are made from software)
"""
self.operation_mode = operation_mode
def set_active_channels(self, channels):
"""
set the active channels:
Args:
channels (list) : channel numbers that need to be used
"""
changed = False
for channel_property in self.channel_properties.values():
active = channel_property.number in channels
if channel_property.active != active:
channel_property.active = active
changed = True
if changed:
self.measure._generate_parameter_info()
@property
def active_channels(self):
result = []
for properties in self.channel_properties.values():
if properties.active:
result.append(properties.number)
return result
def set_channel_properties(self, channel, V_range, impedance=1, coupling=0):
"""
quickly sets the relevant channel properties.
TODO: We need a validator on Vrange.
Args:
channel : channel number (1 to 4)
V_range: amplitude range +- X Volts
impedance: 0(HiZ), 1 (50 Ohm)
coupling: 0 (DC), 1 (AC)
"""
properties = self.channel_properties[f'ch{channel}']
if (properties.full_scale != V_range
or properties.impedance != impedance
or properties.coupling != coupling):
properties.full_scale = V_range
properties.coupling = coupling
properties.impedance = impedance
self.measure._generate_parameter_info()
self.SD_AIN.channelInputConfig(channel, V_range, impedance, coupling)
def set_daq_settings(self, channel, n_cycles, t_measure, sample_rate = 500e6,
DAQ_trigger_delay = 0, DAQ_trigger_mode = 1, downsampled_rate = None, power2decimation = 0):
"""
quick setup of the DAQ settings
Args:
n_cycles (int) : number of triggers to record.
t_measure (float) : time to measure (unit : ns)
sample_rate (float) : sample rate of the channel in Sa/s
DAQ_trigger_delay (int) : use HVI for this..
DAQ_trigger_mode (int) : 1 for HVI see manual for other options. (2 is external trigger)
downsampled_rate (float) : sample rate after downsampling in Sa/s, if None then downsampled_rate = 1/t_measure
power2decimation (int) : number of decimate-by-2 steps applied (with anti-alias filter)
"""
properties = self.channel_properties[f'ch{channel}']
properties.active = True
# find the appropriate prescaler if needed
if properties.acquisition_mode == MODES.NORMAL:
if downsampled_rate is not None or power2decimation > 0:
logging.warning(f'ch{channel} downsampled_rate and power2decimation are ignored in NORMAL mode')
downsampled_rate = None
power2decimation = 0
prescaler = max(0, int(500e6/sample_rate -1))
# The M3102A prescaler maximum value is 4.
if prescaler > 4:
raise ValueError(f'Sample rate {sample_rate} not supported. '
'M3102A frequency is limited to range [100..500] MHz')
sample_rate = 500e6/(prescaler+1)
if properties.sample_rate != sample_rate:
logging.info("Effective sampling frequency is set to {}Sa/s (prescaler = {})"
.format(si_format(sample_rate, precision=1), prescaler))
points_per_cycle = int(t_measure*1e-9*sample_rate)
daq_points_per_cycle = points_per_cycle
daq_cycles = n_cycles
eff_t_measure = points_per_cycle * 1e9 / sample_rate
downsampling_factor = 1
if is_iq_image_loaded(self.SD_AIN):
config_channel(self.SD_AIN, channel, properties.acquisition_mode, 1, 1, input_ch=0)
elif not is_sd1_3x:
if properties.acquisition_mode == MODES.AVERAGE:
prescaler = 0
sample_rate = 500e6
points_per_cycle = 1
daq_points_per_cycle = 10
daq_cycles = n_cycles
eff_t_measure = (t_measure//10) * 10
downsampling_factor = 1
else:
raise Exception(f'mode {properties.acquisition_mode} not supported for old firmware')
else:
if sample_rate != 500e6:
logging.warning(f'Sample rate is always 500 MSa/s in mode {properties.acquisition_mode}. '
f'Ignoring requested {sample_rate}')
prescaler = 0
sample_rate = 500e6
if downsampled_rate is None:
downsampling_factor = int(max(1, round(t_measure / 10 / 2**power2decimation)))
points_per_cycle = 1
else:
downsampling_factor = int(max(1, round(100e6 / downsampled_rate / 2**power2decimation)))
t_downsampling = downsampling_factor * 10 * 2**power2decimation
points_per_cycle = max(1, round(t_measure/t_downsampling))
eff_t_measure = points_per_cycle * downsampling_factor * 10 * 2**power2decimation
values_per_point = (
2
if properties.acquisition_mode in [MODES.IQ_DEMODULATION, MODES.IQ_INPUT_SHIFTED_IQ_OUT]
else 1)
daq_points_per_cycle = n_cycles * points_per_cycle * values_per_point
daq_cycles = 1
config_input_channel = properties.input_channel if properties.input_channel != 0 else channel
config_channel(self.SD_AIN, channel, properties.acquisition_mode, downsampling_factor, points_per_cycle,
LO_f=properties.lo_frequency, phase=properties.lo_phase,
p2decim=power2decimation, input_ch=config_input_channel)
# add extra points for acquisition alignment and minimum number of points
daq_points_per_cycle = self._get_aligned_npoints(daq_points_per_cycle)
if (properties.daq_points_per_cycle != daq_points_per_cycle
or properties.daq_cycles != daq_cycles):
logging.debug(f'ch{channel} config: {daq_points_per_cycle}, {daq_cycles}')
check_error(self.SD_AIN.DAQconfig(channel, daq_points_per_cycle, daq_cycles,
DAQ_trigger_delay, DAQ_trigger_mode), 'DAQconfig')
if properties.prescaler != prescaler:
check_error(self.SD_AIN.channelPrescalerConfig(channel, prescaler), 'channelPrescalerConfig')
# variables needed to generate correct setpoints and for data acquisition
properties.cycles = n_cycles
properties.points_per_cycle = points_per_cycle
properties.t_measure = eff_t_measure
properties.sample_rate = sample_rate
properties.prescaler = prescaler
properties.downsampled_rate = downsampled_rate
properties.power2decimation = power2decimation
properties.downsampling_factor = downsampling_factor
properties.daq_points_per_cycle = daq_points_per_cycle
properties.daq_cycles = daq_cycles
self.measure._generate_parameter_info()
def set_ext_digital_trigger(self, channel, delay = 0, mode=3):
"""
Set external trigger for current channel.
Args:
mode: 1 (trig high), 2 (trig low), 3 (rising edge), 4 (falling edge)
"""
logging.info('set ext trigger')
# Make sure input port is enabled
self.SD_AIN.triggerIOconfig(1)
# set up the triggering config
self.SD_AIN.DAQdigitalTriggerConfig(channel, 0 , mode)
# overwrite to be sure.
properties = self.channel_properties[f'ch{channel}']
points_per_cycle = properties.points_per_cycle
n_cycles = properties.cycles
# NOTE: add 1 point for odd sample numbers
check_error(self.SD_AIN.DAQconfig(channel, self._get_aligned_npoints(points_per_cycle), n_cycles, delay, 2),
'DAQconfig')
def daq_flush(self, daq, verbose=False):
"""
Flush the specified DAQ
Args:
daq (int) : the DAQ you are flushing
"""
self.SD_AIN.DAQflush(daq)
def daq_flush_multiple(self, daq_mask, verbose=False):
"""
Flush the specified DAQs
Args:
daq_mask (int) : bitmask of the DAQs you are flushing
"""
self.SD_AIN.DAQflushMultiple(daq_mask)
def daq_stop(self, daq, verbose=False):
""" Stop acquiring data on the specified DAQ
Args:
daq (int) : the DAQ you are stopping
"""
self.SD_AIN.DAQstop(daq)
def daq_stop_multiple(self, daq_mask, verbose=False):
""" Stop acquiring data on the specified DAQ
Args:
daq_mask (int) : the input DAQs you are stopping, composed as a bitmask
where the LSB is for DAQ_0, bit 1 is for DAQ_1 etc.
"""
self.SD_AIN.DAQstopMultiple(daq_mask)
def writeRegisterByNumber(self, regNumber, varValue):
"""
Write to a register of the digitizer, referring to it by register number
Args:
regNumber (int) : number of the register (0 to 16)
varValue (int/double) : value to be written into the register
Returns:
Value (int) : error out (negative number)
"""
if is_sd1_3x:
raise Exception('writeRegisterByNumber is not supported by KeysightSD1 3.x')
return self.SD_AIN.writeRegisterByNumber(regNumber, varValue)
def daq_start_multiple(self, daq_mask, verbose=False):
""" Start acquiring data or waiting for a trigger on the specified DAQs
Args:
daq_mask (int) : the input DAQs you are enabling, composed as a bitmask
where the LSB is for DAQ_0, bit 1 is for DAQ_1 etc.
"""
self.SD_AIN.DAQstartMultiple(daq_mask)
def daq_trigger_multiple(self, daq_mask, verbose=False):
""" Manually trigger the specified DAQs
Args:
daq_mask (int) : the DAQs you are triggering, composed as a bitmask
where the LSB is for DAQ_0, bit 1 is for DAQ_1 etc.
"""
self.SD_AIN.DAQtriggerMultiple(daq_mask)
###############################
# firmware specific functions # Only for FPGA image firmware 1.x
###############################
def set_MAV_filter(self, maf_length = 16, maf_modulo = 1, fourchannel = False):
"""
set the moving average filter
Args:
maf_length (int)
maf_modulo (int)
"""
if is_sd1_3x:
raise Exception('set_MAV_filter is only available with firmware 1.x')
# logging.info(f'MAV filter {maf_length}/{maf_modulo}')
self.SD_AIN.FPGAwritePCport(1, [maf_length], 0, 1, 0)
self.SD_AIN.FPGAwritePCport(1, [maf_modulo], 1, 1, 0)
if fourchannel:
self.SD_AIN.FPGAwritePCport(3, [maf_length], 0, 1, 0)
self.SD_AIN.FPGAwritePCport(3, [maf_modulo], 1, 1, 0)
# print('fourchannel MAV')
def set_meas_time(self, total_time, fourchannel = False):
"""
set the time span that should be sampled.
Args:
total_time (ns)
"""
if is_sd1_3x:
raise Exception('set_meas_time is only available with firmware 1.x')
# logging.info(f'meas time')
for channel_property in self.channel_properties.values():
if channel_property.active:
channel_property.t_measure = int(total_time/10)*10
self.SD_AIN.FPGAwritePCport(0,[ int(total_time/10)], 36, 1, 0)
if fourchannel:
self.SD_AIN.FPGAwritePCport(2,[ int(total_time/10)], 36, 1, 0)
# print('fourchannel meastime' + str(int(total_time/10)))
###############################
# firmware specific functions # Only for FPGA image firmware 2.x
###############################
def set_input_channel(self, channel, input_channel):
'''
Selects the input channel to use for averaging/downsampling and IQ demodulation.
Args:
channel (int): channel to configure, i.e. the DAQ buffer.
input_channel (int): input channel to use, i.e. the physical input.
'''
if not is_iq_image_loaded(self.SD_AIN):
raise Exception('IQ demodulation FPGA image not loaded')
properties = self.channel_properties[f'ch{channel}']
properties.input_channel = input_channel if input_channel is not None else channel
if properties.acquisition_mode == MODES.NORMAL:
logging.warning('Input channel selection has no effect when normal mode is selected')
dig_set_input_channel(self.SD_AIN, channel, properties.input_channel)
def set_demodulated_in(self, channel, phase, output_IQ):
'''
Sets demodulated I/Q input with phase shifting.
'''
if channel not in [1, 3]:
raise Exception(f'demodulated IQ input must be configured on channel 1 (=1+2) or 3 (=3+4)')
properties = self.channel_properties[f'ch{channel}']
mode = MODES.IQ_INPUT_SHIFTED_IQ_OUT if output_IQ else MODES.IQ_INPUT_SHIFTED_I_OUT
properties.acquisition_mode = mode
properties.lo_phase = phase
properties.lo_frequency = 0
dig_set_lo(self.SD_AIN, channel, 0, phase)
self.measure._generate_parameter_info()
def set_lo(self, channel, frequency, phase, input_channel=None):
'''
Set the local oscillator for IQ demodulation.
Args:
channel (int): channel to configure
frequency (float): demodulation frequency in Hz
phase (float): phase shift in degrees
input_channel (int): input channel to use for IQ demodulation.
'''
if not is_iq_image_loaded(self.SD_AIN):
raise Exception('IQ demodulation FPGA image not loaded')
properties = self.channel_properties[f'ch{channel}']
properties.lo_phase = phase
properties.lo_frequency = frequency
properties.input_channel = input_channel if input_channel is not None else channel
dig_set_lo(self.SD_AIN, channel, frequency, phase)
dig_set_input_channel(self.SD_AIN, channel, properties.input_channel)
def set_measurement_time_averaging(self, channel, t_measure):
'''
Changes the measurement time for the channel for AVERAGING and IQ modes.
It cannot be used in NORMAL mode or when downsample rate has been set, because
the number of measurements per trigger must be 1.
Args:
channel (int): channel
t_measure (float): measurement time in ns.
'''
properties = self.channel_properties[f'ch{channel}']
if properties.acquisition_mode == 0:
logging.warning(f'set_measurement_time_averaging() cannot be used in normal mode')
return
if properties.downsampled_rate is not None:
# points_per_cycle cannot change without reconfiguring DAQ.
logging.warning(f'set_measurement_time_averaging() cannot be used when downsampling ')
return
power2decimation = properties.power2decimation
downsampling_factor = int(max(1, round(t_measure / 10 / 2**power2decimation)))
eff_t_measure = downsampling_factor * 10 * 2**power2decimation
if eff_t_measure != properties.t_measure:
properties.downsampling_factor = downsampling_factor
properties.points_per_cycle = 1
properties.t_measure = eff_t_measure
logging.debug(f'ch{channel} t_measure:{properties.t_measure}')
dig_set_downsampler(self.SD_AIN, channel, downsampling_factor,
properties.points_per_cycle, power2decimation)
###########################################################
# automatic set function for common experimental settings #
###########################################################
def set_digitizer_software(self, t_measure, cycles, sample_rate= 500e6, data_mode = DATA_MODE.FULL,
channels = [1,2], Vmax = 2.0, fourchannel = False,
downsampled_rate = None, power2decimation = 0):
"""
quick set of minimal settings to make it work.
Args:
t_measure (float) : time to measure in ns
cycles (int) : number of cycles
sample_rate (float) : sample rate you want to use (in samples/second). The closest supported rate is chosen automatically.
data_mode (int) : data mode of the digitizer (output format)
channels (list) : channels you want to measure
Vmax (double) : maximum voltage of input (Vpeak)
downsampled_rate (float) : sample rate after downsampling in Sa/s, if None then downsampled_rate = 1/t_measure
power2decimation (int) : decimate data with 2**power2decimation
"""
logging.info(f'set digitizer software')
self.set_data_handling_mode(data_mode)
self.set_operating_mode(OPERATION_MODES.SOFT_TRG)
self.set_active_channels(channels)
for channel in channels:
self.set_channel_properties(channel, Vmax)
# print('sds input is: %.1f' % sample_rate)
self.set_daq_settings(channel, cycles, t_measure, sample_rate,
downsampled_rate=downsampled_rate, power2decimation=power2decimation)
if self.use_old_fpga_averaging:
# print('setting time and MAF')
self.set_meas_time(t_measure, fourchannel = fourchannel)
self.set_MAV_filter(16,1, fourchannel = fourchannel)
def set_digitizer_analog_trg(self, t_measure, cycles, sample_rate= 500e6, data_mode = DATA_MODE.FULL,
channels = [1,2], Vmax = 2.0, downsampled_rate = None, power2decimation = 0):
"""
quick set of minimal settings to make it work.
Args:
t_measure (float) : time to measure in ns
cycles (int) : number of cycles
sample_rate (float) : sample rate you want to use (in samples/second)
data_mode (int) : data mode of the digitizer (output format)
channels (list) : channels you want to measure
Vmax (float) : maximum voltage of input (Vpeak)
downsampled_rate (float) : sample rate after downsampling in Sa/s, if None then downsampled_rate = 1/t_measure
power2decimation (int) : decimate data with 2**power2decimation
"""
logging.info(f'set digitizer analog')
self.set_data_handling_mode(data_mode)
self.set_operating_mode(OPERATION_MODES.ANALOG_TRG)
self.set_active_channels(channels)
for channel in channels:
self.set_channel_properties(channel, Vmax)
self.set_daq_settings(channel, cycles, t_measure, sample_rate,
downsampled_rate=downsampled_rate, power2decimation=power2decimation)
self.set_ext_digital_trigger(channel)
if self.use_old_fpga_averaging:
# print('setting time and MAF')
self.set_meas_time(t_measure)
self.set_MAV_filter(16,1)
def set_digitizer_HVI(self, t_measure, cycles, sample_rate= 500e6, data_mode = DATA_MODE.FULL,
channels = [1,2], Vmax = 2.0, downsampled_rate = None, power2decimation = 0):
"""
quick set of minimal settings to make it work.
Args:
t_measure (float) : time to measure in ns
cycles (int) : number of cycles
sample_rate (float) : sample rate you want to use (in samples/second). The closest supported rate is chosen automatically.
data_mode (int) : data mode of the digitizer (output format)
channels (list) : channels you want to measure
Vmax (double) : maximum voltage of input (Vpeak)
downsampled_rate (float) : sample rate after downsampling in Sa/s, if None then downsampled_rate = 1/t_measure
power2decimation (int) : decimate data with 2**power2decimation
"""
logging.info(f'set digitizer HVI: {t_measure}, {downsampled_rate}, {channels}')
self.set_data_handling_mode(data_mode)
self.set_operating_mode(OPERATION_MODES.HVI_TRG)
self.set_active_channels(channels)
for channel in channels:
self.set_channel_properties(channel, Vmax)
self.set_daq_settings(channel, cycles, t_measure, sample_rate,
downsampled_rate=downsampled_rate, power2decimation=power2decimation)
def _get_aligned_npoints(self, npt):
# add 1 point for odd sample numbers
# SD1 3.1 requires at least 30 points.
return max(30, (npt + 1)//2 * 2)
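# Alignment examples (hypothetical inputs): _get_aligned_npoints(7) == 30
# (minimum of 30 enforced), _get_aligned_npoints(101) == 102 (rounded up to
# even), _get_aligned_npoints(40) == 40 (already even and above the minimum).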
if __name__ == '__main__':
#%%
# load digitizer
# digitizer1.close()
digitizer1 = SD_DIG("digitizer1", chassis = 1, slot = 6)
# clear all ram (normally not needed, but just to be sure)
digitizer1.daq_flush(1)
digitizer1.daq_flush(2)
digitizer1.daq_flush(3)
digitizer1.daq_flush(4)
# digitizer1.set_acquisition_mode(MODES.AVERAGE)
#%%
# simple example
digitizer1.set_digitizer_software(1e3, 10, sample_rate=500e6, data_mode=DATA_MODE.AVERAGE_TIME_AND_CYCLES, channels=[1,2], Vmax=0.25, fourchannel=False)
print(digitizer1.measure())
print(digitizer1.snapshot())
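#%%
# Hypothetical IQ demodulation example (not from the original script; it
# assumes the downsampler-IQ FPGA image is loaded and uses made-up values):
# digitizer1.set_channel_acquisition_mode(1, MODES.IQ_DEMODULATION)
# digitizer1.set_digitizer_HVI(10e3, 100, data_mode=DATA_MODE.AVERAGE_TIME, channels=[1])
# digitizer1.set_lo(1, frequency=50e6, phase=0)
# iq_data = digitizer1.measure()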
####################################
# settings (feel free to change) #
# ####################################
# t_list = np.logspace(2.3, 2.7, 20)
# res =[]
# for t in t_list:
# cycles = 1000
# t_measure = t #e3 # ns
# ####################################
# # show some multiparameter properties
# # print(digitizer1.measure.shapes)
# # print(digitizer1.measure.setpoint_units)
# # print(digitizer1.measure.setpoints)
# # # measure the parameter
# digitizer1.set_digitizer_software(t_measure, cycles, data_mode=DATA_MODE.FULL, channels = [1,2])
# digitizer1.set_MAV_filter()
# data = digitizer1.measure()
# # print(data)
# # plt.clf()
# # plt.plot(data[0][:,2], 'o-')
# # plt.plot(data[0][:,3], 'o-')
# # plt.plot(data[1], 'o-')
# # print(data[0].shape, data[1].shape)
# res.append(np.mean(data[1]))
# #t_list = t_list-166
# #res = np.array(res)/np.array(t_list)
# plt.figure(2)
# plt.clf()
# plt.plot(t_list, res, 'o-')
#def fit_func(x, m, q):
# return x*m+q
#
#import scipy
#param, var = scipy.optimize.curve_fit(fit_func, t_list, res)
#plt.plot(np.linspace(0, max(t_list), 50), fit_func(np.linspace(0, max(t_list), 50), *param))
#plt.xlabel('Integration time (ns)')
#plt.title('Intercept: %.2f' %param[1])
[file stats: avg line length 41.9 | max line length 156 | alphanum fraction 0.613]
[next file: ros/src/tl_detector/tl_detector.py | Vincentfangao/CarND-Capstone @ 4de7fa0c | MIT | 9,128 bytes | Python | 10 stars]
#!/usr/bin/env python
import rospy
from std_msgs.msg import Int32
from geometry_msgs.msg import PoseStamped, Pose
from styx_msgs.msg import TrafficLightArray, TrafficLight
from styx_msgs.msg import Lane
from sensor_msgs.msg import Image
from cv_bridge import CvBridge
from light_classification.tl_classifier import TLClassifier
from scipy.spatial import KDTree
import tf
import cv2
import yaml
import os
import calendar
import time
STATE_COUNT_THRESHOLD = 2
GENERATE_TRAIN_IMGS = False
class TLDetector(object):
def __init__(self):
rospy.init_node('tl_detector')
self.pose = None
self.waypoints = None
self.waypoints_2d = None
self.waypoint_tree = None
self.camera_image = None
self.lights = []
self.is_site = None
sub1 = rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
sub2 = rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
'''
/vehicle/traffic_lights provides you with the location of the traffic light in 3D map space and
helps you acquire an accurate ground truth data source for the traffic light
classifier by sending the current color state of all traffic lights in the
simulator. When testing on the vehicle, the color state will not be available. You'll need to
rely on the position of the light and the camera image to predict it.
'''
sub3 = rospy.Subscriber('/vehicle/traffic_lights', TrafficLightArray, self.traffic_cb)
# Consider changing subscription to 'image_raw' for processing instead
sub6 = rospy.Subscriber('/image_color', Image, self.image_cb)
config_string = rospy.get_param("/traffic_light_config")
self.config = yaml.safe_load(config_string)
self.is_site = self.config['is_site']
self.is_simulator = not self.is_site
self.upcoming_red_light_pub = rospy.Publisher('/traffic_waypoint', Int32, queue_size=1)
self.bridge = CvBridge()
self.light_classifier = TLClassifier()
self.listener = tf.TransformListener()
self.state = TrafficLight.UNKNOWN
self.last_state = TrafficLight.UNKNOWN
self.last_wp = -1
self.state_count = 0
rospy.spin()
def pose_cb(self, msg):
self.pose = msg
def waypoints_cb(self, waypoints):
self.waypoints = waypoints
if not self.waypoints_2d:
self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in waypoints.waypoints]
self.waypoint_tree = KDTree(self.waypoints_2d)
def traffic_cb(self, msg):
self.lights = msg.lights
def create_training_data(self, state):
f_name = "sim_tl_{}_{}.jpg".format(calendar.timegm(time.gmtime()), self.light_label(state))
out_dir = './data/train/sim'
if not os.path.exists(out_dir):
os.makedirs(out_dir)
cv_image = self.bridge.imgmsg_to_cv2(self.camera_image)
cv_image = cv_image[:, :, ::-1]
cv2.imwrite('{}/{}'.format(out_dir, f_name), cv_image)
def image_cb(self, msg):
"""Identifies red lights in the incoming camera image and publishes the index
of the waypoint closest to the red light's stop line to /traffic_waypoint
Args:
msg (Image): image from car-mounted camera
"""
rospy.loginfo("=== image_cb() =============================")
self.has_image = True
self.camera_image = msg
light_wp, state = self.process_traffic_lights()
'''
Publish upcoming red lights at camera frequency.
Each predicted state has to occur `STATE_COUNT_THRESHOLD` number
of times till we start using it. Otherwise the previous stable state is
used.
'''
if self.state != state:
self.state_count = 0
self.state = state
elif self.state_count >= STATE_COUNT_THRESHOLD:
self.last_state = self.state
if GENERATE_TRAIN_IMGS:
# Store images and state for training data for simulator
self.create_training_data(state)
light_wp = light_wp if state == TrafficLight.RED else -1
self.last_wp = light_wp
self.upcoming_red_light_pub.publish(Int32(light_wp))
rospy.loginfo("publish upcoming red light wp index: {}".format(light_wp))
else:
self.upcoming_red_light_pub.publish(Int32(self.last_wp))
rospy.loginfo("publish upcoming red light wp index: {}".format(self.last_wp))
self.state_count += 1
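# Debounce walkthrough (illustrative, with STATE_COUNT_THRESHOLD == 2): for a
# stream of detections [RED, RED, RED], the first RED resets state_count to 0,
# the second republishes last_wp (count still below threshold), and only the
# third updates last_state/last_wp and publishes the new stop-line index.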
def get_closest_waypoint(self, x, y):
"""Identifies the closest path waypoint to the given position
https://en.wikipedia.org/wiki/Closest_pair_of_points_problem
Args:
x: x-position to match a waypoint to
y: y-position to match a waypoint to
Returns:
int: index of the closest waypoint in self.waypoints
"""
# Assumes waypoint_tree (KDTree) has already been created in waypoints_cb()
closest_wp_indx = self.waypoint_tree.query([x, y], 1)[1]
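# KDTree.query() returns a (distance, index) pair for k=1; the [1] above
# keeps only the waypoint index.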
#rospy.loginfo('Closest waypoint index to car position (%s, %s): (%s)',
# x, y, closest_wp_indx)
return closest_wp_indx
def get_light_state(self, light):
"""Determines the current color of the traffic light
Args:
light (TrafficLight): light to classify
Returns:
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
# THIS BLOCK IS FOR TESTING ONLY - using light state provided
# from subscription to /vehicle/traffic_lights; this traffic light
# state is available only in simulator and will NOT be available
# in real live test track
#rospy.loginfo('Light state: %s', light.state)
#return light.state
### REAL LIGHT CLASSIFICATION using the trained classifier ###
if not self.has_image:
self.prev_light_loc = None
# returning False would equal TrafficLight.RED (0); report UNKNOWN instead
return TrafficLight.UNKNOWN
cv_image = self.bridge.imgmsg_to_cv2(self.camera_image, 'rgb8')
#cv_image = cv_image[:, :, ::-1] # switch layers B and R from BGR to RGB
#cv_image = cv2.cvtColor(cv_image, cv2.COLOR_BGR2RGB)
classified_state = self.light_classifier.get_classification(cv_image)
if self.is_simulator:
rospy.loginfo("Sim ground truth state: {}".format(
self.light_label(light.state)))
rospy.loginfo("Classified state: {}".format(
self.light_label(classified_state)))
return classified_state
def light_label(self, state):
if state == TrafficLight.RED:
return "RED"
elif state == TrafficLight.YELLOW:
return "YELLOW"
elif state == TrafficLight.GREEN:
return "GREEN"
return "UNKNOWN"
def process_traffic_lights(self):
"""Finds closest visible traffic light, if one exists, and determines its
location and color
Returns:
int: index of the waypoint closest to the upcoming stop line for a traffic light (-1 if none exists)
int: ID of traffic light color (specified in styx_msgs/TrafficLight)
"""
closest_upcoming_light = None
stop_line_wp_indx = None
# List of positions that correspond to the line to stop in front of for a given intersection
stop_line_positions = self.config['stop_line_positions']
if(self.pose):
car_wp_indx = self.get_closest_waypoint(self.pose.pose.position.x,
self.pose.pose.position.y)
#rospy.loginfo("== process_traffic_lights() ==================")
#Find the closest visible traffic light (if one exists)
indx_dist = len(self.waypoints.waypoints)
for i, light in enumerate(self.lights):
# Get stop line waypoint index
stop_line_pose = stop_line_positions[i]
wp_indx = self.get_closest_waypoint(stop_line_pose[0],
stop_line_pose[1])
# Find the closest stop line waypoint index
d = wp_indx - car_wp_indx
#rospy.loginfo("light: {}, car_wp_indx: {}, wp_indx: {}, d: {}".format(
# i, car_wp_indx, wp_indx, d))
if d >= 0 and d < indx_dist:
indx_dist = d
closest_upcoming_light = light
stop_line_wp_indx = wp_indx
if closest_upcoming_light:
state = self.get_light_state(closest_upcoming_light)
rospy.loginfo(">> closest stop_line_wp_indx: {}, state: {}".format(
stop_line_wp_indx, self.light_label(state)))
return stop_line_wp_indx, state
return -1, TrafficLight.UNKNOWN
if __name__ == '__main__':
try:
TLDetector()
except rospy.ROSInterruptException:
rospy.logerr('Could not start traffic node.')
[file stats: avg line length 37.0 | max line length 132 | alphanum fraction 0.629]
[next file: tests/test_client_regress/models.py | jpmallarino/django @ 659d2421 | BSD-3-Clause, 0BSD | 357 bytes | Python | 16 stars]
from django.contrib.auth.models import AbstractBaseUser, BaseUserManager
from django.db import models
class CustomUser(AbstractBaseUser):
email = models.EmailField(verbose_name="email address", max_length=255, unique=True)
custom_objects = BaseUserManager()
USERNAME_FIELD = "email"
class Meta:
app_label = "test_client_regress"
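# Minimal usage sketch (hypothetical, not part of the test models): since the
# plain BaseUserManager here defines no create_user() helper, a user would be
# built directly, e.g.:
# user = CustomUser(email='someone@example.com')
# user.set_password('secret')
# user.save()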
[file stats: avg line length 27.5 | max line length 88 | alphanum fraction 0.762]
[next file: shade/tests/functional/test_volume.py | noironetworks/shade @ e46878ba | Apache-2.0 | 6,195 bytes | Python]
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
test_volume
----------------------------------
Functional tests for `shade` block storage methods.
"""
from fixtures import TimeoutException
from openstack import utils
from testtools import content
from shade import exc
from shade.tests.functional import base
class TestVolume(base.BaseFunctionalTestCase):
# Creating and deleting volumes is slow
TIMEOUT_SCALING_FACTOR = 1.5
def setUp(self):
super(TestVolume, self).setUp()
self.skipTest('Volume functional tests temporarily disabled')
if not self.user_cloud.has_service('volume'):
self.skipTest('volume service not supported by cloud')
def test_volumes(self):
'''Test volume and snapshot functionality'''
volume_name = self.getUniqueString()
snapshot_name = self.getUniqueString()
self.addDetail('volume', content.text_content(volume_name))
self.addCleanup(self.cleanup, volume_name, snapshot_name=snapshot_name)
volume = self.user_cloud.create_volume(
display_name=volume_name, size=1)
snapshot = self.user_cloud.create_volume_snapshot(
volume['id'],
display_name=snapshot_name
)
ret_volume = self.user_cloud.get_volume_by_id(volume['id'])
self.assertEqual(volume['id'], ret_volume['id'])
volume_ids = [v['id'] for v in self.user_cloud.list_volumes()]
self.assertIn(volume['id'], volume_ids)
snapshot_list = self.user_cloud.list_volume_snapshots()
snapshot_ids = [s['id'] for s in snapshot_list]
self.assertIn(snapshot['id'], snapshot_ids)
ret_snapshot = self.user_cloud.get_volume_snapshot_by_id(
snapshot['id'])
self.assertEqual(snapshot['id'], ret_snapshot['id'])
self.user_cloud.delete_volume_snapshot(snapshot_name, wait=True)
self.user_cloud.delete_volume(volume_name, wait=True)
def test_volume_to_image(self):
'''Test volume export to image functionality'''
volume_name = self.getUniqueString()
image_name = self.getUniqueString()
self.addDetail('volume', content.text_content(volume_name))
self.addCleanup(self.cleanup, volume_name, image_name=image_name)
volume = self.user_cloud.create_volume(
display_name=volume_name, size=1)
image = self.user_cloud.create_image(
image_name, volume=volume, wait=True)
volume_ids = [v['id'] for v in self.user_cloud.list_volumes()]
self.assertIn(volume['id'], volume_ids)
image_list = self.user_cloud.list_images()
image_ids = [s['id'] for s in image_list]
self.assertIn(image['id'], image_ids)
self.user_cloud.delete_image(image_name, wait=True)
self.user_cloud.delete_volume(volume_name, wait=True)
def cleanup(self, volume, snapshot_name=None, image_name=None):
# Need to delete snapshots before volumes
if snapshot_name:
snapshot = self.user_cloud.get_volume_snapshot(snapshot_name)
if snapshot:
self.user_cloud.delete_volume_snapshot(
snapshot_name, wait=True)
if image_name:
image = self.user_cloud.get_image(image_name)
if image:
self.user_cloud.delete_image(image_name, wait=True)
if not isinstance(volume, list):
self.user_cloud.delete_volume(volume, wait=True)
else:
# We have more than one volume to clean up - submit all of the
# deletes without wait, then poll until none of them are found
# in the volume list anymore
for v in volume:
self.user_cloud.delete_volume(v, wait=False)
try:
for count in utils.iterate_timeout(
180, "Timeout waiting for volume cleanup"):
found = False
for existing in self.user_cloud.list_volumes():
for v in volume:
if v['id'] == existing['id']:
found = True
break
if found:
break
if not found:
break
except (exc.OpenStackCloudTimeout, TimeoutException):
# NOTE(slaweq): oops, some volumes were still not removed,
# so we should try to force-delete them once again and move
# forward
for existing in self.user_cloud.list_volumes():
for v in volume:
if v['id'] == existing['id']:
self.operator_cloud.delete_volume(
v, wait=False, force=True)
def test_list_volumes_pagination(self):
'''Test pagination for list volumes functionality'''
volumes = []
# the number of created volumes needs to be higher than
# CONF.osapi_max_limit but not higher than the volume quota for
# the test user in the tenant (the default quota is 10)
num_volumes = 8
for i in range(num_volumes):
name = self.getUniqueString()
v = self.user_cloud.create_volume(display_name=name, size=1)
volumes.append(v)
self.addCleanup(self.cleanup, volumes)
result = []
for i in self.user_cloud.list_volumes():
if i['name'] and i['name'].startswith(self.id()):
result.append(i['id'])
self.assertEqual(
sorted([i['id'] for i in volumes]),
sorted(result))
[file stats: avg line length 40.8 | max line length 79 | alphanum fraction 0.616]
[next file: main/Model.py | RaminMammadzada/wc-product-list-generator @ 0ed41587 | MIT | 1,116 bytes | Python | 15 stars]
class Model(object):
def __init__(self, modelID, designedYear="2020"):
self.factory = ""
self.designedYear = designedYear
self.modelID = modelID
self.price = ""
self.season = ""
self.style = ""
self.rubberType = ""
def setModelID(self, modelID):
self.modelID = modelID
def getModelID(self):
return self.modelID
def setDesignedYear(self, year):
self.designedYear = year
def getDesignedYear(self):
return self.designedYear
def setPrice(self, price):
self.price = price
def getPrice(self):
return self.price
def setFactory(self, factory):
self.factory = factory
def getFactory(self):
return self.factory
def setSeason(self, season):
self.season = season
def getSeason(self):
return self.season
def setStyle(self, style):
self.style = style
def getStyle(self):
return self.style
def setRubberType(self, rubberType):
self.rubberType = rubberType
def getRubberType(self):
return self.rubberType
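# Minimal usage sketch (values are made up):
# m = Model('WC-1001', designedYear='2021')
# m.setPrice('49.99')
# m.setSeason('winter')
# print(m.getModelID(), m.getDesignedYear(), m.getPrice())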
[file stats: avg line length 21.5 | max line length 53 | alphanum fraction 0.601]
[next file: satchmo/apps/tax/modules/us_sst/models.py | predatell/satchmo @ 6ced1f84 | BSD-3-Clause | 20,319 bytes | Python]
# coding=UTF-8
from django.db import models
from django.utils.translation import ugettext, ugettext_lazy as _
from django.utils.encoding import python_2_unicode_compatible
from product.models import TaxClass
from l10n.models import AdminArea, Country
#from satchmo_store.shop.models import Order
#from satchmo_store.shop.signals import order_success
#from tax import Processor
from datetime import date as _date
try:
from decimal import Decimal
except:
from django.utils._decimal import Decimal
@python_2_unicode_compatible
class Taxable(models.Model):
"""
Map that says what items are taxable in a jurisdiction.
To use properly, assign products to a meaningful TaxClass, such as 'Shipping',
'Food', 'Default'. Then create rules for the jurisdictions where you are
required to collect tax. If for example, you are taxing objects in two states
and 'Food' is taxable in one and not the other, but shipping is the other
way around, you would need to create the following entries:
food = TaxClass(...)
default = TaxClass(...)
shipping = TaxClass(...)
one_state = AdminArea(...)
two_state = AdminArea(...)
usa = Country(...)
Taxable(taxClass=default, isTaxable=True, taxZone=one_state, taxCountry=usa)
Taxable(taxClass=food, isTaxable=False, useFood=True, taxZone=one_state, taxCountry=usa)
Taxable(taxClass=shipping, isTaxable=True, taxZone=one_state, taxCountry=usa)
Taxable(taxClass=default, isTaxable=True, taxZone=two_state, taxCountry=usa)
Taxable(taxClass=food, isTaxable=True, useFood=True, taxZone=two_state, taxCountry=usa)
Taxable(taxClass=shipping, isTaxable=False, taxZone=two_state, taxCountry=usa)
Laws vary drastically from state to state, so please make sure to create the
needed TaxClasses for all objects whose taxability varies across the
jurisdictions to which you must submit.
If you do not at least create a 'Default' entry for a state, then you will
not be collecting any taxes for that state. Only create entries for states
where you are obligated to collect and report taxes.
SST defines food rates and interstate vs. intrastate rates. You may override
these, otherwise taxes will be charged at the non-food, intrastate rate by default.
WARNING: If a product is taxable in ANY jurisdiction, it must be set taxable
in the product. You disable it per-jurisdiction by disabling it here. You
cannot enable it here if it is disabled on the product itself.
"""
taxClass = models.ForeignKey(TaxClass, verbose_name=_('Tax Class'), on_delete=models.CASCADE)
taxZone = models.ForeignKey(AdminArea, blank=True, null=True,
verbose_name=_('Tax Zone'), on_delete=models.SET_NULL)
taxCountry = models.ForeignKey(Country, blank=True, null=True,
verbose_name=_('Tax Country'), on_delete=models.SET_NULL)
isTaxable = models.BooleanField(verbose_name=_('Taxable?'), default=True, )
useIntrastate = models.BooleanField(verbose_name=_('Use Intrastate rate instead of Interstate?'),
default=True)
useFood = models.BooleanField(verbose_name=_('Use food/drug rate instead of general?'),
default=False)
def _country(self):
if self.taxZone:
return self.taxZone.country.name
else:
return self.taxCountry.name
country = property(_country)
#def _display_percentage(self):
# return "%#2.2f%%" % (100*self.percentage)
#_display_percentage.short_description = _('Percentage')
#display_percentage = property(_display_percentage)
def __str__(self):
return "%s - %s = %s" % (self.taxClass,
self.taxZone and self.taxZone or self.taxCountry,
self.isTaxable)
class Meta:
verbose_name = _("Taxable Class")
verbose_name_plural = _("Taxable Classes")
JURISDICTION_CHOICES = (
(0, 'County'),
(1, 'City'),
(2, 'Town'),
(3, 'Village'),
(4, 'Borough'),
(5, 'Township'),
(9, 'Other Municipality'),
(10, 'School District'),
(11, 'Junior Colleges'),
(19, 'Other Schools'),
(20, 'Water Control'),
(21, 'Utility District'),
(22, 'Sanitation'),
(23, 'Water or Sewer District'),
(24, 'Reclamation District'),
(25, 'Fire or Police'),
(26, 'Roads or Bridges'),
(27, 'Hospitals'),
(29, 'Other Municipal Services'),
(40, 'Township and County'),
(41, 'City and School'),
(42, 'County collected by Other Taxing Authority'),
(43, 'State and County'),
(44, 'Central Collection Taxing Authority'),
(45, 'State Taxing Authority'),
(49, 'Other Combination Collection'),
(50, 'Bond Authority'),
(51, 'Annual County Bond Authority'),
(52, 'Semi-annual County Bond Authority'),
(53, 'Annual City Bond Authority'),
(54, 'Semi-annual City Bond Authority'),
(59, 'Other Bond Authority'),
(61, 'Assessment District'),
(62, 'Homeowner’s Association'),
(63, 'Special District'),
(69, 'Other Special Districts'),
(70, 'Central Appraisal Taxing Authority'),
(71, 'Unsecured County Taxes'),
(72, 'Mobile Home Authority'),
(79, 'Other Special Applications'),
)
@python_2_unicode_compatible
class TaxRate(models.Model):
"""
Records for tax rates in the default SST format as defined at:
http://www.streamlinedsalestax.org/Technology/RatesandBoundariesClean082605.pdf
"""
state = models.IntegerField(verbose_name=_('FIPS State Code'), db_index=True)
jurisdictionType = models.IntegerField(choices=JURISDICTION_CHOICES, verbose_name=_('Type'))
jurisdictionFipsCode = models.CharField(max_length=5,
verbose_name=_('FIPS Code'), db_index=True)
generalRateIntrastate = models.DecimalField(max_digits=8, decimal_places=7,
verbose_name=_('General Tax Rate - Intrastate'))
generalRateInterstate = models.DecimalField(max_digits=8, decimal_places=7,
verbose_name=_('General Tax Rate - Interstate'))
foodRateIntrastate = models.DecimalField(max_digits=8, decimal_places=7,
verbose_name=_('Food/Drug Tax Rate - Intrastate'))
foodRateInterstate = models.DecimalField(max_digits=8, decimal_places=7,
verbose_name=_('Food/Drug Tax Rate - Interstate'))
startDate = models.DateField(verbose_name=_('Effective Start Date'))
endDate = models.DateField(verbose_name=_('Effective End Date'))
class Meta:
verbose_name = _("Tax Rate")
verbose_name_plural = _("Tax Rates")
def __str__(self):
return 'State %d: Jurisdiction: %s(%s)' % (
self.state,
self.jurisdictionFipsCode,
self.get_jurisdictionType_display(),
)
def rate(self, intrastate=False, food=False):
if intrastate:
if food:
return self.foodRateIntrastate
else:
return self.generalRateIntrastate
else:
if food:
return self.foodRateInterstate
else:
return self.generalRateInterstate
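# Selection matrix for rate() (follows directly from the branches above):
# (intrastate=True,  food=False) -> generalRateIntrastate
# (intrastate=True,  food=True)  -> foodRateIntrastate
# (intrastate=False, food=True)  -> foodRateInterstate
# (intrastate=False, food=False) -> generalRateInterstate (the default)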
TAX_BOUNDRY_CHOICES = (
('Z', 'Zip-5 Record'),
('4', 'Zip+4 Record'),
('A', 'Address Record'),
)
ODD_EVEN_CHOICES = (
('O', 'Odd'),
('E', 'Even'),
('B', 'Both'),
)
@python_2_unicode_compatible
class TaxBoundry(models.Model):
"""
Records for tax boundaries in the default SST format as defined at:
http://www.streamlinedsalestax.org/Technology/RatesandBoundariesClean082605.pdf
"""
recordType = models.CharField(max_length=1, choices=TAX_BOUNDRY_CHOICES,
verbose_name=_('Boundry Type'))
startDate = models.DateField(verbose_name=_('Effective Start Date'))
endDate = models.DateField(verbose_name=_('Effective End Date'))
lowAddress = models.IntegerField(blank=True, null=True,
verbose_name=_('Low Address Range'))
highAddress = models.IntegerField(blank=True, null=True,
verbose_name=_('High Address Range'))
oddEven = models.CharField(max_length=1, blank=True, null=True, choices=ODD_EVEN_CHOICES,
verbose_name=_('Odd / Even Range Indicator'))
streetPreDirection = models.CharField(max_length=2, blank=True, null=True,
verbose_name=_('Street Pre-Directional Abbr.'))
streetName = models.CharField(max_length=20, blank=True, null=True,
verbose_name=_('Street Name'))
streetSuffix = models.CharField(max_length=4, blank=True, null=True,
verbose_name=_('Street Suffix Abbr.'))
streetPostDirection = models.CharField(max_length=2, blank=True, null=True,
verbose_name=_('Street Post Directional'))
addressSecondaryAbbr = models.CharField(max_length=4, blank=True, null=True,
verbose_name=_('Address Secondary - Abbr.'))
addressSecondaryLow = models.IntegerField(blank=True, null=True,
verbose_name=_('Address Secondary - Low'))
addressSecondaryHigh = models.IntegerField(blank=True, null=True,
verbose_name=_('Address Secondary - High'))
addressSecondaryOddEven = models.CharField(max_length=1, blank=True, null=True,
choices=ODD_EVEN_CHOICES, verbose_name=_('Address Secondary - Odd/Even'))
cityName = models.CharField(max_length=28, blank=True, null=True,
verbose_name=_('City Name'))
zipCode = models.IntegerField(blank=True, null=True,
verbose_name=_('Zip Code'))
plus4 = models.IntegerField(blank=True, null=True,
verbose_name=_('Zip Code - Plus 4'))
zipCodeLow = models.IntegerField(blank=True, null=True,
verbose_name=_('Zip Code - Low'), db_index=True)
zipExtensionLow = models.IntegerField(blank=True, null=True,
verbose_name=_('Zip Code Extension - Low'), db_index=True)
zipCodeHigh = models.IntegerField(blank=True, null=True,
verbose_name=_('Zip Code - High'), db_index=True)
zipExtensionHigh = models.IntegerField(blank=True, null=True,
verbose_name=_('Zip Code Extension - High'), db_index=True)
serCode = models.CharField(max_length=5, verbose_name=_('Composite SER Code'), blank=True, null=True)
fipsStateCode = models.CharField(max_length=2, blank=True, null=True,
verbose_name=_('FIPS State Code'))
fipsStateIndicator = models.CharField(max_length=2, blank=True, null=True,
verbose_name=_('FIPS State Indicator'))
fipsCountyCode = models.CharField(max_length=3, blank=True, null=True,
verbose_name=_('FIPS County Code'))
fipsPlaceCode = models.CharField(max_length=5, blank=True, null=True,
verbose_name=_('FIPS Place Code'))
fipsPlaceType = models.CharField(max_length=2, blank=True, null=True,
verbose_name=_('FIPS Place Type'), choices=JURISDICTION_CHOICES)
special_1_code = models.CharField(max_length=5, verbose_name=_('FIPS Special 1 code'), blank=True, null=True)
special_1_type = models.CharField(max_length=2, verbose_name=_('FIPS Special 1 type'), blank=True, null=True, choices=JURISDICTION_CHOICES)
special_2_code = models.CharField(max_length=5, verbose_name=_('FIPS Special 2 code'), blank=True, null=True)
special_2_type = models.CharField(max_length=2, verbose_name=_('FIPS Special 2 type'), blank=True, null=True, choices=JURISDICTION_CHOICES)
special_3_code = models.CharField(max_length=5, verbose_name=_('FIPS Special 3 code'), blank=True, null=True)
special_3_type = models.CharField(max_length=2, verbose_name=_('FIPS Special 3 type'), blank=True, null=True, choices=JURISDICTION_CHOICES)
special_4_code = models.CharField(max_length=5, verbose_name=_('FIPS Special 4 code'), blank=True, null=True)
special_4_type = models.CharField(max_length=2, verbose_name=_('FIPS Special 4 type'), blank=True, null=True, choices=JURISDICTION_CHOICES)
special_5_code = models.CharField(max_length=5, verbose_name=_('FIPS Special 5 code'), blank=True, null=True)
special_5_type = models.CharField(max_length=2, verbose_name=_('FIPS Special 5 type'), blank=True, null=True, choices=JURISDICTION_CHOICES)
special_6_code = models.CharField(max_length=5, verbose_name=_('FIPS Special 6 code'), blank=True, null=True)
special_6_type = models.CharField(max_length=2, verbose_name=_('FIPS Special 6 type'), blank=True, null=True, choices=JURISDICTION_CHOICES)
special_7_code = models.CharField(max_length=5, verbose_name=_('FIPS Special 7 code'), blank=True, null=True)
special_7_type = models.CharField(max_length=2, verbose_name=_('FIPS Special 7 type'), blank=True, null=True, choices=JURISDICTION_CHOICES)
special_8_code = models.CharField(max_length=5, verbose_name=_('FIPS Special 8 code'), blank=True, null=True)
special_8_type = models.CharField(max_length=2, verbose_name=_('FIPS Special 8 type'), blank=True, null=True, choices=JURISDICTION_CHOICES)
special_9_code = models.CharField(max_length=5, verbose_name=_('FIPS Special 9 code'), blank=True, null=True)
special_9_type = models.CharField(max_length=2, verbose_name=_('FIPS Special 9 type'), blank=True, null=True, choices=JURISDICTION_CHOICES)
special_10_code = models.CharField(max_length=5, verbose_name=_('FIPS Special 10 code'), blank=True, null=True)
special_10_type = models.CharField(max_length=2, verbose_name=_('FIPS Special 10 type'), blank=True, null=True, choices=JURISDICTION_CHOICES)
special_11_code = models.CharField(max_length=5, verbose_name=_('FIPS Special 11 code'), blank=True, null=True)
special_11_type = models.CharField(max_length=2, verbose_name=_('FIPS Special 11 type'), blank=True, null=True, choices=JURISDICTION_CHOICES)
special_12_code = models.CharField(max_length=5, verbose_name=_('FIPS Special 12 code'), blank=True, null=True)
special_12_type = models.CharField(max_length=2, verbose_name=_('FIPS Special 12 type'), blank=True, null=True, choices=JURISDICTION_CHOICES)
special_13_code = models.CharField(max_length=5, verbose_name=_('FIPS Special 13 code'), blank=True, null=True)
special_13_type = models.CharField(max_length=2, verbose_name=_('FIPS Special 13 type'), blank=True, null=True, choices=JURISDICTION_CHOICES)
special_14_code = models.CharField(max_length=5, verbose_name=_('FIPS Special 14 code'), blank=True, null=True)
special_14_type = models.CharField(max_length=2, verbose_name=_('FIPS Special 14 type'), blank=True, null=True, choices=JURISDICTION_CHOICES)
special_15_code = models.CharField(max_length=5, verbose_name=_('FIPS Special 15 code'), blank=True, null=True)
special_15_type = models.CharField(max_length=2, verbose_name=_('FIPS Special 15 type'), blank=True, null=True, choices=JURISDICTION_CHOICES)
special_16_code = models.CharField(max_length=5, verbose_name=_('FIPS Special 16 code'), blank=True, null=True)
special_16_type = models.CharField(max_length=2, verbose_name=_('FIPS Special 16 type'), blank=True, null=True, choices=JURISDICTION_CHOICES)
special_17_code = models.CharField(max_length=5, verbose_name=_('FIPS Special 17 code'), blank=True, null=True)
special_17_type = models.CharField(max_length=2, verbose_name=_('FIPS Special 17 type'), blank=True, null=True, choices=JURISDICTION_CHOICES)
special_18_code = models.CharField(max_length=5, verbose_name=_('FIPS Special 18 code'), blank=True, null=True)
special_18_type = models.CharField(max_length=2, verbose_name=_('FIPS Special 18 type'), blank=True, null=True, choices=JURISDICTION_CHOICES)
special_19_code = models.CharField(max_length=5, verbose_name=_('FIPS Special 19 code'), blank=True, null=True)
special_19_type = models.CharField(max_length=2, verbose_name=_('FIPS Special 19 type'), blank=True, null=True, choices=JURISDICTION_CHOICES)
special_20_code = models.CharField(max_length=5, verbose_name=_('FIPS Special 20 code'), blank=True, null=True)
special_20_type = models.CharField(max_length=2, verbose_name=_('FIPS Special 20 type'), blank=True, null=True, choices=JURISDICTION_CHOICES)
    # Set this to look up tax rates as of a date other than today.
    date = None
    # Set these to use the intrastate and/or food rates instead of the defaults.
    useIntrastate = None
    useFood = None
def get_zip_range(self):
if self.zipExtensionLow:
return '%05d-%04d -> %05d-%04d' % (
self.zipCodeLow, self.zipExtensionLow, self.zipCodeHigh, self.zipExtensionHigh
)
else:
return '%05d -> %05d' % (self.zipCodeLow, self.zipCodeHigh)
zip_range = property(get_zip_range)
def rates(self, date=None):
        result = []
state = self.fipsStateCode
if not date:
date = _date.today()
# Lookup all the applicable codes.
for fips in (
self.fipsStateIndicator, self.fipsCountyCode, self.fipsPlaceCode,
self.special_1_code, self.special_2_code, self.special_3_code,
self.special_4_code, self.special_5_code, self.special_6_code,
self.special_7_code, self.special_8_code, self.special_9_code,
self.special_10_code, self.special_11_code, self.special_12_code,
self.special_13_code, self.special_14_code, self.special_15_code,
self.special_16_code, self.special_17_code, self.special_18_code,
self.special_19_code, self.special_20_code
):
if not fips:
continue
rate = TaxRate.objects.get(
state=state,
jurisdictionFipsCode=fips,
startDate__lte=date,
endDate__gte=date,
)
            result.append(rate)
        return result
def get_percentage(self, date=None):
"""
Emulate being a tax rate by returning a total percentage to tax the customer.
"""
pct = Decimal('0.00')
for x in self.rates(date):
pct += x.rate(intrastate=self.useIntrastate, food=self.useFood)
return pct
    percentage = property(get_percentage)
def __str__(self):
if self.recordType == 'Z':
return 'TaxBoundry(Z): %i -- %i' % (
self.zipCodeLow, self.zipCodeHigh
)
elif self.recordType == '4':
return 'TaxBoundry(4): %i-%i -- %i-%i' % (
self.zipCodeLow, self.zipExtensionLow,
self.zipCodeHigh, self.zipExtensionHigh,
)
else:
return 'TaxBoundry(A)'
@classmethod
def lookup(cls, zip, ext=None, date=None):
"""Handy function to take a zip code and return the appropriate rates
for it."""
if not date:
date = _date.today()
# Try for a ZIP+4 lookup first if we can.
if ext:
try:
return cls.objects.get(
recordType='4',
zipCodeLow__lte=zip,
zipCodeHigh__gte=zip,
zipExtensionLow__lte=ext,
zipExtensionHigh__gte=ext,
startDate__lte=date,
endDate__gte=date,
)
except cls.DoesNotExist:
                # Not all ZIP+4 ranges have entries. That's OK.
pass
# Try for just the ZIP then.
try:
return cls.objects.get(
recordType='Z',
zipCodeLow__lte=zip,
zipCodeHigh__gte=zip,
startDate__lte=date,
endDate__gte=date,
)
except cls.DoesNotExist:
return None
class Meta:
verbose_name = _("Tax Boundry")
verbose_name_plural = _("Tax Boundries")
#class TaxCollected(models.Model):
# order = models.ForeignKey(Order, verbose_name=_("Order"))
# taxRate = models.ForeignKey(TaxRate, verbose_name=_('Tax Rate'))
# useIntrastate = models.BooleanField(verbose_name=_('Use Intrastate rate instead of Interstate?'),
# default=True)
# useFood = models.BooleanField(verbose_name=_('Use food/drug rate instead of general?'),
# default=False)
#
#def save_taxes_collected(order, **kwargs):
# processor = Processor(order=order)
# tb = processor.get_boundry()
#
#order_success.connect(save_taxes_collected)
from . import config
| 48.726619
| 145
| 0.679216
|
4a18860c2a5bca602fedb931f45e765f1a1ad93d
| 15,399
|
py
|
Python
|
torchlib/datasets/datasets.py
|
daaiwusheng/ferattention
|
d0497bfe52db4ed9444482558e031986e5175a4e
|
[
"MIT"
] | 79
|
2019-03-25T19:37:17.000Z
|
2022-03-24T03:54:26.000Z
|
torchlib/datasets/datasets.py
|
daaiwusheng/ferattention
|
d0497bfe52db4ed9444482558e031986e5175a4e
|
[
"MIT"
] | 9
|
2019-05-09T15:48:53.000Z
|
2021-10-11T06:58:23.000Z
|
torchlib/datasets/datasets.py
|
daaiwusheng/ferattention
|
d0497bfe52db4ed9444482558e031986e5175a4e
|
[
"MIT"
] | 25
|
2019-04-04T09:30:03.000Z
|
2021-08-02T15:45:38.000Z
|
import os
import math
import numpy as np
import random
from collections import namedtuple
import torch
from pytvision.datasets import utility
from pytvision.datasets.imageutl import imageProvide
from pytvision.transforms.aumentation import ObjectImageAndLabelTransform, ObjectImageTransform
import warnings
warnings.filterwarnings("ignore")
class Dataset( object ):
"""
Generic dataset
"""
def __init__(self,
data,
num_channels=1,
count=None,
transform=None
):
"""
Initialization
Args:
@data: dataprovide class
@num_channels:
@tranform: tranform
"""
if count is None: count = len(data)
self.count = count
self.data = data
self.num_channels = num_channels
self.transform = transform
self.labels = data.labels
self.classes = np.unique(self.labels)
self.numclass = len(self.classes)
def __len__(self):
return self.count
def __getitem__(self, idx):
idx = idx % len(self.data)
image, label = self.data[idx]
image = np.array(image)
image = utility.to_channels(image, self.num_channels)
label = utility.to_one_hot(label, self.numclass)
obj = ObjectImageAndLabelTransform( image, label )
if self.transform:
obj = self.transform( obj )
return obj.to_dict()
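# Usage sketch (illustrative; the imageProvide arguments are assumptions, not
# part of this module):
#
#   data = imageProvide('path/to/dataset', ext='jpg')
#   dataset = Dataset(data, num_channels=3)
#   loader = torch.utils.data.DataLoader(dataset, batch_size=32, shuffle=True)
#   batch = next(iter(loader))  # dict of image/label tensors built by to_dict()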
class ResampleDataset( object ):
"""
Resample data for generic dataset
"""
def __init__(self,
data,
num_channels=1,
count=200,
transform=None
):
"""
Initialization
data: dataloader class
tranform: tranform
"""
self.num_channels=num_channels
self.data = data
self.transform = transform
self.labels = data.labels
self.count=count
#self.classes = np.unique(self.labels)
self.classes, self.frecs = np.unique(self.labels, return_counts=True)
self.numclass = len(self.classes)
#self.weights = 1-(self.frecs/np.sum(self.frecs))
self.weights = np.ones( (self.numclass,1) )
self.reset(self.weights)
self.labels_index = list()
for cl in range( self.numclass ):
indx = np.where(self.labels==cl)[0]
self.labels_index.append(indx)
def reset(self, weights):
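        # Draw a fresh schedule of self.count class labels, sampled in proportion to the given weights.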
self.dist_of_classes = np.array(random.choices(self.classes, weights=weights, k=self.count ))
def __len__(self):
return self.count
def __getitem__(self, idx):
idx = self.dist_of_classes[idx]
class_index = self.labels_index[idx]
n = len(class_index)
idx = class_index[ random.randint(0,n-1) ]
image, label = self.data[idx]
image = np.array(image)
image = utility.to_channels(image, self.num_channels)
label = utility.to_one_hot(label, self.numclass)
obj = ObjectImageAndLabelTransform( image, label )
if self.transform:
obj = self.transform( obj )
return obj.to_dict()
class SecuencialSamplesDataset( object ):
"""
    Generic dataset that stratifies sequential samples across classes
"""
def __init__(self,
data,
count=None,
num_channels=1,
transform=None
):
"""
Initialization
"""
if count is None: count = len(data)
self.num_channels=num_channels
self.data = data
self.num = count
# make index
self.labels = data.labels
self.classes = np.unique(self.labels)
self.numclass = len(self.classes)
self.labels_index = list()
for cl in range( self.numclass ):
indx = np.where(self.labels==cl)[0]
self.labels_index.append(indx)
self.transform = transform
def __len__(self):
return self.num
def __getitem__(self, idx):
idx = idx % self.numclass
class_index = self.labels_index[idx]
n = len(class_index)
idx = class_index[ random.randint(0,n-1) ]
image, label = self.data[idx]
image = np.array(image)
image = utility.to_channels(image, self.num_channels)
label = utility.to_one_hot(label, self.numclass)
obj = ObjectImageAndLabelTransform( image, label )
if self.transform:
obj = self.transform( obj )
return obj.to_dict()
class SecuencialExSamplesDataset( object ):
"""
    Generic dataset that stratifies sequential samples over randomly chosen class subsets
"""
def __init__(self,
data,
count=200,
n_set=3,
batch_size=10,
num_channels=1,
transform=None
):
"""
Initialization
"""
self.num_channels=num_channels
self.data = data
self.num = count
self.n_set = n_set
self.batch_size = batch_size
# make index
self.labels = data.labels
self.classes = np.unique(self.labels)
self.numclass = len(self.classes)
self.transform = transform
self.labels_index = list()
for cl in range( self.numclass ):
indx = np.where(self.labels==cl)[0]
self.labels_index.append(indx)
self.make_manifold_list()
def __len__(self):
return int( self.num*self.n_set*self.batch_size )
def __getitem__(self, idx):
idx = self.index[ idx ]
image, label = self.data[idx]
image = np.array(image)
image = utility.to_channels(image, self.num_channels)
label = utility.to_one_hot(label, self.numclass)
obj = ObjectImageAndLabelTransform( image, label )
if self.transform:
obj = self.transform( obj )
return obj.to_dict()
def _reset_classes(self):
self.class_select = np.random.choice( self.classes, self.n_set, replace=False )
def make_manifold_list(self):
self._reset_classes()
        n = self.num * self.n_set * self.batch_size  # iterations
index = np.zeros((n), dtype=int)
for i in range( n ):
            if i % (self.batch_size * self.n_set) == 0:
self._reset_classes()
k = i % self.n_set
class_index = self.labels_index[ self.class_select[ k ] ]
idx = class_index[ random.randint(0, len(class_index)-1) ]
index[i] = idx
self.index = index
class TripletsDataset( object ):
"""
TripletsDataset
"""
def __init__(self,
data,
n_triplets=100,
num_channels=1,
transform=None):
"""
"""
self.data = data
self.num_channels=num_channels
# make triplets
self.labels = data.labels
self.classes, self.frecs = np.unique(self.labels, return_counts=True)
#self.weights = np.array([ 1-(self.frecs[i]/np.sum(self.frecs)) for i in self.labels ])
self.numclass = len( self.classes )
self.num_triplets = n_triplets
self.make_triplet_list(n_triplets)
self.transform = transform
def reset(self):
print('Reset dataloader ...')
self.make_triplet_list(self.num_triplets)
def __len__(self):
return len(self.triplets)
def __getitem__(self, idx):
idx1, idx2, idx3 = self.triplets[idx]
        img1, lab1 = self.data[idx1]
        img2, lab2 = self.data[idx2]
        img3, lab3 = self.data[idx3]
img1 = np.array(img1)
img2 = np.array(img2)
img3 = np.array(img3)
img1 = utility.to_channels(img1, self.num_channels)
img2 = utility.to_channels(img2, self.num_channels)
img3 = utility.to_channels(img3, self.num_channels)
lab1 = utility.to_one_hot(lab1, self.numclass)
lab2 = utility.to_one_hot(lab2, self.numclass)
lab3 = utility.to_one_hot(lab3, self.numclass)
a = ObjectImageAndLabelTransform( img1, lab1 )
b = ObjectImageAndLabelTransform( img2, lab2 )
c = ObjectImageAndLabelTransform( img3, lab3 )
if self.transform is not None:
a = self.transform( a )
b = self.transform( b )
c = self.transform( c )
return {'a':a.to_dict(), 'b':b.to_dict(), 'c':c.to_dict()}
def make_triplet_list(self, ntriplets):
self.triplets = []
nc = self.numclass
#choice = lambda seq: np.array([ random.choice(seq) for _ in range( int(ntriplets/nc) ) ])
for cx in range(nc):
class_idx = cx
# a, b, c are index of labels where it's equal to class_idx
a = np.array(random.choices( np.where(self.labels==class_idx)[0], k=int(ntriplets/nc) ))
b = np.array(random.choices( np.where(self.labels==class_idx)[0], k=int(ntriplets/nc) ))
#while np.any((a-b)==0): #aligning check
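            # Reshuffle the positives until fewer than 10% of (anchor, positive) pairs share an index.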
while np.sum((a-b) == 0 )/b.shape[0] > 0.1: #aligning check
random.shuffle(b)
#index = np.where(self.labels!=class_idx)[0]
#w = self.weights[index]
#c = np.array(random.choices(index, weights=w, k=int(ntriplets/nc) ))
c = np.array(random.choices(np.where(self.labels!=class_idx)[0], k=int(ntriplets/nc) ))
self.triplets += zip(a,b,c)
random.shuffle(self.triplets)
        self.num_triplets = (int(ntriplets) // nc) * nc
def regenerate_triplet_list(self, sampler, frac_hard):
# negatives is a tuple of anchors and negative examples
num_random_triplets = self.num_triplets*(1.0-frac_hard)
# adjust number of random triplets so that it is a multiple of num_classes
        num_random_triplets = (int(math.ceil(num_random_triplets)) // self.numclass) * self.numclass
num_hard = self.num_triplets - num_random_triplets
print("Number of hard triplets %d ..." % num_hard)
self.make_triplet_list(num_random_triplets)
neg_hard_examples = sampler.ChooseNegatives(num_hard)
        # choose random positives (for now at least) for hard negatives
for pair in neg_hard_examples:
a, c = pair
anchor_class = self.labels[a]
b = np.random.choice(np.where(self.labels == anchor_class)[0])
self.triplets.append((a, b, c))
np.random.shuffle(self.triplets)
class MitosisDataset( object ):
r"""Mitosis dataset
This dataset have the capacity of classes regeneration
Args:
data: dataprovide class
num_channels: numbers of channels
count: number of objects in datasets
tranform: tranform
"""
def __init__(self,
data,
num_channels=1,
count=None,
transform=None
):
if count is None: count = len( data )
self.count = count
self.data = data
self.num_channels = num_channels
self.transform = transform
self.labels = data.labels
self.classes = np.unique(self.labels)
self.numclass = len(self.classes)
self.labels_reg = self.labels
self.classes_reg = self.classes
self.numclass_reg = self.numclass
def __len__(self):
return self.count
def __getitem__(self, idx):
idx = idx % len(self.data)
image, label = self.data[idx]
label_reg = self.labels_reg[idx]
image = np.array(image)
image = utility.to_channels(image, self.num_channels)
label = utility.to_one_hot(label, self.numclass)
label_reg = utility.to_one_hot(label_reg, self.numclass_reg )
obj = ObjectImageTransform( image )
if self.transform:
obj = self.transform( obj )
x = obj.to_value()
y = torch.from_numpy( label ).float()
y_reg = torch.from_numpy( label_reg ).float()
return x, y, y_reg
def regeneration(self, label_regeneration ):
assert( len(label_regeneration) == len(self.labels_reg) )
self.labels_reg = label_regeneration
self.classes_reg = np.unique(self.labels_reg)
self.numclass_reg = len(self.classes_reg)
def __repr__(self):
fmt_str = 'Dataset ' + self.__class__.__name__
fmt_str += '\n'
return fmt_str
class MitosisSecuencialSamplesDataset( object ):
"""
Mitosis dataset for extratificate secuencial samples
Args:
data: dataprovide class
count: number of objects in datasets
num_channels: numbers of channels
tranform: tranform
"""
def __init__(self,
data,
count=200,
num_channels=1,
transform=None
):
"""
Initialization
"""
if count is None: count = len(data)
self.num_channels=num_channels
self.data = data
self.count = count
# make index
self.labels = data.labels
self.classes = np.unique( self.labels )
self.numclass = len( self.classes )
self.regeneration( self.labels )
self.transform = transform
def regeneration(self, label_regeneration ):
self.labels_reg = label_regeneration
self.classes_reg = np.unique(self.labels_reg)
self.numclass_reg = len(self.classes_reg)
self.labels_index = []
for cl in range( self.numclass_reg ):
indx = np.where(self.labels_reg == cl)[0]
self.labels_index.append(indx)
def __repr__(self):
fmt_str = 'Dataset ' + self.__class__.__name__
fmt_str += '\n'
return fmt_str
def __len__(self):
return self.count
def __getitem__(self, idx):
idx = idx % self.numclass_reg
class_index = self.labels_index[idx]
n = len(class_index)
idx = class_index[ random.randint(0,n-1) ]
image, label = self.data[idx]
label_reg = self.labels_reg[idx]
image = np.array(image)
image = utility.to_channels(image, self.num_channels)
label = utility.to_one_hot(label, self.numclass)
label_reg = utility.to_one_hot(label_reg, self.numclass_reg )
obj = ObjectImageTransform( image )
if self.transform:
obj = self.transform( obj )
x = obj.to_value()
y = torch.from_numpy( label ).float()
y_reg = torch.from_numpy( label_reg ).float()
return x, y, y_reg
| 29.613462
| 111
| 0.559062
|
4a188639447d3a4ce132f3f7bff6e0526f8406dc
| 1,376
|
py
|
Python
|
agilent_npy2mat.py
|
uctm/agilent_npy2mat
|
8f2cad7095dfa635b8d140f10ab92728864d0b00
|
[
"MIT"
] | null | null | null |
agilent_npy2mat.py
|
uctm/agilent_npy2mat
|
8f2cad7095dfa635b8d140f10ab92728864d0b00
|
[
"MIT"
] | null | null | null |
agilent_npy2mat.py
|
uctm/agilent_npy2mat
|
8f2cad7095dfa635b8d140f10ab92728864d0b00
|
[
"MIT"
] | null | null | null |
import os, tkinter, tkinter.filedialog, tkinter.messagebox
import glob
import numpy as np
import scipy.io
def agilent_npy2mat(dirname):
    '''
    Converts the .npy files saved by the Get_waveform_data_elegant_VISA_Python
    program (which reads waveforms from an Agilent oscilloscope) into a data
    format that MATLAB can load.
    Each file contains both time data and sensor data, but to reduce the data
    size only the sensor data is converted per file; the shared time axis is
    saved once.
    Given a directory name, this creates a new directory with "_mat" appended
    to the name and saves the .mat files there.
    Parameters
    ----------
    dirname : name of the directory containing the data
    Returns
    -------
    None
    '''
    save_dir = dirname + r'_mat/'  # directory where the .mat files are saved
os.mkdir(save_dir)
fnames = glob.glob(dirname + r'/*.npy')
T = np.load(fnames[0])[:, 0]
scipy.io.savemat(save_dir + 'time.mat', {'time': T})
for f in fnames:
x = np.load(f)[:, 1]
        # convert the absolute path into a file name without extension
fname_no_extension = f.split('\\')[-1].split('.')[0]
scipy.io.savemat(save_dir+fname_no_extension + '.mat', {'pvdf':x})
if __name__ == '__main__':
    # show the folder selection dialog
root = tkinter.Tk()
root.withdraw()
    # path of the directory containing this script
iDir = os.path.abspath(os.path.dirname(__file__))
    tkinter.messagebox.showinfo('npy2mat.py', 'Select the folder containing the data to process')
    dirname = tkinter.filedialog.askdirectory(initialdir = iDir)
    agilent_npy2mat(dirname)  # convert to .mat files
    tkinter.messagebox.showinfo('npy2mat.py', 'Conversion complete')
| 29.276596
| 74
| 0.678052
|
4a1886dce2b3e488491853b9b57f3114604dda7a
| 15,816
|
py
|
Python
|
sdk/python/pulumi_azure_native/containerservice/v20200101/get_managed_cluster.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/containerservice/v20200101/get_managed_cluster.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_native/containerservice/v20200101/get_managed_cluster.py
|
sebtelko/pulumi-azure-native
|
711ec021b5c73da05611c56c8a35adb0ce3244e4
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from ... import _utilities
from . import outputs
__all__ = [
'GetManagedClusterResult',
'AwaitableGetManagedClusterResult',
'get_managed_cluster',
]
@pulumi.output_type
class GetManagedClusterResult:
"""
Managed cluster.
"""
def __init__(__self__, aad_profile=None, addon_profiles=None, agent_pool_profiles=None, api_server_access_profile=None, disk_encryption_set_id=None, dns_prefix=None, enable_pod_security_policy=None, enable_rbac=None, fqdn=None, id=None, identity=None, identity_profile=None, kubernetes_version=None, linux_profile=None, location=None, max_agent_pools=None, name=None, network_profile=None, node_resource_group=None, private_fqdn=None, provisioning_state=None, service_principal_profile=None, tags=None, type=None, windows_profile=None):
if aad_profile and not isinstance(aad_profile, dict):
raise TypeError("Expected argument 'aad_profile' to be a dict")
pulumi.set(__self__, "aad_profile", aad_profile)
if addon_profiles and not isinstance(addon_profiles, dict):
raise TypeError("Expected argument 'addon_profiles' to be a dict")
pulumi.set(__self__, "addon_profiles", addon_profiles)
if agent_pool_profiles and not isinstance(agent_pool_profiles, list):
raise TypeError("Expected argument 'agent_pool_profiles' to be a list")
pulumi.set(__self__, "agent_pool_profiles", agent_pool_profiles)
if api_server_access_profile and not isinstance(api_server_access_profile, dict):
raise TypeError("Expected argument 'api_server_access_profile' to be a dict")
pulumi.set(__self__, "api_server_access_profile", api_server_access_profile)
if disk_encryption_set_id and not isinstance(disk_encryption_set_id, str):
raise TypeError("Expected argument 'disk_encryption_set_id' to be a str")
pulumi.set(__self__, "disk_encryption_set_id", disk_encryption_set_id)
if dns_prefix and not isinstance(dns_prefix, str):
raise TypeError("Expected argument 'dns_prefix' to be a str")
pulumi.set(__self__, "dns_prefix", dns_prefix)
if enable_pod_security_policy and not isinstance(enable_pod_security_policy, bool):
raise TypeError("Expected argument 'enable_pod_security_policy' to be a bool")
pulumi.set(__self__, "enable_pod_security_policy", enable_pod_security_policy)
if enable_rbac and not isinstance(enable_rbac, bool):
raise TypeError("Expected argument 'enable_rbac' to be a bool")
pulumi.set(__self__, "enable_rbac", enable_rbac)
if fqdn and not isinstance(fqdn, str):
raise TypeError("Expected argument 'fqdn' to be a str")
pulumi.set(__self__, "fqdn", fqdn)
if id and not isinstance(id, str):
raise TypeError("Expected argument 'id' to be a str")
pulumi.set(__self__, "id", id)
if identity and not isinstance(identity, dict):
raise TypeError("Expected argument 'identity' to be a dict")
pulumi.set(__self__, "identity", identity)
if identity_profile and not isinstance(identity_profile, dict):
raise TypeError("Expected argument 'identity_profile' to be a dict")
pulumi.set(__self__, "identity_profile", identity_profile)
if kubernetes_version and not isinstance(kubernetes_version, str):
raise TypeError("Expected argument 'kubernetes_version' to be a str")
pulumi.set(__self__, "kubernetes_version", kubernetes_version)
if linux_profile and not isinstance(linux_profile, dict):
raise TypeError("Expected argument 'linux_profile' to be a dict")
pulumi.set(__self__, "linux_profile", linux_profile)
if location and not isinstance(location, str):
raise TypeError("Expected argument 'location' to be a str")
pulumi.set(__self__, "location", location)
if max_agent_pools and not isinstance(max_agent_pools, int):
raise TypeError("Expected argument 'max_agent_pools' to be a int")
pulumi.set(__self__, "max_agent_pools", max_agent_pools)
if name and not isinstance(name, str):
raise TypeError("Expected argument 'name' to be a str")
pulumi.set(__self__, "name", name)
if network_profile and not isinstance(network_profile, dict):
raise TypeError("Expected argument 'network_profile' to be a dict")
pulumi.set(__self__, "network_profile", network_profile)
if node_resource_group and not isinstance(node_resource_group, str):
raise TypeError("Expected argument 'node_resource_group' to be a str")
pulumi.set(__self__, "node_resource_group", node_resource_group)
if private_fqdn and not isinstance(private_fqdn, str):
raise TypeError("Expected argument 'private_fqdn' to be a str")
pulumi.set(__self__, "private_fqdn", private_fqdn)
if provisioning_state and not isinstance(provisioning_state, str):
raise TypeError("Expected argument 'provisioning_state' to be a str")
pulumi.set(__self__, "provisioning_state", provisioning_state)
if service_principal_profile and not isinstance(service_principal_profile, dict):
raise TypeError("Expected argument 'service_principal_profile' to be a dict")
pulumi.set(__self__, "service_principal_profile", service_principal_profile)
if tags and not isinstance(tags, dict):
raise TypeError("Expected argument 'tags' to be a dict")
pulumi.set(__self__, "tags", tags)
if type and not isinstance(type, str):
raise TypeError("Expected argument 'type' to be a str")
pulumi.set(__self__, "type", type)
if windows_profile and not isinstance(windows_profile, dict):
raise TypeError("Expected argument 'windows_profile' to be a dict")
pulumi.set(__self__, "windows_profile", windows_profile)
@property
@pulumi.getter(name="aadProfile")
def aad_profile(self) -> Optional['outputs.ManagedClusterAADProfileResponse']:
"""
Profile of Azure Active Directory configuration.
"""
return pulumi.get(self, "aad_profile")
@property
@pulumi.getter(name="addonProfiles")
def addon_profiles(self) -> Optional[Mapping[str, 'outputs.ManagedClusterAddonProfileResponse']]:
"""
Profile of managed cluster add-on.
"""
return pulumi.get(self, "addon_profiles")
@property
@pulumi.getter(name="agentPoolProfiles")
def agent_pool_profiles(self) -> Optional[Sequence['outputs.ManagedClusterAgentPoolProfileResponse']]:
"""
Properties of the agent pool.
"""
return pulumi.get(self, "agent_pool_profiles")
@property
@pulumi.getter(name="apiServerAccessProfile")
def api_server_access_profile(self) -> Optional['outputs.ManagedClusterAPIServerAccessProfileResponse']:
"""
Access profile for managed cluster API server.
"""
return pulumi.get(self, "api_server_access_profile")
@property
@pulumi.getter(name="diskEncryptionSetID")
def disk_encryption_set_id(self) -> Optional[str]:
"""
ResourceId of the disk encryption set to use for enabling encryption at rest.
"""
return pulumi.get(self, "disk_encryption_set_id")
@property
@pulumi.getter(name="dnsPrefix")
def dns_prefix(self) -> Optional[str]:
"""
DNS prefix specified when creating the managed cluster.
"""
return pulumi.get(self, "dns_prefix")
@property
@pulumi.getter(name="enablePodSecurityPolicy")
def enable_pod_security_policy(self) -> Optional[bool]:
"""
(DEPRECATING) Whether to enable Kubernetes pod security policy (preview). This feature is set for removal on October 15th, 2020. Learn more at aka.ms/aks/azpodpolicy.
"""
return pulumi.get(self, "enable_pod_security_policy")
@property
@pulumi.getter(name="enableRBAC")
def enable_rbac(self) -> Optional[bool]:
"""
Whether to enable Kubernetes Role-Based Access Control.
"""
return pulumi.get(self, "enable_rbac")
@property
@pulumi.getter
def fqdn(self) -> str:
"""
FQDN for the master pool.
"""
return pulumi.get(self, "fqdn")
@property
@pulumi.getter
def id(self) -> str:
"""
Resource Id
"""
return pulumi.get(self, "id")
@property
@pulumi.getter
def identity(self) -> Optional['outputs.ManagedClusterIdentityResponse']:
"""
The identity of the managed cluster, if configured.
"""
return pulumi.get(self, "identity")
@property
@pulumi.getter(name="identityProfile")
def identity_profile(self) -> Optional[Mapping[str, 'outputs.ManagedClusterPropertiesResponseIdentityProfile']]:
"""
Identities associated with the cluster.
"""
return pulumi.get(self, "identity_profile")
@property
@pulumi.getter(name="kubernetesVersion")
def kubernetes_version(self) -> Optional[str]:
"""
Version of Kubernetes specified when creating the managed cluster.
"""
return pulumi.get(self, "kubernetes_version")
@property
@pulumi.getter(name="linuxProfile")
def linux_profile(self) -> Optional['outputs.ContainerServiceLinuxProfileResponse']:
"""
Profile for Linux VMs in the container service cluster.
"""
return pulumi.get(self, "linux_profile")
@property
@pulumi.getter
def location(self) -> str:
"""
Resource location
"""
return pulumi.get(self, "location")
@property
@pulumi.getter(name="maxAgentPools")
def max_agent_pools(self) -> int:
"""
The max number of agent pools for the managed cluster.
"""
return pulumi.get(self, "max_agent_pools")
@property
@pulumi.getter
def name(self) -> str:
"""
Resource name
"""
return pulumi.get(self, "name")
@property
@pulumi.getter(name="networkProfile")
def network_profile(self) -> Optional['outputs.ContainerServiceNetworkProfileResponse']:
"""
Profile of network configuration.
"""
return pulumi.get(self, "network_profile")
@property
@pulumi.getter(name="nodeResourceGroup")
def node_resource_group(self) -> Optional[str]:
"""
Name of the resource group containing agent pool nodes.
"""
return pulumi.get(self, "node_resource_group")
@property
@pulumi.getter(name="privateFQDN")
def private_fqdn(self) -> str:
"""
FQDN of private cluster.
"""
return pulumi.get(self, "private_fqdn")
@property
@pulumi.getter(name="provisioningState")
def provisioning_state(self) -> str:
"""
The current deployment or provisioning state, which only appears in the response.
"""
return pulumi.get(self, "provisioning_state")
@property
@pulumi.getter(name="servicePrincipalProfile")
def service_principal_profile(self) -> Optional['outputs.ManagedClusterServicePrincipalProfileResponse']:
"""
Information about a service principal identity for the cluster to use for manipulating Azure APIs.
"""
return pulumi.get(self, "service_principal_profile")
@property
@pulumi.getter
def tags(self) -> Optional[Mapping[str, str]]:
"""
Resource tags
"""
return pulumi.get(self, "tags")
@property
@pulumi.getter
def type(self) -> str:
"""
Resource type
"""
return pulumi.get(self, "type")
@property
@pulumi.getter(name="windowsProfile")
def windows_profile(self) -> Optional['outputs.ManagedClusterWindowsProfileResponse']:
"""
Profile for Windows VMs in the container service cluster.
"""
return pulumi.get(self, "windows_profile")
class AwaitableGetManagedClusterResult(GetManagedClusterResult):
# pylint: disable=using-constant-test
def __await__(self):
if False:
yield self
return GetManagedClusterResult(
aad_profile=self.aad_profile,
addon_profiles=self.addon_profiles,
agent_pool_profiles=self.agent_pool_profiles,
api_server_access_profile=self.api_server_access_profile,
disk_encryption_set_id=self.disk_encryption_set_id,
dns_prefix=self.dns_prefix,
enable_pod_security_policy=self.enable_pod_security_policy,
enable_rbac=self.enable_rbac,
fqdn=self.fqdn,
id=self.id,
identity=self.identity,
identity_profile=self.identity_profile,
kubernetes_version=self.kubernetes_version,
linux_profile=self.linux_profile,
location=self.location,
max_agent_pools=self.max_agent_pools,
name=self.name,
network_profile=self.network_profile,
node_resource_group=self.node_resource_group,
private_fqdn=self.private_fqdn,
provisioning_state=self.provisioning_state,
service_principal_profile=self.service_principal_profile,
tags=self.tags,
type=self.type,
windows_profile=self.windows_profile)
def get_managed_cluster(resource_group_name: Optional[str] = None,
resource_name: Optional[str] = None,
opts: Optional[pulumi.InvokeOptions] = None) -> AwaitableGetManagedClusterResult:
"""
Managed cluster.
:param str resource_group_name: The name of the resource group.
:param str resource_name: The name of the managed cluster resource.
"""
__args__ = dict()
__args__['resourceGroupName'] = resource_group_name
__args__['resourceName'] = resource_name
if opts is None:
opts = pulumi.InvokeOptions()
if opts.version is None:
opts.version = _utilities.get_version()
__ret__ = pulumi.runtime.invoke('azure-native:containerservice/v20200101:getManagedCluster', __args__, opts=opts, typ=GetManagedClusterResult).value
return AwaitableGetManagedClusterResult(
aad_profile=__ret__.aad_profile,
addon_profiles=__ret__.addon_profiles,
agent_pool_profiles=__ret__.agent_pool_profiles,
api_server_access_profile=__ret__.api_server_access_profile,
disk_encryption_set_id=__ret__.disk_encryption_set_id,
dns_prefix=__ret__.dns_prefix,
enable_pod_security_policy=__ret__.enable_pod_security_policy,
enable_rbac=__ret__.enable_rbac,
fqdn=__ret__.fqdn,
id=__ret__.id,
identity=__ret__.identity,
identity_profile=__ret__.identity_profile,
kubernetes_version=__ret__.kubernetes_version,
linux_profile=__ret__.linux_profile,
location=__ret__.location,
max_agent_pools=__ret__.max_agent_pools,
name=__ret__.name,
network_profile=__ret__.network_profile,
node_resource_group=__ret__.node_resource_group,
private_fqdn=__ret__.private_fqdn,
provisioning_state=__ret__.provisioning_state,
service_principal_profile=__ret__.service_principal_profile,
tags=__ret__.tags,
type=__ret__.type,
windows_profile=__ret__.windows_profile)
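# Usage sketch (the resource names below are illustrative, not real resources):
#
#   cluster = get_managed_cluster(resource_group_name='my-rg',
#                                 resource_name='my-aks-cluster')
#   print(cluster.kubernetes_version, cluster.fqdn)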
| 41.730871
| 540
| 0.677921
|
4a188719e96e7f63211eb186a14b0f4d58a26f74
| 6,870
|
py
|
Python
|
pennylane/templates/subroutines/qpe.py
|
camponogaraviera/pennylane
|
cacbe6807dcaf264e6f05847df4be8694cccedb2
|
[
"Apache-2.0"
] | 539
|
2018-11-13T08:45:42.000Z
|
2020-07-27T18:17:16.000Z
|
pennylane/templates/subroutines/qpe.py
|
camponogaraviera/pennylane
|
cacbe6807dcaf264e6f05847df4be8694cccedb2
|
[
"Apache-2.0"
] | 588
|
2018-11-14T10:21:47.000Z
|
2020-07-28T06:27:14.000Z
|
pennylane/templates/subroutines/qpe.py
|
camponogaraviera/pennylane
|
cacbe6807dcaf264e6f05847df4be8694cccedb2
|
[
"Apache-2.0"
] | 165
|
2018-11-13T18:58:56.000Z
|
2020-07-27T17:18:17.000Z
|
# Copyright 2018-2021 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Contains the QuantumPhaseEstimation template.
"""
# pylint: disable=too-many-arguments,arguments-differ
import pennylane as qml
from pennylane.operation import AnyWires, Operation
from pennylane.ops import Hadamard, ControlledQubitUnitary
class QuantumPhaseEstimation(Operation):
r"""Performs the
`quantum phase estimation <https://en.wikipedia.org/wiki/Quantum_phase_estimation_algorithm>`__
circuit.
Given a unitary matrix :math:`U`, this template applies the circuit for quantum phase
estimation. The unitary is applied to the qubits specified by ``target_wires`` and :math:`n`
qubits are used for phase estimation as specified by ``estimation_wires``.
.. figure:: ../../_static/templates/subroutines/qpe.svg
:align: center
:width: 60%
:target: javascript:void(0);
This circuit can be used to perform the standard quantum phase estimation algorithm, consisting
of the following steps:
#. Prepare ``target_wires`` in a given state. If ``target_wires`` are prepared in an eigenstate
of :math:`U` that has corresponding eigenvalue :math:`e^{2 \pi i \theta}` with phase
:math:`\theta \in [0, 1)`, this algorithm will measure :math:`\theta`. Other input states can
be prepared more generally.
#. Apply the ``QuantumPhaseEstimation`` circuit.
#. Measure ``estimation_wires`` using :func:`~.probs`, giving a probability distribution over
measurement outcomes in the computational basis.
#. Find the index of the largest value in the probability distribution and divide that number by
:math:`2^{n}`. This number will be an estimate of :math:`\theta` with an error that decreases
exponentially with the number of qubits :math:`n`.
Note that if :math:`\theta \in (-1, 0]`, we can estimate the phase by again finding the index
:math:`i` found in step 4 and calculating :math:`\theta \approx \frac{1 - i}{2^{n}}`. The
usage details below give an example of this case.
Args:
unitary (array): the phase estimation unitary, specified as a matrix
target_wires (Union[Wires, Sequence[int], or int]): the target wires to apply the unitary
estimation_wires (Union[Wires, Sequence[int], or int]): the wires to be used for phase
estimation
Raises:
QuantumFunctionError: if the ``target_wires`` and ``estimation_wires`` share a common
element
.. details::
:title: Usage Details
Consider the matrix corresponding to a rotation from an :class:`~.RX` gate:
.. code-block:: python
import pennylane as qml
from pennylane.templates import QuantumPhaseEstimation
from pennylane import numpy as np
phase = 5
target_wires = [0]
unitary = qml.RX(phase, wires=0).matrix()
The ``phase`` parameter can be estimated using ``QuantumPhaseEstimation``. An example is
shown below using a register of five phase-estimation qubits:
.. code-block:: python
n_estimation_wires = 5
estimation_wires = range(1, n_estimation_wires + 1)
dev = qml.device("default.qubit", wires=n_estimation_wires + 1)
@qml.qnode(dev)
def circuit():
# Start in the |+> eigenstate of the unitary
qml.Hadamard(wires=target_wires)
QuantumPhaseEstimation(
unitary,
target_wires=target_wires,
estimation_wires=estimation_wires,
)
return qml.probs(estimation_wires)
phase_estimated = np.argmax(circuit()) / 2 ** n_estimation_wires
# Need to rescale phase due to convention of RX gate
phase_estimated = 4 * np.pi * (1 - phase_estimated)
"""
num_wires = AnyWires
grad_method = None
def __init__(self, unitary, target_wires, estimation_wires, do_queue=True, id=None):
target_wires = list(target_wires)
estimation_wires = list(estimation_wires)
wires = target_wires + estimation_wires
if any(wire in target_wires for wire in estimation_wires):
raise qml.QuantumFunctionError(
"The target wires and estimation wires must be different"
)
self._hyperparameters = {
"target_wires": target_wires,
"estimation_wires": estimation_wires,
}
super().__init__(unitary, wires=wires, do_queue=do_queue, id=id)
@property
def num_params(self):
return 1
@staticmethod
def compute_decomposition(
unitary, wires, target_wires, estimation_wires
): # pylint: disable=arguments-differ,unused-argument
r"""Representation of the operator as a product of other operators.
.. math:: O = O_1 O_2 \dots O_n.
.. seealso:: :meth:`~.QuantumPhaseEstimation.decomposition`.
Args:
unitary (array): the phase estimation unitary, specified as a matrix
wires (Any or Iterable[Any]): wires that the operator acts on
target_wires (Any or Iterable[Any]): the target wires to apply the unitary
estimation_wires (Any or Iterable[Any]): the wires to be used for phase
estimation
Returns:
list[.Operator]: decomposition of the operator
"""
unitary_powers = [unitary]
for _ in range(len(estimation_wires) - 1):
new_power = unitary_powers[-1] @ unitary_powers[-1]
unitary_powers.append(new_power)
op_list = []
for wire in estimation_wires:
op_list.append(Hadamard(wire))
op_list.append(
ControlledQubitUnitary(unitary_powers.pop(), control_wires=wire, wires=target_wires)
)
op_list.append(qml.templates.QFT(wires=estimation_wires).inv())
return op_list
def adjoint(self):
adjoint_op = QuantumPhaseEstimation(
self.parameters[0],
target_wires=self.hyperparameters["target_wires"],
estimation_wires=self.hyperparameters["estimation_wires"],
)
adjoint_op.inverse = not self.inverse
return adjoint_op
| 37.747253
| 100
| 0.65575
|
4a188725678ebe3fba4ae46cdc09212323771b6f
| 429
|
py
|
Python
|
test/db_test/limit_length.py
|
AnonymousAuthor2013/PostRec
|
a1461f716d177e28b96ca29d1398f96b5717c1e1
|
[
"MIT"
] | 2
|
2019-06-25T02:46:37.000Z
|
2019-12-02T11:26:16.000Z
|
test/db_test/limit_length.py
|
AnonymousAuthor2013/PostRec
|
a1461f716d177e28b96ca29d1398f96b5717c1e1
|
[
"MIT"
] | 1
|
2019-07-18T03:45:28.000Z
|
2019-07-18T03:45:28.000Z
|
test/db_test/limit_length.py
|
AnonymousAuthor2013/PostRec
|
a1461f716d177e28b96ca29d1398f96b5717c1e1
|
[
"MIT"
] | 4
|
2019-06-23T13:49:07.000Z
|
2019-06-25T12:21:59.000Z
|
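"""Truncate every line of a whitespace-tokenized file to at most --length tokens,
writing the result to <file>.<length>.
Example (file name is illustrative): python limit_length.py --file corpus.txt --length 510
"""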
import argparse
parser = argparse.ArgumentParser()
parser.add_argument('--file', type=str, required=True)
parser.add_argument('--length', type=int, default=510)
args = parser.parse_args()
with open(args.file, "r", encoding="utf-8") as f:
    results = map(lambda line: " ".join(line.split()[:args.length]) + "\n", f)
with open(args.file+".{}".format(args.length), "w", encoding="utf-8") as f2:
f2.writelines(results)
| 30.642857
| 80
| 0.67366
|
4a188752932e557503257c67582ce456e5254162
| 578
|
py
|
Python
|
automationBeamNgExportFix/get_files.py
|
jonpecar/automationBeamNgExportFix
|
ffb4a1cc02213190631c10044d8e157304750651
|
[
"MIT"
] | null | null | null |
automationBeamNgExportFix/get_files.py
|
jonpecar/automationBeamNgExportFix
|
ffb4a1cc02213190631c10044d8e157304750651
|
[
"MIT"
] | null | null | null |
automationBeamNgExportFix/get_files.py
|
jonpecar/automationBeamNgExportFix
|
ffb4a1cc02213190631c10044d8e157304750651
|
[
"MIT"
] | null | null | null |
from genericpath import isfile
from os import path, getenv, stat, listdir
BEAMNG_VER = '0.24'
BEAMNG_USER_PATH = path.join(getenv('LOCALAPPDATA'), 'BeamNG.drive', BEAMNG_VER, 'mods')
def get_files_sorted():
files = []
for file in listdir(BEAMNG_USER_PATH):
file_path = path.join(BEAMNG_USER_PATH, file)
if isfile(file_path):
            _, ext = path.splitext(file_path)
if ext.lower() == '.zip':
files.append(file_path)
sorted_files = sorted(files, key=lambda t: -stat(t).st_mtime)
return sorted_files
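# Usage sketch: results are sorted newest-first by modification time.
#
#   mods = get_files_sorted()
#   latest = mods[0] if mods else None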
| 32.111111
| 88
| 0.657439
|
4a188760b8db4efd8fe8f979b3acfc4fde58045a
| 14,613
|
py
|
Python
|
src/main.py
|
racai-ai/RNER
|
272b32f23c9c2e8948436bf39f1b3c809e1ba3f7
|
[
"MIT"
] | null | null | null |
src/main.py
|
racai-ai/RNER
|
272b32f23c9c2e8948436bf39f1b3c809e1ba3f7
|
[
"MIT"
] | null | null | null |
src/main.py
|
racai-ai/RNER
|
272b32f23c9c2e8948436bf39f1b3c809e1ba3f7
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import, division, print_function
import argparse
import csv
import json
import logging
import os
import random
import sys
import numpy as np
import torch
import torch.nn.functional as F
from pytorch_transformers import AdamW, WarmupLinearSchedule
from torch import nn
from torch.utils.data import (DataLoader, RandomSampler, SequentialSampler,
TensorDataset)
from seqeval.metrics import classification_report
from model.xlmr_for_token_classification import XLMRForTokenClassification
from utils.train_utils import add_xlmr_args, evaluate_model, predict_model
from utils.data_utils import NerProcessor, create_dataset, convert_examples_to_features
from tqdm import tqdm as tqdm
from tqdm import trange
logging.basicConfig(format='%(asctime)s - %(levelname)s - %(name)s - %(message)s',
datefmt='%m/%d/%Y %H:%M:%S',
level=logging.INFO)
logger = logging.getLogger(__name__)
def main():
parser = argparse.ArgumentParser()
parser = add_xlmr_args(parser)
args = parser.parse_args()
if args.gradient_accumulation_steps < 1:
raise ValueError("Invalid gradient_accumulation_steps parameter: {}, should be >= 1".format(
args.gradient_accumulation_steps))
args.train_batch_size = args.train_batch_size // args.gradient_accumulation_steps
random.seed(args.seed)
np.random.seed(args.seed)
torch.manual_seed(args.seed)
if not args.do_train and not args.do_eval and not args.do_predict and not args.server:
raise ValueError(
"At least one of `do_train`, `do_eval`, `do_predict` or `server` must be True.")
if os.path.exists(args.output_dir) and os.listdir(args.output_dir) and args.do_train:
raise ValueError(
"Output directory ({}) already exists and is not empty.".format(args.output_dir))
if not os.path.exists(args.output_dir):
os.makedirs(args.output_dir)
processor = NerProcessor(labels=args.labels)
label_list = processor.get_labels()
num_labels = len(label_list) + 1 # add one for IGNORE label
train_examples = None
num_train_optimization_steps = 0
    logger.info("train_batch_size = %s", args.train_batch_size)
    logger.info("gradient_accumulation_steps = %s", args.gradient_accumulation_steps)
if args.do_train:
train_examples = processor.get_train_examples(args.data_dir)
num_train_optimization_steps = int(
len(train_examples) / args.train_batch_size / args.gradient_accumulation_steps) * args.num_train_epochs
# preparing model configs
hidden_size = 768 if 'base' in args.pretrained_path else 1024 # TODO: move this inside model.__init__
device = 'cuda' if (torch.cuda.is_available() and not args.no_cuda) else 'cpu'
# creating model
model = XLMRForTokenClassification(pretrained_path=args.pretrained_path,
n_labels=num_labels, hidden_size=hidden_size, seq_len=args.max_seq_length,
dropout_p=args.dropout, device=device, use_norm=args.use_norm, use_li=args.use_li,
li_dropout_p=args.li_dropout, li_sigma=args.li_sigma)
model.to(device)
no_decay = ['bias', 'final_layer_norm.weight']
params = list(model.named_parameters())
optimizer_grouped_parameters = [
{'params': [p for n, p in params if not any(
nd in n for nd in no_decay)], 'weight_decay': args.weight_decay},
{'params': [p for n, p in params if any(
nd in n for nd in no_decay)], 'weight_decay': 0.0}
]
warmup_steps = int(args.warmup_proportion * num_train_optimization_steps)
optimizer = AdamW(optimizer_grouped_parameters,
lr=args.learning_rate, eps=args.adam_epsilon)
scheduler = WarmupLinearSchedule(
optimizer, warmup_steps=warmup_steps, t_total=num_train_optimization_steps)
# freeze model if necessary
if args.freeze_model:
logger.info("Freezing XLM-R model...")
for n, p in model.named_parameters():
if 'xlmr' in n and p.requires_grad:
p.requires_grad = False
if args.fp16:
try:
from apex import amp
except ImportError:
raise ImportError(
"Please install apex from https://www.github.com/nvidia/apex to use fp16 training.")
model, optimizer = amp.initialize(
model, optimizer, opt_level=args.fp16_opt_level)
global_step = 0
nb_tr_steps = 0
tr_loss = 0
label_map = {i: label for i, label in enumerate(label_list, 1)}
if args.do_train:
output_p_file = os.path.join(args.output_dir, "parameters.txt")
with open(output_p_file, "w") as writer:
for k,v in sorted(vars(args).items()):
writer.write("{0}: {1}\n".format(k,v))
if len(args.train_existing_model)>0:
load_model_path=os.path.join(args.train_existing_model, 'model.pt')
logger.info("Loading model {}".format(load_model_path))
state_dict = torch.load(open(load_model_path, 'rb'), map_location=torch.device(device))
model.load_state_dict(state_dict, False) # do not enforce keys to be present
logger.info("Loaded saved model")
train_features = convert_examples_to_features(
train_examples, label_list, args.max_seq_length, model.encode_word)
logger.info("***** Running training *****")
logger.info(" Num examples = %d", len(train_examples))
logger.info(" Batch size = %d", args.train_batch_size)
logger.info(" Num steps = %d", num_train_optimization_steps)
train_data = create_dataset(train_features)
train_sampler = RandomSampler(train_data)
train_dataloader = DataLoader(
train_data, sampler=train_sampler, batch_size=args.train_batch_size)
# getting validation samples
val_examples = processor.get_dev_examples(args.data_dir)
val_features = convert_examples_to_features(
val_examples, label_list, args.max_seq_length, model.encode_word)
val_data = create_dataset(val_features)
best_val_f1 = 0.0
bestEpoch=0
for currentEpoch in tqdm(range(args.num_train_epochs), desc="Epoch"):
tr_loss = 0
nb_tr_examples, nb_tr_steps = 0, 0
tbar = tqdm(train_dataloader, desc="Iteration")
model.train()
for step, batch in enumerate(tbar):
batch = tuple(t.to(device) for t in batch)
                input_ids, label_ids, l_mask, valid_ids = batch
loss = model(input_ids, label_ids, l_mask, valid_ids)
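                # Scale the loss so gradients accumulated over several micro-batches match a single larger batch.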
if args.gradient_accumulation_steps > 1:
loss = loss / args.gradient_accumulation_steps
if args.fp16:
with amp.scale_loss(loss, optimizer) as scaled_loss:
scaled_loss.backward()
torch.nn.utils.clip_grad_norm_(
amp.master_params(optimizer), args.max_grad_norm)
else:
loss.backward()
torch.nn.utils.clip_grad_norm_(
model.parameters(), args.max_grad_norm)
tr_loss += loss.item()
nb_tr_examples += input_ids.size(0)
nb_tr_steps += 1
tbar.set_description('Loss = %.4f' %(tr_loss / (step+1)))
if (step + 1) % args.gradient_accumulation_steps == 0:
optimizer.step()
scheduler.step() # Update learning rate schedule
model.zero_grad()
global_step += 1
logger.info("\nTesting on validation set...")
f1, report = evaluate_model(model, val_data, label_list, args.eval_batch_size, device)
if f1 > best_val_f1:
best_val_f1 = f1
bestEpoch=currentEpoch
logger.info("\nFound better f1=%.4f on validation set. Saving model\n" %(f1))
logger.info("%s\n" %(report))
torch.save(model.state_dict(), open(os.path.join(args.output_dir, 'model.pt'), 'wb'))
with open(os.path.join(args.output_dir, 'model.epoch'),'w') as outf:
outf.write('{}'.format(currentEpoch))
            else:
                logger.info("\nNo better F1 score: {}. Best F1 was in epoch {} = {}\n".format(f1, bestEpoch, best_val_f1))
else: # load a saved model
load_model_path=os.path.join(args.output_dir, 'model.pt')
logger.info("Loading model {}".format(load_model_path))
state_dict = torch.load(open(load_model_path, 'rb'), map_location=torch.device(device))
model.load_state_dict(state_dict)
logger.info("Loaded saved model")
model.to(device)
if args.do_eval:
if args.eval_on == "dev":
eval_examples = processor.get_dev_examples(args.data_dir)
elif args.eval_on == "test":
eval_examples = processor.get_test_examples(args.data_dir)
else:
raise ValueError("eval on dev or test set only")
eval_features = convert_examples_to_features(
eval_examples, label_list, args.max_seq_length, model.encode_word)
logger.info("***** Running evaluation *****")
logger.info(" Num examples = %d", len(eval_examples))
logger.info(" Batch size = %d", args.eval_batch_size)
eval_data = create_dataset(eval_features)
f1_score, report = evaluate_model(model, eval_data, label_list, args.eval_batch_size, device)
logger.info("\n%s", report)
output_eval_file = os.path.join(args.output_dir, "eval_{}_results.txt".format(args.eval_on))
with open(output_eval_file, "w") as writer:
logger.info("***** Writing results to file *****")
writer.write(report)
logger.info("Done.")
if args.do_predict:
if args.predict_on == "dev":
eval_examples = processor.get_dev_examples(args.data_dir)
eval_sentences = processor.get_dev_sentences(args.data_dir)
elif args.predict_on == "test":
eval_examples = processor.get_test_examples(args.data_dir)
eval_sentences = processor.get_test_sentences(args.data_dir)
elif args.predict_on == "deploy":
eval_examples = processor.get_deploy_examples(args.data_dir)
eval_sentences = processor.get_deploy_sentences(args.data_dir)
else:
raise ValueError("Predict on dev, test or deploy set only")
eval_features = convert_examples_to_features(
eval_examples, label_list, args.max_seq_length, model.encode_word)
logger.info("***** Running prediction *****")
logger.info(" Num examples = %d", len(eval_examples))
logger.info(" Num sentences = %d", len(eval_sentences))
logger.info(" Batch size = %d", args.predict_batch_size)
logger.info(" Writing predictions to file [{}]".format(args.predict_filename));
eval_data = create_dataset(eval_features)
prediction = predict_model(model, eval_data, label_list, args.predict_batch_size, device)
output_eval_file = os.path.join(args.output_dir, args.predict_filename)
with open(output_eval_file, "w") as writer:
logger.info("***** Writing results to file *****")
for (i,sent) in enumerate(eval_sentences):
for (j,tok) in enumerate(sent[0]):
pred="O"
if j>=len(prediction[i]):
logger.info("WARNING: Not enough tokens predicted in sentence {} ({}/{})".format(i,len(prediction[i]),len(sent[0])))
else:
pred=prediction[i][j]
if args.predict_format == "ann_only":
writer.write("{}\n".format(pred))
else:
writer.write("{} {}\n".format(tok,pred))
writer.write("\n")
logger.info("Done.")
if args.server:
from flask import Flask,request,jsonify
app = Flask(__name__)
@app.route("/api/v1.0/ner", methods=["GET","POST"])
def ner():
if request.method=="POST":
textf=request.files['text']
text=textf.read().decode('utf-8',errors='ignore')
else:
text=request.args.get("text")
text=text.encode("latin1",errors='ignore').decode("utf8",errors='ignore')
data = processor.get_deploy_examples_from_text(text,args.lang)
doc=data["doc"]
eval_examples = data["examples"]
eval_features = convert_examples_to_features(
eval_examples, label_list, args.max_seq_length, model.encode_word)
eval_data = create_dataset(eval_features)
prediction = predict_model(model, eval_data, label_list, args.predict_batch_size, device)
#print(prediction)
s=-1
t=-1
currentTok=None
lastTok=None
currentType=None
r=[]
eid=1
for token in doc:
if token.is_sent_start:
s+=1
t=-1
t+=1
if s<len(prediction) and t<len(prediction[s]):
pred=prediction[s][t]
else:
pred="O"
if pred.startswith("B-") or pred=="O" or t==0:
if currentTok!=None:
#r+="T{0}\t{1} {2} {3}\t{4}\n".format(eid,currentType,currentTok.idx,lastTok.idx+len(lastTok.text),text[currentTok.idx:lastTok.idx+len(lastTok.text)])
r.append({"id":"T%d"%(eid),"type":currentType,"start":currentTok.idx,"end":lastTok.idx+len(lastTok.text),"text":text[currentTok.idx:lastTok.idx+len(lastTok.text)]})
currentTok=None
currentType=None
eid+=1
if pred.startswith("B-"):
currentTok=token
currentType=pred[2:]
lastTok=token
return jsonify({'status':'OK','result':r})
app.run(threaded=False, debug=False, host="127.0.0.1", port=args.server_port)
if __name__ == "__main__":
main()
| 40.932773
| 188
| 0.603572
|
4a188791d3f1a31a1ec569e93b0430a5939e12b0
| 426
|
py
|
Python
|
pypro/videos/models/model_video.py
|
rodrigoddc/django-advanced-course
|
098507d8111f38f8a6b914575e50861538913f6c
|
[
"MIT"
] | 1
|
2020-06-30T01:30:31.000Z
|
2020-06-30T01:30:31.000Z
|
pypro/videos/models/model_video.py
|
rodrigoddc/django-advanced-course
|
098507d8111f38f8a6b914575e50861538913f6c
|
[
"MIT"
] | 102
|
2020-06-30T01:03:27.000Z
|
2021-09-22T19:26:44.000Z
|
pypro/videos/models/model_video.py
|
rodrigoddc/django-advanced-course
|
098507d8111f38f8a6b914575e50861538913f6c
|
[
"MIT"
] | null | null | null |
from django.db import models
from django.urls import reverse
class Video(models.Model):
title = models.CharField(max_length=32)
slug = models.SlugField(max_length=32)
vimeo_id = models.CharField(max_length=32)
created_at = models.DateField(auto_now_add=True)
def __str__(self):
return self.title
def get_absolute_url(self):
return reverse('videos:video_render', args=(self.slug, ))
| 26.625
| 65
| 0.71831
|
4a1887fb4402786ecffaf98e37ce4f734fcfeddc
| 743
|
py
|
Python
|
rozpoznawaczek/__init__.py
|
GrosQuildu/agh_nlp_diminutives_recognition
|
28ba8ec92db256dde0adfcee40e03271e76862fd
|
[
"MIT"
] | null | null | null |
rozpoznawaczek/__init__.py
|
GrosQuildu/agh_nlp_diminutives_recognition
|
28ba8ec92db256dde0adfcee40e03271e76862fd
|
[
"MIT"
] | null | null | null |
rozpoznawaczek/__init__.py
|
GrosQuildu/agh_nlp_diminutives_recognition
|
28ba8ec92db256dde0adfcee40e03271e76862fd
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Tool for recognizing diminutives in a text.
Example:
L.setLevel('INFO')
text = 'A potem gorzki los tych niewiniątek\nWiędnąć na włosach i sercach dziewczątek;'
print([text[start:end] for start, end in find_diminutives(text)])
Authors:
* Izabela Stechnij
* Dominik Sepioło
* Paweł Płatek
"""
from rozpoznawaczek.rozpoznawaczek import (Interpretation, IsDiminutiveFunc, L,
diminutive_sets, find_diminutives,
has_diminutive_suffix, main)
__all__ = ['find_diminutives', 'main', 'L', 'Interpretation', 'IsDiminutiveFunc', 'has_diminutive_suffix',
'diminutive_sets']
| 33.772727
| 106
| 0.628533
|