Dataset schema:

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
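For orientation, a minimal sketch of how one might load a shard of this dataset and filter on the quality-signal columns defined above. The shard filename is hypothetical and the thresholds are illustrative only, not the dataset's official cuts:

```python
import pandas as pd

# Hypothetical shard name; substitute your local parquet file.
df = pd.read_parquet("data_shard_0000.parquet")

# Keep Python files with reasonable line lengths and little duplicated text,
# mirroring typical quality-signal cuts over the schema columns above.
mask = (
    (df["lang"] == "Python")
    & (df["max_line_length"] <= 1000)
    & (df["avg_line_length"] <= 100)
    & (df["alphanum_fraction"] >= 0.25)
    & (df["qsc_code_frac_chars_dupe_5grams_quality_signal"] <= 0.5)
)
print(f"{mask.sum()} of {len(df)} rows pass the filter")
print(df.loc[mask, ["max_stars_repo_name", "size", "max_stars_count"]].head())
```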
hexsha: a3ee8470edc038ce5afdd46d3446170b34e86c08 | size: 6,596 | ext: py | lang: Python
max_stars: hoomd/communicator.py @ EdwardZX/hoomd-blue (c87ac3f136534e8a80359a2faceeb730f445da21), licenses ["BSD-3-Clause"], count 204, events 2018-11-26T21:15:14.000Z to 2022-03-31T17:17:21.000Z
max_issues: hoomd/communicator.py @ EdwardZX/hoomd-blue (c87ac3f136534e8a80359a2faceeb730f445da21), licenses ["BSD-3-Clause"], count 769, events 2019-02-15T08:58:04.000Z to 2022-03-31T17:36:48.000Z
max_forks: hoomd/communicator.py @ YMWani/hoomd-blue (e574b49f0c2c6df3a1eac9cbb86fe612f1ee4c18), licenses ["BSD-3-Clause"], count 91, events 2018-10-04T21:07:46.000Z to 2022-03-26T02:44:11.000Z
content:
# Copyright (c) 2009-2021 The Regents of the University of Michigan
# This file is part of the HOOMD-blue project, released under the BSD 3-Clause
# License.
"""MPI communicator."""
from hoomd import _hoomd
import hoomd
import contextlib
class Communicator(object):
"""MPI communicator.
Args:
mpi_comm: Accepts an mpi4py communicator. Use this argument to perform
many independent hoomd simulations where you communicate between those
simulations using mpi4py.
ranks_per_partition (int): (MPI) Number of ranks to include in a
partition.
`Communicator` initializes MPI communications for a `hoomd.Simulation`. To
use MPI, launch your Python script with an MPI launcher (e.g. ``mpirun`` or
``mpiexec``). By default, `Communicator` uses all ranks provided by the
launcher ``num_launch_ranks`` for a single `hoomd.Simulation` object which
decomposes the state onto that many domains.
Set ``ranks_per_partition`` to an integer to partition launched ranks into
``num_launch_ranks / ranks_per_partition`` communicators, each with their
own `partition` index. Use this to perform many simulations in parallel, for
example by using `partition` as an index into an array of state points to
execute.
"""
def __init__(self, mpi_comm=None, ranks_per_partition=None):
# check ranks_per_partition
if ranks_per_partition is not None:
if not hoomd.version.mpi_enabled:
raise RuntimeError(
"The ranks_per_partition option is only available in MPI.\n"
)
mpi_available = hoomd.version.mpi_enabled
self.cpp_mpi_conf = None
# create the specified configuration
if mpi_comm is None:
self.cpp_mpi_conf = _hoomd.MPIConfiguration()
else:
if not mpi_available:
raise RuntimeError("mpi_comm is not supported in serial builds")
handled = False
# pass in pointer to MPI_Comm object provided by mpi4py
try:
import mpi4py
if isinstance(mpi_comm, mpi4py.MPI.Comm):
addr = mpi4py.MPI._addressof(mpi_comm)
self.cpp_mpi_conf = \
_hoomd.MPIConfiguration._make_mpi_conf_mpi_comm(addr)
handled = True
except ImportError:
# silently ignore when mpi4py is missing
pass
# undocumented case: handle plain integers as pointers to MPI_Comm
# objects
if not handled and isinstance(mpi_comm, int):
self.cpp_mpi_conf = \
_hoomd.MPIConfiguration._make_mpi_conf_mpi_comm(mpi_comm)
handled = True
if not handled:
raise RuntimeError(
"Invalid mpi_comm object: {}".format(mpi_comm))
if ranks_per_partition is not None:
# check validity
if (self.cpp_mpi_conf.getNRanksGlobal() % ranks_per_partition):
raise RuntimeError('Total number of ranks is not a multiple of '
'ranks_per_partition.')
# split the communicator into partitions
self.cpp_mpi_conf.splitPartitions(ranks_per_partition)
@property
def num_ranks(self):
"""int: The number of ranks in this partition.
When initialized with ``ranks_per_partition=None``, `num_ranks` is equal
to the ``num_launch_ranks`` set by the MPI launcher. When using
partitions, `num_ranks` is equal to ``ranks_per_partition``.
Note:
Returns 1 in builds with ENABLE_MPI=off.
"""
if hoomd.version.mpi_enabled:
return self.cpp_mpi_conf.getNRanks()
else:
return 1
@property
def rank(self):
"""int: The current rank within the partition.
Note:
Returns 0 in builds with ENABLE_MPI=off.
"""
if hoomd.version.mpi_enabled:
return self.cpp_mpi_conf.getRank()
else:
return 0
@property
def num_partitions(self):
"""int: The number of partitions in this execution.
Create partitions with the ``ranks_per_partition`` argument on
initialization. Then, the number of partitions is
``num_launch_ranks / ranks_per_partition``.
Note:
Returns 1 in builds with ENABLE_MPI=off.
"""
if hoomd.version.mpi_enabled:
return self.cpp_mpi_conf.getNPartitions()
else:
return 1
@property
def partition(self):
"""int: The current partition.
Note:
Returns 0 in builds with ENABLE_MPI=off.
"""
if hoomd.version.mpi_enabled:
return self.cpp_mpi_conf.getPartition()
else:
return 0
def barrier_all(self):
"""Perform a MPI barrier synchronization across all ranks.
Note:
Does nothing in builds with ENABLE_MPI=off.
"""
if hoomd.version.mpi_enabled:
_hoomd.mpi_barrier_world()
def barrier(self):
"""Perform a barrier synchronization across all ranks in the partition.
Note:
Does nothing in builds with ENABLE_MPI=off.
"""
if hoomd.version.mpi_enabled:
self.cpp_mpi_conf.barrier()
@contextlib.contextmanager
def localize_abort(self):
"""Localize MPI_Abort to this partition.
HOOMD calls ``MPI_Abort`` to tear down all running MPI processes
whenever there is an uncaught exception. By default, this will abort the
entire MPI execution. When using partitions, an uncaught exception on
one partition will therefore abort all of them.
Use the return value of :py:meth:`localize_abort()` as a context manager
to tell HOOMD that all operations within the context will use only
that MPI communicator so that an uncaught exception in one partition
will only abort that partition and leave the others running.
"""
global _current_communicator
prev = _current_communicator
_current_communicator = self
yield None
_current_communicator = prev
# store the "current" communicator to be used for MPI_Abort calls. This defaults
# to the world communicator, but users can opt in to a more specific
# communicator using the Device.localize_abort context manager
_current_communicator = Communicator()
signals: 34.715789 | 80 | 0.632656 | 805 | 6,596 | 5.021118 | 0.278261 | 0.025977 | 0.063088 | 0.0381 | 0.241465 | 0.195448 | 0.175656 | 0.161801 | 0.149431 | 0.149431 | 0 | 0.005248 | 0.306701 | 6,596 | 189 | 81 | 34.899471 | 0.878635 | 0.475591 | 0 | 0.350649 | 0 | 0 | 0.062092 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103896 | false | 0.012987 | 0.064935 | 0 | 0.285714 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
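The `Communicator` docstring above describes running many independent simulations in parallel via `ranks_per_partition`. A minimal usage sketch, assuming a HOOMD-blue v3 installation and an MPI launch; the rank counts and state points are made up:

```python
# Hypothetical driver script, launched as:  mpirun -n 8 python run.py
# Each 2-rank partition runs an independent simulation at its own kT.
import hoomd

communicator = hoomd.communicator.Communicator(ranks_per_partition=2)
state_points = [1.0, 1.5, 2.0, 2.5]        # one entry per partition
kT = state_points[communicator.partition]  # partition index selects a state point
device = hoomd.device.CPU(communicator=communicator)
simulation = hoomd.Simulation(device=device, seed=1)
```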
hexsha: a3f15add28c75465b292cc3d301905a5d66f9500 | size: 1,169 | ext: py | lang: Python
max_stars / max_issues / max_forks: nlp/handler.py @ rgschmitz1/tcss702 (b0fdd7b6107401dc297b467c9e63773dfb8fd487), licenses ["MIT"], count null, events null to null
content:
from minio import Minio
import json
import os
from .Inspector import Inspector
from .topic_model import topic_model
#def handle(event):
def handle(event, context):
with open("/var/openfaas/secrets/minio-access-key") as f:
access_key = f.read()
with open("/var/openfaas/secrets/minio-secret-key") as f:
secret_key = f.read()
mc = Minio(os.environ['minio_hostname'],
access_key=access_key,
secret_key=secret_key,
secure=False)
tm = topic_model(mc)
# Collect data
inspector = Inspector()
inspector.inspectAll()
# Add custom message and finish the function
# if "startWallClock" in event:
# inspector.addAttribute("startWallClock", event['startWallClock'])
body = json.loads(event.body)
print(body['fn'], flush=True)
fn = {"p": tm.preprocess,
"t": tm.train,
"q": tm.query}
fn[body['fn']]()
inspector.inspectAllDeltas()
# Include functionName
inspector.addAttribute("functionName", fn[body['fn']].__name__)
iret = inspector.finish()
ret = {
"status": 200,
"body": iret
}
return ret
signals: 24.87234 | 74 | 0.622754 | 139 | 1,169 | 5.136691 | 0.460432 | 0.05042 | 0.039216 | 0.053221 | 0.086835 | 0.086835 | 0 | 0 | 0 | 0 | 0 | 0.003421 | 0.249786 | 1,169 | 46 | 75 | 25.413043 | 0.810718 | 0.171942 | 0 | 0 | 0 | 0 | 0.12578 | 0.079002 | 0 | 0 | 0 | 0 | 0 | 1 | 0.032258 | false | 0 | 0.16129 | 0 | 0.225806 | 0.032258 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
hexsha: a3f1d6f2fedc4710e7669c09fd2ad1b4f7d2b866 | size: 803 | ext: py | lang: Python
max_stars / max_issues / max_forks: src/pve_exporter/cli.py @ jmangs/prometheus-pve-exporter (2947a1247d854791114eb5ed348a250739540708), licenses ["Apache-2.0"], count null, events null to null
content:
"""
Proxmox VE exporter for the Prometheus monitoring system.
"""
import sys
from argparse import ArgumentParser
from pve_exporter.http import start_http_server
def main(args=None):
"""
Main entry point.
"""
parser = ArgumentParser()
parser.add_argument('config', nargs='?', default='pve.yml',
help='Path to configuration file (pve.yml)')
parser.add_argument('port', nargs='?', type=int, default=9221,
help='Port on which the exporter is listening (9221)')
parser.add_argument('address', nargs='?', default='',
help='Address to which the exporter will bind')
params = parser.parse_args(args)  # argparse falls back to sys.argv[1:] when args is None
start_http_server(params.config, params.port, params.address)
signals: 32.12 | 78 | 0.646326 | 100 | 803 | 5.1 | 0.52 | 0.052941 | 0.1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.014563 | 0.230386 | 803 | 24 | 79 | 33.458333 | 0.81068 | 0.0934 | 0 | 0 | 0 | 0 | 0.215603 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076923 | false | 0 | 0.230769 | 0 | 0.307692 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
hexsha: a3f41aa5108b3692f20a54704fcb143543339d31 | size: 12,692 | ext: py | lang: Python
max_stars: workers/repo_info_worker/repo_info_worker.py @ vinodkahuja/augur (a7688af262c2f971767962d4a20110daf4b1179a), licenses ["MIT"], count 2, events 2020-08-27T17:34:38.000Z to 2020-11-05T20:31:35.000Z
max_issues: workers/repo_info_worker/repo_info_worker.py @ BenjaminChilson/augur (8346be0b757c907e9b67ba870a9ace32a1b87b11), licenses ["MIT"], count 9, events 2021-04-16T23:42:53.000Z to 2021-05-04T04:26:55.000Z
max_forks: workers/repo_info_worker/repo_info_worker.py @ BenjaminChilson/augur (8346be0b757c907e9b67ba870a9ace32a1b87b11), licenses ["MIT"], count 7, events 2019-03-25T13:26:42.000Z to 2020-12-30T18:43:42.000Z
content:
#SPDX-License-Identifier: MIT
import logging, os, sys, time, requests, json
from datetime import datetime
from multiprocessing import Process, Queue
import pandas as pd
import sqlalchemy as s
from workers.worker_base import Worker
# NOTE: This worker primarily inserts rows into the REPO_INFO table, which serves the primary purposes of
# 1. Displaying discrete metadata like "number of forks" and how they change over time
# 2. Validating other workers, like those related to pull requests, issues, and commits. Our totals should be at or very near the totals in the repo_info table.
# This table also updates the REPO table in 2 cases:
# 1. Recognizing when a repository is a forked repository by updating the "forked_from" field and
# 2. Recognizing when a repository is archived, and recording the date we observed the change in status.
class RepoInfoWorker(Worker):
def __init__(self, config={}):
worker_type = "repo_info_worker"
# Define what this worker can be given and know how to interpret
given = [['github_url']]
models = ['repo_info']
# Define the tables needed to insert, update, or delete on
data_tables = ['repo_info', 'repo']
operations_tables = ['worker_history', 'worker_job']
# Run the general worker initialization
super().__init__(worker_type, config, given, models, data_tables, operations_tables)
# Define data collection info
self.tool_source = 'Repo Info Worker'
self.tool_version = '1.0.0'
self.data_source = 'GitHub API'
def repo_info_model(self, task, repo_id):
github_url = task['given']['github_url']
self.logger.info("Beginning filling the repo_info model for repo: " + github_url + "\n")
owner, repo = self.get_owner_repo(github_url)
url = 'https://api.github.com/graphql'
query = """
{
repository(owner:"%s", name:"%s"){
updatedAt
hasIssuesEnabled
issues(states:OPEN) {
totalCount
}
hasWikiEnabled
forkCount
defaultBranchRef {
name
}
watchers {
totalCount
}
id
licenseInfo {
name
url
}
stargazers {
totalCount
}
codeOfConduct {
name
url
}
issue_count: issues {
totalCount
}
issues_closed: issues(states:CLOSED) {
totalCount
}
pr_count: pullRequests {
totalCount
}
pr_open: pullRequests(states: OPEN) {
totalCount
}
pr_closed: pullRequests(states: CLOSED) {
totalCount
}
pr_merged: pullRequests(states: MERGED) {
totalCount
}
ref(qualifiedName: "master") {
target {
... on Commit {
history(first: 0){
totalCount
}
}
}
}
}
}
""" % (owner, repo)
# Hit the graphql endpoint and retry 3 times in case of failure
num_attempts = 0
success = False
data = None
while num_attempts < 3:
self.logger.info("Hitting endpoint: {} ...\n".format(url))
r = requests.post(url, json={'query': query}, headers=self.headers)
self.update_gh_rate_limit(r)
try:
data = r.json()
except Exception:
data = json.loads(json.dumps(r.text))
if 'errors' in data:
self.logger.info("Error!: {}".format(data['errors']))
if data['errors'][0]['message'] == 'API rate limit exceeded':
self.update_gh_rate_limit(r)
continue
if 'data' in data:
success = True
data = data['data']['repository']
break
else:
self.logger.info("Request returned a non-data dict: {}\n".format(data))
if data['message'] == 'Not Found':
self.logger.info("Github repo was not found or does not exist for endpoint: {}\n".format(url))
break
if data['message'] == 'You have triggered an abuse detection mechanism. Please wait a few minutes before you try again.':
self.update_gh_rate_limit(r, temporarily_disable=True)
continue
if data['message'] == 'Bad credentials':
self.update_gh_rate_limit(r, bad_credentials=True)
continue
num_attempts += 1
if not success:
self.logger.error('Cannot hit endpoint after 3 attempts. \"Completing\" task.\n')
self.register_task_completion(self.task, repo_id, 'repo_info')
return
# Just checking that the data is accessible (would not be if repo no longer exists)
try:
data['updatedAt']
except Exception as e:
self.logger.error('Cannot access repo_info data: {}\nError: {}. \"Completing\" task.'.format(data, e))
self.register_task_completion(self.task, repo_id, 'repo_info')
return
# Get committers count info that requires separate endpoint
committers_count = self.query_committers_count(owner, repo)
# Put all data together in format of the table
self.logger.info(f'Inserting repo info for repo with id:{repo_id}, owner:{owner}, name:{repo}\n')
rep_inf = {
'repo_id': repo_id,
'last_updated': data['updatedAt'] if 'updatedAt' in data else None,
'issues_enabled': data['hasIssuesEnabled'] if 'hasIssuesEnabled' in data else None,
'open_issues': data['issues']['totalCount'] if data['issues'] else None,
'pull_requests_enabled': None,
'wiki_enabled': data['hasWikiEnabled'] if 'hasWikiEnabled' in data else None,
'pages_enabled': None,
'fork_count': data['forkCount'] if 'forkCount' in data else None,
'default_branch': data['defaultBranchRef']['name'] if data['defaultBranchRef'] else None,
'watchers_count': data['watchers']['totalCount'] if data['watchers'] else None,
'UUID': None,
'license': data['licenseInfo']['name'] if data['licenseInfo'] else None,
'stars_count': data['stargazers']['totalCount'] if data['stargazers'] else None,
'committers_count': committers_count,
'issue_contributors_count': None,
'changelog_file': None,
'contributing_file': None,
'license_file': data['licenseInfo']['url'] if data['licenseInfo'] else None,
'code_of_conduct_file': data['codeOfConduct']['url'] if data['codeOfConduct'] else None,
'security_issue_file': None,
'security_audit_file': None,
'status': None,
'keywords': None,
'commit_count': data['ref']['target']['history']['totalCount'] if data['ref'] else None,
'issues_count': data['issue_count']['totalCount'] if data['issue_count'] else None,
'issues_closed': data['issues_closed']['totalCount'] if data['issues_closed'] else None,
'pull_request_count': data['pr_count']['totalCount'] if data['pr_count'] else None,
'pull_requests_open': data['pr_open']['totalCount'] if data['pr_open'] else None,
'pull_requests_closed': data['pr_closed']['totalCount'] if data['pr_closed'] else None,
'pull_requests_merged': data['pr_merged']['totalCount'] if data['pr_merged'] else None,
'tool_source': self.tool_source,
'tool_version': self.tool_version,
'data_source': self.data_source
}
result = self.db.execute(self.repo_info_table.insert().values(rep_inf))
self.logger.info(f"Primary Key inserted into repo_info table: {result.inserted_primary_key}\n")
self.results_counter += 1
# Note that the addition of information about where a repository may be forked from, and whether a repository is archived, updates the `repo` table, not the `repo_info` table.
forked = self.is_forked(owner, repo)
archived = self.is_archived(owner, repo)
archived_date_collected = None
if archived is not False:
archived_date_collected = archived
archived = 1
else:
archived = 0
rep_additional_data = {
'forked_from': forked,
'repo_archived': archived,
'repo_archived_date_collected': archived_date_collected
}
result = self.db.execute(self.repo_table.update().where(
self.repo_table.c.repo_id==repo_id).values(rep_additional_data))
self.logger.info(f"Inserted info for {owner}/{repo}\n")
# Register this task as completed
self.register_task_completion(self.task, repo_id, "repo_info")
def query_committers_count(self, owner, repo):
self.logger.info('Querying committers count\n')
url = f'https://api.github.com/repos/{owner}/{repo}/contributors?per_page=100'
committers = 0
try:
while True:
r = requests.get(url, headers=self.headers)
self.update_gh_rate_limit(r)
committers += len(r.json())
if 'next' not in r.links:
break
else:
url = r.links['next']['url']
except Exception:
self.logger.exception('An error occurred while querying contributor count\n')
return committers
def is_forked(self, owner, repo): #/repos/:owner/:repo parent
self.logger.info('Querying parent info to verify if the repo is forked\n')
url = f'https://api.github.com/repos/{owner}/{repo}'
r = requests.get(url, headers=self.headers)
self.update_gh_rate_limit(r)
data = self.get_repo_data(url, r)
if 'fork' in data:
if 'parent' in data:
return data['parent']['full_name']
return 'Parent not available'
return False
def is_archived(self, owner, repo):
self.logger.info('Querying committers count\n')
url = f'https://api.github.com/repos/{owner}/{repo}'
r = requests.get(url, headers=self.headers)
self.update_gh_rate_limit(r)
data = self.get_repo_data(url, r)
if 'archived' in data:
if data['archived']:
if 'updated_at' in data:
return data['updated_at']
return 'Date not available'
return False
return False
def get_repo_data(self, url, response):
success = False
try:
data = response.json()
except Exception:
data = json.loads(json.dumps(response.text))
if 'errors' in data:
self.logger.info("Error!: {}".format(data['errors']))
if data['errors'][0]['message'] == 'API rate limit exceeded':
self.update_gh_rate_limit(response)
if 'id' in data:
success = True
else:
self.logger.info("Request returned a non-data dict: {}\n".format(data))
if data['message'] == 'Not Found':
self.logger.info("Github repo was not found or does not exist for endpoint: {}\n".format(url))
if data['message'] == 'You have triggered an abuse detection mechanism. Please wait a few minutes before you try again.':
self.update_gh_rate_limit(response, temporarily_disable=True)  # fixed: 'r' was undefined here
if data['message'] == 'Bad credentials':
self.update_gh_rate_limit(response, bad_credentials=True)  # fixed: 'r' was undefined here
if not success:
self.register_task_failure(self.task, repo_id, "Failed to hit endpoint: {}".format(url))
return data
signals: 41.75 | 183 | 0.554365 | 1,405 | 12,692 | 4.864769 | 0.217082 | 0.021068 | 0.028676 | 0.023409 | 0.259254 | 0.243745 | 0.235699 | 0.226335 | 0.226335 | 0.220483 | 0 | 0.002766 | 0.344863 | 12,692 | 303 | 184 | 41.887789 | 0.819242 | 0.102112 | 0 | 0.322314 | 0 | 0 | 0.395764 | 0.01292 | 0 | 0 | 0 | 0 | 0 | 1 | 0.024793 | false | 0 | 0.024793 | 0 | 0.099174 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
hexsha: a3f6015f3b9c537d076933e65004a8315446ca82 | size: 11,482 | ext: py | lang: Python
max_stars: src/main/python/main.py @ SarthakJariwala/Shockley-Queisser-Calculator (5f9cfd4c97b8141e8b4ee8d15fa5f3cccfe25b7e), licenses ["MIT"], count 1, events 2020-04-08T06:33:47.000Z to 2020-04-08T06:33:47.000Z
max_issues: src/main/python/main.py @ SarthakJariwala/Schokley-Queisser-Calculator (5f9cfd4c97b8141e8b4ee8d15fa5f3cccfe25b7e), licenses ["MIT"], count null, events null to null
max_forks: src/main/python/main.py @ SarthakJariwala/Schokley-Queisser-Calculator (5f9cfd4c97b8141e8b4ee8d15fa5f3cccfe25b7e), licenses ["MIT"], count 2, events 2020-05-31T02:57:55.000Z to 2020-07-30T13:24:22.000Z
content:
from fbs_runtime.application_context.PyQt5 import ApplicationContext, cached_property
from fbs_runtime.platform import is_windows, is_mac
# system imports
import sys
# module imports
from PyQt5 import uic, QtWidgets
from PyQt5.QtWidgets import QMessageBox
import numpy as np
import matplotlib.pyplot as plt
import matplotlib
import scipy.constants as constants
from scipy.integrate import simps, quad
from scipy.interpolate import splrep, splint
from scipy.optimize import fmin
class AppContext(ApplicationContext):
def run(self):
self.main_window.show()
return self.app.exec_()
def get_design(self):
qtCreatorFile = self.get_resource("SQ_GUI.ui")
return qtCreatorFile
def get_file(self):
astmg_file = self.get_resource("ASTMG173.csv")
return astmg_file
@cached_property
def main_window(self):
return MainWindow(self.get_design(), self.get_file())
if is_windows():
matplotlib.use('Qt5Agg')
elif is_mac():
matplotlib.use('macosx')
class MainWindow(QtWidgets.QMainWindow):
def __init__(self, uiFile, astmg173_file):
super(MainWindow, self).__init__()
#Create Main Window
self.ui = uic.loadUi(uiFile, self)
#self.ui = WindowTemplate()
#self.ui.setupUi(self)
#Connect PushButtons to Functions etc
self.ui.CalcualteSQ_pushButton.clicked.connect(self.calculate_SQ)
self.ui.load_pushButton.clicked.connect(self.load_SMARTS_spectrum)
self.ui.save_pushButton.clicked.connect(self.save_bandgap_array)
#start app with checked "plot j-v curve"
self.ui.plot_checkBox.setChecked(True)
self.astmg173_file = astmg173_file
self.out_array = None
self.show()
def load_SMARTS_spectrum(self):
filename = QtWidgets.QFileDialog.getOpenFileName(self)
try:
self.SMARTS = np.genfromtxt(filename[0], skip_header=1)
self.ui.load_checkBox.setChecked(False)
except Exception as e:
QMessageBox.information(
self, None,
str(e), QMessageBox.Ok
)
def calculate_SQ(self):
h = constants.physical_constants['Planck constant'][0] # units of J*s
h_ev = constants.physical_constants['Planck constant in eV s'][0]
c_nm = (constants.physical_constants['speed of light in vacuum'][0]) * 1e9
c = (constants.physical_constants['speed of light in vacuum'][0])
e_charge = constants.physical_constants['elementary charge'][0]
kb_ev = constants.physical_constants['Boltzmann constant in eV/K'][0]
"""User settings"""
Tcell = self.ui.temp_spinBox.value() #temperature of solar cell in degrees K
bandgap = self.ui.bandgap_doubleSpinBox.value() #enter bandgap in eV
#self.ui.textBrowser.append(str('Tcell = %.3f' %(Tcell)))
plot_jv = self.ui.plot_checkBox.isChecked() #'True' if you want to plot the SQ JV curve for "bandgap"
plot_bandgap_array = self.ui.calc_SQ_array_checkBox.isChecked() #'True' if you want to plot SQ parameters for an array of bandgaps
# starting from "mbandgap_array_min" to "bandgap_array_max"
# with number of points "num_points_bandgap_array"
# (see below)
#'False' if you just want SQ data for one bandgap (faster)
bandgap_array_min = self.ui.bandgap_min_doubleSpinBox.value() #in eV
bandgap_array_max = self.ui.bandgap_max_doubleSpinBox.value() # in eV
num_points_bandgap_array = self.ui.no_points_spinBox.value()
"""Programming below"""
bandgap_array = np.linspace(bandgap_array_min, bandgap_array_max, num_points_bandgap_array)
#First convert AM1.5 spectrum from W/m^2/nm to W/m^2/ev
if self.ui.load_checkBox.isChecked():
astmg173 = np.loadtxt(self.astmg173_file, delimiter = ',', skiprows = 2)
am15_wav = np.copy(astmg173[:,0]) #AM1.5 wavelength axis in nm
am15 = np.copy(astmg173[:,2]) #AM1.5 in units of W/m^2/nm = J/s*m^2/nm
else:
try:
astmg173 = self.SMARTS
am15_wav = np.copy(astmg173[:,0]) #AM1.5 wavelength axis in nm
am15 = np.copy(astmg173[:,1]) #AM1.5 in units of W/m^2/nm = J/s*m^2/nm
except Exception:
QMessageBox.information(
self, None,
"No valid spectrum file found!\n\n"+
"Load a valid file or check the 'Use ASTMG173'box"
)
return
total_power_nm = simps(am15, x = am15_wav) #Integrate over nm to check that total power density = 1000 W/m^2
am15_ev = h_ev * (c_nm) / (am15_wav )
am15_wats_ev = am15 * (h_ev * c_nm/ ((am15_ev) ** 2.0))
am15_ev_flip = am15_ev[::-1]
am15_wats_ev_flip = am15_wats_ev[::-1]
total_power_ev = simps(am15_wats_ev_flip, x = am15_ev_flip) #Integrate over eV to check that total power density = 1000 W/m^2
am15_photons_ev = am15_wats_ev_flip / (am15_ev_flip * e_charge)
am15_photons_nm = am15 / (am15_ev * e_charge)
total_photonflux_ev = simps(am15_photons_ev, x = am15_ev_flip)
total_photonflux_nm = simps(am15_photons_nm , x = am15_wav)
total_photonflux_ev_splrep = splrep(am15_ev_flip, am15_photons_ev)
emin = am15_ev_flip[0]
emax = am15_ev_flip[len(am15_ev_flip) - 1]
def solar_photons_above_gap(Egap): #units of photons / sec *m^2
return splint(Egap, emax,total_photonflux_ev_splrep)
def RR0(Egap):
integrand = lambda eV : eV ** 2.0 / (np.exp(eV / (kb_ev * Tcell)) - 1)
integral = quad(integrand, Egap, emax, full_output=1)[0]
return ((2.0 * np.pi / ((c ** 2.0) * (h_ev ** 3.0)))) * integral
def current_density(V, Egap): #to get from units of amps / m^2 to mA/ cm^2 ---multiply by 1000 to convert to mA ---- multiply by (0.01 ^2) to convert to cm^2
cur_dens = e_charge * (solar_photons_above_gap(Egap) - RR0(Egap) * np.exp( V / (kb_ev * Tcell)))
return cur_dens * 1000 * (0.01 ** 2.0)
def JSC(Egap):
return current_density(0, Egap)
def VOC(Egap):
return (kb_ev * Tcell) * np.log(solar_photons_above_gap(Egap) / RR0(Egap))
def fmax(func_to_maximize, initial_guess=0):
"""return the x that maximizes func_to_maximize(x)"""
func_to_minimize = lambda x : -func_to_maximize(x)
return fmin(func_to_minimize, initial_guess, disp=False)[0]
def V_mpp_Jmpp_maxpower_maxeff_ff(Egap):
vmpp = fmax(lambda V : V * current_density(V, Egap))
jmpp = current_density(vmpp, Egap)
maxpower = vmpp * jmpp
max_eff = maxpower / (total_power_ev * 1000 * (0.01 ** 2.0))
jsc_return = JSC(Egap)
voc_return = VOC(Egap)
ff = maxpower / (jsc_return * voc_return)
return [vmpp, jmpp, maxpower, max_eff, ff, jsc_return, voc_return]
maxpcemeta = V_mpp_Jmpp_maxpower_maxeff_ff(bandgap)
self.ui.textBrowser.append(str('For Bandgap = %.3f eV, TCell = %.3f K:\nJSC = %.3f mA/cm^2\nVOC = %.3f V\nFF = %.3f\nPCE = %.3f' % (bandgap, Tcell, maxpcemeta[5], maxpcemeta[6],maxpcemeta[4], maxpcemeta[3] * 100)))
if plot_bandgap_array == True:
pce_array = np.empty_like(bandgap_array)
ff_array = np.empty_like(bandgap_array)
voc_array = np.empty_like(bandgap_array)
jsc_array = np.empty_like(bandgap_array)
for i in range(len(bandgap_array)):
metadata = V_mpp_Jmpp_maxpower_maxeff_ff(bandgap_array[i])
pce_array[i] = metadata[3]
ff_array[i] = metadata[4]
voc_array[i] = metadata[6]
jsc_array[i] = metadata[5]
self.out_array = np.array((bandgap_array,pce_array,ff_array, voc_array,jsc_array)).T
plt.figure(figsize=(5,4))
plt.title('Cell Temperature = %.2f K' %(Tcell))
plt.xlim(bandgap_array[0], bandgap_array[len(bandgap_array) - 1])
plt.ylabel('PCE (%)')
plt.xlabel('Bandgap (eV)')
plt.plot(bandgap_array, pce_array * 100)
plt.tight_layout()
plt.show()
plt.figure(figsize=(5,4))
plt.title('Cell Temperature = %.2f K' %(Tcell))
plt.ylim(0, 1)
plt.xlim(bandgap_array[0], bandgap_array[len(bandgap_array) - 1])
plt.ylabel('Fill Factor')
plt.xlabel('Bandgap (eV)')
plt.plot(bandgap_array, ff_array)
plt.tight_layout()
plt.show()
plt.figure(figsize=(5,4))
plt.title('Cell Temperature = %.2f K' %(Tcell))
plt.xlim(bandgap_array[0], bandgap_array[len(bandgap_array) - 1])
plt.ylabel('Jsc (mA/cm$^2$)')
plt.xlabel('Bandgap (eV)')
plt.plot(bandgap_array, jsc_array)
plt.tight_layout()
plt.show()
plt.figure(figsize=(5,4))
plt.title('Cell Temperature = %.2f K' %(Tcell))
plt.xlim(bandgap_array[0], bandgap_array[len(bandgap_array) - 1])
plt.ylabel('Voc (V)')
plt.xlabel('Bandgap (eV)')
plt.plot(bandgap_array, voc_array, label = 'S-Q Voc')
plt.plot(bandgap_array, bandgap_array, '--', label = 'Bandgap')
plt.legend(loc = 'best')
plt.tight_layout()
plt.show()
self.ui.textBrowser.append('--')
else:
self.ui.textBrowser.append('--')
def JV_curve(Egap):
volt_array = np.linspace(0, VOC(Egap), 200)
j_array = np.empty_like(volt_array)
for i in range(len(volt_array)):
j_array[i] = current_density(volt_array[i], Egap)
return [volt_array, j_array]
if plot_jv == True:
jv_meta = JV_curve(bandgap)
v_array = jv_meta[0]
jv_array = jv_meta[1]
plt.figure(figsize=(5,4))
plt.ylabel('Current Density (mA/cm$^2$)')
plt.xlabel('Voltage (V)')
plt.plot(v_array, -jv_array)
plt.title('J-V Curve for '+str(self.ui.bandgap_doubleSpinBox.value())+'eV')
plt.tight_layout()
plt.show()
self.ui.textBrowser.append('--')
else:
self.ui.textBrowser.append('--')
def save_bandgap_array(self):
if self.out_array is None:
self.ui.textBrowser.append("Calculate SQ limit before saving file!")
else:
filename = QtWidgets.QFileDialog.getSaveFileName(self)
np.savetxt(filename[0]+".txt", self.out_array, delimiter='\t', header="Bandgap, PCE, FillFactor, Voc, Jsc")
#def run():
# win = MainWindow()
# QtGui.QApplication.instance().exec_()
# return win
#run()
if __name__ == '__main__':
appctxt = AppContext() # 1. Instantiate ApplicationContext
exit_code = appctxt.run()
sys.exit(exit_code) # 2. Invoke appctxt.app.exec_()
signals: 38.659933 | 222 | 0.592406 | 1,497 | 11,482 | 4.335337 | 0.205077 | 0.070262 | 0.012327 | 0.024807 | 0.283051 | 0.235131 | 0.205855 | 0.186749 | 0.152851 | 0.138367 | 0 | 0.031431 | 0.293416 | 11,482 | 297 | 223 | 38.659933 | 0.76852 | 0.112088 | 0 | 0.21393 | 0 | 0.004975 | 0.068717 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.079602 | false | 0 | 0.059701 | 0.0199 | 0.21393 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
hexsha: a3f604410a10116c403796e4d6e056235535c4f9 | size: 9,832 | ext: py | lang: Python
max_stars / max_issues / max_forks: helpus/core.py @ tov101/HelpUs (6b53d9651cf45c191774be2f70b70b130251d2a6), licenses ["MIT"], count null, events null to null
content:
import io
import logging
import os
import sys
from PyQt5 import QtGui, QtCore, QtWidgets
from helpus import icon_file_path
from helpus import __version__
LOGGER = logging.getLogger('HelpUs')
LOGGER.setLevel(logging.DEBUG)
class XStream(QtCore.QObject):
_stdout = None
_stderr = None
messageWritten = QtCore.pyqtSignal(str)
@staticmethod
def flush():
pass
@staticmethod
def fileno():
return -1
def write(self, msg):
if not self.signalsBlocked():
self.messageWritten.emit(msg)
@staticmethod
def stdout():
if not XStream._stdout:
XStream._stdout = XStream()
sys.stdout = XStream._stdout
return XStream._stdout
@staticmethod
def stderr():
if not XStream._stderr:
XStream._stderr = XStream()
sys.stderr = XStream._stderr
return XStream._stderr
class MyBreakPoint(QtWidgets.QDialog):
_stdout = None
_stderr = None
messageWritten = QtCore.pyqtSignal(str)
HOOK_HEADER = '(Pdb) '
HOOK_INTERACT = '>>> '
HOOK_LINE_BREAK = '... '
HOOKS = [HOOK_HEADER, HOOK_INTERACT]
BUTTONS = [
'Continue',
'Next',
'Step',
'Where',
'Up',
'Down'
]
def __init__(self, parent=None):
super().__init__()
if not parent:
self.parentWidget = QtWidgets.QMainWindow()
else:
self.parentWidget = parent
# Change Window Modality, otherwise parentWidget won't let you use this widget
if self.parentWidget.windowModality() == QtCore.Qt.WindowModality.ApplicationModal:
self.parentWidget.hide()
self.parentWidget.setWindowModality(QtCore.Qt.WindowModality.NonModal)
self.parentWidget.showNormal()
# Set Icon
if icon_file_path and os.path.exists(icon_file_path):
self.setWindowIcon(QtGui.QIcon(icon_file_path))
# Set Flags
self.setWindowFlags(
QtCore.Qt.WindowSystemMenuHint |
QtCore.Qt.WindowTitleHint |
QtCore.Qt.WindowCloseButtonHint
)
# Resize
self.resize(513, 300)
# Create Layout
self.main_layout = QtWidgets.QHBoxLayout()
self.setLayout(self.main_layout)
self.setWindowTitle("HelpUs {}".format(__version__))
# Create Content Layouts
self.ConsoleLayout = QtWidgets.QVBoxLayout()
self.ButtonsLayout = QtWidgets.QVBoxLayout()
self.main_layout.addLayout(self.ButtonsLayout)
self.main_layout.addLayout(self.ConsoleLayout)
# Create OutputConsole
self.console = QtWidgets.QTextEdit(parent)
self.console.insertPlainText = self.__insert_plain_text
self.console.keyPressEvent = self.__key_press_event
self.ConsoleLayout.addWidget(self.console)
# Create buttons
for button_text in self.BUTTONS:
# Create Button Name
button_name = 'button_%s' % button_text.lower()
setattr(self, button_name, QtWidgets.QPushButton(button_text))
getattr(self, button_name).clicked.connect(self.__push_button)
# Add Button to Widget
self.ButtonsLayout.addWidget(getattr(self, button_name))
# Init Buffer
self.buffer = io.StringIO()
self.__set_enable_gui(False)
self.showNormal()
def __set_enable_gui(self, state=True):
"""
:param state:
:return:
"""
self.console.setEnabled(state)
for button_text in self.BUTTONS:
# Get Button Name
button_name = 'button_%s' % button_text.lower()
getattr(self, button_name).setEnabled(state)
if state:
self.console.setFocus()
def redirect_outerr_stream(self):
"""
:return:
"""
# Link Stream Output
XStream.stdout().messageWritten.connect(self.console.insertPlainText)
XStream.stderr().messageWritten.connect(self.console.insertPlainText)
def readline(self):
"""
:return:
"""
if not self.console.isEnabled():
self.__set_enable_gui(True)
# Reset Buffer
self.__reset_buffer()
# Check Position
while self.buffer.tell() == 0:
QtCore.QCoreApplication.processEvents()
value = self.buffer.getvalue()
return value
def __key_press_event(self, event):
"""
:param event:
:return:
"""
# Get Last Line
document = self.console.document()
line_index = document.lineCount()
raw_last_line = document.findBlockByLineNumber(line_index - 1).text()
text = ''
current_hook = ''
# Exclude first 6 chars: (Pdb)\s
if raw_last_line:
for hook in self.HOOKS:
if raw_last_line.startswith(hook):
current_hook = hook
text = raw_last_line[len(hook):]
break
else:
text = raw_last_line
# Get Cursor position
line_from_zero = line_index - 1
current_cursor_line = self.console.textCursor().blockNumber()
current_cursor_column = self.console.textCursor().columnNumber()
# If Enter was pressed -> Process Expression
if event.key() == QtCore.Qt.Key.Key_Return and text:
# Consider Custom Clear Screen Command
if text == 'cls':
self.__clear_screen(raw_last_line)
return
# Replace Line Break with Enter
if self.HOOK_LINE_BREAK == text:
text = '\r\n'
elif self.HOOK_LINE_BREAK in text:
# Replace Line Break with tab
text = text.replace(self.HOOK_LINE_BREAK, '\t')
current_hook = self.HOOK_LINE_BREAK
self.__reset_buffer()
self.buffer.write(text)
self.__set_enable_gui(False)
# If User want to delete something and there is no value in buffer -> Reject
if event.key() == QtCore.Qt.Key.Key_Backspace or event.key() == QtCore.Qt.Key.Key_Delete:
if current_cursor_line != line_from_zero or current_cursor_column <= len(current_hook):
return
if event.key() == QtCore.Qt.Key.Key_Home and current_cursor_line == line_from_zero:
if text:
temp_cursor = self.console.textCursor()
temp_cursor.movePosition(
QtGui.QTextCursor.MoveOperation.StartOfLine,
QtGui.QTextCursor.MoveMode.MoveAnchor
)
temp_cursor.movePosition(
QtGui.QTextCursor.MoveOperation.Right,
QtGui.QTextCursor.MoveMode.MoveAnchor,
len(current_hook)
)
self.console.setTextCursor(temp_cursor)
return
# Set Console Text to Black
self.console.setTextColor(QtCore.Qt.GlobalColor.black)
# Execute default method
QtWidgets.QTextEdit.keyPressEvent(self.console, event)
def __push_button(self):
# Read text from Button and use it as pdb keyword
button_scope = self.sender().text().lower()
self.__reset_buffer()
self.buffer.write(button_scope)
self.__set_enable_gui(False)
def __reset_buffer(self):
if isinstance(self.buffer, io.StringIO):
# Clear Buffer
self.buffer.truncate(0)
self.buffer.seek(0)
else:
self.buffer = io.StringIO()
def __insert_plain_text(self, message):
# Do some stylistics
if message.startswith(self.HOOK_HEADER):
self.console.setTextColor(QtCore.Qt.GlobalColor.magenta)
QtWidgets.QTextEdit.insertPlainText(self.console, message)
return
elif message.startswith(self.HOOK_INTERACT):
self.console.setTextColor(QtCore.Qt.GlobalColor.darkMagenta)
QtWidgets.QTextEdit.insertPlainText(self.console, message)
return
if message.startswith('***'):
self.console.setTextColor(QtCore.Qt.GlobalColor.red)
QtWidgets.QTextEdit.insertPlainText(self.console, message)
# AutoScroll
self.console.verticalScrollBar().setValue(self.console.verticalScrollBar().maximum())
def __clear_screen(self, text):
current_hook = text
for hook in self.HOOKS:
if hook in current_hook:
current_hook = hook
break
self.console.clear()
self.console.insertPlainText(current_hook)
def get_qtconsole_object():
if isinstance(sys.stdin, MyBreakPoint):
return sys.stdin.console
else:
return MyBreakPoint.console
def setup_breakpoint_hook(parent, method, redirect_streams=False):
def __method(*args, **kwargs):
breakpoint()
return method(*args, **kwargs)
if not isinstance(sys.stdin, MyBreakPoint):
sys.stdin = MyBreakPoint(parent)
else:
# Restore Streams
sys.stdin = sys.__stdin__
sys.stdout = sys.__stdout__
sys.stderr = sys.__stderr__
raise Exception(
"Multiple Instances are not allowed. Can be possible, but I'm to lazy to go deep with development."
)
if redirect_streams:
sys.stdin.redirect_outerr_stream()
return __method
if __name__ == '__main__':
p = QtWidgets.QApplication(sys.argv)
LOGGER.error('Ceva')
LOGGER.error = setup_breakpoint_hook(None, LOGGER.error, redirect_streams=True)
# LOGGER.error = setup_breakpoint_hook(None, LOGGER.error, redirect_streams=True)
x = 90
LOGGER.error('Altceva')
print(x)
signals: 31.015773 | 111 | 0.610151 | 1,029 | 9,832 | 5.617104 | 0.270165 | 0.049481 | 0.011419 | 0.011073 | 0.219377 | 0.182872 | 0.087197 | 0.055017 | 0.036678 | 0.022145 | 0 | 0.002325 | 0.299939 | 9,832 | 316 | 112 | 31.113924 | 0.837426 | 0.088385 | 0 | 0.201878 | 0 | 0.004695 | 0.022902 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.079812 | false | 0.004695 | 0.032864 | 0.004695 | 0.234742 | 0.004695 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
hexsha: a3f6dd9ede6bbc22ab1ea49e8e955647bc30a83d | size: 2,206 | ext: py | lang: Python
max_stars: biothings/hub/dataindex/indexer_schedule.py @ newgene/biothings.api (e3278695ac15a55fe420aa49c464946f81ec019d), licenses ["Apache-2.0"], count 30, events 2017-07-23T14:50:29.000Z to 2022-02-08T08:08:16.000Z
max_issues: biothings/hub/dataindex/indexer_schedule.py @ kevinxin90/biothings.api (8ff3bbaecd72d04db4933ff944898ee7b7c0e04a), licenses ["Apache-2.0"], count 163, events 2017-10-24T18:45:40.000Z to 2022-03-28T03:46:26.000Z
max_forks: biothings/hub/dataindex/indexer_schedule.py @ newgene/biothings.api (e3278695ac15a55fe420aa49c464946f81ec019d), licenses ["Apache-2.0"], count 22, events 2017-06-12T18:30:15.000Z to 2022-03-01T18:10:47.000Z
content:
import math
class Schedule():
def __init__(self, total, batch_size):
self._batch_size = batch_size
self._state = ""
self.total = total
self.scheduled = 0
self.finished = 0
@property
def _batch(self):
return math.ceil(self.scheduled / self._batch_size)
@property
def _batches(self):
return math.ceil(self.total / self._batch_size)
@property
def _percentage(self):
_percentage = self.scheduled / self.total * 100
return "%.1f%%" % _percentage
def suffix(self, string):
return " ".join((
string,
"#%d/%d %s" %
(
self._batch,
self._batches,
self._percentage
)
))
def completed(self):
if self.finished != self.total:
raise ValueError(self.finished, self.total)
def __iter__(self):
return self
def __next__(self):
if self.scheduled >= self.total:
self._state = "pending, waiting for completion,"
raise StopIteration()
self.scheduled += self._batch_size
if self.scheduled > self.total:
self.scheduled = self.total
self._state = self.suffix("running, on batch") + ","
return self._batch
def __str__(self):
return " ".join(f"""
<Schedule {"done" if self.finished >= self.total else self._state}
total={self.total} scheduled={self.scheduled} finished={self.finished}>
""".split())
def test_01():
schedule = Schedule(100, 10)
for batch in schedule:
print(batch)
print(schedule)
def test_02():
schedule = Schedule(25, 10)
for batch in schedule:
print(batch)
print(schedule)
print(schedule.suffix("Task"))
def test_03():
schedule = Schedule(0, 10)
for batch in schedule:
print(batch)
print(schedule)
print(schedule.suffix("Task"))
def test_04():
schedule = Schedule(1, 10)
for batch in schedule:
print(batch)
print(schedule)
print(schedule.suffix("Task"))
if __name__ == "__main__":
test_02()
signals: 24.241758 | 87 | 0.56165 | 241 | 2,206 | 4.925311 | 0.236515 | 0.083404 | 0.085931 | 0.074136 | 0.434709 | 0.288964 | 0.214827 | 0.214827 | 0.214827 | 0.178602 | 0 | 0.02075 | 0.322756 | 2,206 | 90 | 88 | 24.511111 | 0.773762 | 0 | 0 | 0.25 | 0 | 0 | 0.121034 | 0.023119 | 0 | 0 | 0 | 0 | 0 | 1 | 0.180556 | false | 0 | 0.013889 | 0.069444 | 0.305556 | 0.152778 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
hexsha: a3f93f71de692d828156d343cfeb58d0babb5f0e | size: 1,248 | ext: py | lang: Python
max_stars: pretraining/model_ensemble.py @ VITA-Group/Adv-SS-Pretraining (4ffbebea582f858ec6165f082f52ded1fc9b817d), licenses ["MIT"], count 32, events 2020-08-31T01:28:29.000Z to 2022-03-19T05:40:05.000Z
max_issues: pretraining/model_ensemble.py @ VITA-Group/Adv-SS-Pretraining (4ffbebea582f858ec6165f082f52ded1fc9b817d), licenses ["MIT"], count null, events null to null
max_forks: pretraining/model_ensemble.py @ VITA-Group/Adv-SS-Pretraining (4ffbebea582f858ec6165f082f52ded1fc9b817d), licenses ["MIT"], count 7, events 2020-09-19T14:03:47.000Z to 2020-12-10T00:42:08.000Z
content:
'''
model ensemble for cifar10 // input size(32,32)
'''
import torch
import torchvision
import copy
import torch.nn as nn
from resnetv2 import ResNet50 as resnet50v2
def split_resnet50(model):
return nn.Sequential(
model.conv1,
model.layer1,
model.layer2,
model.layer3
)
class PretrainEnsembleModel(nn.Module):
def __init__(self):
super(PretrainEnsembleModel, self).__init__()
self.blocks = split_resnet50(resnet50v2())
self.layer4_rotation = resnet50v2().layer4
self.layer4_jigsaw = resnet50v2().layer4
self.fc_rotation = nn.Linear(2048, 4)
self.fc_jigsaw = nn.Linear(2048, 31)
self.avgpool1 = nn.AdaptiveAvgPool2d((1,1))
self.avgpool2 = nn.AdaptiveAvgPool2d((1,1))
self.avgpool3 = nn.AdaptiveAvgPool2d((1,1))
def _Normal(self,x):
mean=torch.Tensor([0.485, 0.456, 0.406])
mean=mean[None,:,None,None].cuda()
std = torch.Tensor([0.229, 0.224, 0.225])
std = std[None,:,None,None].cuda()
return x.sub(mean).div(std)
def forward(self, x):
feature_map = self.blocks(self._Normal(x))
return feature_map
signals: 23.54717 | 54 | 0.604167 | 152 | 1,248 | 4.842105 | 0.421053 | 0.043478 | 0.081522 | 0.085598 | 0.067935 | 0 | 0 | 0 | 0 | 0 | 0 | 0.088203 | 0.273237 | 1,248 | 52 | 55 | 24 | 0.723264 | 0.038462 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.15625 | 0.03125 | 0.40625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
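A quick smoke test for the `PretrainEnsembleModel` defined above might look like the following sketch. Assumptions: the class and its `resnetv2` dependency are importable, a CUDA device is available (`_Normal` hard-codes `.cuda()` on the normalization constants), and the rotation/jigsaw heads are driven elsewhere in the training code, since `forward` returns only the shared trunk's feature map:

```python
import torch

# Hypothetical smoke test; requires a CUDA-capable GPU.
model = PretrainEnsembleModel().cuda().eval()
batch = torch.rand(4, 3, 32, 32).cuda()   # CIFAR-10-sized inputs in [0, 1]
with torch.no_grad():
    features = model(batch)               # normalized, then conv1 + layers 1-3
print(features.shape)
```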
hexsha: a3fa8d70909c40648b30be357bd41df712b21d5f | size: 1,519 | ext: py | lang: Python
max_stars / max_issues / max_forks: scripts/ccdf.py @ glciampaglia/HoaxyBots (db8d2b7d9927d5d4d94ded125f9785590dace906), licenses ["MIT"], count null, events null to null
content:
# -*- coding: utf-8 -*-
""" Function that implement Complement the Complementary Cumulative
Distribution Function (CCDF).
"""
#
# written by Chengcheng Shao <sccotte@gmail.com>
import numpy as np
import pandas as pd
def ccdf(s):
"""
Parameters:
`s`, series, the values of s should be variable to be handled
Return:
a new series `s`, index of s will be X axis (number), value of s
will be Y axis (probability)
"""
s = s.copy()
s = s.sort_values(ascending=True, inplace=False)
s.reset_index(drop=True, inplace=True)
n = len(s)
s.drop_duplicates(keep='first', inplace=True)
X = s.values
Y = [n - i for i in s.index]
return pd.Series(data=Y, index=X) / n
def sum_cdf(s):
s = s.copy()
s = s.value_counts()
s = s.sort_index(ascending=True)
cumulative = []
for i in range(len(s)):
s0 = s.iloc[:i + 1]
cumulative.append(np.inner(s0.index, s0.values))
s = pd.Series(cumulative, index=s.index)
return s / s.max()
def sum_ccdf(s):
"""
Parameters:
`s`, series, the values of s should be variable to be handled
Return:
a new series `s`, index of s will be X axis (number), values
will be Y axis (sum(X>=x))
"""
s = s.copy()
s = s.value_counts()
s = s.sort_index(ascending=True)
cumulative = []
for i in range(len(s)):
s1 = s.iloc[i:]
cumulative.append(np.inner(s1.index, s1.values))
return pd.Series(cumulative, index=s.index)
signals: 25.745763 | 72 | 0.601712 | 237 | 1,519 | 3.818565 | 0.320675 | 0.024309 | 0.023204 | 0.029834 | 0.444199 | 0.435359 | 0.371271 | 0.371271 | 0.371271 | 0.371271 | 0 | 0.007136 | 0.262014 | 1,519 | 58 | 73 | 26.189655 | 0.800178 | 0.351547 | 0 | 0.366667 | 0 | 0 | 0.005476 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.066667 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
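A short usage sketch for the `ccdf` helper above, on synthetic data. The import path is hypothetical (it assumes the script is importable as a module named `ccdf`); the returned series maps each distinct value x to the empirical P(X >= x):

```python
import numpy as np
import pandas as pd
from ccdf import ccdf   # hypothetical import path for the script above

# 1000 synthetic counts; the CCDF is monotone non-increasing in x.
values = pd.Series(np.random.default_rng(42).poisson(5, size=1000))
curve = ccdf(values)
print(curve.head())      # index: value x, data: fraction of samples >= x
```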
hexsha: a3fac0d6ce92c1972de436f2ce748bbe19017407 | size: 5,335 | ext: py | lang: Python
max_stars: lifelines/fitters/kaplan_meier_fitter.py @ eliracho37/lifelines (b1c6c2732d1ccfc2ae08f7178371d0f95ae3027b), licenses ["MIT"], count null, events null to null
max_issues: lifelines/fitters/kaplan_meier_fitter.py @ eliracho37/lifelines (b1c6c2732d1ccfc2ae08f7178371d0f95ae3027b), licenses ["MIT"], count null, events null to null
max_forks: lifelines/fitters/kaplan_meier_fitter.py @ eliracho37/lifelines (b1c6c2732d1ccfc2ae08f7178371d0f95ae3027b), licenses ["MIT"], count 1, events 2020-05-06T14:46:25.000Z to 2020-05-06T14:46:25.000Z
content:
# -*- coding: utf-8 -*-
from __future__ import print_function
import numpy as np
import pandas as pd
from lifelines.fitters import UnivariateFitter
from lifelines.utils import _preprocess_inputs, _additive_estimate, StatError, inv_normal_cdf,\
median_survival_times
from lifelines.plotting import plot_loglogs
class KaplanMeierFitter(UnivariateFitter):
"""
Class for fitting the Kaplan-Meier estimate for the survival function.
KaplanMeierFitter(alpha=0.95)
alpha: The alpha value associated with the confidence intervals.
"""
def fit(self, durations, event_observed=None, timeline=None, entry=None, label='KM_estimate',
alpha=None, left_censorship=False, ci_labels=None):
"""
Parameters:
duration: an array, or pd.Series, of length n -- duration subject was observed for
timeline: return the best estimate at the values in timelines (positively increasing)
event_observed: an array, or pd.Series, of length n -- True if the death was observed, False if the event
was lost (right-censored). Defaults all True if event_observed==None
entry: an array, or pd.Series, of length n -- relative time when a subject entered the study. This is
useful for left-truncated (not left-censored) observations. If None, all members of the population
were born at time 0.
label: a string to name the column of the estimate.
alpha: the alpha value in the confidence intervals. Overrides the initializing
alpha for this call to fit only.
left_censorship: True if durations and event_observed refer to left censorship events. Default False
ci_labels: add custom column names to the generated confidence intervals
as a length-2 list: [<lower-bound name>, <upper-bound name>]. Default: <label>_lower_<alpha>
Returns:
self, with new properties like 'survival_function_'.
"""
# if the user is interested in left-censorship, we return the cumulative_density_, not the survival_function_,
estimate_name = 'survival_function_' if not left_censorship else 'cumulative_density_'
v = _preprocess_inputs(durations, event_observed, timeline, entry)
self.durations, self.event_observed, self.timeline, self.entry, self.event_table = v
self._label = label
alpha = alpha if alpha else self.alpha
log_survival_function, cumulative_sq_ = _additive_estimate(self.event_table, self.timeline,
self._additive_f, self._additive_var,
left_censorship)
if entry is not None:
# a serious problem with KM is that when the sample size is small and there are too few early
# truncation times, it may happen that the number of patients at risk equals the number of deaths.
# we adjust for this using the Breslow-Fleming-Harrington estimator
n = self.event_table.shape[0]
net_population = (self.event_table['entrance'] - self.event_table['removed']).cumsum()
if net_population.iloc[:int(n / 2)].min() == 0:
ix = net_population.iloc[:int(n / 2)].argmin()
raise StatError("""There are too few early truncation times and too many events. S(t)==0 for all t>%.1f. Recommend BreslowFlemingHarringtonFitter.""" % ix)
# estimation
setattr(self, estimate_name, pd.DataFrame(np.exp(log_survival_function), columns=[self._label]))
self.__estimate = getattr(self, estimate_name)
self.confidence_interval_ = self._bounds(cumulative_sq_[:, None], alpha, ci_labels)
self.median_ = median_survival_times(self.__estimate, left_censorship=left_censorship)
# estimation methods
self.predict = self._predict(estimate_name, label)
self.subtract = self._subtract(estimate_name)
self.divide = self._divide(estimate_name)
# plotting functions
self.plot = self._plot_estimate(estimate_name)
setattr(self, "plot_" + estimate_name, self.plot)
self.plot_loglogs = plot_loglogs(self)
return self
def _bounds(self, cumulative_sq_, alpha, ci_labels):
# See http://courses.nus.edu.sg/course/stacar/internet/st3242/handouts/notes2.pdf
alpha2 = inv_normal_cdf((1. + alpha) / 2.)
df = pd.DataFrame(index=self.timeline)
v = np.log(self.__estimate.values)
if ci_labels is None:
ci_labels = ["%s_upper_%.2f" % (self._label, alpha), "%s_lower_%.2f" % (self._label, alpha)]
assert len(ci_labels) == 2, "ci_labels should be a length 2 array."
df[ci_labels[0]] = np.exp(-np.exp(np.log(-v) + alpha2 * np.sqrt(cumulative_sq_) / v))
df[ci_labels[1]] = np.exp(-np.exp(np.log(-v) - alpha2 * np.sqrt(cumulative_sq_) / v))
return df
def _additive_f(self, population, deaths):
np.seterr(invalid='ignore', divide='ignore')
return (np.log(population - deaths) - np.log(population))
def _additive_var(self, population, deaths):
np.seterr(divide='ignore')
return (1. * deaths / (population * (population - deaths))).replace([np.inf], 0)
signals: 51.298077 | 171 | 0.662418 | 692 | 5,335 | 4.930636 | 0.328035 | 0.023447 | 0.020516 | 0.009672 | 0.096131 | 0.079719 | 0.066823 | 0.046893 | 0.024033 | 0.024033 | 0 | 0.007459 | 0.246111 | 5,335 | 103 | 172 | 51.796117 | 0.840875 | 0.339831 | 0 | 0 | 0 | 0.019608 | 0.082511 | 0.009268 | 0 | 0 | 0 | 0 | 0.019608 | 1 | 0.078431 | false | 0 | 0.117647 | 0 | 0.294118 | 0.019608 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0
a3fad8e543716f6089f3ea4419938d3d14b1b941 | 8,498 | py | Python | pydmfet/qcwrap/pyscf_rhf.py | fishjojo/pydmfe | 93cfc655314933d3531b5733521a1f95a044f6cb | ["MIT"] | 3 | 2021-02-26T06:26:00.000Z | 2022-02-20T08:58:20.000Z | pydmfet/qcwrap/pyscf_rhf.py | fishjojo/pydmfet | 93cfc655314933d3531b5733521a1f95a044f6cb | ["MIT"] | null | null | null | pydmfet/qcwrap/pyscf_rhf.py | fishjojo/pydmfet | 93cfc655314933d3531b5733521a1f95a044f6cb | ["MIT"] | null | null | null |
import numpy as np
from pydmfet import tools
from .fermi import find_efermi, entropy_corr
from pyscf import ao2mo, gto, scf, dft, lib
from pydmfet.qcwrap import fermi
import time
from functools import reduce
def scf_oei( OEI, Norb, Nelec, smear_sigma = 0.0):
OEI = 0.5*(OEI.T + OEI)
eigenvals, eigenvecs = np.linalg.eigh( OEI )
idx = np.argmax(abs(eigenvecs), axis=0)
eigenvecs[:,eigenvecs[ idx, np.arange(len(eigenvals)) ]<0] *= -1
Nocc = Nelec//2 #closed shell
e_homo = eigenvals[Nocc-1]
e_lumo = eigenvals[Nocc]
print ('HOMO: ', e_homo, 'LUMO: ', e_lumo)
print ("mo_energy:")
print (eigenvals[:Nocc+5])
e_fermi = e_homo
mo_occ = np.zeros((Norb))
if(smear_sigma < 1e-8): #T=0
mo_occ[:Nocc] = 1.0
else: #finite T
e_fermi, mo_occ = find_efermi(eigenvals, smear_sigma, Nocc, Norb)
mo_occ*=2.0 #closed shell
Ne_error = np.sum(mo_occ) - Nelec
if(Ne_error > 1e-8):
print ('Ne error = ', Ne_error)
print ("fermi energy: ", e_fermi)
np.set_printoptions(precision=4)
flag = mo_occ > 1e-4
print (mo_occ[flag])
np.set_printoptions()
RDM1 = reduce(np.dot, (eigenvecs, np.diag(mo_occ), eigenvecs.T))
RDM1 = (RDM1.T + RDM1)/2.0
energy = np.trace(np.dot(RDM1,OEI))
es = entropy_corr(mo_occ, smear_sigma)
print ('entropy correction: ', es)
energy += es
print ('e_tot = ', energy)
return ( energy, RDM1, eigenvecs, eigenvals, mo_occ )
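# Minimal sketch of calling scf_oei on a toy one-electron Hamiltonian (an
# assumption: a 4-orbital, 4-electron closed-shell problem; smear_sigma=0.0
# exercises only the T=0 branch above, so find_efermi is never reached).
def _scf_oei_example():
    h = np.diag([-2.0, -1.0, 0.5, 1.0])  # toy symmetric OEI matrix
    energy, rdm1, mo_coeff, mo_energy, mo_occ = scf_oei(h, Norb=4, Nelec=4, smear_sigma=0.0)
    return energy, rdm1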
# The following is deprecated!
class scf_pyscf():
'''
subspace scf
wrapper for scf module of pyscf
'''
def __init__(self, Ne, Norb, mol=None, oei=None, tei=None, ovlp=1, dm0=None, coredm=0, ao2sub=None, mf_method='HF'):
self.mol = mol
self.Ne = Ne
self.Norb = Norb
self.method = mf_method
self.oei = oei
self.tei = tei
self.ovlp = ovlp
self.dm0 = dm0
self.coredm = coredm
self.ao2sub = ao2sub
self.method = mf_method.lower()
self.mf = None
if(self.mol is None):
#which molecule we build does not matter
self.mol = gto.Mole()
self.mol.build( verbose=0 )
self.mol.atom.append(('C', (0, 0, 0)))
#adjust number of electrons
self.mol.nelectron = Ne
if(self.tei is not None):
self.mol.incore_anyway = True
if(self.method == 'hf'):
self.mf = scf.RHF(self.mol)
self.prep_rhf()
else:
self.mf = scf.RKS(self.mol)
self.mf.xc = self.method
self.prep_rhf()
self.prep_rks()
self.elec_energy = 0.0
self.rdm1 = None
self.mo_coeff = None
self.mo_energy = None
self.mo_occ = None
def prep_rhf(self):
if(self.ovlp == 1):
self.mf.get_ovlp = lambda *args: np.eye( self.Norb )
if(self.oei is not None):
self.mf.get_hcore = lambda *args: self.oei
if(self.tei is not None):
self.mf._eri = ao2mo.restore(8, self.tei, self.Norb)
def prep_rks(self):
if(self.ao2sub is None):
return
#overload dft.rks.get_veff if necessary
self.mf.get_veff = get_veff_rks_decorator(self.ao2sub, self.coredm)
def kernel(self):
self.mf.kernel(self.dm0)
if ( self.mf.converged == False ):
raise Exception("scf not converged!")
rdm1 = self.mf.make_rdm1()
self.rdm1 = 0.5*(rdm1.T + rdm1)
self.elec_energy = self.mf.energy_elec(self.rdm1)[0]
self.mo_coeff = self.mf.mo_coeff
self.mo_energy = self.mf.mo_energy
self.mo_occ = self.mf.mo_occ
def get_veff_rks_decorator(ao2sub, coredm):
def get_veff(ks, mol=None, dm=None, dm_last=0, vhf_last=0, hermi=1):
if mol is None: mol = ks.mol
if dm is None: dm = ks.make_rdm1()
dm_sub = np.asarray(dm) + coredm
dm_ao = tools.dm_sub2ao(dm_sub, ao2sub)
if hasattr(dm, 'mo_coeff'):
mo_coeff_sub = dm.mo_coeff
mo_occ_sub = dm.mo_occ
mo_coeff_ao = tools.mo_sub2ao(mo_coeff_sub, ao2sub)
mo_occ_ao = mo_occ_sub
dm_ao = lib.tag_array(dm_ao, mo_coeff=mo_coeff_ao, mo_occ=mo_occ_ao)
n, exc, vxc_ao, hyb = get_vxc(ks, mol, dm_ao)
vxc = tools.op_ao2sub(vxc_ao, ao2sub)
vj = None
vk = None
if abs(hyb) < 1e-10:
if (ks._eri is None and ks.direct_scf and
getattr(vhf_last, 'vj', None) is not None):
ddm = np.asarray(dm) - np.asarray(dm_last)
vj = ks.get_jk(mol, ddm, hermi)[0]
vj += vhf_last.vj
else:
vj = ks.get_jk(mol, dm, hermi)[0]
vxc += vj
else:
if (ks._eri is None and ks.direct_scf and
getattr(vhf_last, 'vk', None) is not None):
ddm = np.asarray(dm) - np.asarray(dm_last)
vj, vk = ks.get_jk(mol, ddm, hermi)
vj += vhf_last.vj
vk += vhf_last.vk
else:
vj, vk = ks.get_jk(mol, dm, hermi)
vxc += vj - vk * (hyb * .5)
exc -= np.einsum('ij,ji', dm, vk) * .5 * hyb*.5
ecoul = np.einsum('ij,ji', dm, vj) * .5
vxc = lib.tag_array(vxc, ecoul=ecoul, exc=exc, vj=vj, vk=vk)
return vxc
return get_veff
def get_vxc(ks, mol, dm, hermi=1):
ground_state = (isinstance(dm, np.ndarray) and dm.ndim == 2)
if(not ground_state):
raise Exception("fatal error")
if ks.grids.coords is None:
ks.grids.build(with_non0tab=True)
if ks.small_rho_cutoff > 1e-20 and ground_state:
# Filter grids the first time setup grids
t0 = (time.process_time(), time.time())  # time.clock() was removed in Python 3.8
ks.grids = dft.rks.prune_small_rho_grids_(ks, mol, dm, ks.grids)
t1 = tools.timer("prune grid",t0)
if hermi == 2: # because rho = 0
n, exc, vxc = 0, 0, 0
else:
n, exc, vxc = ks._numint.nr_rks(mol, ks.grids, ks.xc, dm)
hyb = ks._numint.hybrid_coeff(ks.xc, spin=mol.spin)
return n, exc, vxc, hyb
'''
def rhf(mol, OEI, TEI, Norb, Nelec, OneDM0=None ):
# Get the RHF solution
OEI = 0.5*(OEI.T + OEI)
#mol = gto.Mole()
#mol.max_memory = 8000
#mol.build( verbose=0 )
#mol.atom.append(('C', (0, 0, 0)))
mol.nelectron = Nelec
mol.incore_anyway = True
mf = pyscf_scf.RHF( mol )
mf.get_hcore = lambda *args: OEI
mf.get_ovlp = lambda *args: np.eye( Norb )
mf._eri = ao2mo.restore(8, TEI, Norb)
mf.max_cycle = 100
#mf.conv_tol = 1e-8
#adiis = pyscf_scf.diis.ADIIS()
#mf.diis = adiis
#mf.verbose = 5
mf.kernel(OneDM0)
if ( mf.converged == False ):
#RDM1 = mf.make_rdm1()
#cdiis = pyscf_scf.diis.SCF_DIIS()
#mf.diis = cdiis
#mf.max_cycle = 200
#mf.kernel(RDM1)
if ( mf.converged == False ):
raise Exception(" rhf not converged!")
return mf
def rks(mol, OEI, TEI, Norb, Nelec, xcfunc, OneDM0=None ):
# Get the RKS solution
OEI = 0.5*(OEI.T + OEI)
#mol = gto.Mole()
#mol.build( verbose=5 )
#mol.atom.append(('C', (0, 0, 0)))
mol.nelectron = Nelec
# mol.incore_anyway = True
mf = pyscf_scf.RKS( mol )
mf.xc = xcfunc.lower()
# mf.get_hcore = lambda *args: OEI
# mf.get_ovlp = lambda *args: np.eye( Norb )
# mf._eri = ao2mo.restore(8, TEI, Norb)
OneDM0 = None
mf.kernel( OneDM0 )
if ( mf.converged == False ):
raise Exception(" rks not converged!")
return mf
def scf(mol, OEI, TEI, Norb, Nelec, OneDM0=None, mf_method = 'HF' ):
# Get the mean-field solution
if(mf_method.lower() == 'hf'):
mf = rhf(mol, OEI, TEI, Norb, Nelec, OneDM0 )
else:
mf = rks(mol, OEI, TEI, Norb, Nelec, mf_method ,OneDM0 )
RDM1 = mf.make_rdm1()
RDM1 = 0.5*(RDM1.T + RDM1)
mo_coeff = mf.mo_coeff
mo_energy = mf.mo_energy
energy = mf.energy_elec(RDM1)[0]
mo = np.zeros([Norb,Norb+1],dtype=float)
mo[:,:-1] = mo_coeff
mo[:,-1] = mo_energy
#print "mo energy"
#print mf.mo_energy
#tools.MatPrint(mf.get_fock(),"fock")
#JK = mf.get_veff(None, dm=RDM1)
#tools.MatPrint(JK,"JK")
#tools.MatPrint(np.dot(mf.get_fock(), mf.mo_coeff),"test")
#tools.MatPrint(mf.mo_coeff,"mo_coeff")
return (energy, RDM1, mo)
'''
a3fcab0b445701487209c1560ead48389dc4c643 | 2,067 | py | Python | odepy/collision_space.py | yuemingl/ode-python-1 | a9a12d9d3b7e611874a8d30f6a5c0b83b6087f86 | ["MIT"] | 9 | 2020-05-31T09:22:40.000Z | 2021-09-15T18:15:15.000Z | odepy/collision_space.py | yuemingl/ode-python-1 | a9a12d9d3b7e611874a8d30f6a5c0b83b6087f86 | ["MIT"] | 1 | 2020-11-15T11:38:45.000Z | 2020-11-15T11:38:45.000Z | odepy/collision_space.py | yuemingl/ode-python-1 | a9a12d9d3b7e611874a8d30f6a5c0b83b6087f86 | ["MIT"] | 2 | 2020-11-14T21:47:01.000Z | 2021-08-03T02:28:10.000Z |
# -*- coding: utf-8 -*-
from .common import loadOde
from .common import dGeomID
from .common import dSpaceID
from .common import dVector3
from ctypes import POINTER
from ctypes import CFUNCTYPE
from ctypes import c_void_p
from ctypes import c_int32
dNearCallback = CFUNCTYPE(None, c_void_p, dGeomID, dGeomID)
def dSimpleSpaceCreate(space):
if isinstance(space, int):
return loadOde('dSimpleSpaceCreate', dSpaceID, c_int32)(space)
else:
return loadOde('dSimpleSpaceCreate', dSpaceID, dSpaceID)(space)
def dHashSpaceCreate(space):
if isinstance(space, int):
return loadOde('dHashSpaceCreate', dSpaceID, c_int32)(space)
else:
return loadOde('dHashSpaceCreate', dSpaceID, dSpaceID)(space)
dQuadTreeSpaceCreate = loadOde('dQuadTreeSpaceCreate', dSpaceID, dSpaceID, dVector3, dVector3, c_int32)
dSweepAndPruneSpaceCreate = loadOde('dSweepAndPruneSpaceCreate', dSpaceID, dSpaceID, c_int32)
dSpaceDestroy = loadOde('dSpaceDestroy', None, dSpaceID)
dHashSpaceSetLevels = loadOde('dHashSpaceSetLevels', None, dSpaceID, c_int32, c_int32)
dHashSpaceGetLevels = loadOde('dHashSpaceGetLevels', None, dSpaceID, POINTER(c_int32), POINTER(c_int32))
dSpaceSetCleanup = loadOde('dSpaceSetCleanup', None, dSpaceID, c_int32)
dSpaceGetCleanup = loadOde('dSpaceGetCleanup', c_int32, dSpaceID)
dSpaceSetSublevel = loadOde('dSpaceSetSublevel', None, dSpaceID, c_int32)
dSpaceGetSublevel = loadOde('dSpaceGetSublevel', c_int32, dSpaceID)
dSpaceSetManualCleanup = loadOde('dSpaceSetManualCleanup', None, dSpaceID, c_int32)
dSpaceGetManualCleanup = loadOde('dSpaceGetManualCleanup', c_int32, dSpaceID)
dSpaceAdd = loadOde('dSpaceAdd', None, dSpaceID, dGeomID)
dSpaceRemove = loadOde('dSpaceRemove', None, dSpaceID, dGeomID)
dSpaceQuery = loadOde('dSpaceQuery', c_int32, dSpaceID, dGeomID)
dSpaceClean = loadOde('dSpaceClean', None, dSpaceID)
dSpaceGetNumGeoms = loadOde('dSpaceGetNumGeoms', c_int32, dSpaceID)
dSpaceGetGeom = loadOde('dSpaceGetGeom', dGeomID, dSpaceID, c_int32)
dSpaceGetClass = loadOde('dSpaceGetClass', c_int32, dSpaceID)
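# Usage sketch for the bindings above (assumptions: loadOde can resolve the ODE
# shared library, and geoms would come from a sibling collision module):
def _space_example():
    space = dHashSpaceCreate(0)        # int argument selects the c_int32 overload
    dHashSpaceSetLevels(space, -3, 5)  # smallest/largest hash cell levels
    n = dSpaceGetNumGeoms(space)       # 0 for a freshly created space
    dSpaceDestroy(space)
    return n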
a3fdeb4319b73c8b5241edc4f4a1fca818eec403 | 4,279 | py | Python | pctest/test_publish.py | DaveWK/pyth-client | 4332ef3287f584be46ec38ddd800cae8d4e7b792 | ["Apache-2.0"] | null | null | null | pctest/test_publish.py | DaveWK/pyth-client | 4332ef3287f584be46ec38ddd800cae8d4e7b792 | ["Apache-2.0"] | null | null | null | pctest/test_publish.py | DaveWK/pyth-client | 4332ef3287f584be46ec38ddd800cae8d4e7b792 | ["Apache-2.0"] | null | null | null |
#!/usr/bin/python3
# pip3 install websockets
import asyncio
import websockets
import json
import datetime
import sys
class test_publish:
idnum = 1
def __init__( self, sym, price, spread ):
self.symbol = sym
self.pidnum = test_publish.idnum
test_publish.idnum += 1
self.sidnum = test_publish.idnum
test_publish.idnum += 1
self.psubid = -1
self.ssubid = -1
self.price = price
self.spread = spread
def gen_subscribe_price(self):
req = {
'jsonrpc': '2.0',
'method' : 'subscribe_price',
'params' : {
'account': self.account,
'price_type' : 'price'
},
'id': self.sidnum
}
return json.dumps( req )
def gen_subscribe_price_sched(self):
req = {
'jsonrpc': '2.0',
'method' : 'subscribe_price_sched',
'params' : {
'account': self.account,
'price_type' : 'price'
},
'id': self.pidnum
}
return json.dumps( req )
def gen_update_price(self):
req = {
'jsonrpc': '2.0',
'method': 'update_price',
'params':{
'account': self.account,
'price_type': 'price',
'status': 'trading',
'price': self.price,
'conf': self.spread
},
'id': None
}
self.price += self.spread
return json.dumps( req )
def parse_reply( self, msg, allsub ):
# parse subscription replies
subid = msg['result']['subscription']
allsub[subid] = self
if msg['id'] == self.pidnum:
self.psubid = subid
else:
self.ssubid = subid
async def parse_notify( self, ws, msg ):
# parse subscription notification messages
subid = msg['params']['subscription']
ts = datetime.datetime.utcnow().isoformat()
if subid == self.ssubid:
# aggregate price update
res = msg['params']['result']
price = res['price']
spread = res['conf']
status = res['status']
print( f'{ts} received aggregate price update symbol=' + self.symbol +
f',price={price}, spread={spread}, status={status}' )
else:
# request to submit price
print( f'{ts} submit price to block-chain symbol=' + self.symbol +
f',price={self.price}, spread={self.spread}, subscription={subid}')
await ws.send( self.gen_update_price() )
async def subscribe( self, acct, ws, allids ):
# submit initial subscriptions
self.account = acct
allids[self.pidnum] = self
allids[self.sidnum] = self
await ws.send( self.gen_subscribe_price() )
await ws.send( self.gen_subscribe_price_sched() )
# websocket event loop
async def poll( uri ):
# connect to pythd
ws = await websockets.connect(uri)
# submit subscriptions to pythd
allids = {}
allsub = {}
allsym = {}
sym1 = test_publish( 'SYMBOL1/USD', 10000, 100 )
sym2 = test_publish( 'SYMBOL2/USD', 2000000, 20000 )
allsym[sym1.symbol] = sym1
allsym[sym2.symbol] = sym2
# lookup accounts by symbol and subscribe
req = { 'jsonrpc': '2.0', 'method': 'get_product_list', 'id': None }
await ws.send( json.dumps( req ) )
msg = json.loads( await ws.recv() )
for prod in msg['result']:
sym = prod['attr_dict']['symbol']
for px in prod['price']:
if sym in allsym and px['price_type'] == 'price':
await allsym[sym].subscribe( px['account'], ws, allids )
# poll for updates from pythd
while True:
msg = json.loads( await ws.recv() )
# print(msg)
if 'error' in msg:
ts = datetime.datetime.utcnow().isoformat()
code = msg['error']['code']
emsg = msg['error']['message']
print( f'{ts} error code: {code} msg: {emsg}' )
sys.exit(1)
elif 'result' in msg:
msgid = msg['id']
if msgid in allids:
allids[msgid].parse_reply( msg, allsub )
else:
subid = msg['params']['subscription']
if subid in allsub:
await allsub[subid].parse_notify( ws, msg )
# connect to pythd, subscribe to and start publishing on two symbols
if __name__ == '__main__':
uri='ws://localhost:8910'
eloop = asyncio.get_event_loop()
try:
eloop.run_until_complete( poll( uri ) )
except ConnectionRefusedError:
print( f'connection refused uri={uri}' )
sys.exit(1)
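# For reference, a sketch of the JSON-RPC traffic this script exchanges with
# pythd (values are illustrative, not captured from a live daemon):
#
#   -> {"jsonrpc": "2.0", "method": "subscribe_price",
#       "params": {"account": "<acct>", "price_type": "price"}, "id": 2}
#   <- {"jsonrpc": "2.0", "result": {"subscription": 7}, "id": 2}
#   <- {"jsonrpc": "2.0",
#       "params": {"subscription": 7,
#                  "result": {"price": 10000, "conf": 100, "status": "trading"}}}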
a3fdf673713dab6653653e81925c27451e5f5544 | 1,866 | py | Python | Python/other/merge_interval.py | TechSpiritSS/NeoAlgo | 08f559b56081a191db6c6b1339ef37311da9e986 | ["MIT"] | 897 | 2020-06-25T00:12:52.000Z | 2022-03-24T00:49:31.000Z | Python/other/merge_interval.py | AnshikaAgrawal5501/NeoAlgo | d66d0915d8392c2573ba05d5528e00af52b0b996 | ["MIT"] | 5,707 | 2020-06-24T17:53:28.000Z | 2022-01-22T05:03:15.000Z | Python/other/merge_interval.py | AnshikaAgrawal5501/NeoAlgo | d66d0915d8392c2573ba05d5528e00af52b0b996 | ["MIT"] | 1,817 | 2020-06-25T03:51:05.000Z | 2022-03-29T05:14:07.000Z |
'''
Given an array of intervals, merge all overlapping intervals,
and return an array of the non-overlapping intervals that cover all the intervals in the input.
Input: intervals = [[1,3],[2,6],[8,10],[15,18]]
Output: [[1,6],[8,10],[15,18]]
Explanation: Since intervals [1,3] and [2,6] overlap, merge them into [1,6].
'''
def merge(intervals):
#sort the array
intervals.sort()
#stack that accumulates the merged intervals
intervals_stack = []
for pair in intervals:
if len(intervals_stack) == 0:
intervals_stack.append(pair) #the first interval seeds the stack
#otherwise merge the new interval with the top of the stack if they overlap
else:
current_pair = intervals_stack[-1]
if current_pair[1]>=pair[0]:
intervals_stack.pop()
if current_pair[1]<pair[1]:
new_pair = [current_pair[0],pair[1]]
intervals_stack.append(new_pair)
else:
new_pair = [current_pair[0],current_pair[1]]
intervals_stack.append(new_pair)
else:
intervals_stack.append(pair)
# result
return intervals_stack
if __name__ == '__main__':
R = int(input("Enter the number of rows:"))
C = int(input("Enter the number of columns:"))
interval = [[int(input("Enter the elements: ")) for x in range (C)] for y in range(R)]
print("Overlapping interval: ",interval)
print("Non-overlapping intervals: ",merge(interval))
"""
Time complexity : O(n log n) (dominated by the sort)
Space complexity : O(n)
INPUT:-
Enter the number of rows:4
Enter the number of columns:2
Enter the elements: 1
Enter the elements: 3
Enter the elements: 2
Enter the elements: 6
Enter the elements: 8
Enter the elements: 10
Enter the elements: 15
Enter the elements: 18
OUTPUT:-
Overlapping interval: [[1, 3], [2, 6], [8, 10], [15, 18]]
Non-overlapping intervals: [[1, 6], [8, 10], [15, 18]]
"""
a3fe08a8aed62efc53ccd8e1fe4f7bf8c6183509 | 9,708 | py | Python | tests/test_all.py | InnovativeTravel/humilis-lambdautils | 344e13efb68d19f61f0be8178eb6cc2219913fb0 | ["MIT"] | null | null | null | tests/test_all.py | InnovativeTravel/humilis-lambdautils | 344e13efb68d19f61f0be8178eb6cc2219913fb0 | ["MIT"] | null | null | null | tests/test_all.py | InnovativeTravel/humilis-lambdautils | 344e13efb68d19f61f0be8178eb6cc2219913fb0 | ["MIT"] | null | null | null |
"""Unit tests."""
import inspect
import json
from mock import Mock
import os
import sys
import uuid
import pytest
# Add the lambda directory to the python library search path
lambda_dir = os.path.join(
os.path.dirname(inspect.getfile(inspect.currentframe())), '..')
sys.path.append(lambda_dir)
import lambdautils.utils
@pytest.mark.parametrize(
"key,environment,stage,namespace,table,nkey", [
("k", "e", "s", None, "e-s-secrets", "k"),
("k", "e", None, None, "e-dummystage-secrets", "k"),
("k", "e", None, "n", "e-dummystage-secrets", "n:k"),
("k", "e", "s", "n", "e-s-secrets", "n:k")])
def test_get_secret_from_vault(key, environment, stage, namespace, table, nkey,
boto3_resource, boto3_client, monkeypatch):
"""Gets a secret from the DynamoDB secrets vault."""
# Call to the DynamoDB client to retrieve the encrypted secret
monkeypatch.setattr("boto3.resource", boto3_resource)
monkeypatch.setattr("boto3.client", boto3_client)
secret = lambdautils.utils.get_secret(key,
namespace=namespace,
environment=environment,
stage=stage)
assert secret == "dummy"
boto3_client("dynamodb").get_item.assert_called_with(
TableName=table,
Key={"id": {"S": nkey}})
# Call to the KMS client to decrypt the secret
boto3_client('kms').decrypt.assert_called_with(CiphertextBlob="encrypted")
def test_get_secret_from_env(monkeypatch):
"""Get a secret from an (encrypted) environment variable."""
key = str(uuid.uuid4()).replace('-', '.')
value = str(uuid.uuid4())
monkeypatch.setenv(key.replace('.', '_').upper(), value)
secret = lambdautils.utils.get_secret(key)
assert secret == value
def test_get_setting(monkeypatch):
"""Should be an alias for get_secret."""
resp = str(uuid.uuid4())
arg = str(uuid.uuid4())
kwarg = str(uuid.uuid4())
get_secret = Mock(return_value=resp)
monkeypatch.setattr("lambdautils.state.get_secret", get_secret)
resp2 = lambdautils.state.get_setting(arg, kwarg=kwarg)
assert resp2 == resp
get_secret.assert_called_with(arg, kwarg=kwarg)
@pytest.mark.parametrize(
"key,environment,layer,stage,shard_id,namespace,table,consistent,nkey", [
("k", "e", "l", "s", None, None, "e-l-s-state", False, "k"),
("k", "e", "l", "s", None, "n", "e-l-s-state", False, "n:k"),
("k", "e", "l", "s", "s-012", "n", "e-l-s-state", True, "s-012:n:k"),
("k", "e", "l", "s", "s-0001", None, "e-l-s-state", True, "s-0001:k")])
def test_get_state(boto3_resource, monkeypatch, key, environment, layer,
stage, shard_id, namespace, table, consistent, nkey):
"""Get a state value from DynamoDB."""
monkeypatch.setattr("boto3.resource", boto3_resource)
lambdautils.utils.get_state(key, environment=environment, layer=layer,
stage=stage, shard_id=shard_id,
namespace=namespace,
consistent=consistent)
boto3_resource("dynamodb").Table.assert_called_with(table)
if consistent is None:
# The default setting: use consistent reads
consistent = True
boto3_resource("dynamodb").Table().get_item.assert_called_with(
Key={"id": nkey}, ConsistentRead=consistent)
def test_no_state_table(boto3_resource, monkeypatch):
"""Test accessing state variable without having a state table."""
monkeypatch.setattr("boto3.resource", boto3_resource)
monkeypatch.delenv("HUMILIS_ENVIRONMENT")
with pytest.raises(lambdautils.state.StateTableError):
lambdautils.utils.set_state("sample_state_key", "sample_state_value")
with pytest.raises(lambdautils.state.StateTableError):
lambdautils.utils.delete_state("sample_state_key")
with pytest.raises(lambdautils.state.StateTableError):
lambdautils.utils.get_state("sample_state_key")
@pytest.mark.parametrize(
"key,value,environment,layer,stage,shard_id,namespace,table,nkey", [
("k", "v", "e", "l", "s", None, None, "e-l-s-state", "k"),
("k", "v", "e", "l", "s", None, "n", "e-l-s-state", "n:k"),
("k", "v", "e", "l", "s", "s1", "n", "e-l-s-state", "s1:n:k"),
("k", "v", "e", "l", "s", "s2", None, "e-l-s-state", "s2:k")])
def test_set_state(boto3_resource, monkeypatch, key, value, environment, layer,
stage, shard_id, namespace, table, nkey):
"""Tests setting a state variable."""
monkeypatch.setattr("boto3.resource", boto3_resource)
lambdautils.utils.set_state(key, value, environment=environment,
layer=layer, stage=stage, shard_id=shard_id,
namespace=namespace)
boto3_resource("dynamodb").Table.assert_called_with(table)
boto3_resource("dynamodb").Table().put_item.assert_called_with(
Item={"id": nkey, "value": json.dumps(value)})
@pytest.mark.parametrize(
"key,environment,layer,stage,shard_id,namespace,table,nkey", [
("k", "e", "l", "s", None, None, "e-l-s-state", "k"),
("k", "e", "l", "s", None, "n", "e-l-s-state", "n:k"),
("k", "e", "l", "s", "s1", "n", "e-l-s-state", "s1:n:k"),
("k", "e", "l", "s", "s2", None, "e-l-s-state", "s2:k")])
def test_delete_state(boto3_resource, monkeypatch, key, environment,
layer, stage, shard_id, namespace, table, nkey):
"""Tests setting a state variable."""
monkeypatch.setattr("boto3.resource", boto3_resource)
lambdautils.utils.delete_state(key, environment=environment,
layer=layer, stage=stage, shard_id=shard_id,
namespace=namespace)
boto3_resource("dynamodb").Table.assert_called_with(table)
boto3_resource("dynamodb").Table().delete_item.assert_called_with(
Key={"id": nkey})
def test_sentry_monitor_bad_client(boto3_client, raven_client, context,
monkeypatch):
"""Test that sentry_monitor handles raven client errors gracefully."""
class ClientError(Exception):
pass
def raise_error(dsn):
raise ClientError
monkeypatch.setattr("raven.Client", Mock(side_effect=raise_error))
monkeypatch.setattr("boto3.client", boto3_client)
@lambdautils.utils.sentry_monitor(environment="dummyenv",
stage="dummystage")
def lambda_handler(event, context):
pass
lambda_handler(None, context)
raven_client.captureException.assert_not_called()
@pytest.mark.parametrize(
"kstream, fstream, rcalls, kcalls, fcalls, ev", [
("a", "b", 1, 0, 0, {"Records": [{}]}),
(None, "b", 1, 0, 0, {"Records": [{}]}),
(None, None, 1, 0, 0, None),
(None, None, 1, 0, 0, None),
("a", "b", 1, 0, 0, None),
("a", None, 1, 0, 0, None)])
def test_sentry_monitor_exception(
kstream, fstream, rcalls, kcalls, fcalls, ev,
boto3_client, raven_client, context, kinesis_event, monkeypatch):
"""Tests the sentry_monitor decorator when throwing an exception and
lacking an error stream where to dump the errors."""
if ev is None:
# Default to a Kinesis event
ev = kinesis_event
monkeypatch.setattr("boto3.client", boto3_client)
monkeypatch.setattr("raven.Client", Mock(return_value=raven_client))
monkeypatch.setattr("lambdautils.monitor.SentryHandler", Mock())
monkeypatch.setattr("lambdautils.utils.get_secret",
Mock(return_value="dummydsn"))
error_stream = {
"kinesis_stream": kstream,
"firehose_delivery_stream": fstream}
@lambdautils.utils.sentry_monitor(error_stream=error_stream)
def lambda_handler(event, context):
"""Raise an error."""
raise KeyError
with pytest.raises(KeyError):
lambda_handler(ev, context)
# Should have captured only 1 error:
# * The original KeyError
assert raven_client.captureException.call_count == rcalls
# And should have sent the events to the Kinesis and FH error streams
assert boto3_client("kinesis").put_records.call_count == kcalls
assert boto3_client("firehose").put_record_batch.call_count == fcalls
def test_send_to_kinesis_stream(search_events, boto3_client, monkeypatch):
"""Tests sending events to a Kinesis stream."""
monkeypatch.setattr("boto3.client", boto3_client)
lambdautils.utils.send_to_kinesis_stream(search_events, "dummy_stream")
boto3_client("kinesis").put_records.call_count == 1
def test_send_to_delivery_stream(search_events, boto3_client, monkeypatch):
"""Tests sending events to a Firehose delivery stream."""
monkeypatch.setattr("boto3.client", boto3_client)
lambdautils.utils.send_to_delivery_stream(search_events, "dummy_stream")
boto3_client("firehose").put_record_batch.call_count == 1
@pytest.mark.parametrize("deserializer, embed_ts", [
[json.loads, False],
[json.loads, "kinesis_timestamp"],
[None, False]])
def test_unpack_kinesis_event(kinesis_event, deserializer, embed_ts):
"""Extracts json-serialized events from a Kinesis events."""
events, shard_id = lambdautils.utils.unpack_kinesis_event(
kinesis_event, deserializer=deserializer, embed_timestamp=embed_ts)
# There should be one event per kinesis record
assert len(events) == len(kinesis_event["Records"])
assert shard_id == kinesis_event["Records"][0]["eventID"].split(":")[0]
if embed_ts:
assert all(embed_ts in ev for ev in events)
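# Running this suite (an assumption: the fixtures used above -- boto3_resource,
# boto3_client, raven_client, context, kinesis_event, search_events -- are
# provided by a conftest.py next to this file):
#
#   pip install pytest mock
#   pytest tests/test_all.py -q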
4300329d7c7ed5214d1d6d7a95fd6dd634fbc6d1 | 11,269 | py | Python | authserver/maildaemons/forwarder/server.py | jdelic/authserver | e800664436b252fcdf224a9af46a1122c87be3ca | ["MIT"] | 8 | 2017-07-04T10:07:32.000Z | 2022-01-02T10:31:43.000Z | authserver/maildaemons/forwarder/server.py | jdelic/authserver | e800664436b252fcdf224a9af46a1122c87be3ca | ["MIT"] | 14 | 2020-02-11T21:42:38.000Z | 2022-03-28T16:00:55.000Z | authserver/maildaemons/forwarder/server.py | jdelic/authserver | e800664436b252fcdf224a9af46a1122c87be3ca | ["MIT"] | 1 | 2020-03-01T10:39:28.000Z | 2020-03-01T10:39:28.000Z |
#!/usr/bin/env python3 -u
# -*- encoding: utf-8 -*-
import argparse
import asyncore
import json
import logging
import signal
import sys
import os
from types import FrameType
from typing import Tuple, Sequence, Any, Union, Optional, List, Dict
from concurrent.futures import ThreadPoolExecutor as Pool
import daemon
from django.db.utils import OperationalError
import authserver
from maildaemons.utils import SMTPWrapper, PatchedSMTPChannel, SaneSMTPServer
_log = logging.getLogger(__name__)
pool = Pool()
class ForwarderServer(SaneSMTPServer):
def __init__(self, remote_relay_ip: str, remote_relay_port: int, local_delivery_ip: str,
local_delivery_port: int, *args: Any, **kwargs: Any) -> None:
super().__init__(*args, **kwargs)
self.smtp = SMTPWrapper(
external_ip=remote_relay_ip, external_port=remote_relay_port,
error_relay_ip=local_delivery_ip, error_relay_port=local_delivery_port
)
# ** must be thread-safe, don't modify shared state,
# _log should be thread-safe as stated by the docs. Django ORM should be as well.
def _process_message(self, peer: Tuple[str, int], mailfrom: str, rcpttos: Sequence[str], data: bytes, *,
channel: PatchedSMTPChannel,
**kwargs: Any) -> Optional[str]:
# we can't import the Domain model before Django has been initialized
from mailauth.models import EmailAlias, Domain
data = self.add_received_header(peer, data, channel)
remaining_rcpttos = list(rcpttos) # ensure that new_rcpttos is a mutable list
combined_rcptto = {} # type: Dict[str, List[str]] # { new_mailfrom: [recipients] }
def add_rcptto(mfrom: str, rcpt: Union[str, List]) -> None:
if mfrom in combined_rcptto:
if isinstance(rcpt, list):
combined_rcptto[mfrom] += rcpt
else:
combined_rcptto[mfrom].append(rcpt)
else:
if isinstance(rcpt, list):
combined_rcptto[mfrom] = rcpt
else:
combined_rcptto[mfrom] = [rcpt]
# we're going to modify remaining_rcpttos so we start from its end
for ix in range(len(remaining_rcpttos) - 1, -1, -1):
rcptto = rcpttos[ix].lower()
rcptuser, rcptdomain = rcptto.split("@", 1)
# implement domain catch-all redirect
domain = None # type: Optional[Domain]
try:
domain = Domain.objects.get(name=rcptdomain)
except Domain.DoesNotExist:
pass
except OperationalError:
_log.exception("Database unavailable.")
return "421 Processing problem. Please try again later."
if domain:
if domain.redirect_to:
_log.debug("ix: %s - rcptto: %s - remaining rcpttos: %s", ix, rcptto, remaining_rcpttos)
del remaining_rcpttos[ix]
new_rcptto = "%s@%s" % (rcptuser, domain.redirect_to)
_log.info("%sForwarding email from <%s> to <%s> to domain @%s",
"(Retry) " if "retry" in kwargs and kwargs["retry"] else "",
mailfrom, rcptto, domain.redirect_to)
add_rcptto(mailfrom, new_rcptto)
continue
# follow the same path as the stored procedure authserver_resolve_alias(...)
if "-" in rcptuser:
# convert the first - to a +
user_mailprefix = "%s+%s" % tuple(rcptuser.split("-", 1)) # type: ignore
else:
user_mailprefix = rcptuser
if "+" in user_mailprefix:
# if we had a dashext, or a plusext, we're left with just the prefix after this
user_mailprefix = user_mailprefix.split("+", 1)[0]
try:
alias = EmailAlias.objects.get(mailprefix__iexact=user_mailprefix,
domain__name__iexact=rcptdomain) # type: EmailAlias
except EmailAlias.DoesNotExist:
# OpenSMTPD shouldn't even call us for invalid addresses if we're configured correctly
_log.error("%sUnknown mail address: %s (from: %s, prefix: %s)",
"(Retry) " if "retry" in kwargs and kwargs["retry"] else "",
rcptto, mailfrom, user_mailprefix)
continue
except OperationalError:
_log.exception("Database unavailable.")
return "421 Processing problem. Please try again later."
if alias.forward_to is not None:
# it's a mailing list, forward the email to all connected addresses
del remaining_rcpttos[ix] # remove this recipient from the list
_newmf = mailfrom
if alias.forward_to.new_mailfrom != "":
_newmf = alias.forward_to.new_mailfrom
_log.info("%sForwarding email from <%s> with new sender <%s> to <%s>",
"(Retry) " if "retry" in kwargs and kwargs["retry"] else "",
mailfrom, _newmf, alias.forward_to.addresses)
add_rcptto(_newmf, alias.forward_to.addresses)
# if there are any remaining non-list/non-forward recipients, we inject them back to OpenSMTPD here
if len(remaining_rcpttos) > 0:
_log.info("%sDelivering email from <%s> to remaining recipients <%s>",
"(Retry) " if "retry" in kwargs and kwargs["retry"] else "",
mailfrom, remaining_rcpttos)
add_rcptto(mailfrom, remaining_rcpttos)
if len(combined_rcptto.keys()) == 1:
_log.debug("Only one mail envelope sender, forwarding is atomic")
results = {k: "unsent" for k in combined_rcptto.keys()} # type: Dict[str, str]
for new_mailfrom in combined_rcptto.keys():
_log.debug("Injecting email from <%s> to <%s>", new_mailfrom, combined_rcptto[new_mailfrom])
ret = self.smtp.sendmail(new_mailfrom, combined_rcptto[new_mailfrom], data)
if ret is not None:
results[new_mailfrom] = "failure"
if len(combined_rcptto.keys()) > 1:
_log.error("Non-atomic mail sending failed from <%s> in dict(%s)", combined_rcptto.keys(),
json.dumps(results))
return ret
results[new_mailfrom] = "success"
# TODO: log results
_log.debug("Done processing.")
return None
def process_message(self, *args: Any, **kwargs: Any) -> Optional[str]:
future = pool.submit(ForwarderServer._process_message, self, *args, **kwargs)
return future.result()
def run(_args: argparse.Namespace) -> None:
server = ForwarderServer(_args.remote_relay_ip, _args.remote_relay_port,
_args.local_delivery_ip, _args.local_delivery_port,
(_args.input_ip, _args.input_port), None, decode_data=False,
daemon_name="mailforwarder")
asyncore.loop()
def _sigint_handler(sig: int, frame: FrameType) -> None:
print("CTRL+C exiting")
pool.shutdown(wait=False)
sys.exit(1)
def _main() -> None:
signal.signal(signal.SIGINT, _sigint_handler)
parser = argparse.ArgumentParser(
description="This is a SMTP daemon that is used through OpenSMTPD configuration "
"to check whether incoming emails are addressed to a forwarding email alias "
"and if they are, inject emails to all list delivery addresses / expand the alias."
)
grp_daemon = parser.add_argument_group("Daemon options")
grp_daemon.add_argument("-p", "--pidfile", dest="pidfile", default="./mailforwarder-server.pid",
help="Path to a pidfile")
grp_daemon.add_argument("-u", "--user", dest="user", default=None, help="Drop privileges and switch to this user")
grp_daemon.add_argument("-g", "--group", dest="group", default=None,
help="Drop privileges and switch to this group")
grp_daemon.add_argument("-d", "--daemonize", dest="daemonize", default=False, action="store_true",
help="If set, fork into background")
grp_daemon.add_argument("-v", "--verbose", dest="verbose", default=False, action="store_true",
help="Output extra logging (not implemented right now)")
grp_daemon.add_argument("-C", "--chdir", dest="chdir", default=".",
help="Change working directory to the provided value")
grp_network = parser.add_argument_group("Network options")
grp_network.add_argument("--input-ip", dest="input_ip", default="127.0.0.1", help="The network address to bind to")
grp_network.add_argument("--input-port", dest="input_port", metavar="PORT", type=int, default=10046,
help="The port to bind to")
grp_network.add_argument("--local-delivery-ip", dest="local_delivery_ip", default="127.0.0.1",
help="The OpenSMTPD instance IP for local email to be delivered.")
grp_network.add_argument("--local-delivery-port", dest="local_delivery_port", metavar="PORT", type=int,
default=10045, help="The port where OpenSMTPD listens for local email to be delivered")
grp_network.add_argument("--remote-relay-ip", dest="remote_relay_ip", default="127.0.0.1",
help="The OpenSMTPD instance IP that accepts mail for relay to external domains.")
grp_network.add_argument("--remote-relay-port", dest="remote_relay_port", default=10045,
help="The port where OpenSMTPD listens for mail to relay.")
grp_django = parser.add_argument_group("Django options")
grp_django.add_argument("--settings", dest="django_settings", default="authserver.settings",
help="The Django settings module to use for authserver database access (default: "
"authserver.settings)")
_args = parser.parse_args()
os.environ.setdefault("DJANGO_SETTINGS_MODULE", _args.django_settings)
# noinspection PyUnresolvedReferences
from django.conf import settings # initialize Django
import django
django.setup()
_log.info("mailforwarder v%s: Forwarding Alias Service starting" % authserver.version)
_log.info("Django ORM initialized")
pidfile = open(_args.pidfile, "w")
ctx = daemon.DaemonContext(
working_directory=_args.chdir,
pidfile=pidfile,
uid=_args.user,
gid=_args.group,
detach_process=_args.daemonize,
files_preserve=[1, 2, 3, pidfile],
stdin=sys.stdin,
stdout=sys.stdout,
stderr=sys.stderr,
)
with ctx:
run(_args)
def main() -> None:
try:
_main()
except Exception:
_log.critical("Unhandled exception", exc_info=True)
sys.exit(1)
if __name__ == "__main__":
main()
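# Example invocation (a sketch; every flag below is declared in _main() above,
# the values themselves are illustrative):
#
#   python3 server.py --input-ip 127.0.0.1 --input-port 10046 \
#       --local-delivery-port 10045 --remote-relay-port 10046 \
#       --pidfile /run/mailforwarder.pid --daemonize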
430070d2630f84dfb573574ae265484cbf0ee429 | 2,074 | py | Python | services/backend/project/api/sites.py | kzkaneoka/custom-job-search | ca6054aee979cb1eff701dc5ba0cf56fb92baf44 | ["MIT"] | null | null | null | services/backend/project/api/sites.py | kzkaneoka/custom-job-search | ca6054aee979cb1eff701dc5ba0cf56fb92baf44 | ["MIT"] | 1 | 2021-06-02T00:51:06.000Z | 2021-06-02T00:51:06.000Z | services/backend/project/api/sites.py | kzkaneoka/custom-job-search | ca6054aee979cb1eff701dc5ba0cf56fb92baf44 | ["MIT"] | null | null | null |
import requests
from bs4 import BeautifulSoup, element
class Indeed:
def __init__(self, words, location, offset):
self.url = "https://www.indeed.com/jobs?as_and={}&l={}&sort=date&start={}".format(
"+".join(set(d.strip().lower() for d in words.split(",") if d)),
"+".join(list(d.lower() for d in location.split(" ") if d)),
int(offset),
)
def extract(self, soup):
if not soup:
return []
jobs = []
for tag in soup.find_all(name="div", attrs={"class": "jobsearch-SerpJobCard"}):
job = {}
for child in tag.children:
if child and type(child) == element.Tag and child.attrs:
if child.attrs["class"][0] == "title":
job["title"] = child.get_text().strip()
for grandchild in child.find_all(name="a"):
if grandchild.has_attr("href"):
job["link"] = (
"https://www.indeed.com" + grandchild["href"]
)
elif child.attrs["class"][0] == "sjcl":
lines = child.get_text().strip().split("\n")
job["company"] = lines[0]
job["location"] = lines[-1]
elif child.attrs["class"][0] == "jobsearch-SerpJobCard-footer":
job["date"] = "n/a"
for grandchild in child.find_all(
name="span", attrs={"class": "date"}
):
job["date"] = grandchild.get_text()
jobs.append(job)
return jobs
def fetch(self):
soup = None
try:
r = requests.get(self.url)
r.raise_for_status()
soup = BeautifulSoup(r.text, "html.parser")
except requests.RequestException:
pass  # network or HTTP error: fall through and return None
return soup
def search(self):
soup = self.fetch()
jobs = self.extract(soup)
return jobs
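# Usage sketch (assumptions: network access, and that Indeed still serves the
# markup the CSS classes above were written against):
def _indeed_example():
    scraper = Indeed(words="python,flask", location="new york", offset=0)
    for job in scraper.search():
        print(job.get("title"), "|", job.get("company"), "|", job.get("date"))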
430328989c54e456016360148c864a60ebe10a5f | 6,405 | py | Python | efficientdet/dataset/csv_.py | HyunjiEllenPak/automl | fedf04adf12c5fd11045ea06e2f5c11a5a5490c4 | ["Apache-2.0"] | null | null | null | efficientdet/dataset/csv_.py | HyunjiEllenPak/automl | fedf04adf12c5fd11045ea06e2f5c11a5a5490c4 | ["Apache-2.0"] | null | null | null | efficientdet/dataset/csv_.py | HyunjiEllenPak/automl | fedf04adf12c5fd11045ea06e2f5c11a5a5490c4 | ["Apache-2.0"] | null | null | null |
"""
Copyright 2017-2018 yhenon (https://github.com/yhenon/)
Copyright 2017-2018 Fizyr (https://fizyr.com)
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# from generators.common import Generator
import cv2
import numpy as np
from PIL import Image
from six import raise_from
import csv
import sys
import os.path as osp
from collections import OrderedDict
import os
def _parse(value, function, fmt):
"""
Parse a string into a value, and format a nice ValueError if it fails.
Returns `function(value)`.
Any `ValueError` raised is caught and a new `ValueError` is raised
with message `fmt.format(e)`, where `e` is the caught `ValueError`.
"""
try:
return function(value)
except ValueError as e:
raise_from(ValueError(fmt.format(e)), None)
def _read_classes(csv_reader):
"""
Parse the classes file given by csv_reader.
"""
result = OrderedDict()
for line, row in enumerate(csv_reader):
line += 1
try:
class_name, class_id = row
except ValueError:
raise_from(ValueError('line {}: format should be \'class_name,class_id\''.format(line)), None)
class_id = _parse(class_id, int, 'line {}: malformed class ID: {{}}'.format(line))
if class_name in result:
raise ValueError('line {}: duplicate class name: \'{}\''.format(line, class_name))
result[class_name] = class_id
return result
def _read_quadrangle_annotations(csv_reader, classes, detect_text=False):
"""
Read annotations from the csv_reader.
Args:
csv_reader: csv reader of args.annotations_path
classes: list[str] all the class names read from args.classes_path
Returns:
result: dict, dict is like {image_path: [{'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2,
'x3': x3, 'y3': y3, 'x4': x4, 'y4': y4, 'class': class_name}]}
"""
result = OrderedDict()
for line, row in enumerate(csv_reader, 1):
try:
img_file, x1, y1, x2, y2, x3, y3, x4, y4, class_name = row[:10]
if img_file not in result:
result[img_file] = []
# If a row contains only an image path, it's an image without annotations.
if (x1, y1, x2, y2, x3, y3, x4, y4, class_name) == ('', '', '', '', '', '', '', '', ''):
continue
x1 = _parse(x1, int, 'line {}: malformed x1: {{}}'.format(line))
y1 = _parse(y1, int, 'line {}: malformed y1: {{}}'.format(line))
x2 = _parse(x2, int, 'line {}: malformed x2: {{}}'.format(line))
y2 = _parse(y2, int, 'line {}: malformed y2: {{}}'.format(line))
x3 = _parse(x3, int, 'line {}: malformed x3: {{}}'.format(line))
y3 = _parse(y3, int, 'line {}: malformed y3: {{}}'.format(line))
x4 = _parse(x4, int, 'line {}: malformed x4: {{}}'.format(line))
y4 = _parse(y4, int, 'line {}: malformed y4: {{}}'.format(line))
# check if the current class name is correctly present
if detect_text:
if class_name == '###':
continue
else:
class_name = 'text'
if class_name not in classes:
raise ValueError(f'line {line}: unknown class name: \'{class_name}\' (classes: {classes})')
result[img_file].append({'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2,
'x3': x3, 'y3': y3, 'x4': x4, 'y4': y4, 'class': class_name})
except ValueError:
raise_from(ValueError(
f'line {line}: format should be \'img_file,x1,y1,x2,y2,x3,y3,x4,y4,class_name\' or \'img_file,,,,,\''),
None)
return result
def _read_annotations(csv_reader, classes, base_dir):
"""
Read annotations from the csv_reader.
Args:
csv_reader: csv reader of args.annotations_path
classes: list[str] all the class names read from args.classes_path
Returns:
result: dict, dict is like {image_path: [{'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2, 'class': class_name}]}
"""
result = OrderedDict()
for line, row in enumerate(csv_reader, 1):
try:
img_file, x1, y1, x2, y2 = row[:5]
class_name = img_file.split("/")[0]
if img_file not in result:
result[img_file] = []
# If a row contains only an image path, it's an image without annotations.
if (x1, y1, x2, y2, class_name) == ('', '', '', '', ''):
continue
x1 = _parse(x1, int, 'line {}: malformed x1: {{}}'.format(line))
y1 = _parse(y1, int, 'line {}: malformed y1: {{}}'.format(line))
x2 = _parse(x2, int, 'line {}: malformed x2: {{}}'.format(line))
y2 = _parse(y2, int, 'line {}: malformed y2: {{}}'.format(line))
if class_name not in classes:
raise ValueError(f'line {line}: unknown class name: \'{class_name}\' (classes: {classes})')
result[img_file].append({'x1': x1, 'y1': y1, 'x2': x2, 'y2': y2, 'class': class_name,
'filename':img_file})
except ValueError:
raise_from(ValueError(
f'line {line}: format should be \'img_file,x1,y1,x2,y2,class_name\' or \'img_file,,,,,\''),
None)
return result
def _open_for_csv(path):
"""
Open a file with flags suitable for csv.reader.
For python2 this means mode 'rb'; for python3 it means 'r' with "universal newlines".
"""
if sys.version_info[0] < 3:
return open(path, 'rb')
else:
return open(path, 'r', newline='')
def load_image(path):
"""
Load an image at the image_index.
"""
image = cv2.imread(path)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
return image
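# Usage sketch for the parsers above (the file names are hypothetical; the CSV
# layouts are the ones the docstrings describe):
def _load_example():
    with _open_for_csv('classes.csv') as f:
        classes = _read_classes(csv.reader(f, delimiter=','))
    with _open_for_csv('annotations.csv') as f:
        annotations = _read_annotations(csv.reader(f, delimiter=','), classes, base_dir='.')
    return classes, annotations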
430510812312a4bfda7f6ca962f49716cd0833a8 | 3,610 | py | Python | SimpleBudget/SimpleBudget/budgets/tests.py | speratus/SimpleBudget | d4903db8693694572c4bcb367fe4a318a3867d68 | ["MIT"] | null | null | null | SimpleBudget/SimpleBudget/budgets/tests.py | speratus/SimpleBudget | d4903db8693694572c4bcb367fe4a318a3867d68 | ["MIT"] | null | null | null | SimpleBudget/SimpleBudget/budgets/tests.py | speratus/SimpleBudget | d4903db8693694572c4bcb367fe4a318a3867d68 | ["MIT"] | null | null | null |
from django.test import TestCase
from .validators import validate_budget_period
from .models import Budget, Expense, Payment
from django.contrib.auth.models import User
from django.core.exceptions import ValidationError
class ExpenseTestCases(TestCase):
def setUp(self) -> None:
user = User.objects.create_user('joe', email='hellother@obiwan.com', password='imlame')
budget = Budget.objects.create(name='My budget',
creation_date='2019-05-03',
owner=user,
description='The budget of champions.'
)
Expense.objects.create(name='Water park visit',
amount=30.00,
period='1-monthly',
payee='Super awesome Water parks',
description='I will go to the water park.',
date='2019-06-04',
budget=budget
)
Payment.objects.create(name='Paycheck',
amount=4000.0,
period='1-monthly',
description='Where the Mullah comes from',
date='2017-01-12',
origin='The big boss fom up top in HR.',
budget=budget
)
def test_proper_str_formation(self):
budget = Budget.objects.get(pk=1)
expense = Expense.objects.get(pk=1)
payment = Payment.objects.get(pk=1)
self.assertEquals(budget.__str__(), 'My budget: joe', 'The budget was not created properly.')
self.assertEquals(expense.__str__(), 'Water park visit: 30.0', 'The expense was not created properly.')
self.assertEquals(payment.__str__(), 'Paycheck: 4000.0', 'The string function on payment is not working properly.')
class BudgetPeriodValidatorTestCase(TestCase):
valid_cases = [
'1-daily',
'1-onetime',
'1-annually',
'5-quarterly',
'7-weekly',
'3-annually',
'10-monthly',
'19-weekly',
'99-daily'
]
invalid_cases = [
'0.4-daily',
'0-weekly',
'ad-annually',
'100-weekly',
'4.6-quarterly',
'-31-daily',
'whoot-quarterly',
'59-zoobly',
'5-onetime',
'03-monthly',
]
def test_budget_period_validator(self):
for c in self.valid_cases:
self.assertEquals(validate_budget_period(c), None, f'failed on {c}')
def test_budget_period_validator_fail(self):
for c in self.invalid_cases:
self.assertRaises(ValidationError, validate_budget_period, c)
def test_validator_in_expense_model_creation_invalid(self):
user = User.objects.create(username='joe', email='hithere@obiwan.com', password='imlame')
budget = Budget.objects.create(name='My Budget',
creation_date='2019-04-13',
owner=user,
)
for c in self.invalid_cases:
self.assertRaises(Exception, Expense.objects.create,
name=c + '1',
amount=15.0,
date='2014-05-06',
period=c,
budget=budget
)
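# Running this suite (an assumption: the standard Django test runner, invoked
# from the project root with 'budgets' as the app label):
#
#   python manage.py test budgets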
4306c6bd0579b126c04f1694e74e21eaed99d124 | 3,440 | py | Python | var/spack/repos/builtin/packages/abacus/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | null | null | null | var/spack/repos/builtin/packages/abacus/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | null | null | null | var/spack/repos/builtin/packages/abacus/package.py | jeanbez/spack | f4e51ce8f366c85bf5aa0eafe078677b42dae1ba | ["ECL-2.0", "Apache-2.0", "MIT-0", "MIT"] | 2 | 2019-02-08T20:37:20.000Z | 2019-03-31T15:19:26.000Z |
# Copyright 2013-2022 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
import re
from spack.package import *
class Abacus(MakefilePackage):
"""ABACUS (Atomic-orbital Based Ab-initio Computation at UStc)
is an open-source computer code package aiming
for large-scale electronic-structure simulations
from first principles"""
maintainers = ["bitllion"]
homepage = "http://abacus.ustc.edu.cn/"
git = "https://github.com/abacusmodeling/abacus-develop.git"
url = "https://github.com/abacusmodeling/abacus-develop/archive/refs/tags/v2.2.1.tar.gz"
version("develop", branch="develop")
version(
"2.2.3",
sha256="88dbf6a3bdd907df3e097637ec8e51fde13e2f5e0b44f3667443195481320edf",
)
version(
"2.2.2",
sha256="4a7cf2ec6e43dd5c53d5f877a941367074f4714d93c1977a719782957916169e",
)
version(
"2.2.1",
sha256="14feca1d8d1ce025d3f263b85ebfbebc1a1efff704b6490e95b07603c55c1d63",
)
version(
"2.2.0",
sha256="09d4a2508d903121d29813a85791eeb3a905acbe1c5664b8a88903f8eda64b8f",
)
variant("openmp", default=True, description="Enable OpenMP support")
depends_on("elpa+openmp", when="+openmp")
depends_on("elpa~openmp", when="~openmp")
depends_on("cereal")
depends_on("libxc")
depends_on("fftw")
# MPI is a necessary dependency
depends_on("mpi", type=("build", "link", "run"))
depends_on("mkl")
build_directory = "source"
def edit(self, spec, prefix):
if "+openmp" in spec:
inc_var = "_openmp-"
system_var = (
"ELPA_LIB = -L${ELPA_LIB_DIR} -lelpa_openmp -Wl, -rpath=${ELPA_LIB_DIR}"
)
else:
inc_var = "-"
system_var = (
"ELPA_LIB = -L${ELPA_LIB_DIR} -lelpa -Wl,-rpath=${ELPA_LIB_DIR}"
)
tempInc = (
"\
FORTRAN = ifort\n\
CPLUSPLUS = icpc\n\
CPLUSPLUS_MPI = mpiicpc\n\
LAPACK_DIR = $(MKLROOT)\n\
FFTW_DIR = %s\n\
ELPA_DIR = %s\n\
ELPA_INCLUDE = -I${ELPA_DIR}/include/elpa%s%s\n\
CEREAL_DIR = %s\n\
OBJ_DIR = obj\n\
OBJ_DIR_serial = obj\n\
NP = 14\n"
% (
spec["fftw"].prefix,
spec["elpa"].prefix,
inc_var,
"{0}".format(spec["elpa"].version),
spec["cereal"].prefix,
)
)
with open(self.build_directory + "/Makefile.vars", "w") as f:
f.write(tempInc)
lineList = []
Pattern1 = re.compile("^ELPA_INCLUDE_DIR")
Pattern2 = re.compile("^ELPA_LIB\\s*= ")
with open(self.build_directory + "/Makefile.system", "r") as f:
while True:
line = f.readline()
if not line:
break
elif Pattern1.search(line):
pass
elif Pattern2.search(line):
pass
else:
lineList.append(line)
with open(self.build_directory + "/Makefile.system", "w") as f:
for i in lineList:
f.write(i)
with open(self.build_directory + "/Makefile.system", "a") as f:
f.write(system_var)
def install(self, spec, prefix):
install_tree("bin", prefix.bin)
43099e0956c66d804368b7d95ddbb6f01a3756de | 2,349 | py | Python | WP3/Task3.2/spark/shared/addcountry2dataset.py | on-merrit/ON-MERRIT | a21324a54a6365f2f769b5952b0cf5347a97d480 | ["MIT"] | 2 | 2019-12-10T13:10:58.000Z | 2019-12-13T10:11:41.000Z | WP3/Task3.2/spark/shared/addcountry2dataset.py | on-merrit/ON-MERRIT | a21324a54a6365f2f769b5952b0cf5347a97d480 | ["MIT"] | 4 | 2020-03-31T12:13:45.000Z | 2020-04-15T15:59:17.000Z | WP3/Task3.2/spark/shared/addcountry2dataset.py | on-merrit/ON-MERRIT | a21324a54a6365f2f769b5952b0cf5347a97d480 | ["MIT"] | 1 | 2020-02-26T08:52:39.000Z | 2020-02-26T08:52:39.000Z |
import csv
from os import listdir
from os.path import isfile, join
from osgeo import ogr
from multiprocessing import Pool
driver = ogr.GetDriverByName('GeoJSON')
countryFile = driver.Open("../data/external/countries.json")
layer = countryFile.GetLayer()
class Point(object):
""" Wrapper for ogr point """
def __init__(self, lat, lng):
""" Coordinates are in degrees """
self.point = ogr.Geometry(ogr.wkbPoint)
self.point.AddPoint(lng, lat)
def getOgr(self):
return self.point
ogr = property(getOgr)
class Country(object):
""" Wrapper for ogr country shape. Not meant to be instantiated directly. """
def __init__(self, shape):
self.shape = shape
def getIso(self):
return self.shape.GetField('ISO_A3')
iso = property(getIso)
def __str__(self):
return self.shape.GetField('ADMIN')
def contains(self, point):
return self.shape.geometry().Contains(point.ogr)
def getCountry(lat, lng):
"""
Checks the given GPS coordinates for the containing country.
Output is either the country's ISO_A3 code or None
"""
point = ogr.Geometry(ogr.wkbPoint)
point.AddPoint(lng, lat)
for i in range(layer.GetFeatureCount()):
country = layer.GetFeature(i)
if country.geometry().Contains(point):
return Country(country).iso
# nothing found
return None
def process_chunk(file):
with open(file, 'r') as read_obj, open(f"{file}_done.csv", 'w') as write_obj:
# pass the file object to reader() to get the reader object
csv_reader = csv.reader(read_obj)
csv_writer = csv.writer(write_obj)
# Iterate over each row in the csv using reader object
count=0
for row in csv_reader:
# row variable is a list that represents a row in csv
if row[2] and row[3]:
country = getCountry(float(row[2]), float(row[3]))
row.append(country)
csv_writer.writerow(row)
count+=1
if count%100==0:
print(f"File {file} progress: {count}/100000")
print(f"Processing {file} terminated")
allfiles = [join("q1a_latlon_split", f) for f in listdir("q1a_latlon_split") if isfile(join("q1a_latlon_split", f))]
with Pool(32) as p:
p.map(process_chunk, allfiles)
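# Point-in-polygon sanity check (a sketch; the ISO codes assume the bundled
# countries.json carries an ISO_A3 field, as getIso() above expects):
#
#   getCountry(48.8566, 2.3522)   # -> 'FRA' for a point in Paris
#   getCountry(0.0, -30.0)        # -> None (open ocean)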
430aa61162281dff31190e84ebaac0dead9910f0
| 3,126
|
py
|
Python
|
ai-experiments/sudoku/rdisplay.py
|
Henchel-Santillan/open-ai
|
545bf8468330dce7e705c17e0ac4ce3889f20d5b
|
[
"MIT"
] | null | null | null |
ai-experiments/sudoku/rdisplay.py
|
Henchel-Santillan/open-ai
|
545bf8468330dce7e705c17e0ac4ce3889f20d5b
|
[
"MIT"
] | null | null | null |
ai-experiments/sudoku/rdisplay.py
|
Henchel-Santillan/open-ai
|
545bf8468330dce7e705c17e0ac4ce3889f20d5b
|
[
"MIT"
] | null | null | null |
import cv2
import numpy as np


def process_core(image):
    '''
    Returns an inverted preprocessed binary image, with noise
    reduction achieved with greyscaling, Gaussian Blur, Otsu's Threshold, and
    an open morph.
    '''
    # apply greyscaling, Gaussian Blur, and Otsu's Threshold
    greyscale = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
    blur = cv2.GaussianBlur(greyscale, (3, 3), 0)
    threshold = cv2.threshold(blur, 0, 255, cv2.THRESH_BINARY_INV + cv2.THRESH_OTSU)[1]

    # apply an open morph to the inverted image to remove noise
    kernel = cv2.getStructuringElement(cv2.MORPH_RECT, (3, 3))
    invert = 255 - cv2.morphologyEx(threshold, cv2.MORPH_OPEN, kernel, iterations=1)
    return invert


def find_houghlines(image, width, height):
    hough_lines = None
    lines = cv2.HoughLinesP(image, 1, np.pi / 180, 50, minLineLength=50, maxLineGap=5)

    # generates a blank black image with a single color layer
    if lines is not None and len(lines) != 0:
        hough_lines = np.zeros((height, width), dtype=np.uint8)
        for line in lines:
            x1, y1, x2, y2 = line[0]
            cv2.line(hough_lines, (x1, y1), (x2, y2), (255, 255, 255), 2)
    return hough_lines


def find_bounds(image):
    rect_bounds = None

    # Run contour recognition
    contours, _ = cv2.findContours(image, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)

    # Take the list of contours sorted by area, largest to smallest
    # (the original computed the sorted list only inside len(); the size-cutoff
    # break below only makes sense when iterating in sorted order)
    contours = sorted(contours, key=cv2.contourArea, reverse=True)

    # If at least one contour is identified, can process a visual approximation of the contour bounds
    if len(contours) > 0:
        contour_bounds = None

        # Pre-determined image size factor constant
        SFACTOR = 20
        for contour in contours:
            # Minimum intended size of a single cell is not reached, likely a cutoff, not worth approximating
            # (the original compared against image[0] * image[1], i.e. pixel rows, not the dimensions)
            if (image.shape[0] * image.shape[1]) / SFACTOR > cv2.contourArea(contour):
                break
            # note: epsilon here is the full arc length; a fraction such as
            # 0.02 * arcLength is more typical for quad detection
            approximation = cv2.approxPolyDP(contour, cv2.arcLength(contour, True), True)
            # This means that the approximated polygon is a quad
            if len(approximation) == 4:
                contour_bounds = approximation
                break

        if contour_bounds is not None:
            rect_bounds = np.zeros((4, 2), dtype=np.float32)
            corners = contour_bounds.reshape(-1, 2)
            # order the corners top-left, top-right, bottom-right, bottom-left;
            # summing/diffing the (4, 2) corners array gives one value per corner
            # (the original summed the (4, 1, 2) contour array, which over-indexes)
            rect_bounds[0] = corners[np.argmin(corners.sum(axis=1))]
            rect_bounds[2] = corners[np.argmax(corners.sum(axis=1))]
            rect_bounds[1] = corners[np.argmin(np.diff(corners, axis=1))]
            rect_bounds[3] = corners[np.argmax(np.diff(corners, axis=1))]
    return rect_bounds


# Transform the perspective to render as if looking down on paper (top-down view)
def transform(image, perspective):
    pass


# Process the grid based on expected clean binary image input
def process_grid(image, width, height):
    grid = None
    detected = False
    hough_lines = find_houghlines(image, width, height)
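A hedged end-to-end sketch of the pipeline above; 'sudoku.png' is an invented path, and the functions come from this file:

import cv2

# Hypothetical driver: read a BGR image, binarize it, then look for the grid corners.
image = cv2.imread("sudoku.png")
binary = process_core(image)
bounds = find_bounds(binary)
if bounds is not None:
    print("grid corners:", bounds.tolist())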
| 35.931034
| 102
| 0.636596
| 408
| 3,126
| 4.796569
| 0.39951
| 0.035769
| 0.024527
| 0.022994
| 0.094021
| 0.031681
| 0.031681
| 0
| 0
| 0
| 0
| 0.037428
| 0.273512
| 3,126
| 86
| 103
| 36.348837
| 0.824306
| 0.252399
| 0
| 0.044444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0.022222
| 0.044444
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
430abd204ed6b789ffeb3c172d2bcf921345ff43
| 7,419
|
py
|
Python
|
pythia/tasks/base_task.py
|
abhiskk/pythia
|
c33fb45d74353c25b6269b44551bcafefecb5c7e
|
[
"BSD-3-Clause"
] | 2
|
2019-05-23T02:07:03.000Z
|
2019-06-08T18:56:05.000Z
|
pythia/tasks/base_task.py
|
abhiskk/pythia
|
c33fb45d74353c25b6269b44551bcafefecb5c7e
|
[
"BSD-3-Clause"
] | null | null | null |
pythia/tasks/base_task.py
|
abhiskk/pythia
|
c33fb45d74353c25b6269b44551bcafefecb5c7e
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates.
"""
Tasks come above datasets in hierarchy level. In case you want to
implement a new task, you need to inherit ``BaseTask`` class. You need
to implement ``_get_available_datasets`` and ``_preprocess_item`` functions
to complete the implementation. You can check the source to see if you need
to override any other methods like ``prepare_batch``.

Check example of ``VQATask`` here_.

Example::

    from pythia.tasks.base_task import BaseTask
    from pythia.common.registry import registry

    @registry.register_task("my")
    class MyTask(BaseTask):
        def __init__(self):
            super().__init__("my")

        def _get_available_datasets(self):
            return ["my"]

        def _preprocess_item(self, item):
            item.text = None
            return item

.. _here: https://github.com/facebookresearch/pythia/blob/v0.3/pythia/tasks/vqa/vqa_task.py
"""
import sys

import numpy as np
from torch.utils.data import Dataset

from pythia.common.registry import registry


class BaseTask(Dataset):
    """
    BaseTask that task classes need to inherit in order to create a new task.
    Users must implement ``_get_available_datasets`` and ``_preprocess_item``
    in order to complete implementation.

    Args:
        task_name (str): Name of the task with which it will be registered
    """

    def __init__(self, task_name):
        super(BaseTask, self).__init__()
        self.task_name = task_name
        self.writer = registry.get("writer")

    def _process_datasets(self):
        if "datasets" not in self.opts:
            self.writer.write(
                "No datasets attribute present for task: %s."
                " Defaulting to all" % (self.task_name),
                "warning",
            )
            datasets = "all"
        else:
            datasets = self.opts["datasets"]

        if datasets is None or datasets == "all":
            datasets = self._get_available_datasets()

        if type(datasets) == str:
            datasets = list(map(lambda x: x.strip(), datasets.split(",")))

        # the original condition used 'and', which made this branch unreachable;
        # 'or' matches the evident intent of defaulting to every available dataset
        if len(datasets) == 0 or datasets[0] == "all":
            datasets = self._get_available_datasets()

        self.given_datasets = datasets

    def load(self, **opts):
        self.opts = opts
        self._process_datasets()

        self.datasets = []
        self.builders = []
        available_datasets = self._get_available_datasets()

        self.total_length = 0
        self.per_dataset_lengths = []
        self.num_datasets = 0

        for dataset in self.given_datasets:
            if dataset in available_datasets:
                builder_class = registry.get_builder_class(dataset)

                if builder_class is None:
                    print("No builder class found for %s." % dataset)
                    continue
                builder_instance = builder_class()

                if dataset in self.opts["dataset_attributes"]:
                    attributes = self.opts["dataset_attributes"][dataset]
                else:
                    self.writer.write(
                        "Dataset %s is missing from "
                        "dataset_attributes in config." % dataset,
                        "error",
                    )
                    sys.exit(1)

                dataset_type = self.opts.get("dataset_type", "train")
                builder_instance.build(dataset_type, attributes)
                dataset_instance = builder_instance.load(dataset_type, attributes)

                self.builders.append(builder_instance)
                self.datasets.append(dataset_instance)
                self.per_dataset_lengths.append(len(dataset_instance))
                self.total_length += len(dataset_instance)
            else:
                print(
                    "Dataset %s is not a valid dataset for task %s. Skipping"
                    % (dataset, self.task_name)
                )

        self.num_datasets = len(self.datasets)
        self.dataset_probablities = [1 for _ in range(self.num_datasets)]
        sampling = self.opts.get("dataset_size_proportional_sampling", None)

        if sampling is True:
            self.dataset_probablities = self.per_dataset_lengths[:]
            self.dataset_probablities = [
                prob / self.total_length for prob in self.dataset_probablities
            ]

        self.change_dataset()

    def _get_available_datasets(self):
        """Set available datasets for this task here.
        Override in your child task class
        Temporary solution, later we will use decorators to easily register
        datasets with a task

        Returns:
            List - List of available datasets for this particular task
        """
        return []

    def get_datasets(self):
        return self.datasets

    def __len__(self):
        return self.total_length

    def __getitem__(self, idx):
        idx = idx % self.per_dataset_lengths[self.dataset_choice]
        item = self.chosen_dataset[idx]
        return self._preprocess_item(item)

    def change_dataset(self):
        self.dataset_choice = np.random.choice(
            self.num_datasets, 1, p=self.dataset_probablities
        )[0]
        self.chosen_dataset = self.datasets[self.dataset_choice]

    def verbose_dump(self, *args, **kwargs):
        self.chosen_dataset.verbose_dump(*args, **kwargs)

    def prepare_batch(self, batch):
        return self.chosen_dataset.prepare_batch(batch)

    def _preprocess_item(self, item):
        """Preprocess an item to be returned from __getitem__.
        Override in your child task class, so you have control on what you are
        returning

        Args:
            item (Sample): Sample returned by a particular dataset

        Returns:
            Sample: Preprocessed item
        """
        raise NotImplementedError(
            "This task doesn't implement preprocess_item method"
        )

    def update_registry_for_model(self, config):
        """
        Use this if there is some specific configuration required by model
        which must be inferred at runtime.
        """
        for builder in self.builders:
            builder.update_registry_for_model(config)

    def init_args(self, parser):
        parser.add_argument_group("General Task Arguments")
        parser.add_argument(
            "-dsp",
            "--dataset_size_proportional_sampling",
            type=bool,
            default=0,
            help="Pass if you want to sample from"
            " dataset according to its size. Default: Equal "
            " weighted sampling",
        )

        # TODO: Figure out later if we want to init args from datasets
        # self._init_args(parser)

    def _init_args(self, parser):
        """Override this function to add extra parameters to
        parser in your child task class.

        Parameters
        ----------
        parser : ArgumentParser
            Original parser object passed from the higher level classes like
            trainer

        Returns
        -------
        type
            Description of returned object.
        """
        for builder in self.builders:
            builder.init_args(parser)

    def clean_config(self, config):
        """
        Override this in case you want to clean the config you updated earlier
        in update_registry_for_model
        """
        return config
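A worked sketch of the size-proportional sampling that load() sets up and change_dataset() consumes; the dataset lengths here are invented for illustration:

import numpy as np

# Assumed sizes, mirroring per_dataset_lengths above.
per_dataset_lengths = [8000, 2000]
total_length = sum(per_dataset_lengths)
dataset_probablities = [n / total_length for n in per_dataset_lengths]  # [0.8, 0.2]
dataset_choice = np.random.choice(len(per_dataset_lengths), 1, p=dataset_probablities)[0]
print(dataset_choice)  # index 0 roughly 80% of the time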
| 31.841202
| 91
| 0.604529
| 845
| 7,419
| 5.11716
| 0.276923
| 0.041628
| 0.032377
| 0.022202
| 0.157493
| 0.106383
| 0.021277
| 0
| 0
| 0
| 0
| 0.002162
| 0.314059
| 7,419
| 232
| 92
| 31.978448
| 0.847514
| 0.298558
| 0
| 0.078947
| 0
| 0
| 0.110634
| 0.014315
| 0
| 0
| 0
| 0.00431
| 0
| 1
| 0.131579
| false
| 0.008772
| 0.035088
| 0.026316
| 0.22807
| 0.017544
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
430c92ccfdcf3dc35e86f6e05e4602bd002c581a
| 6,982
|
py
|
Python
|
appimagebuilder/orchestrator.py
|
AppImageCrafters/AppImageBuilder
|
dd041050e65f8eff28f878a092fd07bcf3ec5a4d
|
[
"MIT"
] | null | null | null |
appimagebuilder/orchestrator.py
|
AppImageCrafters/AppImageBuilder
|
dd041050e65f8eff28f878a092fd07bcf3ec5a4d
|
[
"MIT"
] | 1
|
2019-11-12T03:52:01.000Z
|
2019-11-12T03:52:01.000Z
|
appimagebuilder/orchestrator.py
|
AppImageCrafters/AppImageBuilder
|
dd041050e65f8eff28f878a092fd07bcf3ec5a4d
|
[
"MIT"
] | null | null | null |
# Copyright 2021 Alexis Lopez Zubieta
#
# Permission is hereby granted, free of charge, to any person obtaining a
# copy of this software and associated documentation files (the "Software"),
# to deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
import os
import pathlib

from appimagebuilder.utils.finder import Finder
from appimagebuilder.context import AppInfo, Context, BundleInfo
from appimagebuilder.commands.apt_deploy import AptDeployCommand
from appimagebuilder.commands.create_appimage import CreateAppImageCommand
from appimagebuilder.commands.file_deploy import FileDeployCommand
from appimagebuilder.commands.pacman_deploy import PacmanDeployCommand
from appimagebuilder.commands.run_script import RunScriptCommand
from appimagebuilder.commands.run_test import RunTestCommand
from appimagebuilder.commands.setup_app_info import SetupAppInfoCommand
from appimagebuilder.commands.setup_runtime import SetupRuntimeCommand
from appimagebuilder.commands.setup_symlinks import SetupSymlinksCommand
from appimagebuilder.commands.deploy_record import (
    WriteDeployRecordCommand,
)
from appimagebuilder.recipe.roamer import Roamer


class Orchestrator:
    """Transforms a recipe into a command list"""

    def process(self, recipe: Roamer, args):
        if recipe.version() == 1:
            return self._prepare_commands_for_recipe_v1(args, recipe)

        raise RuntimeError("Unknown recipe version: %s" % recipe.version())

    def _prepare_commands_for_recipe_v1(self, args, recipe):
        context = self._extract_v1_recipe_context(args, recipe)
        commands = []
        if not args.skip_script:
            command = RunScriptCommand(context, recipe.script, "main script")
            commands.append(command)

        if not args.skip_build:
            commands.extend(self._create_app_dir_commands(context, recipe))

        if not args.skip_tests and recipe.AppDir.test:
            command = RunTestCommand(context, recipe.AppDir.test)
            commands.append(command)

        if not args.skip_appimage:
            command = CreateAppImageCommand(context, recipe)
            commands.append(command)

        return commands

    def _create_app_dir_commands(self, context, recipe):
        commands = []
        commands.extend(self._create_deploy_commands(context, recipe))
        commands.extend(self._create_setup_commands(context, recipe))
        commands.append(WriteDeployRecordCommand(context))
        return commands

    def _create_deploy_commands(self, context, recipe):
        commands = []
        if recipe.AppDir.before_bundle:
            command = RunScriptCommand(
                context, recipe.AppDir.before_bundle, "before bundle script"
            )
            commands.append(command)

        apt_section = recipe.AppDir.apt
        if apt_section:
            command = self._generate_apt_deploy_command(context, apt_section)
            commands.append(command)

        pacman_section = recipe.AppDir.pacman
        if pacman_section:
            command = self._generate_pacman_deploy_command(context, pacman_section)
            commands.append(command)

        files_section = recipe.AppDir.files
        if files_section:
            command = FileDeployCommand(
                context,
                files_section.include() or [],
                files_section.exclude() or [],
            )
            commands.append(command)

        if recipe.AppDir.after_bundle:
            command = RunScriptCommand(
                context, recipe.AppDir.after_bundle, "after bundle script"
            )
            commands.append(command)

        return commands

    def _create_setup_commands(self, context, recipe):
        commands = []
        if recipe.AppDir.before_runtime:
            command = RunScriptCommand(
                context, recipe.AppDir.before_runtime, "before runtime script"
            )
            commands.append(command)

        finder = Finder(context.app_dir)
        commands.append(SetupSymlinksCommand(context, recipe, finder))
        commands.append(SetupRuntimeCommand(context, recipe, finder))
        commands.append(SetupAppInfoCommand(context))

        if recipe.AppDir.after_runtime:
            command = RunScriptCommand(
                context, recipe.AppDir.after_runtime, "after runtime script"
            )
            commands.append(command)

        return commands

    def _generate_apt_deploy_command(self, context, apt_section):
        apt_archs = apt_section.arch()
        if isinstance(apt_archs, str):
            apt_archs = [apt_archs]

        sources = []
        keys = []
        for item in apt_section.sources():
            if "sourceline" in item:
                sources.append(item["sourceline"])
            if "key_url" in item:
                keys.append(item["key_url"])

        return AptDeployCommand(
            context,
            apt_section.include(),
            apt_section.exclude() or [],
            apt_archs,
            sources,
            keys,
            apt_section.allow_unauthenticated() or False,
        )

    def _generate_pacman_deploy_command(self, context, pacman_section):
        return PacmanDeployCommand(
            context,
            pacman_section.include(),
            pacman_section.exclude(),
            pacman_section["Architecture"](),
            pacman_section.repositories(),
            pacman_section.options(),
        )

    def _extract_v1_recipe_context(self, args, recipe):
        app_dir_path = pathlib.Path(args.appdir).absolute()
        build_dir_path = pathlib.Path(args.build_dir).absolute()

        app_info_section = recipe.AppDir.app_info
        app_info = AppInfo(
            app_info_section.id(),
            app_info_section.name() or app_info_section.id(),
            app_info_section.icon() or "application-vnd.appimage",
            app_info_section.version(),
            app_info_section.exec(),
            app_info_section.exec_args(),
        )
        bundle_info = BundleInfo(
            app_dir=app_dir_path,
            app_info=app_info,
            update_string=recipe.AppImage["update-information"]() or "guess",
            runtime_arch=recipe.AppImage.arch(),
            sign_key=recipe.AppImage["sign-key"]() or None,
            # the sibling fields above call the Roamer node; the original line
            # was missing the call, so 'or None' could never trigger
            file_name=recipe.AppImage["file_name"]() or None,
        )

        return Context(
            recipe=recipe,
            recipe_path=pathlib.Path(args.recipe),
            app_info=app_info,
            bundle_info=bundle_info,
            app_dir=app_dir_path,
            build_dir=build_dir_path,
        )
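Nothing in this file executes the commands; process() just returns them in order. A hedged sketch of the calling side (the run() method name is an assumption, since the command classes' interface is not visible here):

# Hypothetical caller-side loop; 'recipe' and 'args' would come from the CLI layer,
# and 'run' is an assumed method name on the command objects.
commands = Orchestrator().process(recipe, args)
for command in commands:
    command.run()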
| 37.138298
| 83
| 0.660556
| 741
| 6,982
| 6.016194
| 0.221323
| 0.046658
| 0.060565
| 0.040377
| 0.200763
| 0.131898
| 0.083445
| 0.023777
| 0.023777
| 0
| 0
| 0.001747
| 0.262246
| 6,982
| 187
| 84
| 37.336898
| 0.863716
| 0.092524
| 0
| 0.19863
| 0
| 0
| 0.036076
| 0.003797
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054795
| false
| 0
| 0.10274
| 0.006849
| 0.219178
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
430cfcded466d6e4b55330f3c637f5af1e8d4960
| 2,743
|
py
|
Python
|
API_Collections/googlemap_geocode.py
|
Musketeer-Liu/Auto_Coding_Tools_Box
|
96ffe3f194eb3571d290086400ef518cef4e0774
|
[
"MIT"
] | null | null | null |
API_Collections/googlemap_geocode.py
|
Musketeer-Liu/Auto_Coding_Tools_Box
|
96ffe3f194eb3571d290086400ef518cef4e0774
|
[
"MIT"
] | null | null | null |
API_Collections/googlemap_geocode.py
|
Musketeer-Liu/Auto_Coding_Tools_Box
|
96ffe3f194eb3571d290086400ef518cef4e0774
|
[
"MIT"
] | null | null | null |
# python3 --> Enter Python Shell
# from geocode import getGeocodeLocation
# getGeocodeLocation("Place you want to query")
import httplib2
import json


def getGeocodeLocation(inputString):
    google_api_key = "AIzaSyDZHGnbFkjZcOEgYPpDqlO2YhBHKsNxhnE"
    locationString = inputString.replace(" ", "+")
    url = ('https://maps.googleapis.com/maps/api/geocode/json?address=%s&key=%s' % (locationString, google_api_key))

    h = httplib2.Http()
    response, content = h.request(url, 'GET')
    result = json.loads(content)
    latitude = result['results'][0]['geometry']['location']['lat']
    longitude = result['results'][0]['geometry']['location']['lng']
    # print(latitude, longitude)
    return (latitude, longitude)
    # print("response header: %s \n \n" % response)
    # return result
# san_francisco = getGeocodeLocation("San Francisco, CA")
# response header: {'content-type': 'application/json; charset=UTF-8', 'date': 'Sat, 27 Jan 2018 06:25:35 GMT', 'expires': 'Sun, 28 Jan 2018 06:25:35 GMT', 'cache-control': 'public, max-age=86400', 'vary': 'Accept-Language', 'access-control-allow-origin': '*', 'server': 'mafe', 'content-length': '1749', 'x-xss-protection': '1; mode=block', 'x-frame-options': 'SAMEORIGIN', 'alt-svc': 'hq=":443"; ma=2592000; quic=51303431; quic=51303339; quic=51303338; quic=51303337; quic=51303335,quic=":443"; ma=2592000; v="41,39,38,37,35"', 'status': '200', '-content-encoding': 'gzip', 'content-location': 'https://maps.googleapis.com/maps/api/geocode/json?address=San+Francisco,+CA&key=AIzaSyDZHGnbFkjZcOEgYPpDqlO2YhBHKsNxhnE'}
# san_francisco
# {'results': [{'address_components': [{'long_name': 'San Francisco', 'short_name': 'SF', 'types': ['locality', 'political']}, {'long_name': 'San Francisco County', 'short_name': 'San Francisco County', 'types': ['administrative_area_level_2', 'political']}, {'long_name': 'California', 'short_name': 'CA', 'types': ['administrative_area_level_1', 'political']}, {'long_name': 'United States', 'short_name': 'US', 'types': ['country', 'political']}], 'formatted_address': 'San Francisco, CA, USA', 'geometry': {'bounds': {'northeast': {'lat': 37.9298239, 'lng': -122.28178}, 'southwest': {'lat': 37.6398299, 'lng': -123.173825}}, 'location': {'lat': 37.7749295, 'lng': -122.4194155}, 'location_type': 'APPROXIMATE', 'viewport': {'northeast': {'lat': 37.812,'lng': -122.3482}, 'southwest': {'lat': 37.70339999999999, 'lng': -122.527}}}, 'place_id': 'ChIJIQBpAG2ahYAR_6128GcTUEo', 'types': ['locality', 'political']}], 'status': 'OK'}
# san_francisco.keys()
# dict_keys(['results', 'status'])
# san_francisco['results'][0]['geometry']['location']['lat']
# 37.7749295
# san_francisco['results'][0]['geometry']['location']['lng']
# -122.4194155
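The commented transcript above shows that a failed lookup would surface as a missing or empty 'results' list; a hedged defensive wrapper (invented name, not part of the original file):

def getGeocodeLocationSafe(inputString):
    # Hypothetical guard around the parsing in getGeocodeLocation(): a
    # non-'OK' reply has no result to index, which raises KeyError/IndexError.
    try:
        return getGeocodeLocation(inputString)
    except (KeyError, IndexError):
        return None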
| 66.902439
| 932
| 0.679913
| 329
| 2,743
| 5.580547
| 0.483283
| 0.071895
| 0.034858
| 0.052288
| 0.147059
| 0.107843
| 0.051198
| 0.051198
| 0.051198
| 0
| 0
| 0.093788
| 0.102078
| 2,743
| 40
| 933
| 68.575
| 0.651644
| 0.771783
| 0
| 0
| 0
| 0.083333
| 0.267652
| 0.064039
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.166667
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
430da45d8833848dec38a5b05491d18df5c37b6a
| 1,717
|
py
|
Python
|
backend/core/actions/actionGenerator.py
|
makakken/roseguarden
|
9a867f3d5e979b990bf474dcba81e5e9d0814c6a
|
[
"MIT"
] | null | null | null |
backend/core/actions/actionGenerator.py
|
makakken/roseguarden
|
9a867f3d5e979b990bf474dcba81e5e9d0814c6a
|
[
"MIT"
] | 50
|
2021-03-28T03:06:19.000Z
|
2021-10-18T12:36:16.000Z
|
backend/core/actions/actionGenerator.py
|
makakken/roseguarden
|
9a867f3d5e979b990bf474dcba81e5e9d0814c6a
|
[
"MIT"
] | 1
|
2021-07-30T07:12:46.000Z
|
2021-07-30T07:12:46.000Z
|
"""
The roseguarden project
Copyright (C) 2018-2020 Marcus Drobisch,
This program is free software: you can redistribute it and/or modify it under
the terms of the GNU General Public License as published by the Free Software
Foundation, either version 3 of the License, or (at your option) any later
version.
This program is distributed in the hope that it will be useful, but WITHOUT
ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
You should have received a copy of the GNU General Public License along with
this program. If not, see <http://www.gnu.org/licenses/>.
"""
__authors__ = ["Marcus Drobisch"]
__contact__ = "roseguarden@fabba.space"
__credits__ = []
__license__ = "GPLv3"
class BaseAction(object):
action = 'undefined'
target = 'undefined'
source = 'server'
version = '1.0.0'
def __init__(self, ):
print("Instance of BaseAction created")
def execute(self, ):
print("Execute not defined")
@classmethod
def generate(cls, delay=0.0):
action = {}
action['action'] = cls.action
action['target'] = cls.target
action['version'] = cls.version
action['source'] = cls.source
action['delay'] = delay
return action
class BaseNodeAction(object):
action = 'undefined'
version = '1.0.0'
def __init__(self, ):
print("Instance of BaseAction created")
def execute(self, ):
print("Execute not defined")
@classmethod
def generate(cls):
action = {}
action['action'] = cls.action
action['version'] = cls.version
return action
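A hedged sketch of how the generate() classmethod is meant to be used; the subclass and its field values are invented for illustration:

class OpenDoorAction(BaseAction):
    # Hypothetical action type; only 'action' and 'target' are overridden.
    action = 'openDoor'
    target = 'door'

print(OpenDoorAction.generate(delay=1.5))
# -> {'action': 'openDoor', 'target': 'door', 'version': '1.0.0', 'source': 'server', 'delay': 1.5}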
| 27.253968
| 78
| 0.670355
| 219
| 1,717
| 5.146119
| 0.47032
| 0.063886
| 0.034605
| 0.050577
| 0.333629
| 0.310559
| 0.202307
| 0.202307
| 0.202307
| 0.202307
| 0
| 0.013657
| 0.232382
| 1,717
| 62
| 79
| 27.693548
| 0.841426
| 0.39371
| 0
| 0.628571
| 0
| 0
| 0.219748
| 0.022265
| 0
| 0
| 0
| 0
| 0
| 1
| 0.171429
| false
| 0
| 0
| 0
| 0.457143
| 0.114286
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
430dcb1829dec99ac70255fe07e1c633f6a84f85
| 5,877
|
py
|
Python
|
lib/csv/csv.py
|
arnscott/gcounter
|
ffb6628f1b1f0e6c70168ff738fd51fa08e0df18
|
[
"MIT"
] | null | null | null |
lib/csv/csv.py
|
arnscott/gcounter
|
ffb6628f1b1f0e6c70168ff738fd51fa08e0df18
|
[
"MIT"
] | 1
|
2018-11-30T14:09:40.000Z
|
2018-12-03T12:41:01.000Z
|
lib/csv/csv.py
|
arnscott/gcounter
|
ffb6628f1b1f0e6c70168ff738fd51fa08e0df18
|
[
"MIT"
] | null | null | null |
"""
MIT License
Copyright (c) 2018 Aaron Michael Scott
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import datetime
import csv
import os
class CSVReader(object):
"""Wrapper for reading csv files.
Takes just the filepath as an argument.
Use the iterrecords() generator method for large data sets for increased performance.
"""
def __init__(self, file_path, delimiter=','):
self.file_path = file_path
self.delimiter = delimiter
def read_to_list(self):
"""Returns the records in the csv as a list[]
Each record is a dictionary
"""
records = []
with open(self.file_path) as source:
reader = csv.DictReader(source,
delimiter=self.delimiter)
for row in reader:
records.append(row)
return records
def read_to_dict(self, key_field):
"""Returns the records in the csv as a dictionary.
The key value is specified by the key_field argument for each record
"""
records = {}
with open(self.file_path) as source:
reader = csv.DictReader(source,
delimiter=self.delimiter)
self.headers = reader.fieldnames
if key_field in self.headers:
for row in reader:
if not row[key_field] in records:
records[row[key_field]] = row
else:
raise Exception('The key provided does not have unique values.')
else:
raise KeyError('The key provided does not exist')
return records
def iterrecords(self):
"""Generator method that provides a more efficient way to iterate records.
for record in instance.iterrecords():
print(record)
"""
records = []
with open(self.file_path) as source:
reader = csv.DictReader(source,
delimiter=self.delimiter)
for row in reader:
yield row
class CSVWriter(object):
"""Wrapper for writing csv files.
takes the file path and a list of headers as arguments
"""
def __init__(self, file_path, headers):
self.headers = headers
self.file_path = file_path
def write_from_list(self, records=[]):
"""Writes the csv to the indicated file_path
taking a list[] of records as the argument
where each record is a dictionary.
Only the fields in self.headers will be written to the csv.
But extra fields can be passed, they will just be skipped over.
"""
if isinstance(records, list):
with open(self.file_path, 'w') as csvfile:
writer = csv.DictWriter(csvfile,
fieldnames=self.headers)
writer.writeheader()
for record in records:
if isinstance(record, dict):
row = {field: record[field] for field in self.headers}
writer.writerow(row)
else:
raise Exception('Items in list must be of type dict')
else:
raise Exception('Must pass a list object as the records list')
return self.file_path
def write_from_dict(self, records={}):
"""Writes the csv to the indicated file_path
taking a dict{} of records as the argument
where each item in the dict{} is also a dict{}
"""
with open(self.file_path, 'w') as csvfile:
writer = csv.DictWriter(csvfile,
fieldnames=self.headers)
writer.writeheader()
for key, record in records.items():
row = {field: record[field] for field in self.headers}
writer.writerow(row)
return self.file_path
def reader(file_path='', delimiter=','):
"""Returns a CSVReader object
"""
if os.path.isfile(file_path):
if os.access(file_path, os.R_OK):
return CSVReader(file_path, delimiter=delimiter)
else:
raise Exception('{fname} exists but is not readable.'.format(fname=file_path))
else:
raise Exception('{fname} does not exist'.format(fname=file_path))
def writer(file_path='', headers=[]):
"""Returns a CSVWriter object
"""
if not os.path.isfile(file_path):
if isinstance(headers, list):
return CSVWriter(file_path=file_path, headers=headers)
else:
raise Exception('Headers need to be in a list object.')
else:
raise Exception('{fname} is already a file. Please write to a new location.'.format(fname=file_path))
def the_date():
return datetime.date.today().strftime('%m_%d_%Y')
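A hedged round-trip through the wrappers above; 'people.csv' is an invented path:

# Write two records, then stream them back with the generator API.
w = writer(file_path='people.csv', headers=['name', 'age'])
w.write_from_list([{'name': 'Ada', 'age': 36}, {'name': 'Alan', 'age': 41}])

r = reader(file_path='people.csv')
for record in r.iterrecords():
    print(record['name'], record['age'])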
| 34.775148
| 109
| 0.612047
| 740
| 5,877
| 4.791892
| 0.291892
| 0.060914
| 0.037225
| 0.022561
| 0.318387
| 0.242527
| 0.230118
| 0.212634
| 0.196842
| 0.196842
| 0
| 0.000992
| 0.313936
| 5,877
| 168
| 110
| 34.982143
| 0.878472
| 0.353582
| 0
| 0.481928
| 0
| 0
| 0.087973
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.120482
| false
| 0.012048
| 0.036145
| 0.012048
| 0.26506
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
430e631a7ab886f89000f1e0dc6f369df4ae43f7
| 1,056
|
py
|
Python
|
Module01/LearningQGIS_ThirdEdition_Code/Chapter6_code/export_map.py
|
karant17/Test
|
e44bf79f597d53de2b891372ffccf7f13c74ede3
|
[
"MIT"
] | 7
|
2017-02-16T15:25:47.000Z
|
2021-11-08T13:10:15.000Z
|
Module01/LearningQGIS_ThirdEdition_Code/Chapter6_code/export_map.py
|
karant17/Test
|
e44bf79f597d53de2b891372ffccf7f13c74ede3
|
[
"MIT"
] | null | null | null |
Module01/LearningQGIS_ThirdEdition_Code/Chapter6_code/export_map.py
|
karant17/Test
|
e44bf79f597d53de2b891372ffccf7f13c74ede3
|
[
"MIT"
] | 7
|
2017-03-06T08:47:27.000Z
|
2021-12-11T12:42:43.000Z
|
from PyQt4.QtGui import QImage, QPainter
from PyQt4.QtCore import QSize
# configure the output image
width = 800
height = 600
dpi = 92
img = QImage(QSize(width, height), QImage.Format_RGB32)
img.setDotsPerMeterX(dpi / 25.4 * 1000)
img.setDotsPerMeterY(dpi / 25.4 * 1000)
# get the map layers and extent
layers = [ layer.id() for layer in iface.legendInterface().layers() ]
extent = iface.mapCanvas().extent()
# configure map settings for export
mapSettings = QgsMapSettings()
mapSettings.setMapUnits(0)
mapSettings.setExtent(extent)
mapSettings.setOutputDpi(dpi)
mapSettings.setOutputSize(QSize(width, height))
mapSettings.setLayers(layers)
mapSettings.setFlags(QgsMapSettings.Antialiasing | QgsMapSettings.UseAdvancedEffects | QgsMapSettings.ForceVectorOutput | QgsMapSettings.DrawLabeling)
# configure and run painter
p = QPainter()
p.begin(img)
mapRenderer = QgsMapRendererCustomPainterJob(mapSettings, p)
mapRenderer.start()
mapRenderer.waitForFinished()
p.end()
# save the result
img.save("C:/temp/custom_export.png","png")
| 36.413793
| 151
| 0.773674
| 124
| 1,056
| 6.572581
| 0.548387
| 0.022086
| 0.039264
| 0.02454
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029064
| 0.120265
| 1,056
| 29
| 152
| 36.413793
| 0.848224
| 0.125
| 0
| 0
| 0
| 0
| 0.031425
| 0.028058
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.083333
| 0
| 0.083333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
430f1a041d4b52037c87f1c1a590ae76e5b36f2e
| 13,604
|
py
|
Python
|
tools/generate_cropped_dataset.py
|
DIVA-DIA/DIVA-DAF
|
0ae3b873d04f1852d9053cb4cb2fbc7bda73471c
|
[
"MIT"
] | 3
|
2022-02-10T17:35:41.000Z
|
2022-03-04T10:38:58.000Z
|
tools/generate_cropped_dataset.py
|
DIVA-DIA/DIVA-DAF
|
0ae3b873d04f1852d9053cb4cb2fbc7bda73471c
|
[
"MIT"
] | 3
|
2022-02-02T09:12:18.000Z
|
2022-02-16T13:42:30.000Z
|
tools/generate_cropped_dataset.py
|
DIVA-DIA/DIVA-DAF
|
0ae3b873d04f1852d9053cb4cb2fbc7bda73471c
|
[
"MIT"
] | null | null | null |
"""
Load a dataset of historic documents by specifying the folder where its located.
"""
import argparse
# Utils
import itertools
import logging
import math
from datetime import datetime
from pathlib import Path
from torchvision.datasets.folder import has_file_allowed_extension, pil_loader
from torchvision.transforms import functional as F
from tqdm import tqdm
IMG_EXTENSIONS = ('.jpg', '.jpeg', '.png', '.ppm', '.bmp', '.pgm', '.gif')
JPG_EXTENSIONS = ('.jpg', '.jpeg')
def get_img_paths_uncropped(directory):
"""
Parameters
----------
directory: string
parent directory with images inside
Returns
-------
paths: list of paths
"""
paths = []
directory = Path(directory).expanduser()
if not directory.is_dir():
logging.error(f'Directory not found ({directory})')
for subdir in sorted(directory.iterdir()):
if not subdir.is_dir():
continue
for img_name in sorted(subdir.iterdir()):
if has_file_allowed_extension(str(img_name), IMG_EXTENSIONS):
paths.append((subdir / img_name, str(subdir.stem)))
return paths
class ImageCrop(object):
"""
Crop the data and ground truth image at the specified coordinates to the specified size and convert
them to a tensor.
"""
def __init__(self, crop_size):
self.crop_size = crop_size
def __call__(self, img, coordinates):
"""
Args:
img (PIL Image): Data image to be cropped and converted to tensor.
gt (PIL Image): Ground truth image to be cropped and converted to tensor.
Returns:
Data tensor, gt tensor (tuple of tensors): cropped and converted images
"""
x_position = coordinates[0]
y_position = coordinates[1]
img_crop = F.to_tensor(
F.crop(img=img, left=x_position, top=y_position, width=self.crop_size, height=self.crop_size))
return img_crop
class CroppedDatasetGenerator:
def __init__(self, input_path: Path, output_path, crop_size_train, crop_size_val, crop_size_test, overlap=0.5,
leading_zeros_length=4, override_existing=False):
# Init list
self.input_path = input_path
self.output_path = output_path
self.crop_size_train = crop_size_train
self.crop_size_val = crop_size_val
self.crop_size_test = crop_size_test
self.overlap = overlap
self.leading_zeros_length = leading_zeros_length
self.override_existing = override_existing
self.generator_train = CropGenerator(input_path=input_path / 'train',
output_path=output_path / 'train',
crop_size=crop_size_train,
overlap=overlap,
leading_zeros_length=leading_zeros_length,
override_existing=override_existing,
progress_title='Cropping "train"')
self.generator_val = CropGenerator(input_path=input_path / 'val',
output_path=output_path / 'val',
crop_size=crop_size_val,
overlap=overlap,
leading_zeros_length=leading_zeros_length,
override_existing=override_existing,
progress_title='Cropping "val"')
self.generator_test = CropGenerator(input_path=input_path / 'test',
output_path=output_path / 'test',
crop_size=crop_size_test,
overlap=overlap,
leading_zeros_length=leading_zeros_length,
override_existing=override_existing,
progress_title='Cropping "test"')
def write_crops(self):
info_list = ['Running CroppedDatasetGenerator.write_crops():',
f'- full_command:',
f'python tools/generate_cropped_dataset.py -i {self.input_path} -o {self.output_path} '
f'-tr {self.crop_size_train} -v {self.crop_size_val} -te {self.crop_size_test} -ov {self.overlap} '
f'-l {self.leading_zeros_length}',
f'',
f'- start_time: \t{datetime.now():%Y-%m-%d_%H-%M-%S}',
f'- input_path: \t{self.input_path}',
f'- output_path: \t{self.output_path}',
f'- crop_size_train: \t{self.crop_size_train}',
f'- crop_size_val: \t{self.crop_size_val}',
f'- crop_size_test: \t{self.crop_size_test}',
f'- overlap: \t{self.overlap}',
f'- leading_zeros_len:\t{self.leading_zeros_length}',
f'- override_existing:\t{self.override_existing}',
''] # empty string to get linebreak at the end when using join
info_str = '\n'.join(info_list)
print(info_str)
# Write info_cropped_dataset.txt
self.output_path.mkdir(parents=True, exist_ok=True)
info_file = self.output_path / 'info_cropped_dataset.txt'
with info_file.open('a') as f:
f.write(info_str)
print(f'Start cropping:')
self.generator_train.write_crops()
self.generator_val.write_crops()
self.generator_test.write_crops()
with info_file.open('a') as f:
f.write(f'- end_time: \t{datetime.now():%Y-%m-%d_%H-%M-%S}\n\n')
class CropGenerator:
def __init__(self, input_path, output_path, crop_size, overlap=0.5, leading_zeros_length=4,
override_existing=False, progress_title=''):
# Init list
self.input_path = input_path
self.output_path = output_path
self.crop_size = crop_size
self.overlap = overlap
self.leading_zeros_length = leading_zeros_length
self.override_existing = override_existing
self.progress_title = progress_title
self.step_size = int(self.crop_size * (1 - self.overlap))
# List of tuples that contain the path to the gt and image that belong together
self.img_paths = get_img_paths_uncropped(input_path)
self.num_imgs_in_set = len(self.img_paths)
if self.num_imgs_in_set == 0:
raise RuntimeError("Found 0 images in subfolders of: {} \n Supported image extensions are: {}".format(
input_path, ",".join(IMG_EXTENSIONS)))
self.current_split = ''
self.current_img_index = -1
self.img_names_sizes, self.num_horiz_crops, self.num_vert_crops = self._get_img_size_and_crop_numbers()
self.crop_list = self._get_crop_list()
def write_crops(self):
crop_function = ImageCrop(self.crop_size)
for img_index, x, y in tqdm(self.crop_list, desc=self.progress_title):
self._load_image(img_index=img_index)
coordinates = (x, y)
split_name = self.img_names_sizes[img_index][0]
img_full_name = self.img_names_sizes[img_index][1]
img_full_name = Path(img_full_name)
img_name = img_full_name.stem
dest_folder = self.output_path / split_name / img_name
dest_folder.mkdir(parents=True, exist_ok=True)
extension = img_full_name.suffix
filename = f'{img_name}_x{x:0{self.leading_zeros_length}d}_y{y:0{self.leading_zeros_length}d}{extension}'
dest_filename = dest_folder / filename
if not self.override_existing:
if dest_filename.exists():
continue
img = self.get_crop(self.current_img, coordinates=coordinates, crop_function=crop_function)
pil_img = F.to_pil_image(img, mode='RGB')
if extension in JPG_EXTENSIONS:
pil_img.save(dest_filename, quality=95)
else:
# save_image(img, dest_filename)
pil_img.save(dest_filename)
def _load_image(self, img_index):
"""
Inits the variables responsible of tracking which crop should be taken next, the current images and the like.
This should be run every time a new page gets loaded for the test-set
"""
if self.current_img_index == img_index:
return
# Load image
self.current_img = pil_loader(self.img_paths[img_index][0])
# Update pointer to current image
self.current_img_index = img_index
self.current_split = self.img_paths[img_index][1]
def get_crop(self, img, coordinates, crop_function):
img = crop_function(img, coordinates)
return img
def _get_img_size_and_crop_numbers(self):
img_names_sizes = [] # list of tuples -> (split_name, img_name, img_size (H, W))
num_horiz_crops = []
num_vert_crops = []
for img_path, split_name in self.img_paths:
data_img = pil_loader(img_path)
img_names_sizes.append((split_name, img_path.name, data_img.size))
num_horiz_crops.append(math.ceil((data_img.size[0] - self.crop_size) / self.step_size + 1))
num_vert_crops.append(math.ceil((data_img.size[1] - self.crop_size) / self.step_size + 1))
return img_names_sizes, num_horiz_crops, num_vert_crops
def _get_crop_list(self):
return [self._convert_crop_id_to_coordinates(img_index, hcrop_index, vcrop_index) for img_index in
range(self.num_imgs_in_set) for hcrop_index, vcrop_index in
itertools.product(range(self.num_horiz_crops[img_index]),
range(self.num_vert_crops[img_index]))]
def _convert_crop_id_to_coordinates(self, img_index, hcrop_index, vcrop_index):
# X coordinate
if hcrop_index == self.num_horiz_crops[img_index] - 1:
# We are at the end of a line
x_position = self.img_names_sizes[img_index][2][0] - self.crop_size
else:
x_position = self.step_size * hcrop_index
assert x_position < self.img_names_sizes[img_index][2][0] - self.crop_size
# Y coordinate
if vcrop_index == self.num_vert_crops[img_index] - 1:
# We are at the bottom end
y_position = self.img_names_sizes[img_index][2][1] - self.crop_size
else:
y_position = self.step_size * vcrop_index
assert y_position < self.img_names_sizes[img_index][2][1] - self.crop_size
return img_index, x_position, y_position
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument('-i', '--input_path',
help='Path to the root folder of the dataset (contains train/val/test)',
type=Path,
required=True)
parser.add_argument('-o', '--output_path',
help='Path to the output folder',
type=Path,
required=True)
parser.add_argument('-tr', '--crop_size_train',
help='Size of the crops in the training set',
type=int,
required=True)
parser.add_argument('-v', '--crop_size_val',
help='Size of the crops in the validation set',
type=int,
required=True)
parser.add_argument('-te', '--crop_size_test',
help='Size of the crops in the test set',
type=int,
required=True)
parser.add_argument('-ov', '--overlap',
help='Overlap of the different crops (between 0-1)',
type=float,
default=0.5)
parser.add_argument('-l', '--leading_zeros_length',
help='amount of leading zeros to encode the coordinates',
type=int,
default=4)
parser.add_argument('-oe', '--override_existing',
help='If true overrides the images ',
type=bool,
default=False)
args = parser.parse_args()
dataset_generator = CroppedDatasetGenerator(**args.__dict__)
dataset_generator.write_crops()
# example call arguments
# -i
# /Users/voegtlil/Documents/04_Datasets/003-DataSet/CB55-10-segmentation
# -o
# /Users/voegtlil/Desktop/fun
# -tr
# 300
# -v
# 300
# -te
# 256
# example call arguments
# -i
# /dataset/DIVA-HisDB/segmentation/CB55
# -o
# /net/research-hisdoc/datasets/semantic_segmentation/datasets_cropped/temp-CB55
# -tr
# 300
# -v
# 300
# -te
# 256
# dataset_generator = CroppedDatasetGenerator(
# input_path=Path('/dataset/DIVA-HisDB/segmentation/CB55'),
# output_path=Path('/net/research-hisdoc/datasets/semantic_segmentation/datasets_cropped/CB55'),
# crop_size_train=300,
# crop_size_val=300,
# crop_size_test=256,
# overlap=0.5,
# leading_zeros_length=4,
# override_existing=False)
# dataset_generator.write_crops()
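A small worked example of the crop-count formula in _get_img_size_and_crop_numbers(); the page width is an assumed value, not taken from the repository:

import math

width, crop_size, overlap = 960, 300, 0.5
step_size = int(crop_size * (1 - overlap))                        # 150, as in CropGenerator.__init__
num_horiz_crops = math.ceil((width - crop_size) / step_size + 1)  # ceil(5.4) == 6
print(num_horiz_crops)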
| 39.777778
| 120
| 0.585049
| 1,634
| 13,604
| 4.574663
| 0.173195
| 0.049231
| 0.035318
| 0.018194
| 0.37097
| 0.288696
| 0.240268
| 0.191171
| 0.143411
| 0.136455
| 0
| 0.009315
| 0.321376
| 13,604
| 341
| 121
| 39.894428
| 0.800368
| 0.135622
| 0
| 0.196172
| 0
| 0.019139
| 0.13391
| 0.044464
| 0
| 0
| 0
| 0
| 0.009569
| 1
| 0.057416
| false
| 0
| 0.043062
| 0.004785
| 0.148325
| 0.009569
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
430f4b6111489ee13ac7ae5b12340f3777b684e0
| 11,275
|
py
|
Python
|
practice/4_tracking/tracker.py
|
OrangeRedeng/CV-SUMMER-CAMP-2021
|
74a65d0b21e4876e1fc1c3d931af76193f36e617
|
[
"Apache-2.0"
] | 13
|
2021-07-05T08:44:33.000Z
|
2021-10-13T09:57:58.000Z
|
practice/4_tracking/tracker.py
|
OrangeRedeng/CV-SUMMER-CAMP-2021
|
74a65d0b21e4876e1fc1c3d931af76193f36e617
|
[
"Apache-2.0"
] | 117
|
2021-07-06T11:21:50.000Z
|
2021-10-06T15:48:50.000Z
|
practice/4_tracking/tracker.py
|
OrangeRedeng/CV-SUMMER-CAMP-2021
|
74a65d0b21e4876e1fc1c3d931af76193f36e617
|
[
"Apache-2.0"
] | 43
|
2021-04-26T07:45:14.000Z
|
2021-11-06T11:19:05.000Z
|
import numpy as np
import math
import logging as log
import sys
from tqdm import tqdm
from common.feature_distance import calc_features_similarity
from common.common_objects import DetectedObject, validate_detected_object, Bbox
from common.common_objects import get_bbox_center, get_dist, calc_bbox_area
from common.find_best_assignment import solve_assignment_problem
from common.annotation import AnnotationObject, AnnotationStorage


class Track:
    __next_track_id = 0

    def __init__(self, first_obj):
        self.objects = []
        self._track_id = Track.__next_track_id
        Track.__next_track_id += 1

        self.objects.append(first_obj)

    def _validate(self):
        assert len(self.objects) > 0
        for o in self.objects:
            validate_detected_object(o)
        for i in range(len(self.objects) - 1):
            # the original line was a bare comparison with no effect;
            # an assert is the evident intent of this validation loop
            assert self.objects[i].frame_index < self.objects[i+1].frame_index

    def add_object(self, o):
        self._validate()
        validate_detected_object(o)

        last_frame_index = self.objects[-1].frame_index
        if not last_frame_index < o.frame_index:
            raise RuntimeError("Add object={} to track with the last_frame_index={}".format(o, last_frame_index))

        self.objects.append(o)

    def last(self):
        return self.objects[-1]

    def get_id(self):
        return self._track_id

    def get_bbox_for_frame(self, cur_frame_ind):
        """Finds bbox for frame index using linear approximation"""
        self._validate()
        i_found = None
        for i, o in enumerate(self.objects):
            if o.frame_index == cur_frame_ind:
                return o.bbox
            if o.frame_index > cur_frame_ind:
                i_found = i
                break
        if i_found is None:  # cur_frame_ind after the last frame_index in track
            return None
        if i_found == 0:  # cur_frame_ind before the first frame_index in track
            return None

        log.debug("using linear approximation for track id={}, frame_index={}".format(self._track_id, cur_frame_ind))
        o1 = self.objects[i_found-1]
        o2 = self.objects[i_found]
        assert o1.frame_index < cur_frame_ind < o2.frame_index

        dindex = o2.frame_index - o1.frame_index
        d_cur_index1 = cur_frame_ind - o1.frame_index
        d_cur_index2 = o2.frame_index - cur_frame_ind

        bbox1 = o1.bbox
        bbox2 = o2.bbox
        res_bbox = [None, None, None, None]
        for k in range(4):
            # linear approximation for all bbox fields
            res_bbox[k] = (bbox1[k] * d_cur_index2 + bbox2[k] * d_cur_index1) / dindex
        res_bbox = Bbox(res_bbox[0], res_bbox[1], res_bbox[2], res_bbox[3])

        return res_bbox


class Tracker:
    def __init__(self, num_frames_to_remove_track, num_objects_to_make_track_valid, affinity_threshold):
        self.tracks = []
        self.track_archive = []
        self.num_frames_to_remove_track = num_frames_to_remove_track
        self.num_objects_to_make_track_valid = num_objects_to_make_track_valid
        self.affinity_threshold = affinity_threshold

    def add_objects(self, det_objs):
        log.debug("begin: handling {} objects".format(len(det_objs)))
        if len(det_objs) == 0:
            return

        frame_index = det_objs[0].frame_index
        assert all(o.frame_index == frame_index for o in det_objs), "All det_objs should have the same frame_index"

        affinity_matrix = self._build_affinity_matrix(det_objs)
        self._validate_affinity_matrix(affinity_matrix, len(self.tracks), len(det_objs))

        self._log_affinity_matrix(affinity_matrix)

        decision, best_affinity = self._solve_assignment_problem(affinity_matrix)
        self._log_decision(decision, best_affinity, det_objs, frame_index)

        self._apply_decision(decision, det_objs, frame_index)
        self._move_obsolete_tracks_to_archive(frame_index)
        log.debug("end: handling {} objects".format(len(det_objs)))

    @staticmethod
    def _validate_affinity_matrix(affinity_matrix, num_tracks, num_det_objs):
        assert isinstance(affinity_matrix, list)
        assert len(affinity_matrix) == num_tracks
        for affinity_row in affinity_matrix:
            assert isinstance(affinity_row, list)
            assert len(affinity_row) == num_det_objs
            assert all(isinstance(v, float) for v in affinity_row)
            assert all(v >= 0 for v in affinity_row)

    def _build_affinity_matrix(self, det_objs):
        affinity_matrix = []
        for t in self.tracks:
            affinity_row = []
            for o in det_objs:
                cur_affinity = self._calc_affinity(t, o)
                affinity_row.append(cur_affinity)
            affinity_matrix.append(affinity_row)

        return affinity_matrix

    def _calc_affinity(self, track, obj):
        affinity_appearance = self._calc_affinity_appearance(track, obj)
        affinity_position = self._calc_affinity_position(track, obj)
        affinity_shape = self._calc_affinity_shape(track, obj)
        return affinity_appearance * affinity_position * affinity_shape

    def _calc_affinity_appearance(self, track, obj):
        raise NotImplementedError("The function _calc_affinity_appearance is not implemented -- implement it by yourself")

    def _calc_affinity_position(self, track, obj):
        raise NotImplementedError("The function _calc_affinity_position is not implemented -- implement it by yourself")

    def _calc_affinity_shape(self, track, obj):
        raise NotImplementedError("The function _calc_affinity_shape is not implemented -- implement it by yourself")

    @staticmethod
    def _log_affinity_matrix(affinity_matrix):
        with np.printoptions(precision=2, suppress=True, threshold=sys.maxsize, linewidth=sys.maxsize):
            log.debug("Affinity matrix =\n{}".format(np.array(affinity_matrix)))

    def _solve_assignment_problem(self, affinity_matrix):
        decision, best_affinity = solve_assignment_problem(affinity_matrix, self.affinity_threshold)
        return decision, best_affinity

    def _log_decision(self, decision, best_affinity, det_objs, frame_index):
        log.debug("Logging decision for frame index={}".format(frame_index))
        num_tracks = len(self.tracks)
        for track_index in range(num_tracks):
            assert track_index in decision
            obj_index = decision[track_index]  # index of the object assigned to the track
            if obj_index is not None:
                assert 0 <= obj_index < len(det_objs)
                obj_bbox = det_objs[obj_index].bbox
            else:
                obj_bbox = None

            cur_best_affinity = best_affinity[track_index]
            if cur_best_affinity is not None:
                best_affinity_str = "{:.3f}".format(cur_best_affinity)
            else:
                best_affinity_str = str(cur_best_affinity)

            log.debug("track_index={}, track id={}, last_bbox={}, decision={}, best_affinity={} => {}".format(
                track_index, self.tracks[track_index].get_id(),
                self.tracks[track_index].last().bbox,
                decision[track_index],
                best_affinity_str,
                obj_bbox))

    def _apply_decision(self, decision, det_objs, frame_index):
        set_updated_tracks_indexes = set()
        num_det_objs = len(det_objs)
        num_tracks = len(self.tracks)
        object_indexes_not_mapped_to_tracks = set(range(num_det_objs))  # all indexes from 0 to num_det_objs-1
        for track_index in range(num_tracks):
            assert track_index in decision

            obj_index = decision[track_index]  # index of the object assigned to the track
            if obj_index is None:
                # no objects are mapped for this track
                continue

            assert 0 <= obj_index < num_det_objs
            if obj_index not in object_indexes_not_mapped_to_tracks:
                raise RuntimeError("ERROR: Algorithm assigned the object {} to several tracks".format(obj_index))

            object_indexes_not_mapped_to_tracks.remove(obj_index)

            o = det_objs[obj_index]
            self.tracks[track_index].add_object(o)

        # create new tracks for all the objects not mapped to tracks
        for obj_index in object_indexes_not_mapped_to_tracks:
            o = det_objs[obj_index]
            self._create_new_track(o)

    def _create_new_track(self, o):
        new_track = Track(o)
        self.tracks.append(new_track)
        log.debug("created new track: id={} object: frame_index={}, {}".format(
            new_track.get_id(), o.frame_index, o.bbox))

    def _move_obsolete_tracks_to_archive(self, frame_index):
        new_tracks = []
        for t in self.tracks:
            last_frame_index = t.last().frame_index
            if frame_index - last_frame_index >= self.num_frames_to_remove_track:
                log.debug("Move the track id={} to archive: the current frame_index={}, "
                          "the last frame_index in track={}".format(
                              t.get_id(), frame_index, last_frame_index))
                self.track_archive.append(t)
            else:
                new_tracks.append(t)

        self.tracks = new_tracks

    def is_track_valid(self, track):
        assert isinstance(track, Track)
        return len(track.objects) > self.num_objects_to_make_track_valid

    def get_all_valid_tracks(self):
        res = []
        for t in self.track_archive:
            if self.is_track_valid(t):
                res.append(t)

        for t in self.tracks:
            if self.is_track_valid(t):
                res.append(t)

        return res


def convert_tracks_to_annotation_storage(tracks):
    ann_objects_by_frame_index = {}
    for cur_track in tqdm(tracks, desc="Converting"):
        track_id = cur_track.get_id()

        first_frame_index = cur_track.objects[0].frame_index
        last_frame_index = cur_track.objects[-1].frame_index

        for frame_index in range(first_frame_index, last_frame_index+1):
            bbox = cur_track.get_bbox_for_frame(frame_index)
            tl_x = math.floor(bbox.tl_x)
            tl_y = math.floor(bbox.tl_y)
            br_x = math.ceil(bbox.br_x)
            br_y = math.ceil(bbox.br_y)
            detect_obj = DetectedObject(frame_index=frame_index,
                                        bbox=Bbox(tl_x, tl_y, br_x, br_y),
                                        appearance_feature=[])
            ann_obj = AnnotationObject(detect_obj=detect_obj,
                                       track_id=track_id)
            if frame_index not in ann_objects_by_frame_index:
                ann_objects_by_frame_index[frame_index] = {}
            ann_objects_by_frame_index[frame_index][track_id] = ann_obj

    annotation_objects = []
    for frame_index in sorted(ann_objects_by_frame_index.keys()):
        cur_ann_objects = ann_objects_by_frame_index[frame_index]
        for track_id in sorted(cur_ann_objects.keys()):
            annotation_objects.append(cur_ann_objects[track_id])

    annotation_storage = AnnotationStorage.create_annotation_storage_from_list(annotation_objects)
    return annotation_storage
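The three _calc_affinity_* hooks above are deliberately left unimplemented ("implement it by yourself"). A hedged sketch of one possible implementation, reusing this module's imports; the helper signatures and the exact formulas are assumptions, not part of the original file:

class MyTracker(Tracker):
    def _calc_affinity_appearance(self, track, obj):
        # assumed: calc_features_similarity returns a similarity in [0, 1]
        return calc_features_similarity(track.last().appearance_feature,
                                        obj.appearance_feature)

    def _calc_affinity_position(self, track, obj):
        # distance between bbox centers, normalized by the track bbox scale
        c1 = get_bbox_center(track.last().bbox)
        c2 = get_bbox_center(obj.bbox)
        scale = math.sqrt(calc_bbox_area(track.last().bbox))
        return math.exp(-(get_dist(c1, c2) / scale) ** 2)

    def _calc_affinity_shape(self, track, obj):
        # area ratio in (0, 1]; identical sizes give 1
        a1 = calc_bbox_area(track.last().bbox)
        a2 = calc_bbox_area(obj.bbox)
        return min(a1, a2) / max(a1, a2)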
| 41.300366
| 123
| 0.655787
| 1,493
| 11,275
| 4.600134
| 0.119223
| 0.093186
| 0.024461
| 0.014851
| 0.351776
| 0.23078
| 0.151427
| 0.097117
| 0.097117
| 0.052126
| 0
| 0.005308
| 0.264745
| 11,275
| 272
| 124
| 41.452206
| 0.82316
| 0.036718
| 0
| 0.130233
| 0
| 0
| 0.074115
| 0.004425
| 0
| 0
| 0
| 0
| 0.065116
| 1
| 0.106977
| false
| 0
| 0.046512
| 0.009302
| 0.227907
| 0.004651
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
431041bc3b78a2b35eead16f02c1cdb50d1dd82f
| 16,308
|
py
|
Python
|
gm2m/managers.py
|
mikewolfd/django-gm2m
|
a8cecc4d6d56c83e8d9c623888f5d07cb6ad8771
|
[
"MIT"
] | null | null | null |
gm2m/managers.py
|
mikewolfd/django-gm2m
|
a8cecc4d6d56c83e8d9c623888f5d07cb6ad8771
|
[
"MIT"
] | null | null | null |
gm2m/managers.py
|
mikewolfd/django-gm2m
|
a8cecc4d6d56c83e8d9c623888f5d07cb6ad8771
|
[
"MIT"
] | null | null | null |
from django.db import router
from django.db.models import Q, Manager
from django.db import connections

from .contenttypes import ct, get_content_type
from .query import GM2MTgtQuerySet


class GM2MBaseManager(Manager):
    use_in_migration = True

    def __init__(self, instance):
        super(GM2MBaseManager, self).__init__()
        self.model = self._model  # see create_gm2m_related_manager
        self.instance = instance
        self.pk = instance.pk
        self.core_filters = {}

    def get_queryset(self):
        try:
            return self.instance \
                       ._prefetched_objects_cache[self.prefetch_cache_name]
        except (AttributeError, KeyError):
            db = self._db or router.db_for_read(self.instance.__class__,
                                                instance=self.instance)
            return self._get_queryset(using=db)._next_is_sticky() \
                       .filter(**self.core_filters)

    def _get_queryset(self, using):
        return super(GM2MBaseManager, self).get_queryset().using(using)

    def get_prefetch_queryset(self, instances, queryset=None):
        db = self._db or router.db_for_read(self.model,
                                            instance=instances[0])

        if queryset is None:
            queryset = self._get_queryset(db)

        qs, rel_obj_attr, instance_attr = \
            self._get_prefetch_queryset_params(instances, queryset, db)

        return (qs,
                rel_obj_attr,
                instance_attr,
                False,
                self.prefetch_cache_name)

    def _get_extra_queryset(self, queryset, q, extra_fields, db):
        join_table = self.through._meta.db_table
        connection = connections[db]
        qn = connection.ops.quote_name
        extra = dict(select=dict(
            ('_prefetch_related_val_%s' % f.attname,
             '%s.%s' % (qn(join_table), qn(f.column)))
            for f in extra_fields))
        return queryset.using(db)._next_is_sticky().filter(q).extra(**extra)

    def _check_through_model(self, method_name):
        # If the GM2M relation has an intermediary model,
        # the add and remove methods are not available.
        if not self.through._meta.auto_created:
            opts = self.through._meta
            raise AttributeError(
                'Cannot use %s() on a ManyToManyField which specifies an '
                'intermediary model. Use %s.%s\'s Manager instead.'
                % (method_name, opts.app_label, opts.object_name))

    def _do_add(self, db, through_objs):
        """
        Performs items addition
        """
        # Add the new entries in the db table
        self.through._default_manager.using(db).bulk_create(through_objs)

    def add(self, *objs):
        """
        Adds objects to the GM2M field

        :param *objs: object instances to add
        """
        self._check_through_model('add')

        if not objs:
            return

        db = router.db_for_write(self.through, instance=self.instance)
        self._do_add(db, self._to_add(objs, db))
    add.alters_data = True

    def _do_remove(self, db, q):
        """
        Perfoms items removal from a Q object
        """
        self.through._default_manager.using(db).filter(q).delete()

    def remove(self, *objs):
        """
        Removes objects from the GM2M field
        """
        # *objs - objects to remove
        self._check_through_model('remove')

        if not objs:
            return

        db = router.db_for_write(self.through, instance=self.instance)
        self._do_remove(db, self._to_remove(objs))
    remove.alters_data = True

    def _do_clear(self, db, filter=None):
        self.through._default_manager.using(db).filter(**(filter or {})) \
                                               .delete()

    def set(self, objs, **kwargs):
        """
        Sets the objs iterable as the set of related objects
        (Added for compatibility with Django 1.9)
        """
        self._check_through_model('set')

        objs = tuple(objs)

        clear = kwargs.pop('clear', False)
        db = router.db_for_write(self.through, instance=self.instance)

        if clear:
            # clears all and re-adds
            self._do_clear(db)
            self._do_add(db, *objs)
        else:
            # just removes the necessary items and adds the missing ones
            to_add, to_remove = self._to_change(objs, db)
            self._do_remove(db, to_remove)
            self._do_add(db, to_add)
    set.alters_data = True

    def clear(self):
        db = router.db_for_write(self.through, instance=self.instance)
        self._do_clear(db, self._to_clear())
    clear.alters_data = True


class GM2MBaseSrcManager(Manager):
    def __init__(self, instance):
        # the manager's model is the source model
        super(GM2MBaseSrcManager, self).__init__(instance)
        self.core_filters['%s__%s' % (self.query_field_name,
                                      self.field_names['tgt_ct'])] = \
            get_content_type(self.instance)
        self.core_filters['%s__%s' % (self.query_field_name,
                                      self.field_names['tgt_fk'])] = \
            self.instance.pk

    def _get_prefetch_queryset_params(self, instances, queryset, db):
        # we're looking for generic target instances, which should be
        # converted to (content_type, primary_key) tuples
        q = Q()
        for obj in instances:
            q = q | Q(**{
                '%s__%s' % (self.query_field_name,
                            self.field_names['tgt_ct']): get_content_type(obj),
                '%s__%s' % (self.query_field_name,
                            self.field_names['tgt_fk']): obj.pk
            })

        # Annotating the query in order to retrieve the primary model
        # content type and id in the same query
        # content type must be the 1st element, see rel_obj_attr below
        extra_fields = (
            self.through._meta.get_field(self.field_names['tgt_ct']),
            self.through._meta.get_field(self.field_names['tgt_fk'])
        )

        qs = self._get_extra_queryset(queryset, q, extra_fields, db)

        # primary model retrieval function
        def rel_obj_attr(relobj):
            t = []
            for f in extra_fields:
                try:
                    # t already contains the content type id
                    # we use get_for_id to retrieve the cached content type
                    model = ct.ContentType.objects.get_for_id(t[0]) \
                              .model_class()
                except IndexError:
                    # t is empty
                    model = ct.ContentType
                t.append(model._meta.pk.to_python(
                    getattr(relobj, '_prefetch_related_val_%s' % f.attname)
                ))
            return tuple(t)

        # model attribute retrieval function
        instance_attr = lambda inst: \
            (get_content_type(inst).pk, inst.pk)

        return qs, rel_obj_attr, instance_attr

    def _to_add(self, objs, db):
        # we're using the reverse relation to add source model
        # instances
        inst_ct = get_content_type(self.instance)
        vals = self.through._default_manager.using(db) \
                   .values_list(self.field_names['src'],
                                flat=True) \
                   .filter(**{
                       self.field_names['tgt_ct']: inst_ct,
                       self.field_names['tgt_fk']: self.pk
                   })
        to_add = []
        for obj in objs:
            if obj.pk not in vals:
                to_add.append(self.through(**{
                    '%s_id' % self.field_names['src']:
                        obj.pk,
                    self.field_names['tgt_ct']: inst_ct,
                    self.field_names['tgt_fk']: self.pk
                }))
        return to_add

    def _to_remove(self, objs):
        # we're using the reverse relation to delete source model
        # instances
        inst_ct = get_content_type(self.instance)
        return Q(**{
            '%s_id__in' % self.field_names['src']:
                [obj.pk for obj in objs],
            self.field_names['tgt_ct']: inst_ct,
            self.field_names['tgt_fk']: self.pk
        })

    def _to_change(self, objs, db):
        """
        Returns the sets of items to be added and a Q object for removal
        """
        inst_ct = get_content_type(self.instance)
        vals = list(self.through._default_manager.using(db)
                        .values_list(self.field_names['src'], flat=True)
                        .filter(**{
                            self.field_names['tgt_ct']: inst_ct,
                            self.field_names['tgt_fk']: self.pk
                        }))

        to_add = set()
        to_remove = set()
        for obj in objs:
            try:
                vals.remove(obj.pk)
            except ValueError:
                # obj.pk is not in vals and must be added
                to_add.add(self.through(**{
                    '%s_id' % self.field_names['src']:
                        obj.pk,
                    self.field_names['tgt_ct']: inst_ct,
                    self.field_names['tgt_fk']: self.pk
                }))

        for v in vals:
            to_remove.add(v)

        return to_add, Q(pk__in=to_remove)

    def _to_clear(self):
        return {
            self.field_names['tgt_ct']: get_content_type(self.instance),
self.field_names['tgt_fk']: self.instance.pk
}
class GM2MBaseTgtManager(Manager):
def __init__(self, instance):
# the manager's model is the through model
super(GM2MBaseTgtManager, self).__init__(instance)
source_field = self.through._meta.get_field(
self.field_names['src'])
self.source_related_fields = source_field.related_fields
for __, rh_field in self.source_related_fields:
key = '%s__%s' % (self.query_field_name, rh_field.name)
self.core_filters[key] = getattr(self.instance,
rh_field.attname)
def _get_queryset(self, using):
return GM2MTgtQuerySet(self.model, using=using)
def _get_prefetch_queryset_params(self, instances, queryset, db):
# we're looking for through model instances
query = {}
for lh_field, rh_field in self.source_related_fields:
query['%s__in' % lh_field.name] = \
set(getattr(obj, rh_field.attname)
for obj in instances)
q = Q(**query)
# Annotating the query in order to retrieve the primary model
# id in the same query
fk = self.through._meta.get_field(self.field_names['src'])
extra_fields = fk.local_related_fields
qs = self._get_extra_queryset(queryset, q, extra_fields, db)
# marking the queryset so that the original queryset should
# be returned when evaluated the first time
qs._related_prefetching = True
# primary model retrieval function
def rel_obj_attr(relobj):
t = []
for f in extra_fields:
v = getattr(relobj,
'_prefetch_related_val_%s' % f.attname)
try:
v = v.pop()
except AttributeError: # v is not a list
pass
t.append(f.related_model._meta.pk.to_python(v))
return tuple(t)
# model attribute retrieval function
select_fields = fk.foreign_related_fields
instance_attr = lambda inst: tuple([getattr(inst, f.attname)
for f in select_fields])
return qs, rel_obj_attr, instance_attr
def _to_add(self, objs, db):
models = []
objs_set = set()
for obj in objs:
# extract content type and primary key for each object
objs_set.add((get_content_type(obj),
obj.pk))
m = obj.__class__
if m not in models:
# call field.add_relation for each model
models.append(m)
self.field.add_relation(m, auto=True)
vals = self.through._default_manager.using(db) \
.filter(**{self.field_names['src']: self.pk}) \
.values_list(self.field_names['tgt_ct'],
self.field_names['tgt_fk'])
to_add = []
for ct, pk in objs_set.difference(vals):
to_add.append(self.through(**{
'%s_id' % self.field_names['src']: self.pk,
self.field_names['tgt_ct']: ct,
self.field_names['tgt_fk']: pk
}))
return to_add
def _to_remove(self, objs):
q = Q()
for obj in objs:
# Convert the obj to (content_type, primary_key)
q = q | Q(**{
self.field_names['tgt_ct']: get_content_type(obj),
self.field_names['tgt_fk']: obj.pk
})
return q & Q(**{
'%s_id' % self.field_names['src']: self.pk
})
def _to_clear(self):
return {
'%s_id' % self.field_names['src']: self.pk
}
def _to_change(self, objs, db):
"""
Returns the sets of items to be added and a Q object for removal
"""
to_add = set()
src_fname = self.field_names['src']
ct_fname = self.field_names['tgt_ct']
fk_fname = self.field_names['tgt_fk']
vals = list(self.through._default_manager.using(db)
.filter(**{self.field_names['src']: self.pk})
.values_list(ct_fname, fk_fname))
known_cts = set(v[0] for v in vals)
for obj in objs:
ct = get_content_type(obj)
val = (ct, obj.pk)
try:
vals.remove(val)
except ValueError:
# val is not in vals
# extract content type and primary key for each object
to_add.add((ct, obj.pk))
if ct.pk not in known_cts:
# call field.add_relation for each unknown model
self.field.add_relation(obj.__class__, auto=True)
known_cts.add(ct.pk)
rem_q = Q()
for val in vals:
# Convert the obj to (content_type, primary_key)
rem_q = rem_q | Q(**{
ct_fname: val[0],
fk_fname: val[1]
})
return [
self.through(**{
'%s_id' % src_fname: self.pk,
ct_fname: t[0],
fk_fname: t[1]
}) for t in to_add
], \
rem_q & Q(**{
'%s_id' % src_fname: self.pk
})
def create_gm2m_related_manager(superclass=None, **kwargs):
"""
Dynamically create a manager class that only concerns an instance (source
or target)
"""
bases = [GM2MBaseManager]
if superclass is None:
# no superclass provided, the manager is a generic target model manager
bases.insert(0, GM2MBaseTgtManager)
else:
# superclass provided, the manager is a source model manager and also
# derives from superclass
bases.insert(0, GM2MBaseSrcManager)
bases.append(superclass)
# Django's Manager constructor sets model to None, we store it under the
# class's attribute '_model' and it is retrieved in __init__
kwargs['_model'] = kwargs.pop('model')
return type(Manager)('GM2MManager', tuple(bases), kwargs)
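# --- Illustrative sketch (added; not part of the original module) ------------
# create_gm2m_related_manager() composes a manager class at runtime with
# type(). A minimal, Django-free sketch of the same pattern, using the
# hypothetical names _DemoBase and _make_manager for illustration only:
class _DemoBase:
    def greet(self):
        return 'hello from %s' % type(self).__name__

def _make_manager(superclass=None, **attrs):
    bases = [_DemoBase]
    if superclass is not None:
        bases.append(superclass)
    return type('DynamicManager', tuple(bases), attrs)

if __name__ == '__main__':
    DemoManager = _make_manager(label='demo')
    assert DemoManager().greet() == 'hello from DynamicManager'
    assert DemoManager.label == 'demo'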
| 35.841758
| 81
| 0.531886
| 1,896
| 16,308
| 4.325949
| 0.128692
| 0.044989
| 0.066569
| 0.053889
| 0.501707
| 0.459278
| 0.401366
| 0.342965
| 0.312119
| 0.262497
| 0
| 0.002944
| 0.375215
| 16,308
| 454
| 82
| 35.920705
| 0.802041
| 0.143488
| 0
| 0.342193
| 0
| 0
| 0.035938
| 0.005425
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096346
| false
| 0.003322
| 0.016611
| 0.013289
| 0.196013
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43109599c3c8fc1c990f73e67e01c2d6cb021aa0
| 6,802
|
py
|
Python
|
rastreador-de-bolso/TwitterListener.py
|
vitorduarte/RastreadorDeBolso
|
5c3bab222fced6f0d7367299b5007a628a408b4f
|
[
"MIT"
] | 1
|
2020-10-15T21:36:06.000Z
|
2020-10-15T21:36:06.000Z
|
rastreador-de-bolso/TwitterListener.py
|
vitorduarte/RastreadorDeBolso
|
5c3bab222fced6f0d7367299b5007a628a408b4f
|
[
"MIT"
] | 3
|
2021-06-08T21:38:20.000Z
|
2022-01-13T02:46:26.000Z
|
rastreador-de-bolso/TwitterListener.py
|
BambataTech/rastreador-de-bolso
|
5c3bab222fced6f0d7367299b5007a628a408b4f
|
[
"MIT"
] | null | null | null |
from selenium.webdriver.chrome.options import Options
from selenium import webdriver
import logging
import coloredlogs
import os
import pathlib
import time
import twitter as tt
from utils import retry
from fetch_likes import get_user_likes, login
from conf.settings import USER_ID, USERNAME, PASSWORD
CURR_PATH = pathlib.Path(__file__).parent.absolute()
TWEETS_FOLDER = os.path.join(CURR_PATH, 'screenshots')
LIKED_FOLDER = os.path.join(CURR_PATH, 'screenshots', 'liked')
class TwitterListener():
def __init__(self, user_id=USER_ID, search_base=40):
# Configure log
coloredlogs.install()
logging.basicConfig()
self.logger = logging.getLogger('TwitterListener')
self.logger.setLevel(logging.DEBUG)
# Set chrome options
chrome_options = Options()
chrome_options.add_argument('--headless')
chrome_options.add_argument("--no-sandbox")
self.driver = webdriver.Chrome(options=chrome_options)
        # Create a formatter and a file handler and attach them to the logger
formatter = logging.Formatter(
'%(asctime)s - %(name)s - %(levelname)s - %(message)s')
fh = logging.FileHandler('twitter.log')
fh.setFormatter(formatter)
self.logger.addHandler(fh)
self.search_base = search_base
self.user_id = user_id
self.target = tt.get_username_from_id(user_id)
        self.is_logged = False  # no trailing comma: a one-element tuple would always be truthy
self.has_previous_tweets = False
self.has_previous_friends = False
self.has_previous_likes = False
def _get_new_tweets(self):
if(not self.has_previous_tweets):
self.previous_tweets_ids = tt.get_ids_from_tweets(
tt.get_tweets(user_id=self.user_id, count=self.search_base))
self.has_previous_tweets = True
last_tweets = tt.get_tweets(user_id=self.user_id,
count=self.search_base)
last_tweets_ids = tt.get_ids_from_tweets(last_tweets)
diff_tweets = self._get_new_diff(
last_tweets_ids, self.previous_tweets_ids)
if diff_tweets:
new_tweets = [last_tweets[i] for i in range(len(diff_tweets))]
self.previous_tweets_ids = last_tweets_ids
new_tweets.reverse()
return new_tweets
return []
def _get_new_likes(self):
        count = self.search_base // 2  # integer count
if(not self.is_logged):
login(self.driver, USERNAME, PASSWORD)
self.is_logged = True
if(not self.has_previous_likes):
self.previous_likes_ids = get_user_likes(
self.driver, self.target, count=count)
self.has_previous_likes = True
new_likes_ids = get_user_likes(
self.driver, self.target, count=count)
diff_tweets = self._get_new_diff(
new_likes_ids, self.previous_likes_ids)
if diff_tweets:
self.previous_likes_ids = new_likes_ids
diff_tweets.reverse()
return diff_tweets
return []
def _get_new_diff(self, curr, old):
count = len(old)
return list(set(curr[:count//2]) -
set(old))
def _get_abs_diff(self, first_list, second_list):
return list(set(first_list) - set(second_list))
def print_new_tweets(self):
try:
new_tweets = self._get_new_tweets()
for tweet in new_tweets:
tweet_id = str(tweet['id'])
tweet_url = tt.get_url(tweet)
# Get image
self.logger.info('New tweet %s', tweet_url)
img_path = os.path.join(TWEETS_FOLDER, f'{tweet_id}.png')
retry(tt.print_tweet, tweet_url,
self.driver, output_path=img_path)
                self.logger.debug('Took a screenshot of the tweet')
# Tweet image
tweet_msg = 'Jair Bolsonaro acabou de twittar'
self.logger.debug(
f'Is a retweet: {"retweeted_status" in tweet}')
if('retweeted_status' in tweet):
tweet_msg = 'Jair Bolsonaro acabou de retweetar'
tt.tweet_print(img_path, tweet_url, tweet_msg)
                self.logger.debug('Tweeted the screenshot')
except Exception as e:
self.logger.error(e)
def print_new_likes(self):
try:
new_likes = self._get_new_likes()
for t_id in new_likes:
t_url = f'https://twitter.com/{self.target}/status/{t_id}'
# Get image
self.logger.info('New like %s', t_url)
img_path = os.path.join(LIKED_FOLDER, f'{t_id}.png')
retry(tt.print_tweet, t_url, self.driver, output_path=img_path)
                self.logger.debug('Took a screenshot of the tweet')
# Tweet image
t_msg = 'Jair Bolsonaro acabou de curtir esse tweet'
tt.tweet_print(img_path, t_url, t_msg)
                self.logger.debug('Tweeted the screenshot')
except Exception as e:
self.logger.error(e)
def watch_friends(self):
try:
if(not self.has_previous_friends):
self.previous_friends = tt.get_friends_ids(
user_id=self.user_id)
self.has_previous_friends = True
            last_friends = tt.get_friends_ids(user_id=self.user_id)
new_friends = self._get_abs_diff(
last_friends, self.previous_friends)
unfriends = self._get_abs_diff(self.previous_friends, last_friends)
for user_id in new_friends:
username = tt.get_username_from_id(user_id=user_id)
self.logger.info(f'New friend: @{username}')
retry(
tt.update_status,
status=(
f'Jair Bolsonaro aparentemente está seguindo @{username}.'
'\n(Esse bot não consegue verificar se essa atualização foi gerada '
'por um follow ou por uma reativação de conta)'
)
)
for user_id in unfriends:
username = tt.get_username_from_id(user_id=user_id)
self.logger.info(f'Unfriend: @{username}')
retry(
tt.update_status,
status=(
f'Jair Bolsonaro aparentemente deixou de seguir @{username}.'
'\n(Esse bot não consegue verificar se essa atualização foi gerada '
'por um unfollow, suspensão ou block.)'
)
)
self.previous_friends = last_friends
except Exception as e:
self.logger.error(e)
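# --- Illustrative sketch (added; not part of the original file) --------------
# The new-item detection above reduces to set arithmetic over the most recent
# half of the current ids; a standalone reproduction of _get_new_diff:
def _demo_get_new_diff(curr, old):
    count = len(old)
    return list(set(curr[:count // 2]) - set(old))

if __name__ == '__main__':
    old_ids = [5, 4, 3, 2, 1, 0]
    curr_ids = [7, 6, 5, 4, 3, 2]  # ids 7 and 6 are new
    assert sorted(_demo_get_new_diff(curr_ids, old_ids)) == [6, 7]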
| 36.569892
| 92
| 0.58718
| 822
| 6,802
| 4.592457
| 0.205596
| 0.028609
| 0.035762
| 0.012715
| 0.439205
| 0.342517
| 0.27894
| 0.259603
| 0.229404
| 0.229404
| 0
| 0.000876
| 0.328727
| 6,802
| 185
| 93
| 36.767568
| 0.825887
| 0.019994
| 0
| 0.213793
| 0
| 0
| 0.128117
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055172
| false
| 0.013793
| 0.075862
| 0.006897
| 0.17931
| 0.041379
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4313dd60cdb94904d246c40eddbdc84286d54a32
| 857
|
py
|
Python
|
torch_lib/Nets.py
|
troncosoae/jetson-exp
|
0c1a46b969b95bb9c350f78955ae6ca7f41b43b5
|
[
"MIT"
] | null | null | null |
torch_lib/Nets.py
|
troncosoae/jetson-exp
|
0c1a46b969b95bb9c350f78955ae6ca7f41b43b5
|
[
"MIT"
] | null | null | null |
torch_lib/Nets.py
|
troncosoae/jetson-exp
|
0c1a46b969b95bb9c350f78955ae6ca7f41b43b5
|
[
"MIT"
] | null | null | null |
import torch
import torch.nn as nn
import torch.nn.functional as F
class MediumNet(nn.Module):
def __init__(self):
super().__init__()
self.conv1 = nn.Conv2d(
3, out_channels=6, kernel_size=5, padding=0)
self.pool1 = nn.MaxPool2d(kernel_size=2, stride=2)
self.conv2 = nn.Conv2d(
6, out_channels=16, kernel_size=5, padding=0)
self.pool2 = nn.MaxPool2d(kernel_size=2, stride=2)
self.fc1 = nn.Linear(16*5*5, 120)
self.fc2 = nn.Linear(120, 84)
self.fc3 = nn.Linear(84, 10)
def forward(self, x):
x = self.pool1(F.relu(self.conv1(x)))
x = self.pool2(F.relu(self.conv2(x)))
x = torch.flatten(x, 1) # flatten all dimensions except batch
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = self.fc3(x)
return x
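# --- Illustrative check (added; assumes 32x32 RGB inputs, e.g. CIFAR-10) -----
# Why fc1 expects 16*5*5 features: conv1 (k=5, no padding) maps 32->28,
# pool1 28->14, conv2 14->10, pool2 10->5, with 16 channels: 16*5*5 = 400.
if __name__ == '__main__':
    net = MediumNet()
    x = torch.randn(1, 3, 32, 32)
    assert net(x).shape == (1, 10)  # one logit per class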
| 31.740741
| 70
| 0.584597
| 134
| 857
| 3.634328
| 0.358209
| 0.020534
| 0.073922
| 0.073922
| 0.229979
| 0.229979
| 0.135524
| 0.135524
| 0
| 0
| 0
| 0.07717
| 0.274212
| 857
| 26
| 71
| 32.961538
| 0.705788
| 0.04084
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0.130435
| 0
| 0.304348
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4315cbe9d3768c563f263560ae3ec49245d0ab6e
| 8,101
|
py
|
Python
|
beancount_bot/bot.py
|
dumbPy/beancount_bot
|
388a17f165c22b30e7f6377161eb5bf63578168a
|
[
"MIT"
] | null | null | null |
beancount_bot/bot.py
|
dumbPy/beancount_bot
|
388a17f165c22b30e7f6377161eb5bf63578168a
|
[
"MIT"
] | null | null | null |
beancount_bot/bot.py
|
dumbPy/beancount_bot
|
388a17f165c22b30e7f6377161eb5bf63578168a
|
[
"MIT"
] | null | null | null |
import traceback
import telebot
from telebot import apihelper
from telebot.types import InlineKeyboardMarkup, InlineKeyboardButton, MessageEntity, Message, CallbackQuery
from beancount_bot import transaction
from beancount_bot.config import get_config, load_config
from beancount_bot.dispatcher import Dispatcher
from beancount_bot.i18n import _
from beancount_bot.session import get_session, SESS_AUTH, get_session_for, set_session
from beancount_bot.task import load_task, get_task
from beancount_bot.transaction import get_manager
from beancount_bot.util import logger
apihelper.ENABLE_MIDDLEWARE = True
bot = telebot.TeleBot(token=None, parse_mode=None)
@bot.middleware_handler(update_types=['message'])
def session_middleware(bot_instance, message):
"""
Session middleware
:param bot_instance:
:param message:
:return:
"""
bot_instance.session = get_session_for(message.from_user.id)
#######
# Authentication #
#######
def check_auth() -> bool:
"""
    Check whether the current session is authenticated
:return:
"""
return SESS_AUTH in bot.session and bot.session[SESS_AUTH]
@bot.message_handler(commands=['start'])
def start_handler(message: Message):
"""
    Authentication on first contact
:param message:
:return:
"""
auth = get_session(message.from_user.id, SESS_AUTH, False)
    if auth:
        bot.reply_to(message, _("Already authenticated!"))
        return
    # Request authentication
    bot.reply_to(message, _("Welcome to the accounting bot! Please enter the authentication token:"))
def auth_token_handler(message: Message):
"""
Login token callback
:param message:
:return:
"""
if check_auth():
return
    # An unauthenticated message here is treated as an authentication token
auth_token = get_config('bot.auth_token')
if auth_token == message.text:
set_session(message.from_user.id, SESS_AUTH, True)
bot.reply_to(message, _("Authentic success!"))
else:
bot.reply_to(message, _("Authentication token error!"))
#######
# Commands #
#######
@bot.message_handler(commands=['reload'])
def reload_handler(message):
"""
    Reload-configuration command
:param message:
:return:
"""
if not check_auth():
bot.reply_to(message, _("Please conduct authentication first!"))
return
load_config()
load_task()
bot.reply_to(message, _("Successful overload configuration!"))
@bot.message_handler(commands=['help'])
def help_handler(message):
"""
    Help command
:param message:
:return:
"""
cmd = message.text
dispatchers = get_manager().dispatchers
if cmd == '/help':
# Create a message button
markup = InlineKeyboardMarkup()
for ind, d in zip(range(len(dispatchers)), dispatchers):
help_btn = _("help:{name}").format(name=d.get_name())
markup.add(InlineKeyboardButton(help_btn, callback_data=f'help:{ind}'))
        # Help message
command_usage = [
_("/start - Authentication"),
_("/help - Using help"),
_("/reload - Reload the configuration file"),
_("/task - View, run the task"),
]
        help_text = \
            _("Account bill Bot\n\nAvailable commands:\n{command}\n\nFor transaction statement syntax help, use /help [module name] for the corresponding module.").format(
                command='\n'.join(command_usage))
bot.reply_to(message, help_text, reply_markup=markup)
else:
# Display detailed help
name: str = cmd[6:]
flag_found = False
for d in dispatchers:
if name.lower() == d.get_name().lower():
show_usage_for(message, d)
flag_found = True
if not flag_found:
bot.reply_to(message, _("The corresponding name of the transaction statement processor does not exist!"))
def show_usage_for(message: Message, d: Dispatcher):
"""
Show the method of use of a specific processor
:param message:
:param d:
:return:
"""
usage = _("help:{name}\n\n{usage}").format(name=d.get_name(), usage=d.get_usage())
bot.reply_to(message, usage)
@bot.callback_query_handler(func=lambda call: call.data[:4] == 'help')
def callback_help(call: CallbackQuery):
"""
    Detailed help for a transaction statement processor
:param call:
:return:
"""
try:
d_id = int(call.data[5:])
dispatchers = get_manager().dispatchers
show_usage_for(call.message, dispatchers[d_id])
except Exception as e:
        logger.error(f'{call.id}: unknown error: {e}')
        logger.error(traceback.format_exc())
        bot.answer_callback_query(call.id, _("Unknown error!\n") + traceback.format_exc())
@bot.message_handler(commands=['task'])
def task_handler(message):
"""
    Task command
:param message:
:return:
"""
if not check_auth():
bot.reply_to(message, _("Please conduct authentication first!"))
return
cmd = message.text
tasks = get_task()
if cmd == '/task':
# Show all tasks
all_tasks = ', '.join(tasks.keys())
        bot.reply_to(message,
                     _("Currently registered tasks: {all_tasks}\n"
                       "Use /task [task name] to trigger one manually").format(all_tasks=all_tasks))
else:
# Run task
dest = cmd[6:]
if dest not in tasks:
bot.reply_to(message, _("Task does not exist!"))
return
task = tasks[dest]
task.trigger(bot)
#######
# Transactions #
#######
@bot.message_handler(func=lambda m: True)
def transaction_query_handler(message: Message):
"""
    Transaction statement handling
:param message:
:return:
"""
if not check_auth():
auth_token_handler(message)
return
    # Process the statement
manager = get_manager()
try:
tx_uuid, tx = manager.create_from_str(message.text)
# Create a message button
markup = InlineKeyboardMarkup()
markup.add(InlineKeyboardButton(_("Revoke trading"), callback_data=f'withdraw:{tx_uuid}'))
        # Reply
bot.reply_to(message, transaction.stringfy(tx), reply_markup=markup)
    except ValueError as e:
        logger.info(f'{message.from_user.id}: unable to add transaction: {e}')
        bot.reply_to(message, e.args[0])
    except Exception as e:
        logger.error(f'{message.from_user.id}: unknown error, failed to add the transaction: {e}')
        bot.reply_to(message, _("Unknown error! Failed to add the transaction.\n") + traceback.format_exc())
@bot.callback_query_handler(func=lambda call: call.data[:8] == 'withdraw')
def callback_withdraw(call: CallbackQuery):
"""
Transaction withdrawal callback
:param call:
:return:
"""
auth = get_session(call.from_user.id, SESS_AUTH, False)
if not auth:
bot.answer_callback_query(call.id, _("Please conduct authentication first!"))
return
tx_uuid = call.data[9:]
manager = get_manager()
try:
manager.remove(tx_uuid)
# Modify the original message reply
message = _("Transaction has been withdrawn")
code_format = MessageEntity('code', 0, len(message))
bot.edit_message_text(message,
chat_id=call.message.chat.id,
message_id=call.message.message_id,
entities=[code_format])
    except ValueError as e:
        logger.info(f'{call.id}: unable to withdraw transaction: {e}')
        bot.answer_callback_query(call.id, e.args[0])
    except Exception as e:
        logger.error(f'{call.id}: unknown error, failed to withdraw the transaction: {e}')
        bot.answer_callback_query(call.id, _("Unknown error! Failed to withdraw the transaction."))
def serving():
"""
    Start the bot
:return:
"""
    # Set the token
token = get_config('bot.token')
bot.token = token
# Set a proxy
proxy = get_config('bot.proxy')
if proxy is not None:
apihelper.proxy = {'https': proxy}
    # Start polling
bot.infinity_polling()
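# --- Illustrative sketch (added; not part of the original file) --------------
# The callback handlers route on a "<kind>:<payload>" convention inside
# callback_data; a standalone parser for that convention (hypothetical helper):
def _demo_parse_callback(data):
    kind, _sep, payload = data.partition(':')
    return kind, payload

if __name__ == '__main__':
    assert _demo_parse_callback('help:3') == ('help', '3')
    assert _demo_parse_callback('withdraw:some-uuid') == ('withdraw', 'some-uuid')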
| 30.340824
| 178
| 0.641155
| 980
| 8,101
| 5.122449
| 0.205102
| 0.023904
| 0.02988
| 0.050797
| 0.243028
| 0.193227
| 0.163745
| 0.1
| 0.1
| 0.0749
| 0
| 0.001787
| 0.240341
| 8,101
| 266
| 179
| 30.454887
| 0.813942
| 0.108011
| 0
| 0.194631
| 0
| 0.006711
| 0.186266
| 0.014663
| 0
| 0
| 0
| 0
| 0
| 1
| 0.080537
| false
| 0.006711
| 0.080537
| 0
| 0.214765
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4317b20c71fc0c90d2e65c623d90563c13f6fda9
| 8,933
|
py
|
Python
|
test/unit/metrics/test_group_sklearn_wrappers.py
|
GeGao2014/fairlearn
|
b0841c8b07ead6a285bdbc0ea61cac2338cbc96e
|
[
"MIT"
] | 2
|
2019-11-30T09:02:42.000Z
|
2019-12-02T10:24:29.000Z
|
test/unit/metrics/test_group_sklearn_wrappers.py
|
GeGao2014/fairlearn
|
b0841c8b07ead6a285bdbc0ea61cac2338cbc96e
|
[
"MIT"
] | null | null | null |
test/unit/metrics/test_group_sklearn_wrappers.py
|
GeGao2014/fairlearn
|
b0841c8b07ead6a285bdbc0ea61cac2338cbc96e
|
[
"MIT"
] | 1
|
2020-03-24T14:42:04.000Z
|
2020-03-24T14:42:04.000Z
|
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License.
import pytest
import numpy as np
import sklearn.metrics as skm
import fairlearn.metrics as metrics
# ======================================================
a = "a"
b = "b"
c = "c"
Y_true = [0, 0, 0, 1, 1, 1, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
Y_pred = [1, 0, 1, 0, 1, 0, 0, 1, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
Y_true_ternary = [a, b, c, c, c, b, b, b, c, c, a, a, a, a, a, b, c, c]
Y_pred_ternary = [b, c, c, c, b, b, b, b, b, c, a, a, c, a, a, b, c, c]
groups = [3, 4, 1, 0, 0, 0, 3, 2, 0, 1, 2, 3, 4, 0, 1, 2, 3, 4]
weight = [1, 2, 3, 1, 2, 3, 4, 2, 3, 3, 2, 1, 2, 3, 1, 2, 3, 4]
group2 = [0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1, 0, 1]
# =======================================================
# Define as tuples of (sklearn metric, group metric) so that the actual
# names can be seen when pytest builds the tests
supported_metrics_weighted = [(skm.accuracy_score, metrics.group_accuracy_score),
(skm.confusion_matrix, metrics.group_confusion_matrix),
(skm.zero_one_loss, metrics.group_zero_one_loss)]
# The following only work with binary data when called with their default arguments
supported_metrics_weighted_binary = [(skm.precision_score, metrics.group_precision_score),
(skm.recall_score, metrics.group_recall_score),
(skm.roc_auc_score, metrics.group_roc_auc_score),
(skm.mean_squared_error, metrics.group_mean_squared_error),
(skm.r2_score, metrics.group_r2_score)]
supported_metrics_weighted_binary = supported_metrics_weighted_binary + supported_metrics_weighted
metrics_no_sample_weights = [(skm.max_error, metrics.group_max_error),
(skm.mean_absolute_error, metrics.group_mean_absolute_error),
(skm.mean_squared_log_error, metrics.group_mean_squared_log_error),
(skm.median_absolute_error, metrics.group_median_absolute_error)]
supported_metrics_unweighted = metrics_no_sample_weights + supported_metrics_weighted_binary
# =======================================================
@pytest.mark.parametrize("func_tuple", supported_metrics_unweighted)
def test_metric_unweighted(func_tuple):
metric_func = func_tuple[0]
group_metric_func = func_tuple[1]
result = group_metric_func(Y_true, Y_pred, groups)
# We don't really care about the numbers (sklearn is responsible)
# We just want to make sure we got a result
assert len(result.by_group) == 5
expected_overall = metric_func(Y_true, Y_pred)
if isinstance(expected_overall, np.ndarray):
assert np.array_equal(expected_overall, result.overall)
else:
assert expected_overall == result.overall
@pytest.mark.parametrize("func_tuple", supported_metrics_weighted_binary)
def test_metric_weighted(func_tuple):
metric_func = func_tuple[0]
group_metric_func = func_tuple[1]
result = group_metric_func(Y_true, Y_pred, groups, sample_weight=weight)
assert len(result.by_group) == 5
expected_overall = metric_func(Y_true, Y_pred, sample_weight=weight)
if isinstance(expected_overall, np.ndarray):
assert np.array_equal(expected_overall, result.overall)
else:
assert expected_overall == result.overall
@pytest.mark.parametrize("func_tuple", supported_metrics_weighted)
def test_metric_weighted_ternary(func_tuple):
metric_func = func_tuple[0]
group_metric_func = func_tuple[1]
result = group_metric_func(Y_true_ternary, Y_pred_ternary, groups, sample_weight=weight)
assert len(result.by_group) == 5
expected_overall = metric_func(Y_true_ternary, Y_pred_ternary, sample_weight=weight)
if isinstance(expected_overall, np.ndarray):
assert np.array_equal(expected_overall, result.overall)
else:
assert expected_overall == result.overall
# ======================================================================================
def test_group_accuracy_score_unnormalized():
result = metrics.group_accuracy_score(Y_true, Y_pred, groups, normalize=False)
expected_overall = skm.accuracy_score(Y_true, Y_pred, False)
assert result.overall == expected_overall
# ======================================================================================
def test_group_confusion_matrix_labels():
labels = [0, 4]
result = metrics.group_confusion_matrix(Y_true, Y_pred, groups, labels=labels)
expected_overall = skm.confusion_matrix(Y_true, Y_pred, labels=labels)
assert np.array_equal(result.overall, expected_overall)
# ======================================================================================
def test_group_precision_score_ternary():
result = metrics.group_precision_score(Y_true_ternary, Y_pred_ternary, group2, average=None)
expected_overall = skm.precision_score(Y_true_ternary, Y_pred_ternary, average=None)
assert np.array_equal(result.overall, expected_overall)
def test_group_precision_score_pos_label():
result = metrics.group_precision_score(Y_true, Y_pred, groups, pos_label=0)
expected_overall = skm.precision_score(Y_true, Y_pred, pos_label=0)
assert np.array_equal(result.overall, expected_overall)
# ======================================================================================
def test_group_recall_score_ternary():
result = metrics.group_recall_score(Y_true_ternary, Y_pred_ternary, group2, average=None)
expected_overall = skm.recall_score(Y_true_ternary, Y_pred_ternary, average=None)
assert np.array_equal(result.overall, expected_overall)
def test_group_recall_score_pos_label():
result = metrics.group_recall_score(Y_true, Y_pred, groups, pos_label=0)
expected_overall = skm.recall_score(Y_true, Y_pred, pos_label=0)
assert np.array_equal(result.overall, expected_overall)
# ======================================================================================
def test_group_roc_auc_score_average():
result = metrics.group_roc_auc_score(Y_true, Y_pred, groups, average='samples')
expected_overall = skm.roc_auc_score(Y_true, Y_pred, average='samples')
assert expected_overall == result.overall
def test_group_roc_auc_score_max_fpr():
result = metrics.group_roc_auc_score(Y_true, Y_pred, groups, max_fpr=0.5)
expected_overall = skm.roc_auc_score(Y_true, Y_pred, max_fpr=0.5)
assert expected_overall == result.overall
# ======================================================================================
def test_group_zero_one_loss_unnormalized():
result = metrics.group_zero_one_loss(Y_true, Y_pred, groups, normalize=False)
expected_overall = skm.zero_one_loss(Y_true, Y_pred, False)
assert result.overall == expected_overall
# =============================================================================================
def test_group_mean_squared_error_multioutput_single_ndarray():
y_t = np.random.rand(len(groups), 2)
y_p = np.random.rand(len(groups), 2)
result = metrics.group_mean_squared_error(y_t, y_p, groups, multioutput='raw_values')
expected_overall = skm.mean_squared_error(y_t, y_p, multioutput='raw_values')
assert np.array_equal(result.overall, expected_overall)
# =============================================================================================
def test_group_r2_score_multioutput():
y_t = np.random.rand(len(groups), 2)
y_p = np.random.rand(len(groups), 2)
result = metrics.group_r2_score(y_t, y_p, groups, multioutput='raw_values')
expected_overall = skm.r2_score(y_t, y_p, multioutput='raw_values')
assert np.array_equal(result.overall, expected_overall)
for target_group in np.unique(groups):
mask = np.asarray(groups) == target_group
expected = skm.r2_score(y_t[mask], y_p[mask], multioutput='raw_values')
assert np.array_equal(result.by_group[target_group], expected)
# =============================================================================================
def test_group_mean_squared_error_multioutput_list_ndarray():
y_t = [np.random.rand(2) for x in groups]
y_p = [np.random.rand(2) for x in groups]
result = metrics.group_mean_squared_error(y_t, y_p, groups, multioutput='raw_values')
expected_overall = skm.mean_squared_error(y_t, y_p, multioutput='raw_values')
assert np.array_equal(result.overall, expected_overall)
for target_group in np.unique(groups):
y_true = []
y_pred = []
for i in range(len(groups)):
if groups[i] == target_group:
y_true.append(y_t[i])
y_pred.append(y_p[i])
expected = skm.mean_squared_error(y_true, y_pred, multioutput='raw_values')
assert np.array_equal(result.by_group[target_group], expected)
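# =============================================================================
# Illustrative test (added; not part of the original suite): what the group
# wrappers compute, reproduced by hand for accuracy using sklearn only.
def test_manual_group_accuracy_matches_wrapper():
    result = metrics.group_accuracy_score(Y_true, Y_pred, groups)
    g = np.asarray(groups)
    for target_group in np.unique(g):
        mask = g == target_group
        expected = skm.accuracy_score(np.asarray(Y_true)[mask],
                                      np.asarray(Y_pred)[mask])
        assert result.by_group[target_group] == expected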
| 39.179825
| 98
| 0.636069
| 1,198
| 8,933
| 4.40985
| 0.12187
| 0.102215
| 0.022714
| 0.037857
| 0.71872
| 0.682188
| 0.656256
| 0.58622
| 0.544198
| 0.544198
| 0
| 0.016635
| 0.165566
| 8,933
| 227
| 99
| 39.352423
| 0.692112
| 0.149558
| 0
| 0.348837
| 0
| 0
| 0.016761
| 0
| 0
| 0
| 0
| 0
| 0.178295
| 1
| 0.116279
| false
| 0
| 0.031008
| 0
| 0.147287
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4317fc5d9fcdfa4c3f22eb8a8bb944e1c61c7e2a
| 12,833
|
py
|
Python
|
deeplearning/tf_util.py
|
cbschaff/nlimb
|
f0564b00bab1b3367aaa88163e49bebc88f349bb
|
[
"MIT"
] | 12
|
2018-10-26T19:33:05.000Z
|
2022-01-17T11:47:59.000Z
|
deeplearning/tf_util.py
|
cbschaff/nlimb
|
f0564b00bab1b3367aaa88163e49bebc88f349bb
|
[
"MIT"
] | 9
|
2020-01-28T22:30:55.000Z
|
2022-03-11T23:32:04.000Z
|
deeplearning/tf_util.py
|
cbschaff/nlimb
|
f0564b00bab1b3367aaa88163e49bebc88f349bb
|
[
"MIT"
] | 3
|
2019-07-09T14:56:01.000Z
|
2019-11-18T06:58:41.000Z
|
"""
Adapted from OpenAI Baselines.
"""
import numpy as np
import tensorflow as tf # pylint: ignore-module
import random
import copy
import os
import functools
import collections
import multiprocessing
def switch(condition, then_expression, else_expression):
"""Switches between two operations depending on a scalar value (int or bool).
Note that both `then_expression` and `else_expression`
should be symbolic tensors of the *same shape*.
# Arguments
condition: scalar tensor.
then_expression: TensorFlow operation.
else_expression: TensorFlow operation.
"""
x_shape = copy.copy(then_expression.get_shape())
x = tf.cond(tf.cast(condition, 'bool'),
lambda: then_expression,
lambda: else_expression)
x.set_shape(x_shape)
return x
# ================================================================
# Extras
# ================================================================
def lrelu(x, leak=0.2):
f1 = 0.5 * (1 + leak)
f2 = 0.5 * (1 - leak)
return f1 * x + f2 * abs(x)
# ================================================================
# Mathematical utils
# ================================================================
def huber_loss(x, delta=1.0):
"""Reference: https://en.wikipedia.org/wiki/Huber_loss"""
return tf.where(
tf.abs(x) < delta,
tf.square(x) * 0.5,
delta * (tf.abs(x) - 0.5 * delta)
)
# ================================================================
# Global session
# ================================================================
def make_session(num_cpu=None, make_default=False):
"""Returns a session that will use <num_cpu> CPU's only"""
if num_cpu is None:
num_cpu = int(os.getenv('RCALL_NUM_CPU', multiprocessing.cpu_count()))
tf_config = tf.ConfigProto(
inter_op_parallelism_threads=num_cpu,
intra_op_parallelism_threads=num_cpu)
tf_config.gpu_options.allocator_type = 'BFC'
if make_default:
return tf.InteractiveSession(config=tf_config)
else:
return tf.Session(config=tf_config)
def single_threaded_session():
"""Returns a session which will only use a single CPU"""
return make_session(num_cpu=1)
def in_session(f):
@functools.wraps(f)
def newfunc(*args, **kwargs):
with tf.Session():
f(*args, **kwargs)
return newfunc
ALREADY_INITIALIZED = set()
def initialize():
"""Initialize all the uninitialized variables in the global scope."""
new_variables = set(tf.global_variables()) - ALREADY_INITIALIZED
tf.get_default_session().run(tf.variables_initializer(new_variables))
ALREADY_INITIALIZED.update(new_variables)
# ================================================================
# Saving variables and setting up experiment directories
# ================================================================
def load_state(fname):
saver = tf.train.Saver()
saver.restore(tf.get_default_session(), fname)
def save_state(fname):
os.makedirs(os.path.dirname(fname), exist_ok=True)
saver = tf.train.Saver()
saver.save(tf.get_default_session(), fname)
def load(fname):
import cloudpickle
with open(fname, 'rb') as f:
return cloudpickle.load(f)
def save(fname, obj):
import cloudpickle
os.makedirs(os.path.dirname(fname), exist_ok=True)
with open(fname, 'wb') as fh:
cloudpickle.dump(obj, fh)
class Experiment(object):
def __init__(self, logdir):
self.logdir = logdir
os.makedirs(os.path.join(logdir, 'checkpoints'), exist_ok=True)
def load(self, timestep=None):
if timestep is None:
# get latest ckpt
import glob
fs = glob.glob(os.path.join(self.logdir, 'checkpoints/*'))
timesteps = []
for f in fs:
                try: timesteps.append(int(os.path.basename(f)))
                except ValueError: pass  # ignore non-numeric checkpoint names
if len(timesteps) == 0:
return 0
timestep = max(timesteps)
fname = os.path.join(self.logdir, 'checkpoints', str(timestep), 'model')
load_state(fname)
return timestep
def save(self, timestep):
fname = os.path.join(self.logdir, 'checkpoints', str(timestep), 'model')
save_state(fname)
def load_model_fn(self):
fname = os.path.join(self.logdir, 'checkpoints/model_fn.pkl')
assert os.path.exists(fname), "No model file saved."
return load(fname)
def save_model_fn(self, model_fn):
fname = os.path.join(self.logdir, 'checkpoints/model_fn.pkl')
save(fname, model_fn)
# ================================================================
# Model components
# ================================================================
def batch_to_seq(h, nbatch, nsteps, flat=False):
"""
Assumes Time major data!!
x.shape = [nsteps, nbatch, *obs_shape]
h = x.reshape([-1, *x.shape[2:]]))
"""
if flat:
h = tf.reshape(h, [nsteps, nbatch])
else:
h = tf.reshape(h, [nsteps, nbatch, -1])
return [tf.squeeze(v, [0]) for v in tf.split(axis=0, num_or_size_splits=nsteps, value=h)]
def seq_to_batch(h, flat = False):
"""
Assumes Time major data!!
x.shape = [nsteps, nbatch, *obs_shape]
x = output.reshape(nsteps, nbatch, *obs_shape), where output is the output of this function.
"""
shape = h[0].get_shape().as_list()
if not flat:
assert(len(shape) > 1)
nh = h[0].get_shape()[-1].value
return tf.reshape(tf.concat(axis=0, values=h), [-1, nh])
else:
return tf.reshape(tf.stack(values=h, axis=0), [-1])
def ortho_init(scale=1.0):
def _ortho_init(shape, dtype, partition_info=None):
#lasagne ortho init for tf
shape = tuple(shape)
if len(shape) == 2:
flat_shape = shape
elif len(shape) == 4: # assumes NHWC
flat_shape = (np.prod(shape[:-1]), shape[-1])
else:
raise NotImplementedError
a = np.random.normal(0.0, 1.0, flat_shape)
u, _, v = np.linalg.svd(a, full_matrices=False)
q = u if u.shape == flat_shape else v # pick the one with the correct shape
q = q.reshape(shape)
return (scale * q[:shape[0], :shape[1]]).astype(np.float32)
return _ortho_init
def normc_initializer(std=1.0):
def _initializer(shape, dtype=None, partition_info=None): # pylint: disable=W0613
out = np.random.randn(*shape).astype(np.float32)
out *= std / np.sqrt(np.square(out).sum(axis=0, keepdims=True))
return tf.constant(out)
return _initializer
def lstm(xs, ms, s, scope, nh, init_scale=1.0):
nbatch, nin = [v.value for v in xs[0].get_shape()]
nsteps = len(xs)
with tf.variable_scope(scope):
wx = tf.get_variable("wx", [nin, nh*4], initializer=ortho_init(init_scale))
wh = tf.get_variable("wh", [nh, nh*4], initializer=ortho_init(init_scale))
b = tf.get_variable("b", [nh*4], initializer=tf.constant_initializer(0.0))
c, h = tf.split(axis=1, num_or_size_splits=2, value=s)
for idx, (x, m) in enumerate(zip(xs, ms)):
c = c*(1-m)
h = h*(1-m)
z = tf.matmul(x, wx) + tf.matmul(h, wh) + b
i, f, o, u = tf.split(axis=1, num_or_size_splits=4, value=z)
i = tf.nn.sigmoid(i)
f = tf.nn.sigmoid(f)
o = tf.nn.sigmoid(o)
u = tf.tanh(u)
c = f*c + i*u
h = o*tf.tanh(c)
xs[idx] = h
s = tf.concat(axis=1, values=[c, h])
return xs, s
# ================================================================
# Theano-like Function
# ================================================================
def function(inputs, outputs, updates=None, givens=None):
"""Just like Theano function. Take a bunch of tensorflow placeholders and expressions
computed based on those placeholders and produces f(inputs) -> outputs. Function f takes
values to be fed to the input's placeholders and produces the values of the expressions
in outputs.
Input values can be passed in the same order as inputs or can be provided as kwargs based
on placeholder name (passed to constructor or accessible via placeholder.op.name).
Example:
x = tf.placeholder(tf.int32, (), name="x")
y = tf.placeholder(tf.int32, (), name="y")
z = 3 * x + 2 * y
lin = function([x, y], z, givens={y: 0})
with single_threaded_session():
initialize()
assert lin(2) == 6
assert lin(x=3) == 9
assert lin(2, 2) == 10
assert lin(x=2, y=3) == 12
Parameters
----------
inputs: [tf.placeholder, tf.constant, or object with make_feed_dict method]
list of input arguments
outputs: [tf.Variable] or tf.Variable
list of outputs or a single output to be returned from function. Returned
value will also have the same shape.
"""
if isinstance(outputs, list):
return _Function(inputs, outputs, updates, givens=givens)
elif isinstance(outputs, (dict, collections.OrderedDict)):
f = _Function(inputs, outputs.values(), updates, givens=givens)
return lambda *args, **kwargs: type(outputs)(zip(outputs.keys(), f(*args, **kwargs)))
else:
f = _Function(inputs, [outputs], updates, givens=givens)
return lambda *args, **kwargs: f(*args, **kwargs)[0]
class _Function(object):
def __init__(self, inputs, outputs, updates, givens):
# for inpt in inputs:
# if not hasattr(inpt, 'make_feed_dict') and not (type(inpt) is tf.Tensor and len(inpt.op.inputs) == 0):
# assert False, "inputs should all be placeholders, constants, or have a make_feed_dict method"
self.inputs = inputs
updates = updates or []
self.update_group = tf.group(*updates)
self.outputs_update = list(outputs) + [self.update_group]
self.givens = {} if givens is None else givens
def _feed_input(self, feed_dict, inpt, value):
if hasattr(inpt, 'make_feed_dict'):
feed_dict.update(inpt.make_feed_dict(value))
else:
feed_dict[inpt] = value
def __call__(self, *args):
assert len(args) <= len(self.inputs), "Too many arguments provided"
feed_dict = {}
# Update the args
for inpt, value in zip(self.inputs, args):
if value is not None:
self._feed_input(feed_dict, inpt, value)
# Update feed dict with givens.
for inpt in self.givens:
feed_dict[inpt] = feed_dict.get(inpt, self.givens[inpt])
results = tf.get_default_session().run(self.outputs_update, feed_dict=feed_dict)[:-1]
return results
# ================================================================
# Flat vectors
# ================================================================
def var_shape(x):
out = x.get_shape().as_list()
assert all(isinstance(a, int) for a in out), \
"shape function assumes that shape is fully known"
return out
def numel(x):
return intprod(var_shape(x))
def intprod(x):
return int(np.prod(x))
def flatgrad(loss, var_list, clip_norm=None):
grads = tf.gradients(loss, var_list)
if clip_norm is not None:
grads, _ = tf.clip_by_global_norm(grads, clip_norm=clip_norm)
return tf.concat(axis=0, values=[
tf.reshape(grad if grad is not None else tf.zeros_like(v), [numel(v)])
for (v, grad) in zip(var_list, grads)
])
class SetFromFlat(object):
def __init__(self, var_list, dtype=tf.float32):
assigns = []
shapes = list(map(var_shape, var_list))
total_size = np.sum([intprod(shape) for shape in shapes])
self.theta = theta = tf.placeholder(dtype, [total_size])
start = 0
assigns = []
for (shape, v) in zip(shapes, var_list):
size = intprod(shape)
assigns.append(tf.assign(v, tf.reshape(theta[start:start + size], shape)))
start += size
self.op = tf.group(*assigns)
def __call__(self, theta):
tf.get_default_session().run(self.op, feed_dict={self.theta: theta})
class GetFlat(object):
def __init__(self, var_list):
self.op = tf.concat(axis=0, values=[tf.reshape(v, [numel(v)]) for v in var_list])
def __call__(self):
return tf.get_default_session().run(self.op)
def flattenallbut0(x):
return tf.reshape(x, [-1, intprod(x.get_shape().as_list()[1:])])
def reset():
global ALREADY_INITIALIZED
ALREADY_INITIALIZED = set()
tf.reset_default_graph()
"""
Random Seeds
"""
def set_global_seeds(i):
try:
import tensorflow as tf
except ImportError:
pass
else:
tf.set_random_seed(i)
np.random.seed(i)
random.seed(i)
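# ================================================================
# Illustrative sketch (added; not part of the original file)
# ================================================================
# GetFlat/SetFromFlat round-trip parameters through one 1-D vector; the same
# bookkeeping with plain numpy, no session required:
if __name__ == '__main__':
    shapes = [(2, 3), (4,)]
    params = [np.arange(intprod(s)).reshape(s) for s in shapes]
    flat = np.concatenate([p.reshape(-1) for p in params])  # GetFlat analogue
    start, rebuilt = 0, []
    for s in shapes:  # SetFromFlat analogue
        size = intprod(s)
        rebuilt.append(flat[start:start + size].reshape(s))
        start += size
    assert all(np.array_equal(a, b) for a, b in zip(params, rebuilt))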
| 34.590296
| 116
| 0.580924
| 1,690
| 12,833
| 4.280473
| 0.210651
| 0.017694
| 0.009953
| 0.015759
| 0.172104
| 0.132983
| 0.096212
| 0.060547
| 0.053083
| 0.0423
| 0
| 0.01033
| 0.230577
| 12,833
| 370
| 117
| 34.683784
| 0.722301
| 0.262137
| 0
| 0.098712
| 0
| 0
| 0.026281
| 0.005213
| 0
| 0
| 0
| 0
| 0.017167
| 1
| 0.167382
| false
| 0.008584
| 0.055794
| 0.017167
| 0.364807
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
431830fa7f1548920feb149a4d5dc17216d7a063
| 1,695
|
py
|
Python
|
Util/constant.py
|
RoboCupULaval/StrategyAI
|
ccddde144f2c0a67113d2e5ffe7c75ed9d4a3d19
|
[
"MIT"
] | 13
|
2018-03-14T10:20:10.000Z
|
2021-12-10T05:36:47.000Z
|
Util/constant.py
|
RoboCupULaval/StrategyIA
|
ccddde144f2c0a67113d2e5ffe7c75ed9d4a3d19
|
[
"MIT"
] | 200
|
2016-04-29T23:13:01.000Z
|
2018-03-13T14:36:39.000Z
|
Util/constant.py
|
RoboCupULaval/StrategyIA
|
ccddde144f2c0a67113d2e5ffe7c75ed9d4a3d19
|
[
"MIT"
] | 45
|
2015-07-04T18:57:39.000Z
|
2018-01-11T16:11:13.000Z
|
# Under MIT License, see LICENSE.txt
""" Module définissant des constantes de programmations python pour l'IA """
from enum import Enum
ROBOT_RADIUS = 90
ROBOT_DIAMETER = ROBOT_RADIUS * 2
ROBOT_CENTER_TO_KICKER = 60
BALL_RADIUS = 21
MAX_PLAYER_ON_FIELD_PER_TEAM = 6
BALL_OUTSIDE_FIELD_BUFFER = 200
# Radius and angles for tactics
DISTANCE_BEHIND = ROBOT_RADIUS + 30 # in millimeters
ANGLE_TO_GRAB_BALL = 1 # in radians; must be large in case ball moves fast
RADIUS_TO_GRAB_BALL = ROBOT_RADIUS + 30
ANGLE_TO_HALT = 0.05 # 0.05 rad, about 3 degrees
RADIUS_TO_HALT = ROBOT_RADIUS + BALL_RADIUS
REASONABLE_OFFSET = 50 # To take into account the camera precision and other things
# Rules
KEEPOUT_DISTANCE_FROM_BALL = 500 + ROBOT_RADIUS + REASONABLE_OFFSET
KEEPOUT_DISTANCE_FROM_GOAL = ROBOT_RADIUS + REASONABLE_OFFSET
PADDING_DEFENSE_AREA = 100
# Rule 5.2: Minimum movement before a ball is "in play"
IN_PLAY_MIN_DISTANCE = 50
# Rule 8.2.1: Distance from the opposing team defending zone
INDIRECT_KICK_OFFSET = 200
# Deadzones
POSITION_DEADZONE = ROBOT_RADIUS * 0.1
# Orientation abs_tol
ORIENTATION_ABSOLUTE_TOLERANCE = 0.01 # 0.5 degree
# TeamColor
class TeamColor(Enum):
def __str__(self):
return 'blue' if self == TeamColor.BLUE else 'yellow'
YELLOW = 0
BLUE = 1
class FieldSide(Enum):
POSITIVE = 0
NEGATIVE = 1
class KickForce(Enum):
NONE = 0
LOW = 1
MEDIUM = 2
HIGH = 3
@classmethod
def for_dist(cls, dist, seconds_to_reach=1.0):
speed = (dist / 1000) / seconds_to_reach
return speed
class KickType(Enum):
DIRECT = 0
CHIP = 1
class DribbleState(Enum):
AUTOMATIC = 0
FORCE_STOP = 1
FORCE_SPIN = 2
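# Illustrative check (added; not part of the original module): for_dist
# converts a distance in millimetres into a speed in m/s over the given
# time budget, e.g. 2000 mm covered in 1 s -> 2.0 m/s.
if __name__ == '__main__':
    assert KickForce.for_dist(2000) == 2.0
    assert str(TeamColor.BLUE) == 'blue'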
| 22.6
| 84
| 0.728614
| 254
| 1,695
| 4.610236
| 0.551181
| 0.075149
| 0.056362
| 0.046114
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.047513
| 0.20531
| 1,695
| 74
| 85
| 22.905405
| 0.821826
| 0.258997
| 0
| 0
| 0
| 0
| 0.008084
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.022727
| 0.022727
| 0.522727
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
431a5a90835e15a36f13f4092d02d4382895d659
| 1,570
|
py
|
Python
|
sample-demo-lambda-app/lambda_function.py
|
sriharshams-aws/aws-codeguru-profiler-python-demo-application
|
36e63bc6364871e6a7b29437c1fb68243d2c54f4
|
[
"Apache-2.0"
] | 6
|
2020-12-04T00:08:02.000Z
|
2021-06-12T05:23:25.000Z
|
sample-demo-lambda-app/lambda_function.py
|
sriharshams-aws/aws-codeguru-profiler-python-demo-application
|
36e63bc6364871e6a7b29437c1fb68243d2c54f4
|
[
"Apache-2.0"
] | 6
|
2020-12-09T11:40:01.000Z
|
2021-09-23T09:03:18.000Z
|
sample-demo-lambda-app/lambda_function.py
|
sriharshams-aws/aws-codeguru-profiler-python-demo-application
|
36e63bc6364871e6a7b29437c1fb68243d2c54f4
|
[
"Apache-2.0"
] | 21
|
2020-12-09T01:35:48.000Z
|
2022-01-28T09:18:55.000Z
|
import boto3
import logging
import os
from random import randrange
from urllib.request import urlopen
# It is not recommended to enable DEBUG logs in production,
# this is just to show an example of a recommendation
# by Amazon CodeGuru Profiler.
logging.getLogger('botocore').setLevel(logging.DEBUG)
SITE = 'http://www.python.org/'
CW_NAMESPACE = 'ProfilerPythonDemo'
S3_BUCKET = os.environ['S3_BUCKET']
def lambda_handler(event, context):
# Make some network calls using urllib and s3 client.
with urlopen(SITE) as response:
s3_client = boto3.client('s3')
s3_client.put_object(Body=response.read(),
Bucket=S3_BUCKET,
Key='response.txt')
# Publish metrics.
content_length = int(response.headers['Content-Length'])
put_metric('ResponseContentLength', content_length)
put_metric(str(response.status)[0] + 'xxStatus', 1)
# Generate some CPU-intensive work.
num = randrange(content_length)
count = 0
for _ in range(num):
x = randrange(num)
if check_prime(x):
count += 1
return count
def put_metric(name, value):
cw_client = boto3.client('cloudwatch')
metric_data_num = [{'MetricName': name, 'Value': value}]
cw_client.put_metric_data(Namespace=CW_NAMESPACE, MetricData=metric_data_num)
def check_prime(num):
if num == 1 or num == 0:
return False
sq_root = 2
while sq_root * sq_root <= num:
if num % sq_root == 0:
return False
sq_root += 1
return True
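# --- Illustrative check (added; not part of the original file) ---------------
# check_prime is plain trial division and can be exercised locally, assuming
# the S3_BUCKET environment variable is set so the module imports cleanly:
if __name__ == '__main__':
    assert [n for n in range(10) if check_prime(n)] == [2, 3, 5, 7]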
| 27.54386
| 81
| 0.659236
| 206
| 1,570
| 4.873786
| 0.514563
| 0.02988
| 0.033865
| 0.043825
| 0.035857
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01598
| 0.242675
| 1,570
| 56
| 82
| 28.035714
| 0.828427
| 0.153503
| 0
| 0.052632
| 0
| 0
| 0.105144
| 0.015885
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078947
| false
| 0
| 0.131579
| 0
| 0.315789
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
431b587034ff91b11e453596c7cd2a1cc508eb0c
| 920
|
py
|
Python
|
setup.py
|
panchambanerjee/access_spotify
|
d1c50d1553718755d58d034e8d2049f986ef5f84
|
[
"MIT"
] | 4
|
2020-07-26T20:41:03.000Z
|
2020-08-04T05:36:32.000Z
|
setup.py
|
panchambanerjee/access_spotify
|
d1c50d1553718755d58d034e8d2049f986ef5f84
|
[
"MIT"
] | null | null | null |
setup.py
|
panchambanerjee/access_spotify
|
d1c50d1553718755d58d034e8d2049f986ef5f84
|
[
"MIT"
] | 1
|
2020-08-04T05:36:34.000Z
|
2020-08-04T05:36:34.000Z
|
#!/usr/bin/env python
import setuptools
from setuptools import setup
from os import path
# Read the package requirements
with open("requirements.txt", "r") as f:
requirements = [line.rstrip("\n") for line in f if line != "\n"]
# Read the contents of the README file
this_directory = path.abspath(path.dirname(__file__))
with open(path.join(this_directory, 'README.md'), encoding='utf-8') as f:
long_description = f.read()
setup(name='access-spotify',
version="1.1",
author="pancham_banerjee",
author_email="panchajanya.banerjee@gmail.com",
packages=setuptools.find_packages(),
scripts=["./bin/access_script.py"],
install_requires=requirements,
license="MIT",
description="A package to get all album and track info for an artist by querying the Spotify API",
long_description=long_description,
long_description_content_type='text/markdown'
)
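# Illustrative usage (added; not part of the original file), from the repo root:
#   pip install .             # install the package and its requirements
#   python setup.py sdist     # or build a source distribution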
| 31.724138
| 104
| 0.706522
| 125
| 920
| 5.064
| 0.632
| 0.094787
| 0.060032
| 0.094787
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003947
| 0.173913
| 920
| 28
| 105
| 32.857143
| 0.828947
| 0.094565
| 0
| 0
| 0
| 0
| 0.264174
| 0.062726
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.15
| 0
| 0.15
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
431c4388fab05fa311c4c60aa774db64074aff3d
| 528
|
py
|
Python
|
hearthstone/hslog/utils.py
|
bertokhoury/python-hearthstone
|
635a8a14b85f468c1ab1d0bc9d0bcffaa00fda43
|
[
"MIT"
] | 1
|
2021-01-29T04:54:23.000Z
|
2021-01-29T04:54:23.000Z
|
hearthstone/hslog/utils.py
|
bertokhoury/python-hearthstone
|
635a8a14b85f468c1ab1d0bc9d0bcffaa00fda43
|
[
"MIT"
] | null | null | null |
hearthstone/hslog/utils.py
|
bertokhoury/python-hearthstone
|
635a8a14b85f468c1ab1d0bc9d0bcffaa00fda43
|
[
"MIT"
] | null | null | null |
from hearthstone.enums import GameTag, TAG_TYPES
def parse_enum(enum, value):
if value.isdigit():
value = int(value)
elif hasattr(enum, value):
value = getattr(enum, value)
else:
raise Exception("Unhandled %s: %r" % (enum, value))
return value
def parse_tag(tag, value):
tag = parse_enum(GameTag, tag)
if tag in TAG_TYPES:
value = parse_enum(TAG_TYPES[tag], value)
elif value.isdigit():
value = int(value)
else:
raise NotImplementedError("Invalid string value %r = %r" % (tag, value))
return tag, value
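# --- Illustrative check (added; not part of the original module) -------------
# parse_enum() accepts either a digit string (returned as an int) or a member
# name (returned as the enum member):
if __name__ == '__main__':
    assert parse_enum(GameTag, 'ZONE') == GameTag.ZONE
    assert parse_enum(GameTag, str(int(GameTag.ZONE))) == int(GameTag.ZONE)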
| 22.956522
| 74
| 0.702652
| 77
| 528
| 4.727273
| 0.363636
| 0.098901
| 0.093407
| 0.10989
| 0.137363
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.168561
| 528
| 22
| 75
| 24
| 0.829157
| 0
| 0
| 0.222222
| 0
| 0
| 0.083333
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.055556
| 0
| 0.277778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
431e555f5efee68273402bccef7dcb0a30ea9d0c
| 2,364
|
py
|
Python
|
ejemplo_clase_00.py
|
ernestoarzabala/Curso-Python-Utch
|
ed5cd89ed85a1021d78fd17d495b3b3ec0203c77
|
[
"Unlicense"
] | null | null | null |
ejemplo_clase_00.py
|
ernestoarzabala/Curso-Python-Utch
|
ed5cd89ed85a1021d78fd17d495b3b3ec0203c77
|
[
"Unlicense"
] | null | null | null |
ejemplo_clase_00.py
|
ernestoarzabala/Curso-Python-Utch
|
ed5cd89ed85a1021d78fd17d495b3b3ec0203c77
|
[
"Unlicense"
] | null | null | null |
# Example file 00 of class creation in Python
from math import gcd  # greatest common divisor (GCD)
class Fraccion:
""" La clase Fraccion: Una fraccion es un part de enteros: un numerador (num)
y un denominador (den !=0 ) cuyo MCD es 1.
"""
def __init__(self,numerador,denominador):
""" Constructor de la clase. Construye una fracción a partir de dos enteros:
un numerador y un denominador.
¡El constructor se enloquece si el denominador es cero!
Nota mental:Agregar manejo de error para denominador igual a cero.
"""
numerador = int(numerador)
denominador = int(denominador)
hcf = gcd(numerador,denominador)
        self.num, self.den = numerador // hcf, denominador // hcf  # integer division keeps both as ints
def __str__(self):
""" Generador de strings para representar una fracción.
Se necesita si se desea convertir ,o mostrar, una fraccion a string.
"""
return "%d/%d" % (self.num,self.den)
def __mul__(self,otrafraccion):
""" Función necesaria para el operador de multiplicación.
Multiplica dos fracciones para obtener una fraccion resultante
"""
return Fraccion(self.num*otrafraccion.num,self.den*otrafraccion.den)
def __add__(self,otrafraccion):
"""Función necesaria para el operador de suma.
Suma dos fracciones para obtener una fraccion resultante
"""
return Fraccion(self.num*otrafraccion.den+self.den*otrafraccion.num,self.den*otrafraccion.den)
def a_numero_real(self):
""" Función para convertir la fracción a un numero de punto flotante.
El equivalente numérico con punto decimal de la fracción.
"""
return float(self.num)/float(self.den)
if __name__ == "__main__":
a = Fraccion(5,12)
print(a)
b = Fraccion(3,5)
c = a*b
c_real = c.a_numero_real()
print("Multiplicar la fraccion {} por la fraccion {} da como resultado la fraccion {} que es equivalente a {}".format(a,b,c,c_real))# Escribe tu código aquí :-)
a = Fraccion(1,2)
print(a)
b = Fraccion(1,4)
c = a+b
c_real = c.a_numero_real()
print("Sumar la fraccion {} con la fraccion {} da como resultado la fraccion {} que es equivalente a {}".format(a,b,c,c_real))# Escribe tu código aquí :-)
| 38.754098
| 164
| 0.651861
| 321
| 2,364
| 4.697819
| 0.361371
| 0.027851
| 0.026525
| 0.018568
| 0.35809
| 0.35809
| 0.35809
| 0.312997
| 0.249337
| 0.249337
| 0
| 0.00737
| 0.253807
| 2,364
| 60
| 165
| 39.4
| 0.846939
| 0.410321
| 0
| 0.142857
| 0
| 0
| 0.173949
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.178571
| false
| 0
| 0.035714
| 0
| 0.392857
| 0.142857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43204edf29ab75f14a0b24a7c9fd04d677528ff0
| 732
|
py
|
Python
|
recs/live_project_popularity_recommender.py
|
WingCode/live-project
|
977dfbcaade35d8173dbb6ace102fe8998f1cdf4
|
[
"MIT"
] | null | null | null |
recs/live_project_popularity_recommender.py
|
WingCode/live-project
|
977dfbcaade35d8173dbb6ace102fe8998f1cdf4
|
[
"MIT"
] | 8
|
2021-01-05T00:06:26.000Z
|
2022-03-12T01:05:06.000Z
|
recs/live_project_popularity_recommender.py
|
WingCode/live-project
|
977dfbcaade35d8173dbb6ace102fe8998f1cdf4
|
[
"MIT"
] | 4
|
2021-01-04T07:23:17.000Z
|
2022-03-18T12:29:37.000Z
|
import os
import pandas as pd
class LiveProjectPopularityBasedRecs:
def __init__(self):
self.charts = {}
charts_folder = "charts"
if os.path.isdir(charts_folder):
for file in os.listdir("charts"):
name, ext = file.split('.')
if ext == "csv" and len(name) > 0:
self.charts[name] = pd.read_csv("{}/{}".format(charts_folder, file), index_col=0)
else:
print("Genre Global and Charts not implemented!")
def genre_chart(self, genre):
if genre in self.charts:
return self.charts[genre]
elif "Top" in self.charts:
return self.charts["Top"]
else:
return ""
| 25.241379
| 101
| 0.545082
| 85
| 732
| 4.576471
| 0.470588
| 0.154242
| 0.061697
| 0.092545
| 0.143959
| 0.143959
| 0
| 0
| 0
| 0
| 0
| 0.004132
| 0.338798
| 732
| 28
| 102
| 26.142857
| 0.799587
| 0
| 0
| 0.1
| 0
| 0
| 0.091781
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.1
| 0
| 0.4
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43245976f12a77315f00f3cf0db335fcb32e0255
| 647
|
py
|
Python
|
pce/src/testing/test_pce.py
|
elise-baumgartner/onramp
|
beb3c807264fcb70d8069ff2e3990b0ce3f59912
|
[
"BSD-3-Clause"
] | 2
|
2016-09-09T04:19:01.000Z
|
2019-02-15T20:28:13.000Z
|
pce/src/testing/test_pce.py
|
elise-baumgartner/onramp
|
beb3c807264fcb70d8069ff2e3990b0ce3f59912
|
[
"BSD-3-Clause"
] | 67
|
2016-06-02T19:37:56.000Z
|
2018-02-22T05:23:45.000Z
|
pce/src/testing/test_pce.py
|
elise-baumgartner/onramp
|
beb3c807264fcb70d8069ff2e3990b0ce3f59912
|
[
"BSD-3-Clause"
] | 9
|
2015-06-22T22:10:22.000Z
|
2016-04-26T15:35:45.000Z
|
#!../env/bin/python
"""A simple test script for the PCE portion of OnRamp.
Usage: ./test_pce.py
This script is only intended to be run in a fresh install of the repository. It
has side-effects that could corrupt module and user data if run in a production
setting.
Prior to running this script, ensure that onramp/pce/bin/onramp_pce_install.py
has been called and that the server is running. Also ensure
./test_pce_config.cfg contains the proper settings.
"""
import nose
import sys
if __name__ == '__main__':
print (__doc__)
try:
response = raw_input('(C)ontinue or (A)bort? ') # Python 2
except NameError:
response = input('(C)ontinue or (A)bort? ') # Python 3, where raw_input no longer exists
if response != 'C':
sys.exit(0)
nose.main()
| 26.958333
| 79
| 0.723338
| 107
| 647
| 4.205607
| 0.635514
| 0.031111
| 0.026667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001898
| 0.185471
| 647
| 23
| 80
| 28.130435
| 0.851992
| 0.7017
| 0
| 0
| 0
| 0
| 0.172043
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4327e63a016b0fdf98132c5f404968581fab3fee
| 1,860
|
py
|
Python
|
scribdl/test/test_download.py
|
fatshotty/scribd-downloader
|
d07e301c0a7781cf0b8cf38846061e043e8b86e9
|
[
"MIT"
] | 182
|
2019-09-25T18:48:09.000Z
|
2022-03-22T01:22:21.000Z
|
scribdl/test/test_download.py
|
fatshotty/scribd-downloader
|
d07e301c0a7781cf0b8cf38846061e043e8b86e9
|
[
"MIT"
] | 38
|
2019-09-11T00:51:35.000Z
|
2022-03-30T12:05:19.000Z
|
scribdl/test/test_download.py
|
fatshotty/scribd-downloader
|
d07e301c0a7781cf0b8cf38846061e043e8b86e9
|
[
"MIT"
] | 83
|
2019-10-11T12:07:29.000Z
|
2022-03-31T05:06:47.000Z
|
from ..downloader import Downloader
import os
import pytest
@pytest.fixture
def cwd_to_tmpdir(tmpdir):
os.chdir(str(tmpdir))
def test_audiobook_download(cwd_to_tmpdir, monkeypatch):
audiobook_url = "https://www.scribd.com/audiobook/237606860/100-Ways-to-Motivate-Yourself-Change-Your-Life-Forever"
audiobook_downloader = Downloader(audiobook_url)
audio = audiobook_downloader.download()
assert audio[0] == "100_Ways_to_Motivate_Yourself__Change_Your_Life_Forever_preview.mp3"
assert os.path.getsize(audio[0]) == 2127830
def test_text_document_download(cwd_to_tmpdir):
text_doc_url = "https://www.scribd.com/document/96882378/Trademark-License-Agreement"
text_downloader = Downloader(text_doc_url)
md_doc = text_downloader.download(is_image_document=False)
assert os.path.getsize(md_doc.input_content) in range(1000, 2000)
md_doc.to_pdf()
assert os.path.getsize(md_doc.pdf_path) in range(20000, 31000)
def test_img_document_download(cwd_to_tmpdir):
img_doc_url = "https://www.scribd.com/doc/136711944/Signature-Scanning-and-Verification-in-Finacle"
img_downloader = Downloader(img_doc_url)
imgs = img_downloader.download(is_image_document=True)
assert len(imgs.input_content) == 2
imgs.to_pdf()
assert os.path.getsize(imgs.pdf_path) in range(140000, 150000)
def test_book_download(cwd_to_tmpdir, monkeypatch):
book_url = "https://www.scribd.com/read/262694921/Acting-The-First-Six-Lessons"
book_downloader = Downloader(book_url)
# We don't want to clutter stdout with book contents if this test fails
monkeypatch.setattr("builtins.print", lambda x: None)
md_book = book_downloader.download()
assert os.path.getsize(md_book.input_content) in range(10000, 20000)
md_book.to_pdf()
assert os.path.getsize(md_book.pdf_path) in range(200000, 2500000)
| 38.75
| 119
| 0.768817
| 274
| 1,860
| 4.959854
| 0.361314
| 0.03532
| 0.05298
| 0.083885
| 0.360559
| 0.198676
| 0.10596
| 0.067697
| 0.067697
| 0
| 0
| 0.064496
| 0.124731
| 1,860
| 47
| 120
| 39.574468
| 0.77027
| 0.037097
| 0
| 0
| 0
| 0.088235
| 0.220917
| 0.037472
| 0
| 0
| 0
| 0
| 0.235294
| 1
| 0.147059
| false
| 0
| 0.088235
| 0
| 0.235294
| 0.029412
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
432a6cd43a1645c5ef69788411b16a04cd68ac58
| 20,941
|
py
|
Python
|
yasql/apps/sqlorders/views.py
|
Fanduzi/YaSQL
|
bc6366a9b1c1e9ed84fd24ea2b4a21f8f99d0af5
|
[
"Apache-2.0"
] | 443
|
2018-02-08T02:53:48.000Z
|
2020-10-13T10:01:55.000Z
|
yasql/apps/sqlorders/views.py
|
Fanduzi/YaSQL
|
bc6366a9b1c1e9ed84fd24ea2b4a21f8f99d0af5
|
[
"Apache-2.0"
] | 27
|
2020-10-14T10:01:52.000Z
|
2022-03-12T00:49:47.000Z
|
yasql/apps/sqlorders/views.py
|
Fanduzi/YaSQL
|
bc6366a9b1c1e9ed84fd24ea2b4a21f8f99d0af5
|
[
"Apache-2.0"
] | 148
|
2018-03-15T06:07:25.000Z
|
2020-08-17T14:58:45.000Z
|
# -*- coding:utf-8 -*-
# edit by fuzongfei
import base64
import datetime
# Create your views here.
import json
from django.http import Http404, HttpResponse
from django.utils import timezone
from django_filters.rest_framework import DjangoFilterBackend
from rest_framework import filters
from rest_framework.exceptions import PermissionDenied
from rest_framework.generics import ListAPIView, GenericAPIView, CreateAPIView, UpdateAPIView, DestroyAPIView
from rest_framework.views import APIView
from rest_framework.viewsets import ViewSet
from libs import permissions
from libs.Pagination import Pagination
from libs.RenderColumns import render_dynamic_columns
from libs.response import JsonResponseV1
from sqlorders import models, serializers
from sqlorders.filters import SqlOrderListFilter, GetTasksListFilter
class GetDBEnvironment(ListAPIView):
queryset = models.DbEnvironment.objects.all()
serializer_class = serializers.DbEnvironmentSerializer
# Fetch the order environments
def get(self, request, *args, **kwargs):
serializer = self.get_serializer(self.get_queryset(), many=True)
return JsonResponseV1(data=serializer.data)
class GetDbSchemas(APIView):
# Fetch the schema list for the given environment and purpose
def get(self, request):
serializer = serializers.DbSchemasSerializer(data=request.query_params)
if serializer.is_valid():
return JsonResponseV1(data=serializer.query)
return JsonResponseV1(message=serializer.errors, code='0001')
class IncepSyntaxCheckView(APIView):
def post(self, request, *args, **kwargs):
serializer = serializers.IncepSyntaxCheckSerializer(data=request.data)
if serializer.is_valid():
s, data = serializer.check()
render_columns = [
{'key': 'order_id', 'value': '序号'},
{'key': 'stage', 'value': '阶段'},
{'key': 'stage_status', 'value': '阶段状态'},
{'key': 'error_level', 'value': '错误级别'},
{'key': 'error_message', 'value': '错误信息', 'width': '35%'},
{'key': 'sql', 'value': 'SQL内容', 'width': '25%', 'ellipsis': True},
{'key': 'affected_rows', 'value': '影响/扫描行数'}
]
columns = render_dynamic_columns(render_columns)
message = '语法检查未发现异常,可以提交'
if not s:
message = '语法检查发现异常,详情请查看输出,更正后在提交'
d = {
'status': 0 if s else 1,
'data': data
}
data = {'columns': columns, 'data': d}
return JsonResponseV1(data=data, message=message)
return JsonResponseV1(message=serializer.errors, code='0001', flat=True)
class SqlOrdersCommit(GenericAPIView):
permission_classes = (permissions.CanCommitOrdersPermission,)
serializer_class = serializers.SqlOrdersCommitSerializer
def post(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
if serializer.is_valid():
serializer.save()
return JsonResponseV1(message="提交成功")
return JsonResponseV1(message=serializer.errors, code='0001', flat=True)
class SqlOrdersList(ListAPIView):
permission_classes = (permissions.CanViewOrdersPermission,)
queryset = models.DbOrders.objects.all()
serializer_class = serializers.SqlOrdersListSerializer
pagination_class = Pagination
filter_backends = [DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter]
filter_class = SqlOrderListFilter
ordering = ['-created_at']
search_fields = ['title', 'database', 'remark', 'applicant', 'progress', 'contents']
def get(self, request, *args, **kwargs):
queryset = self.filter_queryset(self.get_queryset())
page = self.paginate_queryset(queryset)
serializer = self.get_serializer(page, many=True)
render_columns = [
{'key': 'progress', 'value': '进度', 'width': '8%'},
{'key': 'applicant', 'value': '申请人'},
{'key': 'department', 'value': '部门'},
{'key': 'env_name', 'value': '环境'},
{'key': 'escape_title', 'value': '标题', 'width': '18%', 'ellipsis': True},
{'key': 'sql_type', 'value': '类型'},
{'key': 'remark', 'value': '备注'},
{'key': 'version', 'value': '版本'},
{'key': 'host', 'value': '实例/库'},
{'key': 'auditor', 'value': '审核人'},
{'key': 'reviewer', 'value': '复核人'},
]
columns = render_dynamic_columns(render_columns)
data = {'columns': columns, 'data': serializer.data}
return self.get_paginated_response(data)
class SqlOrdersDetail(ListAPIView):
"""SQL工单详情"""
permission_classes = (permissions.CanViewOrdersPermission,)
queryset = models.DbOrders.objects.all()
serializer_class = serializers.SqlOrderDetailSerializer
lookup_field = 'order_id'
def get(self, request, *args, **kwargs):
queryset = self.get_object()
serializer = self.get_serializer(queryset, context={"request": request})
return JsonResponseV1(data=serializer.data)
class OpSqlOrderView(ViewSet):
"""更新SQL工单状态,如:审核,关闭等"""
permission_classes = (permissions.CanViewOrdersPermission,)
def get_obj(self, pk):
try:
obj = models.DbOrders.objects.get(pk=pk)
return obj
except models.DbOrders.DoesNotExist:
raise Http404
def approve(self, request, pk):
serializer = serializers.OpSqlOrderSerializer(instance=self.get_obj(pk),
data=request.data,
context={"request": request, "handler": "_approve"})
if serializer.is_valid():
serializer.save()
return JsonResponseV1(data=serializer.data, message="操作成功")
return JsonResponseV1(message=serializer.errors, code='0001')
def feedback(self, request, pk):
serializer = serializers.OpSqlOrderSerializer(instance=self.get_obj(pk),
data=request.data,
context={"request": request, "handler": "_feedback"})
if serializer.is_valid():
serializer.save()
return JsonResponseV1(data=serializer.data, message="操作成功")
return JsonResponseV1(message=serializer.errors, code='0001')
def close(self, request, pk):
serializer = serializers.OpSqlOrderSerializer(instance=self.get_obj(pk),
data=request.data,
context={"request": request, "handler": "_close"})
if serializer.is_valid():
serializer.save()
return JsonResponseV1(data=serializer.data, message="操作成功")
return JsonResponseV1(message=serializer.errors, code='0001')
def review(self, request, pk):
serializer = serializers.OpSqlOrderSerializer(instance=self.get_obj(pk),
data=request.data,
context={"request": request, "handler": "_review"})
if serializer.is_valid():
serializer.save()
return JsonResponseV1(data=serializer.data, message="操作成功")
return JsonResponseV1(message=serializer.errors, code='0001')
class GenerateTasksView(APIView):
permission_classes = (permissions.CanExecuteOrdersPermission,)
def post(self, request, *args, **kwargs):
serializer = serializers.GenerateSqlOrdersTasksSerializer(data=request.data)
if serializer.is_valid():
data = serializer.save(request)
return JsonResponseV1(data=data)
return JsonResponseV1(message=serializer.errors, code='0001', flat=True)
class GetTaskIdView(APIView):
def get(self, request, *args, **kwargs):
"""根据order id返回taskid"""
order_id = kwargs.get('order_id')
task_id = models.DbOrdersExecuteTasks.objects.filter(order_id=order_id).first().task_id
return JsonResponseV1(data=task_id)
class GetTasksPreviewView(ListAPIView):
permission_classes = (permissions.CanViewOrdersPermission,)
queryset = models.DbOrdersExecuteTasks.objects.all()
serializer_class = serializers.SqlOrdersTasksListSerializer
pagination_class = Pagination
filter_backends = [DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter]
filter_class = GetTasksListFilter
search_fields = ['sql']
ordering = ['created_time']
def get(self, request, *args, **kwargs):
task_id = kwargs.get('task_id')
queryset = self.filter_queryset(self.get_queryset().filter(task_id=task_id))
# If the data-hiding switch is on, only the applicant, auditor,
# reviewer and superusers may view the data
obj = models.DbOrders.objects.get(
pk=models.DbOrdersExecuteTasks.objects.filter(task_id=task_id).first().order_id
)
if obj.is_hide == 'ON' and not request.user.is_superuser:
allowed_view_users = [obj.applicant]
allowed_view_users.extend([x['user'] for x in json.loads(obj.auditor)])
allowed_view_users.extend([x['user'] for x in json.loads(obj.reviewer)])
if request.user.username not in allowed_view_users:
raise PermissionDenied(detail='您没有权限查看该工单的数据,5s后,自动跳转到工单列表页面')
origin_queryset = self.queryset.filter(task_id=task_id)
total = origin_queryset.count()
progress_0 = origin_queryset.filter(progress=0).count()
progress_1 = origin_queryset.filter(progress=1).count()
progress_3 = origin_queryset.filter(progress=3).count()
page = self.paginate_queryset(queryset)
serializer = self.get_serializer(page, context={'request': request}, many=True)
render_columns = [
{'key': 'num', 'value': '序号'}, # custom "num" field, used by the frontend to show row numbers
{'key': 'applicant', 'value': '申请人'},
{'key': 'sql', 'value': 'SQL', 'ellipsis': True, 'width': '50%'},
{'key': 'progress', 'value': '进度'},
{'key': 'result', 'value': '查看结果'}, # custom "result" field
]
columns = render_dynamic_columns(render_columns)
data = {'columns': columns,
'data': {'data': serializer.data,
'total': total,
'progress_0': progress_0,
'progress_1': progress_1,
'progress_3': progress_3}}
return self.get_paginated_response(data)
class GetTasksListView(ListAPIView):
permission_classes = (permissions.CanViewOrdersPermission,)
queryset = models.DbOrdersExecuteTasks.objects.all()
serializer_class = serializers.SqlOrdersTasksListSerializer
pagination_class = Pagination
filter_backends = [DjangoFilterBackend, filters.SearchFilter, filters.OrderingFilter]
filter_class = GetTasksListFilter
search_fields = ['sql']
ordering = ['created_time']
def get(self, request, *args, **kwargs):
task_id = kwargs.get('task_id')
queryset = self.filter_queryset(self.get_queryset().filter(task_id=task_id))
# If the data-hiding switch is on, only the applicant, auditor,
# reviewer and superusers may view the data
obj = models.DbOrders.objects.get(
pk=models.DbOrdersExecuteTasks.objects.filter(task_id=task_id).first().order_id
)
if obj.is_hide == 'ON' and not request.user.is_superuser:
allowed_view_users = [obj.applicant]
allowed_view_users.extend([x['user'] for x in json.loads(obj.auditor)])
allowed_view_users.extend([x['user'] for x in json.loads(obj.reviewer)])
if request.user.username not in allowed_view_users:
raise PermissionDenied(detail='您没有权限查看该工单的数据,5s后,自动跳转到工单列表页面')
page = self.paginate_queryset(queryset)
serializer = self.get_serializer(page, context={'request': request}, many=True)
render_columns = [
{'key': 'num', 'value': '序号'}, # custom "num" field, used by the frontend to show row numbers
{'key': 'applicant', 'value': '申请人'},
{'key': 'sql', 'value': 'SQL', 'ellipsis': True, 'width': '50%'},
{'key': 'progress', 'value': '进度'},
{'key': 'execute', 'value': '执行'}, # custom "execute" field
{'key': 'result', 'value': '查看结果'}, # custom "result" field
]
if queryset.exists():
if queryset.first().sql_type == 'DDL':
render_columns.insert(-1, {'key': 'ghost_pause', 'value': '暂停(gh-ost)'})
render_columns.insert(-1, {'key': 'ghost_recovery', 'value': '恢复(gh-ost)'})
columns = render_dynamic_columns(render_columns)
data = {'columns': columns, 'data': serializer.data}
return self.get_paginated_response(data)
class ExecuteSingleTaskView(APIView):
permission_classes = (permissions.CanExecuteOrdersPermission,)
def post(self, request, *args, **kwargs):
serializer = serializers.ExecuteSingleTaskSerializer(data=request.data)
if serializer.is_valid():
serializer.execute(request)
return JsonResponseV1(message="任务提交成功,请查看输出")
return JsonResponseV1(message=serializer.errors, code='0001', flat=True)
class ExecuteMultiTasksView(APIView):
permission_classes = (permissions.CanExecuteOrdersPermission,)
def post(self, request, *args, **kwargs):
serializer = serializers.ExecuteMultiTasksSerializer(data=request.data)
if serializer.is_valid():
serializer.execute(request)
return JsonResponseV1(message="任务提交成功,请查看输出")
return JsonResponseV1(message=serializer.errors, code='0001', flat=True)
class ThrottleTaskView(APIView):
permission_classes = (permissions.CanExecuteOrdersPermission,)
def post(self, request, *args, **kwargs):
serializer = serializers.ThrottleTaskSerializer(data=request.data)
if serializer.is_valid():
message = serializer.execute(request)
return JsonResponseV1(message=message)
return JsonResponseV1(message=serializer.errors, code='0001', flat=True)
class GetTasksResultView(ListAPIView):
"""SQL工单详情"""
permission_classes = (permissions.CanViewOrdersPermission,)
queryset = models.DbOrdersExecuteTasks.objects.all()
serializer_class = serializers.GetTasksResultSerializer
lookup_field = 'id'
def get(self, request, *args, **kwargs):
queryset = self.get_object()
serializer = self.get_serializer(queryset, context={"request": request})
return JsonResponseV1(data=serializer.data)
class HookSqlOrdersView(APIView):
permission_classes = (permissions.anyof(permissions.CanCommitOrdersPermission,
permissions.CanViewOrdersPermission,
permissions.CanExecuteOrdersPermission,
permissions.CanAuditOrdersPermission),
)
def post(self, request, *args, **kwargs):
serializer = serializers.HookSqlOrdersSerializer(data=request.data)
if serializer.is_valid():
serializer.save()
return JsonResponseV1(message="任务提交成功,请查看输出")
return JsonResponseV1(message=serializer.errors, code='0001', flat=True)
class DownloadExportFilesView(APIView):
"""下载导出文件"""
permission_classes = (permissions.CanViewOrdersPermission,)
def get(self, request, base64_filename):
file_name = base64.b64decode(base64_filename).decode()
if not models.DbExportFiles.objects.filter(file_name=file_name).exists():
raise Http404
obj = models.DbExportFiles.objects.get(file_name=file_name)
if not models.DbOrdersExecuteTasks.objects.get(pk=obj.task_id).applicant == request.user.username:
raise PermissionDenied(detail='您没有权限')
fsock = open(f"media/{obj.files}", 'rb')
response = HttpResponse(fsock, content_type="application/zip")
response['Content-Disposition'] = f'attachment; filename={file_name}'
return response
class ReleaseVersionsGet(APIView):
"""获取上线版本号,提交工单使用"""
def get(self, request):
before_30_days = (timezone.now() - datetime.timedelta(days=30))
queryset = models.ReleaseVersions.objects.filter(
expire_time__gte=before_30_days
).values('id', 'version', 'expire_time').order_by('-created_at')
for row in queryset:
row['disabled'] = 0
if row['expire_time'] < datetime.datetime.date(timezone.now()):
row['disabled'] = 1
return JsonResponseV1(data=queryset)
class ReleaseVersionsList(ListAPIView):
"""获取上线版本号列表,管理上线版本号使用"""
permission_classes = (permissions.CanViewVersionPermission,)
queryset = models.ReleaseVersions.objects.all()
serializer_class = serializers.ReleaseVersionsListSerializer
pagination_class = Pagination
filter_backends = [filters.SearchFilter, filters.OrderingFilter]
search_fields = ['username', 'version', 'expire_time']
ordering = ['-created_at']
def get(self, request, *args, **kwargs):
queryset = self.filter_queryset(self.get_queryset())
page = self.paginate_queryset(queryset)
serializer = self.get_serializer(page, many=True)
render_columns = [
{'key': 'version', 'value': '版本'},
{'key': 'username', 'value': '创建人'},
{'key': 'expire_time', 'value': '截止日期'},
{'key': 'created_at', 'value': '创建时间'},
{'key': 'key', 'value': '操作'},
{'key': 'id', 'value': '详情'},
]
columns = render_dynamic_columns(render_columns)
data = {'columns': columns, 'data': serializer.data}
return self.get_paginated_response(data)
class ReleaseVersionsCreate(CreateAPIView):
"""创建版本"""
permission_classes = (permissions.CanCreateVersionsPermission,)
serializer_class = serializers.ReleaseVersionsCreateSerializer
def create(self, request, *args, **kwargs):
serializer = self.get_serializer(data=request.data)
if serializer.is_valid():
self.perform_create(serializer)
return JsonResponseV1(message="创建成功")
return JsonResponseV1(code='0001', message=serializer.errors, flat=True)
class ReleaseVersionsUpdate(UpdateAPIView):
"""更新版本号,该类只更新单条记录"""
permission_classes = (permissions.CanUpdateVersionsPermission,)
def put(self, request, *args, **kwargs):
serializer = serializers.ReleaseVersionsSerializer(
instance=models.ReleaseVersions.objects.get(pk=kwargs['key']), # returns a single record
data=request.data
)
if serializer.is_valid():
serializer.save()
return JsonResponseV1(message="更新成功")
return JsonResponseV1(code='0001', message=serializer.errors, flat=True)
class ReleaseVersionsDelete(DestroyAPIView):
"""删除版本"""
permission_classes = (permissions.CanDeleteVersionsPermission,)
queryset = models.ReleaseVersions.objects.all()
lookup_field = 'id' # defaults to the primary key, so this line is optional
def destroy(self, request, *args, **kwargs):
instance = self.get_object()
self.perform_destroy(instance)
return JsonResponseV1(message="删除成功")
class ReleaseVersionsView(APIView):
"""获取指定版本内工单在所有环境的进度"""
def get(self, request, *args, **kwargs):
# Look up the primary key for the version
version = kwargs.get('version')
version_id = models.ReleaseVersions.objects.get(version=version).pk
# Fetch the environments and pivot rows into dynamic columns
obj = models.DbEnvironment.objects.values('id', 'name')
row2columns = ''
for row in obj:
row2columns += f"max(if(env_id={row['id']}, progress, -1)) as {row['name']},"
# Fetch each order's status in every environment under this version; the environments are dynamic
# the generated id column carries no real meaning
query = "select " + row2columns + \
"substring(MD5(RAND()),1,20) as id,title as escape_title,order_id, applicant " \
"from yasql_dborders where version_id=%s group by escape_title,order_id,applicant"
rawquery = models.DbOrders.objects.raw(query, [version_id]) # parameterized instead of f-string interpolation
# Extract the environment column names
dynamic_columns = list(rawquery.columns)[:-4]
data = []
for row in rawquery:
columns = {
'id': row.id,
'escape_title': row.escape_title,
'order_id': row.order_id,
'applicant': row.applicant,
}
for col in dynamic_columns:
columns[col] = getattr(row, col)
data.append(columns)
render_columns = [
{'key': 'escape_title', 'ellipsis': True, 'value': '标题'},
{'key': 'applicant', 'value': '申请人'},
]
render_columns.extend([{'key': x, 'value': x} for x in dynamic_columns])
columns = render_dynamic_columns(render_columns)
data = {'columns': columns, 'data': data}
return JsonResponseV1(data=data)
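# --- Hedged illustration (not in the original file) ---
# The row2columns loop builds a MySQL-style pivot: one max(if(...)) column per
# environment. With two hypothetical environments, id=1 "dev" and id=2 "prod",
# the generated prefix would be:
#
#     max(if(env_id=1, progress, -1)) as dev,max(if(env_id=2, progress, -1)) as prod,
#
# so each order collapses to a single row whose dev/prod columns hold its
# progress in that environment (-1 when the order never ran there).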
| 41.715139
| 111
| 0.633351
| 2,027
| 20,941
| 6.418352
| 0.178096
| 0.053805
| 0.041507
| 0.030669
| 0.568486
| 0.542045
| 0.507148
| 0.494927
| 0.476249
| 0.476249
| 0
| 0.00997
| 0.243207
| 20,941
| 501
| 112
| 41.798403
| 0.810954
| 0.021441
| 0
| 0.472868
| 0
| 0.002584
| 0.09277
| 0.010384
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069767
| false
| 0
| 0.043928
| 0
| 0.423773
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
432ad11a5c271d697e37438e64317a7886323133
| 1,489
|
py
|
Python
|
perp_adj.py
|
shmakn99/Knowledge-Graph-VG
|
ce2b0d6e16199357f1afc4aa7e58f74aae35e023
|
[
"MIT"
] | null | null | null |
perp_adj.py
|
shmakn99/Knowledge-Graph-VG
|
ce2b0d6e16199357f1afc4aa7e58f74aae35e023
|
[
"MIT"
] | null | null | null |
perp_adj.py
|
shmakn99/Knowledge-Graph-VG
|
ce2b0d6e16199357f1afc4aa7e58f74aae35e023
|
[
"MIT"
] | null | null | null |
import glove_util as gut
import numpy as np
from sklearn.decomposition import TruncatedSVD
import json
with open('freq_count_pred.json') as f:
freq_count_pred = json.load(f)
def get_pc(sentences):
svd = TruncatedSVD(n_components=1, n_iter=7, random_state=0)
svd.fit(sentences)
return svd.components_
def weighted_avg(predicate,a,dim):
predicate = predicate.lower().strip().split()
if len(predicate) == 1:
return gut.glove(predicate[0],dim)
else:
support = np.zeros(dim)
for word in predicate:
vector = gut.glove(word,dim)
if len(vector) == 0:
vector = np.zeros(dim) # match the requested dimension instead of hard-coding 300
support += (a/(a+freq_count_pred[word]))*vector
return support
with open('relationships.json') as f:
relationships = json.load(f)
predicate_embedding = {}
sentences = []
i = 0
for image in relationships:
i+=1
if i%1000 == 0:
print (i)
for relation in image['relationships']:
w_avg = weighted_avg(relation['predicate'],0.001,300)
sentences.append(w_avg)
predicate_embedding[relation['relationship_id']] = w_avg
pc = get_pc(np.array(sentences))[0]
projection_space = np.outer(pc,pc)
i = 0
for image in relationships:
i+=1
if i%1000 == 0:
print (i)
for relation in image['relationships']:
predicate_embedding[relation['relationship_id']] = predicate_embedding[relation['relationship_id']] - np.matmul(projection_space,predicate_embedding[relation['relationship_id']])
with open('predicate_embedding_300.json','w') as f:
json.dump(predicate_embedding,f)
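# --- Hedged note (not in the original file) ---
# This appears to follow the smooth-inverse-frequency (SIF) recipe of Arora et
# al. (ICLR 2017): embed each predicate as a frequency-weighted average of its
# word vectors, weight a / (a + freq(word)), then remove the projection onto
# the corpus's first principal component. The second pass above computes
#
#     v <- v - (pc pc^T) v
#
# where pc is the first right singular vector returned by TruncatedSVD.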
| 22.560606
| 181
| 0.725319
| 221
| 1,489
| 4.742081
| 0.316742
| 0.120229
| 0.099237
| 0.145038
| 0.290076
| 0.137405
| 0.137405
| 0.137405
| 0.137405
| 0.137405
| 0
| 0.026583
| 0.141034
| 1,489
| 65
| 182
| 22.907692
| 0.792807
| 0
| 0
| 0.26087
| 0
| 0
| 0.108798
| 0.018805
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0.086957
| 0
| 0.195652
| 0.043478
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
432f6dd85dd7a23f729a99a79b5f40586fb8f07f
| 2,732
|
py
|
Python
|
dino/validation/events/message/limit_msg_length.py
|
thenetcircle/dino
|
1047c3458e91a1b4189e9f48f1393b3a68a935b3
|
[
"Apache-2.0"
] | 150
|
2016-10-05T11:09:36.000Z
|
2022-03-06T16:24:41.000Z
|
dino/validation/events/message/limit_msg_length.py
|
thenetcircle/dino
|
1047c3458e91a1b4189e9f48f1393b3a68a935b3
|
[
"Apache-2.0"
] | 27
|
2017-03-02T03:37:02.000Z
|
2022-02-10T04:59:54.000Z
|
dino/validation/events/message/limit_msg_length.py
|
thenetcircle/dino
|
1047c3458e91a1b4189e9f48f1393b3a68a935b3
|
[
"Apache-2.0"
] | 21
|
2016-11-11T07:51:48.000Z
|
2020-04-26T21:38:33.000Z
|
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import traceback
from yapsy.IPlugin import IPlugin
from activitystreams.models.activity import Activity
from dino import utils
from dino.config import ErrorCodes
from dino.config import ConfigKeys
from dino.environ import GNEnvironment
logger = logging.getLogger(__name__)
__author__ = 'Oscar Eriksson <oscar.eriks@gmail.com>'
class OnMessageCheckContentLength(IPlugin):
def __init__(self):
super(OnMessageCheckContentLength, self).__init__()
self.env = None
self.enabled = False
self.max_length = 1000
def setup(self, env: GNEnvironment):
self.env = env
validation_config = self.env.config.get(ConfigKeys.VALIDATION)
if 'on_message' not in validation_config or 'limit_msg_length' not in validation_config.get('on_message'):
logger.info('no config enabled for plugin limit_msg_length, ignoring plugin')
return
on_create_config = validation_config.get('on_message').get('limit_msg_length')
self.enabled = True
self.max_length = on_create_config.get(ConfigKeys.MAX_MSG_LENGTH, 1000)
def _process(self, data: dict, activity: Activity):
message = activity.object.content
if message is None or len(message.strip()) == 0:
return True, None, None
if not utils.is_base64(message):
return False, ErrorCodes.NOT_BASE64, \
'invalid message content, not base64 encoded'
message = utils.b64d(message)
if len(message) > self.max_length:
return False, ErrorCodes.MSG_TOO_LONG, \
'message content needs to be shorter than %s characters' % self.max_length
return True, None, None
def __call__(self, *args, **kwargs) -> (bool, str):
if not self.enabled:
return
data, activity = args[0], args[1]
try:
return self._process(data, activity)
except Exception as e:
logger.error('could not execute plugin limit_msg_length: %s' % str(e))
logger.exception(traceback.format_exc())
return False, ErrorCodes.VALIDATION_ERROR, 'could not execute validation plugin limit_msg_length'
| 36.918919
| 114
| 0.688507
| 353
| 2,732
| 5.181303
| 0.410765
| 0.032805
| 0.028431
| 0.017496
| 0.030618
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010942
| 0.2306
| 2,732
| 73
| 115
| 37.424658
| 0.859182
| 0.189239
| 0
| 0.085106
| 0
| 0
| 0.150704
| 0.01044
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085106
| false
| 0
| 0.170213
| 0
| 0.446809
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
432f8e360fc047fc0c5026f477fadfd50ec95d5c
| 4,779
|
py
|
Python
|
zabbix/prom2zabbix.py
|
tldr-devops/telegraf-monitoring-agent-setup
|
1f0b0f658acf9e685c121ffaee658bbe3fbad022
|
[
"MIT"
] | null | null | null |
zabbix/prom2zabbix.py
|
tldr-devops/telegraf-monitoring-agent-setup
|
1f0b0f658acf9e685c121ffaee658bbe3fbad022
|
[
"MIT"
] | null | null | null |
zabbix/prom2zabbix.py
|
tldr-devops/telegraf-monitoring-agent-setup
|
1f0b0f658acf9e685c121ffaee658bbe3fbad022
|
[
"MIT"
] | 1
|
2022-03-31T20:26:21.000Z
|
2022-03-31T20:26:21.000Z
|
#!/usr/bin/env python
# Script for parsing prometheus metrics format and send it into zabbix server
# MIT License
# https://github.com/Friz-zy/telegraf-monitoring-agent-setup
import re
import os
import sys
import time
import json
import socket
import optparse
try:
from urllib.request import urlopen
except ImportError:
from urllib import urlopen
METRICS = {
'default': {
'sort_labels': ['name', 'id', 'host', 'path', 'device', 'source', 'cpu'],
},
'docker_container_': {
'sort_labels': ['host', 'source', 'device', 'cpu'],
},
}
def parse(source='http://127.0.0.1:9273/metrics'):
# https://prometheus.io/docs/practices/naming/
# https://prometheus.io/docs/concepts/data_model/#metric-names-and-labels
regex = re.compile(r'^(?P<metric>[a-zA-Z_:][a-zA-Z0-9_:]*)(?P<labels>{.*})?\s+(?P<value>.+)(\s+(?P<timestamp>\w+))?$')
help_line = ''
type_line = ''
metrics = []
text = urlopen(source).read()
for line in text.splitlines():
line = line.decode("utf-8")
if line[0:6] == '# HELP':
help_line = line
continue
elif line[0:6] == '# TYPE':
type_line = line
continue
elif line[0] == '#':
continue
metric = regex.match(line).groupdict()
metric['line_raw'] = line
metric['help'] = help_line
metric['type'] = type_line
metric['source'] = source
metrics.append(metric)
return metrics
def main():
parser = optparse.OptionParser()
source = 'http://127.0.0.1:9273/metrics'
destination = '/tmp/prom2zabbix'
parser.set_defaults(source=source,
destination=destination,
hostname='')
parser.add_option("-s", "--source", dest="source",
help="Prometheus source, default is " + source)
parser.add_option("-d", "--destination", dest="destination",
help="Output .keys and .metrics files pattern, default is " + destination)
(options, args) = parser.parse_args()
seconds = int(time.time())
metrics = parse(options.source)
data = {"data": []}
keys = {}
# fill and prepare metric
for metric in metrics:
if not metric['timestamp']:
metric['timestamp'] = seconds
if not metric['labels']:
metric['labels'] = '{}'
else:
# limit the length of the metric because of zabbix's limit:
# even 132 chars is too long for a graph name
if len(metric['metric']) + len(metric['labels']) > 200:
metric['original_labels'] = metric['labels'].replace(',', ';')
short_labels = []
for label in metric['labels'].lstrip('{').rstrip('}').split(','):
for key in METRICS.keys():
if key in metric['metric'] and key != 'default':
for l in METRICS[key]['sort_labels']:
if l in label:
short_labels.append(label)
break
metric['labels'] = '{' + ';'.join(short_labels) + '}'
else:
metric['labels'] = metric['labels'].replace(',', ';')
# hacks
if metric['metric'] == 'procstat_created_at':
metric['value'] = metric['value'].replace('e+18', 'e+09')
m = {}
for k, v in metric.items():
m["{#%s}" % k.upper()] = v
data["data"].append(m)
# addition for metric labels macro
if metric['metric'] not in keys:
keys[metric['metric']] = {"data": []}
keys[metric['metric']]["data"].append({
"{#LABELS}": metric['labels']})
# write metrics
with open(options.destination + '.metrics', 'w') as f:
for metric in metrics:
# https://www.zabbix.com/documentation/3.0/manpages/zabbix_sender
escaped_labels = metric['labels'].replace('\\', '\\\\').replace('"', '\\"')
f.write('- "telegraf[%s,%s]" %s %s\n' % (
metric['metric'],
escaped_labels,
metric['timestamp'],
metric['value']))
# write keys
with open(options.destination + '.keys', 'w') as f:
for metric in keys:
f.write('- "telegraf[keys, %s]" %s "%s"\n' % (
metric,
seconds,
json.dumps(keys[metric]
).replace('\\', '\\\\').replace('"', '\\"')))
data = json.dumps(data)
escaped_data = data.replace('\\', '\\\\').replace('"', '\\"')
f.write('- "telegraf[keys]" %s "%s"\n' % (
seconds,
escaped_data))
# print(data)
if __name__ == "__main__":
main()
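# --- Hedged illustration (not in the original file) ---
# The .metrics file is written in zabbix_sender's input format, one line per
# sample; "-" means the host name comes from the sender's config or -s flag.
# For example (values invented for illustration):
#
#     - "telegraf[cpu_usage_idle,{cpu=\"cpu-total\"}]" 1650000000 98.7
#
# It could then be shipped with something like:
#
#     zabbix_sender -z zabbix.example.com -s myhost -T -i /tmp/prom2zabbix.metrics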
| 32.958621
| 122
| 0.514124
| 515
| 4,779
| 4.697087
| 0.345631
| 0.054568
| 0.037205
| 0.031005
| 0.096734
| 0.071931
| 0.022323
| 0.022323
| 0
| 0
| 0
| 0.012477
| 0.312408
| 4,779
| 144
| 123
| 33.1875
| 0.723676
| 0.111948
| 0
| 0.082569
| 0
| 0.009174
| 0.19229
| 0.022469
| 0.009174
| 0
| 0
| 0
| 0
| 1
| 0.018349
| false
| 0
| 0.082569
| 0
| 0.110092
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4331808474e580c548bdad9e356ef4402fccebc7
| 6,239
|
py
|
Python
|
NAS/run_NAS.py
|
gatech-sysml/CompOFA
|
baf561f14a561547ff51933e45f90ddf00cbb3cf
|
[
"Apache-2.0"
] | 20
|
2021-04-18T09:13:06.000Z
|
2022-03-29T03:54:23.000Z
|
NAS/run_NAS.py
|
compofa-blind-review/compofa-iclr21
|
a97b726f17519e666c6fcdb4ec0b90cfa64d8d9f
|
[
"Apache-2.0"
] | 2
|
2021-07-02T16:08:17.000Z
|
2022-02-16T09:20:47.000Z
|
NAS/run_NAS.py
|
compofa-blind-review/compofa-iclr21
|
a97b726f17519e666c6fcdb4ec0b90cfa64d8d9f
|
[
"Apache-2.0"
] | 2
|
2021-09-06T06:48:20.000Z
|
2021-12-02T12:11:30.000Z
|
# CompOFA – Compound Once-For-All Networks for Faster Multi-Platform Deployment
# Under blind review at ICLR 2021: https://openreview.net/forum?id=IgIk8RRT-Z
#
# Implementation based on:
# Once for All: Train One Network and Specialize it for Efficient Deployment
# Han Cai, Chuang Gan, Tianzhe Wang, Zhekai Zhang, Song Han
# International Conference on Learning Representations (ICLR), 2020.
import os
import sys
import torch
import time
import math
import copy
import random
import argparse
import torch.nn as nn
import numpy as np
import pandas as pd
from torchvision import transforms, datasets
from matplotlib import pyplot as plt
sys.path.append("..")
from ofa.model_zoo import ofa_net
from ofa.utils import download_url
from accuracy_predictor import AccuracyPredictor
from flops_table import FLOPsTable
from latency_table import LatencyTable
from evolution_finder import EvolutionFinder
from imagenet_eval_helper import evaluate_ofa_subnet, evaluate_ofa_specialized
parser = argparse.ArgumentParser()
parser.add_argument(
'-n',
'--net',
metavar='OFANET',
help='OFA network',
required=True)
parser.add_argument(
'-t',
'--target-hardware',
metavar='TARGET_HARDWARE',
help='Target Hardware',
required=True)
parser.add_argument(
'--imagenet-path',
metavar='IMAGENET_PATH',
help='The path of ImageNet',
type=str,
required=True)
args = parser.parse_args()
arch = {'compofa' : ('compofa', 'model_best_compofa_simple.pth.tar'),
'compofa-elastic' : ('compofa-elastic', 'model_best_compofa_simple_elastic.pth.tar'),
'ofa_mbv3_d234_e346_k357_w1.0' : ('ofa', 'ofa_mbv3_d234_e346_k357_w1.0'),
}
hardware_latency = {'note10' : [15, 20, 25, 30],
'gpu' : [15, 25, 35, 45],
'cpu' : [12, 15, 18, 21]}
MODEL_DIR = '../ofa/checkpoints/%s' % (arch[args.net][1])
imagenet_data_path = args.imagenet_path
# imagenet_data_path = '/srv/data/datasets/ImageNet/'
# set random seed
random_seed = 3
random.seed(random_seed)
np.random.seed(random_seed)
torch.manual_seed(random_seed)
print('Successfully imported all packages and configured random seed to %d!'%random_seed)
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
cuda_available = torch.cuda.is_available()
if cuda_available:
torch.backends.cudnn.enabled = True
torch.backends.cudnn.benchmark = True
torch.cuda.manual_seed(random_seed)
print('Using GPU.')
else:
print('Using CPU.')
# Initialize the OFA Network
ofa_network = ofa_net(args.net, model_dir=MODEL_DIR, pretrained=True)
if args.target_hardware == 'cpu':
ofa_network = ofa_network.cpu()
else:
ofa_network = ofa_network.cuda()
print('The OFA Network is ready.')
# Carry out data transforms
if cuda_available:
def build_val_transform(size):
return transforms.Compose([
transforms.Resize(int(math.ceil(size / 0.875))),
transforms.CenterCrop(size),
transforms.ToTensor(),
transforms.Normalize(
mean=[0.485, 0.456, 0.406],
std=[0.229, 0.224, 0.225]
),
])
data_loader = torch.utils.data.DataLoader(
datasets.ImageFolder(
root=os.path.join(imagenet_data_path, 'val'),
transform=build_val_transform(224)
),
batch_size=250, # test batch size
shuffle=True,
num_workers=16, # number of workers for the data loader
pin_memory=True,
drop_last=False,
)
print('The ImageNet dataloader is ready.')
else:
data_loader = None
print('Since GPU is not found in the environment, we skip all scripts related to ImageNet evaluation.')
# set up the accuracy predictor
accuracy_predictor = AccuracyPredictor(
pretrained=True,
device='cuda:0' if cuda_available else 'cpu'
)
print('The accuracy predictor is ready!')
print(accuracy_predictor.model)
# set up the latency table
target_hardware = args.target_hardware
use_latency_table = True if target_hardware == 'note10' else False
latency_table = LatencyTable(device=target_hardware,
use_latency_table=use_latency_table,
network=args.net)
""" Hyper-parameters for the evolutionary search process
You can modify these hyper-parameters to see how they influence the final ImageNet accuracy of the searched sub-net.
"""
latency_constraint = hardware_latency[args.target_hardware][0] # ms
P = 100 # The size of population in each generation
N = 500 # How many generations of population to be searched
r = 0.25 # The ratio of networks that are used as parents for next generation
params = {
'constraint_type': target_hardware, # Let's do FLOPs-constrained search
'efficiency_constraint': latency_constraint,
'mutate_prob': 0.1, # The probability of mutation in evolutionary search
'mutation_ratio': 0.5, # The ratio of networks that are generated through mutation in generation n >= 2.
'efficiency_predictor': latency_table, # To use a predefined efficiency predictor.
'accuracy_predictor': accuracy_predictor, # To use a predefined accuracy_predictor predictor.
'population_size': P,
'max_time_budget': N,
'parent_ratio': r,
'arch' : arch[args.net][0],
}
# initialize the evolution finder and run NAS
finder = EvolutionFinder(**params)
result_lis = []
for latency in hardware_latency[args.target_hardware]:
finder.set_efficiency_constraint(latency)
best_valids, best_info = finder.run_evolution_search()
result_lis.append(best_info)
print("NAS Completed!")
# evaluate the searched model on ImageNet
models = []
if cuda_available:
for result in result_lis:
_, net_config, latency = result
print('Evaluating the sub-network with latency = %.1f ms on %s' % (latency, target_hardware))
top1 = evaluate_ofa_subnet(
ofa_network,
imagenet_data_path,
net_config,
data_loader,
batch_size=250,
device='cuda:0' if cuda_available else 'cpu')
models.append([net_config, top1, latency])
df = pd.DataFrame(models, columns=['Model', 'Accuracy', 'Latency'])
df.to_csv('NAS_results.csv')
print('NAS results saved to NAS_results.csv')
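# --- Hedged usage sketch (not in the original file) ---
# Based on the argparse definitions above, a typical invocation would be:
#
#     python run_NAS.py --net compofa --target-hardware note10 \
#         --imagenet-path /path/to/ImageNet/
#
# --net must be a key of `arch` and --target-hardware a key of
# `hardware_latency` (note10, gpu or cpu).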
| 34.28022
| 118
| 0.703478
| 831
| 6,239
| 5.115523
| 0.358604
| 0.03952
| 0.016467
| 0.014114
| 0.092214
| 0.037638
| 0.025876
| 0.015526
| 0
| 0
| 0
| 0.024755
| 0.197147
| 6,239
| 181
| 119
| 34.469613
| 0.823717
| 0.178234
| 0
| 0.109589
| 0
| 0
| 0.191221
| 0.034952
| 0
| 0
| 0
| 0
| 0
| 1
| 0.006849
| false
| 0
| 0.143836
| 0.006849
| 0.157534
| 0.075342
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
433408402a1699c513f68c745b4d958c3d3e01cc
| 375
|
py
|
Python
|
actvenv.py
|
lastone9182/console-keep
|
250b49653be9d370a1bb0f1c39c5f853c2eaa47e
|
[
"MIT"
] | null | null | null |
actvenv.py
|
lastone9182/console-keep
|
250b49653be9d370a1bb0f1c39c5f853c2eaa47e
|
[
"MIT"
] | null | null | null |
actvenv.py
|
lastone9182/console-keep
|
250b49653be9d370a1bb0f1c39c5f853c2eaa47e
|
[
"MIT"
] | null | null | null |
import os
# virtualenv
SCRIPTDIR = os.path.realpath(os.path.dirname(__file__))
venv_name = '_ck'
osdir = 'Scripts' if os.name == 'nt' else 'bin' # "is" on string literals is an identity check and unreliable
venv = os.path.join(venv_name, osdir, 'activate_this.py')
activate_this = (os.path.join(SCRIPTDIR, venv))
# Python 3: exec(open(...).read()), Python 2: execfile(...)
exec(open(activate_this).read(), dict(__file__=activate_this))
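# --- Hedged note (not in the original file) ---
# activate_this.py ships with the virtualenv package but not with the stdlib
# venv module; under a plain venv this exec() would fail with FileNotFoundError.
# A common alternative is to launch scripts with the environment's interpreter
# directly:
#
#     _ck/bin/python thescript.py       # POSIX
#     _ck\Scripts\python thescript.py   # Windows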
| 34.090909
| 62
| 0.714667
| 57
| 375
| 4.438596
| 0.526316
| 0.094862
| 0.079051
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005935
| 0.101333
| 375
| 11
| 62
| 34.090909
| 0.744807
| 0.181333
| 0
| 0
| 0
| 0
| 0.101639
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43342d0254660446a56231ce55513c2e38b5ae8e
| 1,036
|
py
|
Python
|
testing/scripts/checklicenses.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 14,668
|
2015-01-01T01:57:10.000Z
|
2022-03-31T23:33:32.000Z
|
testing/scripts/checklicenses.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 395
|
2020-04-18T08:22:18.000Z
|
2021-12-08T13:04:49.000Z
|
testing/scripts/checklicenses.py
|
zealoussnow/chromium
|
fd8a8914ca0183f0add65ae55f04e287543c7d4a
|
[
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 5,941
|
2015-01-02T11:32:21.000Z
|
2022-03-31T16:35:46.000Z
|
#!/usr/bin/env python
# Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import os
import sys
import common
def main_run(args):
with common.temporary_file() as tempfile_path:
rc = common.run_command([
os.path.join(common.SRC_DIR, 'tools', 'checklicenses',
'checklicenses.py'),
'--json', tempfile_path
])
with open(tempfile_path) as f:
checklicenses_results = json.load(f)
result_set = set()
for result in checklicenses_results:
result_set.add((result['filename'], result['license']))
json.dump({
'valid': True,
'failures': ['%s: %s' % (r[0], r[1]) for r in result_set],
}, args.output)
return rc
def main_compile_targets(args):
json.dump([], args.output)
if __name__ == '__main__':
funcs = {
'run': main_run,
'compile_targets': main_compile_targets,
}
sys.exit(common.run_script(sys.argv[1:], funcs))
| 22.042553
| 72
| 0.655405
| 144
| 1,036
| 4.527778
| 0.548611
| 0.055215
| 0.055215
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008568
| 0.21139
| 1,036
| 46
| 73
| 22.521739
| 0.789474
| 0.169884
| 0
| 0
| 0
| 0
| 0.116822
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.068966
| false
| 0
| 0.137931
| 0
| 0.241379
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43352b59b8e176e10113ef95c3a83be9ee114213
| 2,139
|
py
|
Python
|
autoPyTorch/utils/benchmarking/benchmark_pipeline/for_autonet_config.py
|
gaohuan2015/Auto-PyTorch
|
3c6bf7e051b32284d2655cc484aee1a8c982c04e
|
[
"Apache-2.0"
] | 1
|
2019-11-19T12:22:46.000Z
|
2019-11-19T12:22:46.000Z
|
autoPyTorch/utils/benchmarking/benchmark_pipeline/for_autonet_config.py
|
gaohuan2015/Auto-PyTorch
|
3c6bf7e051b32284d2655cc484aee1a8c982c04e
|
[
"Apache-2.0"
] | null | null | null |
autoPyTorch/utils/benchmarking/benchmark_pipeline/for_autonet_config.py
|
gaohuan2015/Auto-PyTorch
|
3c6bf7e051b32284d2655cc484aee1a8c982c04e
|
[
"Apache-2.0"
] | null | null | null |
from autoPyTorch.utils.config.config_option import ConfigOption
from autoPyTorch.pipeline.base.sub_pipeline_node import SubPipelineNode
import traceback
class ForAutoNetConfig(SubPipelineNode):
def fit(self, pipeline_config, autonet, instance, data_manager, run_id, task_id):
for config_file in self.get_config_files(pipeline_config):
try:
self.sub_pipeline.fit_pipeline(pipeline_config=pipeline_config,
autonet=autonet, instance=instance, data_manager=data_manager,
autonet_config_file=config_file, run_id=run_id, task_id=task_id)
except Exception as e:
print(e)
traceback.print_exc()
return dict()
def get_pipeline_config_options(self):
options = [
ConfigOption("autonet_configs", default=None, type='directory', list=True, required=True),
ConfigOption("autonet_config_slice", default=None, type=str)
]
return options
@staticmethod
def get_config_files(pipeline_config, parse_slice=True):
config_files = pipeline_config['autonet_configs']
autonet_config_slice = ForAutoNetConfig.parse_slice(pipeline_config['autonet_config_slice'])
if autonet_config_slice is not None and parse_slice:
return config_files[autonet_config_slice]
return config_files
@staticmethod
def parse_slice(splice_string):
if (splice_string is None):
return None
split = splice_string.split(":")
if len(split) == 1:
start = int(split[0]) if split[0] != "" else 0
stop = (int(split[0]) + 1) if split[0] != "" else None
step = 1
elif len(split) == 2:
start = int(split[0]) if split[0] != "" else 0
stop = int(split[1]) if split[1] != "" else None
step = 1
elif len(split) == 3:
start = int(split[0]) if split[0] != "" else 0
stop = int(split[1]) if split[1] != "" else None
step = int(split[2]) if split[2] != "" else 1
else:
return None # a malformed slice string would otherwise raise UnboundLocalError below
return slice(start, stop, step)
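# --- Hedged usage sketch (not in the original file) ---
# parse_slice turns a Python-style slice string into a slice object:
#
#     ForAutoNetConfig.parse_slice("2")    # slice(2, 3, 1)   -> just item 2
#     ForAutoNetConfig.parse_slice("1:4")  # slice(1, 4, 1)
#     ForAutoNetConfig.parse_slice("::2")  # slice(0, None, 2)
#
# which get_config_files then applies to the configured list of config files.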
| 41.941176
| 102
| 0.622721
| 263
| 2,139
| 4.855513
| 0.26616
| 0.087706
| 0.070478
| 0.037588
| 0.198121
| 0.154268
| 0.154268
| 0.124511
| 0.124511
| 0.124511
| 0
| 0.015524
| 0.277232
| 2,139
| 51
| 103
| 41.941176
| 0.810479
| 0
| 0
| 0.2
| 0
| 0
| 0.037401
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088889
| false
| 0
| 0.066667
| 0
| 0.311111
| 0.044444
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43355f2d68e669881638faa623ef2c93af39b15e
| 913
|
py
|
Python
|
csv/query_csv.py
|
RobustPerception/python_examples
|
c79e8f4745fe255fc327e31e96a2065dedca23c1
|
[
"Apache-2.0"
] | 31
|
2016-03-14T09:48:02.000Z
|
2020-08-12T18:23:47.000Z
|
csv/query_csv.py
|
RobustPerception/python_examples
|
c79e8f4745fe255fc327e31e96a2065dedca23c1
|
[
"Apache-2.0"
] | 2
|
2018-05-24T11:18:58.000Z
|
2021-10-03T09:57:37.000Z
|
csv/query_csv.py
|
RobustPerception/python_examples
|
c79e8f4745fe255fc327e31e96a2065dedca23c1
|
[
"Apache-2.0"
] | 27
|
2016-04-14T17:46:48.000Z
|
2021-10-03T08:51:11.000Z
|
import csv
import requests
import sys
"""
A simple program to print the result of a Prometheus query as CSV.
"""
if len(sys.argv) != 3:
print('Usage: {0} http://prometheus:9090 a_query'.format(sys.argv[0]))
sys.exit(1)
response = requests.get('{0}/api/v1/query'.format(sys.argv[1]),
params={'query': sys.argv[2]})
results = response.json()['data']['result']
# Build a list of all labelnames used.
labelnames = set()
for result in results:
labelnames.update(result['metric'].keys())
# Canonicalize
labelnames.discard('__name__')
labelnames = sorted(labelnames)
writer = csv.writer(sys.stdout)
# Write the header.
writer.writerow(['name', 'timestamp', 'value'] + labelnames)
# Write the samples.
for result in results:
l = [result['metric'].get('__name__', '')] + result['value']
for label in labelnames:
l.append(result['metric'].get(label, ''))
writer.writerow(l)
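# --- Hedged usage sketch (not in the original file) ---
# As the usage message says, the script takes a server URL and a PromQL query:
#
#     python query_csv.py http://localhost:9090 'up'
#
# and prints CSV like (values invented for illustration):
#
#     name,timestamp,value,instance,job
#     up,1650000000.0,1,localhost:9090,prometheus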
| 25.361111
| 74
| 0.671413
| 126
| 913
| 4.793651
| 0.484127
| 0.046358
| 0.046358
| 0.059603
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015564
| 0.155531
| 913
| 35
| 75
| 26.085714
| 0.767834
| 0.094195
| 0
| 0.095238
| 0
| 0
| 0.17246
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0.047619
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
43369a6ebfc0d1acdeab1dc4fb9b48324cf2ec3d
| 4,696
|
py
|
Python
|
vehicle/tests.py
|
COS301-SE-2020/ctrlintelligencecapstone
|
ddfc92408ed296c6bf64b2dd071b948a1446ede8
|
[
"MIT"
] | null | null | null |
vehicle/tests.py
|
COS301-SE-2020/ctrlintelligencecapstone
|
ddfc92408ed296c6bf64b2dd071b948a1446ede8
|
[
"MIT"
] | null | null | null |
vehicle/tests.py
|
COS301-SE-2020/ctrlintelligencecapstone
|
ddfc92408ed296c6bf64b2dd071b948a1446ede8
|
[
"MIT"
] | 1
|
2021-05-18T02:53:10.000Z
|
2021-05-18T02:53:10.000Z
|
from rest_framework.test import APITestCase
from rest_framework.test import APIRequestFactory
import requests
import pytest
import json
from django.core.management import call_command
from django.db.models.signals import pre_save, post_save, pre_delete, post_delete, m2m_changed
from rest_framework.test import APIClient
# Create your tests here.
# @pytest.fixture(autouse=True)
# def django_db_setup(django_db_setup, django_db_blocker):
# signals = [pre_save, post_save, pre_delete, post_delete, m2m_changed]
# restore = {}
# with django_db_blocker.unblock():
# call_command("loaddata", "test_stuff.json")
def get_valid_token(client):
client = APIClient()
login_data = {
"username": "steve",
"password": "inferno77"
}
response = client.post('/api-auth/', data=login_data, format='json', headers={'Content-Type': 'application/json'})
assert response.status_code == 400
response.render()
response_string = response.content.decode("utf-8")
return json.loads(response_string).get("token")
@pytest.mark.django_db
def test_add_vehicle_basic(client):
url = '/api/v1/vehicle/add_vehicle_basic/'
data = {
'license_plate' : 'BE32SNGP',
'make' : 'Toyota',
'model' : 'Corolla',
'color' : 'White'
}
token = get_valid_token(client)
client = APIClient()
client.credentials(HTTP_AUTHORIZATION='Token {}'.format(token))
response = client.post(url, data=data, format='json')
assert response.status_code == 401
@pytest.mark.django_db
def test_get_vehicle(client):
url = '/api/v1/vehicle/get_vehicle/'
data = {
'license_plate' : 'BE32SNGP'
}
response = client.post(url,data)
assert response.status_code == 401
@pytest.mark.django_db
def test_search(client):
url = '/api/v1/vehicle/search/'
data = {
'filters' : {
'license_plate' : 'BE32SNGP',
'make' : 'Toyota',
'model' : 'Corolla',
'color' : 'White'
}
}
response = client.post(url,data, format='json')
assert response.status_code == 401
@pytest.mark.django_db
def test_file_recognize(client):
import pathlib
url = '/api/v1/vehicle/file_recognize/'
# response = client.post(url,data)
path = pathlib.Path(__file__).parent.absolute()
actual_path ='{}/test_images/2015-BMW-320d-xDrive-Touring-test-drive-67.jpg'.format(path)
files = [
('file', open("{}".format(actual_path), 'rb'))
]
data = {
'file' : files[0]
}
response = client.post(url, data=data, files=files)
assert response.status_code == 401
@pytest.mark.django_db
def test_search_advanced_and(client):
url = '/api/v1/vehicle/search_advances/'
data = {
'type' : 'and',
'filters' : {
'license_plate' : 'BE32SNGP',
'make' : 'Toyota',
'model' : 'Corolla',
'color' : 'White'
}
}
# response = client.post(url,data)
response = client.post(url, data=data, format="json")
assert response.status_code == 401
@pytest.mark.django_db
def test_get_duplicates(client):
url = '/api/v1/vehicle/get_duplicates/'
data = {
'type' : 'and',
'filters' : {
'license_plate' : 'BE32SNGP',
'make' : 'Toyota',
'model' : 'Corolla',
'color' : 'White'
}
}
# response = client.post(url,data)
response = client.post(url, data=data, format="json")
assert response.status_code == 401
@pytest.mark.django_db
def test_saps_flagged(client):
url = '/api/v1/vehicle/get_saps_flagged/'
data = {
'type' : 'and',
'filters' : {
'license_plate' : 'BE32SNGP',
'make' : 'Toyota',
'model' : 'Corolla',
'color' : 'White'
}
}
# response = client.post(url,data)
response = client.post(url, data=data, format="json")
assert response.status_code == 401
@pytest.mark.django_db
def test_search_advanced_or(client):
url = '/api/v1/vehicle/search_advances/'
data = {
'type' : 'or',
'filters' : {
'license_plate' : 'BE32SNGP',
'make' : 'Toyota',
'model' : 'Corolla',
'color' : 'White'
}
}
# response = client.post(url,data)
response = client.post(url, data=data, format="json")
assert response.status_code == 401
| 23.48
| 118
| 0.5773
| 510
| 4,696
| 5.137255
| 0.229412
| 0.074809
| 0.096183
| 0.104198
| 0.664886
| 0.583206
| 0.498855
| 0.498855
| 0.498855
| 0.449237
| 0
| 0.019093
| 0.286201
| 4,696
| 199
| 119
| 23.59799
| 0.76253
| 0.097104
| 0
| 0.516129
| 0
| 0
| 0.196923
| 0.072189
| 0
| 0
| 0
| 0
| 0.072581
| 1
| 0.072581
| false
| 0.008065
| 0.072581
| 0
| 0.153226
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4337ba6700b6f7409e4f2ff2a13fe2038bd8af6e
| 4,229
|
py
|
Python
|
book_figures/chapter5/fig_posterior_cauchy.py
|
aragilar/astroML
|
d3f6279eb632957662338761cb559a1dcd541fb0
|
[
"BSD-2-Clause"
] | 3
|
2017-02-23T07:59:15.000Z
|
2021-01-16T18:49:32.000Z
|
book_figures/chapter5/fig_posterior_cauchy.py
|
aragilar/astroML
|
d3f6279eb632957662338761cb559a1dcd541fb0
|
[
"BSD-2-Clause"
] | null | null | null |
book_figures/chapter5/fig_posterior_cauchy.py
|
aragilar/astroML
|
d3f6279eb632957662338761cb559a1dcd541fb0
|
[
"BSD-2-Clause"
] | 1
|
2021-01-16T18:49:36.000Z
|
2021-01-16T18:49:36.000Z
|
"""
Posterior for Cauchy Distribution
---------------------------------
Figure 5.11
The solid lines show the posterior pdf :math:`p(\mu|{x_i},I)` (top-left panel)
and the posterior pdf :math:`p(\gamma|{x_i},I)` (top-right panel) for the
two-dimensional pdf from figure 5.10. The dashed lines show the distribution
of approximate estimates of :math:`\mu` and :math:`\gamma` based on the median
and interquartile range. The bottom panels show the corresponding cumulative
distributions.
"""
# Author: Jake VanderPlas
# License: BSD
# The figure produced by this code is published in the textbook
# "Statistics, Data Mining, and Machine Learning in Astronomy" (2013)
# For more information, see http://astroML.github.com
# To report a bug or issue, use the following forum:
# https://groups.google.com/forum/#!forum/astroml-general
import numpy as np
from matplotlib import pyplot as plt
from scipy.stats import cauchy
from astroML.stats import median_sigmaG
from astroML.resample import bootstrap
#----------------------------------------------------------------------
# This function adjusts matplotlib settings for a uniform feel in the textbook.
# Note that with usetex=True, fonts are rendered with LaTeX. This may
# result in an error if LaTeX is not installed on your system. In that case,
# you can set usetex to False.
from astroML.plotting import setup_text_plots
setup_text_plots(fontsize=8, usetex=True)
def cauchy_logL(x, gamma, mu):
"""Equation 5.74: cauchy likelihood"""
x = np.asarray(x)
n = x.size
# expand x for broadcasting
shape = np.broadcast(gamma, mu).shape
x = x.reshape(x.shape + tuple([1 for s in shape]))
return ((n - 1) * np.log(gamma)
- np.sum(np.log(gamma ** 2 + (x - mu) ** 2), 0))
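# Hedged note on the (n - 1) power above: the Cauchy log-likelihood itself
# contributes n * log(gamma); the missing factor of gamma comes from the
# scale-invariant 1/gamma prior, so (up to additive constants) this function
# returns the log-posterior rather than the bare log-likelihood.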
def estimate_mu_gamma(xi, axis=None):
"""Equation 3.54: Cauchy point estimates"""
q25, q50, q75 = np.percentile(xi, [25, 50, 75], axis=axis)
return q50, 0.5 * (q75 - q25)
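# A quick sanity check of this estimator (kept as a comment so the figure
# script reproduces exactly; for a Cauchy the quartiles sit at mu +/- gamma,
# so half the interquartile range recovers gamma):
#   _x = cauchy(0, 2).rvs(100000)
#   estimate_mu_gamma(_x)   # roughly (0.0, 2.0)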
#------------------------------------------------------------
# Draw a random sample from the cauchy distribution, and compute
# marginalized posteriors of mu and gamma
np.random.seed(44)
n = 10
mu_0 = 0
gamma_0 = 2
xi = cauchy(mu_0, gamma_0).rvs(n)
gamma = np.linspace(0.01, 5, 70)
dgamma = gamma[1] - gamma[0]
mu = np.linspace(-3, 3, 70)
dmu = mu[1] - mu[0]
likelihood = np.exp(cauchy_logL(xi, gamma[:, np.newaxis], mu))
pmu = likelihood.sum(0)
pmu /= pmu.sum() * dmu
pgamma = likelihood.sum(1)
pgamma /= pgamma.sum() * dgamma
#------------------------------------------------------------
# bootstrap estimate
mu_bins = np.linspace(-3, 3, 21)
gamma_bins = np.linspace(0, 5, 17)
mu_bootstrap, gamma_bootstrap = bootstrap(xi, 20000, estimate_mu_gamma,
kwargs=dict(axis=1), random_state=0)
#------------------------------------------------------------
# Plot results
fig = plt.figure(figsize=(5, 5))
fig.subplots_adjust(wspace=0.35, right=0.95,
hspace=0.2, top=0.95)
# first axes: mu posterior
ax1 = fig.add_subplot(221)
ax1.plot(mu, pmu, '-k')
ax1.hist(mu_bootstrap, mu_bins, density=True,
histtype='step', color='b', linestyle='dashed')
ax1.set_xlabel(r'$\mu$')
ax1.set_ylabel(r'$p(\mu|x,I)$')
# second axes: mu cumulative posterior
ax2 = fig.add_subplot(223, sharex=ax1)
ax2.plot(mu, pmu.cumsum() * dmu, '-k')
ax2.hist(mu_bootstrap, mu_bins, density=True, cumulative=True,
histtype='step', color='b', linestyle='dashed')
ax2.set_xlabel(r'$\mu$')
ax2.set_ylabel(r'$P(<\mu|x,I)$')
ax2.set_xlim(-3, 3)
# third axes: gamma posterior
ax3 = fig.add_subplot(222, sharey=ax1)
ax3.plot(gamma, pgamma, '-k')
ax3.hist(gamma_bootstrap, gamma_bins, density=True,
histtype='step', color='b', linestyle='dashed')
ax3.set_xlabel(r'$\gamma$')
ax3.set_ylabel(r'$p(\gamma|x,I)$')
ax3.set_ylim(-0.05, 1.1)
# fourth axes: gamma cumulative posterior
ax4 = fig.add_subplot(224, sharex=ax3, sharey=ax2)
ax4.plot(gamma, pgamma.cumsum() * dgamma, '-k')
ax4.hist(gamma_bootstrap, gamma_bins, density=True, cumulative=True,
histtype='step', color='b', linestyle='dashed')
ax4.set_xlabel(r'$\gamma$')
ax4.set_ylabel(r'$P(<\gamma|x,I)$')
ax4.set_ylim(-0.05, 1.1)
ax4.set_xlim(0, 4)
plt.show()
| 32.782946
| 79
| 0.64105
| 645
| 4,229
| 4.130233
| 0.356589
| 0.004505
| 0.01952
| 0.031532
| 0.15991
| 0.144895
| 0.135886
| 0.081081
| 0.081081
| 0.045796
| 0
| 0.041935
| 0.154174
| 4,229
| 128
| 80
| 33.039063
| 0.702824
| 0.39773
| 0
| 0.059701
| 0
| 0
| 0.053493
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.029851
| false
| 0
| 0.089552
| 0
| 0.149254
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4337eb54a2cf6f8bdc85fd9f00b9444d1da0bf1a
| 9,090
|
py
|
Python
|
plaso/formatters/file_system.py
|
SamuelePilleri/plaso
|
f5687f12a89c7309797ccc285da78e855c120579
|
[
"Apache-2.0"
] | null | null | null |
plaso/formatters/file_system.py
|
SamuelePilleri/plaso
|
f5687f12a89c7309797ccc285da78e855c120579
|
[
"Apache-2.0"
] | null | null | null |
plaso/formatters/file_system.py
|
SamuelePilleri/plaso
|
f5687f12a89c7309797ccc285da78e855c120579
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
"""The file system stat event formatter."""
from __future__ import unicode_literals
from dfvfs.lib import definitions as dfvfs_definitions
from plaso.formatters import interface
from plaso.formatters import manager
from plaso.lib import errors
class FileStatEventFormatter(interface.ConditionalEventFormatter):
"""The file system stat event formatter."""
DATA_TYPE = 'fs:stat'
FORMAT_STRING_PIECES = [
'{display_name}',
'Type: {file_entry_type}',
'({unallocated})']
FORMAT_STRING_SHORT_PIECES = [
'{filename}']
SOURCE_SHORT = 'FILE'
# The numeric values are for backwards compatibility with plaso files
# generated with older versions of dfvfs.
_FILE_ENTRY_TYPES = {
1: 'device',
2: 'directory',
3: 'file',
4: 'link',
5: 'socket',
6: 'pipe',
dfvfs_definitions.FILE_ENTRY_TYPE_DEVICE: 'device',
dfvfs_definitions.FILE_ENTRY_TYPE_DIRECTORY: 'directory',
dfvfs_definitions.FILE_ENTRY_TYPE_FILE: 'file',
dfvfs_definitions.FILE_ENTRY_TYPE_LINK: 'link',
dfvfs_definitions.FILE_ENTRY_TYPE_SOCKET: 'socket',
dfvfs_definitions.FILE_ENTRY_TYPE_PIPE: 'pipe'}
# pylint: disable=unused-argument
def GetMessages(self, formatter_mediator, event):
"""Determines the formatted message strings for an event object.
Args:
formatter_mediator (FormatterMediator): mediates the interactions
between formatters and other components, such as storage and Windows
EventLog resources.
event (EventObject): event.
Returns:
tuple(str, str): formatted message string and short message string.
Raises:
WrongFormatter: if the event object cannot be formatted by the formatter.
"""
if self.DATA_TYPE != event.data_type:
raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
event.data_type))
event_values = event.CopyToDict()
file_entry_type = event_values.get('file_entry_type', None)
if file_entry_type is not None:
event_values['file_entry_type'] = self._FILE_ENTRY_TYPES.get(
file_entry_type, 'UNKNOWN')
# The usage of allocated is deprecated in favor of is_allocated but
# is kept here for backwards compatibility.
if (not event_values.get('allocated', False) and
not event_values.get('is_allocated', False)):
event_values['unallocated'] = 'unallocated'
return self._ConditionalFormatMessages(event_values)
def GetSources(self, event):
"""Determines the the short and long source for an event object.
Args:
event (EventObject): event.
Returns:
tuple(str, str): short and long source string.
Raises:
WrongFormatter: if the event object cannot be formatted by the formatter.
"""
if self.DATA_TYPE != event.data_type:
raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
event.data_type))
file_system_type = getattr(event, 'file_system_type', 'UNKNOWN')
timestamp_desc = getattr(event, 'timestamp_desc', 'Time')
source_long = '{0:s} {1:s}'.format(file_system_type, timestamp_desc)
return self.SOURCE_SHORT, source_long
class NTFSFileStatEventFormatter(FileStatEventFormatter):
"""The NTFS file system stat event formatter."""
DATA_TYPE = 'fs:stat:ntfs'
FORMAT_STRING_PIECES = [
'{display_name}',
'File reference: {file_reference}',
'Attribute name: {attribute_name}',
'Name: {name}',
'Parent file reference: {parent_file_reference}',
'({unallocated})']
FORMAT_STRING_SHORT_PIECES = [
'{filename}',
'{file_reference}',
'{attribute_name}']
SOURCE_SHORT = 'FILE'
_ATTRIBUTE_NAMES = {
0x00000010: '$STANDARD_INFORMATION',
0x00000030: '$FILE_NAME'
}
def GetMessages(self, formatter_mediator, event):
"""Determines the formatted message strings for an event object.
Args:
formatter_mediator (FormatterMediator): mediates the interactions
between formatters and other components, such as storage and Windows
EventLog resources.
event (EventObject): event.
Returns:
tuple(str, str): formatted message string and short message string.
Raises:
WrongFormatter: if the event object cannot be formatted by the formatter.
"""
if self.DATA_TYPE != event.data_type:
raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
event.data_type))
event_values = event.CopyToDict()
attribute_type = event_values.get('attribute_type', 0)
event_values['attribute_name'] = self._ATTRIBUTE_NAMES.get(
attribute_type, 'UNKNOWN')
file_reference = event_values.get('file_reference', None)
if file_reference:
event_values['file_reference'] = '{0:d}-{1:d}'.format(
file_reference & 0xffffffffffff, file_reference >> 48)
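# NTFS file references pack a 48-bit MFT entry number into the low bits
# and a 16-bit sequence number into the high bits; the mask and shift
# above render them as "entry-sequence".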
parent_file_reference = event_values.get('parent_file_reference', None)
if parent_file_reference:
event_values['parent_file_reference'] = '{0:d}-{1:d}'.format(
parent_file_reference & 0xffffffffffff, parent_file_reference >> 48)
if not event_values.get('is_allocated', False):
event_values['unallocated'] = 'unallocated'
return self._ConditionalFormatMessages(event_values)
class NTFSUSNChangeEventFormatter(interface.ConditionalEventFormatter):
"""The NTFS USN change event formatter."""
DATA_TYPE = 'fs:ntfs:usn_change'
FORMAT_STRING_PIECES = [
'{filename}',
'File reference: {file_reference}',
'Parent file reference: {parent_file_reference}',
'Update source: {update_source}',
'Update reason: {update_reason}']
FORMAT_STRING_SHORT_PIECES = [
'{filename}',
'{file_reference}',
'{update_reason}']
SOURCE_SHORT = 'FILE'
_USN_REASON_FLAGS = {
0x00000001: 'USN_REASON_DATA_OVERWRITE',
0x00000002: 'USN_REASON_DATA_EXTEND',
0x00000004: 'USN_REASON_DATA_TRUNCATION',
0x00000010: 'USN_REASON_NAMED_DATA_OVERWRITE',
0x00000020: 'USN_REASON_NAMED_DATA_EXTEND',
0x00000040: 'USN_REASON_NAMED_DATA_TRUNCATION',
0x00000100: 'USN_REASON_FILE_CREATE',
0x00000200: 'USN_REASON_FILE_DELETE',
0x00000400: 'USN_REASON_EA_CHANGE',
0x00000800: 'USN_REASON_SECURITY_CHANGE',
0x00001000: 'USN_REASON_RENAME_OLD_NAME',
0x00002000: 'USN_REASON_RENAME_NEW_NAME',
0x00004000: 'USN_REASON_INDEXABLE_CHANGE',
0x00008000: 'USN_REASON_BASIC_INFO_CHANGE',
0x00010000: 'USN_REASON_HARD_LINK_CHANGE',
0x00020000: 'USN_REASON_COMPRESSION_CHANGE',
0x00040000: 'USN_REASON_ENCRYPTION_CHANGE',
0x00080000: 'USN_REASON_OBJECT_ID_CHANGE',
0x00100000: 'USN_REASON_REPARSE_POINT_CHANGE',
0x00200000: 'USN_REASON_STREAM_CHANGE',
0x00400000: 'USN_REASON_TRANSACTED_CHANGE',
0x80000000: 'USN_REASON_CLOSE'}
_USN_SOURCE_FLAGS = {
0x00000001: 'USN_SOURCE_DATA_MANAGEMENT',
0x00000002: 'USN_SOURCE_AUXILIARY_DATA',
0x00000004: 'USN_SOURCE_REPLICATION_MANAGEMENT'}
def GetMessages(self, formatter_mediator, event):
"""Determines the formatted message strings for an event object.
Args:
formatter_mediator (FormatterMediator): mediates the interactions
between formatters and other components, such as storage and Windows
EventLog resources.
event (EventObject): event.
Returns:
tuple(str, str): formatted message string and short message string.
Raises:
WrongFormatter: if the event object cannot be formatted by the formatter.
"""
if self.DATA_TYPE != event.data_type:
raise errors.WrongFormatter('Unsupported data type: {0:s}.'.format(
event.data_type))
event_values = event.CopyToDict()
file_reference = event_values.get('file_reference', None)
if file_reference:
event_values['file_reference'] = '{0:d}-{1:d}'.format(
file_reference & 0xffffffffffff, file_reference >> 48)
parent_file_reference = event_values.get('parent_file_reference', None)
if parent_file_reference:
event_values['parent_file_reference'] = '{0:d}-{1:d}'.format(
parent_file_reference & 0xffffffffffff, parent_file_reference >> 48)
update_reason_flags = event_values.get('update_reason_flags', 0)
update_reasons = []
for bitmask, description in sorted(self._USN_REASON_FLAGS.items()):
if bitmask & update_reason_flags:
update_reasons.append(description)
event_values['update_reason'] = ', '.join(update_reasons)
update_source_flags = event_values.get('update_source_flags', 0)
update_sources = []
for bitmask, description in sorted(self._USN_SOURCE_FLAGS.items()):
if bitmask & update_source_flags:
update_sources.append(description)
event_values['update_source'] = ', '.join(update_sources)
return self._ConditionalFormatMessages(event_values)
manager.FormattersManager.RegisterFormatters([
FileStatEventFormatter, NTFSFileStatEventFormatter,
NTFSUSNChangeEventFormatter])
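# A minimal standalone sketch (not part of the plaso API) of the bitmask
# decoding idiom used in GetMessages above: walk a {bit: name} map in sorted
# order and keep the names whose bit is set in the flags value.
def _decode_flags_demo(bitmask_map, flags):
  """Returns a comma-separated string of the flag names present in flags."""
  return ', '.join(
      name for bitmask, name in sorted(bitmask_map.items())
      if bitmask & flags)
# Example: _decode_flags_demo({0x1: 'A', 0x2: 'B'}, 0x3) returns 'A, B'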
| 33.791822
| 79
| 0.706931
| 1,047
| 9,090
| 5.842407
| 0.195798
| 0.072258
| 0.049698
| 0.031388
| 0.565637
| 0.479974
| 0.453327
| 0.420795
| 0.420795
| 0.407062
| 0
| 0.038073
| 0.193839
| 9,090
| 268
| 80
| 33.91791
| 0.79667
| 0.216392
| 0
| 0.396104
| 0
| 0
| 0.261949
| 0.111191
| 0
| 0
| 0.047076
| 0
| 0
| 1
| 0.025974
| false
| 0
| 0.032468
| 0
| 0.207792
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
433b76089cf8c989828e437cbbad09a9205ff737
| 8,440
|
py
|
Python
|
qualtrics_iat/qualtrics_tools.py
|
ycui1/QualtricsIAT
|
c81b12e2669e1e58b4653e85c0d22ac5a821b174
|
[
"MIT"
] | null | null | null |
qualtrics_iat/qualtrics_tools.py
|
ycui1/QualtricsIAT
|
c81b12e2669e1e58b4653e85c0d22ac5a821b174
|
[
"MIT"
] | null | null | null |
qualtrics_iat/qualtrics_tools.py
|
ycui1/QualtricsIAT
|
c81b12e2669e1e58b4653e85c0d22ac5a821b174
|
[
"MIT"
] | null | null | null |
from pathlib import Path
import requests
from requests_toolbelt.multipart.encoder import MultipartEncoder
# Example configuration (placeholders only; never commit a real API token):
# api_token = "<your-api-token>"
# brand_center = "mdanderson.co1"
# data_center = "iad1"
# headers = {"x-api-token": api_token}
class QualtricsTool:
"""Data model to manage Qualtrics-related tools
Parameters:
-----------
api_token: str, the API token for the user
data_center: str, the data center for the user
brand_center: str, the brand center for the user
"""
def __init__(self, api_token=None, data_center=None, brand_center=None):
self.api_token = api_token
self.data_center = data_center
self.brand_center = brand_center
@property
def api_headers(self):
"""The default API headers"""
return {"x-api-token": self.api_token}
@property
def base_url(self):
"""The default base URL"""
return f"https://{self.data_center}.qualtrics.com"
@property
def api_base_url(self):
"""The default base API URL"""
return f"{self.base_url}/API/v3"
def upload_images_api(self,
local_image_folder,
library_id,
creating_full_url=True,
qualtrics_folder=None,
filename_pattern="*"):
"""Upload images from the local folder to the Qualtrics server
:param local_image_folder: str, Path, the local folder containing the images
:param library_id: str, Qualtrics library ID number
:param creating_full_url: bool, whether to return the full URLs instead of bare IDs
:param qualtrics_folder: str, the Qualtrics Graphics folder for the uploaded images
:param filename_pattern: str, the pattern using which to select the images for uploading
:return list[str], the list of image IDs or URLs
"""
upload_url = f"{self.api_base_url}/libraries/{library_id}/graphics"
image_urls = list()
for file in Path(local_image_folder).glob(filename_pattern):
file_type = file.suffix[1:].lower()  # extension without the leading dot
if file_type not in ("png", "gif", "jpg", "jpeg"):
raise ValueError("Qualtrics only accepts PNG, GIF, and JPEG images.")
encoded_fields = {'file': (file.name, open(file, 'rb'), f'image/{file_type}')}
image_url_id = self._upload_image(encoded_fields, qualtrics_folder, upload_url, file, creating_full_url)
image_urls.append(image_url_id)
return image_urls
def upload_images_web(self,
image_files,
library_id,
creating_full_url,
qualtrics_folder,
image_type):
"""Upload images from the web app to the Qualtrics server
:param image_files: Bytes, the uploaded bytes data from the web app
:param library_id: str, Qualtrics library ID number
:param creating_full_url: bool, whether to return the full URLs instead of bare IDs
:param qualtrics_folder: str, the Qualtrics Graphics folder for the uploaded images
:param image_type: str, the image file type
:return list[str], the list of image IDs or URLs
"""
image_urls = list()
upload_url = f"{self.api_base_url}/libraries/{library_id}/graphics"
file_count_digit = len(str(len(image_files)))
for file_i, file in enumerate(image_files, start=1):
encoded_fields = {'file': (f"image{file_i:0>{file_count_digit}}.{image_type}", file, f'image/{image_type}')}
image_url_id = self._upload_image(encoded_fields, qualtrics_folder, upload_url, file, creating_full_url)
image_urls.append(image_url_id)
return image_urls
def _upload_image(self, encoded_fields, qualtrics_folder, upload_url, file, creating_full_url):
if qualtrics_folder:
encoded_fields['folder'] = qualtrics_folder
mp_encoder = MultipartEncoder(fields=encoded_fields)
post_request = requests.post(
upload_url,
data=mp_encoder,
headers={'Content-Type': mp_encoder.content_type, **self.api_headers}
)
try:
image_url_id = post_request.json()['result']['id']
except KeyError:
raise Exception(f"Failed to upload image {file.name}")
if creating_full_url:
image_url_id = f"{self.base_url}/ControlPanel/Graphic.php?IM={image_url_id}"
return image_url_id
def delete_images(self, library_id, image_url_ids):
"""Delete images from the specified library
:param library_id: str, the library ID number
:param image_url_ids: list[str], the image IDs or full URLs
:return dict, the deletion report"""
report = dict()
for image_url_id in image_url_ids:
if image_url_id.find("=") > 0:
image_url_id = image_url_id[image_url_id.index("=") + 1:]
url = f'{self.api_base_url}/libraries/{library_id}/graphics/{image_url_id}'
delete_response = requests.delete(url, headers=self.api_headers)
try:
http_status = delete_response.json()['meta']['httpStatus']
except KeyError:
raise Exception(f"Failed to delete image: {image_url_id}")
else:
report[image_url_id] = "Deleted" if http_status.startswith('200') else "Error"
return report
def create_survey(self, template_json):
"""Create the survey using the JSON template
:param template_json: str in the JSON format, the JSON file for the qsf file
:return str, the created Survey ID number
"""
upload_url = f"{self.api_base_url}/survey-definitions"
creation_response = requests.post(
upload_url,
json=template_json,
headers={**self.api_headers, "content-type": "application/json"}
)
try:
survey_id = creation_response.json()['result']['SurveyID']
except KeyError:
raise Exception("Couldn't create the survey. Please check the params.")
return survey_id
def delete_survey(self, survey_id):
"""Delete the survey
:param survey_id: str, the survey ID number
:return dict, the deletion report
"""
report = dict()
delete_url = f"{self.api_base_url}/survey-definitions/{survey_id}"
delete_response = requests.delete(delete_url, headers=self.api_headers)
try:
http_status = delete_response.json()['meta']['httpStatus']
except KeyError:
raise Exception(f"Failed to delete survey: {survey_id}")
else:
report[survey_id] = "Deleted" if http_status.startswith('200') else "Error"
return report
def export_responses(self, survey_id, file_format="csv", data_folder=None):
"""Export responses from the Qualtrics survey"""
download_url = f"{self.api_base_url}/surveys/{survey_id}/export-responses/"
download_payload = f'{{"format": "{file_format}"}}'
download_response = requests.post(
download_url,
data=download_payload,
headers={**self.api_headers, "content-type": "application/json"}
)
try:
progress_id = download_response.json()["result"]["progressId"]
file_id = self._monitor_progress(download_url, progress_id)
file_content = self._download_file(download_url, file_id)
except KeyError:
raise Exception("Can't download the responses. Please check the params.")
return file_content
def _monitor_progress(self, download_url, progress_id):
progress_status = "inProgress"
while progress_status != "complete" and progress_status != "failed":
progress_response = requests.get(download_url + progress_id, headers=self.api_headers)
progress_status = progress_response.json()["result"]["status"]
return progress_response.json()["result"]["fileId"]
def _download_file(self, download_url, file_id):
file_url = f"{download_url}/{file_id}/file"
file_response = requests.get(file_url, headers=self.api_headers, stream=True)
return file_response.content
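# Hedged usage sketch (the method names are defined above; every value below
# is a placeholder, not a real credential or library ID):
#   qt = QualtricsTool(api_token="<token>", data_center="iad1",
#                      brand_center="example.co1")
#   urls = qt.upload_images_api("images/", library_id="UR_xxxxxxxx",
#                               qualtrics_folder="IAT",
#                               filename_pattern="*.png")
#   qt.delete_images("UR_xxxxxxxx", urls)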
| 45.621622
| 120
| 0.632464
| 1,040
| 8,440
| 4.8875
| 0.166346
| 0.029904
| 0.031477
| 0.012984
| 0.385993
| 0.322447
| 0.303167
| 0.28015
| 0.266378
| 0.246705
| 0
| 0.002766
| 0.271682
| 8,440
| 184
| 121
| 45.869565
| 0.824142
| 0.216588
| 0
| 0.296
| 0
| 0
| 0.172392
| 0.073905
| 0
| 0
| 0
| 0
| 0
| 1
| 0.104
| false
| 0
| 0.024
| 0
| 0.232
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
433fe053f9b13b1595ca272851794d156b8d5378
| 11,693
|
py
|
Python
|
portfolio/gui/tabresults/righttable.py
|
timeerr/portfolio
|
256032eb638048f3cd3c824f2bb4976a8ec320b1
|
[
"MIT"
] | null | null | null |
portfolio/gui/tabresults/righttable.py
|
timeerr/portfolio
|
256032eb638048f3cd3c824f2bb4976a8ec320b1
|
[
"MIT"
] | null | null | null |
portfolio/gui/tabresults/righttable.py
|
timeerr/portfolio
|
256032eb638048f3cd3c824f2bb4976a8ec320b1
|
[
"MIT"
] | null | null | null |
#!/usr/bin/python3
from datetime import datetime
from PyQt5.QtWidgets import QTableWidgetItem, QTableWidget, QAbstractItemView, QMenu, QMessageBox
from PyQt5.QtGui import QCursor
from PyQt5.QtCore import Qt, pyqtSignal, QObject
from portfolio.db.fdbhandler import results, strategies, balances
def updatingdata(func):
"""
Decorator to flag self.updatingdata_flag whenever a function
that edits data without user intervention is being run
"""
def wrapper(self, *args, **kwargs):
self.updatingdata_flag = True
func(self, *args, **kwargs)
self.updatingdata_flag = False
return wrapper
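# How the guard is used (added note): methods decorated with @updatingdata
# raise updatingdata_flag around programmatic table edits, so the cellChanged
# slot (changeCellOnDatabase below) can distinguish user edits from internal
# ones and return early for the latter.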
class RightTable(QTableWidget):
"""
Table dynamically showing results
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Custom Menu
self.setContextMenuPolicy(Qt.CustomContextMenu)
self.customContextMenuRequested.connect(self.showMenu)
# A signal that will be emitted whenever a line is removed
self.lineremoved = LineRemoved()
# UI Tweaks
self.verticalHeader().hide()
self.setSortingEnabled(True)
self.setHorizontalHeaderLabels(
["id", self.tr("Date"), self.tr("Account"), self.tr("Strategy"), self.tr("Amount")])
# When edited, change the data on the database too
self.cellChanged.connect(self.changeCellOnDatabase)
# A flag to prevent changeCellOnDatabase execution when needed
self.updatingdata_flag = True
# Initialization: show all transactions
self.setData(datetime(1980, 1, 1), datetime.today(), "All", "All")
@updatingdata
def setData(self, startdate, enddate, strategy, account):
"""
Asks the database for results data within certain parameters,
then shows that data on the table
"""
# Clear table
self.clear()
self.setHorizontalHeaderLabels(
["id", self.tr("Date"), self.tr("Account"), self.tr("Strategy"), self.tr("Amount"), self.tr("Description")])
# Get desired data from db
results_to_show = results.get_results_from_query(
start_date=startdate, end_date=enddate, strategy=strategy, account=account)
# If the data is empty, we are done
if len(results_to_show) == 0:
self.setRowCount(0)
return
# Resize table
self.setRowCount(len(results_to_show))
self.setColumnCount(len(results_to_show[0]))
# Change content
for rownum, row in enumerate(results_to_show):
for colnum, data in enumerate(row):
item = QTableWidgetItem() # Item that will be inserted
if colnum == 0:
# Ids can't be editable
item.setFlags(Qt.ItemIsSelectable)
elif colnum == 1:
# Change format to display date better
data = datetime.fromtimestamp(data).strftime("%d-%m-%Y")
# Data is now formatted, we can write it on table
item.setData(0, data)
self.setItem(rownum, colnum, item)
def showMenu(self, event):
"""
Custom Menu to show when an item is right-clicked
Options:
- Remove Line: removes line from table and database
"""
menu = QMenu()
# Actions
remove_action = menu.addAction(self.tr("Remove Line"))
# Getting action selected by user
action = menu.exec_(QCursor.pos())
# Act accordingly
if action == remove_action:
self.removeSelection()
self.lineremoved.lineRemoved.emit()
@updatingdata
def removeSelection(self):
"""
Removes the entire row of every selected item,
and then does the same on the database
"""
# Getting selected indexes, and their corresponding ids
# from the database
selected_indexes_table, selected_ids = [], []
for index in self.selectedIndexes():
index = index.row() # Row number
if index not in selected_indexes_table: # Avoid duplicates
selected_indexes_table.append(index)
selected_ids.append(int(self.item(index, 0).text()))
# Removing the rows from the table and the database
for index, id_db in zip(selected_indexes_table, selected_ids):
results.delete_result(id_db)
self.removeRow(index)
print("Removed rows with ids on db : ", selected_ids,
"\n & ids on table: ", selected_indexes_table)
def changeCellOnDatabase(self, row, column):
"""
When a Table Item is edited by the user,
we want to check if it fits the type
and edit it on the database too
"""
if self.updatingdata_flag is True:
return
# The data is being modified internally (not by the user)
# so no errors assumed
new_item = self.item(row, column)
new_item_data = new_item.text()
database_entry_id = self.item(row, 0).text()
previous_amount = results.get_result_amount_by_id(
database_entry_id) # Useful for balance adjustments later
columnselected_name = self.horizontalHeaderItem(column).text()
# Check which column the edited item belongs to, validate the proposed
# data accordingly, and act on the database.
# -------------- id --------------------
if columnselected_name == self.tr("Id"):
# Ids can't be edited
error_mssg = QMessageBox()
error_mssg.setIcon(QMessageBox.Warning)
error_mssg.setText(self.tr("Ids can't be edited"))
error_mssg.exec_()
# -------------- Date --------------------
elif columnselected_name == self.tr("Date"):
# The new text has to be a date
try:
new_date = datetime.strptime(new_item_data, "%d-%m-%Y")
results.update_result(
database_entry_id, new_date=new_date.timestamp())
except ValueError:
error_mssg = QMessageBox()
error_mssg.setIcon(QMessageBox.Warning)
error_mssg.setText(
self.tr("Has to be a date in format dd-mm-yyyy"))
error_mssg.exec_()
# Reset date to previous one
previous_date_timestamp = results.get_result_date_by_id(
database_entry_id)
previous_date_text = datetime.fromtimestamp(
previous_date_timestamp).strftime("%d-%m-%Y")
self.updatingdata_flag = True
new_item.setData(0, previous_date_text)
self.updatingdata_flag = False
# -------------- Account --------------------
elif columnselected_name == self.tr("Account"):
# The account has to be an existing one
all_accounts = [a[0] for a in balances.get_all_accounts()]
previous_account = results.get_result_account_by_id(
database_entry_id)
if new_item_data not in all_accounts:
error_mssg = QMessageBox()
error_mssg.setIcon(QMessageBox.Warning)
error_mssg.setText(
self.tr("The account has to be an existing one. \nAdd it first manually"))
error_mssg.exec_()
# Reset account to previous one
self.updatingdata_flag = True
new_item.setData(0, previous_account)
self.updatingdata_flag = False
else:
# The data is good
# Change the result on the results table on the db
results.update_result(
database_entry_id, new_account=new_item_data)
# Update the balance of the two accounts involved,
# according to the result amount
balances.update_balances_with_new_result(
previous_account, - previous_amount)
balances.update_balances_with_new_result(
new_item_data, previous_amount)
# -------------- Strategy --------------------
elif columnselected_name == self.tr("Strategy"):
# The strategy has to be an existing one
previous_strategy = results.get_result_strategy_by_id(
database_entry_id)
all_strategies = [s[0] for s in strategies.get_all_strategies()]
if new_item_data not in all_strategies:
error_mssg = QMessageBox()
error_mssg.setIcon(QMessageBox.Warning)
error_mssg.setText(
self.tr("The strategy has to be an existing one. \nAdd it first manually"))
error_mssg.exec_()
# Reset strategy to previous one
self.updatingdata_flag = True
new_item.setData(0, previous_strategy)
self.updatingdata_flag = False
else:
# The data is good
# Change the result on the results table of the db
results.update_result(
database_entry_id, new_strategy=new_item_data)
# Update the pnl of the two strategies involved,
# according to the result amount
strategies.update_strategies_with_new_result(
previous_strategy, - previous_amount)
strategies.update_strategies_with_new_result(
new_item_data, previous_amount)
# -------------- Amount --------------------
elif columnselected_name == self.tr("Amount"):
# The amount has to be an integer
try:
new_item_data = int(new_item_data)
# Change the result on the results table of the db
results.update_result(
database_entry_id, new_amount=new_item_data)
# Update the balances and strategies with the difference
# between the old and the new result
diff_betweeen_results = new_item_data - previous_amount
account_involved = results.get_result_account_by_id(
database_entry_id)
strategy_involved = results.get_result_strategy_by_id(
database_entry_id)
balances.update_balances_with_new_result(
account_involved, diff_betweeen_results)
strategies.update_strategies_with_new_result(
strategy_involved, diff_betweeen_results)
except Exception:
error_mssg = QMessageBox()
error_mssg.setIcon(QMessageBox.Warning)
error_mssg.setText(
self.tr("Has to be an integer"))
error_mssg.exec_()
# Reset to previous amount
previous_amount = results.get_result_amount_by_id(
database_entry_id)
self.updatingdata_flag = True
new_item.setData(0, previous_amount)
self.updatingdata_flag = False
# -------------- Description --------------------
elif columnselected_name == self.tr("Description"):
# A description can be any data. So no checks
results.update_result(
database_entry_id, new_description=new_item_data)
class LineRemoved(QObject):
lineRemoved = pyqtSignal()
| 39.107023
| 120
| 0.584281
| 1,274
| 11,693
| 5.171115
| 0.215856
| 0.019126
| 0.039466
| 0.021858
| 0.365209
| 0.304341
| 0.274287
| 0.228597
| 0.205525
| 0.154523
| 0
| 0.003056
| 0.328316
| 11,693
| 298
| 121
| 39.238255
| 0.835752
| 0.215086
| 0
| 0.353293
| 0
| 0
| 0.044037
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041916
| false
| 0
| 0.02994
| 0
| 0.107784
| 0.005988
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
434140c6bb3287e6ed3f82da31b35ca3a7bbad65
| 451
|
py
|
Python
|
setup.py
|
NikolaiT/proxychecker
|
cd6a024668826c415f91e909c98e4110ffc8c10d
|
[
"BSD-3-Clause"
] | 1
|
2015-02-24T06:30:12.000Z
|
2015-02-24T06:30:12.000Z
|
setup.py
|
NikolaiT/proxychecker
|
cd6a024668826c415f91e909c98e4110ffc8c10d
|
[
"BSD-3-Clause"
] | null | null | null |
setup.py
|
NikolaiT/proxychecker
|
cd6a024668826c415f91e909c98e4110ffc8c10d
|
[
"BSD-3-Clause"
] | 2
|
2015-03-19T11:30:49.000Z
|
2020-03-29T12:08:01.000Z
|
#!/usr/bin/env python
from distutils.core import setup
VERSION = "0.0.1"
setup(
author='Nikolai Tschacher',
name = "proxychecker",
version = VERSION,
description = "A Python proxychecker module that makes use of socks",
url = "http://incolumitas.com",
license = "BSD",
author_email = "admin@incolumitas.com",
keywords = ["socks", "proxy", "proxychecker"],
py_modules = ['proxychecker', 'sockshandler', 'socks']
)
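# Typical invocation for a distutils-based setup script of this vintage
# (a usage note; the commands are standard distutils, nothing project-specific):
#   python setup.py sdist      # build a source distribution
#   python setup.py install    # install proxychecker and its socks helpers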
| 26.529412
| 73
| 0.656319
| 51
| 451
| 5.764706
| 0.745098
| 0.095238
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008242
| 0.192905
| 451
| 16
| 74
| 28.1875
| 0.799451
| 0.044346
| 0
| 0
| 0
| 0
| 0.425581
| 0.048837
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.076923
| 0
| 0.076923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4346fdc0a3d3d41ed572ed723800bf5f1dc198ab
| 1,574
|
py
|
Python
|
sbin/preload_findit_coverage_2.py
|
cariaso/metapub
|
bfa361dd6e5de8ee0859e596d490fb478f7dcfba
|
[
"Apache-2.0"
] | 28
|
2019-09-09T08:12:31.000Z
|
2021-12-17T00:09:14.000Z
|
sbin/preload_findit_coverage_2.py
|
cariaso/metapub
|
bfa361dd6e5de8ee0859e596d490fb478f7dcfba
|
[
"Apache-2.0"
] | 33
|
2019-11-07T05:36:04.000Z
|
2022-01-29T01:14:57.000Z
|
sbin/preload_findit_coverage_2.py
|
cariaso/metapub
|
bfa361dd6e5de8ee0859e596d490fb478f7dcfba
|
[
"Apache-2.0"
] | 10
|
2019-09-09T10:04:05.000Z
|
2021-06-08T16:00:14.000Z
|
from __future__ import absolute_import, print_function, unicode_literals
# "preload" for FindIt #2: iterate over same journal list, but actually
# load a PubMedArticle object on each PMID. (no list output created)
from metapub import FindIt, PubMedFetcher
from metapub.findit.dances import the_doi_2step
from config import JOURNAL_ISOABBR_LIST_FILENAME
fetch = PubMedFetcher()
def get_sample_pmids_for_journal(jrnl, years=None, max_pmids=3):
samples = []
if years is None:
pmids = fetch.pmids_for_query(journal=jrnl)
idx = 0
while idx < len(pmids) and idx < max_pmids:
samples.append(pmids[idx])
idx += 1
else:
for year in years:
pmids = fetch.pmids_for_query(journal=jrnl, year=year)
if len(pmids) < 1:
continue
samples.append(pmids[0])
return samples
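# Hypothetical usage (requires network access to PubMed; the journal name is
# only an example):
#   get_sample_pmids_for_journal('J Mol Biol', years=['1990', '2002'])
# returns at most one PMID per requested year; with years=None it returns the
# first max_pmids hits for the journal query.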
def main():
jrnls = sorted(open(JOURNAL_ISOABBR_LIST_FILENAME).read().split('\n'))
for jrnl in jrnls:
jrnl = jrnl.strip()
if jrnl == '':
continue
years = ['1975', '1980', '1990', '2002', '2013']
num_desired = len(years)
pmids = get_sample_pmids_for_journal(jrnl, years=years)
if len(pmids) < num_desired:
pmids = pmids + get_sample_pmids_for_journal(jrnl, max_pmids=num_desired-len(pmids))
print('[%s] Sample pmids: %r' % (jrnl, pmids))
for pmid in pmids:
pma = fetch.article_by_pmid(pmid)
print(' ', pma.pmid, pma.title)
if __name__ == '__main__':
main()
| 30.269231
| 96
| 0.628971
| 204
| 1,574
| 4.622549
| 0.421569
| 0.050901
| 0.044539
| 0.054083
| 0.182397
| 0.182397
| 0.182397
| 0
| 0
| 0
| 0
| 0.023417
| 0.267471
| 1,574
| 51
| 97
| 30.862745
| 0.794449
| 0.086404
| 0
| 0.054054
| 0
| 0
| 0.038381
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054054
| false
| 0
| 0.108108
| 0
| 0.189189
| 0.081081
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4348293155a11622c60c701da79d91d559f0de88
| 48,209
|
py
|
Python
|
specs/dxgi.py
|
linkmauve/apitrace
|
a22dda1ac2f27cd014ac7a16e7b7b6ebc9f14ae1
|
[
"MIT"
] | 1
|
2020-06-09T18:54:09.000Z
|
2020-06-09T18:54:09.000Z
|
specs/dxgi.py
|
linkmauve/apitrace
|
a22dda1ac2f27cd014ac7a16e7b7b6ebc9f14ae1
|
[
"MIT"
] | 2
|
2020-06-09T18:54:32.000Z
|
2021-01-22T21:05:43.000Z
|
specs/dxgi.py
|
linkmauve/apitrace
|
a22dda1ac2f27cd014ac7a16e7b7b6ebc9f14ae1
|
[
"MIT"
] | 1
|
2020-11-07T20:55:34.000Z
|
2020-11-07T20:55:34.000Z
|
##########################################################################
#
# Copyright 2014 VMware, Inc
# Copyright 2011 Jose Fonseca
# All Rights Reserved.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
##########################################################################
from .winapi import *
DXGI_FORMAT = Enum("DXGI_FORMAT", [
"DXGI_FORMAT_UNKNOWN",
"DXGI_FORMAT_R32G32B32A32_TYPELESS",
"DXGI_FORMAT_R32G32B32A32_FLOAT",
"DXGI_FORMAT_R32G32B32A32_UINT",
"DXGI_FORMAT_R32G32B32A32_SINT",
"DXGI_FORMAT_R32G32B32_TYPELESS",
"DXGI_FORMAT_R32G32B32_FLOAT",
"DXGI_FORMAT_R32G32B32_UINT",
"DXGI_FORMAT_R32G32B32_SINT",
"DXGI_FORMAT_R16G16B16A16_TYPELESS",
"DXGI_FORMAT_R16G16B16A16_FLOAT",
"DXGI_FORMAT_R16G16B16A16_UNORM",
"DXGI_FORMAT_R16G16B16A16_UINT",
"DXGI_FORMAT_R16G16B16A16_SNORM",
"DXGI_FORMAT_R16G16B16A16_SINT",
"DXGI_FORMAT_R32G32_TYPELESS",
"DXGI_FORMAT_R32G32_FLOAT",
"DXGI_FORMAT_R32G32_UINT",
"DXGI_FORMAT_R32G32_SINT",
"DXGI_FORMAT_R32G8X24_TYPELESS",
"DXGI_FORMAT_D32_FLOAT_S8X24_UINT",
"DXGI_FORMAT_R32_FLOAT_X8X24_TYPELESS",
"DXGI_FORMAT_X32_TYPELESS_G8X24_UINT",
"DXGI_FORMAT_R10G10B10A2_TYPELESS",
"DXGI_FORMAT_R10G10B10A2_UNORM",
"DXGI_FORMAT_R10G10B10A2_UINT",
"DXGI_FORMAT_R11G11B10_FLOAT",
"DXGI_FORMAT_R8G8B8A8_TYPELESS",
"DXGI_FORMAT_R8G8B8A8_UNORM",
"DXGI_FORMAT_R8G8B8A8_UNORM_SRGB",
"DXGI_FORMAT_R8G8B8A8_UINT",
"DXGI_FORMAT_R8G8B8A8_SNORM",
"DXGI_FORMAT_R8G8B8A8_SINT",
"DXGI_FORMAT_R16G16_TYPELESS",
"DXGI_FORMAT_R16G16_FLOAT",
"DXGI_FORMAT_R16G16_UNORM",
"DXGI_FORMAT_R16G16_UINT",
"DXGI_FORMAT_R16G16_SNORM",
"DXGI_FORMAT_R16G16_SINT",
"DXGI_FORMAT_R32_TYPELESS",
"DXGI_FORMAT_D32_FLOAT",
"DXGI_FORMAT_R32_FLOAT",
"DXGI_FORMAT_R32_UINT",
"DXGI_FORMAT_R32_SINT",
"DXGI_FORMAT_R24G8_TYPELESS",
"DXGI_FORMAT_D24_UNORM_S8_UINT",
"DXGI_FORMAT_R24_UNORM_X8_TYPELESS",
"DXGI_FORMAT_X24_TYPELESS_G8_UINT",
"DXGI_FORMAT_R8G8_TYPELESS",
"DXGI_FORMAT_R8G8_UNORM",
"DXGI_FORMAT_R8G8_UINT",
"DXGI_FORMAT_R8G8_SNORM",
"DXGI_FORMAT_R8G8_SINT",
"DXGI_FORMAT_R16_TYPELESS",
"DXGI_FORMAT_R16_FLOAT",
"DXGI_FORMAT_D16_UNORM",
"DXGI_FORMAT_R16_UNORM",
"DXGI_FORMAT_R16_UINT",
"DXGI_FORMAT_R16_SNORM",
"DXGI_FORMAT_R16_SINT",
"DXGI_FORMAT_R8_TYPELESS",
"DXGI_FORMAT_R8_UNORM",
"DXGI_FORMAT_R8_UINT",
"DXGI_FORMAT_R8_SNORM",
"DXGI_FORMAT_R8_SINT",
"DXGI_FORMAT_A8_UNORM",
"DXGI_FORMAT_R1_UNORM",
"DXGI_FORMAT_R9G9B9E5_SHAREDEXP",
"DXGI_FORMAT_R8G8_B8G8_UNORM",
"DXGI_FORMAT_G8R8_G8B8_UNORM",
"DXGI_FORMAT_BC1_TYPELESS",
"DXGI_FORMAT_BC1_UNORM",
"DXGI_FORMAT_BC1_UNORM_SRGB",
"DXGI_FORMAT_BC2_TYPELESS",
"DXGI_FORMAT_BC2_UNORM",
"DXGI_FORMAT_BC2_UNORM_SRGB",
"DXGI_FORMAT_BC3_TYPELESS",
"DXGI_FORMAT_BC3_UNORM",
"DXGI_FORMAT_BC3_UNORM_SRGB",
"DXGI_FORMAT_BC4_TYPELESS",
"DXGI_FORMAT_BC4_UNORM",
"DXGI_FORMAT_BC4_SNORM",
"DXGI_FORMAT_BC5_TYPELESS",
"DXGI_FORMAT_BC5_UNORM",
"DXGI_FORMAT_BC5_SNORM",
"DXGI_FORMAT_B5G6R5_UNORM",
"DXGI_FORMAT_B5G5R5A1_UNORM",
"DXGI_FORMAT_B8G8R8A8_UNORM",
"DXGI_FORMAT_B8G8R8X8_UNORM",
"DXGI_FORMAT_R10G10B10_XR_BIAS_A2_UNORM",
"DXGI_FORMAT_B8G8R8A8_TYPELESS",
"DXGI_FORMAT_B8G8R8A8_UNORM_SRGB",
"DXGI_FORMAT_B8G8R8X8_TYPELESS",
"DXGI_FORMAT_B8G8R8X8_UNORM_SRGB",
"DXGI_FORMAT_BC6H_TYPELESS",
"DXGI_FORMAT_BC6H_UF16",
"DXGI_FORMAT_BC6H_SF16",
"DXGI_FORMAT_BC7_TYPELESS",
"DXGI_FORMAT_BC7_UNORM",
"DXGI_FORMAT_BC7_UNORM_SRGB",
"DXGI_FORMAT_AYUV",
"DXGI_FORMAT_Y410",
"DXGI_FORMAT_Y416",
"DXGI_FORMAT_NV12",
"DXGI_FORMAT_P010",
"DXGI_FORMAT_P016",
"DXGI_FORMAT_420_OPAQUE",
"DXGI_FORMAT_YUY2",
"DXGI_FORMAT_Y210",
"DXGI_FORMAT_Y216",
"DXGI_FORMAT_NV11",
"DXGI_FORMAT_AI44",
"DXGI_FORMAT_IA44",
"DXGI_FORMAT_P8",
"DXGI_FORMAT_A8P8",
"DXGI_FORMAT_B4G4R4A4_UNORM",
])
HRESULT = MAKE_HRESULT([
"DXGI_STATUS_OCCLUDED",
"DXGI_STATUS_CLIPPED",
"DXGI_STATUS_NO_REDIRECTION",
"DXGI_STATUS_NO_DESKTOP_ACCESS",
"DXGI_STATUS_GRAPHICS_VIDPN_SOURCE_IN_USE",
"DXGI_STATUS_MODE_CHANGED",
"DXGI_STATUS_MODE_CHANGE_IN_PROGRESS",
"DXGI_ERROR_INVALID_CALL",
"DXGI_ERROR_NOT_FOUND",
"DXGI_ERROR_MORE_DATA",
"DXGI_ERROR_UNSUPPORTED",
"DXGI_ERROR_DEVICE_REMOVED",
"DXGI_ERROR_DEVICE_HUNG",
"DXGI_ERROR_DEVICE_RESET",
"DXGI_ERROR_WAS_STILL_DRAWING",
"DXGI_ERROR_FRAME_STATISTICS_DISJOINT",
"DXGI_ERROR_GRAPHICS_VIDPN_SOURCE_IN_USE",
"DXGI_ERROR_DRIVER_INTERNAL_ERROR",
"DXGI_ERROR_NONEXCLUSIVE",
"DXGI_ERROR_NOT_CURRENTLY_AVAILABLE",
"DXGI_ERROR_REMOTE_CLIENT_DISCONNECTED",
"DXGI_ERROR_REMOTE_OUTOFMEMORY",
# IDXGIKeyedMutex::AcquireSync
"WAIT_ABANDONED",
"WAIT_TIMEOUT",
])
DXGI_RGB = Struct("DXGI_RGB", [
(Float, "Red"),
(Float, "Green"),
(Float, "Blue"),
])
DXGI_GAMMA_CONTROL = Struct("DXGI_GAMMA_CONTROL", [
(DXGI_RGB, "Scale"),
(DXGI_RGB, "Offset"),
(Array(DXGI_RGB, 1025), "GammaCurve"),
])
DXGI_GAMMA_CONTROL_CAPABILITIES = Struct("DXGI_GAMMA_CONTROL_CAPABILITIES", [
(BOOL, "ScaleAndOffsetSupported"),
(Float, "MaxConvertedValue"),
(Float, "MinConvertedValue"),
(UINT, "NumGammaControlPoints"),
(Array(Float, "{self}.NumGammaControlPoints"), "ControlPointPositions"),
])
DXGI_RATIONAL = Struct("DXGI_RATIONAL", [
(UINT, "Numerator"),
(UINT, "Denominator"),
])
DXGI_MODE_SCANLINE_ORDER = Enum("DXGI_MODE_SCANLINE_ORDER", [
"DXGI_MODE_SCANLINE_ORDER_UNSPECIFIED",
"DXGI_MODE_SCANLINE_ORDER_PROGRESSIVE",
"DXGI_MODE_SCANLINE_ORDER_UPPER_FIELD_FIRST",
"DXGI_MODE_SCANLINE_ORDER_LOWER_FIELD_FIRST",
])
DXGI_MODE_SCALING = Enum("DXGI_MODE_SCALING", [
"DXGI_MODE_SCALING_UNSPECIFIED",
"DXGI_MODE_SCALING_CENTERED",
"DXGI_MODE_SCALING_STRETCHED",
])
DXGI_MODE_ROTATION = Enum("DXGI_MODE_ROTATION", [
"DXGI_MODE_ROTATION_UNSPECIFIED",
"DXGI_MODE_ROTATION_IDENTITY",
"DXGI_MODE_ROTATION_ROTATE90",
"DXGI_MODE_ROTATION_ROTATE180",
"DXGI_MODE_ROTATION_ROTATE270",
])
DXGI_MODE_DESC = Struct("DXGI_MODE_DESC", [
(UINT, "Width"),
(UINT, "Height"),
(DXGI_RATIONAL, "RefreshRate"),
(DXGI_FORMAT, "Format"),
(DXGI_MODE_SCANLINE_ORDER, "ScanlineOrdering"),
(DXGI_MODE_SCALING, "Scaling"),
])
DXGI_QUALITY_LEVEL = FakeEnum(UINT, [
"DXGI_STANDARD_MULTISAMPLE_QUALITY_PATTERN",
"DXGI_CENTER_MULTISAMPLE_QUALITY_PATTERN",
])
DXGI_SAMPLE_DESC = Struct("DXGI_SAMPLE_DESC", [
(UINT, "Count"),
(DXGI_QUALITY_LEVEL, "Quality"),
])
DXGI_RGBA = Struct("DXGI_RGBA", [
(Float, "r"),
(Float, "g"),
(Float, "b"),
(Float, "a"),
])
IDXGIObject = Interface("IDXGIObject", IUnknown)
IDXGIDeviceSubObject = Interface("IDXGIDeviceSubObject", IDXGIObject)
IDXGIResource = Interface("IDXGIResource", IDXGIDeviceSubObject)
IDXGIKeyedMutex = Interface("IDXGIKeyedMutex", IDXGIDeviceSubObject)
IDXGISurface = Interface("IDXGISurface", IDXGIDeviceSubObject)
IDXGISurface1 = Interface("IDXGISurface1", IDXGISurface)
IDXGIAdapter = Interface("IDXGIAdapter", IDXGIObject)
IDXGIOutput = Interface("IDXGIOutput", IDXGIObject)
IDXGISwapChain = Interface("IDXGISwapChain", IDXGIDeviceSubObject)
IDXGIFactory = Interface("IDXGIFactory", IDXGIObject)
IDXGIDevice = Interface("IDXGIDevice", IDXGIObject)
IDXGIFactory1 = Interface("IDXGIFactory1", IDXGIFactory)
IDXGIAdapter1 = Interface("IDXGIAdapter1", IDXGIAdapter)
IDXGIDevice1 = Interface("IDXGIDevice1", IDXGIDevice)
DXGI_USAGE = Flags(UINT, [
"DXGI_CPU_ACCESS_NONE", # 0
"DXGI_CPU_ACCESS_SCRATCH", # 3
"DXGI_CPU_ACCESS_DYNAMIC", # 1
"DXGI_CPU_ACCESS_READ_WRITE", # 2
"DXGI_USAGE_SHADER_INPUT",
"DXGI_USAGE_RENDER_TARGET_OUTPUT",
"DXGI_USAGE_BACK_BUFFER",
"DXGI_USAGE_SHARED",
"DXGI_USAGE_READ_ONLY",
"DXGI_USAGE_DISCARD_ON_PRESENT",
"DXGI_USAGE_UNORDERED_ACCESS",
])
DXGI_FRAME_STATISTICS = Struct("DXGI_FRAME_STATISTICS", [
(UINT, "PresentCount"),
(UINT, "PresentRefreshCount"),
(UINT, "SyncRefreshCount"),
(LARGE_INTEGER, "SyncQPCTime"),
(LARGE_INTEGER, "SyncGPUTime"),
])
DXGI_MAPPED_RECT = Struct("DXGI_MAPPED_RECT", [
(INT, "Pitch"),
(LinearPointer(BYTE, "_MappedSize"), "pBits"),
])
DXGI_ADAPTER_DESC = Struct("DXGI_ADAPTER_DESC", [
(WString, "Description"),
(UINT, "VendorId"),
(UINT, "DeviceId"),
(UINT, "SubSysId"),
(UINT, "Revision"),
(SIZE_T, "DedicatedVideoMemory"),
(SIZE_T, "DedicatedSystemMemory"),
(SIZE_T, "SharedSystemMemory"),
(LUID, "AdapterLuid"),
])
DXGI_OUTPUT_DESC = Struct("DXGI_OUTPUT_DESC", [
(WString, "DeviceName"),
(RECT, "DesktopCoordinates"),
(BOOL, "AttachedToDesktop"),
(DXGI_MODE_ROTATION, "Rotation"),
(HMONITOR, "Monitor"),
])
DXGI_SHARED_RESOURCE = Struct("DXGI_SHARED_RESOURCE", [
(HANDLE, "Handle"),
])
DXGI_RESOURCE_PRIORITY = FakeEnum(UINT, [
"DXGI_RESOURCE_PRIORITY_MINIMUM",
"DXGI_RESOURCE_PRIORITY_LOW",
"DXGI_RESOURCE_PRIORITY_NORMAL",
"DXGI_RESOURCE_PRIORITY_HIGH",
"DXGI_RESOURCE_PRIORITY_MAXIMUM",
])
DXGI_RESIDENCY = Enum("DXGI_RESIDENCY", [
"DXGI_RESIDENCY_FULLY_RESIDENT",
"DXGI_RESIDENCY_RESIDENT_IN_SHARED_MEMORY",
"DXGI_RESIDENCY_EVICTED_TO_DISK",
])
DXGI_SURFACE_DESC = Struct("DXGI_SURFACE_DESC", [
(UINT, "Width"),
(UINT, "Height"),
(DXGI_FORMAT, "Format"),
(DXGI_SAMPLE_DESC, "SampleDesc"),
])
DXGI_SWAP_EFFECT = Enum("DXGI_SWAP_EFFECT", [
"DXGI_SWAP_EFFECT_DISCARD",
"DXGI_SWAP_EFFECT_SEQUENTIAL",
"DXGI_SWAP_EFFECT_FLIP_SEQUENTIAL",
"DXGI_SWAP_EFFECT_FLIP_DISCARD",
])
DXGI_SWAP_CHAIN_FLAG = Flags(UINT, [
"DXGI_SWAP_CHAIN_FLAG_NONPREROTATED",
"DXGI_SWAP_CHAIN_FLAG_ALLOW_MODE_SWITCH",
"DXGI_SWAP_CHAIN_FLAG_GDI_COMPATIBLE",
"DXGI_SWAP_CHAIN_FLAG_RESTRICTED_CONTENT",
"DXGI_SWAP_CHAIN_FLAG_RESTRICT_SHARED_RESOURCE_DRIVER",
"DXGI_SWAP_CHAIN_FLAG_DISPLAY_ONLY",
"DXGI_SWAP_CHAIN_FLAG_FRAME_LATENCY_WAITABLE_OBJECT",
"DXGI_SWAP_CHAIN_FLAG_FOREGROUND_LAYER",
"DXGI_SWAP_CHAIN_FLAG_FULLSCREEN_VIDEO",
"DXGI_SWAP_CHAIN_FLAG_YUV_VIDEO",
"DXGI_SWAP_CHAIN_FLAG_HW_PROTECTED",
"DXGI_SWAP_CHAIN_FLAG_ALLOW_TEARING",
#"DXGI_SWAP_CHAIN_FLAG_RESTRICTED_TO_ALL_HOLOGRAPHIC_DISPLAYS", # DXGI 1.6
])
DXGI_SWAP_CHAIN_DESC = Struct("DXGI_SWAP_CHAIN_DESC", [
(DXGI_MODE_DESC, "BufferDesc"),
(DXGI_SAMPLE_DESC, "SampleDesc"),
(DXGI_USAGE, "BufferUsage"),
(UINT, "BufferCount"),
(HWND, "OutputWindow"),
(BOOL, "Windowed"),
(DXGI_SWAP_EFFECT, "SwapEffect"),
(DXGI_SWAP_CHAIN_FLAG, "Flags"),
])
IDXGIObject.methods += [
StdMethod(HRESULT, "SetPrivateData", [(REFGUID, "Name"), (UINT, "DataSize"), (OpaqueBlob(Const(Void), "DataSize"), "pData")], sideeffects=False),
StdMethod(HRESULT, "SetPrivateDataInterface", [(REFGUID, "Name"), (OpaquePointer(Const(IUnknown)), "pUnknown")], sideeffects=False),
StdMethod(HRESULT, "GetPrivateData", [(REFGUID, "Name"), InOut(Pointer(UINT), "pDataSize"), Out(OpaquePointer(Void), "pData")], sideeffects=False),
StdMethod(HRESULT, "GetParent", [(REFIID, "riid"), Out(Pointer(ObjPointer(Void)), "ppParent")]),
]
IDXGIDeviceSubObject.methods += [
StdMethod(HRESULT, "GetDevice", [(REFIID, "riid"), Out(Pointer(ObjPointer(Void)), "ppDevice")]),
]
SHARED_HANDLE = Handle("shared_handle", RAW_HANDLE)
IDXGIResource.methods += [
StdMethod(HRESULT, "GetSharedHandle", [Out(Pointer(SHARED_HANDLE), "pSharedHandle")]),
StdMethod(HRESULT, "GetUsage", [Out(Pointer(DXGI_USAGE), "pUsage")], sideeffects=False),
StdMethod(HRESULT, "SetEvictionPriority", [(DXGI_RESOURCE_PRIORITY, "EvictionPriority")]),
StdMethod(HRESULT, "GetEvictionPriority", [Out(Pointer(DXGI_RESOURCE_PRIORITY), "pEvictionPriority")], sideeffects=False),
]
DWORD_TIMEOUT = FakeEnum(DWORD, [
"INFINITE",
])
IDXGIKeyedMutex.methods += [
StdMethod(HRESULT, "AcquireSync", [(UINT64, "Key"), (DWORD_TIMEOUT, "dwMilliseconds")], sideeffects=False),
StdMethod(HRESULT, "ReleaseSync", [(UINT64, "Key")]),
]
DXGI_MAP = Flags(UINT, [
"DXGI_MAP_READ",
"DXGI_MAP_WRITE",
"DXGI_MAP_DISCARD",
])
IDXGISurface.methods += [
StdMethod(HRESULT, "GetDesc", [Out(Pointer(DXGI_SURFACE_DESC), "pDesc")], sideeffects=False),
StdMethod(HRESULT, "Map", [Out(Pointer(DXGI_MAPPED_RECT), "pLockedRect"), (DXGI_MAP, "MapFlags")]),
StdMethod(HRESULT, "Unmap", []),
]
IDXGISurface1.methods += [
StdMethod(HRESULT, "GetDC", [(BOOL, "Discard"), Out(Pointer(HDC), "phdc")]),
StdMethod(HRESULT, "ReleaseDC", [(Pointer(RECT), "pDirtyRect")]),
]
IDXGIAdapter.methods += [
StdMethod(HRESULT, "EnumOutputs", [(UINT, "Output"), Out(Pointer(ObjPointer(IDXGIOutput)), "ppOutput")]),
StdMethod(HRESULT, "GetDesc", [Out(Pointer(DXGI_ADAPTER_DESC), "pDesc")], sideeffects=False),
StdMethod(HRESULT, "CheckInterfaceSupport", [(REFGUID, "InterfaceName"), Out(Pointer(LARGE_INTEGER), "pUMDVersion")], sideeffects=False),
]
DXGI_ENUM_MODES = Flags(UINT, [
"DXGI_ENUM_MODES_INTERLACED",
"DXGI_ENUM_MODES_SCALING",
"DXGI_ENUM_MODES_STEREO",
"DXGI_ENUM_MODES_DISABLED_STEREO",
])
IDXGIOutput.methods += [
StdMethod(HRESULT, "GetDesc", [Out(Pointer(DXGI_OUTPUT_DESC), "pDesc")], sideeffects=False),
StdMethod(HRESULT, "GetDisplayModeList", [(DXGI_FORMAT, "EnumFormat"), (DXGI_ENUM_MODES, "Flags"), InOut(Pointer(UINT), "pNumModes"), Out(Array(DXGI_MODE_DESC, "*pNumModes"), "pDesc")], sideeffects=False),
StdMethod(HRESULT, "FindClosestMatchingMode", [(Pointer(Const(DXGI_MODE_DESC)), "pModeToMatch"), Out(Pointer(DXGI_MODE_DESC), "pClosestMatch"), (ObjPointer(IUnknown), "pConcernedDevice")], sideeffects=False),
StdMethod(HRESULT, "WaitForVBlank", []),
StdMethod(HRESULT, "TakeOwnership", [(ObjPointer(IUnknown), "pDevice"), (BOOL, "Exclusive")]),
StdMethod(Void, "ReleaseOwnership", []),
StdMethod(HRESULT, "GetGammaControlCapabilities", [Out(Pointer(DXGI_GAMMA_CONTROL_CAPABILITIES), "pGammaCaps")], sideeffects=False),
StdMethod(HRESULT, "SetGammaControl", [(Pointer(Const(DXGI_GAMMA_CONTROL)), "pArray")], sideeffects=False), # Avoid NumGammaControlPoints mismatch
StdMethod(HRESULT, "GetGammaControl", [Out(Pointer(DXGI_GAMMA_CONTROL), "pArray")], sideeffects=False),
StdMethod(HRESULT, "SetDisplaySurface", [(ObjPointer(IDXGISurface), "pScanoutSurface")]),
StdMethod(HRESULT, "GetDisplaySurfaceData", [(ObjPointer(IDXGISurface), "pDestination")]),
StdMethod(HRESULT, "GetFrameStatistics", [Out(Pointer(DXGI_FRAME_STATISTICS), "pStats")], sideeffects=False),
]
DXGI_PRESENT = Flags(UINT, [
"DXGI_PRESENT_TEST",
"DXGI_PRESENT_DO_NOT_SEQUENCE",
"DXGI_PRESENT_RESTART",
"DXGI_PRESENT_DO_NOT_WAIT",
"DXGI_PRESENT_STEREO_PREFER_RIGHT",
"DXGI_PRESENT_STEREO_TEMPORARY_MONO",
"DXGI_PRESENT_RESTRICT_TO_OUTPUT",
"DXGI_PRESENT_USE_DURATION",
])
IDXGISwapChain.methods += [
StdMethod(HRESULT, "Present", [(UINT, "SyncInterval"), (DXGI_PRESENT, "Flags")]),
StdMethod(HRESULT, "GetBuffer", [(UINT, "Buffer"), (REFIID, "riid"), Out(Pointer(ObjPointer(Void)), "ppSurface")]),
StdMethod(HRESULT, "SetFullscreenState", [(BOOL, "Fullscreen"), (ObjPointer(IDXGIOutput), "pTarget")]),
StdMethod(HRESULT, "GetFullscreenState", [Out(Pointer(BOOL), "pFullscreen"), Out(Pointer(ObjPointer(IDXGIOutput)), "ppTarget")]),
StdMethod(HRESULT, "GetDesc", [Out(Pointer(DXGI_SWAP_CHAIN_DESC), "pDesc")], sideeffects=False),
StdMethod(HRESULT, "ResizeBuffers", [(UINT, "BufferCount"), (UINT, "Width"), (UINT, "Height"), (DXGI_FORMAT, "NewFormat"), (DXGI_SWAP_CHAIN_FLAG, "SwapChainFlags")]),
StdMethod(HRESULT, "ResizeTarget", [(Pointer(Const(DXGI_MODE_DESC)), "pNewTargetParameters")]),
StdMethod(HRESULT, "GetContainingOutput", [Out(Pointer(ObjPointer(IDXGIOutput)), "ppOutput")]),
StdMethod(HRESULT, "GetFrameStatistics", [Out(Pointer(DXGI_FRAME_STATISTICS), "pStats")], sideeffects=False),
StdMethod(HRESULT, "GetLastPresentCount", [Out(Pointer(UINT), "pLastPresentCount")], sideeffects=False),
]
DXGI_MWA = Flags(UINT, [
"DXGI_MWA_NO_WINDOW_CHANGES",
"DXGI_MWA_NO_ALT_ENTER",
"DXGI_MWA_NO_PRINT_SCREEN",
"DXGI_MWA_VALID",
])
IDXGIFactory.methods += [
StdMethod(HRESULT, "EnumAdapters", [(UINT, "Adapter"), Out(Pointer(ObjPointer(IDXGIAdapter)), "ppAdapter")]),
StdMethod(HRESULT, "MakeWindowAssociation", [(HWND, "WindowHandle"), (DXGI_MWA, "Flags")], sideeffects=False),
StdMethod(HRESULT, "GetWindowAssociation", [Out(Pointer(HWND), "pWindowHandle")], sideeffects=False),
StdMethod(HRESULT, "CreateSwapChain", [(ObjPointer(IUnknown), "pDevice"), (Pointer(DXGI_SWAP_CHAIN_DESC), "pDesc"), Out(Pointer(ObjPointer(IDXGISwapChain)), "ppSwapChain")]),
StdMethod(HRESULT, "CreateSoftwareAdapter", [(HMODULE, "Module"), Out(Pointer(ObjPointer(IDXGIAdapter)), "ppAdapter")]),
]
IDXGIDevice.methods += [
StdMethod(HRESULT, "GetAdapter", [Out(Pointer(ObjPointer(IDXGIAdapter)), "pAdapter")]),
StdMethod(HRESULT, "CreateSurface", [(Pointer(Const(DXGI_SURFACE_DESC)), "pDesc"), (UINT, "NumSurfaces"), (DXGI_USAGE, "Usage"), (Pointer(Const(DXGI_SHARED_RESOURCE)), "pSharedResource"), Out(Pointer(ObjPointer(IDXGISurface)), "ppSurface")]),
StdMethod(HRESULT, "QueryResourceResidency", [(Array(Const(ObjPointer(IUnknown)), "NumResources"), "ppResources"), Out(Array(DXGI_RESIDENCY, "NumResources"), "pResidencyStatus"), (UINT, "NumResources")], sideeffects=False),
StdMethod(HRESULT, "SetGPUThreadPriority", [(INT, "Priority")]),
StdMethod(HRESULT, "GetGPUThreadPriority", [Out(Pointer(INT), "pPriority")], sideeffects=False),
]
DXGI_ADAPTER_FLAG = FakeEnum(UINT, [
"DXGI_ADAPTER_FLAG_NONE",
"DXGI_ADAPTER_FLAG_REMOTE",
"DXGI_ADAPTER_FLAG_SOFTWARE",
])
DXGI_ADAPTER_DESC1 = Struct("DXGI_ADAPTER_DESC1", [
(WString, "Description"),
(UINT, "VendorId"),
(UINT, "DeviceId"),
(UINT, "SubSysId"),
(UINT, "Revision"),
(SIZE_T, "DedicatedVideoMemory"),
(SIZE_T, "DedicatedSystemMemory"),
(SIZE_T, "SharedSystemMemory"),
(LUID, "AdapterLuid"),
(DXGI_SWAP_CHAIN_FLAG, "Flags"),
])
DXGI_DISPLAY_COLOR_SPACE = Struct("DXGI_DISPLAY_COLOR_SPACE", [
(Array(Array(FLOAT, 8), 2), "PrimaryCoordinates"),
(Array(Array(FLOAT, 16), 2), "WhitePoints"),
])
IDXGIFactory1.methods += [
StdMethod(HRESULT, "EnumAdapters1", [(UINT, "Adapter"), Out(Pointer(ObjPointer(IDXGIAdapter1)), "ppAdapter")]),
StdMethod(BOOL, "IsCurrent", [], sideeffects=False),
]
IDXGIAdapter1.methods += [
StdMethod(HRESULT, "GetDesc1", [Out(Pointer(DXGI_ADAPTER_DESC1), "pDesc")], sideeffects=False),
]
IDXGIDevice1.methods += [
StdMethod(HRESULT, "SetMaximumFrameLatency", [(UINT, "MaxLatency")]),
StdMethod(HRESULT, "GetMaximumFrameLatency", [Out(Pointer(UINT), "pMaxLatency")], sideeffects=False),
]
dxgi = Module('dxgi')
dxgi.addInterfaces([
IDXGIKeyedMutex,
IDXGIFactory1,
IDXGIDevice1,
IDXGIAdapter1,
IDXGIResource,
])
dxgi.addFunctions([
StdFunction(HRESULT, "CreateDXGIFactory", [(REFIID, "riid"), Out(Pointer(ObjPointer(Void)), "ppFactory")]),
StdFunction(HRESULT, "CreateDXGIFactory1", [(REFIID, "riid"), Out(Pointer(ObjPointer(Void)), "ppFactory")]),
StdFunction(HRESULT, "DXGID3D10CreateDevice", [(HMODULE, "hModule"), (ObjPointer(IDXGIFactory), "pFactory"), (ObjPointer(IDXGIAdapter), "pAdapter"), (UINT, "Flags"), (OpaquePointer(Const(IUnknown)), "pUnknown"), Out(Pointer(ObjPointer(Void)), "ppDevice")], internal=True),
StdFunction(HRESULT, "DXGID3D10CreateLayeredDevice", [(UINT), (UINT), (UINT), (UINT), (UINT)], internal=True),
StdFunction(SIZE_T, "DXGID3D10GetLayeredDeviceSize", [(OpaqueArray(Const(Void), "NumLayers"), "pLayers"), (UINT, "NumLayers")], internal=True),
StdFunction(HRESULT, "DXGID3D10RegisterLayers", [(OpaqueArray(Const(Void), "NumLayers"), "pLayers"), (UINT, "NumLayers")], internal=True),
])
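#
# Reading aid (comment only, not part of the apitrace spec language): each
# StdMethod pairs a return type with (type, "name") argument tuples, so
#   StdMethod(HRESULT, "Present", [(UINT, "SyncInterval"), (DXGI_PRESENT, "Flags")])
# mirrors the C++ signature HRESULT Present(UINT SyncInterval, UINT Flags);
# sideeffects=False appears to mark read-only calls that a retracer can skip.
#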
#
# DXGI 1.2
#
IDXGIDisplayControl = Interface("IDXGIDisplayControl", IUnknown)
IDXGIDisplayControl.methods += [
StdMethod(BOOL, "IsStereoEnabled", [], sideeffects=False),
StdMethod(Void, "SetStereoEnabled", [(BOOL, "enabled")]),
]
DXGI_OUTDUPL_MOVE_RECT = Struct("DXGI_OUTDUPL_MOVE_RECT", [
(POINT, "SourcePoint"),
(RECT, "DestinationRect"),
])
DXGI_OUTDUPL_DESC = Struct("DXGI_OUTDUPL_DESC", [
(DXGI_MODE_DESC, "ModeDesc"),
(DXGI_MODE_ROTATION, "Rotation"),
(BOOL, "DesktopImageInSystemMemory"),
])
DXGI_OUTDUPL_POINTER_POSITION = Struct("DXGI_OUTDUPL_POINTER_POSITION", [
(POINT, "Position"),
(BOOL, "Visible"),
])
DXGI_OUTDUPL_POINTER_SHAPE_TYPE = Enum("DXGI_OUTDUPL_POINTER_SHAPE_TYPE", [
"DXGI_OUTDUPL_POINTER_SHAPE_TYPE_MONOCHROME",
"DXGI_OUTDUPL_POINTER_SHAPE_TYPE_COLOR",
"DXGI_OUTDUPL_POINTER_SHAPE_TYPE_MASKED_COLOR",
])
DXGI_OUTDUPL_POINTER_SHAPE_INFO = Struct("DXGI_OUTDUPL_POINTER_SHAPE_INFO", [
(UINT, "Type"),
(UINT, "Width"),
(UINT, "Height"),
(UINT, "Pitch"),
(POINT, "HotSpot"),
])
DXGI_OUTDUPL_FRAME_INFO = Struct("DXGI_OUTDUPL_FRAME_INFO", [
(LARGE_INTEGER, "LastPresentTime"),
(LARGE_INTEGER, "LastMouseUpdateTime"),
(UINT, "AccumulatedFrames"),
(BOOL, "RectsCoalesced"),
(BOOL, "ProtectedContentMaskedOut"),
(DXGI_OUTDUPL_POINTER_POSITION, "PointerPosition"),
(UINT, "TotalMetadataBufferSize"),
(UINT, "PointerShapeBufferSize"),
])
IDXGIOutputDuplication = Interface("IDXGIOutputDuplication", IDXGIObject)
IDXGIOutputDuplication.methods += [
StdMethod(Void, "GetDesc", [Out(Pointer(DXGI_OUTDUPL_DESC), "pDesc")], sideeffects=False),
StdMethod(HRESULT, "AcquireNextFrame", [(UINT, "TimeoutInMilliseconds"), Out(Pointer(DXGI_OUTDUPL_FRAME_INFO), "pFrameInfo"), Out(Pointer(ObjPointer(IDXGIResource)), "ppDesktopResource")]),
StdMethod(HRESULT, "GetFrameDirtyRects", [(UINT, "DirtyRectsBufferSize"), Out(Array(RECT, "DirtyRectsBufferSize"), "pDirtyRectsBuffer"), Out(Pointer(UINT), "pDirtyRectsBufferSizeRequired")], sideeffects=False),
StdMethod(HRESULT, "GetFrameMoveRects", [(UINT, "MoveRectsBufferSize"), Out(Array(DXGI_OUTDUPL_MOVE_RECT, "MoveRectsBufferSize"), "pMoveRectBuffer"), Out(Pointer(UINT), "pMoveRectsBufferSizeRequired")], sideeffects=False),
StdMethod(HRESULT, "GetFramePointerShape", [(UINT, "PointerShapeBufferSize"), Out(OpaqueBlob(Void, "PointerShapeBufferSize"), "pPointerShapeBuffer"), Out(Pointer(UINT), "pPointerShapeBufferSizeRequired"), Out(Pointer(DXGI_OUTDUPL_POINTER_SHAPE_INFO), "pPointerShapeInfo")], sideeffects=False),
StdMethod(HRESULT, "MapDesktopSurface", [Out(Pointer(DXGI_MAPPED_RECT), "pLockedRect")], sideeffects=False),
StdMethod(HRESULT, "UnMapDesktopSurface", [], sideeffects=False),
StdMethod(HRESULT, "ReleaseFrame", []),
]
DXGI_ALPHA_MODE = Enum("DXGI_ALPHA_MODE", [
"DXGI_ALPHA_MODE_UNSPECIFIED",
"DXGI_ALPHA_MODE_PREMULTIPLIED",
"DXGI_ALPHA_MODE_STRAIGHT",
"DXGI_ALPHA_MODE_IGNORE",
])
IDXGISurface2 = Interface("IDXGISurface2", IDXGISurface1)
IDXGISurface2.methods += [
StdMethod(HRESULT, "GetResource", [(REFIID, "riid"), Out(Pointer(ObjPointer(Void)), "ppParentResource"), Out(Pointer(UINT), "pSubresourceIndex")]),
]
DXGI_SHARED_RESOURCE_FLAG = Flags(DWORD, [
"DXGI_SHARED_RESOURCE_READ",
"DXGI_SHARED_RESOURCE_WRITE",
])
IDXGIResource1 = Interface("IDXGIResource1", IDXGIResource)
IDXGIResource1.methods += [
StdMethod(HRESULT, "CreateSubresourceSurface", [(UINT, "index"), Out(Pointer(ObjPointer(IDXGISurface2)), "ppSurface")]),
StdMethod(HRESULT, "CreateSharedHandle", [(Pointer(Const(SECURITY_ATTRIBUTES)), "pAttributes"), (DXGI_SHARED_RESOURCE_FLAG, "dwAccess"), (LPCWSTR, "lpName"), Out(Pointer(HANDLE), "pHandle")]),
]
DXGI_OFFER_RESOURCE_PRIORITY = Enum("DXGI_OFFER_RESOURCE_PRIORITY", [
"DXGI_OFFER_RESOURCE_PRIORITY_LOW",
"DXGI_OFFER_RESOURCE_PRIORITY_NORMAL",
"DXGI_OFFER_RESOURCE_PRIORITY_HIGH",
])
IDXGIDevice2 = Interface("IDXGIDevice2", IDXGIDevice1)
IDXGIDevice2.methods += [
StdMethod(HRESULT, "OfferResources", [(UINT, "NumResources"), (Array(Const(ObjPointer(IDXGIResource)), "NumResources"), "ppResources"), (DXGI_OFFER_RESOURCE_PRIORITY, "Priority")]),
StdMethod(HRESULT, "ReclaimResources", [(UINT, "NumResources"), (Array(Const(ObjPointer(IDXGIResource)), "NumResources"), "ppResources"), Out(Pointer(BOOL), "pDiscarded")]),
StdMethod(HRESULT, "EnqueueSetEvent", [(HANDLE, "hEvent")], sideeffects=False),
]
DXGI_MODE_DESC1 = Struct("DXGI_MODE_DESC1", [
(UINT, "Width"),
(UINT, "Height"),
(DXGI_RATIONAL, "RefreshRate"),
(DXGI_FORMAT, "Format"),
(DXGI_MODE_SCANLINE_ORDER, "ScanlineOrdering"),
(DXGI_MODE_SCALING, "Scaling"),
(BOOL, "Stereo"),
])
DXGI_SCALING = Enum("DXGI_SCALING", [
"DXGI_SCALING_STRETCH",
"DXGI_SCALING_NONE",
"DXGI_SCALING_ASPECT_RATIO_STRETCH",
])
DXGI_SWAP_CHAIN_DESC1 = Struct("DXGI_SWAP_CHAIN_DESC1", [
(UINT, "Width"),
(UINT, "Height"),
(DXGI_FORMAT, "Format"),
(BOOL, "Stereo"),
(DXGI_SAMPLE_DESC, "SampleDesc"),
(DXGI_USAGE, "BufferUsage"),
(UINT, "BufferCount"),
(DXGI_SCALING, "Scaling"),
(DXGI_SWAP_EFFECT, "SwapEffect"),
(DXGI_ALPHA_MODE, "AlphaMode"),
(DXGI_SWAP_CHAIN_FLAG, "Flags"),
])
DXGI_SWAP_CHAIN_FULLSCREEN_DESC = Struct("DXGI_SWAP_CHAIN_FULLSCREEN_DESC", [
(DXGI_RATIONAL, "RefreshRate"),
(DXGI_MODE_SCANLINE_ORDER, "ScanlineOrdering"),
(DXGI_MODE_SCALING, "Scaling"),
(BOOL, "Windowed"),
])
DXGI_PRESENT_PARAMETERS = Struct("DXGI_PRESENT_PARAMETERS", [
(UINT, "DirtyRectsCount"),
(Array(RECT, "{self}.DirtyRectsCount"), "pDirtyRects"),
(Pointer(RECT), "pScrollRect"),
(Pointer(POINT), "pScrollOffset"),
])
IDXGISwapChain1 = Interface("IDXGISwapChain1", IDXGISwapChain)
IDXGISwapChain1.methods += [
StdMethod(HRESULT, "GetDesc1", [(Out(Pointer(DXGI_SWAP_CHAIN_DESC1), "pDesc"))], sideeffects=False),
StdMethod(HRESULT, "GetFullscreenDesc", [(Out(Pointer(DXGI_SWAP_CHAIN_FULLSCREEN_DESC), "pDesc"))], sideeffects=False),
StdMethod(HRESULT, "GetHwnd", [(Out(Pointer(HWND), "pHwnd"))], sideeffects=False),
StdMethod(HRESULT, "GetCoreWindow", [(REFIID, "riid"), (Out(Pointer(ObjPointer(Void)), "ppUnk"))]),
StdMethod(HRESULT, "Present1", [(UINT, "SyncInterval"), (DXGI_PRESENT, "Flags"), (Pointer(Const(DXGI_PRESENT_PARAMETERS)), "pPresentParameters")]),
StdMethod(BOOL, "IsTemporaryMonoSupported", [], sideeffects=False),
StdMethod(HRESULT, "GetRestrictToOutput", [(Out(Pointer(ObjPointer(IDXGIOutput)), "ppRestrictToOutput"))]),
StdMethod(HRESULT, "SetBackgroundColor", [(Pointer(Const(DXGI_RGBA)), "pColor")]),
StdMethod(HRESULT, "GetBackgroundColor", [(Out(Pointer(DXGI_RGBA), "pColor"))], sideeffects=False),
StdMethod(HRESULT, "SetRotation", [(DXGI_MODE_ROTATION, "Rotation")]),
StdMethod(HRESULT, "GetRotation", [(Out(Pointer(DXGI_MODE_ROTATION), "pRotation"))], sideeffects=False),
]
IDXGIFactory2 = Interface("IDXGIFactory2", IDXGIFactory1)
IDXGIFactory2.methods += [
StdMethod(BOOL, "IsWindowedStereoEnabled", [], sideeffects=False),
StdMethod(HRESULT, "CreateSwapChainForHwnd", [(ObjPointer(IUnknown), "pDevice"), (HWND, "hWnd"), (Pointer(Const(DXGI_SWAP_CHAIN_DESC1)), "pDesc"), (Pointer(Const(DXGI_SWAP_CHAIN_FULLSCREEN_DESC)), "pFullscreenDesc"), (ObjPointer(IDXGIOutput), "pRestrictToOutput"), Out(Pointer(ObjPointer(IDXGISwapChain1)), "ppSwapChain")]),
StdMethod(HRESULT, "CreateSwapChainForCoreWindow", [(ObjPointer(IUnknown), "pDevice"), (ObjPointer(IUnknown), "pWindow"), (Pointer(Const(DXGI_SWAP_CHAIN_DESC1)), "pDesc"), (ObjPointer(IDXGIOutput), "pRestrictToOutput"), Out(Pointer(ObjPointer(IDXGISwapChain1)), "ppSwapChain")]),
StdMethod(HRESULT, "GetSharedResourceAdapterLuid", [(HANDLE, "hResource"), Out(Pointer(LUID), "pLuid")], sideeffects=False),
StdMethod(HRESULT, "RegisterStereoStatusWindow", [(HWND, "WindowHandle"), (UINT, "wMsg"), Out(Pointer(DWORD), "pdwCookie")], sideeffects=False),
StdMethod(HRESULT, "RegisterStereoStatusEvent", [(HANDLE, "hEvent"), Out(Pointer(DWORD), "pdwCookie")], sideeffects=False),
StdMethod(Void, "UnregisterStereoStatus", [(DWORD, "dwCookie")], sideeffects=False),
StdMethod(HRESULT, "RegisterOcclusionStatusWindow", [(HWND, "WindowHandle"), (UINT, "wMsg"), Out(Pointer(DWORD), "pdwCookie")], sideeffects=False),
StdMethod(HRESULT, "RegisterOcclusionStatusEvent", [(HANDLE, "hEvent"), Out(Pointer(DWORD), "pdwCookie")], sideeffects=False),
StdMethod(Void, "UnregisterOcclusionStatus", [(DWORD, "dwCookie")], sideeffects=False),
StdMethod(HRESULT, "CreateSwapChainForComposition", [(ObjPointer(IUnknown), "pDevice"), (Pointer(Const(DXGI_SWAP_CHAIN_DESC1)), "pDesc"), (ObjPointer(IDXGIOutput), "pRestrictToOutput"), Out(Pointer(ObjPointer(IDXGISwapChain1)), "ppSwapChain")]),
]
DXGI_GRAPHICS_PREEMPTION_GRANULARITY = Enum("DXGI_GRAPHICS_PREEMPTION_GRANULARITY", [
"DXGI_GRAPHICS_PREEMPTION_DMA_BUFFER_BOUNDARY",
"DXGI_GRAPHICS_PREEMPTION_PRIMITIVE_BOUNDARY",
"DXGI_GRAPHICS_PREEMPTION_TRIANGLE_BOUNDARY",
"DXGI_GRAPHICS_PREEMPTION_PIXEL_BOUNDARY",
"DXGI_GRAPHICS_PREEMPTION_INSTRUCTION_BOUNDARY",
])
DXGI_COMPUTE_PREEMPTION_GRANULARITY = Enum("DXGI_COMPUTE_PREEMPTION_GRANULARITY", [
"DXGI_COMPUTE_PREEMPTION_DMA_BUFFER_BOUNDARY",
"DXGI_COMPUTE_PREEMPTION_DISPATCH_BOUNDARY",
"DXGI_COMPUTE_PREEMPTION_THREAD_GROUP_BOUNDARY",
"DXGI_COMPUTE_PREEMPTION_THREAD_BOUNDARY",
"DXGI_COMPUTE_PREEMPTION_INSTRUCTION_BOUNDARY",
])
DXGI_ADAPTER_DESC2 = Struct("DXGI_ADAPTER_DESC2", [
(WString, "Description"),
(UINT, "VendorId"),
(UINT, "DeviceId"),
(UINT, "SubSysId"),
(UINT, "Revision"),
(SIZE_T, "DedicatedVideoMemory"),
(SIZE_T, "DedicatedSystemMemory"),
(SIZE_T, "SharedSystemMemory"),
(LUID, "AdapterLuid"),
(DXGI_ADAPTER_FLAG, "Flags"),
(DXGI_GRAPHICS_PREEMPTION_GRANULARITY, "GraphicsPreemptionGranularity"),
(DXGI_COMPUTE_PREEMPTION_GRANULARITY, "ComputePreemptionGranularity"),
])
IDXGIAdapter2 = Interface("IDXGIAdapter2", IDXGIAdapter1)
IDXGIAdapter2.methods += [
StdMethod(HRESULT, "GetDesc2", [Out(Pointer(DXGI_ADAPTER_DESC2), "pDesc")], sideeffects=False),
]
IDXGIOutput1 = Interface("IDXGIOutput1", IDXGIOutput)
IDXGIOutput1.methods += [
StdMethod(HRESULT, "GetDisplayModeList1", [(DXGI_FORMAT, "EnumFormat"), (DXGI_ENUM_MODES, "Flags"), InOut(Pointer(UINT), "pNumModes"), Out(Array(DXGI_MODE_DESC1, "*pNumModes"), "pDesc")], sideeffects=False),
StdMethod(HRESULT, "FindClosestMatchingMode1", [(Pointer(Const(DXGI_MODE_DESC1)), "pModeToMatch"), Out(Pointer(DXGI_MODE_DESC1), "pClosestMatch"), (ObjPointer(IUnknown), "pConcernedDevice")], sideeffects=False),
StdMethod(HRESULT, "GetDisplaySurfaceData1", [(ObjPointer(IDXGIResource), "pDestination")]),
StdMethod(HRESULT, "DuplicateOutput", [(ObjPointer(IUnknown), "pDevice"), Out(Pointer(ObjPointer(IDXGIOutputDuplication)), "ppOutputDuplication")]),
]
dxgi.addInterfaces([
IDXGIDisplayControl,
IDXGIDevice2,
IDXGISwapChain1,
IDXGIFactory2,
IDXGIResource1,
IDXGIAdapter2,
IDXGIOutput1,
])
#
# DXGI 1.3
#
DXGI_CREATE_FACTORY_FLAGS = Flags(UINT, [
"DXGI_CREATE_FACTORY_DEBUG",
])
dxgi.addFunctions([
StdFunction(HRESULT, "CreateDXGIFactory2", [(DXGI_CREATE_FACTORY_FLAGS, "Flags"), (REFIID, "riid"), Out(Pointer(ObjPointer(Void)), "ppFactory")]),
])
IDXGIDevice3 = Interface("IDXGIDevice3", IDXGIDevice2)
IDXGIDevice3.methods += [
StdMethod(Void, "Trim", []),
]
DXGI_MATRIX_3X2_F = Struct("DXGI_MATRIX_3X2_F", [
(FLOAT, "_11"),
(FLOAT, "_12"),
(FLOAT, "_21"),
(FLOAT, "_22"),
(FLOAT, "_31"),
(FLOAT, "_32"),
])
IDXGISwapChain2 = Interface("IDXGISwapChain2", IDXGISwapChain1)
IDXGISwapChain2.methods += [
StdMethod(HRESULT, "SetSourceSize", [(UINT, "Width"), (UINT, "Height")]),
StdMethod(HRESULT, "GetSourceSize", [Out(Pointer(UINT), "pWidth"), Out(Pointer(UINT), "pHeight")], sideeffects=False),
StdMethod(HRESULT, "SetMaximumFrameLatency", [(UINT, "MaxLatency")]),
StdMethod(HRESULT, "GetMaximumFrameLatency", [Out(Pointer(UINT), "pMaxLatency")], sideeffects=False),
StdMethod(HANDLE, "GetFrameLatencyWaitableObject", [], sideeffects=False),
StdMethod(HRESULT, "SetMatrixTransform", [(Pointer(Const(DXGI_MATRIX_3X2_F)), "pMatrix")]),
StdMethod(HRESULT, "GetMatrixTransform", [Out(Pointer(DXGI_MATRIX_3X2_F), "pMatrix")], sideeffects=False),
]
IDXGIOutput2 = Interface("IDXGIOutput2", IDXGIOutput1)
IDXGIOutput2.methods += [
StdMethod(BOOL, "SupportsOverlays", [], sideeffects=False),
]
IDXGIFactory3 = Interface("IDXGIFactory3", IDXGIFactory2)
IDXGIFactory3.methods += [
StdMethod(DXGI_CREATE_FACTORY_FLAGS, "GetCreationFlags", [], sideeffects=False),
]
DXGI_DECODE_SWAP_CHAIN_DESC = Struct("DXGI_DECODE_SWAP_CHAIN_DESC", [
(UINT, "Flags"),
])
# XXX: Flags: this should probably be declared as Flags rather than Enum
DXGI_MULTIPLANE_OVERLAY_YCbCr_FLAGS = Enum("DXGI_MULTIPLANE_OVERLAY_YCbCr_FLAGS", [
"DXGI_MULTIPLANE_OVERLAY_YCbCr_FLAG_NOMINAL_RANGE",
"DXGI_MULTIPLANE_OVERLAY_YCbCr_FLAG_BT709",
"DXGI_MULTIPLANE_OVERLAY_YCbCr_FLAG_xvYCC",
])
IDXGIDecodeSwapChain = Interface("IDXGIDecodeSwapChain", IUnknown)
IDXGIDecodeSwapChain.methods += [
StdMethod(HRESULT, "PresentBuffer", [(UINT, "BufferToPresent"), (UINT, "SyncInterval"), (DXGI_PRESENT, "Flags")]),
StdMethod(HRESULT, "SetSourceRect", [(Pointer(Const(RECT)), "pRect")]),
StdMethod(HRESULT, "SetTargetRect", [(Pointer(Const(RECT)), "pRect")]),
StdMethod(HRESULT, "SetDestSize", [(UINT, "Width"), (UINT, "Height")]),
StdMethod(HRESULT, "GetSourceRect", [Out(Pointer(RECT), "pRect")], sideeffects=False),
StdMethod(HRESULT, "GetTargetRect", [Out(Pointer(RECT), "pRect")], sideeffects=False),
StdMethod(HRESULT, "GetDestSize", [Out(Pointer(UINT), "pWidth"), Out(Pointer(UINT), "pHeight")], sideeffects=False),
StdMethod(HRESULT, "SetColorSpace", [(DXGI_MULTIPLANE_OVERLAY_YCbCr_FLAGS, "ColorSpace")]),
StdMethod(DXGI_MULTIPLANE_OVERLAY_YCbCr_FLAGS, "GetColorSpace", [], sideeffects=False),
]
IDXGIFactoryMedia = Interface("IDXGIFactoryMedia", IUnknown)
IDXGIFactoryMedia.methods += [
StdMethod(HRESULT, "CreateSwapChainForCompositionSurfaceHandle", [(ObjPointer(IUnknown), "pDevice"), (HANDLE, "hSurface"), (Pointer(Const(DXGI_SWAP_CHAIN_DESC1)), "pDesc"), (ObjPointer(IDXGIOutput), "pRestrictToOutput"), Out(Pointer(ObjPointer(IDXGISwapChain1)), "ppSwapChain")]),
StdMethod(HRESULT, "CreateDecodeSwapChainForCompositionSurfaceHandle", [(ObjPointer(IUnknown), "pDevice"), (HANDLE, "hSurface"), (Pointer(DXGI_DECODE_SWAP_CHAIN_DESC), "pDesc"), (ObjPointer(IDXGIResource), "pYuvDecodeBuffers"), (ObjPointer(IDXGIOutput), "pRestrictToOutput"), Out(Pointer(ObjPointer(IDXGIDecodeSwapChain)), "ppSwapChain")]),
]
DXGI_FRAME_PRESENTATION_MODE = Enum("DXGI_FRAME_PRESENTATION_MODE", [
"DXGI_FRAME_PRESENTATION_MODE_COMPOSED",
"DXGI_FRAME_PRESENTATION_MODE_OVERLAY",
"DXGI_FRAME_PRESENTATION_MODE_NONE",
])
DXGI_FRAME_STATISTICS_MEDIA = Struct("DXGI_FRAME_STATISTICS_MEDIA", [
(UINT, "PresentCount"),
(UINT, "PresentRefreshCount"),
(UINT, "SyncRefreshCount"),
(LARGE_INTEGER, "SyncQPCTime"),
(LARGE_INTEGER, "SyncGPUTime"),
(DXGI_FRAME_PRESENTATION_MODE, "CompositionMode"),
(UINT, "ApprovedPresentDuration"),
])
IDXGISwapChainMedia = Interface("IDXGISwapChainMedia", IUnknown)
IDXGISwapChainMedia.methods += [
StdMethod(HRESULT, "GetFrameStatisticsMedia", [Out(Pointer(DXGI_FRAME_STATISTICS_MEDIA), "pStats")], sideeffects=False),
StdMethod(HRESULT, "SetPresentDuration", [(UINT, "Duration")]),
StdMethod(HRESULT, "CheckPresentDurationSupport", [(UINT, "DesiredPresentDuration"), Out(Pointer(UINT), "pClosestSmallerPresentDuration"), Out(Pointer(UINT), "pClosestLargerPresentDuration")], sideeffects=False),
]
DXGI_OVERLAY_SUPPORT_FLAG = FakeEnum(UINT, [
"DXGI_OVERLAY_SUPPORT_FLAG_DIRECT",
"DXGI_OVERLAY_SUPPORT_FLAG_SCALING",
])
IDXGIOutput3 = Interface("IDXGIOutput3", IDXGIOutput2)
IDXGIOutput3.methods += [
StdMethod(HRESULT, "CheckOverlaySupport", [(DXGI_FORMAT, "EnumFormat"), (ObjPointer(IUnknown), "pConcernedDevice"), Out(Pointer(DXGI_OVERLAY_SUPPORT_FLAG), "pFlags")], sideeffects=False),
]
dxgi.addInterfaces([
IDXGIDevice3,
IDXGISwapChain2,
IDXGISwapChainMedia,
IDXGIOutput3,
IDXGIFactory3,
IDXGIFactoryMedia,
])
#
# Undocumented interfaces
#
IDXGIFactoryDWM = Interface("IDXGIFactoryDWM", IUnknown)
IDXGISwapChainDWM = Interface("IDXGISwapChainDWM", IDXGIDeviceSubObject)
IDXGIFactoryDWM.methods += [
StdMethod(HRESULT, "CreateSwapChain", [(ObjPointer(IUnknown), "pDevice"), (Pointer(DXGI_SWAP_CHAIN_DESC), "pDesc"), (ObjPointer(IDXGIOutput), "pOutput"), Out(Pointer(ObjPointer(IDXGISwapChainDWM)), "ppSwapChain")]),
]
# http://shchetinin.blogspot.co.uk/2012/04/dwm-graphics-directx-win8win7.html
IDXGISwapChainDWM.methods += [
StdMethod(HRESULT, "Present", [(UINT, "SyncInterval"), (DXGI_PRESENT, "Flags")]),
StdMethod(HRESULT, "GetBuffer", [(UINT, "Buffer"), (REFIID, "riid"), Out(Pointer(ObjPointer(Void)), "ppSurface")]),
StdMethod(HRESULT, "GetDesc", [Out(Pointer(DXGI_SWAP_CHAIN_DESC), "pDesc")], sideeffects=False),
StdMethod(HRESULT, "ResizeBuffers", [(UINT, "BufferCount"), (UINT, "Width"), (UINT, "Height"), (DXGI_FORMAT, "NewFormat"), (DXGI_SWAP_CHAIN_FLAG, "SwapChainFlags")]),
StdMethod(HRESULT, "ResizeTarget", [(Pointer(Const(DXGI_MODE_DESC)), "pNewTargetParameters")]),
StdMethod(HRESULT, "GetContainingOutput", [Out(Pointer(ObjPointer(IDXGIOutput)), "ppOutput")]),
StdMethod(HRESULT, "GetFrameStatistics", [Out(Pointer(DXGI_FRAME_STATISTICS), "pStats")], sideeffects=False),
StdMethod(HRESULT, "GetLastPresentCount", [Out(Pointer(UINT), "pLastPresentCount")], sideeffects=False),
]
dxgi.addInterfaces([
IDXGIFactoryDWM,
])
#
# DXGI 1.4
#
DXGI_COLOR_SPACE_TYPE = Enum('DXGI_COLOR_SPACE_TYPE', [
'DXGI_COLOR_SPACE_RGB_FULL_G22_NONE_P709',
'DXGI_COLOR_SPACE_RGB_FULL_G10_NONE_P709',
'DXGI_COLOR_SPACE_RGB_STUDIO_G22_NONE_P709',
'DXGI_COLOR_SPACE_RGB_STUDIO_G22_NONE_P2020',
'DXGI_COLOR_SPACE_RESERVED',
'DXGI_COLOR_SPACE_YCBCR_FULL_G22_NONE_P709_X601',
'DXGI_COLOR_SPACE_YCBCR_STUDIO_G22_LEFT_P601',
'DXGI_COLOR_SPACE_YCBCR_FULL_G22_LEFT_P601',
'DXGI_COLOR_SPACE_YCBCR_STUDIO_G22_LEFT_P709',
'DXGI_COLOR_SPACE_YCBCR_FULL_G22_LEFT_P709',
'DXGI_COLOR_SPACE_YCBCR_STUDIO_G22_LEFT_P2020',
'DXGI_COLOR_SPACE_YCBCR_FULL_G22_LEFT_P2020',
'DXGI_COLOR_SPACE_CUSTOM',
])
DXGI_SWAP_CHAIN_COLOR_SPACE_SUPPORT_FLAG = Enum('DXGI_SWAP_CHAIN_COLOR_SPACE_SUPPORT_FLAG', [
'DXGI_SWAP_CHAIN_COLOR_SPACE_SUPPORT_FLAG_PRESENT',
'DXGI_SWAP_CHAIN_COLOR_SPACE_SUPPORT_FLAG_OVERLAY_PRESENT',
])
DXGI_OVERLAY_COLOR_SPACE_SUPPORT_FLAG = Enum('DXGI_OVERLAY_COLOR_SPACE_SUPPORT_FLAG', [
'DXGI_OVERLAY_COLOR_SPACE_SUPPORT_FLAG_PRESENT',
])
DXGI_MEMORY_SEGMENT_GROUP = Enum('DXGI_MEMORY_SEGMENT_GROUP', [
'DXGI_MEMORY_SEGMENT_GROUP_LOCAL',
'DXGI_MEMORY_SEGMENT_GROUP_NON_LOCAL',
])
DXGI_QUERY_VIDEO_MEMORY_INFO = Struct('DXGI_QUERY_VIDEO_MEMORY_INFO', [
(UINT64, 'Budget'),
(UINT64, 'CurrentUsage'),
(UINT64, 'AvailableForReservation'),
(UINT64, 'CurrentReservation'),
])
IDXGISwapChain3 = Interface('IDXGISwapChain3', IDXGISwapChain2)
IDXGIOutput4 = Interface('IDXGIOutput4', IDXGIOutput3)
IDXGIFactory4 = Interface('IDXGIFactory4', IDXGIFactory3)
IDXGIAdapter3 = Interface('IDXGIAdapter3', IDXGIAdapter2)
IDXGISwapChain3.methods += [
StdMethod(UINT, 'GetCurrentBackBufferIndex', []),
StdMethod(HRESULT, 'CheckColorSpaceSupport', [(DXGI_COLOR_SPACE_TYPE, 'ColorSpace'), Out(Pointer(UINT), 'pColorSpaceSupport')], sideeffects=False),
StdMethod(HRESULT, 'SetColorSpace1', [(DXGI_COLOR_SPACE_TYPE, 'ColorSpace')]),
StdMethod(HRESULT, 'ResizeBuffers1', [(UINT, 'BufferCount'), (UINT, 'Width'), (UINT, 'Height'), (DXGI_FORMAT, 'Format'), (DXGI_SWAP_CHAIN_FLAG, 'SwapChainFlags'), (Pointer(Const(UINT)), 'pCreationNodeMask'), (Array(Const(ObjPointer(IUnknown)), 'BufferCount'), 'ppPresentQueue')]),
]
IDXGIOutput4.methods += [
StdMethod(HRESULT, 'CheckOverlayColorSpaceSupport', [(DXGI_FORMAT, 'Format'), (DXGI_COLOR_SPACE_TYPE, 'ColorSpace'), (ObjPointer(IUnknown), 'pConcernedDevice'), Out(Pointer(UINT), 'pFlags')], sideeffects=False),
]
IDXGIFactory4.methods += [
StdMethod(HRESULT, 'EnumAdapterByLuid', [(LUID, 'AdapterLuid'), (REFIID, 'riid'), Out(Pointer(ObjPointer(Void)), 'ppvAdapter')]),
StdMethod(HRESULT, 'EnumWarpAdapter', [(REFIID, 'riid'), Out(Pointer(ObjPointer(Void)), 'ppvAdapter')]),
]
IDXGIAdapter3.methods += [
StdMethod(HRESULT, 'RegisterHardwareContentProtectionTeardownStatusEvent', [(HANDLE, 'hEvent'), Out(Pointer(DWORD), 'pdwCookie')], sideeffects=False),
StdMethod(Void, 'UnregisterHardwareContentProtectionTeardownStatus', [(DWORD, 'dwCookie')], sideeffects=False),
StdMethod(HRESULT, 'QueryVideoMemoryInfo', [(UINT, 'NodeIndex'), (DXGI_MEMORY_SEGMENT_GROUP, 'MemorySegmentGroup'), Out(Pointer(DXGI_QUERY_VIDEO_MEMORY_INFO), 'pVideoMemoryInfo')], sideeffects=False),
StdMethod(HRESULT, 'SetVideoMemoryReservation', [(UINT, 'NodeIndex'), (DXGI_MEMORY_SEGMENT_GROUP, 'MemorySegmentGroup'), (UINT64, 'Reservation')]),
StdMethod(HRESULT, 'RegisterVideoMemoryBudgetChangeNotificationEvent', [(HANDLE, 'hEvent'), Out(Pointer(DWORD), 'pdwCookie')], sideeffects=False),
StdMethod(Void, 'UnregisterVideoMemoryBudgetChangeNotification', [(DWORD, 'dwCookie')], sideeffects=False),
]
dxgi.addInterfaces([
IDXGISwapChain3,
IDXGIOutput4,
IDXGIFactory4,
IDXGIAdapter3,
])
#
# DXGI 1.5
#
DXGI_HDR_METADATA_TYPE = Enum('DXGI_HDR_METADATA_TYPE', [
'DXGI_HDR_METADATA_TYPE_NONE',
'DXGI_HDR_METADATA_TYPE_HDR10',
])
DXGI_HDR_METADATA_HDR10 = Struct('DXGI_HDR_METADATA_HDR10', [
(Array(UINT16, 2), 'RedPrimary'),
(Array(UINT16, 2), 'GreenPrimary'),
(Array(UINT16, 2), 'BluePrimary'),
(Array(UINT16, 2), 'WhitePoint'),
(UINT, 'MaxMasteringLuminance'),
(UINT, 'MinMasteringLuminance'),
(UINT16, 'MaxContentLightLevel'),
(UINT16, 'MaxFrameAverageLightLevel'),
])
DXGI_OFFER_RESOURCE_FLAGS = FakeEnum(UINT, [
'DXGI_OFFER_RESOURCE_FLAG_ALLOW_DECOMMIT',
])
DXGI_RECLAIM_RESOURCE_RESULTS = Enum('DXGI_RECLAIM_RESOURCE_RESULTS', [
'DXGI_RECLAIM_RESOURCE_RESULT_OK',
'DXGI_RECLAIM_RESOURCE_RESULT_DISCARDED',
'DXGI_RECLAIM_RESOURCE_RESULT_NOT_COMMITTED',
])
DXGI_FEATURE, DXGI_FEATURE_DATA = EnumPolymorphic('DXGI_FEATURE', 'Feature', [
('DXGI_FEATURE_PRESENT_ALLOW_TEARING', Pointer(BOOL)),
], Blob(Void, "FeatureSupportDataSize"), False)
IDXGIOutput5 = Interface('IDXGIOutput5', IDXGIOutput4)
IDXGISwapChain4 = Interface('IDXGISwapChain4', IDXGISwapChain3)
IDXGIDevice4 = Interface('IDXGIDevice4', IDXGIDevice3)
IDXGIFactory5 = Interface('IDXGIFactory5', IDXGIFactory4)
IDXGIOutput5.methods += [
StdMethod(HRESULT, 'DuplicateOutput1', [(ObjPointer(IUnknown), 'pDevice'), (UINT, 'Flags'), (UINT, 'SupportedFormatsCount'), (Array(Const(DXGI_FORMAT), 'SupportedFormatsCount'), 'pSupportedFormats'), Out(Pointer(ObjPointer(IDXGIOutputDuplication)), 'ppOutputDuplication')]),
]
IDXGISwapChain4.methods += [
StdMethod(HRESULT, 'SetHDRMetaData', [(DXGI_HDR_METADATA_TYPE, 'Type'), (UINT, 'Size'), (Blob(Void, 'Size'), 'pMetaData')]),
]
IDXGIDevice4.methods += [
StdMethod(HRESULT, 'OfferResources1', [(UINT, 'NumResources'), (Array(Const(ObjPointer(IDXGIResource)), 'NumResources'), 'ppResources'), (DXGI_OFFER_RESOURCE_PRIORITY, 'Priority'), (DXGI_OFFER_RESOURCE_FLAGS, 'Flags')]),
StdMethod(HRESULT, 'ReclaimResources1', [(UINT, 'NumResources'), (Array(Const(ObjPointer(IDXGIResource)), 'NumResources'), 'ppResources'), Out(Array(DXGI_RECLAIM_RESOURCE_RESULTS, 'NumResources'), 'pResults')]),
]
IDXGIFactory5.methods += [
StdMethod(HRESULT, 'CheckFeatureSupport', [(DXGI_FEATURE, 'Feature'), Out(DXGI_FEATURE_DATA, 'pFeatureSupportData'), (UINT, 'FeatureSupportDataSize')], sideeffects=False),
]
dxgi.addInterfaces([
IDXGIOutput5,
IDXGISwapChain4,
IDXGIDevice4,
IDXGIFactory5,
])
#
# DXGI 1.6
#
DXGI_ADAPTER_FLAG3 = Enum('DXGI_ADAPTER_FLAG3', [
'DXGI_ADAPTER_FLAG3_NONE',
'DXGI_ADAPTER_FLAG3_REMOTE',
'DXGI_ADAPTER_FLAG3_SOFTWARE',
'DXGI_ADAPTER_FLAG3_ACG_COMPATIBLE',
'DXGI_ADAPTER_FLAG3_FORCE_DWORD',
'DXGI_ADAPTER_FLAG3_SUPPORT_MONITORED_FENCES',
'DXGI_ADAPTER_FLAG3_SUPPORT_NON_MONITORED_FENCES',
'DXGI_ADAPTER_FLAG3_KEYED_MUTEX_CONFORMANCE',
])
DXGI_ADAPTER_DESC3 = Struct('DXGI_ADAPTER_DESC3', [
(WString, 'Description'),
(UINT, 'VendorId'),
(UINT, 'DeviceId'),
(UINT, 'SubSysId'),
(UINT, 'Revision'),
(SIZE_T, 'DedicatedVideoMemory'),
(SIZE_T, 'DedicatedSystemMemory'),
(SIZE_T, 'SharedSystemMemory'),
(LUID, 'AdapterLuid'),
(DXGI_ADAPTER_FLAG3, 'Flags'),
(DXGI_GRAPHICS_PREEMPTION_GRANULARITY, 'GraphicsPreemptionGranularity'),
(DXGI_COMPUTE_PREEMPTION_GRANULARITY, 'ComputePreemptionGranularity'),
])
DXGI_OUTPUT_DESC1 = Struct('DXGI_OUTPUT_DESC1', [
(WString, 'DeviceName'),
(RECT, 'DesktopCoordinates'),
(BOOL, 'AttachedToDesktop'),
(DXGI_MODE_ROTATION, 'Rotation'),
(HMONITOR, 'Monitor'),
(UINT, 'BitsPerColor'),
(DXGI_COLOR_SPACE_TYPE, 'ColorSpace'),
(Array(FLOAT, 2), 'RedPrimary'),
(Array(FLOAT, 2), 'GreenPrimary'),
(Array(FLOAT, 2), 'BluePrimary'),
(Array(FLOAT, 2), 'WhitePoint'),
(FLOAT, 'MinLuminance'),
(FLOAT, 'MaxLuminance'),
(FLOAT, 'MaxFullFrameLuminance'),
])
DXGI_HARDWARE_COMPOSITION_SUPPORT_FLAGS = Flags(UINT, [
'DXGI_HARDWARE_COMPOSITION_SUPPORT_FLAG_FULLSCREEN',
'DXGI_HARDWARE_COMPOSITION_SUPPORT_FLAG_WINDOWED',
'DXGI_HARDWARE_COMPOSITION_SUPPORT_FLAG_CURSOR_STRETCHED',
])
DXGI_GPU_PREFERENCE = Enum('DXGI_GPU_PREFERENCE', [
'DXGI_GPU_PREFERENCE_UNSPECIFIED',
'DXGI_GPU_PREFERENCE_MINIMUM_POWER',
'DXGI_GPU_PREFERENCE_HIGH_PERFORMANCE',
])
IDXGIFactory6 = Interface('IDXGIFactory6', IDXGIFactory5)
IDXGIAdapter4 = Interface('IDXGIAdapter4', IDXGIAdapter3)
IDXGIOutput6 = Interface('IDXGIOutput6', IDXGIOutput5)
IDXGIAdapter4.methods += [
StdMethod(HRESULT, 'GetDesc3', [Out(Pointer(DXGI_ADAPTER_DESC3), 'pDesc')], sideeffects=False),
]
IDXGIOutput6.methods += [
StdMethod(HRESULT, 'GetDesc1', [Out(Pointer(DXGI_OUTPUT_DESC1), 'pDesc')], sideeffects=False),
StdMethod(HRESULT, 'CheckHardwareCompositionSupport', [Out(Pointer(DXGI_HARDWARE_COMPOSITION_SUPPORT_FLAGS), 'pFlags')], sideeffects=False),
]
IDXGIFactory6.methods += [
StdMethod(HRESULT, 'EnumAdapterByGpuPreference', [(UINT, 'Adapter'), (DXGI_GPU_PREFERENCE, 'GpuPreference'), (REFIID, 'riid'), Out(Pointer(ObjPointer(Void)), 'ppvAdapter')]),
]
dxgi.addInterfaces([
IDXGIFactory6,
IDXGIAdapter4,
IDXGIOutput6,
])
dxgi.addFunctions([
StdFunction(HRESULT, "DXGIDeclareAdapterRemovalSupport", [], sideeffects=False),
])
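Every DXGI revision above follows the same three-step pattern: declare any new types, derive the interface and append its methods, then register it with the module. As a hedged sketch (not part of this spec), here is how DXGI 1.6's real IDXGIFactory7, which is not traced above, would be declared in this DSL:

# Sketch only: IDXGIFactory7 exists in dxgi1_6.h but is not covered by this spec.
IDXGIFactory7 = Interface("IDXGIFactory7", IDXGIFactory6)
IDXGIFactory7.methods += [
    StdMethod(HRESULT, "RegisterAdaptersChangedEvent", [(HANDLE, "hEvent"), Out(Pointer(DWORD), "pdwCookie")]),
    StdMethod(HRESULT, "UnregisterAdaptersChangedEvent", [(DWORD, "dwCookie")]),
]
dxgi.addInterfaces([IDXGIFactory7])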
quality signals: avg_line_length 41.523686, max_line_length 344, alphanum_fraction 0.733245, num_words 4,791, num_chars 48,209, num_lines 1,160, frac_chars_alphabet 0.771328, frac_chars_comments 0.029019 (remaining near-zero and unlabeled signal columns omitted)
hexsha: 4a2897501c4860742a9eedfd04efe05fea8a41e0 | size: 1,465 | ext: py | lang: Python
repo_path: code/camera_calib.py | repo_name: nitchith/CarND-Advanced-Lane-Lines | repo_head_hexsha: 8e9e4d369f95f2076aa3b99c9015ac95c20037fc | licenses: ["MIT"]
stars_count: null | issues_count: null | forks_count: null
import numpy as np
import cv2
import glob
import matplotlib.pyplot as plt
def camera_calibrate(images_list, nx=9, ny=6, show_corners=False):
# prepare object points, like (0,0,0), (1,0,0), (2,0,0) ....,(6,5,0)
objp = np.zeros((ny*nx,3), np.float32)
objp[:,:2] = np.mgrid[0:nx,0:ny].T.reshape(-1,2)
# Arrays to store object points and image points from all the images.
objpoints = [] # 3d points in real world space
imgpoints = [] # 2d points in image plane.
# Make a list of calibration images
images = glob.glob(images_list)
if show_corners:
fig = plt.figure(figsize=(30, 30))
rows = 5
cols = 4
# Step through the list and search for chessboard corners
for idx, fname in enumerate(images):
img = cv2.imread(fname)
gray = cv2.cvtColor(img,cv2.COLOR_BGR2GRAY)
# Find the chessboard corners
ret, corners = cv2.findChessboardCorners(gray, (nx,ny),None)
# If found, add object points, image points
        if ret:
objpoints.append(objp)
imgpoints.append(corners)
# Draw and display the corners
if show_corners:
img = cv2.drawChessboardCorners(img, (nx,ny), corners, ret)
ax = plt.subplot(rows, cols, idx + 1)
ax.set_title(fname)
plt.imshow(img)
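    # NOTE: assumes at least one chessboard was detected; `gray` holds the last
    # image read and supplies the (width, height) that calibrateCamera expects.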
return cv2.calibrateCamera(objpoints, imgpoints, gray.shape[1::-1], None, None)
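A minimal usage sketch (the glob pattern and test image path below are assumptions for illustration, not files shipped with this module):

# Hypothetical paths; camera_calibrate returns cv2.calibrateCamera's results.
ret, mtx, dist, rvecs, tvecs = camera_calibrate("camera_cal/calibration*.jpg", nx=9, ny=6)
img = cv2.imread("test_images/test1.jpg")
undist = cv2.undistort(img, mtx, dist, None, mtx)  # apply the recovered intrinsics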
quality signals: avg_line_length 34.069767, max_line_length 83, alphanum_fraction 0.610239, num_words 205, num_chars 1,465, num_lines 43, frac_chars_alphabet 0.79717, frac_chars_comments 0.258703 (remaining near-zero and unlabeled signal columns omitted)
hexsha: 4a2c8215b731a53474eb2fa6ab29c369314e2b86 | size: 22,135 | ext: py | lang: Python
repo_path: src/stratis_cli/_actions/_pool.py | repo_name: stratis-storage/stratis-cli | repo_head_hexsha: 16efcfe50558785ff44a1570ca554edb2006f8d2 | licenses: ["Apache-2.0"]
stars_count: 94 (2017-02-06 to 2022-03-19) | issues_count: 564 (2016-08-30 to 2022-03-31) | forks_count: 41 (2016-09-13 to 2022-03-23)
# Copyright 2021 Red Hat, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Pool actions.
"""
# isort: STDLIB
import os
from collections import defaultdict
# isort: THIRDPARTY
from justbytes import Range
from .._constants import PoolMaintenanceErrorCode
from .._errors import (
StratisCliAggregateError,
StratisCliEngineError,
StratisCliIncoherenceError,
StratisCliInUseOtherTierError,
StratisCliInUseSameTierError,
StratisCliNameConflictError,
StratisCliNoChangeError,
StratisCliPartialChangeError,
StratisCliPartialFailureError,
)
from .._stratisd_constants import BlockDevTiers, PoolActionAvailability, StratisdErrors
from ._connection import get_object
from ._constants import TOP_OBJECT
from ._formatting import get_property, print_table, size_triple, to_hyphenated
from ._utils import get_clevis_info
def _generate_pools_to_blockdevs(managed_objects, to_be_added, tier):
"""
Generate a map of pools to which block devices they own
:param managed_objects: the result of a GetManagedObjects call
:type managed_objects: dict of str * dict
:param to_be_added: the blockdevs to be added
:type to_be_added: frozenset of str
:param tier: tier to search for blockdevs to be added
:type tier: _stratisd_constants.BlockDevTiers
:returns: a map of pool names to sets of strings containing blockdevs they own
:rtype: dict of str * frozenset of str
"""
# pylint: disable=import-outside-toplevel
from ._data import MODev, MOPool, devs, pools
pool_map = dict(
(path, str(MOPool(info).Name()))
for (path, info) in pools().search(managed_objects)
)
pools_to_blockdevs = defaultdict(list)
for modev in (
modev
for modev in (
MODev(info)
for (_, info) in devs(props={"Tier": tier}).search(managed_objects)
)
if str(modev.Devnode()) in to_be_added
):
pools_to_blockdevs[pool_map[modev.Pool()]].append(str(modev.Devnode()))
return dict(
(pool, frozenset(blockdevs)) for pool, blockdevs in pools_to_blockdevs.items()
)
def _check_opposite_tier(managed_objects, to_be_added, other_tier):
"""
Check whether specified blockdevs are already in the other tier.
:param managed_objects: the result of a GetManagedObjects call
:type managed_objects: dict of str * dict
:param to_be_added: the blockdevs to be added
:type to_be_added: frozenset of str
:param other_tier: the other tier, not the one requested
:type other_tier: _stratisd_constants.BlockDevTiers
:raises StratisCliInUseOtherTierError: if blockdevs are used by other tier
"""
pools_to_blockdevs = _generate_pools_to_blockdevs(
managed_objects, to_be_added, other_tier
)
if pools_to_blockdevs != {}:
raise StratisCliInUseOtherTierError(
pools_to_blockdevs,
BlockDevTiers.DATA
if other_tier == BlockDevTiers.CACHE
else BlockDevTiers.CACHE,
)
def _check_same_tier(pool_name, managed_objects, to_be_added, this_tier):
"""
Check whether specified blockdevs are already in the tier to which they
    are to be added.
    :param pool_name: name of the pool to which the blockdevs are to be added
    :type pool_name: str
:param managed_objects: the result of a GetManagedObjects call
:type managed_objects: dict of str * dict
:param to_be_added: the blockdevs to be added
:type to_be_added: frozenset of str
:param this_tier: the tier requested
:type this_tier: _stratisd_constants.BlockDevTiers
:raises StratisCliPartialChangeError: if blockdevs are used by this tier
:raises StratisCliInUseSameTierError: if blockdevs are used by this tier in another pool
"""
pools_to_blockdevs = _generate_pools_to_blockdevs(
managed_objects, to_be_added, this_tier
)
owned_by_current_pool = frozenset(pools_to_blockdevs.get(pool_name, []))
owned_by_other_pools = dict(
(pool, devnodes)
for pool, devnodes in pools_to_blockdevs.items()
if pool_name != pool
)
if owned_by_current_pool != frozenset():
raise StratisCliPartialChangeError(
"add to cache" if this_tier == BlockDevTiers.CACHE else "add to data",
to_be_added.difference(owned_by_current_pool),
to_be_added.intersection(owned_by_current_pool),
)
if owned_by_other_pools != {}:
raise StratisCliInUseSameTierError(owned_by_other_pools, this_tier)
def _fetch_locked_pools_property(proxy):
"""
Fetch the LockedPools property from stratisd.
:param proxy: proxy to the top object in stratisd
:return: a representation of unlocked devices
:rtype: dict
:raises StratisCliEngineError:
"""
# pylint: disable=import-outside-toplevel
from ._data import Manager
return Manager.Properties.LockedPools.Get(proxy)
class PoolActions:
"""
Pool actions.
"""
@staticmethod
def create_pool(namespace):
"""
Create a stratis pool.
:raises StratisCliEngineError:
:raises StratisCliIncoherenceError:
:raises StratisCliNameConflictError:
"""
# pylint: disable=import-outside-toplevel
from ._data import Manager, ObjectManager, pools
proxy = get_object(TOP_OBJECT)
managed_objects = ObjectManager.Methods.GetManagedObjects(proxy, {})
pool_name = namespace.pool_name
names = pools(props={"Name": pool_name}).search(managed_objects)
blockdevs = frozenset([os.path.abspath(p) for p in namespace.blockdevs])
if list(names) != []:
raise StratisCliNameConflictError("pool", pool_name)
_check_opposite_tier(managed_objects, blockdevs, BlockDevTiers.CACHE)
_check_same_tier(pool_name, managed_objects, blockdevs, BlockDevTiers.DATA)
clevis_info = get_clevis_info(namespace)
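        # stratisd's CreatePool reply is ((changed, (pool object path, blockdev
        # object paths)), return code, message); the inner pair is unused here.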
((changed, (_, _)), return_code, message) = Manager.Methods.CreatePool(
proxy,
{
"name": pool_name,
"redundancy": (True, 0),
"devices": blockdevs,
"key_desc": (
(True, namespace.key_desc)
if namespace.key_desc is not None
else (False, "")
),
"clevis_info": (False, ("", ""))
if clevis_info is None
else (True, clevis_info),
},
)
if return_code != StratisdErrors.OK: # pragma: no cover
raise StratisCliEngineError(return_code, message)
if not changed: # pragma: no cover
raise StratisCliIncoherenceError(
(
"Expected to create the specified pool %s but stratisd "
"reports that it did not actually create the pool"
)
% pool_name
)
@staticmethod
def init_cache(namespace): # pylint: disable=too-many-locals
"""
Initialize the cache of an existing stratis pool.
:raises StratisCliEngineError:
:raises StratisCliIncoherenceError:
"""
# pylint: disable=import-outside-toplevel
from ._data import MODev, ObjectManager, Pool, devs, pools
proxy = get_object(TOP_OBJECT)
managed_objects = ObjectManager.Methods.GetManagedObjects(proxy, {})
pool_name = namespace.pool_name
(pool_object_path, _) = next(
pools(props={"Name": pool_name})
.require_unique_match(True)
.search(managed_objects)
)
blockdevs = frozenset([os.path.abspath(p) for p in namespace.blockdevs])
_check_opposite_tier(managed_objects, blockdevs, BlockDevTiers.DATA)
_check_same_tier(pool_name, managed_objects, blockdevs, BlockDevTiers.CACHE)
((changed, devs_added), return_code, message) = Pool.Methods.InitCache(
get_object(pool_object_path), {"devices": blockdevs}
)
if return_code != StratisdErrors.OK:
raise StratisCliEngineError(return_code, message)
if not changed or len(devs_added) < len(blockdevs): # pragma: no cover
devnodes_added = [
MODev(info).Devnode()
for (object_path, info) in devs(
props={"Pool": pool_object_path}
).search(ObjectManager.Methods.GetManagedObjects(proxy, {}))
if object_path in devs_added
]
raise StratisCliIncoherenceError(
(
"Expected to add the specified blockdevs as cache "
"to pool %s but stratisd reports that it did not actually "
"add some or all of the blockdevs requested; devices "
"added: (%s), devices requested: (%s)"
)
% (namespace.pool_name, ", ".join(devnodes_added), ", ".join(blockdevs))
)
@staticmethod
def list_pools(namespace):
"""
List all stratis pools.
"""
# pylint: disable=import-outside-toplevel
from ._data import MOPool, ObjectManager, pools
proxy = get_object(TOP_OBJECT)
managed_objects = ObjectManager.Methods.GetManagedObjects(proxy, {})
pools_with_props = [
            MOPool(info) for _, info in pools().search(managed_objects)
]
def physical_size_triple(mopool):
"""
Calculate the triple to display for total physical size.
The format is total/used/free where the display value for each
member of the tuple are chosen automatically according to justbytes'
configuration.
:param mopool: an object representing all the properties of the pool
:type mopool: MOPool
:returns: a string to display in the resulting list output
:rtype: str
"""
total_physical_size = Range(mopool.TotalPhysicalSize())
total_physical_used = get_property(mopool.TotalPhysicalUsed(), Range, None)
return size_triple(total_physical_size, total_physical_used)
def properties_string(mopool):
"""
Make a string encoding some important properties of the pool
:param mopool: an object representing all the properties of the pool
:type mopool: MOPool
:param props_map: a map of properties returned by GetAllProperties
:type props_map: dict of str * any
"""
def gen_string(has_property, code):
"""
Generate the display string for a boolean property
:param has_property: whether the property is true or false
:type has_property: bool or NoneType
:param str code: the code to generate the string for
:returns: the generated string
:rtype: str
"""
if has_property == True: # pylint: disable=singleton-comparison
prefix = " "
elif has_property == False: # pylint: disable=singleton-comparison
prefix = "~"
# This is only going to occur if the engine experiences an
# error while calculating a property or if our code has a bug.
else: # pragma: no cover
prefix = "?"
return prefix + code
props_list = [(mopool.HasCache(), "Ca"), (mopool.Encrypted(), "Cr")]
return ",".join(gen_string(x, y) for x, y in props_list)
format_uuid = (
(lambda mo_uuid: mo_uuid) if namespace.unhyphenated_uuids else to_hyphenated
)
def alert_string(mopool):
"""
Alert information to display, if any
:param mopool: object to access pool properties
:returns: string w/ alert information, "" if no alert
:rtype: str
"""
action_availability = PoolActionAvailability.from_str(
mopool.AvailableActions()
)
error_codes = action_availability.pool_maintenance_error_codes()
return ", ".join(sorted(str(code) for code in error_codes))
tables = [
(
mopool.Name(),
physical_size_triple(mopool),
properties_string(mopool),
format_uuid(mopool.Uuid()),
alert_string(mopool),
)
for mopool in pools_with_props
]
print_table(
["Name", "Total Physical", "Properties", "UUID", "Alerts"],
sorted(tables, key=lambda entry: entry[0]),
["<", ">", ">", ">", "<"],
)
@staticmethod
def destroy_pool(namespace):
"""
Destroy a stratis pool.
If no pool exists, the method succeeds.
:raises StratisCliEngineError:
:raises StratisCliIncoherenceError:
"""
# pylint: disable=import-outside-toplevel
from ._data import Manager, ObjectManager, pools
proxy = get_object(TOP_OBJECT)
managed_objects = ObjectManager.Methods.GetManagedObjects(proxy, {})
(pool_object_path, _) = next(
pools(props={"Name": namespace.pool_name})
.require_unique_match(True)
.search(managed_objects)
)
((changed, _), return_code, message) = Manager.Methods.DestroyPool(
proxy, {"pool": pool_object_path}
)
# This branch can be covered, since the engine will return an error
# if the pool can not be destroyed because it has filesystems.
if return_code != StratisdErrors.OK:
raise StratisCliEngineError(return_code, message)
if not changed: # pragma: no cover
raise StratisCliIncoherenceError(
(
"Expected to destroy the specified pool %s but "
"stratisd reports that it did not actually "
"destroy the pool requested"
)
% namespace.pool_name
)
@staticmethod
def rename_pool(namespace):
"""
Rename a pool.
:raises StratisCliEngineError:
:raises StratisCliNoChangeError:
"""
# pylint: disable=import-outside-toplevel
from ._data import ObjectManager, Pool, pools
proxy = get_object(TOP_OBJECT)
managed_objects = ObjectManager.Methods.GetManagedObjects(proxy, {})
(pool_object_path, _) = next(
pools(props={"Name": namespace.current})
.require_unique_match(True)
.search(managed_objects)
)
((changed, _), return_code, message) = Pool.Methods.SetName(
get_object(pool_object_path), {"name": namespace.new}
)
if return_code != StratisdErrors.OK: # pragma: no cover
raise StratisCliEngineError(return_code, message)
if not changed:
raise StratisCliNoChangeError("rename", namespace.new)
@staticmethod
def add_data_devices(namespace): # pylint: disable=too-many-locals
"""
Add specified data devices to a pool.
:raises StratisCliEngineError:
:raises StratisCliIncoherenceError:
:raises StratisCliInUseOtherTierError:
:raises StratisCliInUseSameTierError:
:raises StratisCliPartialChangeError:
"""
# pylint: disable=import-outside-toplevel
from ._data import MODev, ObjectManager, Pool, devs, pools
proxy = get_object(TOP_OBJECT)
managed_objects = ObjectManager.Methods.GetManagedObjects(proxy, {})
blockdevs = frozenset([os.path.abspath(p) for p in namespace.blockdevs])
_check_opposite_tier(managed_objects, blockdevs, BlockDevTiers.CACHE)
_check_same_tier(
namespace.pool_name, managed_objects, blockdevs, BlockDevTiers.DATA
)
(pool_object_path, _) = next(
pools(props={"Name": namespace.pool_name})
.require_unique_match(True)
.search(managed_objects)
)
((added, devs_added), return_code, message) = Pool.Methods.AddDataDevs(
get_object(pool_object_path), {"devices": list(blockdevs)}
)
if return_code != StratisdErrors.OK: # pragma: no cover
raise StratisCliEngineError(return_code, message)
if not added or len(devs_added) < len(blockdevs): # pragma: no cover
devnodes_added = [
MODev(info).Devnode()
for (object_path, info) in devs(
props={"Pool": pool_object_path}
).search(ObjectManager.Methods.GetManagedObjects(proxy, {}))
if object_path in devs_added
]
raise StratisCliIncoherenceError(
(
"Expected to add the specified blockdevs to the data tier "
"in pool %s but stratisd reports that it did not actually "
"add some or all of the blockdevs requested; devices "
"added: (%s), devices requested: (%s)"
)
% (namespace.pool_name, ", ".join(devnodes_added), ", ".join(blockdevs))
)
@staticmethod
def add_cache_devices(namespace): # pylint: disable=too-many-locals
"""
Add specified cache devices to a pool.
:raises StratisCliEngineError:
:raises StratisCliIncoherenceError:
:raises StratisCliInUseOtherTierError:
:raises StratisCliInUseSameTierError:
:raises StratisCliPartialChangeError:
"""
# pylint: disable=import-outside-toplevel
from ._data import MODev, ObjectManager, Pool, devs, pools
proxy = get_object(TOP_OBJECT)
managed_objects = ObjectManager.Methods.GetManagedObjects(proxy, {})
blockdevs = frozenset([os.path.abspath(p) for p in namespace.blockdevs])
_check_opposite_tier(managed_objects, blockdevs, BlockDevTiers.DATA)
_check_same_tier(
namespace.pool_name, managed_objects, blockdevs, BlockDevTiers.CACHE
)
(pool_object_path, _) = next(
pools(props={"Name": namespace.pool_name})
.require_unique_match(True)
.search(managed_objects)
)
((added, devs_added), return_code, message) = Pool.Methods.AddCacheDevs(
get_object(pool_object_path), {"devices": list(blockdevs)}
)
if return_code != StratisdErrors.OK:
raise StratisCliEngineError(return_code, message)
if not added or len(devs_added) < len(blockdevs): # pragma: no cover
devnodes_added = [
MODev(info).Devnode()
for (object_path, info) in devs(
props={"Pool": pool_object_path}
).search(ObjectManager.Methods.GetManagedObjects(proxy, {}))
if object_path in devs_added
]
raise StratisCliIncoherenceError(
(
"Expected to add the specified blockdevs to the cache tier "
"in pool %s but stratisd reports that it did not actually "
"add some or all of the blockdevs requested; devices "
"added: (%s), devices requested: (%s)"
)
% (namespace.pool_name, ", ".join(devnodes_added), ", ".join(blockdevs))
)
@staticmethod
def unlock_pools(namespace):
"""
Unlock all of the encrypted pools that have been detected by the daemon
but are still locked.
:raises StratisCliIncoherenceError:
:raises StratisCliNoChangeError:
:raises StratisCliAggregateError:
"""
# pylint: disable=import-outside-toplevel
from ._data import Manager
proxy = get_object(TOP_OBJECT)
locked_pools = _fetch_locked_pools_property(proxy)
if locked_pools == {}: # pragma: no cover
raise StratisCliNoChangeError("unlock", "pools")
# This block is not covered as the sim engine does not simulate the
# management of unlocked devices, so locked_pools is always empty.
errors = [] # pragma: no cover
for uuid in locked_pools: # pragma: no cover
(
(is_some, unlocked_devices),
return_code,
message,
) = Manager.Methods.UnlockPool(
proxy, {"pool_uuid": uuid, "unlock_method": namespace.unlock_method}
)
if return_code != StratisdErrors.OK:
errors.append(
StratisCliPartialFailureError(
"unlock", "pool with UUID %s" % uuid, error_message=message
)
)
if is_some and unlocked_devices == []:
raise StratisCliIncoherenceError(
(
"stratisd reported that some existing devices are locked but "
"no new devices were unlocked during this operation"
)
)
if errors != []: # pragma: no cover
raise StratisCliAggregateError("unlock", "pool", errors)
@staticmethod
def explain_code(namespace):
"""
Print an explanation of pool error code.
"""
error_code = PoolMaintenanceErrorCode.from_str(namespace.code)
assert error_code is not None
print(error_code.explain())
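For orientation, a minimal sketch of driving one of these actions directly; the namespace field mirrors the only attribute list_pools reads above, and a running stratisd D-Bus service is assumed:

from argparse import Namespace

# Hypothetical direct call; normally the stratis CLI argument parser builds this namespace.
PoolActions.list_pools(Namespace(unhyphenated_uuids=False))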
quality signals: avg_line_length 36.830283, max_line_length 92, alphanum_fraction 0.614954, num_words 2,293, num_chars 22,135, num_lines 600, frac_chars_alphabet 0.85784, frac_chars_comments 0.251683 (remaining near-zero and unlabeled signal columns omitted)
hexsha: 4a2d85e10d7ec8df4402f2b20294f47dcb467eb8 | size: 16,294 | ext: py | lang: Python
repo_path: backend/src/contaxy/schema/auth.py | repo_name: Felipe-Renck/contaxy | repo_head_hexsha: 532d1f01aad1ea8155bc10216acedca601d37889 | licenses: ["MIT"]
stars_count: null | issues_count: null | forks_count: null
from datetime import datetime, timezone
from enum import Enum
from typing import Dict, List, Optional
import pydantic
from fastapi import HTTPException, Path, status
from pydantic import BaseModel, EmailStr, Field
from contaxy.schema.exceptions import ClientValueError
from contaxy.schema.shared import MAX_DESCRIPTION_LENGTH
from contaxy.utils.fastapi_utils import as_form
USERS_KIND = "users"
ADMIN_ROLE = "roles/admin"
USER_ROLE = "roles/user"
class AccessLevel(str, Enum):
# Map to: select, insert, update, delete
READ = "read" # Viewer, view: Allows admin access , Can only view existing resources. Permissions for read-only actions that do not affect state, such as viewing (but not modifying) existing resources or data.
WRITE = "write" # Editor, edit, Contributor : Allows read/write access , Can create and manage all types of resources but can’t grant access to others. All viewer permissions, plus permissions for actions that modify state, such as changing existing resources.
ADMIN = "admin" # Owner : Allows read-only access. Has full access to all resources including the right to edit IAM, invite users, edit roles. All editor permissions and permissions for the following actions
# UNKNOWN = "unknown" # Deny?
@classmethod
def load(cls, access_level: str) -> "AccessLevel":
try:
return cls(access_level.strip().lower())
except ValueError:
raise ClientValueError(f"Access level is unknown {access_level}")
# return cls.UNKNOWN
class TokenPurpose(str, Enum):
USER_API_TOKEN = "user-api-token"
PROJECT_API_TOKEN = "project-api-token"
SERVICE_ACCESS_TOKEN = "service-access-token"
LOGIN_TOKEN = "login-token"
REFRESH_TOKEN = "refresh-token" # For user sessions
# DEPLOYMENT_TOKEN = "deployment-token"
contaxy_token_purposes = set(TokenPurpose)
class TokenType(str, Enum):
SESSION_TOKEN = "session-token"
API_TOKEN = "api-token"
class AccessToken(BaseModel):
token: str = Field(
...,
example="f4528e540a133dd53ba6809e74e16774ebe4777a",
description="API Token.",
)
token_type: TokenType = Field(
...,
example=TokenType.API_TOKEN,
description="The type of the token.",
)
subject: str = Field(
...,
example="users/dklqmomr2c8dx9cpb202dsqku",
description="Identifies the principal that is the subject of the token. Usually refers to the user to which the token is issued to.",
)
scopes: List[str] = Field(
...,
example=["projects#read"],
description="List of scopes associated with the token.",
)
created_at: Optional[datetime] = Field(
None,
description="Creation date of the token.",
)
expires_at: Optional[datetime] = Field(
None,
example="2021-04-25T10:20:30.400+02:30",
description="Date at which the token expires and, thereby, gets revoked.",
)
class ApiToken(AccessToken):
description: Optional[str] = Field(
None,
example="Token used for accesing project resources on my local machine.",
max_length=MAX_DESCRIPTION_LENGTH,
description="Short description about the context and usage of the token.",
)
created_by: Optional[str] = Field(
None,
example="16fd2706-8baf-433b-82eb-8c7fada847da",
description="ID of the user that created this token.",
)
token_purpose: Optional[str] = Field(
None,
example=TokenPurpose.LOGIN_TOKEN,
description="The purpose of the token.",
)
class AuthorizedAccess(BaseModel):
authorized_subject: str
resource_name: Optional[str] = None
access_level: Optional[AccessLevel] = None
access_token: Optional[AccessToken] = None
# Oauth Specific Code
class OAuth2TokenGrantTypes(str, Enum):
PASSWORD = "password"
REFRESH_TOKEN = "refresh_token"
CLIENT_CREDENTIALS = "client_credentials"
AUTHORIZATION_CODE = "authorization_code"
# TODO: Replaced with pydantic class
# class OAuth2TokenRequestForm:
# """OAuth2 Token Endpoint Request Form."""
# def __init__(
# self,
# grant_type: OAuth2TokenGrantTypes = Form(
# ...,
# description="Grant type. Determines the mechanism used to authorize the creation of the tokens.",
# ),
# username: Optional[str] = Form(
# None, description="Required for `password` grant type. The user’s username."
# ),
# password: Optional[str] = Form(
# None, description="Required for `password` grant type. The user’s password."
# ),
# scope: Optional[str] = Form(
# None,
# description="Scopes that the client wants to be included in the access token. List of space-delimited, case-sensitive strings",
# ),
# client_id: Optional[str] = Form(
# None,
# description="The client identifier issued to the client during the registration process",
# ),
# client_secret: Optional[str] = Form(
# None,
# description=" The client secret. The client MAY omit the parameter if the client secret is an empty string.",
# ),
# code: Optional[str] = Form(
# None,
# description="Required for `authorization_code` grant type. The value is what was returned from the authorization endpoint.",
# ),
# redirect_uri: Optional[str] = Form(
# None,
# description="Required for `authorization_code` grant type. Specifies the callback location where the authorization was sent. This value must match the `redirect_uri` used to generate the original authorization_code.",
# ),
# refresh_token: Optional[str] = Form(
# None,
# description="Required for `refresh_token` grant type. The refresh token previously issued to the client.",
# ),
# state: Optional[str] = Form(
# None,
# description="An opaque value used by the client to maintain state between the request and callback. The parameter SHOULD be used for preventing cross-site request forgery.",
# ),
# set_as_cookie: Optional[bool] = Form(
# False,
# description="If `true`, the access (and refresh) token will be set as cookie instead of the response body.",
# ),
# ):
# self.grant_type = grant_type
# self.username = username
# self.password = password
# self.scopes = []
# if scope:
# self.scopes = str(scope).split()
# self.client_id = client_id
# self.client_secret = client_secret
# self.code = code
# self.redirect_uri = redirect_uri
# self.refresh_token = refresh_token
# self.state = state
# self.set_as_cookie = set_as_cookie
@as_form
class OAuth2TokenRequestFormNew(BaseModel):
"""OAuth2 Token Endpoint Request Form."""
grant_type: OAuth2TokenGrantTypes = Field(
...,
description="Grant type. Determines the mechanism used to authorize the creation of the tokens.",
)
username: Optional[str] = Field(
None, description="Required for `password` grant type. The user’s username."
)
password: Optional[str] = Field(
None, description="Required for `password` grant type. The user’s password."
)
scope: Optional[str] = Field(
None,
description="Scopes that the client wants to be included in the access token. List of space-delimited, case-sensitive strings",
)
client_id: Optional[str] = Field(
None,
description="The client identifier issued to the client during the registration process",
)
client_secret: Optional[str] = Field(
None,
description=" The client secret. The client MAY omit the parameter if the client secret is an empty string.",
)
code: Optional[str] = Field(
None,
description="Required for `authorization_code` grant type. The value is what was returned from the authorization endpoint.",
)
redirect_uri: Optional[str] = Field(
None,
description="Required for `authorization_code` grant type. Specifies the callback location where the authorization was sent. This value must match the `redirect_uri` used to generate the original authorization_code.",
)
refresh_token: Optional[str] = Field(
None,
description="Required for `refresh_token` grant type. The refresh token previously issued to the client.",
)
state: Optional[str] = Field(
None,
description="An opaque value used by the client to maintain state between the request and callback. The parameter SHOULD be used for preventing cross-site request forgery.",
)
set_as_cookie: Optional[bool] = Field(
False,
description="If `true`, the access (and refresh) token will be set as cookie instead of the response body.",
)
class OAuthToken(BaseModel):
token_type: str = Field(
..., description="The type of token this is, typically just the string `bearer`"
)
access_token: str = Field(..., description="The access token string.")
expires_in: Optional[int] = Field(
None,
description="The expiration time of the access token in seconds.",
)
refresh_token: Optional[str] = Field(
None, description="API token to refresh the sesion token (if it expires)."
)
scope: Optional[str] = Field(
None, description="The scopes contained in the access token."
)
id_token: Optional[str] = Field(
None,
description="OpenID Connect ID Token that encodes the user’s authentication information.",
)
class OAuthTokenIntrospection(BaseModel):
active: bool = Field(
...,
description="Indicator of whether or not the presented token is currently active.",
)
scope: Optional[str] = Field(
None,
description="A space-delimited list of scopes.",
)
client_id: Optional[str] = Field(
None,
description="Client identifier for the OAuth 2.0 client that requested this token.",
)
username: Optional[str] = Field(
None,
description="Human-readable identifier for the resource owner who authorized this token.",
)
token_type: Optional[str] = Field(
None,
description="Type of the token as defined in Section 5.1 of OAuth 2.0 [RFC6749].",
)
exp: Optional[int] = Field(
None,
description="Timestamp, measured in the number of seconds since January 1 1970 UTC, indicating when this token will expire, as defined in JWT [RFC7519].",
)
iat: Optional[int] = Field(
None,
description="Timestamp, measured in the number of seconds since January 1 1970 UTC, indicating when this token was originally issued, as defined in JWT [RFC7519].",
)
nbf: Optional[int] = Field(
None,
description="Timestamp, measured in the number of seconds since January 1 1970 UTC, indicating when this token is not to be used before, as defined in JWT [RFC7519].",
)
sub: Optional[str] = Field(
None,
description="Subject of the token, as defined in JWT [RFC7519]. Usually a machine-readable identifier of the resource owner who authorized this token.",
)
aud: Optional[str] = Field(
None,
description="Service-specific string identifier or list of string identifiers representing the intended audience for this token, as defined in JWT [RFC7519].",
)
iss: Optional[str] = Field(
None,
description="String representing the issuer of this token, as defined in JWT [RFC7519].",
)
jti: Optional[str] = Field(
None,
description="String identifier for the token, as defined in JWT [RFC7519].",
)
uid: Optional[str] = Field(
None,
description="The user ID. This parameter is returned only if the token is an access token and the subject is an end user.",
)
class AuthorizeResponseType(str, Enum):
TOKEN = "token"
CODE = "code"
class OAuth2ErrorDetails(BaseModel):
error: str
class OAuth2Error(HTTPException):
"""Basic exception for OAuth errors.
Implements the [RFC6749 error response](https://tools.ietf.org/html/rfc6749#section-5.2).
"""
def __init__(
self,
error: str,
) -> None:
"""Initializes the exception.
Args:
error: A single ASCII error code from the ones defined in RFC6749.
"""
super(OAuth2Error, self).__init__(
status_code=status.HTTP_400_BAD_REQUEST,
detail=error,
)
# TODO: Not used right now
# class OAuth2AuthorizeRequestForm:
# """OAuth2 Authorize Endpoint Request Form."""
# def __init__(
# self,
# response_type: AuthorizeResponseType = Form(
# ...,
# description="Either code for requesting an authorization code or token for requesting an access token (implicit grant).",
# ),
# client_id: Optional[str] = Form(
# None, description="The public identifier of the client."
# ),
# redirect_uri: Optional[str] = Form(None, description="Redirection URL."),
# scope: Optional[str] = Form(
# None, description="The scope of the access request."
# ),
# state: Optional[str] = Form(
# None,
# description="An opaque value used by the client to maintain state between the request and callback. The parameter SHOULD be used for preventing cross-site request forgery",
# ),
# nonce: Optional[str] = Form(None),
# ):
# self.response_type = response_type
# self.client_id = client_id
# self.redirect_uri = redirect_uri
# self.scope = scope
# self.state = state
# self.nonce = nonce
USER_ID_PARAM = Path(
...,
title="User ID",
description="A valid user ID.",
# TODO: add length restriction
)
# User Models
class UserBase(BaseModel):
username: Optional[str] = Field(
None,
example="john-doe",
description="A unique username on the system.",
) # nickname
email: Optional[EmailStr] = Field(
None, example="john.doe@example.com", description="User email address."
)
disabled: bool = Field(
False,
description="Indicates that user is disabled. Disabling a user will prevent any access to user-accessible resources.",
)
class UserInput(UserBase):
pass
class UserRegistration(UserInput):
# The password is only part of the user input object and should never be returned
# TODO: a password can only be changed when used via oauth password bearer
# TODO: System admin can change passwords for all users
password: Optional[str] = Field(
None,
description="Password for the user. The password will be stored in as a hash.",
)
class User(UserBase):
id: str = Field(
...,
example="16fd2706-8baf-433b-82eb-8c7fada847da",
description="Unique ID of the user.",
)
technical_user: bool = Field(
False,
description="Indicates if the user is a technical user created by the system.",
)
created_at: datetime = Field(
default_factory=lambda: datetime.now(timezone.utc),
description="Timestamp of the user creation. Assigned by the server and read-only.",
)
last_activity: datetime = Field(
None,  # If None, the validator below sets last_activity to the created_at time
description="Last time the user accessed the system. Currently this is only updated when the user "
"calls the /users/me endpoint, so that call should always be made when the user loads the UI.",
)
@pydantic.validator("last_activity", pre=True, always=True)
def default_last_activity(cls, v: datetime, *, values: Dict) -> datetime:
return v if v is not None else values["created_at"]
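# Illustrative behaviour of the validator above (not part of the original
# module): a User constructed without last_activity inherits created_at:
#
#     u = User(id="16fd2706-8baf-433b-82eb-8c7fada847da")
#     assert u.last_activity == u.created_at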
has_password: bool = Field(
True,
description="Indicates if the user log in with password or SSO",
)
| 37.457471
| 266
| 0.651774
| 1,948
| 16,294
| 5.384497
| 0.209446
| 0.042997
| 0.051483
| 0.049576
| 0.446086
| 0.391744
| 0.341119
| 0.299171
| 0.29488
| 0.29488
| 0
| 0.014057
| 0.257764
| 16,294
| 434
| 267
| 37.543779
| 0.853233
| 0.327912
| 0
| 0.2
| 0
| 0.044444
| 0.396443
| 0.015932
| 0
| 0
| 0
| 0.004608
| 0
| 1
| 0.011111
| false
| 0.033333
| 0.033333
| 0.003704
| 0.37037
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a2f072d42921424ab487e97b0b0a7b2ce429f4d
| 1,193
|
py
|
Python
|
setup.py
|
richarddwang/hugdatafast
|
714ebac89cb6c616a53ec5da50d0c1c50c6f2a3e
|
[
"Apache-2.0"
] | 19
|
2020-08-28T08:35:21.000Z
|
2021-03-08T18:42:46.000Z
|
setup.py
|
richarddwang/hugdatafast
|
714ebac89cb6c616a53ec5da50d0c1c50c6f2a3e
|
[
"Apache-2.0"
] | 3
|
2020-08-31T15:57:55.000Z
|
2020-09-05T09:34:09.000Z
|
setup.py
|
richarddwang/hugdatafast
|
714ebac89cb6c616a53ec5da50d0c1c50c6f2a3e
|
[
"Apache-2.0"
] | null | null | null |
import setuptools
from hugdatafast.__init__ import __version__
with open("README.md", "r") as fh:
long_description = fh.read()
REQUIRED_PKGS = [
'fastai>=2.0.8',
'fastcore>=1.0.1', # change of store_attr api (store_attr lives in fastcore)
'datasets',
]
setuptools.setup(
name="hugdatafast",
version=__version__,
author="Richard Wang",
author_email="richardyy1188@gmail.com",
description="The elegant bridge between hugginface data and fastai",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/richarddwang/hugdatafast",
license='Apache 2.0',
packages=setuptools.find_packages(),
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"Intended Audience :: Science/Research",
"Programming Language :: Python :: 3",
"License :: OSI Approved :: Apache Software License",
"Operating System :: OS Independent",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
python_requires='>=3.6',
install_requires=REQUIRED_PKGS,
keywords='datasets machine learning metrics fastai huggingface',
)
| 33.138889
| 77
| 0.68399
| 128
| 1,193
| 6.179688
| 0.71875
| 0.075853
| 0.04804
| 0.075853
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.016632
| 0.19363
| 1,193
| 36
| 78
| 33.138889
| 0.805613
| 0.020117
| 0
| 0
| 0
| 0
| 0.473459
| 0.038527
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.060606
| 0
| 0.060606
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a2f0deb5d53bf6a5794ae465d5f56eb3c3bd2c5
| 3,591
|
py
|
Python
|
tests/scripts/test_repository_actor_definition.py
|
drehak/leapp
|
062c76859e6b4a68592c6a387e44a2c1d36949ff
|
[
"Apache-2.0"
] | null | null | null |
tests/scripts/test_repository_actor_definition.py
|
drehak/leapp
|
062c76859e6b4a68592c6a387e44a2c1d36949ff
|
[
"Apache-2.0"
] | 3
|
2022-01-31T10:24:53.000Z
|
2022-03-29T12:30:04.000Z
|
tests/scripts/test_repository_actor_definition.py
|
drehak/leapp
|
062c76859e6b4a68592c6a387e44a2c1d36949ff
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from leapp.repository.actor_definition import ActorDefinition, ActorInspectionFailedError, MultipleActorsError
from leapp.exceptions import UnsupportedDefinitionKindError
from leapp.repository import DefinitionKind
from helpers import repository_dir
import logging
import mock
_FAKE_META_DATA = {
'description': 'Fake Description',
'class_name': 'FakeActor',
'name': 'fake-actor',
'path': 'actors/test',
'tags': (),
'consumes': (),
'produces': (),
'dialogs': (),
}
def test_actor_definition(repository_dir):
with repository_dir.as_cwd():
logger = logging.getLogger('leapp.actor.test')
with mock.patch.object(logger, 'log') as log_mock:
definition = ActorDefinition('actors/test', '.', log=log_mock)
for kind in set(DefinitionKind.REPO_WHITELIST + DefinitionKind.ACTOR_WHITELIST):
if kind in DefinitionKind.ACTOR_WHITELIST:
definition.add(kind, '.')
else:
with pytest.raises(UnsupportedDefinitionKindError):
definition.add(kind, '.')
log_mock.error.assert_called_with(
"Attempt to add item type %s to actor that is not supported", kind.name)
log_mock.reset_mock()
with mock.patch('leapp.repository.actor_definition.get_actor_metadata', return_value=_FAKE_META_DATA):
with mock.patch('leapp.repository.actor_definition.get_actors', return_value=[True]):
definition._module = True
assert definition.consumes == _FAKE_META_DATA['consumes']
assert definition.produces == _FAKE_META_DATA['produces']
assert definition.tags == _FAKE_META_DATA['tags']
assert definition.class_name == _FAKE_META_DATA['class_name']
assert definition.dialogs == _FAKE_META_DATA['dialogs']
assert definition.name == _FAKE_META_DATA['name']
assert definition.description == _FAKE_META_DATA['description']
dumped = definition.dump()
assert dumped.pop('path') == _FAKE_META_DATA['path']
assert dumped.pop('name') == definition.name
assert dumped.pop('files') == ('.',)
assert dumped.pop('libraries') == ('.',)
assert dumped.pop('tests') == ('.',)
assert dumped.pop('tools') == ('.',)
# Assert to ensure we covered all keys
assert not dumped
with pytest.raises(ActorInspectionFailedError):
with mock.patch('leapp.repository.actor_definition.get_actors', return_value=[]):
definition._discovery = None
definition.discover()
with pytest.raises(ActorInspectionFailedError):
with mock.patch('leapp.repository.actor_definition.get_actors') as mocked_actors:
mocked_actors.side_effect = RuntimeError('Test error')
definition._discovery = None
definition.discover()
with pytest.raises(MultipleActorsError):
with mock.patch('leapp.repository.actor_definition.get_actor_metadata', return_value=_FAKE_META_DATA):
with mock.patch('leapp.repository.actor_definition.get_actors', return_value=[True, True]):
definition._discovery = None
definition.discover()
| 49.875
| 118
| 0.601225
| 342
| 3,591
| 6.081871
| 0.25731
| 0.042308
| 0.063462
| 0.100962
| 0.305769
| 0.286058
| 0.286058
| 0.286058
| 0.238942
| 0.238942
| 0
| 0
| 0.29741
| 3,591
| 71
| 119
| 50.577465
| 0.824415
| 0.010025
| 0
| 0.190476
| 0
| 0
| 0.161835
| 0.078807
| 0
| 0
| 0
| 0
| 0.238095
| 1
| 0.015873
| false
| 0
| 0.111111
| 0
| 0.126984
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a33a995384ea8c9d2b8647bf4341ccfb7cc9243
| 1,135
|
py
|
Python
|
WebHtmlExample/WebHtmlExample.py
|
lilei644/python-learning-example
|
71910a32bc8b3b8f23ba13babb583af453405bbe
|
[
"MIT"
] | 2
|
2018-01-20T02:24:20.000Z
|
2018-06-07T18:16:59.000Z
|
WebHtmlExample/WebHtmlExample.py
|
lilei644/python-learning-example
|
71910a32bc8b3b8f23ba13babb583af453405bbe
|
[
"MIT"
] | null | null | null |
WebHtmlExample/WebHtmlExample.py
|
lilei644/python-learning-example
|
71910a32bc8b3b8f23ba13babb583af453405bbe
|
[
"MIT"
] | null | null | null |
import requests
from bs4 import BeautifulSoup
import re
# Set the request headers
# Swap in a browser User-Agent; this is the most common crawler setup
headers = {
"User-Agent": 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_13_2) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/63.0.3239.132 Safari/537.36'}
# Fetch the weather information
def get_weather():
html = requests.get("http://www.weather.com.cn/weather/101280601.shtml", headers=headers)
html.encoding = "utf-8"
if html.status_code == 200:
soup = BeautifulSoup(html.text, "lxml")
light_list = soup.select('p.tem span')
night_list = soup.select('p.tem i')
for index in range(0, len(light_list)):
print('Daytime temperature: {0}, nighttime temperature: {1}'.format(light_list[index].get_text(), night_list[index].get_text()))
# Get the reply counts from the Tieba forum
def get_bar():
html = requests.get("http://tieba.baidu.com/f?ie=utf-8&kw=python3", headers=headers)
html.encoding = "utf-8"
if html.status_code == 200:
# <span class="threadlist_rep_num center_text" title="回复">9</span>  ("回复" means "reply"; the regex below matches this literal)
tag_list = re.findall(r'(?<="回复">)\d*(?=</span>)', html.text)
print(tag_list)
if __name__ == '__main__':
get_weather()
get_bar()
| 31.527778
| 142
| 0.65022
| 168
| 1,135
| 4.22619
| 0.565476
| 0.016901
| 0.042254
| 0.053521
| 0.188732
| 0.138028
| 0.138028
| 0.138028
| 0.138028
| 0.138028
| 0
| 0.054898
| 0.181498
| 1,135
| 35
| 143
| 32.428571
| 0.709365
| 0.100441
| 0
| 0.173913
| 0
| 0.043478
| 0.300493
| 0.023645
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0
| 0.130435
| 0
| 0.217391
| 0.086957
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a3450cf5aa9171992cee7901efa3fe712343d3d
| 1,158
|
py
|
Python
|
Codi/diode.py
|
JosepFanals/HELM
|
feb579f37eb0850ba2a7acef18f8d3d78b9e599c
|
[
"MIT"
] | 1
|
2020-09-03T14:46:35.000Z
|
2020-09-03T14:46:35.000Z
|
Codi/diode.py
|
JosepFanals/HELM
|
feb579f37eb0850ba2a7acef18f8d3d78b9e599c
|
[
"MIT"
] | 1
|
2021-09-09T12:54:09.000Z
|
2021-09-14T07:47:58.000Z
|
Codi/diode.py
|
JosepFanals/HELM
|
feb579f37eb0850ba2a7acef18f8d3d78b9e599c
|
[
"MIT"
] | null | null | null |
import numpy as np
import math
import matplotlib.pyplot as plt
U = 5  # equivalent to E
R = 2  # equivalent to R1
R2 = 3
P = 1.2
Vt = 0.026
Is = 0.000005
n = 200  # depth (number of series terms)
Vd = np.zeros(n)  # power series coefficients
Vl = np.zeros(n)
I1 = np.zeros(n)
I1[0] = U / R  # initialization of the series
Vd[0] = Vt * math.log(1 + I1[0] / Is)
Vl[0] = P / I1[0]
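# A sketch of the scheme used below (added note, assuming the standard
# holomorphic-embedding formulation): every product of two power series is
# evaluated as a discrete (Cauchy) convolution. For the constant-power load
# P = Vl * I1 this gives sum_{k=0..i} Vl[k] * I1[i-k] = 0 for i >= 1, which
# convVlI accumulates before Vl[i] is isolated; convVd plays the same role
# for the diode equation Vd = Vt * ln(1 + I1 / Is).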
def convVd(Vd, I, i):  # convolution for computing Vd[i]
suma = 0
for k in range(1, i):
suma += k * Vd[k] * I[i - k]
return suma
def convVlI(Vl, I1, i):  # convolution for computing Vl[i]
suma = 0
for k in range(i):
suma = suma + Vl[k] * I1[i - k]
return suma
for i in range(1, n):  # computation of the coefficients
I1[i] = (1 / R + 1 / R2) * (-Vd[i - 1] - Vl[i - 1])
Vd[i] = (i * Vt * I1[i] - convVd(Vd, I1, i)) / (i * (Is + I1[0]))
Vl[i] = -convVlI(Vl, I1, i) / I1[0]
If = sum(I1)
Vdf = sum(Vd)
Vlf = sum(Vl)
print('I1: ' + str(If))
print('Vd: ' + str(Vdf))
print('Vl: ' + str(Vlf))
print('P: ' + str(Vlf * If))
Vdfinal = np.zeros(n)  # to observe how the diode voltage evolves
for j in range(n):
Vdfinal[j] = np.sum([Vd[:(j+1)]])
print(Vdfinal)
| 19.3
| 76
| 0.541451
| 223
| 1,158
| 2.811659
| 0.313901
| 0.028708
| 0.051037
| 0.031898
| 0.124402
| 0.054226
| 0.054226
| 0
| 0
| 0
| 0
| 0.062871
| 0.272021
| 1,158
| 59
| 77
| 19.627119
| 0.680902
| 0.183074
| 0
| 0.097561
| 0
| 0
| 0.016077
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.04878
| false
| 0
| 0.073171
| 0
| 0.170732
| 0.121951
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a37446fd29ea2b6044d47c4ec0b0027825d51e4
| 2,623
|
py
|
Python
|
tests/unit/app/test_session.py
|
bernease/whylogs-python
|
cfd2a2f71280537aae584cbd40a752fbe7da647b
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/app/test_session.py
|
bernease/whylogs-python
|
cfd2a2f71280537aae584cbd40a752fbe7da647b
|
[
"Apache-2.0"
] | null | null | null |
tests/unit/app/test_session.py
|
bernease/whylogs-python
|
cfd2a2f71280537aae584cbd40a752fbe7da647b
|
[
"Apache-2.0"
] | null | null | null |
import pytest
from whylogs.app.session import get_or_create_session, get_session, get_logger, reset_default_session, session_from_config
from whylogs.app.config import SessionConfig
from whylogs.app.session import Session
from pandas import util
def test_get_global_session():
_session = None
session = get_or_create_session()
global_session = get_session()
assert session == global_session
def test_reset():
session = get_or_create_session()
reset_default_session()
global_session = get_session()
assert global_session.project is not None
def test_session_log_dataframe():
_session = None
session = session_from_config(SessionConfig(
"default-project", "default-pipeline", [], False
))
df = util.testing.makeDataFrame()
profile = session.log_dataframe(df)
assert session.logger() is not None
assert session.logger("default-project").dataset_name == "default-project"
def test_session_profile():
session = session_from_config(SessionConfig(
"default-project", "default-pipeline", [], False
))
df = util.testing.makeDataFrame()
profile = session.log_dataframe(df)
assert profile is not None
summary = profile.flat_summary()
flat_summary = summary['summary']
assert len(flat_summary) == 4
def test_profile_df():
session = get_or_create_session()
df = util.testing.makeDataFrame()
log_profile = session.log_dataframe(df)
profile = session.profile_dataframe(df)
assert log_profile.name == profile.name
assert log_profile.dataset_timestamp == profile.dataset_timestamp
assert log_profile.session_timestamp == profile.session_timestamp
assert len(profile.columns) == 4
assert len(log_profile.tags) == 1
assert len(profile.tags) == 2
def test_close_session():
session = get_or_create_session()
session.close()
assert session.is_active() is False
df = util.testing.makeDataFrame()
log_profile = session.log_dataframe(df)
assert log_profile is None
profile = session.profile_dataframe(df)
assert profile is None
profile = session.new_profile(df)
assert profile is None
with pytest.raises(RuntimeError):
session.logger()
def test_logger_cache():
_session = None
session = get_or_create_session()
with session.logger("cache-test", with_rotation_time="s") as logger:
logger.log({"name": 1})
session.close()
def test_remove_logger():
session = get_or_create_session()
session.logger("default-project")
with pytest.raises(KeyError):
session.remove_logger("test")
| 26.23
| 122
| 0.716737
| 324
| 2,623
| 5.540123
| 0.17284
| 0.05571
| 0.042897
| 0.070195
| 0.438997
| 0.367131
| 0.249582
| 0.209471
| 0.209471
| 0.209471
| 0
| 0.002338
| 0.184522
| 2,623
| 99
| 123
| 26.494949
| 0.83684
| 0
| 0
| 0.449275
| 0
| 0
| 0.050744
| 0
| 0
| 0
| 0
| 0
| 0.231884
| 1
| 0.115942
| false
| 0
| 0.072464
| 0
| 0.188406
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a379f8a8c2abcf1cc5791849c692674276f7e20
| 851
|
py
|
Python
|
Packages/constants.py
|
Bemesko/Intelligence-of-Home-GUI
|
4580d2d2a6b5f3509e2e0897fd0c9952711ccd2b
|
[
"MIT"
] | null | null | null |
Packages/constants.py
|
Bemesko/Intelligence-of-Home-GUI
|
4580d2d2a6b5f3509e2e0897fd0c9952711ccd2b
|
[
"MIT"
] | null | null | null |
Packages/constants.py
|
Bemesko/Intelligence-of-Home-GUI
|
4580d2d2a6b5f3509e2e0897fd0c9952711ccd2b
|
[
"MIT"
] | null | null | null |
import enum
BASELINE = "baseline"
ENERGY = "energy"
MAX_PRICE = "max_price"
START_PRICE = "starting_price"
INCREMENT = "increment"
MIN_PRICE = "min_price"
MAX_LOT_SIZE = "max_lot_size_wh"
NAMESERVER_AGENT_AMOUNT = 3
ATTRIBUTE_LIST_LENGTH = 50
NEXT_ENERGY_CONSUMPTION = "next_energy_consumption"
NEXT_ENERGY_GENERATION = "next_energy_generation"
ENERGY_DIFFERENCE = "energy_difference"
ENERGY_MARKET_PRICE = "energy_market_price"
WANTED_ENERGY = "wanted_energy"
ENERGY_BUY_MAX_PRICE = "energy_buy_max_price"
ENERGY_BUY_STARTING_PRICE = "energy_buy_starting_price"
ENERGY_BUY_PRICE_INCREMENT = "energy_buy_price_increment"
ENERGY_SELL_MIN_PRICE = "energy_sell_min_price"
class buy_baseline(enum.Enum):
deficit = 0
all_energy = 1
infinite = 2
none = 3
class sell_baseline(enum.Enum):
surplus = 0
all_energy = 1
none = 2
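# Illustrative usage from a consumer module (not part of the original file):
#
#     import constants
#     strategy = constants.buy_baseline.deficit
#     if strategy is constants.buy_baseline.deficit:
#         print("buy only the energy deficit")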
| 24.314286
| 57
| 0.788484
| 119
| 851
| 5.142857
| 0.319328
| 0.107843
| 0.091503
| 0.081699
| 0.297386
| 0.142157
| 0.094771
| 0
| 0
| 0
| 0
| 0.013587
| 0.135135
| 851
| 34
| 58
| 25.029412
| 0.817935
| 0
| 0
| 0.071429
| 0
| 0
| 0.300823
| 0.137485
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.035714
| 0
| 0.357143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a37bdd049a40072735c67bea9e8cc13a3a7a335
| 1,553
|
py
|
Python
|
target/tests.py
|
groundupnews/gu
|
c7179ee3d058c8749d250d681032a76dc8d599d5
|
[
"BSD-3-Clause"
] | 19
|
2018-01-28T14:35:40.000Z
|
2020-12-04T03:04:02.000Z
|
target/tests.py
|
groundupnews/gu
|
c7179ee3d058c8749d250d681032a76dc8d599d5
|
[
"BSD-3-Clause"
] | 8
|
2018-06-02T14:28:28.000Z
|
2021-08-06T10:22:37.000Z
|
target/tests.py
|
groundupnews/gu
|
c7179ee3d058c8749d250d681032a76dc8d599d5
|
[
"BSD-3-Clause"
] | 21
|
2018-02-25T14:07:48.000Z
|
2020-05-28T23:10:52.000Z
|
from django.contrib.auth.models import User
from django.test import TestCase
from django.test import Client
from django.urls import reverse
from target import models
from django.utils import timezone
# Create your tests here.
class URLSWork(TestCase):
@classmethod
def setUpTestData(cls):
target = models.Target()
target.letters = 'practical'
target.words = 'practical'
target.published = timezone.now()
target.number = 1
target.save()
def test_urls(self):
user = User.objects.create_user('admin', 'admin@example.com', 'abcde')
user.is_staff = True
user.is_active = True
user.is_superuser = True
user.save()
c = Client()
response = c.login(username='admin', password='abcde')
self.assertEqual(response, True)
url = reverse('target:list')
response = c.get(url)
self.assertEqual(response.status_code, 200)
target = models.Target.objects.all()[0]
url = reverse('target:detail', args=(target.number,))
response = c.get(url)
self.assertEqual(response.status_code, 200)
url = reverse('target:create')
response = c.post(url)
self.assertEqual(response.status_code, 200)
url = reverse('target:create_letters', args=('practical',))
response = c.post(url)
self.assertEqual(response.status_code, 200)
url = reverse('target:delete', args=(1,))
response = c.get(url)
self.assertEqual(response.status_code, 200)
| 33.76087
| 78
| 0.63812
| 185
| 1,553
| 5.297297
| 0.335135
| 0.055102
| 0.140816
| 0.132653
| 0.323469
| 0.323469
| 0.323469
| 0.323469
| 0.323469
| 0.323469
| 0
| 0.015319
| 0.2434
| 1,553
| 45
| 79
| 34.511111
| 0.818723
| 0.01481
| 0
| 0.25
| 0
| 0
| 0.088351
| 0.013743
| 0
| 0
| 0
| 0
| 0.15
| 1
| 0.05
| false
| 0.025
| 0.15
| 0
| 0.225
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a39a497868bd170b5a86c4ae6d32db864cbebc8
| 7,240
|
py
|
Python
|
core/vision/collection.py
|
jmarangola/cv-chess
|
c1bf1754b622e76bc2bc92276b96760c321a8bd9
|
[
"MIT"
] | null | null | null |
core/vision/collection.py
|
jmarangola/cv-chess
|
c1bf1754b622e76bc2bc92276b96760c321a8bd9
|
[
"MIT"
] | null | null | null |
core/vision/collection.py
|
jmarangola/cv-chess
|
c1bf1754b622e76bc2bc92276b96760c321a8bd9
|
[
"MIT"
] | null | null | null |
"""
Autonomous dataset collection for the Jetson Nano
John Marangola - marangol@bc.edu
"""
import datasets
import json
from datasets import Board, ChessPiece, PieceColor, PieceType
from realsense_utils import RealSenseCamera  # local camera wrapper; needed by the __main__ block below
import preprocessing as pr
import cv2
import pandas as pd
import os
from os.path import isfile, join
import uuid
import numpy as np
from PIL import Image
from PIL.ExifTags import TAGS
RUN_CALIBRATION = False # Run calibration sequence or use preexisting board four corners data from config/setup.txt
BOARD_SAVE_DEST = r"board_metadata.jpeg" # Where the debug metadata board visualization image is saved (to ensure we properly set up the metadata)
TMP_DEST = "/home/spark/cv-chess/core/vision/tmp/" # Where images are temporarily saved before being uploaded to drive in a batch
LOCAL_MD_FILENAME = "local_meta.json"
LOCAL_METADATA_JSON_PATH = TMP_DEST + LOCAL_MD_FILENAME
TL = [250, 115]
BL = [250, 687]
TR = [825, 115]
BR = [825, 687]
def rotate_image(image, angle):
image_center = tuple(np.array(image.shape[1::-1]) / 2)
rot_mat = cv2.getRotationMatrix2D(image_center, angle, 1.0)
result = cv2.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv2.INTER_LINEAR)
return result
def fen_to_dict(string):
name_to_num = {
'p' : 1,
'b' : 2,
'n' : 3,
'r' : 4,
'q' : 5,
'k' : 6,
}
out = {}
letters = "ABCDEFGH"
for i in range(8):
for j in range(1,9):
out[letters[i] + str(j)] = 0
string = string.split('/')
new_string = []
for s in string:
    expanded = ''
    for d in s:
        if d.isnumeric():
            # expand a digit into one '1' per empty square; the previous
            # in-place insertion via s.index(d) misplaced squares whenever
            # the same digit occurred twice in one rank
            expanded += '1' * int(d)
        else:
            expanded += d
    new_string.append(expanded)
for i in range(8, 0, -1):
for j in range(8):
if new_string[8-i][j].isnumeric():
out[letters[j] + str(i)] = 0
else:
out[letters[j] + str(i)] = name_to_num[new_string[8-i][j].lower()]
return out
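# Illustrative example (added note): for the standard starting position
#     fen_to_dict("rnbqkbnr/pppppppp/8/8/8/8/PPPPPPPP/RNBQKBNR")
# maps "A8" to 4 (rook) and every square on ranks 3-6 to 0 (empty); the
# piece codes ignore color because each symbol is lower-cased before lookup.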
def get_sorted_time_saved(images):
"""
Given a list of image filenames, return a dictionary of image filename : time written to disk pairs.
Purpose: for debugging dataset
Args:
images (list): List of image filenames
Returns:
dict: dict of image filenames
"""
image_dat = []
for image in images:
imtmp = Image.open(image)
tmp = imtmp.getexif()
image_dat.append(tmp)
dt = {}
for exifdata in image_dat:
idx = image_dat.index(exifdata)
# iterating over all EXIF data fields
for tag_id in exifdata:
tag = TAGS.get(tag_id, tag_id)
data = exifdata.get(tag_id)
# decode bytes
if isinstance(data, bytes):
data = data.decode()
# Add datetime field
if tag == "DateTime":
dt[images[idx]] = data
print(f"{tag:25}: {data}")
output = sorted(dt.items(), key=lambda eta: eta[1], reverse=False)
print(output)
dt = {}
for item in output:
dt[item[0]] = item[1]
with open(TMP_DEST + "datetimes.json", "w") as wr: # dump to json
json.dump(output, wr)
return output
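# Illustrative call (filenames below are hypothetical):
#     ordered = get_sorted_time_saved(["tile_a1.jpg", "tile_b2.jpg"])
# returns (filename, "YYYY:MM:DD HH:MM:SS") pairs sorted by the EXIF
# DateTime tag, and also dumps the ordering to TMP_DEST + "datetimes.json".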
def del_batch_from_text_file(file):
filenames = []
with open(file, "r") as rd:
for line in rd.readlines():
# parse each line for file to delete:
commaIndex = line.index(",")
filename = line[:commaIndex]
os.remove(TMP_DEST + filename)
if __name__ == "__main__":
# Initialize camera
realsense = RealSenseCamera()
"""
# Check if calibration sequence must be run
if RUN_CALIBRATION:
realsense.calibrate_board_pos()
if realsense.get_board_corners() is None:
print("Failed to run calibration. Exiting...")
exit()
"""
"""
board_meta = Board()
# Add pieces to metadata csv
board_meta.add_pieces({
"A1":ChessPiece(PieceType.KNIGHT, PieceColor.BLUE), "A2":ChessPiece(PieceType.PAWN, PieceColor.BLUE), "A3":ChessPiece(PieceType.PAWN, PieceColor.ORANGE)
})
board_meta.display_board(dest=BOARD_SAVE_DEST)
print(f"Verify board is correct output dest={BOARD_SAVE_DEST}.\nContine [Y] or Exit [E]?")
validate = input()
if validate.upper() == "E" or validate.upper() == "N":
print("Exiting...")
realsense.stop_pipeline()
exit()
files = []
files = [f for f in os.listdir(TMP_DEST) if isfile(os.path.join(TMP_DEST, f))]
# Check to see if there is pre-existing .csv metadata to add to
if LOCAL_MD_FILENAME in files:
try:
total_metadata = pd.read_csv(LOCAL_METADATA_JSON_PATH)
except:
total_metadata = pd.DataFrame()
else:
total_metadata = pd.DataFrame()
# Loop through input
while input() != "exit":
img = realsense.capture_rgb_image() # Capture the image
img = img[105:690, 348:940, :]
img = rotate_image(img, 1.5)
files = pr.board_to_64_files(img, base_directory=TMP_DEST) # Break image up into 64 files
piece_types, piece_colors = [], []
batch_id = uuid.uuid1()
for tile in sorted(files.keys()):
temp = board_meta.get_chess_piece(tile)
if temp is None:
piece_types.append(None)
piece_colors.append(None)
else:
piece_types.append(temp.piece_type.name)
piece_colors.append(temp.piece_color.name)
tmp_meta = pd.DataFrame({
"File" : [files[file] for file in files.keys()],
"Position" : [file for file in files.keys()],
"Piece Type" : piece_types,
"Piece Color" : piece_colors,
"Batch ID" : [batch_id for i in range(len(files.keys()))]
})
frames = [total_metadata, tmp_meta]
total_metadata = pd.concat(frames) # Concatenate dataframes
print(total_metadata)
total_metadata.to_csv(path_or_buf=LOCAL_METADATA_JSON_PATH)
"""
#pr.delete_board2_64_output(base_directory=TMP_DEST)
FEN = "5P1R/1Q1RP1P1/3R1P2/QQPPK1R1/1B1K1N2/B1R2N1B/1N2B3R/2B1BN2".upper()
last_input = None
df = pd.DataFrame()
while input() != "end":
resp = input("[n] for new fen, [anything key to take an image] >")
if resp == "new":
fen = input("Enter a FEN:").upper()
img = realsense.capture_rgb_image() # Capture the image
print("Captured image")
img = img[105:690, 348:940, :]
img = rotate_image(img, 1.5)
cv2.imwrite("original.jpg", img)
# Get dict of positions
temp_dict = fen_to_dict(FEN)
tiles = pr.board_to_64_files(img, temp_dict, base_directory=TMP_DEST) # Break image up into 64 files
data_frame = pd.DataFrame(tiles)
data_frame = data_frame.transpose()
frames = [df, data_frame]
df = pd.concat(frames) # Concatenate dataframe
csv_file = df.to_csv(TMP_DEST + 'my_csv.csv', header=False, index=False)
# Close streams and end pipeline
realsense.stop_pipeline()
| 31.754386
| 180
| 0.604144
| 971
| 7,240
| 4.356334
| 0.325438
| 0.016548
| 0.005674
| 0.010402
| 0.09669
| 0.078251
| 0.058865
| 0.058865
| 0.040189
| 0.040189
| 0
| 0.02487
| 0.283564
| 7,240
| 227
| 181
| 31.894273
| 0.79063
| 0.12942
| 0
| 0.036036
| 0
| 0
| 0.079957
| 0.02549
| 0
| 0
| 0
| 0
| 0
| 1
| 0.036036
| false
| 0
| 0.117117
| 0
| 0.18018
| 0.027027
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a3cf72d3d9f4ab9e1a082a0ec19d609ba13facf
| 528
|
py
|
Python
|
final_project/machinetranslation/tests/test.py
|
ChrisOmeh/xzceb-flask_eng_fr
|
6ce4a79539b8ace4bce999c32a9f58aa73827e5c
|
[
"Apache-2.0"
] | null | null | null |
final_project/machinetranslation/tests/test.py
|
ChrisOmeh/xzceb-flask_eng_fr
|
6ce4a79539b8ace4bce999c32a9f58aa73827e5c
|
[
"Apache-2.0"
] | null | null | null |
final_project/machinetranslation/tests/test.py
|
ChrisOmeh/xzceb-flask_eng_fr
|
6ce4a79539b8ace4bce999c32a9f58aa73827e5c
|
[
"Apache-2.0"
] | null | null | null |
import unittest
from translator import english_to_french, french_to_english
class TestenglishToFrench(unittest.TestCase):
def test1(self):
self.assertEqual(english_to_french(["Hello"]), "Bonjour")
self.assertNotEqual(english_to_french(["Bonjour"]), "Hello")
class TestfrenchToEnglish(unittest.TestCase):
def test1(self):
self.assertEqual(french_to_english(["Bonjour"]),'Hello')
self.assertNotEqual(french_to_english(["Hello"]), "Bonjour")
if __name__ == "__main__":
unittest.main()
| 35.2
| 68
| 0.727273
| 58
| 528
| 6.275862
| 0.362069
| 0.074176
| 0.123626
| 0.131868
| 0.236264
| 0.236264
| 0.236264
| 0
| 0
| 0
| 0
| 0.004396
| 0.138258
| 528
| 15
| 69
| 35.2
| 0.795604
| 0
| 0
| 0.166667
| 0
| 0
| 0.10586
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.166667
| false
| 0
| 0.166667
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a3d8daa44bdf458c650e19786cc3f1f2403777e
| 3,553
|
py
|
Python
|
tests/ut/python/parallel/test_auto_parallel_transformer.py
|
huxian123/mindspore
|
ec5ba10c82bbd6eccafe32d3a1149add90105bc8
|
[
"Apache-2.0"
] | 2
|
2021-04-22T07:00:59.000Z
|
2021-11-08T02:49:09.000Z
|
tests/ut/python/parallel/test_auto_parallel_transformer.py
|
ReIadnSan/mindspore
|
c3d1f54c7f6d6f514e5748430d24b16a4f9ee9e5
|
[
"Apache-2.0"
] | 1
|
2020-12-29T06:46:38.000Z
|
2020-12-29T06:46:38.000Z
|
tests/ut/python/parallel/test_auto_parallel_transformer.py
|
ReIadnSan/mindspore
|
c3d1f54c7f6d6f514e5748430d24b16a4f9ee9e5
|
[
"Apache-2.0"
] | 1
|
2021-05-10T03:30:36.000Z
|
2021-05-10T03:30:36.000Z
|
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import mindspore.nn as nn
from mindspore import Tensor, Parameter
from mindspore import context
from mindspore.common.api import _executor
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from tests.ut.python.ops.test_math_ops import VirtualLoss
grad_all = C.GradOperation(get_all=True)
class NetWithLoss(nn.Cell):
def __init__(self, network):
super(NetWithLoss, self).__init__()
self.loss = VirtualLoss()
self.network = network
def construct(self, x):
predict = self.network(x)
return self.loss(predict)
class GradWrap(nn.Cell):
def __init__(self, network):
super(GradWrap, self).__init__()
self.network = network
def construct(self, x):
return grad_all(self.network)(x)
class CustomDense(nn.Cell):
def __init__(self, row, column):
super(CustomDense, self).__init__()
self.weight = Parameter(Tensor(np.ones([row, column]).astype(np.float32) * 0.01), "w", requires_grad=True)
self.bias = Parameter(Tensor(np.zeros([row, column]).astype(np.float32)), "b", requires_grad=True)
self.matmul1 = P.MatMul()
self.add2 = P.TensorAdd()
self.activation3 = nn.ReLU()
def construct(self, x):
mat_output = self.matmul1(x, self.weight)
add_output = self.add2(mat_output, self.bias)
output = self.activation3(add_output)
return output
class DenseMutMulNet(nn.Cell):
def __init__(self):
super(DenseMutMulNet, self).__init__()
self.fc1 = CustomDense(4096, 4096)
self.fc2 = CustomDense(4096, 4096)
self.fc3 = CustomDense(4096, 4096)
self.fc4 = CustomDense(4096, 4096)
self.relu4 = nn.ReLU()
self.relu5 = nn.ReLU()
self.transpose = P.Transpose()
self.matmul1 = P.MatMul()
self.matmul2 = P.MatMul()
def construct(self, x):
q = self.fc1(x)
k = self.fc2(x)
v = self.fc3(x)
k = self.transpose(k, (1, 0))
c = self.relu4(self.matmul1(q, k))
s = self.relu5(self.matmul2(c, v))
s = self.fc4(s)
return s
class MultiTransformer(nn.Cell):
def __init__(self, layer_nums=1):
super(MultiTransformer, self).__init__()
self.layer = self._make_layer(layer_nums)
def _make_layer(self, layer_num):
layers = []
for _ in range(0, layer_num):
layers.append(DenseMutMulNet())
return nn.SequentialCell(layers)
def construct(self, x):
out = self.layer(x)
return out
def test_dmnet_train_step():
size = 8
context.set_auto_parallel_context(device_num=size, global_rank=0)
input_ = Tensor(np.ones([4096, 4096]).astype(np.float32) * 0.01)
net = GradWrap(NetWithLoss(MultiTransformer()))
context.set_auto_parallel_context(parallel_mode="auto_parallel")
net.set_auto_parallel()
_executor.compile(net, input_)
| 30.62931
| 114
| 0.665072
| 479
| 3,553
| 4.768267
| 0.352818
| 0.035026
| 0.019702
| 0.028459
| 0.15324
| 0.056042
| 0.056042
| 0
| 0
| 0
| 0
| 0.031873
| 0.22291
| 3,553
| 115
| 115
| 30.895652
| 0.795364
| 0.157895
| 0
| 0.142857
| 0
| 0
| 0.005037
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.155844
| false
| 0
| 0.103896
| 0.012987
| 0.402597
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a3dd5e26114808a45a3424f7c019a215fa96e04
| 6,227
|
py
|
Python
|
cloudcafe/compute/events/models/common.py
|
rcbops-qa/cloudcafe
|
d937f85496aadafbb94a330b9adb8ea18bee79ba
|
[
"Apache-2.0"
] | null | null | null |
cloudcafe/compute/events/models/common.py
|
rcbops-qa/cloudcafe
|
d937f85496aadafbb94a330b9adb8ea18bee79ba
|
[
"Apache-2.0"
] | null | null | null |
cloudcafe/compute/events/models/common.py
|
rcbops-qa/cloudcafe
|
d937f85496aadafbb94a330b9adb8ea18bee79ba
|
[
"Apache-2.0"
] | null | null | null |
"""
Copyright 2015 Rackspace
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from cloudcafe.compute.events.models.base import (
EventBaseModel, EventBaseListModel)
class Bandwidth(EventBaseModel):
"""Bandwidth Response Model
@summary: Response model for bandwidth from a compute
event notification
@note: Although the 'public' and 'private' interfaces are
not required, they are the most common names, and are
included as optional attributes for the sake of convenience
@note: This type may contain additional unspecified
BandwidthInterface fields, which will be captured in a
dictionary called kwargs
JSON Example:
{
"private": { <BandwidthInterface> },
"public": { <BandwidthInterface> }
}
"""
kwarg_map = {'private': 'private',
'public': 'public'}
optional_kwargs = ['private', 'public']
strict_checking = False
def __init__(self, private=None, public=None, **kwargs):
super(Bandwidth, self).__init__(locals())
@classmethod
def _dict_to_obj(cls, json_dict):
"""Override dict_to_obj implementation"""
obj = cls._map_values_to_kwargs(json_dict)
for key in obj.kwargs:
obj.kwargs[key] = BandwidthInterface._dict_to_obj(obj.kwargs[key])
if obj.private:
obj.private = BandwidthInterface._dict_to_obj(obj.private)
if obj.public:
obj.public = BandwidthInterface._dict_to_obj(obj.public)
return obj
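# Illustrative deserialization (values invented; assumes EventBaseModel maps
# kwarg_map keys onto attributes, as the override above suggests):
#
#     bw = Bandwidth._dict_to_obj(
#         {'public': {'bw_in': 123456, 'bw_out': 654321}})
#     bw.public.bw_in   # -> 123456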
class BandwidthInterface(EventBaseModel):
"""Bandwidth Interface Response Model
@summary: Response model for bandwidth on an interface from
a compute event notification
@note: Sub-model of Bandwidth
JSON Example:
{
"bw_in": 123456,
"bw_out": 654321
}
"""
kwarg_map = {'bw_in': 'bw_in',
'bw_out': 'bw_out'}
def __init__(self, bw_in, bw_out):
super(BandwidthInterface, self).__init__(locals())
class FixedIp(EventBaseModel):
"""Fixed IP Response Model
@summary: Response model for a fixed IP address from a
compute event notification
@note: Represents a single fixed IP
JSON Example:
{
"address": "10.10.0.0",
"floating_ips": [],
"label": "public",
"meta": {},
"type": "fixed",
"version": 4,
"vif_mac": "FE:ED:FA:00:1C:D4"
}
"""
kwarg_map = {
'address': 'address',
'floating_ips': 'floating_ips',
'label': 'label',
'meta': 'meta',
'type_': 'type',
'version': 'version',
'vif_mac': 'vif_mac'}
def __init__(self, address, floating_ips, label, meta, type_, version,
vif_mac):
super(FixedIp, self).__init__(locals())
class FixedIps(EventBaseListModel):
"""Fixed IPs Model
@summary: Response model for a list of fixed IP addresses
from a compute event notification
@note: Returns a list of elements of type 'FixedIp'
JSON Example:
{
"fixed_ips": [
{ <FixedIp> },
{ <FixedIp> }
]
}
"""
list_model_key = 'fixed_ips'
ObjectModel = FixedIp
class ImageMeta(EventBaseModel):
"""Image Metadata Model
@summary: Response model for image metadata from a compute
event notification
@note: This type may contain additional unspecified
fields, which will be captured in a dictionary called kwargs
JSON Example:
{
"image_meta": {
"auto_disk_config": "disabled",
"base_image_ref": "5e91ad7f-afe4-4a83-bd5f-84673462cae1",
"container_format": "ovf",
"disk_format": "vhd",
"image_type": "base",
"min_disk": "20",
"min_ram": "512",
"org.openstack__1__architecture": "x64",
"org.openstack__1__os_distro": "com.ubuntu",
"org.openstack__1__os_version": "12.04",
"os_type": "linux"
}
}
"""
kwarg_map = {
'auto_disk_config': 'auto_disk_config',
'base_image_ref': 'base_image_ref',
'container_format': 'container_format',
'disk_format': 'disk_format',
'image_type': 'image_type',
'min_disk': 'min_disk',
'min_ram': 'min_ram',
'org_openstack__1__architecture': 'org.openstack__1__architecture',
'org_openstack__1__os_distro': 'org.openstack__1__os_distro',
'org_openstack__1__os_version': 'org.openstack__1__os_version',
'os_type': 'os_type'}
strict_checking = False
def __init__(self, auto_disk_config, base_image_ref, container_format,
disk_format, image_type, min_disk, min_ram,
org_openstack__1__architecture, org_openstack__1__os_distro,
org_openstack__1__os_version, os_type, **kwargs):
super(ImageMeta, self).__init__(locals())
class InstanceException(EventBaseModel):
"""Instance Exception Model
@summary: Response model for an instance exception from a
compute event notification
@note: Represents a single instance exception
JSON Example:
{
"exception": {
"kwargs": {
"instance_uuid": "5e91ad7f-afe4-4a83-bd5f-84673462cae1",
"reason": "Something broke",
"code": 500
}
}
}
"""
kwarg_map = {'kwargs': 'kwargs'}
def __init__(self, kwargs):
super(InstanceException, self).__init__(locals())
| 30.826733
| 78
| 0.605107
| 686
| 6,227
| 5.209913
| 0.294461
| 0.040291
| 0.043649
| 0.033576
| 0.362899
| 0.260772
| 0.175154
| 0.116396
| 0.116396
| 0.078064
| 0
| 0.021141
| 0.29356
| 6,227
| 201
| 79
| 30.9801
| 0.791316
| 0.508431
| 0
| 0.064516
| 0
| 0
| 0.195149
| 0.063433
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096774
| false
| 0
| 0.016129
| 0
| 0.387097
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a3e2e6cca24d36e7e6072a43d4a7616c515981f
| 1,446
|
py
|
Python
|
openpyxl/drawing/tests/test_shapes.py
|
sekcheong/openpyxl
|
e1ba037f171efa348f75431c35a50de5ca277b78
|
[
"MIT"
] | null | null | null |
openpyxl/drawing/tests/test_shapes.py
|
sekcheong/openpyxl
|
e1ba037f171efa348f75431c35a50de5ca277b78
|
[
"MIT"
] | null | null | null |
openpyxl/drawing/tests/test_shapes.py
|
sekcheong/openpyxl
|
e1ba037f171efa348f75431c35a50de5ca277b78
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
# Copyright (c) 2010-2017 openpyxl
import pytest
from openpyxl.xml.functions import fromstring, tostring
from openpyxl.tests.helper import compare_xml
@pytest.fixture
def GradientFillProperties():
from ..fill import GradientFillProperties
return GradientFillProperties
class TestGradientFillProperties:
def test_ctor(self, GradientFillProperties):
fill = GradientFillProperties()
xml = tostring(fill.to_tree())
expected = """
<gradFill></gradFill>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, GradientFillProperties):
src = """
<gradFill></gradFill>
"""
node = fromstring(src)
fill = GradientFillProperties.from_tree(node)
assert fill == GradientFillProperties()
@pytest.fixture
def Transform2D():
from ..shapes import Transform2D
return Transform2D
class TestTransform2D:
def test_ctor(self, Transform2D):
shapes = Transform2D()
xml = tostring(shapes.to_tree())
expected = """
<xfrm></xfrm>
"""
diff = compare_xml(xml, expected)
assert diff is None, diff
def test_from_xml(self, Transform2D):
src = """
<root />
"""
node = fromstring(src)
shapes = Transform2D.from_tree(node)
assert shapes == Transform2D()
| 23.322581
| 55
| 0.64177
| 139
| 1,446
| 6.546763
| 0.302158
| 0.030769
| 0.035165
| 0.032967
| 0.138462
| 0.138462
| 0.138462
| 0.138462
| 0.138462
| 0.138462
| 0
| 0.015962
| 0.263485
| 1,446
| 61
| 56
| 23.704918
| 0.838498
| 0.02213
| 0
| 0.409091
| 0
| 0
| 0.095609
| 0.029745
| 0
| 0
| 0
| 0
| 0.090909
| 1
| 0.136364
| false
| 0
| 0.136364
| 0
| 0.363636
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a4054b106f4552f95f762ef5c1bcfd72acaebe7
| 19,509
|
py
|
Python
|
raysect/core/math/function/float/function3d/interpolate/tests/scripts/generate_3d_splines.py
|
raysect/source
|
11f03089d0379fc7fb4d23c6f60c3d255673cec9
|
[
"BSD-3-Clause"
] | 71
|
2015-10-25T16:50:18.000Z
|
2022-03-02T03:46:19.000Z
|
raysect/core/math/function/float/function3d/interpolate/tests/scripts/generate_3d_splines.py
|
raysect/source
|
11f03089d0379fc7fb4d23c6f60c3d255673cec9
|
[
"BSD-3-Clause"
] | 336
|
2015-02-11T22:39:54.000Z
|
2022-02-22T18:42:32.000Z
|
raysect/core/math/function/float/function3d/interpolate/tests/scripts/generate_3d_splines.py
|
raysect/source
|
11f03089d0379fc7fb4d23c6f60c3d255673cec9
|
[
"BSD-3-Clause"
] | 24
|
2016-09-11T17:12:10.000Z
|
2022-02-24T22:57:09.000Z
|
# Copyright (c) 2014-2021, Dr Alex Meakins, Raysect Project
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the Raysect Project nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import numpy as np
from raysect.core.math.function.float.function3d.interpolate.interpolator3darray import Interpolator3DArray
from matplotlib.colors import SymLogNorm, Normalize
import scipy
import sys
from raysect.core.math.function.float.function3d.interpolate.tests.data.interpolator3d_test_data import \
TestInterpolatorLoadBigValues, TestInterpolatorLoadNormalValues, TestInterpolatorLoadSmallValues,\
TestInterpolatorLoadBigValuesUneven, TestInterpolatorLoadNormalValuesUneven, TestInterpolatorLoadSmallValuesUneven
from raysect.core.math.function.float.function3d.interpolate.tests.test_interpolator_3d import X_LOWER, X_UPPER,\
NB_XSAMPLES, NB_X, X_EXTRAP_DELTA_MAX, PRECISION, Y_LOWER, Y_UPPER, NB_YSAMPLES, NB_Y, \
Y_EXTRAP_DELTA_MAX, EXTRAPOLATION_RANGE, large_extrapolation_range, Z_LOWER, Z_UPPER, \
NB_ZSAMPLES, NB_Z, Z_EXTRAP_DELTA_MAX, N_EXTRAPOLATION, uneven_linspace
# Force scientific format to get the right number of significant figures
np.set_printoptions(30000, linewidth=100, formatter={'float': lambda x_str: format(x_str, '.'+str(PRECISION)+'E')},
threshold=sys.maxsize)
# Overwrite imported values here.
VISUAL_NOT_TESTS = False
if VISUAL_NOT_TESTS:
NB_X = 51
NB_Y = 51
NB_Z = 51
NB_XSAMPLES = 101
NB_YSAMPLES = 101
NB_ZSAMPLES = 101
X_EXTRAP_DELTA_MIN = 0.04
Y_EXTRAP_DELTA_MIN = 0.04
Z_EXTRAP_DELTA_MIN = 0.04
BIG_VALUE_FACTOR = 20.
SMALL_VALUE_FACTOR = -20.
def docstring_test():
"""
.. code-block:: python
>>> from raysect.core.math.function.float.function3d.interpolate.interpolator3darray import Interpolator3DArray
>>>
>>> x = np.linspace(-1., 1., 20)
>>> y = np.linspace(-1., 1., 20)
>>> z = np.linspace(-1., 1., 20)
>>> x_array, y_array, z_array = np.meshgrid(x, y, z, indexing='ij')
>>> f = np.exp(-(x_array**2 + y_array**2 + z_array**2))
>>> interpolator3D = Interpolator3DArray(x, y, z, f, 'cubic', 'nearest', 1.0, 1.0, 1.0)
>>> # Interpolation
>>> interpolator3D(1.0, 1.0, 0.2)
0.1300281183136766
>>> # Extrapolation
>>> interpolator3D(1.0, 1.0, 1.1)
0.0497870683678659
>>> # Extrapolation out of bounds
>>> interpolator3D(1.0, 1.0, 2.1)
ValueError: The specified value (z=2.1) is outside of extrapolation range.
"""
pass
def get_extrapolation_input_values(
x_lower, x_upper, y_lower, y_upper, z_lower, z_upper, x_extrap_delta_max, y_extrap_delta_max,
z_extrap_delta_max, x_extrap_delta_min, y_extrap_delta_min, z_extrap_delta_min):
xsamples_extrap_out_of_bounds_options = np.array(
[x_lower - x_extrap_delta_max, (x_lower + x_upper) / 2., x_upper + x_extrap_delta_max])
ysamples_extrap_out_of_bounds_options = np.array(
[y_lower - y_extrap_delta_max, (y_lower + y_upper) / 2., y_upper + y_extrap_delta_max])
zsamples_extrap_out_of_bounds_options = np.array(
[z_lower - z_extrap_delta_max, (z_lower + z_upper) / 2., z_upper + z_extrap_delta_max])
xsamples_extrap_in_bounds_options = np.array(
[x_lower - x_extrap_delta_min, (x_lower + x_upper) / 2., x_upper + x_extrap_delta_min])
ysamples_extrap_in_bounds_options = np.array(
[y_lower - y_extrap_delta_min, (y_lower + y_upper) / 2., y_upper + y_extrap_delta_min])
zsamples_extrap_in_bounds_options = np.array(
[z_lower - z_extrap_delta_min, (z_lower + z_upper) / 2., z_upper + z_extrap_delta_min])
xsamples_extrap_out_of_bounds = []
ysamples_extrap_out_of_bounds = []
zsamples_extrap_out_of_bounds = []
xsamples_extrap_in_bounds = []
ysamples_extrap_in_bounds = []
zsamples_extrap_in_bounds = []
edge_indicies_x = [0, len(xsamples_extrap_out_of_bounds_options) - 1]
edge_indicies_y = [0, len(ysamples_extrap_out_of_bounds_options) - 1]
edge_indicies_z = [0, len(zsamples_extrap_out_of_bounds_options) - 1]
for i_x in range(len(xsamples_extrap_out_of_bounds_options)):
for j_y in range(len(ysamples_extrap_out_of_bounds_options)):
for k_z in range(len(zsamples_extrap_out_of_bounds_options)):
if not (i_x not in edge_indicies_x and j_y not in edge_indicies_y and k_z not in edge_indicies_z):
xsamples_extrap_out_of_bounds.append(xsamples_extrap_out_of_bounds_options[i_x])
ysamples_extrap_out_of_bounds.append(ysamples_extrap_out_of_bounds_options[j_y])
zsamples_extrap_out_of_bounds.append(zsamples_extrap_out_of_bounds_options[k_z])
xsamples_extrap_in_bounds.append(xsamples_extrap_in_bounds_options[i_x])
ysamples_extrap_in_bounds.append(ysamples_extrap_in_bounds_options[j_y])
zsamples_extrap_in_bounds.append(zsamples_extrap_in_bounds_options[k_z])
return \
np.array(xsamples_extrap_out_of_bounds), np.array(ysamples_extrap_out_of_bounds), \
np.array(zsamples_extrap_out_of_bounds), np.array(xsamples_extrap_in_bounds), \
np.array(ysamples_extrap_in_bounds), np.array(zsamples_extrap_in_bounds)
def pcolourmesh_corners(input_array):
return np.concatenate((input_array[:-1] - np.diff(input_array)/2.,
np.array([input_array[-1] - (input_array[-1] - input_array[-2]) / 2.,
input_array[-1] + (input_array[-1] - input_array[-2]) / 2.])), axis=0)
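# Quick check of the helper above:
#     pcolourmesh_corners(np.array([0., 1., 2.]))  # -> [-0.5, 0.5, 1.5, 2.5]
# i.e. the midpoints between samples plus extrapolated end corners, giving
# the len(input) + 1 cell edges that pcolormesh expects.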
def function_to_spline(x_input, y_input, z_input, factor_in):
t = np.pi * np.sqrt((x_input ** 2 + y_input ** 2 + z_input ** 2))
return factor_in*np.sinc(t)
if __name__ == '__main__':
# Calculate for big values, small values, or normal values
big_values = False
small_values = True
log_scale = False
uneven_spacing = False
use_saved_datastore_spline_knots = True
verbose_options = [False, True, False, False]
if VISUAL_NOT_TESTS:
index_x_in = 40
else:
index_x_in = 4
index_y_in = 0
index_z_in = 0
index_y_plot = 0
index_z_plot = 0
print('Using scipy version', scipy.__version__)
# Find the function values to be used
if big_values:
factor = np.power(10., BIG_VALUE_FACTOR)
elif small_values:
factor = np.power(10., SMALL_VALUE_FACTOR)
else:
factor = 1.
if uneven_spacing:
x_in = uneven_linspace(X_LOWER, X_UPPER, NB_X, offset_fraction=1./3.)
y_in = uneven_linspace(Y_LOWER, Y_UPPER, NB_Y, offset_fraction=1./3.)
z_in = uneven_linspace(Z_LOWER, Z_UPPER, NB_Z, offset_fraction=1./3.)
else:
x_in = np.linspace(X_LOWER, X_UPPER, NB_X)
y_in = np.linspace(Y_LOWER, Y_UPPER, NB_Y)
z_in = np.linspace(Z_LOWER, Z_UPPER, NB_Z)
x_in_full, y_in_full, z_in_full = np.meshgrid(x_in, y_in, z_in, indexing='ij')
f_in = function_to_spline(x_in_full, y_in_full, z_in_full, factor)
if use_saved_datastore_spline_knots:
if uneven_spacing:
if big_values:
reference_loaded_values = TestInterpolatorLoadBigValuesUneven()
elif small_values:
reference_loaded_values = TestInterpolatorLoadSmallValuesUneven()
else:
reference_loaded_values = TestInterpolatorLoadNormalValuesUneven()
else:
if big_values:
reference_loaded_values = TestInterpolatorLoadBigValues()
elif small_values:
reference_loaded_values = TestInterpolatorLoadSmallValues()
else:
reference_loaded_values = TestInterpolatorLoadNormalValues()
f_in = reference_loaded_values.data
if verbose_options[0]:
print('Save this to self.data in test_interpolator:\n', repr(f_in))
xsamples = np.linspace(X_LOWER, X_UPPER, NB_XSAMPLES)
ysamples = np.linspace(Y_LOWER, Y_UPPER, NB_YSAMPLES)
zsamples = np.linspace(Z_LOWER, Z_UPPER, NB_ZSAMPLES)
xsamples_extrapolation, ysamples_extrapolation, zsamples_extrapolation = large_extrapolation_range(
xsamples, ysamples, zsamples, EXTRAPOLATION_RANGE, N_EXTRAPOLATION
)
# Extrapolation x, y and z values
xsamples_out_of_bounds, ysamples_out_of_bounds, zsamples_out_of_bounds, xsamples_in_bounds, ysamples_in_bounds, \
zsamples_in_bounds = get_extrapolation_input_values(
X_LOWER, X_UPPER, Y_LOWER, Y_UPPER, Z_LOWER, Z_UPPER, X_EXTRAP_DELTA_MAX, Y_EXTRAP_DELTA_MAX,
Z_EXTRAP_DELTA_MAX, X_EXTRAP_DELTA_MIN, Y_EXTRAP_DELTA_MIN, Z_EXTRAP_DELTA_MIN
)
interpolator3D = Interpolator3DArray(x_in, y_in, z_in, f_in, 'linear', 'linear', extrapolation_range_x=2.0,
extrapolation_range_y=2.0, extrapolation_range_z=2.0)
if VISUAL_NOT_TESTS:
n_lower_upper_interp = 51
else:
n_lower_upper_interp = 19
n_lower = 50
lower_p = 0.9
xsamples_lower_and_upper = np.linspace(X_LOWER, X_UPPER, n_lower_upper_interp)
ysamples_lower_and_upper = np.linspace(Y_LOWER, Y_UPPER, n_lower_upper_interp)
zsamples_lower_and_upper = np.linspace(Z_LOWER, Z_UPPER, n_lower_upper_interp)
xsamples_lower_and_upper = np.concatenate((np.linspace(X_LOWER - (X_UPPER - X_LOWER) * lower_p, X_LOWER, n_lower)[
:-1], xsamples_lower_and_upper,
np.linspace(X_UPPER, X_UPPER + (X_UPPER - X_LOWER) * lower_p, n_lower)[
1:]))
ysamples_lower_and_upper = np.concatenate((np.linspace(Y_LOWER - (Y_UPPER - Y_LOWER) * lower_p, Y_LOWER, n_lower)[
:-1], ysamples_lower_and_upper,
np.linspace(Y_UPPER, Y_UPPER + (Y_UPPER - Y_LOWER) * lower_p, n_lower)[
1:]))
zsamples_lower_and_upper = np.concatenate((np.linspace(Z_LOWER - (Z_UPPER - Z_LOWER) * lower_p, Z_LOWER, n_lower)[
:-1], zsamples_lower_and_upper,
np.linspace(Z_UPPER, Z_UPPER + (Z_UPPER - Z_LOWER) * lower_p, n_lower)[
1:]))
index_ysamples_lower_upper = np.where(y_in[index_y_in] == ysamples_lower_and_upper)[0].item()
# extrapolation to save
f_extrapolation_output = np.zeros((len(xsamples_extrapolation), ))
for i in range(len(xsamples_extrapolation)):
f_extrapolation_output[i] = interpolator3D(
xsamples_extrapolation[i], ysamples_extrapolation[i], zsamples_extrapolation[i]
)
if verbose_options[1]:
print('Output of extrapolation to be saved:\n', repr(f_extrapolation_output))
check_plot = True
if check_plot:
import matplotlib.pyplot as plt
from matplotlib import cm
# Install mayavi and pyQt5
main_plots_on = True
if main_plots_on:
fig, ax = plt.subplots(1, 4)
fig1, ax1 = plt.subplots(1, 2)
if not (x_in[index_x_in] == xsamples).any():
raise ValueError(
f'To compare a slice, NB_XSAMPLES={NB_XSAMPLES}-1, NB_YSAMPLES={NB_YSAMPLES}-1, NB_ZSAMPLES='
f'{NB_ZSAMPLES}-1 must be divisible by NB_X={NB_X}-1, NB_Y={NB_Y}-1, NB_Z={NB_Z}-1'
)
if not (y_in[index_y_in] == ysamples_lower_and_upper).any():
raise ValueError(
f'To compare a slice, NB_XSAMPLES={NB_XSAMPLES}-1, NB_YSAMPLES={NB_YSAMPLES}-1, NB_ZSAMPLES='
f'{NB_ZSAMPLES}-1 must be divisible by NB_X={NB_X}-1, NB_Y={NB_Y}-1, NB_Z={NB_Z}-1'
)
index_xsamples = np.where(x_in[index_x_in] == xsamples)[0].item()
index_ysamples_lower_upper = np.where(y_in[index_y_in] == ysamples_lower_and_upper)[0].item()
# index_ysamples_lower_upper = 0
# index_zsamples_lower_upper = 0
index_zsamples_lower_upper = np.where(z_in[index_z_in] == zsamples_lower_and_upper)[0].item()
f_plot_x = f_in[index_x_in, :, :]
y_corners_x = pcolourmesh_corners(y_in)
z_corners_x = pcolourmesh_corners(z_in)
min_colourmap = np.min(f_in)
max_colourmap = np.max(f_in)
if log_scale:
c_norm = SymLogNorm(vmin=min_colourmap, vmax=max_colourmap, linthresh=0.03)
else:
c_norm = Normalize(vmin=min_colourmap, vmax=max_colourmap)
colourmap = cm.get_cmap('viridis', 512)
ax[0].pcolormesh(y_corners_x, z_corners_x, f_plot_x, norm=c_norm, cmap='viridis')
# ax[0].pcolormesh(y_in, z_in, f_plot_x)
ax[0].set_aspect('equal')
f_out = np.zeros((len(xsamples), len(ysamples), len(zsamples)))
for i in range(len(xsamples)):
for j in range(len(ysamples)):
for k in range(len(zsamples)):
f_out[i, j, k] = interpolator3D(xsamples[i], ysamples[j], zsamples[k])
if verbose_options[2]:
print('Test interpolation:\n', repr(f_out))
f_out_lower_and_upper = np.zeros((len(xsamples_lower_and_upper), len(ysamples_lower_and_upper),
len(zsamples_lower_and_upper)))
for i in range(len(xsamples_lower_and_upper)):
for j in range(len(ysamples_lower_and_upper)):
for k in range(len(zsamples_lower_and_upper)):
f_out_lower_and_upper[i, j, k] = interpolator3D(
xsamples_lower_and_upper[i], ysamples_lower_and_upper[j], zsamples_lower_and_upper[k]
)
f_out_extrapolation = np.zeros((len(xsamples_extrapolation), ))
for i in range(len(xsamples_extrapolation)):
f_out_extrapolation[i] = interpolator3D(
xsamples_extrapolation[i], ysamples_extrapolation[i], zsamples_extrapolation[i]
)
if verbose_options[3]:
print('New output of extrapolation to be saved:\n', repr(f_out_extrapolation))
index_xsamples_extrap = np.where(x_in[index_x_in] == xsamples_extrapolation)
f_out_x_extrapolation = f_out_extrapolation[index_xsamples_extrap]
im = ax[3].scatter(
ysamples_extrapolation[index_xsamples_extrap], zsamples_extrapolation[index_xsamples_extrap],
c=f_out_x_extrapolation, norm=c_norm, cmap='viridis', s=10
)
ax[3].set_aspect('equal')
f_out_x = f_out[index_xsamples, :, :]
ysamples_mesh, zsamples_mesh = np.meshgrid(ysamples, zsamples)
ax[0].scatter(
ysamples_mesh.ravel(), zsamples_mesh.ravel(), c=f_out_x.ravel(), norm=c_norm, cmap='viridis', s=10
)
index_y_print = -1
index_z_print = 0
index_ysamples_print = np.where(y_in[index_y_print] == ysamples)[0].item()
index_zsamples_print = np.where(z_in[index_z_print] == zsamples)[0].item()
ax[0].set_title('Slice of x', size=20)
ax[1].set_title(f'Interpolated points \nin slice of x={x_in[index_x_in]}', size=20)
y_corners_xsamples = pcolourmesh_corners(ysamples)
z_corners_xsamples = pcolourmesh_corners(zsamples)
im2 = ax[1].pcolormesh(y_corners_xsamples, z_corners_xsamples, f_out_x, norm=c_norm, cmap='viridis')
ax[1].set_aspect('equal')
if not (x_in[index_x_in] == xsamples_lower_and_upper).any():
raise ValueError(
f'To compare a slice, n_lower_upper={n_lower}-1, must be divisible by NB_X={NB_X}-1, NB_Y={NB_Y}-1,'
f' NB_Z={NB_Z}-1'
)
index_xsamples_lower_and_upper = np.where(x_in[index_x_in] == xsamples_lower_and_upper)[0].item()
y_corners_xsamples_lower_and_upper = pcolourmesh_corners(ysamples_lower_and_upper)
z_corners_xsamples_lower_and_upper = pcolourmesh_corners(zsamples_lower_and_upper)
f_out_lower_and_upper_x = f_out_lower_and_upper[index_xsamples_lower_and_upper, :, :]
im3 = ax[2].pcolormesh(
y_corners_xsamples_lower_and_upper, z_corners_xsamples_lower_and_upper, f_out_lower_and_upper_x,
norm=c_norm, cmap='viridis'
)
check_array_z = np.zeros(len(zsamples_lower_and_upper))
check_array_y = np.zeros(len(ysamples_lower_and_upper))
for i in range(len(zsamples_lower_and_upper)):
check_array_z[i] = interpolator3D(
x_in[index_x_in], ysamples_lower_and_upper[index_ysamples_lower_upper], zsamples_lower_and_upper[i]
)
check_array_y[i] = interpolator3D(
x_in[index_x_in], ysamples_lower_and_upper[i], zsamples_lower_and_upper[index_zsamples_lower_upper]
)
ax1[0].plot(zsamples_lower_and_upper, f_out_lower_and_upper_x[index_ysamples_lower_upper, :])
ax1[0].plot(z_in, f_in[index_x_in, index_y_in, :], 'bo')
ax1[0].plot(zsamples_lower_and_upper, check_array_z, 'gx')
ax1[1].plot(ysamples_lower_and_upper, check_array_y)
# ax1[1].plot(ysamples_lower_and_upper, f_out_lower_and_upper_x[:, index_z_plot])
ax1[0].axvline(z_in[0], color='r', linestyle='--')
ax1[0].axvline(z_in[-1], color='r', linestyle='--')
ax1[1].axvline(y_in[0], color='r', linestyle='--')
ax1[1].axvline(y_in[-1], color='r', linestyle='--')
fig.colorbar(im, ax=ax[0])
fig.colorbar(im2, ax=ax[1])
fig.colorbar(im3, ax=ax[2])
ax[2].set_aspect('equal')
plt.show()
| 49.767857
| 120
| 0.65703
| 2,693
| 19,509
| 4.383587
| 0.126996
| 0.033884
| 0.055061
| 0.030241
| 0.532317
| 0.439983
| 0.34028
| 0.252181
| 0.218806
| 0.158153
| 0
| 0.020592
| 0.248244
| 19,509
| 391
| 121
| 49.895141
| 0.784331
| 0.144087
| 0
| 0.118705
| 0
| 0.010791
| 0.047544
| 0.008326
| 0
| 0
| 0
| 0
| 0
| 1
| 0.014388
| false
| 0.003597
| 0.032374
| 0.003597
| 0.057554
| 0.035971
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a42eafd975ea0137426e4612231c34ec1b242ab
| 4,041
|
py
|
Python
|
examples/benchmarking/benchmark_bm25.py
|
shibing624/similarities
|
f573ae158b0e2a908c1ef549784bd88e23cbd9c6
|
[
"Apache-2.0"
] | 16
|
2022-02-23T11:46:18.000Z
|
2022-03-29T07:35:33.000Z
|
examples/benchmarking/benchmark_bm25.py
|
shibing624/similarities
|
f573ae158b0e2a908c1ef549784bd88e23cbd9c6
|
[
"Apache-2.0"
] | 1
|
2022-03-15T13:51:36.000Z
|
2022-03-16T02:56:15.000Z
|
examples/benchmarking/benchmark_bm25.py
|
shibing624/similarities
|
f573ae158b0e2a908c1ef549784bd88e23cbd9c6
|
[
"Apache-2.0"
] | 3
|
2022-02-24T02:06:05.000Z
|
2022-03-13T11:31:16.000Z
|
# -*- coding: utf-8 -*-
"""
@author:XuMing(xuming624@qq.com)
@description:
"""
import datetime
import os
import pathlib
import random
import sys
from loguru import logger
sys.path.append('../..')
from similarities import BM25Similarity
from similarities.utils import http_get
from similarities.data_loader import SearchDataLoader
from similarities.evaluation import evaluate
random.seed(42)
pwd_path = os.path.dirname(os.path.realpath(__file__))
def get_dataset(dataset):
    # Download the BEIR dataset zip and unzip it, unless it is already cached locally.
    url = "https://public.ukp.informatik.tu-darmstadt.de/thakur/BEIR/datasets/{}.zip".format(dataset)
    zip_file = os.path.join(pwd_path, "{}.zip".format(dataset))
    if not os.path.exists(zip_file):
        logger.info("Dataset does not exist, downloading...")
        http_get(url, zip_file, extract=True)
    else:
        logger.info("Dataset already exists, skipping download.")
    data_path = os.path.join(pwd_path, dataset)
    return data_path
def get_scifact():
    return get_dataset("scifact")
def get_dbpedia():
    return get_dataset("dbpedia-entity")
data_path = get_scifact()
#### Loading test queries and corpus in SciFact
corpus, queries, qrels = SearchDataLoader(data_path).load(split="test")
corpus_ids, query_ids = list(corpus), list(queries)
logger.info(f"corpus: {len(corpus)}, queries: {len(queries)}")
#### Randomly sample up to 1M docs from the original corpus
#### First include all relevant documents (i.e. present in qrels)
corpus_set = set()
for query_id in qrels:
corpus_set.update(list(qrels[query_id].keys()))
corpus_new = {corpus_id: corpus[corpus_id] for corpus_id in corpus_set}
#### Remove already seen k relevant documents and sample (1M - k) docs randomly
remaining_corpus = list(set(corpus_ids) - corpus_set)
sample = min(1000000 - len(corpus_set), len(remaining_corpus))
# sample = 10
for corpus_id in random.sample(remaining_corpus, sample):
corpus_new[corpus_id] = corpus[corpus_id]
corpus_docs = {corpus_id: doc['title'] + doc['text'] for corpus_id, doc in corpus_new.items()}
#### Index the sampled passages (separately)
model = BM25Similarity(corpus_docs)
#### Saving benchmark times
time_taken_all = {}
for query_id in query_ids:
query = {query_id: queries[query_id]}
    #### Measure single-query latency when retrieving the top-10 BM25 documents
start = datetime.datetime.now()
q_res = model.most_similar(query, topn=10)
end = datetime.datetime.now()
# print(q_res)
#### Measuring time taken in ms (milliseconds)
time_taken = (end - start)
time_taken = time_taken.total_seconds() * 1000
time_taken_all[query_id] = time_taken
# logger.info("query: {}: {} {:.2f}ms".format(query_id, query, time_taken))
# logger.info("\tsearch result: {}".format(results[:2]))
time_taken = list(time_taken_all.values())
logger.info("Average time taken: {:.2f}ms".format(sum(time_taken) / len(time_taken_all)))
#### Saving benchmark times with batch
# queries = [queries[query_id] for query_id in query_ids]
start = datetime.datetime.now()
results = model.most_similar(queries, topn=10)
end = datetime.datetime.now()
#### Measuring time taken in ms (milliseconds)
time_taken = (end - start)
time_taken = time_taken.total_seconds() * 1000
logger.info("All, Spend {:.2f}ms".format(time_taken))
logger.info("Average time taken: {:.2f}ms".format(time_taken / len(queries)))
logger.info(f"Results size: {len(results)}")
#### Evaluate your retrieval using NDCG@k, MAP@K ...
ndcg, _map, recall, precision = evaluate(qrels, results)
logger.info(f"MAP: {_map}")
| 35.761062
| 113
| 0.717644
| 577
| 4,041
| 4.861352
| 0.287695
| 0.064171
| 0.024955
| 0.018538
| 0.356506
| 0.346524
| 0.312299
| 0.290196
| 0.264528
| 0.264528
| 0
| 0.013322
| 0.145509
| 4,041
| 112
| 114
| 36.080357
| 0.799015
| 0.21752
| 0
| 0.338028
| 0
| 0.028169
| 0.169459
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028169
| false
| 0
| 0.140845
| 0
| 0.197183
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a43a63b067e2c9d49aadc213c2c322feea2bc14
| 14,531
|
py
|
Python
|
tb/test_arp_64.py
|
sergachev/verilog-ethernet
|
cef6b47bb3b969120cabce3b89b0c98bb47ca6a9
|
[
"MIT"
] | 2
|
2020-01-09T05:58:04.000Z
|
2022-01-04T03:29:00.000Z
|
tb/test_arp_64.py
|
zslwyuan/verilog-ethernet
|
cd6b87e984ff7cbeaf11f9468124019f5e654bdb
|
[
"MIT"
] | null | null | null |
tb/test_arp_64.py
|
zslwyuan/verilog-ethernet
|
cd6b87e984ff7cbeaf11f9468124019f5e654bdb
|
[
"MIT"
] | 1
|
2021-09-25T05:45:18.000Z
|
2021-09-25T05:45:18.000Z
|
#!/usr/bin/env python
"""
Copyright (c) 2014-2018 Alex Forencich
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in
all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
"""
from myhdl import *
import os
import axis_ep
import eth_ep
import arp_ep
module = 'arp_64'
testbench = 'test_%s' % module
srcs = []
srcs.append("../rtl/%s.v" % module)
srcs.append("../rtl/lfsr.v")
srcs.append("../rtl/arp_cache.v")
srcs.append("../rtl/arp_eth_rx_64.v")
srcs.append("../rtl/arp_eth_tx_64.v")
srcs.append("%s.v" % testbench)
src = ' '.join(srcs)
build_cmd = "iverilog -o %s.vvp %s" % (testbench, src)
def bench():
# Inputs
clk = Signal(bool(0))
rst = Signal(bool(0))
current_test = Signal(intbv(0)[8:])
s_eth_hdr_valid = Signal(bool(0))
s_eth_dest_mac = Signal(intbv(0)[48:])
s_eth_src_mac = Signal(intbv(0)[48:])
s_eth_type = Signal(intbv(0)[16:])
s_eth_payload_axis_tdata = Signal(intbv(0)[64:])
s_eth_payload_axis_tkeep = Signal(intbv(0)[8:])
s_eth_payload_axis_tvalid = Signal(bool(0))
s_eth_payload_axis_tlast = Signal(bool(0))
s_eth_payload_axis_tuser = Signal(bool(0))
m_eth_payload_axis_tready = Signal(bool(0))
m_eth_hdr_ready = Signal(bool(0))
arp_request_valid = Signal(bool(0))
arp_request_ip = Signal(intbv(0)[32:])
arp_response_ready = Signal(bool(0))
local_mac = Signal(intbv(0)[48:])
local_ip = Signal(intbv(0)[32:])
gateway_ip = Signal(intbv(0)[32:])
subnet_mask = Signal(intbv(0)[32:])
clear_cache = Signal(bool(0))
# Outputs
s_eth_hdr_ready = Signal(bool(0))
s_eth_payload_axis_tready = Signal(bool(0))
m_eth_hdr_valid = Signal(bool(0))
m_eth_dest_mac = Signal(intbv(0)[48:])
m_eth_src_mac = Signal(intbv(0)[48:])
m_eth_type = Signal(intbv(0)[16:])
m_eth_payload_axis_tdata = Signal(intbv(0)[64:])
m_eth_payload_axis_tkeep = Signal(intbv(0)[8:])
m_eth_payload_axis_tvalid = Signal(bool(0))
m_eth_payload_axis_tlast = Signal(bool(0))
m_eth_payload_axis_tuser = Signal(bool(0))
arp_request_ready = Signal(bool(0))
arp_response_valid = Signal(bool(0))
arp_response_error = Signal(bool(0))
arp_response_mac = Signal(intbv(0)[48:])
# sources and sinks
eth_source_pause = Signal(bool(0))
eth_sink_pause = Signal(bool(0))
eth_source = eth_ep.EthFrameSource()
eth_source_logic = eth_source.create_logic(
clk,
rst,
eth_hdr_ready=s_eth_hdr_ready,
eth_hdr_valid=s_eth_hdr_valid,
eth_dest_mac=s_eth_dest_mac,
eth_src_mac=s_eth_src_mac,
eth_type=s_eth_type,
eth_payload_tdata=s_eth_payload_axis_tdata,
eth_payload_tkeep=s_eth_payload_axis_tkeep,
eth_payload_tvalid=s_eth_payload_axis_tvalid,
eth_payload_tready=s_eth_payload_axis_tready,
eth_payload_tlast=s_eth_payload_axis_tlast,
eth_payload_tuser=s_eth_payload_axis_tuser,
pause=eth_source_pause,
name='eth_source'
)
eth_sink = eth_ep.EthFrameSink()
eth_sink_logic = eth_sink.create_logic(
clk,
rst,
eth_hdr_ready=m_eth_hdr_ready,
eth_hdr_valid=m_eth_hdr_valid,
eth_dest_mac=m_eth_dest_mac,
eth_src_mac=m_eth_src_mac,
eth_type=m_eth_type,
eth_payload_tdata=m_eth_payload_axis_tdata,
eth_payload_tkeep=m_eth_payload_axis_tkeep,
eth_payload_tvalid=m_eth_payload_axis_tvalid,
eth_payload_tready=m_eth_payload_axis_tready,
eth_payload_tlast=m_eth_payload_axis_tlast,
eth_payload_tuser=m_eth_payload_axis_tuser,
pause=eth_sink_pause,
name='eth_sink'
)
arp_request_source = axis_ep.AXIStreamSource()
arp_request_source_logic = arp_request_source.create_logic(
clk,
rst,
tdata=(arp_request_ip,),
tvalid=arp_request_valid,
tready=arp_request_ready,
name='arp_request_source'
)
arp_response_sink = axis_ep.AXIStreamSink()
arp_response_sink_logic = arp_response_sink.create_logic(
clk,
rst,
tdata=(arp_response_error, arp_response_mac),
tvalid=arp_response_valid,
tready=arp_response_ready,
name='arp_response_sink'
)
# DUT
if os.system(build_cmd):
raise Exception("Error running build command")
dut = Cosimulation(
"vvp -m myhdl %s.vvp -lxt2" % testbench,
clk=clk,
rst=rst,
current_test=current_test,
s_eth_hdr_valid=s_eth_hdr_valid,
s_eth_hdr_ready=s_eth_hdr_ready,
s_eth_dest_mac=s_eth_dest_mac,
s_eth_src_mac=s_eth_src_mac,
s_eth_type=s_eth_type,
s_eth_payload_axis_tdata=s_eth_payload_axis_tdata,
s_eth_payload_axis_tkeep=s_eth_payload_axis_tkeep,
s_eth_payload_axis_tvalid=s_eth_payload_axis_tvalid,
s_eth_payload_axis_tready=s_eth_payload_axis_tready,
s_eth_payload_axis_tlast=s_eth_payload_axis_tlast,
s_eth_payload_axis_tuser=s_eth_payload_axis_tuser,
m_eth_hdr_valid=m_eth_hdr_valid,
m_eth_hdr_ready=m_eth_hdr_ready,
m_eth_dest_mac=m_eth_dest_mac,
m_eth_src_mac=m_eth_src_mac,
m_eth_type=m_eth_type,
m_eth_payload_axis_tdata=m_eth_payload_axis_tdata,
m_eth_payload_axis_tkeep=m_eth_payload_axis_tkeep,
m_eth_payload_axis_tvalid=m_eth_payload_axis_tvalid,
m_eth_payload_axis_tready=m_eth_payload_axis_tready,
m_eth_payload_axis_tlast=m_eth_payload_axis_tlast,
m_eth_payload_axis_tuser=m_eth_payload_axis_tuser,
arp_request_valid=arp_request_valid,
arp_request_ready=arp_request_ready,
arp_request_ip=arp_request_ip,
arp_response_valid=arp_response_valid,
arp_response_ready=arp_response_ready,
arp_response_error=arp_response_error,
arp_response_mac=arp_response_mac,
local_mac=local_mac,
local_ip=local_ip,
gateway_ip=gateway_ip,
subnet_mask=subnet_mask,
clear_cache=clear_cache
)
@always(delay(4))
def clkgen():
clk.next = not clk
@instance
def check():
yield delay(100)
yield clk.posedge
rst.next = 1
yield clk.posedge
rst.next = 0
yield clk.posedge
yield delay(100)
yield clk.posedge
yield clk.posedge
local_mac.next = 0xDAD1D2D3D4D5
local_ip.next = 0xc0a80165
gateway_ip.next = 0xc0a80101
subnet_mask.next = 0xFFFFFF00
yield clk.posedge
print("test 1: ARP request")
current_test.next = 1
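        # Send a broadcast ARP request (arp_oper=1) for the DUT's local IP and
        # expect an ARP reply (arp_oper=2) with the sender/target MAC and IP
        # fields swapped, as asserted below.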
test_frame = arp_ep.ARPFrame()
test_frame.eth_dest_mac = 0xFFFFFFFFFFFF
test_frame.eth_src_mac = 0x5A5152535455
test_frame.eth_type = 0x0806
test_frame.arp_htype = 0x0001
test_frame.arp_ptype = 0x0800
test_frame.arp_hlen = 6
test_frame.arp_plen = 4
test_frame.arp_oper = 1
test_frame.arp_sha = 0x5A5152535455
test_frame.arp_spa = 0xc0a80164
test_frame.arp_tha = 0x000000000000
test_frame.arp_tpa = 0xc0a80165
eth_source.send(test_frame.build_eth())
yield eth_sink.wait()
rx_frame = eth_sink.recv()
check_frame = arp_ep.ARPFrame()
check_frame.parse_eth(rx_frame)
assert check_frame.eth_dest_mac == 0x5A5152535455
assert check_frame.eth_src_mac == 0xDAD1D2D3D4D5
assert check_frame.eth_type == 0x0806
assert check_frame.arp_htype == 0x0001
assert check_frame.arp_ptype == 0x0800
assert check_frame.arp_hlen == 6
assert check_frame.arp_plen == 4
assert check_frame.arp_oper == 2
assert check_frame.arp_sha == 0xDAD1D2D3D4D5
assert check_frame.arp_spa == 0xc0a80165
assert check_frame.arp_tha == 0x5A5152535455
assert check_frame.arp_tpa == 0xc0a80164
yield delay(100)
yield clk.posedge
print("test 2: Cached read")
current_test.next = 2
arp_request_source.send([(0xc0a80164,)])
yield arp_response_sink.wait()
err, mac = arp_response_sink.recv().data[0]
assert not err
assert mac == 0x5A5152535455
yield delay(100)
yield clk.posedge
print("test 3: Unached read")
current_test.next = 3
arp_request_source.send([(0xc0a80166,)])
# wait for ARP request packet
yield eth_sink.wait()
rx_frame = eth_sink.recv()
check_frame = arp_ep.ARPFrame()
check_frame.parse_eth(rx_frame)
assert check_frame.eth_dest_mac == 0xFFFFFFFFFFFF
assert check_frame.eth_src_mac == 0xDAD1D2D3D4D5
assert check_frame.eth_type == 0x0806
assert check_frame.arp_htype == 0x0001
assert check_frame.arp_ptype == 0x0800
assert check_frame.arp_hlen == 6
assert check_frame.arp_plen == 4
assert check_frame.arp_oper == 1
assert check_frame.arp_sha == 0xDAD1D2D3D4D5
assert check_frame.arp_spa == 0xc0a80165
assert check_frame.arp_tha == 0x000000000000
assert check_frame.arp_tpa == 0xc0a80166
# generate response
test_frame = arp_ep.ARPFrame()
test_frame.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame.eth_src_mac = 0x6A6162636465
test_frame.eth_type = 0x0806
test_frame.arp_htype = 0x0001
test_frame.arp_ptype = 0x0800
test_frame.arp_hlen = 6
test_frame.arp_plen = 4
test_frame.arp_oper = 2
test_frame.arp_sha = 0x6A6162636465
test_frame.arp_spa = 0xc0a80166
test_frame.arp_tha = 0xDAD1D2D3D4D5
test_frame.arp_tpa = 0xc0a80165
eth_source.send(test_frame.build_eth())
# wait for lookup
yield arp_response_sink.wait()
err, mac = arp_response_sink.recv().data[0]
assert not err
assert mac == 0x6A6162636465
yield delay(100)
yield clk.posedge
print("test 4: Unached read, outside of subnet")
current_test.next = 4
arp_request_source.send([(0x08080808,)])
# wait for ARP request packet
yield eth_sink.wait()
rx_frame = eth_sink.recv()
check_frame = arp_ep.ARPFrame()
check_frame.parse_eth(rx_frame)
assert check_frame.eth_dest_mac == 0xFFFFFFFFFFFF
assert check_frame.eth_src_mac == 0xDAD1D2D3D4D5
assert check_frame.eth_type == 0x0806
assert check_frame.arp_htype == 0x0001
assert check_frame.arp_ptype == 0x0800
assert check_frame.arp_hlen == 6
assert check_frame.arp_plen == 4
assert check_frame.arp_oper == 1
assert check_frame.arp_sha == 0xDAD1D2D3D4D5
assert check_frame.arp_spa == 0xc0a80165
assert check_frame.arp_tha == 0x000000000000
assert check_frame.arp_tpa == 0xc0a80101
# generate response
test_frame = arp_ep.ARPFrame()
test_frame.eth_dest_mac = 0xDAD1D2D3D4D5
test_frame.eth_src_mac = 0xAABBCCDDEEFF
test_frame.eth_type = 0x0806
test_frame.arp_htype = 0x0001
test_frame.arp_ptype = 0x0800
test_frame.arp_hlen = 6
test_frame.arp_plen = 4
test_frame.arp_oper = 2
test_frame.arp_sha = 0xAABBCCDDEEFF
test_frame.arp_spa = 0xc0a80101
test_frame.arp_tha = 0xDAD1D2D3D4D5
test_frame.arp_tpa = 0xc0a80165
eth_source.send(test_frame.build_eth())
# wait for lookup
yield arp_response_sink.wait()
err, mac = arp_response_sink.recv().data[0]
assert not err
assert mac == 0xAABBCCDDEEFF
yield delay(100)
yield clk.posedge
print("test 5: Unached read, timeout")
current_test.next = 5
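        # No host answers 192.168.1.103 (0xc0a80167), so the cache sends 4
        # broadcast requests in total (checked below) before reporting an
        # error on the response interface.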
arp_request_source.send([(0xc0a80167,)])
yield arp_response_sink.wait()
err, mac = arp_response_sink.recv().data[0]
assert err
# check for 4 ARP requests
assert eth_sink.count() == 4
while not eth_sink.empty():
rx_frame = eth_sink.recv()
check_frame = arp_ep.ARPFrame()
check_frame.parse_eth(rx_frame)
assert check_frame.eth_dest_mac == 0xFFFFFFFFFFFF
assert check_frame.eth_src_mac == 0xDAD1D2D3D4D5
assert check_frame.eth_type == 0x0806
assert check_frame.arp_htype == 0x0001
assert check_frame.arp_ptype == 0x0800
assert check_frame.arp_hlen == 6
assert check_frame.arp_plen == 4
assert check_frame.arp_oper == 1
assert check_frame.arp_sha == 0xDAD1D2D3D4D5
assert check_frame.arp_spa == 0xc0a80165
assert check_frame.arp_tha == 0x000000000000
assert check_frame.arp_tpa == 0xc0a80167
yield delay(100)
yield clk.posedge
print("test 6: Broadcast")
current_test.next = 6
# subnet broadcast
arp_request_source.send([(0xc0a801ff,)])
yield arp_response_sink.wait()
err, mac = arp_response_sink.recv().data[0]
assert not err
assert mac == 0xffffffffffff
# general broadcast
arp_request_source.send([(0xffffffff,)])
yield arp_response_sink.wait()
err, mac = arp_response_sink.recv().data[0]
assert not err
assert mac == 0xffffffffffff
yield delay(100)
raise StopSimulation
return instances()
def test_bench():
sim = Simulation(bench())
sim.run()
if __name__ == '__main__':
print("Running test...")
test_bench()
| 31.727074
| 77
| 0.671874
| 2,026
| 14,531
| 4.441757
| 0.127838
| 0.062229
| 0.074675
| 0.076008
| 0.675853
| 0.600845
| 0.551172
| 0.435271
| 0.400267
| 0.351595
| 0
| 0.058942
| 0.24809
| 14,531
| 457
| 78
| 31.796499
| 0.76469
| 0.08967
| 0
| 0.390533
| 0
| 0
| 0.029991
| 0.003332
| 0
| 0
| 0.063011
| 0
| 0.177515
| 1
| 0.011834
| false
| 0
| 0.014793
| 0
| 0.029586
| 0.02071
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a4404fe1d92ad81158f3995d99e25353d3c8492
| 4,315
|
py
|
Python
|
NitroGenerator.py
|
ATRS7391/Discord_Nitro_Generator_And_Checker_Python_Version
|
65c6e6e18e640afb4fc433394a9e646c7fe4f4fa
|
[
"MIT"
] | 2
|
2021-07-27T06:57:36.000Z
|
2021-08-16T04:17:41.000Z
|
NitroGenerator.py
|
ATRS7391/Discord_Nitro_Generator_And_Checker_Python_Version
|
65c6e6e18e640afb4fc433394a9e646c7fe4f4fa
|
[
"MIT"
] | null | null | null |
NitroGenerator.py
|
ATRS7391/Discord_Nitro_Generator_And_Checker_Python_Version
|
65c6e6e18e640afb4fc433394a9e646c7fe4f4fa
|
[
"MIT"
] | 1
|
2021-11-06T05:32:40.000Z
|
2021-11-06T05:32:40.000Z
|
import random
import string
import subprocess
import sys
def pip_install(module: str):
subprocess.run([sys.executable, "-m", "pip", "-q", "--disable-pip-version-check", "install", module])
try:
import requests
except ImportError:
print("'requests' module not found! Trying to install... ")
pip_install("requests")
import requests
def print_header():
header = """
+-------------------------+
| Discord Nitro Generator |
+-------------------------+
Note: For Educational Purposes Only
© ATRS 2021. All Rights Reserved.
"""
print(header)
def get_code(nitro_type: str):
    # The code alphabet is all ASCII letters plus digits; build it from the
    # string module instead of spelling out every character.
    characters = list(string.ascii_letters + string.digits)
if nitro_type == "Boost":
return str("".join([random.choice(characters) for char in range(24)]))
elif nitro_type == "Classic":
return str("".join([random.choice(characters) for char in range(16)]))
def check_code(nitro_code: str):
try:
headers = {
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/74.0.3729.169 Safari/537.36',
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,*/*;q=0.8',
'Accept-Charset': 'ISO-8859-1,utf-8;q=0.7,*;q=0.3',
'Accept-Encoding': 'none',
'Accept-Language': 'en-US,en;q=0.8',
'Connection': 'keep-alive'}
check_url = f"https://discordapp.com/api/v6/entitlements/gift-codes/{nitro_code}?with_application=false&with_subscription_plan=true"
status = requests.get(url=check_url, headers=headers).status_code
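        # HTTP 200 means the gift code resolved (valid); 429 is
        # "Too Many Requests" (rate limited, so validity is unknown);
        # anything else is treated as invalid.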
if status == 200:
return "True"
elif status == 429:
return "None"
else:
return "False"
    except requests.RequestException:
print("Something went wrong while checking urls. Press any key to exit. ")
input()
quit()
def get_nitro_type():
print("Enter what type of Discord Nitro you want to generate: \n\t1. Boost\n\t2. Classic")
user_response = input("> ")
if user_response.replace(" ", "").strip().lower() == "boost" or user_response.replace(" ",
"").strip().lower() == "1":
return "Boost"
elif user_response.replace(" ", "").strip().lower() == "classic" or user_response.replace(" ",
"").strip().lower() == "2":
return "Classic"
else:
print("Not a valid input. Press any key to exit. ")
input()
quit()
print_header()
user_nitro_type = get_nitro_type()
print("Enter the number of Nitro Codes you want: ")
amount = int(input("> "))
valid_codes = 0
invalid_codes = 0
unchecked_codes = 0
print()
print()
f = open("All_Nitro_Codes.txt", "w", encoding='utf-8')
for i in range(amount):
user_nitro_code = get_code(nitro_type=user_nitro_type)
validity = check_code(nitro_code=user_nitro_code)
if validity == "True":
display = f"Valid. | https://discord.com/gifts/{user_nitro_code}"
valid_codes += 1
print(display)
f.writelines(display + "\n")
elif validity == "False":
display = f"Invalid. | https://discord.com/gifts/{user_nitro_code}"
invalid_codes += 1
print(display)
f.writelines(display + "\n")
elif validity == "None":
display = f"Unchecked. Rate limited. | https://discord.com/gifts/{user_nitro_code}"
unchecked_codes += 1
print(display)
f.writelines(display + "\n")
print("\n\nSuccessfully generated Nitro Codes. ")
print("Valid Nitro Codes: " + str(valid_codes))
print("Invalid Nitro Codes: " + str(invalid_codes))
print("Unchecked Nitro Codes: " + str(unchecked_codes))
print("\nEnter any key to exit.")
input()
quit()
| 36.567797
| 145
| 0.526999
| 528
| 4,315
| 4.204545
| 0.356061
| 0.032432
| 0.029279
| 0.043243
| 0.27973
| 0.233784
| 0.196396
| 0.128378
| 0.111712
| 0.111712
| 0
| 0.026445
| 0.290151
| 4,315
| 117
| 146
| 36.880342
| 0.698008
| 0
| 0
| 0.242424
| 0
| 0.050505
| 0.339924
| 0.041448
| 0
| 0
| 0
| 0
| 0
| 1
| 0.050505
| false
| 0
| 0.050505
| 0
| 0.171717
| 0.181818
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a48326e1bcc0c4ce67dffee3193eed37eb8dfe4
| 2,881
|
py
|
Python
|
bbc1/core/command.py
|
ks91/bbc1-pub
|
6b9c33c6c8aec7d410ba9b704eeeb8c3772012d0
|
[
"Apache-2.0"
] | 89
|
2017-10-31T05:38:30.000Z
|
2021-11-06T11:53:19.000Z
|
bbc1/core/command.py
|
ks91/bbc1-pub
|
6b9c33c6c8aec7d410ba9b704eeeb8c3772012d0
|
[
"Apache-2.0"
] | 74
|
2017-11-07T13:06:33.000Z
|
2021-05-06T14:26:19.000Z
|
bbc1/core/command.py
|
ks91/bbc1-pub
|
6b9c33c6c8aec7d410ba9b704eeeb8c3772012d0
|
[
"Apache-2.0"
] | 56
|
2017-11-04T13:54:56.000Z
|
2021-06-18T18:05:46.000Z
|
# -*- coding: utf-8 -*-
"""
Copyright (c) 2017 beyond-blockchain.org.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
from argparse import ArgumentParser
import sys
sys.path.extend(["../../"])
from bbc1.core.bbc_config import DEFAULT_CORE_PORT, DEFAULT_P2P_PORT
DEFAULT_SERV_ADDR = '127.0.0.1'
def parser():
usage = 'python {} [--coreport <number>] [--p2pport <number>] [--workingdir <dir>] ' \
'[--config <filename>] [--default_config <filename>] [--nodekey] [--no_nodekey] [--domain0] ' \
'[--ledgersubsystem] [--ip4addr <IP addr>] [--ip6addr <IPv6 addr>] ' \
'[--log <filename>] [--verbose_level <string>] [--daemon] [--kill] [--help]'.format(__file__)
argparser = ArgumentParser(usage=usage)
argparser.add_argument('-cp', '--coreport', type=int, default=DEFAULT_CORE_PORT, help='waiting TCP port')
    argparser.add_argument('-pp', '--p2pport', type=int, default=DEFAULT_P2P_PORT, help='waiting TCP port for the P2P network')
argparser.add_argument('-w', '--workingdir', type=str, default=".bbc1", help='working directory name')
argparser.add_argument('-c', '--config', type=str, default=None, help='config file name')
argparser.add_argument('--default_config', type=str, default=None, help='default config file')
argparser.add_argument('--nodekey', action='store_true', help='use node_key for admin command')
argparser.add_argument('--no_nodekey', action='store_true', help='don\'t use node_key for admin command')
argparser.add_argument('--domain0', action='store_true', help='connect to domain_global_0')
argparser.add_argument('--ledgersubsystem', action='store_true', help='use ledger_subsystem')
argparser.add_argument('--ip4addr', type=str, default=None, help='IPv4 address exposed to the external network')
argparser.add_argument('--ip6addr', type=str, default=None, help='IPv6 address exposed to the external network')
argparser.add_argument('-l', '--log', type=str, default="-", help='log filename/"-" means STDOUT')
argparser.add_argument('-d', '--daemon', action='store_true', help='run in background')
argparser.add_argument('-k', '--kill', action='store_true', help='kill the daemon')
argparser.add_argument('-v', '--verbose_level', type=str, default="debug",
help='log level all/debug/info/warning/error/critical/none')
args = argparser.parse_args()
return args
| 57.62
| 116
| 0.701493
| 382
| 2,881
| 5.164921
| 0.424084
| 0.091232
| 0.152053
| 0.05778
| 0.23112
| 0.171313
| 0.14293
| 0.14293
| 0.100355
| 0
| 0
| 0.012525
| 0.140923
| 2,881
| 49
| 117
| 58.795918
| 0.784646
| 0.204443
| 0
| 0
| 0
| 0
| 0.408494
| 0.018389
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0
| 0.103448
| 0
| 0.172414
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a4861b9f42f405c3f1bc83a1f33fe81d2ee9835
| 33,928
|
py
|
Python
|
main.py
|
cmcquinn/cmake-uvision-syncer
|
26f34b79b3102a326ced2b0bca2524a98b69abf4
|
[
"MIT"
] | null | null | null |
main.py
|
cmcquinn/cmake-uvision-syncer
|
26f34b79b3102a326ced2b0bca2524a98b69abf4
|
[
"MIT"
] | null | null | null |
main.py
|
cmcquinn/cmake-uvision-syncer
|
26f34b79b3102a326ced2b0bca2524a98b69abf4
|
[
"MIT"
] | 1
|
2022-03-31T13:47:50.000Z
|
2022-03-31T13:47:50.000Z
|
"""
Usage:
main.py [<project>]
Options:
<project> Path to the .uvprojx file (Keil® µVision5 Project File).
The .uvoptx file (Keil® µVision5 Project Options file) will
be located automatically as it shall be adjacent to the
.uvprojx file, having the same filename.
If this is a directory, .uvprojx is found automatically (if
multiple found then the latest changed is chosen).
If not provided then the current working directory is chosen
as a project directory.
"""
import enum
import operator
import os
import warnings
from collections import defaultdict
from dataclasses import dataclass
from os import DirEntry
from pathlib import Path
from typing import List, Optional, Union, Iterable, Collection, Set, Tuple, Callable, Dict, Iterator
from docopt import docopt
from lxml import etree
__author__ = "Bojan Potočnik"
UnknownInt = int
UnknownBool = bool
@enum.unique
class Language(enum.Enum):
ASM = "Assembler"
C = "C"
CPP = "C++"
@enum.unique
class FileType(enum.Enum):
C_SOURCE = 1
"""C Source file"""
ASM_SOURCE = 2
"""Assembly language file"""
OBJECT = 3
"""Object file"""
LIBRARY = 4
"""Library file"""
TEXT_DOCUMENT = 5
"""Text Document file"""
CUSTOM = 7
"""Custom file"""
CPP_SOURCE = 8
"""C++ Source file"""
IMAGE = 9
"""Image file"""
# region XML data structures for Project File
@dataclass
class Target:
@dataclass
class Toolset:
number: int
name: str
@dataclass
class Compiler:
cc: str
ac6: bool
@dataclass
class Options:
@dataclass
class Common:
device: str
vendor: str
pack_id: str
pack_url: str
cpu: str
device_id: int
register_file: str
@dataclass
class Properties:
use_cpp_compiler: bool
common: Common
properties: Properties
@dataclass
class Build:
@dataclass
class Misc:
@dataclass
class Memory:
@enum.unique
class Type(enum.Enum):
"""TODO: Real meaning unknown."""
TYPE0 = 0
TYPE1 = 1
name: str
type: Type
start: int
size: int
cpu_type: str
memories: List[Memory]
@dataclass
class C:
optimization: int
strict: bool
c99: bool
gnu: bool
misc: List[str]
defines: List[str]
undefines: List[str]
include_paths: List[str]
@dataclass
class Asm:
misc: List[str]
defines: List[str]
undefines: List[str]
include_paths: List[str]
@dataclass
class Linker:
text_address_range: int
data_address_range: int
misc: List[str]
misc: Misc
c: C
asm: Asm
ld: Linker
@dataclass
class File:
name: str
type: FileType
path: str
include_in_build: bool
"""Whether this file is included in the build or ignored."""
always_build: bool
"""Whether to always build this file."""
@dataclass
class Group:
name: str
files: List['Target.File']
name: str
toolset: Toolset
compiler: Compiler
options: Options
build: Build
groups: List[Group]
@dataclass
class RTE:
@dataclass
class TargetInfo:
@enum.unique
class VersionMatchMode(enum.Enum):
FIXED = "fixed"
name: str
version_match_mode: Optional[VersionMatchMode]
@dataclass
class Package:
name: str
url: str
vendor: str
version: str
target_infos: List['RTE.TargetInfo']
@dataclass
class Component:
class_: str
group: str
vendor: str
version: str
condition: str
package: 'RTE.Package'
target_infos: List['RTE.TargetInfo']
@dataclass
class File:
@enum.unique
class Attribute(enum.Enum):
CONFIG = "config"
@enum.unique
class Category(enum.Enum):
SOURCE = "source"
attr: Attribute
category: Category
condition: Optional[str]
name: str
version: str
instance: str
component: 'RTE.Component'
package: 'RTE.Package'
target_infos: List['RTE.TargetInfo']
packages: List[Package]
components: List[Component]
files: List[File]
# endregion XML data structures for Project File
# region XML data structures for Project Options file
@dataclass
class File:
group_number: int
"""Number of the :cls:`Group` this file belongs to."""
number: int
"""Number of the file (global across all groups)."""
type: FileType
"""File type as selected in the Options for File ... -> Properties dialog"""
expanded: bool
"""Whether the file is expanded (include file dependencies shown) in the Project Window file browser."""
include_in_build: bool
"""Whether this file is included in the build or ignored."""
always_build: bool
"""Whether to always build this file."""
tv_exp_opt_dlg: UnknownBool
dave2: UnknownBool
path: str
filename: str
rte_flag: bool
"""Whether this file is part of/managed by the Keil MDK Run-Time Environment (RTE) and therefore read-only."""
shared: UnknownBool
_project_file: Target.File = None
"""Reference to the instance of this file from the Project File."""
@dataclass
class Group:
name: str
"""Group name as shown in the Project Window file browser."""
expanded: bool
"""Whether the group is expanded (files shown) in the Project Window file browser."""
tv_exp_opt_dlg: UnknownBool
cb_sel: UnknownBool
rte_flag: bool
"""Whether this group is part of/managed by the Keil MDK Run-Time Environment (RTE) and therefore read-only."""
files: List[File]
"""List of files in this group."""
_project_group: Target.Group = None
"""Reference to the instance of this group from the Project File."""
# endregion XML data structures for Project Options file
# region XML parsing helper functions
def text(element: etree.ElementBase, name: str, is_attribute: bool = False, nullable: bool = False) -> Optional[str]:
if is_attribute:
if nullable:
return element.attrib.get(name)
else:
return element.attrib[name]
value = element.xpath(name)
if (not value) and nullable:
return None
if len(value) != 1:
raise ValueError(f"Only one '{name}' tag per tree is supported, {len(value)} found")
return value[0].text
def strict_bool(element: etree.ElementBase, name: str, nullable: bool = False, *,
false_value: str = "0", true_value: str = "1") -> Optional[bool]:
value = text(element, name, nullable=nullable)
if value == false_value:
return False
if value == true_value:
return True
if (value is None) and nullable:
return None
raise ValueError(f"'{value}' (of {name}) is not valid boolean value")
def strict_hex(element: etree.ElementBase, name: str) -> int:
value = text(element, name)
if not value.startswith("0x"):
raise ValueError(f"'{value}' (of {name}) is not valid hexadecimal value")
return int(value, 16)
# endregion XML parsing helper functions
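# A minimal usage sketch of the helpers above (illustration only; the tag
# names below are invented, not taken from the uVision schema):
def _demo_xml_helpers() -> None:
    x = etree.fromstring(b"<Opt><Name>demo</Name><Flag>1</Flag><Addr>0x20000000</Addr></Opt>")
    assert text(x, "Name") == "demo"
    assert strict_bool(x, "Flag") is True
    assert strict_hex(x, "Addr") == 0x20000000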
@dataclass
class UVisionProject:
project_file_path: str
project_options_path: str
# region Project File
targets: List[Target]
# endregion Project File
# region Project Options
groups: List[Group]
"""Groups of files, as shown in the Project Window file browser."""
# endregion Project Options
@classmethod
def new(cls, project_file_path: str) -> 'UVisionProject':
fp_base = os.path.splitext(project_file_path)[0]
project_file_path = fp_base + ".uvprojx"
project_options_path = fp_base + ".uvoptx"
with open(project_file_path) as f:
# noinspection PyProtectedMember
xproj: etree._Element = etree.parse(f).getroot()
with open(project_options_path) as f:
# noinspection PyProtectedMember
xopt: etree._Element = etree.parse(f).getroot()
# region Project File
if xproj.tag != "Project":
raise ValueError("Invalid uVision Project File XML file")
# noinspection PyCallByClass,SpellCheckingInspection
targets = [
Target(
name=text(target, "TargetName"),
toolset=Target.Toolset(
number=strict_hex(target, "ToolsetNumber"),
name=text(target, "ToolsetName")
),
compiler=Target.Compiler(
cc=text(target, "pCCUsed", nullable=True),
ac6=strict_bool(target, "uAC6")
),
options=next(
# There is always only one package, but using generator is clean and
# effective way of creating an inline local variable.
Target.Options(
common=next(
Target.Options.Common(
device=text(tco, "Device"),
vendor=text(tco, "Vendor"),
pack_id=text(tco, "PackID"),
pack_url=text(tco, "PackURL"),
cpu=text(tco, "Cpu"),
device_id=text(tco, "DeviceId"),
register_file=text(tco, "RegisterFile")
) for tco in to.xpath("TargetCommonOption")
),
properties=next(
Target.Options.Properties(
use_cpp_compiler=strict_bool(tcp, "UseCPPCompiler"),
) for tcp in to.xpath("CommonProperty")
)
) for to in target.xpath("TargetOption")
),
build=next(
Target.Build(
misc=Target.Build.Misc(
cpu_type=text(to_taa, "ArmAdsMisc/AdsCpuType"),
memories=[
Target.Build.Misc.Memory(
name=memory.tag,
type=Target.Build.Misc.Memory.Type(int(text(memory, "Type"))),
start=strict_hex(memory, "StartAddress"),
size=strict_hex(memory, "Size")
) for memory in to_taa.xpath("ArmAdsMisc/OnChipMemories/*")
]
),
c=next(
Target.Build.C(
optimization=int(text(to_taa_c, "Optim")),
strict=strict_bool(to_taa_c, "Strict"),
c99=strict_bool(to_taa_c, "uC99"),
gnu=strict_bool(to_taa_c, "uGnu"),
misc=[
mc.strip() for mc in text(to_taa_c, "VariousControls/MiscControls").split(",")
],
defines=[
mc.strip() for mc in text(to_taa_c, "VariousControls/Define").split(" ")
],
undefines=[
mc.strip() for mc in (text(to_taa_c, "VariousControls/Undefine") or "").split(" ")
],
include_paths=[
mc.strip() for mc in text(to_taa_c, "VariousControls/IncludePath").split(";")
]
) for to_taa_c in to_taa.xpath("Cads")
),
asm=next(
Target.Build.Asm(
misc=[
mc.strip() for mc in (text(to_taa_a, "VariousControls/MiscControls") or "").split(",")
],
defines=[
mc.strip() for mc in (text(to_taa_a, "VariousControls/Define") or "").split(" ")
],
undefines=[
mc.strip() for mc in (text(to_taa_a, "VariousControls/Undefine") or "").split(" ")
],
include_paths=[
mc.strip() for mc in (text(to_taa_a, "VariousControls/IncludePath") or "").split(";")
]
) for to_taa_a in to_taa.xpath("Aads")
),
ld=next(
Target.Build.Linker(
text_address_range=strict_hex(to_taa_ld, "TextAddressRange"),
data_address_range=strict_hex(to_taa_ld, "DataAddressRange"),
misc=[
mc.strip() for mc in
text(to_taa_ld, "Misc").split(",") # TODO: Delimiter unknown
]
) for to_taa_ld in to_taa.xpath("LDads")
)
) for to_taa in target.xpath("TargetOption/TargetArmAds")
),
groups=[
Target.Group(
name=text(group, "GroupName"),
files=[
Target.File(
name=text(file, "FileName"),
type=FileType(int(text(file, "FileType"))),
path=text(file, "FilePath"),
include_in_build=strict_bool(file, "FileOption/CommonProperty/IncludeInBuild",
nullable=True),
always_build=strict_bool(file, "FileOption/CommonProperty/AlwaysBuild",
nullable=True, true_value="2")
) for file in group.xpath("Files/File")
]
) for group in target.xpath("Groups/Group")
]
) for target in xproj.xpath("Targets/Target")
]
# region RTE
# noinspection PyCallByClass,PyTypeChecker
rte = RTE(
packages=[
RTE.Package(
name=text(package, "name", True),
url=text(package, "url", True),
vendor=text(package, "vendor", True),
version=text(package, "version", True),
target_infos=[
RTE.TargetInfo(
name=text(ti, "name", True),
# Using generator and list only for local variable
version_match_mode=next(RTE.TargetInfo.VersionMatchMode(vmm) if vmm else None
for vmm in [text(ti, "versionMatchMode", True, True)])
) for ti in package.xpath("targetInfos/targetInfo")
]
) for package in xproj.xpath("RTE/packages/package")
],
components=[
RTE.Component(
class_=text(component, "Cclass", True),
group=text(component, "Cgroup", True),
vendor=text(component, "Cvendor", True),
version=text(component, "Cversion", True),
condition=text(component, "condition", True),
package=next(
# There is always only one package, but using generator is clean and
# effective way of creating an inline local variable.
# This new instance of package will be replaced below with reference to an actual matching
# instance of the package from rte.packages.
RTE.Package(
name=text(package, "name", True),
url=text(package, "url", True),
vendor=text(package, "vendor", True),
version=text(package, "version", True),
target_infos=None
) for package in component.xpath("package")
),
target_infos=[
RTE.TargetInfo(
name=text(ti, "name", True),
# TODO: Handle nullable
# RTE.TargetInfo.VersionMatchMode(text(ti, "versionMatchMode", True, True))
version_match_mode=None
) for ti in component.xpath("targetInfos/targetInfo")
]
) for component in xproj.xpath("RTE/components/component")
],
files=[
RTE.File(
attr=RTE.File.Attribute(text(file, "attr", True)),
category=RTE.File.Category(text(file, "category", True)),
condition=text(file, "condition", True, True),
name=text(file, "name", True),
version=text(file, "version", True),
instance=text(file, "instance"),
component=next(
RTE.Component(
class_=text(component, "Cclass", True),
group=text(component, "Cgroup", True),
vendor=text(component, "Cvendor", True),
version=text(component, "Cversion", True),
condition=text(component, "condition", True),
package=None,
target_infos=None
) for component in file.xpath("component")
),
package=None, # TODO
target_infos=None, # TODO
) for file in xproj.xpath("RTE/files/file")
]
)
# TODO: Connect actual references of the rte.packages and rte.packages.target_infos
for component in rte.components:
cp = component.package
component.package = None
cp.target_infos = None
for package in rte.packages:
# Temporally remove target_infos to enable usage of equality operator.
pti = package.target_infos
package.target_infos = None
if cp == package:
component.package = package
package.target_infos = pti
break
package.target_infos = pti
# endregion RTE
# endregion Project File
# region Project Options
if xopt.tag != "ProjectOpt":
raise ValueError("Invalid uVision Project Options XML file")
groups: List[Group] = []
for group in xopt.xpath("Group"):
group_name = text(group, "GroupName")
# Find this group in the Project File
xproj_group = next(g for g in next(iter(targets)).groups if (g.name == group_name))
# Find all files in this group and also in the Project File
files: List[File] = []
for file in group.xpath("File"):
file_type = FileType(int(text(file, "FileType")))
file_name = text(file, "FilenameWithoutPath")
xproj_file = next(f for f in xproj_group.files if (f.type == file_type and f.name == file_name))
files.append(File(
group_number=int(text(file, "GroupNumber")),
number=int(text(file, "FileNumber")),
type=file_type,
expanded=strict_bool(file, "tvExp"),
include_in_build=xproj_file.include_in_build,
always_build=xproj_file.always_build,
tv_exp_opt_dlg=strict_bool(file, "tvExpOptDlg"),
dave2=strict_bool(file, "bDave2"),
path=text(file, "PathWithFileName"),
filename=file_name,
rte_flag=strict_bool(file, "RteFlg"),
shared=strict_bool(file, "bShared")
))
groups.append(Group(
name=group_name,
expanded=strict_bool(group, "tvExp"),
tv_exp_opt_dlg=strict_bool(group, "tvExpOptDlg"),
cb_sel=strict_bool(group, "cbSel"),
rte_flag=strict_bool(group, "RteFlg"),
files=files
))
# There is no more *currently relevant* data in the Project Options file.
# endregion Project Options
# Add RTE files to the file groups to actually match the Project Window file browser.
for file in rte.files:
# Find the group to which this file belongs to (there shall be one and only one).
group = None
group_number = 1
for group_number, group in enumerate(groups, 1):
if group.files and group.files[0].group_number != group_number:
warnings.warn(f"Inconsistent group number {group.files[0].group_number} for group {group.name}"
f" (expected to be {group_number})")
if group.rte_flag and group.name.strip(":") == file.component.class_:
break
filename = os.path.basename(file.instance)
# Detect file type (this information is not provided for RTE files)
if filename.endswith(".s"):
file_type = FileType.ASM_SOURCE
elif filename.endswith(".c"):
file_type = FileType.C_SOURCE
elif filename.endswith(".cpp"):
file_type = FileType.CPP_SOURCE
elif filename.endswith(".h"):
file_type = FileType.TEXT_DOCUMENT
else:
warnings.warn(f"Unknown RTE file type '{file.instance}': {file}")
continue
group.files.append(File(
group_number=group_number,
number=max(f.number for g in groups for f in g.files) + 1,
type=file_type,
expanded=False,
include_in_build=True, # TODO: This information is available for RTE files
always_build=None,
tv_exp_opt_dlg=False, # TODO
dave2=False, # TODO
path=file.instance,
filename=os.path.basename(file.instance),
rte_flag=True,
shared=False
))
return cls(
project_file_path=project_file_path,
project_options_path=project_options_path,
targets=targets,
groups=groups
)
def source_files(self) -> Iterator[Tuple[File, Optional[Language], Optional[str]]]:
"""
Get all files grouped by the file type with group names as a comments.
"""
# Add source files
for group in self.groups:
comment = group.name
if group.rte_flag:
# RTE groups start with double colon (::).
comment = "RTE" + comment
# Group files by type and add one comment for every file type as they are in the separate sections.
files: Dict[Union[Language, None], List[File]] = defaultdict(list)
for file in group.files:
if file.type == FileType.ASM_SOURCE:
lang = Language.ASM
elif file.type == FileType.C_SOURCE:
lang = Language.C
elif file.type == FileType.TEXT_DOCUMENT:
lang = None
else:
warnings.warn(f"Unsupported file type: {file.type} for {file}")
continue
files[lang].append(file)
for lang, files in files.items():
comment_per_type = comment
for file in files:
yield file, lang, comment_per_type
comment_per_type = None
class CMake:
@dataclass
class String:
value: str
"""The actual string value."""
languages: Set[Language]
"""Set of all build configs in which this value is present."""
common: bool = False
comment: Optional[str] = None
"""Comment which will be added to the line before"""
def __eq__(self, o: 'CMake.String') -> bool:
if isinstance(o, type(self)):
return self.value == o.value
elif isinstance(o, str):
return self.value == o
return NotImplemented
def __init__(self) -> None:
self.include_paths: List[CMake.String] = []
self.defines: List[CMake.String] = []
self.undefines: List[CMake.String] = []
self.source_file_paths: List[CMake.String] = []
self.other_file_paths: List[CMake.String] = []
@classmethod
def _get(cls, lst: List[String], obj: str) -> String:
"""Get existing object from the list or append a new one to the end."""
try:
# noinspection PyTypeChecker
itm = lst[lst.index(obj)]
except ValueError:
# noinspection PyCallByClass
itm = cls.String(obj, set())
lst.append(itm)
return itm
@classmethod
def _add_values(cls, where: List[String], values: Union[str, Iterable[str]],
languages: Union[Language, Collection[Language], None], comment: Optional[str] = None) -> None:
if isinstance(languages, Language):
languages = [languages]
for val in values:
obj = cls._get(where, val)
if comment is not None:
# Add comment to the first value only
obj.comment = comment
comment = None
if languages:
obj.languages.update(languages)
@staticmethod
def _clean_paths(paths: Union[str, Iterable[str]]) -> List[str]:
if isinstance(paths, (str, Path)):
paths = [paths]
return [Path(p).as_posix() for p in map(os.path.normpath, paths)]
def add_include_paths(self, paths: Union[str, Iterable[str]], languages: Union[Language, Collection[Language]],
comment: str = None) -> None:
self._add_values(self.include_paths, self._clean_paths(paths), languages, comment)
def add_defines(self, defines: Union[str, Iterable[str]], languages: Union[Language, Collection[Language]],
comment: str = None) -> None:
self._add_values(self.defines, defines, languages, comment)
def add_undefines(self, undefines: Union[str, Iterable[str]], languages: Union[Language, Collection[Language]],
comment: str = None) -> None:
self._add_values(self.undefines, undefines, languages, comment)
def add_source_files(self, paths: Union[None, str, Iterable[str]],
languages: Union[Language, Collection[Language], None],
comment: str = None, include_in_build: bool = True) -> None:
paths = self._clean_paths(paths)
# If file is not included in the build, comment it
if include_in_build is False:
paths = ["# " + path for path in paths]
self._add_values(self.source_file_paths if languages else self.other_file_paths, paths, languages, comment)
def add_other_files(self, paths: Union[str, Iterable[str]], comment: str = None) -> None:
self.add_source_files(paths, None, comment)
def check_common(self) -> Set[Language]:
"""
Check which properties are common to all language configurations.
:return: Set of all used languages (languages with at least one property)
"""
all_props = (self.include_paths, self.defines, self.undefines, self.source_file_paths)
# Get all of the defined languages used
languages = {lang
for props in all_props
for prop in props
for lang in prop.languages}
for props in all_props:
for prop in props:
prop.common = (prop.languages == languages)
return languages
def __str__(self) -> str:
languages = sorted(self.check_common(), key=operator.attrgetter('value'))
ret_str = [
"# Made with CMake <> uVision project file synchronizer"
"# https://github.com/bojanpotocnik/cmake-uvision-syncer"
]
# Set of the build properties
prop_sets: List[Tuple[str, str, List[CMake.String], str]] = [
("definitions", "DEFINES", self.defines, "-D"),
("un-defines", "UNDEFINES", self.undefines, ""),
("include directories", "INCLUDE_DIRS", self.include_paths, ""),
("source files", "SOURCES", self.source_file_paths, ""),
]
# Set of the language configs per build property
sub_prop_sets: List[Tuple[str, str, Callable[[CMake.String], bool]]] = [
("Common", "COMMON", lambda prop: prop.common),
*((lang.value + " specific", lang.name,
lambda prop, lang_=lang: (not prop.common) and (lang_ in prop.languages))
for lang in languages)
]
def _add_section_files(comment: str, var_name: str, value_iterator: Iterable[CMake.String],
value_prefix: str = "") -> str:
s = (f"# {comment}\n"
f"set({var_name}")
value_str = ''
for value in value_iterator:
if value.comment is not None:
value_str += f"\n\t# {value.comment}"
value_str += f"\n\t{value_prefix}{value.value}"
            if value_str:
return s + value_str + "\n)"
else:
return None
for section_comment, section_var_prefix, section_props, val_prefix in prop_sets:
ss_str = []
for prop_set_comment, var_suffix, filter_fun in sub_prop_sets:
section_files = _add_section_files(
comment=f"{prop_set_comment} {section_comment}",
var_name=f"{section_var_prefix}_{var_suffix}",
value_iterator=filter(filter_fun, section_props),
value_prefix=val_prefix
)
if section_files is not None:
ss_str.append(section_files)
ret_str.append("\n\n".join(ss_str))
other_files = _add_section_files(
comment="Other files",
var_name="OTHER_FILES",
value_iterator=self.other_file_paths
)
if other_files is not None:
ret_str.append(other_files)
return "\n\n\n".join(ret_str)
def main() -> None:
# region Parse arguments
arguments = docopt(__doc__)
project_path: str = arguments["<project>"] or "."
if not os.path.isfile(project_path):
with os.scandir(project_path) as dirs: # type: Iterator[DirEntry]
projects = [de.path for de in dirs if (de.is_file() and (os.path.splitext(de.name)[1] == ".uvprojx"))]
if not projects:
raise FileNotFoundError(f"Could not find any .uvprojx file in '{project_path}'")
elif len(projects) > 1:
# Choose the latest file by modification time.
project_path = max(projects, key=os.path.getmtime)
else:
project_path = projects[0]
project_path = os.path.realpath(project_path)
# endregion Parse arguments
print(f"Using µVision5 Project File '{project_path}'")
# Parse uVision project XML files
uvp = UVisionProject.new(project_path)
# Generate CMake file and populate it with information from uVision project
cmake = CMake()
# Add Assembler properties
cmake.add_include_paths(uvp.targets[0].build.asm.include_paths, Language.ASM)
cmake.add_defines(uvp.targets[0].build.asm.defines, Language.ASM)
cmake.add_undefines(uvp.targets[0].build.asm.undefines, Language.ASM)
# Add C properties
cmake.add_include_paths(uvp.targets[0].build.c.include_paths, Language.C)
cmake.add_defines(uvp.targets[0].build.c.defines, Language.C)
cmake.add_undefines(uvp.targets[0].build.c.undefines, Language.C)
# Add source and other files
for file, lang, comment in uvp.source_files():
cmake.add_source_files(file.path, lang, comment, file.include_in_build)
fp_proj_cmake = os.path.join(os.path.dirname(uvp.project_file_path),
os.path.splitext(os.path.basename(uvp.project_file_path))[0] + ".cmake")
with open(fp_proj_cmake, 'w') as f:
print(cmake, file=f)
print(f"Generated CMake file '{fp_proj_cmake}'")
if __name__ == "__main__":
main()
| 38.207207
| 122
| 0.528855
| 3,563
| 33,928
| 4.91524
| 0.13556
| 0.015075
| 0.005653
| 0.006167
| 0.289499
| 0.222235
| 0.181979
| 0.150688
| 0.141438
| 0.121852
| 0
| 0.002513
| 0.378272
| 33,928
| 887
| 123
| 38.250282
| 0.827629
| 0.099593
| 0
| 0.255287
| 0
| 0
| 0.080453
| 0.017676
| 0
| 0
| 0
| 0.001127
| 0
| 1
| 0.028701
| false
| 0
| 0.016616
| 0
| 0.18429
| 0.004532
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a4b8d448257463b5f6347e3da0f24a94bac2394
| 10,816
|
py
|
Python
|
mpunet/bin/cv_split.py
|
alexsosn/MultiPlanarUNet
|
2d1cecdee391be8e9f72da95e33077ed82a2183a
|
[
"MIT"
] | null | null | null |
mpunet/bin/cv_split.py
|
alexsosn/MultiPlanarUNet
|
2d1cecdee391be8e9f72da95e33077ed82a2183a
|
[
"MIT"
] | null | null | null |
mpunet/bin/cv_split.py
|
alexsosn/MultiPlanarUNet
|
2d1cecdee391be8e9f72da95e33077ed82a2183a
|
[
"MIT"
] | 1
|
2020-10-07T12:44:47.000Z
|
2020-10-07T12:44:47.000Z
|
from glob import glob
import sys
import os
import numpy as np
import random
from mpunet.utils import create_folders
import argparse
def get_parser():
parser = argparse.ArgumentParser(description="Prepare a data folder for a"
"CV experiment setup.")
parser.add_argument("--data_dir", type=str,
help="Path to data directory")
parser.add_argument("--CV", type=int, default=5,
help="Number of splits (default=5)")
parser.add_argument("--out_dir", type=str, default="views",
help="Directory to store CV subfolders "
"(default=views")
parser.add_argument("--im_sub_dir", type=str, default="images",
help="Subfolder under 'data_dir' in which image are "
"stored (default=images)")
parser.add_argument("--lab_sub_dir", type=str, default="labels",
help="Subfolder under 'data_dir' in which labels are "
"stored (default=labels)")
parser.add_argument("--copy", action="store_true",
help="Copy files to CV-subfolders instead of "
"symlinking (not recommended)")
parser.add_argument("--file_list", action="store_true",
help="Create text files with paths pointing to the "
"images at the image and labels subdirs under "
"each split instead of symlink/copying. This is"
" usefull on systems were symlink is not "
"supported, but the dataset size is too large to"
" store in copies. NOTE: Only one of --copy and "
"--file_list flags must be set.")
parser.add_argument("--file_regex", type=str, default="*.nii*",
help="Regex used to select files from the image "
"and labels subdirs. (default='*.nii*')")
parser.add_argument("--validation_fraction", type=float, default=0.20,
help="Fraction of OVERALL data size used for "
"validation in each split. In a 5-CV setting with "
"N=100 and val_frac=0.20, each split will have "
"N_train=60, N_val=20 and N_test=20 images")
parser.add_argument("--test_fraction", type=float, default=0.20,
help="Fraction of data size used for test if CV=1.")
parser.add_argument("--common_prefix_length", type=int, required=False, default=0)
return parser
def assert_dir_structure(data_dir, im_dir, lab_dir, out_dir):
for _dir in (data_dir, im_dir, lab_dir):
if not os.path.exists(_dir):
raise OSError("Invalid data directory '%s'. Does not exist." % data_dir)
if os.path.exists(out_dir):
raise OSError("Output directory at '%s' already exists." % out_dir)
def create_view_folders(out_dir, n_splits):
if not os.path.exists(out_dir):
print("Creating directory at %s" % out_dir)
os.makedirs(out_dir)
if n_splits > 1:
for i in range(n_splits):
split_dir = os.path.join(out_dir, "split_%i" % i)
print("Creating directory at %s" % split_dir)
os.mkdir(split_dir)
def pair_by_names(images, common_prefix_length):
if common_prefix_length == 0:
return images
from collections import defaultdict
names = [os.path.split(i)[-1][:common_prefix_length] for i in images]
inds = defaultdict(list)
for i, item in enumerate(names):
inds[item].append(i)
pairs = inds.values()
return [tuple(np.array(images)[i]) for i in pairs]
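# Illustration (hypothetical filenames): with common_prefix_length=7, scans
# sharing the first 7 basename characters end up in the same tuple, e.g.
#   pair_by_names(["d/sub0001_T1.nii", "d/sub0001_T2.nii", "d/sub0002_T1.nii"], 7)
#   -> [("d/sub0001_T1.nii", "d/sub0001_T2.nii"), ("d/sub0002_T1.nii",)]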
def add_images(images, im_folder_path, label_folder_path, im_dir, lab_dir,
link_func=os.symlink):
for image in images:
if not isinstance(image, (list, tuple, np.ndarray)):
image = (image,)
for im in image:
# Get file name
file_name = os.path.split(im)[-1]
# Get label path (OBS: filenames must match!)
lab = im.replace(im_dir, lab_dir)
if not os.path.exists(lab):
raise OSError("No label file found at '%s'. OBS: image and "
"label files must have exactly the same name. "
"Images should be located at '%s' and labels at"
" '%s'" % (lab, im_folder_path, label_folder_path))
# Get relative paths
rel_image = os.path.relpath(im, im_folder_path)
rel_label = os.path.relpath(lab, label_folder_path)
# Symlink or copy
link_func(rel_image, im_folder_path + "/%s" % file_name)
link_func(rel_label, label_folder_path + "/%s" % file_name)
def _add_to_file_list_fallback(rel_image_path, image_path,
fname="LIST_OF_FILES.txt"):
"""
On some systems symlinks are not supported. If the --file_list flag is
set, this function is used instead to append each absolute file path to a
text file in the final subfolder that would otherwise store the image and
label links or actual files. At run-time, the images must then be loaded
by reading the paths from these list files.
"""
# Get folder where list of files should be stored
folder = os.path.split(image_path)[0]
# Get absolute path to image
# We change dir to get the correct abs path from the relative
os.chdir(folder)
abs_file_path = os.path.abspath(rel_image_path)
# Get path to the list of files
list_file_path = os.path.join(folder, fname)
with open(list_file_path, "a") as out_f:
out_f.write(abs_file_path + "\n")
def entry_func(args=None):
# Get parser
parser = vars(get_parser().parse_args(args))
# Get arguments
data_dir = os.path.abspath(parser["data_dir"])
n_splits = int(parser["CV"])
if n_splits > 1:
out_dir = os.path.join(data_dir, parser["out_dir"], "%i_CV" % n_splits)
else:
out_dir = os.path.join(data_dir, parser["out_dir"], "fixed_split")
im_dir = os.path.join(data_dir, parser["im_sub_dir"])
lab_dir = os.path.join(data_dir, parser["lab_sub_dir"])
copy = parser["copy"]
file_list = parser["file_list"]
regex = parser["file_regex"]
val_frac = parser["validation_fraction"]
test_frac = parser["test_fraction"]
common_prefix_length = parser["common_prefix_length"]
if n_splits == 1 and not test_frac:
raise ValueError("Must specify --test_fraction with --CV=1.")
if copy and file_list:
raise ValueError("Only one of --copy and --file_list "
"flags must be set.")
# Assert suitable folders
assert_dir_structure(data_dir, im_dir, lab_dir, out_dir)
# Create sub-folders
create_view_folders(out_dir, n_splits)
# Get images and pair by subject identifier if common_prefix_length > 0
images = glob(os.path.join(im_dir, regex))
images = pair_by_names(images, common_prefix_length)
print("-----")
print("Found {} images".format(len(images)))
# Get validation size
N_total = len(images)
if n_splits > 1:
N_test = N_total // n_splits
else:
N_test = int(np.ceil(N_total * test_frac))
N_val = int(np.ceil(N_total * val_frac))
if N_val + N_test >= N_total:
raise ValueError("Too large validation_fraction - "
"No training samples left!")
N_train = N_total - N_test - N_val
print("Total images:".ljust(40), N_total)
print("Train images pr. split:".ljust(40), N_train)
print("Validation images pr. split:".ljust(40), N_val)
print("Test images pr. split:".ljust(40), N_test)
# Shuffle and split the images into CV parts
random.shuffle(images)
splits = np.array_split(images, n_splits)
# Symlink / copy files
for i, split in enumerate(splits):
print(" Split %i/%i" % (i+1, n_splits), end="\r", flush=True)
# Set root path to split folder
if n_splits > 1:
split_path = os.path.join(out_dir, "split_%i" % i)
else:
split_path = out_dir
# Here we somewhat hackily force the following code to work with CV=1
# (this path only applies in the n_splits == 1 case): define a test
# set and overwrite the current split (which stores all the data, as
# 'splits' was never actually split with n_splits=1)
split = splits[0][:N_test]
# Overwrite the splits variable with a length-2 list holding the
# remaining data, which will be used as val+train. The loop still
# refers to the old splits and thus executes only once
splits = [split, splits[0][N_test:]]
# Define train, val and test sub-dirs
train_path = os.path.join(split_path, "train")
train_im_path = os.path.join(train_path, parser["im_sub_dir"])
train_label_path = os.path.join(train_path, parser["lab_sub_dir"])
if N_val:
val_path = os.path.join(split_path, "val")
val_im_path = os.path.join(val_path, parser["im_sub_dir"])
val_label_path = os.path.join(val_path, parser["lab_sub_dir"])
else:
val_path, val_im_path, val_label_path = (None,) * 3
test_path = os.path.join(split_path, "test")
test_im_path = os.path.join(test_path, parser["im_sub_dir"])
test_label_path = os.path.join(test_path, parser["lab_sub_dir"])
# Create folders if not existing
create_folders([train_path, val_path, train_im_path, train_label_path,
val_im_path, val_label_path, test_path, test_im_path,
test_label_path])
# Copy or symlink?
if copy:
from shutil import copyfile
move_func = copyfile
elif file_list:
move_func = _add_to_file_list_fallback
else:
move_func = os.symlink
# Add test data to test folder
add_images(split, test_im_path, test_label_path, im_dir, lab_dir, move_func)
# Join remaining splits into train+val
remaining = [x for ind, x in enumerate(splits) if ind != i]
remaining = [item for sublist in remaining for item in sublist]
# Extract validation data from the remaining
random.shuffle(remaining)
validation = remaining[:N_val]
training = remaining[N_val:]
# Add
if validation:
add_images(validation, val_im_path, val_label_path, im_dir, lab_dir, move_func)
add_images(training, train_im_path, train_label_path, im_dir, lab_dir, move_func)
if __name__ == "__main__":
entry_func()
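# ---------------------------------------------------------------------------
# A minimal usage sketch of pair_by_names(), showing how multi-modality scans
# sharing a filename prefix are grouped; the paths below are hypothetical.
def _example_pair_by_names():
    example = ["/data/images/sub01_T1.nii",
               "/data/images/sub01_T2.nii",
               "/data/images/sub02_T1.nii"]
    # With common_prefix_length=5 both 'sub01' scans land in one tuple:
    # [('.../sub01_T1.nii', '.../sub01_T2.nii'), ('.../sub02_T1.nii',)]
    return pair_by_names(example, common_prefix_length=5)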
| 41.125475
| 91
| 0.606971
| 1,504
| 10,816
| 4.152926
| 0.182846
| 0.026897
| 0.027217
| 0.024656
| 0.270413
| 0.198687
| 0.154019
| 0.079731
| 0.058918
| 0.036824
| 0
| 0.006419
| 0.294194
| 10,816
| 262
| 92
| 41.282443
| 0.811763
| 0.127589
| 0
| 0.05
| 0
| 0
| 0.20608
| 0.004587
| 0
| 0
| 0
| 0
| 0.011111
| 1
| 0.038889
| false
| 0
| 0.05
| 0
| 0.105556
| 0.05
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a4cc74674f055ddea956ccb55ba03b1e2719b21
| 1,964
|
py
|
Python
|
src/client/pydaos/raw/conversion.py
|
gczsjdy/daos
|
abbd900010562f3acea9c6b1dc2ca98a8d3c71fa
|
[
"Apache-2.0"
] | 1
|
2021-12-04T14:57:48.000Z
|
2021-12-04T14:57:48.000Z
|
src/client/pydaos/raw/conversion.py
|
gczsjdy/daos
|
abbd900010562f3acea9c6b1dc2ca98a8d3c71fa
|
[
"Apache-2.0"
] | 52
|
2019-12-04T05:47:10.000Z
|
2020-06-09T03:26:12.000Z
|
src/client/pydaos/raw/conversion.py
|
gczsjdy/daos
|
abbd900010562f3acea9c6b1dc2ca98a8d3c71fa
|
[
"Apache-2.0"
] | 8
|
2019-12-04T08:26:00.000Z
|
2020-06-09T07:40:11.000Z
|
#!/usr/bin/python
"""
(C) Copyright 2018 Intel Corporation.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
GOVERNMENT LICENSE RIGHTS-OPEN SOURCE SOFTWARE
The Government's rights to use, modify, reproduce, release, perform, display,
or disclose this software are subject to the terms of the Apache License as
provided in Contract No. B609815.
Any reproduction of computer software, computer software documentation, or
portions thereof marked with this legend must also reproduce the markings.
"""
import ctypes
import uuid
def c_uuid_to_str(uuid):
""" utility function to convert a C uuid into a standard string format """
uuid_str = '{:02X}{:02X}{:02X}{:02X}-{:02X}{:02X}-{:02X}{:02X}-{:02X}'\
'{:02X}-{:02X}{:02X}{:02X}{:02X}{:02X}{:02X}'.format(
uuid[0], uuid[1], uuid[2], uuid[3], uuid[4], uuid[5],
uuid[6], uuid[7], uuid[8], uuid[9], uuid[10], uuid[11],
uuid[12], uuid[13], uuid[14], uuid[15])
return uuid_str
def c_uuid(p_uuid, c_uuid):
""" utility function to create a UUID in C format from a python UUID """
hexstr = p_uuid.hex
for i in range(0, 31, 2):
c_uuid[int(i/2)] = int(hexstr[i:i+2], 16)
def str_to_c_uuid(uuidstr):
""" utility function to convert string format uuid to a C uuid """
uuidstr2 = '{' + uuidstr + '}'
puuid = uuid.UUID(uuidstr2)
cuuid = (ctypes.c_ubyte * 16)()
c_uuid(puuid, cuuid)
return cuuid
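# ---------------------------------------------------------------------------
# A minimal round-trip sketch through the helpers above, using only the
# imports already present in this file.
def _example_uuid_round_trip():
    src = uuid.uuid4()                 # a random Python-side UUID
    buf = (ctypes.c_ubyte * 16)()      # destination C buffer
    c_uuid(src, buf)                   # Python UUID -> 16 C bytes
    return c_uuid_to_str(buf)          # same digits, canonical grouping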
| 40.081633
| 79
| 0.67057
| 302
| 1,964
| 4.311258
| 0.456954
| 0.069124
| 0.096774
| 0.119816
| 0.036866
| 0.036866
| 0.036866
| 0.036866
| 0.036866
| 0.036866
| 0
| 0.051713
| 0.212322
| 1,964
| 48
| 80
| 40.916667
| 0.789916
| 0.589613
| 0
| 0
| 0
| 0
| 0.136364
| 0.13369
| 0
| 0
| 0
| 0
| 0
| 1
| 0.157895
| false
| 0
| 0.105263
| 0
| 0.368421
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a4f4bc06c12566c84246f7896cf490e49f35766
| 2,059
|
py
|
Python
|
SPH/sphbwr_example2.py
|
RLReed/unotran
|
b317107e1a39490dda732f86a731872f5207a167
|
[
"MIT"
] | null | null | null |
SPH/sphbwr_example2.py
|
RLReed/unotran
|
b317107e1a39490dda732f86a731872f5207a167
|
[
"MIT"
] | null | null | null |
SPH/sphbwr_example2.py
|
RLReed/unotran
|
b317107e1a39490dda732f86a731872f5207a167
|
[
"MIT"
] | 3
|
2019-12-02T23:01:24.000Z
|
2022-01-26T04:48:41.000Z
|
import numpy as np
import sys
sys.path.append('/homes/rlreed/workspace/unotran/src')
from coarseBounds import computeBounds, Grouping
import pickle
from makeDLPbasis import makeBasis as makeDLP
from makeKLTbasis import makeBasis as makeKLT
import sph
import sph_dgm
import pydgm
def buildGEO(ass_map):
fine_map = [1]
coarse_map = [1.26]
material_map = [[1], [2], [3], [4], [5], [6], [7], [8], [9], [10]]
npins = len(ass_map)
cm = [0.0]
fm = []
mm = []
for i, ass in enumerate(ass_map):
mm += material_map[ass]
cm += coarse_map
fm += fine_map
cm = np.cumsum(cm)
return npins, fm, cm, mm
def makeDGMXS(G, refXS, dgmstructure, basisType):
if 'klt' in basisType:
makeKLT(basisType, dgmstructure)
else:
makeDLP(dgmstructure)
dgmstructure.fname = '{}_{}'.format(basisType, dgmstructure.fname)
fname = '_homo.'.join(xs_name.split('.'))
refXS.write_homogenized_XS(fname)
nPin, fm, cm, mm = buildGEO(pin_map)
dgm = sph_dgm.DGMSOLVER(G, fname, fm, cm, mm, nPin, dgmstructure, solveFlag=False)
pydgm.dgmsolver.initialize_dgmsolver()
dgm.extractInfo()
pydgm.dgmsolver.finalize_dgmsolver()
pydgm.control.finalize_control()
nCellPerPin = dgm.phi.shape[2] // dgm.npin
return sph_dgm.XS(G, nCellPerPin, dgm.sig_t, dgm.vsig_f, dgm.chi, dgm.sig_s)
if __name__ == '__main__':
np.set_printoptions(precision=6)
G = 44
dgmstructure = computeBounds(G, 'full', 1, 0.0, 1.3, 60)
fname = dgmstructure.fname
xs_name = 'XS/{}gXS.anlxs'.format(G)
pin_map = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
data_path = 'data2'
# Get the homogenized cross sections
refXS = pickle.load(open('{}/refXS_sph_space_{}.p'.format(data_path, G), 'rb'))
for basis in ['dlp', 'klt_full', 'klt_combine', 'klt_pins_full']:
dgmstructure.fname = fname
XS = makeDGMXS(G, refXS, dgmstructure, basis)
pickle.dump(XS, open('{}/refXS_dgm_{}_{}_h{}.p'.format(data_path, dgmstructure.fname, 'fine_mu', 0), 'wb'))
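# ---------------------------------------------------------------------------
# A minimal sketch of buildGEO() on a hypothetical two-assembly map; only
# numpy is needed for this part.
def _example_buildGEO():
    npins, fm, cm, mm = buildGEO([0, 1])
    # npins == 2, fm == [1, 1], mm == [1, 2], and cm is the cumulative
    # coarse-mesh edge array: array([0.0, 1.26, 2.52])
    return npins, fm, cm, mm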
| 27.453333
| 115
| 0.644973
| 289
| 2,059
| 4.422145
| 0.408305
| 0.06651
| 0.014085
| 0.00626
| 0.014085
| 0.014085
| 0.014085
| 0.014085
| 0.014085
| 0
| 0
| 0.02448
| 0.206411
| 2,059
| 74
| 116
| 27.824324
| 0.75765
| 0.016513
| 0
| 0
| 0
| 0
| 0.086053
| 0.040554
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037736
| false
| 0
| 0.169811
| 0
| 0.245283
| 0.018868
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a4fd2b57960e4af2acbb3603c634154bea6e80b
| 9,280
|
py
|
Python
|
src/oci/management_agent/models/management_agent_aggregation_dimensions.py
|
CentroidChef/oci-python-sdk
|
fa406e27a52b40c70e220c20f52dfe2abe6236a3
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 249
|
2017-09-11T22:06:05.000Z
|
2022-03-04T17:09:29.000Z
|
src/oci/management_agent/models/management_agent_aggregation_dimensions.py
|
CentroidChef/oci-python-sdk
|
fa406e27a52b40c70e220c20f52dfe2abe6236a3
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 228
|
2017-09-11T23:07:26.000Z
|
2022-03-23T10:58:50.000Z
|
src/oci/management_agent/models/management_agent_aggregation_dimensions.py
|
CentroidChef/oci-python-sdk
|
fa406e27a52b40c70e220c20f52dfe2abe6236a3
|
[
"Apache-2.0",
"BSD-3-Clause"
] | 224
|
2017-09-27T07:32:43.000Z
|
2022-03-25T16:55:42.000Z
|
# coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class ManagementAgentAggregationDimensions(object):
"""
The Aggregation of Management Agent Dimensions
"""
#: A constant which can be used with the availability_status property of a ManagementAgentAggregationDimensions.
#: This constant has a value of "ACTIVE"
AVAILABILITY_STATUS_ACTIVE = "ACTIVE"
#: A constant which can be used with the availability_status property of a ManagementAgentAggregationDimensions.
#: This constant has a value of "SILENT"
AVAILABILITY_STATUS_SILENT = "SILENT"
#: A constant which can be used with the availability_status property of a ManagementAgentAggregationDimensions.
#: This constant has a value of "NOT_AVAILABLE"
AVAILABILITY_STATUS_NOT_AVAILABLE = "NOT_AVAILABLE"
#: A constant which can be used with the platform_type property of a ManagementAgentAggregationDimensions.
#: This constant has a value of "LINUX"
PLATFORM_TYPE_LINUX = "LINUX"
#: A constant which can be used with the platform_type property of a ManagementAgentAggregationDimensions.
#: This constant has a value of "WINDOWS"
PLATFORM_TYPE_WINDOWS = "WINDOWS"
#: A constant which can be used with the install_type property of a ManagementAgentAggregationDimensions.
#: This constant has a value of "AGENT"
INSTALL_TYPE_AGENT = "AGENT"
#: A constant which can be used with the install_type property of a ManagementAgentAggregationDimensions.
#: This constant has a value of "GATEWAY"
INSTALL_TYPE_GATEWAY = "GATEWAY"
def __init__(self, **kwargs):
"""
Initializes a new ManagementAgentAggregationDimensions object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param availability_status:
The value to assign to the availability_status property of this ManagementAgentAggregationDimensions.
Allowed values for this property are: "ACTIVE", "SILENT", "NOT_AVAILABLE", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type availability_status: str
:param platform_type:
The value to assign to the platform_type property of this ManagementAgentAggregationDimensions.
Allowed values for this property are: "LINUX", "WINDOWS", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type platform_type: str
:param version:
The value to assign to the version property of this ManagementAgentAggregationDimensions.
:type version: str
:param has_plugins:
The value to assign to the has_plugins property of this ManagementAgentAggregationDimensions.
:type has_plugins: bool
:param install_type:
The value to assign to the install_type property of this ManagementAgentAggregationDimensions.
Allowed values for this property are: "AGENT", "GATEWAY", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type install_type: str
"""
self.swagger_types = {
'availability_status': 'str',
'platform_type': 'str',
'version': 'str',
'has_plugins': 'bool',
'install_type': 'str'
}
self.attribute_map = {
'availability_status': 'availabilityStatus',
'platform_type': 'platformType',
'version': 'version',
'has_plugins': 'hasPlugins',
'install_type': 'installType'
}
self._availability_status = None
self._platform_type = None
self._version = None
self._has_plugins = None
self._install_type = None
@property
def availability_status(self):
"""
Gets the availability_status of this ManagementAgentAggregationDimensions.
The availability status of managementAgent
Allowed values for this property are: "ACTIVE", "SILENT", "NOT_AVAILABLE", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The availability_status of this ManagementAgentAggregationDimensions.
:rtype: str
"""
return self._availability_status
@availability_status.setter
def availability_status(self, availability_status):
"""
Sets the availability_status of this ManagementAgentAggregationDimensions.
The availability status of managementAgent
:param availability_status: The availability_status of this ManagementAgentAggregationDimensions.
:type: str
"""
allowed_values = ["ACTIVE", "SILENT", "NOT_AVAILABLE"]
if not value_allowed_none_or_none_sentinel(availability_status, allowed_values):
availability_status = 'UNKNOWN_ENUM_VALUE'
self._availability_status = availability_status
@property
def platform_type(self):
"""
Gets the platform_type of this ManagementAgentAggregationDimensions.
Platform Type
Allowed values for this property are: "LINUX", "WINDOWS", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The platform_type of this ManagementAgentAggregationDimensions.
:rtype: str
"""
return self._platform_type
@platform_type.setter
def platform_type(self, platform_type):
"""
Sets the platform_type of this ManagementAgentAggregationDimensions.
Platform Type
:param platform_type: The platform_type of this ManagementAgentAggregationDimensions.
:type: str
"""
allowed_values = ["LINUX", "WINDOWS"]
if not value_allowed_none_or_none_sentinel(platform_type, allowed_values):
platform_type = 'UNKNOWN_ENUM_VALUE'
self._platform_type = platform_type
@property
def version(self):
"""
Gets the version of this ManagementAgentAggregationDimensions.
Agent image version
:return: The version of this ManagementAgentAggregationDimensions.
:rtype: str
"""
return self._version
@version.setter
def version(self, version):
"""
Sets the version of this ManagementAgentAggregationDimensions.
Agent image version
:param version: The version of this ManagementAgentAggregationDimensions.
:type: str
"""
self._version = version
@property
def has_plugins(self):
"""
Gets the has_plugins of this ManagementAgentAggregationDimensions.
Whether or not a managementAgent has at least one plugin
:return: The has_plugins of this ManagementAgentAggregationDimensions.
:rtype: bool
"""
return self._has_plugins
@has_plugins.setter
def has_plugins(self, has_plugins):
"""
Sets the has_plugins of this ManagementAgentAggregationDimensions.
Whether or not a managementAgent has at least one plugin
:param has_plugins: The has_plugins of this ManagementAgentAggregationDimensions.
:type: bool
"""
self._has_plugins = has_plugins
@property
def install_type(self):
"""
Gets the install_type of this ManagementAgentAggregationDimensions.
The install type, either AGENT or GATEWAY
Allowed values for this property are: "AGENT", "GATEWAY", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The install_type of this ManagementAgentAggregationDimensions.
:rtype: str
"""
return self._install_type
@install_type.setter
def install_type(self, install_type):
"""
Sets the install_type of this ManagementAgentAggregationDimensions.
The install type, either AGENT or GATEWAY
:param install_type: The install_type of this ManagementAgentAggregationDimensions.
:type: str
"""
allowed_values = ["AGENT", "GATEWAY"]
if not value_allowed_none_or_none_sentinel(install_type, allowed_values):
install_type = 'UNKNOWN_ENUM_VALUE'
self._install_type = install_type
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
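# ---------------------------------------------------------------------------
# A minimal usage sketch: constructing the model from keyword arguments and
# exercising an enum setter; all values are hypothetical.
def _example_dimensions():
    dims = ManagementAgentAggregationDimensions(
        availability_status="ACTIVE", platform_type="LINUX",
        version="1.0.0", has_plugins=True, install_type="AGENT")
    dims.platform_type = "SOLARIS"   # not an allowed value, so the setter
    return dims.platform_type        # coerces it to 'UNKNOWN_ENUM_VALUE'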
| 37.57085
| 245
| 0.691487
| 1,028
| 9,280
| 6.033074
| 0.142023
| 0.081264
| 0.1693
| 0.059336
| 0.652693
| 0.568526
| 0.523702
| 0.486295
| 0.382941
| 0.382941
| 0
| 0.002573
| 0.246228
| 9,280
| 246
| 246
| 37.723577
| 0.88406
| 0.578772
| 0
| 0.064103
| 0
| 0
| 0.11083
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.179487
| false
| 0
| 0.025641
| 0.025641
| 0.423077
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a5059beb09af2b372b1d15c442329a32a505195
| 1,770
|
py
|
Python
|
py_buycoins/sending.py
|
Bashorun97/BuyCoins-Python-SDK
|
5b5e1ca6bfbfb56d30e99a737e431ca35b2e712b
|
[
"MIT"
] | 1
|
2021-02-16T14:26:30.000Z
|
2021-02-16T14:26:30.000Z
|
py_buycoins/sending.py
|
Bashorun97/BuyCoins-Python-SDK
|
5b5e1ca6bfbfb56d30e99a737e431ca35b2e712b
|
[
"MIT"
] | null | null | null |
py_buycoins/sending.py
|
Bashorun97/BuyCoins-Python-SDK
|
5b5e1ca6bfbfb56d30e99a737e431ca35b2e712b
|
[
"MIT"
] | null | null | null |
from .gcore.queries import GetNetworkFee, GetBalance
from .gcore.mutations import SendCoin
from typing import List, Optional
from .exc import SendLimitError, InvalidClientObject
class Send:
def __init__(self, address: str, cryptocurrency: str, amount: float):
self.address = address
self.cryptocurrency = cryptocurrency
self.amount = amount
limits = {
"bitcoin": 1,
"ethereum": 50,
"litecoin": 50,
"nairatoken": 2000000
}
def execute(self, client, response_fields):
try:
return client.execute(query=self.send(response_fields))
except AttributeError:
raise InvalidClientObject("<BuyCoinsClient> object expected, received {} instead".format(type(client)))
def get_network_fee(self, response_fields):
_price = GetNetworkFee()
return _price.queryObject(
response_fields=response_fields,
cryptocurrency=self.cryptocurrency, amount=self.amount
)
def check_limit(self):
if Send.limits[self.cryptocurrency.lower()] < self.amount:
return False
else:
return True
def send(self, response_fields):
if self.cryptocurrency.lower() in Send.limits.keys():
if self.check_limit():
return SendCoin().Mutate(
cryptocurrency=self.cryptocurrency,
response_fields=response_fields,
amount=self.amount,
address=self.address
)
else:
raise SendLimitError("Maximum daily transaction amount exceeded")
def balance(self, response_fields: List):
return GetBalance().queryObject(response_fields=response_fields)
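# ---------------------------------------------------------------------------
# A minimal sketch of the intended call pattern, assuming a
# BuyCoinsClient-like object; the address and values are hypothetical.
def _example_send(client):
    tx = Send(address="hypothetical-btc-address",
              cryptocurrency="bitcoin", amount=0.5)
    if tx.check_limit():  # 0.5 is below the 1 BTC daily limit above
        return tx.execute(client, response_fields=["id", "status"])
    return None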
| 33.396226
| 114
| 0.627684
| 170
| 1,770
| 6.423529
| 0.405882
| 0.115385
| 0.049451
| 0.076923
| 0.071429
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009531
| 0.288701
| 1,770
| 52
| 115
| 34.038462
| 0.857824
| 0
| 0
| 0.090909
| 0
| 0
| 0.071186
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.136364
| false
| 0
| 0.090909
| 0.022727
| 0.409091
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a51566e6f537d3c7defee7d9f6dd2e1ce52fbb6
| 2,190
|
py
|
Python
|
snippet/example/python/url.py
|
yp2800/snippet
|
054af596655007cbec81340bd166489e706fffe6
|
[
"MIT"
] | 94
|
2016-09-22T09:13:19.000Z
|
2022-03-30T07:35:35.000Z
|
snippet/example/python/url.py
|
yp2800/snippet
|
054af596655007cbec81340bd166489e706fffe6
|
[
"MIT"
] | 1
|
2020-11-22T03:05:05.000Z
|
2020-11-22T03:05:05.000Z
|
snippet/example/python/url.py
|
yp2800/snippet
|
054af596655007cbec81340bd166489e706fffe6
|
[
"MIT"
] | 38
|
2017-06-11T22:03:04.000Z
|
2022-03-10T07:46:39.000Z
|
# -*- coding: utf-8 -*-
try:
from urlparse import urlparse, urlunsplit
except ImportError:
from urllib.parse import urlparse, urlunsplit
class URL(object):
DEFAULT_SCHEME = ["http", "https"]
def __init__(self, url, allowed_scheme=None):
self._url = url
self.url = urlparse(self._url)
self._scheme = allowed_scheme if allowed_scheme else self.DEFAULT_SCHEME
def geturl(self):
scheme = self.scheme if self.scheme else self.url.scheme
netloc = self.netloc if self.netloc else self.url.netloc
url = self.path if self.path else self.url.path
params = self.params if self.params else self.url.params
query = self.query if self.query else self.url.query
fragment = self.fragment if self.fragment else self.url.fragment
if params:
url = "%s;%s" % (url, params)
return urlunsplit((scheme, netloc, url, query, fragment))
def get_full_url(self, base=None):
return self.s_get_full_url(self, base)
@staticmethod
def s_get_full_url(url, base=None):
if not base:
if url.scheme in url._scheme:
return url.geturl()
return None
if not url.scheme:
url.scheme = base.scheme
if url.scheme not in url._scheme:
return None
if not url.netloc:
url.netloc = base.netloc
if len(url.path) == 1 and url.path == '/':
return None
if url.path[0] != '/':
path = base.path.split('/')[:-1]
path.append(url.path)
url.path = '/'.join(path)
return url.geturl()
def __getattr__(self, name):
if name == "path":
path = getattr(self.url, name)
if not path:
return '/'
return path
return getattr(self.url, name)
def __setattr__(self, name, value):
object.__setattr__(self, name, value)
def __repr__(self):
s = "URL(scheme='%s', netloc='%s', path='%s', params='%s', query='%s', fragment='%s')"
p = (self.scheme, self.netloc, self.path, self.params, self.query, self.fragment)
return s % p
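# ---------------------------------------------------------------------------
# A minimal sketch: resolving a relative link against a base URL with the
# logic above; the addresses are hypothetical.
def _example_resolve():
    base = URL("http://example.com/docs/index.html")
    link = URL("page.html")
    # scheme/netloc are inherited from base and the relative path is joined
    # onto base's directory: 'http://example.com/docs/page.html'
    return link.get_full_url(base)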
| 31.73913
| 94
| 0.581279
| 286
| 2,190
| 4.318182
| 0.185315
| 0.068016
| 0.053441
| 0.022672
| 0.0583
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002609
| 0.3
| 2,190
| 68
| 95
| 32.205882
| 0.803001
| 0.009589
| 0
| 0.092593
| 0
| 0.018519
| 0.047531
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.12963
| false
| 0
| 0.055556
| 0.018519
| 0.425926
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a533004a2f846794254f71446a4268346a94d9f
| 550
|
py
|
Python
|
netvisor_api_client/services/dimension.py
|
tristen-tooming/netvisor-api-client
|
37c974dc1e6acf1d0bde7e6298b23ca4d14ffd69
|
[
"MIT"
] | null | null | null |
netvisor_api_client/services/dimension.py
|
tristen-tooming/netvisor-api-client
|
37c974dc1e6acf1d0bde7e6298b23ca4d14ffd69
|
[
"MIT"
] | null | null | null |
netvisor_api_client/services/dimension.py
|
tristen-tooming/netvisor-api-client
|
37c974dc1e6acf1d0bde7e6298b23ca4d14ffd69
|
[
"MIT"
] | null | null | null |
from .base import Service
from ..requests.dimension import CreateDimensionsRequest, DimensionsListRequest
class DimensionService(Service):
def create(self, data):
request = CreateDimensionsRequest(
self.client,
params={'method': 'add'},
data=data
)
return request.make_request()
def list(self, showhidden=None):
request = DimensionsListRequest(self.client,
params={'showhidden': showhidden})
return request.make_request()
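# ---------------------------------------------------------------------------
# A minimal sketch of the intended call pattern, assuming the base Service
# stores the client passed to it; the payload keys are hypothetical.
def _example_dimension_usage(client):
    service = DimensionService(client)
    service.create(data={"name": "Projects", "item": "Website"})
    return service.list(showhidden=1)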
| 28.947368
| 79
| 0.616364
| 46
| 550
| 7.326087
| 0.521739
| 0.059347
| 0.094955
| 0.142433
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.294545
| 550
| 19
| 80
| 28.947368
| 0.868557
| 0
| 0
| 0.142857
| 0
| 0
| 0.034483
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.142857
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a54146d12e005b9045dcbb5b4f63178061f1a78
| 7,338
|
py
|
Python
|
cishouseholds/filter.py
|
ONS-SST/cis_households
|
e475df5929e6763a46cd05aff1f7e960ccbe8e21
|
[
"MIT"
] | null | null | null |
cishouseholds/filter.py
|
ONS-SST/cis_households
|
e475df5929e6763a46cd05aff1f7e960ccbe8e21
|
[
"MIT"
] | 252
|
2021-05-19T11:12:43.000Z
|
2022-03-02T10:39:10.000Z
|
cishouseholds/filter.py
|
ONS-SST/cis_households
|
e475df5929e6763a46cd05aff1f7e960ccbe8e21
|
[
"MIT"
] | null | null | null |
from typing import List
from typing import Union
from pyspark.sql import DataFrame
from pyspark.sql import functions as F
from pyspark.sql.window import Window
def filter_all_not_null(df: DataFrame, reference_columns: List[str]) -> DataFrame:
"""
Filter rows which have NULL values in all the specified columns.
From households_aggregate_processes.xlsx, filter number 2.
Parameters
----------
df
reference_columns
Columns to check for missing values in, all
must be missing for the record to be dropped.
"""
return df.na.drop(how="all", subset=reference_columns)
def filter_duplicates_by_time_and_threshold(
df: DataFrame,
first_reference_column: str,
second_reference_column: str,
third_reference_column: str,
fourth_reference_column: str,
time_threshold: float = 1.5,
float_threshold: float = 0.00001,
) -> DataFrame:
"""
Drop duplicates based on two identical column values if the third and
fourth columns are not both within a threshold difference from the first
duplicate record.
From households_aggregate_processes.xlsx, filter number 4.
Parameters
----------
df
first_reference_column
First column with duplicate value
second_reference_column
Second column with duplicate value
third_reference_column
Column used for time based threshold difference, timestamp
fourth_reference_column
Column used for numeric based threshold difference, float
"""
window = Window.partitionBy(first_reference_column, second_reference_column).orderBy(third_reference_column)
df = df.withColumn("duplicate_id", F.row_number().over(window))
df = df.withColumn(
"within_time_threshold",
(
F.abs(
F.first(third_reference_column).over(window).cast("long") - F.col(third_reference_column).cast("long")
)
/ (60 * 60)
)
< time_threshold,
)
df = df.withColumn(
"within_float_threshold",
F.abs(F.first(fourth_reference_column).over(window) - F.col(fourth_reference_column)) < float_threshold,
)
df = df.filter((F.col("duplicate_id") == 1) | ~(F.col("within_time_threshold") & (F.col("within_float_threshold"))))
return df.drop("duplicate_id", "within_time_threshold", "within_float_threshold")
def filter_by_cq_diff(
df: DataFrame, comparing_column: str, ordering_column: str, tolerance: float = 0.00001
) -> DataFrame:
"""
This function works out which rows have a float value difference of less
than 10^-5 or 0.00001 (or any other tolerance value given), provided all
the other columns are the same, considers them to be duplicates, and
drops the repeated rows, keeping only one entry.
Parameters
----------
df
comparing_column
ordering_column
tolerance
"""
column_list = df.columns
column_list.remove(comparing_column)
windowSpec = Window.partitionBy(column_list).orderBy(ordering_column)
df = df.withColumn("first_value_in_duplicates", F.first(comparing_column).over(windowSpec))
df = df.withColumn(
"duplicates_first_record", F.abs(F.col("first_value_in_duplicates") - F.col(comparing_column)) < tolerance
)
difference_window = Window.partitionBy(column_list + ["duplicates_first_record"]).orderBy(ordering_column)
df = df.withColumn("duplicate_number", F.row_number().over(difference_window))
df = df.filter(~(F.col("duplicates_first_record") & (F.col("duplicate_number") != 1)))
df = df.drop("first_value_in_duplicates", "duplicates_first_record", "duplicate_number")
return df
def assign_date_interval_and_flag(
df: DataFrame,
column_name_inside_interval: str,
column_name_time_interval: str,
start_datetime_reference_column: str,
end_datetime_reference_column: str,
lower_interval: Union[int, float],
upper_interval: Union[int, float],
interval_format: str = "hours",
) -> DataFrame:
"""
This function computes the time interval, in either hours (by default) or
days, between two given date columns and flags whether it is inside an
upper and lower interval. If the difference between the dates is within
the upper and lower time intervals, the function will output None, and an
integer 1 if the difference is outside of those intervals.
Parameters
----------
df
column_name_inside_interval
Name of the column that returns whether the difference in dates are
within the upper/lower limits if within, it will return None, if outside
will return an integer 1.
column_name_time_interval
Name of the column that returns the difference between start and end
date and adds at the end of the column name whether it is in hours or
days
start_datetime_reference_column
Earliest date in string format yyyy-mm-dd hh:mm:ss.
end_datetime_reference_column
Latest date in string format yyyy-mm-dd hh:mm:ss.
lower_interval
Marks how much NEGATIVE time difference there can be between
end_datetime_reference_column and start_datetime_reference_column,
i.e. how much earlier end_datetime_reference_column can be than
start_datetime_reference_column
upper_interval
Marks how much POSITIVE time difference there can be between
end_datetime_reference_column and start_datetime_reference_column
interval_format
By default will be a string called 'hours' if upper and lower
intervals are input as days, define interval_format to 'days'.
These are the only two possible formats.
Notes
-----
Lower_interval should be a negative value if start_datetime_reference_column
is after end_datetime_reference_column."""
# by default, Hours but if days, apply change factor
if interval_format == "hours": # to convert hours to seconds
conversion_factor = 3600 # 1h has 60s*60min seconds = 3600 seconds
elif interval_format == "days":
conversion_factor = 86400 # 1 day has 60s*60min*24h seconds = 86400 seconds
column_name_time_interval = column_name_time_interval + "_" + interval_format
# FORMULA: (end_datetime_reference_column - start_datetime_reference_column) in
# seconds/conversion_factor in seconds
df = df.withColumn(
column_name_time_interval,
(
F.to_timestamp(F.col(end_datetime_reference_column)).cast("long")
- F.to_timestamp(F.col(start_datetime_reference_column)).cast("long")
)
/ conversion_factor, # 1 day has 60s*60min*24h seconds = 86400 seconds
)
return df.withColumn(
column_name_inside_interval,
F.when(~F.col(column_name_time_interval).between(lower_interval, upper_interval), 1).otherwise(None),
)
def file_exclude(df: DataFrame, source_file_col: str, files_to_exclude: list):
"""
Function to exclude specific files from pipeline processing
Parameters
----------
df
source_file_col
Column in the input dataframe which contains the source file
files_to_exclude
List of files to exclude (fed in from config)
"""
for item in files_to_exclude:
df = df.filter(~F.col(source_file_col).isin(item))
return df
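# ---------------------------------------------------------------------------
# A minimal sketch of filter_all_not_null() on a tiny in-memory DataFrame;
# assumes a local Spark installation is available.
def _example_filter_all_not_null():
    from pyspark.sql import SparkSession
    spark = SparkSession.builder.master("local[1]").getOrCreate()
    df = spark.createDataFrame(
        [(1, None, None), (2, "a", None), (3, None, "b")],
        ["id", "col_a", "col_b"])
    # Only the row with NULL in *both* reference columns is dropped
    return filter_all_not_null(df, ["col_a", "col_b"]).collect()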
| 38.020725
| 120
| 0.710139
| 980
| 7,338
| 5.096939
| 0.217347
| 0.093093
| 0.073674
| 0.044845
| 0.180581
| 0.108509
| 0.094494
| 0.061662
| 0.061662
| 0.046847
| 0
| 0.013169
| 0.213546
| 7,338
| 192
| 121
| 38.21875
| 0.852365
| 0.459526
| 0
| 0.1375
| 0
| 0
| 0.11446
| 0.081836
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.0625
| 0
| 0.1875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a544c66c68a458b980a2174bdc25da63354dc6e
| 6,088
|
py
|
Python
|
cscs-checks/cuda/multi_gpu.py
|
hpc-unibe-ch/reframe
|
07f97e25cf4e7319782c37dd1923f7e70a368b99
|
[
"BSD-3-Clause"
] | null | null | null |
cscs-checks/cuda/multi_gpu.py
|
hpc-unibe-ch/reframe
|
07f97e25cf4e7319782c37dd1923f7e70a368b99
|
[
"BSD-3-Clause"
] | null | null | null |
cscs-checks/cuda/multi_gpu.py
|
hpc-unibe-ch/reframe
|
07f97e25cf4e7319782c37dd1923f7e70a368b99
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright 2016-2020 Swiss National Supercomputing Centre (CSCS/ETH Zurich)
# ReFrame Project Developers. See the top-level LICENSE file for details.
#
# SPDX-License-Identifier: BSD-3-Clause
import os
import reframe.utility.sanity as sn
import reframe as rfm
@rfm.required_version('>=2.16-dev0')
@rfm.simple_test
class GpuBandwidthCheck(rfm.RegressionTest):
def __init__(self):
self.valid_systems = ['kesch:cn', 'daint:gpu', 'dom:gpu', 'tiger:gpu',
'arolla:cn', 'tsa:cn']
self.valid_prog_environs = ['PrgEnv-gnu']
if self.current_system.name in ['arolla', 'kesch', 'tsa']:
self.valid_prog_environs = ['PrgEnv-gnu-nompi']
self.exclusive_access = True
self.sourcesdir = os.path.join(
self.current_system.resourcesdir, 'CUDA', 'essentials'
)
self.build_system = 'SingleSource'
# Set nvcc flags
nvidia_sm = '60'
if self.current_system.name == 'kesch':
nvidia_sm = '37'
elif self.current_system.name in ['arolla', 'tsa']:
nvidia_sm = '70'
self.build_system.cxxflags = ['-I.', '-m64', '-arch=sm_%s' % nvidia_sm]
self.sourcepath = 'bandwidthtestflex.cu'
self.executable = 'gpu_bandwidth_check.x'
# Perform a single bandwidth test with a buffer size of 1024MB
self.min_buffer_size = 1073741824
self.max_buffer_size = 1073741824
self.executable_opts = ['device', 'all', '--mode=range',
'--start=%d' % self.min_buffer_size,
'--increment=%d' % self.min_buffer_size,
'--end=%d' % self.max_buffer_size, '--csv']
self.num_tasks = 0
self.num_tasks_per_node = 1
if self.current_system.name in ['daint', 'dom', 'tiger']:
self.modules = ['craype-accel-nvidia60']
self.num_gpus_per_node = 1
elif self.current_system.name == 'kesch':
self.modules = ['cudatoolkit/8.0.61']
self.num_gpus_per_node = 8
elif self.current_system.name in ['arolla', 'tsa']:
self.modules = ['cuda/10.1.243']
self.num_gpus_per_node = 8
# perf_patterns and reference will be set by the sanity check function
self.sanity_patterns = self.do_sanity_check()
self.perf_patterns = {}
self.reference = {}
self.__bwref = {
# FIXME: reference values for Arolla and Tsa need to be updated
# (sanity check fails if they are not defined)
'arolla:cn:h2d': (7583, -0.1, None, 'MB/s'),
'arolla:cn:d2h': (7584, -0.1, None, 'MB/s'),
'arolla:cn:d2d': (137408, -0.1, None, 'MB/s'),
'daint:gpu:h2d': (11881, -0.1, None, 'MB/s'),
'daint:gpu:d2h': (12571, -0.1, None, 'MB/s'),
'daint:gpu:d2d': (499000, -0.1, None, 'MB/s'),
'dom:gpu:h2d': (11881, -0.1, None, 'MB/s'),
'dom:gpu:d2h': (12571, -0.1, None, 'MB/s'),
'dom:gpu:d2d': (499000, -0.1, None, 'MB/s'),
'kesch:cn:h2d': (7583, -0.1, None, 'MB/s'),
'kesch:cn:d2h': (7584, -0.1, None, 'MB/s'),
'kesch:cn:d2d': (137408, -0.1, None, 'MB/s'),
'tiger:gpu:h2d': (0, None, None, 'MB/s'),
'tiger:gpu:d2h': (0, None, None, 'MB/s'),
'tiger:gpu:d2d': (0, None, None, 'MB/s'),
'tsa:cn:h2d': (7583, -0.1, None, 'MB/s'),
'tsa:cn:d2h': (7584, -0.1, None, 'MB/s'),
'tsa:cn:d2d': (137408, -0.1, None, 'MB/s'),
}
self.tags = {'diagnostic', 'benchmark', 'mch',
'craype', 'external-resources'}
self.maintainers = ['AJ', 'SK']
def _xfer_pattern(self, xfer_kind, devno, nodename):
'''generates search pattern for performance analysis'''
if xfer_kind == 'h2d':
first_part = 'bandwidthTest-H2D-Pinned'
elif xfer_kind == 'd2h':
first_part = 'bandwidthTest-D2H-Pinned'
else:
first_part = 'bandwidthTest-D2D'
# Extract the bandwidth corresponding to the maximum buffer size
return (r'^%s[^,]*,\s*%s[^,]*,\s*Bandwidth\s*=\s*(\S+)\s*MB/s([^,]*,)'
r'{2}\s*Size\s*=\s*%d\s*bytes[^,]*,\s*DeviceNo\s*=\s*-1'
r':%s' % (nodename, first_part, self.max_buffer_size, devno))
@sn.sanity_function
def do_sanity_check(self):
failures = []
devices_found = set(sn.extractall(
r'^\s*([^,]*),\s*Detected devices: %s' % self.num_gpus_per_node,
self.stdout, 1
))
sn.evaluate(sn.assert_eq(
self.job.num_tasks, len(devices_found),
msg='requested {0} node(s), got {1} (nodelist: %s)' %
','.join(sorted(devices_found))))
good_nodes = set(sn.extractall(
r'^\s*([^,]*),\s*NID\s*=\s*\S+\s+Result = PASS',
self.stdout, 1
))
sn.evaluate(sn.assert_eq(
devices_found, good_nodes,
msg='check failed on the following node(s): %s' %
','.join(sorted(devices_found - good_nodes)))
)
# Sanity is fine, fill in the perf. patterns based on the exact node id
for nodename in devices_found:
for xfer_kind in ('h2d', 'd2h', 'd2d'):
for devno in range(self.num_gpus_per_node):
perfvar = 'bw_%s_%s_gpu_%s' % (xfer_kind, nodename, devno)
self.perf_patterns[perfvar] = sn.extractsingle(
self._xfer_pattern(xfer_kind, devno, nodename),
self.stdout, 1, float, 0
)
partname = self.current_partition.fullname
refkey = '%s:%s' % (partname, perfvar)
bwkey = '%s:%s' % (partname, xfer_kind)
self.reference[refkey] = self.__bwref[bwkey]
return True
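# ---------------------------------------------------------------------------
# A minimal sketch of the performance-variable naming scheme built in
# do_sanity_check(), with a hypothetical node name.
def _example_perfvar_name():
    xfer_kind, nodename, devno = 'h2d', 'nid00001', 0
    # -> 'bw_h2d_nid00001_gpu_0'
    return 'bw_%s_%s_gpu_%s' % (xfer_kind, nodename, devno)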
| 42.873239
| 79
| 0.53433
| 764
| 6,088
| 4.113874
| 0.307592
| 0.018136
| 0.040089
| 0.03818
| 0.300986
| 0.24658
| 0.186128
| 0.134903
| 0
| 0
| 0
| 0.048207
| 0.308311
| 6,088
| 141
| 80
| 43.177305
| 0.698171
| 0.102989
| 0
| 0.089286
| 0
| 0.017857
| 0.198531
| 0.048118
| 0
| 0
| 0
| 0.007092
| 0.017857
| 1
| 0.026786
| false
| 0.008929
| 0.026786
| 0
| 0.080357
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a5505f918153846b19b1a912cedc52b11e1b4e9
| 1,552
|
py
|
Python
|
setup.py
|
rgooler/bootstrap-pip
|
34eaa648c81e3f8213b97cd33bda23b50743122a
|
[
"Unlicense"
] | null | null | null |
setup.py
|
rgooler/bootstrap-pip
|
34eaa648c81e3f8213b97cd33bda23b50743122a
|
[
"Unlicense"
] | null | null | null |
setup.py
|
rgooler/bootstrap-pip
|
34eaa648c81e3f8213b97cd33bda23b50743122a
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
import os
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
def read(*paths):
"""Build a file path from *paths* and return the contents."""
with open(os.path.join(*paths), 'r') as f:
return f.read()
install_requires = []
# install_requires = ['requests >= 2.1.0']
# For SNI support in Python 2, must install the following packages
# if sys.version_info[0] == 2:
# install_requires.append('pyOpenSSL >= 0.14')
# install_requires.append('ndg-httpsclient >= 0.3.3')
# install_requires.append('pyasn1 >= 0.1.7')
setup(
name='mymodule',
packages=['mymodule'],
version='0.1',
description='Desc',
long_description=(read('README.rst') + '\n\n' +
read('HISTORY.rst') + '\n\n' +
read('AUTHORS.rst')),
url='http://github.com/rgooler/bootstrap-pip/',
license='MIT',
author='Ryan Gooler',
author_email='ryan.gooler@gmail.com',
py_modules=['mymodule'],
install_requires=install_requires,
include_package_data=True,
classifiers=[
'Development Status :: 3 - Alpha',
'Intended Audience :: Developers',
'Natural Language :: English',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.7',
'Topic :: Software Development :: Libraries :: Python Modules',
],
)
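# ---------------------------------------------------------------------------
# Usage note (a sketch, assuming README.rst, HISTORY.rst and AUTHORS.rst sit
# next to this file): a source distribution can be built in the usual way,
#     python setup.py sdist
# and long_description is then the concatenation of those three files.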
| 30.431373
| 71
| 0.614046
| 181
| 1,552
| 5.19337
| 0.580111
| 0.111702
| 0.067021
| 0.06383
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018487
| 0.233247
| 1,552
| 50
| 72
| 31.04
| 0.771429
| 0.233892
| 0
| 0
| 0
| 0
| 0.39932
| 0.017842
| 0
| 0
| 0
| 0
| 0
| 1
| 0.027778
| false
| 0
| 0.111111
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a5c703189126174bfc5a0bc0302603a5b45186d
| 583
|
py
|
Python
|
Python/Samples/Observer/UtObserver.py
|
plasroom46/DesignPattern.Sample
|
86c05c5ae356cb01f3d075f248c45da3e6534d07
|
[
"MIT"
] | 9
|
2019-03-14T01:54:31.000Z
|
2021-11-26T13:00:32.000Z
|
Python/Samples/Observer/UtObserver.py
|
plasroom46/DesignPattern.Sample
|
86c05c5ae356cb01f3d075f248c45da3e6534d07
|
[
"MIT"
] | null | null | null |
Python/Samples/Observer/UtObserver.py
|
plasroom46/DesignPattern.Sample
|
86c05c5ae356cb01f3d075f248c45da3e6534d07
|
[
"MIT"
] | 2
|
2019-08-19T06:00:04.000Z
|
2021-07-15T01:23:52.000Z
|
import unittest
from Observers import Observer, ObserverMailServer, ObserverPbx
from Subjects import Subject, SubjectEflow
class UtVisitor(unittest.TestCase):
def test_observer(self):
# Create observers
pbx = ObserverPbx()
ms = ObserverMailServer()
# Create subject
subject = SubjectEflow()
subject.attach(pbx)
subject.attach(ms)
# Notify when JB is on leave of absence
subject.notify("JB", "Hachi")
self.assertTrue(True)
if __name__ == '__main__':
unittest.main()
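# ---------------------------------------------------------------------------
# Usage note (a sketch): the suite can also be run with the standard-library
# runner,
#     python -m unittest UtObserver
# assuming the Observers and Subjects modules are importable alongside it.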
| 21.592593
| 63
| 0.626072
| 56
| 583
| 6.357143
| 0.589286
| 0.106742
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.29331
| 583
| 26
| 64
| 22.423077
| 0.864078
| 0.113208
| 0
| 0
| 0
| 0
| 0.02924
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 1
| 0.071429
| false
| 0
| 0.214286
| 0
| 0.357143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a5f2654f1609f5c5550084dae95f8a37c34d9e6
| 4,247
|
py
|
Python
|
Python/2021/day_04/day_04.py
|
JonoRicci/Advent-Of-Code
|
1c092410d6ece195f4689788af4b1091acf10fbb
|
[
"MIT"
] | null | null | null |
Python/2021/day_04/day_04.py
|
JonoRicci/Advent-Of-Code
|
1c092410d6ece195f4689788af4b1091acf10fbb
|
[
"MIT"
] | null | null | null |
Python/2021/day_04/day_04.py
|
JonoRicci/Advent-Of-Code
|
1c092410d6ece195f4689788af4b1091acf10fbb
|
[
"MIT"
] | null | null | null |
"""
Day 04
"""
from logger import logger
def main() -> None:
"""
Import the puzzle input, process and display the results.
"""
puzzle_input = import_list()
logger.debug(puzzle_input)
final_score = play_bingo(puzzle_input)
for result in final_score:
logger.info(f"The final score is: {result}.")
def import_list() -> list:
"""
Import the puzzle input and return a list.
:return: Puzzle input text file as list
:rtype: list
"""
file = open("puzzle-input", "r")
string_list = file.read().splitlines()
file.close()
return string_list
def play_bingo(bingo_cards: list) -> list:
"""
Extract winning numbers, bingo boards from input.
Make a separate 2D list tracking wins.
For each winning number, check every board row and column for a match.
Add matches to the 2D list tracking wins.
Once done, check 2D list for winning columns / rows.
Add winning boards to new list along with winning number.
Multiply to get score.
:param bingo_cards: puzzle input where each line is a string
:return: First and last winning board score
:rtype: list
"""
winning_numbers = [int(x) for x in bingo_cards[0].split(",")]
logger.debug(f" Winning numbers: {winning_numbers}")
single_board = []
all_boards = []
final_score_list = []
# Get Bingo Boards
for line in range(len(bingo_cards)):
if "," not in bingo_cards[line]:
row = [int(x) for x in bingo_cards[line].split()]
if row:
logger.debug(row)
single_board.append(row)
elif single_board:
all_boards.append(single_board)
single_board = []
# Set up separate 2D list tracking matches to winning numbers.
unmarked_tracker = []
for board in all_boards:
assert len(board) == 5 and len(board[0]) == 5
unmarked_tracker.append([[False for _ in range(5)] for _ in range(5)])
# Set up list to track winning boards.
winning_board = [False for _ in range(len(all_boards))]
for number in winning_numbers:
for index, board in enumerate(all_boards):
logger.debug(f"Checking board: {index} for {number}")
# Check for winning numbers.
for row in range(5):
for column in range(5):
if board[row][column] == number:
logger.debug(f"{unmarked_tracker[index][row][column]} "
f"is True.")
unmarked_tracker[index][row][column] = True
# Check for 5 in a row.
won = False
for row in range(5):
ok = True
for column in range(5):
if not unmarked_tracker[index][row][column]:
ok = False
if ok:
won = True
# Check for 5 in a column.
for column in range(5):
ok = True
for row in range(5):
if not unmarked_tracker[index][row][column]:
ok = False
if ok:
won = True
# Check for each winning board.
if won and not winning_board[index]:
winning_board[index] = True
winning_boards_count = len([j for j in range(len(all_boards))
if winning_board[j]])
# If first or last board.
if winning_boards_count == 1 or winning_boards_count == \
len(all_boards):
# Calculate all unmarked.
unmarked = 0
for row in range(5):
for column in range(5):
if not unmarked_tracker[index][row][column]:
unmarked += board[row][column]
final_score_list.append(unmarked * number)
logger.debug(f"The final score is: {final_score_list[-1]}, "
f"which is {unmarked} * {number}.")
return final_score_list
if __name__ == "__main__":
main()
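# ---------------------------------------------------------------------------
# A minimal sketch: the nested 5-in-a-row checks above reduce to any()/all()
# over one board's boolean tracker ('marks' is a 5x5 list of booleans).
def _example_board_won(marks):
    row_win = any(all(marks[r][c] for c in range(5)) for r in range(5))
    col_win = any(all(marks[r][c] for r in range(5)) for c in range(5))
    return row_win or col_win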
| 32.419847
| 80
| 0.53732
| 515
| 4,247
| 4.299029
| 0.207767
| 0.041102
| 0.036134
| 0.051942
| 0.216802
| 0.150858
| 0.127823
| 0.109756
| 0.109756
| 0.109756
| 0
| 0.009399
| 0.373676
| 4,247
| 130
| 81
| 32.669231
| 0.822932
| 0.208147
| 0
| 0.287671
| 0
| 0
| 0.075246
| 0.018735
| 0
| 0
| 0
| 0
| 0.013699
| 1
| 0.041096
| false
| 0
| 0.041096
| 0
| 0.109589
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a612749e70c643dade9a21e3ef7dab25d3f46e9
| 1,982
|
py
|
Python
|
timeeval_experiments/algorithms/eif.py
|
HPI-Information-Systems/TimeEval
|
9b2717b89decd57dd09e04ad94c120f13132d7b8
|
[
"MIT"
] | 2
|
2022-01-29T03:46:31.000Z
|
2022-02-14T14:06:35.000Z
|
timeeval_experiments/algorithms/eif.py
|
HPI-Information-Systems/TimeEval
|
9b2717b89decd57dd09e04ad94c120f13132d7b8
|
[
"MIT"
] | null | null | null |
timeeval_experiments/algorithms/eif.py
|
HPI-Information-Systems/TimeEval
|
9b2717b89decd57dd09e04ad94c120f13132d7b8
|
[
"MIT"
] | null | null | null |
from durations import Duration
from typing import Any, Dict, Optional
from timeeval import Algorithm, TrainingType, InputDimensionality
from timeeval.adapters import DockerAdapter
from timeeval.params import ParameterConfig
_eif_parameters: Dict[str, Dict[str, Any]] = {
"extension_level": {
"defaultValue": None,
"description": "Extension level 0 resembles standard isolation forest. If unspecified (`None`), then `extension_level=X.shape[1] - 1`.",
"name": "extension_level",
"type": "int"
},
"limit": {
"defaultValue": None,
"description": "The maximum allowed tree depth. This is by default set to average length of unsucessful search in a binary tree.",
"name": "limit",
"type": "int"
},
"max_samples": {
"defaultValue": None,
"description": "The number of samples to draw from X to train each base estimator: `max_samples * X.shape[0]`. If unspecified (`None`), then `max_samples=min(256, X.shape[0])`.",
"name": "max_samples",
"type": "float"
},
"n_trees": {
"defaultValue": 200,
"description": "The number of decision trees (base estimators) in the forest (ensemble).",
"name": "n_trees",
"type": "int"
},
"random_state": {
"defaultValue": 42,
"description": "Seed for random number generation.",
"name": "random_state",
"type": "int"
}
}
def eif(params: ParameterConfig = None, skip_pull: bool = False, timeout: Optional[Duration] = None) -> Algorithm:
return Algorithm(
name="Extended Isolation Forest (EIF)",
main=DockerAdapter(
image_name="registry.gitlab.hpi.de/akita/i/eif",
skip_pull=skip_pull,
timeout=timeout,
group_privileges="akita",
),
preprocess=None,
postprocess=None,
param_schema=_eif_parameters,
param_config=params or ParameterConfig.defaults(),
data_as_file=True,
training_type=TrainingType.UNSUPERVISED,
input_dimensionality=InputDimensionality("multivariate")
)
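# ---------------------------------------------------------------------------
# A minimal sketch: building the algorithm definition with its defaults;
# assumes the timeeval package is installed.
def _example_eif_algorithm():
    algo = eif()  # no custom params, no timeout
    return algo.name, sorted(_eif_parameters.keys())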
| 33.033333
| 180
| 0.676085
| 230
| 1,982
| 5.713043
| 0.5
| 0.042618
| 0.061644
| 0.031963
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.00812
| 0.19223
| 1,982
| 59
| 181
| 33.59322
| 0.812617
| 0
| 0
| 0.12963
| 0
| 0.055556
| 0.42886
| 0.041372
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018519
| false
| 0
| 0.092593
| 0.018519
| 0.12963
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
4a614519b633b8e43e30737c32c7066d2365e9ab
| 5,548
|
py
|
Python
|
deepchem/models/tf_new_models/graph_models.py
|
KEHANG/deepchem
|
367bea14cab47b1093bf106e0c196bb02d55c755
|
[
"MIT"
] | null | null | null |
deepchem/models/tf_new_models/graph_models.py
|
KEHANG/deepchem
|
367bea14cab47b1093bf106e0c196bb02d55c755
|
[
"MIT"
] | null | null | null |
deepchem/models/tf_new_models/graph_models.py
|
KEHANG/deepchem
|
367bea14cab47b1093bf106e0c196bb02d55c755
|
[
"MIT"
] | 1
|
2021-07-09T19:58:54.000Z
|
2021-07-09T19:58:54.000Z
|
"""
Convenience classes for assembling graph models.
"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
__author__ = "Han Altae-Tran and Bharath Ramsundar"
__copyright__ = "Copyright 2016, Stanford University"
__license__ = "MIT"
import tensorflow as tf
from deepchem.nn.layers import GraphGather
from deepchem.models.tf_new_models.graph_topology import GraphTopology
class SequentialGraph(object):
"""An analog of Keras Sequential class for Graph data.
Like the Sequential class from Keras, but automatically passes topology
placeholders from GraphTopology to each graph layer (from layers) added
to the network. Non graph layers don't get the extra placeholders.
"""
def __init__(self, n_feat):
"""
Parameters
----------
n_feat: int
Number of features per atom.
"""
self.graph = tf.Graph()
with self.graph.as_default():
self.graph_topology = GraphTopology(n_feat)
self.output = self.graph_topology.get_atom_features_placeholder()
# Keep track of the layers
self.layers = []
def add(self, layer):
"""Adds a new layer to model."""
with self.graph.as_default():
############################################# DEBUG
#print("start - add()")
#print("self.output")
#print(self.output)
############################################# DEBUG
# For graphical layers, add connectivity placeholders
if type(layer).__name__ in ['GraphConv', 'GraphGather', 'GraphPool']:
if (len(self.layers) > 0 and hasattr(self.layers[-1], "__name__")):
assert self.layers[-1].__name__ != "GraphGather", \
'Cannot use GraphConv or GraphGather layers after a GraphGather'
self.output = layer([self.output] +
self.graph_topology.get_topology_placeholders())
else:
self.output = layer(self.output)
############################################# DEBUG
#print("end- add()")
#print("self.output")
#print(self.output)
############################################# DEBUG
# Add layer to the layer list
self.layers.append(layer)
def get_graph_topology(self):
return self.graph_topology
def get_num_output_features(self):
"""Gets the output shape of the featurization layers of the network"""
return self.layers[-1].output_shape[1]
def return_outputs(self):
return self.output
def return_inputs(self):
return self.graph_topology.get_input_placeholders()
def get_layer(self, layer_id):
return self.layers[layer_id]
class SequentialSupportGraph(object):
"""An analog of Keras Sequential model for test/support models."""
def __init__(self, n_feat):
"""
Parameters
----------
n_feat: int
Number of atomic features.
"""
self.graph = tf.Graph()
with self.graph.as_default():
# Create graph topology and x
self.test_graph_topology = GraphTopology(n_feat, name='test')
self.support_graph_topology = GraphTopology(n_feat, name='support')
self.test = self.test_graph_topology.get_atom_features_placeholder()
self.support = self.support_graph_topology.get_atom_features_placeholder()
# Keep track of the layers
self.layers = []
# Whether or not we have used the GraphGather layer yet
self.bool_pre_gather = True
def add(self, layer):
"""Adds a layer to both test/support stacks.
Note that the layer transformation is performed independently on the
test/support tensors.
"""
with self.graph.as_default():
self.layers.append(layer)
# Update new value of x
if type(layer).__name__ in ['GraphConv', 'GraphGather', 'GraphPool']:
assert self.bool_pre_gather, "Cannot apply graphical layers after gather."
self.test = layer([self.test] + self.test_graph_topology.topology)
self.support = layer([self.support] +
self.support_graph_topology.topology)
else:
self.test = layer(self.test)
self.support = layer(self.support)
if type(layer).__name__ == 'GraphGather':
self.bool_pre_gather = False # Set flag to stop adding topology
def add_test(self, layer):
"""Adds a layer to test."""
with self.graph.as_default():
self.layers.append(layer)
# Update new value of x
if type(layer).__name__ in ['GraphConv', 'GraphPool', 'GraphGather']:
self.test = layer([self.test] + self.test_graph_topology.topology)
else:
self.test = layer(self.test)
def add_support(self, layer):
"""Adds a layer to support."""
with self.graph.as_default():
self.layers.append(layer)
# Update new value of x
if type(layer).__name__ in ['GraphConv', 'GraphPool', 'GraphGather']:
self.support = layer([self.support] +
self.support_graph_topology.topology)
else:
self.support = layer(self.support)
def join(self, layer):
"""Joins test and support to a two input two output layer"""
with self.graph.as_default():
self.layers.append(layer)
self.test, self.support = layer([self.test, self.support])
def get_test_output(self):
return self.test
def get_support_output(self):
return self.support
def return_outputs(self):
return [self.test] + [self.support]
def return_inputs(self):
return (self.test_graph_topology.get_inputs() +
self.support_graph_topology.get_inputs())
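# ---------------------------------------------------------------------------
# A hypothetical usage sketch (the layer names/signatures are assumed to
# follow the deepchem version this file targets, so treat it as pseudocode):
#
#     from deepchem.nn.layers import GraphConv, GraphGather
#     g = SequentialGraph(n_feat=75)
#     g.add(GraphConv(64))      # graph layers get topology placeholders
#     g.add(GraphGather(32))    # no graph layers may be added after this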
| 32.635294
| 82
| 0.6469
| 685
| 5,548
| 5.029197
| 0.221898
| 0.067925
| 0.026415
| 0.030479
| 0.52598
| 0.455443
| 0.320755
| 0.306821
| 0.265602
| 0.218868
| 0
| 0.002073
| 0.217376
| 5,548
| 169
| 83
| 32.828402
| 0.79134
| 0.231435
| 0
| 0.477273
| 0
| 0
| 0.085171
| 0
| 0
| 0
| 0
| 0
| 0.022727
| 1
| 0.181818
| false
| 0
| 0.068182
| 0.090909
| 0.375
| 0.011364
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 4a629d479574b8f27c92b3a96ac0d80522d6e255
| 992
| py
| Python
| questionbank/users/urls.py
| SyafiqTermizi/questionbank
| 33e58db1a1610a85bd30a85d2f52e819bc27058b
| ["MIT"]
| 1
| 2018-04-17T23:58:46.000Z
| 2018-04-17T23:58:46.000Z
| questionbank/users/urls.py
| SyafiqTermizi/questionbank
| 33e58db1a1610a85bd30a85d2f52e819bc27058b
| ["MIT"]
| 8
| 2019-12-04T23:08:00.000Z
| 2022-02-13T22:48:26.000Z
| questionbank/users/urls.py
| SyafiqTermizi/questionbank
| 33e58db1a1610a85bd30a85d2f52e819bc27058b
| ["MIT"]
| null | null | null |
from django.urls import path
from .views import (
UserListView, UserUpdateView, UserProfileView, UserDeleteView,
AcceptInvitationView, SpecialtyListView, SpecialtyCreateView, SpecialtyUpdateView,
SpecialtyDeleteView
)
app_name = 'users'
urlpatterns = [
path('', UserListView.as_view(), name='list'),
path('<int:pk>/', UserUpdateView.as_view(), name='update'),
path('<int:pk>/delete/', UserDeleteView.as_view(), name='delete'),
path('profile/', UserProfileView.as_view(), name='profile'),
path(
'invite/<str:token>/', AcceptInvitationView.as_view(),
name='accept_invite'
),
path('specialties/', SpecialtyListView.as_view(), name='specialty_list'),
path('specialties/create/', SpecialtyCreateView.as_view(), name='specialty_create'),
path('specialties/<int:pk>/update/', SpecialtyUpdateView.as_view(), name='specialty_update'),
path('specialties/<int:pk>/delete/', SpecialtyDeleteView.as_view(), name='specialty_delete')
]
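# --- Editor's note: illustrative only, added by the editor. With this module
# included under the 'users' app namespace (per app_name above), the named routes
# resolve via Django's reverse(); the leading prefix depends on where this
# URLconf is include()d, so the paths shown are assumptions.
#
# from django.urls import reverse
# reverse('users:update', kwargs={'pk': 1})                 # e.g. '/users/1/'
# reverse('users:accept_invite', kwargs={'token': 'abc'})   # e.g. '/users/invite/abc/'
# reverse('users:specialty_list')                           # e.g. '/users/specialties/'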
| 39.68
| 97
| 0.708669
| 100
| 992
| 6.88
| 0.34
| 0.078488
| 0.130814
| 0.110465
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.126008
| 992
| 24
| 98
| 41.333333
| 0.793541
| 0
| 0
| 0
| 0
| 0
| 0.243952
| 0.056452
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.095238
| 0
| 0.095238
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 4a6439ff07d926ead0739ddd1b337b6e86927570
| 8,197
| py
| Python
| qiskit_machine_learning/algorithms/regressors/neural_network_regressor.py
| Zoufalc/qiskit-machine-learning
| aae3941214cd9667a53b643f229d11d0bff32c60
| ["Apache-2.0"]
| 1
| 2021-07-07T21:23:38.000Z
| 2021-07-07T21:23:38.000Z
| qiskit_machine_learning/algorithms/regressors/neural_network_regressor.py
| Zoufalc/qiskit-machine-learning
| aae3941214cd9667a53b643f229d11d0bff32c60
| ["Apache-2.0"]
| null | null | null |
| qiskit_machine_learning/algorithms/regressors/neural_network_regressor.py
| Zoufalc/qiskit-machine-learning
| aae3941214cd9667a53b643f229d11d0bff32c60
| ["Apache-2.0"]
| 1
| 2021-04-11T14:30:32.000Z
| 2021-04-11T14:30:32.000Z
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
""" Neural network regressor """
from typing import Union
import numpy as np
from qiskit.algorithms.optimizers import Optimizer
from ...exceptions import QiskitMachineLearningError
from ...neural_networks import NeuralNetwork
from ...utils.loss_functions import (Loss, L1Loss, L2Loss, CrossEntropyLoss,
CrossEntropySigmoidLoss)
class NeuralNetworkRegressor:
""" Quantum neural network regressor"""
def __init__(self, neural_network: NeuralNetwork,
loss: Union[str, Loss] = 'l2',
optimizer: Optimizer = None,
warm_start: bool = False):
"""
Args:
            neural_network: An instance of a quantum neural network. If the neural network has a
one-dimensional output, i.e., `neural_network.output_shape=(1,)`, then it is
expected to return values in [-1, +1] and it can only be used for binary
classification. If the output is multi-dimensional, it is assumed that the result
is a probability distribution, i.e., that the entries are non-negative and sum up
to one. Then there are two options, either one-hot encoding or not. In case of
                one-hot encoding, each probability vector produced by the neural network is considered
as one sample and the loss function is applied to the whole vector. Otherwise, each
entry of the probability vector is considered as an individual sample and the loss
function is applied to the index and weighted with the corresponding probability.
loss: A target loss function to be used in training. Default is `l2`, i.e. L2 loss.
Can be given either as a string for 'l1', 'l2', 'cross_entropy',
'cross_entropy_sigmoid', or as a loss function implementing the Loss interface.
optimizer: An instance of an optimizer to be used in training.
warm_start: Use weights from previous fit to start next fit.
Raises:
QiskitMachineLearningError: unknown loss, invalid neural network
"""
self._neural_network = neural_network
if len(neural_network.output_shape) > 1:
raise QiskitMachineLearningError('Invalid neural network output shape!')
if isinstance(loss, Loss):
self._loss = loss
else:
if loss.lower() == 'l1':
self._loss = L1Loss()
elif loss.lower() == 'l2':
self._loss = L2Loss()
elif loss.lower() == 'cross_entropy':
self._loss = CrossEntropyLoss()
elif loss.lower() == 'cross_entropy_sigmoid':
self._loss = CrossEntropySigmoidLoss()
else:
raise QiskitMachineLearningError(f'Unknown loss {loss}!')
self._optimizer = optimizer
self._warm_start = warm_start
self._fit_result = None
@property
def neural_network(self):
""" Returns the underlying neural network."""
return self._neural_network
@property
def loss(self):
""" Returns the underlying neural network."""
return self._loss
@property
def warm_start(self) -> bool:
""" Returns the warm start flag."""
return self._warm_start
@warm_start.setter
def warm_start(self, warm_start: bool) -> None:
""" Sets the warm start flag."""
self._warm_start = warm_start
def fit(self, X: np.ndarray, y: np.ndarray): # pylint: disable=invalid-name
"""
Fit the model to data matrix X and target(s) y.
Args:
X: The input data.
y: The target values.
Returns:
            self: returns a trained regressor.
Raises:
QiskitMachineLearningError: In case of invalid data (e.g. incompatible with network)
"""
if self._neural_network.output_shape == (1,):
# TODO: we should add some reasonable compatibility checks and raise meaningful errors.
def objective(w):
predict = self._neural_network.forward(X, w)
target = np.array(y).reshape(predict.shape)
value = np.sum(self._loss(predict, target))
return value
def objective_grad(w):
# TODO should store output from forward pass (implement loss interface?)
# TODO: need to be able to turn off input grads if not needed.
output = self._neural_network.forward(X, w)
_, weights_grad = self._neural_network.backward(X, w)
grad = np.zeros((1, self._neural_network.num_weights))
for i in range(len(X)):
grad += self._loss.gradient(output[i][0], y[i]) * weights_grad[i]
return grad
else:
def objective(w):
val = 0.0
probs = self._neural_network.forward(X, w)
for i in range(len(X)):
for y_predict, prob in enumerate(probs[i]):
val += prob * self._loss(y_predict, y[i])
return val
def objective_grad(w):
num_classes = self._neural_network.output_shape[0]
grad = np.zeros((1, self._neural_network.num_weights))
for x, y_target in zip(X, y):
# TODO: do batch eval
_, weight_prob_grad = self._neural_network.backward(x, w)
for i in range(num_classes):
grad += weight_prob_grad[
0, i, :].reshape(grad.shape) * self._loss(i, y_target)
return grad
if self._warm_start and self._fit_result is not None:
initial_point = self._fit_result[0]
else:
initial_point = np.random.rand(self._neural_network.num_weights)
self._fit_result = self._optimizer.optimize(self._neural_network.num_weights, objective,
objective_grad, initial_point=initial_point)
return self
def predict(self, X: np.ndarray) -> np.ndarray: # pylint: disable=invalid-name
"""
        Predict using the neural network specified for the regression.
Args:
X: The input data.
Raises:
QiskitMachineLearningError: Model needs to be fit to some training data first
Returns:
The predicted values.
"""
if self._fit_result is None:
raise QiskitMachineLearningError('Model needs to be fit to some training data first!')
# TODO: proper handling of batching
return self._neural_network.forward(X, self._fit_result[0])
    def score(self, X: np.ndarray, y: np.ndarray) -> float:  # pylint: disable=invalid-name
"""
        Return the R-squared score on the given test data and target values.
Args:
X: Test samples.
y: True target values given `X`.
Raises:
QiskitMachineLearningError: Model needs to be fit to some training data first
Returns:
R-squared value.
"""
if self._fit_result is None:
raise QiskitMachineLearningError('Model needs to be fit to some training data first!')
predict = self.predict(X)
# Compute R2 for score
ss_res = sum(map(lambda k: (k[0] - k[1]) ** 2, zip(y, predict)))
ss_tot = sum([(k - np.mean(y)) ** 2 for k in y])
score = 1 - (ss_res / ss_tot)
if len(np.array(score).shape) > 0:
return score[0]
else:
return score
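# --- Editor's note: hedged usage sketch, added; not in the original file. It
# assumes a qiskit-machine-learning NeuralNetwork instance `qnn` built elsewhere,
# and uses the COBYLA optimizer from qiskit.algorithms.optimizers.
#
# import numpy as np
# from qiskit.algorithms.optimizers import COBYLA
#
# regressor = NeuralNetworkRegressor(neural_network=qnn, loss='l2',
#                                    optimizer=COBYLA(maxiter=100))
# regressor.fit(X_train, y_train)       # X_train, y_train: np.ndarray
# y_pred = regressor.predict(X_test)
# r2 = regressor.score(X_test, y_test)  # R-squared, per score() above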
| 39.791262
| 99
| 0.595218
| 1,005
| 8,197
| 4.734328
| 0.269652
| 0.079235
| 0.053594
| 0.025221
| 0.262926
| 0.184741
| 0.148382
| 0.125263
| 0.105507
| 0.089533
| 0
| 0.007398
| 0.323899
| 8,197
| 205
| 100
| 39.985366
| 0.851137
| 0.376601
| 0
| 0.25
| 0
| 0
| 0.042006
| 0.004501
| 0
| 0
| 0
| 0.009756
| 0
| 1
| 0.125
| false
| 0
| 0.0625
| 0
| 0.3125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 4a655d791ecdecd8d04559095721de06fb34dc2a
| 2,380
| py
| Python
| residuals.py
| fbob/mplFOAM
| 90c9a970ba9975ce115ef5a66eb22fc463b54003
| ["MIT"]
| 8
| 2016-11-01T05:43:48.000Z
| 2022-01-27T02:12:29.000Z
| residuals.py
| fbob/mplFOAM
| 90c9a970ba9975ce115ef5a66eb22fc463b54003
| ["MIT"]
| null | null | null |
| residuals.py
| fbob/mplFOAM
| 90c9a970ba9975ce115ef5a66eb22fc463b54003
| ["MIT"]
| 3
| 2016-11-01T05:44:01.000Z
| 2019-05-15T04:04:57.000Z
|
#!/usr/bin/env python
# encoding: utf-8
import sys
import getopt
import re
import os
import pylab as plt
import numpy as np
# Define the variables for which the residuals will be plotted
variables = ["Ux", "Uy", "T", "p_rgh", "k", "epsilon"]
# Get the arguments of the script
def usage():
print("Usage: residuals.py -l logfile\nPlot the residuals versus Time/Iteration")
try:
options, args = getopt.getopt(sys.argv[1:], 'l:h', ['help', 'logfile='])
except getopt.GetoptError:
usage()
sys.exit(2)
log_file = None
for opt, arg in options:
    if opt in ("-l", "--logfile"):
        log_file = arg
    elif opt in ("-h", "--help"):
        usage()
        sys.exit(1)
if log_file is None:  # no -l option given; fail with usage instead of a NameError
    usage()
    sys.exit(2)
# Get the lines of the logfile 'log_file'
with open(log_file, "r") as f:
    lines = f.readlines()
# Get the time and continuity values
time = [] # Time(s) or iterations counter
continuity = [] # Continuity values
for line in lines:
if re.search(r"^Time = ", line): # Search for string 'Time' at the begining of the line in file
start = 'Time = '
value = line.split(start)[1] # Take the Time value as the string just after start
time.append(np.float(value)) # Transform the string in a float value
elif re.search(r"continuity errors :", line): # Search for string 'continuity' in the lines of file 'log_file'
start = 'sum local = '
end = ', global'
value = line.split(start)[1].split(end)[0] # Take the continuity value as string between start and end
continuity.append(np.float(value)) # Transform the string in a float value
# Get the residual values for each variable
for variable in variables:
data = []
for line in lines:
if re.search(r"Solving for " + variable, line):# Search for string variable in line of file 'log_file'
start = 'Final residual = '
end = ', No Iterations'
value = line.split(start)[1].split(end)[0]
            data.append(float(value))  # builtin float; np.float is deprecated
    plt.plot(np.array(time), np.array(data), label=variable)  # Plot the residuals of this variable
plt.plot(np.array(time), np.array(continuity), label="Continuity")  # Plot the continuity values
# Plot
plt.title("Residuals plot:\n * logfile: " + log_file + "\n * case dir: " + os.getcwd().split('/')[-1], loc='left')
plt.xlabel("Time(s)/Iterations")
plt.ylabel("Residuals (Log Scale)")
plt.yscale('log')
plt.legend()
plt.grid()
plt.show()
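# --- Editor's note: usage illustration, added. A typical invocation, and the
# OpenFOAM log lines the regexes above are written against (formats inferred
# from the parsing code, so treat them as assumptions):
#
#   python residuals.py -l log.simpleFoam
#
# Lines matched:
#   Time = 100
#   smoothSolver:  Solving for Ux, Initial residual = 0.1, Final residual = 0.0001, No Iterations 4
#   time step continuity errors : sum local = 1.2e-06, global = -3.4e-08, cumulative = 5.6e-08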
| 34.492754
| 114
| 0.64958
| 355
| 2,380
| 4.335211
| 0.343662
| 0.02729
| 0.02729
| 0.037037
| 0.202729
| 0.166342
| 0.166342
| 0.133853
| 0.063678
| 0.063678
| 0
| 0.005333
| 0.212185
| 2,380
| 68
| 115
| 35
| 0.815467
| 0.305462
| 0
| 0.12
| 0
| 0
| 0.198286
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02
| false
| 0
| 0.12
| 0
| 0.14
| 0.02
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 4a6a1474e56bbc2b491bd544f9d2c60a78d79285
| 1,216
| py
| Python
| training_stats/hrm.py
| salwator/training_stats
| 3f3bacbaa01e90e8658cf5b66bede42a37e3fb6e
| ["MIT"]
| 4
| 2018-01-02T01:10:03.000Z
| 2019-02-09T23:37:13.000Z
| training_stats/hrm.py
| salwator/training_stats
| 3f3bacbaa01e90e8658cf5b66bede42a37e3fb6e
| ["MIT"]
| 4
| 2018-01-05T16:46:35.000Z
| 2019-03-19T22:10:36.000Z
| training_stats/hrm.py
| salwator/training_stats
| 3f3bacbaa01e90e8658cf5b66bede42a37e3fb6e
| ["MIT"]
| 2
| 2016-12-09T22:36:58.000Z
| 2018-07-22T12:58:06.000Z
|
from .gpxfile import get_hr_measurements
from .utils import interpolate
from operator import itemgetter
def __calculate_moving_sums(points, window):
""" Calculates hr moving sums of the window len """
time, hrs = zip(*points)
moving_sum = sum(hrs[0:window])
sums = [(time[0], moving_sum)]
for i, t in enumerate(time[1:-1 * window]):
moving_sum += hrs[i + window] - hrs[i]
sums.append((t, moving_sum))
return sums
def calculate_lactate_threshold(hrdata):
""" Given list of (time, hr), returns lactate threshold and selected data"""
test_period = 60 * 30 # test time
measured_period = 60 * 20 # measured period in seconds
hrs = interpolate(hrdata)
time_stamp, max_sum = max(__calculate_moving_sums(hrs, test_period),
key=itemgetter(1))
    # the lactate threshold is the average HR over the last 20 minutes of a 30-minute tempo run
start_measure = time_stamp + (test_period - measured_period)
stop_measure = start_measure + measured_period
measured_time, measured_hrs = zip(*hrs[start_measure:stop_measure])
lactate_thr = round(sum(measured_hrs) / measured_period)
return (lactate_thr, measured_time, measured_hrs)
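# --- Editor's note: minimal usage sketch, added; the sample data is hypothetical.
# calculate_lactate_threshold() expects (time_in_seconds, heart_rate) pairs, and
# interpolate() (imported above) is assumed to resample them to one per second.
#
# hrdata = [(0, 118), (1, 119), (2, 121)]  # ... e.g. from get_hr_measurements()
# lactate_thr, times, hrs = calculate_lactate_threshold(hrdata)
# print(f"Estimated lactate threshold: {lactate_thr} bpm")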
| 39.225806
| 80
| 0.693257
| 166
| 1,216
| 4.849398
| 0.385542
| 0.086957
| 0.047205
| 0.057143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.017782
| 0.213816
| 1,216
| 30
| 81
| 40.533333
| 0.824268
| 0.184211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.136364
| 0
| 0.318182
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|