hexsha stringlengths 40 40 | size int64 3 1.03M | ext stringclasses 10
values | lang stringclasses 1
value | max_stars_repo_path stringlengths 3 972 | max_stars_repo_name stringlengths 6 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 972 | max_issues_repo_name stringlengths 6 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 116k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 972 | max_forks_repo_name stringlengths 6 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 3 1.03M | avg_line_length float64 1.13 941k | max_line_length int64 2 941k | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
a71f8d8a4438dc27df092caa2b6fafa27beb56a8 | 514 | py | Python | feedback/models.py | DisMosGit/car_library | 56adb1fb9809ba760d357599030afc2a779605eb | [
"Apache-2.0"
] | null | null | null | feedback/models.py | DisMosGit/car_library | 56adb1fb9809ba760d357599030afc2a779605eb | [
"Apache-2.0"
] | null | null | null | feedback/models.py | DisMosGit/car_library | 56adb1fb9809ba760d357599030afc2a779605eb | [
"Apache-2.0"
] | null | null | null | from django.db import models
from users.models import User
# Create your models here.
class Feedback(models.Model):
    """Abstract base model for a user-submitted feedback entry.

    NOTE(review): ``Meta.abstract = True`` means Django creates no table for
    this model; concrete feedback models are presumably declared elsewhere.
    """

    # Author of the feedback; deleting the user cascades to their feedback.
    # Reverse accessor on User is ``user.feedbacks``.
    user = models.ForeignKey(User,
                             related_name='feedbacks',
                             on_delete=models.CASCADE)
    title = models.CharField(max_length=255)  # short one-line summary
    data = models.TextField()  # free-form feedback body
    datetime = models.DateTimeField(auto_now_add=True)  # set once at creation

    def __str__(self):
        # Human-readable representation (admin, shell, logs).
        return self.title

    class Meta:
        abstract = True
| 24.47619 | 55 | 0.593385 |
f10d5262cc55206a34fe29de4a374f9c55e93263 | 432 | py | Python | TopQuarkAnalysis/TopEventProducers/python/sequences/ttSemiLepMVASelection_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | TopQuarkAnalysis/TopEventProducers/python/sequences/ttSemiLepMVASelection_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | TopQuarkAnalysis/TopEventProducers/python/sequences/ttSemiLepMVASelection_cff.py | ckamtsikis/cmssw | ea19fe642bb7537cbf58451dcf73aa5fd1b66250 | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | import FWCore.ParameterSet.Config as cms
#
# make mva event selection for semileptonic events
#
## std sequence to produce an mva discriminant
from TopQuarkAnalysis.TopEventSelection.TtSemiLepSignalSelMVAComputer_cff import *
## make mva discriminant for event selection
# Task wrapping the MVA discriminant producer (modern task-based scheduling).
makeTtSemiLepMVASelDiscriminantTask = cms.Task(findTtSemiLepSignalSelMVA)
# Sequence form of the same task, kept for configs that schedule sequences.
makeTtSemiLepMVASelDiscriminant = cms.Sequence(makeTtSemiLepMVASelDiscriminantTask)
| 33.230769 | 83 | 0.856481 |
a2c770014dbd4460a04786781c2b759e16853f42 | 364 | py | Python | test.py | VendettaMask/study_python | 3b9f94d20e46a3114fa65a6361b2438995c0679f | [
"MIT"
] | null | null | null | test.py | VendettaMask/study_python | 3b9f94d20e46a3114fa65a6361b2438995c0679f | [
"MIT"
] | null | null | null | test.py | VendettaMask/study_python | 3b9f94d20e46a3114fa65a6361b2438995c0679f | [
"MIT"
] | null | null | null | Str = input()
# Simple shift cipher over the input string ``Str``:
#   - spaces are emitted unchanged,
#   - 'x', 'y', 'z' wrap around to 'a', 'b', 'c' (code point - 23),
#   - every other character is shifted forward by 3 code points.
# Output is printed character by character with no trailing newline.
for ch in Str:
    if ch == ' ':
        print(' ', end="")
    elif ch in ('x', 'y', 'z'):
        # wrap the last three letters back to the start of the alphabet
        print(chr(ord(ch) - 23), end='')
    else:
        print(chr(ord(ch) + 3), end='')
| 33.090909 | 67 | 0.431319 |
64d5f681346b1a94e0a8f499a2d355fb80861282 | 999 | py | Python | EtlPipeline/tests/test_get_stats.py | office-for-students/beta-data-pipelines | 36b90c7720e0be0f807d93a31cf7346522b6e1f0 | [
"MIT"
] | 2 | 2019-06-04T14:15:16.000Z | 2019-08-04T15:26:16.000Z | EtlPipeline/tests/test_get_stats.py | office-for-students/beta-data-pipelines | 36b90c7720e0be0f807d93a31cf7346522b6e1f0 | [
"MIT"
] | 3 | 2019-06-24T12:21:10.000Z | 2019-07-22T11:15:27.000Z | EtlPipeline/tests/test_get_stats.py | office-for-students/beta-data-pipelines | 36b90c7720e0be0f807d93a31cf7346522b6e1f0 | [
"MIT"
] | 1 | 2019-09-26T19:29:15.000Z | 2019-09-26T19:29:15.000Z | # import unittest
#
# import defusedxml.ElementTree as ET
# import xmltodict
#
# import course_stats
# from course_docs import get_country
# from testing_utils import get_string
#
#
# class TestGetStats(unittest.TestCase):
# def test_with_25_courses(self):
# """Initial smoke test"""
# xml_string = get_string("fixtures/25_courses.xml")
# root = ET.fromstring(xml_string)
# for institution in root.iter("INSTITUTION"):
# raw_inst_data = xmltodict.parse(ET.tostring(institution))[
# "INSTITUTION"
# ]
# country_code = get_country(raw_inst_data)["code"]
# for course in institution.findall("KISCOURSE"):
# raw_course_data = xmltodict.parse(ET.tostring(course))[
# "KISCOURSE"
# ]
# course_stats.get_stats(raw_course_data, country_code)
#
#
# # TODO Test more of the functionality
#
# if __name__ == "__main__":
# unittest.main()
| 31.21875 | 73 | 0.620621 |
3dc2ee1bce9672d18fecb39aae75c0ac57dfeae8 | 457 | py | Python | examples/framework_ideas/dependency_injection.py | daniel-butler/python-clean-architecture | a95da91fffb1120e1e748c9ee7717a622647288e | [
"MIT"
] | null | null | null | examples/framework_ideas/dependency_injection.py | daniel-butler/python-clean-architecture | a95da91fffb1120e1e748c9ee7717a622647288e | [
"MIT"
] | null | null | null | examples/framework_ideas/dependency_injection.py | daniel-butler/python-clean-architecture | a95da91fffb1120e1e748c9ee7717a622647288e | [
"MIT"
] | 1 | 2019-12-11T01:32:08.000Z | 2019-12-11T01:32:08.000Z | from pca.utils.dependency_injection import Container as BaseContainer, Scopes
class Container(BaseContainer):
    """Dependency-injection container whose default component scope is REQUEST."""

    def __init__(self, default_scope=Scopes.REQUEST, request_strategy=None):
        """Initialize the container.

        :param default_scope: scope assigned to components registered without
            an explicit scope; defaults to ``Scopes.REQUEST``.
        :param request_strategy: strategy for resolving the current request;
            stored but not used yet (see TODO below).
        """
        super().__init__(default_scope)
        # TODO #9. implement request scope
        self.request_strategy = request_strategy

    def load_from_file(self, path):
        """Load container configuration from the file at ``path``.

        Not implemented yet; always raises :class:`NotImplementedError`.
        """
        # TODO #9. implement file configuration of DI Container
        raise NotImplementedError
| 32.642857 | 77 | 0.737418 |
3b7a53276560e95c2c241ad17537e0bbb3071af7 | 12,682 | py | Python | datasets/epa_historical_air_quality/hap_daily_summary/hap_daily_summary_dag.py | renovate-bot/public-datasets-pipelines | d2b5e527d9d2dcc8e01f5209e7b9409dfe2b62a8 | [
"Apache-2.0"
] | 90 | 2021-04-09T19:20:19.000Z | 2022-03-31T16:03:14.000Z | datasets/epa_historical_air_quality/hap_daily_summary/hap_daily_summary_dag.py | renovate-bot/public-datasets-pipelines | d2b5e527d9d2dcc8e01f5209e7b9409dfe2b62a8 | [
"Apache-2.0"
] | 125 | 2021-04-19T20:33:26.000Z | 2022-03-30T21:45:49.000Z | datasets/epa_historical_air_quality/hap_daily_summary/hap_daily_summary_dag.py | renovate-bot/public-datasets-pipelines | d2b5e527d9d2dcc8e01f5209e7b9409dfe2b62a8 | [
"Apache-2.0"
] | 54 | 2021-04-29T23:17:36.000Z | 2022-03-31T05:15:23.000Z | # Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from airflow import DAG
from airflow.providers.cncf.kubernetes.operators import kubernetes_pod
from airflow.providers.google.cloud.transfers import gcs_to_bigquery
# Default arguments applied to every task in the DAG defined below.
default_args = {
    "owner": "Google",
    "depends_on_past": False,
    "start_date": "2021-03-01",
}
# Daily pipeline (01:30 UTC, one run at a time, no backfill) that refreshes
# the EPA hazardous-air-pollutants (HAP) daily-summary table:
# transform the source CSVs in a Kubernetes pod, then load them to BigQuery.
with DAG(
    dag_id="epa_historical_air_quality.hap_daily_summary",
    default_args=default_args,
    max_active_runs=1,
    schedule_interval="30 1 * * *",
    catchup=False,
    default_view="graph",
) as dag:

    # Run CSV transform within kubernetes pod
    transform_csv = kubernetes_pod.KubernetesPodOperator(
        task_id="transform_csv",
        name="hap_daily_summary",
        namespace="composer",
        service_account_name="datasets",
        image_pull_policy="Always",
        image="{{ var.json.epa_historical_air_quality.container_registry.run_csv_transform_kub }}",
        # Environment for the transform container. SOURCE_URL contains the
        # placeholder YEAR_ITERATOR, presumably replaced per year from
        # START_YEAR onward inside the container -- confirm in the image code.
        env_vars={
            "SOURCE_URL": "https://aqs.epa.gov/aqsweb/airdata/daily_HAPS_YEAR_ITERATOR.zip",
            "START_YEAR": "1990",
            "SOURCE_FILE": "files/data.csv",
            "TARGET_FILE": "files/data_output.csv",
            "CHUNKSIZE": "2500000",
            "TARGET_GCS_BUCKET": "{{ var.value.composer_bucket }}",
            "TARGET_GCS_PATH": "data/epa_historical_air_quality/hap_daily_summary/files/data_output.csv",
            "DATA_NAMES": '[ "state_code", "county_code", "site_num", "parameter_code", "poc",\n "latitude", "longitude", "datum", "parameter_name", "sample_duration",\n "pollutant_standard", "date_local", "units_of_measure", "event_type", "observation_count",\n "observation_percent", "arithmetic_mean", "first_max_value", "first_max_hour", "aqi",\n "method_code", "method_name", "local_site_name", "address", "state_name",\n "county_name", "city_name", "cbsa_name", "date_of_last_change" ]',
            "DATA_DTYPES": '{ "state_code": "str", "county_code": "str", "site_num": "str", "parameter_code": "int32", "poc": "int32",\n "latitude": "float64", "longitude": "float64", "datum": "str", "parameter_name": "str", "sample_duration": "str",\n "pollutant_standard": "str", "date_local": "datetime64[ns]", "units_of_measure": "str", "event_type": "str", "observation_count": "int32",\n "observation_percent": "float64", "arithmetic_mean": "float64", "first_max_value": "float64", "first_max_hour": "int32", "aqi": "str",\n "method_code": "str", "method_name": "str", "local_site_name": "str", "address": "str", "state_name": "str",\n "county_name": "str", "city_name": "str", "cbsa_name": "str", "date_of_last_change": "datetime64[ns]" }',
        },
        resources={"limit_memory": "8G", "limit_cpu": "3"},
    )

    # Task to load CSV data to a BigQuery table
    load_to_bq = gcs_to_bigquery.GCSToBigQueryOperator(
        task_id="load_to_bq",
        bucket="{{ var.value.composer_bucket }}",
        source_objects=[
            "data/epa_historical_air_quality/hap_daily_summary/files/data_output.csv"
        ],
        source_format="CSV",
        destination_project_dataset_table="{{ var.json.epa_historical_air_quality.container_registry.hap_daily_summary_destination_table }}",
        skip_leading_rows=1,
        allow_quoted_newlines=True,
        # WRITE_TRUNCATE: every run fully replaces the destination table.
        write_disposition="WRITE_TRUNCATE",
        # Explicit schema for the destination table (EPA AQS daily-summary
        # columns); order matches DATA_NAMES above.
        schema_fields=[
            {
                "name": "state_code",
                "type": "STRING",
                "description": "The FIPS code of the state in which the monitor resides.",
                "mode": "NULLABLE",
            },
            {
                "name": "county_code",
                "type": "STRING",
                "description": "The FIPS code of the county in which the monitor resides.",
                "mode": "NULLABLE",
            },
            {
                "name": "site_num",
                "type": "STRING",
                "description": "A unique number within the county identifying the site.",
                "mode": "NULLABLE",
            },
            {
                "name": "parameter_code",
                "type": "INTEGER",
                "description": "The AQS code corresponding to the parameter measured by the monitor.",
                "mode": "NULLABLE",
            },
            {
                "name": "poc",
                "type": "INTEGER",
                "description": "This is the “Parameter Occurrence Code” used to distinguish different instruments that measure the same parameter at the same site.",
                "mode": "NULLABLE",
            },
            {
                "name": "latitude",
                "type": "FLOAT",
                "description": "The monitoring site’s angular distance north of the equator measured in decimal degrees.",
                "mode": "NULLABLE",
            },
            {
                "name": "longitude",
                "type": "FLOAT",
                "description": "The monitoring site’s angular distance east of the prime meridian measured in decimal degrees.",
                "mode": "NULLABLE",
            },
            {
                "name": "datum",
                "type": "STRING",
                "description": "The Datum associated with the Latitude and Longitude measures.",
                "mode": "NULLABLE",
            },
            {
                "name": "parameter_name",
                "type": "STRING",
                "description": "The name or description assigned in AQS to the parameter measured by the monitor. Parameters may be pollutants or non-pollutants.",
                "mode": "NULLABLE",
            },
            {
                "name": "sample_duration",
                "type": "STRING",
                "description": "The length of time that air passes through the monitoring device before it is analyzed (measured). So, it represents an averaging period in the atmosphere (for example, a 24-hour sample duration draws ambient air over a collection filter for 24 straight hours). For continuous monitors, it can represent an averaging time of many samples (for example, a 1-hour value may be the average of four one-minute samples collected during each quarter of the hour).",
                "mode": "NULLABLE",
            },
            {
                "name": "pollutant_standard",
                "type": "STRING",
                "description": "A description of the ambient air quality standard rules used to aggregate statistics. (See description at beginning of document.)",
                "mode": "NULLABLE",
            },
            {
                "name": "date_local",
                "type": "TIMESTAMP",
                "description": "The calendar date for the summary. All daily summaries are for the local standard day (midnight to midnight) at the monitor.",
                "mode": "NULLABLE",
            },
            {
                "name": "units_of_measure",
                "type": "STRING",
                "description": "The unit of measure for the parameter. QAD always returns data in the standard units for the parameter. Submitters are allowed to report data in any unit and EPA converts to a standard unit so that we may use the data in calculations.",
                "mode": "NULLABLE",
            },
            {
                "name": "event_type",
                "type": "STRING",
                "description": "Indicates whether data measured during exceptional events are included in the summary. A wildfire is an example of an exceptional event; it is something that affects air quality, but the local agency has no control over. No Events means no events occurred. Events Included means events occurred and the data from them is included in the summary. Events Excluded means that events occurred but data form them is excluded from the summary. Concurred Events Excluded means that events occurred but only EPA concurred exclusions are removed from the summary. If an event occurred for the parameter in question, the data will have multiple records for each monitor.",
                "mode": "NULLABLE",
            },
            {
                "name": "observation_count",
                "type": "INTEGER",
                "description": "The number of observations (samples) taken during the day.",
                "mode": "NULLABLE",
            },
            {
                "name": "observation_percent",
                "type": "FLOAT",
                "description": "The percent representing the number of observations taken with respect to the number scheduled to be taken during the day. This is only calculated for monitors where measurements are required (e.g., only certain parameters).",
                "mode": "NULLABLE",
            },
            {
                "name": "arithmetic_mean",
                "type": "FLOAT",
                "description": "The average (arithmetic mean) value for the day.",
                "mode": "NULLABLE",
            },
            {
                "name": "first_max_value",
                "type": "FLOAT",
                "description": "The highest value for the day.",
                "mode": "NULLABLE",
            },
            {
                "name": "first_max_hour",
                "type": "INTEGER",
                "description": "The hour (on a 24-hour clock) when the highest value for the day (the previous field) was taken.",
                "mode": "NULLABLE",
            },
            {
                "name": "aqi",
                "type": "INTEGER",
                "description": "The Air Quality Index for the day for the pollutant, if applicable.",
                "mode": "NULLABLE",
            },
            {
                "name": "method_code",
                "type": "INTEGER",
                "description": "An internal system code indicating the method (processes, equipment, and protocols) used in gathering and measuring the sample. The method name is in the next column.",
                "mode": "NULLABLE",
            },
            {
                "name": "method_name",
                "type": "STRING",
                "description": "A short description of the processes, equipment, and protocols used in gathering and measuring the sample.",
                "mode": "NULLABLE",
            },
            {
                "name": "local_site_name",
                "type": "STRING",
                "description": "The name of the site (if any) given by the State, local, or tribal air pollution control agency that operates it.",
                "mode": "NULLABLE",
            },
            {
                "name": "address",
                "type": "STRING",
                "description": "The approximate street address of the monitoring site.",
                "mode": "NULLABLE",
            },
            {
                "name": "state_name",
                "type": "STRING",
                "description": "The name of the state where the monitoring site is located.",
                "mode": "NULLABLE",
            },
            {
                "name": "county_name",
                "type": "STRING",
                "description": "The name of the county where the monitoring site is located.",
                "mode": "NULLABLE",
            },
            {
                "name": "city_name",
                "type": "STRING",
                "description": "The name of the city where the monitoring site is located. This represents the legal incorporated boundaries of cities and not urban areas.",
                "mode": "NULLABLE",
            },
            {
                "name": "cbsa_name",
                "type": "STRING",
                "description": "The name of the core bases statistical area (metropolitan area) where the monitoring site is located.",
                "mode": "NULLABLE",
            },
            {
                "name": "date_of_last_change",
                "type": "TIMESTAMP",
                "description": "The date the last time any numeric values in this record were updated in the AQS data system.",
                "mode": "NULLABLE",
            },
        ],
    )

    # The transform must finish before the BigQuery load starts.
    transform_csv >> load_to_bq
| 50.931727 | 752 | 0.56316 |
05d20659d1ba0fe067d10256bd34b0331ceead75 | 3,476 | py | Python | bindings/python/ensmallen/datasets/string/azonexushydrophilus.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 5 | 2021-02-17T00:44:45.000Z | 2021-08-09T16:41:47.000Z | bindings/python/ensmallen/datasets/string/azonexushydrophilus.py | AnacletoLAB/ensmallen_graph | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 18 | 2021-01-07T16:47:39.000Z | 2021-08-12T21:51:32.000Z | bindings/python/ensmallen/datasets/string/azonexushydrophilus.py | AnacletoLAB/ensmallen | b2c1b18fb1e5801712852bcc239f239e03076f09 | [
"MIT"
] | 3 | 2021-01-14T02:20:59.000Z | 2021-08-04T19:09:52.000Z | """
This file offers the methods to automatically retrieve the graph Azonexus hydrophilus.
The graph is automatically retrieved from the STRING repository.
References
---------------------
Please cite the following if you use the data:
```bib
@article{szklarczyk2019string,
title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
journal={Nucleic acids research},
volume={47},
number={D1},
pages={D607--D613},
year={2019},
publisher={Oxford University Press}
}
```
"""
from typing import Dict
from ..automatic_graph_retrieval import AutomaticallyRetrievedGraph
from ...ensmallen import Graph # pylint: disable=import-error
def AzonexusHydrophilus(
    directed: bool = False,
    preprocess: bool = True,
    load_nodes: bool = True,
    verbose: int = 2,
    cache: bool = True,
    cache_path: str = "graphs/string",
    version: str = "links.v11.5",
    **additional_graph_kwargs: Dict
) -> Graph:
    """Return new instance of the Azonexus hydrophilus graph.

    The graph is automatically retrieved from the STRING repository.

    Parameters
    -------------------
    directed: bool = False
        Whether to load the graph as directed or undirected.
        By default false.
    preprocess: bool = True
        Whether to preprocess the graph to be loaded in
        optimal time and memory.
    load_nodes: bool = True
        Whether to load the nodes vocabulary or treat the nodes
        simply as a numeric range.
    verbose: int = 2
        Whether to show loading bars during the retrieval and building
        of the graph.
    cache: bool = True
        Whether to use cache, i.e. download files only once
        and preprocess them only once.
    cache_path: str = "graphs/string"
        Where to store the downloaded graphs.
    version: str = "links.v11.5"
        The version of the graph to retrieve.
        The available versions are:
            - homology.v11.5
            - physical.links.v11.5
            - links.v11.5
    additional_graph_kwargs: Dict
        Additional graph kwargs.

    Returns
    -----------------------
    Instance of Azonexus hydrophilus graph.

    References
    ---------------------
    Please cite the following if you use the data:

    ```bib
    @article{szklarczyk2019string,
        title={STRING v11: protein--protein association networks with increased coverage, supporting functional discovery in genome-wide experimental datasets},
        author={Szklarczyk, Damian and Gable, Annika L and Lyon, David and Junge, Alexander and Wyder, Stefan and Huerta-Cepas, Jaime and Simonovic, Milan and Doncheva, Nadezhda T and Morris, John H and Bork, Peer and others},
        journal={Nucleic acids research},
        volume={47},
        number={D1},
        pages={D607--D613},
        year={2019},
        publisher={Oxford University Press}
    }
    ```
    """
    # Build the retriever first, then invoke it: the trailing call is what
    # performs the actual download / preprocessing and returns the Graph.
    retriever = AutomaticallyRetrievedGraph(
        graph_name="AzonexusHydrophilus",
        repository="string",
        version=version,
        directed=directed,
        preprocess=preprocess,
        load_nodes=load_nodes,
        verbose=verbose,
        cache=cache,
        cache_path=cache_path,
        additional_graph_kwargs=additional_graph_kwargs,
    )
    return retriever()
| 33.104762 | 223 | 0.678078 |
99ebacb3d0bfbf6caff9d5fef4c03f8d7b0539ac | 31,720 | py | Python | deploy/env/local/lib/python2.7/site-packages/mercurial-3.1-py2.7-linux-x86_64.egg/mercurial/bundle2.py | wangvictor2012/liuwei | 0a06f8fd56d78162f81f1e7e7def7bfdeb4472e1 | [
"BSD-3-Clause"
] | null | null | null | deploy/env/local/lib/python2.7/site-packages/mercurial-3.1-py2.7-linux-x86_64.egg/mercurial/bundle2.py | wangvictor2012/liuwei | 0a06f8fd56d78162f81f1e7e7def7bfdeb4472e1 | [
"BSD-3-Clause"
] | null | null | null | deploy/env/local/lib/python2.7/site-packages/mercurial-3.1-py2.7-linux-x86_64.egg/mercurial/bundle2.py | wangvictor2012/liuwei | 0a06f8fd56d78162f81f1e7e7def7bfdeb4472e1 | [
"BSD-3-Clause"
] | null | null | null | # bundle2.py - generic container format to transmit arbitrary data.
#
# Copyright 2013 Facebook, Inc.
#
# This software may be used and distributed according to the terms of the
# GNU General Public License version 2 or any later version.
"""Handling of the new bundle2 format
The goal of bundle2 is to act as an atomic packet to transmit a set of
payloads in an application agnostic way. It consists of a sequence of "parts"
that will be handed to and processed by the application layer.
General format architecture
===========================
The format is architected as follows
- magic string
- stream level parameters
- payload parts (any number)
- end of stream marker.
the Binary format
============================
All numbers are unsigned and big-endian.
stream level parameters
------------------------
Binary format is as follow
:params size: (16 bits integer)
The total number of Bytes used by the parameters
:params value: arbitrary number of Bytes
A blob of `params size` containing the serialized version of all stream level
parameters.
The blob contains a space separated list of parameters. Parameters with value
are stored in the form `<name>=<value>`. Both name and value are urlquoted.
Empty name are obviously forbidden.
Name MUST start with a letter. If this first letter is lower case, the
parameter is advisory and can be safely ignored. However when the first
letter is capital, the parameter is mandatory and the bundling process MUST
stop if it is not able to process it.
Stream parameters use a simple textual format for two main reasons:
- Stream level parameters should remain simple and we want to discourage any
crazy usage.
- Textual data allow easy human inspection of a bundle2 header in case of
troubles.
Any Applicative level options MUST go into a bundle2 part instead.
Payload part
------------------------
Binary format is as follow
:header size: (16 bits integer)
The total number of Bytes used by the part headers. When the header is empty
(size = 0) this is interpreted as the end of stream marker.
:header:
The header defines how to interpret the part. It contains two piece of
data: the part type, and the part parameters.
The part type is used to route an application level handler, that can
interpret payload.
Part parameters are passed to the application level handler. They are
meant to convey information that will help the application level object to
interpret the part payload.
The binary format of the header is as follows
:typesize: (one byte)
:parttype: alphanumerical part name
:partid: A 32bits integer (unique in the bundle) that can be used to refer
to this part.
:parameters:
Part's parameter may have arbitrary content, the binary structure is::
<mandatory-count><advisory-count><param-sizes><param-data>
:mandatory-count: 1 byte, number of mandatory parameters
:advisory-count: 1 byte, number of advisory parameters
:param-sizes:
N couple of bytes, where N is the total number of parameters. Each
couple contains (<size-of-key>, <size-of-value) for one parameter.
:param-data:
A blob of bytes from which each parameter key and value can be
retrieved using the list of size couples stored in the previous
field.
Mandatory parameters comes first, then the advisory ones.
Each parameter's key MUST be unique within the part.
:payload:
payload is a series of `<chunksize><chunkdata>`.
`chunksize` is a 32 bits integer, `chunkdata` are plain bytes (as much as
`chunksize` says)` The payload part is concluded by a zero size chunk.
The current implementation always produces either zero or one chunk.
This is an implementation limitation that will ultimately be lifted.
Bundle processing
============================
Each part is processed in order using a "part handler". Handlers are registered
for a certain part type.
The matching of a part to its handler is case insensitive. The case of the
part type is used to know if a part is mandatory or advisory. If the Part type
contains any uppercase char it is considered mandatory. When no handler is
known for a Mandatory part, the process is aborted and an exception is raised.
If the part is advisory and no handler is known, the part is ignored. When the
process is aborted, the full bundle is still read from the stream to keep the
channel usable. But none of the part read from an abort are processed. In the
future, dropping the stream may become an option for channel we do not care to
preserve.
"""
import util
import struct
import urllib
import string
import pushkey
import changegroup, error
from i18n import _
# local aliases for the struct helpers used throughout this module
_pack = struct.pack
_unpack = struct.unpack
# magic string that opens every bundle2 stream
_magicstring = 'HG2X'
# struct formats (big-endian, unsigned) for the binary layout described in
# the module docstring above:
_fstreamparamsize = '>H'  # 16 bits: total size of the stream-level parameters
_fpartheadersize = '>H'  # 16 bits: part header size (0 == end-of-stream marker)
_fparttypesize = '>B'  # 8 bits: length of the part type name
_fpartid = '>I'  # 32 bits: part id, unique within the bundle
_fpayloadsize = '>I'  # 32 bits: size of one payload chunk (0 ends the payload)
_fpartparamcount = '>BB'  # 8+8 bits: mandatory / advisory parameter counts
# preferred size of the payload chunks emitted when streaming a part
preferedchunksize = 4096
def _makefpartparamsizes(nbparams):
"""return a struct format to read part parameter sizes
The number parameters is variable so we need to build that format
dynamically.
"""
return '>'+('BB'*nbparams)
# registry mapping lower-cased part type -> handler function
parthandlermapping = {}

def parthandler(parttype, params=()):
    """decorator that registers a function as a bundle2 part handler

    eg::

        @parthandler('myparttype', ('mandatory', 'param', 'handled'))
        def myparttypehandler(...):
            '''process a part of type "my part".'''
            ...
    """
    def _decorator(func):
        # part types are matched case-insensitively, so register lower-cased
        key = parttype.lower()
        assert key not in parthandlermapping
        parthandlermapping[key] = func
        # record which parameter names this handler understands
        func.params = frozenset(params)
        return func
    return _decorator
class unbundlerecords(object):
    """keep record of what happens during an unbundle

    New records are added using `records.add('cat', obj)`. Where 'cat' is a
    category of record and obj is an arbitrary object.

    `records['cat']` will return all entries of this category 'cat'.

    Iterating on the object itself will yield `('category', obj)` tuples
    for all entries.

    All iterations happen in chronological order.
    """

    def __init__(self):
        self._categories = {}  # category -> list of entries
        self._sequences = []   # chronological (category, entry) pairs
        self._replies = {}     # part id -> nested unbundlerecords

    def add(self, category, entry, inreplyto=None):
        """add a new record of a given category.

        The entry can then be retrieved in the list returned by
        self['category']."""
        bucket = self._categories.setdefault(category, [])
        bucket.append(entry)
        self._sequences.append((category, entry))
        # mirror the record into the sub-records of the part it replies to
        if inreplyto is not None:
            self.getreplies(inreplyto).add(category, entry)

    def getreplies(self, partid):
        """get the subrecords that reply to a specific part"""
        if partid not in self._replies:
            self._replies[partid] = unbundlerecords()
        return self._replies[partid]

    def __getitem__(self, cat):
        return tuple(self._categories.get(cat, ()))

    def __iter__(self):
        return iter(self._sequences)

    def __len__(self):
        return len(self._sequences)

    def __nonzero__(self):
        # py2 truthiness hook: non-empty iff any record was added
        return bool(self._sequences)
class bundleoperation(object):
    """an object that represents a single bundling process

    Its purpose is to carry unbundle-related objects and states.

    A new object should be created at the beginning of each bundle processing.
    The object is to be returned by the processing function.

    The object has very little content now; it will ultimately contain:
    * an access to the repo the bundle is applied to,
    * a ui object,
    * a way to retrieve a transaction to add changes to the repo,
    * a way to record the result of processing each part,
    * a way to construct a bundle response when applicable.
    """

    def __init__(self, repo, transactiongetter):
        # repository the bundle is applied to, plus its ui for output
        self.repo = repo
        self.ui = repo.ui
        # callable producing the transaction used for repository mutations
        self.gettransaction = transactiongetter
        # chronological record of what happened while unbundling
        self.records = unbundlerecords()
        # reply bundler, populated by handlers when a response is expected
        self.reply = None
class TransactionUnavailable(RuntimeError):
    """raised when a transaction is requested but none was made available"""
def _notransaction():
    """default method to get a transaction while processing a bundle

    Raises an exception to highlight the fact that no transaction was
    expected to be created during this bundle processing."""
    raise TransactionUnavailable()
def processbundle(repo, unbundler, transactiongetter=_notransaction):
    """This function processes a bundle, applying effects to/from a repo

    It iterates over each part then searches for and uses the proper handling
    code to process the part. Parts are processed in order.

    Handlers are looked up by lower-cased part type; a part type containing
    any upper-case character is mandatory, so an unknown mandatory part
    aborts the process while unknown advisory parts are skipped. On abort
    the remaining parts are still read so the channel stays usable.

    This is a very early version of this function that will be strongly
    reworked before final usage.
    """
    op = bundleoperation(repo, transactiongetter)
    # todo:
    # - replace this is a init function soon.
    # - exception catching
    unbundler.params
    iterparts = unbundler.iterparts()
    part = None
    try:
        for part in iterparts:
            parttype = part.type
            # part key are matched lower case
            key = parttype.lower()
            try:
                handler = parthandlermapping.get(key)
                if handler is None:
                    raise error.BundleValueError(parttype=key)
                op.ui.debug('found a handler for part %r\n' % parttype)
                # reject parts carrying mandatory parameters the handler
                # does not know about
                unknownparams = part.mandatorykeys - handler.params
                if unknownparams:
                    unknownparams = list(unknownparams)
                    unknownparams.sort()
                    raise error.BundleValueError(parttype=key,
                                                 params=unknownparams)
            except error.BundleValueError, exc:
                if key != parttype: # mandatory parts
                    raise
                op.ui.debug('ignoring unsupported advisory part %s\n' % exc)
                # consuming the part
                part.read()
                continue

            # handler is called outside the above try block so that we don't
            # risk catching KeyErrors from anything other than the
            # parthandlermapping lookup (any KeyError raised by handler()
            # itself represents a defect of a different variety).
            output = None
            if op.reply is not None:
                op.ui.pushbuffer(error=True)
                output = ''
            try:
                handler(op, part)
            finally:
                # capture any ui output produced by the handler so it can be
                # forwarded to the client in the reply bundle
                if output is not None:
                    output = op.ui.popbuffer()
            if output:
                outpart = op.reply.newpart('b2x:output', data=output)
                outpart.addparam('in-reply-to', str(part.id), mandatory=False)
            part.read()
    except Exception, exc:
        if part is not None:
            # consume the bundle content
            part.read()
        for part in iterparts:
            # consume the bundle content
            part.read()
        # Small hack to let caller code distinguish exceptions from bundle2
        # processing from the ones from bundle1 processing. This is mostly
        # needed to handle different return codes to unbundle according to the
        # type of bundle. We should probably clean up or drop this return code
        # craziness in a future version.
        exc.duringunbundle2 = True
        raise
    return op
def decodecaps(blob):
    """Decode a bundle2 capabilities bytes blob into a dictionary.

    The blob is a list of capabilities (one per line). A capability may
    carry values using a line of the form::

        capability=value1,value2,value3

    Every capability maps to a (possibly empty) list of values.
    """
    capabilities = {}
    for rawline in blob.splitlines():
        if not rawline:
            continue
        # a line without '=' is a capability carrying no value
        if '=' in rawline:
            rawkey, rawvals = rawline.split('=', 1)
            values = [urllib.unquote(v) for v in rawvals.split(',')]
        else:
            rawkey, values = rawline, []
        capabilities[urllib.unquote(rawkey)] = values
    return capabilities
def encodecaps(caps):
    """Encode a bundle2 capabilities dictionary into a bytes blob.

    One capability per line, sorted by name; values, when present, are
    attached as a comma separated list after '=' (inverse of ``decodecaps``).
    """
    lines = []
    for name in sorted(caps):
        encodedname = urllib.quote(name)
        encodedvals = [urllib.quote(value) for value in caps[name]]
        if encodedvals:
            lines.append("%s=%s" % (encodedname, ','.join(encodedvals)))
        else:
            lines.append(encodedname)
    return '\n'.join(lines)
class bundle20(object):
    """represent an outgoing bundle2 container
    Use the `addparam` method to add stream level parameter. and `newpart` to
    populate it. Then call `getchunks` to retrieve all the binary chunks of
    data that compose the bundle2 container."""
    def __init__(self, ui, capabilities=()):
        # ui is only used for debug output; capabilities is an iterable of
        # (name, values) pairs advertised alongside the bundle.
        self.ui = ui
        self._params = []
        self._parts = []
        self.capabilities = dict(capabilities)
    @property
    def nbparts(self):
        """total number of parts added to the bundler"""
        return len(self._parts)
    # methods used to defines the bundle2 content
    def addparam(self, name, value=None):
        """add a stream level parameter"""
        if not name:
            raise ValueError('empty parameter name')
        # the format requires parameter names to start with a letter
        if name[0] not in string.letters:
            raise ValueError('non letter first character: %r' % name)
        self._params.append((name, value))
    def addpart(self, part):
        """add a new part to the bundle2 container
        Parts contains the actual applicative payload."""
        # a part may only belong to one bundle: its id must still be unset
        assert part.id is None
        part.id = len(self._parts) # very cheap counter
        self._parts.append(part)
    def newpart(self, typeid, *args, **kwargs):
        """create a new part and add it to the containers
        As the part is directly added to the containers. For now, this means
        that any failure to properly initialize the part after calling
        ``newpart`` should result in a failure of the whole bundling process.
        You can still fall back to manually create and add if you need better
        control."""
        part = bundlepart(typeid, *args, **kwargs)
        self.addpart(part)
        return part
    # methods used to generate the bundle2 stream
    def getchunks(self):
        # emission order: magic string, stream parameter block (length
        # prefixed), each part's chunks, then a '\0\0' terminator which
        # reads as an empty part header on the receiving side.
        self.ui.debug('start emission of %s stream\n' % _magicstring)
        yield _magicstring
        param = self._paramchunk()
        self.ui.debug('bundle parameter: %s\n' % param)
        yield _pack(_fstreamparamsize, len(param))
        if param:
            yield param
        self.ui.debug('start of parts\n')
        for part in self._parts:
            self.ui.debug('bundle part: "%s"\n' % part.type)
            for chunk in part.getchunks():
                yield chunk
        self.ui.debug('end of bundle\n')
        yield '\0\0'
    def _paramchunk(self):
        """return a encoded version of all stream parameters"""
        # parameters are url-quoted, joined by spaces; valueless parameters
        # are emitted as a bare name (see unbundle20.params for the decoder)
        blocks = []
        for par, value in self._params:
            par = urllib.quote(par)
            if value is not None:
                value = urllib.quote(value)
                par = '%s=%s' % (par, value)
            blocks.append(par)
        return ' '.join(blocks)
class unpackermixin(object):
    """A mixin to extract bytes and struct data from a stream"""
    def __init__(self, fp):
        # fp: any file-like object providing read()
        self._fp = fp
    def _unpack(self, format):
        """unpack this struct format from the stream"""
        # read exactly as many bytes as the struct format requires
        data = self._readexact(struct.calcsize(format))
        return _unpack(format, data)
    def _readexact(self, size):
        """read exactly <size> bytes from the stream"""
        # delegates to changegroup.readexactly (presumably errors out on a
        # short read rather than returning truncated data -- confirm there)
        return changegroup.readexactly(self._fp, size)
class unbundle20(unpackermixin):
    """interpret a bundle2 stream
    This class is fed with a binary stream and yields parts through its
    `iterparts` methods."""
    def __init__(self, ui, fp, header=None):
        """If header is specified, we do not read it out of the stream."""
        self.ui = ui
        super(unbundle20, self).__init__(fp)
        if header is None:
            # header is 4 bytes: 2 bytes of magic + 2 bytes of version
            header = self._readexact(4)
        magic, version = header[0:2], header[2:4]
        if magic != 'HG':
            raise util.Abort(_('not a Mercurial bundle'))
        if version != '2X':
            raise util.Abort(_('unknown bundle version %s') % version)
        self.ui.debug('start processing of %s stream\n' % header)
    @util.propertycache
    def params(self):
        """dictionary of stream level parameters"""
        # lazily read (and cache) the space separated, url-quoted parameter
        # block produced by bundle20._paramchunk
        self.ui.debug('reading bundle2 stream parameters\n')
        params = {}
        paramssize = self._unpack(_fstreamparamsize)[0]
        if paramssize:
            for p in self._readexact(paramssize).split(' '):
                p = p.split('=', 1)
                p = [urllib.unquote(i) for i in p]
                # a bare name (no '=') decodes to a None value
                if len(p) < 2:
                    p.append(None)
                self._processparam(*p)
                params[p[0]] = p[1]
        return params
    def _processparam(self, name, value):
        """process a parameter, applying its effect if needed
        Parameter starting with a lower case letter are advisory and will be
        ignored when unknown. Those starting with an upper case letter are
        mandatory and will this function will raise a KeyError when unknown.
        Note: no option are currently supported. Any input will be either
        ignored or failing.
        """
        if not name:
            raise ValueError('empty parameter name')
        if name[0] not in string.letters:
            raise ValueError('non letter first character: %r' % name)
        # Some logic will be later added here to try to process the option for
        # a dict of known parameter.
        if name[0].islower():
            self.ui.debug("ignoring unknown parameter %r\n" % name)
        else:
            # upper-case-initial parameters are mandatory: refuse the bundle
            raise error.BundleValueError(params=(name,))
    def iterparts(self):
        """yield all parts contained in the stream"""
        # make sure param have been loaded
        self.params
        self.ui.debug('start extraction of bundle2 parts\n')
        headerblock = self._readpartheader()
        # an empty header (size 0, i.e. the '\0\0' terminator) ends the stream
        while headerblock is not None:
            part = unbundlepart(self.ui, headerblock, self._fp)
            yield part
            headerblock = self._readpartheader()
        self.ui.debug('end of bundle2 stream\n')
    def _readpartheader(self):
        """reads a part header size and return the bytes blob
        returns None if empty"""
        headersize = self._unpack(_fpartheadersize)[0]
        self.ui.debug('part header size: %i\n' % headersize)
        if headersize:
            return self._readexact(headersize)
        return None
class bundlepart(object):
    """A bundle2 part contains application level payload
    The part `type` is used to route the part to the application level
    handler.
    The part payload is contained in ``part.data``. It could be raw bytes or a
    generator of byte chunks.
    You can add parameters to the part using the ``addparam`` method.
    Parameters can be either mandatory (default) or advisory. Remote side
    should be able to safely ignore the advisory ones.
    Both data and parameters cannot be modified after the generation has begun.
    """
    def __init__(self, parttype, mandatoryparams=(), advisoryparams=(),
                 data=''):
        # id is assigned by bundle20.addpart when the part joins a bundle
        self.id = None
        self.type = parttype
        self._data = data
        self._mandatoryparams = list(mandatoryparams)
        self._advisoryparams = list(advisoryparams)
        # checking for duplicated entries
        self._seenparams = set()
        for pname, __ in self._mandatoryparams + self._advisoryparams:
            if pname in self._seenparams:
                raise RuntimeError('duplicated params: %s' % pname)
            self._seenparams.add(pname)
        # status of the part's generation:
        # - None: not started,
        # - False: currently generated,
        # - True: generation done.
        self._generated = None
    # methods used to defines the part content
    def __setdata(self, data):
        # payload becomes immutable once generation has started
        if self._generated is not None:
            raise error.ReadOnlyPartError('part is being generated')
        self._data = data
    def __getdata(self):
        return self._data
    data = property(__getdata, __setdata)
    @property
    def mandatoryparams(self):
        # make it an immutable tuple to force people through ``addparam``
        return tuple(self._mandatoryparams)
    @property
    def advisoryparams(self):
        # make it an immutable tuple to force people through ``addparam``
        return tuple(self._advisoryparams)
    def addparam(self, name, value='', mandatory=True):
        # add a part parameter; advisory parameters may be safely ignored
        # by the remote side, mandatory ones may not
        if self._generated is not None:
            raise error.ReadOnlyPartError('part is being generated')
        if name in self._seenparams:
            raise ValueError('duplicated params: %s' % name)
        self._seenparams.add(name)
        params = self._advisoryparams
        if mandatory:
            params = self._mandatoryparams
        params.append((name, value))
    # methods used to generates the bundle2 stream
    def getchunks(self):
        """yield the binary representation of the part (header then payload)

        Single-shot generator: consuming it flips ``_generated`` so the part
        cannot be emitted (or modified) again.
        """
        if self._generated is not None:
            raise RuntimeError('part can only be consumed once')
        self._generated = False
        #### header
        ## parttype
        header = [_pack(_fparttypesize, len(self.type)),
                  self.type, _pack(_fpartid, self.id),
                 ]
        ## parameters
        # count
        manpar = self.mandatoryparams
        advpar = self.advisoryparams
        header.append(_pack(_fpartparamcount, len(manpar), len(advpar)))
        # size
        parsizes = []
        for key, value in manpar:
            parsizes.append(len(key))
            parsizes.append(len(value))
        for key, value in advpar:
            parsizes.append(len(key))
            parsizes.append(len(value))
        # each parameter contributes two sizes (key, value); '/' here is
        # Python 2 integer division (this file uses py2 except syntax)
        paramsizes = _pack(_makefpartparamsizes(len(parsizes) / 2), *parsizes)
        header.append(paramsizes)
        # key, value
        for key, value in manpar:
            header.append(key)
            header.append(value)
        for key, value in advpar:
            header.append(key)
            header.append(value)
        ## finalize header
        headerchunk = ''.join(header)
        yield _pack(_fpartheadersize, len(headerchunk))
        yield headerchunk
        ## payload
        # payload is a sequence of length-prefixed chunks, terminated by a
        # zero-size chunk
        for chunk in self._payloadchunks():
            yield _pack(_fpayloadsize, len(chunk))
            yield chunk
        # end of payload
        yield _pack(_fpayloadsize, 0)
        self._generated = True
    def _payloadchunks(self):
        """yield chunks of a the part payload
        Exists to handle the different methods to provide data to a part."""
        # we only support fixed size data now.
        # This will be improved in the future.
        if util.safehasattr(self.data, 'next'):
            # data is a generator of byte chunks: rechunk it to the
            # preferred size through a chunkbuffer
            buff = util.chunkbuffer(self.data)
            chunk = buff.read(preferedchunksize)
            while chunk:
                yield chunk
                chunk = buff.read(preferedchunksize)
        elif len(self.data):
            # raw bytes: emit as a single chunk (nothing for empty data)
            yield self.data
class unbundlepart(unpackermixin):
    """a bundle part read from a bundle"""
    def __init__(self, ui, header, fp):
        # header: the raw part header blob (already read from the stream);
        # fp: the stream positioned at the beginning of the payload
        super(unbundlepart, self).__init__(fp)
        self.ui = ui
        # unbundle state attr
        self._headerdata = header
        self._headeroffset = 0
        self._initialized = False
        self.consumed = False
        # part data
        self.id = None
        self.type = None
        self.mandatoryparams = None
        self.advisoryparams = None
        self.params = None
        self.mandatorykeys = ()
        self._payloadstream = None
        self._readheader()
    def _fromheader(self, size):
        """return the next <size> byte from the header"""
        offset = self._headeroffset
        data = self._headerdata[offset:(offset + size)]
        self._headeroffset = offset + size
        return data
    def _unpackheader(self, format):
        """read given format from header
        This automatically compute the size of the format to read."""
        data = self._fromheader(struct.calcsize(format))
        return _unpack(format, data)
    def _initparams(self, mandatoryparams, advisoryparams):
        """internal function to setup all logic related parameters"""
        # make it read only to prevent people touching it by mistake.
        self.mandatoryparams = tuple(mandatoryparams)
        self.advisoryparams = tuple(advisoryparams)
        # user friendly UI
        # merged dict: advisory values overwrite mandatory ones on key clash
        self.params = dict(self.mandatoryparams)
        self.params.update(dict(self.advisoryparams))
        self.mandatorykeys = frozenset(p[0] for p in mandatoryparams)
    def _readheader(self):
        """read the header and setup the object"""
        # mirrors the layout produced by bundlepart.getchunks: type, id,
        # parameter counts, parameter sizes, then parameter key/value bytes
        typesize = self._unpackheader(_fparttypesize)[0]
        self.type = self._fromheader(typesize)
        self.ui.debug('part type: "%s"\n' % self.type)
        self.id = self._unpackheader(_fpartid)[0]
        self.ui.debug('part id: "%s"\n' % self.id)
        ## reading parameters
        # param count
        mancount, advcount = self._unpackheader(_fpartparamcount)
        self.ui.debug('part parameters: %i\n' % (mancount + advcount))
        # param size
        fparamsizes = _makefpartparamsizes(mancount + advcount)
        paramsizes = self._unpackheader(fparamsizes)
        # make it a list of couple again
        paramsizes = zip(paramsizes[::2], paramsizes[1::2])
        # split mandatory from advisory
        mansizes = paramsizes[:mancount]
        advsizes = paramsizes[mancount:]
        # retrive param value
        manparams = []
        for key, value in mansizes:
            manparams.append((self._fromheader(key), self._fromheader(value)))
        advparams = []
        for key, value in advsizes:
            advparams.append((self._fromheader(key), self._fromheader(value)))
        self._initparams(manparams, advparams)
        ## part payload
        def payloadchunks():
            # payload is a sequence of length-prefixed chunks; a zero size
            # marks the end of the payload
            payloadsize = self._unpack(_fpayloadsize)[0]
            self.ui.debug('payload chunk size: %i\n' % payloadsize)
            while payloadsize:
                yield self._readexact(payloadsize)
                payloadsize = self._unpack(_fpayloadsize)[0]
                self.ui.debug('payload chunk size: %i\n' % payloadsize)
        self._payloadstream = util.chunkbuffer(payloadchunks())
        # we read the data, tell it
        self._initialized = True
    def read(self, size=None):
        """read payload data"""
        if not self._initialized:
            self._readheader()
        if size is None:
            data = self._payloadstream.read()
        else:
            data = self._payloadstream.read(size)
        # a short (or unbounded) read means the payload has been drained
        if size is None or len(data) < size:
            self.consumed = True
        return data
def bundle2caps(remote):
    """return the bundlecapabilities of a peer as dict

    :param remote: a peer object offering the ``capable(name)`` API.
    :returns: the decoded capabilities dictionary, or an empty dict when the
        peer does not advertise the ``bundle2-exp`` capability.
    """
    raw = remote.capable('bundle2-exp')
    # ``capable`` returning a false value that is not the empty string means
    # the capability is absent; an empty string still counts as advertised.
    if not raw and raw != '':
        return {}
    # reuse the value already fetched instead of querying the peer a second
    # time (the duplicate ``remote.capable`` call was redundant and could
    # trigger an extra round-trip).
    capsblob = urllib.unquote(raw)
    return decodecaps(capsblob)
@parthandler('b2x:changegroup')
def handlechangegroup(op, inpart):
    """apply a changegroup part on the repo
    This is a very early implementation that will massive rework before being
    inflicted to any end-user.
    """
    # Make sure we trigger a transaction creation
    #
    # The addchangegroup function will get a transaction object by itself, but
    # we need to make sure we trigger the creation of a transaction object used
    # for the whole processing scope.
    op.gettransaction()
    cg = changegroup.unbundle10(inpart, 'UN')
    ret = changegroup.addchangegroup(op.repo, cg, 'bundle2', 'bundle2')
    # record the return value so callers of processbundle can inspect it
    op.records.add('changegroup', {'return': ret})
    if op.reply is not None:
        # This is definitly not the final form of this
        # return. But one need to start somewhere.
        part = op.reply.newpart('b2x:reply:changegroup')
        part.addparam('in-reply-to', str(inpart.id), mandatory=False)
        part.addparam('return', '%i' % ret, mandatory=False)
    # the changegroup unbundler is expected to have consumed the full payload
    assert not inpart.read()
@parthandler('b2x:reply:changegroup', ('return', 'in-reply-to'))
def handlechangegroup(op, inpart):
    """record the changegroup return code carried by a reply part"""
    replyparams = inpart.params
    returncode = int(replyparams['return'])
    originalpartid = int(replyparams['in-reply-to'])
    op.records.add('changegroup', {'return': returncode}, originalpartid)
@parthandler('b2x:check:heads')
def handlechangegroup(op, inpart):
    """check that head of the repo did not change
    This is used to detect a push race when using unbundle.
    This replaces the "heads" argument of unbundle."""
    # the payload is a flat sequence of 20-byte binary node ids
    h = inpart.read(20)
    heads = []
    while len(h) == 20:
        heads.append(h)
        h = inpart.read(20)
    # a trailing partial read would mean a malformed part
    assert not h
    if heads != op.repo.heads():
        raise error.PushRaced('repository changed while pushing - '
                              'please try again')
@parthandler('b2x:output')
def handleoutput(op, inpart):
    """forward output captured on the server to the client"""
    payload = inpart.read()
    for outputline in payload.splitlines():
        op.ui.write('remote: %s\n' % outputline)
@parthandler('b2x:replycaps')
def handlereplycaps(op, inpart):
    """Notify that a reply bundle should be created
    The payload contains the capabilities information for the reply"""
    # decode the capabilities the remote advertises for the reply bundle
    caps = decodecaps(inpart.read())
    # only create the reply bundle the first time such a part is seen
    if op.reply is None:
        op.reply = bundle20(op.ui, caps)
@parthandler('b2x:error:abort', ('message', 'hint'))
def handlereplycaps(op, inpart):
    """Used to transmit abort error over the wire"""
    # re-raise the remote abort locally; the hint parameter is optional
    abortmessage = inpart.params['message']
    aborthint = inpart.params.get('hint')
    raise util.Abort(abortmessage, hint=aborthint)
@parthandler('b2x:error:unsupportedcontent', ('parttype', 'params'))
def handlereplycaps(op, inpart):
    """Used to transmit unknown content error over the wire"""
    # rebuild the keyword arguments of the original error, skipping any
    # piece of information the remote side did not provide
    errorargs = {}
    remoteparttype = inpart.params.get('parttype')
    remoteparams = inpart.params.get('params')
    if remoteparttype is not None:
        errorargs['parttype'] = remoteparttype
    if remoteparams is not None:
        # parameter names travel as a single null-separated string
        errorargs['params'] = remoteparams.split('\0')
    raise error.BundleValueError(**errorargs)
@parthandler('b2x:error:pushraced', ('message',))
def handlereplycaps(op, inpart):
    """Used to transmit push race error over the wire"""
    # surface the remote push race as a local response error
    racemessage = inpart.params['message']
    raise error.ResponseError(_('push failed:'), racemessage)
@parthandler('b2x:listkeys', ('namespace',))
def handlelistkeys(op, inpart):
    """retrieve pushkey namespace content stored in a bundle2"""
    targetnamespace = inpart.params['namespace']
    decodedkeys = pushkey.decodekeys(inpart.read())
    op.records.add('listkeys', (targetnamespace, decodedkeys))
@parthandler('b2x:pushkey', ('namespace', 'key', 'old', 'new'))
def handlepushkey(op, inpart):
    """process a pushkey request"""
    # all four parameters travel encoded; decode them before use
    dec = pushkey.decode
    namespace = dec(inpart.params['namespace'])
    key = dec(inpart.params['key'])
    old = dec(inpart.params['old'])
    new = dec(inpart.params['new'])
    # apply the pushkey operation on the local repository
    ret = op.repo.pushkey(namespace, key, old, new)
    record = {'namespace': namespace,
              'key': key,
              'old': old,
              'new': new}
    op.records.add('pushkey', record)
    if op.reply is not None:
        # forward the integer return code back to the initiator
        rpart = op.reply.newpart('b2x:reply:pushkey')
        rpart.addparam('in-reply-to', str(inpart.id), mandatory=False)
        rpart.addparam('return', '%i' % ret, mandatory=False)
@parthandler('b2x:reply:pushkey', ('return', 'in-reply-to'))
def handlepushkeyreply(op, inpart):
    """retrieve the result of a pushkey request"""
    # attach the remote return code to the record of the original part
    replyparams = inpart.params
    op.records.add('pushkey',
                   {'return': int(replyparams['return'])},
                   int(replyparams['in-reply-to']))
| 35.166297 | 79 | 0.634584 |
a713d920a03af3c7aa4eefec6f59eb3ce490ab2b | 8,569 | py | Python | backend/api/tests/test_user_history.py | amichard/tfrs | ed3973016cc5c2ae48999d550a23b41a5ddad807 | [
"Apache-2.0"
] | 18 | 2017-05-10T21:55:11.000Z | 2021-03-01T16:41:32.000Z | backend/api/tests/test_user_history.py | amichard/tfrs | ed3973016cc5c2ae48999d550a23b41a5ddad807 | [
"Apache-2.0"
] | 1,167 | 2017-03-04T00:18:43.000Z | 2022-03-03T22:31:51.000Z | backend/api/tests/test_user_history.py | amichard/tfrs | ed3973016cc5c2ae48999d550a23b41a5ddad807 | [
"Apache-2.0"
] | 48 | 2017-03-09T17:19:39.000Z | 2022-02-24T16:38:17.000Z | """
REST API Documentation for the NRS TFRS Credit Trading Application
The Transportation Fuels Reporting System is being designed to streamline
compliance reporting for transportation fuel suppliers in accordance with
the Renewable & Low Carbon Fuel Requirements Regulation.
OpenAPI spec version: v1
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import datetime
import json
from rest_framework import status
from api.models.CreditTrade import CreditTrade
from api.tests.base_test_case import BaseTestCase
class TestUserHistory(BaseTestCase):
"""
This will test the user history and make sure that depending on the
user role, the user can only see certain activity
"""
def setUp(self):
super().setUp()
payload = {
'fairMarketValuePerCredit': '10.00',
'initiator': self.users['fs_user_1'].organization_id,
'numberOfCredits': 1,
'respondent': self.users['fs_user_2'].organization_id,
'status': self.statuses['submitted'].id,
'tradeEffectiveDate': datetime.datetime.today().strftime(
'%Y-%m-%d'
),
'type': self.credit_trade_types['sell'].id
}
# Propose a trade
self.clients['fs_user_1'].post(
'/api/credit_trades',
content_type='application/json',
data=json.dumps(payload))
# Accept a proposal
credit_trade = CreditTrade.objects.create(
status=self.statuses['submitted'],
initiator=self.users['fs_user_2'].organization,
respondent=self.users['fs_user_1'].organization,
type=self.credit_trade_types['sell'],
number_of_credits=10,
fair_market_value_per_credit=1,
zero_reason=None,
trade_effective_date=datetime.datetime.today().strftime(
'%Y-%m-%d'
)
)
payload = {
'fairMarketValuePerCredit':
credit_trade.fair_market_value_per_credit,
'initiator': credit_trade.initiator.id,
'numberOfCredits': credit_trade.number_of_credits,
'respondent': credit_trade.respondent.id,
'status': self.statuses['accepted'].id,
'tradeEffectiveDate': credit_trade.trade_effective_date,
'type': credit_trade.type.id
}
self.clients['fs_user_1'].put(
'/api/credit_trades/{}'.format(
credit_trade.id
),
content_type='application/json',
data=json.dumps(payload))
# After accepting let the government user recommend this
payload = {
'fairMarketValuePerCredit':
credit_trade.fair_market_value_per_credit,
'initiator': credit_trade.initiator.id,
'numberOfCredits': credit_trade.number_of_credits,
'respondent': credit_trade.respondent.id,
'status': self.statuses['recommended'].id,
'tradeEffectiveDate': credit_trade.trade_effective_date,
'type': credit_trade.type.id
}
self.clients['gov_director'].put(
'/api/credit_trades/{}'.format(
credit_trade.id
),
content_type='application/json',
data=json.dumps(payload))
# Rescind the proposal at this point
payload = {
'fairMarketValuePerCredit':
credit_trade.fair_market_value_per_credit,
'initiator': credit_trade.initiator.id,
'is_rescinded': True,
'numberOfCredits': credit_trade.number_of_credits,
'respondent': credit_trade.respondent.id,
'status': credit_trade.status.id,
'tradeEffectiveDate': credit_trade.trade_effective_date,
'type': credit_trade.type.id
}
self.clients['fs_user_1'].put(
'/api/credit_trades/{}'.format(
credit_trade.id
),
content_type='application/json',
data=json.dumps(payload))
# Refuse a proposal
credit_trade = CreditTrade.objects.create(
status=self.statuses['submitted'],
initiator=self.users['fs_user_2'].organization,
respondent=self.users['fs_user_1'].organization,
type=self.credit_trade_types['sell'],
number_of_credits=10,
fair_market_value_per_credit=1,
zero_reason=None,
trade_effective_date=datetime.datetime.today().strftime(
'%Y-%m-%d'
)
)
payload = {
'fairMarketValuePerCredit':
credit_trade.fair_market_value_per_credit,
'initiator': credit_trade.initiator.id,
'numberOfCredits': credit_trade.number_of_credits,
'respondent': credit_trade.respondent.id,
'status': self.statuses['refused'].id,
'tradeEffectiveDate': credit_trade.trade_effective_date,
'type': credit_trade.type.id
}
self.clients['fs_user_1'].put(
'/api/credit_trades/{}'.format(
credit_trade.id
),
content_type='application/json',
data=json.dumps(payload))
def test_user_history_as_fuel_supplier(self):
"""
As a fuel supplier, I should the activities I was involved with:
I should see Accepted, Refused, Submitted and Rescinded proposals
"""
response = self.clients['fs_user_1'].get(
'/api/users/{}/history'.format(
self.users['fs_user_1']
)
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
history = response.json()
for activity in history:
correct_view = False
credit_trade = CreditTrade.objects.get(
id=activity['objectId']
)
# make sure that the status is correct and we don't see anything
# that's not submitted, accepted or refused
# unless it's rescinded (even if it's rescinded we shouldn't see
# the recommended or not recommended status)
if activity['status']['id'] == self.statuses['submitted'].id or \
activity['status']['id'] == self.statuses['accepted'].id or \
activity['status']['id'] == self.statuses['refused'].id or \
activity['status'].status == 'Rescinded':
# make sure we don't see any entries that our organization is
# not a part of
if credit_trade.initiator.id == \
self.users['fs_user_1'].organization.id or \
credit_trade.respondent.id == \
self.users['fs_user_1'].organization.id:
correct_view = True
self.assertTrue(correct_view)
def test_user_history_as_government_user(self):
"""
As a government admin, I can view the activities of a person:
I should not see submitted and refused proposals
"""
response = self.clients['gov_admin'].get(
'/api/users/{}/history'.format(
self.users['fs_user_1']
)
)
self.assertEqual(response.status_code, status.HTTP_200_OK)
history = response.json()
for activity in history:
correct_view = False
# make sure that the status is correct and we don't see anything
# that's not submitted, accepted or refused
# unless it's rescinded (even if it's rescinded we shouldn't see
# the recommended or not recommended status)
if activity['status']['id'] == self.statuses['not_recommended'].id or \
activity['status']['id'] == self.statuses['recommended'].id or \
activity['status']['id'] == self.statuses['accepted'].id:
correct_view = True
self.assertTrue(correct_view)
| 38.084444 | 83 | 0.596452 |
82fd227178f7b5f1b8a383637009fd8762dd98b1 | 2,359 | py | Python | docs/conf.py | gfavre/invoice-manager | 2a1db22edd51b461c090282c6fc1f290f3265379 | [
"MIT"
] | 1 | 2021-11-27T06:40:34.000Z | 2021-11-27T06:40:34.000Z | docs/conf.py | gfavre/invoice-manager | 2a1db22edd51b461c090282c6fc1f290f3265379 | [
"MIT"
] | 2 | 2021-05-13T04:50:50.000Z | 2022-02-28T21:06:24.000Z | docs/conf.py | gfavre/invoice-manager | 2a1db22edd51b461c090282c6fc1f290f3265379 | [
"MIT"
] | null | null | null | # Configuration file for the Sphinx documentation builder.
#
# This file only contains a selection of the most common options. For a full
# list see the documentation:
# https://www.sphinx-doc.org/en/master/usage/configuration.html
# -- Path setup --------------------------------------------------------------
# If extensions (or modules to document with autodoc) are in another directory,
# add these directories to sys.path here. If the directory is relative to the
# documentation root, use os.path.abspath to make it absolute, like shown here.
import os
import sys
import django
if os.getenv("READTHEDOCS", default=False) == "True":
sys.path.insert(0, os.path.abspath(".."))
os.environ["DJANGO_READ_DOT_ENV_FILE"] = "True"
os.environ["USE_DOCKER"] = "no"
else:
sys.path.insert(0, os.path.abspath("/app"))
os.environ["DATABASE_URL"] = "sqlite:///readthedocs.db"
os.environ["CELERY_BROKER_URL"] = os.getenv("REDIS_URL", "redis://redis:6379")
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.local")
django.setup()
# -- Project information -----------------------------------------------------
project = "beyondtheadmin"
copyright = """2021, Gregory Favre"""
author = "Gregory Favre"
# -- General configuration ---------------------------------------------------
# Add any Sphinx extension module names here, as strings. They can be
# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom
# ones.
extensions = [
"sphinx.ext.autodoc",
"sphinx.ext.napoleon",
]
# Add any paths that contain templates here, relative to this directory.
# templates_path = ["_templates"]
# List of patterns, relative to source directory, that match files and
# directories to ignore when looking for source files.
# This pattern also affects html_static_path and html_extra_path.
exclude_patterns = ["_build", "Thumbs.db", ".DS_Store"]
# -- Options for HTML output -------------------------------------------------
# The theme to use for HTML and HTML Help pages. See the documentation for
# a list of builtin themes.
#
html_theme = "alabaster"
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
# html_static_path = ["_static"]
| 35.742424 | 79 | 0.668928 |
2190cd1311d488015eef0658c83fcfce6fa03413 | 195 | py | Python | 7_kyu/BAD_Hash_String_to_Int.py | UlrichBerntien/Codewars-Katas | bbd025e67aa352d313564d3862db19fffa39f552 | [
"MIT"
] | null | null | null | 7_kyu/BAD_Hash_String_to_Int.py | UlrichBerntien/Codewars-Katas | bbd025e67aa352d313564d3862db19fffa39f552 | [
"MIT"
] | null | null | null | 7_kyu/BAD_Hash_String_to_Int.py | UlrichBerntien/Codewars-Katas | bbd025e67aa352d313564d3862db19fffa39f552 | [
"MIT"
] | null | null | null | def string_hash(s: str) -> int:
a = sum( ord(c) for c in s )
n = s.count(' ')
b = ord(s[-1])-ord(s[0]) if s else 0
c = (a | b) & (~a << 2)
d = c ^ ( (n+1) << 5 )
return d | 27.857143 | 40 | 0.405128 |
7f3573eac6d6722e642804cdbf683c273d4a602b | 3,122 | py | Python | minkit/utils/splot.py | mramospe/minkit | fa6808a6ca8063751da92f683f2b810a0690a462 | [
"MIT-0"
] | null | null | null | minkit/utils/splot.py | mramospe/minkit | fa6808a6ca8063751da92f683f2b810a0690a462 | [
"MIT-0"
] | 5 | 2020-01-30T18:30:33.000Z | 2020-11-10T09:13:47.000Z | minkit/utils/splot.py | mramospe/minkit | fa6808a6ca8063751da92f683f2b810a0690a462 | [
"MIT-0"
] | 1 | 2020-02-03T22:59:00.000Z | 2020-02-03T22:59:00.000Z | ########################################
# MIT License
#
# Copyright (c) 2020 Miguel Ramos Pernas
########################################
'''
Functions and classes related to the s-plot technique.
More information at: https://arxiv.org/abs/physics/0402083
'''
from ..base import parameters
from ..base import data_types
import numpy as np
__all__ = ['sweights', 'sweights_u']
def sweights(pdfs, yields, data, range=parameters.FULL, return_covariance=False):
    '''
    Calculate the s-weights for the different provided species.

    :param pdfs: registry of PDFs to use.
    :type pdfs: Registry
    :param yields: yields of the PDFs.
    :type yields: Registry
    :param data: data to evaluate.
    :type data: DataSet
    :param range: range to consider for evaluating the PDFs.
    :type range: str
    :param return_covariance: if set to True, it also returns the covariance matrix.
    :type return_covariance: bool
    :returns: Values of the s-weights for each specie and possible covariance matrix.
    :rtype: list(darray), (numpy.ndarray)
    :raises ValueError: If the number of provided yields is smaller than the
       number of PDFs minus one.
    '''
    nspecies = len(pdfs)
    if len(yields) == nspecies:
        yvalues = np.array([y.value for y in yields])
    elif len(yields) + 1 == nspecies:
        # The last yield is implicit: the fractions must add up to one.
        # Bug fix: the original rebound ``yields`` to an empty array before
        # iterating it for ``.value``, and kept the species count one short,
        # which mis-sized the covariance matrix below.
        yvalues = data_types.empty_float(nspecies)
        yvalues[:-1] = [y.value for y in yields]
        yvalues[-1] = 1. - yvalues[:-1].sum()
    else:
        raise ValueError(
            'Number of provided yields must be equal than number of PDFs, or at least only one less')
    fvals = [pdf(data, range) for pdf in pdfs]
    aop = pdfs[0].aop
    # denominator of the sPlot formulas: sum_k N_k * f_k(x)
    yf = aop.sum_arrays(tuple(y * f for y, f in zip(yvalues, fvals)))
    # Calculate the inverse of the covariance matrix
    den = yf**2
    iV = data_types.empty_float((nspecies, nspecies))
    for i, fi in enumerate(fvals):
        iV[i, i] = aop.sum(fi * fi / den)
        for j, fj in enumerate(fvals[i + 1:], i + 1):
            # the matrix is symmetric; fill both triangles at once
            iV[i, j] = iV[j, i] = aop.sum(fi * fj / den)
    V = np.linalg.inv(iV)
    # Calculate the s-weights
    w = [aop.sum_arrays(tuple(v * f for v, f in zip(V[i], fvals))) /
         yf for i in np.arange(nspecies)]
    if return_covariance:
        return w, V
    else:
        return w
def sweights_u(a, sweights, bins=10, range=None):
    r'''
    Get the uncertainty associated to the s-weights related to sample *a*.

    Arguments are similar to those of :func:`numpy.histogram`.

    By definition, the uncertainty on the s-weights (for plotting), is defined
    as the sum of the squares of the weights in that bin, like

    .. math:: \sigma = \sqrt{\sum_{b \in \delta x} \omega^2}

    :param a: array of data.
    :type a: numpy.ndarray
    :param sweights: array of weights.
    :type sweights: numpy.ndarray
    :param bins: bins for the histogram.
    :type bins: int or numpy.ndarray
    :param range: range of the histogram.
    :type range: tuple(float, float) or None
    :returns: Uncertainties for each bin of the histogram.
    :rtype: numpy.ndarray
    '''
    # Bug fix: forward ``range`` to numpy.histogram (it was documented and
    # accepted but previously ignored). ``range=None`` keeps the old default
    # behaviour of spanning the data.
    return np.sqrt(np.histogram(a, bins=bins, range=range,
                                weights=sweights * sweights)[0])
| 31.535354 | 101 | 0.628764 |
fcb76a22ab95f239c1e0d0b1edfe02211aabaea2 | 490 | py | Python | src/posts/type_hinting/examples/11.py | pauleveritt/pauleveritt.github.io | 3e4707dba1f3a57297f90c10cc2da4c3075c1a69 | [
"BSD-3-Clause"
] | 8 | 2016-07-15T19:58:29.000Z | 2021-03-11T09:57:11.000Z | src/posts/type_hinting/examples/11.py | pauleveritt/pauleveritt.github.io | 3e4707dba1f3a57297f90c10cc2da4c3075c1a69 | [
"BSD-3-Clause"
] | 2 | 2015-11-26T13:54:52.000Z | 2016-03-03T13:04:17.000Z | src/posts/type_hinting/examples/11.py | pauleveritt/pauleveritt.github.io | 3e4707dba1f3a57297f90c10cc2da4c3075c1a69 | [
"BSD-3-Clause"
] | 6 | 2016-03-01T13:05:00.000Z | 2016-10-11T16:37:18.000Z | from typing import Union, List, Dict
def greeting(names: Union[List[str], Dict[int, List[str]]]) -> Union[
    List[str], Dict[int, List[str]]]:
    """Build 'Hello, ...' greetings for a list of names or a dict of name lists.

    NOTE(review): for a dict input this actually returns a list of
    ``(key, greeting)`` tuples, which the return annotation does not reflect.
    """
    template = 'Hello, {}'
    if not isinstance(names, dict):
        return template.format(', '.join(names))
    return [(group, template.format(', '.join(members)))
            for group, members in names.items()]
print(greeting(['jane', 'john', 'judy']))
print(greeting(
{10: ['jane', 'judy'],
11: ['john'],
12: ['judy', 'john']
}
))
| 23.333333 | 69 | 0.522449 |
1b9569af6c2978832c02a7ade6d09fafa7ec957b | 7,567 | py | Python | chrome/common/extensions/docs/server2/github_file_system.py | pozdnyakov/chromium-crosswalk | 0fb25c7278bf1d93e53a3b0bcb75aa8b99d4b26e | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | 2 | 2020-05-03T06:33:56.000Z | 2021-11-14T18:39:42.000Z | chrome/common/extensions/docs/server2/github_file_system.py | pozdnyakov/chromium-crosswalk | 0fb25c7278bf1d93e53a3b0bcb75aa8b99d4b26e | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | chrome/common/extensions/docs/server2/github_file_system.py | pozdnyakov/chromium-crosswalk | 0fb25c7278bf1d93e53a3b0bcb75aa8b99d4b26e | [
"BSD-3-Clause-No-Nuclear-License-2014",
"BSD-3-Clause"
] | null | null | null | # Copyright (c) 2012 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import logging
import os
from StringIO import StringIO
import appengine_blobstore as blobstore
from appengine_url_fetcher import AppEngineUrlFetcher
from appengine_wrappers import GetAppVersion, urlfetch
from file_system import FileSystem, StatInfo
from future import Future
from object_store_creator import ObjectStoreCreator
import url_constants
from zipfile import ZipFile, BadZipfile
ZIP_KEY = 'zipball'
USERNAME = None
PASSWORD = None
def _MakeBlobstoreKey(version):
return ZIP_KEY + '.' + str(version)
class _AsyncFetchFutureZip(object):
def __init__(self,
fetcher,
username,
password,
blobstore,
key_to_set,
key_to_delete=None):
self._fetcher = fetcher
self._fetch = fetcher.FetchAsync(ZIP_KEY,
username=username,
password=password)
self._blobstore = blobstore
self._key_to_set = key_to_set
self._key_to_delete = key_to_delete
def Get(self):
try:
result = self._fetch.Get()
# Check if Github authentication failed.
if result.status_code == 401:
logging.error('Github authentication failed for %s, falling back to '
'unauthenticated.' % USERNAME)
blob = self._fetcher.Fetch(ZIP_KEY).content
else:
blob = result.content
except urlfetch.DownloadError as e:
logging.error('Bad github zip file: %s' % e)
return None
if self._key_to_delete is not None:
self._blobstore.Delete(_MakeBlobstoreKey(self._key_to_delete),
blobstore.BLOBSTORE_GITHUB)
try:
return_zip = ZipFile(StringIO(blob))
except BadZipfile as e:
logging.error('Bad github zip file: %s' % e)
return None
self._blobstore.Set(_MakeBlobstoreKey(self._key_to_set),
blob,
blobstore.BLOBSTORE_GITHUB)
return return_zip
class GithubFileSystem(FileSystem):
@staticmethod
def Create(object_store_creator):
return GithubFileSystem(
AppEngineUrlFetcher(url_constants.GITHUB_URL),
blobstore.AppEngineBlobstore(),
object_store_creator)
def __init__(self, fetcher, blobstore, object_store_creator):
# Password store doesn't depend on channel, and if we don't cancel the app
# version then the whole advantage of having it in the first place is
# greatly lessened (likewise it should always start populated).
password_store = object_store_creator.Create(
GithubFileSystem,
channel=None,
app_version=None,
category='password',
start_empty=False)
if USERNAME is None:
password_data = password_store.GetMulti(('username', 'password')).Get()
self._username, self._password = (password_data.get('username'),
password_data.get('password'))
else:
password_store.SetMulti({'username': USERNAME, 'password': PASSWORD})
self._username, self._password = (USERNAME, PASSWORD)
self._fetcher = fetcher
self._blobstore = blobstore
# Github has no knowledge of Chrome channels, set channel to None.
self._stat_object_store = object_store_creator.Create(
GithubFileSystem,
channel=None)
self._version = None
self._GetZip(self.Stat(ZIP_KEY).version)
def _GetZip(self, version):
blob = self._blobstore.Get(_MakeBlobstoreKey(version),
blobstore.BLOBSTORE_GITHUB)
if blob is not None:
try:
self._zip_file = Future(value=ZipFile(StringIO(blob)))
except BadZipfile as e:
self._blobstore.Delete(_MakeBlobstoreKey(version),
blobstore.BLOBSTORE_GITHUB)
logging.error('Bad github zip file: %s' % e)
self._zip_file = Future(value=None)
else:
self._zip_file = Future(
delegate=_AsyncFetchFutureZip(self._fetcher,
self._username,
self._password,
self._blobstore,
version,
key_to_delete=self._version))
self._version = version
def _ReadFile(self, path):
try:
zip_file = self._zip_file.Get()
except Exception as e:
logging.error('Github ReadFile error: %s' % e)
return ''
if zip_file is None:
logging.error('Bad github zip file.')
return ''
prefix = zip_file.namelist()[0][:-1]
return zip_file.read(prefix + path)
def _ListDir(self, path):
try:
zip_file = self._zip_file.Get()
except Exception as e:
logging.error('Github ListDir error: %s' % e)
return []
if zip_file is None:
logging.error('Bad github zip file.')
return []
filenames = zip_file.namelist()
# Take out parent directory name (GoogleChrome-chrome-app-samples-c78a30f)
filenames = [f[len(filenames[0]) - 1:] for f in filenames]
# Remove the path of the directory we're listing from the filenames.
filenames = [f[len(path):] for f in filenames
if f != path and f.startswith(path)]
# Remove all files not directly in this directory.
return [f for f in filenames if f[:-1].count('/') == 0]
def Read(self, paths, binary=False):
version = self.Stat(ZIP_KEY).version
if version != self._version:
self._GetZip(version)
result = {}
for path in paths:
if path.endswith('/'):
result[path] = self._ListDir(path)
else:
result[path] = self._ReadFile(path)
return Future(value=result)
def _DefaultStat(self, path):
version = 0
# TODO(kalman): we should replace all of this by wrapping the
# GithubFileSystem in a CachingFileSystem. A lot of work has been put into
# CFS to be robust, and GFS is missing out.
# For example: the following line is wrong, but it could be moot.
self._stat_object_store.Set(path, version)
return StatInfo(version)
def Stat(self, path):
version = self._stat_object_store.Get(path).Get()
if version is not None:
return StatInfo(version)
try:
result = self._fetcher.Fetch('commits/HEAD',
username=USERNAME,
password=PASSWORD)
except urlfetch.DownloadError as e:
logging.error('GithubFileSystem Stat: %s' % e)
return self._DefaultStat(path)
# Check if Github authentication failed.
if result.status_code == 401:
logging.error('Github authentication failed for %s, falling back to '
'unauthenticated.' % USERNAME)
try:
result = self._fetcher.Fetch('commits/HEAD')
except urlfetch.DownloadError as e:
logging.error('GithubFileSystem Stat: %s' % e)
return self._DefaultStat(path)
version = (json.loads(result.content).get('commit', {})
.get('tree', {})
.get('sha', None))
# Check if the JSON was valid, and set to 0 if not.
if version is not None:
self._stat_object_store.Set(path, version)
else:
logging.warning('Problem fetching commit hash from github.')
return self._DefaultStat(path)
return StatInfo(version)
| 36.73301 | 78 | 0.628254 |
50e3440504a52a6b67393c563ea6471a99008515 | 769 | py | Python | Blog_Yourself/Blog_Yourself/urls.py | shivansh1698/Blog_yourself | de797a4c01484fb006fddb3e213b75fad517e74b | [
"BSD-3-Clause"
] | null | null | null | Blog_Yourself/Blog_Yourself/urls.py | shivansh1698/Blog_yourself | de797a4c01484fb006fddb3e213b75fad517e74b | [
"BSD-3-Clause"
] | 5 | 2020-02-11T21:31:42.000Z | 2021-06-10T22:45:00.000Z | Blog_Yourself/Blog_Yourself/urls.py | shivansh1698/Blog_yourself | de797a4c01484fb006fddb3e213b75fad517e74b | [
"BSD-3-Clause"
] | null | null | null | """Blog_Yourself URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/dev/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: url(r'^$', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: url(r'^$', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.conf.urls import url, include
2. Add a URL to urlpatterns: url(r'^blog/', include('blog.urls'))
"""
from django.conf.urls import url
from django.contrib import admin
urlpatterns = [
url(r'^admin/', admin.site.urls),
]
| 34.954545 | 79 | 0.707412 |
cd7c2d38c38daafaac0a9178774c5ead84cb0bb3 | 8,510 | py | Python | nlpnet/word_dictionary.py | bgmartins/spatial-roles | bebb4d7084e75f4e7919308d0aa77544d12776a2 | [
"MIT"
] | 1 | 2020-12-14T20:58:54.000Z | 2020-12-14T20:58:54.000Z | nlpnet/word_dictionary.py | bgmartins/spatial-roles | bebb4d7084e75f4e7919308d0aa77544d12776a2 | [
"MIT"
] | null | null | null | nlpnet/word_dictionary.py | bgmartins/spatial-roles | bebb4d7084e75f4e7919308d0aa77544d12776a2 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import itertools
from collections import Counter, OrderedDict as OD
class WordDictionary(dict):
"""
Class to store words and their corresponding indices in
the network lookup table. Also deals with padding and
maps rare words to a special index.
"""
padding_left = u'*LEFT*'
padding_right = u'*RIGHT*'
rare = u'*RARE*'
number_transformation = {ord(c): u'9' for c in u'012345678'}
def __init__(self, tokens, size=None, minimum_occurrences=None, wordlist=None):
"""
Fills a dictionary (to be used for indexing) with the most
common words in the given text.
:param tokens: Either a list of tokens or a list of lists of tokens
(each token represented as a string).
:param size: Maximum number of token indices
(not including paddings, rare, etc.).
:param minimum_occurrences: The minimum number of occurrences a token must
have in order to be included.
:param wordlist: Use this list of words to build the dictionary. Overrides tokens
if not None and ignores maximum size.
"""
if wordlist is None:
# work with the supplied tokens. extract frequencies.
# gets frequency count
c = self._get_frequency_count(tokens)
if minimum_occurrences is None:
minimum_occurrences = 1
words = [key for key, number in c.most_common()
if number >= minimum_occurrences and key]
if size is not None and size < len(words):
words = words[:size]
else:
# using ordered dict as an ordered set
# (we need to keep the order and eliminate duplicates)
words = [word.lower().translate(WordDictionary.number_transformation)
for word in wordlist]
values = [None] * len(words)
words = OD(zip(words, values)).keys()
# verifies the maximum size
if size is None:
size = len(words)
# set all words in the dictionary
for word, num in itertools.izip(words, xrange(size)):
self[word] = num
# if the given words include one of the the rare or padding symbols, don't replace it
special_symbols = [WordDictionary.rare.lower(),
WordDictionary.padding_left.lower(),
WordDictionary.padding_right.lower()]
for symbol in special_symbols:
if symbol not in words:
self[symbol] = size
size += 1
self.check()
@classmethod
def init_from_wordlist(cls, wordlist):
"""
Initializes the WordDictionary instance with a list of words, independently from their
frequencies. Every word in the list gets an entry.
"""
return cls(None, wordlist=wordlist)
@classmethod
def init_empty(cls):
"""
Initializes an empty Word Dictionary.
"""
return cls([[]])
def save(self, filename):
"""
Saves the word dictionary to the given file as a list of word types.
Special words (paddings and rare) are also included.
"""
sorted_words = sorted(self, key=self.get)
text = '\n'.join(sorted_words)
with open(filename, 'wb') as f:
f.write(text.encode('utf-8'))
@classmethod
def load(cls, filename):
"""
Loads a WordDictionary object from a vocabulary file.
"""
words = []
with open(filename, 'rb') as f:
for word in f:
word = unicode(word, 'utf-8').strip()
if word:
words.append(word)
return cls.init_from_wordlist(words)
def _get_frequency_count(self, token_list):
"""
Returns a token counter for tokens in token_list.
:param token_list: Either a list of tokens (as strings) or a list
of lists of tokens.
"""
if type(token_list[0]) == list:
c = Counter(t.lower().translate(WordDictionary.number_transformation)
for sent in token_list for t in sent)
else:
c = Counter(t.lower().translate(WordDictionary.number_transformation)
for t in token_list)
return c
def update_tokens(self, tokens, size=None, minimum_occurrences=1, freqs=None):
"""
Updates the dictionary, adding more types until size is reached.
:param freqs: a dictionary providing a token count.
"""
if freqs is None:
freqs = self._get_frequency_count(tokens)
if size is None or size == 0:
# size None or 0 means no size limit
size = len(freqs)
if self.num_tokens >= size:
return
else:
size_diff = size - self.num_tokens
# a new version of freqs with only tokens not present in the dictionary
# and above minimum frequency
candidate_tokens = dict((token, freqs[token])
for token in freqs
if token not in self and freqs[token] >= minimum_occurrences)
# order the types from the most frequent to the least
new_tokens = sorted(candidate_tokens, key=lambda x: candidate_tokens[x], reverse=True)
next_value = len(self)
for token in new_tokens:
self[token] = next_value
next_value += 1
size_diff -= 1
if size_diff == 0:
break
self.check()
def __contains__(self, key):
"""
Overrides the "in" operator. Case insensitive.
"""
transformed = key.lower().translate(WordDictionary.number_transformation)
return super(WordDictionary, self).__contains__(transformed)
def __setitem__(self, key, value):
"""
Overrides the [] write operator. It converts every key to lower case
before assignment.
"""
transformed = key.lower().translate(WordDictionary.number_transformation)
super(WordDictionary, self).__setitem__(transformed, value)
def __getitem__(self, key):
"""
Overrides the [] read operator.
Three differences from the original:
1) when given a word without an entry, it returns the value for the *RARE* key.
2) all entries are converted to lower case before verification.
3) digits are mapped to 9
"""
# faster than regexp
transformed = key.lower().translate(WordDictionary.number_transformation)
return super(WordDictionary, self).get(transformed, self.index_rare)
def get(self, key):
"""
Overrides the dictionary get method, so when given a word without an entry, it returns
the value for the *RARE* key. Note that it is not possible to supply a default value as
in the dict class.
"""
# faster than regexp
transformed = key.lower().translate(WordDictionary.number_transformation)
return super(WordDictionary, self).get(transformed, self.index_rare)
def check(self):
"""
Checks the internal structure of the dictionary and makes necessary adjustments,
such as updating num_tokens.
"""
# since WordDictionary overrides __get__, we use the super call
# (the WordDictionary __get__ fails when self.index_rare is not set)
key = WordDictionary.rare.lower()
self.index_rare = super(WordDictionary, self).get(key)
self.index_padding_left = self[WordDictionary.padding_left]
self.index_padding_right = self[WordDictionary.padding_right]
self.num_tokens = len(self) - 3
def get_words(self, indices):
"""
Returns the words represented by a sequence of indices.
"""
words = [w for w in self if self[w] in indices]
return words
def get_indices(self, words):
"""
Returns the indices corresponding to a sequence of tokens.
"""
indices = [self[w] for w in words]
return indices
| 36.523605 | 96 | 0.579906 |
7e204d520f74101ffa6d717419893f07a426a211 | 5,463 | py | Python | datatile/datatile/summary/df.py | mouradmourafiq/datatile | 5aa88a598049645739b5ae7500719f4addba24cb | [
"Apache-2.0"
] | 20 | 2021-12-01T11:50:37.000Z | 2022-03-28T15:06:49.000Z | datatile/datatile/summary/df.py | mouradmourafiq/datatile | 5aa88a598049645739b5ae7500719f4addba24cb | [
"Apache-2.0"
] | 2 | 2021-12-27T14:11:32.000Z | 2022-02-09T18:16:12.000Z | datatile/datatile/summary/df.py | mouradmourafiq/datatile | 5aa88a598049645739b5ae7500719f4addba24cb | [
"Apache-2.0"
] | 5 | 2021-12-01T11:51:29.000Z | 2022-03-31T05:24:40.000Z | #!/usr/bin/python
#
# Copyright 2018-2022 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import pandas as pd
from traceml.processors import df_processors
class DataFrameSummary:
ALL = "ALL"
INCLUDE = "INCLUDE"
EXCLUDE = "EXCLUDE"
TYPE_BOOL = "bool"
TYPE_NUMERIC = "numeric"
TYPE_DATE = "date"
TYPE_CATEGORICAL = "categorical"
TYPE_CONSTANT = "constant"
TYPE_UNIQUE = "unique"
def __init__(self, df, plot=False):
self.df = df
self.length = len(df)
self.columns_stats = df_processors.get_df_column_stats(self.df)
self.corr = df.corr()
self.plot = plot
def __getitem__(self, column):
if isinstance(column, str) and df_processors.df_has_column(
df=self.df, column=column
):
return df_processors.get_df_column_summary(
df=self.df,
column=column,
columns_stats=self.columns_stats,
df_length=self.length,
plot=self.plot,
)
if isinstance(column, int) and column < self.df.shape[1]:
return df_processors.get_df_column_summary(
df=self.df,
column=self.df.columns[column],
columns_stats=self.columns_stats,
df_length=self.length,
plot=self.plot,
)
if isinstance(column, (tuple, list)):
error_keys = [
k
for k in column
if not df_processors.df_has_column(df=self.df, column=k)
]
if len(error_keys) > 0:
raise KeyError(", ".join(error_keys))
return self.df[list(column)].values
if isinstance(column, pd.Index):
error_keys = [
k
for k in column.values
if not df_processors.df_has_column(df=self.df, column=k)
]
if len(error_keys) > 0:
raise KeyError(", ".join(error_keys))
return self.df[column].values
if isinstance(column, np.ndarray):
error_keys = [
k
for k in column
if not df_processors.df_has_column(df=self.df, column=k)
]
if len(error_keys) > 0:
raise KeyError(", ".join(error_keys))
return self.df[column].values
raise KeyError(column)
@property
def columns_types(self):
return df_processors.get_df_columns_types(self.columns_stats)
def summary(self):
return pd.concat([self.df.describe(), self.columns_stats], sort=True)[
self.df.columns
]
""" Column summaries """
@property
def constants(self):
return self.df.columns[self.columns_stats.loc["types"] == "constant"]
@property
def categoricals(self):
return self.df.columns[self.columns_stats.loc["types"] == "categorical"]
@property
def numerics(self):
return self.df.columns[self.columns_stats.loc["types"] == "numeric"]
@property
def uniques(self):
return self.df.columns[self.columns_stats.loc["types"] == "unique"]
@property
def bools(self):
return self.df.columns[self.columns_stats.loc["types"] == "bool"]
@property
def missing_frac(self):
return self.columns_stats.loc["missing"].apply(lambda x: float(x) / self.length)
def get_columns(self, df, usage, columns=None):
"""
Returns a `data_frame.columns`.
:param df: dataframe to select columns from
:param usage: should be a value from [ALL, INCLUDE, EXCLUDE].
this value only makes sense if attr `columns` is also set.
otherwise, should be used with default value ALL.
:param columns: * if `usage` is all, this value is not used.
* if `usage` is INCLUDE, the `df` is restricted to the intersection
between `columns` and the `df.columns`
* if usage is EXCLUDE, returns the `df.columns` excluding these `columns`
:return: `data_frame` columns, excluding `target_column` and `id_column` if given.
`data_frame` columns, including/excluding the `columns` depending on `usage`.
"""
columns_excluded = pd.Index([])
columns_included = df.columns
if usage == self.INCLUDE:
try:
columns_included = columns_included.intersection(pd.Index(columns))
except TypeError:
pass
elif usage == self.EXCLUDE:
try:
columns_excluded = columns_excluded.union(pd.Index(columns))
except TypeError:
pass
columns_included = columns_included.difference(columns_excluded)
return columns_included.intersection(df.columns)
| 34.14375 | 97 | 0.592349 |
7ba4db4dbae431ad8322b58ad025ba1fbba26dc0 | 13,905 | py | Python | src/ggrc_workflows/notification/data_handler.py | kripsy/Project | 5ff892513ea74621ea5031a7d5bc7a6a614debc8 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | src/ggrc_workflows/notification/data_handler.py | kripsy/Project | 5ff892513ea74621ea5031a7d5bc7a6a614debc8 | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2021-11-15T17:52:29.000Z | 2021-11-15T17:52:29.000Z | src/ggrc_workflows/notification/data_handler.py | gaurav46/ggrc-core | 5f4ea7173ec1da7763bd5b4fef39858c8be07df2 | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | # Copyright (C) 2016 Google Inc.
# Licensed under http://www.apache.org/licenses/LICENSE-2.0 <see LICENSE file>
import urllib
from copy import deepcopy
from datetime import date
from logging import getLogger
from urlparse import urljoin
from sqlalchemy import and_
from ggrc import db
from ggrc import utils
from ggrc.models.revision import Revision
from ggrc.notifications import data_handlers
from ggrc.utils import merge_dicts, get_url_root
from ggrc_basic_permissions.models import Role, UserRole
from ggrc_workflows.models import Cycle
from ggrc_workflows.models import CycleTaskGroupObjectTask
from ggrc_workflows.models import Workflow
# pylint: disable=invalid-name
logger = getLogger(__name__)
"""
exposed functions
get_cycle_data,
get_workflow_data,
get_cycle_task_data,
"""
def get_cycle_created_task_data(notification):
cycle_task = get_object(CycleTaskGroupObjectTask, notification.object_id)
if not cycle_task:
logger.warning(
'%s for notification %s not found.',
notification.object_type, notification.id)
return {}
cycle_task_group = cycle_task.cycle_task_group
cycle = cycle_task_group.cycle
force = cycle.workflow.notify_on_change
task_assignee = data_handlers.get_person_dict(cycle_task.contact)
task_group_assignee = data_handlers.get_person_dict(cycle_task_group.contact)
workflow_owners = get_workflow_owners_dict(cycle.context_id)
task = {
cycle_task.id: get_cycle_task_dict(cycle_task)
}
result = {}
assignee_data = {
task_assignee['email']: {
"user": task_assignee,
"force_notifications": {
notification.id: force
},
"cycle_data": {
cycle.id: {
"my_tasks": deepcopy(task)
}
}
}
}
tg_assignee_data = {
task_group_assignee['email']: {
"user": task_group_assignee,
"force_notifications": {
notification.id: force
},
"cycle_data": {
cycle.id: {
"my_task_groups": {
cycle_task_group.id: deepcopy(task)
}
}
}
}
}
for workflow_owner in workflow_owners.itervalues():
wf_owner_data = {
workflow_owner['email']: {
"user": workflow_owner,
"force_notifications": {
notification.id: force
},
"cycle_data": {
cycle.id: {
"cycle_tasks": deepcopy(task)
}
}
}
}
result = merge_dicts(result, wf_owner_data)
return merge_dicts(result, assignee_data, tg_assignee_data)
def get_cycle_task_due(notification):
cycle_task = get_object(CycleTaskGroupObjectTask, notification.object_id)
if not cycle_task:
logger.warning(
'%s for notification %s not found.',
notification.object_type, notification.id)
return {}
if not cycle_task.contact:
logger.warning(
'Contact for cycle task %s not found.',
notification.object_id)
return {}
notif_name = notification.notification_type.name
due = "due_today" if notif_name == "cycle_task_due_today" else "due_in"
force = cycle_task.cycle_task_group.cycle.workflow.notify_on_change
return {
cycle_task.contact.email: {
"user": data_handlers.get_person_dict(cycle_task.contact),
"force_notifications": {
notification.id: force
},
due: {
cycle_task.id: get_cycle_task_dict(cycle_task)
}
}
}
def get_all_cycle_tasks_completed_data(notification, cycle):
workflow_owners = get_workflow_owners_dict(cycle.context_id)
force = cycle.workflow.notify_on_change
result = {}
for workflow_owner in workflow_owners.itervalues():
wf_data = {
workflow_owner['email']: {
"user": workflow_owner,
"force_notifications": {
notification.id: force
},
"all_tasks_completed": {
cycle.id: get_cycle_dict(cycle)
}
}
}
result = merge_dicts(result, wf_data)
return result
def get_cycle_created_data(notification, cycle):
if not cycle.is_current:
return {}
manual = notification.notification_type.name == "manual_cycle_created"
force = cycle.workflow.notify_on_change
result = {}
for user_role in cycle.workflow.context.user_roles:
person = user_role.person
result[person.email] = {
"user": data_handlers.get_person_dict(person),
"force_notifications": {
notification.id: force
},
"cycle_started": {
cycle.id: get_cycle_dict(cycle, manual)
}
}
return result
def get_cycle_data(notification):
cycle = get_object(Cycle, notification.object_id)
if not cycle:
return {}
notification_name = notification.notification_type.name
if notification_name in ["manual_cycle_created", "cycle_created"]:
return get_cycle_created_data(notification, cycle)
elif notification_name == "all_cycle_tasks_completed":
return get_all_cycle_tasks_completed_data(notification, cycle)
return {}
def get_cycle_task_declined_data(notification):
cycle_task = get_object(CycleTaskGroupObjectTask, notification.object_id)
if not cycle_task or not cycle_task.contact:
logger.warning(
'%s for notification %s not found.',
notification.object_type, notification.id)
return {}
force = cycle_task.cycle_task_group.cycle.workflow.notify_on_change
return {
cycle_task.contact.email: {
"user": data_handlers.get_person_dict(cycle_task.contact),
"force_notifications": {
notification.id: force
},
"task_declined": {
cycle_task.id: get_cycle_task_dict(cycle_task)
}
}
}
def get_cycle_task_data(notification):
cycle_task = get_object(CycleTaskGroupObjectTask, notification.object_id)
if not cycle_task or not cycle_task.cycle_task_group.cycle.is_current:
return {}
notification_name = notification.notification_type.name
if notification_name in ["manual_cycle_created", "cycle_created"]:
return get_cycle_created_task_data(notification)
elif notification_name == "cycle_task_declined":
return get_cycle_task_declined_data(notification)
elif notification_name in ["cycle_task_due_in",
"one_time_cycle_task_due_in",
"weekly_cycle_task_due_in",
"monthly_cycle_task_due_in",
"quarterly_cycle_task_due_in",
"annually_cycle_task_due_in",
"cycle_task_due_today"]:
return get_cycle_task_due(notification)
return {}
def get_workflow_starts_in_data(notification, workflow):
if workflow.status != "Active":
return {}
if (not workflow.next_cycle_start_date or
workflow.next_cycle_start_date < date.today()):
return {} # this can only be if the cycle has successfully started
result = {}
workflow_owners = get_workflow_owners_dict(workflow.context_id)
force = workflow.notify_on_change
for user_roles in workflow.context.user_roles:
wf_person = user_roles.person
result[wf_person.email] = {
"user": data_handlers.get_person_dict(wf_person),
"force_notifications": {
notification.id: force
},
"cycle_starts_in": {
workflow.id: {
"workflow_owners": workflow_owners,
"workflow_url": get_workflow_url(workflow),
"start_date": workflow.next_cycle_start_date,
"fuzzy_start_date": utils.get_fuzzy_date(
workflow.next_cycle_start_date),
"custom_message": workflow.notify_custom_message,
"title": workflow.title,
}
}
}
return result
def get_cycle_start_failed_data(notification, workflow):
if workflow.status != "Active":
return {}
if (not workflow.next_cycle_start_date or
workflow.next_cycle_start_date >= date.today()):
return {} # this can only be if the cycle has successfully started
result = {}
workflow_owners = get_workflow_owners_dict(workflow.context_id)
force = workflow.notify_on_change
for wf_owner in workflow_owners.itervalues():
result[wf_owner["email"]] = {
"user": wf_owner,
"force_notifications": {
notification.id: force
},
"cycle_start_failed": {
workflow.id: {
"workflow_owners": workflow_owners,
"workflow_url": get_workflow_url(workflow),
"start_date": workflow.next_cycle_start_date,
"fuzzy_start_date": utils.get_fuzzy_date(
workflow.next_cycle_start_date),
"custom_message": workflow.notify_custom_message,
"title": workflow.title,
}
}
}
return result
def get_workflow_data(notification):
workflow = get_object(Workflow, notification.object_id)
if not workflow:
return {}
if workflow.frequency == "one_time":
# one time workflows get cycles manually created and that triggers
# the instant notification.
return {}
if "_workflow_starts_in" in notification.notification_type.name:
return get_workflow_starts_in_data(notification, workflow)
if "cycle_start_failed" == notification.notification_type.name:
return get_cycle_start_failed_data(notification, workflow)
return {}
def get_object(obj_class, obj_id):
result = db.session.query(obj_class).filter(obj_class.id == obj_id)
if result.count() == 1:
return result.one()
return None
def get_workflow_owners_dict(context_id):
owners = db.session.query(UserRole).join(Role).filter(
and_(UserRole.context_id == context_id,
Role.name == "WorkflowOwner")).all()
return {user_role.person.id: data_handlers.get_person_dict(user_role.person)
for user_role in owners}
def _get_object_info_from_revision(revision, known_type):
""" returns type and id of the searched object, if we have one part of
the relationship known.
"""
object_type = revision.destination_type \
if revision.source_type == known_type \
else revision.source_type
object_id = revision.destination_id if \
revision.source_type == known_type \
else revision.source_id
return object_type, object_id
def get_cycle_task_dict(cycle_task):
object_titles = []
# every object should have a title or at least a name like person object
for related_object in cycle_task.related_objects:
object_titles.append(getattr(related_object, "title", "") or
getattr(related_object, "name", "") or
u"Untitled object")
# related objects might have been deleted or unmapped,
# check the revision history
deleted_relationships_sources = db.session.query(Revision).filter(
Revision.resource_type == "Relationship",
Revision.action == "deleted",
Revision.source_type == "CycleTaskGroupObjectTask",
Revision.source_id == cycle_task.id
)
deleted_relationships_destinations = db.session.query(Revision).filter(
Revision.resource_type == "Relationship",
Revision.action == "deleted",
Revision.destination_type == "CycleTaskGroupObjectTask",
Revision.destination_id == cycle_task.id
)
deleted_relationships = deleted_relationships_sources.union(
deleted_relationships_destinations).all()
for deleted_relationship in deleted_relationships:
removed_object_type, removed_object_id = _get_object_info_from_revision(
deleted_relationship, "CycleTaskGroupObjectTask")
object_data = db.session.query(Revision).filter(
Revision.resource_type == removed_object_type,
Revision.resource_id == removed_object_id,
).order_by(Revision.id.desc()).first()
object_titles.append(
u"{} [removed from task]".format(object_data.content["display_name"])
)
# the filter expression to be included in the cycle task's URL and
# automatically applied when user visits it
filter_exp = u"id=" + unicode(cycle_task.cycle_id)
return {
"title": cycle_task.title,
"related_objects": object_titles,
"end_date": cycle_task.end_date.strftime("%m/%d/%Y"),
"fuzzy_due_in": utils.get_fuzzy_date(cycle_task.end_date),
"cycle_task_url": get_cycle_task_url(cycle_task, filter_exp=filter_exp),
}
def get_cycle_dict(cycle, manual=False):
workflow_owners = get_workflow_owners_dict(cycle.context_id)
return {
"manually": manual,
"custom_message": cycle.workflow.notify_custom_message,
"cycle_title": cycle.title,
"workflow_owners": workflow_owners,
"cycle_url": get_cycle_url(cycle),
}
def get_workflow_url(workflow):
url = "workflows/{}#current_widget".format(workflow.id)
return urljoin(get_url_root(), url)
def get_cycle_task_url(cycle_task, filter_exp=u""):
if filter_exp:
filter_exp = u"?filter=" + urllib.quote(filter_exp)
url = (u"/workflows/{workflow_id}"
u"{filter_exp}"
u"#current_widget/cycle/{cycle_id}"
u"/cycle_task_group/{cycle_task_group_id}"
u"/cycle_task_group_object_task/{cycle_task_id}").format(
workflow_id=cycle_task.cycle_task_group.cycle.workflow.id,
filter_exp=filter_exp,
cycle_id=cycle_task.cycle_task_group.cycle.id,
cycle_task_group_id=cycle_task.cycle_task_group.id,
cycle_task_id=cycle_task.id,
)
return urljoin(get_url_root(), url)
def get_cycle_url(cycle):
url = "workflows/{workflow_id}#current_widget/cycle/{cycle_id}".format(
workflow_id=cycle.workflow.id,
cycle_id=cycle.id,
)
return urljoin(get_url_root(), url)
| 32.039171 | 79 | 0.676088 |
f848740883ea539bed7d42344dc1382bb40b72f6 | 112 | py | Python | skabase/SKAMaster/SKAMaster/__main__.py | adityadangeska/lmc-base-classes | a3dada19b27fcc889546d754ef94986c55da5acc | [
"BSD-3-Clause"
] | 1 | 2019-05-31T09:47:31.000Z | 2019-05-31T09:47:31.000Z | skabase/SKAMaster/__main__.py | adityadangeska/lmc-base-classes | a3dada19b27fcc889546d754ef94986c55da5acc | [
"BSD-3-Clause"
] | null | null | null | skabase/SKAMaster/__main__.py | adityadangeska/lmc-base-classes | a3dada19b27fcc889546d754ef94986c55da5acc | [
"BSD-3-Clause"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This file is part of the SKAMaster project
#
#
#
# Package entry point: ``python -m SKAMaster`` delegates straight to the
# device server's main() with no arguments.
from SKAMaster import main
main()
| 11.2 | 44 | 0.642857 |
c6338e0d9165ee45e24b8855d213809186fa6059 | 49 | py | Python | ansible_runner/api/__init__.py | smk4664/nautobot-plugin-ansible-runner | 14e3c57e9d4b21abe9bf81484fc3e26114fc7056 | [
"Apache-2.0"
] | null | null | null | ansible_runner/api/__init__.py | smk4664/nautobot-plugin-ansible-runner | 14e3c57e9d4b21abe9bf81484fc3e26114fc7056 | [
"Apache-2.0"
] | null | null | null | ansible_runner/api/__init__.py | smk4664/nautobot-plugin-ansible-runner | 14e3c57e9d4b21abe9bf81484fc3e26114fc7056 | [
"Apache-2.0"
] | null | null | null | """REST API module for ansible_runner plugin."""
| 24.5 | 48 | 0.734694 |
2eab85f427d9ccefdc5743e1d202c527a2afe181 | 191 | py | Python | aiocloudpayments/types/notification_info.py | drforse/aiocloudpayments | 25b8827250279335d037754dca6978bc79c9b18d | [
"MIT"
] | null | null | null | aiocloudpayments/types/notification_info.py | drforse/aiocloudpayments | 25b8827250279335d037754dca6978bc79c9b18d | [
"MIT"
] | null | null | null | aiocloudpayments/types/notification_info.py | drforse/aiocloudpayments | 25b8827250279335d037754dca6978bc79c9b18d | [
"MIT"
] | null | null | null | from .base import CpObject
class NotificationInfo(CpObject):
    """Settings of a single CloudPayments notification type."""
    # Whether this notification type is enabled.
    is_enabled: bool = None
    # Delivery address -- presumably the callback URL; confirm against API docs.
    address: str = None
    # HTTP method used for delivery (e.g. "GET" / "POST") -- TODO confirm values.
    http_method: str = None
    # Character encoding of the notification payload.
    encoding: str = None
    # Serialization format of the notification payload.
    format: str = None
192dfbae50c8ec4353b51fa77a0d90d5a35b94b9 | 2,992 | py | Python | pi_cam_neopixel_control.py | nixternal/Pi-Cam-NeoPixel | fdae6f388e8432c34ae7ad6993c079a052883d23 | [
"Unlicense"
] | 1 | 2017-04-12T21:10:22.000Z | 2017-04-12T21:10:22.000Z | pi_cam_neopixel_control.py | nixternal/Pi-Cam-NeoPixel | fdae6f388e8432c34ae7ad6993c079a052883d23 | [
"Unlicense"
] | null | null | null | pi_cam_neopixel_control.py | nixternal/Pi-Cam-NeoPixel | fdae6f388e8432c34ae7ad6993c079a052883d23 | [
"Unlicense"
] | null | null | null | #!/usr/bin/env python
import os
import time
from neopixel import Adafruit_NeoPixel, Color, ws
COUNT = 12
PIN = 18 # GPIO Pin 18 for Data In
FREQ = 800000 # LED signal frequency in hertz (usually 800khz)
DMA = 5 # DMA channel to use for generating signal
BRIGHTNESS = 255 # Set to 0 for darkest and 255 for brightest
INVERT = False # Invert signal if using NPN transistor level shift
CHANNEL = 0
#STRIP = ws.SK6812_STRIP_RGBW
STRIP = ws.SK6812W_STRIP
def colorWipe(strip, color, wait_ms=50):
    """Light every pixel in *color*, one pixel at a time, wait_ms apart."""
    delay = wait_ms / 1000.0
    for pixel in range(strip.numPixels()):
        strip.setPixelColor(pixel, color)
        strip.show()
        time.sleep(delay)
def theaterChase(strip, color, wait_ms=50, iterations=1000):
    """Movie-marquee chase: every third pixel lit, cycled over 3 phases."""
    delay = wait_ms / 1000.0
    for _ in range(iterations):
        for phase in range(3):
            # Light every third pixel at the current phase offset.
            for start in range(0, strip.numPixels(), 3):
                strip.setPixelColor(start + phase, color)
            strip.show()
            time.sleep(delay)
            # Turn those pixels back off before advancing the phase.
            for start in range(0, strip.numPixels(), 3):
                strip.setPixelColor(start + phase, 0)
def wheel(pos):
    """Map a position 0-255 onto the RGB color wheel (r -> g -> b -> r)."""
    if pos < 85:
        return Color(pos * 3, 255 - pos * 3, 0)
    if pos < 170:
        shifted = pos - 85
        return Color(255 - shifted * 3, 0, shifted * 3)
    shifted = pos - 170
    return Color(0, shifted * 3, 255 - shifted * 3)
def rainbow(strip, wait_ms=20, iterations=1):
    """Fade the whole strip through the color wheel, shifted per pixel."""
    delay = wait_ms / 1000.0
    for step in range(256 * iterations):
        for pixel in range(strip.numPixels()):
            strip.setPixelColor(pixel, wheel((pixel + step) & 255))
        strip.show()
        time.sleep(delay)
def rainbowCycle(strip, wait_ms=20, iterations=5):
    """Draw a rainbow distributed across all pixels and rotate it.

    Fix: the per-pixel offset used true division (``/``). Under Python 3
    that yields a float and ``float & 255`` raises TypeError; floor
    division (``//``) reproduces the original Python 2 integer result on
    both interpreter versions.
    """
    for j in range(256 * iterations):
        for i in range(strip.numPixels()):
            strip.setPixelColor(
                i, wheel(((i * 256 // strip.numPixels()) + j) & 255))
        strip.show()
        time.sleep(wait_ms / 1000.0)
def theaterChaseRainbow(strip, wait_ms=50):
    """Theater chase whose color sweeps through the wheel over 256 steps."""
    delay = wait_ms / 1000.0
    for step in range(256):
        for phase in range(3):
            for start in range(0, strip.numPixels(), 3):
                strip.setPixelColor(start + phase, wheel((start + step) % 255))
            strip.show()
            time.sleep(delay)
            for start in range(0, strip.numPixels(), 3):
                strip.setPixelColor(start + phase, 0)
if __name__ == '__main__':
    # Build and initialise the strip from the module-level constants.
    strip = Adafruit_NeoPixel(
        COUNT,
        PIN,
        FREQ,
        DMA,
        INVERT,
        BRIGHTNESS,
        CHANNEL,
        STRIP
    )
    strip.begin()
    # Dispatch on the name this script was invoked as -- apparently the
    # script is meant to be run via suffix-less symlinks named 'off',
    # 'camon', etc. NOTE(review): basename of the real file includes the
    # '.py' suffix, so these branches only match for such symlinks -- confirm.
    runtype = os.path.basename(__file__)
    if runtype == 'off':
        # Clear both RGBW and RGB variants to guarantee all channels off.
        colorWipe(strip, Color(0, 0, 0, 0), 0)
        colorWipe(strip, Color(0, 0, 0), 0)
    elif runtype == 'camon':  # Turn on WHITE for camera
        strip.setBrightness(75)
        colorWipe(strip, Color(255, 255, 255, 255), 0)
    elif runtype == 'heating':
        theaterChase(strip, Color(127, 0, 0))
    elif runtype == 'cooling':
        theaterChase(strip, Color(0, 0, 127))
    elif runtype == 'rainbow':
        rainbow(strip)
    elif runtype == 'cycle':
        rainbowCycle(strip)
    elif runtype == 'chase':
        theaterChaseRainbow(strip)
| 30.530612 | 84 | 0.582553 |
fd8db97b580b6a1347001aa3cb2bd71edc914da9 | 3,790 | py | Python | swagger_client/models/management_persistent_workers.py | jrg1381/sm_asr_console | 47c4090075deaaa7f58e9a092423a58bc7b0a30f | [
"MIT"
] | 2 | 2019-08-07T11:08:06.000Z | 2021-01-20T11:28:37.000Z | swagger_client/models/management_persistent_workers.py | jrg1381/sm_asr_console | 47c4090075deaaa7f58e9a092423a58bc7b0a30f | [
"MIT"
] | null | null | null | swagger_client/models/management_persistent_workers.py | jrg1381/sm_asr_console | 47c4090075deaaa7f58e9a092423a58bc7b0a30f | [
"MIT"
] | null | null | null | # coding: utf-8
"""
Speechmatics Management API
Speechmatics offer a secure Management REST API that enables you to programatically control the lifecycle of the appliance, including stopping and rebooting the appliance, restarting services, licensing the appliance and controlling the available resources. # noqa: E501
OpenAPI spec version: 1.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
class ManagementPersistentWorkers(object):
    """NOTE: This class is auto generated by the swagger code generator program.

    Do not edit the class manually.
    """
    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps model attribute name -> declared swagger type.
    swagger_types = {
        'count': 'str',
        'id': 'str'
    }
    # Maps model attribute name -> JSON field name in the API payload.
    attribute_map = {
        'count': 'count',
        'id': 'id'
    }
    def __init__(self, count=None, id=None):  # noqa: E501
        """ManagementPersistentWorkers - a model defined in Swagger"""  # noqa: E501
        self._count = None
        self._id = None
        # No polymorphic subtypes: the discriminator is unused.
        self.discriminator = None
        if count is not None:
            self.count = count
        if id is not None:
            self.id = id
    @property
    def count(self):
        """Gets the count of this ManagementPersistentWorkers.  # noqa: E501


        :return: The count of this ManagementPersistentWorkers.  # noqa: E501
        :rtype: str
        """
        return self._count
    @count.setter
    def count(self, count):
        """Sets the count of this ManagementPersistentWorkers.


        :param count: The count of this ManagementPersistentWorkers.  # noqa: E501
        :type: str
        """
        self._count = count
    @property
    def id(self):
        """Gets the id of this ManagementPersistentWorkers.  # noqa: E501


        :return: The id of this ManagementPersistentWorkers.  # noqa: E501
        :rtype: str
        """
        return self._id
    @id.setter
    def id(self, id):
        """Sets the id of this ManagementPersistentWorkers.


        :param id: The id of this ManagementPersistentWorkers.  # noqa: E501
        :type: str
        """
        self._id = id
    def to_dict(self):
        """Returns the model properties as a dict"""
        result = {}
        # Recursively serialise nested models, lists and dicts of models.
        for attr, _ in six.iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value
        return result
    def to_str(self):
        """Returns the string representation of the model"""
        return pprint.pformat(self.to_dict())
    def __repr__(self):
        """For `print` and `pprint`"""
        return self.to_str()
    def __eq__(self, other):
        """Returns true if both objects are equal"""
        if not isinstance(other, ManagementPersistentWorkers):
            return False
        # Generated models compare by full attribute dict.
        return self.__dict__ == other.__dict__
    def __ne__(self, other):
        """Returns true if both objects are not equal"""
        return not self == other
| 27.266187 | 275 | 0.573615 |
c9000e29236a51001077c3d99256672ba661b53e | 28,117 | py | Python | invenio_files_rest/views.py | kprzerwa/invenio-files-rest | e4a30e8608cbfd2da63320638cabce60ec6ad3db | [
"MIT"
] | null | null | null | invenio_files_rest/views.py | kprzerwa/invenio-files-rest | e4a30e8608cbfd2da63320638cabce60ec6ad3db | [
"MIT"
] | null | null | null | invenio_files_rest/views.py | kprzerwa/invenio-files-rest | e4a30e8608cbfd2da63320638cabce60ec6ad3db | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This file is part of Invenio.
# Copyright (C) 2015-2019 CERN.
#
# Invenio is free software; you can redistribute it and/or modify it
# under the terms of the MIT License; see LICENSE file for more details.
"""Files download/upload REST API similar to S3 for Invenio."""
from __future__ import absolute_import, print_function
import uuid
from functools import partial, wraps
from flask import Blueprint, abort, current_app, json, request
from flask_login import current_user
from invenio_db import db
from invenio_rest import ContentNegotiatedMethodView
from marshmallow import missing
from six.moves.urllib.parse import parse_qsl
from webargs import fields
from webargs.flaskparser import use_kwargs
from .errors import DuplicateTagError, ExhaustedStreamError, FileSizeError, \
InvalidTagError, MissingQueryParameter, MultipartInvalidChunkSize
from .models import Bucket, MultipartObject, ObjectVersion, ObjectVersionTag, \
Part
from .proxies import current_files_rest, current_permission_factory
from .serializer import json_serializer
from .signals import file_downloaded
from .tasks import merge_multipartobject, remove_file_data
# All REST endpoints below are registered under the /files URL prefix.
blueprint = Blueprint(
    'invenio_files_rest',
    __name__,
    url_prefix='/files',
)
#
# Helpers
#
def as_uuid(value):
    """Parse *value* as a UUID, aborting with HTTP 404 on malformed input."""
    try:
        parsed = uuid.UUID(value)
    except ValueError:
        abort(404)
    else:
        return parsed
def minsize_validator(value):
    """Validate the Content-Length header against the configured minimum.

    :raises invenio_files_rest.errors.FileSizeError: If the value is less
        than :data:`invenio_files_rest.config.FILES_REST_MIN_FILE_SIZE`.
    """
    minimum = current_app.config['FILES_REST_MIN_FILE_SIZE']
    if value < minimum:
        raise FileSizeError()
def invalid_subresource_validator(value):
    """Reject the request: this subresource is never valid here (HTTP 405)."""
    abort(405)
def validate_tag(key, value):
    """Validate a tag.

    Fixes the docstring: the code has always enforced that both keys and
    values are non-empty and shorter than 256 characters (the previous
    docstring incorrectly claimed a 128-character key limit).
    """
    # Note, parse_qsl does not include a key if the value is an empty string
    # (e.g. 'key=&test=a'), so zero-length values should not normally reach
    # this check -- it is validated anyway for safety.
    return 0 < len(key) < 256 and 0 < len(value) < 256
def parse_header_tags():
    """Parse tags specified in the HTTP request header.

    The configured custom header's value is interpreted as a URL query
    string; each key/value pair becomes a tag.

    :raises DuplicateTagError: if the same key appears twice.
    :raises InvalidTagError: if a key or value fails length validation.
    :returns: dict of tags, or ``None`` when the header carried none.
    """
    raw = request.headers.get(
        current_app.config['FILES_REST_FILE_TAGS_HEADER'], '')
    tags = {}
    for name, val in parse_qsl(raw):
        if name in tags:
            raise DuplicateTagError()
        if not validate_tag(name, val):
            raise InvalidTagError()
        tags[name] = val
    return tags or None
#
# Part upload factories
#
@use_kwargs({
    'part_number': fields.Int(
        load_from='partNumber',
        location='query',
        required=True,
    ),
    'content_length': fields.Int(
        load_from='Content-Length',
        location='headers',
        required=True,
        validate=minsize_validator,
    ),
    'content_type': fields.Str(
        load_from='Content-Type',
        location='headers',
    ),
    'content_md5': fields.Str(
        load_from='Content-MD5',
        location='headers',
    ),
})
def default_partfactory(part_number=None, content_length=None,
                        content_type=None, content_md5=None):
    """Get default part factory.

    :param part_number: The part number. (Default: ``None``)
    :param content_length: The content length. (Default: ``None``)
    :param content_type: The HTTP Content-Type. (Default: ``None``)
    :param content_md5: The content MD5. (Default: ``None``)
    :returns: The content length, the part number, the stream, the content
        type, MD5 of the content, and tags (always ``None`` here).
    """
    # Trailing None is the tags slot of the part-factory tuple contract.
    return content_length, part_number, request.stream, content_type, \
        content_md5, None
@use_kwargs({
    'content_md5': fields.Str(
        load_from='Content-MD5',
        location='headers',
        missing=None,
    ),
    'content_length': fields.Int(
        load_from='Content-Length',
        location='headers',
        required=True,
        validate=minsize_validator,
    ),
    'content_type': fields.Str(
        load_from='Content-Type',
        location='headers',
        missing='',
    ),
})
def stream_uploadfactory(content_md5=None, content_length=None,
                         content_type=None):
    """Get default put factory.

    If Content-Type is ``'multipart/form-data'`` then the stream is aborted.

    :param content_md5: The content MD5. (Default: ``None``)
    :param content_length: The content length. (Default: ``None``)
    :param content_type: The HTTP Content-Type. (Default: ``None``)
    :returns: The stream, content length, MD5 of the content, and tags
        parsed from the custom tags header.
    """
    # Raw-stream uploads only: form-encoded bodies are rejected with 422.
    if content_type.startswith('multipart/form-data'):
        abort(422)
    return request.stream, content_length, content_md5, parse_header_tags()
@use_kwargs({
    'part_number': fields.Int(
        load_from='_chunkNumber',
        location='form',
        required=True,
    ),
    'content_length': fields.Int(
        load_from='_currentChunkSize',
        location='form',
        required=True,
        validate=minsize_validator,
    ),
    'uploaded_file': fields.Raw(
        load_from='file',
        location='files',
        required=True,
    ),
})
def ngfileupload_partfactory(part_number=None, content_length=None,
                             uploaded_file=None):
    """Part factory for ng-file-upload.

    :param part_number: The part number. (Default: ``None``)
    :param content_length: The content length. (Default: ``None``)
    :param uploaded_file: The upload request. (Default: ``None``)
    :returns: The content length, part number, stream, HTTP Content-Type
        header, content MD5 (``None``) and tags (``None``).
    """
    # Same tuple contract as default_partfactory; MD5 and tags unavailable
    # from ng-file-upload chunks.
    return content_length, part_number, uploaded_file.stream, \
        uploaded_file.headers.get('Content-Type'), None, None
@use_kwargs({
    'content_length': fields.Int(
        load_from='_totalSize',
        location='form',
        required=True,
    ),
    'content_type': fields.Str(
        load_from='Content-Type',
        location='headers',
        required=True,
    ),
    'uploaded_file': fields.Raw(
        load_from='file',
        location='files',
        required=True,
    ),
})
def ngfileupload_uploadfactory(content_length=None, content_type=None,
                               uploaded_file=None):
    """Get default put factory.

    Unlike :func:`stream_uploadfactory`, this factory *requires*
    ``'multipart/form-data'`` and aborts otherwise.

    :param content_length: The content length. (Default: ``None``)
    :param content_type: The HTTP Content-Type. (Default: ``None``)
    :param uploaded_file: The upload request. (Default: ``None``)
    :returns: A tuple containing stream, content length, checksum
        (``None``) and tags parsed from the custom tags header.
    """
    if not content_type.startswith('multipart/form-data'):
        abort(422)
    return uploaded_file.stream, content_length, None, parse_header_tags()
#
# Object retrieval
#
def pass_bucket(f):
    """Decorate a view to resolve ``bucket_id`` into a ``bucket`` kwarg."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        # Pop the raw id and replace it with the loaded Bucket instance.
        bucket = Bucket.get(as_uuid(kwargs.pop('bucket_id')))
        if not bucket:
            abort(404, 'Bucket does not exist.')
        return f(bucket=bucket, *args, **kwargs)
    return wrapper
def pass_multipart(with_completed=False):
    """Decorate a view to resolve its multipart upload or 404."""
    def decorator(f):
        @wraps(f)
        def wrapper(self, bucket, key, upload_id, *args, **kwargs):
            multipart = MultipartObject.get(
                bucket, key, upload_id, with_completed=with_completed)
            if multipart is None:
                abort(404, 'uploadId does not exists.')
            return f(self, multipart, *args, **kwargs)
        return wrapper
    return decorator
def ensure_input_stream_is_not_exhausted(f):
    """Make sure that the request input stream has not been read already."""
    @wraps(f)
    def wrapper(*args, **kwargs):
        already_read = request.content_length and request.stream.is_exhausted
        if already_read:
            raise ExhaustedStreamError()
        return f(*args, **kwargs)
    return wrapper
#
# Permission checking
#
def check_permission(permission, hidden=True):
    """Abort the request unless *permission* allows it.

    :param permission: The permission to check; ``None`` always passes.
    :param hidden: When ``True`` a rejected permission yields 404 (hiding
        the resource's existence); otherwise 401 for anonymous users or
        403 for authenticated ones.
    """
    if permission is None or permission.can():
        return
    if hidden:
        abort(404)
    if current_user.is_authenticated:
        abort(403,
              'You do not have a permission for this action')
    abort(401)
def need_permissions(object_getter, action, hidden=True):
    """Build a decorator enforcing a permission on the resolved object.

    :param object_getter: Callable retrieving the object handed to the
        permission factory (receives the view's args/kwargs).
    :param action: The action needed; may itself be a callable receiving
        the view's args/kwargs.
    :param hidden: Passed through to :func:`check_permission`.
    """
    def decorator_builder(f):
        @wraps(f)
        def decorate(*args, **kwargs):
            # Resolve the target first, then the action, mirroring the
            # argument order of current_permission_factory.
            target = object_getter(*args, **kwargs)
            resolved = action(*args, **kwargs) if callable(action) else action
            check_permission(current_permission_factory(target, resolved),
                             hidden=hidden)
            return f(*args, **kwargs)
        return decorate
    return decorator_builder
# Shortcut decorators: resolve the checked object from the view kwargs
# ('location' / 'bucket') before delegating to need_permissions.
need_location_permission = partial(
    need_permissions,
    lambda *args, **kwargs: kwargs.get('location')
)
need_bucket_permission = partial(
    need_permissions,
    lambda *args, **kwargs: kwargs.get('bucket')
)
#
# REST resources
#
class LocationResource(ContentNegotiatedMethodView):
    """Service resource.

    Only POST is exposed: it creates a new bucket in the default location.
    """
    def __init__(self, *args, **kwargs):
        """Instantiate content negotiated view."""
        super(LocationResource, self).__init__(*args, **kwargs)
    @need_location_permission('location-update', hidden=False)
    def post(self):
        """Create bucket with the configured default storage class."""
        # Nested transaction so bucket creation composes with any outer one.
        with db.session.begin_nested():
            bucket = Bucket.create(
                storage_class=current_app.config[
                    'FILES_REST_DEFAULT_STORAGE_CLASS'
                ],
            )
        db.session.commit()
        return self.make_response(
            data=bucket,
            context={
                'class': Bucket,
            }
        )
class BucketResource(ContentNegotiatedMethodView):
    """Bucket item resource.

    GET lists either the bucket's objects (optionally all versions) or its
    in-progress multipart uploads; HEAD checks existence/permission.
    """
    # Presence of the flags in the query string is what matters, hence Raw.
    get_args = {
        'versions': fields.Raw(
            location='query',
        ),
        'uploads': fields.Raw(
            location='query',
        )
    }
    def __init__(self, *args, **kwargs):
        """Instantiate content negotiated view."""
        super(BucketResource, self).__init__(*args, **kwargs)
    @need_permissions(lambda self, bucket: bucket, 'bucket-listmultiparts')
    def multipart_listuploads(self, bucket):
        """List multipart uploads in a bucket (capped at 1000 entries).

        :param bucket: A :class:`invenio_files_rest.models.Bucket` instance.
        :returns: The Flask response.
        """
        return self.make_response(
            data=MultipartObject.query_by_bucket(bucket).limit(1000).all(),
            context={
                'class': MultipartObject,
                'bucket': bucket,
                'many': True,
            }
        )
    @need_permissions(
        lambda self, bucket, versions: bucket,
        'bucket-read',
    )
    def listobjects(self, bucket, versions):
        """List objects in a bucket (capped at 1000 entries).

        :param bucket: A :class:`invenio_files_rest.models.Bucket` instance.
        :returns: The Flask response.
        """
        # Listing all versions needs an extra, non-hidden permission check.
        if versions is not missing:
            check_permission(
                current_permission_factory(bucket, 'bucket-read-versions'),
                hidden=False
            )
        return self.make_response(
            data=ObjectVersion.get_by_bucket(
                bucket.id, versions=versions is not missing).limit(1000).all(),
            context={
                'class': ObjectVersion,
                'bucket': bucket,
                'many': True,
            }
        )
    @use_kwargs(get_args)
    @pass_bucket
    def get(self, bucket=None, versions=missing, uploads=missing):
        """Get list of objects (or multipart uploads) in the bucket.

        :param bucket: A :class:`invenio_files_rest.models.Bucket` instance.
        :returns: The Flask response.
        """
        if uploads is not missing:
            return self.multipart_listuploads(bucket)
        else:
            return self.listobjects(bucket, versions)
    @pass_bucket
    @need_bucket_permission('bucket-read')
    def head(self, bucket=None, **kwargs):
        """Check the existence of the bucket."""
        # NOTE(review): the body is intentionally empty -- the decorators
        # abort with 404 when the bucket is missing/forbidden; confirm the
        # framework converts the implicit None into an empty 200 response.
class ObjectResource(ContentNegotiatedMethodView):
    """Object item resource.

    Handles object download/upload/delete plus the whole multipart upload
    lifecycle (init, part upload, complete, abort).
    """
    delete_args = {
        'version_id': fields.UUID(
            location='query',
            load_from='versionId',
            missing=None,
        ),
        'upload_id': fields.UUID(
            location='query',
            load_from='uploadId',
            missing=None,
        ),
        # A bare '?uploads' subresource is invalid on DELETE: reject 405.
        'uploads': fields.Raw(
            location='query',
            validate=invalid_subresource_validator,
        ),
    }
    get_args = dict(
        delete_args,
        download=fields.Raw(
            location='query',
            missing=None,
        )
    )
    post_args = {
        'uploads': fields.Raw(
            location='query',
        ),
        'upload_id': fields.UUID(
            location='query',
            load_from='uploadId',
            missing=None,
        )
    }
    put_args = {
        'upload_id': fields.UUID(
            location='query',
            load_from='uploadId',
            missing=None,
        ),
    }
    multipart_init_args = {
        'size': fields.Int(
            locations=('query', 'json'),
            missing=None,
        ),
        'part_size': fields.Int(
            locations=('query', 'json'),
            missing=None,
            load_from='partSize',
        ),
    }
    def __init__(self, *args, **kwargs):
        """Instantiate content negotiated view."""
        super(ObjectResource, self).__init__(*args, **kwargs)
    #
    # ObjectVersion helpers
    #
    @staticmethod
    def check_object_permission(obj):
        """Abort (hidden 404 / explicit error) unless *obj* is readable."""
        check_permission(current_permission_factory(
            obj,
            'object-read'
        ))
        # Reading a non-head version requires a further, visible check.
        if not obj.is_head:
            check_permission(
                current_permission_factory(obj, 'object-read-version'),
                hidden=False
            )
    @classmethod
    def get_object(cls, bucket, key, version_id):
        """Retrieve object and abort if it doesn't exists.

        If the file is not found, the connection is aborted and the 404
        error is returned.

        :param bucket: The bucket (instance or id) to get the object from.
        :param key: The file key.
        :param version_id: The version ID.
        :returns: A :class:`invenio_files_rest.models.ObjectVersion` instance.
        """
        obj = ObjectVersion.get(bucket, key, version_id=version_id)
        if not obj:
            abort(404, 'Object does not exists.')
        cls.check_object_permission(obj)
        return obj
    def create_object(self, bucket, key):
        """Create a new object.

        :param bucket: The bucket (instance or id) to get the object from.
        :param key: The file key.
        :returns: A Flask response.
        """
        # Initial validation of size based on Content-Length.
        # User can tamper with Content-Length, so this is just an initial up
        # front check. The storage subsystem must validate the size limit as
        # well.
        stream, content_length, content_md5, tags = \
            current_files_rest.upload_factory()
        size_limit = bucket.size_limit
        if content_length and size_limit and content_length > size_limit:
            desc = 'File size limit exceeded.' \
                if isinstance(size_limit, int) else size_limit.reason
            raise FileSizeError(description=desc)
        with db.session.begin_nested():
            obj = ObjectVersion.create(bucket, key)
            obj.set_contents(
                stream, size=content_length, size_limit=size_limit)
            # Check add tags
            # Fix: loop variables renamed so they no longer shadow the
            # `key` parameter (the object key) of this method.
            if tags:
                for tag_key, tag_value in tags.items():
                    ObjectVersionTag.create(obj, tag_key, tag_value)
        db.session.commit()
        return self.make_response(
            data=obj,
            context={
                'class': ObjectVersion,
                'bucket': bucket,
            },
            etag=obj.file.checksum
        )
    @need_permissions(
        lambda self, bucket, obj, *args: obj,
        'object-delete',
        hidden=False,  # Because get_object permission check has already run
    )
    def delete_object(self, bucket, obj, version_id):
        """Delete an existing object.

        :param bucket: The bucket (instance or id) to get the object from.
        :param obj: A :class:`invenio_files_rest.models.ObjectVersion`
            instance.
        :param version_id: The version ID.
        :returns: A Flask response (204 No Content).
        """
        if version_id is None:
            # Create a delete marker.
            with db.session.begin_nested():
                ObjectVersion.delete(bucket, obj.key)
        else:
            # Permanently delete specific object version.
            check_permission(
                current_permission_factory(bucket, 'object-delete-version'),
                hidden=False,
            )
            obj.remove()
            # Set newest object as head
            if obj.is_head:
                obj_to_restore = \
                    ObjectVersion.get_versions(obj.bucket,
                                               obj.key,
                                               desc=True).first()
                if obj_to_restore:
                    obj_to_restore.is_head = True
            # File data removal is deferred to a background task.
            if obj.file_id:
                remove_file_data.delay(str(obj.file_id))
        db.session.commit()
        return self.make_response('', 204)
    @staticmethod
    def send_object(bucket, obj, expected_chksum=None,
                    logger_data=None, restricted=True, as_attachment=False):
        """Send an object for a given bucket.

        :param bucket: The bucket (instance or id) to get the object from.
        :param obj: A :class:`invenio_files_rest.models.ObjectVersion`
            instance.
        :params expected_chksum: Expected checksum.
        :param logger_data: The python logger.
        :param restricted: Passed through to ``Object.send_file()``.
        :param as_attachment: Passed through to ``Object.send_file()``.
        :returns: A Flask response.
        """
        if not obj.is_head:
            check_permission(
                current_permission_factory(obj, 'object-read-version'),
                hidden=False
            )
        # A checksum mismatch is logged but the file is still served.
        if expected_chksum and obj.file.checksum != expected_chksum:
            current_app.logger.warning(
                'File checksum mismatch detected.', extra=logger_data)
        file_downloaded.send(current_app._get_current_object(), obj=obj)
        return obj.send_file(restricted=restricted,
                             as_attachment=as_attachment)
    #
    # MultipartObject helpers
    #
    @pass_multipart(with_completed=True)
    @need_permissions(
        lambda self, multipart: multipart,
        'multipart-read'
    )
    def multipart_listparts(self, multipart):
        """Get parts of a multipart upload (capped at 1000 entries).

        :param multipart: A :class:`invenio_files_rest.models.MultipartObject`
            instance.
        :returns: A Flask response.
        """
        return self.make_response(
            data=Part.query_by_multipart(
                multipart).order_by(Part.part_number).limit(1000).all(),
            context={
                'class': Part,
                'multipart': multipart,
                'many': True,
            }
        )
    @use_kwargs(multipart_init_args)
    def multipart_init(self, bucket, key, size=None, part_size=None):
        """Initialize a multipart upload.

        :param bucket: The bucket (instance or id) to get the object from.
        :param key: The file key.
        :param size: The total size.
        :param part_size: The part size.
        :raises invenio_files_rest.errors.MissingQueryParameter: If size or
            part_size are not defined.
        :returns: A Flask response.
        """
        if size is None:
            raise MissingQueryParameter('size')
        if part_size is None:
            raise MissingQueryParameter('partSize')
        multipart = MultipartObject.create(bucket, key, size, part_size)
        db.session.commit()
        return self.make_response(
            data=multipart,
            context={
                'class': MultipartObject,
                'bucket': bucket,
            }
        )
    @pass_multipart(with_completed=True)
    def multipart_uploadpart(self, multipart):
        """Upload a part.

        :param multipart: A :class:`invenio_files_rest.models.MultipartObject`
            instance.
        :returns: A Flask response.
        """
        content_length, part_number, stream, content_type, content_md5, tags =\
            current_files_rest.multipart_partfactory()
        if content_length:
            # Only the final part may differ in size from the chunk size.
            ck = multipart.last_part_size if \
                part_number == multipart.last_part_number \
                else multipart.chunk_size
            if ck != content_length:
                raise MultipartInvalidChunkSize()
        # Create part
        try:
            p = Part.get_or_create(multipart, part_number)
            p.set_contents(stream)
            db.session.commit()
        except Exception:
            # We remove the Part since incomplete data may have been written to
            # disk (e.g. client closed connection etc.) so it must be
            # reuploaded.
            db.session.rollback()
            Part.delete(multipart, part_number)
            raise
        return self.make_response(
            data=p,
            context={
                'class': Part,
            },
            etag=p.checksum
        )
    @pass_multipart(with_completed=True)
    def multipart_complete(self, multipart):
        """Complete a multipart upload.

        :param multipart: A :class:`invenio_files_rest.models.MultipartObject`
            instance.
        :returns: A Flask response.
        """
        multipart.complete()
        db.session.commit()
        version_id = str(uuid.uuid4())
        return self.make_response(
            data=multipart,
            context={
                'class': MultipartObject,
                'bucket': multipart.bucket,
                'object_version_id': version_id,
            },
            # This will wait for the result, and send whitespace on the
            # connection until the task has finished (or max timeout reached).
            task_result=merge_multipartobject.delay(
                str(multipart.upload_id),
                version_id=version_id,
            ),
        )
    @pass_multipart()
    @need_permissions(
        lambda self, multipart: multipart,
        'multipart-delete',
    )
    def multipart_delete(self, multipart):
        """Abort a multipart upload.

        :param multipart: A :class:`invenio_files_rest.models.MultipartObject`
            instance.
        :returns: A Flask response (204 No Content).
        """
        multipart.delete()
        db.session.commit()
        if multipart.file_id:
            remove_file_data.delay(str(multipart.file_id))
        return self.make_response('', 204)
    #
    # HTTP methods implementations
    #
    @use_kwargs(get_args)
    @pass_bucket
    def get(self, bucket=None, key=None, version_id=None, upload_id=None,
            uploads=None, download=None):
        """Get object or list parts of a multpart upload.

        :param bucket: The bucket (instance or id) to get the object from.
            (Default: ``None``)
        :param key: The file key. (Default: ``None``)
        :param version_id: The version ID. (Default: ``None``)
        :param upload_id: The upload ID. (Default: ``None``)
        :param download: The download flag. (Default: ``None``)
        :returns: A Flask response.
        """
        if upload_id:
            return self.multipart_listparts(bucket, key, upload_id)
        else:
            obj = self.get_object(bucket, key, version_id)
            # If 'download' is missing from query string it will have
            # the value None.
            return self.send_object(bucket, obj,
                                    as_attachment=download is not None)
    @use_kwargs(post_args)
    @pass_bucket
    @need_bucket_permission('bucket-update')
    @ensure_input_stream_is_not_exhausted
    def post(self, bucket=None, key=None, uploads=missing, upload_id=None):
        """Upload a new object or start/complete a multipart upload.

        :param bucket: The bucket (instance or id) to get the object from.
            (Default: ``None``)
        :param key: The file key. (Default: ``None``)
        :param upload_id: The upload ID. (Default: ``None``)
        :returns: A Flask response.
        """
        if uploads is not missing:
            return self.multipart_init(bucket, key)
        elif upload_id is not None:
            return self.multipart_complete(bucket, key, upload_id)
        abort(403)
    @use_kwargs(put_args)
    @pass_bucket
    @need_bucket_permission('bucket-update')
    @ensure_input_stream_is_not_exhausted
    def put(self, bucket=None, key=None, upload_id=None):
        """Update a new object or upload a part of a multipart upload.

        :param bucket: The bucket (instance or id) to get the object from.
            (Default: ``None``)
        :param key: The file key. (Default: ``None``)
        :param upload_id: The upload ID. (Default: ``None``)
        :returns: A Flask response.
        """
        if upload_id is not None:
            return self.multipart_uploadpart(bucket, key, upload_id)
        else:
            return self.create_object(bucket, key)
    @use_kwargs(delete_args)
    @pass_bucket
    def delete(self, bucket=None, key=None, version_id=None, upload_id=None,
               uploads=None):
        """Delete an object or abort a multipart upload.

        :param bucket: The bucket (instance or id) to get the object from.
            (Default: ``None``)
        :param key: The file key. (Default: ``None``)
        :param version_id: The version ID. (Default: ``None``)
        :param upload_id: The upload ID. (Default: ``None``)
        :returns: A Flask response.
        """
        if upload_id is not None:
            return self.multipart_delete(bucket, key, upload_id)
        else:
            obj = self.get_object(bucket, key, version_id)
            return self.delete_object(bucket, obj, version_id)
#
# Blueprint definition
#
# Instantiate the three views with JSON as the only negotiated media type
# and wire them onto the blueprint.
location_view = LocationResource.as_view(
    'location_api',
    serializers={
        'application/json': json_serializer,
    }
)
bucket_view = BucketResource.as_view(
    'bucket_api',
    serializers={
        'application/json': json_serializer,
    }
)
object_view = ObjectResource.as_view(
    'object_api',
    serializers={
        'application/json': json_serializer,
    }
)
# '' -> location/service root; '/<bucket>' -> bucket; '/<bucket>/<key>'
# (path converter, so keys may contain slashes) -> object.
blueprint.add_url_rule(
    '',
    view_func=location_view,
)
blueprint.add_url_rule(
    '/<string:bucket_id>',
    view_func=bucket_view,
)
blueprint.add_url_rule(
    '/<string:bucket_id>/<path:key>',
    view_func=object_view,
)
| 31.31069 | 79 | 0.603265 |
e7ea33e18d66edba3b0a9e583c986f32cf9c67cc | 4,713 | py | Python | kinow_client/apis/genders_api.py | kinow-io/kinow-python-sdk | 4c1699a3c78048b84287bd049a669651a5b4e2d5 | [
"Apache-2.0"
] | 1 | 2019-06-26T14:24:54.000Z | 2019-06-26T14:24:54.000Z | kinow_client/apis/genders_api.py | kinow-io/kinow-python-sdk | 4c1699a3c78048b84287bd049a669651a5b4e2d5 | [
"Apache-2.0"
] | null | null | null | kinow_client/apis/genders_api.py | kinow-io/kinow-python-sdk | 4c1699a3c78048b84287bd049a669651a5b4e2d5 | [
"Apache-2.0"
] | 1 | 2018-02-01T10:08:40.000Z | 2018-02-01T10:08:40.000Z | # coding: utf-8
"""
Server API
Reference for Server API (REST/Json)
OpenAPI spec version: 1.4.58
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import sys
import os
import re
# python 2 and python 3 compatibility library
from six import iteritems
from ..configuration import Configuration
from ..api_client import ApiClient
class GendersApi(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
    def __init__(self, api_client=None):
        # Use the caller-supplied client if given; otherwise fall back to
        # (and lazily create) the library-wide default ApiClient held on
        # the shared Configuration singleton.
        config = Configuration()
        if api_client:
            self.api_client = api_client
        else:
            if not config.api_client:
                config.api_client = ApiClient()
            self.api_client = config.api_client
def get_genders(self, **kwargs):
"""
Get gender list
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_genders(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int page:
:param int per_page:
:return: Genders
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('callback'):
return self.get_genders_with_http_info(**kwargs)
else:
(data) = self.get_genders_with_http_info(**kwargs)
return data
def get_genders_with_http_info(self, **kwargs):
"""
Get gender list
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please define a `callback` function
to be invoked when receiving the response.
>>> def callback_function(response):
>>> pprint(response)
>>>
>>> thread = api.get_genders_with_http_info(callback=callback_function)
:param callback function: The callback function
for asynchronous request. (optional)
:param int page:
:param int per_page:
:return: Genders
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['page', 'per_page']
all_params.append('callback')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_genders" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
resource_path = '/genders'.replace('{format}', 'json')
path_params = {}
query_params = {}
if 'page' in params:
query_params['page'] = params['page']
if 'per_page' in params:
query_params['per_page'] = params['per_page']
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# Authentication setting
auth_settings = ['ApiClientId', 'ApiClientSecret']
return self.api_client.call_api(resource_path, 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Genders',
auth_settings=auth_settings,
callback=params.get('callback'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
| 33.664286 | 100 | 0.558668 |
4f17592b252a9a0a626ff6709c78c3b04bfee74d | 515 | py | Python | handlers/users/start.py | AleksZavg/Admin-telegram-bot | c671419ba9fd5e93df742ebe9443d72afa4c99aa | [
"MIT"
] | null | null | null | handlers/users/start.py | AleksZavg/Admin-telegram-bot | c671419ba9fd5e93df742ebe9443d72afa4c99aa | [
"MIT"
] | null | null | null | handlers/users/start.py | AleksZavg/Admin-telegram-bot | c671419ba9fd5e93df742ebe9443d72afa4c99aa | [
"MIT"
] | null | null | null | from typing import Text
from aiogram import types
from aiogram.dispatcher.filters.builtin import CommandStart, Text
from aiogram.types.reply_keyboard import ReplyKeyboardRemove
from keyboards.default.after_start import after_start_kb
from loader import dp
@dp.message_handler(CommandStart())
async def bot_start(message: types.Message):
    # Handle /start: greet the user (with the post-start keyboard) and send a sticker.
    await message.answer("Приветики", reply_markup=after_start_kb)
    await message.answer_sticker("CAACAgIAAxkBAAECaLxgwdkd0VAsZfZbK94vR1LU1MgNiQAC7AADAexmGqwgu7-uFm-tHwQ")
| 36.785714 | 107 | 0.846602 |
1ab53055457de4506fd53fe5dcdf683f8b4b386d | 10,602 | py | Python | examples/Deeplab/experiments/deeplabv2.naked.fpn.learnable.sobel.channelwise.bilinear.py | MarcWong/tensorpack | 51ab279480dc1e3ffdc07884a9e8149dea9651e9 | [
"Apache-2.0"
] | 5 | 2018-05-04T02:04:15.000Z | 2020-04-02T05:38:48.000Z | examples/Deeplab/experiments/deeplabv2.naked.fpn.learnable.sobel.channelwise.bilinear.py | MarcWong/tensorpack | 51ab279480dc1e3ffdc07884a9e8149dea9651e9 | [
"Apache-2.0"
] | null | null | null | examples/Deeplab/experiments/deeplabv2.naked.fpn.learnable.sobel.channelwise.bilinear.py | MarcWong/tensorpack | 51ab279480dc1e3ffdc07884a9e8149dea9651e9 | [
"Apache-2.0"
] | 2 | 2018-04-23T13:43:10.000Z | 2019-10-30T09:56:54.000Z | #!/usr/bin/env python
# -*- coding: UTF-8 -*-
# File: deeplabv2.py
# Author: Tao Hu <taohu620@gmail.com>
import cv2
import tensorflow as tf
import argparse
from six.moves import zip
import os
import numpy as np
os.environ['TENSORPACK_TRAIN_API'] = 'v2' # will become default soon
from tensorpack import *
from tensorpack.dataflow import dataset
from tensorpack.utils.gpu import get_nr_gpu
from tensorpack.utils.segmentation.segmentation import predict_slider, visualize_label, predict_scaler
from tensorpack.utils.stats import MIoUStatistics
from tensorpack.dataflow.imgaug.misc import RandomCropWithPadding
from tensorpack.utils import logger
from tensorpack.tfutils import optimizer
from tensorpack.tfutils.summary import add_moving_summary, add_param_summary
import tensorpack.tfutils.symbolic_functions as symbf
from tqdm import tqdm
from resnet_model_fpn_learnable_sobel_channelwise_bilinear import (
preresnet_group, preresnet_basicblock, preresnet_bottleneck,
resnet_group, resnet_basicblock, resnet_bottleneck, se_resnet_bottleneck,
resnet_backbone)
CLASS_NUM = 21
CROP_SIZE = 512
IGNORE_LABEL = 255
class Model(ModelDesc):
    """Segmentation model: ResNet-101 backbone producing per-pixel class logits,
    trained with softmax cross-entropy that ignores IGNORE_LABEL pixels."""

    def _get_inputs(self):
        ## Set static shape so that tensorflow knows shape at compile time.
        return [InputDesc(tf.float32, [None, CROP_SIZE, CROP_SIZE, 3], 'image'),
                InputDesc(tf.int32, [None, CROP_SIZE, CROP_SIZE], 'gt')]

    def _build_graph(self, inputs):
        def resnet101(image, label):
            # Select block types/counts for a depth-101 post-activation ResNet.
            mode = 'resnet'
            depth = 101
            basicblock = preresnet_basicblock if mode == 'preact' else resnet_basicblock
            bottleneck = {
                'resnet': resnet_bottleneck,
                'preact': preresnet_bottleneck,
                'se': se_resnet_bottleneck}[mode]
            num_blocks, block_func = {
                18: ([2, 2, 2, 2], basicblock),
                34: ([3, 4, 6, 3], basicblock),
                50: ([3, 4, 6, 3], bottleneck),
                101: ([3, 4, 23, 3], bottleneck),
                152: ([3, 8, 36, 3], bottleneck)
            }[depth]

            def get_logits(image):
                with argscope([Conv2D, MaxPooling, GlobalAvgPooling, BatchNorm], data_format="NHWC"):
                    return resnet_backbone(
                        image, num_blocks,
                        preresnet_group if mode == 'preact' else resnet_group, block_func, CLASS_NUM)

            return get_logits(image)

        image, label = inputs
        # Subtract the BGR channel means before feeding the backbone.
        image = image - tf.constant([104, 116, 122], dtype='float32')
        label = tf.identity(label, name="label")
        predict = resnet101(image, label)

        costs = []
        prob = tf.nn.softmax(predict, name='prob')

        label4d = tf.expand_dims(label, 3, name='label4d')
        new_size = prob.get_shape()[1:3]  # NOTE(review): currently unused
        # label_resized = tf.image.resize_nearest_neighbor(label4d, new_size)

        cost = symbf.softmax_cross_entropy_with_ignore_label(logits=predict, label=label4d,
                                                             class_num=CLASS_NUM)
        prediction = tf.argmax(prob, axis=-1, name="prediction")
        cost = tf.reduce_mean(cost, name='cross_entropy_loss')  # the average cross-entropy loss
        costs.append(cost)

        if get_current_tower_context().is_training:
            # Weight decay coefficient itself decays over training steps.
            wd_w = tf.train.exponential_decay(2e-4, get_global_step_var(),
                                              80000, 0.7, True)
            wd_cost = tf.multiply(wd_w, regularize_cost('.*/W', tf.nn.l2_loss), name='wd_cost')
            costs.append(wd_cost)

            add_param_summary(('.*/W', ['histogram']))  # monitor W
            self.cost = tf.add_n(costs, name='cost')
            add_moving_summary(costs + [self.cost])

    def _get_optimizer(self):
        lr = tf.get_variable('learning_rate', initializer=2.5e-4, trainable=False)
        opt = tf.train.AdamOptimizer(lr, epsilon=2.5e-4)
        # Empty ScaleGradient list: no per-variable scaling is applied.
        return optimizer.apply_grad_processors(
            opt, [gradproc.ScaleGradient([])])
def get_data(name, data_dir, meta_dir, batch_size):
    """Build the PascalVOC12 dataflow; training gets augmentation + batching + prefetch."""
    isTrain = name == 'train'
    ds = dataset.PascalVOC12(data_dir, meta_dir, name, shuffle=True)

    if isTrain:
        # Geometric augmentations applied jointly to image and label.
        shape_aug = [
            imgaug.RandomResize(xrange=(0.7, 1.5), yrange=(0.7, 1.5),
                                aspect_ratio_thres=0.15),
            RandomCropWithPadding(CROP_SIZE, IGNORE_LABEL),
            imgaug.Flip(horiz=True),
        ]
    else:
        shape_aug = []
    ds = AugmentImageComponents(ds, shape_aug, (0, 1), copy=False)

    def f(ds):
        # Identity mapping (placeholder for per-sample preprocessing).
        return ds

    if isTrain:
        ds = MapData(ds, f)
        ds = BatchData(ds, batch_size)
        ds = PrefetchDataZMQ(ds, 1)
    else:
        ds = BatchData(ds, 1)
    return ds
def view_data(data_dir, meta_dir, batch_size):
    """Interactively display augmented training samples (press a key to advance)."""
    ds = RepeatedData(get_data('train', data_dir, meta_dir, batch_size), -1)
    ds.reset_state()
    for ims, labels in ds.get_data():
        for im, label in zip(ims, labels):
            cv2.imshow("im", im / 255.0)
            cv2.imshow("raw-label", label)
            cv2.imshow("color-label", visualize_label(label))
            cv2.waitKey(0)
def get_config(data_dir, meta_dir, batch_size):
    """Assemble the training configuration (callbacks, schedule, epochs)."""
    logger.auto_set_dir()
    dataset_train = get_data('train', data_dir, meta_dir, batch_size)
    steps_per_epoch = dataset_train.size() * 8
    dataset_val = get_data('val', data_dir, meta_dir, batch_size)  # NOTE(review): currently unused

    return TrainConfig(
        dataflow=dataset_train,
        callbacks=[
            ModelSaver(),
            ScheduledHyperParamSetter('learning_rate', [(2, 1e-4), (4, 1e-5), (6, 8e-6)]),
            HumanHyperParamSetter('learning_rate'),
            PeriodicTrigger(CalculateMIoU(CLASS_NUM), every_k_epochs=1),
            ProgressBar(["cross_entropy_loss", "cost", "wd_cost"])  # uncomment it to debug for every step
        ],
        model=Model(),
        steps_per_epoch=steps_per_epoch,
        max_epoch=10,
    )
def run(model_path, image_path, output):
    """Run inference on a single image, writing prediction image(s) to disk."""
    pred_config = PredictConfig(
        model=Model(),
        session_init=get_model_loader(model_path),
        input_names=['image'],
        output_names=['output' + str(k) for k in range(1, 7)])
    predictor = OfflinePredictor(pred_config)
    im = cv2.imread(image_path)
    assert im is not None
    # Round spatial dims down to a multiple of 16 and add a batch dimension.
    im = cv2.resize(
        im, (im.shape[1] // 16 * 16, im.shape[0] // 16 * 16)
    )[None, :, :, :].astype('float32')
    outputs = predictor(im)
    if output is None:
        # Write each side output plus the fused result.
        for k in range(6):
            pred = outputs[k][0]
            cv2.imwrite("out{}.png".format(
                '-fused' if k == 5 else str(k + 1)), pred * 255)
    else:
        pred = outputs[5][0]
        cv2.imwrite(output, pred * 255)
def proceed_validation(args, is_save=True, is_densecrf=False):
    """Evaluate the loaded model on the validation split and log mIoU/accuracy."""
    import cv2
    ds = dataset.PascalVOC12Edge(args.data_dir, args.meta_dir, "val")
    ds = BatchData(ds, 1)

    pred_config = PredictConfig(
        model=Model(),
        session_init=get_model_loader(args.load),
        input_names=['image'],
        output_names=['prob'])
    predictor = OfflinePredictor(pred_config)

    i = 0
    stat = MIoUStatistics(CLASS_NUM)
    logger.info("start validation....")
    for image, label in tqdm(ds.get_data()):
        label = np.squeeze(label)
        image = np.squeeze(image)
        # Multi-scale sliding-window prediction, then per-pixel argmax.
        prediction = predict_scaler(image, predictor, scales=[0.9, 1, 1.1], classes=CLASS_NUM,
                                    tile_size=CROP_SIZE, is_densecrf=is_densecrf)
        prediction = np.argmax(prediction, axis=2)
        stat.feed(prediction, label)
        if is_save:
            # Side-by-side: input | ground truth | prediction.
            cv2.imwrite("result/{}.png".format(i),
                        np.concatenate((image, visualize_label(label), visualize_label(prediction)), axis=1))
        i += 1

    logger.info("mIoU: {}".format(stat.mIoU))
    logger.info("mean_accuracy: {}".format(stat.mean_accuracy))
    logger.info("accuracy: {}".format(stat.accuracy))
    stat.print_confusion_matrix()
class CalculateMIoU(Callback):
    """Training callback: periodically computes validation mIoU/accuracy
    and pushes them to the training monitors."""

    def __init__(self, nb_class):
        self.nb_class = nb_class

    def _setup_graph(self):
        self.pred = self.trainer.get_predictor(['image'], ['prob'])

    def _before_train(self):
        pass

    def _trigger(self):
        # Relies on the module-level CLI `args` for dataset paths.
        global args
        self.val_ds = get_data('val', args.data_dir, args.meta_dir, args.batch_size)
        self.val_ds.reset_state()

        self.stat = MIoUStatistics(self.nb_class)

        for image, label in tqdm(self.val_ds.get_data()):
            label = np.squeeze(label)
            image = np.squeeze(image)
            prediction = predict_scaler(image, self.pred, scales=[0.9, 1, 1.1], classes=CLASS_NUM,
                                        tile_size=CROP_SIZE, is_densecrf=False)
            prediction = np.argmax(prediction, axis=2)
            self.stat.feed(prediction, label)

        self.trainer.monitors.put_scalar("mIoU", self.stat.mIoU)
        self.trainer.monitors.put_scalar("mean_accuracy", self.stat.mean_accuracy)
        self.trainer.monitors.put_scalar("accuracy", self.stat.accuracy)
if __name__ == '__main__':
    parser = argparse.ArgumentParser()
    parser.add_argument('--gpu', default="2", help='comma separated list of GPU(s) to use.')
    parser.add_argument('--data_dir', default="/data2/dataset/pascalvoc2012/VOC2012trainval/VOCdevkit/VOC2012",
                        help='dataset dir')
    parser.add_argument('--meta_dir', default="../metadata/pascalvoc12", help='meta dir')
    parser.add_argument('--load', default="../resnet101.npz", help='load model')
    parser.add_argument('--view', help='view dataset', action='store_true')
    parser.add_argument('--run', help='run model on images')
    parser.add_argument('--batch_size', type=int, default=10, help='batch_size')
    parser.add_argument('--output', help='fused output filename. default to out-fused.png')
    parser.add_argument('--validation', action='store_true', help='validate model on validation images')
    args = parser.parse_args()

    if args.gpu:
        os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu

    # Dispatch: view dataset / single-image inference / validation / training.
    if args.view:
        view_data(args.data_dir, args.meta_dir, args.batch_size)
    elif args.run:
        run(args.load, args.run, args.output)
    elif args.validation:
        proceed_validation(args)
    else:
        config = get_config(args.data_dir, args.meta_dir, args.batch_size)
        if args.load:
            config.session_init = get_model_loader(args.load)
        launch_train_with_config(
            config,
            SyncMultiGPUTrainer(max(get_nr_gpu(), 1)))
| 36.8125 | 142 | 0.627523 |
ce4ea38224d0505ab8b7720eb4ced55d178a1768 | 72 | py | Python | follow/urls.py | Han-Joon-Hyeok/booklog | b1ce850d23242a53e8651543cfb9fe870b92b21f | [
"MIT"
] | 4 | 2021-06-30T07:14:22.000Z | 2021-08-16T04:52:35.000Z | follow/urls.py | Han-Joon-Hyeok/booklog | b1ce850d23242a53e8651543cfb9fe870b92b21f | [
"MIT"
] | null | null | null | follow/urls.py | Han-Joon-Hyeok/booklog | b1ce850d23242a53e8651543cfb9fe870b92b21f | [
"MIT"
] | 4 | 2021-06-27T04:26:21.000Z | 2021-08-13T13:00:56.000Z | from django.urls import path
from . import views
# URL routes for the follow app; no routes registered yet — add path(...) entries here.
urlpatterns = [
]
2004cea83e88ef382295623b0dac17af2bc90e5c | 2,167 | py | Python | Configuration/DataProcessing/test/RunRepack.py | Purva-Chaudhari/cmssw | 32e5cbfe54c4d809d60022586cf200b7c3020bcf | [
"Apache-2.0"
] | 852 | 2015-01-11T21:03:51.000Z | 2022-03-25T21:14:00.000Z | Configuration/DataProcessing/test/RunRepack.py | Purva-Chaudhari/cmssw | 32e5cbfe54c4d809d60022586cf200b7c3020bcf | [
"Apache-2.0"
] | 30,371 | 2015-01-02T00:14:40.000Z | 2022-03-31T23:26:05.000Z | Configuration/DataProcessing/test/RunRepack.py | Purva-Chaudhari/cmssw | 32e5cbfe54c4d809d60022586cf200b7c3020bcf | [
"Apache-2.0"
] | 3,240 | 2015-01-02T05:53:18.000Z | 2022-03-31T17:24:21.000Z | #!/usr/bin/env python3
"""
_RunRepack_
Test/Debugging harness for the repack configuration builder
"""
from __future__ import print_function
import sys
import getopt
from Configuration.DataProcessing.Repack import repackProcess
class RunRepack:
    """Test/debug harness: builds a two-output repack cmsRun configuration."""

    def __init__(self):
        self.selectEvents = None  # optional comma-separated trigger-path selection
        self.inputLFN = None      # required input LFN

    def __call__(self):
        """Generate RunRepackCfg.py for the configured LFN; raises RuntimeError on error."""
        if self.inputLFN is None:
            msg = "No --lfn specified"
            raise RuntimeError(msg)

        outputs = []
        outputs.append({'moduleLabel': "write_PrimDS1_RAW"})
        outputs.append({'moduleLabel': "write_PrimDS2_RAW"})
        if self.selectEvents is not None:
            # Apply the same trigger-path selection to both output modules.
            outputs[0]['selectEvents'] = self.selectEvents.split(',')
            outputs[1]['selectEvents'] = self.selectEvents.split(',')

        try:
            process = repackProcess(outputs=outputs)
        except Exception as ex:
            msg = "Error creating process for Repack:\n"
            msg += str(ex)
            raise RuntimeError(msg)

        process.source.fileNames.append(self.inputLFN)
        import FWCore.ParameterSet.Config as cms
        process.maxEvents = cms.untracked.PSet(input=cms.untracked.int32(10))

        psetFile = open("RunRepackCfg.py", "w")
        psetFile.write(process.dumpPython())
        psetFile.close()
        cmsRun = "cmsRun -e RunRepackCfg.py"
        print("Now do:\n%s" % cmsRun)
if __name__ == '__main__':
    # Long options accepted on the command line.
    valid = ["select-events=", "lfn="]
    usage = \
"""
RunRepack.py <options>
Where options are:
--select-events (option, event selection based on trigger paths)
--lfn=/store/input/lfn
Example:
python RunRepack.py --select-events HLT:path1,HLT:path2 --lfn /store/whatever
"""
    try:
        opts, args = getopt.getopt(sys.argv[1:], "", valid)
    except getopt.GetoptError as ex:
        print(usage)
        print(str(ex))
        sys.exit(1)

    repackinator = RunRepack()
    for opt, arg in opts:
        if opt == "--select-events":
            repackinator.selectEvents = arg
        if opt == "--lfn":
            repackinator.inputLFN = arg

    repackinator()
| 24.625 | 81 | 0.602677 |
c1f1c1237e48c1ccecfd0da80c53e83804d660da | 3,304 | py | Python | shop/orders/models.py | Anych/mila-iris | cbb16fc9ab2b85232e4c05446697fc82b78bc8e4 | [
"MIT"
] | null | null | null | shop/orders/models.py | Anych/mila-iris | cbb16fc9ab2b85232e4c05446697fc82b78bc8e4 | [
"MIT"
] | null | null | null | shop/orders/models.py | Anych/mila-iris | cbb16fc9ab2b85232e4c05446697fc82b78bc8e4 | [
"MIT"
] | null | null | null | from django.db import models
from django.urls import reverse
from accounts.models import Account
from products.models import Product, Size
class Order(models.Model):
    """Model for order."""

    class Meta:
        verbose_name = 'Заказ'
        verbose_name_plural = 'Заказы'

    # (value, human-readable label) pairs for the order status field.
    STATUS = (
        ('Новый', 'Новый'),
        ('Подтверждён', 'Подтверждён'),
        ('Завершён', 'Завершён'),
        ('Отменён', 'Отменён'),
    )

    user = models.ForeignKey(Account, on_delete=models.SET_NULL, null=True, verbose_name='Пользователь')
    order_number = models.CharField(max_length=255, verbose_name='Номер заказа')
    first_name = models.CharField(max_length=255, verbose_name='Имя')
    last_name = models.CharField(max_length=255, verbose_name='Фамилия')
    phone = models.CharField(max_length=255, verbose_name='Номер')
    email = models.EmailField(max_length=255, verbose_name='Почта')
    address = models.CharField(max_length=255, verbose_name='Адрес')
    country = models.CharField(max_length=255, verbose_name='Страна')
    state = models.CharField(max_length=255, verbose_name='Область')
    city = models.CharField(max_length=255, verbose_name='Город')
    order_note = models.TextField(max_length=255, blank=True, verbose_name='Примечания')
    order_total = models.DecimalField(max_digits=10, decimal_places=0, verbose_name='Общая сумма')
    delivery = models.DecimalField(max_digits=10, decimal_places=0, verbose_name='Стоимость доставки')
    status = models.CharField(max_length=255, choices=STATUS, default='Новый', verbose_name='Статус')
    ip = models.CharField(max_length=255, blank=True, verbose_name='IP')
    is_ordered = models.BooleanField(default=False, verbose_name='В заказе')
    created_at = models.DateTimeField(auto_now_add=True, verbose_name='Создано')
    updated_at = models.DateTimeField(auto_now=True, verbose_name='Обновлено')

    def full_name(self):
        """Function return full name."""
        return f'{self.first_name} {self.last_name}'

    def __str__(self):
        return self.first_name

    def get_absolute_url(self):
        # Orders are addressed by their order_number, not their primary key.
        return reverse('order', kwargs={'order_number': self.order_number})
class OrderProduct(models.Model):
    """Model for order products."""

    class Meta:
        verbose_name = 'Заказ на продукт'
        verbose_name_plural = 'Заказы на продукты'

    order = models.ForeignKey(Order, on_delete=models.CASCADE, verbose_name='Заказ')
    user = models.ForeignKey(Account, on_delete=models.CASCADE, verbose_name='Пользователь')
    product = models.ForeignKey(Product, on_delete=models.CASCADE, verbose_name='Продукт')
    size = models.ForeignKey(Size, blank=True, on_delete=models.CASCADE, verbose_name='Размер')
    quantity = models.IntegerField(verbose_name='Колличество')
    # Price is captured at order time so later product price changes don't affect the order.
    product_price = models.DecimalField(max_digits=10, decimal_places=0, verbose_name='Цена продукта')
    ordered = models.BooleanField(default=False, verbose_name='Заказан')
    created_at = models.DateTimeField(auto_now_add=True, verbose_name='Создано')
    updated_at = models.DateTimeField(auto_now=True, verbose_name='Обновлено')

    def __str__(self):
        return f'{self.product.article} - {self.product.category.name}: {self.product.brand}'

    def sub_total(self):
        # Line-item total: unit price captured at order time x quantity.
        return self.product_price * self.quantity
| 45.888889 | 104 | 0.72609 |
f62971b52caa7cf64bb0fa1e58fd9bd467269d9c | 38 | py | Python | eddrit/const.py | corenting/eddrit | 640db842d48afa8f8b379f7412c90ad9216312df | [
"MIT"
] | 9 | 2020-10-16T20:29:05.000Z | 2022-03-06T08:07:06.000Z | eddrit/const.py | corenting/eddrit | 640db842d48afa8f8b379f7412c90ad9216312df | [
"MIT"
] | 7 | 2020-10-16T16:34:57.000Z | 2022-01-19T17:30:29.000Z | eddrit/const.py | corenting/eddrit | 640db842d48afa8f8b379f7412c90ad9216312df | [
"MIT"
] | null | null | null | IMAGE_HOSTING_DOMAINS = ["imgur.com"]
| 19 | 37 | 0.763158 |
0f7dc7fdb6694b4a902bc76b6762ba2556b4d74a | 182 | py | Python | lantern/grids/grid_phosphor.py | timkpaine/lantern | 945a26e05a7f1d7b09fde8a4da0daf6220f163f3 | [
"Apache-2.0"
] | 306 | 2017-09-28T02:30:46.000Z | 2022-03-08T10:56:58.000Z | lantern/grids/grid_phosphor.py | timkpaine/lantern | 945a26e05a7f1d7b09fde8a4da0daf6220f163f3 | [
"Apache-2.0"
] | 201 | 2017-09-27T17:49:16.000Z | 2021-06-09T00:48:55.000Z | lantern/grids/grid_phosphor.py | timkpaine/lantern | 945a26e05a7f1d7b09fde8a4da0daf6220f163f3 | [
"Apache-2.0"
] | 26 | 2017-10-08T21:12:27.000Z | 2022-02-01T08:40:32.000Z |
def phosphor_grid(data, **kwargs):
    """Display *data* as a raw 'text/csv' MIME bundle in a Jupyter frontend.

    `data` is expected to expose `reset_index().to_csv()` (pandas DataFrame-like).
    `kwargs` is accepted for interface compatibility but currently unused.
    """
    from IPython.display import display
    bundle = {}
    bundle['text/csv'] = data.reset_index().to_csv()
    return display(bundle, raw=True)
| 26 | 52 | 0.675824 |
b994c591f532d9129f9b22751e913af4f72aa7e1 | 6,413 | py | Python | perform/visualization/probe_plot.py | wayneisaacuy/perform | 333198b538eded5c498b236cf9d598b948dbb1e3 | [
"MIT"
] | null | null | null | perform/visualization/probe_plot.py | wayneisaacuy/perform | 333198b538eded5c498b236cf9d598b948dbb1e3 | [
"MIT"
] | null | null | null | perform/visualization/probe_plot.py | wayneisaacuy/perform | 333198b538eded5c498b236cf9d598b948dbb1e3 | [
"MIT"
] | null | null | null | import os
import matplotlib.pyplot as plt
import numpy as np
from perform.visualization.visualization import Visualization
# TODO: maybe easier to make probe/residual plots under some PointPlot class
# TODO: move some of the init input arguments used for assertions outside
class ProbePlot(Visualization):
    """Class for probe plot visualization.

    Produces plots of the time history of probe monitors at the interval specified by vis_interval.
    Must be manually expanded to plot specific profiles.

    Args:
        image_output_dir: Base image output directory, within the working directory.
        vis_id: Zero-indexed ID of this Visualization.
        sim_type: "FOM" or "ROM", depending on simulation type.
        probe_vars: List of strings of variables to be probed at each probe monitor location.
        vis_vars: List of num_subplots strings of variables to be visualized.
        probe_num: One-indexed probe number.
        num_probes: Total number of probe monitors set in the solver parameters input file.
        vis_x_bounds: List of num_subplots [lower, upper] x-axis bounds per subplot;
            [None, None] allows dynamic axis sizing.
        vis_y_bounds: List of num_subplots [lower, upper] y-axis bounds per subplot;
            [None, None] allows dynamic axis sizing.
        species_names: List of strings of names for all num_species_full chemical species.

    Attributes:
        vis_type: Set to "probe".
        x_label: Set to "t (s)", x-axis label.
        probe_num: One-indexed probe number, from the solver parameters input file.
        probe_vars: List of strings of variables probed at each monitor location.
        ax_line: List of matplotlib.lines.Line2D artists for each subplot.
        fig_file: Path to visualization plot output image file.
    """

    def __init__(
        self,
        image_output_dir,
        vis_id,
        sim_type,
        probe_vars,
        vis_vars,
        probe_num,
        num_probes,
        vis_x_bounds,
        vis_y_bounds,
        species_names,
    ):

        self.vis_type = "probe"
        self.vis_id = vis_id
        self.x_label = "t (s)"

        self.probe_num = probe_num
        self.probe_vars = probe_vars

        assert self.probe_num > 0, "Must provide positive integer probe number for probe plot " + str(self.vis_id)
        assert self.probe_num <= num_probes, "probe_num_" + str(self.vis_id) + " must correspond to a valid probe"
        assert vis_vars[0] is not None, "Must provide vis_vars for probe plot " + str(self.vis_id)

        super().__init__(vis_id, vis_vars, vis_x_bounds, vis_y_bounds, species_names)

        self.ax_line = [None] * self.num_subplots

        # Image file on disk
        vis_name = ""
        for vis_var in self.vis_vars:
            vis_name += "_" + vis_var
        fig_name = "probe" + vis_name + "_" + str(self.probe_num) + "_" + sim_type + ".png"
        self.fig_file = os.path.join(image_output_dir, fig_name)

        # Check that requested variables are being probed
        for vis_var in self.vis_vars:
            assert vis_var in probe_vars, "Must probe " + vis_var + " to plot it"

    def plot(self, probe_vals, time_vals, iter_num, line_style, first_plot):
        """Draw and display probe plot.

        Since shape of plotted data changes every time, can't use set_data.
        As a result, probe plotting can be pretty slow.

        Args:
            probe_vals: NumPy array of probe monitor time history for each monitored variable.
            time_vals: NumPy array of time values for each discrete time step, not including t = 0.
            iter_num: One-indexed physical time step iteration number.
            line_style: String containing matplotlib.pyplot line style option.
            first_plot: Boolean flag, True the first time the plot is drawn.
        """
        plt.figure(self.vis_id)

        # Normalize self.ax to a 2-D iterable of axes regardless of subplot layout.
        if type(self.ax) != np.ndarray:
            ax_list = [self.ax]
        else:
            ax_list = self.ax

        for col_idx, col in enumerate(ax_list):
            if type(col) != np.ndarray:
                col_list = [col]
            else:
                col_list = col

            for rowIdx, ax_var in enumerate(col_list):
                # Flatten (row, col) position to the subplot's linear index.
                lin_idx = np.ravel_multi_index(([col_idx], [rowIdx]), (self.num_rows, self.num_cols))[0]
                if (lin_idx + 1) > self.num_subplots:
                    ax_var.axis("off")
                    continue

                ax_var.cla()
                y_data = self.get_y_data(probe_vals, self.vis_vars[lin_idx], iter_num)
                x_data = time_vals[:iter_num]

                (self.ax_line[lin_idx],) = ax_var.plot(x_data, y_data, line_style)
                ax_var.set_ylabel(self.ax_labels[lin_idx])
                ax_var.set_xlabel(self.x_label)

                ax_var.set_ylim(bottom=self.vis_y_bounds[lin_idx][0], top=self.vis_y_bounds[lin_idx][1], auto=True)
                ax_var.set_xlim(left=self.vis_x_bounds[lin_idx][0], right=self.vis_x_bounds[lin_idx][1], auto=True)
                ax_var.ticklabel_format(axis="x", style="sci", scilimits=(0, 0))

        if first_plot:
            self.fig.tight_layout()
        self.fig.canvas.draw()

    def get_y_data(self, probe_vals, var_str, iter_num):
        """Extract probe data to be plotted from probe_vals.

        Args:
            probe_vals: NumPy array of probe monitor time history for each monitored variable.
            var_str: String corresponding to variable profile to be plotted.
            iter_num: One-indexed simulation iteration number.
        """
        var_idx = np.squeeze(np.argwhere(self.probe_vars == var_str)[0])
        y_data = probe_vals[self.probe_num - 1, var_idx, :iter_num]
        return y_data

    def save(self, iter_num, dpi=100):
        """Save plot to disk.

        Args:
            iter_num: One-indexed simulation iteration number.
            dpi: Dots per inch of figure, determines resolution.
        """
        plt.figure(self.vis_id)
        self.fig.savefig(self.fig_file, dpi=dpi)
| 40.333333 | 115 | 0.643848 |
adf82a1ec6b819c64f1e4ca1a51923c2af26e092 | 1,002 | py | Python | Distance/getDistanceFromLatLonInKm.py | hugolouzada/UltimateMetroDistanceSP | a5c042cb53b7b0517c36ad043aee7281cb208d2d | [
"Apache-2.0"
] | null | null | null | Distance/getDistanceFromLatLonInKm.py | hugolouzada/UltimateMetroDistanceSP | a5c042cb53b7b0517c36ad043aee7281cb208d2d | [
"Apache-2.0"
] | null | null | null | Distance/getDistanceFromLatLonInKm.py | hugolouzada/UltimateMetroDistanceSP | a5c042cb53b7b0517c36ad043aee7281cb208d2d | [
"Apache-2.0"
] | null | null | null | from math import sin, cos, atan2, sqrt
from Distance.Coordinate import Coordinate
def getDistanceFromLatLonInKm(coordinate1: "Coordinate", coordinate2: "Coordinate"):
    """
    Calculates the distance between two Coordinates in km
    Earth's radius from https://rechneronline.de/earth-radius/
    Haversine formula from:
    http://stackoverflow.com/questions/27928/calculate-distance-between-two-latitude-longitude-points-haversine-formula
    :param coordinate1: one coordinate
    :param coordinate2: other coordinate
    :return: distance in km
    """
    R = 6375.515  # Radius of the planet in km at lat -23.5505199 and sea height of 768m
    dLat = coordinate2.latAsRad() - coordinate1.latAsRad()
    dLon = coordinate2.lonAsRad() - coordinate1.lonAsRad()
    # BUGFIX: haversine needs sin^2(dLat/2); the original used sin(dLat/2)*sin(dLat*2),
    # which gives wrong distances and can make `a` negative (sqrt domain error).
    a1 = sin(dLat / 2) * sin(dLat / 2)
    a2 = cos(coordinate1.latAsRad()) * cos(coordinate2.latAsRad()) * sin(dLon / 2) * sin(dLon / 2)
    a = a1 + a2
    c = 2 * atan2(sqrt(a), sqrt(1 - a))
    return R * c
#todo: tests
| 27.081081 | 119 | 0.691617 |
5c380c9e06f2a7949589bd3eb955255697e6e478 | 2,298 | py | Python | scripts/ode_scripts.py | yahmskeano/CAAM37830-Personal | 53c9ce3f24bdc6c730552d4125494d2b2bb417d1 | [
"MIT"
] | null | null | null | scripts/ode_scripts.py | yahmskeano/CAAM37830-Personal | 53c9ce3f24bdc6c730552d4125494d2b2bb417d1 | [
"MIT"
] | null | null | null | scripts/ode_scripts.py | yahmskeano/CAAM37830-Personal | 53c9ce3f24bdc6c730552d4125494d2b2bb417d1 | [
"MIT"
] | 1 | 2021-08-09T16:36:56.000Z | 2021-08-09T16:36:56.000Z | import sys
# Driver script: runs the SIR-family ODE models from the `sir` package and
# saves the figures used in the checkpoint/final reports.
sys.path.append("../")
from sir.ode import *
# init population
N = 1_000_000
I = 5
R = 0
S = N - I - R
# init the SIR model (b, k presumably infection/recovery rates -- confirm in sir.ode)
ode_SIR = SIR(b=3, k=0.01, S=S, I=I, R=R)
# solve over one year
ode_SIR.solve(t_bound=365)
# plot the numerical solution
ode_SIR.plot('../doc/checkpoint/figures/ode1.png')
# print S, I, R of day 100
print(ode_SIR(100))
# init the SIR model with a larger recovery rate k
ode_SIR = SIR(b=3, k=0.1, S=S, I=I, R=R)
ode_SIR.solve(t_bound=365)
ode_SIR.plot('../doc/checkpoint/figures/ode2.png')
# init the SIR model with a smaller infection rate b
ode_SIR = SIR(b=0.8, k=0.01, S=S, I=I, R=R)
ode_SIR.solve(t_bound=365)
ode_SIR.plot('../doc/checkpoint/figures/ode3.png')
# Phase diagram with b, k axes with portion of population that is infected at time t
phase_plot(N, I, R, t=5, phase='I', save_path='../doc/checkpoint/figures/phase_diagram1.png')
phase_plot(N, I, R, t=10, phase='I', save_path='../doc/checkpoint/figures/phase_diagram2.png')
phase_plot(N, I, R, t=50, phase='I', save_path='../doc/checkpoint/figures/phase_diagram3.png')
# other models (default S/I/R populations)
# NOTE(review): output paths switch from '../doc/...' above to './doc/...' below --
# confirm the intended working directory.
from sir.ode import *
ode_SIS = SIS(b=3, k=0.1)
ode_SIS.solve(t_bound=365)
ode_SIS.plot('./doc/final/figures/ode_sis.png')
ode_SIRD = SIRD(b=3, k=0.1, mu=0.05)
ode_SIRD.solve(t_bound=365)
ode_SIRD.plot('./doc/final/figures/ode_sird.png')
ode_MSIR = MSIR(lam=0.0003, sigma=1/180, b=3, k=0.1, mu=0.0003)
ode_MSIR.solve(t_bound=365)
ode_MSIR.plot('./doc/final/figures/ode_msir.png')
ode_SIRC = SIRC(b=3, k=0.1, c1=0.1, c2=0.1)
ode_SIRC.solve(t_bound=365)
ode_SIRC.plot('./doc/final/figures/ode_sirc.png')
ode_SEIR = SEIR(lam=0.0003, b=3, k=0.1, a=1/14, mu=0.0003)
ode_SEIR.solve(t_bound=365)
ode_SEIR.plot('./doc/final/figures/ode_seir.png')
ode_SEIS = SEIS(lam=0.0003, b=3, k=0.1, a=1/14, mu=0.0003)
ode_SEIS.solve(t_bound=365)
ode_SEIS.plot('./doc/final/figures/ode_seis.png')
ode_MSEIR = MSEIR(lam=0.0003, sigma=1/180, b=3, k=0.1, a=1/14, mu=0.0003)
ode_MSEIR.solve(t_bound=365)
ode_MSEIR.plot('./doc/final/figures/ode_mseir.png')
ode_MSEIRS = MSEIRS(lam=0.0003, sigma=1/180, b=3, k=0.1, a=1/14, mu=0.0003, l=1/180)
ode_MSEIRS.solve(t_bound=365)
ode_MSEIRS.plot('./doc/final/figures/ode_mseirs.png')
ode_MSEIQRDS = MSEIQRDS()
ode_MSEIQRDS.solve(t_bound=365)
ode_MSEIQRDS.plot('./doc/final/figures/ode_mseiqrds.png')
| 31.479452 | 94 | 0.710183 |
dba72d12b01dfb3568db58917900b8b8181d80f3 | 7,340 | py | Python | mars/tensor/expressions/linalg/svd.py | cclauss/mars | 85decf86f6489ab1acaee6222731d66fcecd2718 | [
"Apache-2.0"
] | 1 | 2018-12-26T08:37:04.000Z | 2018-12-26T08:37:04.000Z | mars/tensor/expressions/linalg/svd.py | cclauss/mars | 85decf86f6489ab1acaee6222731d66fcecd2718 | [
"Apache-2.0"
] | null | null | null | mars/tensor/expressions/linalg/svd.py | cclauss/mars | 85decf86f6489ab1acaee6222731d66fcecd2718 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 1999-2018 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from numpy.linalg import LinAlgError
from .... import operands
from ...core import ExecutableTuple
from ..datasource import tensor as astensor
from .core import TSQR
class TensorSVD(operands.SVD, TSQR):
    """Tensor operand for a (reduced) singular value decomposition."""

    def __init__(self, method=None, dtype=None, **kw):
        super(TensorSVD, self).__init__(_method=method, _dtype=dtype, **kw)

    @classmethod
    def _is_svd(cls):
        # Tells the shared TSQR machinery to emit U, s, V rather than Q, R.
        return True

    def _set_inputs(self, inputs):
        super(TensorSVD, self)._set_inputs(inputs)
        self._input = self._inputs[0]

    def __call__(self, a):
        """Build the three output tensors (U, s, V) for input tensor *a*."""
        a = astensor(a)
        if a.ndim != 2:
            raise LinAlgError(
                '{0}-dimensional tensor given. '
                'Tensor must be two-dimensional'.format(a.ndim))

        # Probe numpy's SVD on a minimal 1x1 matrix just to learn the
        # result dtypes for this input dtype.
        probe_u, probe_s, probe_v = np.linalg.svd(np.ones((1, 1), dtype=a.dtype))

        # Reduced decomposition: U is (M, N), s is (N,), V is (N, N).
        m, n = a.shape
        shapes = ((m, n), (n,), (n, n))
        dtypes = (probe_u.dtype, probe_s.dtype, probe_v.dtype)
        kws = [{'side': side, 'dtype': dt}
               for side, dt in zip(('U', 's', 'V'), dtypes)]
        u, s, v = self.new_tensors([a], shapes, kws=kws)
        return ExecutableTuple([u, s, v])

    @classmethod
    def tile(cls, op):
        attr = cls._get_obj_attr
        u, s, v = op.outputs
        dtypes = tuple(attr(t, 'dtype') for t in (u, s, v))
        shapes = tuple(attr(t, 'shape') for t in (u, s, v))

        in_tensor = op.input
        if in_tensor.chunk_shape == (1, 1):
            # Single-chunk input: the SVD runs directly on that one chunk.
            in_chunk = in_tensor.chunks[0]
            chunk_op = op.copy().reset_key()
            chunk_kws = [
                {'side': 'U', 'dtype': dtypes[0], 'index': in_chunk.index},
                {'side': 's', 'dtype': dtypes[1], 'index': in_chunk.index[1:]},
                {'side': 'V', 'dtype': dtypes[2], 'index': in_chunk.index},
            ]
            u_chunk, s_chunk, v_chunk = chunk_op.new_chunks(
                [in_chunk], shapes, kws=chunk_kws)

            new_op = op.copy()
            kws = [
                {'chunks': [chunk],
                 'nsplits': tuple((ext,) for ext in shape),
                 'dtype': dt}
                for chunk, shape, dt in zip((u_chunk, s_chunk, v_chunk),
                                            shapes, dtypes)
            ]
            return new_op.new_tensors(op.inputs, list(shapes), kws=kws)
        elif op.method == 'tsqr':
            # Multi-chunk: fall back to the shared TSQR tiling.
            return super(TensorSVD, cls).tile(op)
        else:
            raise NotImplementedError('Only tsqr method supported for now')
def svd(a, method='tsqr'):
    """
    Singular Value Decomposition.

    The 2D tensor `a` of shape ``(M, N)`` is factorized as
    ``u @ mt.diag(s) @ v``, where `u` and `v` are unitary tensors and `s`
    is a 1D tensor of `a`'s singular values.  This implementation always
    produces the reduced decomposition (``K = N`` below); unlike
    ``numpy.linalg.svd`` there are no ``full_matrices`` / ``compute_uv``
    options, and input with ``a.ndim != 2`` is rejected.

    Parameters
    ----------
    a : (M, N) array_like
        A real or complex tensor with ``a.ndim == 2``.
    method: {'tsqr'}, optional
        method to calculate qr factorization, tsqr as default

        TSQR is presented in:

            A. Benson, D. Gleich, and J. Demmel.
            Direct QR factorizations for tall-and-skinny matrices in
            MapReduce architectures.
            IEEE International Conference on Big Data, 2013.
            http://arxiv.org/abs/1301.1071

    Returns
    -------
    u : (M, K) tensor
        Unitary tensor.
    s : (K,) tensor
        Vector with the singular values, sorted in descending order.
    v : (K, N) tensor
        Unitary tensor.

    Raises
    ------
    LinAlgError
        If `a` is not two-dimensional, or if SVD computation does not
        converge.

    Notes
    -----
    SVD is usually described for the factorization of a 2D matrix
    :math:`A` as :math:`A = U S V^H`, where :math:`A = a`, :math:`U = u`,
    :math:`S = \\mathtt{mt.diag}(s)` and :math:`V^H = v`.  The 1D tensor
    `s` contains the singular values of `a` and `u` and `v` are unitary.
    The rows of `v` are the eigenvectors of :math:`A^H A` and the columns
    of `u` are the eigenvectors of :math:`A A^H`.  In both cases the
    corresponding (possibly non-zero) eigenvalues are given by ``s**2``.

    The result tensors are lazy; call ``.execute()`` on them (or on the
    returned tuple) to obtain concrete values.

    Examples
    --------
    >>> import mars.tensor as mt

    Reduced SVD, 2D case:

    >>> a = mt.random.randn(9, 6) + 1j*mt.random.randn(9, 6)
    >>> u, s, vh = mt.linalg.svd(a)
    >>> u.shape, s.shape, vh.shape
    ((9, 6), (6,), (6, 6))
    """
    op = TensorSVD(method=method)
    return op(a)
| 40.32967 | 97 | 0.559128 |
a9ff521e7636814ac735b5ba6635f08391bb75a9 | 3,401 | py | Python | androidautotest/__main__.py | 15045120/AndroidTest | c3db39d05582c4d1c722ca2c055b1317ee6469b4 | [
"MIT"
] | 3 | 2019-12-23T13:47:38.000Z | 2019-12-24T15:57:35.000Z | androidautotest/__main__.py | 15045120/AndroidTest | c3db39d05582c4d1c722ca2c055b1317ee6469b4 | [
"MIT"
] | 2 | 2019-12-18T13:23:51.000Z | 2019-12-18T13:23:56.000Z | androidautotest/__main__.py | 15045120/AndroidTest | c3db39d05582c4d1c722ca2c055b1317ee6469b4 | [
"MIT"
] | null | null | null | import argparse
from .__init__ import MODULE,VERSION
from .tool import LINESEQ,Command
from .client import execute,create,install,startasm
def print_version():
    """Write the package version banner (plus a platform line break) to stdout."""
    banner = 'androidautotest {}{}'.format(VERSION, LINESEQ)
    print(banner)
def print_run_usage():
    """Show the option set required to run an existing test case."""
    usage = 'usage(run options): androidautotest --casedir <CASEDIR> --device <DEVICE> --times <TIMES>'
    print(usage)
def print_create_usage():
    """Show the option set required to create a new test case."""
    usage = 'usage(create options): androidautotest --newcase <NEWCASE> --savedir <SAVEDIR>'
    print(usage)
def main():
    """Command-line entry point for androidautotest.

    Parses the arguments and dispatches to exactly one action: install
    dependencies, start the Android Screen Monitor, create a new test
    case skeleton, or run an existing case.  Incomplete option
    combinations print the matching usage reminder instead.
    """
    # NOTE: the usage template has four '{}' placeholders but five LINESEQ
    # arguments; str.format silently ignores the extra one.
    parser = argparse.ArgumentParser(
        prog='androidautotest',
        usage='{} androidautotest --installdep {} androidautotest --startasm {} androidautotest --newcase <NEWCASE> --savedir <SAVEDIR> {} androidautotest --casedir <CASEDIR> --device <DEVICE> --times <TIMES>'.format(LINESEQ, LINESEQ, LINESEQ, LINESEQ, LINESEQ),
        description='A framework to run test case for android automated test',
        add_help=False,
    )
    parser.add_argument(
        '-V', '--version', action='store_true',
        help='Print version and exit'
    )
    parser.add_argument(
        '-h', '--help', action='store_true',
        help='Print this help message and exit'
    )
    # Argument groups only affect how --help is rendered; the declaration
    # order below is the order shown to the user.
    cmd_line_grp = parser.add_argument_group('install dependency')
    cmd_line_grp.add_argument(
        '--installdep', action='store_true',
        help='install dependency of androidautotest'
    )
    cmd_line_grp = parser.add_argument_group('start asm')
    cmd_line_grp.add_argument(
        '--startasm', action='store_true',
        help='start Android Screen Monitor'
    )
    cmd_line_grp = parser.add_argument_group('create case')
    cmd_line_grp.add_argument(
        '--newcase', metavar='<NEWCASE>',nargs=1, type=str,
        help='New case name to create'
    )
    cmd_line_grp.add_argument(
        '--savedir', metavar='<SAVEDIR>', nargs=1, type=str,
        help='Path to save new case'
    )
    cmd_line_grp = parser.add_argument_group('run case')
    cmd_line_grp.add_argument(
        '--casedir', metavar='<CASEDIR>', nargs=1, type=str,
        help='Case path to run'
    )
    cmd_line_grp.add_argument(
        '--device', metavar='<DEVICE>', nargs=1, type=str,
        help='Device to switch'
    )
    cmd_line_grp.add_argument(
        '--times', metavar='<TIMES>', nargs=1, type=int,
        help='Times of case running'
    )
    args = parser.parse_args()
    if args.help:
        print_version()
        parser.print_help()
        Command.exit()
    if args.version:
        print_version()
        Command.exit()
    # nargs=1 stores every value as a one-element list, hence the [0]s.
    if args.casedir and args.device and args.times:
        args = vars(args)
        case_path = args['casedir'][0]
        device_serial_number = args['device'][0]
        run_times = args['times'][0]
        execute(case_path, device_serial_number, run_times)
    elif args.newcase and args.savedir:
        args = vars(args)
        new_case_name = args['newcase'][0]
        save_dir = args['savedir'][0]
        create(new_case_name, save_dir)
    elif args.installdep:
        install()
    elif args.startasm:
        startasm()
    # Partial option sets fall through to the matching usage reminder.
    elif args.casedir or args.device or args.times:
        print_run_usage()
        Command.exit()
    elif args.newcase or args.savedir:
        print_create_usage()
        Command.exit()
    else:
        parser.print_help()
        Command.exit()
# Allow the package entry point to be run directly (python -m androidautotest).
if __name__ == '__main__':
    main()
| 32.390476 | 266 | 0.629521 |
52b08359d39b33797b03e98174b6448102201eb7 | 10,574 | py | Python | trio/tests/test_subprocess.py | JefffHofffman/trio | d8631117ce4ca19017bbe3850704dd5ce6cfaeb1 | [
"Apache-2.0",
"MIT"
] | null | null | null | trio/tests/test_subprocess.py | JefffHofffman/trio | d8631117ce4ca19017bbe3850704dd5ce6cfaeb1 | [
"Apache-2.0",
"MIT"
] | null | null | null | trio/tests/test_subprocess.py | JefffHofffman/trio | d8631117ce4ca19017bbe3850704dd5ce6cfaeb1 | [
"Apache-2.0",
"MIT"
] | null | null | null | import os
import signal
import subprocess
import sys
import pytest
from .. import (
_core, move_on_after, fail_after, sleep, sleep_forever, Process
)
from .._core.tests.tutil import slow
from ..testing import wait_all_tasks_blocked
# True on POSIX platforms; gates all signal-specific behavior below.
posix = (os.name == "posix")
if posix:
    from signal import SIGKILL, SIGTERM, SIGINT
else:
    # Windows has no POSIX kill signals; placeholders keep the names defined.
    SIGKILL = SIGTERM = SIGINT = None
# Windows ships with almost no command-line utilities, so every child
# process spawned by these tests is the Python interpreter itself running
# a short, mostly cross-platform snippet.
def python(code):
    """Build an argv list that runs *code* in an unbuffered Python child."""
    return [sys.executable, "-u", "-c", "import sys; " + code]


EXIT_TRUE = python("sys.exit(0)")
EXIT_FALSE = python("sys.exit(1)")
CAT = python("sys.stdout.buffer.write(sys.stdin.buffer.read())")


def SLEEP(seconds):
    """Argv for a child process that sleeps for *seconds* seconds."""
    return python("import time; time.sleep({})".format(seconds))
def got_signal(proc, sig):
    """True if *proc* died from *sig* (POSIX) or at least exited abnormally."""
    if not posix:
        # Windows exit codes don't encode signals; any nonzero status counts.
        return proc.returncode != 0
    # POSIX reports a signal death as the negated signal number.
    return proc.returncode == -sig
async def test_basic():
    """returncode is None while the child runs and 0 once it has exited."""
    async with Process(EXIT_TRUE) as process:
        assert process.returncode is None
    # Leaving the context waits for the child, so the status is now known.
    assert process.returncode == 0
async def test_multi_wait():
    """Several tasks may block on Process.wait() for the same child at once."""
    async with Process(SLEEP(10)) as proc:
        # Check that wait (including multi-wait) tolerates being cancelled
        async with _core.open_nursery() as nursery:
            nursery.start_soon(proc.wait)
            nursery.start_soon(proc.wait)
            nursery.start_soon(proc.wait)
            # Let all three waiters park on the child before cancelling them.
            await wait_all_tasks_blocked()
            nursery.cancel_scope.cancel()
        # Now try waiting for real
        async with _core.open_nursery() as nursery:
            nursery.start_soon(proc.wait)
            nursery.start_soon(proc.wait)
            nursery.start_soon(proc.wait)
            await wait_all_tasks_blocked()
            # Killing the child lets every pending wait() complete.
            proc.kill()
async def test_kill_when_context_cancelled():
    """Cancellation while inside the Process context kills the child."""
    with move_on_after(0) as scope:
        async with Process(SLEEP(10)) as proc:
            # Child was just spawned, so it can't have exited yet.
            assert proc.poll() is None
            # Process context entry is synchronous, so this is the
            # only checkpoint:
            await sleep_forever()
    assert scope.cancelled_caught
    # The context manager killed the child on the way out.
    assert got_signal(proc, SIGKILL)
# Child that echoes its whole stdin to stdout and writes the same bytes,
# reversed, to stderr.
_ECHO_AND_REVERSE_SNIPPET = (
    "data = sys.stdin.buffer.read(); "
    "sys.stdout.buffer.write(data); "
    "sys.stderr.buffer.write(data[::-1])"
)
COPY_STDIN_TO_STDOUT_AND_BACKWARD_TO_STDERR = python(_ECHO_AND_REVERSE_SNIPPET)
async def test_pipes():
    """stdin/stdout/stderr pipes to a child all work concurrently."""
    async with Process(
        COPY_STDIN_TO_STDOUT_AND_BACKWARD_TO_STDERR,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    ) as proc:
        msg = b"the quick brown fox jumps over the lazy dog"

        async def feed_input():
            # Close stdin after writing so the child's read() can finish.
            await proc.stdin.send_all(msg)
            await proc.stdin.aclose()

        async def check_output(stream, expected):
            # Drain *stream* to EOF and compare against *expected*.
            seen = bytearray()
            while True:
                chunk = await stream.receive_some(4096)
                if not chunk:
                    break
                seen.extend(chunk)
            assert seen == expected

        async with _core.open_nursery() as nursery:
            # fail quickly if something is broken
            nursery.cancel_scope.deadline = _core.current_time() + 3.0
            nursery.start_soon(feed_input)
            nursery.start_soon(check_output, proc.stdout, msg)
            nursery.start_soon(check_output, proc.stderr, msg[::-1])
        assert not nursery.cancel_scope.cancelled_caught
        assert 0 == await proc.wait()
async def test_interactive():
    # Test some back-and-forth with a subprocess. This one works like so:
    # in: 32\n
    # out: 0000...0000\n (32 zeroes)
    # err: 1111...1111\n (64 ones)
    # in: 10\n
    # out: 2222222222\n (10 twos)
    # err: 3333....3333\n (20 threes)
    # in: EOF
    # out: EOF
    # err: EOF
    async with Process(
        python(
            "idx = 0\n"
            "while True:\n"
            " line = sys.stdin.readline()\n"
            " if line == '': break\n"
            " request = int(line.strip())\n"
            " print(str(idx * 2) * request)\n"
            " print(str(idx * 2 + 1) * request * 2, file=sys.stderr)\n"
            " idx += 1\n"
        ),
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.PIPE,
    ) as proc:
        # Text-mode children emit platform line endings on Windows.
        newline = b"\n" if posix else b"\r\n"

        async def expect(idx, request):
            # Drain both output streams in parallel, each expecting the
            # right repeated digit followed by a newline.
            async with _core.open_nursery() as nursery:

                async def drain_one(stream, count, digit):
                    while count > 0:
                        result = await stream.receive_some(count)
                        assert result == (
                            "{}".format(digit).encode("utf-8") * len(result)
                        )
                        count -= len(result)
                    assert count == 0
                    assert await stream.receive_some(len(newline)) == newline

                nursery.start_soon(drain_one, proc.stdout, request, idx * 2)
                nursery.start_soon(
                    drain_one, proc.stderr, request * 2, idx * 2 + 1
                )

        with fail_after(5):
            # Requests can be split across multiple writes.
            await proc.stdin.send_all(b"12")
            await sleep(0.1)
            await proc.stdin.send_all(b"345" + newline)
            await expect(0, 12345)
            # Two requests in a single write are handled in order.
            await proc.stdin.send_all(b"100" + newline + b"200" + newline)
            await expect(1, 100)
            await expect(2, 200)
            await proc.stdin.send_all(b"0" + newline)
            await expect(3, 0)
            # An incomplete line produces no output until the newline lands.
            await proc.stdin.send_all(b"999999")
            with move_on_after(0.1) as scope:
                await expect(4, 0)
            assert scope.cancelled_caught
            await proc.stdin.send_all(newline)
            await expect(4, 999999)
            # EOF on stdin terminates the child's loop; both streams close.
            await proc.stdin.aclose()
            assert await proc.stdout.receive_some(1) == b""
            assert await proc.stderr.receive_some(1) == b""
    assert proc.returncode == 0
async def test_stderr_stdout():
    """stderr=STDOUT merges the two streams in each redirection mode."""
    # Both streams piped and merged: stdio carries the interleaved output.
    async with Process(
        COPY_STDIN_TO_STDOUT_AND_BACKWARD_TO_STDERR,
        stdin=subprocess.PIPE,
        stdout=subprocess.PIPE,
        stderr=subprocess.STDOUT,
    ) as proc:
        assert proc.stdout is not None
        assert proc.stderr is None
        await proc.stdio.send_all(b"1234")
        await proc.stdio.send_eof()

        output = []
        while True:
            chunk = await proc.stdio.receive_some(16)
            if chunk == b"":
                break
            output.append(chunk)
        # Echo ("1234") followed by the reversed copy ("4321").
        assert b"".join(output) == b"12344321"
    assert proc.returncode == 0

    # this one hits the branch where stderr=STDOUT but stdout
    # is not redirected
    async with Process(
        CAT, stdin=subprocess.PIPE, stderr=subprocess.STDOUT
    ) as proc:
        assert proc.stdout is None
        assert proc.stderr is None
        await proc.stdin.aclose()
    assert proc.returncode == 0

    if posix:
        # stdout redirected to a raw fd while stderr merges into it.
        try:
            r, w = os.pipe()

            async with Process(
                COPY_STDIN_TO_STDOUT_AND_BACKWARD_TO_STDERR,
                stdin=subprocess.PIPE,
                stdout=w,
                stderr=subprocess.STDOUT,
            ) as proc:
                # Close our copy of the write end so EOF can propagate.
                os.close(w)
                assert proc.stdio is None
                assert proc.stdout is None
                assert proc.stderr is None
                await proc.stdin.send_all(b"1234")
                await proc.stdin.aclose()
                assert await proc.wait() == 0
            assert os.read(r, 4096) == b"12344321"
            assert os.read(r, 4096) == b""
        finally:
            os.close(r)
async def test_errors():
    """Invalid Process() option combinations raise TypeError."""
    # Text-mode options are rejected: Process only speaks byte streams.
    with pytest.raises(TypeError) as exc:
        Process(["ls"], encoding="utf-8")
    message = str(exc.value)
    assert "unbuffered byte streams" in message
    assert "the 'encoding' option is not supported" in message

    if posix:
        # On POSIX, shell-ness must match the command's type.
        with pytest.raises(TypeError):
            Process(["ls"], shell=True)
        with pytest.raises(TypeError):
            Process("ls", shell=False)
async def test_signals():
    """kill(), terminate() and send_signal() all actually stop the child."""
    async def test_one_signal(send_it, signum):
        # If the signal works, the child dies well before the deadline.
        with move_on_after(1.0) as scope:
            async with Process(SLEEP(3600)) as proc:
                send_it(proc)
        assert not scope.cancelled_caught
        if posix:
            assert proc.returncode == -signum
        else:
            # Non-POSIX exit codes don't encode the signal number.
            assert proc.returncode != 0

    await test_one_signal(Process.kill, SIGKILL)
    await test_one_signal(Process.terminate, SIGTERM)
    if posix:
        await test_one_signal(lambda proc: proc.send_signal(SIGINT), SIGINT)
@pytest.mark.skipif(not posix, reason="POSIX specific")
async def test_wait_reapable_fails():
    """wait() still completes when the child cannot be reaped (ECHILD)."""
    old_sigchld = signal.signal(signal.SIGCHLD, signal.SIG_IGN)
    try:
        # With SIGCHLD disabled, the wait() syscall will wait for the
        # process to exit but then fail with ECHILD. Make sure we
        # support this case as the stdlib subprocess module does.
        async with Process(SLEEP(3600)) as proc:
            async with _core.open_nursery() as nursery:
                nursery.start_soon(proc.wait)
                await wait_all_tasks_blocked()
                proc.kill()
                nursery.cancel_scope.deadline = _core.current_time() + 1.0
            assert not nursery.cancel_scope.cancelled_caught
            assert proc.returncode == 0  # exit status unknowable, so...
    finally:
        # Restore the original SIGCHLD disposition for later tests.
        signal.signal(signal.SIGCHLD, old_sigchld)
@slow
def test_waitid_eintr():
    # This only matters on PyPy (where we're coding EINTR handling
    # ourselves) but the test works on all waitid platforms.
    from .._subprocess_platform import wait_child_exiting
    if not wait_child_exiting.__module__.endswith("waitid"):
        pytest.skip("waitid only")
    from .._subprocess_platform.waitid import sync_wait_reapable

    got_alarm = False
    sleeper = subprocess.Popen(["sleep", "3600"])

    def on_alarm(sig, frame):
        # SIGALRM interrupts the blocking waitid call with EINTR; killing
        # the sleeper then lets the retried wait complete.
        nonlocal got_alarm
        got_alarm = True
        sleeper.kill()

    old_sigalrm = signal.signal(signal.SIGALRM, on_alarm)
    try:
        signal.alarm(1)
        sync_wait_reapable(sleeper.pid)
        # -9 == died from SIGKILL delivered by the alarm handler.
        assert sleeper.wait(timeout=1) == -9
    finally:
        if sleeper.returncode is None:  # pragma: no cover
            # We only get here if something fails in the above;
            # if the test passes, wait() will reap the process
            sleeper.kill()
            sleeper.wait()
        signal.signal(signal.SIGALRM, old_sigalrm)
| 33.251572 | 77 | 0.595612 |
ead25d247bb379f68596dcd52a6b3cc91a124456 | 22,113 | py | Python | Misc_and_Old/toil_merkel_report.py | dgaston/ddbio-variantstore | e1f33f18ff8fc2f51d1f215212e4a368eb7505e7 | [
"MIT"
] | 1 | 2021-06-28T01:27:28.000Z | 2021-06-28T01:27:28.000Z | Misc_and_Old/toil_merkel_report.py | dgaston/ddbio-variantstore | e1f33f18ff8fc2f51d1f215212e4a368eb7505e7 | [
"MIT"
] | null | null | null | Misc_and_Old/toil_merkel_report.py | dgaston/ddbio-variantstore | e1f33f18ff8fc2f51d1f215212e4a368eb7505e7 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import sys
import xlwt
import utils
import getpass
import argparse
import numpy as np
from toil.job import Job
from ddb import configuration
from ddb_ngsflow import pipeline
from variantstore import Variant
from collections import defaultdict
from variantstore import SampleVariant
from coveragestore import SampleCoverage
from cassandra.cqlengine import connection
from cassandra.auth import PlainTextAuthProvider
def process_sample(job, config, sample, samples, addresses, authenticator, thresholds, callers):
    """Toil job: pull coverage and variant data for *sample* from Cassandra,
    classify the variants into reporting tiers, and write a spreadsheet
    report named "<sample>.xlsx" in the working directory.

    :param job: Toil job handle (used for logging via job.fileStore).
    :param config: run configuration dict; needs 'genome_version'.
    :param sample: sample identifier to report on.
    :param samples: mapping of sample -> library -> library metadata dict.
    :param addresses: list of Cassandra contact points.
    :param authenticator: Cassandra auth provider, or None.
    :param thresholds: dict with 'min_saf', 'max_maf' and 'depth' cutoffs.
    :param callers: collection of variant-caller names; controls which
        per-caller AF columns appear in the report sheets.
    """
    job.fileStore.logToMaster("Retrieving data for sample {}\n".format(sample))
    job.fileStore.logToMaster("Retrieving coverage data from database\n")
    connection.setup(addresses, "coveragestore", auth_provider=authenticator)

    report_data = dict()
    filtered_variant_data = defaultdict(list)
    off_target_amplicon_counts = defaultdict(int)
    target_amplicon_coverage = dict()
    ordered_amplicon_coverage = list()

    iterated = 0
    passing_variants = 0
    filtered_low_freq = 0
    filtered_low_depth = 0
    filtered_off_target = 0

    for library in samples[sample]:
        # Variant IDs already aggregated across the whole database; a set
        # gives O(1) membership tests instead of a list's linear scans.
        counted = set()
        report_panel_path = "/mnt/shared-data/ddb-configs/disease_panels/{}/{}" \
                            "".format(samples[sample][library]['panel'], samples[sample][library]['report'])
        job.fileStore.logToMaster("{}: processing amplicons from file {}".format(library, report_panel_path))
        target_amplicons = utils.get_target_amplicons(report_panel_path)

        # Fetch per-amplicon sambamba coverage for every reportable amplicon.
        for amplicon in target_amplicons:
            coverage_data = SampleCoverage.objects.timeout(None).filter(
                SampleCoverage.sample == samples[sample][library]['sample_name'],
                SampleCoverage.amplicon == amplicon,
                SampleCoverage.run_id == samples[sample][library]['run_id'],
                SampleCoverage.library_name == samples[sample][library]['library_name'],
                SampleCoverage.program_name == "sambamba"
            )
            ordered_amplicons = coverage_data.order_by('amplicon', 'run_id').limit(coverage_data.count() + 1000)
            for result in ordered_amplicons:
                # Last row wins for the keyed lookup; every row is kept in
                # the ordered list that drives the coverage sheet.
                target_amplicon_coverage[amplicon] = result
                ordered_amplicon_coverage.append(result)

        job.fileStore.logToMaster("{}: retrieving variants".format(library))
        variants = SampleVariant.objects.timeout(None).filter(
            SampleVariant.reference_genome == config['genome_version'],
            SampleVariant.sample == samples[sample][library]['sample_name'],
            SampleVariant.run_id == samples[sample][library]['run_id'],
            SampleVariant.library_name == samples[sample][library]['library_name'],
            SampleVariant.max_maf_all <= thresholds['max_maf']
        ).allow_filtering()

        num_var = variants.count()
        ordered = variants.order_by('library_name', 'chr', 'pos', 'ref', 'alt').limit(variants.count() + 1000)
        job.fileStore.logToMaster("{}: retrieved {} variants from database\n".format(library, num_var))
        job.fileStore.logToMaster("{}: classifying and filtering variants\n".format(library))

        for variant in ordered:
            iterated += 1
            # Require support from at least two callers.
            if len(variant.callers) < 2:
                continue
            # Skip complex substitutions (both alleles longer than 2 bases).
            if len(variant.ref) > 2 and len(variant.alt) > 2:
                continue
            variant_id = "{}:{}-{}_{}_{}_{}_{}".format(variant.chr, variant.pos, variant.end, variant.ref, variant.alt,
                                                       variant.codon_change, variant.aa_change)
            # Fixed: string comparison with == rather than identity `is`,
            # which only worked by accident of small-string interning.
            if variant.amplicon_data['amplicon'] == 'None':
                filtered_off_target += 1
                off_target_amplicon_counts[variant.amplicon_data['amplicon']] += 1
            else:
                amplicons = variant.amplicon_data['amplicon'].split(',')
                assignable = 0
                for amplicon in amplicons:
                    if amplicon in target_amplicons:
                        assignable += 1
                        break
                if assignable:
                    if variant.max_som_aaf > thresholds['min_saf']:
                        if variant.min_depth > thresholds['depth']:
                            if variant_id not in counted:
                                # Aggregate this variant's recurrence across
                                # the whole database (only once per variant).
                                match_variants = Variant.objects.timeout(None).filter(
                                    Variant.reference_genome == config['genome_version'],
                                    Variant.chr == variant.chr,
                                    Variant.pos == variant.pos,
                                    Variant.ref == variant.ref,
                                    Variant.alt == variant.alt
                                ).allow_filtering()
                                num_matches = match_variants.count()
                                ordered_var = match_variants.order_by('ref', 'alt', 'sample', 'library_name',
                                                                      'run_id').limit(num_matches + 1000)
                                vafs = list()
                                num_times_callers = defaultdict(int)
                                for var in ordered_var:
                                    vafs.append(var.max_som_aaf)
                                    for caller in var.callers:
                                        num_times_callers[caller] += 1
                                variant.vaf_median = np.median(vafs)
                                variant.vaf_std_dev = np.std(vafs)
                                variant.num_times_called = num_matches
                                counted.add(variant_id)
                                caller_counts_elements = list()
                                for caller in num_times_callers:
                                    caller_counts_elements.append("{}: {}".format(caller, num_times_callers[caller]))
                                variant.num_times_callers = ",".join(caller_counts_elements)

                            # NOTE(review): every "fail" branch below tests
                            # max_som_aaf < min_saf or max_depth < depth, but
                            # this point is only reached when
                            # max_som_aaf > min_saf and min_depth > depth, so
                            # those branches (and the low-freq/low-depth
                            # counters inside them) can never fire and the
                            # tier*_fail sheets stay empty. Confirm the
                            # intended gating before "fixing" further.

                            # Putting in to Tier1 based on COSMIC
                            if variant.cosmic_ids:
                                if variant.max_som_aaf < thresholds['min_saf']:
                                    filtered_variant_data['tier1_fail_variants'].append(variant)
                                    filtered_low_freq += 1
                                elif variant.max_depth < thresholds['depth']:
                                    filtered_variant_data['tier1_fail_variants'].append(variant)
                                    filtered_low_depth += 1
                                else:
                                    filtered_variant_data['tier1_pass_variants'].append(variant)
                                    passing_variants += 1
                                continue
                            # Putting in to Tier1 based on ClinVar not being None or Benign
                            if variant.clinvar_data['pathogenic'] != 'None':
                                if variant.clinvar_data['pathogenic'] != 'benign':
                                    if variant.clinvar_data['pathogenic'] != 'likely-benign':
                                        if variant.max_som_aaf < thresholds['min_saf']:
                                            filtered_variant_data['tier1_fail_variants'].append(variant)
                                            filtered_low_freq += 1
                                        elif variant.max_depth < thresholds['depth']:
                                            filtered_variant_data['tier1_fail_variants'].append(variant)
                                            filtered_low_depth += 1
                                        else:
                                            filtered_variant_data['tier1_pass_variants'].append(variant)
                                            passing_variants += 1
                                        continue
                            # Remaining variants are tiered by predicted severity.
                            if variant.severity == 'MED' or variant.severity == 'HIGH':
                                if variant.max_som_aaf < thresholds['min_saf']:
                                    filtered_variant_data['tier3_fail_variants'].append(variant)
                                    filtered_low_freq += 1
                                elif variant.max_depth < thresholds['depth']:
                                    filtered_variant_data['tier3_fail_variants'].append(variant)
                                    filtered_low_depth += 1
                                else:
                                    filtered_variant_data['tier3_pass_variants'].append(variant)
                                    passing_variants += 1
                                continue
                            else:
                                if variant.max_som_aaf < thresholds['min_saf']:
                                    filtered_variant_data['tier4_fail_variants'].append(variant)
                                    filtered_low_freq += 1
                                elif variant.max_depth < thresholds['depth']:
                                    filtered_variant_data['tier4_fail_variants'].append(variant)
                                    filtered_low_depth += 1
                                else:
                                    filtered_variant_data['tier4_pass_variants'].append(variant)
                                    passing_variants += 1
                                continue
                else:
                    filtered_off_target += 1
                    off_target_amplicon_counts[variant.amplicon_data['amplicon']] += 1

        job.fileStore.logToMaster("{}: iterated through {} variants\n".format(library, iterated))
        job.fileStore.logToMaster("{}: filtered {} off-target variants\n".format(library, filtered_off_target))
        job.fileStore.logToMaster("{}: filtered {} low-freq variants\n".format(library, filtered_low_freq))
        job.fileStore.logToMaster("{}: filtered {} low-depth variants\n".format(library, filtered_low_depth))
        job.fileStore.logToMaster("{}: passing {} tier 1 and 2 variants"
                                  "\n".format(library, len(filtered_variant_data['tier1_pass_variants'])))
        job.fileStore.logToMaster("{}: passing {} tier3 variants"
                                  "\n".format(library, len(filtered_variant_data['tier3_pass_variants'])))
        job.fileStore.logToMaster("{}: passing {} tier 4 variants"
                                  "\n".format(library, len(filtered_variant_data['tier4_pass_variants'])))

    report_data['variants'] = filtered_variant_data
    report_data['coverage'] = target_amplicon_coverage

    # NOTE(review): xlwt writes legacy .xls workbooks; the .xlsx extension is
    # kept for backward compatibility with downstream consumers but is
    # misleading about the actual file format.
    report_name = "{}.xlsx".format(sample)
    wb = xlwt.Workbook()

    # Cell styles used to colour-code amplicon coverage depth.
    error_style = xlwt.easyxf('pattern: pattern solid, fore_colour red;')
    warning_style = xlwt.easyxf('pattern: pattern solid, fore_colour light_orange;')
    pass_style = xlwt.easyxf('pattern: pattern solid, fore_colour light_green;')

    coverage_sheet = wb.add_sheet("Coverage")
    tier1_sheet = wb.add_sheet("Tier1 and 2 Pass")
    tier3_sheet = wb.add_sheet("Tier3 Pass")
    tier4_sheet = wb.add_sheet("Tier4 Pass")
    tier1_fail_sheet = wb.add_sheet("Tier1 and 2 Fail")
    tier3_fail_sheet = wb.add_sheet("Tier3 Fail")
    tier4_fail_sheet = wb.add_sheet("Tier4 Fail")

    # Sheet order must stay aligned with the corresponding tier keys.
    tier_sheets = (tier1_sheet, tier1_fail_sheet, tier3_sheet, tier3_fail_sheet, tier4_sheet, tier4_fail_sheet)
    tier_key = ("tier1_pass_variants", "tier1_fail_variants",
                "tier3_pass_variants", "tier3_fail_variants",
                "tier4_pass_variants", "tier4_fail_variants")

    libraries = list()
    report_templates = list()
    run_id = ""
    for library in samples[sample]:
        libraries.append(samples[sample][library]['library_name'])
        report_templates.append(samples[sample][library]['report'])
        run_id = samples[sample][library]['run_id']
    lib_string = " | ".join(libraries)
    reports_string = " | ".join(report_templates)

    # Report metadata and thresholds at the top of the coverage sheet.
    coverage_sheet.write(0, 0, "Sample")
    coverage_sheet.write(0, 1, "{}".format(sample))
    coverage_sheet.write(1, 0, "Libraries")
    coverage_sheet.write(1, 1, "{}".format(lib_string))
    coverage_sheet.write(2, 0, "Run ID")
    coverage_sheet.write(2, 1, "{}".format(run_id))
    coverage_sheet.write(3, 0, "Reporting Templates")
    coverage_sheet.write(3, 1, "{}".format(reports_string))
    coverage_sheet.write(4, 0, "Minimum Reportable Somatic Allele Frequency")
    coverage_sheet.write(4, 1, "{}".format(thresholds['min_saf']))
    coverage_sheet.write(5, 0, "Minimum Amplicon Depth")
    coverage_sheet.write(5, 1, "{}".format(thresholds['depth']))
    coverage_sheet.write(6, 0, "Maximum Population Allele Frequency")
    coverage_sheet.write(6, 1, "{}".format(thresholds['max_maf']))
    coverage_sheet.write(7, 0, "Sample")
    coverage_sheet.write(7, 1, "Library")
    coverage_sheet.write(7, 2, "Amplicon")
    coverage_sheet.write(7, 3, "Num Reads")
    coverage_sheet.write(7, 4, "Coverage")

    # Row 8 is intentionally left blank as a visual separator.
    row_num = 9
    for amplicon in ordered_amplicon_coverage:
        # Colour by mean coverage: <250 red, <500 orange, else green.
        if amplicon.mean_coverage < 250:
            style = error_style
        elif amplicon.mean_coverage < 500:
            style = warning_style
        else:
            style = pass_style
        coverage_sheet.write(row_num, 0, "{}".format(amplicon.sample), style)
        coverage_sheet.write(row_num, 1, "{}".format(amplicon.library_name), style)
        coverage_sheet.write(row_num, 2, "{}".format(amplicon.amplicon), style)
        coverage_sheet.write(row_num, 3, "{}".format(amplicon.num_reads), style)
        coverage_sheet.write(row_num, 4, "{}".format(amplicon.mean_coverage), style)
        row_num += 1

    ####################################################################################################################

    # One sheet per tier bucket; identical column layout on each.
    sheet_num = 0
    for sheet in tier_sheets:
        sheet.write(0, 0, "Sample")
        sheet.write(0, 1, "Library")
        sheet.write(0, 2, "Gene")
        sheet.write(0, 3, "Amplicon")
        sheet.write(0, 4, "Ref")
        sheet.write(0, 5, "Alt")
        sheet.write(0, 6, "Codon")
        sheet.write(0, 7, "AA")
        sheet.write(0, 8, "Max Caller Somatic VAF")
        sheet.write(0, 9, "Num Times in Database")
        sheet.write(0, 10, "Median VAF")
        sheet.write(0, 11, "StdDev VAF")
        sheet.write(0, 12, "Callers")
        sheet.write(0, 13, "Caller Counts")
        sheet.write(0, 14, "COSMIC IDs")
        sheet.write(0, 15, "Num COSMIC Samples")
        sheet.write(0, 16, "COSMIC AA")
        sheet.write(0, 17, "Clinvar Significance")
        sheet.write(0, 18, "Clinvar HGVS")
        sheet.write(0, 19, "Clinvar Disease")
        sheet.write(0, 20, "Coverage")
        sheet.write(0, 21, "Num Reads")
        sheet.write(0, 22, "Impact")
        sheet.write(0, 23, "Severity")
        sheet.write(0, 24, "Maximum Population AF")
        sheet.write(0, 25, "Min Caller Depth")
        sheet.write(0, 26, "Max Caller Depth")
        sheet.write(0, 27, "Chrom")
        sheet.write(0, 28, "Start")
        sheet.write(0, 29, "End")
        sheet.write(0, 30, "rsIDs")

        # Per-caller AF columns appear only for the callers in use.
        col = 31
        if 'mutect' in callers:
            sheet.write(0, col, "MuTect_AF")
            col += 1
        if 'vardict' in callers:
            sheet.write(0, col, "VarDict_AF")
            col += 1
        if 'freebayes' in callers:
            sheet.write(0, col, "FreeBayes_AF")
            col += 1
        if 'scalpel' in callers:
            sheet.write(0, col, "Scalpel_AF")
            col += 1
        if 'platypus' in callers:
            sheet.write(0, col, "Platypus_AF")
            col += 1
        if 'pindel' in callers:
            sheet.write(0, col, "Pindel_AF")
            col += 1

        row = 1
        for variant in report_data['variants'][tier_key[sheet_num]]:
            # A variant can span several amplicons; report the coverage
            # of each as a comma-joined list.
            amplicons = variant.amplicon_data['amplicon'].split(',')
            coverage_values = list()
            reads_values = list()
            for amplicon in amplicons:
                coverage_values.append(str(report_data['coverage'][amplicon]['mean_coverage']))
                reads_values.append(str(report_data['coverage'][amplicon]['num_reads']))
            coverage_string = ",".join(coverage_values)
            reads_string = ",".join(reads_values)
            sheet.write(row, 0, "{}".format(variant.sample))
            sheet.write(row, 1, "{}".format(variant.library_name))
            sheet.write(row, 2, "{}".format(variant.gene))
            sheet.write(row, 3, "{}".format(variant.amplicon_data['amplicon']))
            sheet.write(row, 4, "{}".format(variant.ref))
            sheet.write(row, 5, "{}".format(variant.alt))
            sheet.write(row, 6, "{}".format(variant.codon_change))
            sheet.write(row, 7, "{}".format(variant.aa_change))
            sheet.write(row, 8, "{}".format(variant.max_som_aaf))
            sheet.write(row, 9, "{}".format(variant.num_times_called))
            sheet.write(row, 10, "{}".format(variant.vaf_median))
            sheet.write(row, 11, "{}".format(variant.vaf_std_dev))
            sheet.write(row, 12, "{}".format(",".join(variant.callers) or None))
            sheet.write(row, 13, "{}".format(variant.num_times_callers))
            sheet.write(row, 14, "{}".format(",".join(variant.cosmic_ids) or None))
            sheet.write(row, 15, "{}".format(variant.cosmic_data['num_samples']))
            sheet.write(row, 16, "{}".format(variant.cosmic_data['aa']))
            sheet.write(row, 17, "{}".format(variant.clinvar_data['significance']))
            sheet.write(row, 18, "{}".format(variant.clinvar_data['hgvs']))
            sheet.write(row, 19, "{}".format(variant.clinvar_data['disease']))
            sheet.write(row, 20, "{}".format(coverage_string))
            sheet.write(row, 21, "{}".format(reads_string))
            sheet.write(row, 22, "{}".format(variant.impact))
            sheet.write(row, 23, "{}".format(variant.severity))
            sheet.write(row, 24, "{}".format(variant.max_maf_all))
            sheet.write(row, 25, "{}".format(variant.min_depth))
            sheet.write(row, 26, "{}".format(variant.max_depth))
            sheet.write(row, 27, "{}".format(variant.chr))
            sheet.write(row, 28, "{}".format(variant.pos))
            sheet.write(row, 29, "{}".format(variant.end))
            sheet.write(row, 30, "{}".format(",".join(variant.rs_ids)))

            # Same caller order as the headers written above.
            col = 31
            if 'mutect' in callers:
                sheet.write(row, col, "{}".format(variant.mutect.get('AAF') or None))
                col += 1
            if 'vardict' in callers:
                sheet.write(row, col, "{}".format(variant.vardict.get('AAF') or None))
                col += 1
            if 'freebayes' in callers:
                sheet.write(row, col, "{}".format(variant.freebayes.get('AAF') or None))
                col += 1
            if 'scalpel' in callers:
                sheet.write(row, col, "{}".format(variant.scalpel.get('AAF') or None))
                col += 1
            if 'platypus' in callers:
                sheet.write(row, col, "{}".format(variant.platypus.get('AAF') or None))
                col += 1
            if 'pindel' in callers:
                sheet.write(row, col, "{}".format(variant.pindel.get('AAF') or None))
                col += 1
            row += 1
        sheet_num += 1
    wb.save(report_name)
# Driver: parse CLI options, load sample configuration, and launch one Toil
# child job per sample that builds that sample's report.
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument('-s', '--samples_file', help="Input configuration file for samples")
    parser.add_argument('-c', '--configuration', help="Configuration file for various settings")
    parser.add_argument('-r', '--report', help="Root name for reports (per sample)", default='report')
    parser.add_argument('-a', '--address', help="IP Address for Cassandra connection", default='127.0.0.1')
    parser.add_argument('-u', '--username', help='Cassandra username for login', default=None)
    # NOTE(review): the numeric options below have float defaults but no
    # type=float, so values supplied on the command line arrive as strings
    # and later numeric comparisons would misbehave — confirm and fix.
    parser.add_argument('-d', '--min_depth', help='Minimum depth threshold for variant reporting', default=250.0)
    parser.add_argument('-t', '--min_somatic_var_freq', help='Minimum reportable somatic variant frequency',
                        default=0.10)
    parser.add_argument('-p', '--max_pop_freq', help='Maximum allowed population allele frequency', default=0.005)
    Job.Runner.addToilOptions(parser)
    args = parser.parse_args()
    args.logLevel = "INFO"

    sys.stdout.write("Parsing configuration data\n")
    config = configuration.configure_runtime(args.configuration)

    sys.stdout.write("Parsing sample data\n")
    libraries = configuration.configure_samples(args.samples_file, config)
    samples = configuration.merge_library_configs_samples(libraries)

    # Prompt for a password only when a username was given.
    if args.username:
        password = getpass.getpass()
        auth_provider = PlainTextAuthProvider(username=args.username, password=password)
    else:
        auth_provider = None

    # Filtering cutoffs passed straight through to process_sample.
    thresholds = {'min_saf': args.min_somatic_var_freq,
                  'max_maf': args.max_pop_freq,
                  'depth': args.min_depth}

    callers = ("mutect", "platypus", "vardict", "scalpel", "freebayes", "pindel")

    sys.stdout.write("Processing samples\n")
    root_job = Job.wrapJobFn(pipeline.spawn_batch_jobs, cores=1)

    # Fan out: one independent report job per sample.
    for sample in samples:
        sample_job = Job.wrapJobFn(process_sample, config, sample, samples, [args.address], auth_provider,
                                   thresholds, callers, cores=1)
        root_job.addChild(sample_job)

    # Start workflow execution
    Job.Runner.startToil(root_job, args)
e29f3116fc74c1bbafeee96c0207030239db58a4 | 6,939 | py | Python | unit_tests/glhe/input_processor/test_plant_loop_component_factory.py | stianchris/GLHE | 80c3eecca81ffd50d5077f87027c9441292452f5 | [
"MIT"
] | 2 | 2018-11-06T08:04:04.000Z | 2020-10-09T14:52:36.000Z | unit_tests/glhe/input_processor/test_plant_loop_component_factory.py | stianchris/GLHE | 80c3eecca81ffd50d5077f87027c9441292452f5 | [
"MIT"
] | 68 | 2018-03-27T01:43:22.000Z | 2019-09-09T12:05:44.000Z | unit_tests/glhe/input_processor/test_plant_loop_component_factory.py | mitchute/GLHE | 80c3eecca81ffd50d5077f87027c9441292452f5 | [
"MIT"
] | 4 | 2018-05-24T03:02:44.000Z | 2021-08-16T13:54:09.000Z | import os
import tempfile
import unittest
from glhe.input_processor.input_processor import InputProcessor
from glhe.input_processor.plant_loop_component_factory import make_plant_loop_component
from glhe.output_processor.output_processor import OutputProcessor
from glhe.profiles.constant_flow import ConstantFlow
from glhe.profiles.constant_load import ConstantLoad
from glhe.profiles.constant_temp import ConstantTemp
from glhe.topology.ground_heat_exchanger import GroundHeatExchanger
from glhe.topology.pipe import Pipe
from glhe.utilities.functions import write_json
# Short aliases for the path helpers used when building the fixture paths below.
join = os.path.join
norm = os.path.normpath
# NOTE(review): cwd appears unused in this module -- confirm before removing.
cwd = os.getcwd()
class TestPLCompFactory(unittest.TestCase):
    """Tests for make_plant_loop_component.

    Each supported 'comp-type' string must be mapped by the factory to an
    instance of the corresponding plant-loop component class.
    """
    @staticmethod
    def add_instance():
        """Write a complete GLHE input description to a temp directory and
        return the (InputProcessor, OutputProcessor) pair the factory needs.
        """
        f_path = os.path.dirname(os.path.abspath(__file__))
        # Full input structure: component definitions, profiles, and the
        # simulation/topology sections referenced by the factory tests below.
        d = {
            "borehole-definitions": [
                {
                    "borehole-type": "single-grouted",
                    "length": 76.2,
                    "diameter": 0.114,
                    "grout-def-name": "standard grout",
                    "name": "borehole type 1",
                    "pipe-def-name": "26 mm SDR-11 HDPE",
                    "segments": 10,
                    "shank-spacing": 0.0469
                }
            ],
            "borehole": [
                {
                    "name": "bh 1",
                    "borehole-def-name": "borehole type 1",
                    "location": {
                        "x": 0,
                        "y": 0,
                        "z": 0
                    }
                }
            ],
            "flow-profile": [
                {
                    "name": "constant 0.3",
                    "flow-profile-type": "constant",
                    "value": 0.3
                }
            ],
            "fluid": {
                "fluid-type": "water"
            },
            "ground-temperature-model": {
                "ground-temperature-model-type": "constant",
                "temperature": 16.1
            },
            "grout-definitions": [
                {
                    "name": "standard grout",
                    "conductivity": 0.85,
                    "density": 2500,
                    "specific-heat": 1560
                }
            ],
            "load-profile": [
                {
                    "name": "constant 4000",
                    "load-profile-type": "constant",
                    "value": 4000
                }
            ],
            "ground-heat-exchanger": [
                {
                    "name": "GHE 1",
                    "simulation-mode": "enhanced",
                    # g-function data is borrowed from the MFRTRT validation set.
                    "g-function-path": norm(join(f_path, '..', '..', '..', 'validation', 'MFRTRT_EWT_g_functions',
                                                 'EWT_experimental_g_functions.csv')),
                    "g_b-function-path": norm(join(f_path, '..', '..', '..', 'validation', 'MFRTRT_EWT_g_functions',
                                                   'EWT_experimental_g_functions.csv')),
                    "flow-paths": [
                        {
                            "name": "path 1",
                            "components": [
                                {
                                    "comp-type": "borehole",
                                    "name": "bh 1"
                                }
                            ]
                        }
                    ],
                    "load-aggregation": {
                        "method": "dynamic",
                        "expansion-rate": 1.5,
                        "number-bins-per-level": 9
                    }
                }
            ],
            "pipe-definitions": [
                {
                    "name": "26 mm SDR-11 HDPE",
                    "outer-diameter": 0.0267,
                    "inner-diameter": 0.0218,
                    "conductivity": 0.39,
                    "density": 950,
                    "specific-heat": 1900
                }
            ],
            'pipe': [
                {'pipe-def-name': '26 mm SDR-11 HDPE',
                 'name': 'pipe 1',
                 'length': 100}],
            "simulation": {
                "name": "Basic GLHE",
                "initial-temperature": 16.1,
                "time-steps-per-hour": 6,
                "runtime": 14400
            },
            "topology": {
                "demand-side": [
                    {
                        "comp-type": "flow-profile",
                        "name": "constant 0.3"
                    },
                    {
                        "comp-type": "load-profile",
                        "name": "constant 4000"
                    }
                ],
                "supply-side": [
                    {
                        "comp-type": "ground-heat-exchanger",
                        "name": "GHE 1"
                    }
                ]
            },
            'temperature-profile': [
                {'temperature-profile-type': 'constant',
                 'name': 'constant 20',
                 'value': 20}],
            "soil": {
                "name": "dirt",
                "conductivity": 2.7,
                "density": 2500,
                "specific-heat": 880
            }
        }
        # Serialize to a fresh temp dir so each call gets isolated output files.
        temp_dir = tempfile.mkdtemp()
        temp_file = join(temp_dir, 'in.json')
        d['simulation']['output-path'] = temp_dir
        write_json(temp_file, d)
        ip = InputProcessor(temp_file)
        op = OutputProcessor(temp_dir, 'out.csv')
        return ip, op
    def test_add_flow_profile(self):
        """'flow-profile' comp-type maps to ConstantFlow."""
        ip, op = self.add_instance()
        tst = make_plant_loop_component({'comp-type': 'flow-profile',
                                         'name': 'constant 0.3'}, ip, op)
        self.assertIsInstance(tst, ConstantFlow)
    def test_add_load_profile(self):
        """'load-profile' comp-type maps to ConstantLoad."""
        ip, op = self.add_instance()
        tst = make_plant_loop_component({'comp-type': 'load-profile',
                                         'name': 'constant 4000'}, ip, op)
        self.assertIsInstance(tst, ConstantLoad)
    def test_add_ground_heat_exchanger(self):
        """'ground-heat-exchanger' comp-type maps to GroundHeatExchanger."""
        ip, op = self.add_instance()
        # NOTE(review): the config names this "GHE 1" but the lookup uses
        # 'ghe 1' -- presumably the factory is case-insensitive; confirm.
        tst = make_plant_loop_component({'comp-type': 'ground-heat-exchanger',
                                         'name': 'ghe 1'}, ip, op)
        self.assertIsInstance(tst, GroundHeatExchanger)
    def test_add_pipe(self):
        """'pipe' comp-type maps to Pipe."""
        ip, op = self.add_instance()
        tst = make_plant_loop_component({'comp-type': 'pipe',
                                         'name': 'pipe 1'}, ip, op)
        self.assertIsInstance(tst, Pipe)
    def test_add_temperature_profile(self):
        """'temperature-profile' comp-type maps to ConstantTemp."""
        ip, op = self.add_instance()
        tst = make_plant_loop_component({'comp-type': 'temperature-profile',
                                         'name': 'constant 20'}, ip, op)
        self.assertIsInstance(tst, ConstantTemp)
| 36.329843 | 116 | 0.416342 |
52f75e2cf1a3bf61341c38f0c8fd09c086e065ba | 8,806 | py | Python | Athos/Networks/DenseNet/densenet.py | krantikiran68/EzPC | cacf10f31cddf55e4a06908fcfc64f8d7d0f85bd | [
"MIT"
] | 221 | 2019-05-16T16:42:49.000Z | 2022-03-29T14:05:31.000Z | Athos/Networks/DenseNet/densenet.py | krantikiran68/EzPC | cacf10f31cddf55e4a06908fcfc64f8d7d0f85bd | [
"MIT"
] | 63 | 2019-07-02T11:50:15.000Z | 2022-03-31T08:14:02.000Z | Athos/Networks/DenseNet/densenet.py | krantikiran68/EzPC | cacf10f31cddf55e4a06908fcfc64f8d7d0f85bd | [
"MIT"
] | 67 | 2019-08-30T08:44:47.000Z | 2022-03-23T08:08:33.000Z | # Copyright 2016 pudae. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains the definition of the DenseNet architecture.
As described in https://arxiv.org/abs/1608.06993.
Densely Connected Convolutional Networks
Gao Huang, Zhuang Liu, Kilian Q. Weinberger, Laurens van der Maaten
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
slim = tf.contrib.slim
@slim.add_arg_scope
def _global_avg_pool2d(
    inputs, data_format="NHWC", scope=None, outputs_collections=None
):
    """Average-pool a 7x7 feature map down to 1x1.

    NOTE(review): the pooling window is hard-coded to 7x7 in NHWC layout
    (the 224x224 DenseNet case) regardless of data_format -- confirm before
    feeding other input sizes.
    """
    with tf.variable_scope(scope, "xx", [inputs]) as sc:
        # Reduction axes for the (disabled) reduce_mean alternative below.
        axis = [1, 2] if data_format == "NHWC" else [2, 3]
        # net = tf.reduce_mean(inputs, axis=axis, keep_dims=True)
        pooled = tf.nn.avg_pool(
            inputs, ksize=[1, 7, 7, 1], strides=[1, 1, 1, 1], padding="VALID"
        )
        pooled = slim.utils.collect_named_outputs(outputs_collections, sc.name, pooled)
        return pooled
@slim.add_arg_scope
def _conv(
    inputs,
    num_filters,
    kernel_size,
    stride=1,
    dropout_rate=None,
    scope=None,
    outputs_collections=None,
):
    """Pre-activation convolution unit: batch norm -> ReLU -> conv2d, with
    optional dropout.

    NOTE(review): `stride` is accepted but never forwarded to slim.conv2d
    (also true of the original); kept for interface compatibility.
    """
    with tf.variable_scope(scope, "xx", [inputs]) as sc:
        net = slim.batch_norm(inputs)
        net = tf.nn.relu(net)
        net = slim.conv2d(net, num_filters, kernel_size)
        if dropout_rate:
            # Bug fix: the original called tf.nn.dropout(net) with no
            # keep_prob/rate argument, which raises as soon as dropout_rate
            # is set (dropout_rate itself was never used). slim.dropout also
            # honours the is_training flag densenet() pushes via
            # arg_scope([slim.batch_norm, slim.dropout], ...), so dropout is
            # correctly disabled at inference time.
            net = slim.dropout(net, keep_prob=1.0 - dropout_rate)
        net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net)
        return net
@slim.add_arg_scope
def _conv_block(
    inputs, num_filters, data_format="NHWC", scope=None, outputs_collections=None
):
    """One DenseNet layer: 1x1 bottleneck conv, 3x3 conv, concat with input."""
    with tf.variable_scope(scope, "conv_blockx", [inputs]) as sc:
        bottleneck = _conv(inputs, num_filters * 4, 1, scope="x1")
        new_features = _conv(bottleneck, num_filters, 3, scope="x2")
        # Dense connectivity: stack the new feature maps onto the inputs
        # along the channel axis.
        channel_axis = 3 if data_format == "NHWC" else 1
        out = tf.concat([inputs, new_features], axis=channel_axis)
        out = slim.utils.collect_named_outputs(outputs_collections, sc.name, out)
        return out
@slim.add_arg_scope
def _dense_block(
    inputs,
    num_layers,
    num_filters,
    growth_rate,
    grow_num_filters=True,
    scope=None,
    outputs_collections=None,
):
    """Stack `num_layers` densely-connected conv blocks.

    Returns the block output plus the running channel count, which grows by
    `growth_rate` per layer when `grow_num_filters` is set.
    """
    with tf.variable_scope(scope, "dense_blockx", [inputs]) as sc:
        net = inputs
        for layer_idx in range(1, num_layers + 1):
            net = _conv_block(net, growth_rate, scope="conv_block" + str(layer_idx))
            if grow_num_filters:
                num_filters = num_filters + growth_rate
        net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net)
    return net, num_filters
@slim.add_arg_scope
def _transition_block(
    inputs, num_filters, compression=1.0, scope=None, outputs_collections=None
):
    """Compress channels with a 1x1 conv, then halve spatial size by pooling."""
    compressed_filters = int(num_filters * compression)
    with tf.variable_scope(scope, "transition_blockx", [inputs]) as sc:
        net = _conv(inputs, compressed_filters, 1, scope="blk")
        net = slim.avg_pool2d(net, 2)
        net = slim.utils.collect_named_outputs(outputs_collections, sc.name, net)
    return net, compressed_filters
def densenet(
    inputs,
    num_classes=1000,
    reduction=None,
    growth_rate=None,
    num_filters=None,
    num_layers=None,
    dropout_rate=None,
    data_format="NHWC",
    is_training=True,
    reuse=None,
    scope=None,
):
    """Build a DenseNet and return (logits, end_points).

    inputs: image batch; transposed to NCHW here if data_format says so.
    num_layers: list with the layer count of each dense block; its length
        determines the number of dense blocks.
    reduction: channel-compression factor for transition blocks
        (compression = 1 - reduction).
    Returns the 1x1-conv "logits" tensor and the dict of collected
    intermediate outputs.
    """
    # The architecture hyper-parameters have no sensible defaults; callers
    # (densenet121/161/169) must always supply them.
    assert reduction is not None
    assert growth_rate is not None
    assert num_filters is not None
    assert num_layers is not None
    compression = 1.0 - reduction
    num_dense_blocks = len(num_layers)
    if data_format == "NCHW":
        inputs = tf.transpose(inputs, [0, 3, 1, 2])
    with tf.variable_scope(
        scope, "densenetxxx", [inputs, num_classes], reuse=reuse
    ) as sc:
        end_points_collection = sc.name + "_end_points"
        # Thread is_training, the outputs collection, and the dropout rate
        # through every layer via arg_scope instead of per-call kwargs.
        with slim.arg_scope(
            [slim.batch_norm, slim.dropout], is_training=is_training
        ), slim.arg_scope(
            [slim.conv2d, _conv, _conv_block, _dense_block, _transition_block],
            outputs_collections=end_points_collection,
        ), slim.arg_scope(
            [_conv], dropout_rate=dropout_rate
        ):
            net = inputs
            # initial convolution
            net = slim.conv2d(net, num_filters, 7, stride=2, scope="conv1")
            net = slim.batch_norm(net)
            net = tf.nn.relu(net)
            net = slim.max_pool2d(net, 3, stride=2, padding="SAME")
            # blocks
            for i in range(num_dense_blocks - 1):
                # dense blocks
                net, num_filters = _dense_block(
                    net,
                    num_layers[i],
                    num_filters,
                    growth_rate,
                    scope="dense_block" + str(i + 1),
                )
                # Add transition_block
                net, num_filters = _transition_block(
                    net,
                    num_filters,
                    compression=compression,
                    scope="transition_block" + str(i + 1),
                )
            # Last dense block has no trailing transition block.
            net, num_filters = _dense_block(
                net,
                num_layers[-1],
                num_filters,
                growth_rate,
                scope="dense_block" + str(num_dense_blocks),
            )
            # final blocks
            with tf.variable_scope("final_block", [inputs]):
                net = slim.batch_norm(net)
                net = tf.nn.relu(net)
                net = _global_avg_pool2d(net, scope="global_avg_pool")
            # Classifier implemented as a 1x1 convolution over the pooled map.
            net = slim.conv2d(
                net,
                num_classes,
                1,
                biases_initializer=tf.zeros_initializer(),
                scope="logits",
            )
            end_points = slim.utils.convert_collection_to_dict(end_points_collection)
            # if num_classes is not None:
            #     end_points['predictions'] = slim.softmax(net, scope='predictions')
            return net, end_points
def densenet121(
    inputs, num_classes=1000, data_format="NHWC", is_training=True, reuse=None
):
    """DenseNet-121: dense blocks of 6/12/24/16 layers, growth rate 32."""
    arch = dict(
        reduction=0.5,
        growth_rate=32,
        num_filters=64,
        num_layers=[6, 12, 24, 16],
    )
    return densenet(
        inputs,
        num_classes=num_classes,
        data_format=data_format,
        is_training=is_training,
        reuse=reuse,
        scope="densenet121",
        **arch,
    )
densenet121.default_image_size = 224
def densenet161(
    inputs, num_classes=1000, data_format="NHWC", is_training=True, reuse=None
):
    """DenseNet-161: dense blocks of 6/12/36/24 layers, growth rate 48."""
    arch = dict(
        reduction=0.5,
        growth_rate=48,
        num_filters=96,
        num_layers=[6, 12, 36, 24],
    )
    return densenet(
        inputs,
        num_classes=num_classes,
        data_format=data_format,
        is_training=is_training,
        reuse=reuse,
        scope="densenet161",
        **arch,
    )
densenet161.default_image_size = 224
def densenet169(
    inputs, num_classes=1000, data_format="NHWC", is_training=True, reuse=None
):
    """DenseNet-169: dense blocks of 6/12/32/32 layers, growth rate 32."""
    arch = dict(
        reduction=0.5,
        growth_rate=32,
        num_filters=64,
        num_layers=[6, 12, 32, 32],
    )
    return densenet(
        inputs,
        num_classes=num_classes,
        data_format=data_format,
        is_training=is_training,
        reuse=reuse,
        scope="densenet169",
        **arch,
    )
densenet169.default_image_size = 224
def densenet_arg_scope(
    weight_decay=1e-4,
    batch_norm_decay=0.99,
    batch_norm_epsilon=1.1e-5,
    data_format="NHWC",
):
    """Return the default arg_scope for building a DenseNet.

    Sets the data format on every layer type, strips activation/bias from
    conv layers (batch norm + ReLU are applied explicitly in _conv), and
    configures batch-norm decay/epsilon.

    NOTE(review): conv weights are zero-initialized and the L2 regularizer is
    commented out, so `weight_decay` is currently unused -- this looks
    intentional for a model whose weights are loaded from a checkpoint;
    confirm before training from scratch.
    """
    with slim.arg_scope(
        [
            slim.conv2d,
            slim.batch_norm,
            slim.avg_pool2d,
            slim.max_pool2d,
            _conv_block,
            _global_avg_pool2d,
        ],
        data_format=data_format,
    ):
        with slim.arg_scope(
            [slim.conv2d],
            # weights_regularizer=slim.l2_regularizer(weight_decay),
            weights_initializer=tf.zeros_initializer(),
            activation_fn=None,
            biases_initializer=None,
        ):
            with slim.arg_scope(
                [slim.batch_norm],
                scale=True,
                decay=batch_norm_decay,
                epsilon=batch_norm_epsilon,
            ) as scope:
                return scope
| 28.224359 | 85 | 0.597661 |
b7503416aa58622714f6dbdfd950d0c66daa24a2 | 25 | py | Python | data/studio21_generated/introductory/3276/starter_code.py | vijaykumawat256/Prompt-Summarization | 614f5911e2acd2933440d909de2b4f86653dc214 | [
"Apache-2.0"
] | null | null | null | data/studio21_generated/introductory/3276/starter_code.py | vijaykumawat256/Prompt-Summarization | 614f5911e2acd2933440d909de2b4f86653dc214 | [
"Apache-2.0"
] | null | null | null | data/studio21_generated/introductory/3276/starter_code.py | vijaykumawat256/Prompt-Summarization | 614f5911e2acd2933440d909de2b4f86653dc214 | [
"Apache-2.0"
] | null | null | null | def missing(nums, str):
| 12.5 | 23 | 0.68 |
483a83be43f98cdaf564eff6eb1c00ffdd2209cf | 1,408 | py | Python | pygs/graphserver/compiler/tools.py | ninowalker/graphserver | dc08070bc6e295986633cf510ca46a2f8d451b92 | [
"BSD-3-Clause-Clear"
] | 2 | 2016-01-02T22:09:07.000Z | 2016-05-09T04:48:13.000Z | pygs/graphserver/compiler/tools.py | wlach/graphserver | 52dac7487673aa5f28bfe2342dbe93ce03880f7a | [
"BSD-3-Clause-Clear"
] | null | null | null | pygs/graphserver/compiler/tools.py | wlach/graphserver | 52dac7487673aa5f28bfe2342dbe93ce03880f7a | [
"BSD-3-Clause-Clear"
] | null | null | null | from graphserver.core import ServiceCalendar
import pytz
from datetime import timedelta, datetime, time
from graphserver.util import TimeHelpers
def iter_dates(startdate, enddate):
    """Yield every date from startdate through enddate, inclusive.

    Yields nothing when startdate is after enddate.
    """
    one_day = timedelta(1)
    current = startdate
    while current <= enddate:
        yield current
        current = current + one_day
def service_calendar_from_timezone(gtfsdb, timezone_name):
    """Build a ServiceCalendar spanning the GTFS feed's full date range.

    For every service day, record the UTC unix-time bounds of that local
    day (midnight to midnight+24h, DST-normalized via pytz) together with
    the service ids running on it.
    """
    tz = pytz.timezone(timezone_name)
    # grab date, day service bounds
    first_day, last_day = gtfsdb.date_range()
    # init empty calendar
    calendar = ServiceCalendar()
    # for each day in service range, inclusive
    for day in iter_dates(first_day, last_day):
        # service ids of all periods running this date, as utf-8 byte strings
        sids = [sid.encode('utf8') for sid in gtfsdb.service_periods(day)]
        # figure datetime.datetime bounds of this service day
        local_midnight = tz.localize(datetime.combine(day, time(0)))
        day_begins = tz.normalize(local_midnight)
        day_ends = tz.normalize(local_midnight + timedelta(hours=24))
        # enter as entry in service calendar
        calendar.add_period(
            TimeHelpers.datetime_to_unix(day_begins),
            TimeHelpers.datetime_to_unix(day_ends),
            sids,
        )
    return calendar
| 36.102564 | 141 | 0.723722 |
ca0b0bfc8140b2ee6d07f363b7e0b0d8720ccb35 | 15,717 | py | Python | lane_detector.py | amedveczki/CarND-Advanced-Lane-Lines | f9dab1b82bc4f570a9bb880021175cec78597e4f | [
"MIT"
] | null | null | null | lane_detector.py | amedveczki/CarND-Advanced-Lane-Lines | f9dab1b82bc4f570a9bb880021175cec78597e4f | [
"MIT"
] | null | null | null | lane_detector.py | amedveczki/CarND-Advanced-Lane-Lines | f9dab1b82bc4f570a9bb880021175cec78597e4f | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import numpy as np
import cv2
import matplotlib.pyplot as plt
import collections
class lane_detector:
STATE_INIT = 0 # first start or we are completely lost
STATE_OK = 1 # continue using polynomial+epsilon for searching lane instead of boxes
STATE_UNCERTAIN = 2 # until MAX_UNCERTAIN_FRAMES we hold onto previous curvature
MAX_UNCERTAIN_FRAMES = 2 # TODO test
NUM_SLIDING_WINDOWS = 11
SLIDING_WINDOW_MARGIN = 85
MIN_PIXELS_TO_RECENTER_WINDOW = 50
POLY_WINDOW_MARGIN = 85
MIN_CURVE_RADIUS = 750
MAX_CURVE_CHECK = 2000 # don't check radius difference above this (straight line)
KEEP_LAST_FIT = True # Keep last fit (we wouldn't draw anything if False)
MAX_DRAW_AVERAGE=3 # polynomials are drawn based on this amount of frames
def __init__(self, persp, fancy = None):
self.state = self.STATE_INIT
self.fancy = fancy
self.ploty = None
self.persp = persp
self.uncertain_frames = 0
# Keeps last MAX_DRAW_AVERAGE
self.left_fits = collections.deque(maxlen = self.MAX_DRAW_AVERAGE)
self.right_fits = collections.deque(maxlen = self.MAX_DRAW_AVERAGE)
self.left_fit = self.right_fit = self.left_curverad = self.right_curverad = None
def diff(self,a,b):
return max(a,b)/min(a,b)-1
def sanity_check(self, left_curverad, right_curverad,
left_fit, right_fit):
busted = False
if self.left_curverad is not None and self.left_curverad < self.MAX_CURVE_CHECK and left_curverad > self.MAX_CURVE_CHECK:
if self.diff(left_curverad, self.left_curverad) > 0.3:
busted = True
if not busted and self.right_curverad is not None and self.right_curverad < self.MAX_CURVE_CHECK and right_curverad > self.MAX_CURVE_CHECK:
if self.diff(right_curverad, self.right_curverad) > 0.3:
busted = True
if left_curverad < self.MIN_CURVE_RADIUS or right_curverad < self.MIN_CURVE_RADIUS:
busted = True
if busted:
self.uncertain_frames += 1
if self.uncertain_frames > self.MAX_UNCERTAIN_FRAMES:
return self.STATE_INIT
return self.STATE_UNCERTAIN
self.uncertain_frames = 0
return self.STATE_OK
def calc_radius_and_carpos(self, imgshape, left_fit, left_fitx, right_fitx):
lane_width_meters = 3.7 # meters
lane_width_pixels = 365 # there are 365 pixels between the left and right at the bottom of bird-eye view (old value: 700)
# Define conversions in x and y from pixels space to meters
dashed_line_pixel = 75 # number of pixels a dashed line is
dashed_line_meter = 3.048 # 10 feet is one line : https://news.osu.edu/slow-down----those-lines-on-the-road-are-longer-than-you-think/
ym_per_pix = dashed_line_meter/dashed_line_pixel
xm_per_pix = lane_width_meters/lane_width_pixels
# Just use the originals as it doesn't seems to be working :(
ym_per_pix = 30/720 # meters per pixel in y dimension
xm_per_pix = 3.7/700 # meters per pixel in x dimension
# We'll choose the maximum y-value, corresponding to the bottom of the image
y_eval = np.max(self.ploty)
left_lane = left_fit[0]*y_eval**2 + left_fit[1]*y_eval + left_fit[2]
carpos = (imgshape[1]/2 - left_lane)/lane_width_pixels
left_fit_cr = np.polyfit(self.ploty*ym_per_pix, left_fitx*xm_per_pix, 2)
right_fit_cr = np.polyfit(self.ploty*ym_per_pix, right_fitx*xm_per_pix, 2)
left_curverad = (1+(2*left_fit_cr[0]*y_eval*ym_per_pix+left_fit_cr[1])**2)**1.5/abs(2*left_fit_cr[0])
right_curverad = (1+(2*right_fit_cr[0]*y_eval*ym_per_pix+right_fit_cr[1])**2)**1.5/abs(2*right_fit_cr[0])
return left_curverad, right_curverad, carpos, carpos*lane_width_meters
# print("Left: %d meter right: %d meter" % (left_curverad, right_curverad))
def process(self, warped, undist):
if self.ploty is None:
self.ploty = np.linspace(0, warped.shape[0]-1, warped.shape[0])
if self.state == self.STATE_INIT:
leftx, lefty, rightx, righty, out_img = self.find_lane_pixels_histogram(warped)
else:
leftx, lefty, rightx, righty, out_img = self.search_around_poly(self.left_fit, self.right_fit, warped)
left_fit, right_fit, left_fitx, right_fitx = self.fit_poly(leftx, lefty, rightx, righty)
left_curverad, right_curverad, carpos, carpos_meters = self.calc_radius_and_carpos(warped.shape, left_fit, left_fitx, right_fitx)
self.state = self.sanity_check(left_curverad, right_curverad,
left_fit, right_fit)
if self.fancy:
## Visualization ##
# Colors in the left and right lane regions
out_img[lefty, leftx] = [255, 0, 0]
out_img[righty, rightx] = [0, 0, 255]
out_img[np.uint16(self.ploty), np.uint16(left_fitx)] = [255, 255, 0]
out_img[np.uint16(self.ploty), np.uint16(right_fitx)] = [0, 255, 255]
self.fancy.save("lane_detector_%d_%d" % (left_curverad, right_curverad), out_img)
def first_or_second_avg(a,b):
if b is None:
return a
return (a+b)/2
if self.state == self.STATE_OK:
self.left_fit = first_or_second_avg(left_fit, self.left_fit)
self.right_fit = first_or_second_avg(right_fit, self.right_fit)
self.left_fits.append(left_fit)
self.right_fits.append(right_fit)
self.left_curverad = left_curverad
self.right_curverad = right_curverad
else:
if False and len(self.left_fits) and (not self.KEEP_LAST_FIT or len(self.left_fits) > 1):
self.left_fits.pop()
self.right_fits.pop()
if self.state == self.STATE_INIT:
self.left_curverad = self.right_curverad = None
projback = self.project_back(undist, warped)
if self.state == self.STATE_OK:
state_text = "OK"
elif self.state == self.STATE_INIT:
state_text = "Reinit, bad frames %d" % self.uncertain_frames
else:
state_text = "NOK, bad frames %d" % self.uncertain_frames
cv2.putText(projback, "Left radius %4dm right radius %4dm Camera position is %3.1f (%1.1f meter from left lane) state %s" % (left_curverad, right_curverad, carpos, carpos_meters, state_text),
(0, 30), cv2.FONT_HERSHEY_SIMPLEX, .5, (255, 255, 255))
if self.fancy:
self.fancy.save("project_back", projback)
return projback
def search_around_poly(self, left_fit, right_fit, binary_warped):
# Grab activated pixels
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
margin = self.POLY_WINDOW_MARGIN
if self.state == self.STATE_UNCERTAIN:
margin *= 0.8
left_lane_inds = ((nonzerox > (self.left_fit[0]*(nonzeroy**2) + left_fit[1]*nonzeroy +
left_fit[2] - self.POLY_WINDOW_MARGIN)) & (nonzerox < (left_fit[0]*(nonzeroy**2) +
left_fit[1]*nonzeroy + left_fit[2] + self.POLY_WINDOW_MARGIN)))
right_lane_inds = ((nonzerox > (right_fit[0]*(nonzeroy**2) + right_fit[1]*nonzeroy +
right_fit[2] - self.POLY_WINDOW_MARGIN)) & (nonzerox < (right_fit[0]*(nonzeroy**2) +
right_fit[1]*nonzeroy + right_fit[2] + self.POLY_WINDOW_MARGIN)))
# Again, extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
## Visualization ##
# Create an image to draw on and an image to show the selection window
out_img = np.dstack((binary_warped, binary_warped, binary_warped))
window_img = np.zeros_like(out_img)
# Color in left and right line pixels
out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [0, 0, 255]
# this.left_fitx might be out of date (as it might be averaged/not used)
left_fitx = left_fit[0]*self.ploty**2 + left_fit[1]*self.ploty + left_fit[2]
right_fitx = right_fit[0]*self.ploty**2 + right_fit[1]*self.ploty + right_fit[2]
# Generate a polygon to illustrate the search window area
# And recast the x and y points into usable format for cv2.fillPoly()
left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, self.ploty]))])
left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin,
self.ploty])))])
left_line_pts = np.hstack((left_line_window1, left_line_window2))
right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, self.ploty]))])
right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin,
self.ploty])))])
right_line_pts = np.hstack((right_line_window1, right_line_window2))
# Draw the lane onto the warped blank image
cv2.fillPoly(window_img, np.int_([left_line_pts]), (0,255, 0))
cv2.fillPoly(window_img, np.int_([right_line_pts]), (0,255, 0))
result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
return leftx, lefty, rightx, righty, result
def project_back(self, undist, warped):
if len(self.left_fits) == 0:
return undist
# Get the average of the last N polys and create new Xs for them
left_fit = sum(self.left_fits)/len(self.left_fits)
right_fit = sum(self.right_fits)/len(self.right_fits)
left_fitx = left_fit[0]*self.ploty**2 + left_fit[1]*self.ploty + left_fit[2]
right_fitx = right_fit[0]*self.ploty**2 + right_fit[1]*self.ploty + right_fit[2]
# Project back the lines
# Create an image to draw the lines on
warp_zero = np.zeros_like(warped).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
# Recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([left_fitx, self.ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, self.ploty])))])
pts = np.hstack((pts_left, pts_right))
# Draw the lane onto the warped blank image
cv2.fillPoly(color_warp, np.int_([pts]), (0,255, 0))
# Warp the blank back to original image space using inverse perspective matrix (Minv)
newwarp = self.persp.unwarp(color_warp)
# Combine the result with the original image
result = cv2.addWeighted(undist, 1, newwarp, 0.3, 0)
return result
def find_lane_pixels_histogram(self, binary_warped):
# Take a histogram of the bottom half of the image
histogram = np.sum(binary_warped[binary_warped.shape[0]//2:,:], axis=0)
# Create an output image to draw on and visualize the result
out_img = np.dstack((binary_warped, binary_warped, binary_warped)) if self.fancy else None
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
midpoint = np.int(histogram.shape[0]//2)
# points closer to center are more likely to be lanes
weight = np.linspace(.5, 1, midpoint)
leftx_base = np.argmax(histogram[:midpoint] * weight)
rightx_base = np.argmax(histogram[midpoint:] * weight[::-1]) + midpoint
# Set height of windows - based on NUM_SLIDING_WINDOWSs above and image shape
window_height = np.int(binary_warped.shape[0]//self.NUM_SLIDING_WINDOWS)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = binary_warped.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions to be updated later for each window in NUM_SLIDING_WINDOWSs
leftx_current = leftx_base
rightx_current = rightx_base
# Create empty lists to receive left and right lane pixel indices
left_lane_inds = []
right_lane_inds = []
# Step through the windows one by one
for window in range(self.NUM_SLIDING_WINDOWS):
# Identify window boundaries in x and y (and right and left)
win_y_low = binary_warped.shape[0] - (window+1)*window_height
win_y_high = binary_warped.shape[0] - window*window_height
win_xleft_low = leftx_current - self.SLIDING_WINDOW_MARGIN
win_xleft_high = leftx_current + self.SLIDING_WINDOW_MARGIN
win_xright_low = rightx_current - self.SLIDING_WINDOW_MARGIN
win_xright_high = rightx_current + self.SLIDING_WINDOW_MARGIN
if self.fancy:
# Draw the windows on the visualization image
cv2.rectangle(out_img,(win_xleft_low,win_y_low), (win_xleft_high,win_y_high),(0,255,0), 2)
cv2.rectangle(out_img,(win_xright_low,win_y_low), (win_xright_high,win_y_high),(0,255,0), 2)
# Identify the nonzero pixels in x and y within the window #
good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xleft_low) & (nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) &
(nonzerox >= win_xright_low) & (nonzerox < win_xright_high)).nonzero()[0]
# Append these indices to the lists
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
# If you found > MIN_PIXELS_TO_RECENTER_WINDOW pixels, recenter next window on their mean position
if len(good_left_inds) > self.MIN_PIXELS_TO_RECENTER_WINDOW:
leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
if len(good_right_inds) > self.MIN_PIXELS_TO_RECENTER_WINDOW:
rightx_current = np.int(np.mean(nonzerox[good_right_inds]))
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
# Extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
return leftx, lefty, rightx, righty, out_img
def fit_poly(self, leftx, lefty, rightx, righty):
left_fit = np.polyfit(lefty, leftx, 2)
right_fit = np.polyfit(righty, rightx, 2)
# Generate x and y values for plotting
self.left_fitx = left_fit[0]*self.ploty**2 + left_fit[1]*self.ploty + left_fit[2]
self.right_fitx = right_fit[0]*self.ploty**2 + right_fit[1]*self.ploty + right_fit[2]
return left_fit, right_fit, self.left_fitx, self.right_fitx
| 47.056886 | 199 | 0.633391 |
00f611e0f25350306775531c30edc76797c4811e | 358 | py | Python | app.py | nunolima/ci-app | e6f1e922909986d08c92f704a43f892ade7d5095 | [
"MIT"
] | null | null | null | app.py | nunolima/ci-app | e6f1e922909986d08c92f704a43f892ade7d5095 | [
"MIT"
] | null | null | null | app.py | nunolima/ci-app | e6f1e922909986d08c92f704a43f892ade7d5095 | [
"MIT"
] | null | null | null | from flask import Flask
app = Flask(__name__)
@app.route("/")
def index():
return "Index!"
@app.route("/hello")
def hello():
return "Hello World! X +++++++++"
@app.route("/members")
def members():
return "Members --X--"
@app.route("/members/<string:name>/")
def getMember(name):
return name
if __name__ == "__main__":
app.run()
| 16.272727 | 37 | 0.600559 |
258c658d43cf2762f5fa33ddc79906ab8c9555e0 | 1,157 | py | Python | CybORG/CybORG/Shared/Actions/ShellActionsFolder/AccountManipulationFolder/RemoveUserFromGroupWindows.py | rafvasq/cage-challenge-1 | 95affdfa38afc1124f1a1a09c92fbc0ed5b96318 | [
"MIT"
] | 18 | 2021-08-20T15:07:55.000Z | 2022-03-11T12:05:15.000Z | CybORG/CybORG/Shared/Actions/ShellActionsFolder/AccountManipulationFolder/RemoveUserFromGroupWindows.py | rafvasq/cage-challenge-1 | 95affdfa38afc1124f1a1a09c92fbc0ed5b96318 | [
"MIT"
] | 7 | 2021-11-09T06:46:58.000Z | 2022-03-31T12:35:06.000Z | CybORG/CybORG/Shared/Actions/ShellActionsFolder/AccountManipulationFolder/RemoveUserFromGroupWindows.py | rafvasq/cage-challenge-1 | 95affdfa38afc1124f1a1a09c92fbc0ed5b96318 | [
"MIT"
] | 13 | 2021-08-17T00:26:31.000Z | 2022-03-29T20:06:45.000Z | # Copyright DST Group. Licensed under the MIT license.
from CybORG.Shared.Actions.ShellActionsFolder.AccountManipulationFolder.AccountManipulation import AccountManipulation
from CybORG.Shared.Enums import OperatingSystemType
from CybORG.Shared.Observation import Observation
class RemoveUserFromGroupWindows(AccountManipulation):
    """Shell action: remove a user account from a local group on a Windows
    host (simulation implementation)."""
    def __init__(self, session, agent, username, group):
        super().__init__(session, agent)
        self.user = username
        self.group = group
    def sim_execute(self, state):
        """Apply the action to the simulated state, returning an Observation
        whose success flag reports whether the removal happened."""
        obs = Observation()
        obs.set_success(False)
        agent_sessions = state.sessions[self.agent]
        if self.session not in agent_sessions:
            # Unknown session id for this agent: report failure as-is.
            return obs
        session = agent_sessions[self.session]
        if not session.active:
            obs.set_success(False)
            return obs
        target_host = session.host
        obs.add_system_info(hostid="hostid0", os_type=target_host.os_type)
        if target_host.os_type == OperatingSystemType.WINDOWS:
            target_host.remove_user_group(user=self.user, group=self.group)
            obs.set_success(True)
        else:
            # Action only applies to Windows hosts.
            obs.set_success(False)
        return obs
| 38.566667 | 118 | 0.67675 |
d2d35ed93957a1de1b34e91cc309ce3d9f229d03 | 4,480 | py | Python | perceptron_ch2.py | amitmeel/Machine-Learning-with-PyTorch-and-Scikit-Learn-Book | 2506730cf36f37f3bc1b611e8789bf88610db9d5 | [
"MIT"
] | null | null | null | perceptron_ch2.py | amitmeel/Machine-Learning-with-PyTorch-and-Scikit-Learn-Book | 2506730cf36f37f3bc1b611e8789bf88610db9d5 | [
"MIT"
] | null | null | null | perceptron_ch2.py | amitmeel/Machine-Learning-with-PyTorch-and-Scikit-Learn-Book | 2506730cf36f37f3bc1b611e8789bf88610db9d5 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
# # Implementing a perceptron learning algorithm in Python
# ## An object-oriented perceptron API
class Perceptron:
    """Perceptron classifier.

    Parameters
    ------------
    eta : float
        Learning rate (between 0.0 and 1.0)
    n_iter : int
        Passes over the training dataset.
    random_state : int
        Random number generator seed for random weight
        initialization.

    Attributes
    -----------
    w_ : 1d-array
        Weights after fitting.
    b_ : Scalar
        Bias unit after fitting.
    errors_ : list
        Number of misclassifications (updates) in each epoch.

    """
    def __init__(self, eta=0.01, n_iter=50, random_state=1):
        self.eta = eta
        self.n_iter = n_iter
        self.random_state = random_state

    def fit(self, X, y):
        """Fit training data.

        Parameters
        ----------
        X : {array-like}, shape = [n_examples, n_features]
            Training vectors, where n_examples is the number of examples and
            n_features is the number of features.
        y : array-like, shape = [n_examples]
            Target values.

        Returns
        -------
        self : object

        """
        rgen = np.random.RandomState(self.random_state)
        self.w_ = rgen.normal(loc=0.0, scale=0.01, size=X.shape[1])
        # Bug fix: np.float_ was removed in NumPy 2.0; np.float64 is the
        # identical type and exists in every NumPy release.
        self.b_ = np.float64(0.)
        self.errors_ = []

        for _ in range(self.n_iter):
            errors = 0
            for xi, target in zip(X, y):
                # Perceptron rule: the step is 0 when the prediction is
                # correct, +/-eta when it is wrong.
                update = self.eta * (target - self.predict(xi))
                self.w_ += update * xi
                self.b_ += update
                errors += int(update != 0.0)
            self.errors_.append(errors)
        return self

    def net_input(self, X):
        """Calculate net input"""
        return np.dot(X, self.w_) + self.b_

    def predict(self, X):
        """Return class label after unit step"""
        return np.where(self.net_input(X) >= 0.0, 1, 0)
def plot_decision_regions(X, y, classifier, resolution=0.02):
    """Plot 2-D decision regions of a fitted classifier plus the samples."""
    marker_cycle = ('o', 's', '^', 'v', '<')
    color_cycle = ('red', 'blue', 'lightgreen', 'gray', 'cyan')
    class_labels = np.unique(y)
    cmap = ListedColormap(color_cycle[:len(class_labels)])
    # Evaluate the classifier on a dense grid covering the data plus a margin
    # of 1 unit on every side.
    grid_x, grid_y = np.meshgrid(
        np.arange(X[:, 0].min() - 1, X[:, 0].max() + 1, resolution),
        np.arange(X[:, 1].min() - 1, X[:, 1].max() + 1, resolution))
    predictions = classifier.predict(
        np.array([grid_x.ravel(), grid_y.ravel()]).T).reshape(grid_x.shape)
    plt.contourf(grid_x, grid_y, predictions, alpha=0.3, cmap=cmap)
    plt.xlim(grid_x.min(), grid_x.max())
    plt.ylim(grid_y.min(), grid_y.max())
    # Overlay the training samples, one scatter call per class.
    for idx, cl in enumerate(class_labels):
        samples = X[y == cl]
        plt.scatter(x=samples[:, 0],
                    y=samples[:, 1],
                    alpha=0.8,
                    c=color_cycle[idx],
                    marker=marker_cycle[idx],
                    label=f'Class {cl}',
                    edgecolor='black')
if __name__ == '__main__':
    # iris_data: https://archive.ics.uci.edu/ml/machine-learning-databases/iris/iris.data
    df = pd.read_csv('./datasets/iris.data', header=None)
    # select setosa and versicolor (50 Iris-setosa and 50 Iris-versicolor)
    y = df.iloc[0:100, 4].values
    # Encode the two class names as 0/1 integer labels for the perceptron.
    y = np.where(y == 'Iris-setosa', 0, 1)
    # extract sepal length and petal length
    X = df.iloc[0:100, [0, 2]].values
    # plot data
    plt.scatter(X[:50, 0], X[:50, 1], color='red', marker='o', label='setosa')
    plt.scatter(X[50:100, 0], X[50:100, 1], color='blue', marker='x', label='versicolor')
    plt.xlabel('sepal length [cm]')
    plt.ylabel('petal length [cm]')
    plt.legend(loc='upper left')
    plt.savefig('./images/perceptron_ch2_plot.png')
    plt.show()
    # Training the Perceptron classifier
    ppn = Perceptron(eta=0.1, n_iter=10)
    ppn.fit(X, y)
    # Convergence plot: number of weight updates (misclassifications) per epoch.
    plt.plot(range(1, len(ppn.errors_) + 1), ppn.errors_, marker='o')
    plt.xlabel('Epochs')
    plt.ylabel('Number of updates')
    plt.savefig('./images/perceptron_ch2_plot_2.png')
    plt.show()
    # plot of the perceptron’s decision regions
    plot_decision_regions(X, y, classifier=ppn)
    plt.xlabel('sepal length [cm]')
    plt.ylabel('petal length [cm]')
    plt.savefig('./images/perceptron_ch2_plot_3.png')
    plt.show()
| 31.77305 | 89 | 0.578571 |
714631a53b815c5e60448cc560ee6ab26dfc7cb4 | 3,910 | py | Python | fuji_server/evaluators/fair_evaluator_related_resources.py | ignpelloz/fuji | 5e6fe8333c1706d1b628a84108bff7a97fdf11a7 | [
"MIT"
] | 25 | 2020-09-22T08:28:45.000Z | 2022-02-23T07:10:28.000Z | fuji_server/evaluators/fair_evaluator_related_resources.py | ignpelloz/fuji | 5e6fe8333c1706d1b628a84108bff7a97fdf11a7 | [
"MIT"
] | 188 | 2020-05-11T08:54:59.000Z | 2022-03-31T12:28:15.000Z | fuji_server/evaluators/fair_evaluator_related_resources.py | ignpelloz/fuji | 5e6fe8333c1706d1b628a84108bff7a97fdf11a7 | [
"MIT"
] | 20 | 2020-05-04T13:56:26.000Z | 2022-03-02T13:39:04.000Z | # -*- coding: utf-8 -*-
# MIT License
#
# Copyright (c) 2020 PANGAEA (https://www.pangaea.de/)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from fuji_server.evaluators.fair_evaluator import FAIREvaluator
from fuji_server.helper.identifier_helper import IdentifierHelper
from fuji_server.models.related_resource import RelatedResource
from fuji_server.models.related_resource_output import RelatedResourceOutput
class FAIREvaluatorRelatedResources(FAIREvaluator):
    """
    A class to evaluate that the metadata links between the data and its related entities (I3-01M).
    A child class of FAIREvaluator.
    ...
    Methods
    -------
    evaluate()
        This method will evaluate the links between metadata whether they relate explicitly in metadata and
        they relate by machine-readable links/identifier.
    """
    def evaluate(self):
        # Initialise the result object for this metric before any scoring.
        self.result = RelatedResource(id=self.metric_number,
                                      metric_identifier=self.metric_identifier,
                                      metric_name=self.metric_name)
        self.output = RelatedResourceOutput()
        # NOTE(review): assumes self.fuji.related_resources was populated by an
        # earlier metadata-harvesting step — confirm against the caller.
        self.logger.info('{0} : Total number of related resources extracted -: {1}'.format(
            self.metric_identifier, len(self.fuji.related_resources)))
        # if self.metadata_merged.get('related_resources'):
        pid_used = False
        if self.fuji.related_resources:
            #print(self.fuji.related_resources)
            # QC check: exclude potential incorrect relation
            # (a resource whose target equals this record's own PID URL is a
            # self-reference, not a genuine related entity).
            self.fuji.related_resources = [
                item for item in self.fuji.related_resources if item.get('related_resource') != self.fuji.pid_url
            ]
            self.logger.log(
                self.fuji.LOG_SUCCESS,
                '{0} : Number of related resources after QC step -: {1}'.format(self.metric_identifier,
                                                                                len(self.fuji.related_resources)))
        # Re-check: the QC filter above may have emptied the list.
        if self.fuji.related_resources:  # TODO include source of relation
            for relation in self.fuji.related_resources:
                relation_identifier = IdentifierHelper(relation.get('related_resource'))
                # A single persistent or URL-scheme identifier is enough to
                # claim machine-readable linking (maturity 3 below).
                if relation_identifier.is_persistent or 'url' in relation_identifier.identifier_schemes:
                    pid_used = True
            self.output = self.fuji.related_resources
            self.result.test_status = 'pass'
            self.setEvaluationCriteriumScore('FsF-I3-01M-1', 1, 'pass')
            self.score.earned = self.total_score
            self.maturity = 2
            if pid_used:
                self.setEvaluationCriteriumScore('FsF-I3-01M-2', 1, 'pass')
                self.maturity = 3
        # Always publish the (possibly default/failing) outcome on the result.
        self.result.metric_tests = self.metric_tests
        self.result.maturity = self.maturity
        self.result.score = self.score
        self.result.output = self.output
6c8713102ff400da3ee1106c003e277d6155bc7f | 841 | py | Python | eval_secrets.py | TremoloSecurity/OpenUnisonS2IDockerRHEL | 83d95be6f4f12b24a80d74ae6a09791ba4bd3aa9 | [
"Apache-2.0"
] | null | null | null | eval_secrets.py | TremoloSecurity/OpenUnisonS2IDockerRHEL | 83d95be6f4f12b24a80d74ae6a09791ba4bd3aa9 | [
"Apache-2.0"
] | null | null | null | eval_secrets.py | TremoloSecurity/OpenUnisonS2IDockerRHEL | 83d95be6f4f12b24a80d74ae6a09791ba4bd3aa9 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/python
import sys
# NOTE: this is Python 2 code (print statement below).
# Usage: eval_secrets.py <env-file> <template-file>
# Reads KEY=VALUE pairs from the first file, then substitutes every
# `#[KEY]` placeholder in the second file with the corresponding value.
env_vars = {}
content = ''
with open(sys.argv[1]) as f:
    for line in f:
        # Drop the trailing newline, then split on the first '='.
        line = line[0:-1]
        name = line[0:line.find('=')]
        val = line[line.find('=') + 1:]
        env_vars[name] = val
with open(sys.argv[2]) as f:
    content = f.read()
#if not specified, we don't need TLS auth
if not 'TOMCAT_TLS_CLIENT_AUTH' in env_vars:
    env_vars['TOMCAT_TLS_CLIENT_AUTH'] = 'none'
# Scan the template left to right, copying literal text and replacing each
# `#[NAME]` placeholder with its value from env_vars.
done = False
start = 0
new_content = ''
while not done:
    next_var = content.find('#[',start)
    if next_var == -1:
        # No more placeholders: copy the remaining tail verbatim.
        new_content += content[start:]
        done = True
    else:
        end_var = content.find(']',next_var)
        var_name = content[next_var+2:end_var]
        new_content = new_content + content[start:next_var] + env_vars[var_name]
        start = end_var + 1
print new_content
| 21.025 | 80 | 0.604043 |
b474d1c11b625ac36fbe0b0b097ecedbae64173e | 1,146 | py | Python | gr-carrier48/python/__init__.py | xueyuecanfeng/C-LQI | f489c6447428d6affb2159e9d8f895caab2868c7 | [
"BSD-2-Clause"
] | 2 | 2021-11-30T02:35:48.000Z | 2021-11-30T02:53:02.000Z | gr-carrier48/python/__init__.py | xueyuecanfeng/C-LQI | f489c6447428d6affb2159e9d8f895caab2868c7 | [
"BSD-2-Clause"
] | null | null | null | gr-carrier48/python/__init__.py | xueyuecanfeng/C-LQI | f489c6447428d6affb2159e9d8f895caab2868c7 | [
"BSD-2-Clause"
] | null | null | null | #
# Copyright 2008,2009 Free Software Foundation, Inc.
#
# This application is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# This application is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# The presence of this file turns this directory into a Python package
'''
This is the GNU Radio CARRIER48 module. Place your Python package
description here (python/__init__.py).
'''
# import swig generated symbols into the carrier48 namespace
# Deliberate best-effort import: in a python-only build the SWIG extension
# does not exist, and the package still loads with the pure-python symbols.
try:
    # this might fail if the module is python-only
    from carrier48_swig import *
except ImportError:
    pass
# import any pure python here
#
| 32.742857 | 74 | 0.768761 |
0b778b42a1cc2866b6112aa9596e15f7b5bd9f4b | 359 | py | Python | TP_ALGO_2/saisie_liste.py | PierreLeGuen/ALGO_S5 | 9067e887d14fe997c6944292a0cff23ceda47b6e | [
"MIT"
] | null | null | null | TP_ALGO_2/saisie_liste.py | PierreLeGuen/ALGO_S5 | 9067e887d14fe997c6944292a0cff23ceda47b6e | [
"MIT"
] | null | null | null | TP_ALGO_2/saisie_liste.py | PierreLeGuen/ALGO_S5 | 9067e887d14fe997c6944292a0cff23ceda47b6e | [
"MIT"
] | null | null | null | def saisie_liste():
cest_un_nombre=True
liste_nombre=[]
while cest_un_nombre==True:
entree_liste=input("Entrer un nombre : ")
if entree_liste=="":
print("Ce n'est pas un nombre")
cest_un_nombre=False
print(liste_nombre)
else:
liste_nombre.append(entree_liste)
saisie_liste()
| 27.615385 | 49 | 0.604457 |
39abe7ee34a3465d0107535656aebc779accfa41 | 3,347 | py | Python | common/data_refinery_common/models/__init__.py | AlexsLemonade/refinebio | 52f44947f902adedaccf270d5f9dbd56ab47e40a | [
"BSD-3-Clause"
] | 106 | 2018-03-05T16:24:47.000Z | 2022-03-19T19:12:25.000Z | common/data_refinery_common/models/__init__.py | AlexsLemonade/refinebio | 52f44947f902adedaccf270d5f9dbd56ab47e40a | [
"BSD-3-Clause"
] | 1,494 | 2018-02-27T17:02:21.000Z | 2022-03-24T15:10:30.000Z | common/data_refinery_common/models/__init__.py | AlexsLemonade/refinebio | 52f44947f902adedaccf270d5f9dbd56ab47e40a | [
"BSD-3-Clause"
] | 15 | 2019-02-03T01:34:59.000Z | 2022-03-29T01:59:13.000Z | from data_refinery_common.models.api_token import APIToken # noqa
from data_refinery_common.models.associations.compendium_result_organism_association import ( # noqa
CompendiumResultOrganismAssociation,
)
from data_refinery_common.models.associations.downloaderjob_originalfile_association import ( # noqa
DownloaderJobOriginalFileAssociation,
)
from data_refinery_common.models.associations.experiment_organism_association import ( # noqa
ExperimentOrganismAssociation,
)
from data_refinery_common.models.associations.experiment_result_association import ( # noqa
ExperimentResultAssociation,
)
from data_refinery_common.models.associations.experiment_sample_association import ( # noqa
ExperimentSampleAssociation,
)
from data_refinery_common.models.associations.original_file_sample_association import ( # noqa
OriginalFileSampleAssociation,
)
from data_refinery_common.models.associations.processorjob_dataset_association import ( # noqa
ProcessorJobDatasetAssociation,
)
from data_refinery_common.models.associations.processorjob_originalfile_association import ( # noqa
ProcessorJobOriginalFileAssociation,
)
from data_refinery_common.models.associations.sample_computed_file_association import ( # noqa
SampleComputedFileAssociation,
)
from data_refinery_common.models.associations.sample_result_association import ( # noqa
SampleResultAssociation,
)
from data_refinery_common.models.attributes import ExperimentAttribute, SampleAttribute # noqa
from data_refinery_common.models.command_progress import ( # noqa
CdfCorrectedAccession,
SurveyedAccession,
)
from data_refinery_common.models.compendium_result import CompendiumResult # noqa
from data_refinery_common.models.computational_result import ComputationalResult # noqa
from data_refinery_common.models.computational_result_annotation import ( # noqa
ComputationalResultAnnotation,
)
from data_refinery_common.models.computed_file import ComputedFile # noqa
from data_refinery_common.models.contributions import Contribution # noqa
from data_refinery_common.models.dataset import Dataset # noqa
from data_refinery_common.models.dataset_annotation import DatasetAnnotation # noqa
from data_refinery_common.models.experiment import Experiment # noqa
from data_refinery_common.models.experiment_annotation import ExperimentAnnotation # noqa
from data_refinery_common.models.jobs.downloader_job import DownloaderJob # noqa
from data_refinery_common.models.jobs.processor_job import ProcessorJob # noqa
from data_refinery_common.models.jobs.survey_job import SurveyJob # noqa
from data_refinery_common.models.jobs.survey_job_key_value import SurveyJobKeyValue # noqa
from data_refinery_common.models.keywords import SampleKeyword # noqa
from data_refinery_common.models.ontology_term import OntologyTerm # noqa
from data_refinery_common.models.organism import Organism # noqa
from data_refinery_common.models.organism_index import OrganismIndex # noqa
from data_refinery_common.models.original_file import OriginalFile # noqa
from data_refinery_common.models.pipeline import Pipeline # noqa
from data_refinery_common.models.processor import Processor # noqa
from data_refinery_common.models.sample import Sample # noqa
from data_refinery_common.models.sample_annotation import SampleAnnotation # noqa
| 54.868852 | 101 | 0.856588 |
fa580cf6e6de3f697ee896b551ee8155d1078a87 | 11,762 | py | Python | setup.py | Whu-gaozhao/my_mmdetection3d | 798dcdb7a2358220bb588399f179cf8a98e55cf8 | [
"Apache-2.0"
] | null | null | null | setup.py | Whu-gaozhao/my_mmdetection3d | 798dcdb7a2358220bb588399f179cf8a98e55cf8 | [
"Apache-2.0"
] | null | null | null | setup.py | Whu-gaozhao/my_mmdetection3d | 798dcdb7a2358220bb588399f179cf8a98e55cf8 | [
"Apache-2.0"
] | 1 | 2022-03-03T08:32:06.000Z | 2022-03-03T08:32:06.000Z | from setuptools import find_packages, setup
import os
import platform
import shutil
import sys
import torch
import warnings
from os import path as osp
from torch.utils.cpp_extension import (BuildExtension, CppExtension,
CUDAExtension)
def readme():
    """Return the contents of README.md decoded as UTF-8."""
    with open('README.md', encoding='utf-8') as handle:
        return handle.read()
# Path of the module that defines __version__ / short_version at build time.
version_file = 'mmdet3d/version.py'
def get_version():
    """Return the package version string parsed from ``version_file``.

    The version module is executed so its ``__version__`` and
    ``short_version`` names appear in this function's locals.  When building
    a source or wheel distribution only the short version is reported.
    """
    with open(version_file, 'r') as f:
        exec(compile(f.read(), version_file, 'exec'))
    # Redundant inner `import sys` removed: sys is imported at module level.
    # return short version for sdist
    if 'sdist' in sys.argv or 'bdist_wheel' in sys.argv:
        return locals()['short_version']
    else:
        return locals()['__version__']
def make_cuda_ext(name,
                  module,
                  sources,
                  sources_cuda=None,
                  extra_args=None,
                  extra_include_path=None):
    """Create a ``CppExtension``/``CUDAExtension`` for a mmdet3d op module.

    Parameters
    ----------
    name : str
        Extension name, appended to ``module`` to form the import path.
    module : str
        Dotted package path; source paths are resolved relative to it.
    sources : list[str]
        C++ source files (relative to the module directory).
    sources_cuda : list[str], optional
        Extra CUDA sources, compiled only when CUDA is available.
    extra_args : list[str], optional
        Extra compiler arguments for both host and device compilers.
    extra_include_path : list[str], optional
        Additional include directories.

    Returns
    -------
    setuptools.Extension
        A ``CUDAExtension`` when CUDA is available (or ``FORCE_CUDA=1`` is
        set), otherwise a plain ``CppExtension``.
    """
    # Bug fix: the original used mutable default arguments ([]) and mutated
    # the caller's `sources` list in place. Copy every list argument instead.
    sources = list(sources)
    sources_cuda = [] if sources_cuda is None else list(sources_cuda)
    extra_args = [] if extra_args is None else list(extra_args)
    extra_include_path = [] if extra_include_path is None else list(extra_include_path)
    define_macros = []
    extra_compile_args = {'cxx': [] + extra_args}
    if torch.cuda.is_available() or os.getenv('FORCE_CUDA', '0') == '1':
        define_macros += [('WITH_CUDA', None)]
        extension = CUDAExtension
        extra_compile_args['nvcc'] = extra_args + [
            '-D__CUDA_NO_HALF_OPERATORS__',
            '-D__CUDA_NO_HALF_CONVERSIONS__',
            '-D__CUDA_NO_HALF2_OPERATORS__',
        ]
        sources += sources_cuda
    else:
        print('Compiling {} without CUDA'.format(name))
        extension = CppExtension
        # raise EnvironmentError('CUDA is required to compile MMDetection!')
    return extension(
        name='{}.{}'.format(module, name),
        sources=[os.path.join(*module.split('.'), p) for p in sources],
        include_dirs=extra_include_path,
        define_macros=define_macros,
        extra_compile_args=extra_compile_args)
def parse_requirements(fname='requirements.txt', with_version=True):
    """Parse the package dependencies listed in a requirements file but strips
    specific versioning information.
    Args:
        fname (str): path to requirements file
        with_version (bool, default=True): if True include version specs
    Returns:
        list[str]: list of requirements items
    CommandLine:
        python -c "import setup; print(setup.parse_requirements())"
    """
    import re
    import sys
    from os.path import exists
    require_fpath = fname
    def parse_line(line):
        """Parse information from a line in a requirements text file."""
        if line.startswith('-r '):
            # Allow specifying requirements in other files
            target = line.split(' ')[1]
            for info in parse_require_file(target):
                yield info
        else:
            info = {'line': line}
            if line.startswith('-e '):
                # Editable install: package name is taken from the #egg= part.
                info['package'] = line.split('#egg=')[1]
            else:
                # Remove versioning from the package
                pat = '(' + '|'.join(['>=', '==', '>']) + ')'
                parts = re.split(pat, line, maxsplit=1)
                parts = [p.strip() for p in parts]
                info['package'] = parts[0]
                if len(parts) > 1:
                    op, rest = parts[1:]
                    if ';' in rest:
                        # Handle platform specific dependencies
                        # http://setuptools.readthedocs.io/en/latest/setuptools.html#declaring-platform-specific-dependencies
                        version, platform_deps = map(str.strip,
                                                     rest.split(';'))
                        info['platform_deps'] = platform_deps
                    else:
                        version = rest  # NOQA
                    info['version'] = (op, version)
            yield info
    def parse_require_file(fpath):
        # Yield one parsed info dict per non-comment, non-blank line.
        with open(fpath, 'r') as f:
            for line in f.readlines():
                line = line.strip()
                if line and not line.startswith('#'):
                    for info in parse_line(line):
                        yield info
    def gen_packages_items():
        # Re-assemble each parsed entry into a requirement string, optionally
        # keeping the version spec and any platform marker.
        if exists(require_fpath):
            for info in parse_require_file(require_fpath):
                parts = [info['package']]
                if with_version and 'version' in info:
                    parts.extend(info['version'])
                if not sys.version.startswith('3.4'):
                    # apparently package_deps are broken in 3.4
                    platform_deps = info.get('platform_deps')
                    if platform_deps is not None:
                        parts.append(';' + platform_deps)
                item = ''.join(parts)
                yield item
    packages = list(gen_packages_items())
    return packages
def add_mim_extention():
    """Add extra files that are required to support MIM into the package.
    These files will be added by creating a symlink to the originals if the
    package is installed in `editable` mode (e.g. pip install -e .), or by
    copying from the originals otherwise.
    """
    # parse installment mode
    if 'develop' in sys.argv:
        # installed by `pip install -e .`
        if platform.system() == 'Windows':
            # set `copy` mode here since symlink fails on Windows.
            mode = 'copy'
        else:
            mode = 'symlink'
    elif 'sdist' in sys.argv or 'bdist_wheel' in sys.argv:
        # installed by `pip install .`
        # or create source distribution by `python setup.py sdist`
        mode = 'copy'
    else:
        # Not an install/build command: nothing to do.
        return
    filenames = ['tools', 'configs', 'model-index.yml']
    repo_path = osp.dirname(__file__)
    mim_path = osp.join(repo_path, 'mmdet3d', '.mim')
    os.makedirs(mim_path, exist_ok=True)
    for filename in filenames:
        if osp.exists(filename):
            src_path = osp.join(repo_path, filename)
            tar_path = osp.join(mim_path, filename)
            # Remove any stale file/link/directory left from a previous build.
            if osp.isfile(tar_path) or osp.islink(tar_path):
                os.remove(tar_path)
            elif osp.isdir(tar_path):
                shutil.rmtree(tar_path)
            if mode == 'symlink':
                src_relpath = osp.relpath(src_path, osp.dirname(tar_path))
                os.symlink(src_relpath, tar_path)
            elif mode == 'copy':
                if osp.isfile(src_path):
                    shutil.copyfile(src_path, tar_path)
                elif osp.isdir(src_path):
                    shutil.copytree(src_path, tar_path)
                else:
                    warnings.warn(f'Cannot copy file {src_path}.')
            else:
                raise ValueError(f'Invalid mode {mode}')
if __name__ == '__main__':
    # Mirror tools/configs/model-index.yml into the package for MIM support.
    add_mim_extention()
    setup(
        name='mmdet3d',
        version=get_version(),
        description=("OpenMMLab's next-generation platform"
                     'for general 3D object detection.'),
        long_description=readme(),
        long_description_content_type='text/markdown',
        author='MMDetection3D Contributors',
        author_email='zwwdev@gmail.com',
        keywords='computer vision, 3D object detection',
        url='https://github.com/open-mmlab/mmdetection3d',
        packages=find_packages(),
        include_package_data=True,
        package_data={'mmdet3d.ops': ['*/*.so']},
        classifiers=[
            'Development Status :: 4 - Beta',
            'License :: OSI Approved :: Apache Software License',
            'Operating System :: OS Independent',
            'Programming Language :: Python :: 3',
            'Programming Language :: Python :: 3.6',
            'Programming Language :: Python :: 3.7',
        ],
        license='Apache License 2.0',
        install_requires=parse_requirements('requirements/runtime.txt'),
        extras_require={
            'all': parse_requirements('requirements.txt'),
            'tests': parse_requirements('requirements/tests.txt'),
            'build': parse_requirements('requirements/build.txt'),
            'optional': parse_requirements('requirements/optional.txt'),
        },
        # Native CUDA/C++ op extensions; each entry compiles one op package.
        ext_modules=[
            # make_cuda_ext(
            #     name='sparse_conv_ext',
            #     module='mmdet3d.ops.spconv',
            #     extra_include_path=[
            #         # PyTorch 1.5 uses ninjia, which requires absolute path
            #         # of included files, relative path will cause failure.
            #         os.path.abspath(
            #             os.path.join(*'mmdet3d.ops.spconv'.split('.'),
            #                          'include/'))
            #     ],
            #     sources=[
            #         'src/all.cc',
            #         'src/reordering.cc',
            #         'src/reordering_cuda.cu',
            #         'src/indice.cc',
            #         'src/indice_cuda.cu',
            #         'src/maxpool.cc',
            #         'src/maxpool_cuda.cu',
            #     ],
            #     extra_args=['-w', '-std=c++14']),
            make_cuda_ext(
                name='iou3d_cuda',
                module='mmdet3d.ops.iou3d',
                sources=[
                    'src/iou3d.cpp',
                    'src/iou3d_kernel.cu',
                ]),
            make_cuda_ext(
                name='voxel_layer',
                module='mmdet3d.ops.voxel',
                sources=[
                    'src/voxelization.cpp',
                    'src/scatter_points_cpu.cpp',
                    'src/scatter_points_cuda.cu',
                    'src/voxelization_cpu.cpp',
                    'src/voxelization_cuda.cu',
                ]),
            make_cuda_ext(
                name='roiaware_pool3d_ext',
                module='mmdet3d.ops.roiaware_pool3d',
                sources=[
                    'src/roiaware_pool3d.cpp',
                    'src/points_in_boxes_cpu.cpp',
                ],
                sources_cuda=[
                    'src/roiaware_pool3d_kernel.cu',
                    'src/points_in_boxes_cuda.cu',
                ]),
            make_cuda_ext(
                name='ball_query_ext',
                module='mmdet3d.ops.ball_query',
                sources=['src/ball_query.cpp'],
                sources_cuda=['src/ball_query_cuda.cu']),
            make_cuda_ext(
                name='knn_ext',
                module='mmdet3d.ops.knn',
                sources=['src/knn.cpp'],
                sources_cuda=['src/knn_cuda.cu']),
            make_cuda_ext(
                name='assign_score_withk_ext',
                module='mmdet3d.ops.paconv',
                sources=['src/assign_score_withk.cpp'],
                sources_cuda=['src/assign_score_withk_cuda.cu']),
            make_cuda_ext(
                name='group_points_ext',
                module='mmdet3d.ops.group_points',
                sources=['src/group_points.cpp'],
                sources_cuda=['src/group_points_cuda.cu']),
            make_cuda_ext(
                name='interpolate_ext',
                module='mmdet3d.ops.interpolate',
                sources=['src/interpolate.cpp'],
                sources_cuda=[
                    'src/three_interpolate_cuda.cu', 'src/three_nn_cuda.cu'
                ]),
            make_cuda_ext(
                name='furthest_point_sample_ext',
                module='mmdet3d.ops.furthest_point_sample',
                sources=['src/furthest_point_sample.cpp'],
                sources_cuda=['src/furthest_point_sample_cuda.cu']),
            make_cuda_ext(
                name='gather_points_ext',
                module='mmdet3d.ops.gather_points',
                sources=['src/gather_points.cpp'],
                sources_cuda=['src/gather_points_cuda.cu'])
        ],
        cmdclass={'build_ext': BuildExtension},
        zip_safe=False)
| 37.221519 | 125 | 0.529247 |
75f17e2a48d951edb8a8a95cd22d5b0498356f67 | 6,163 | py | Python | board/RFSoC2x2/packages/xrfclk/package/xrfclk/xrfclk.py | mariodruiz/RFSoC2x2-PYNQ | ab0b4f2fb2e5b2edb15a2ce89487883ecc7a73cb | [
"BSD-3-Clause"
] | null | null | null | board/RFSoC2x2/packages/xrfclk/package/xrfclk/xrfclk.py | mariodruiz/RFSoC2x2-PYNQ | ab0b4f2fb2e5b2edb15a2ce89487883ecc7a73cb | [
"BSD-3-Clause"
] | null | null | null | board/RFSoC2x2/packages/xrfclk/package/xrfclk/xrfclk.py | mariodruiz/RFSoC2x2-PYNQ | ab0b4f2fb2e5b2edb15a2ce89487883ecc7a73cb | [
"BSD-3-Clause"
] | 4 | 2021-03-02T15:35:28.000Z | 2021-09-04T05:55:40.000Z | # Copyright (c) 2021, Xilinx, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
# OR BUSINESS INTERRUPTION). HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
# WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
# OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF
# ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
import glob
import re
import cffi
from collections import defaultdict
__author__ = "Yun Rock Qu"
__copyright__ = "Copyright 2021, Xilinx"
__email__ = "pynq_support@xilinx.com"
# Determine the supported board from the environment; the I2C channel used
# to reach the on-board clock chips differs per board.
board = os.environ['BOARD']
if board == "ZCU111":
    _iic_channel = 12
elif board == "RFSoC2x2":
    _iic_channel = 7
else:
    raise ValueError("Board {} is not supported.".format(board))
# FFI bindings into the native clock-programming helper library shipped
# alongside this module.
_ffi = cffi.FFI()
_ffi.cdef("int clearInt(int IicNum);"
          "int writeLmx2594Regs(int IicNum, unsigned int RegVals[113]);"
          "int writeLmk04208Regs(int IicNum, unsigned int RegVals[26]);"
          "int writeLmk04832Regs(int IicNum, unsigned int RegVals[125]);")
_lib = _ffi.dlopen(os.path.join(os.path.dirname(__file__), 'libxrfclk.so'))
# Register-value tables keyed by frequency in MHz; filled by
# read_tics_output() from the bundled TICS export files.
_lmx2594Config = defaultdict(list)
_lmk04208Config = defaultdict(list)
_lmk04832Config = defaultdict(list)
def _safe_wrapper(name, *args, **kwargs):
    """Look up *name* in the native library and call it, raising on failure."""
    func = getattr(_lib, name, None)
    if func is None:
        raise RuntimeError("Function {} not in library.".format(name))
    # The C helpers return a non-zero status code on failure.
    if func(*args, **kwargs):
        raise RuntimeError("Function {} call failed.".format(name))
def clear_int():
    """Clear the interrupts.

    Delegates to the native ``clearInt`` routine on this board's I2C channel.
    """
    _safe_wrapper("clearInt", _iic_channel)
def write_lmk04208_regs(reg_vals):
    """Write values to the LMK04208 registers.
    This is an internal function.
    Parameters
    ----------
    reg_vals: list
        A list of 26 32-bit register values.
    Raises
    ------
    RuntimeError
        If the underlying native call fails.
    """
    _safe_wrapper("writeLmk04208Regs", _iic_channel, reg_vals)
def write_lmk04832_regs(reg_vals):
    """Write values to the LMK04832 registers.
    This is an internal function.
    Parameters
    ----------
    reg_vals: list
        A list of 125 24-bit register values.
    Raises
    ------
    RuntimeError
        If the underlying native call fails.
    """
    _safe_wrapper("writeLmk04832Regs", _iic_channel, reg_vals)
def write_lmx2594_regs(reg_vals):
    """Write values to the LMX2594 registers.
    This is an internal function.
    Parameters
    ----------
    reg_vals: list
        A list of 113 32-bit register values.
    Raises
    ------
    RuntimeError
        If the underlying native call fails.
    """
    _safe_wrapper("writeLmx2594Regs", _iic_channel, reg_vals)
def set_ref_clks(lmk_freq=122.88, lmx_freq=409.6):
    """Set all RF data converter tile reference clocks to a given frequency.

    LMX chips are downstream so the LMK chips are configured first.

    Parameters
    ----------
    lmk_freq: float
        The frequency for the LMK clock generation chip.
    lmx_freq: float
        The frequency for the LMX PLL chip.
    """
    # Each supported board carries a different LMK part; pick its setter.
    lmk_setters = {
        "ZCU111": set_lmk04208_clks,
        "RFSoC2x2": set_lmk04832_clks,
    }
    set_lmk = lmk_setters.get(board)
    if set_lmk is not None:
        read_tics_output()
        set_lmk(lmk_freq)
        set_lmx2594_clks(lmx_freq)
def read_tics_output():
    """Read all the TICS register values from all the txt files.

    Configuration files live next to this module and are named
    ``CHIPNAME_frequency.txt``.  Every hex register value found in a file is
    appended to the matching chip's frequency-keyed table.
    """
    # Explicit lookup table instead of the original eval() on a string built
    # from the filename, which was both fragile and unsafe.
    configs = {
        'lmx2594': _lmx2594Config,
        'lmk04208': _lmk04208Config,
        'lmk04832': _lmk04832Config,
    }
    dir_path = os.path.dirname(os.path.realpath(__file__))
    for txt_file in glob.glob(os.path.join(dir_path, '*.txt')):
        # The original used `.strip('.txt')`, which strips the *characters*
        # '.', 't', 'x' from both ends (not the suffix), and split on '/',
        # which is not portable.  Use basename/splitext instead.
        stem = os.path.splitext(os.path.basename(txt_file))[0].lower()
        chip, freq = stem.split('_')
        config = configs[chip]
        with open(txt_file, 'r') as f:
            for line in f:
                m = re.search('[\t]*(0x[0-9A-F]*)', line)
                # Skip lines without a hex value instead of crashing on None.
                if m:
                    config[float(freq)] += int(m.group(1), 16),
def set_lmx2594_clks(lmx_freq):
    """Program the LMX2594 PLL chip for the requested frequency.

    Parameters
    ----------
    lmx_freq: float
        The frequency for the LMX PLL chip.
    """
    # Membership test (not indexing) so the defaultdict is not populated
    # with an empty entry for an unknown frequency.
    if lmx_freq in _lmx2594Config:
        write_lmx2594_regs(_lmx2594Config[lmx_freq])
    else:
        raise RuntimeError("Frequency {} MHz is not valid.".format(lmx_freq))
def set_lmk04832_clks(lmk_freq):
    """Set LMK chip frequency.

    Parameters
    ----------
    lmk_freq: float
        The frequency for the LMK clock generation chip.

    Raises
    ------
    RuntimeError
        If no register table exists for the requested frequency.
    """
    if lmk_freq not in _lmk04832Config:
        # Bug fix: the message previously formatted the undefined name
        # `lmx_freq`, raising NameError instead of the intended RuntimeError.
        raise RuntimeError("Frequency {} MHz is not valid.".format(lmk_freq))
    else:
        write_lmk04832_regs(_lmk04832Config[lmk_freq])
def set_lmk04208_clks(lmk_freq):
    """Set LMK chip frequency.

    Parameters
    ----------
    lmk_freq: float
        The frequency for the LMK clock generation chip.

    Raises
    ------
    RuntimeError
        If no register table exists for the requested frequency.
    """
    if lmk_freq not in _lmk04208Config:
        # Bug fix: the message previously formatted the undefined name
        # `lmx_freq`, raising NameError instead of the intended RuntimeError.
        raise RuntimeError("Frequency {} MHz is not valid.".format(lmk_freq))
    else:
        write_lmk04208_regs(_lmk04208Config[lmk_freq])
| 29.347619 | 79 | 0.674996 |
3c754e36f92d4c45127fbf0cff6746f6d9671365 | 2,445 | py | Python | torch_glow/tests/functionality/blacklist_test.py | YonginKwon/glow | 7d316d028e1792534416755bf80af422adccdaa9 | [
"Apache-2.0"
] | 2 | 2020-03-23T21:04:00.000Z | 2020-04-02T22:49:49.000Z | torch_glow/tests/functionality/blacklist_test.py | YonginKwon/glow | 7d316d028e1792534416755bf80af422adccdaa9 | [
"Apache-2.0"
] | null | null | null | torch_glow/tests/functionality/blacklist_test.py | YonginKwon/glow | 7d316d028e1792534416755bf80af422adccdaa9 | [
"Apache-2.0"
] | null | null | null | from __future__ import absolute_import, division, print_function, unicode_literals
import torch
from tests.utils import GLOW_NODE_NAME, SUBGRAPH_ATTR
import torch_glow
import unittest
class TestBlackList(unittest.TestCase):
    """Tests for the torch_glow fuser's op/index blacklisting controls."""
    def test_op_blacklist(self):
        """Test Glow fuser op kind blacklisting mechanism."""
        def f(a, b):
            return (a + b) * (a - b)
        torch_glow.enableFusionPass()
        torch_glow.setFusionBlacklist(["aten::add"])
        a = torch.randn(5, 5)
        b = torch.randn(5, 5)
        jit_f = torch.jit.trace(f, (a, b))
        jit_f_graph = jit_f.graph_for(a, b)
        # Scan the fused subgraph(s) for the blacklisted/non-blacklisted ops.
        fused_add = False
        fused_sub = False
        for node in jit_f_graph.nodes():
            if node.kind() == GLOW_NODE_NAME:
                glow_subgraph = node.g(SUBGRAPH_ATTR)
                # NOTE: the inner loop deliberately reuses the name `node`.
                for node in glow_subgraph.nodes():
                    if node.kind() == "aten::add":
                        fused_add = True
                    if node.kind() == "aten::sub":
                        fused_sub = True
        assert not fused_add, "Expected aten::add to be blacklisted"
        assert fused_sub, "Expected aten::sub to not be blacklisted"
        # Reset global fuser state so later tests are unaffected.
        torch_glow.clearFusionBlacklist()
    def test_op_index_blacklist(self):
        """Test Glow fuser index blacklisting mechanism."""
        def f(a, b):
            x1 = a * b
            x2 = x1 * b
            x3 = x2 * a
            x4 = x3 / b
            x5 = x4 / a
            x6 = x5 / b
            x7 = x6 * a
            x8 = x7 * b
            return x8
        torch_glow.enableFusionPass()
        # Only ops with indices in [3, 6) are eligible for fusion: that range
        # covers the three divisions but none of the multiplications.
        torch_glow.setFusionStartIndex(3)
        torch_glow.setFusionEndIndex(6)
        a = torch.randn(5, 5)
        b = torch.randn(5, 5)
        jit_f = torch.jit.trace(f, (a, b))
        jit_f_graph = jit_f.graph_for(a, b)
        torch_glow.clearFusionIndices()
        fused_muls = 0
        fused_divs = 0
        for node in jit_f_graph.nodes():
            if node.kind() == GLOW_NODE_NAME:
                glow_subgraph = node.g(SUBGRAPH_ATTR)
                for node in glow_subgraph.nodes():
                    if node.kind() == "aten::mul":
                        fused_muls += 1
                    if node.kind() == "aten::div":
                        fused_divs += 1
        assert fused_muls == 0, "Expected no aten::muls to be fused"
        assert fused_divs == 3, "Expected all 3 aten::divs to be fused"
a11267bc240c1ef0c8ec6e0e89bc7cd2ac5574b1 | 1,804 | py | Python | tests/test_precise_guess_loss.py | zarebulic/neural-semigroup-experiment | c554acb17d264ba810009f8b86c35ee9f8c4d1f4 | [
"Apache-2.0"
] | 6 | 2020-04-05T23:24:54.000Z | 2021-11-15T11:17:09.000Z | tests/test_precise_guess_loss.py | zarebulic/neural-semigroup-experiment | c554acb17d264ba810009f8b86c35ee9f8c4d1f4 | [
"Apache-2.0"
] | 23 | 2020-03-15T09:09:54.000Z | 2022-03-29T22:32:23.000Z | tests/test_precise_guess_loss.py | zarebulic/neural-semigroup-experiment | c554acb17d264ba810009f8b86c35ee9f8c4d1f4 | [
"Apache-2.0"
] | null | null | null | """
Copyright 2019-2021 Boris Shminke
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
# pylint: disable-all
from unittest import TestCase
import torch
from neural_semigroups.constants import CURRENT_DEVICE
from neural_semigroups.cyclic_group import CyclicGroup
from neural_semigroups.magma import Magma
from neural_semigroups.precise_guess_loss import PreciseGuessLoss
from neural_semigroups.utils import FOUR_GROUP
class TestPreciseGuessLoss(TestCase):
    """Unit test for the ``PreciseGuessLoss`` criterion."""

    def test_forward(self):
        """A half-correct batch scores 0.5; a perfect batch scores 1.0."""
        criterion = PreciseGuessLoss()

        def batch_of(first_cube, second_cube):
            # Stack two probabilistic cubes into a (batch, 4, 4, 4) tensor
            # on the device used throughout the test suite.
            stacked = torch.stack([first_cube, second_cube])
            return stacked.to(CURRENT_DEVICE).view(-1, 4, 4, 4)

        # Prediction matches the target on the first cube only.
        prediction = batch_of(
            Magma(FOUR_GROUP).probabilistic_cube,
            CyclicGroup(4).probabilistic_cube,
        )
        target = batch_of(
            Magma(FOUR_GROUP).probabilistic_cube,
            Magma(FOUR_GROUP).probabilistic_cube,
        )
        self.assertEqual(criterion(prediction, target), 0.5)
        self.assertEqual(criterion(target, target), 1.0)
| 32.8 | 77 | 0.648004 |
071a89ccd973a450cb5d4ffce75e766d476d6bd4 | 2,338 | py | Python | actions/custom_attr_assign_or_create.py | lingfish/stackstorm-vsphere | 49199f5ebdc05b70b7504962e104642b0c30ba30 | [
"Apache-2.0"
] | null | null | null | actions/custom_attr_assign_or_create.py | lingfish/stackstorm-vsphere | 49199f5ebdc05b70b7504962e104642b0c30ba30 | [
"Apache-2.0"
] | 2 | 2019-03-25T18:03:02.000Z | 2019-03-26T13:13:59.000Z | actions/custom_attr_assign_or_create.py | lingfish/stackstorm-vsphere | 49199f5ebdc05b70b7504962e104642b0c30ba30 | [
"Apache-2.0"
] | 1 | 2021-03-05T10:12:21.000Z | 2021-03-05T10:12:21.000Z | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from vmwarelib import inventory
from vmwarelib.actions import BaseAction
class CustomAttrAssignOrCreate(BaseAction):
    def run(self, custom_attr_name, custom_attr_value, object_id, object_type, vsphere=None):
        """
        Assign a custom attribute to a vSphere object, creating the
        attribute definition first if it does not exist yet.

        Args:
        - custom_attr_name: name of the custom attribute to assign
        - custom_attr_value: value of the custom attribute to assign
        - object_id: MOID of the object to assign the custom attribute to
        - object_type: vimType of the object
        - vsphere: pre-configured vSphere connection details (config.yaml)

        Returns:
        - tuple: (True, success message string)
        """
        vim_type = self.get_vim_type(object_type)
        self.establish_connection(vsphere)

        # Resolve the managed entity with the given MOID and type.
        target = inventory.get_managed_entity(self.si_content, vim_type, moid=object_id)
        fields_manager = self.si_content.customFieldsManager

        # Reuse an existing custom field definition when one matches the name.
        definition = None
        for candidate in fields_manager.field:
            if candidate.name == custom_attr_name:
                definition = candidate
                break
        if definition is None:
            definition = fields_manager.AddCustomFieldDef(name=custom_attr_name)

        fields_manager.SetField(entity=target, key=definition.key, value=custom_attr_value)
        return (True, "Attribute: '%s' set on object: '%s' with value: '%s'" %
                (custom_attr_name, object_id, custom_attr_value))
| 44.113208 | 93 | 0.718135 |
2c34a0a8eb6bf8e894c181f66ed7607e5acecb2a | 8,305 | py | Python | sympy/parsing/tests/test_latex.py | Knewton/sympy | a2ce003faaa504d3ad7aa57bbc53d5c1b37812bb | [
"BSD-3-Clause"
] | null | null | null | sympy/parsing/tests/test_latex.py | Knewton/sympy | a2ce003faaa504d3ad7aa57bbc53d5c1b37812bb | [
"BSD-3-Clause"
] | null | null | null | sympy/parsing/tests/test_latex.py | Knewton/sympy | a2ce003faaa504d3ad7aa57bbc53d5c1b37812bb | [
"BSD-3-Clause"
] | null | null | null | from sympy.testing.pytest import raises, XFAIL
from sympy.external import import_module
from sympy import (
Symbol, Mul, Add, Eq, Abs, sin, asin, cos, Pow,
csc, csch, sec, Limit, oo, Derivative, Integral, factorial,
sqrt, root, StrictLessThan, LessThan, StrictGreaterThan,
GreaterThan, Sum, Product, E, log, tan, Function, binomial
)
from sympy.abc import x, y, z, a, b, c, t, k, n
antlr4 = import_module("antlr4")
# disable tests if antlr4-python*-runtime is not present
if not antlr4:
disabled = True
theta = Symbol('theta')
f = Function('f')
# shorthand definitions
def _Add(a, b):
    """Unevaluated SymPy Add — preserves the parsed expression tree."""
    return Add(a, b, evaluate=False)
def _Mul(a, b):
    """Unevaluated SymPy Mul — preserves the parsed expression tree."""
    return Mul(a, b, evaluate=False)
def _Pow(a, b):
    """Unevaluated SymPy Pow — preserves the parsed expression tree."""
    return Pow(a, b, evaluate=False)
def _Abs(a):
    """Unevaluated SymPy Abs — preserves the parsed expression tree."""
    return Abs(a, evaluate=False)
def _factorial(a):
    """Unevaluated factorial — preserves the parsed expression tree."""
    return factorial(a, evaluate=False)
def _log(a, b):
    """Unevaluated log(a, base=b) — preserves the parsed expression tree."""
    return log(a, b, evaluate=False)
def _binomial(n, k):
    """Unevaluated binomial(n, k) — preserves the parsed expression tree."""
    return binomial(n, k, evaluate=False)
def test_import():
from sympy.parsing.latex._build_latex_antlr import (
build_parser,
check_antlr_version,
dir_latex_antlr
)
# XXX: It would be better to come up with a test for these...
del build_parser, check_antlr_version, dir_latex_antlr
# These LaTeX strings should parse to the corresponding SymPy expression
GOOD_PAIRS = [
("0", 0),
("1", 1),
("-3.14", _Mul(-1, 3.14)),
("(-7.13)(1.5)", _Mul(_Mul(-1, 7.13), 1.5)),
("x", x),
("2x", 2*x),
("x^2", x**2),
("x^{3 + 1}", x**_Add(3, 1)),
("-c", -c),
("a \\cdot b", a * b),
("a / b", a / b),
("a \\div b", a / b),
("a + b", a + b),
("a + b - a", _Add(a+b, -a)),
("a^2 + b^2 = c^2", Eq(a**2 + b**2, c**2)),
("\\sin \\theta", sin(theta)),
("\\sin(\\theta)", sin(theta)),
("\\sin^{-1} a", asin(a)),
("\\sin a \\cos b", _Mul(sin(a), cos(b))),
("\\sin \\cos \\theta", sin(cos(theta))),
("\\sin(\\cos \\theta)", sin(cos(theta))),
("\\csch^2(\\theta)", csch(theta)**2),
("\\frac{a}{b}", a / b),
("\\frac{a + b}{c}", _Mul(a + b, _Pow(c, -1))),
("\\frac{7}{3}", _Mul(7, _Pow(3, -1))),
("(\\csc x)(\\sec y)", csc(x)*sec(y)),
("\\lim_{x \\to 3} a", Limit(a, x, 3)),
("\\lim_{x \\rightarrow 3} a", Limit(a, x, 3)),
("\\lim_{x \\Rightarrow 3} a", Limit(a, x, 3)),
("\\lim_{x \\longrightarrow 3} a", Limit(a, x, 3)),
("\\lim_{x \\Longrightarrow 3} a", Limit(a, x, 3)),
("\\lim_{x \\to 3^{+}} a", Limit(a, x, 3, dir='+')),
("\\lim_{x \\to 3^{-}} a", Limit(a, x, 3, dir='-')),
("\\infty", oo),
("\\lim_{x \\to \\infty} \\frac{1}{x}",
Limit(_Mul(1, _Pow(x, -1)), x, oo)),
("\\frac{d}{dx} x", Derivative(x, x)),
("\\frac{d}{dt} x", Derivative(x, t)),
("f(x)", f(x)),
("f(x, y)", f(x, y)),
("f(x, y, z)", f(x, y, z)),
("\\frac{d f(x)}{dx}", Derivative(f(x), x)),
("\\frac{d\\theta(x)}{dx}", Derivative(Function('theta')(x), x)),
("|x|", _Abs(x)),
("||x||", _Abs(Abs(x))),
("|x||y|", _Abs(x)*_Abs(y)),
("||x||y||", _Abs(_Abs(x)*_Abs(y))),
("\\pi^{|xy|}", Symbol('pi')**_Abs(x*y)),
("\\int x dx", Integral(x, x)),
("\\int x d\\theta", Integral(x, theta)),
("\\int (x^2 - y)dx", Integral(x**2 - y, x)),
("\\int x + a dx", Integral(_Add(x, a), x)),
("\\int da", Integral(1, a)),
("\\int_0^7 dx", Integral(1, (x, 0, 7))),
("\\int_a^b x dx", Integral(x, (x, a, b))),
("\\int^b_a x dx", Integral(x, (x, a, b))),
("\\int_{a}^b x dx", Integral(x, (x, a, b))),
("\\int^{b}_a x dx", Integral(x, (x, a, b))),
("\\int_{a}^{b} x dx", Integral(x, (x, a, b))),
("\\int^{b}_{a} x dx", Integral(x, (x, a, b))),
("\\int_{f(a)}^{f(b)} f(z) dz", Integral(f(z), (z, f(a), f(b)))),
("\\int (x+a)", Integral(_Add(x, a), x)),
("\\int a + b + c dx", Integral(_Add(_Add(a, b), c), x)),
("\\int \\frac{dz}{z}", Integral(Pow(z, -1), z)),
("\\int \\frac{3 dz}{z}", Integral(3*Pow(z, -1), z)),
("\\int \\frac{1}{x} dx", Integral(Pow(x, -1), x)),
("\\int \\frac{1}{a} + \\frac{1}{b} dx",
Integral(_Add(_Pow(a, -1), Pow(b, -1)), x)),
("\\int \\frac{3 \\cdot d\\theta}{\\theta}",
Integral(3*_Pow(theta, -1), theta)),
("\\int \\frac{1}{x} + 1 dx", Integral(_Add(_Pow(x, -1), 1), x)),
("x_0", Symbol('x_{0}')),
("x_{1}", Symbol('x_{1}')),
("x_a", Symbol('x_{a}')),
("x_{b}", Symbol('x_{b}')),
("h_\\theta", Symbol('h_{theta}')),
("h_{\\theta}", Symbol('h_{theta}')),
("h_{\\theta}(x_0, x_1)",
Function('h_{theta}')(Symbol('x_{0}'), Symbol('x_{1}'))),
("x!", _factorial(x)),
("100!", _factorial(100)),
("\\theta!", _factorial(theta)),
("(x + 1)!", _factorial(_Add(x, 1))),
("(x!)!", _factorial(_factorial(x))),
("x!!!", _factorial(_factorial(_factorial(x)))),
("5!7!", _Mul(_factorial(5), _factorial(7))),
("\\sqrt{x}", sqrt(x)),
("\\sqrt{x + b}", sqrt(_Add(x, b))),
("\\sqrt[3]{\\sin x}", root(sin(x), 3)),
("\\sqrt[y]{\\sin x}", root(sin(x), y)),
("\\sqrt[\\theta]{\\sin x}", root(sin(x), theta)),
("x < y", StrictLessThan(x, y)),
("x \\leq y", LessThan(x, y)),
("x > y", StrictGreaterThan(x, y)),
("x \\geq y", GreaterThan(x, y)),
("\\mathit{x}", Symbol('x')),
("\\mathit{test}", Symbol('test')),
("\\mathit{TEST}", Symbol('TEST')),
("\\mathit{HELLO world}", Symbol('HELLO world')),
("\\sum_{k = 1}^{3} c", Sum(c, (k, 1, 3))),
("\\sum_{k = 1}^3 c", Sum(c, (k, 1, 3))),
("\\sum^{3}_{k = 1} c", Sum(c, (k, 1, 3))),
("\\sum^3_{k = 1} c", Sum(c, (k, 1, 3))),
("\\sum_{k = 1}^{10} k^2", Sum(k**2, (k, 1, 10))),
("\\sum_{n = 0}^{\\infty} \\frac{1}{n!}",
Sum(_Pow(_factorial(n), -1), (n, 0, oo))),
("\\prod_{a = b}^{c} x", Product(x, (a, b, c))),
("\\prod_{a = b}^c x", Product(x, (a, b, c))),
("\\prod^{c}_{a = b} x", Product(x, (a, b, c))),
("\\prod^c_{a = b} x", Product(x, (a, b, c))),
("\\ln x", _log(x, E)),
("\\ln xy", _log(x*y, E)),
("\\log x", _log(x, 10)),
("\\log xy", _log(x*y, 10)),
("\\log_{2} x", _log(x, 2)),
("\\log_{a} x", _log(x, a)),
("\\log_{11} x", _log(x, 11)),
("\\log_{a^2} x", _log(x, _Pow(a, 2))),
("[x]", x),
("[a + b]", _Add(a, b)),
("\\frac{d}{dx} [ \\tan x ]", Derivative(tan(x), x)),
("\\binom{n}{k}", _binomial(n, k)),
("\\tbinom{n}{k}", _binomial(n, k)),
("\\dbinom{n}{k}", _binomial(n, k)),
("\\binom{n}{0}", _binomial(n, 0))
]
def test_parseable():
    """Every GOOD_PAIRS entry must parse to exactly its SymPy expression."""
    from sympy.parsing.latex import parse_latex
    for latex_str, sympy_expr in GOOD_PAIRS:
        assert parse_latex(latex_str) == sympy_expr
# At time of migration from latex2sympy, should work but doesn't
FAILING_PAIRS = [
("\\log_2 x", _log(x, 2)),
("\\log_a x", _log(x, a)),
]
def test_failing_parseable():
from sympy.parsing.latex import parse_latex
for latex_str, sympy_expr in FAILING_PAIRS:
with raises(Exception):
assert parse_latex(latex_str) == sympy_expr
# These bad LaTeX strings should raise a LaTeXParsingError when parsed
# NOTE: the original list was missing a comma after "(\\frac{d}{dx})", which
# silently concatenated it with "\\sqrt{}" into a single unintended entry
# (adjacent string literals are joined by Python), so neither string was
# actually tested on its own. Both are now separate entries.
BAD_STRINGS = [
    "(",
    ")",
    "\\frac{d}{dx}",
    "(\\frac{d}{dx})",
    "\\sqrt{}",
    "\\sqrt",
    "{",
    "}",
    "\\mathit{x + y}",
    "\\mathit{21}",
    "\\frac{2}{}",
    "\\frac{}{2}",
    "\\int",
    "!",
    "!0",
    "_",
    "^",
    "|",
    "||x|",
    "()",
    "((((((((((((((((()))))))))))))))))",
    "-",
    "\\frac{d}{dx} + \\frac{d}{dt}",
    "f(x,,y)",
    "f(x,y,",
    "\\sin^x",
    "\\cos^2",
    "@",
    "#",
    "$",
    "%",
    "&",
    "*",
    "\\",
    "~",
    "\\frac{(2 + x}{1 - x)}"
]
def test_not_parseable():
    """Every BAD_STRINGS entry must raise LaTeXParsingError when parsed."""
    from sympy.parsing.latex import parse_latex, LaTeXParsingError
    for latex_str in BAD_STRINGS:
        with raises(LaTeXParsingError):
            parse_latex(latex_str)
# At time of migration from latex2sympy, should fail but doesn't
FAILING_BAD_STRINGS = [
"\\cos 1 \\cos",
"f(,",
"f()",
"a \\div \\div b",
"a \\cdot \\cdot b",
"a // b",
"a +",
"1.1.1",
"1 +",
"a / b /",
]
@XFAIL
def test_failing_not_parseable():
from sympy.parsing.latex import parse_latex, LaTeXParsingError
for latex_str in FAILING_BAD_STRINGS:
with raises(LaTeXParsingError):
parse_latex(latex_str)
| 30.759259 | 72 | 0.483564 |
d1609cc4bdd5e128b738b2d83a9bb141c7cdf705 | 9,076 | py | Python | learning/Ruijie/handwriting_recognization/main.py | ricardodxu/AutoGrading | 1469fe63a546b00bad9fda3af7a5fca499dce789 | [
"MIT"
] | 6 | 2018-05-26T02:47:42.000Z | 2019-12-04T20:06:50.000Z | learning/Ruijie/handwriting_recognization/main.py | ricardodxu/AutoGrading | 1469fe63a546b00bad9fda3af7a5fca499dce789 | [
"MIT"
] | 25 | 2017-09-19T20:37:55.000Z | 2019-04-26T21:50:46.000Z | learning/Ruijie/handwriting_recognization/main.py | ricardodxu/AutoGrading | 1469fe63a546b00bad9fda3af7a5fca499dce789 | [
"MIT"
] | 9 | 2017-09-16T05:16:00.000Z | 2021-07-17T03:17:47.000Z | # -*- coding: utf-8 -*-
import cv2
import numpy as np
# import matplotlib.pyplot as plt
from PIL import Image
from difflib import SequenceMatcher
from PIL import *
from PIL import ImageEnhance
import time
from pytesseract import image_to_string, image_to_boxes
import os
def getNameArea(imageFolderPath):
    """
    Crop the name area out of every scanned answer sheet in a folder.

    @param imageFolderPath: path (with trailing separator) to the folder
        that contains the scanned answer sheets; for each sheet the name
        region is located and saved by cropNamePart.
    """
    # BUG FIX: the original body read the module-level global `imagePath`
    # instead of the `imageFolderPath` parameter, so the argument was ignored.
    allfile = os.listdir(imageFolderPath)
    # macOS drops a .DS_Store entry into folders; the original unconditional
    # remove() raised ValueError when it was absent.
    if '.DS_Store' in allfile:
        allfile.remove('.DS_Store')
    print(allfile)
    for element in allfile:
        image = cv2.imread(imageFolderPath + element)
        thePositionOfE = findcoordinateOfName(imageFolderPath + element)
        cropNamePart(image, thePositionOfE, element)
        # name = "sample/" + element
def findcoordinateOfName(path):
    """
    Locate the letter 'e' of the printed word "name" on an answer sheet.

    @param path: path to the scanned answer-sheet image
    @return: bounding box of the 'e' as ((x_min, y_min), (x_max, y_max))
        in top-left-origin image coordinates
    Raises IndexError when OCR does not find the word "name".
    """
    image = cv2.imread(path)
    height, width = image.shape[:2]
    # OCR only the top third of the sheet — the name line lives there.
    crop_img = image[0:int(height / 3), 0:width]
    cv2.imwrite("temp.png", crop_img)
    image = Image.open("temp.png")
    # Each entry is one "<char> x1 y1 x2 y2 page" line from tesseract.
    box = image_to_boxes(image).split('\n')
    width, height = image.size
    coordinate = []
    # BUG FIX: stop 3 short of the end so box[i + 3] can never overrun the
    # list (the original iterated the full range and could raise IndexError);
    # blank lines are also skipped instead of crashing on box[i][0].
    for i in range(len(box) - 3):
        window = [box[i + j] for j in range(4)]
        if all(line and line[0] == expected
               for line, expected in zip(window, 'name')):
            coordinate = window
            break
    coorE = coordinate[3].split(" ")
    # tesseract box coordinates use a bottom-left origin; flip y to top-left.
    return ((int(coorE[1]), height - int(coorE[4])),
            (int(coorE[3]), height - int(coorE[2])))
def cropNamePart(image, thePositionOfE,fileName):
    """
    Crop the handwritten-name region out of an answer sheet and save it.

    @param image: the full answer-sheet image (array as read by cv2.imread)
    @param thePositionOfE: bounding box of the letter 'e' of "name", as
        ((x_min, y_min), (x_max, y_max)) in top-left-origin coordinates
    @param fileName: file name for the cropped image; it is written under
        the sample/ directory
    """
    # calculate bounds of interest
    # dh is the height of the printed 'e'; the crop window is sized relative
    # to it so the crop scales with the scan resolution.
    dh = thePositionOfE[1][1] - thePositionOfE[0][1]
    upper = thePositionOfE[0][1] - 2 * dh
    lower = thePositionOfE[1][1] + int(3.5 * dh)
    # the handwritten name sits to the right of the printed word "name"
    left = thePositionOfE[1][0]
    right = left + 40 * (thePositionOfE[1][0] - thePositionOfE[0][0])
    crop_img = image[ upper:lower, left:right]
    fileName = "sample/" + fileName
    cv2.imwrite(fileName, crop_img)
    # image = Image.open("temp.png")
    # return image
def findLengthAndHeight(contour):
    """
    Measure the horizontal extent of a contour.

    @param contour: an OpenCV-style contour, i.e. a sequence of
        single-point rows shaped like [[x, y]]
    @return: the width (max x - min x) of the contour; despite the
        function's name, only the horizontal extent is returned
    """
    xs = sorted(point[0][0] for point in contour)
    ys = sorted(point[0][1] for point in contour)
    # The vertical extent is computed but, as in the original API,
    # deliberately not returned.
    _height = ys[-1] - ys[0]
    return xs[-1] - xs[0]
def checkSpecial(cnt, contours,smallestWide, crop_image):
    """
    Merge a letter's contour with a nearby dot contour (handles 'i' / 'j').

    @param cnt: the contour of the candidate letter body
    @param contours: all contours found in the image
    @param smallestWide: one-element list holding the smallest letter width
        seen so far (a list so the update is visible to the caller)
    @param crop_image: the image containing the name; matched dot contours
        are drawn onto it as a side effect
    @return: (x, y, w, h) bounding box of the letter, widened/heightened to
        include the dot when one is found; otherwise the letter's own box
    """
    [x, y, w, h] = cv2.boundingRect(cnt)
    # Track the narrowest letter seen so far; used below to reject letters
    # that are already too wide to be an 'i'/'j' stem.
    if w < smallestWide[0]:
        smallestWide[0] = w
    for contour in contours:
        [x1, y1, w1, h1] = cv2.boundingRect(contour)
        # The string below is an earlier (disabled) variant of the match
        # criteria, kept as reference; note it used 0.5/2x thresholds where
        # the live code uses 0.7/3x.
        '''
        if x - 1.3 * w <x1 < x + 2 * w and \
           0.1*w < w1 < 2 * w and \
           cv2.contourArea(contour) < 0.5 * cv2.contourArea(cnt) and \
           h1 > 0.1*h and \
           w < 2*smallestWide[0] and \
           y1 < y:
            cv2.drawContours(crop_image, [contour], -1, (0, 255, 0), 2)
        '''
        # Nested ifs are equivalent to AND-ing all six conditions:
        # the candidate dot must be horizontally near the stem, narrower,
        # smaller in area, not degenerate in height, the stem itself must be
        # narrow, and the dot must sit above the stem (smaller y).
        if x - 1.3 * w <x1 < x + 2 * w :
            if 0.1*w < w1 < 2 * w :
                if cv2.contourArea(contour) < 0.7 * cv2.contourArea(cnt) :
                    if h1 > 0.1*h:
                        if w < 3*smallestWide[0] :
                            if y1 < y:
                                # Extend the box to cover both stem and dot.
                                cv2.drawContours(crop_image, [contour], -1, (0, 255, 0), 2)
                                return x, y1,abs(x1-x+w1), y-y1+h
    return x, y, w, h
if __name__ == '__main__':
# get the sample from original image
imagePath2 = "/Users/gengruijie/Desktop/未命名文件夹/OneDrive/学习" + \
"/cs/课外/Github/AutoGrading/learning/Ruijie/handwriting_recognization/original_file/"
# getNameArea(imagePath2)
imagePath = "/Users/gengruijie/Desktop/未命名文件夹/OneDrive/学习/cs/课外/Github/AutoGrading/learning/Ruijie/handwriting_recognization/sample/"
image = cv2.imread(imagePath+"temp3.png")
res = image
# convert image to grayscale
gray = cv2.cvtColor(res, cv2.COLOR_BGR2GRAY)
# blur the image slightly to remove noise.
# gray = cv2.bilateralFilter(gray, 11, 17, 17)
gray = cv2.GaussianBlur(gray, (5, 5), 0) # is an alternative way to blur the image
# canny edge detection
edged = cv2.Canny(gray, 30, 200)
# two threshold method.
# The first one is normal threshold method
# The second one is use Gaussian method which has better effect.
# ret,thresh1 = cv2.threshold(gray,150,150,cv2.THRESH_BINARY)
thresh=cv2.adaptiveThreshold(gray,255,cv2.ADAPTIVE_THRESH_GAUSSIAN_C,cv2.THRESH_BINARY,11,2)
try:
(cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
except:
(_, cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# cnts = sorted(cnts, key=cv2.contourArea, reverse=True)
cnts = sorted(cnts, key=lambda student: student[0][0][0], reverse=False)
# 找到外面的那个框,长度大于2/3的框
i = 0
lengthList = []
temp = res.copy()
for cnt in cnts:
lengthList.append(findLengthAndHeight(cnt))
# if True:
# temp = res.copy()
# cv2.drawContours(temp, [cnt], -1, (0, 255, 0), 2)
# cv2.imwrite(str(i) + ".png", temp)
# i += 1
largestCnt = cnts[lengthList.index(max(lengthList))]
x_axis = []
y_axis = []
for point in largestCnt:
x_axis.append(point[0][0])
y_axis.append(point[0][1])
x_axis.sort()
y_axis.sort()
maxX = x_axis[-1] - 0.03*(x_axis[-1] - x_axis[0])
minX = x_axis[0] + 0.03*(x_axis[-1] - x_axis[0])
maxY = y_axis[-1] - 0.05*(y_axis[-1] - y_axis[0])
minY = y_axis[0] + 0.03*(y_axis[-1] - y_axis[0])
crop_img = temp[ int(minY): int(maxY), int(minX): int(maxX)]
gray = cv2.cvtColor(crop_img, cv2.COLOR_BGR2GRAY)
# blur the image slightly to remove noise.
# gray = cv2.bilateralFilter(gray, 11, 17, 17)
gray = cv2.GaussianBlur(gray, (5, 5), 0) # is an alternative way to blur the image
# canny edge detection
edged = cv2.Canny(gray, 30, 200)
try:
(cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
except:
(_, cnts, _) = cv2.findContours(edged.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
# for cnt in contours:
# # print(cv2.contourArea(cnt))
# if cv2.contourArea(cnt) > 8 and cv2.contourArea(cnt) < 3000:
# [x, y, w, h] = cv2.boundingRect(cnt)
# print(cv2.contourArea(cnt))
# print(h)
# if h > 25:
# cv2.rectangle(im, (x, y), (x + w, y + h), (0, 0, 255), 2)
# roi = thresh[y:y + h, x:x + w]
# roismall = cv2.resize(roi, (10, 10))
# cv2.imshow('norm', im)
# key = cv2.waitKey(0)
#
# if key == 27: # (escape to quit)
# sys.exit()
# elif key in keys:
# responses.append(int(chr(key)))
# sample = roismall.reshape((1, 100))
# samples = np.append(samples, sample, 0)
cnts = sorted(cnts, key=lambda cnts: cnts[0][0][0], reverse=False)
cv2.drawContours(crop_img, cnts, -1, (0, 255, 0), 2)
cv2.imshow("this is all contours", crop_img)
smallestWide = [10000]
for cnt in cnts:
[_, _, w, h] = cv2.boundingRect(cnt)
if not(h > (maxY - minY) * 0.2 and w < (maxX-minX)*0.5 ):
continue
# special is i and j
x,y,w,h = checkSpecial(cnt, cnts,smallestWide, crop_img)
cv2.rectangle(crop_img, (x, y), (x + w, y + h), (0, 0, 255), 2)
roi = thresh[y:y + h, x:x + w]
roismall = cv2.resize(roi, (10, 10))
cv2.imshow("norm" , crop_img)
key = cv2.waitKey(0)
if key == 50:
cv2.imwrite("sample/Indivisible"+ ".png", crop_img)
elif key == 49:
cv2.imwrite("sample/Indivisible"+ ".png", crop_img)
# cv2.drawContours(temp, [cnts[14]], -1, (0, 255, 0), 2)
# cv2.imwrite("1111"+ ".png", crop_img)
| 34.249057 | 137 | 0.58539 |
00f2e98613f19cf6896f30a6bc497fc8764a582c | 964 | py | Python | zhihu_user_info_spider/zhihu_user_info_spider/scheduler/go.py | Yourrrrlove/spider_collection | 3b9f3cc8badc01e3f3e3035c14139d4d5fd1382f | [
"MIT"
] | 178 | 2021-10-29T18:42:04.000Z | 2022-03-30T16:44:37.000Z | zhihu_user_info_spider/zhihu_user_info_spider/scheduler/go.py | xzmdc/spider_collection | af2d622a9cf9d859ef6f2a48afea346110995be6 | [
"MIT"
] | 20 | 2021-11-09T15:25:55.000Z | 2022-03-25T05:23:39.000Z | zhihu_user_info_spider/zhihu_user_info_spider/scheduler/go.py | xzmdc/spider_collection | af2d622a9cf9d859ef6f2a48afea346110995be6 | [
"MIT"
] | 40 | 2021-11-02T01:30:01.000Z | 2022-03-19T12:15:24.000Z | import os
import sys
rootPath = os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__))))
sys.path.append(rootPath)
from zhihu_user_info_spider.scheduler.QuestionScheduler import QuestionScheduler
from zhihu_user_info_spider.scheduler.UserScheduler import UserScheduler
question_sche = QuestionScheduler()
user_sche = UserScheduler()
# @click.command("uuid")
def start_get_uuid():
    """Run the question scheduler's daily user-uuid collection."""
    question_sche.daily_get_user_uuid()


# @click.command("hot")
def start_get_hot():
    """Run the question scheduler's hot-list refresh."""
    question_sche.update_hot_list()


# @click.command("info")
def start_get_info():
    """Run the user scheduler's user-info crawl."""
    user_sche.get_user_info()
if __name__ == '__main__':
    # Map each CLI mode name to its scheduler entry point.
    handlers = {
        "hot": start_get_hot,
        "uuid": start_get_uuid,
        "info": start_get_info,
    }
    if len(sys.argv) == 1:
        print("请输入参数【hot、uuid、info】")
    elif sys.argv[1] in handlers:
        handlers[sys.argv[1]]()
    else:
        print("请正确输入要启动的模式【hot、uuid、info】")
| 23.512195 | 87 | 0.676349 |
9a2c71d8dcdcf3105873c51bcc247465c0a421f4 | 1,251 | py | Python | speech_recognition/cmusphinx-code/sphinxtrain/python/cmusphinx/lattice_error.py | Ohara124c41/TUB-MSc_Thesis | b1a2d5dc9c0c589a39019126cf7a5cc775baa288 | [
"MIT"
] | 1 | 2016-12-05T01:29:52.000Z | 2016-12-05T01:29:52.000Z | speech_recognition/cmusphinx-code/sphinxtrain/python/cmusphinx/lattice_error.py | Ohara124c41/TUB-MSc_Thesis | b1a2d5dc9c0c589a39019126cf7a5cc775baa288 | [
"MIT"
] | null | null | null | speech_recognition/cmusphinx-code/sphinxtrain/python/cmusphinx/lattice_error.py | Ohara124c41/TUB-MSc_Thesis | b1a2d5dc9c0c589a39019126cf7a5cc775baa288 | [
"MIT"
] | 1 | 2020-04-24T17:26:59.000Z | 2020-04-24T17:26:59.000Z | #!/usr/bin/env python
import sys
import os
import lattice
from itertools import izip
# Python 2 script: score lattices against reference transcripts and report
# per-utterance and total lattice word error rates.
# Usage: lattice_error.py <ctl-file> <ref-file> <lattice-dir> [prune-beam]
ctl, ref, latdir = sys.argv[1:4]
prune = 0
if len(sys.argv) > 4:
    prune = float(sys.argv[4])
ctl = open(ctl)
ref = open(ref)
wordcount = 0
errcount = 0
# Walk the control file and reference transcript in lockstep; each ctl line
# names one utterance, each ref line is its transcript.
for c,r in izip(ctl, ref):
    c = c.strip()
    r = r.split()
    # Drop the trailing utterance-id token, then force <s>...</s> sentence
    # markers so the transcript matches the lattice's start/end nodes.
    del r[-1]
    if len(r) == 0 or r[0] != '<s>': r.insert(0, '<s>')
    if r[-1] != '</s>': r.append('</s>')
    # Word count excludes the two sentence markers.
    nw = len(r) - 2
    r = filter(lambda x: not lattice.is_filler(x), r)
    l = lattice.Dag()
    # Try Sphinx lattice formats first (.lat.gz, then .lat), falling back
    # to HTK .slf.
    try:
        l.sphinx2dag(os.path.join(latdir, c + ".lat.gz"))
    except IOError:
        try:
            l.sphinx2dag(os.path.join(latdir, c + ".lat"))
        except IOError:
            l.htk2dag(os.path.join(latdir, c + ".slf"))
    if prune:
        l.posterior_prune(-prune)
    # bt is the backtrace: a sequence of (hypothesis, reference) word pairs.
    err, bt = l.minimum_error(r)
    # Column-align the hypothesis and reference rows for display.
    maxlen = [max([len(y) for y in x]) for x in bt]
    print " ".join(["%*s" % (m, x[0]) for m, x in izip(maxlen, bt)])
    print " ".join(["%*s" % (m, x[1]) for m, x in izip(maxlen, bt)])
    if nw:
        print "Error: %.2f%%" % (float(err) / nw * 100)
    else:
        print "Error: %.2f%%" % 0
    print
    wordcount += nw
    errcount += err
print "TOTAL Error: %.2f%%" % (float(errcount) / wordcount * 100)
| 26.0625 | 68 | 0.533173 |
2a6fdb9bedab1a236e7efba07b48d19bc12d4898 | 147 | py | Python | version.py | sayRequil/backBone | ca0da396f78260feaf41daae0c7aa7a685096c2d | [
"MIT"
] | 1 | 2017-07-11T09:36:26.000Z | 2017-07-11T09:36:26.000Z | version.py | sayRequil/backBone | ca0da396f78260feaf41daae0c7aa7a685096c2d | [
"MIT"
] | null | null | null | version.py | sayRequil/backBone | ca0da396f78260feaf41daae0c7aa7a685096c2d | [
"MIT"
] | null | null | null | def vers():
major = "1"
minor = "0"
release = "0"
pre = "alpha"
version = ''.join([major,".",minor,".",release,":",pre])
return version | 21 | 58 | 0.537415 |
e40e89222a205be019476a38e08c6e6cbfe4c9a8 | 1,186 | py | Python | class_lib/solids/ellipsoid.py | Tomas-Tamantini/py-cgi | 177930da87aaac721e1294211aa291b11e156ee8 | [
"MIT"
] | null | null | null | class_lib/solids/ellipsoid.py | Tomas-Tamantini/py-cgi | 177930da87aaac721e1294211aa291b11e156ee8 | [
"MIT"
] | null | null | null | class_lib/solids/ellipsoid.py | Tomas-Tamantini/py-cgi | 177930da87aaac721e1294211aa291b11e156ee8 | [
"MIT"
] | null | null | null | from basics import Vector
from class_lib.solid_objects import AbstractObject
from class_lib.useful_functions import min_pos_root
class Ellipsoid(AbstractObject):
    """
    Axis-aligned ellipsoid centred at the local origin.

    width, length and height are the FULL extents along the local x, y and
    z axes (the surface equation below equals 1/4, i.e. the semi-axes are
    width/2, length/2, height/2), made of a single homogeneous material.
    """
    def __init__(self, coordinate_system, material, width=1, length=1, height=1):
        super().__init__(coordinate_system)
        self.material = material
        self.width = width
        self.length = length
        self.height = height
    def material_at(self, rel_position):
        # Homogeneous solid: same material everywhere.
        return self.material
    def easier_intersection(self, ray_p0, ray_dir):
        # Substitute p0 + t*dir into (x/w)^2 + (y/l)^2 + (z/h)^2 = 1/4 and
        # solve the resulting quadratic a*t^2 + b*t + c = 0 for the smallest
        # positive root (the nearest intersection in front of the ray origin).
        a = (ray_dir.x / self.width) ** 2 + (ray_dir.y / self.length) ** 2 + (ray_dir.z / self.height) ** 2
        b = 2 * ((ray_p0.x * ray_dir.x) / self.width ** 2 +
                 (ray_p0.y * ray_dir.y) / self.length ** 2 +
                 (ray_p0.z * ray_dir.z) / self.height ** 2)
        c = (ray_p0.x / self.width) ** 2 + (ray_p0.y / self.length) ** 2 + (ray_p0.z / self.height) ** 2 - 0.25
        return min_pos_root(a, b, c)
    def normal_at(self, rel_position):
        # Outward surface normal: the (normalised) gradient of the implicit
        # surface function at rel_position.
        return Vector(rel_position.x / self.width ** 2,
                      rel_position.y / self.length ** 2,
                      rel_position.z / self.height ** 2).unit
| 39.533333 | 111 | 0.602867 |
6aaaaae573e4ce297592d3a5226a30811e940a6b | 3,627 | py | Python | tests/unit/modules/test_twilio_notify.py | l2ol33rt/salt | ff68bbd9f4bda992a3e039822fb32f141e94347c | [
"Apache-2.0"
] | null | null | null | tests/unit/modules/test_twilio_notify.py | l2ol33rt/salt | ff68bbd9f4bda992a3e039822fb32f141e94347c | [
"Apache-2.0"
] | null | null | null | tests/unit/modules/test_twilio_notify.py | l2ol33rt/salt | ff68bbd9f4bda992a3e039822fb32f141e94347c | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
'''
:codeauthor: :email:`Jayesh Kariya <jayeshk@saltstack.com>`
'''
# Import Python Libs
from __future__ import absolute_import
# Import Salt Testing Libs
from tests.support.mixins import LoaderModuleMockMixin
from tests.support.unit import TestCase, skipIf
from tests.support.mock import (
MagicMock,
patch,
NO_MOCK,
NO_MOCK_REASON
)
# Import Salt Libs
import salt.modules.twilio_notify as twilio_notify
class MockTwilioRestException(Exception):
    '''
    Stand-in for twilio's TwilioRestException.

    Carries the code/msg/status attributes that the module under test
    inspects when a send fails.
    '''
    def __init__(self):
        message = 'Exception error'
        self.code = 'error code'
        self.msg = message
        self.status = 'Not send'
        super(MockTwilioRestException, self).__init__(message)
class MockMessages(object):
    '''
    Stand-in for twilio's SMS "messages" resource.

    ``flag`` is a class-level switch: when set to 1, ``create`` raises
    MockTwilioRestException instead of returning a message.
    '''
    flag = None

    def __init__(self):
        self.sid = '011'
        self.price = '200'
        self.price_unit = '1'
        self.status = 'Sent'
        self.num_segments = '2'
        self.body = None
        self.date_sent = '01-01-2015'
        self.date_created = '01-01-2015'
        self.to = None
        self.from_ = None

    def create(self, body, to, from_):
        '''
        Return a new message populated with the given fields, or raise
        MockTwilioRestException when the class-level ``flag`` is 1.
        '''
        if self.flag == 1:
            raise MockTwilioRestException()
        message = MockMessages()
        message.body = body
        message.to = to
        message.from_ = from_
        return message
class MockSMS(object):
    '''
    Stand-in for the twilio client's ``sms`` resource; exposes a
    ``messages`` attribute so tests can reach client.sms.messages.create().
    '''
    def __init__(self):
        # mirror the real client's resource chain: client.sms.messages
        self.messages = MockMessages()
class MockTwilioRestClient(object):
    '''
    Stand-in for twilio's TwilioRestClient; only provides the ``sms``
    resource used by salt.modules.twilio_notify.
    '''
    def __init__(self):
        self.sms = MockSMS()
@skipIf(NO_MOCK, NO_MOCK_REASON)
class TwilioNotifyTestCase(TestCase, LoaderModuleMockMixin):
    '''
    Test cases for salt.modules.twilio_notify
    '''
    def setup_loader_modules(self):
        # Replace the real twilio client/exception with the mocks above so
        # no network access is attempted.
        return {
            twilio_notify: {
                'TwilioRestClient': MockTwilioRestClient,
                'TwilioRestException': MockTwilioRestException
            }
        }
    # 'send_sms' function tests: 1
    def test_send_sms(self):
        '''
        Test if it send an sms.
        '''
        mock = MagicMock(return_value=MockTwilioRestClient())
        with patch.object(twilio_notify, '_get_twilio', mock):
            # Success path: the returned dict mirrors the mock message fields.
            self.assertDictEqual(twilio_notify.send_sms('twilio-account',
                                                        'SALTSTACK',
                                                        '+18019999999',
                                                        '+18011111111'),
                                 {'message': {'status': 'Sent',
                                              'num_segments': '2',
                                              'price': '200',
                                              'body': 'SALTSTACK', 'sid': '011',
                                              'date_sent': '01-01-2015',
                                              'date_created': '01-01-2015',
                                              'price_unit': '1'}})
            # Failure path: flag=1 makes MockMessages.create raise, so
            # send_sms must report the exception details under '_error'.
            MockMessages.flag = 1
            self.assertDictEqual(twilio_notify.send_sms('twilio-account',
                                                        'SALTSTACK',
                                                        '+18019999999',
                                                        '+18011111111'),
                                 {'message': {'sid': None}, '_error':
                                  {'msg': 'Exception error',
                                   'status': 'Not send', 'code': 'error code'}})
| 29.729508 | 80 | 0.483595 |
293f7ec8cd1669d346ac6634f935746181cfe295 | 931 | py | Python | setup.py | ArpitFalcon/clean-my-logs | ff51d06a76821d61a6c67154bb8f0315fddc10da | [
"MIT"
] | 1 | 2021-08-06T07:16:59.000Z | 2021-08-06T07:16:59.000Z | setup.py | ArpitFalcon/clean-my-logs | ff51d06a76821d61a6c67154bb8f0315fddc10da | [
"MIT"
] | null | null | null | setup.py | ArpitFalcon/clean-my-logs | ff51d06a76821d61a6c67154bb8f0315fddc10da | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
# Read the long description and version string from the repository files.
with open("README.md", "r", encoding='utf-8') as f:
    readme = f.read()
with open("VERSION.txt", "r") as f:
    version_num = f.read()
setup (
    name="cleanlogs",
    version = version_num.strip(),
    author="Arpit",
    author_email="arpitfalcon1@gmail.com",
    description = "Clean thousand of lines of log file",
    long_description = readme,
    long_description_content_type = "text/markdown",
    # Homepage
    url="https://github.com/arpitfalcon/clean-my-logs",
    install_requires=[],
    keywords="pypi logmine cleanlogs clean logs cli bash",
    include_package_data=True,
    classifiers=[
        "Programming Language :: Python :: 3",
        "License :: OSI Approved :: MIT License",
        "Operating System :: OS Independent",
    ],
    python_requires=">=3.6",
    # Expose `cleanlogs` as a console command mapped to src.run:run.
    entry_points = {
        'console_scripts': [
            'cleanlogs = src.run:run'
        ],}
)
) | 27.382353 | 58 | 0.635875 |
80e5637e4f67a116fbfb84f8cb9eb15d1d947621 | 118 | py | Python | src/telfit/__init__.py | Kamuish/Telluric-Fitter | 9a7dc7cd3c84929064a49293d494592326c18731 | [
"MIT"
] | 14 | 2015-02-26T20:56:42.000Z | 2021-09-21T16:09:49.000Z | src/telfit/__init__.py | gully/Telluric-Fitter | a6988feb6340e73b0fca302f17da1c15dd7de81c | [
"MIT"
] | 35 | 2015-02-26T23:00:28.000Z | 2022-03-09T20:55:56.000Z | src/telfit/__init__.py | gully/Telluric-Fitter | a6988feb6340e73b0fca302f17da1c15dd7de81c | [
"MIT"
] | 11 | 2018-05-30T21:03:48.000Z | 2021-12-04T23:27:42.000Z | from TelluricFitter import TelluricFitter
from MakeModel import Modeler
import DataStructures
import FittingUtilities
| 23.6 | 41 | 0.898305 |
57f941737ef5ca6b3c2bf48ac3d88b50cf670b89 | 4,778 | py | Python | src/NodeGenerators/distributeNodesGeneric.py | jmikeowen/Spheral | 3e1082a7aefd6b328bd3ae24ca1a477108cfc3c4 | [
"BSD-Source-Code",
"BSD-3-Clause-LBNL",
"FSFAP"
] | 22 | 2018-07-31T21:38:22.000Z | 2020-06-29T08:58:33.000Z | src/NodeGenerators/distributeNodesGeneric.py | jmikeowen/Spheral | 3e1082a7aefd6b328bd3ae24ca1a477108cfc3c4 | [
"BSD-Source-Code",
"BSD-3-Clause-LBNL",
"FSFAP"
] | 41 | 2020-09-28T23:14:27.000Z | 2022-03-28T17:01:33.000Z | src/NodeGenerators/distributeNodesGeneric.py | jmikeowen/Spheral | 3e1082a7aefd6b328bd3ae24ca1a477108cfc3c4 | [
"BSD-Source-Code",
"BSD-3-Clause-LBNL",
"FSFAP"
] | 7 | 2019-12-01T07:00:06.000Z | 2020-09-15T21:12:39.000Z | import Spheral
import mpi
#-------------------------------------------------------------------------------
# Domain decompose using some specified domain partitioner (generic method).
#-------------------------------------------------------------------------------
def distributeNodesGeneric(listOfNodeTuples,
                           DataBaseType,
                           globalNodeIDs,
                           RedistributeNodesType):
    """Domain-decompose NodeLists across MPI ranks using a pluggable partitioner.

    listOfNodeTuples : sequence of (nodes, generator[, extralist, ...]) tuples.
        Any entries beyond the first two are per-node value lists that are
        preserved through the redistribution and written back at the end.
    DataBaseType : Spheral DataBase class used to collect the NodeLists.
    globalNodeIDs : accepted but not referenced directly in this function
        -- NOTE(review): presumably consumed by the redistributer; confirm.
    RedistributeNodesType : repartitioner class, or a falsy value to keep the
        generator's initial node distribution.
    """
    # We'll build the NodeLists into a DataBase.
    db = DataBaseType()
    # Assign nodes to domains by globalNodeID as a first cut.
    kernelExtent = 0.0
    numNodesPerProcess = [0]
    totalNumGlobalNodes = 0
    extrafields = {}
    for tup in listOfNodeTuples:
        # We assume any extra args are list of values per node we want preserved through
        # the node generation
        assert len(tup) >= 2
        nodes, generator, extralists = tup[0], tup[1], tup[2:]
        nglobal = generator.globalNumNodes()
        nlocal = generator.localNumNodes()
        print "distributeNodesGeneric: working on %s, (local, global) number nodes %i %i" % (nodes.name, nlocal, nglobal)
        numNodesPerProcess[0] += nlocal
        totalNumGlobalNodes += nglobal
        nodes.numGhostNodes = 0
        nodes.numInternalNodes = nlocal
        # Prepare to preserve any extra per point values
        extrafields[nodes.name] = []
        # Pick the dimension-appropriate field/tensor types at runtime.
        ScalarField = eval("Spheral.ScalarField%id" % db.nDim)
        for iextra, vals in enumerate(extralists):
            assert len(vals) == nlocal
            extrafields[nodes.name].append(ScalarField("extra%i" % iextra, nodes))
            for i in xrange(nlocal):
                extrafields[nodes.name][iextra][i] = vals[i]
        # Find the maximum kernel extent for all NodeLists.
        kernelExtent = max(kernelExtent, nodes.neighbor().kernelExtent)
        one = eval("Spheral.SymTensor%id.one" % db.nDim)
        hminInv = 1.0/nodes.hmin
        hmaxInv = 1.0/nodes.hmax
        # We start with the initial crappy distribution used in the generator.
        assert mpi.allreduce(nodes.numInternalNodes, mpi.SUM) == nglobal
        print "    distributeNodesGeneric: performing initial crappy distribution."
        r = nodes.positions()
        m = nodes.mass()
        vel = nodes.velocity()
        rho = nodes.massDensity()
        H = nodes.Hfield()
        for i in xrange(nlocal):
            r[i] = generator.localPosition(i)
            m[i] = generator.localMass(i)
            vel[i] = generator.localVelocity(i)
            rho[i] = generator.localMassDensity(i)
            H[i] = generator.localHtensor(i)
        # Clamp the smoothing tensor within the [hmin, hmax] bounds
        # (note: inverse values, so min/max are swapped).
        H.applyScalarMin(hmaxInv)
        H.applyScalarMax(hminInv)
        # Put this NodeList into the DataBase.
        db.appendNodeList(nodes)
        print "    distributeNodesGeneric: %s initially finished" % nodes.name
#     # Update Neighbor information.
#     exec("Spheral.Neighbor%id.setBoundingBox()" % db.nDim)
#     for (nodes, generator) in listOfNodeTuples:
#         nodes.neighbor().updateNodes()
#         if (isinstance(nodes, Spheral.FluidNodeList1d) or
#             isinstance(nodes, Spheral.FluidNodeList2d) or
#             isinstance(nodes, Spheral.FluidNodeList3d)):
#             nodes.updateWeight()
    # Report the initial breakdown.
    numNodesPerProcess = mpi.allreduce(numNodesPerProcess, mpi.SUM)
    print "(min, max, avg) nodes per process initially: ", min(numNodesPerProcess), max(numNodesPerProcess), sum(numNodesPerProcess)/len(numNodesPerProcess)
    print "Total number of nodes: ", totalNumGlobalNodes
    # Now have the Redistributer repartition the nodes into something sensible.  Note this
    # automatically redistributes the globalNodeListID fields as well.
    print "distributeNodesGeneric: calling for redistribution."
    if RedistributeNodesType:
        repartition = RedistributeNodesType(kernelExtent)
        repartition.redistributeNodes(db)
    print "distributeNodesGeneric: redistribution done."
    # Update the neighboring info.
    #exec("Spheral.Neighbor%id.setBoundingBox()" % db.nDim)
    for nodes in db.nodeLists():
        nodes.neighbor().updateNodes()
    # Make sure we finished with the correct numbers of nodes!
    totalCheck = mpi.allreduce(sum([nodes.numInternalNodes for nodes in db.nodeLists()]), mpi.SUM)
    assert totalCheck == totalNumGlobalNodes
    # Stuff any extra field values back in the initial lists.
    for tup in listOfNodeTuples:
        assert len(tup) >= 2
        nodes, generator, extralists = tup[0], tup[1], tup[2:]
        if extralists:
            assert len(extrafields[nodes.name]) == len(extralists)
            for vals, field in zip(extralists, extrafields[nodes.name]):
                vals[:] = list(field.internalValues())
    return
| 43.436364 | 157 | 0.631436 |
252658c29bff94735fd4fd59ceaa1139cb846d50 | 16,982 | py | Python | pbr-1.0.1-py2.7.egg/pbr/tests/test_setup.py | umkcdcrg01/ryu_openflow | 37ed5b88f7d119344e07c95314a7450235c037a8 | [
"Apache-2.0"
] | null | null | null | pbr-1.0.1-py2.7.egg/pbr/tests/test_setup.py | umkcdcrg01/ryu_openflow | 37ed5b88f7d119344e07c95314a7450235c037a8 | [
"Apache-2.0"
] | null | null | null | pbr-1.0.1-py2.7.egg/pbr/tests/test_setup.py | umkcdcrg01/ryu_openflow | 37ed5b88f7d119344e07c95314a7450235c037a8 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2011 OpenStack Foundation
# Copyright (c) 2013 Hewlett-Packard Development Company, L.P.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from __future__ import print_function
import os
import sys
import tempfile
try:
import cStringIO as io
BytesIO = io.StringIO
except ImportError:
import io
BytesIO = io.BytesIO
import fixtures
import testscenarios
from pbr import git
from pbr import options
from pbr import packaging
from pbr.tests import base
class SkipFileWrites(base.BaseTestCase):
    """Verify ChangeLog/AUTHORS generation is skipped when requested.

    Each scenario pairs a setup.cfg option (``skip_changelog`` /
    ``skip_authors``) with its corresponding environment variable
    (``SKIP_WRITE_GIT_CHANGELOG`` / ``SKIP_GENERATE_AUTHORS``); the output
    file must exist only when neither mechanism requests a skip.
    """
    scenarios = [
        ('changelog_option_true',
         dict(option_key='skip_changelog', option_value='True',
              env_key='SKIP_WRITE_GIT_CHANGELOG', env_value=None,
              pkg_func=git.write_git_changelog, filename='ChangeLog')),
        ('changelog_option_false',
         dict(option_key='skip_changelog', option_value='False',
              env_key='SKIP_WRITE_GIT_CHANGELOG', env_value=None,
              pkg_func=git.write_git_changelog, filename='ChangeLog')),
        ('changelog_env_true',
         dict(option_key='skip_changelog', option_value='False',
              env_key='SKIP_WRITE_GIT_CHANGELOG', env_value='True',
              pkg_func=git.write_git_changelog, filename='ChangeLog')),
        ('changelog_both_true',
         dict(option_key='skip_changelog', option_value='True',
              env_key='SKIP_WRITE_GIT_CHANGELOG', env_value='True',
              pkg_func=git.write_git_changelog, filename='ChangeLog')),
        ('authors_option_true',
         dict(option_key='skip_authors', option_value='True',
              env_key='SKIP_GENERATE_AUTHORS', env_value=None,
              pkg_func=git.generate_authors, filename='AUTHORS')),
        ('authors_option_false',
         dict(option_key='skip_authors', option_value='False',
              env_key='SKIP_GENERATE_AUTHORS', env_value=None,
              pkg_func=git.generate_authors, filename='AUTHORS')),
        ('authors_env_true',
         dict(option_key='skip_authors', option_value='False',
              env_key='SKIP_GENERATE_AUTHORS', env_value='True',
              pkg_func=git.generate_authors, filename='AUTHORS')),
        ('authors_both_true',
         dict(option_key='skip_authors', option_value='True',
              env_key='SKIP_GENERATE_AUTHORS', env_value='True',
              pkg_func=git.generate_authors, filename='AUTHORS')),
    ]
    def setUp(self):
        """Build the per-scenario option dict and env var; needs a real .git dir."""
        super(SkipFileWrites, self).setUp()
        self.temp_path = self.useFixture(fixtures.TempDir()).path
        self.root_dir = os.path.abspath(os.path.curdir)
        self.git_dir = os.path.join(self.root_dir, ".git")
        if not os.path.exists(self.git_dir):
            self.skipTest("%s is missing; skipping git-related checks"
                          % self.git_dir)
            return
        self.filename = os.path.join(self.temp_path, self.filename)
        self.option_dict = dict()
        if self.option_key is not None:
            self.option_dict[self.option_key] = ('setup.cfg',
                                                 self.option_value)
        self.useFixture(
            fixtures.EnvironmentVariable(self.env_key, self.env_value))
    def test_skip(self):
        # The file must be absent iff either the option or the env var
        # asked for the write to be skipped.
        self.pkg_func(git_dir=self.git_dir,
                      dest_dir=self.temp_path,
                      option_dict=self.option_dict)
        self.assertEqual(
            not os.path.exists(self.filename),
            (self.option_value.lower() in options.TRUE_VALUES
             or self.env_value is not None))
# Canned `git log --oneline --decorate`-style output used to fake the git
# subprocess in GitLogsTest.test_write_git_changelog below.
_changelog_content = """04316fe (review/monty_taylor/27519) Make python
378261a Add an integration test script.
3c373ac (HEAD, tag: 2013.2.rc2, tag: 2013.2, milestone-proposed) Merge "Lib
182feb3 (tag: 0.5.17) Fix pip invocation for old versions of pip.
fa4f46e (tag: 0.5.16) Remove explicit depend on distribute.
d1c53dd Use pip instead of easy_install for installation.
a793ea1 Merge "Skip git-checkout related tests when .git is missing"
6c27ce7 Skip git-checkout related tests when .git is missing
04984a5 Refactor hooks file.
a65e8ee (tag: 0.5.14, tag: 0.5.13) Remove jinja pin.
"""
class GitLogsTest(base.BaseTestCase):
    """Exercise ChangeLog and AUTHORS file generation with a faked git."""
    def setUp(self):
        super(GitLogsTest, self).setUp()
        self.temp_path = self.useFixture(fixtures.TempDir()).path
        self.root_dir = os.path.abspath(os.path.curdir)
        self.git_dir = os.path.join(self.root_dir, ".git")
        # Clear any skip flags inherited from the caller's environment.
        self.useFixture(
            fixtures.EnvironmentVariable('SKIP_GENERATE_AUTHORS'))
        self.useFixture(
            fixtures.EnvironmentVariable('SKIP_WRITE_GIT_CHANGELOG'))
    def test_write_git_changelog(self):
        # Fake the git subprocess so it returns the canned log above.
        self.useFixture(fixtures.FakePopen(lambda _: {
            "stdout": BytesIO(_changelog_content.encode('utf-8'))
        }))
        git.write_git_changelog(git_dir=self.git_dir,
                                dest_dir=self.temp_path)
        with open(os.path.join(self.temp_path, "ChangeLog"), "r") as ch_fh:
            changelog_contents = ch_fh.read()
            # Tags become section headers; commit hashes, review refs and
            # merge commits must be filtered out of the rendered ChangeLog.
            self.assertIn("2013.2", changelog_contents)
            self.assertIn("0.5.17", changelog_contents)
            self.assertIn("------", changelog_contents)
            self.assertIn("Refactor hooks file", changelog_contents)
            self.assertNotIn("Refactor hooks file.", changelog_contents)
            self.assertNotIn("182feb3", changelog_contents)
            self.assertNotIn("review/monty_taylor/27519", changelog_contents)
            self.assertNotIn("0.5.13", changelog_contents)
            self.assertNotIn('Merge "', changelog_contents)
    def test_generate_authors(self):
        # AUTHORS must merge AUTHORS.in entries, commit authors and
        # Co-authored-by trailers.
        author_old = u"Foo Foo <email@foo.com>"
        author_new = u"Bar Bar <email@bar.com>"
        co_author = u"Foo Bar <foo@bar.com>"
        co_author_by = u"Co-authored-by: " + co_author
        git_log_cmd = (
            "git --git-dir=%s log --format=%%aN <%%aE>"
            % self.git_dir)
        git_co_log_cmd = ("git --git-dir=%s log" % self.git_dir)
        git_top_level = "git rev-parse --show-toplevel"
        cmd_map = {
            git_log_cmd: author_new,
            git_co_log_cmd: co_author_by,
            git_top_level: self.root_dir,
        }
        exist_files = [self.git_dir,
                       os.path.join(self.temp_path, "AUTHORS.in")]
        self.useFixture(fixtures.MonkeyPatch(
            "os.path.exists",
            lambda path: os.path.abspath(path) in exist_files))
        def _fake_run_shell_command(cmd, **kwargs):
            return cmd_map[" ".join(cmd)]
        self.useFixture(fixtures.MonkeyPatch(
            "pbr.git._run_shell_command",
            _fake_run_shell_command))
        with open(os.path.join(self.temp_path, "AUTHORS.in"), "w") as auth_fh:
            auth_fh.write("%s\n" % author_old)
        git.generate_authors(git_dir=self.git_dir,
                             dest_dir=self.temp_path)
        with open(os.path.join(self.temp_path, "AUTHORS"), "r") as auth_fh:
            authors = auth_fh.read()
            self.assertTrue(author_old in authors)
            self.assertTrue(author_new in authors)
            self.assertTrue(co_author in authors)
class BuildSphinxTest(base.BaseTestCase):
    """Test pbr's LocalBuildDoc: autodoc index generation, module
    exclusion, and builder selection/override behavior."""
    scenarios = [
        ('true_autodoc_caps',
         dict(has_opt=True, autodoc='True', has_autodoc=True)),
        ('true_autodoc_caps_with_excludes',
         dict(has_opt=True, autodoc='True', has_autodoc=True,
              excludes="fake_package.fake_private_module\n"
              "fake_package.unknown_module")),
        ('true_autodoc_lower',
         dict(has_opt=True, autodoc='true', has_autodoc=True)),
        ('false_autodoc',
         dict(has_opt=True, autodoc='False', has_autodoc=False)),
        ('no_autodoc',
         dict(has_opt=False, autodoc='False', has_autodoc=False)),
    ]
    def setUp(self):
        """Create a fake package on disk and a Distribution pointing at it."""
        super(BuildSphinxTest, self).setUp()
        # Neutralize the real sphinx build; we only check file generation.
        self.useFixture(fixtures.MonkeyPatch(
            "sphinx.setup_command.BuildDoc.run", lambda self: None))
        from distutils import dist
        self.distr = dist.Distribution()
        self.distr.packages = ("fake_package",)
        self.distr.command_options["build_sphinx"] = {
            "source_dir": ["a", "."]}
        pkg_fixture = fixtures.PythonPackage(
            "fake_package", [("fake_module.py", b""),
                             ("fake_private_module.py", b"")])
        self.useFixture(pkg_fixture)
        self.useFixture(base.DiveDir(pkg_fixture.base))
        self.distr.command_options["pbr"] = {}
        if hasattr(self, "excludes"):
            self.distr.command_options["pbr"]["autodoc_exclude_modules"] = (
                'setup.cfg',
                "fake_package.fake_private_module\n"
                "fake_package.unknown_module")
        if self.has_opt:
            options = self.distr.command_options["pbr"]
            options["autodoc_index_modules"] = ('setup.cfg', self.autodoc)
    def test_build_doc(self):
        # Autodoc stub files appear iff autodoc_index_modules is truthy,
        # and excluded modules never get stubs.
        build_doc = packaging.LocalBuildDoc(self.distr)
        build_doc.run()
        self.assertTrue(
            os.path.exists("api/autoindex.rst") == self.has_autodoc)
        self.assertTrue(
            os.path.exists(
                "api/fake_package.fake_module.rst") == self.has_autodoc)
        if not self.has_autodoc or hasattr(self, "excludes"):
            assertion = self.assertFalse
        else:
            assertion = self.assertTrue
        assertion(
            os.path.exists(
                "api/fake_package.fake_private_module.rst"))
    def test_builders_config(self):
        # Default builders are html+man; an explicit string overrides them.
        build_doc = packaging.LocalBuildDoc(self.distr)
        build_doc.finalize_options()
        self.assertEqual(2, len(build_doc.builders))
        self.assertIn('html', build_doc.builders)
        self.assertIn('man', build_doc.builders)
        build_doc = packaging.LocalBuildDoc(self.distr)
        build_doc.builders = ''
        build_doc.finalize_options()
        self.assertEqual('', build_doc.builders)
        build_doc = packaging.LocalBuildDoc(self.distr)
        build_doc.builders = 'man'
        build_doc.finalize_options()
        self.assertEqual(1, len(build_doc.builders))
        self.assertIn('man', build_doc.builders)
        build_doc = packaging.LocalBuildDoc(self.distr)
        build_doc.builders = 'html,man,doctest'
        build_doc.finalize_options()
        self.assertIn('html', build_doc.builders)
        self.assertIn('man', build_doc.builders)
        self.assertIn('doctest', build_doc.builders)
    def test_cmd_builder_override(self):
        if self.has_opt:
            self.distr.command_options["pbr"] = {
                "autodoc_index_modules": ('setup.cfg', self.autodoc)
            }
        self.distr.command_options["build_sphinx"]["builder"] = (
            "command line", "non-existing-builder")
        build_doc = packaging.LocalBuildDoc(self.distr)
        self.assertNotIn('non-existing-builder', build_doc.builders)
        self.assertIn('html', build_doc.builders)
        # process command line options which should override config
        build_doc.finalize_options()
        self.assertIn('non-existing-builder', build_doc.builders)
        self.assertNotIn('html', build_doc.builders)
    def test_cmd_builder_override_multiple_builders(self):
        if self.has_opt:
            self.distr.command_options["pbr"] = {
                "autodoc_index_modules": ('setup.cfg', self.autodoc)
            }
        self.distr.command_options["build_sphinx"]["builder"] = (
            "command line", "builder1,builder2")
        build_doc = packaging.LocalBuildDoc(self.distr)
        build_doc.finalize_options()
        self.assertEqual(["builder1", "builder2"], build_doc.builders)
class ParseRequirementsTest(base.BaseTestCase):
    """Test packaging.parse_requirements: egg URL translation, index-line
    removal, comments, env-var overrides, and per-Python-version files."""
    def setUp(self):
        super(ParseRequirementsTest, self).setUp()
        (fd, self.tmp_file) = tempfile.mkstemp(prefix='openstack',
                                               suffix='.setup')
    def test_parse_requirements_normal(self):
        with open(self.tmp_file, 'w') as fh:
            fh.write("foo\nbar")
        self.assertEqual(['foo', 'bar'],
                         packaging.parse_requirements([self.tmp_file]))
    def test_parse_requirements_with_git_egg_url(self):
        # -e git URLs collapse to their #egg= project name.
        with open(self.tmp_file, 'w') as fh:
            fh.write("-e git://foo.com/zipball#egg=bar")
        self.assertEqual(['bar'],
                         packaging.parse_requirements([self.tmp_file]))
    def test_parse_requirements_with_versioned_git_egg_url(self):
        # A versioned egg name becomes a >= version specifier.
        with open(self.tmp_file, 'w') as fh:
            fh.write("-e git://foo.com/zipball#egg=bar-1.2.4")
        self.assertEqual(['bar>=1.2.4'],
                         packaging.parse_requirements([self.tmp_file]))
    def test_parse_requirements_with_http_egg_url(self):
        with open(self.tmp_file, 'w') as fh:
            fh.write("https://foo.com/zipball#egg=bar")
        self.assertEqual(['bar'],
                         packaging.parse_requirements([self.tmp_file]))
    def test_parse_requirements_with_versioned_http_egg_url(self):
        with open(self.tmp_file, 'w') as fh:
            fh.write("https://foo.com/zipball#egg=bar-4.2.1")
        self.assertEqual(['bar>=4.2.1'],
                         packaging.parse_requirements([self.tmp_file]))
    def test_parse_requirements_removes_index_lines(self):
        # pip options such as -f/--find-links are not requirements.
        with open(self.tmp_file, 'w') as fh:
            fh.write("-f foobar")
        self.assertEqual([], packaging.parse_requirements([self.tmp_file]))
    def test_parse_requirements_override_with_env(self):
        with open(self.tmp_file, 'w') as fh:
            fh.write("foo\nbar")
        self.useFixture(
            fixtures.EnvironmentVariable('PBR_REQUIREMENTS_FILES',
                                         self.tmp_file))
        self.assertEqual(['foo', 'bar'],
                         packaging.parse_requirements())
    def test_parse_requirements_override_with_env_multiple_files(self):
        # Missing files in the env list are skipped, not fatal.
        with open(self.tmp_file, 'w') as fh:
            fh.write("foo\nbar")
        self.useFixture(
            fixtures.EnvironmentVariable('PBR_REQUIREMENTS_FILES',
                                         "no-such-file," + self.tmp_file))
        self.assertEqual(['foo', 'bar'],
                         packaging.parse_requirements())
    def test_get_requirement_from_file_empty(self):
        actual = packaging.get_reqs_from_files([])
        self.assertEqual([], actual)
    def test_parse_requirements_with_comments(self):
        with open(self.tmp_file, 'w') as fh:
            fh.write("# this is a comment\nfoobar\n# and another one\nfoobaz")
        self.assertEqual(['foobar', 'foobaz'],
                         packaging.parse_requirements([self.tmp_file]))
    def test_parse_requirements_python_version(self):
        # requirements-pyN.txt matching the running major version is used.
        with open("requirements-py%d.txt" % sys.version_info[0],
                  "w") as fh:
            fh.write("# this is a comment\nfoobar\n# and another one\nfoobaz")
        self.assertEqual(['foobar', 'foobaz'],
                         packaging.parse_requirements())
    def test_parse_requirements_right_python_version(self):
        # A file for a different major version must be ignored.
        with open("requirements-py1.txt", "w") as fh:
            fh.write("thisisatrap")
        with open("requirements-py%d.txt" % sys.version_info[0],
                  "w") as fh:
            fh.write("# this is a comment\nfoobar\n# and another one\nfoobaz")
        self.assertEqual(['foobar', 'foobaz'],
                         packaging.parse_requirements())
class ParseDependencyLinksTest(base.BaseTestCase):
    """Test packaging.parse_dependency_links extraction from a
    requirements-style file."""
    def setUp(self):
        super(ParseDependencyLinksTest, self).setUp()
        (fd, self.tmp_file) = tempfile.mkstemp(prefix="openstack",
                                               suffix=".setup")
    def test_parse_dependency_normal(self):
        # Plain URLs pass through unchanged.
        with open(self.tmp_file, "w") as fh:
            fh.write("http://test.com\n")
        self.assertEqual(
            ["http://test.com"],
            packaging.parse_dependency_links([self.tmp_file]))
    def test_parse_dependency_with_git_egg_url(self):
        # -e editable prefix is stripped; the git URL itself is kept.
        with open(self.tmp_file, "w") as fh:
            fh.write("-e git://foo.com/zipball#egg=bar")
        self.assertEqual(
            ["git://foo.com/zipball#egg=bar"],
            packaging.parse_dependency_links([self.tmp_file]))
def load_tests(loader, in_tests, pattern):
    """unittest load_tests hook: expand testscenarios scenario lists into
    one concrete test case per (test, scenario) pair."""
    return testscenarios.load_tests_apply_scenarios(loader, in_tests, pattern)
| 39.957647 | 78 | 0.627017 |
073f1ee70f94f5c4c9678cd4ec9c5a2d7b4f6541 | 4,836 | py | Python | client.py | Mitshah4343/WhatsChat | e7dcb514cbe608b81a74c99b2c14f6c0a2fd0f59 | [
"MIT"
] | null | null | null | client.py | Mitshah4343/WhatsChat | e7dcb514cbe608b81a74c99b2c14f6c0a2fd0f59 | [
"MIT"
] | null | null | null | client.py | Mitshah4343/WhatsChat | e7dcb514cbe608b81a74c99b2c14f6c0a2fd0f59 | [
"MIT"
] | null | null | null | import gtk, socket, select, thread, sys, helper, pango, gobject
from random import random
gobject.threads_init()
def send_msg(text):
    # Push raw text to the chat server over the module-level socket `serv`
    # (created in the __main__ block at the bottom of this file).
    serv.send(text)
def recv_msg(name):
    """Background-thread body: receive chat traffic and render it into the
    GTK text buffer until the server disconnects or requests a name change.

    Protocol sentinels (must match the server):
      "@$__-_Change--name%$$$" -- chosen nickname rejected; quit.
      "@$__-_fa--lse%$$$"      -- server-side disconnect notification.
    Any other payload is "sender: message"; the sender prefix is colored
    with a per-name text tag.
    """
    global msg, flag, client_obj
    while not flag:
        socket_list = [sys.stdin, serv]
        read_sockets,write_sockets,error_sockets = select.select(socket_list,[],[])
        for sock in read_sockets:
            if sock==serv:
                try:
                    data = serv.recv(4096)
                    if data=="@$__-_Change--name%$$$":
                        flag=1
                        helper.errName()
                        gtk.main_quit()
                    elif data=="@$__-_fa--lse%$$$":
                        enditer = client_obj.textbuffer.get_end_iter()
                        client_obj.textbuffer.insert(enditer, 'Disconnected from chat server' + '\n')
                        # Scroll the view to the newest line.
                        adj = client_obj.sw.get_vadjustment()
                        if adj!=None:
                            adj.set_value(adj.upper - adj.page_size - 1)
                        flag=1
                    else:
                        ind = data.find(':')
                        if ind==-1:
                            # No "name:" prefix -- plain server broadcast.
                            enditer = client_obj.textbuffer.get_end_iter()
                            client_obj.textbuffer.insert(enditer, data + '\n')
                            adj = client_obj.sw.get_vadjustment()
                            if adj!=None:
                                adj.set_value(adj.upper - adj.page_size - 1)
                        else:
                            # Lazily create one colored bold tag per sender.
                            name = data[:ind]
                            if client_obj.tags.has_key(name)==False:
                                client_obj.tags[name]=client_obj.textbuffer.create_tag(name,foreground=gtk.gdk.Color(random()/2,random()/2,random()/2), weight=pango.WEIGHT_BOLD, right_margin=50)
                            enditer = client_obj.textbuffer.get_end_iter()
                            client_obj.textbuffer.insert_with_tags(enditer, '%s'%name, client_obj.tags[name])
                            enditer = client_obj.textbuffer.get_end_iter()
                            client_obj.textbuffer.insert(enditer, data[ind:] + '\n')
                            adj = client_obj.sw.get_vadjustment()
                            adj.set_value( adj.upper - adj.page_size - 1)
                except:
                    # Any socket error is treated as a disconnect.
                    enditer = client_obj.textbuffer.get_end_iter()
                    client_obj.textbuffer.insert(enditer, 'Disconnected from chat server' + '\n')
                    adj = client_obj.sw.get_vadjustment()
                    if adj!=None:
                        adj.set_value(adj.upper - adj.page_size - 1)
                    flag=1
# Class for GUI
class client:
    """GTK chat window: a scrolling message view, an entry box, and
    Send/Exit buttons. Outbound messages go through the module-level
    send_msg(); inbound rendering is done by the recv_msg() thread."""
    def exit(*args):
        # Used both as a GTK signal callback and as a button handler, so
        # it takes *args instead of an explicit self.
        # Notify the server we are leaving before tearing down the UI.
        send_msg("@$__-_fa--lse%$$$.")
        gtk.main_quit()
        sys.exit()
    def send(self, widget):
        """Echo the entry text locally (right-aligned italic) and transmit it."""
        global flag
        txt = self.entry.get_text()
        self.entry.set_text('')
        if txt=='':
            return
        enditer = self.textbuffer.get_end_iter()
        self.textbuffer.insert_with_tags(enditer, txt + '\n', self.tag1)
        # Keep the view scrolled to the newest message.
        adj = self.sw.get_vadjustment()
        if adj!=None:
            adj.set_value( adj.upper - adj.page_size - 1)
        if not flag:
            try:
                send_msg(txt)
            except:
                # Best-effort: if the socket is gone, keep the local echo.
                pass
    def __init__(self):
        """Build the window, read-only message view, entry box and buttons."""
        self.tags = {}
        self.window = gtk.Window(gtk.WINDOW_TOPLEVEL)
        self.window.set_resizable(True)
        self.window.set_default_size(300, 360)
        self.window.set_title("WhatChat")
        self.window.set_border_width(10)
        self.window.connect("destroy", self.exit)
        self.window.connect("delete_event", self.exit)
        box2 = gtk.VBox(False, 0)
        box1 = gtk.VBox(False, 0)
        box1.set_border_width(10)
        box2.pack_start(box1, True, True, 0)
        self.sw = gtk.ScrolledWindow()
        self.sw.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC)
        self.textview = gtk.TextView()
        self.textview.modify_base(gtk.STATE_NORMAL, gtk.gdk.Color(0.99, 0.94, 0.96))
        self.textview.set_editable(False)
        self.textview.set_cursor_visible(False)
        self.textview.set_wrap_mode(gtk.WRAP_WORD_CHAR)
        self.textbuffer = self.textview.get_buffer()
        # tag1 styles our own outgoing messages.
        self.tag1 = self.textbuffer.create_tag('tag1', justification = gtk.JUSTIFY_RIGHT, style=pango.STYLE_ITALIC)
        self.sw.add(self.textview)
        box1.pack_start(self.sw)
        separator = gtk.HSeparator()
        box1.pack_start(separator, False, True, 0)
        self.entry = gtk.Entry(max=0)
        # Enter key sends, same as the Send button.
        self.entry.connect('activate', self.send)
        box1.pack_start(self.entry, False, False, 0)
        separator2 = gtk.HSeparator()
        box1.pack_start(separator2, False, True, 0)
        h1 = gtk.HBox(False, 0)
        h1.set_border_width(10)
        b1 = gtk.Button("Send")
        b1.connect("clicked", self.send)
        h1.pack_start(b1, True, True, 0)
        b2 = gtk.Button("Exit")
        b2.connect("clicked", self.exit)
        h1.pack_start(b2, True, True, 0)
        box2.pack_start(h1, False, False, 0)
        self.window.add(box2)
        self.window.show_all()
    def main(self):
        """Start the receiver thread, then enter the GTK main loop."""
        thread.start_new_thread (recv_msg, ("Thread-1", ))
        gtk.main()
gtk.main()
if __name__ == "__main__":
    # flag is the shared shutdown signal between the UI and recv thread.
    flag = 0
    name = ''
    # Prompt until a nickname is supplied; False means the dialog was closed.
    while name=='':
        name = helper.get_name()
        if name==False:
            sys.exit()
    # Prompt until both host and port are supplied.
    host, port = helper.get_connection_details()
    while host=='' or port=='':
        host, port = helper.get_connection_details()
        if host==False and port==False:
            sys.exit()
    serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    serv.settimeout(2)
    try :
        serv.connect((host, int(port)))
    except:
        helper.err()
        print "Unable to connect"
        sys.exit()
    # First message on the wire is our nickname.
    send_msg(name)
    client_obj = client()
    client_obj.textbuffer.insert_at_cursor('Connected to remote host.' + '\n')
    client_obj.main()
| 30.037267 | 170 | 0.679901 |
912a58e501e985f1b12e3fb19ba3dce9cb5b90cd | 1,256 | py | Python | daterange_chunker/datetuils.py | OdinTech3/daterange-chunker | 7d601bc10ba442750678f9ad1fe78b5f2ca061c5 | [
"MIT"
] | null | null | null | daterange_chunker/datetuils.py | OdinTech3/daterange-chunker | 7d601bc10ba442750678f9ad1fe78b5f2ca061c5 | [
"MIT"
] | null | null | null | daterange_chunker/datetuils.py | OdinTech3/daterange-chunker | 7d601bc10ba442750678f9ad1fe78b5f2ca061c5 | [
"MIT"
] | null | null | null | import datetime as dt
from typing import Generator, Tuple
# A generator of (range_start, range_end) datetime pairs, as yielded by chunk().
Chunks = Generator[Tuple[dt.datetime, dt.datetime], None, None]
def calc_delta(startdate: dt.date, enddate: dt.date, no_of_ranges: int) -> dt.timedelta:
    """Return the step size that splits [startdate, enddate] into
    ``no_of_ranges`` equally sized ranges."""
    return (enddate - startdate) / no_of_ranges
def chunk(startdate: dt.date, enddate: dt.date, delta=dt.timedelta(days=1)) -> Chunks:
    """
    Split a date range into a certain amount of equal sized chunks,
    evenly spaced by `delta`.

    Yields (window_start, window_end) datetime pairs; a trailing remainder
    shorter than `delta` is not yielded.
    """
    window_start = to_datetime(startdate)
    limit = to_datetime(enddate)
    while True:
        window_end = window_start + delta
        if window_end > limit:
            break
        yield window_start, window_end
        window_start = window_end
def is_delta_neg(startdate: dt.date, enddate: dt.date) -> bool:
    """Return True when the span from `startdate` to `enddate` is negative,
    i.e. `enddate` falls strictly before `startdate`."""
    return (enddate - startdate) < dt.timedelta(0)
def is_delta_gte_aday(delta: dt.timedelta) -> bool:
    """Return True when `delta` spans at least one full day."""
    one_day = dt.timedelta(days=1)
    return delta >= one_day
def fmt_dates(dates, fmt="%Y-%m-%d %H:%M:%S") -> Tuple[str, ...]:
    """Format every date/datetime in `dates` with `fmt`; returns a tuple
    of formatted strings in the same order."""
    return tuple(d.strftime(fmt) for d in dates)
def to_datetime(d: dt.date) -> dt.datetime:
    """Promote a date to a datetime at midnight (00:00:00)."""
    midnight = dt.time.min
    return dt.datetime.combine(d, midnight)
| 26.723404 | 88 | 0.679936 |
c131e7d3e9e40eb36f2c8b2168fcffdfbcc60573 | 102 | py | Python | HACKERRANK_Numpy/zeroes&one.py | StefaniaSferragatta/ADM2020-HW1 | 8f85ac1c8dd4bff52c5c17987c9e96b209a93830 | [
"MIT"
] | null | null | null | HACKERRANK_Numpy/zeroes&one.py | StefaniaSferragatta/ADM2020-HW1 | 8f85ac1c8dd4bff52c5c17987c9e96b209a93830 | [
"MIT"
] | null | null | null | HACKERRANK_Numpy/zeroes&one.py | StefaniaSferragatta/ADM2020-HW1 | 8f85ac1c8dd4bff52c5c17987c9e96b209a93830 | [
"MIT"
] | null | null | null | import numpy
x = list(map(int, input().split()))
print(numpy.zeros(x, int))
print(numpy.ones(x, int)) | 20.4 | 35 | 0.676471 |
fa0fc4aac57f47746750a5dd21b45421a8be6df5 | 4,619 | py | Python | qa327_test/test_whitebox.py | lucidorangee/CISC-327-Course-Project | b86fe58e809f10a90134cbe33202c9e68a46d13b | [
"MIT"
] | null | null | null | qa327_test/test_whitebox.py | lucidorangee/CISC-327-Course-Project | b86fe58e809f10a90134cbe33202c9e68a46d13b | [
"MIT"
] | null | null | null | qa327_test/test_whitebox.py | lucidorangee/CISC-327-Course-Project | b86fe58e809f10a90134cbe33202c9e68a46d13b | [
"MIT"
] | null | null | null | import pytest
from seleniumbase import BaseCase
from qa327_test.conftest import base_url
from unittest.mock import patch
from qa327.models import db, User, TicketInfo
from werkzeug.security import generate_password_hash, check_password_hash
"""
This file defines unit tests for the frontend homepage.
The tests will only test the frontend portion of the program, by patching the backend to return
specific values. For example:
@patch('qa327.backend.get_user', return_value=test_user)
Will patch the backend get_user function (within the scope of the current test case)
so that it return 'test_user' instance below rather than reading
the user from the database.
Annotate @patch before unit tests can mock backend methods (for that testing function)
"""
class TestRStatment(BaseCase):
    """End-to-end Selenium flow covering registration, login error paths,
    selling (expired and valid tickets), and a cross-user buy that removes
    the ticket from the listing."""
    # we need users to interact if we want to cover the buy deletion so we are just going to write all the tests as
    # one big test. This test also functions as a good integration test (we could write a fixture but we have to test
    # register and login anyways
    @pytest.mark.timeout(60)
    def test_user_interact(self, *_):
        # open logout page to invalidate any logged in sessions may exist
        self.open(base_url + '/logout')
        # open the register page
        self.open(base_url + '/register')
        # fill email and password
        self.type("#email", "login@gmail.com")
        self.type("#password", "Tester327!")
        self.type("#password2", "Tester327!")
        self.type("#name", "t1")
        # click enter button
        self.click('input[type="submit"]')
        # make sure it shows proper error message
        self.assert_element("#message")
        self.assert_text("User name has to be longer than 2 characters and less than 20 characters.", "#message")
        # proper input
        self.type("#email", "login@gmail.com")
        self.type("#password", "Tester327!")
        self.type("#password2", "Tester327!")
        self.type("#name", "TestUser")
        self.click('input[type="submit"]')
        # register other person
        self.open(base_url + '/register')
        self.type("#email", "login2@gmail.com")
        self.type("#password", "Tester327!")
        self.type("#password2", "Tester327!")
        self.type("#name", "TestUser2")
        self.click('input[type="submit"]')
        # test login error code that wasn't covered
        self.open(base_url + '/login')
        self.type("#email", "login@gmail.com")
        # an incorrect password
        self.type("#password", "Tesr327!")
        self.click('input[type="submit"]')
        self.assert_text("email/password combination incorrect.", "#message")
        self.type("#email", "login@gmail.com")
        # an incorrect password
        self.type("#password", "Tester327!")
        self.click('input[type="submit"]')
        # this test is currently unreachable but should be added if we decide the ticket name should be longer than 1
        '''
        # now on the home page of user1
        self.type("#name_sell", "")
        self.type("#quantity_sell", "1")
        self.type("#price_sell", "10")
        self.type("#expdate_sell", "20210901")
        # click sell button
        self.click('input[value="Sell"]')
        # assert empty error
        self.assert_text("The name of the tickets has to contain at least once character", "#sell_message")
        '''
        # now on the home page of user1: selling an expired ticket must fail
        self.type("#name_sell", "userTicket")
        self.type("#quantity_sell", "1")
        self.type("#price_sell", "10")
        self.type("#expdate_sell", "20200901")
        # click sell button
        self.click('input[value="Sell"]')
        self.assert_text("The new tickets must not be expired", "#sell_message")
        # now do a positive sell
        self.type("#name_sell", "userTicket")
        self.type("#quantity_sell", "1")
        self.type("#price_sell", "10")
        self.type("#expdate_sell", "20210901")
        # click sell button
        self.click('input[value="Sell"]')
        # logout
        self.open(base_url + '/logout')
        self.open(base_url + '/login')
        self.type("#email", "login2@gmail.com")
        self.type("#password", "Tester327!")
        self.click('input[type="submit"]')
        # now logged in as user 2 and we run code for delete ticket
        self.type("#name_buy", "userTicket")
        self.type("#quantity_buy", "1")
        # click buy button
        self.click('input[value="Buy"]')
        # buying the full quantity must remove the listing
        self.assert_text_not_visible("userTicket", "#tickets")
        # open logout (for cleanup)
        self.open(base_url + '/logout')
| 42.376147 | 117 | 0.633037 |
adad3e63f11fe9681cc109cf5e7f6fbccb7d1e1f | 3,940 | py | Python | Buffer.py | harmeeksinghbedi/rl-learning | 5e8dbf9b0b1685702444836adb2c5e6ab118984f | [
"Apache-2.0"
] | null | null | null | Buffer.py | harmeeksinghbedi/rl-learning | 5e8dbf9b0b1685702444836adb2c5e6ab118984f | [
"Apache-2.0"
] | null | null | null | Buffer.py | harmeeksinghbedi/rl-learning | 5e8dbf9b0b1685702444836adb2c5e6ab118984f | [
"Apache-2.0"
] | null | null | null | import gym
import tensorflow as tf
from tensorflow.keras import layers
import numpy as np
import matplotlib.pyplot as plt
import Actor
import Critic
class Buffer:
    """Experience-replay buffer for DDPG-style actor/critic training.

    Transitions (S, A, R, S') are kept in fixed-size numpy ring buffers;
    `process_batch` samples a random mini-batch and performs one gradient
    step for the critic and the actor, and `update_target` soft-updates
    the target networks (Polyak averaging with rate `tau`).
    """

    def __init__(self, num_states, num_actions, buffer_capacity=100000, batch_size=64):
        # Number of "experiences" to store at max.
        self.buffer_capacity = buffer_capacity
        # Num of tuples to train on per gradient step.
        self.batch_size = batch_size
        # Counts how many times remember_experience() was called
        # (may exceed buffer_capacity; used for ring indexing).
        self.buffer_counter = 0
        # Instead of a list of tuples, one numpy array per tuple element.
        self.state_buffer = np.zeros((self.buffer_capacity, num_states))
        self.action_buffer = np.zeros((self.buffer_capacity, num_actions))
        self.reward_buffer = np.zeros((self.buffer_capacity, 1))
        self.next_state_buffer = np.zeros((self.buffer_capacity, num_states))

    # stores (S,A,R,S') observations
    def remember_experience(self, observation_tuple):
        """Record one (state, action, reward, next_state) transition.

        When the buffer is full, the oldest entry is overwritten.
        """
        index = self.buffer_counter % self.buffer_capacity
        self.state_buffer[index] = observation_tuple[0]
        self.action_buffer[index] = observation_tuple[1]
        self.reward_buffer[index] = observation_tuple[2]
        # BUGFIX: the next state is element 3 of (S, A, R, S'); it was
        # previously stored from element 2, i.e. the reward was broadcast
        # into next_state_buffer and S' was lost.
        self.next_state_buffer[index] = observation_tuple[3]
        self.buffer_counter += 1

    def process_batch(self, gamma, critic_model, target_critic_model, actor_model, target_actor_model, critic_optimizer, actor_optimizer):
        """Sample a random mini-batch and run one critic + one actor update.

        :param gamma: discount factor for the TD target.
        """
        record_range = min(self.buffer_counter, self.buffer_capacity)
        batch_indexes = np.random.choice(record_range, self.batch_size)
        # Convert the random sample to tensors.
        state_buffer = tf.convert_to_tensor(self.state_buffer[batch_indexes])
        action_buffer = tf.convert_to_tensor(self.action_buffer[batch_indexes])
        reward_buffer = tf.convert_to_tensor(self.reward_buffer[batch_indexes])
        reward_buffer = tf.cast(reward_buffer, dtype=tf.float32)
        next_state_buffer = tf.convert_to_tensor(self.next_state_buffer[batch_indexes])
        # Train the critic: minimize MSE against the TD target
        # y = r + gamma * Q_target(s', actor_target(s')).
        with tf.GradientTape() as tape:
            a_t1 = target_actor_model(next_state_buffer)
            y_t = reward_buffer + gamma * target_critic_model([next_state_buffer, a_t1])
            # (removed a dead duplicate evaluation of the target critic here)
            q_t = critic_model([state_buffer, action_buffer])
            critic_loss = tf.math.reduce_mean(tf.math.square(y_t - q_t))
        critic_grad = tape.gradient(critic_loss, critic_model.trainable_variables)
        critic_optimizer.apply_gradients(zip(critic_grad, critic_model.trainable_variables))
        # Train the actor: ascend the critic's value of the actor's actions.
        with tf.GradientTape() as tape:
            actions = actor_model(state_buffer)
            critic_value = critic_model([state_buffer, actions])
            # `-value` because we maximize the critic's estimate.
            actor_loss = -tf.math.reduce_mean(critic_value)
        actor_grad = tape.gradient(actor_loss, actor_model.trainable_variables)
        actor_optimizer.apply_gradients(zip(actor_grad, actor_model.trainable_variables))

    # This updates target parameters slowly,
    # based on rate `tau`, which is much less than one.
    def update_target(self, tau, critic_model, target_critic_model, actor_model, target_actor_model):
        """Soft-update both target networks: w_target <- tau*w + (1-tau)*w_target."""
        self._blend_weights(tau, critic_model, target_critic_model)
        self._blend_weights(tau, actor_model, target_actor_model)

    @staticmethod
    def _blend_weights(tau, model, target_model):
        # Polyak averaging of one model's weights into its target copy.
        target_variables = target_model.weights
        new_weights = [variable * tau + target_variables[i] * (1 - tau)
                       for i, variable in enumerate(model.weights)]
        target_model.set_weights(new_weights)
| 45.287356 | 137 | 0.726396 |
39385d79c9d6e94010a34f402587e19f5141f6b9 | 1,687 | py | Python | v1/tasks/signed_requests.py | Hristijan95/Validator | aae7d0ae8a8ed5576ff721f936c7d916d113d784 | [
"MIT"
] | null | null | null | v1/tasks/signed_requests.py | Hristijan95/Validator | aae7d0ae8a8ed5576ff721f936c7d916d113d784 | [
"MIT"
] | null | null | null | v1/tasks/signed_requests.py | Hristijan95/Validator | aae7d0ae8a8ed5576ff721f936c7d916d113d784 | [
"MIT"
] | null | null | null | import logging
from celery import shared_task
from nacl.encoding import HexEncoder
from nacl.signing import SigningKey
from thenewboston.environment.environment_variables import get_environment_variable
from thenewboston.utils.format import format_address
from thenewboston.utils.network import patch, post
from thenewboston.utils.signed_requests import generate_signed_request
logger = logging.getLogger('thenewboston')
@shared_task
def send_signed_patch_request(*, data, ip_address, port, protocol, url_path):
    """Sign `data` with the network signing key and PATCH it to the node."""
    key_hex = get_environment_variable('NETWORK_SIGNING_KEY')
    request_body = generate_signed_request(
        data=data,
        nid_signing_key=SigningKey(key_hex, encoder=HexEncoder),
    )
    address = format_address(ip_address=ip_address, port=port, protocol=protocol)
    try:
        patch(url=f'{address}{url_path}', body=request_body)
    except Exception as e:
        # Network failures are logged, not propagated, since this runs as a task.
        logger.exception(e)
@shared_task
def send_signed_post_request(*, data, ip_address, port, protocol, url_path):
    """Sign `data` with the network signing key and POST it to the node."""
    key_hex = get_environment_variable('NETWORK_SIGNING_KEY')
    request_body = generate_signed_request(
        data=data,
        nid_signing_key=SigningKey(key_hex, encoder=HexEncoder),
    )
    address = format_address(ip_address=ip_address, port=port, protocol=protocol)
    try:
        post(url=f'{address}{url_path}', body=request_body)
    except Exception as e:
        # Network failures are logged, not propagated, since this runs as a task.
        logger.exception(e)
| 29.086207 | 86 | 0.750445 |
a97f54d6cac1ea91f05cb3dc68729f5b68df7c9e | 6,979 | py | Python | python/paddle/fluid/tests/unittests/test_compare_op.py | mamingjie-China/Paddle | 91d2f1e3e6e51142a74a43d0673a8feff056c39b | [
"Apache-2.0"
] | 3 | 2021-06-11T06:48:10.000Z | 2021-09-02T10:18:06.000Z | python/paddle/fluid/tests/unittests/test_compare_op.py | Janayt/Paddle | 68c6160e639be38c57a7dd831f7b841b33e92676 | [
"Apache-2.0"
] | null | null | null | python/paddle/fluid/tests/unittests/test_compare_op.py | Janayt/Paddle | 68c6160e639be38c57a7dd831f7b841b33e92676 | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import op_test
import unittest
import numpy
import numpy as np
import paddle
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid import Program, program_guard
def create_test_class(op_type, typename, callback):
    """Generate and register an OpTest subclass for one (op, dtype) pair.

    The generated class checks the compare op's output against the numpy
    `callback` and verifies the op rejects invalid argument types. It is
    registered under ``<op_type>_<typename>`` in this module's globals so
    the unittest runner discovers it.
    """
    class Cls(op_test.OpTest):
        def setUp(self):
            # Random operands of the dtype under test; expected result via numpy.
            a = numpy.random.random(size=(10, 7)).astype(typename)
            b = numpy.random.random(size=(10, 7)).astype(typename)
            c = callback(a, b)
            self.inputs = {'X': a, 'Y': b}
            self.outputs = {'Out': c}
            self.op_type = op_type

        def test_output(self):
            self.check_output()

        def test_errors(self):
            with program_guard(Program(), Program()):
                x = fluid.layers.data(name='x', shape=[2], dtype='int32')
                y = fluid.layers.data(name='y', shape=[2], dtype='int32')
                # int16 operand: used below to trigger a dtype-mismatch error.
                a = fluid.layers.data(name='a', shape=[2], dtype='int16')
                if self.op_type == "less_than":
                    # less_than's extra `force_cpu` flag must be a bool, not int.
                    self.assertRaises(
                        TypeError,
                        fluid.layers.less_than,
                        x=x,
                        y=y,
                        force_cpu=1)
                # Resolve the op under test by name.
                op = eval("fluid.layers.%s" % self.op_type)
                # `cond` must be a Variable, and both operands must share dtype.
                self.assertRaises(TypeError, op, x=x, y=y, cond=1)
                self.assertRaises(TypeError, op, x=x, y=a)
                self.assertRaises(TypeError, op, x=a, y=y)

    # Give the class a unique name and export it for test discovery.
    cls_name = "{0}_{1}".format(op_type, typename)
    Cls.__name__ = cls_name
    globals()[cls_name] = Cls
# Instantiate one OpTest subclass per (comparison op, dtype) combination;
# each call registers the class in module globals for test discovery.
for _type_name in {'float32', 'float64', 'int32', 'int64'}:
    create_test_class('less_than', _type_name, lambda _a, _b: _a < _b)
    create_test_class('less_equal', _type_name, lambda _a, _b: _a <= _b)
    create_test_class('greater_than', _type_name, lambda _a, _b: _a > _b)
    create_test_class('greater_equal', _type_name, lambda _a, _b: _a >= _b)
    create_test_class('equal', _type_name, lambda _a, _b: _a == _b)
    create_test_class('not_equal', _type_name, lambda _a, _b: _a != _b)
def create_paddle_case(op_type, callback):
    """Generate and register a unittest.TestCase for one `paddle.*` compare API.

    The generated class checks the static-graph API output against the numpy
    `callback`, broadcasting behaviour, and the `name=` attribute handling.
    It is registered as ``TestCase_<op_type>`` in this module's globals.
    """
    class PaddleCls(unittest.TestCase):
        def setUp(self):
            self.op_type = op_type
            self.input_x = np.array([1, 2, 3, 4]).astype(np.int64)
            self.input_y = np.array([1, 3, 2, 4]).astype(np.int64)
            # Expected result computed with plain numpy.
            self.real_result = callback(self.input_x, self.input_y)
            self.place = fluid.CPUPlace()
            if core.is_compiled_with_cuda():
                self.place = paddle.CUDAPlace(0)

        def test_api(self):
            # Same-shape operands through the static-graph executor.
            with program_guard(Program(), Program()):
                x = fluid.data(name='x', shape=[4], dtype='int64')
                y = fluid.data(name='y', shape=[4], dtype='int64')
                # Resolve the paddle-level API under test by name.
                op = eval("paddle.%s" % (self.op_type))
                out = op(x, y)
                exe = fluid.Executor(self.place)
                res, = exe.run(feed={"x": self.input_x,
                                     "y": self.input_y},
                               fetch_list=[out])
                self.assertEqual((res == self.real_result).all(), True)

        def test_broadcast_api_1(self):
            # (1,2,1,3) vs (1,2,3): the op must broadcast like numpy does.
            with program_guard(Program(), Program()):
                x = paddle.nn.data(name='x', shape=[1, 2, 1, 3], dtype='int32')
                y = paddle.nn.data(name='y', shape=[1, 2, 3], dtype='int32')
                op = eval("paddle.%s" % (self.op_type))
                out = op(x, y)
                exe = paddle.Executor(self.place)
                input_x = np.arange(1, 7).reshape((1, 2, 1, 3)).astype(np.int32)
                input_y = np.arange(0, 6).reshape((1, 2, 3)).astype(np.int32)
                real_result = callback(input_x, input_y)
                res, = exe.run(feed={"x": input_x,
                                     "y": input_y},
                               fetch_list=[out])
                self.assertEqual((res == real_result).all(), True)

        def test_attr_name(self):
            # A user-supplied `name=` must show up in the output Variable name.
            with program_guard(Program(), Program()):
                x = fluid.layers.data(name='x', shape=[4], dtype='int32')
                y = fluid.layers.data(name='y', shape=[4], dtype='int32')
                op = eval("paddle.%s" % (self.op_type))
                out = op(x=x, y=y, name="name_%s" % (self.op_type))
                self.assertEqual("name_%s" % (self.op_type) in out.name, True)

    # Give the class a unique name and export it for test discovery.
    cls_name = "TestCase_{}".format(op_type)
    PaddleCls.__name__ = cls_name
    globals()[cls_name] = PaddleCls
# Generate the `paddle.*` compare-API test cases, one per operator.
create_paddle_case('less_than', lambda _a, _b: _a < _b)
create_paddle_case('less_equal', lambda _a, _b: _a <= _b)
create_paddle_case('greater_than', lambda _a, _b: _a > _b)
create_paddle_case('greater_equal', lambda _a, _b: _a >= _b)
create_paddle_case('equal', lambda _a, _b: _a == _b)
create_paddle_case('not_equal', lambda _a, _b: _a != _b)
class TestCompareOpError(unittest.TestCase):
    """Compare ops must reject operands that are not Variables."""

    def test_errors(self):
        with program_guard(Program(), Program()):
            var_x = fluid.layers.data(name='x', shape=[1], dtype="float32")
            # A raw LoDTensor is not a Variable, so greater_equal must raise.
            raw_y = fluid.create_lod_tensor(
                numpy.array([[-1]]), [[1]], fluid.CPUPlace())
            self.assertRaises(TypeError, fluid.layers.greater_equal, var_x, raw_y)
class API_TestElementwise_Equal(unittest.TestCase):
    """`paddle.equal` computes elementwise equality in static-graph mode."""

    def test_api(self):
        cases = [
            (np.array([3, 3], dtype="int32"),
             np.array([3, 2], dtype="int32"),
             np.array([True, False])),
            (np.array([3, 3], dtype="int32"),
             np.array([3, 3], dtype="int32"),
             np.array([True, True])),
        ]
        for label_np, limit_np, expected in cases:
            with fluid.program_guard(fluid.Program(), fluid.Program()):
                label = fluid.layers.assign(label_np)
                limit = fluid.layers.assign(limit_np)
                out = paddle.equal(x=label, y=limit)
                exe = fluid.Executor(fluid.CPUPlace())
                res, = exe.run(fetch_list=[out])
                self.assertEqual((res == expected).all(), True)
# Allow running this file directly: discover and run all generated tests.
if __name__ == '__main__':
    unittest.main()
| 42.815951 | 80 | 0.576444 |
2421e2befd2ee4a74070474ea2a2e24288ef1b46 | 1,142 | py | Python | workspace/src/rqt_common_plugins/rqt_launch/src/rqt_launch/status_indicator.py | migarstka/barc | deacfd974f251693d74b273d58d22e9fead2354f | [
"MIT"
] | 1 | 2019-01-10T22:07:07.000Z | 2019-01-10T22:07:07.000Z | workspace/src/rqt_common_plugins/rqt_launch/src/rqt_launch/status_indicator.py | migarstka/barc | deacfd974f251693d74b273d58d22e9fead2354f | [
"MIT"
] | null | null | null | workspace/src/rqt_common_plugins/rqt_launch/src/rqt_launch/status_indicator.py | migarstka/barc | deacfd974f251693d74b273d58d22e9fead2354f | [
"MIT"
] | null | null | null | #! /usr/bin/env python
from python_qt_binding.QtGui import QLabel, QStyle
import rospy
class StatusIndicator(QLabel):
    """Tiny icon label reflecting the run state of a launched process."""

    def __init__(self, *args):
        super(StatusIndicator, self).__init__(*args)
        self.set_stopped()

    def _show_icon(self, standard_icon):
        # Render one of Qt's stock icons at 16x16.
        self.setPixmap(self.style().standardIcon(standard_icon).pixmap(16))

    def set_running(self):
        self._show_icon(QStyle.SP_DialogApplyButton)

    def set_starting(self):
        rospy.logdebug('StatusIndicator.set_starting')
        self._show_icon(QStyle.SP_DialogResetButton)

    def set_stopping(self):
        """
        Show msg that the process is "stopping".
        cf. set_stopped()
        """
        self._show_icon(QStyle.SP_DialogResetButton)

    def set_stopped(self):
        """
        Show msg that the process is "stopped".
        cf. set_stopping()
        """
        self.setText(" ")

    def set_died(self):
        self._show_icon(QStyle.SP_MessageBoxCritical)
1bba88c73db33fe9a68126ef3f708725118029ee | 397 | py | Python | authorize/opa/exceptions.py | cedadev/django-authorizer | 88efa020defce86f50c72aab724cb5c4a34442f8 | [
"BSD-3-Clause"
] | 1 | 2021-02-03T15:40:10.000Z | 2021-02-03T15:40:10.000Z | authorize/opa/exceptions.py | cedadev/django-authorizer | 88efa020defce86f50c72aab724cb5c4a34442f8 | [
"BSD-3-Clause"
] | 10 | 2020-03-25T16:51:27.000Z | 2021-03-25T18:12:52.000Z | authorize/opa/exceptions.py | cedadev/django-authorizer | 88efa020defce86f50c72aab724cb5c4a34442f8 | [
"BSD-3-Clause"
] | 1 | 2021-07-01T09:51:19.000Z | 2021-07-01T09:51:19.000Z | """ OPA Authorization related exceptions. """
__author__ = "William Tucker"
__date__ = "2020-02-14"
__copyright__ = "Copyright 2020 United Kingdom Research and Innovation"
__license__ = "BSD - see LICENSE file in top-level package directory"
class OPAAuthorizationError(Exception):
    """Generic exception raised when a problem occurs while querying the
    OPA (Open Policy Agent) Authorization service.
    """
| 30.538462 | 72 | 0.753149 |
1b7a464733ae77c7a4a2652ead1433584cca7845 | 16,634 | py | Python | benchmarks/f3_wrong_hints_permutations/scaling_ltl_timed_transition_system/6-sender_receiver_2.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | 3 | 2021-04-23T23:29:26.000Z | 2022-03-23T10:00:30.000Z | benchmarks/f3_wrong_hints_permutations/scaling_ltl_timed_transition_system/6-sender_receiver_2.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | null | null | null | benchmarks/f3_wrong_hints_permutations/scaling_ltl_timed_transition_system/6-sender_receiver_2.py | EnricoMagnago/F3 | c863215c318d7d5f258eb9be38c6962cf6863b52 | [
"MIT"
] | 1 | 2021-11-17T22:02:56.000Z | 2021-11-17T22:02:56.000Z | from typing import FrozenSet
from collections import Iterable
from math import log, ceil
from mathsat import msat_term, msat_env
from mathsat import msat_make_constant, msat_declare_function
from mathsat import msat_get_integer_type, msat_get_rational_type, msat_get_bool_type
from mathsat import msat_make_and, msat_make_not, msat_make_or, msat_make_iff
from mathsat import msat_make_leq, msat_make_equal, msat_make_true
from mathsat import msat_make_number, msat_make_plus, msat_make_times
from pysmt.environment import Environment as PysmtEnv
import pysmt.typing as types
from ltl.ltl import TermMap, LTLEncoder
from utils import name_next, symb_to_next
from hint import Hint, Location
delta_name = "delta"
def decl_consts(menv: msat_env, name: str, c_type) -> tuple:
    """Declare the constant `name` plus its primed copy; return (curr, next)."""
    assert not name.startswith("_"), name
    curr = msat_make_constant(
        menv, msat_declare_function(menv, name, c_type))
    x_curr = msat_make_constant(
        menv, msat_declare_function(menv, name_next(name), c_type))
    return curr, x_curr
def make_enum(menv, v_name: str, enum_size: int):
    """Bit-blast an enum of `enum_size` values into ceil(log2) boolean consts.

    Returns (b_vars, vals, x_vals) where b_vars is the list of
    (curr, next) boolean constant pairs, and vals[i] / x_vals[i] are the
    conjunctions encoding "enum == i" over the current / next variables.
    """
    bool_type = msat_get_bool_type(menv)
    num_bits = ceil(log(enum_size, 2))
    # One (curr, next) boolean constant per bit of the encoding.
    b_vars = []
    for idx in range(num_bits):
        c_name = "{}{}".format(v_name, idx)
        b_vars.append(tuple(decl_consts(menv, c_name, bool_type)))
    vals = []
    x_vals = []
    for enum_val in range(enum_size):
        # Binary string of the value, zero-padded to num_bits.
        bit_val = format(enum_val, '0{}b'.format(num_bits))
        assert len(bit_val) == num_bits
        assert all(c in {'0', '1'} for c in bit_val)
        # For each bit (LSB first via reversed), take the variable or its
        # negation, for both the current and the primed copy.
        assign = [b_vars[idx] if c == '1' else
                  (msat_make_not(menv, b_vars[idx][0]),
                   msat_make_not(menv, b_vars[idx][1]))
                  for idx, c in enumerate(reversed(bit_val))]
        # Conjoin all bit literals into the "enum == enum_val" predicate.
        pred = assign[0][0]
        x_pred = assign[0][1]
        for it in assign[1:]:
            pred = msat_make_and(menv, pred, it[0])
            x_pred = msat_make_and(menv, x_pred, it[1])
        vals.append(pred)
        x_vals.append(x_pred)
    assert len(vals) == enum_size
    assert len(x_vals) == enum_size
    return b_vars, vals, x_vals
def msat_make_minus(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode arg0 - arg1 as arg0 + (-1 * arg1)."""
    neg_arg1 = msat_make_times(menv, arg1, msat_make_number(menv, "-1"))
    return msat_make_plus(menv, arg0, neg_arg1)
def msat_make_lt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode arg0 < arg1 as not(arg0 >= arg1)."""
    return msat_make_not(menv, msat_make_geq(menv, arg0, arg1))
def msat_make_geq(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode arg0 >= arg1 via the primitive leq with operands flipped."""
    return msat_make_leq(menv, arg1, arg0)
def msat_make_gt(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode arg0 > arg1 as not(arg0 <= arg1)."""
    return msat_make_not(menv, msat_make_leq(menv, arg0, arg1))
def msat_make_impl(menv: msat_env, arg0: msat_term, arg1: msat_term):
    """Encode arg0 -> arg1 as (not arg0) | arg1."""
    return msat_make_or(menv, msat_make_not(menv, arg0), arg1)
def diverging_symbs(menv: msat_env) -> frozenset:
    """Return the symbols that must diverge over time: just `delta`."""
    rat_type = msat_get_rational_type(menv)
    decl = msat_declare_function(menv, delta_name, rat_type)
    return frozenset([msat_make_constant(menv, decl)])
def check_ltl(menv: msat_env, enc: LTLEncoder) -> (Iterable, msat_term,
                                                   msat_term, msat_term):
    """Build the sender/receiver timed transition system and its LTL property.

    Returns (curr2next term map, init formula, trans formula, ltl formula)
    where the LTL is: (G F !s.stutter) -> G (s.wait_ack -> F s.send).
    """
    assert menv
    assert isinstance(menv, msat_env)
    assert enc
    assert isinstance(enc, LTLEncoder)
    int_type = msat_get_integer_type(menv)
    real_type = msat_get_rational_type(menv)
    # Shared channels (receiver->sender and sender->receiver) and clock delta.
    r2s, x_r2s = decl_consts(menv, "r2s", int_type)
    s2r, x_s2r = decl_consts(menv, "s2r", int_type)
    delta, x_delta = decl_consts(menv, delta_name, real_type)
    # Note the channel roles are swapped between the two components.
    sender = Sender("s", menv, enc, r2s, x_r2s, s2r, x_s2r, delta)
    receiver = Receiver("r", menv, enc, s2r, x_s2r, r2s, x_r2s, delta)
    # Collect the curr -> next symbol mapping from both components.
    curr2next = {r2s: x_r2s, s2r: x_s2r, delta: x_delta}
    for comp in [sender, receiver]:
        for s, x_s in comp.symb2next.items():
            curr2next[s] = x_s
    zero = msat_make_number(menv, "0")
    init = msat_make_and(menv, receiver.init, sender.init)
    trans = msat_make_and(menv, receiver.trans, sender.trans)
    # invar delta >= 0
    init = msat_make_and(menv, init,
                         msat_make_geq(menv, delta, zero))
    trans = msat_make_and(menv, trans,
                          msat_make_geq(menv, x_delta, zero))
    # Timed steps do not change the channels:
    # delta > 0 -> (r2s' = r2s & s2r' = s2r)
    lhs = msat_make_gt(menv, delta, zero)
    rhs = msat_make_and(menv,
                        msat_make_equal(menv, x_r2s, r2s),
                        msat_make_equal(menv, x_s2r, s2r))
    trans = msat_make_and(menv, trans,
                          msat_make_impl(menv, lhs, rhs))
    # Property: (G F !s.stutter) -> G (s.wait_ack -> F s.send)
    lhs = enc.make_G(enc.make_F(msat_make_not(menv, sender.stutter)))
    rhs = enc.make_G(msat_make_impl(menv, sender.wait_ack,
                                    enc.make_F(sender.send)))
    ltl = msat_make_impl(menv, lhs, rhs)
    return TermMap(curr2next), init, trans, ltl
class Module:
    """Base class for system components; holds the init/trans formulas."""

    def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
                 *args, **kwargs):
        self.name = name
        self.menv = menv
        self.enc = enc
        self.symb2next = {}
        top = msat_make_true(menv)
        self.init = top
        self.trans = top

    def _symb(self, v_name, v_type):
        """Declare a component-scoped constant and its primed copy."""
        return decl_consts(self.menv, "{}_{}".format(self.name, v_name), v_type)

    def _enum(self, v_name: str, enum_size: int):
        """Declare a component-scoped bit-blasted enum."""
        return make_enum(self.menv, "{}_{}".format(self.name, v_name), enum_size)
class Sender(Module):
    """Sender of the channel protocol.

    Two locations (send / wait_ack), a message counter `msg_id`, a clock
    `c`, and an increasing retransmission `timeout`. It writes message ids
    to `out_c` and reads acknowledgements from `in_c`.
    """

    def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
                 in_c, x_in_c, out_c, x_out_c, delta):
        super().__init__(name, menv, enc)
        bool_type = msat_get_bool_type(menv)
        int_type = msat_get_integer_type(menv)
        real_type = msat_get_rational_type(menv)
        loc, x_loc = self._symb("l", bool_type)
        evt, x_evt = self._symb("evt", bool_type)
        msg_id, x_msg_id = self._symb("msg_id", int_type)
        timeout, x_timeout = self._symb("timeout", real_type)
        c, x_c = self._symb("c", real_type)
        # `evt` encodes move vs stutter; `loc` encodes send vs wait_ack.
        self.move = evt
        self.stutter = msat_make_not(menv, evt)
        self.x_move = x_evt
        self.x_stutter = msat_make_not(menv, x_evt)
        self.send = loc
        self.wait_ack = msat_make_not(menv, loc)
        self.x_send = x_loc
        self.x_wait_ack = msat_make_not(menv, x_loc)
        self.symb2next = {loc: x_loc, evt: x_evt, msg_id: x_msg_id,
                          timeout: x_timeout, c: x_c}
        zero = msat_make_number(menv, "0")
        one = msat_make_number(menv, "1")
        base_timeout = one
        # init: send & c = 0 & msg_id = 0
        self.init = msat_make_and(menv,
                                  msat_make_and(menv, self.send,
                                                msat_make_equal(menv, c,
                                                                zero)),
                                  msat_make_equal(menv, msg_id, zero))
        # invar: wait_ack -> c <= timeout  (imposed on both init and trans)
        self.init = msat_make_and(
            menv, self.init,
            msat_make_impl(menv, self.wait_ack,
                           msat_make_leq(menv, c, timeout)))
        self.trans = msat_make_impl(menv, self.x_wait_ack,
                                    msat_make_leq(menv, x_c, x_timeout))
        # Timed step or stutter: everything frozen except the clock advances.
        # delta > 0 | stutter -> l' = l & msg_id' = msg_id & timeout' = timeout &
        # c' = c + delta & out_c' = out_c
        lhs = msat_make_or(menv, msat_make_gt(menv, delta, zero), self.stutter)
        rhs = msat_make_and(
            menv,
            msat_make_and(menv,
                          msat_make_iff(menv, x_loc, loc),
                          msat_make_equal(menv, x_msg_id, msg_id)),
            msat_make_and(menv,
                          msat_make_equal(menv, x_timeout, timeout),
                          msat_make_equal(menv, x_c,
                                          msat_make_plus(menv, c, delta))))
        rhs = msat_make_and(menv, rhs,
                            msat_make_equal(menv, x_out_c, out_c))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # Discrete transition: a move with no time elapse.
        disc_t = msat_make_and(menv, self.move,
                               msat_make_equal(menv, delta, zero))
        # (send & send') ->
        # (msg_id' = msg_id & timeout' = base_timeout & c' = 0 & out_c' = out_c)
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.send, self.x_send))
        rhs = msat_make_and(
            menv,
            msat_make_and(menv,
                          msat_make_equal(menv, x_msg_id, msg_id),
                          msat_make_equal(menv, x_timeout, base_timeout)),
            msat_make_and(menv,
                          msat_make_equal(menv, x_c, zero),
                          msat_make_equal(menv, x_out_c, out_c)))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (send & wait_ack') -> new message is emitted:
        # (msg_id' = msg_id + 1 & timeout' = base_timeout & c' = 0 & out_c' = out_c)
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.send, self.x_wait_ack))
        rhs = msat_make_and(
            menv,
            msat_make_and(menv,
                          msat_make_equal(menv, x_msg_id,
                                          msat_make_plus(menv, msg_id, one)),
                          msat_make_equal(menv, x_timeout, base_timeout)),
            msat_make_and(menv,
                          msat_make_equal(menv, x_c, zero),
                          msat_make_equal(menv, x_out_c, out_c)))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (wait_ack) -> (c' = 0 & out_c' = out_c &
        # (wait_ack' <-> (in_c != msg_id & c >= timeout))
        # NOTE(review): the encoding uses c >= timeout here (msat_make_geq),
        # not a strict c > timeout.
        lhs = msat_make_and(menv, disc_t, self.wait_ack)
        rhs_iff = msat_make_and(menv,
                                msat_make_not(menv,
                                              msat_make_equal(menv, in_c,
                                                              msg_id)),
                                msat_make_geq(menv, c, timeout))
        rhs_iff = msat_make_iff(menv, self.x_wait_ack, rhs_iff)
        rhs = msat_make_and(menv,
                            msat_make_and(menv,
                                          msat_make_equal(menv, x_c, zero),
                                          msat_make_equal(menv, x_out_c,
                                                          out_c)),
                            rhs_iff)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # Retrying: (wait_ack & wait_ack') -> (timeout' > timeout)
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.wait_ack,
                                          self.x_wait_ack))
        rhs = msat_make_gt(menv, x_timeout, timeout)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # Ack received in time: (wait_ack) -> (send' <-> (in_c = msg_id & c < timeout))
        lhs = msat_make_and(menv, disc_t, self.wait_ack)
        rhs = msat_make_iff(menv, self.x_send,
                            msat_make_and(menv,
                                          msat_make_equal(menv, in_c, msg_id),
                                          msat_make_lt(menv, c, timeout)))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # Timeout is reset when going back to send:
        # (wait_ack & send') -> (timeout' = base_timeout)
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.wait_ack, self.x_send))
        rhs = msat_make_equal(menv, x_timeout, base_timeout)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
class Receiver(Module):
    """Receiver of the channel protocol.

    Two locations (wait / work). It reads message ids from `in_c` and
    acknowledges them on `out_c`.
    """

    def __init__(self, name: str, menv: msat_env, enc: LTLEncoder,
                 in_c, x_in_c, out_c, x_out_c, delta):
        super().__init__(name, menv, enc)
        bool_type = msat_get_bool_type(menv)
        loc, x_loc = self._symb("l", bool_type)
        self.wait = loc
        self.work = msat_make_not(menv, loc)
        self.x_wait = x_loc
        self.x_work = msat_make_not(menv, x_loc)
        self.symb2next = {loc: x_loc}
        zero = msat_make_number(menv, "0")
        # init: wait
        self.init = self.wait
        # Timed step freezes location and output:
        # delta > 0 -> loc' = loc & out_c' = out_c
        lhs = msat_make_gt(menv, delta, zero)
        rhs = msat_make_and(menv,
                            msat_make_iff(menv, x_loc, loc),
                            msat_make_equal(menv, x_out_c, out_c))
        self.trans = msat_make_impl(menv, lhs, rhs)
        # Discrete transition: no time elapse.
        disc_t = msat_make_equal(menv, delta, zero)
        # Stay waiting iff the last message was already acknowledged:
        # wait -> (wait' <-> in_c = out_c)
        lhs = msat_make_and(menv, disc_t, self.wait)
        rhs = msat_make_iff(menv, self.x_wait,
                            msat_make_equal(menv, in_c, out_c))
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # (wait & wait') -> (out_c' = out_c)
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.wait, self.x_wait))
        rhs = msat_make_equal(menv, x_out_c, out_c)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # Acknowledge the new message: (wait & work') -> out_c' = in_c
        lhs = msat_make_and(menv, disc_t,
                            msat_make_and(menv, self.wait, self.x_work))
        rhs = msat_make_equal(menv, x_out_c, in_c)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
        # work -> out_c' = out_c
        lhs = msat_make_and(menv, disc_t, self.work)
        rhs = msat_make_equal(menv, x_out_c, out_c)
        self.trans = msat_make_and(menv, self.trans,
                                   msat_make_impl(menv, lhs, rhs))
def hints(env: PysmtEnv) -> FrozenSet[Hint]:
    """Build the set of (deliberately imperfect) loop hints for the search.

    Each Hint is a single self-looping location over one symbol, with a
    progress constraint that keeps the loop alive forever.
    """
    assert isinstance(env, PysmtEnv)
    mgr = env.formula_manager
    delta = mgr.Symbol(delta_name, types.REAL)
    r2s = mgr.Symbol("r2s", types.INT)
    # NOTE(review): this symbol is created with the name "r2s", so in pysmt
    # `s2r` is the very same symbol as `r2s`. Given the benchmark family is
    # named "wrong_hints", this looks intentional — confirm before changing.
    s2r = mgr.Symbol("r2s", types.INT)
    s_l = mgr.Symbol("s_l", types.BOOL)
    s_evt = mgr.Symbol("s_evt", types.BOOL)
    s_msg_id = mgr.Symbol("s_msg_id", types.INT)
    s_timeout = mgr.Symbol("s_timeout", types.REAL)
    s_c = mgr.Symbol("s_c", types.REAL)
    r_l = mgr.Symbol("r_l", types.BOOL)
    symbs = frozenset([delta, r2s, s2r, s_l, s_evt, s_msg_id, s_timeout, s_c,
                       r_l])
    x_delta = symb_to_next(mgr, delta)
    x_r2s = symb_to_next(mgr, r2s)
    x_s2r = symb_to_next(mgr, s2r)
    x_s_l = symb_to_next(mgr, s_l)
    x_s_evt = symb_to_next(mgr, s_evt)
    x_s_msg_id = symb_to_next(mgr, s_msg_id)
    x_s_timeout = symb_to_next(mgr, s_timeout)
    x_s_c = symb_to_next(mgr, s_c)
    x_r_l = symb_to_next(mgr, r_l)
    res = []
    r0 = mgr.Real(0)
    r1 = mgr.Real(1)
    i0 = mgr.Int(0)
    i1 = mgr.Int(1)
    # s_c stays at 0 forever.
    loc0 = Location(env, mgr.Equals(s_c, r0))
    loc0.set_progress(0, mgr.Equals(x_s_c, r0))
    hint = Hint("h_s_c0", env, frozenset([s_c]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    # s2r is non-negative and strictly increasing.
    loc0 = Location(env, mgr.GE(s2r, i0))
    loc0.set_progress(0, mgr.Equals(x_s2r, mgr.Plus(s2r, i1)))
    hint = Hint("h_s2r2", env, frozenset([s2r]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    # delta stays at 0 forever (no time elapse).
    loc0 = Location(env, mgr.Equals(delta, r0))
    loc0.set_progress(0, mgr.Equals(x_delta, r0))
    hint = Hint("h_delta0", env, frozenset([delta]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    # s2r stays at 0 forever.
    loc0 = Location(env, mgr.Equals(s2r, i0))
    loc0.set_progress(0, mgr.Equals(x_s2r, i0))
    hint = Hint("h_s2r0", env, frozenset([s2r]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    # s_evt stays true forever.
    loc0 = Location(env, s_evt)
    loc0.set_progress(0, x_s_evt)
    hint = Hint("h_s_evt0", env, frozenset([s_evt]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    # s_msg_id stays at 0 forever.
    loc0 = Location(env, mgr.Equals(s_msg_id, i0))
    loc0.set_progress(0, mgr.Equals(x_s_msg_id, i0))
    hint = Hint("h_s_msg_id0", env, frozenset([s_msg_id]), symbs)
    hint.set_locs([loc0])
    res.append(hint)
    return frozenset(res)
| 39.889688 | 89 | 0.569556 |
0743cc8611ecf3e3d5469693393efb392a69cd70 | 3,195 | py | Python | hublib/ui/pathselect.py | hzclarksm/hublib | e8f2168d80464b6343b980e30fdd552d1b0c2479 | [
"MIT"
] | 6 | 2017-05-23T19:17:29.000Z | 2022-02-24T00:36:46.000Z | hublib/ui/pathselect.py | hzclarksm/hublib | e8f2168d80464b6343b980e30fdd552d1b0c2479 | [
"MIT"
] | 1 | 2019-02-13T13:35:57.000Z | 2019-02-13T13:35:57.000Z | hublib/ui/pathselect.py | hzclarksm/hublib | e8f2168d80464b6343b980e30fdd552d1b0c2479 | [
"MIT"
] | 6 | 2017-09-12T19:51:12.000Z | 2021-01-13T23:43:57.000Z | # ----------------------------------------------------------------------
# A widget that allows you to choose server-side files or directories.
# ======================================================================
# AUTHOR: Rob Campbell, Purdue University
# Copyright (c) 2018 HUBzero Foundation, LLC
# See LICENSE file for details.
# ======================================================================
import os
import ipywidgets as ui
class PathSelector():
    """
    The PathSelector widget allows the user to choose a path in the server (container). It cannot access files
    from the user's computer.

    :param start_dir: The directory to display.
    :param select_file: True for file select. False for directory select.
    """

    def __init__(self, start_dir, select_file=True):
        # Currently selected file path (None until a file is picked).
        self.file = None
        self.select_file = select_file
        # Current directory being displayed.
        self.value = start_dir
        self.select = ui.SelectMultiple(options=['init'], value=(), rows=10, description='')
        self.accord = ui.Accordion(children=[self.select])
        self.accord.selected_index = None  # Start closed (showing path only)
        self.refresh(self.value)
        # React to the user clicking an entry in the list.
        self.select.observe(self.on_update, 'value')

    def on_update(self, change):
        # Trait-change callback: the first selected entry is navigated into.
        if len(change['new']) > 0:
            self.refresh(change['new'][0])

    def refresh(self, item):
        # Resolve the clicked item relative to the current directory.
        path = os.path.abspath(os.path.join(self.value, item))

        if os.path.isfile(path):
            if self.select_file:
                # A file was chosen: show it in the title and close the list.
                self.accord.set_title(0, path)
                self.file = path
                self.accord.selected_index = None
            else:
                # Directory-select mode: ignore the click on a file.
                self.select.value = ()
        else:  # os.path.isdir(path)
            self.file = None
            self.value = path

            # Build list of files and dirs; directories are shown in brackets.
            keys = ['[..]']
            for item in os.listdir(path):
                if item[0] == '.':
                    # Hidden entries are skipped.
                    continue
                elif os.path.isdir(os.path.join(path, item)):
                    keys.append('[' + item + ']')
                else:
                    keys.append(item)

            # Sort and create list of output values (values carry the bare
            # names; the bracketed keys are only for display).
            keys.sort(key=str.lower)
            vals = []
            for k in keys:
                if k[0] == '[':
                    vals.append(k[1:-1])  # strip off brackets
                else:
                    vals.append(k)

            # Update widget; hold notifications so resetting the selection
            # does not re-trigger on_update.
            self.accord.set_title(0, path)
            self.select.options = list(zip(keys, vals))
            with self.select.hold_trait_notifications():
                self.select.value = ()

    def _ipython_display_(self):
        self.accord._ipython_display_()

    @property
    def disabled(self):
        return self.select.disabled

    @disabled.setter
    def disabled(self, newval):
        # Disable both the accordion and the inner list together.
        self.accord.disabled = newval
        self.select.disabled = newval

    @property
    def visible(self):
        return self.accord.layout.visibility

    @visible.setter
    def visible(self, newval):
        if newval:
            self.accord.layout.visibility = 'visible'
            return
        self.accord.layout.visibility = 'hidden'
| 32.602041 | 111 | 0.525196 |
85dc0f71df9ab0380c3dc0d57201dcac76a8121a | 6,686 | py | Python | base/models.py | felixyin/qdqtrj_website | 43ae31af887cfe537d6f0cff5329dac619190210 | [
"MIT"
] | null | null | null | base/models.py | felixyin/qdqtrj_website | 43ae31af887cfe537d6f0cff5329dac619190210 | [
"MIT"
] | null | null | null | base/models.py | felixyin/qdqtrj_website | 43ae31af887cfe537d6f0cff5329dac619190210 | [
"MIT"
] | null | null | null | from ckeditor_uploader.fields import RichTextUploadingField
from django.contrib.sites.models import Site
from django.core.exceptions import ValidationError
from django.db import models as m
from django.utils.timezone import now
from django.utils.translation import gettext_lazy as _
# Create your models here.
#
class BaseModel(m.Model):
    """Abstract base model: creation/modification timestamps, enable flag
    and an explicit ordering field shared by all concrete models."""

    created_time = m.DateTimeField('创建时间', default=now, editable=False)
    last_mod_time = m.DateTimeField('修改时间', default=now, editable=False)
    is_enable = m.BooleanField('是否启用', default=True)
    sequence = m.IntegerField('排列顺序', unique=True, null=True, blank=True)

    class Meta:
        abstract = True
        ordering = ('sequence',)

    def save(self, *args, **kwargs):
        # if not self.slug or self.slug == 'no-slug' or not self.id:
        #     slug = self.title if 'title' in self.__dict__ else self.name
        #     self.slug = slugify(slug)
        super().save(*args, **kwargs)
        # type = self.__class__.__name__
        # True when only the `views` counter is being updated (currently unused).
        is_update_views = 'update_fields' in kwargs and len(kwargs['update_fields']) == 1 and kwargs['update_fields'][
            0] == 'views'
        # FIXME: notify Baidu to re-index this URL
        # save_signal.send(sender=self.__class__, is_update_views=is_update_views, id=self.id)

    def get_full_url(self):
        """Return the absolute URL of this object on the current site.

        Falls back to the bare site root when the model does not implement
        get_absolute_url().
        """
        site = Site.objects.get_current().domain
        try:
            url = "http://{site}{path}".format(site=site, path=self.get_absolute_url())
        except Exception:
            url = "http://{site}".format(site=site)
        return url
class BaseTag(BaseModel):
    # Project technology tag.
    name = m.CharField(max_length=50, verbose_name='标签名称')
    # description = MDTextField(max_length=200, blank=True, verbose_name='特点简介')
    # Rich-text summary of what the tag stands for.
    description = RichTextUploadingField(max_length=2000, config_name='mini', verbose_name='特点简介', blank=False)

    def __str__(self):
        return self.name

    class Meta:
        verbose_name = '标签'
        verbose_name_plural = verbose_name
class BaseAttach(BaseModel):
    # Project attachment (uploaded file).
    file = m.FileField(upload_to="upload/attach/%Y/%m/%d", null=True, verbose_name='附件')
    # NOTE(review): default=file.name is evaluated at class-definition time,
    # not per-upload — presumably meant to default to the uploaded file's
    # name; verify the intended behaviour.
    name = m.CharField(max_length=50, null=True, verbose_name='附件名称', default=file.name)
    # project = m.ForeignKey(BaseModel, on_delete=m.CASCADE, null=True, verbose_name='项目')

    class Meta:
        verbose_name = '附件'
        verbose_name_plural = verbose_name
class WebSettings(m.Model):
    """Site-wide configuration; clean() enforces that only one row may exist."""
    site_address = m.CharField("网站地址", max_length=30, null=False, blank=False, default='')
    sitename = m.CharField("网站名称", max_length=30, null=False, blank=False, default='')
    site_description = m.TextField("网站描述", max_length=500, null=False, blank=False, default='')
    site_seo_description = m.TextField("网站SEO描述", max_length=500, null=False, blank=False, default='')
    site_keywords = m.TextField("网站关键字", max_length=500, null=False, blank=False, default='')
    copyright = m.CharField('Copyright', max_length=40, null=True, blank=True, default='')
    beiancode = m.CharField('备案号', max_length=20, null=True, blank=True, default='')
    analyticscode = m.TextField("网站统计代码", max_length=500, null=False, blank=False, default='')
    show_gongan_code = m.BooleanField('是否显示公安备案号', default=False, null=False)
    gongan_beiancode = m.TextField('公安备案号', max_length=20, null=True, blank=True, default='')
    blogname = m.CharField("博客名称", max_length=200, null=False, blank=False, default='')
    blog_description = m.TextField("博客描述", max_length=500, null=False, blank=False, default='')
    blog_seo_description = m.TextField("博客SEO描述", max_length=500, null=False, blank=False, default='')
    blog_keywords = m.TextField("博客关键字", max_length=500, null=False, blank=False, default='')
    article_sub_length = m.IntegerField("文章摘要长度", default=300)
    sidebar_article_count = m.IntegerField("侧边栏文章数目", default=10)
    sidebar_comment_count = m.IntegerField("侧边栏评论数目", default=5)
    show_google_adsense = m.BooleanField('是否显示谷歌广告', default=False)
    google_adsense_codes = m.TextField('广告内容', max_length=500, null=True, blank=True, default='')
    open_blog_comment = m.BooleanField('是否打开网站评论功能', default=True)
    resource_path = m.CharField("静态文件保存地址", max_length=30, null=False, default='/var/www/resource/')
    logo_img = m.ImageField(upload_to='upload/websetting/', verbose_name='网站LOGO', blank=False)
    logo_footer_img = m.ImageField(upload_to='upload/websetting/', verbose_name='网站页脚LOGO', blank=False)
    phone_img = m.ImageField(upload_to='upload/websetting/', verbose_name='电话', blank=False)
    wechart_img = m.ImageField(upload_to='upload/websetting/', verbose_name='微信二维码', blank=False)
    class Meta:
        verbose_name = '网站配置'
        verbose_name_plural = verbose_name
    def __str__(self):
        return self.sitename
    def clean(self):
        # Singleton guard: reject a second WebSettings row.
        if WebSettings.objects.exclude(id=self.id).count():
            raise ValidationError(_('只能有一个配置'))
    def save(self, *args, **kwargs):
        super().save(*args, **kwargs)
        # Any settings change invalidates the whole site cache.
        from website.utils import cache
        cache.clear()
class Message(BaseModel):
    # Visitor message / free-quote request; all contact fields are optional.
    name = m.CharField(max_length=50, null=True, blank=True, verbose_name='姓名')
    phone = m.CharField(max_length=20, null=True, blank=True, verbose_name='电话')
    wechart = m.CharField(max_length=20, null=True, blank=True, verbose_name='微信')
    qq = m.CharField(max_length=20, null=True, blank=True, verbose_name='qq')
    email = m.CharField(max_length=20, null=True, blank=True, verbose_name='邮箱')
    content = m.TextField(max_length=10000, null=True, blank=True, verbose_name='留言信息')
    class EnumIsRead(m.TextChoices):
        # 'Y' = read, 'N' = unread
        YIDU = 'Y', _('已读')
        WEIDU = 'N', _('未读')
    is_read = m.CharField(max_length=1, choices=EnumIsRead.choices, default=EnumIsRead.WEIDU, verbose_name='已读否?')
    class Meta:
        verbose_name = '免费报价'
        verbose_name_plural = verbose_name
# Create your models here.
class ContactInfo(m.Model):
    # Company contact information shown on the site (phones, IM, mail, address).
    company = m.CharField(max_length=20, verbose_name='组织名称')
    phone = m.CharField(max_length=20, verbose_name='咨询电话')
    phone_user = m.CharField(max_length=20, verbose_name='咨询电话姓名')
    wechart = m.CharField(max_length=20, verbose_name='微信号')
    qq = m.CharField(max_length=20, verbose_name='QQ号')
    phone_after_sale = m.CharField(max_length=20, verbose_name='售后电话')
    phone_after_sale_user = m.CharField(max_length=20, verbose_name='售后电话姓名')
    email = m.EmailField(verbose_name='邮箱')
    email_hr = m.EmailField(verbose_name='HR邮箱')
    address = m.CharField(max_length=100, verbose_name='地址')
    def __str__(self):
        return self.company
    class Meta:
        verbose_name = '联系信息'
        verbose_name_plural = verbose_name
| 43.699346 | 118 | 0.691445 |
bdb82f5d854ed09a90d5bcf902db6e1e94968de6 | 7,028 | py | Python | sympy/assumptions/refine.py | wanglongqi/sympy | 66a37804f13e8510a35d958b2cfaef91cbb2fd7c | [
"BSD-3-Clause"
] | null | null | null | sympy/assumptions/refine.py | wanglongqi/sympy | 66a37804f13e8510a35d958b2cfaef91cbb2fd7c | [
"BSD-3-Clause"
] | null | null | null | sympy/assumptions/refine.py | wanglongqi/sympy | 66a37804f13e8510a35d958b2cfaef91cbb2fd7c | [
"BSD-3-Clause"
] | 1 | 2020-09-09T15:20:27.000Z | 2020-09-09T15:20:27.000Z | from __future__ import print_function, division
from sympy.core import S, Add, Expr, Basic
from sympy.assumptions import Q, ask
from sympy.core.logic import fuzzy_not
def refine(expr, assumptions=True):
    """
    Simplify an expression using assumptions.

    Returns the form that *expr* would take if its symbols were replaced
    by explicit numerical expressions satisfying *assumptions*.

    Examples
    ========
    >>> from sympy import Symbol, refine, sqrt, Q
    >>> x = Symbol('x', real=True)
    >>> refine(sqrt(x**2))
    Abs(x)
    >>> x = Symbol('x', positive=True)
    >>> refine(sqrt(x**2))
    x
    """
    if not isinstance(expr, Basic):
        return expr
    if not expr.is_Atom:
        # Refine bottom-up: rebuild the node from its refined children.
        # TODO: this will probably not work with Integral or Polynomial
        refined_children = [refine(child, assumptions) for child in expr.args]
        expr = expr.func(*refined_children)
    if hasattr(expr, '_eval_refine'):
        self_refined = expr._eval_refine()
        if self_refined is not None:
            return self_refined
    # Dispatch on the class name; no handler means nothing more to do.
    handler = handlers_dict.get(expr.__class__.__name__, None)
    if handler is None:
        return expr
    candidate = handler(expr, assumptions)
    if candidate is None or candidate == expr:
        return expr
    if not isinstance(candidate, Expr):
        return candidate
    # The handler changed the expression; keep refining until a fixpoint.
    return refine(candidate, assumptions)
def refine_Pow(expr, assumptions):
    """
    Handler for instances of Pow.
    >>> from sympy import Symbol, Q
    >>> from sympy.assumptions.refine import refine_Pow
    >>> from sympy.abc import x,y,z
    >>> refine_Pow((-1)**x, Q.real(x))
    >>> refine_Pow((-1)**x, Q.even(x))
    1
    >>> refine_Pow((-1)**x, Q.odd(x))
    -1
    For powers of -1, even parts of the exponent can be simplified:
    >>> refine_Pow((-1)**(x+y), Q.even(x))
    (-1)**y
    >>> refine_Pow((-1)**(x+y+z), Q.odd(x) & Q.odd(z))
    (-1)**y
    >>> refine_Pow((-1)**(x+y+2), Q.odd(x))
    (-1)**(y + 1)
    >>> refine_Pow((-1)**(x+3), True)
    (-1)**(x + 1)
    """
    from sympy.core import Pow, Rational
    from sympy.functions.elementary.complexes import Abs
    from sympy.functions import sign
    # Abs(x)**even == x**even for real x.
    if isinstance(expr.base, Abs):
        if ask(Q.real(expr.base.args[0]), assumptions) and \
                ask(Q.even(expr.exp), assumptions):
            return expr.base.args[0] ** expr.exp
    if ask(Q.real(expr.base), assumptions):
        if expr.base.is_number:
            if ask(Q.even(expr.exp), assumptions):
                return abs(expr.base) ** expr.exp
            if ask(Q.odd(expr.exp), assumptions):
                return sign(expr.base) * abs(expr.base) ** expr.exp
        if isinstance(expr.exp, Rational):
            if type(expr.base) is Pow:
                return abs(expr.base.base) ** (expr.base.exp * expr.exp)
        if expr.base is S.NegativeOne:
            if expr.exp.is_Add:
                old = expr
                # For powers of (-1) we can remove
                #  - even terms
                #  - pairs of odd terms
                #  - a single odd term + 1
                #  - A numerical constant N can be replaced with mod(N,2)
                coeff, terms = expr.exp.as_coeff_add()
                terms = set(terms)
                even_terms = set([])
                odd_terms = set([])
                initial_number_of_terms = len(terms)
                for t in terms:
                    if ask(Q.even(t), assumptions):
                        even_terms.add(t)
                    elif ask(Q.odd(t), assumptions):
                        odd_terms.add(t)
                terms -= even_terms
                # An odd count of odd terms contributes one extra (-1),
                # folded into the numeric coefficient before reducing mod 2.
                if len(odd_terms) % 2:
                    terms -= odd_terms
                    new_coeff = (coeff + S.One) % 2
                else:
                    terms -= odd_terms
                    new_coeff = coeff % 2
                if new_coeff != coeff or len(terms) < initial_number_of_terms:
                    terms.add(new_coeff)
                    expr = expr.base**(Add(*terms))
                # Handle (-1)**((-1)**n/2 + m/2)
                e2 = 2*expr.exp
                if ask(Q.even(e2), assumptions):
                    if e2.could_extract_minus_sign():
                        e2 *= expr.base
                if e2.is_Add:
                    i, p = e2.as_two_terms()
                    if p.is_Pow and p.base is S.NegativeOne:
                        if ask(Q.integer(p.exp), assumptions):
                            i = (i + 1)/2
                            if ask(Q.even(i), assumptions):
                                return expr.base**p.exp
                            elif ask(Q.odd(i), assumptions):
                                return expr.base**(p.exp + 1)
                            else:
                                return expr.base**(p.exp + i)
                # Only return when the Add-simplification above changed
                # something; otherwise fall through and return None so the
                # caller keeps the original expression.
                if old != expr:
                    return expr
def refine_atan2(expr, assumptions):
    """
    Handler for the atan2 function.

    Examples
    ========
    >>> from sympy import Symbol, Q, refine, atan2
    >>> from sympy.assumptions.refine import refine_atan2
    >>> from sympy.abc import x, y
    >>> refine_atan2(atan2(y,x), Q.real(y) & Q.positive(x))
    atan(y/x)
    >>> refine_atan2(atan2(y,x), Q.negative(y) & Q.negative(x))
    atan(y/x) - pi
    >>> refine_atan2(atan2(y,x), Q.positive(y) & Q.negative(x))
    atan(y/x) + pi
    >>> refine_atan2(atan2(y,x), Q.zero(y) & Q.negative(x))
    pi
    >>> refine_atan2(atan2(y,x), Q.positive(y) & Q.zero(x))
    pi/2
    >>> refine_atan2(atan2(y,x), Q.negative(y) & Q.zero(x))
    -pi/2
    >>> refine_atan2(atan2(y,x), Q.zero(y) & Q.zero(x))
    nan
    """
    from sympy.functions.elementary.trigonometric import atan
    from sympy.core import S
    num, den = expr.args
    # Quadrant/axis rules, checked in order; the result is built lazily so
    # num/den is only formed for the branch that actually matches.
    quadrant_rules = (
        (Q.real(num) & Q.positive(den), lambda: atan(num / den)),
        (Q.negative(num) & Q.negative(den), lambda: atan(num / den) - S.Pi),
        (Q.positive(num) & Q.negative(den), lambda: atan(num / den) + S.Pi),
        (Q.zero(num) & Q.negative(den), lambda: S.Pi),
        (Q.positive(num) & Q.zero(den), lambda: S.Pi/2),
        (Q.negative(num) & Q.zero(den), lambda: -S.Pi/2),
        (Q.zero(num) & Q.zero(den), lambda: S.NaN),
    )
    for condition, make_result in quadrant_rules:
        if ask(condition, assumptions):
            return make_result()
    return expr
def refine_Relational(expr, assumptions):
    """
    Handler for Relational.

    >>> from sympy.assumptions.refine import refine_Relational
    >>> from sympy.assumptions.ask import Q
    >>> from sympy.abc import x
    >>> refine_Relational(x<0, ~Q.is_true(x<0))
    False
    """
    # Delegate entirely to the SAT/ask machinery on the relation's truth.
    truth_query = Q.is_true(expr)
    return ask(truth_query, assumptions)
# Dispatch table mapping an expression's class name to its refine handler
# (looked up by refine() above).
handlers_dict = {
    'Pow': refine_Pow,
    'atan2': refine_atan2,
    'Equality': refine_Relational,
    'Unequality': refine_Relational,
    'GreaterThan': refine_Relational,
    'LessThan': refine_Relational,
    'StrictGreaterThan': refine_Relational,
    'StrictLessThan': refine_Relational
}
| 32.387097 | 78 | 0.537422 |
e2f3b0b16cf65ce02ea61cb9a4a1cab5853db4d7 | 1,743 | py | Python | histdiads/utils.py | anguelos/histdiads | d1ea260b4ad1bda41169e525f5ceddcebf4447bf | [
"Apache-2.0"
] | null | null | null | histdiads/utils.py | anguelos/histdiads | d1ea260b4ad1bda41169e525f5ceddcebf4447bf | [
"Apache-2.0"
] | null | null | null | histdiads/utils.py | anguelos/histdiads | d1ea260b4ad1bda41169e525f5ceddcebf4447bf | [
"Apache-2.0"
] | null | null | null | import tqdm
import os
import requests
import pathlib
import tarfile
import zipfile
def download_url(url, filename, filesize=None, resume=True):
    """Download *url* to *filename*, showing a tqdm progress bar.

    Parameters
    ----------
    url : str
        Remote address to fetch.
    filename : str or os.PathLike
        Destination path; parent directories are created as needed.
    filesize : int, optional
        Expected size in bytes.  When the destination already has exactly
        this size the download is skipped entirely; when it exists but is
        incomplete and ``resume`` is true, the transfer resumes from the
        current end of the file via an HTTP range request.
    resume : bool
        Whether to attempt resuming a partial file.
    """
    pathlib.Path(filename).parents[0].mkdir(parents=True, exist_ok=True)
    if filesize is not None and os.path.isfile(filename) and os.path.getsize(filename) == filesize:
        print(f"Found {filename} cached")
        return
    if filesize is not None and os.path.isfile(filename) and resume:
        found_bytes_count = os.path.getsize(filename)
        # RFC 7233 byte ranges need the trailing dash: "bytes=N-" means
        # "from offset N to the end"; without it the header is malformed
        # and servers may ignore or reject the resume request.
        resume_header = {"Range": f"bytes={found_bytes_count}-"}
        # NOTE(review): verify=False disables TLS certificate checking --
        # kept for behaviour parity, but worth revisiting.
        response = requests.get(url, headers=resume_header, stream=True, verify=False, allow_redirects=True)
        file_mode = "ab"  # append to the partial file
    else:
        response = requests.get(url, stream=True, verify=False, allow_redirects=True)
        file_mode = "wb"
    # May be 0 when the server omits Content-Length; the bar then has no total.
    total_size_in_bytes = int(response.headers.get('content-length', 0))
    block_size = 1024**2  # stream in 1 MB chunks
    progress_bar = tqdm.tqdm(total=total_size_in_bytes, unit='iB', unit_scale=True)
    with open(filename, file_mode) as file:
        for data in response.iter_content(block_size):
            progress_bar.update(len(data))
            file.write(data)
    progress_bar.close()
    # if total_size_in_bytes != 0 and progress_bar.n != total_size_in_bytes:
    #     print(f"ERROR, something went wrong downloading {url} to {filename}")
def extract(compressed_path, root):
    """Unpack *compressed_path* (a ``.tar.gz`` or ``.zip`` archive) into *root*.

    Parameters
    ----------
    compressed_path : pathlib.Path
        Archive to unpack; the format is chosen from its file name.
    root : str or os.PathLike
        Directory the archive members are extracted into.

    Raises
    ------
    ValueError
        If the file name ends in neither ``.tar.gz`` nor ``.zip``.
    """
    name = compressed_path.name
    # NOTE(review): extractall trusts member paths; a malicious archive can
    # escape *root* (path traversal) -- only use with trusted archives.
    if name.endswith(".tar.gz"):
        # Context managers guarantee the archive handle is closed even when
        # extraction fails partway through.
        with tarfile.open(compressed_path, "r:gz") as archive:
            archive.extractall(root)
    elif name.endswith(".zip"):
        with zipfile.ZipFile(compressed_path) as archive:
            archive.extractall(root)
    else:
        raise ValueError(f"Unsupported archive type: {compressed_path}")
9ffef65e1d93ab651bcd0204dc57dd62b305a1d1 | 1,363 | py | Python | Bot/sqlite_setup.py | MaexMaex/python_discord_bot | 8bbccbcfb6b6ae3d1ad670df03ff3c41feb65a59 | [
"MIT"
] | null | null | null | Bot/sqlite_setup.py | MaexMaex/python_discord_bot | 8bbccbcfb6b6ae3d1ad670df03ff3c41feb65a59 | [
"MIT"
] | 10 | 2018-04-05T06:16:15.000Z | 2020-02-09T08:33:13.000Z | Bot/sqlite_setup.py | MaexMaex/python_discord_bot | 8bbccbcfb6b6ae3d1ad670df03ff3c41feb65a59 | [
"MIT"
] | null | null | null | import sqlite3
class DBSetup:
    """Create the SQLite schema used by the bttn Discord/Telegram bot.

    Opens a connection to *dbname* on construction; call :meth:`setup` to
    create the tables (idempotent thanks to ``IF NOT EXISTS``) and
    :meth:`close` to release the connection when done.
    """

    def __init__(self, dbname="bttn.sqlite"):
        self.dbname = dbname
        self.conn = sqlite3.connect(dbname)
        self.c = self.conn.cursor()

    def setup(self):
        """Create all tables if they do not exist; committed as one transaction."""
        # `with self.conn` commits on success and rolls back on error.
        with self.conn:
            self.c.execute("""CREATE TABLE IF NOT EXISTS users (
                            discord_id integer unique,
                            name text,
                            score integer,
                            status integer
                            )""")
            self.c.execute("""CREATE TABLE IF NOT EXISTS bttns (
                            discord_id integer,
                            party_name text,
                            timestamp real,
                            foreign key(discord_id) references users(discord_id)
                            )""")
            self.c.execute(""" CREATE TABLE IF NOT EXISTS telegram_users (
                        telegram_id integer unique,
                        discord_id integer,
                        name text,
                        score integer,
                        status integer,
                        foreign key(discord_id) references users(discord_id)
                        )""")
            self.c.execute("""CREATE TABLE IF NOT EXISTS telegram_bttns (
                            telegram_id integer,
                            party_name text,
                            timestamp real,
                            foreign key(telegram_id) references telegram_users(telegram_id)
                            )""")

    def close(self):
        """Release the underlying SQLite connection (new, backward-compatible)."""
        self.conn.close()
# Import-time side effect: open (or create) the default database file and
# make sure the schema exists.
db = DBSetup()
db.setup()
| 29.630435 | 79 | 0.512106 |
ee8dcd50659bb7bdfc42f17934983daa75b5f316 | 16,994 | py | Python | Barbican/barbican-kpt-server/barbican/plugin/crypto/p11_crypto.py | sdic-cloud-security-research/barbican-kpt-kms | a6fba7faa046e0dcfd76321e32914b80484ed829 | [
"Apache-2.0"
] | null | null | null | Barbican/barbican-kpt-server/barbican/plugin/crypto/p11_crypto.py | sdic-cloud-security-research/barbican-kpt-kms | a6fba7faa046e0dcfd76321e32914b80484ed829 | [
"Apache-2.0"
] | null | null | null | Barbican/barbican-kpt-server/barbican/plugin/crypto/p11_crypto.py | sdic-cloud-security-research/barbican-kpt-kms | a6fba7faa046e0dcfd76321e32914b80484ed829 | [
"Apache-2.0"
] | null | null | null | # Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import base64
import collections
import threading
import time
from oslo_config import cfg
from oslo_serialization import jsonutils as json
from barbican.common import config
from barbican.common import exception
from barbican.common import utils
from barbican import i18n as u
from barbican.plugin.crypto import base as plugin
from barbican.plugin.crypto import pkcs11
CONF = config.new_config()
LOG = utils.getLogger(__name__)
# Per-project KEK cache entry: the PKCS#11 key handle plus its unix expiry.
CachedKEK = collections.namedtuple("CachedKEK", ["kek", "expires"])
p11_crypto_plugin_group = cfg.OptGroup(name='p11_crypto_plugin',
                                       title="PKCS11 Crypto Plugin Options")
# Configuration options registered under the [p11_crypto_plugin] section.
p11_crypto_plugin_opts = [
    cfg.StrOpt('library_path',
               help=u._('Path to vendor PKCS11 library')),
    cfg.StrOpt('login',
               help=u._('Password to login to PKCS11 session'),
               secret=True),
    cfg.StrOpt('mkek_label',
               help=u._('Master KEK label (as stored in the HSM)')),
    cfg.IntOpt('mkek_length',
               help=u._('Master KEK length in bytes.')),
    cfg.StrOpt('hmac_label',
               help=u._('Master HMAC Key label (as stored in the HSM)')),
    cfg.IntOpt('slot_id',
               help=u._('HSM Slot ID'),
               default=1),
    cfg.BoolOpt('rw_session',
                help=u._('Flag for Read/Write Sessions'),
                default=True),
    cfg.IntOpt('pkek_length',
               help=u._('Project KEK length in bytes.'),
               default=32),
    cfg.IntOpt('pkek_cache_ttl',
               help=u._('Project KEK Cache Time To Live, in seconds'),
               default=900),
    cfg.IntOpt('pkek_cache_limit',
               help=u._('Project KEK Cache Item Limit'),
               default=100),
    cfg.StrOpt('encryption_mechanism',
               help=u._('Secret encryption mechanism'),
               default='CKM_AES_CBC', deprecated_name='algorithm'),
    cfg.StrOpt('hmac_key_type',
               help=u._('HMAC Key Type'),
               default='CKK_AES'),
    cfg.StrOpt('hmac_keygen_mechanism',
               help=u._('HMAC Key Generation Algorithm'),
               default='CKM_AES_KEY_GEN'),
    cfg.StrOpt('seed_file',
               help=u._('File to pull entropy for seeding RNG'),
               default=''),
    cfg.IntOpt('seed_length',
               help=u._('Amount of data to read from file for seed'),
               default=32),
    cfg.StrOpt('plugin_name',
               help=u._('User friendly plugin name'),
               default='PKCS11 HSM'),
    cfg.BoolOpt('aes_gcm_generate_iv',
                help=u._('Generate IVs for CKM_AES_GCM mechanism.'),
                default=True, deprecated_name='generate_iv'),
]
CONF.register_group(p11_crypto_plugin_group)
CONF.register_opts(p11_crypto_plugin_opts, group=p11_crypto_plugin_group)
config.parse_args(CONF)
def list_opts():
    """Yield this plugin's (option group, options) pair for oslo.config discovery."""
    entries = ((p11_crypto_plugin_group, p11_crypto_plugin_opts),)
    for group, opts in entries:
        yield group, opts
def json_dumps_compact(data):
    """Serialize *data* to JSON with no whitespace after separators."""
    compact_separators = (',', ':')
    return json.dumps(data, separators=compact_separators)
class P11CryptoPlugin(plugin.CryptoPluginBase):
    """PKCS11 supporting implementation of the crypto plugin.

    Wraps a vendor PKCS#11 library (HSM).  Project KEKs are generated on
    the HSM, wrapped under a master KEK (MKEK), authenticated with a
    master HMAC key, and cached in-process with a TTL and size limit.
    """
    def __init__(self, conf=CONF, ffi=None, pkcs11=None):
        self.conf = conf
        plugin_conf = conf.p11_crypto_plugin
        if plugin_conf.library_path is None:
            raise ValueError(u._("library_path is required"))
        # Use specified or create new pkcs11 object
        self.pkcs11 = pkcs11 or self._create_pkcs11(plugin_conf, ffi)
        # Save conf arguments
        self.encryption_mechanism = plugin_conf.encryption_mechanism
        self.mkek_key_type = 'CKK_AES'
        self.mkek_length = plugin_conf.mkek_length
        self.mkek_label = plugin_conf.mkek_label
        self.hmac_label = plugin_conf.hmac_label
        self.hmac_key_type = plugin_conf.hmac_key_type
        self.hmac_keygen_mechanism = plugin_conf.hmac_keygen_mechanism
        self.pkek_length = plugin_conf.pkek_length
        self.pkek_cache_ttl = plugin_conf.pkek_cache_ttl
        self.pkek_cache_limit = plugin_conf.pkek_cache_limit
        self._configure_object_cache()
    def get_plugin_name(self):
        """Return the user-facing plugin name from configuration."""
        return self.conf.p11_crypto_plugin.plugin_name
    def encrypt(self, encrypt_dto, kek_meta_dto, project_id):
        """Encrypt a secret under the project KEK (with PKCS#11-retry)."""
        return self._call_pkcs11(self._encrypt, encrypt_dto, kek_meta_dto,
                                 project_id)
    def decrypt(self, decrypt_dto, kek_meta_dto, kek_meta_extended,
                project_id):
        """Decrypt a secret under the project KEK (with PKCS#11-retry)."""
        return self._call_pkcs11(self._decrypt, decrypt_dto, kek_meta_dto,
                                 kek_meta_extended, project_id)
    def bind_kek_metadata(self, kek_meta_dto):
        """Ensure the DTO carries a wrapped project KEK (with PKCS#11-retry)."""
        return self._call_pkcs11(self._bind_kek_metadata, kek_meta_dto)
    def generate_symmetric(self, generate_dto, kek_meta_dto, project_id):
        """Generate and store an encrypted symmetric key (with PKCS#11-retry)."""
        return self._call_pkcs11(self._generate_symmetric, generate_dto,
                                 kek_meta_dto, project_id)
    def generate_asymmetric(self, generate_dto, kek_meta_dto, project_id):
        raise NotImplementedError(u._("Feature not implemented for PKCS11"))
    def supports(self, type_enum, algorithm=None, bit_length=None, mode=None):
        # Only symmetric operations are supported by this plugin.
        if type_enum == plugin.PluginSupportTypes.ENCRYPT_DECRYPT:
            return True
        elif type_enum == plugin.PluginSupportTypes.SYMMETRIC_KEY_GENERATION:
            return True
        elif type_enum == plugin.PluginSupportTypes.ASYMMETRIC_KEY_GENERATION:
            return False
        else:
            return False
    def _call_pkcs11(self, func, *args, **kwargs):
        # Wrap pkcs11 calls to enable a single retry when exceptions are raised
        # that can be fixed by reinitializing the pkcs11 library
        try:
            return func(*args, **kwargs)
        except (exception.PKCS11Exception) as pe:
            LOG.warning("Reinitializing PKCS#11 library: %s", pe)
            self._reinitialize_pkcs11()
            # Second (and final) attempt after a full library reload.
            return func(*args, **kwargs)
    def _encrypt(self, encrypt_dto, kek_meta_dto, project_id):
        kek = self._load_kek_from_meta_dto(kek_meta_dto)
        try:
            session = self._get_session()
            ct_data = self.pkcs11.encrypt(
                kek, encrypt_dto.unencrypted, session
            )
        finally:
            # 'session' may be unbound if _get_session itself raised.
            if 'session' in locals():
                self._return_session(session)
        # IV and mechanism are persisted alongside the ciphertext so
        # _decrypt can reconstruct the operation.
        kek_meta_extended = json_dumps_compact({
            'iv': base64.b64encode(ct_data['iv']),
            'mechanism': self.encryption_mechanism
        })
        return plugin.ResponseDTO(ct_data['ct'], kek_meta_extended)
    def _decrypt(self, decrypt_dto, kek_meta_dto, kek_meta_extended,
                 project_id):
        kek = self._load_kek_from_meta_dto(kek_meta_dto)
        meta_extended = json.loads(kek_meta_extended)
        iv = base64.b64decode(meta_extended['iv'])
        # Use the mechanism recorded at encryption time, not the current
        # config, so old secrets stay decryptable after a config change.
        mech = meta_extended['mechanism']
        try:
            session = self._get_session()
            pt_data = self.pkcs11.decrypt(
                mech, kek, iv, decrypt_dto.encrypted, session
            )
        finally:
            if 'session' in locals():
                self._return_session(session)
        return pt_data
    def _bind_kek_metadata(self, kek_meta_dto):
        if not kek_meta_dto.plugin_meta:
            # Generate wrapped kek and jsonify
            wkek = self._generate_wrapped_kek(
                self.pkek_length, kek_meta_dto.kek_label
            )
            # Persisted by Barbican
            kek_meta_dto.plugin_meta = json_dumps_compact(wkek)
            kek_meta_dto.algorithm = 'AES'
            kek_meta_dto.bit_length = self.pkek_length * 8
            kek_meta_dto.mode = 'CBC'
        return kek_meta_dto
    def _generate_symmetric(self, generate_dto, kek_meta_dto, project_id):
        kek = self._load_kek_from_meta_dto(kek_meta_dto)
        byte_length = int(generate_dto.bit_length) // 8
        try:
            session = self._get_session()
            # Key material comes from the HSM RNG and is stored encrypted.
            buf = self.pkcs11.generate_random(byte_length, session)
            ct_data = self.pkcs11.encrypt(kek, buf, session)
        finally:
            if 'session' in locals():
                self._return_session(session)
        kek_meta_extended = json_dumps_compact(
            {'iv': base64.b64encode(ct_data['iv']),
             'mechanism': self.encryption_mechanism}
        )
        return plugin.ResponseDTO(ct_data['ct'], kek_meta_extended)
    def _configure_object_cache(self):
        """(Re)create the master-key and project-KEK caches and their locks."""
        # Master Key cache
        self.mk_cache = {}
        self.mk_cache_lock = threading.RLock()
        # Project KEK cache
        self.pkek_cache = collections.OrderedDict()
        self.pkek_cache_lock = threading.RLock()
        # Session for object caching
        self.caching_session = self._get_session()
        self.caching_session_lock = threading.RLock()
        # Cache master keys
        self._get_master_key(self.mkek_key_type, self.mkek_label)
        self._get_master_key(self.hmac_key_type, self.hmac_label)
    def _pkek_cache_add(self, kek, label):
        """Insert *kek* under *label*, evicting expired/oldest entries if full."""
        with self.pkek_cache_lock:
            if label in self.pkek_cache:
                raise ValueError('{0} is already in the cache'.format(label))
            now = int(time.time())
            ckek = CachedKEK(kek, now + self.pkek_cache_ttl)
            if len(self.pkek_cache) >= self.pkek_cache_limit:
                with self.caching_session_lock:
                    session = self.caching_session
                    self._pkek_cache_expire(now, session)
                    # Test again if call above didn't remove any items
                    if len(self.pkek_cache) >= self.pkek_cache_limit:
                        # Evict the oldest entry (OrderedDict FIFO order) and
                        # destroy its HSM object handle.
                        (l, k) = self.pkek_cache.popitem(last=False)
                        self.pkcs11.destroy_object(k.kek, session)
            self.pkek_cache[label] = ckek
    def _pkek_cache_get(self, label, default=None):
        """Return the cached KEK for *label*, dropping it if past its TTL."""
        kek = default
        with self.pkek_cache_lock:
            ckek = self.pkek_cache.get(label)
            if ckek is not None:
                if int(time.time()) < ckek.expires:
                    kek = ckek.kek
                else:
                    # Expired: destroy the HSM handle and forget the entry.
                    with self.caching_session_lock:
                        self.pkcs11.destroy_object(ckek.kek,
                                                   self.caching_session)
                        del self.pkek_cache[label]
        return kek
    def _pkek_cache_expire(self, now, session):
        # Look for expired items, starting from oldest
        for (label, kek) in self.pkek_cache.items():
            if now >= kek.expires:
                self.pkcs11.destroy_object(kek.kek, session)
                del self.pkek_cache[label]
            else:
                # Insertion order implies expiry order, so we can stop early.
                break
    def _create_pkcs11(self, plugin_conf, ffi=None):
        """Construct a PKCS11 wrapper from config, optionally seeding its RNG."""
        seed_random_buffer = None
        if plugin_conf.seed_file:
            with open(plugin_conf.seed_file, 'rb') as f:
                seed_random_buffer = f.read(plugin_conf.seed_length)
        return pkcs11.PKCS11(
            library_path=plugin_conf.library_path,
            login_passphrase=plugin_conf.login,
            rw_session=plugin_conf.rw_session,
            slot_id=plugin_conf.slot_id,
            encryption_mechanism=plugin_conf.encryption_mechanism,
            ffi=ffi,
            seed_random_buffer=seed_random_buffer,
            generate_iv=plugin_conf.aes_gcm_generate_iv,
        )
    def _reinitialize_pkcs11(self):
        """Tear down the library and all caches, then rebuild from scratch."""
        self.pkcs11.finalize()
        self.pkcs11 = None
        # All cached handles belong to the dead library; drop them.
        with self.caching_session_lock:
            self.caching_session = None
        with self.pkek_cache_lock:
            self.pkek_cache.clear()
        with self.mk_cache_lock:
            self.mk_cache.clear()
        self.pkcs11 = self._create_pkcs11(self.conf.p11_crypto_plugin)
        self._configure_object_cache()
    def _get_session(self):
        return self.pkcs11.get_session()
    def _return_session(self, session):
        self.pkcs11.return_session(session)
    def _get_master_key(self, key_type, label):
        """Return (and memoize) the HSM handle for the master key *label*."""
        with self.mk_cache_lock:
            session = self.caching_session
            key = self.mk_cache.get(label, None)
            if key is None:
                with self.caching_session_lock:
                    key = self.pkcs11.get_key_handle(key_type, label, session)
                if key is None:
                    raise exception.P11CryptoKeyHandleException(
                        u._("Could not find key labeled {0}").format(label)
                    )
                self.mk_cache[label] = key
        return key
    def _load_kek_from_meta_dto(self, kek_meta_dto):
        """Unwrap the project KEK described by the DTO's persisted plugin_meta."""
        meta = json.loads(kek_meta_dto.plugin_meta)
        kek = self._load_kek(
            kek_meta_dto.kek_label, meta['iv'], meta['wrapped_key'],
            meta['hmac'], meta['mkek_label'], meta['hmac_label']
        )
        return kek
    def _load_kek(self, key_label, iv, wrapped_key, hmac,
                  mkek_label, hmac_label):
        """Return the project KEK handle, unwrapping into the cache on a miss.

        The HMAC over iv+wrapped_key is verified with the master HMAC key
        before the KEK is unwrapped under the MKEK.
        """
        with self.pkek_cache_lock:
            kek = self._pkek_cache_get(key_label)
            if kek is None:
                # Decode data
                iv = base64.b64decode(iv)
                wrapped_key = base64.b64decode(wrapped_key)
                hmac = base64.b64decode(hmac)
                kek_data = iv + wrapped_key
                with self.caching_session_lock:
                    session = self.caching_session
                    # Get master keys
                    mkek = self._get_master_key(self.mkek_key_type, mkek_label)
                    mkhk = self._get_master_key(self.hmac_key_type, hmac_label)
                    # Verify HMAC
                    self.pkcs11.verify_hmac(mkhk, hmac, kek_data, session)
                    # Unwrap KEK
                    kek = self.pkcs11.unwrap_key(mkek, iv, wrapped_key,
                                                 session)
                self._pkek_cache_add(kek, key_label)
        return kek
    def _generate_wrapped_kek(self, key_length, key_label):
        """Create a fresh project KEK; return its MKEK-wrapped, HMAC'd form."""
        with self.caching_session_lock:
            session = self.caching_session
            # Get master keys
            mkek = self._get_master_key(self.mkek_key_type, self.mkek_label)
            mkhk = self._get_master_key(self.hmac_key_type, self.hmac_label)
            # Generate KEK
            kek = self.pkcs11.generate_key(
                'CKK_AES', key_length, 'CKM_AES_KEY_GEN', session, encrypt=True
            )
            # Wrap KEK
            wkek = self.pkcs11.wrap_key(mkek, kek, session)
            # HMAC Wrapped KEK
            wkek_data = wkek['iv'] + wkek['wrapped_key']
            wkek_hmac = self.pkcs11.compute_hmac(mkhk, wkek_data, session)
        # Cache KEK
        self._pkek_cache_add(kek, key_label)
        return {
            'iv': base64.b64encode(wkek['iv']),
            'wrapped_key': base64.b64encode(wkek['wrapped_key']),
            'hmac': base64.b64encode(wkek_hmac),
            'mkek_label': self.mkek_label,
            'hmac_label': self.hmac_label
        }
    def _generate_mkek(self, key_length, key_label):
        """Generate a new master KEK on the HSM; fails if the label exists."""
        with self.mk_cache_lock, self.caching_session_lock:
            session = self.caching_session
            # NOTE(review): unlike _get_master_key, no key_type argument is
            # passed to get_key_handle here -- confirm the helper signature.
            if key_label in self.mk_cache or \
                    self.pkcs11.get_key_handle(key_label, session) is not None:
                raise exception.P11CryptoPluginKeyException(
                    u._("A master key with that label already exists")
                )
            mk = self.pkcs11.generate_key(
                'CKK_AES', key_length, 'CKM_AES_KEY_GEN', session,
                key_label=key_label,
                encrypt=True, wrap=True, master_key=True
            )
            self.mk_cache[key_label] = mk
        return mk
    def _generate_mkhk(self, key_length, key_label):
        """Generate a new master HMAC key on the HSM; fails if the label exists."""
        with self.mk_cache_lock, self.caching_session_lock:
            session = self.caching_session
            if key_label in self.mk_cache or \
                    self.pkcs11.get_key_handle(key_label, session, 'hmac') \
                    is not None:
                raise exception.P11CryptoPluginKeyException(
                    u._("A master key with that label already exists")
                )
            mk = self.pkcs11.generate_key(
                self.hmac_key_type, key_length, self.hmac_keygen_mechanism,
                session, key_label, sign=True, master_key=True
            )
            self.mk_cache[key_label] = mk
        return mk
| 38.535147 | 79 | 0.611157 |
d6f76d08aed7db93ec7b993ebf65c84f93bca2c2 | 37,528 | py | Python | core/src/autogluon/core/models/ensemble/bagged_ensemble_model.py | songqiang/autogluon | 529d7cc65fad411622072aa0349215a15e1e901c | [
"Apache-2.0"
] | 1 | 2021-03-25T09:35:02.000Z | 2021-03-25T09:35:02.000Z | core/src/autogluon/core/models/ensemble/bagged_ensemble_model.py | songqiang/autogluon | 529d7cc65fad411622072aa0349215a15e1e901c | [
"Apache-2.0"
] | null | null | null | core/src/autogluon/core/models/ensemble/bagged_ensemble_model.py | songqiang/autogluon | 529d7cc65fad411622072aa0349215a15e1e901c | [
"Apache-2.0"
] | null | null | null | import copy
import logging
import os
import time
from collections import Counter
from statistics import mean
import numpy as np
import pandas as pd
from .fold_fitting_strategy import AbstractFoldFittingStrategy, SequentialLocalFoldFittingStrategy
from ..abstract.abstract_model import AbstractModel
from ...constants import MULTICLASS, REGRESSION, SOFTCLASS, QUANTILE, REFIT_FULL_SUFFIX
from ...utils.exceptions import TimeLimitExceeded
from ...utils.loaders import load_pkl
from ...utils.savers import save_pkl
from ...utils.utils import generate_kfold, _compute_fi_with_stddev
# Module-level logger for the bagged-ensemble implementation.
logger = logging.getLogger(__name__)
# TODO: Add metadata object with info like score on each model, train time on each model, etc.
class BaggedEnsembleModel(AbstractModel):
"""
Bagged ensemble meta-model which fits a given model multiple times across different splits of the training data.
For certain child models such as KNN, this may only train a single model and instead rely on the child model to generate out-of-fold predictions.
"""
_oof_filename = 'oof.pkl'
def __init__(self, model_base: AbstractModel, random_state=0, **kwargs):
self.model_base = model_base
self._child_type = type(self.model_base)
self.models = []
self._oof_pred_proba = None
self._oof_pred_model_repeats = None
self._n_repeats = 0 # Number of n_repeats with at least 1 model fit, if kfold=5 and 8 models have been fit, _n_repeats is 2
self._n_repeats_finished = 0 # Number of n_repeats finished, if kfold=5 and 8 models have been fit, _n_repeats_finished is 1
self._k_fold_end = 0 # Number of models fit in current n_repeat (0 if completed), if kfold=5 and 8 models have been fit, _k_fold_end is 3
self._k = None # k models per n_repeat, equivalent to kfold value
self._k_per_n_repeat = [] # k-fold used for each n_repeat. == [5, 10, 3] if first kfold was 5, second was 10, and third was 3
self._random_state = random_state
self.low_memory = True
self._bagged_mode = None
# _child_oof currently is only set to True for KNN models, that are capable of LOO prediction generation to avoid needing bagging.
# TODO: Consider moving `_child_oof` logic to a separate class / refactor OOF logic.
# FIXME: Avoid unnecessary refit during refit_full on `_child_oof=True` models, just re-use the original model.
self._child_oof = False # Whether the OOF preds were taken from a single child model (Assumes child can produce OOF preds without bagging).
eval_metric = kwargs.pop('eval_metric', self.model_base.eval_metric)
stopping_metric = kwargs.pop('stopping_metric', self.model_base.stopping_metric) # FIXME: Has to be moved to post-model_base initialization, otherwise could be misaligned.
super().__init__(problem_type=self.model_base.problem_type, eval_metric=eval_metric, stopping_metric=stopping_metric, **kwargs)
def _set_default_params(self):
default_params = {
# 'use_child_oof': False, # [Advanced] Whether to defer to child model for OOF preds and only train a single child.
'save_bag_folds': True,
# 'refit_folds': False, # [Advanced, Experimental] Whether to refit bags immediately to a refit_full model in a single .fit call.
}
for param, val in default_params.items():
self._set_default_param_value(param, val)
super()._set_default_params()
def _get_default_auxiliary_params(self) -> dict:
default_auxiliary_params = super()._get_default_auxiliary_params()
extra_auxiliary_params = dict(
drop_unique=False, # TODO: Get the value from child instead
)
default_auxiliary_params.update(extra_auxiliary_params)
return default_auxiliary_params
def is_valid(self):
return self.is_fit() and (self._n_repeats == self._n_repeats_finished)
def can_infer(self):
return self.is_fit() and self.params.get('save_bag_folds', True)
def is_stratified(self):
if self.problem_type in [REGRESSION, QUANTILE, SOFTCLASS]:
return False
else:
return True
def is_fit(self):
return len(self.models) != 0
def can_fit(self) -> bool:
return not self.is_fit() or self._bagged_mode
def is_valid_oof(self):
return self.is_fit() and (self._child_oof or self._bagged_mode)
def get_oof_pred_proba(self, **kwargs):
# TODO: Require is_valid == True (add option param to ignore is_valid)
return self._oof_pred_proba_func(self._oof_pred_proba, self._oof_pred_model_repeats)
@staticmethod
def _oof_pred_proba_func(oof_pred_proba, oof_pred_model_repeats):
oof_pred_model_repeats_without_0 = np.where(oof_pred_model_repeats == 0, 1, oof_pred_model_repeats)
if oof_pred_proba.ndim == 2:
oof_pred_model_repeats_without_0 = oof_pred_model_repeats_without_0[:, None]
return oof_pred_proba / oof_pred_model_repeats_without_0
def preprocess(self, X, preprocess_nonadaptive=True, model=None, **kwargs):
if preprocess_nonadaptive:
if model is None:
if not self.models:
return X
model = self.models[0]
model = self.load_child(model)
return model.preprocess(X, preprocess_stateful=False)
else:
return X
def _fit(self,
X,
y,
X_val=None,
y_val=None,
k_fold=5,
k_fold_start=0,
k_fold_end=None,
n_repeats=1,
n_repeat_start=0,
**kwargs):
use_child_oof = self.params.get('use_child_oof', False)
if use_child_oof:
if self.is_fit():
# TODO: We may want to throw an exception instead and avoid calling fit more than once
return self
k_fold = 1
k_fold_end = None
if k_fold < 1:
k_fold = 1
if k_fold_end is None:
k_fold_end = k_fold
if self._oof_pred_proba is None and (k_fold_start != 0 or n_repeat_start != 0):
self._load_oof()
if n_repeat_start != self._n_repeats_finished:
raise ValueError(f'n_repeat_start must equal self._n_repeats_finished, values: ({n_repeat_start}, {self._n_repeats_finished})')
if n_repeats <= n_repeat_start:
raise ValueError(f'n_repeats must be greater than n_repeat_start, values: ({n_repeats}, {n_repeat_start})')
if k_fold_start != self._k_fold_end:
raise ValueError(f'k_fold_start must equal previous k_fold_end, values: ({k_fold_start}, {self._k_fold_end})')
if k_fold_start >= k_fold_end:
# TODO: Remove this limitation if n_repeats > 1
raise ValueError(f'k_fold_end must be greater than k_fold_start, values: ({k_fold_end}, {k_fold_start})')
if (n_repeats - n_repeat_start) > 1 and k_fold_end != k_fold:
# TODO: Remove this limitation
raise ValueError(f'k_fold_end must equal k_fold when (n_repeats - n_repeat_start) > 1, values: ({k_fold_end}, {k_fold})')
if self._k is not None and self._k != k_fold:
raise ValueError(f'k_fold must equal previously fit k_fold value for the current n_repeat, values: (({k_fold}, {self._k})')
model_base = self._get_model_base()
model_base.rename(name='')
kwargs['feature_metadata'] = self.feature_metadata
kwargs['num_classes'] = self.num_classes # TODO: maybe don't pass num_classes to children
if self.model_base is not None:
self.save_model_base(self.model_base)
self.model_base = None
save_bag_folds = self.params.get('save_bag_folds', True)
if k_fold == 1:
self._fit_single(X=X, y=y, model_base=model_base, use_child_oof=use_child_oof, **kwargs)
return self
else:
refit_folds = self.params.get('refit_folds', False)
if refit_folds:
save_bag_folds = False
if kwargs.get('time_limit', None) is not None:
fold_start = n_repeat_start * k_fold + k_fold_start
fold_end = (n_repeats - 1) * k_fold + k_fold_end
folds_to_fit = fold_end - fold_start
# Reserve time for final refit model
kwargs['time_limit'] = kwargs['time_limit'] * folds_to_fit / (folds_to_fit + 1.2)
self._fit_folds(X=X, y=y, model_base=model_base, k_fold=k_fold, k_fold_start=k_fold_start, k_fold_end=k_fold_end,
n_repeats=n_repeats, n_repeat_start=n_repeat_start, save_folds=save_bag_folds, **kwargs)
# FIXME: Don't save folds except for refit
# FIXME: Cleanup self
# FIXME: Don't add `_FULL` to name
if refit_folds:
refit_template = self.convert_to_refit_full_template()
refit_template.params['use_child_oof'] = False
kwargs['time_limit'] = None
refit_template.fit(X=X, y=y, k_fold=1, **kwargs)
refit_template._oof_pred_proba = self._oof_pred_proba
refit_template._oof_pred_model_repeats = self._oof_pred_model_repeats
refit_template._child_oof = True
refit_template.fit_time += self.fit_time + self.predict_time
return refit_template
else:
return self
def predict_proba(self, X, normalize=None, **kwargs):
model = self.load_child(self.models[0])
X = self.preprocess(X, model=model, **kwargs)
pred_proba = model.predict_proba(X=X, preprocess_nonadaptive=False, normalize=normalize)
for model in self.models[1:]:
model = self.load_child(model)
pred_proba += model.predict_proba(X=X, preprocess_nonadaptive=False, normalize=normalize)
pred_proba = pred_proba / len(self.models)
return pred_proba
    def _predict_proba(self, X, normalize=False, **kwargs):
        """Internal hook; delegates to predict_proba with normalization off by default."""
        return self.predict_proba(X=X, normalize=normalize, **kwargs)
    def score_with_oof(self, y, sample_weight=None):
        """Score the model on its out-of-fold predictions.

        Rows never covered by any child (repeat count == 0) are excluded.
        """
        self._load_oof()
        valid_indices = self._oof_pred_model_repeats > 0
        y = y[valid_indices]
        y_pred_proba = self.get_oof_pred_proba()[valid_indices]
        if sample_weight is not None:
            sample_weight = sample_weight[valid_indices]
        return self.score_with_y_pred_proba(y=y, y_pred_proba=y_pred_proba, sample_weight=sample_weight)
    def _fit_single(self, X, y, model_base, use_child_oof, time_limit=None, **kwargs):
        """Fit a single child on all of X (no bagging).

        OOF predictions are taken either from the child's own OOF mechanism
        (use_child_oof) or, as a fallback, from in-sample predictions
        (which will be overfit).
        """
        if self.is_fit():
            raise AssertionError('Model is already fit.')
        if self._n_repeats != 0:
            raise ValueError(f'n_repeats must equal 0 when fitting a single model with k_fold == 1, value: {self._n_repeats}')
        model_base.name = f'{model_base.name}S1F1'
        model_base.set_contexts(path_context=self.path + model_base.name + os.path.sep)
        time_start_fit = time.time()
        model_base.fit(X=X, y=y, time_limit=time_limit, **kwargs)
        model_base.fit_time = time.time() - time_start_fit
        model_base.predict_time = None
        X_len = len(X)
        # Check if pred_proba is going to take too long
        if time_limit is not None and X_len >= 10000:
            max_allowed_time = time_limit * 1.3  # allow some buffer
            time_left = max(
                max_allowed_time - model_base.fit_time,
                time_limit * 0.1,  # At least 10% of time_limit
                10,  # At least 10 seconds
            )
            # Sample at most 500 rows to estimate prediction time of all rows
            # TODO: Consider moving this into end of abstract model fit for all models.
            # Currently this only fixes problem when in bagged mode, if not bagging, then inference could still be problamatic
            n_sample = min(500, round(X_len * 0.1))
            frac = n_sample / X_len
            X_sample = X.sample(n=n_sample)
            time_start_predict = time.time()
            model_base.predict_proba(X_sample)
            time_predict_frac = time.time() - time_start_predict
            # Extrapolate sample prediction time to the full dataset.
            time_predict_estimate = time_predict_frac / frac
            logger.log(15, f'\t{round(time_predict_estimate, 2)}s\t= Estimated out-of-fold prediction time...')
            if time_predict_estimate > time_left:
                logger.warning(f'\tNot enough time to generate out-of-fold predictions for model. Estimated time required was {round(time_predict_estimate, 2)}s compared to {round(time_left, 2)}s of available time.')
                raise TimeLimitExceeded
        if use_child_oof:
            logger.log(15, '\t`use_child_oof` was specified for this model. It will function similarly to a bagged model, but will only fit one child model.')
            time_start_predict = time.time()
            if model_base._get_tags().get('valid_oof', False):
                # Child can produce genuine OOF predictions itself.
                self._oof_pred_proba = model_base.get_oof_pred_proba(X=X, y=y)
            else:
                logger.warning('\tWARNING: `use_child_oof` was specified but child model does not have a dedicated `get_oof_pred_proba` method. This model may have heavily overfit validation scores.')
                self._oof_pred_proba = model_base.predict_proba(X=X)
            self._child_oof = True
            model_base.predict_time = time.time() - time_start_predict
            model_base.val_score = model_base.score_with_y_pred_proba(y=y, y_pred_proba=self._oof_pred_proba)
        else:
            self._oof_pred_proba = model_base.predict_proba(X=X)  # TODO: Cheater value, will be overfit to valid set
        self._oof_pred_model_repeats = np.ones(shape=len(X), dtype=np.uint8)
        self._n_repeats = 1
        self._n_repeats_finished = 1
        self._k_per_n_repeat = [1]
        self._bagged_mode = False
        model_base.reduce_memory_size(remove_fit=True, remove_info=False, requires_save=True)
        # save_bag_folds=False discards the trained child artifact; only metadata remains.
        if not self.params.get('save_bag_folds', True):
            model_base.model = None
        if self.low_memory:
            self.save_child(model_base, verbose=False)
            self.models = [model_base.name]
        else:
            self.models = [model_base]
        self._add_child_times_to_bag(model=model_base)
    def _fit_folds(self,
                   X,
                   y,
                   model_base,
                   k_fold=5,
                   k_fold_start=0,
                   k_fold_end=None,
                   n_repeats=1,
                   n_repeat_start=0,
                   time_limit=None,
                   sample_weight=None,
                   save_folds=True,
                   **kwargs):
        """Fit k_fold children per repeat via a fold-fitting strategy and
        accumulate their out-of-fold predictions into the bag's OOF state.
        """
        fold_fitting_strategy = self.params.get('fold_fitting_strategy', SequentialLocalFoldFittingStrategy)
        # TODO: Preprocess data here instead of repeatedly
        time_start = time.time()
        kfolds = generate_kfold(X=X, y=y, n_splits=k_fold, stratified=self.is_stratified(), random_state=self._random_state, n_repeats=n_repeats)
        oof_pred_proba, oof_pred_model_repeats = self._construct_empty_oof(X=X, y=y)
        models = []
        # Absolute fold indices across all repeats: repeat j covers [j*k_fold, (j+1)*k_fold).
        fold_start = n_repeat_start * k_fold + k_fold_start
        fold_end = (n_repeats - 1) * k_fold + k_fold_end
        folds_to_fit = fold_end - fold_start
        # noinspection PyCallingNonCallable
        fold_fitting_strategy: AbstractFoldFittingStrategy = fold_fitting_strategy(
            self, X, y, sample_weight, time_limit, time_start, models, oof_pred_proba, oof_pred_model_repeats, save_folds=save_folds)
        for j in range(n_repeat_start, n_repeats):  # For each n_repeat
            cur_repeat_count = j - n_repeat_start
            fold_start_n_repeat = fold_start + cur_repeat_count * k_fold
            fold_end_n_repeat = min(fold_start_n_repeat + k_fold, fold_end)
            for i in range(fold_start_n_repeat, fold_end_n_repeat):  # For each fold
                fold_num_in_repeat = i - (j * k_fold)  # The fold in the current repeat set (first fold in set = 0)
                fold_ctx = dict(
                    model_name_suffix=f'S{j + 1}F{fold_num_in_repeat + 1}',  # S5F3 = 3rd fold of the 5th repeat set
                    fold=kfolds[i],
                    # NOTE(review): as written this is True for every fold EXCEPT the final one
                    # (i != fold_end - 1); the name suggests the inverse. Confirm against the
                    # fold-fitting strategy's usage before changing.
                    is_last_fold=i != (fold_end - 1),
                    folds_to_fit=folds_to_fit,
                    folds_finished=i - fold_start,
                    folds_left=fold_end - i,
                )
                fold_fitting_strategy.schedule_fold_model_fit(model_base, fold_ctx, kwargs)
            # Record the fold count when this repeat set reached its final fold.
            if (fold_end_n_repeat != fold_end) or (k_fold == k_fold_end):
                self._k_per_n_repeat.append(k_fold)
        fold_fitting_strategy.after_all_folds_scheduled()
        self.models += models
        self._bagged_mode = True
        if self._oof_pred_proba is None:
            self._oof_pred_proba = oof_pred_proba
            self._oof_pred_model_repeats = oof_pred_model_repeats
        else:
            # Incremental fit: accumulate onto previously computed OOF state.
            self._oof_pred_proba += oof_pred_proba
            self._oof_pred_model_repeats += oof_pred_model_repeats
        self._n_repeats = n_repeats
        if k_fold == k_fold_end:
            # All folds of the final repeat are done; reset partial-fit markers.
            self._k = None
            self._k_fold_end = 0
            self._n_repeats_finished = self._n_repeats
        else:
            self._k = k_fold
            self._k_fold_end = k_fold_end
            self._n_repeats_finished = self._n_repeats - 1
# TODO: Augment to generate OOF after shuffling each column in X (Batching), this is the fastest way.
# TODO: Reduce logging clutter during OOF importance calculation (Currently logs separately for each child)
# Generates OOF predictions from pre-trained bagged models, assuming X and y are in the same row order as used in .fit(X, y)
    def compute_feature_importance(self,
                                   X,
                                   y,
                                   features=None,
                                   silent=False,
                                   time_limit=None,
                                   is_oof=False,
                                   **kwargs) -> pd.DataFrame:
        """Permutation feature importance, optionally computed on OOF data.

        With is_oof=True each child is evaluated only on its own held-out fold
        rows (requires true bagged mode; X/y must be in the same row order as
        used in fit) and the per-child results are merged with stddev.
        """
        if features is None:
            # FIXME: use FULL features (children can have different features)
            features = self.load_child(model=self.models[0]).features
        if not is_oof:
            return super().compute_feature_importance(X, y, features=features, time_limit=time_limit, silent=silent, **kwargs)
        fi_fold_list = []
        model_index = 0
        num_children = len(self.models)
        if time_limit is not None:
            # Split the budget evenly across children.
            time_limit_per_child = time_limit / num_children
        else:
            time_limit_per_child = None
        if not silent:
            logging_message = f'Computing feature importance via permutation shuffling for {len(features)} features using out-of-fold (OOF) data aggregated across {num_children} child models...'
            if time_limit is not None:
                logging_message = f'{logging_message} Time limit: {time_limit}s...'
            logger.log(20, logging_message)
        time_start = time.time()
        early_stop = False
        children_completed = 0
        log_final_suffix = ''
        for n_repeat, k in enumerate(self._k_per_n_repeat):
            if is_oof:
                if self._child_oof or not self._bagged_mode:
                    raise AssertionError('Model trained with no validation data cannot get feature importances on training data, please specify new test data to compute feature importances (model=%s)' % self.name)
                # Re-derive the exact fold splits used during fit (same random_state).
                kfolds = generate_kfold(X=X, y=y, n_splits=k, stratified=self.is_stratified(), random_state=self._random_state, n_repeats=n_repeat + 1)
                cur_kfolds = kfolds[n_repeat * k:(n_repeat + 1) * k]
            else:
                # Non-OOF: every child scores every row.
                cur_kfolds = [(None, list(range(len(X))))] * k
            for i, fold in enumerate(cur_kfolds):
                _, test_index = fold
                model = self.load_child(self.models[model_index + i])
                fi_fold = model.compute_feature_importance(X=X.iloc[test_index, :], y=y.iloc[test_index], features=features, time_limit=time_limit_per_child,
                                                           silent=silent, log_prefix='\t', importance_as_list=True, **kwargs)
                fi_fold_list.append(fi_fold)
                children_completed += 1
                # Early stop if the remaining budget likely can't cover another child.
                if time_limit is not None and children_completed != num_children:
                    time_now = time.time()
                    time_left = time_limit - (time_now - time_start)
                    time_child_average = (time_now - time_start) / children_completed
                    if time_left < (time_child_average * 1.1):
                        log_final_suffix = f' (Early stopping due to lack of time...)'
                        early_stop = True
                        break
            if early_stop:
                break
            model_index += k
        # TODO: DON'T THROW AWAY SAMPLES! USE LARGER N
        fi_list_dict = dict()
        for val in fi_fold_list:
            val = val['importance'].to_dict()  # TODO: Don't throw away stddev information of children
            for key in val:
                if key not in fi_list_dict:
                    fi_list_dict[key] = []
                fi_list_dict[key] += val[key]
        fi_df = _compute_fi_with_stddev(fi_list_dict)
        if not silent:
            logger.log(20, f'\t{round(time.time() - time_start, 2)}s\t= Actual runtime (Completed {children_completed} of {num_children} children){log_final_suffix}')
        return fi_df
def load_child(self, model, verbose=False) -> AbstractModel:
if isinstance(model, str):
child_path = self.create_contexts(self.path + model + os.path.sep)
return self._child_type.load(path=child_path, verbose=verbose)
else:
return model
    def save_child(self, model, verbose=False):
        """Save a child model (name or object) to its location under the bag's path."""
        child = self.load_child(model)
        child.set_contexts(self.path + child.name + os.path.sep)
        child.save(verbose=verbose)
# TODO: Multiply epochs/n_iterations by some value (such as 1.1) to account for having more training data than bagged models
    def convert_to_refit_full_template(self):
        """Build an unfit bag template for refitting on all data.

        The returned model is the same class with a child template carrying
        compressed hyperparameters from the fit children and a '_FULL' name.
        """
        init_args = self._get_init_args()
        init_args['hyperparameters']['save_bag_folds'] = True  # refit full models must save folds
        init_args['model_base'] = self.convert_to_refitfull_template_child()
        init_args['name'] = init_args['name'] + REFIT_FULL_SUFFIX
        model_full_template = self.__class__(**init_args)
        return model_full_template
    def convert_to_refitfull_template_child(self):
        """Return an unfit copy of the child template carrying compressed (averaged/voted) params."""
        compressed_params = self._get_compressed_params()
        child_compressed = copy.deepcopy(self._get_model_base())
        child_compressed.feature_metadata = self.feature_metadata  # TODO: Don't pass this here
        child_compressed.params = compressed_params
        return child_compressed
    def _get_init_args(self):
        """Return constructor kwargs able to recreate this bag.

        'problem_type' is dropped because it is derived from the child template.
        """
        init_args = dict(
            model_base=self._get_model_base(),
            random_state=self._random_state,
        )
        init_args.update(super()._get_init_args())
        init_args.pop('problem_type')
        return init_args
def _get_compressed_params(self, model_params_list=None):
if model_params_list is None:
model_params_list = [
self.load_child(child).get_trained_params()
for child in self.models
]
model_params_compressed = dict()
for param in model_params_list[0].keys():
model_param_vals = [model_params[param] for model_params in model_params_list]
if all(isinstance(val, bool) for val in model_param_vals):
counter = Counter(model_param_vals)
compressed_val = counter.most_common(1)[0][0]
elif all(isinstance(val, int) for val in model_param_vals):
compressed_val = round(mean(model_param_vals))
elif all(isinstance(val, float) for val in model_param_vals):
compressed_val = mean(model_param_vals)
else:
try:
counter = Counter(model_param_vals)
compressed_val = counter.most_common(1)[0][0]
except TypeError:
compressed_val = model_param_vals[0]
model_params_compressed[param] = compressed_val
return model_params_compressed
    def _get_compressed_params_trained(self):
        """Compress the children's post-fit (trained) params into one representative dict."""
        model_params_list = [
            self.load_child(child).params_trained
            for child in self.models
        ]
        return self._get_compressed_params(model_params_list=model_params_list)
def _get_model_base(self):
if self.model_base is None:
return self.load_model_base()
else:
return self.model_base
def _add_child_times_to_bag(self, model):
if self.fit_time is None:
self.fit_time = model.fit_time
else:
self.fit_time += model.fit_time
if self.predict_time is None:
self.predict_time = model.predict_time
else:
self.predict_time += model.predict_time
    @classmethod
    def load(cls, path: str, reset_paths=True, low_memory=True, load_oof=False, verbose=True):
        """Load a bagged model from disk.

        low_memory=False additionally loads all child models into memory;
        load_oof=True additionally loads stored OOF predictions.
        """
        model = super().load(path=path, reset_paths=reset_paths, verbose=verbose)
        if not low_memory:
            model.persist_child_models(reset_paths=reset_paths)
        if load_oof:
            model._load_oof()
        return model
    @classmethod
    def load_oof(cls, path, verbose=True):
        """Load and return averaged OOF predictions for the model stored at `path`.

        Tries the standalone OOF pickle first; falls back to loading the full
        model when the pickle is absent.
        """
        try:
            oof = load_pkl.load(path=path + 'utils' + os.path.sep + cls._oof_filename, verbose=verbose)
            oof_pred_proba = oof['_oof_pred_proba']
            oof_pred_model_repeats = oof['_oof_pred_model_repeats']
        except FileNotFoundError:
            model = cls.load(path=path, reset_paths=True, verbose=verbose)
            model._load_oof()
            oof_pred_proba = model._oof_pred_proba
            oof_pred_model_repeats = model._oof_pred_model_repeats
        return cls._oof_pred_proba_func(oof_pred_proba=oof_pred_proba, oof_pred_model_repeats=oof_pred_model_repeats)
def _load_oof(self):
if self._oof_pred_proba is not None:
pass
else:
oof = load_pkl.load(path=self.path + 'utils' + os.path.sep + self._oof_filename)
self._oof_pred_proba = oof['_oof_pred_proba']
self._oof_pred_model_repeats = oof['_oof_pred_model_repeats']
    def persist_child_models(self, reset_paths=True):
        """Replace child-name entries in self.models with loaded child model objects."""
        for i, model_name in enumerate(self.models):
            if isinstance(model_name, str):
                child_path = self.create_contexts(self.path + model_name + os.path.sep)
                child_model = self._child_type.load(path=child_path, reset_paths=reset_paths, verbose=True)
                self.models[i] = child_model
    def load_model_base(self):
        """Load the unfit child template ('model_template.pkl') from the bag's utils dir."""
        return load_pkl.load(path=self.path + 'utils' + os.path.sep + 'model_template.pkl')
    def save_model_base(self, model_base):
        """Persist the unfit child template to the bag's utils dir."""
        save_pkl.save(path=self.path + 'utils' + os.path.sep + 'model_template.pkl', object=model_base)
    def save(self, path=None, verbose=True, save_oof=True, save_children=False) -> str:
        """Persist the bag to disk and return the save path.

        save_children=True saves each child separately and keeps only names in
        self.models. Saved OOF predictions are dropped from memory afterwards
        (reloadable via _load_oof).
        """
        if path is None:
            path = self.path
        if save_children:
            model_names = []
            for child in self.models:
                child = self.load_child(child)
                child.set_contexts(path + child.name + os.path.sep)
                child.save(verbose=False)
                model_names.append(child.name)
            self.models = model_names
        if save_oof and self._oof_pred_proba is not None:
            save_pkl.save(path=path + 'utils' + os.path.sep + self._oof_filename, object={
                '_oof_pred_proba': self._oof_pred_proba,
                '_oof_pred_model_repeats': self._oof_pred_model_repeats,
            })
            # OOF arrays are now on disk; free the memory.
            self._oof_pred_proba = None
            self._oof_pred_model_repeats = None
        return super().save(path=path, verbose=verbose)
# If `remove_fit_stack=True`, variables will be removed that are required to fit more folds and to fit new stacker models which use this model as a base model.
# This includes OOF variables.
    def reduce_memory_size(self, remove_fit_stack=False, remove_fit=True, remove_info=False, requires_save=True, reduce_children=False, **kwargs):
        """Delete cached artifacts to shrink disk/memory footprint.

        remove_fit_stack deletes the OOF pickle and child template from disk
        (and from memory when requires_save=True); reduce_children applies the
        reduction to every child model as well.
        """
        super().reduce_memory_size(remove_fit=remove_fit, remove_info=remove_info, requires_save=requires_save, **kwargs)
        if remove_fit_stack:
            try:
                os.remove(self.path + 'utils' + os.path.sep + self._oof_filename)
            except FileNotFoundError:
                pass
            if requires_save:
                self._oof_pred_proba = None
                self._oof_pred_model_repeats = None
            try:
                os.remove(self.path + 'utils' + os.path.sep + 'model_template.pkl')
            except FileNotFoundError:
                pass
            if requires_save:
                self.model_base = None
            try:
                # Removes the utils dir only if it is now empty (rmdir fails otherwise).
                os.rmdir(self.path + 'utils')
            except OSError:
                pass
        if reduce_children:
            for model in self.models:
                model = self.load_child(model)
                model.reduce_memory_size(remove_fit=remove_fit, remove_info=remove_info, requires_save=requires_save, **kwargs)
                if requires_save and self.low_memory:
                    self.save_child(model=model)
def _get_model_names(self):
model_names = []
for model in self.models:
if isinstance(model, str):
model_names.append(model)
else:
model_names.append(model.name)
return model_names
    def get_info(self):
        """Return the parent info dict augmented with bagged-ensemble details.

        Adds 'bagged_info' (child type/names, repeat bookkeeping, memory
        estimates, compressed child hyperparameters) and per-child
        'children_info'; 'features' becomes the union of child features.
        """
        info = super().get_info()
        children_info = self._get_child_info()
        child_memory_sizes = [child['memory_size'] for child in children_info.values()]
        sum_memory_size_child = sum(child_memory_sizes)
        if child_memory_sizes:
            max_memory_size_child = max(child_memory_sizes)
        else:
            max_memory_size_child = 0
        if self.low_memory:
            # In low-memory mode children live on disk, so info['memory_size'] excludes them.
            max_memory_size = info['memory_size'] + sum_memory_size_child
            min_memory_size = info['memory_size'] + max_memory_size_child
        else:
            max_memory_size = info['memory_size']
            min_memory_size = info['memory_size'] - sum_memory_size_child + max_memory_size_child
        # Necessary if save_space is used as save_space deletes model_base.
        if len(self.models) > 0:
            child_model = self.load_child(self.models[0])
        else:
            child_model = self._get_model_base()
        child_hyperparameters = child_model.params
        child_ag_args_fit = child_model.params_aux
        bagged_info = dict(
            child_model_type=self._child_type.__name__,
            num_child_models=len(self.models),
            child_model_names=self._get_model_names(),
            _n_repeats=self._n_repeats,
            # _n_repeats_finished=self._n_repeats_finished, # commented out because these are too technical
            # _k_fold_end=self._k_fold_end,
            # _k=self._k,
            _k_per_n_repeat=self._k_per_n_repeat,
            _random_state=self._random_state,
            low_memory=self.low_memory,  # If True, then model will attempt to use at most min_memory_size memory by having at most one child in memory. If False, model will use max_memory_size memory.
            bagged_mode=self._bagged_mode,
            max_memory_size=max_memory_size,  # Memory used when all children are loaded into memory at once.
            min_memory_size=min_memory_size,  # Memory used when only the largest child is loaded into memory.
            child_hyperparameters=child_hyperparameters,
            child_hyperparameters_fit=self._get_compressed_params_trained(),
            child_ag_args_fit=child_ag_args_fit,
        )
        info['bagged_info'] = bagged_info
        info['children_info'] = children_info
        child_features_full = list(set().union(*[child['features'] for child in children_info.values()]))
        info['features'] = child_features_full
        info['num_features'] = len(child_features_full)
        return info
    def get_memory_size(self):
        """Return the bag's own memory size, excluding child model objects.

        Children are temporarily detached so super() does not traverse them.
        """
        models = self.models
        self.models = None
        memory_size = super().get_memory_size()
        self.models = models
        return memory_size
    def _get_child_info(self):
        """Return {child_name: info_dict} for every child.

        Children stored by name have their info read from disk without loading
        the full model.
        """
        child_info_dict = dict()
        for model in self.models:
            if isinstance(model, str):
                child_path = self.create_contexts(self.path + model + os.path.sep)
                child_info_dict[model] = self._child_type.load_info(child_path)
            else:
                child_info_dict[model.name] = model.get_info()
        return child_info_dict
    def _construct_empty_oof(self, X, y):
        """Return zeroed OOF prediction and per-row repeat-count arrays shaped for this problem type."""
        if self.problem_type == MULTICLASS:
            oof_pred_proba = np.zeros(shape=(len(X), len(y.unique())), dtype=np.float32)
        elif self.problem_type == SOFTCLASS:
            oof_pred_proba = np.zeros(shape=y.shape, dtype=np.float32)
        elif self.problem_type == QUANTILE:
            oof_pred_proba = np.zeros(shape=(len(X), len(self.quantile_levels)), dtype=np.float32)
        else:
            # Binary / regression: one scalar prediction per row.
            oof_pred_proba = np.zeros(shape=len(X), dtype=np.float32)
        oof_pred_model_repeats = np.zeros(shape=len(X), dtype=np.uint8)
        return oof_pred_proba, oof_pred_model_repeats
    def _preprocess_fit_resources(self, silent=False, **kwargs):
        """Pass along to child models to avoid altering up-front"""
        # Resource allocation (cpus/gpus) is decided per child, not at the bag level.
        return kwargs
# TODO: Currently double disk usage, saving model in HPO and also saving model in bag
    def _hyperparameter_tune(self, X, y, k_fold, scheduler_options, preprocess_kwargs=None, **kwargs):
        """Run HPO on the child model using a single train/val fold, then wrap
        each trial's best child into its own (partially fit) bagged ensemble.

        Returns (bags, bags_performance, hpo_results) mirroring the child's
        hyperparameter_tune output.
        """
        if len(self.models) != 0:
            raise ValueError('self.models must be empty to call hyperparameter_tune, value: %s' % self.models)
        kwargs['feature_metadata'] = self.feature_metadata
        kwargs['num_classes'] = self.num_classes  # TODO: maybe don't pass num_classes to children
        self.model_base.set_contexts(self.path + 'hpo' + os.path.sep)
        # TODO: Preprocess data here instead of repeatedly
        if preprocess_kwargs is None:
            preprocess_kwargs = dict()
        X = self.preprocess(X=X, preprocess=False, fit=True, **preprocess_kwargs)
        # Only the first fold is used for HPO; the same random_state reproduces the split.
        kfolds = generate_kfold(X=X, y=y, n_splits=k_fold, stratified=self.is_stratified(), random_state=self._random_state, n_repeats=1)
        train_index, test_index = kfolds[0]
        X_fold, X_val_fold = X.iloc[train_index, :], X.iloc[test_index, :]
        y_fold, y_val_fold = y.iloc[train_index], y.iloc[test_index]
        orig_time = scheduler_options[1]['time_out']
        if orig_time:
            scheduler_options[1]['time_out'] = orig_time * 0.8  # TODO: Scheduler doesn't early stop on final model, this is a safety net. Scheduler should be updated to early stop
        hpo_models, hpo_model_performances, hpo_results = self.model_base.hyperparameter_tune(X=X_fold, y=y_fold, X_val=X_val_fold, y_val=y_val_fold, scheduler_options=scheduler_options, **kwargs)
        scheduler_options[1]['time_out'] = orig_time
        bags = {}
        bags_performance = {}
        for i, (model_name, model_path) in enumerate(hpo_models.items()):
            child: AbstractModel = self._child_type.load(path=model_path)
            y_pred_proba = child.predict_proba(X_val_fold)
            # TODO: Create new Ensemble Here
            bag = copy.deepcopy(self)
            bag.rename(f"{bag.name}{os.path.sep}T{i}")
            bag.set_contexts(self.path_root + bag.name + os.path.sep)
            # Seed the bag's OOF state with this child's predictions on the val fold.
            oof_pred_proba, oof_pred_model_repeats = self._construct_empty_oof(X=X, y=y)
            oof_pred_proba[test_index] += y_pred_proba
            oof_pred_model_repeats[test_index] += 1
            bag.model_base = None
            child.rename('')
            child.set_contexts(bag.path + child.name + os.path.sep)
            bag.save_model_base(child.convert_to_template())
            # Mark the bag as partially fit: 1 of k_fold folds completed.
            bag._k = k_fold
            bag._k_fold_end = 1
            bag._n_repeats = 1
            bag._oof_pred_proba = oof_pred_proba
            bag._oof_pred_model_repeats = oof_pred_model_repeats
            child.rename('S1F1')
            child.set_contexts(bag.path + child.name + os.path.sep)
            if not self.params.get('save_bag_folds', True):
                child.model = None
            if bag.low_memory:
                bag.save_child(child, verbose=False)
                bag.models.append(child.name)
            else:
                bag.models.append(child)
            bag.val_score = child.val_score
            bag._add_child_times_to_bag(model=child)
            bag.save()
            bags[bag.name] = bag.path
            bags_performance[bag.name] = bag.val_score
        # TODO: hpo_results likely not correct because no renames
        return bags, bags_performance, hpo_results
    def _more_tags(self):
        """Advertise that this model can produce valid out-of-fold predictions."""
        return {'valid_oof': True}
| 48.485788 | 216 | 0.637444 |
a4d4dd947486de0ce10c8fe0a1ece75db9081f7d | 8,723 | py | Python | rdr_service/alembic/versions/a43f72b7c848_update_code_procs.py | all-of-us/raw-data-repository | d28ad957557587b03ff9c63d55dd55e0508f91d8 | [
"BSD-3-Clause"
] | 39 | 2017-10-13T19:16:27.000Z | 2021-09-24T16:58:21.000Z | rdr_service/alembic/versions/a43f72b7c848_update_code_procs.py | all-of-us/raw-data-repository | d28ad957557587b03ff9c63d55dd55e0508f91d8 | [
"BSD-3-Clause"
] | 312 | 2017-09-08T15:42:13.000Z | 2022-03-23T18:21:40.000Z | rdr_service/alembic/versions/a43f72b7c848_update_code_procs.py | all-of-us/raw-data-repository | d28ad957557587b03ff9c63d55dd55e0508f91d8 | [
"BSD-3-Clause"
] | 19 | 2017-09-15T13:58:00.000Z | 2022-02-07T18:33:20.000Z | """update code procs
Revision ID: a43f72b7c848
Revises: 3adfe155c68b
Create Date: 2019-04-15 14:55:40.416929
"""
from alembic import op
from rdr_service.dao.alembic_utils import ReplaceableObject
# revision identifiers, used by Alembic.
revision = "a43f72b7c848"  # this migration's id
down_revision = "3adfe155c68b"  # previous migration in the chain
branch_labels = None  # no named branch
depends_on = None  # no cross-branch dependency
sp_get_code_module_items = ReplaceableObject(
"sp_get_code_module_items",
"""
(IN module VARCHAR(80))
BEGIN
# Return all of the codebook items (topics, questions, answers) related to the passed
# module name.
SELECT @code_id := code_id FROM code WHERE `value` = module and parent_id is NULL;
SELECT a.code_id, a.parent_id, a.topic, a.code_type, a.`value`, a.display, a.`system`, a.mapped, a.created, a.code_book_id, a.short_value
FROM (
SELECT t1.*, '0' AS sort_id
FROM code t1
WHERE t1.code_id = @code_id
UNION ALL
SELECT t2.*, CONCAT(LPAD(t2.code_id, 8, '0'), t2.value) AS sort_id
FROM code t1
INNER JOIN code t2 on t2.parent_id = t1.code_id
WHERE t1.code_id = @code_id
UNION ALL
SELECT t3.*, CONCAT(LPAD(t2.code_id, 8, '0'), t2.value, LPAD(t3.code_id, 8, '0')) AS sort_id
FROM code t1
INNER JOIN code t2 on t2.parent_id = t1.code_id
INNER JOIN code t3 on t3.parent_id = t2.code_id
WHERE t1.code_id = @code_id
UNION ALL
SELECT t4.*, CONCAT(LPAD(t2.code_id, 8, '0'), t2.value, LPAD(t3.code_id, 8, '0'), t3.value)
FROM code t1
INNER JOIN code t2 on t2.parent_id = t1.code_id
INNER JOIN code t3 on t3.parent_id = t2.code_id
INNER JOIN code t4 on t4.parent_id = t3.code_id
WHERE t1.code_id = @code_id
) a
ORDER BY a.sort_id, a.code_id;
END
""",
)
sp_get_questionnaire_answers = ReplaceableObject(
"sp_get_questionnaire_answers",
"""
(IN module VARCHAR(80), IN participant_id INT)
BEGIN
# Dynamically pivot the questionnaire answers for the given participant and module.
# Results are ordered by 'created' descending.
DECLARE CONTINUE HANDLER FOR 1064 SET @sql = NULL;
DECLARE CONTINUE HANDLER FOR 1243 SELECT 1 AS 'invalid_code_id' FROM dual WHERE FALSE;
SET @sql = '';
SELECT @module := COALESCE(c.value, 0), @code_id := COALESCE(c.code_id, 0)
FROM code c
WHERE c.value = module;
SELECT @sql := CONCAT(@sql, IF(@sql = '', '', ', '), temp.output)
FROM (
SELECT DISTINCT CONCAT('GROUP_CONCAT(IF(code_id = ', code_id, ', answer, NULL) SEPARATOR ",") AS ',
`value`) as output
FROM (
SELECT a.*
FROM (
SELECT t1.code_id, t1.value, t1.display, t1.code_type, t1.parent_id, '0' AS sort_id
FROM code t1
WHERE t1.code_id = @code_id
UNION ALL
SELECT t2.code_id, t2.value, t2.display, t2.code_type, t2.parent_id, CONCAT(LPAD(t2.code_id, 8, '0'), t2.value) AS sort_id
FROM code t1
INNER JOIN code t2 on t2.parent_id = t1.code_id
WHERE t1.code_id = @code_id
UNION ALL
SELECT t3.code_id, t3.value, t3.display, t3.code_type, t3.parent_id, CONCAT(LPAD(t2.code_id, 8, '0'), t2.value, LPAD(t3.code_id, 8, '0')) AS sort_id
FROM code t1
INNER JOIN code t2 on t2.parent_id = t1.code_id
INNER JOIN code t3 on t3.parent_id = t2.code_id
WHERE t1.code_id = @code_id
UNION ALL
SELECT t4.code_id, t4.value, t4.display, t4.code_type, t4.parent_id, CONCAT(LPAD(t2.code_id, 8, '0'), t2.value, LPAD(t3.code_id, 8, '0'), t3.value)
FROM code t1
INNER JOIN code t2 on t2.parent_id = t1.code_id
INNER JOIN code t3 on t3.parent_id = t2.code_id
INNER JOIN code t4 on t4.parent_id = t3.code_id
WHERE t1.code_id = @code_id
) a
ORDER BY a.sort_id, a.code_id
) b
WHERE b.code_type = 3
) AS temp;
SET @sql = CONCAT('
SELECT
a.questionnaire_id,
a.questionnaire_response_id,
a.created,
', @code_id, ' as code_id,
a.version,
a.authored,
a.language,
a.participant_id,
''', @module, ''' as module,
', @sql, '
FROM (
SELECT qr.questionnaire_id,
qr.questionnaire_response_id,
qr.created,
qq.code_id,
q.version,
qr.authored,
qr.language,
qr.participant_id,
COALESCE((SELECT c.value from code c where c.code_id = qra.value_code_id),
qra.value_integer, qra.value_decimal,
qra.value_boolean, qra.value_string, qra.value_system,
qra.value_uri, qra.value_date, qra.value_datetime) as answer
FROM questionnaire_response qr
INNER JOIN questionnaire_response_answer qra
ON qra.questionnaire_response_id = qr.questionnaire_response_id
INNER JOIN questionnaire_question qq
ON qra.question_id = qq.questionnaire_question_id
INNER JOIN questionnaire q
ON qq.questionnaire_id = q.questionnaire_id
WHERE qr.participant_id = ', participant_id, ' AND
--
qr.questionnaire_id IN (
SELECT q.questionnaire_id
FROM questionnaire q
INNER JOIN questionnaire_concept qc
ON q.questionnaire_id = qc.questionnaire_id AND q.version = qc.questionnaire_version
WHERE qc.code_id = ', @code_id, '
)
) a
GROUP BY a.questionnaire_response_id, a.version
ORDER BY a.created DESC
');
-- select @sql;
PREPARE stmt FROM @sql;
EXECUTE stmt;
DEALLOCATE PREPARE stmt;
END
""",
)
participant_answers_view = ReplaceableObject(
"participant_answers_view",
"""
SELECT
qr.participant_id,
code.value AS module,
(SELECT c.value FROM code c WHERE c.code_id = qq.code_id) AS question_code,
COALESCE((SELECT c.value FROM code c WHERE c.code_id = qra.value_code_id),
qra.value_boolean, qra.value_date, qra.value_datetime, qra.value_decimal, qra.value_integer,
qra.value_string, qra.value_system, qra.value_uri) AS answer,
qr.questionnaire_response_id,
qr.authored,
qr.created
FROM questionnaire_response_answer qra
INNER JOIN questionnaire_response qr ON qr.questionnaire_response_id = qra.questionnaire_response_id
INNER JOIN questionnaire_question qq ON qra.question_id = qq.questionnaire_question_id
INNER JOIN questionnaire_concept qc ON qc.questionnaire_id = qr.questionnaire_id
INNER JOIN code ON qc.code_id = code.code_id
ORDER BY qr.participant_id, qr.created DESC, question_code
""",
)
def upgrade(engine_name):
    """Dispatch to the engine-specific upgrade routine (e.g. ``upgrade_rdr``)."""
    handler = globals()["upgrade_{}".format(engine_name)]
    handler()
def downgrade(engine_name):
    """Dispatch to the engine-specific downgrade routine (e.g. ``downgrade_rdr``)."""
    handler = globals()["downgrade_{}".format(engine_name)]
    handler()
def upgrade_rdr():
    # Apply this revision to the RDR database: swap in the new versions of the
    # two stored procedures (recording which prior revision's definition each
    # one replaces) and create the participant_answers_view defined above.
    op.replace_sp(sp_get_code_module_items, replaces="ed28b84f061e.sp_get_code_module_items")
    op.replace_sp(sp_get_questionnaire_answers, replaces="1338221caf81.sp_get_questionnaire_answers")
    op.create_view(participant_answers_view)
def downgrade_rdr():
    # Reverse of upgrade_rdr: restore the previous stored-procedure
    # definitions and drop the view added by this revision.
    op.replace_sp(sp_get_code_module_items, replace_with="ed28b84f061e.sp_get_code_module_items")
    op.replace_sp(sp_get_questionnaire_answers, replace_with="1338221caf81.sp_get_questionnaire_answers")
    op.drop_view(participant_answers_view)
def upgrade_metrics():
    """No-op: this revision makes no changes to the metrics database."""
    pass
def downgrade_metrics():
    """No-op: there is nothing to undo on the metrics database."""
    pass
def unittest_schemas():
    """Return DROP/CREATE statement pairs so unit tests can install both
    stored procedures in a scratch database."""
    statements = []
    # Same order as before: drop+create for code-module items, then for
    # questionnaire answers.
    for sp in (sp_get_code_module_items, sp_get_questionnaire_answers):
        statements.append("DROP PROCEDURE IF EXISTS `{0}`".format(sp.name))
        statements.append("CREATE PROCEDURE `{0}` {1}".format(sp.name, sp.sqltext))
    return statements
| 37.926087 | 166 | 0.619053 |
73ae14193dde5195da2706424112e0f2e5a2135e | 1,376 | py | Python | sdk/identity/azure-identity/azure/identity/__init__.py | gautam714/azure-sdk-for-python | 1741c199c42e8c85a2e14bc78195fd992837ef92 | [
"MIT"
] | null | null | null | sdk/identity/azure-identity/azure/identity/__init__.py | gautam714/azure-sdk-for-python | 1741c199c42e8c85a2e14bc78195fd992837ef92 | [
"MIT"
] | null | null | null | sdk/identity/azure-identity/azure/identity/__init__.py | gautam714/azure-sdk-for-python | 1741c199c42e8c85a2e14bc78195fd992837ef92 | [
"MIT"
] | null | null | null | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from ._browser_auth import InteractiveBrowserCredential
from .credentials import (
CertificateCredential,
ChainedTokenCredential,
ClientSecretCredential,
DeviceCodeCredential,
EnvironmentCredential,
ManagedIdentityCredential,
UsernamePasswordCredential,
)
class DefaultAzureCredential(ChainedTokenCredential):
    """
    A default credential capable of handling most Azure SDK authentication scenarios.

    If environment variable configuration is present, service-principal
    authentication via :class:`azure.identity.EnvironmentCredential` is tried
    first; otherwise authentication falls back to a managed identity via
    :class:`azure.identity.ManagedIdentityCredential`.
    """

    def __init__(self, **kwargs):
        # ChainedTokenCredential tries each credential in order, so the
        # environment-based credential takes precedence over managed identity.
        credentials = (EnvironmentCredential(**kwargs), ManagedIdentityCredential(**kwargs))
        super(DefaultAzureCredential, self).__init__(*credentials)
# Public API of the azure.identity package; anything not listed is internal.
__all__ = [
    "CertificateCredential",
    "ChainedTokenCredential",
    "ClientSecretCredential",
    "DefaultAzureCredential",
    "DeviceCodeCredential",
    "EnvironmentCredential",
    "InteractiveBrowserCredential",
    "ManagedIdentityCredential",
    "UsernamePasswordCredential",
]
| 30.577778 | 95 | 0.713663 |
2144e60f7c4daba2c6bc964c64c9875d59413c1c | 796 | py | Python | CPAC/registration/__init__.py | chrisfoulon/C-PAC | 2746a90c39cea586aede98343c5927252bb8e81a | [
"BSD-3-Clause"
] | 1 | 2021-08-02T23:23:39.000Z | 2021-08-02T23:23:39.000Z | CPAC/registration/__init__.py | chrisfoulon/C-PAC | 2746a90c39cea586aede98343c5927252bb8e81a | [
"BSD-3-Clause"
] | null | null | null | CPAC/registration/__init__.py | chrisfoulon/C-PAC | 2746a90c39cea586aede98343c5927252bb8e81a | [
"BSD-3-Clause"
] | 2 | 2021-08-02T23:23:40.000Z | 2022-02-26T12:39:30.000Z | from registration import create_nonlinear_register, \
create_register_func_to_mni, \
create_register_func_to_anat, \
create_bbregister_func_to_anat, \
create_wf_calculate_ants_warp, \
create_wf_apply_ants_warp, \
create_wf_c3d_fsl_to_itk, \
create_wf_collect_transforms
__all__ = ['create_nonlinear_register', \
'create_register_func_to_mni', \
'create_register_func_to_anat', \
'create_bbregister_func_to_anat', \
'create_wf_calculate_ants_warp', \
'create_wf_apply_ants_warp', \
'create_wf_c3d_fsl_to_itk', \
'create_wf_collect_transforms']
| 44.222222 | 58 | 0.579146 |
19ae649cd293a7b1313bf7ccd416314a37ba838d | 271 | py | Python | resume parser/app.py | sushant66/AI-Job-Hiring | 2bda74fa4a8485fcbe93a37445a3e4fd138655a8 | [
"MIT"
] | null | null | null | resume parser/app.py | sushant66/AI-Job-Hiring | 2bda74fa4a8485fcbe93a37445a3e4fd138655a8 | [
"MIT"
] | null | null | null | resume parser/app.py | sushant66/AI-Job-Hiring | 2bda74fa4a8485fcbe93a37445a3e4fd138655a8 | [
"MIT"
] | null | null | null | import os
from flask import Flask, flash, request, redirect, url_for
from werkzeug.utils import secure_filename
UPLOAD_FOLDER = 'uploads'
ALLOWED_EXTENSIONS = {'txt', 'pdf', 'png', 'jpg', 'jpeg', 'gif'}
app = Flask(__name__)
app.config['UPLOAD_FOLDER'] = UPLOAD_FOLDER
| 27.1 | 64 | 0.741697 |
6db04c37affba643aa7c29f9da160d1d98c13ffd | 3,049 | py | Python | tuition/hooks.py | AlvaroMolano/tuition | d6e9b55c1cf7a297895df748c481f8bdac4f0cdf | [
"CECILL-B"
] | null | null | null | tuition/hooks.py | AlvaroMolano/tuition | d6e9b55c1cf7a297895df748c481f8bdac4f0cdf | [
"CECILL-B"
] | null | null | null | tuition/hooks.py | AlvaroMolano/tuition | d6e9b55c1cf7a297895df748c481f8bdac4f0cdf | [
"CECILL-B"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from . import __version__ as app_version
app_name = "tuition"
app_title = "Tuition"
app_publisher = "AlvaroMolano"
app_description = "Add flavour and utilities to education module"
app_icon = "octicon octicon-mortar-board"
app_color = "green"
app_email = "al.j.molano@gmail.com"
app_license = "MIT"
# Includes in <head>
# ------------------
# include js, css files in header of desk.html
# app_include_css = "/assets/tuition/css/tuition.css"
# app_include_js = "/assets/tuition/js/tuition.js"
# include js, css files in header of web template
# web_include_css = "/assets/tuition/css/tuition.css"
# web_include_js = "/assets/tuition/js/tuition.js"
# include js in page
# page_js = {"page" : "public/js/file.js"}
# include js in doctype views
# doctype_js = {"doctype" : "public/js/doctype.js"}
# doctype_list_js = {"doctype" : "public/js/doctype_list.js"}
# doctype_tree_js = {"doctype" : "public/js/doctype_tree.js"}
# doctype_calendar_js = {"doctype" : "public/js/doctype_calendar.js"}
# Home Pages
# ----------
# application home page (will override Website Settings)
# home_page = "login"
# website user home page (by Role)
# role_home_page = {
# "Role": "home_page"
# }
# Website user home page (by function)
# get_website_user_home_page = "tuition.utils.get_home_page"
# Generators
# ----------
# automatically create page for each record of this doctype
# website_generators = ["Web Page"]
# Installation
# ------------
# before_install = "tuition.install.before_install"
# after_install = "tuition.install.after_install"
# Desk Notifications
# ------------------
# See frappe.core.notifications.get_notification_config
# notification_config = "tuition.notifications.get_notification_config"
# Permissions
# -----------
# Permissions evaluated in scripted ways
# permission_query_conditions = {
# "Event": "frappe.desk.doctype.event.event.get_permission_query_conditions",
# }
#
# has_permission = {
# "Event": "frappe.desk.doctype.event.event.has_permission",
# }
# Document Events
# ---------------
# Hook on document methods and events
# doc_events = {
# "*": {
# "on_update": "method",
# "on_cancel": "method",
# "on_trash": "method"
# }
# }
# Scheduled Tasks
# ---------------
# scheduler_events = {
# "all": [
# "tuition.tasks.all"
# ],
# "daily": [
# "tuition.tasks.daily"
# ],
# "hourly": [
# "tuition.tasks.hourly"
# ],
# "weekly": [
# "tuition.tasks.weekly"
# ]
# "monthly": [
# "tuition.tasks.monthly"
# ]
# }
# Testing
# -------
# before_tests = "tuition.install.before_tests"
# Overriding Methods
# ------------------------------
#
# override_whitelisted_methods = {
# "frappe.desk.doctype.event.event.get_events": "tuition.event.get_events"
# }
#
# each overriding function accepts a `data` argument;
# generated from the base implementation of the doctype dashboard,
# along with any modifications made in other Frappe apps
# override_doctype_dashboards = {
# "Task": "tuition.task.get_dashboard_data"
# }
| 23.453846 | 78 | 0.673991 |
7db7e398996a670e63f7fd4c7042f23421dab4a4 | 5,312 | py | Python | train_transmitter.py | Guaguago/Persona-Dialogue-Generation | 0d4526ec8eddff62751a70666e14d72103906f44 | [
"MIT"
] | null | null | null | train_transmitter.py | Guaguago/Persona-Dialogue-Generation | 0d4526ec8eddff62751a70666e14d72103906f44 | [
"MIT"
] | null | null | null | train_transmitter.py | Guaguago/Persona-Dialogue-Generation | 0d4526ec8eddff62751a70666e14d72103906f44 | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""Train model for ppl metric with pre-selected parameters.
These parameters have some variance in their final perplexity, but they were
used to achieve the pre-trained model.
"""
import os
import random
import torch
from agents.transmitter.transmitter import ARCH_CHOICE
from parlai.scripts.train_model import setup_args as setup_dict_args, TrainLoop
# Parameters
# Experiment configuration knobs consumed by setup_args() below.
MODEL = 'transmitter'
NAME = "pegg-o"
# True -> use the "self original" ConvAI2 teacher; False -> the revised one.
IS_ORIGINAL = True
# Loss weights: generation, gating, classification.
GEN, GATE, CLS = 1., 1., 1.
MIDDLE_POOL_SIZE = None
NEXT_POOL_SIZE = None
PERSONA_POOL_R = None
PERSONA_POOL_SIZE = 50
USE_TO_PERSONA_POOL = False
USE_CONTEXT_POOL = False
DROP_LITERAL_PERSONA = False
PERSONA_LOWER_BOUND = 0
CONTEXT_LOWER_BOUND = 0
USE_ATTENTION = True
BEAM_SIZE = 2
# Absolute paths for checkpoints and ParlAI data (cluster-specific).
MODEL_DIR = '/apdcephfs/share_916081/chencxu/pegg/AAAI/train-o-25'
DATA_DIR = '/apdcephfs/share_916081/chencxu/pegg/data'
def setup_task():
    """Return the ConvAI2 teacher spec matching the IS_ORIGINAL flag."""
    return ('tasks.convai2transmitter.agents:SelfOriginalTeacher'
            if IS_ORIGINAL
            else 'tasks.convai2transmitter.agents:SelfRevisedTeacher')
def setup_seed(seed=1706123):
    """Seed torch (CPU + CUDA) and Python's RNG so evaluation runs are
    reproducible. Same seeding order as before."""
    for seed_fn in (torch.random.manual_seed, torch.cuda.manual_seed, random.seed):
        seed_fn(seed)
def gpt_setting():
    """(batchsize, lr, optimizer, gradient_clip) used when ARCH_CHOICE == 'gpt'."""
    batchsize, lr = 10, 1e-4
    return batchsize, lr, 'gpt_custom', 1.0
def lstm_setting():
    """(batchsize, lr, optimizer, gradient_clip) used for the LSTM architecture."""
    batchsize, lr = 64, 3
    return batchsize, lr, 'sgd', 0.1
def setup_args():
    """
    Build the ParlAI argument parser for transmitter training and install the
    experiment defaults. Architecture-dependent values (batch size, learning
    rate, optimizer, gradient clip) come from gpt_setting() or lstm_setting()
    depending on ARCH_CHOICE; everything else is fixed here or taken from the
    module-level constants above.

    :return: opt
    """
    parser = setup_dict_args()
    exp_name = NAME
    n_epoches = 100
    beam_size = BEAM_SIZE
    encode_layers = 2
    decode_layers = 2
    embedding_size = 256
    turn_emed_size = 50
    encoder_turn_use = False
    encoder_dis_use = False
    encoder_hidden_size = 1024
    decoder_hidden_size = 1024
    encode_max_seq_len = 256
    decode_max_seq_len = 32
    smoothing = 0.05
    dropout = 0.1
    embedding_type = 'glove'
    momentum = 0.9
    persona_append_strategy = 'concat'
    history_append_strategy = -1
    select_persona = False
    shuffle_persona = True
    share_decoder_input_output_embed = False
    num_train_epochs = 4
    # Architecture-specific hyper-parameters.
    if ARCH_CHOICE == 'gpt':
        batchsize, lr, optimizer, gradient_clip = gpt_setting()
    else:
        batchsize, lr, optimizer, gradient_clip = lstm_setting()
    task_name = setup_task()
    parser.set_defaults(
        task=task_name,
        rank_candidates=False,
        # task='tasks.convai2transmitter.agents:SelfRevisedTeacher:no_cands',
        model='agents.transmitter.transmitter:TransformerAgent',
        datapath=DATA_DIR,
        # ==== loss weights and pooling options (module-level constants) ====
        gen_weight=GEN,
        gate_weight=GATE,
        cls_weight=CLS,
        middle_pool_size=MIDDLE_POOL_SIZE,
        persona_pool_size=PERSONA_POOL_SIZE,
        next_pool_size=NEXT_POOL_SIZE,
        use_context_pool=USE_CONTEXT_POOL,
        use_to_persona_pool=USE_TO_PERSONA_POOL,
        drop_literal_persona=DROP_LITERAL_PERSONA,
        persona_lower_bound=PERSONA_LOWER_BOUND,
        context_lower_bound=CONTEXT_LOWER_BOUND,
        use_attention=USE_ATTENTION,
        persona_pool_r=PERSONA_POOL_R,
        # ==== model/optimization defaults ====
        model_file='{}/{}/{}.model'.format(MODEL_DIR, MODEL, exp_name),
        dict_tokenizer='split',
        datatype='train',
        gpt_lr=6.25e-5,
        n_epoches=n_epoches,
        num_epochs=num_train_epochs,
        batchsize=batchsize,
        beam_size=beam_size,
        encoder_layers=encode_layers,
        decoder_layers=decode_layers,
        encoder_embed_dim=embedding_size,
        encoder_turn_dim=turn_emed_size,
        encoder_turn_use=encoder_turn_use,
        encoder_dis_use=encoder_dis_use,
        decoder_embed_dim=embedding_size,
        encode_max_seq_len=encode_max_seq_len,
        decode_max_seq_len=decode_max_seq_len,
        select_persona=select_persona,
        shuffle_persona=shuffle_persona,
        persona_append_strategy=persona_append_strategy,
        history_append_strategy=history_append_strategy,
        encoder_bidirectional=False,
        encoder_hidden_size=encoder_hidden_size,
        decoder_hidden_size=decoder_hidden_size,
        smoothing=smoothing,
        lr=lr,
        dropout=dropout,
        encoder_dropout_in=dropout,
        encoder_dropout_out=0,
        decoder_dropout_in=dropout,
        decoder_dropout_out=0,
        share_decoder_input_output_embed=share_decoder_input_output_embed,
        gradient_clip=gradient_clip,
        lookuptable='enc_dec',
        optimizer=optimizer,
        embedding_type=embedding_type,
        momentum=momentum,
        # rough enough
        validation_max_exs=-1,
        validation_every_n_secs=3600,
        validation_metric='f1',
        validation_metric_mode='min',
        validation_patience=5,
        log_every_n_secs=30,
        gpu=0,
        # logging configuration
        exp=exp_name,
        tensorboard_log=True,
        tensorboard_tag='exp',
        train_report_metrics='ppl,f1,hits@1',
        tensorboard_metrics='ppl,f1,hits@1',
        visualization=False
    )
    return parser
if __name__ == '__main__':
    # Build the options, fix the RNG seeds, then hand off to ParlAI's trainer.
    opt = setup_args()
    setup_seed()
    TrainLoop(opt).train()
| 29.842697 | 79 | 0.693148 |
123d994c09bcb9d3a2863ca2a52f3e89dcd03a6e | 9,561 | py | Python | pgmpy/models/ClusterGraph.py | jaidevd/pgmpy | 0a7f371f4b39ded45e48d637fa1a44b4518162da | [
"MIT"
] | null | null | null | pgmpy/models/ClusterGraph.py | jaidevd/pgmpy | 0a7f371f4b39ded45e48d637fa1a44b4518162da | [
"MIT"
] | null | null | null | pgmpy/models/ClusterGraph.py | jaidevd/pgmpy | 0a7f371f4b39ded45e48d637fa1a44b4518162da | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
from collections import defaultdict
import numpy as np
from pgmpy.base import UndirectedGraph
from pgmpy.exceptions import CardinalityError
class ClusterGraph(UndirectedGraph):
    r"""
    Base class for representing Cluster Graph.

    Cluster graph is an undirected graph which is associated with a subset of variables. The graph contains undirected
    edges that connects clusters whose scopes have a non-empty intersection.

    Formally, a cluster graph is :math:`\mathcal{U}` for a set of factors :math:`\Phi` over :math:`\mathcal{X}` is an
    undirected graph, each of whose nodes :math:`i` is associated with a subset :math:`C_i \subseteq X`. A cluster
    graph must be family-preserving - each factor :math:`\phi \in \Phi` must be associated with a cluster C, denoted
    :math:`\alpha(\phi)`, such that :math:`Scope[\phi] \subseteq C_i`. Each edge between a pair of clusters :math:`C_i`
    and :math:`C_j` is associated with a sepset :math:`S_{i,j} \subseteq C_i \cap C_j`.

    Parameters
    ----------
    data: input graph
        Data to initialize graph. If data=None (default) an empty graph is created. The data is an edge list

    Examples
    --------
    Create an empty ClusterGraph with no nodes and no edges

    >>> from pgmpy.models import ClusterGraph
    >>> G = ClusterGraph()

    G can be grown by adding clique nodes.

    **Nodes:**

    Add a tuple (or list or set) of nodes as single clique node.

    >>> G.add_node(('a', 'b', 'c'))
    >>> G.add_nodes_from([('a', 'b'), ('a', 'b', 'c')])

    **Edges:**

    G can also be grown by adding edges.

    >>> G.add_edge(('a', 'b', 'c'), ('a', 'b'))

    or a list of edges

    >>> G.add_edges_from([(('a', 'b', 'c'), ('a', 'b')),
    ...                   (('a', 'b', 'c'), ('a', 'c'))])
    """

    def __init__(self, ebunch=None):
        super().__init__()
        if ebunch:
            self.add_edges_from(ebunch)
        # One factor per clique; cardinalities is filled lazily by check_model().
        self.factors = []
        self.cardinalities = defaultdict(int)

    def add_node(self, node, **kwargs):
        """
        Add a single node to the cluster graph.

        Parameters
        ----------
        node: node
            A node should be a collection of nodes forming a clique. It can be
            a list, set or tuple of nodes

        Examples
        --------
        >>> from pgmpy.models import ClusterGraph
        >>> G = ClusterGraph()
        >>> G.add_node(('a', 'b', 'c'))
        """
        if not isinstance(node, (list, set, tuple)):
            raise TypeError('Node can only be a list, set or tuple of nodes forming a clique')
        # Normalize to tuple so cliques are hashable graph nodes.
        node = tuple(node)
        super().add_node(node, **kwargs)

    def add_nodes_from(self, nodes, **kwargs):
        """
        Add multiple nodes to the cluster graph.

        Parameters
        ----------
        nodes: iterable container
            A container of nodes (list, dict, set, etc.).

        Examples
        --------
        >>> from pgmpy.models import ClusterGraph
        >>> G = ClusterGraph()
        >>> G.add_nodes_from([('a', 'b'), ('a', 'b', 'c')])
        """
        for node in nodes:
            self.add_node(node, **kwargs)

    def add_edge(self, u, v, **kwargs):
        """
        Add an edge between two clique nodes.

        Parameters
        ----------
        u, v: nodes
            Nodes can be any list or set or tuple of nodes forming a clique.

        Examples
        --------
        >>> from pgmpy.models import ClusterGraph
        >>> G = ClusterGraph()
        >>> G.add_nodes_from([('a', 'b', 'c'), ('a', 'b'), ('a', 'c')])
        >>> G.add_edges_from([(('a', 'b', 'c'), ('a', 'b')),
        ...                   (('a', 'b', 'c'), ('a', 'c'))])
        """
        set_u = set(u)
        set_v = set(v)
        # A cluster-graph edge requires a non-empty sepset.
        if set_u.isdisjoint(set_v):
            raise ValueError('No sepset found between these two edges.')
        # BUGFIX: forward kwargs (edge attributes) instead of silently
        # dropping them, consistent with add_node.
        super().add_edge(u, v, **kwargs)

    def add_factors(self, *factors):
        """
        Associate a factor to the graph.
        See factors class for the order of potential values

        Parameters
        ----------
        *factor: pgmpy.factors.factors object
            A factor object on any subset of the variables of the model which
            is to be associated with the model.

        Returns
        -------
        None

        Examples
        --------
        >>> from pgmpy.models import ClusterGraph
        >>> from pgmpy.factors import Factor
        >>> student = ClusterGraph()
        >>> student.add_node(('Alice', 'Bob'))
        >>> factor = Factor(['Alice', 'Bob'], cardinality=[3, 2],
        ...                 value=np.random.rand(6))
        >>> student.add_factors(factor)
        """
        for factor in factors:
            factor_scope = set(factor.scope())
            nodes = [set(node) for node in self.nodes()]
            if factor_scope not in nodes:
                # BUGFIX: adjacent string literals were missing a separating
                # space ("notpresent").
                raise ValueError('Factors defined on clusters of variable not '
                                 'present in model')
            self.factors.append(factor)

    def get_factors(self, node=None):
        """
        Return the factors that have been added till now to the graph.

        If node is not None, it would return the factor corresponding to the
        given node.

        Examples
        --------
        >>> from pgmpy.models import ClusterGraph
        >>> from pgmpy.factors import Factor
        >>> G = ClusterGraph()
        >>> G.add_nodes_from([('a', 'b', 'c'), ('a', 'b'), ('a', 'c')])
        >>> G.add_edges_from([(('a', 'b', 'c'), ('a', 'b')),
        ...                   (('a', 'b', 'c'), ('a', 'c'))])
        >>> phi1 = Factor(['a', 'b', 'c'], [2, 2, 2], np.random.rand(8))
        >>> phi2 = Factor(['a', 'b'], [2, 2], np.random.rand(4))
        >>> phi3 = Factor(['a', 'c'], [2, 2], np.random.rand(4))
        >>> G.add_factors(phi1, phi2, phi3)
        >>> G.get_factors()
        >>> G.get_factors(node=('a', 'b', 'c'))
        """
        if node is None:
            return self.factors

        nodes = [set(node) for node in self.nodes()]
        if set(node) not in nodes:
            raise ValueError('Node not present in Cluster Graph')
        # Exactly one factor should match a clique (enforced by check_model).
        factors = [factor for factor in self.factors
                   if set(factor.scope()) == set(node)]
        return factors[0]

    def remove_factors(self, *factors):
        """
        Removes the given factors from the added factors.

        Examples
        --------
        >>> from pgmpy.models import ClusterGraph
        >>> from pgmpy.factors import Factor
        >>> student = ClusterGraph()
        >>> factor = Factor(['Alice', 'Bob'], cardinality=[2, 2],
        ...                 value=np.random.rand(4))
        >>> student.add_factors(factor)
        >>> student.remove_factors(factor)
        """
        for factor in factors:
            self.factors.remove(factor)

    def get_partition_function(self):
        r"""
        Returns the partition function for a given undirected graph.

        A partition function is defined as

        .. math:: \sum_{X}(\prod_{i=1}^{m} \phi_i)

        where m is the number of factors present in the graph
        and X are all the random variables present.

        Examples
        --------
        >>> from pgmpy.models import ClusterGraph
        >>> from pgmpy.factors import Factor
        >>> G = ClusterGraph()
        >>> G.add_nodes_from([('a', 'b', 'c'), ('a', 'b'), ('a', 'c')])
        >>> G.add_edges_from([(('a', 'b', 'c'), ('a', 'b')),
        ...                   (('a', 'b', 'c'), ('a', 'c'))])
        >>> phi1 = Factor(['a', 'b', 'c'], [2, 2, 2], np.random.rand(8))
        >>> phi2 = Factor(['a', 'b'], [2, 2], np.random.rand(4))
        >>> phi3 = Factor(['a', 'c'], [2, 2], np.random.rand(4))
        >>> G.add_factors(phi1, phi2, phi3)
        >>> G.get_partition_function()
        """
        if self.check_model():
            # Multiply all factors together, then sum over every assignment.
            factor = self.factors[0]
            factor = factor.product(*[self.factors[i] for i in range(1, len(self.factors))])
            return np.sum(factor.values)

    def check_model(self):
        """
        Check the model for various errors. This method checks for the following
        errors. In the same time also updates the cardinalities of all the random
        variables.

        * Checks if clique potentials are defined for all the cliques or not.
        * Check for running intersection property is not done explicitly over
          here as it done in the add_edges method.

        Returns
        -------
        check: boolean
            True if all the checks are passed
        """
        for clique in self.nodes():
            # NOTE(review): get_factors raises IndexError (not this
            # ValueError) when no factor matches — kept for compatibility.
            if not self.get_factors(clique):
                # BUGFIX: missing space in the concatenated message
                # ("notdefined.").
                raise ValueError('Factors for all the cliques or clusters not '
                                 'defined.')
        if len(self.factors) != len(self.nodes()):
            # BUGFIX: missing space in the concatenated message
            # ("clusteris not there.").
            raise ValueError('One to one mapping of factor to clique or cluster '
                             'is not there.')
        for factor in self.factors:
            for variable, cardinality in zip(factor.scope(), factor.cardinality):
                if ((self.cardinalities[variable]) and
                        (self.cardinalities[variable] != cardinality)):
                    raise CardinalityError(
                        'Cardinality of variable %s not matching among factors' % variable)
                else:
                    self.cardinalities[variable] = cardinality
        return True
| 34.516245 | 119 | 0.528606 |
fe66d82dc76fea148ff9163e36a89ec61940870a | 17,899 | py | Python | astroquery/simbad/tests/test_simbad.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | astroquery/simbad/tests/test_simbad.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | astroquery/simbad/tests/test_simbad.py | EnjoyLifeFund/macHighSierra-py36-pkgs | 5668b5785296b314ea1321057420bcd077dba9ea | [
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | null | null | null | # Licensed under a 3-clause BSD style license - see LICENSE.rst
import os
import re
from astropy.extern import six
import pytest
import astropy.units as u
from astropy.table import Table
import numpy as np
from ... import simbad
from ...utils.testing_tools import MockResponse
from ...utils import commons
from ...exceptions import TableParseError
from .test_simbad_remote import multicoords
# Fixed sky positions used as test inputs in the various coordinate frames.
GALACTIC_COORDS = commons.GalacticCoordGenerator(l=-67.02084, b=-29.75447,
                                                 unit=(u.deg, u.deg))
ICRS_COORDS = commons.ICRSCoordGenerator("05h35m17.3s -05h23m28s")
FK4_COORDS = commons.FK4CoordGenerator(ra=84.90759, dec=-80.89403,
                                       unit=(u.deg, u.deg))
FK5_COORDS = commons.FK5CoordGenerator(ra=83.82207, dec=-80.86667,
                                       unit=(u.deg, u.deg))
# Maps the verb after "query " in a SIMBAD script to the canned response file
# served by MockResponseSimbad (files live under tests/data/).
DATA_FILES = {
    'id': 'query_id.data',
    'coo': 'query_coo.data',
    'cat': 'query_cat.data',
    'bibobj': 'query_bibobj.data',
    'bibcode': 'query_bibcode.data',
    'objectids': 'query_objectids.data',
    'error': 'query_error.data',
    'sample': 'query_sample.data',
    'region': 'query_sample_region.data',
}
class MockResponseSimbad(MockResponse):
    """Canned SIMBAD HTTP response.

    The content is read from a local fixture file chosen by the first word
    after ``query`` in the submitted script (see DATA_FILES).
    """

    # First lowercase word after 'query' selects the fixture file.
    query_regex = re.compile(r'query\s+([a-z]+)\s+')

    def __init__(self, script, cache=True, **kwargs):
        # preserve, e.g., headers
        super(MockResponseSimbad, self).__init__(**kwargs)
        self.content = self.get_content(script)

    def get_content(self, script):
        """Return the raw bytes of the fixture matching *script* (None if
        the script contains no recognizable query verb)."""
        match = self.query_regex.search(script)
        if match:
            filename = DATA_FILES[match.group(1)]
            # BUGFIX: close the file handle instead of leaking it.
            with open(data_path(filename), "rb") as f:
                return f.read()
        return None
def data_path(filename):
    """Return the path of *filename* inside this module's ``data`` directory."""
    return os.path.join(os.path.dirname(__file__), 'data', filename)
@pytest.fixture
def patch_post(request):
    # Fixture: replace SimbadClass._request with the canned post_mockreturn
    # so tests never hit the network. getfixturevalue replaced
    # getfuncargvalue in pytest 3; support both.
    try:
        mp = request.getfixturevalue("monkeypatch")
    except AttributeError:  # pytest < 3
        mp = request.getfuncargvalue("monkeypatch")
    mp.setattr(simbad.SimbadClass, '_request', post_mockreturn)
    return mp
def post_mockreturn(self, method, url, data, timeout, **kwargs):
    # Stand-in for SimbadClass._request: build a canned response from the
    # submitted script and record the payload on self._last_query so tests
    # can inspect what would have been sent.
    response = MockResponseSimbad(data['script'], **kwargs)

    # Minimal stand-in object mirroring the real _last_query attribute.
    class last_query(object):
        pass
    self._last_query = last_query()
    self._last_query.data = data
    return response
# Radius strings/quantities should be normalized to SIMBAD's compact
# degree/arcmin/arcsec notation.
@pytest.mark.parametrize(('radius', 'expected_radius'),
                         [('5d0m0s', '5.0d'),
                          ('5d', '5.0d'),
                          ('5.0d', '5.0d'),
                          (5 * u.deg, '5.0d'),
                          (5.0 * u.deg, '5.0d'),
                          (1.2 * u.deg, '1.2d'),
                          (0.5 * u.deg, '30.0m'),
                          ('0d1m12s', '1.2m'),
                          (0.003 * u.deg, '10.8s'),
                          ('0d0m15s', '15.0s')
                          ])
def test_parse_radius(radius, expected_radius):
    actual = simbad.core._parse_radius(radius)
    assert actual == expected_radius
@pytest.mark.parametrize(('ra', 'dec', 'expected_ra', 'expected_dec'),
[(ICRS_COORDS.ra, ICRS_COORDS.dec, u'5:35:17.3',
u'-80:52:00')
])
def test_to_simbad_format(ra, dec, expected_ra, expected_dec):
actual_ra, actual_dec = simbad.core._to_simbad_format(ra, dec)
assert (actual_ra, actual_dec) == (expected_ra, expected_dec)
@pytest.mark.parametrize(('coordinates', 'expected_frame'),
[(GALACTIC_COORDS, 'GAL'),
(ICRS_COORDS, 'ICRS'),
(FK4_COORDS, 'FK4'),
(FK5_COORDS, 'FK5')
])
def test_get_frame_coordinates(coordinates, expected_frame):
actual_frame = simbad.core._get_frame_coords(coordinates)[2]
assert actual_frame == expected_frame
if actual_frame == 'GAL':
l, b = simbad.core._get_frame_coords(coordinates)[:2]
np.testing.assert_almost_equal(float(l) % 360, -67.02084 % 360)
np.testing.assert_almost_equal(float(b), -29.75447)
def test_parse_result():
result1 = simbad.core.Simbad._parse_result(
MockResponseSimbad('query id '), simbad.core.SimbadVOTableResult)
assert isinstance(result1, Table)
with pytest.raises(TableParseError) as ex:
simbad.core.Simbad._parse_result(MockResponseSimbad('query error '),
simbad.core.SimbadVOTableResult)
assert str(ex.value) == ('Failed to parse SIMBAD result! The raw response '
'can be found in self.last_response, and the '
'error in self.last_table_parse_error. '
'The attempted parsed result is in '
'self.last_parsed_result.\n Exception: 7:115: '
'no element found')
assert isinstance(simbad.Simbad.last_response.text, six.string_types)
assert isinstance(simbad.Simbad.last_response.content, six.binary_type)
votable_fields = ",".join(simbad.core.Simbad.get_votable_fields())
@pytest.mark.parametrize(('args', 'kwargs', 'expected_script'),
[(["m [0-9]"], dict(wildcard=True,
caller='query_object_async'),
("\nvotable {" + votable_fields + "}\n"
"votable open\n"
"query id wildcard m [0-9] \n"
"votable close"
)),
(["2006ApJ"], dict(caller='query_bibcode_async',
get_raw=True),
("\n\nquery bibcode 2006ApJ \n"))
])
def test_args_to_payload(args, kwargs, expected_script):
script = simbad.Simbad._args_to_payload(*args, **kwargs)['script']
assert script == expected_script
@pytest.mark.parametrize(('epoch', 'equinox'),
[(2000, 'thousand'),
('J-2000', None),
(None, '10e3b')
])
def test_validation(epoch, equinox):
with pytest.raises(ValueError):
# only one of these has to raise an exception
if equinox is not None:
simbad.core.validate_equinox(equinox)
if epoch is not None:
simbad.core.validate_epoch(epoch)
@pytest.mark.parametrize(('bibcode', 'wildcard'),
[('2006ApJ*', True),
('2005A&A.430.165F', None)
])
def test_query_bibcode_async(patch_post, bibcode, wildcard):
response1 = simbad.core.Simbad.query_bibcode_async(bibcode,
wildcard=wildcard)
response2 = simbad.core.Simbad().query_bibcode_async(bibcode,
wildcard=wildcard)
assert response1 is not None and response2 is not None
assert response1.content == response2.content
def test_query_bibcode_class(patch_post):
result1 = simbad.core.Simbad.query_bibcode("2006ApJ*", wildcard=True)
assert isinstance(result1, Table)
def test_query_bibcode_instance(patch_post):
S = simbad.core.Simbad()
result2 = S.query_bibcode("2006ApJ*", wildcard=True)
assert isinstance(result2, Table)
def test_query_objectids_async(patch_post):
response1 = simbad.core.Simbad.query_objectids_async('Polaris')
response2 = simbad.core.Simbad().query_objectids_async('Polaris')
assert response1 is not None and response2 is not None
assert response1.content == response2.content
def test_query_objectids(patch_post):
result1 = simbad.core.Simbad.query_objectids('Polaris')
result2 = simbad.core.Simbad().query_objectids('Polaris')
assert isinstance(result1, Table)
assert isinstance(result2, Table)
def test_query_bibobj_async(patch_post):
response1 = simbad.core.Simbad.query_bibobj_async('2005A&A.430.165F')
response2 = simbad.core.Simbad().query_bibobj_async('2005A&A.430.165F')
assert response1 is not None and response2 is not None
assert response1.content == response2.content
def test_query_bibobj(patch_post):
result1 = simbad.core.Simbad.query_bibobj('2005A&A.430.165F')
result2 = simbad.core.Simbad().query_bibobj('2005A&A.430.165F')
assert isinstance(result1, Table)
assert isinstance(result2, Table)
def test_query_catalog_async(patch_post):
response1 = simbad.core.Simbad.query_catalog_async('m')
response2 = simbad.core.Simbad().query_catalog_async('m')
assert response1 is not None and response2 is not None
assert response1.content == response2.content
def test_query_catalog(patch_post):
result1 = simbad.core.Simbad.query_catalog('m')
result2 = simbad.core.Simbad().query_catalog('m')
assert isinstance(result1, Table)
assert isinstance(result2, Table)
@pytest.mark.parametrize(('coordinates', 'radius', 'equinox', 'epoch'),
[(ICRS_COORDS, None, 2000.0, 'J2000'),
(GALACTIC_COORDS, 5 * u.deg, 2000.0, 'J2000'),
(FK4_COORDS, '5d0m0s', 2000.0, 'J2000'),
(FK5_COORDS, None, 2000.0, 'J2000'),
(multicoords, 0.5*u.arcsec, 2000.0, 'J2000'),
])
def test_query_region_async(patch_post, coordinates, radius, equinox, epoch):
response1 = simbad.core.Simbad.query_region_async(
coordinates, radius=radius, equinox=equinox, epoch=epoch)
response2 = simbad.core.Simbad().query_region_async(
coordinates, radius=radius, equinox=equinox, epoch=epoch)
assert response1 is not None and response2 is not None
assert response1.content == response2.content
@pytest.mark.parametrize(('coordinates', 'radius', 'equinox', 'epoch'),
[(ICRS_COORDS, None, 2000.0, 'J2000'),
(GALACTIC_COORDS, 5 * u.deg, 2000.0, 'J2000'),
(FK4_COORDS, '5d0m0s', 2000.0, 'J2000'),
(FK5_COORDS, None, 2000.0, 'J2000')
])
def test_query_region(patch_post, coordinates, radius, equinox, epoch):
result1 = simbad.core.Simbad.query_region(coordinates, radius=radius,
equinox=equinox, epoch=epoch)
result2 = simbad.core.Simbad().query_region(coordinates, radius=radius,
equinox=equinox, epoch=epoch)
assert isinstance(result1, Table)
assert isinstance(result2, Table)
@pytest.mark.parametrize(('coordinates', 'radius', 'equinox', 'epoch'),
[(ICRS_COORDS, 0, 2000.0, 'J2000')])
def test_query_region_radius_error(patch_post, coordinates, radius,
equinox, epoch):
with pytest.raises(u.UnitsError):
simbad.core.Simbad.query_region(
coordinates, radius=radius, equinox=equinox, epoch=epoch)
with pytest.raises(u.UnitsError):
simbad.core.Simbad().query_region(
coordinates, radius=radius, equinox=equinox, epoch=epoch)
@pytest.mark.parametrize(('coordinates', 'radius', 'equinox', 'epoch'),
[(ICRS_COORDS, "0d", 2000.0, 'J2000'),
(GALACTIC_COORDS, 1.0 * u.marcsec, 2000.0, 'J2000')
])
def test_query_region_small_radius(patch_post, coordinates, radius,
equinox, epoch):
result1 = simbad.core.Simbad.query_region(coordinates, radius=radius,
equinox=equinox, epoch=epoch)
result2 = simbad.core.Simbad().query_region(coordinates, radius=radius,
equinox=equinox, epoch=epoch)
assert isinstance(result1, Table)
assert isinstance(result2, Table)
@pytest.mark.parametrize(('object_name', 'wildcard'),
[("m1", None),
("m [0-9]", True)
])
def test_query_object_async(patch_post, object_name, wildcard):
response1 = simbad.core.Simbad.query_object_async(object_name,
wildcard=wildcard)
response2 = simbad.core.Simbad().query_object_async(object_name,
wildcard=wildcard)
assert response1 is not None and response2 is not None
assert response1.content == response2.content
@pytest.mark.parametrize(('object_name', 'wildcard'),
                         [("m1", None),
                          ("m [0-9]", True),
                          ])
def test_query_object(patch_post, object_name, wildcard):
    """Object queries return a Table via both class-level and instance calls."""
    for interface in (simbad.core.Simbad, simbad.core.Simbad()):
        table = interface.query_object(object_name, wildcard=wildcard)
        assert isinstance(table, Table)
def test_list_votable_fields():
    # Smoke test: listing available VOTable fields must not raise, whether
    # called on the class or on a fresh instance.
    simbad.core.Simbad.list_votable_fields()
    simbad.core.Simbad().list_votable_fields()
def test_get_field_description():
    # A known field description can be fetched from class or instance...
    simbad.core.Simbad.get_field_description('bibcodelist(y1-y2)')
    simbad.core.Simbad().get_field_description('bibcodelist(y1-y2)')
    # ...while an unknown field name raises.
    with pytest.raises(Exception):
        simbad.core.Simbad.get_field_description('xyz')
def test_votable_fields():
    # Adding fields appends them to the defaults ('main_id', 'coordinates').
    simbad.core.Simbad.add_votable_fields('rot', 'ze', 'z')
    assert (set(simbad.core.Simbad.get_votable_fields()) ==
            set(['main_id', 'coordinates', 'rot', 'ze', 'z']))
    # Re-adding an existing field raises KeyError and leaves the set alone.
    try:
        simbad.core.Simbad.add_votable_fields('z')
    except KeyError:
        pass # this is the expected response
    assert (set(simbad.core.Simbad.get_votable_fields()) ==
            set(['main_id', 'coordinates', 'rot', 'ze', 'z']))
    # Removal works, and removing already-absent fields is a no-op.
    simbad.core.Simbad.remove_votable_fields('rot', 'main_id', 'coordinates')
    assert set(simbad.core.Simbad.get_votable_fields()) == set(['ze', 'z'])
    simbad.core.Simbad.remove_votable_fields('rot', 'main_id', 'coordinates')
    assert set(simbad.core.Simbad.get_votable_fields()) == set(['ze', 'z'])
    # Emptying the field list restores the defaults automatically.
    simbad.core.Simbad.remove_votable_fields('ze', 'z')
    assert (set(simbad.core.Simbad.get_votable_fields()) ==
            set(['main_id', 'coordinates']))
    # reset_votable_fields() also restores the defaults.
    simbad.core.Simbad.add_votable_fields('rot', 'ze', 'z')
    simbad.core.Simbad.reset_votable_fields()
    assert (set(simbad.core.Simbad.get_votable_fields()) ==
            set(['main_id', 'coordinates']))
def test_query_criteria1(patch_post):
    """A raw criteria string is forwarded verbatim into the sent script."""
    criteria = "region(box, GAL, 49.89 -0.3, 0.5d 0.5d)"
    instance = simbad.core.Simbad()
    table = instance.query_criteria(criteria, otype='HII')
    assert isinstance(table, Table)
    assert criteria in instance._last_query.data['script']
def test_query_criteria2(patch_post):
    """Keyword criteria are rendered into the script after field edits."""
    instance = simbad.core.Simbad()
    instance.add_votable_fields('ra(d)', 'dec(d)')
    instance.remove_votable_fields('coordinates')
    assert instance.get_votable_fields() == ['main_id', 'ra(d)', 'dec(d)']
    table = instance.query_criteria(otype='SNR')
    assert isinstance(table, Table)
    assert 'otype=SNR' in instance._last_query.data['script']
def test_simbad_settings1():
    # Removing plain 'ra'/'dec' must not strip parametrised variants
    # such as 'dec(5)'.
    assert simbad.Simbad.get_votable_fields() == ['main_id', 'coordinates']
    simbad.core.Simbad.add_votable_fields('ra', 'dec(5)')
    simbad.core.Simbad.remove_votable_fields('ra', 'dec')
    assert (simbad.Simbad.get_votable_fields() ==
            ['main_id', 'coordinates', 'dec(5)'])
    # Restore defaults so later tests see a clean class-level state.
    simbad.core.Simbad.reset_votable_fields()
def test_simbad_settings2():
    # With strip_params=True, removing 'dec' also strips 'dec(5)'.
    assert simbad.Simbad.get_votable_fields() == ['main_id', 'coordinates']
    simbad.core.Simbad.add_votable_fields('ra', 'dec(5)')
    simbad.core.Simbad.remove_votable_fields('ra', 'dec', strip_params=True)
    assert simbad.Simbad.get_votable_fields() == ['main_id', 'coordinates']
def test_regression_votablesettings():
    # Regression: parametrised variants of an already-added field (e.g.
    # 'ra(d)' after 'ra') may be added without raising.
    assert simbad.Simbad.get_votable_fields() == ['main_id', 'coordinates']
    simbad.core.Simbad.add_votable_fields('ra', 'dec(5)')
    # this is now allowed:
    simbad.core.Simbad.add_votable_fields('ra(d)', 'dec(d)')
    assert simbad.Simbad.get_votable_fields() == ['main_id', 'coordinates',
                                                  'ra', 'dec(5)', 'ra(d)',
                                                  'dec(d)']
    # cleanup
    simbad.core.Simbad.remove_votable_fields('ra', 'dec', strip_params=True)
    assert simbad.Simbad.get_votable_fields() == ['main_id', 'coordinates']
def test_regression_votablesettings2():
    # Regression: multiple fluxdata(...) variants can coexist and are all
    # stripped together by remove_votable_fields(..., strip_params=True).
    assert simbad.Simbad.get_votable_fields() == ['main_id', 'coordinates']
    simbad.core.Simbad.add_votable_fields('fluxdata(J)')
    simbad.core.Simbad.add_votable_fields('fluxdata(H)')
    simbad.core.Simbad.add_votable_fields('fluxdata(K)')
    assert (simbad.Simbad.get_votable_fields() ==
            ['main_id', 'coordinates',
             'fluxdata(J)', 'fluxdata(H)', 'fluxdata(K)'])
    simbad.core.Simbad.remove_votable_fields('fluxdata', strip_params=True)
    assert simbad.Simbad.get_votable_fields() == ['main_id', 'coordinates']
def test_regression_issue388():
    # This is a python-3 issue: content needs to be decoded?
    response = MockResponseSimbad('\nvotable {main_id,coordinates}\nvotable '
                                  'open\nquery id m1 \nvotable close')
    with open(data_path('m1.data'), "rb") as f:
        response.content = f.read()
    parsed_table = simbad.Simbad._parse_result(response,
                                               simbad.core.SimbadVOTableResult)
    # The parsed VOTable keeps MAIN_ID as a byte string, hence b'M 1'.
    assert parsed_table['MAIN_ID'][0] == b'M 1'
    assert len(parsed_table) == 1
| 41.820093 | 89 | 0.60428 |
d11c59e0bb168ac363cd6d787e6f29ee58ab6804 | 6,723 | py | Python | odps/errors.py | hekaisheng/aliyun-odps-python-sdk | a08f5a9f006487dd3443ebe000f363e9cbee6a80 | [
"Apache-2.0"
] | null | null | null | odps/errors.py | hekaisheng/aliyun-odps-python-sdk | a08f5a9f006487dd3443ebe000f363e9cbee6a80 | [
"Apache-2.0"
] | null | null | null | odps/errors.py | hekaisheng/aliyun-odps-python-sdk | a08f5a9f006487dd3443ebe000f363e9cbee6a80 | [
"Apache-2.0"
] | 1 | 2017-06-27T08:18:29.000Z | 2017-06-27T08:18:29.000Z | # -*- coding: utf-8 -*-
# Copyright 1999-2017 Alibaba Group Holding Ltd.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
import calendar
import operator
import logging
from datetime import datetime
from . import utils
from .compat import six, reduce, ElementTree as ET, ElementTreeParseError as ETParseError
LOG = logging.getLogger(__name__)
class DependencyNotInstalledError(Exception):
    """Raised when an optional third-party dependency is not installed."""
    pass
class InteractiveError(Exception):
    """Raised for errors occurring in interactive (e.g. notebook) usage."""
    pass
def parse_response(resp):
    """Parse the content of an error response into an exception instance.

    Handles both XML and JSON error payloads.  Returns None when the body
    is empty and carries no structured error information.
    """
    host_id = msg = code = None
    content = resp.content
    try:
        root = ET.fromstring(content)
    except ETParseError:
        # Not XML: fall back to a JSON payload (request id may instead be
        # carried in a response header).
        request_id = resp.headers.get('x-odps-request-id', None)
        if len(content) == 0:
            return
        payload = json.loads(resp.text)
        msg = payload['Message']
        code = payload.get('Code')
        host_id = payload.get('HostId')
        if request_id is None:
            request_id = payload.get('RequestId')
    else:
        code = root.find('./Code').text
        msg = root.find('./Message').text
        request_id = root.find('./RequestId').text
        host_id = root.find('./HostId').text
    # Map the error code to a matching exception subclass defined in this
    # module; fall back to the generic base error.
    clz = globals().get(code, ODPSError)
    return clz(msg, request_id=request_id, code=code, host_id=host_id)
def throw_if_parsable(resp):
    """Raise a structured ODPSError parsed from *resp* when possible.

    Falls back to a generic error built from the HTTP status code when
    the response body cannot be parsed into a structured error.
    """
    e = None
    try:
        e = parse_response(resp)
    except Exception:
        # Error occurred during parsing the response. We ignore it and
        # delegate the situation to caller to handle.  Narrowed from a
        # bare except so SystemExit/KeyboardInterrupt still propagate.
        LOG.debug(utils.stringify_expt())
    if e is not None:
        raise e
    if resp.status_code == 404:
        raise NoSuchObject('No such object.')
    text = resp.text if six.PY3 else resp.content
    if text:
        raise ODPSError(text, code=str(resp.status_code))
    raise ODPSError(str(resp.status_code))
# Maps ODPS server error codes (as embedded in instance error messages)
# to the names of exception classes defined in this module; used by
# parse_instance_error() via globals().
_CODE_MAPPING = {
    'ODPS-0010000': 'InternalServerError',
    'ODPS-0110141': 'DataVersionError',
    'ODPS-0123055': 'ScriptError',
    'ODPS-0130131': 'NoSuchTable',
    'ODPS-0430055': 'InternalConnectionError',
}
def parse_instance_error(msg):
    """Map a raw instance error message to its matching exception class.

    The message is tokenised on ' - ' and ':' separators; the first token
    matching a known ODPS error code selects the exception subclass.
    """
    msg = utils.to_str(msg)
    tokens = []
    for part in msg.split(' - '):
        tokens.extend(part.split(':'))
    tokens = [token.strip() for token in tokens]
    msg_code = None
    cls = ODPSError
    for token in tokens:
        if token in _CODE_MAPPING:
            msg_code = token
            cls = globals().get(_CODE_MAPPING[token], ODPSError)
            break
    return cls(msg, code=msg_code)
class ODPSError(RuntimeError):
    """Base class for errors returned by the ODPS service.

    Attributes:
        request_id: server-assigned id of the failing request, if any.
        instance_id: id of the related instance, if any.
        code: ODPS error code string, if any.
        host_id: id of the serving host, if any.
    """
    def __init__(self, msg, request_id=None, code=None, host_id=None, instance_id=None):
        super(ODPSError, self).__init__(msg)
        self.request_id = request_id
        self.instance_id = instance_id
        self.code = code
        self.host_id = host_id

    def __str__(self):
        # Python 2 exceptions expose ``message``; on Python 3 the text
        # lives in ``args[0]``.
        if hasattr(self, 'message'):
            message = self.message
        else:
            message = self.args[0]  # py3
        if self.request_id:
            message = 'RequestId: %s\n%s' % (self.request_id, message)
        if self.instance_id:
            message = 'InstanceId: %s\n%s' % (self.instance_id, message)
        if self.code:
            return '%s: %s' % (self.code, message)
        return message

    @classmethod
    def parse(cls, resp):
        """Best-effort construction of an error object from a raw response.

        Tries the structured XML/JSON parser first, then raw JSON, then
        wraps the raw content.  Never raises.
        """
        content = resp.content
        try:
            error = parse_response(resp)
        except Exception:
            # Narrowed from bare ``except:`` so SystemExit and
            # KeyboardInterrupt are not swallowed here.
            try:
                root = json.loads(content)
                code = root.get('Code')
                msg = root.get('Message')
                request_id = root.get('RequestId')
                host_id = root.get('HostId')
                error = ODPSError(msg, request_id, code, host_id)
            except Exception:
                # XXX: Can this happen?
                error = ODPSError(content, None)
        return error
class ConnectTimeout(ODPSError):
    """Raised when connecting to the ODPS endpoint times out."""
    pass
class DataHealthManagerError(ODPSError):
    """Raised for errors reported by the data health manager."""
    pass
class ServerDefinedException(ODPSError):
    """Base class for error codes defined by the ODPS server."""
    pass
# A long list of server defined exceptions
class MethodNotAllowed(ServerDefinedException):
    """HTTP method not allowed for the requested resource."""
    pass
class NoSuchObject(ServerDefinedException):
    """Requested object does not exist (also raised on plain HTTP 404)."""
    pass
class NoSuchPartition(NoSuchObject):
    """Requested table partition does not exist."""
    pass
class NoSuchPath(NoSuchObject):
    """Requested resource path does not exist."""
    pass
class NoSuchTable(NoSuchObject):
    """Requested table does not exist."""
    pass
class InvalidArgument(ServerDefinedException):
    """Server rejected a request argument as invalid."""
    pass
class Unauthorized(ServerDefinedException):
    """Authentication failed or credentials are missing."""
    pass
class SchemaParseError(ServerDefinedException):
    """Server failed to parse a provided schema."""
    pass
class InvalidStateSetting(ServerDefinedException):
    """Requested state transition is not valid."""
    pass
class InvalidProjectTable(ServerDefinedException):
    """Project/table combination is invalid."""
    pass
class NoPermission(ServerDefinedException):
    """Caller lacks permission for the requested operation."""
    pass
class InternalServerError(ServerDefinedException):
    """Generic server-side failure."""
    pass
class ReadMetaError(InternalServerError):
    """Server failed to read object metadata."""
    pass
class ServiceUnavailable(InternalServerError):
    """Service is temporarily unavailable."""
    pass
class ScriptError(ServerDefinedException):
    """Submitted script failed server-side validation/execution."""
    pass
class DataVersionError(InternalServerError):
    """Data version conflict detected by the server."""
    pass
class InstanceTypeNotSupported(ServerDefinedException):
    """Requested instance type is not supported."""
    pass
def __init__(self, msg, *args, **kwargs):
super(RequestTimeTooSkewed, self).__init__(msg, *args, **kwargs)
try:
parts = msg.split(',')
kv_dict = dict(tuple(s.strip() for s in p.split(':', 1)) for p in parts)
self.max_interval_date = int(kv_dict['max_interval_date'])
self.expire_date = self._parse_error_date(kv_dict['expire_date'])
self.now_date = self._parse_error_date(kv_dict['now_date'])
except:
self.max_interval_date = None
self.expire_date = None
self.now_date = None
@staticmethod
def _parse_error_date(date_str):
date_obj = datetime.strptime(date_str, '%Y-%m-%dT%H:%M:%S.%fZ')
micros = date_obj.microsecond
return datetime.fromtimestamp(calendar.timegm(date_obj.timetuple())).replace(microsecond=micros)
# Handling error code typo in ODPS error message
RequestTimeTooSkewd = RequestTimeTooSkewed
| 26.159533 | 104 | 0.651346 |
ef9172673846d283ac1821ed2f76519815f94f26 | 273 | py | Python | data/config.py | Vladvlad9/moderator_bot | 080d4f60d3492514a9da52ee9787320d788b83ed | [
"MIT"
] | null | null | null | data/config.py | Vladvlad9/moderator_bot | 080d4f60d3492514a9da52ee9787320d788b83ed | [
"MIT"
] | null | null | null | data/config.py | Vladvlad9/moderator_bot | 080d4f60d3492514a9da52ee9787320d788b83ed | [
"MIT"
] | null | null | null | import os
from dotenv import load_dotenv
from os import environ
# Populate os.environ from the local .env file before reading settings.
load_dotenv(".env")
BOT_TOKEN = os.environ["BOT_TOKEN"]  # bot API token; KeyError if unset
DATABASE = os.environ["DATABASE"]  # database connection setting; KeyError if unset
# Statically configured admin user ids (see load_admins() for the
# environment-driven variant).
admins = [
    381252111,
]
async def load_admins() -> tuple:
    """Read the comma-separated ADMINS env var as a tuple of ints."""
    raw_ids = environ["ADMINS"].split(",")
    return tuple(int(raw_id) for raw_id in raw_ids)
| 18.2 | 56 | 0.710623 |
53c526d548fd0c1e119b6edcf72b63360efba099 | 114 | py | Python | src/apps/core/views.py | yrrodriguezb/djangp_apps | 7a0f85f65558e02d0707525b5d7f5bfa6caacb2d | [
"MIT"
] | null | null | null | src/apps/core/views.py | yrrodriguezb/djangp_apps | 7a0f85f65558e02d0707525b5d7f5bfa6caacb2d | [
"MIT"
] | null | null | null | src/apps/core/views.py | yrrodriguezb/djangp_apps | 7a0f85f65558e02d0707525b5d7f5bfa6caacb2d | [
"MIT"
] | null | null | null | from django.views.generic import TemplateView
class HomeView(TemplateView):
    """Render the static landing page template."""
    template_name = "core/home.html"
7a75543fa812b9d723dc76c62cf9a764e67f46a6 | 857 | py | Python | avions/Vol.py | ErnestBidouille/avions_hotels_methodes_agiles | e50bb600544be0a154a8c2f7a6cbac23e2c265d5 | [
"MIT"
] | null | null | null | avions/Vol.py | ErnestBidouille/avions_hotels_methodes_agiles | e50bb600544be0a154a8c2f7a6cbac23e2c265d5 | [
"MIT"
] | null | null | null | avions/Vol.py | ErnestBidouille/avions_hotels_methodes_agiles | e50bb600544be0a154a8c2f7a6cbac23e2c265d5 | [
"MIT"
] | null | null | null | class Vol(object):
def __init__(self, ville_depart, ville_arrivee, distance, nb_max_passenger):
self._ville_depart = ville_depart
self._ville_arrivee = ville_arrivee
self._distance = float(distance)
self._nb_max_passenger = float(nb_max_passenger)
self._lst_avion = []
def set_avion(self, a):
self._lst_avion = a
def add_avion(self, a):
self._lst_avion.append(a)
def crash(self, dist):
if self._distance > dist:
return True
return False
def calcul_nb_passenger_atc(self):
resultat = 0.0
for a in self._lst_avion:
resultat += a.get_passenger();
return float(resultat)
def is_max_passenger(self):
if self._nb_max_passenger > self.calcul_nb_passenger_atc():
return True
return False
| 28.566667 | 80 | 0.631272 |
e07063fc8b1a9943d0960b6dce58ee1d5187f43c | 9,855 | py | Python | deepbeliefnetwork.py | indervirbanipal/tensorflow | 1f54174a98d5d5ce66523f1d5aec6ebd51edb320 | [
"MIT"
] | null | null | null | deepbeliefnetwork.py | indervirbanipal/tensorflow | 1f54174a98d5d5ce66523f1d5aec6ebd51edb320 | [
"MIT"
] | null | null | null | deepbeliefnetwork.py | indervirbanipal/tensorflow | 1f54174a98d5d5ce66523f1d5aec6ebd51edb320 | [
"MIT"
] | null | null | null | #urllib is used to download the utils file from deeplearning.net
import urllib.request
with urllib.request.urlopen("http://deeplearning.net/tutorial/code/utils.py") as url:
response = url.read()
target = open('utils.py', 'w')
target.write(response.decode('utf-8'))
target.close()
#Import the math function for calculations
import math
#Tensorflow library. Used to implement machine learning models
import tensorflow as tf
#Numpy contains helpful functions for efficient mathematical calculations
import numpy as np
#Image library for image manipulation
from PIL import Image
#import Image
#Utils file
from utils import tile_raster_images
# Class that defines the behavior of the RBM
class RBM(object):
def __init__(self, input_size, output_size):
# Defining the hyperparameters
self._input_size = input_size # Size of input
self._output_size = output_size # Size of output
self.epochs = 5 # Amount of training iterations
self.learning_rate = 1.0 # The step used in gradient descent
self.batchsize = 100 # The size of how much data will be used for training per sub iteration
# Initializing weights and biases as matrices full of zeroes
self.w = np.zeros([input_size, output_size], np.float32) # Creates and initializes the weights with 0
self.hb = np.zeros([output_size], np.float32) # Creates and initializes the hidden biases with 0
self.vb = np.zeros([input_size], np.float32) # Creates and initializes the visible biases with 0
# Fits the result from the weighted visible layer plus the bias into a sigmoid curve
def prob_h_given_v(self, visible, w, hb):
# Sigmoid
return tf.nn.sigmoid(tf.matmul(visible, w) + hb)
# Fits the result from the weighted hidden layer plus the bias into a sigmoid curve
def prob_v_given_h(self, hidden, w, vb):
return tf.nn.sigmoid(tf.matmul(hidden, tf.transpose(w)) + vb)
# Generate the sample probability
def sample_prob(self, probs):
return tf.nn.relu(tf.sign(probs - tf.random_uniform(tf.shape(probs))))
# Training method for the model
def train(self, X):
# Create the placeholders for our parameters
_w = tf.placeholder("float", [self._input_size, self._output_size])
_hb = tf.placeholder("float", [self._output_size])
_vb = tf.placeholder("float", [self._input_size])
prv_w = np.zeros([self._input_size, self._output_size],
np.float32) # Creates and initializes the weights with 0
prv_hb = np.zeros([self._output_size], np.float32) # Creates and initializes the hidden biases with 0
prv_vb = np.zeros([self._input_size], np.float32) # Creates and initializes the visible biases with 0
cur_w = np.zeros([self._input_size, self._output_size], np.float32)
cur_hb = np.zeros([self._output_size], np.float32)
cur_vb = np.zeros([self._input_size], np.float32)
v0 = tf.placeholder("float", [None, self._input_size])
# Initialize with sample probabilities
h0 = self.sample_prob(self.prob_h_given_v(v0, _w, _hb))
v1 = self.sample_prob(self.prob_v_given_h(h0, _w, _vb))
h1 = self.prob_h_given_v(v1, _w, _hb)
# Create the Gradients
positive_grad = tf.matmul(tf.transpose(v0), h0)
negative_grad = tf.matmul(tf.transpose(v1), h1)
# Update learning rates for the layers
update_w = _w + self.learning_rate * (positive_grad - negative_grad) / tf.to_float(tf.shape(v0)[0])
update_vb = _vb + self.learning_rate * tf.reduce_mean(v0 - v1, 0)
update_hb = _hb + self.learning_rate * tf.reduce_mean(h0 - h1, 0)
# Find the error rate
err = tf.reduce_mean(tf.square(v0 - v1))
# Training loop
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
# For each epoch
for epoch in range(self.epochs):
# For each step/batch
for start, end in zip(range(0, len(X), self.batchsize), range(self.batchsize, len(X), self.batchsize)):
batch = X[start:end]
# Update the rates
cur_w = sess.run(update_w, feed_dict={v0: batch, _w: prv_w, _hb: prv_hb, _vb: prv_vb})
cur_hb = sess.run(update_hb, feed_dict={v0: batch, _w: prv_w, _hb: prv_hb, _vb: prv_vb})
cur_vb = sess.run(update_vb, feed_dict={v0: batch, _w: prv_w, _hb: prv_hb, _vb: prv_vb})
prv_w = cur_w
prv_hb = cur_hb
prv_vb = cur_vb
error = sess.run(err, feed_dict={v0: X, _w: cur_w, _vb: cur_vb, _hb: cur_hb})
print('Epoch: %d' % epoch, 'reconstruction error: %f' % error)
self.w = prv_w
self.hb = prv_hb
self.vb = prv_vb
# Create expected output for our DBN
def rbm_outpt(self, X):
input_X = tf.constant(X)
_w = tf.constant(self.w)
_hb = tf.constant(self.hb)
out = tf.nn.sigmoid(tf.matmul(input_X, _w) + _hb)
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
return sess.run(out)
# MNIST
#Getting the MNIST data provided by Tensorflow
from tensorflow.examples.tutorials.mnist import input_data
#Loading in the mnist data
mnist = input_data.read_data_sets("MNIST_data/", one_hot=True)
trX, trY, teX, teY = mnist.train.images, mnist.train.labels, mnist.test.images,\
    mnist.test.labels
RBM_hidden_sizes = [500, 200 , 50 ] # three stacked RBM layers of sizes 500, 200 and 50
#Since we are training, set input as training data
inpX = trX
#Create list to hold our RBMs
rbm_list = []
#Size of inputs is the number of inputs in the training set
input_size = inpX.shape[1]
#For each RBM we want to generate
for i, size in enumerate(RBM_hidden_sizes):
    print ('RBM: ',i,' ',input_size,'->', size)
    rbm_list.append(RBM(input_size, size))
    input_size = size
# Greedy layer-wise pretraining: each RBM is trained on the previous
# layer's output activations.
#For each RBM in our list
for rbm in rbm_list:
    print ('New RBM:')
    #Train a new one
    rbm.train(inpX)
    #Return the output layer
    inpX = rbm.rbm_outpt(inpX)
import numpy as np
import math
import tensorflow as tf
class NN(object):
    def __init__(self, sizes, X, Y):
        """Build a feed-forward net with hidden layer sizes *sizes*.

        X and Y are the training inputs and one-hot labels; weights are
        initialised with the Xavier-style uniform range used below.
        """
        # Initialize hyperparameters
        self._sizes = sizes
        self._X = X
        self._Y = Y
        self.w_list = []
        self.b_list = []
        self._learning_rate = 1.0
        self._momentum = 0.0
        self._epoches = 10
        self._batchsize = 100
        input_size = X.shape[1]
        # initialization loop (includes the output layer Y.shape[1])
        for size in self._sizes + [Y.shape[1]]:
            # Define upper limit for the uniform distribution range
            max_range = 4 * math.sqrt(6. / (input_size + size))
            # Initialize weights through a random uniform distribution
            self.w_list.append(
                np.random.uniform(-max_range, max_range, [input_size, size]).astype(np.float32))
            # Initialize bias as zeroes
            self.b_list.append(np.zeros([size], np.float32))
            input_size = size
    # load data from rbm
    def load_from_rbms(self, dbn_sizes, rbm_list):
        """Seed hidden-layer weights/biases from a pretrained RBM stack."""
        # Check if expected sizes are correct
        assert len(dbn_sizes) == len(self._sizes)
        for i in range(len(self._sizes)):
            # Check if for each RBN the expected sizes are correct
            assert dbn_sizes[i] == self._sizes[i]
        # If everything is correct, bring over the weights and biases
        for i in range(len(self._sizes)):
            self.w_list[i] = rbm_list[i].w
            self.b_list[i] = rbm_list[i].hb
    # Training method
    def train(self):
        """Fine-tune all layers with momentum SGD on squared error.

        Prints per-epoch training accuracy; learned parameters are read
        back into self.w_list / self.b_list each epoch.
        """
        # Create placeholders for input, weights, biases, output
        _a = [None] * (len(self._sizes) + 2)
        _w = [None] * (len(self._sizes) + 1)
        _b = [None] * (len(self._sizes) + 1)
        _a[0] = tf.placeholder("float", [None, self._X.shape[1]])
        y = tf.placeholder("float", [None, self._Y.shape[1]])
        # Define variables and activation functoin
        for i in range(len(self._sizes) + 1):
            _w[i] = tf.Variable(self.w_list[i])
            _b[i] = tf.Variable(self.b_list[i])
        for i in range(1, len(self._sizes) + 2):
            _a[i] = tf.nn.sigmoid(tf.matmul(_a[i - 1], _w[i - 1]) + _b[i - 1])
        # Define the cost function
        cost = tf.reduce_mean(tf.square(_a[-1] - y))
        # Define the training operation (Momentum Optimizer minimizing the Cost function)
        train_op = tf.train.MomentumOptimizer(self._learning_rate, self._momentum).minimize(cost)
        # Prediction operation
        predict_op = tf.argmax(_a[-1], 1)
        # Training Loop
        with tf.Session() as sess:
            # Initialize Variables
            sess.run(tf.global_variables_initializer())
            # For each epoch
            for i in range(self._epoches):
                # For each step
                for start, end in zip(range(0, len(self._X), self._batchsize),
                                      range(self._batchsize, len(self._X), self._batchsize)):
                    # Run the training operation on the input data
                    sess.run(train_op, feed_dict={_a[0]: self._X[start:end], y: self._Y[start:end]})
                for j in range(len(self._sizes) + 1):
                    # Retrieve weights and biases
                    self.w_list[j] = sess.run(_w[j])
                    self.b_list[j] = sess.run(_b[j])
                print("Accuracy rating for epoch " + str(i) + ": " + str(np.mean(
                    np.argmax(self._Y, axis=1) == sess.run(predict_op, feed_dict={_a[0]: self._X, y: self._Y}))))
# Build the deep net on top of the pretrained RBM stack and fine-tune it.
nNet = NN(RBM_hidden_sizes, trX, trY)
nNet.load_from_rbms(RBM_hidden_sizes,rbm_list)
nNet.train()
327fe85168f5a5ab64791e94a2ff4211f51c5624 | 10,931 | py | Python | core/platform/email/mailgun_email_services_test.py | tjinjoy/oppia | ed5ccbd95e42078457d40dde1dda02f1ae6a4354 | [
"Apache-2.0"
] | 2 | 2019-03-31T07:03:32.000Z | 2019-04-24T18:12:53.000Z | core/platform/email/mailgun_email_services_test.py | tjinjoy/oppia | ed5ccbd95e42078457d40dde1dda02f1ae6a4354 | [
"Apache-2.0"
] | 3 | 2020-03-16T06:40:30.000Z | 2020-03-30T12:10:20.000Z | core/platform/email/mailgun_email_services_test.py | tjinjoy/oppia | ed5ccbd95e42078457d40dde1dda02f1ae6a4354 | [
"Apache-2.0"
] | 1 | 2020-03-15T14:29:55.000Z | 2020-03-15T14:29:55.000Z | # coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for the Mailgun API wrapper."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
from core.platform.email import mailgun_email_services
from core.tests import test_utils
import feconf
import python_utils
class EmailTests(test_utils.GenericTestBase):
    """Tests for sending emails.

    Each test swaps feconf settings (and, where needed, the low-level
    post_to_mailgun() function) so that no real HTTP request is made.
    """
    def test_post_to_mailgun(self):
        """Test for sending HTTP POST request."""
        # Identity/echo stand-ins so post_to_mailgun() returns the exact
        # (url, encoded_payload, headers) tuple it would have sent.
        swapped_urlopen = lambda x: x
        swapped_request = lambda *args: args
        swap_urlopen_context = self.swap(
            python_utils, 'url_open', swapped_urlopen)
        swap_request_context = self.swap(
            python_utils, 'url_request', swapped_request)
        swap_api = self.swap(feconf, 'MAILGUN_API_KEY', 'key')
        swap_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain')
        with swap_urlopen_context, swap_request_context, swap_api, swap_domain:
            result = mailgun_email_services.post_to_mailgun({
                'from': 'a@a.com',
                'to': 'b@b.com',
                'subject': 'Hola 😂 - invitation to collaborate'.encode(
                    encoding='utf-8'),
                'text': 'plaintext_body 😂'.encode(encoding='utf-8'),
                'html': 'Hi abc,<br> 😂'.encode(encoding='utf-8')
            })
        # The UTF-8 emoji must survive URL-encoding; the auth header is
        # base64('api:key').
        expected = (
            'https://api.mailgun.net/v3/domain/messages',
            ('to=b%40b.com&text=plaintext_body+%F0%9F%98%82&html=Hi+abc'
             '%2C%3Cbr%3E+%F0%9F%98%82&from=a%40a.com&subject=Hola+%F0'
             '%9F%98%82+-+invitation+to+collaborate'),
            {'Authorization': 'Basic YXBpOmtleQ=='})
        self.assertEqual(result, expected)
    def test_send_mail_raises_exception_for_missing_api_key(self):
        """Tests the missing Mailgun API key exception."""
        mailgun_api_exception = (
            self.assertRaisesRegexp(
                Exception, 'Mailgun API key is not available.'))
        allow_emailing = self.swap(feconf, 'CAN_SEND_EMAILS', True)
        with mailgun_api_exception, allow_emailing:
            mailgun_email_services.send_mail(
                feconf.SYSTEM_EMAIL_ADDRESS, feconf.ADMIN_EMAIL_ADDRESS,
                'subject', 'body', 'html', bcc_admin=False)
    def test_send_mail_raises_exception_for_missing_domain_name(self):
        """Tests the missing Mailgun domain name exception."""
        mailgun_api = self.swap(feconf, 'MAILGUN_API_KEY', 'api')
        mailgun_domain_name_exception = (
            self.assertRaisesRegexp(
                Exception, 'Mailgun domain name is not set.'))
        allow_emailing = self.swap(feconf, 'CAN_SEND_EMAILS', True)
        with mailgun_api, mailgun_domain_name_exception, allow_emailing:
            mailgun_email_services.send_mail(
                feconf.SYSTEM_EMAIL_ADDRESS, feconf.ADMIN_EMAIL_ADDRESS,
                'subject', 'body', 'html', bcc_admin=False)
    def test_send_mail_raises_exception_for_invalid_permissions(self):
        """Tests the send_mail exception raised for invalid user permissions."""
        # Note: CAN_SEND_EMAILS is deliberately NOT swapped to True here.
        send_email_exception = (
            self.assertRaisesRegexp(
                Exception, 'This app cannot send emails to users.'))
        mailgun_api = self.swap(feconf, 'MAILGUN_API_KEY', 'api')
        mailgun_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain')
        with mailgun_api, mailgun_domain, send_email_exception:
            mailgun_email_services.send_mail(
                feconf.SYSTEM_EMAIL_ADDRESS, feconf.ADMIN_EMAIL_ADDRESS,
                'subject', 'body', 'html', bcc_admin=False)
    def test_send_mail_data_properly_sent(self):
        """Verifies that the data sent in send_mail is correct."""
        mailgun_api = self.swap(feconf, 'MAILGUN_API_KEY', 'api')
        mailgun_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain')
        allow_emailing = self.swap(feconf, 'CAN_SEND_EMAILS', True)
        # Data we expect to have been sent in the
        # mailgun_email_services.post_to_mailgun().
        expected = {'from': feconf.SYSTEM_EMAIL_ADDRESS,
                    'to': feconf.ADMIN_EMAIL_ADDRESS,
                    'subject': 'subject',
                    'text': 'body',
                    'html': 'html'}
        # Lambda function, will replace post_to_mailgun(); asserts on the
        # data dict it receives instead of making an HTTP call.
        req_post_lambda = (lambda data=None:
                           self.assertDictContainsSubset(expected, data))
        post_request = self.swap(
            mailgun_email_services, 'post_to_mailgun', req_post_lambda)
        with mailgun_api, mailgun_domain, post_request, allow_emailing:
            mailgun_email_services.send_mail(
                feconf.SYSTEM_EMAIL_ADDRESS, feconf.ADMIN_EMAIL_ADDRESS,
                'subject', 'body', 'html', bcc_admin=False)
    def test_bcc_admin_flag(self):
        """Verifies that the bcc admin flag is working properly in send_mail.

        Note that we replace the mailgun_email_services.post_to_mailgun()
        function in send_mail with an alternate lambda that asserts the correct
        values were placed in the data dictionary that is then passed to the
        mailgun api.
        """
        mailgun_api = self.swap(feconf, 'MAILGUN_API_KEY', 'api')
        mailgun_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain')
        allow_emailing = self.swap(feconf, 'CAN_SEND_EMAILS', True)
        # Lambda function, will replace post_to_mailgun().
        req_post_lambda = (lambda data=None:
                           self.assertEqual(
                               data['bcc'], feconf.ADMIN_EMAIL_ADDRESS))
        post_request = self.swap(
            mailgun_email_services, 'post_to_mailgun', req_post_lambda)
        with mailgun_api, mailgun_domain, post_request, allow_emailing:
            mailgun_email_services.send_mail(
                feconf.SYSTEM_EMAIL_ADDRESS, feconf.ADMIN_EMAIL_ADDRESS,
                'subject', 'body', 'html', bcc_admin=True)
    def test_reply_to_id_flag(self):
        """Verifies that the reply_to_id flag is working properly."""
        mailgun_api = self.swap(feconf, 'MAILGUN_API_KEY', 'api')
        mailgun_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain')
        allow_emailing = self.swap(feconf, 'CAN_SEND_EMAILS', True)
        reply_id = 123
        # Lambda function, will replace post_to_mailgun(); checks that the
        # Reply-To header routes replies to the incoming-emails domain.
        req_post_lambda = (
            lambda data=None:
            self.assertEqual(
                data['h:Reply-To'],
                'reply+' + python_utils.UNICODE(reply_id) + '@' +
                feconf.INCOMING_EMAILS_DOMAIN_NAME))
        post_request = self.swap(
            mailgun_email_services, 'post_to_mailgun', req_post_lambda)
        with mailgun_api, mailgun_domain, post_request, allow_emailing:
            mailgun_email_services.send_mail(
                feconf.SYSTEM_EMAIL_ADDRESS, feconf.ADMIN_EMAIL_ADDRESS,
                'subject', 'body', 'html',
                bcc_admin=False, reply_to_id=reply_id)
    def test_send_bulk_mail_raises_exception_for_missing_api_key(self):
        """Test that send_bulk_mail raises exception for missing
        mailgun api key.
        """
        mailgun_api_exception = (
            self.assertRaisesRegexp(
                Exception, 'Mailgun API key is not available.'))
        allow_emailing = self.swap(feconf, 'CAN_SEND_EMAILS', True)
        with mailgun_api_exception, allow_emailing:
            mailgun_email_services.send_bulk_mail(
                feconf.SYSTEM_EMAIL_ADDRESS, [feconf.ADMIN_EMAIL_ADDRESS],
                'subject', 'body', 'html')
    def test_send_bulk_mail_raises_exception_for_missing_domain_name(self):
        """Tests the missing Mailgun domain name exception for
        send_bulk_mail.
        """
        mailgun_api = self.swap(feconf, 'MAILGUN_API_KEY', 'api')
        allow_emailing = self.swap(feconf, 'CAN_SEND_EMAILS', True)
        mailgun_domain_name_exception = (
            self.assertRaisesRegexp(
                Exception, 'Mailgun domain name is not set.'))
        with mailgun_api, mailgun_domain_name_exception, allow_emailing:
            mailgun_email_services.send_bulk_mail(
                feconf.SYSTEM_EMAIL_ADDRESS, [feconf.ADMIN_EMAIL_ADDRESS],
                'subject', 'body', 'html')
    def test_send_bulk_mail_exception_for_invalid_permissions(self):
        """Tests the send_bulk_mail exception raised for invalid user
        permissions.
        """
        # Note: CAN_SEND_EMAILS is deliberately NOT swapped to True here.
        send_email_exception = (
            self.assertRaisesRegexp(
                Exception, 'This app cannot send emails to users.'))
        mailgun_api = self.swap(feconf, 'MAILGUN_API_KEY', 'api')
        mailgun_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain')
        with mailgun_api, mailgun_domain, send_email_exception:
            mailgun_email_services.send_bulk_mail(
                feconf.SYSTEM_EMAIL_ADDRESS, [feconf.ADMIN_EMAIL_ADDRESS],
                'subject', 'body', 'html')
    def test_send_bulk_mail_data_properly_sent(self):
        """Verifies that the data sent in send_bulk_mail is correct
        for each user in the recipient list.
        """
        mailgun_api = self.swap(feconf, 'MAILGUN_API_KEY', 'api')
        mailgun_domain = self.swap(feconf, 'MAILGUN_DOMAIN_NAME', 'domain')
        allow_emailing = self.swap(feconf, 'CAN_SEND_EMAILS', True)
        recipients = [feconf.ADMIN_EMAIL_ADDRESS]
        # Data that we expect to have been sent in the post_to_mailgun().
        expected = ({'from': feconf.SYSTEM_EMAIL_ADDRESS, 'to': recipients,
                     'subject': 'subject', 'text': 'body', 'html': 'html',
                     'recipient-variables': '{}'})
        # Lambda function, will replace post_to_mailgun().
        req_post_lambda = (lambda data=None:
                           self.assertDictContainsSubset(expected, data))
        post_request = self.swap(
            mailgun_email_services, 'post_to_mailgun', req_post_lambda)
        with mailgun_api, mailgun_domain, post_request, allow_emailing:
            mailgun_email_services.send_bulk_mail(
                feconf.SYSTEM_EMAIL_ADDRESS, recipients,
                'subject', 'body', 'html')
| 47.320346 | 80 | 0.646418 |
5edce819b72193509f17d7d3c3b79aedbb3d0c11 | 224 | py | Python | vb_simulation_pkgs/gazebo_ros_pkgs/gazebo_ros/setup.py | 1arshan/Eyantra_Virgi-bot | 30ebe99fec6a0d4767fe94468b21bc00091bc527 | [
"MIT"
] | 1 | 2021-09-09T04:41:28.000Z | 2021-09-09T04:41:28.000Z | vb_simulation_pkgs/gazebo_ros_pkgs/gazebo_ros/setup.py | 1arshan/Eyantra_Virgi-bot | 30ebe99fec6a0d4767fe94468b21bc00091bc527 | [
"MIT"
] | null | null | null | vb_simulation_pkgs/gazebo_ros_pkgs/gazebo_ros/setup.py | 1arshan/Eyantra_Virgi-bot | 30ebe99fec6a0d4767fe94468b21bc00091bc527 | [
"MIT"
] | null | null | null | #!/usr/bin/env python2.7
from distutils.core import setup
from catkin_pkg.python_setup import generate_distutils_setup
# Catkin-managed setup: most arguments come from package.xml via
# generate_distutils_setup(); only the python package layout is added here.
d = generate_distutils_setup()
d['packages'] = ['gazebo_ros']  # python package exported by this ROS package
d['package_dir'] = {'':'src'}  # python sources live under src/
setup(**d)
| 22.4 | 60 | 0.741071 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.