hexsha stringlengths 40 40 | size int64 1 1.03M | ext stringclasses 10 values | lang stringclasses 1 value | max_stars_repo_path stringlengths 3 239 | max_stars_repo_name stringlengths 5 130 | max_stars_repo_head_hexsha stringlengths 40 78 | max_stars_repo_licenses listlengths 1 10 | max_stars_count int64 1 191k ⌀ | max_stars_repo_stars_event_min_datetime stringlengths 24 24 ⌀ | max_stars_repo_stars_event_max_datetime stringlengths 24 24 ⌀ | max_issues_repo_path stringlengths 3 239 | max_issues_repo_name stringlengths 5 130 | max_issues_repo_head_hexsha stringlengths 40 78 | max_issues_repo_licenses listlengths 1 10 | max_issues_count int64 1 67k ⌀ | max_issues_repo_issues_event_min_datetime stringlengths 24 24 ⌀ | max_issues_repo_issues_event_max_datetime stringlengths 24 24 ⌀ | max_forks_repo_path stringlengths 3 239 | max_forks_repo_name stringlengths 5 130 | max_forks_repo_head_hexsha stringlengths 40 78 | max_forks_repo_licenses listlengths 1 10 | max_forks_count int64 1 105k ⌀ | max_forks_repo_forks_event_min_datetime stringlengths 24 24 ⌀ | max_forks_repo_forks_event_max_datetime stringlengths 24 24 ⌀ | content stringlengths 1 1.03M | avg_line_length float64 1 958k | max_line_length int64 1 1.03M | alphanum_fraction float64 0 1 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
acf0d77951bc2cbae4db39b2cbe0d5a8f7b90a7b | 5,073 | py | Python | myapp/migrations/0001_initial.py | rajeshgupta14/pathscriptfinal | 1a0b933d00b902588dfe30b9bea62c3e0c7ec4a2 | [
"Apache-2.0"
] | null | null | null | myapp/migrations/0001_initial.py | rajeshgupta14/pathscriptfinal | 1a0b933d00b902588dfe30b9bea62c3e0c7ec4a2 | [
"Apache-2.0"
] | null | null | null | myapp/migrations/0001_initial.py | rajeshgupta14/pathscriptfinal | 1a0b933d00b902588dfe30b9bea62c3e0c7ec4a2 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# Generated by Django 1.11.5 on 2017-09-15 12:26
from __future__ import unicode_literals
from django.conf import settings
import django.contrib.auth.models
import django.contrib.auth.validators
from django.db import migrations, models
import django.db.models.deletion
import django.utils.timezone
class Migration(migrations.Migration):
initial = True
dependencies = [
('auth', '0008_alter_user_username_max_length'),
]
operations = [
migrations.CreateModel(
name='User',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('password', models.CharField(max_length=128, verbose_name='password')),
('last_login', models.DateTimeField(blank=True, null=True, verbose_name='last login')),
('is_superuser', models.BooleanField(default=False, help_text='Designates that this user has all permissions without explicitly assigning them.', verbose_name='superuser status')),
('username', models.CharField(error_messages={'unique': 'A user with that username already exists.'}, help_text='Required. 150 characters or fewer. Letters, digits and @/./+/-/_ only.', max_length=150, unique=True, validators=[django.contrib.auth.validators.UnicodeUsernameValidator()], verbose_name='username')),
('first_name', models.CharField(blank=True, max_length=30, verbose_name='first name')),
('last_name', models.CharField(blank=True, max_length=30, verbose_name='last name')),
('email', models.EmailField(blank=True, max_length=254, verbose_name='email address')),
('is_staff', models.BooleanField(default=False, help_text='Designates whether the user can log into this admin site.', verbose_name='staff status')),
('is_active', models.BooleanField(default=True, help_text='Designates whether this user should be treated as active. Unselect this instead of deleting accounts.', verbose_name='active')),
('date_joined', models.DateTimeField(default=django.utils.timezone.now, verbose_name='date joined')),
('location', models.CharField(blank=True, max_length=30)),
],
options={
'verbose_name': 'user',
'verbose_name_plural': 'users',
'abstract': False,
},
managers=[
('objects', django.contrib.auth.models.UserManager()),
],
),
migrations.CreateModel(
name='Client',
fields=[
('id', models.CharField(max_length=30, primary_key=True, serialize=False)),
('clientname', models.CharField(max_length=30)),
('userid', models.ManyToManyField(blank=True, to=settings.AUTH_USER_MODEL)),
],
),
migrations.CreateModel(
name='Product',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=40, null=True)),
('description', models.TextField(blank=True, max_length=500, null=True)),
('upload_Doc1', models.FileField(blank=True, null=True, upload_to='media')),
('upload_Doc2', models.FileField(blank=True, null=True, upload_to='media')),
],
),
migrations.CreateModel(
name='Project',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('name', models.CharField(blank=True, max_length=30, null=True)),
('client', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='myapp.Client')),
('product', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='myapp.Product')),
('user', models.ManyToManyField(blank=True, to=settings.AUTH_USER_MODEL)),
],
),
migrations.AddField(
model_name='user',
name='clientid',
field=models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='myapp.Client'),
),
migrations.AddField(
model_name='user',
name='groups',
field=models.ManyToManyField(blank=True, help_text='The groups this user belongs to. A user will get all permissions granted to each of their groups.', related_name='user_set', related_query_name='user', to='auth.Group', verbose_name='groups'),
),
migrations.AddField(
model_name='user',
name='user_permissions',
field=models.ManyToManyField(blank=True, help_text='Specific permissions for this user.', related_name='user_set', related_query_name='user', to='auth.Permission', verbose_name='user permissions'),
),
]
| 55.747253 | 329 | 0.628819 |
acf0d909c3618152d9ca103eef0928e4d5a79c58 | 328 | py | Python | tests/test_ninja.py | jcfr/ninja-python-distributions | 0fe8e6574f4bda81351172d6dc7a452e86adf07d | [
"Apache-2.0"
] | null | null | null | tests/test_ninja.py | jcfr/ninja-python-distributions | 0fe8e6574f4bda81351172d6dc7a452e86adf07d | [
"Apache-2.0"
] | null | null | null | tests/test_ninja.py | jcfr/ninja-python-distributions | 0fe8e6574f4bda81351172d6dc7a452e86adf07d | [
"Apache-2.0"
] | null | null | null |
import pytest
import ninja
from . import push_argv
def _run(program, args):
    """Invoke the named ninja console entry point and assert it exits with 0.

    *program* is the attribute name on the ``ninja`` package (e.g. "ninja");
    *args* are the command-line arguments to pass after the program name.
    """
    entry_point = getattr(ninja, program)
    argv = ["%s.py" % program] + args
    # The entry points call sys.exit(), so a clean run raises SystemExit(0).
    with push_argv(argv), pytest.raises(SystemExit) as excinfo:
        entry_point()
    assert excinfo.value.code == 0
def test_ninja_module():
    # Smoke test: running the 'ninja' entry point with --version must
    # exit cleanly (exit code 0).
    _run("ninja", ["--version"])
| 17.263158 | 63 | 0.646341 |
acf0d920c3b809ca0e9ac8e0963321294ed9ad04 | 9,232 | py | Python | deepvariant/vcf_stats_vis_test.py | peterdfields/deepvariant | 33fe874a7b2b4fdb67b0f6e361dd9e45f1f52676 | [
"BSD-3-Clause"
] | 4 | 2019-03-30T13:25:25.000Z | 2020-10-14T18:47:21.000Z | deepvariant/vcf_stats_vis_test.py | kchennen/deepvariant | b92646f51df8cf157147e93ecd7a082c7b6db457 | [
"BSD-3-Clause"
] | 2 | 2019-09-07T05:07:35.000Z | 2019-09-07T05:08:18.000Z | deepvariant/vcf_stats_vis_test.py | kchennen/deepvariant | b92646f51df8cf157147e93ecd7a082c7b6db457 | [
"BSD-3-Clause"
] | 1 | 2019-09-04T16:59:18.000Z | 2019-09-04T16:59:18.000Z | # Copyright 2019 Google LLC.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from this
# software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""Tests for deepvariant .vcf_stats_vis."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import sys
if 'google' in sys.modules and 'google.protobuf' not in sys.modules:
del sys.modules['google']
import os
import tempfile
from absl.testing import absltest
import altair as alt
import pandas as pd
import six
import tensorflow as tf
from deepvariant import vcf_stats_vis
# Note: histograms all have keys s, e, and c, shortened versions of
# bin_start, bin_end, and count to save space in output HTML
VIS_DATA = {
'base_changes': [['G', 'A', 56], ['T', 'A', 17], ['C', 'T', 47],
['G', 'C', 19], ['T', 'C', 48], ['C', 'A', 14],
['A', 'T', 9], ['A', 'C', 15], ['T', 'G', 9],
['G', 'T', 15], ['A', 'G', 60], ['C', 'G', 11]],
'gq_histogram': [[1, 3], [2, 24]],
'indel_sizes': [[1, 6], [2, 4], [4, 2], [5, 2], [7, 2], [8, 1], [12, 1],
[-2, 6], [-5, 1], [-4, 7], [-3, 4], [-1, 11]],
'qual_histogram': [{
's': 0,
'e': 50,
'c': 10
}, {
's': 50,
'e': 99,
'c': 10
}],
'depth_histogram': [[0, 10], [1, 20]],
'vaf_histograms_by_genotype': {
'[-1, -1]': [{
'e': 0.5,
's': 0,
'c': 10
}, {
'e': 1,
's': 0.5,
'c': 10
}],
'[0, 0]': [{
'e': 0.5,
's': 0,
'c': 10
}, {
'e': 1,
's': 0.5,
'c': 10
}],
'[0, 1]': [{
'e': 0.5,
's': 0,
'c': 10
}, {
'e': 1,
's': 0.5,
'c': 10
}],
'[0, 2]': [{
'e': 0.5,
's': 0,
'c': 10
}, {
'e': 1,
's': 0.5,
'c': 10
}],
'[1, 1]': [{
'e': 0.5,
's': 0,
'c': 10
}, {
'e': 1,
's': 0.5,
'c': 10
}],
'[1, 2]': [{
'e': 0.5,
's': 0,
'c': 10
}, {
'e': 1,
's': 0.5,
'c': 10
}],
'[1, 3]': [{
'e': 0.5,
's': 0,
'c': 10
}, {
'e': 1,
's': 0.5,
'c': 10
}]
},
'variant_type_counts': {
'Biallelic_SNP': 10,
'RefCall': 3,
'Multiallelic_Insertion': 1
},
'titv_counts': {
'Transition': 20,
'Transversion': 10
}
}
def is_an_altair_chart(chart):
    """Heuristically decide whether *chart* is an altair chart object.

    Covers Chart, FacetChart, LayerChart and VConcatChart, whose type
    strings look like "<class 'altair.vegalite.v3.api.FacetChart'>".
    """
    type_name = str(type(chart))
    return all(token in type_name for token in ('altair', 'Chart'))
class VcfStatsVisTest(absltest.TestCase):
def test_dict_to_dataframe(self):
self.assertEqual('K', 'K')
self.assertEqual(
vcf_stats_vis._dict_to_dataframe({
'A': 'a'
}).to_dict('records'), [{
'label': 'A',
'value': 'a'
}])
def test_prettify_genotype(self):
self.assertEqual(
vcf_stats_vis._prettify_genotype('[0, 0]'), (vcf_stats_vis.REF, 'main'))
self.assertEqual(
vcf_stats_vis._prettify_genotype('[-1, -1]'),
(vcf_stats_vis.UNCALLED, 'others'))
self.assertEqual(
vcf_stats_vis._prettify_genotype('[3, 3]'), (vcf_stats_vis.HOM, 'main'))
self.assertEqual(
vcf_stats_vis._prettify_genotype('[0, 3]'), (vcf_stats_vis.HET, 'main'))
self.assertEqual(
vcf_stats_vis._prettify_genotype('[6, 3]'),
(vcf_stats_vis.HET_BOTH, 'others'))
def test_integer_counts_to_histogram(self):
test_input = [[1, 1], [2, 2], [4, 1]]
expected_output = pd.DataFrame(
data={
'c': [1, 2, 1],
's': [0.5, 1.5, 3.5],
'e': [1.5, 2.5, 4.5]
},
columns=['c', 's', 'e'])
observed_output = vcf_stats_vis._integer_counts_to_histogram(test_input)
six.assertCountEqual(
self,
list(observed_output.columns),
list(expected_output.columns),
msg='Wrong column names')
self.assertEqual(
list(observed_output['c']),
list(expected_output['c']),
msg='column c differs')
self.assertEqual(
list(observed_output['s']),
list(expected_output['s']),
msg='column s differs')
self.assertEqual(
list(observed_output['e']),
list(expected_output['e']),
msg='column e differs')
self.assertTrue((observed_output == expected_output).all().all())
def test_chart_type_negative_control(self):
self.assertFalse(is_an_altair_chart('some string'))
self.assertFalse(is_an_altair_chart(None))
def test_build_type_chart(self):
chart = vcf_stats_vis._build_type_chart(VIS_DATA['variant_type_counts'])
self.assertTrue(is_an_altair_chart(chart))
def test_build_tt_chart(self):
chart = vcf_stats_vis._build_tt_chart(VIS_DATA['titv_counts'])
self.assertTrue(is_an_altair_chart(chart))
def test_build_qual_histogram(self):
chart = vcf_stats_vis._build_qual_histogram(VIS_DATA['qual_histogram'])
self.assertTrue(is_an_altair_chart(chart))
def test_build_depth_histogram(self):
chart = vcf_stats_vis._build_depth_histogram(VIS_DATA['depth_histogram'])
self.assertTrue(is_an_altair_chart(chart))
def test_build_gq_histogram(self):
chart = vcf_stats_vis._build_gq_histogram(VIS_DATA['gq_histogram'])
self.assertTrue(is_an_altair_chart(chart))
def test_build_vaf_histograms(self):
chart = vcf_stats_vis._build_vaf_histograms(
VIS_DATA['vaf_histograms_by_genotype'])
self.assertTrue(is_an_altair_chart(chart[0]))
self.assertTrue(is_an_altair_chart(chart[1]))
def test_build_base_change_chart(self):
chart = vcf_stats_vis._build_base_change_chart(VIS_DATA['base_changes'])
self.assertTrue(is_an_altair_chart(chart))
def test_build_indel_size_chart(self):
chart = vcf_stats_vis._build_indel_size_chart(VIS_DATA['indel_sizes'])
self.assertTrue(is_an_altair_chart(chart))
def test_build_all_charts(self):
chart = vcf_stats_vis._build_all_charts(VIS_DATA)
self.assertTrue(is_an_altair_chart(chart))
def test_altair_chart_to_html(self):
df = pd.DataFrame({'x': ['A', 'B'], 'y': [28, 55]})
c = alt.Chart(df).mark_bar().encode(x='x', y='y')
html_string = vcf_stats_vis._altair_chart_to_html(
altair_chart=c, download_filename='TEST_DOWNLOAD_FILENAME')
import_base = 'src="https://storage.googleapis.com/deepvariant/lib/vega/'
self.assertNotEqual(
html_string.find(import_base + 'vega@%s"' %
(vcf_stats_vis.VEGA_VERSION)), -1)
self.assertNotEqual(
html_string.find(import_base + 'vega-lite@%s"' %
(vcf_stats_vis.VEGA_LITE_VERSION)), -1)
self.assertNotEqual(
html_string.find(import_base + 'vega-embed@%s"' %
(vcf_stats_vis.VEGA_EMBED_VERSION)), -1)
self.assertEqual(html_string.find('jsdelivr.net'), -1)
self.assertNotEqual(html_string.find('TEST_DOWNLOAD_FILENAME'), -1)
def test_create_visual_report(self):
base_dir = tempfile.mkdtemp()
outfile_base = os.path.join(base_dir, 'stats_test')
sample_name = 'test_sample_name'
vcf_stats_vis.create_visual_report(
outfile_base, VIS_DATA, sample_name=sample_name)
self.assertTrue(tf.io.gfile.exists(outfile_base + '.visual_report.html'))
if __name__ == '__main__':
absltest.main()
| 33.089606 | 80 | 0.59662 |
acf0d95f15d21792ce749abce1fae403d1f35a15 | 1,175 | py | Python | tkinter/__frame__/replace-frame-with-content/main-v1.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 140 | 2017-02-21T22:49:04.000Z | 2022-03-22T17:51:58.000Z | tkinter/__frame__/replace-frame-with-content/main-v1.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 5 | 2017-12-02T19:55:00.000Z | 2021-09-22T23:18:39.000Z | tkinter/__frame__/replace-frame-with-content/main-v1.py | whitmans-max/python-examples | 881a8f23f0eebc76816a0078e19951893f0daaaa | [
"MIT"
] | 79 | 2017-01-25T10:53:33.000Z | 2022-03-11T16:13:57.000Z |
# date: 2019.05.04
# author: Bartłomiej 'furas' Burek
import tkinter as tk
# --- functions ---
def change_frame(new_frame):
    """Swap the visible frame: unmap the current one and pack new_frame.

    Relies on the module-level global 'current', initialized at the bottom
    of the script to main_frame.
    """
    global current
    # hide current tk.Frame (pack_forget unmaps it without destroying it)
    current.pack_forget()
    # show new tk.Frame
    current = new_frame
    current.pack()
# ---
def show_main_frame():
change_frame(main_frame)
def show_frame_1():
change_frame(frame_1)
def show_frame_2():
change_frame(frame_2)
# --- main ---
root = tk.Tk()
# --- main frame without .pack() ---
main_frame = tk.Frame(root)
button = tk.Button(main_frame, text="Frame #1", command=show_frame_1)
button.pack()
button = tk.Button(main_frame, text="Frame #2", command=show_frame_2)
button.pack()
# --- frame #1 without .pack() ---
frame_1 = tk.Frame(root)
l = tk.Label(frame_1, text="It is Frame #1", bg='red')
l.pack()
b = tk.Button(frame_1, text="BACK", command=show_main_frame)
b.pack()
# --- frame #2 without .pack() ---
frame_2 = tk.Frame(root)
l = tk.Label(frame_2, text="It is Frame #2", bg='green')
l.pack()
b = tk.Button(frame_2, text="BACK", command=show_main_frame)
b.pack()
# --- set frame at start ---
current = main_frame
current.pack()
root.mainloop()
| 16.785714 | 69 | 0.657021 |
acf0da5f7a624585e3f1f4ec345c1a5db6fba3ab | 308 | py | Python | tests/test_job.py | stikos/tesk-core | acebd70a71b1e86cfc975a2d9efedb104d8bacd7 | [
"Apache-2.0"
] | null | null | null | tests/test_job.py | stikos/tesk-core | acebd70a71b1e86cfc975a2d9efedb104d8bacd7 | [
"Apache-2.0"
] | null | null | null | tests/test_job.py | stikos/tesk-core | acebd70a71b1e86cfc975a2d9efedb104d8bacd7 | [
"Apache-2.0"
] | null | null | null | import unittest
from tesk_core.job import Job
class JobTestCase(unittest.TestCase):
def test_job(self):
job = Job({'metadata': {'name': 'test'}})
self.assertEqual(job.name, 'task-job')
self.assertEqual(job.namespace, 'default')
if __name__ == '__main__':
unittest.main()
| 20.533333 | 50 | 0.652597 |
acf0db3c8f024103641b42a12bd30f65c4baa725 | 12,327 | py | Python | src/logml/core/logml.py | AstraZeneca-NGS/LogMl | cf254b358150f0f96a9dd2ea50de56acdc15bd56 | [
"MIT"
] | null | null | null | src/logml/core/logml.py | AstraZeneca-NGS/LogMl | cf254b358150f0f96a9dd2ea50de56acdc15bd56 | [
"MIT"
] | 56 | 2019-09-10T19:00:38.000Z | 2022-02-10T00:35:57.000Z | src/logml/core/logml.py | AstraZeneca-NGS/LogMl | cf254b358150f0f96a9dd2ea50de56acdc15bd56 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import logging
import pandas as pd
from pathlib import Path
from . import Config, CONFIG_CROSS_VALIDATION, CONFIG_DATASET, CONFIG_DATASET_EXPLORE, CONFIG_FUNCTIONS, CONFIG_LOGGER, CONFIG_MODEL
from .files import MlFiles, set_plots
from .registry import MODEL_CREATE
from .scatter_gather import init_scatter_gather, scatter
from ..analysis import AnalysisDf
from ..datasets import Datasets, DatasetsCv, DatasetsDf, DfExplore
from ..feature_importance import DataFeatureImportance
from ..models import HyperOpt, Model, ModelCv, ModelSearch, SkLearnModel
from ..util.results_df import ResultsDf
class LogMl(MlFiles):
"""
ML Logger definition
Note: This class is used as a singleton
"""
def __init__(self, config_file=None, config=None, datasets=None, verbose=False, debug=False):
if config is None and config_file is not None:
config = Config(config_file=config_file)
config()
if config is not None:
self.is_debug = debug or config.is_debug
self.is_verbose = verbose or config.is_verbose
if self.is_debug:
config.set_log_level(logging.DEBUG)
elif self.is_verbose:
config.set_log_level(logging.INFO)
else:
config.set_log_level(logging.WARNING)
else:
self.is_debug = debug
self.is_verbose = verbose
super().__init__(config, CONFIG_LOGGER)
self.datasets = datasets
self._id_counter = 0
self.dataset_feature_importance = None
self.dataset_feature_importance_na = None
self.disable_plots = False
self.disable_scatter_model = False
self.display_model_results = True
self.display_max_columns = 1000
self.display_max_rows = 1000
self.hyper_parameter_optimization = None
self.model = None
self.model_ori = None
self.model_search = None
self.model_analysis = None
self.plots_path = 'logml_plots'
self.save_model_results = True
self.save_plots = True
self.show_plots = True
self.cv_enable = False
self._set_from_config()
if self.config is not None:
self.initialize()
self.model_results = ResultsDf()
def _analysis(self):
""" Perform analises """
if not self.is_dataset_df():
self._debug("Analysis: Only available for dataset type 'df', skipping")
return True
self.analysis = AnalysisDf(self.config, self.datasets)
return self.analysis()
def __call__(self):
""" Execute model trainig """
self._info(f"LogMl: Start")
# Configure
if self.config is None:
self.config = Config()
if not self.config():
self._error("Could not load config")
return False
# Initialize
self.initialize()
# Dataset: Load or create dataset, augment, preprocess, split
if not self.datasets:
self.datasets = self._new_dataset()
if not self.datasets():
self._error("Could not load or create dataset")
return False
# Explore dataset
ret = self._dataset_explore()
if ret is not None and not ret:
self._debug("Dataset not explored")
# Feature importance
if not self._feature_importance():
self._debug("Could not perform feature importance")
# Feature importance is missing values
if not self._feature_importance_na():
self._debug("Could not perform feature importance of missing data")
# Analysis
if not self._analysis():
self._error("Could not analyze data")
return False
# Models Train
if not self.models_train():
self._error("Could not train model")
return False
# Gather or show models results
self.models_results()
self._info(f"LogMl: End")
return True
def _config_sanity_check(self):
"""
Check parameters from config.
Return True on success, False if there are errors
"""
wf_enabled = list()
for wf_name in ['cross_validation', 'hyper_parameter_optimization', 'mode_search']:
wf = self.__dict__.get(wf_name)
if wf is None:
continue
if wf.enable:
wf_enabled.append(wf_name)
if len(wf_enabled) > 1:
self._error(f"More than one workflow enabled (only one can be enabled): {wf_enabled}, config file '{self.config.config_file}'")
return False
return True
def _dataset_explore(self):
""" Explore dataset """
if not self.is_dataset_df():
self._debug("Dataset Explore: Only available for dataset type 'df', skipping")
return True
self._debug("Dataset Explore: Start")
ok = True
# Explore original dataset
if self.config.get_parameters_section(CONFIG_DATASET_EXPLORE, 'is_use_ori', True):
files_base = self.datasets.get_file(f"dataset_explore.original", ext='')
self.dataset_explore_original = DfExplore(self.datasets.get_ori(), 'original', self.config, files_base)
ok = self.dataset_explore_original() and ok
else:
self._debug("Dataset Explore: Exploring 'original' datasets disabled ('is_use_ori'=False), skipping")
# Explore pre-processed dataset
files_base = self.datasets.get_file(f"dataset_explore.preprocessed", ext='')
self.dataset_explore_preprocessed = DfExplore(self.datasets.get(), 'preprocessed', self.config, files_base)
ok = self.dataset_explore_preprocessed() and ok
self._debug("Dataset Explore: End")
return ok
def _feature_importance(self):
""" Feature importance / feature selection """
if not self.is_dataset_df():
self._debug("Dataset feature importance only available for dataset type 'df'")
return True
model_type = self.model_ori.model_type
self.dataset_feature_importance = DataFeatureImportance(self.config, self.datasets, model_type, 'all')
return self.dataset_feature_importance()
def _feature_importance_na(self):
""" Feature importance / feature selection """
if not self.is_dataset_df():
self._debug("Dataset feature importance (missing data) is only available for dataset type 'df'")
return True
if not self.dataset_feature_importance.enable:
return True
model_type = self.model_ori.model_type
datasets_na = self.datasets.get_datasets_na()
if datasets_na is None or datasets_na.dataset is None:
self._debug("Dataset feature importance (missing data): Could not create 'missing' dataset, skipping. datasets_na={datasets_na}")
return False
if datasets_na.dataset.abs().sum().sum() == 0:
self._debug("Dataset feature importance (missing data): There are no missing values, skipping. datasets_na={datasets_na}")
return True
self._debug("Dataset feature importance (missing data): datasets_na={datasets_na}")
self.dataset_feature_importance_na = DataFeatureImportance(self.config, datasets_na, model_type, 'na')
return self.dataset_feature_importance_na()
def get_model_eval_test(self):
""" Get model test results """
return self.model.eval_test
def get_model_eval_validate(self):
""" Get model validate results """
return self.model.eval_validate
def initialize(self):
""" Initialize objects after config is setup """
if self.config is not None:
self._set_from_config()
self.config.get_parameters_section(CONFIG_DATASET, "")
scatter_path = Path('.') / f"scatter_{self.config.scatter_total}_{self.config.config_hash}"
init_scatter_gather(scatter_num=self.config.scatter_num, scatter_total=self.config.scatter_total, data_path=scatter_path, force=False)
if self.model_ori is None:
self.model_ori = Model(self.config)
if self.hyper_parameter_optimization is None:
self.hyper_parameter_optimization = HyperOpt(self)
if self.model_search is None:
self.model_search = ModelSearch(self)
# Table width
pd.set_option('display.max_columns', self.display_max_columns)
pd.set_option('display.max_rows', self.display_max_rows)
pd.set_option('display.max_colwidth', None)
# Set plots options
set_plots(disable=self.disable_plots, show=self.show_plots, save=self.save_plots, path=self.plots_path)
self.cv_enable = self.config.get_parameters(CONFIG_CROSS_VALIDATION).get('enable', False)
return self._config_sanity_check()
def is_dataset_df(self):
""" Is a 'df' type of dataset? """
ds_type = self.config.get_parameters(CONFIG_DATASET).get('dataset_type')
return ds_type == 'df'
def models_results(self):
""" Gather models resouts and or show them """
if self.display_model_results:
self.model_results.sort(['validation', 'train', 'time'])
self.model_results.print()
if self.save_model_results and self.model_results is not None:
m = self.model_ori if self.model is None else self.model
file_csv = m.get_file('models', ext=f"csv")
self._save_csv(file_csv, "Model resutls (CSV)", self.model_results.df, save_index=True)
    def model_train(self, config=None, dataset=None):
        """
        Train a single model.
        This method can be called from outside with an overriding 'config'
        and/or 'dataset'; when they are omitted, self.config and
        self.datasets are used instead (see _new_model). Stores the trained
        model in self.model, appends its results to self.model_results, and
        returns the model's invocation result.
        """
        self._debug(f"Start")
        self.model = self._new_model(config, dataset)
        ret = self.model()
        # Add this model's results to the accumulated results table
        self.model_results.add_row_df(self.model.model_results.df)
        self._debug(f"End")
        return ret
@scatter
def model_train_scatter(self):
""" Perform model train, allowing scatter & gather """
return self.model_train()
@scatter
def hyper_parameter_optimization_scatter(self):
return self.hyper_parameter_optimization()
def models_train(self):
"""
Train (several) models, with or without scatter/gather enabled
"""
if self.model_search.enable:
return self.model_search()
elif self.hyper_parameter_optimization.enable:
if self.disable_scatter_model:
return self.hyper_parameter_optimization()
else:
return self.hyper_parameter_optimization_scatter()
else:
if self.disable_scatter_model:
return self.model_train()
else:
return self.model_train_scatter()
def _new_dataset(self):
model_type = self.model_ori.model_type
ds = None
if self.is_dataset_df():
self._debug(f"Using dataset class 'DatasetsDf'")
ds = DatasetsDf(self.config, model_type)
else:
self._debug(f"Using dataset class 'Dataset'")
ds = Datasets(self.config)
# Cross-validation enabled? Then we should wrap the dataset using a DatasetCv
if self.cv_enable:
self._debug(f"Using dataset class 'DatasetCv'")
ds = DatasetsCv(self.config, ds, model_type)
return ds
def _new_model(self, config=None, datasets=None):
""" Create an Model: This is a factory method """
if config is None:
config = self.config
if datasets is None:
datasets = self.datasets
self._debug(f"Parameters: {config.parameters[CONFIG_FUNCTIONS]}")
# Create models depending on class
model_class = config.get_parameters_section(CONFIG_MODEL, 'model_class')
if model_class is not None:
model_params = config.get_parameters_functions(MODEL_CREATE)
if model_class.startswith('sklearn'):
return SkLearnModel(config, datasets, model_class, model_params)
if self.cv_enable:
return ModelCv(config, datasets)
return Model(config, datasets)
| 41.786441 | 146 | 0.642411 |
acf0dd37488e937be75847d3065b315c83f59f14 | 544 | py | Python | page_parser/xpath/test.py | 2581676612/python | b309564a05838b23044bb8112fd4ef71307266b6 | [
"MIT"
] | 112 | 2017-09-19T17:38:38.000Z | 2020-05-27T18:00:27.000Z | page_parser/xpath/test.py | tomoncle/Python-notes | ce675486290c3d1c7c2e4890b57e3d0c8a1228cc | [
"MIT"
] | null | null | null | page_parser/xpath/test.py | tomoncle/Python-notes | ce675486290c3d1c7c2e4890b57e3d0c8a1228cc | [
"MIT"
] | 56 | 2017-09-20T01:24:12.000Z | 2020-04-16T06:19:31.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @Time : 17-5-6 下午1:10
# @Author : tom.lee
# @Site :
# @File : test.py
# @Software: PyCharm
from lxml import etree
# Load a previously saved HTML page from disk.
# NOTE(review): the file handle is never closed; a 'with open(...)' block
# would be safer.
f = open('file.txt')
content = f.read()
selector = etree.HTML(content)
# Each matched div holds one directory entry (title link + description).
# Note the trailing space in class="site-item " -- it is part of the
# attribute value being matched.
divs = selector.xpath('//div[@class="site-item "]/div[@class="title-and-desc"]')
for r in divs:
    # NOTE(review): 'None or {}' is just '{}'
    item_ = None or {}
    item_['title'] = r.xpath('a/div/text()')[0]
    item_['link'] = r.xpath('a/@href')[0]
    item_['desc'] = r.xpath('div/text()')[0].replace('\n', '').strip()
    # Python 2 print statement (the file header dates this script to Python 2)
    print item_
| 23.652174 | 80 | 0.573529 |
acf0ddd6741a00658d8644a7ed3271028c0d4731 | 5,816 | py | Python | test/test_pine.py | dusenberrymw/Pine | bec07aef0811a5746282e574e439277a40994523 | [
"MIT"
] | 4 | 2016-05-20T03:29:40.000Z | 2018-11-13T22:03:36.000Z | test/test_pine.py | dusenberrymw/Pine | bec07aef0811a5746282e574e439277a40994523 | [
"MIT"
] | null | null | null | test/test_pine.py | dusenberrymw/Pine | bec07aef0811a5746282e574e439277a40994523 | [
"MIT"
] | 1 | 2018-11-13T22:03:38.000Z | 2018-11-13T22:03:38.000Z | #! /usr/bin/env python3
'''
Created on Sept 9, 2014
@author: dusenberrymw
'''
import math
import os
import sys
import unittest
sys.path.append(os.path.join(os.path.dirname(os.path.realpath(__file__)), '..'))
# import pine.data
import pine.activation
import pine.network
import pine.trainer
import pine.util
# network.py
class TestNetwork(unittest.TestCase):
    """Unit tests for network.py: Neuron, Layer and Network forward and
    backward passes, plus a numerical gradient check."""

    def setUp(self):
        # Shared fixture: a 3-input logistic neuron with fixed weights and
        # threshold, and a layer containing that neuron twice.
        self.act_func = pine.activation.Logistic()
        self.input_vector = [5,6,7]
        self.neuron = pine.network.Neuron(3, self.act_func)
        self.neuron.weights = [1,-2,3]
        self.neuron.threshold = 4
        # Expected output: logistic(w . x + threshold)
        local_output = sum([x*y for x,y in zip(self.input_vector, self.neuron.weights)]) + self.neuron.threshold
        self.output = 1.0 / (1 + math.exp(-1.0*local_output)) #0.99999999999
        self.layer = pine.network.Layer()
        self.layer.neurons = [self.neuron, self.neuron]

    def test_neuron_forward(self):
        self.assertEqual(self.neuron.forward(self.input_vector), self.output)

    def test_layer_forward(self):
        # Both neurons are the same object, so both outputs are identical.
        self.assertEqual(self.layer.forward(self.input_vector),
                         [self.output, self.output])

    def test_network_forward(self):
        # Two-layer network: the shared 2-neuron layer feeds a single
        # 2-input output neuron.
        network = pine.network.Network()
        network.layers.append(self.layer)
        new_neuron = pine.network.Neuron(2,self.act_func)
        new_neuron.weights = [1,-2]
        new_neuron.threshold = 4
        new_layer = pine.network.Layer()
        # Bug fix: the output layer previously reused self.neuron (3 weights
        # for a 2-element input) and left new_neuron unused; use the 2-input
        # neuron that was built for this purpose.  The expected value is
        # unchanged (the first two weights and the threshold are identical).
        new_layer.neurons = [new_neuron]
        network.layers.append(new_layer)
        local_output = sum([x*y for x,y in zip([self.output, self.output], new_neuron.weights)]) + new_neuron.threshold
        out = [1.0 / (1 + math.exp(-1.0*local_output))]
        self.assertEqual(network.forward(self.input_vector), out) #0.9525741275104728

    def test_neuron_backward(self):
        # Force a known output so the gradient algebra below is exact.
        self.neuron.forward(self.input_vector)
        self.neuron.output = 2
        down_gradient = 3.2
        # Logistic derivative is out*(1-out); chain it with the upstream
        # gradient, then fan out to weight/threshold/input gradients.
        chain_gradient = down_gradient * (2*(1-2))
        weight_gradients = [chain_gradient*x for x in self.input_vector]
        thresh_gradient = chain_gradient * 1
        input_gradients = [chain_gradient*x for x in self.neuron.weights]
        computed_gradients = self.neuron.backward(down_gradient)
        self.assertEqual(self.neuron.weight_gradients, weight_gradients)
        self.assertEqual(computed_gradients, input_gradients)
        self.assertEqual(self.neuron.threshold_gradient, thresh_gradient)

    def test_gradients(self):
        # Numerical gradient check: compare the analytic backprop gradients
        # against central finite differences of the network cost.
        layout = [3,5,2]
        network = pine.util.create_network(layout, ['logistic']*2)
        input_vector = [-2.3,3.1,-5.8]
        target_output_vector = [0.4,1]
        network.forward(input_vector)
        cost_gradient_vec = network.cost_gradient(target_output_vector)
        network.backward(cost_gradient_vec)
        for layer in network.layers:
            for neuron in layer.neurons:
                # weight gradients check:
                for i in range(len(neuron.weights)):
                    epsilon = 0.0001
                    old_theta = neuron.weights[i]
                    neuron.weights[i] = neuron.weights[i] + epsilon
                    network.forward(input_vector)
                    J1 = network.cost(target_output_vector)
                    neuron.weights[i] = old_theta - epsilon
                    network.forward(input_vector)
                    J2 = network.cost(target_output_vector)
                    estimated_gradient = (J1 - J2) / (2*epsilon)
                    diff = abs(neuron.weight_gradients[i] - estimated_gradient)
                    # Restore the weight BEFORE asserting so a failure does
                    # not leave the network perturbed for later iterations.
                    neuron.weights[i] = old_theta
                    assert diff < 0.0001, "w difference: {}".format(diff)
                # threshold gradient check:
                epsilon = 0.0001
                old_theta = neuron.threshold
                neuron.threshold = neuron.threshold + epsilon
                network.forward(input_vector)
                J1 = network.cost(target_output_vector)
                neuron.threshold = old_theta - epsilon
                network.forward(input_vector)
                J2 = network.cost(target_output_vector)
                estimated_gradient = (J1 - J2) / (2*epsilon)
                diff = abs(neuron.threshold_gradient - estimated_gradient)
                neuron.threshold = old_theta
                assert diff < 0.0001, "t difference: {}".format(diff)

    def test_reset_gradients(self):
        # A freshly created network must start with all-zero gradients.
        network = pine.util.create_network([3,5,2], ['logistic']*2)
        for layer in network.layers:
            for neuron in layer.neurons:
                for grad in neuron.weight_gradients:
                    self.assertEqual(grad, 0)
                self.assertEqual(neuron.threshold_gradient, 0)

    def tearDown(self):
        pass
# util.py
class TestUtil(unittest.TestCase):
    """Tests for the pine.util helper functions."""

    def setUp(self):
        pass

    def test_is_valid_function(self):
        # An unknown activation name is rejected; a supported one is accepted.
        self.assertFalse(pine.util.is_valid_function("test"))
        self.assertTrue(pine.util.is_valid_function("logistic"))

    def tearDown(self):
        pass
class TestActivation(unittest.TestCase):
    """Testing for activation"""
    # Placeholder test case: no activation tests have been written yet.
    def setUp(self):
        pass
    def test_METHOD(self):
        pass
    def tearDown(self):
        pass
class TestMODULE(unittest.TestCase):
    """Testing for MODULE"""
    # Template stub for future test cases; MODULE/METHOD are meant to be
    # replaced when a new module gets tests.
    def setUp(self):
        pass
    def test_METHOD(self):
        pass
    def tearDown(self):
        pass
# Run every test case in this module when executed directly.
if __name__ == '__main__':
    unittest.main()
| 35.463415 | 121 | 0.613308 |
acf0ded89a6fbe33eded667df93bd7a63c80e3b6 | 19,063 | py | Python | makeplots2_chary.py | bjweiner/sedfitting | 4164ed19ec44c50d658ae19a1c866314399e0ad8 | [
"MIT"
] | 1 | 2019-03-04T20:28:10.000Z | 2019-03-04T20:28:10.000Z | makeplots2_chary.py | bjweiner/sedfitting | 4164ed19ec44c50d658ae19a1c866314399e0ad8 | [
"MIT"
] | null | null | null | makeplots2_chary.py | bjweiner/sedfitting | 4164ed19ec44c50d658ae19a1c866314399e0ad8 | [
"MIT"
] | null | null | null | #
# use ureka to get newer scipy
# set import to read from python/sedfitting
# eg
# ur_setup
# PYTHONPATH=/Users/bjw/software/ureka/Ureka/python/lib/python2.7/site-packages/
# export PYTHONPATH=$PYTHONPATH:$HOME/python/sedfitting
# python makeplots1.py
# or
# python ~/text/conf/cmu-stat-jun16/makeplots1.py
# see python/sedfitting/ Readme.testing, Readme.montecarlo
import numpy as np
import matplotlib.pyplot as plt
import scipy.special
from scipy import optimize
# maybe: from sedfitting import ...
# Change or add chary in place of rieke - Feb 2019
import read_rieke_seds, read_ir_filters
import read_chary_seds
import sedflux, make_sedflux_z
import lumdistance_lcdm
import read_one_draineli_model, read_draineli_models
import convert_draineli_sed
import composite_draineli_sed
import fitonesed
import fitsedfamily, fitsedfamily_mc
import fitsedcombine, fitsedcombine_mc
import plot_data_sed
# upper LIR limit for LIR plots
# 13.2 to show everything, 12.7 to suppress the top 2 SEDs that George
# says are total extrapolations
# loglir_uplim = 13.2
loglir_uplim = 12.7
# Output directory for every figure produced by this script.
plotdir='chary_plots_v2/'
# read filters and seds
filt1 = read_ir_filters.read_ir_filters(makeplot=0)
# Collect the filter labels into a wavelength array used for plotting;
# each filter dict's 'label' field is treated as its wavelength here.
filtwaves = np.zeros(len(filt1))
for i in range(len(filt1)):
    filtwaves[i] = filt1[i]['label']
# sedrieke = read_rieke_seds.read_rieke_seds(makeplot=0)
# rieke_loglir = 9.75 + 0.25 * np.arange(len(sedrieke))
# I made the Chary file to be like the Rieke file and loglir are the same
sedchary = read_chary_seds.read_chary_seds(makeplot=0)
# Template grid: log LIR from 9.75 upward in 0.25 dex steps.
chary_loglir = 9.75 + 0.25 * np.arange(len(sedchary))
# Number of templates below the plotting/fitting upper LIR limit.
nsed_max = np.size(np.where(chary_loglir < loglir_uplim))
# nsed_touse = len(sedchary)
nsed_touse = nsed_max
ztest = 0.003
# flux1 = make_sedflux_z.make_sedflux_z(sedrieke,filt1,ztest)
# Synthetic photometry of every Chary template through every filter.
flux1 = make_sedflux_z.make_sedflux_z(sedchary,filt1,ztest)
# read DL models
# Two fixed-U Draine & Li (2007) model subsets, converted to SED dicts.
fname = '/Users/bjw/dustmass/draine_li_2007/list.U1.00.model_subset1'
direc = '/Users/bjw/dustmass/draine_li_2007'
dlmodels_set1 = read_draineli_models.read_draineli_models(fname,dir=direc,makeplot=0)
dlseds_set1 = convert_draineli_sed.convert_draineli_sed(dlmodels_set1)
fname = '/Users/bjw/dustmass/draine_li_2007/list.models.largesubset1'
direc = '/Users/bjw/dustmass/draine_li_2007'
dlmodels_set2 = read_draineli_models.read_draineli_models(fname,dir=direc,makeplot=0)
dlseds_set2 = convert_draineli_sed.convert_draineli_sed(dlmodels_set2)
# make composite DL models
# Combine a Umax ("PDR") component with a Umin ("diffuse") component for
# each gamma mixing fraction in gammavals.
fname = '/Users/bjw/dustmass/draine_li_2007/list.composite_models1.umax'
direc = '/Users/bjw/dustmass/draine_li_2007'
dlmodels_part1 = read_draineli_models.read_draineli_models(fname,dir=direc,makeplot=0)
dlseds_part1 = convert_draineli_sed.convert_draineli_sed(dlmodels_part1)
fname = '/Users/bjw/dustmass/draine_li_2007/list.composite_models1.umin'
direc = '/Users/bjw/dustmass/draine_li_2007'
dlmodels_part2 = read_draineli_models.read_draineli_models(fname,dir=direc,makeplot=0)
dlseds_part2 = convert_draineli_sed.convert_draineli_sed(dlmodels_part2)
gammavals = [0.0, 0.1, 0.2, 0.3]
dlseds_composite = composite_draineli_sed.composite_draineli_sed(dlseds_part1, dlseds_part2, gammavals, makeplot=0)
import copy
# renormalize some DL models to be in units of 1e6 msun
# This makes fitting much more stable since coeffs are near 1.
# are there syntax problems here?
# Deep-copy first so the original composite SED list is left untouched.
dlseds_composite_renorm = copy.deepcopy(dlseds_composite)
for i in range(len(dlseds_composite_renorm)):
    tmp1 = 1e6 * dlseds_composite_renorm[i]['flux']
    dlseds_composite_renorm[i]['flux'] = tmp1
####
# plot the Chary template spectra
# change to obey loglir_uplim
# Overplot each template spectrum (line) with its synthetic filter
# photometry (dots), all in log-log space.
plt.clf()
for i in range(nsed_touse):
    linestyle = 'k-'
    plt.plot(np.log10(sedchary[i]['wave']), np.log10(sedchary[i]['flux']), linestyle)
    dotstyle = 'ko'
    plt.plot(np.log10(filtwaves),np.log10(flux1[i,0:]),dotstyle)
# fig = plt.xlim(0.3,3.0)
fig = plt.axis([0.5,3.0,-1.0,5.5])
fig = plt.xlabel('log wavelength, microns')
fig = plt.ylabel('Chary template flux, Jy')
plt.savefig(plotdir + 'chary_templ_logflux.pdf')
####
# plot some Draine & Li models
# dlseds_plot = dlseds_composite_best9
# dlseds_plot = dlseds_composite
dlseds_plot = dlseds_composite_renorm
plt.clf()
for i in range(len(dlseds_plot)):
    plt.plot(np.log10(dlseds_plot[i]['wave']),np.log10(dlseds_plot[i]['flux']),'k-')
#plt.xlim(0.3,3.0)
#plt.axis([0.3,3.0,-11.0,-4.0])
plt.axis([0.3,3.0,-5.0,2.0])
fig = plt.xlabel('log wavelength, microns')
fig = plt.ylabel('DL07 model flux for 10^6 Msun')
plt.savefig(plotdir + 'dlseds_composite_renorm_flux.pdf')
# moved the plotting of the best-9 models to later
####
# fit best single SED to a series of Chary models and plot each
# For each selected template (log LIR 10..12), fit the single best DL07
# model with 10% flux errors and plot the template, the mock photometry,
# the fitted model spectrum, and its predicted filter fluxes.
# dlseds_fituse = dlseds_set2
dlseds_fituse = dlseds_composite_renorm
iobs_array = [1,3,5,7,9]
lir_name = ['10','10.5','11','11.5','12']
for ii in range(len(iobs_array)):
    # iobs = 5
    iobs = iobs_array[ii]
    plotname = 'chary_lir' + lir_name[ii] + '_onesed_fit.pdf'
    # iobs = 5
    testwave = filtwaves
    zobs = 0.003
    testflux = flux1[iobs,0:]
    testferr = 0.1 * testflux
    nbest, fnormarray, chisqarray = fitsedfamily.fitsedfamily(dlseds_fituse, zobs, testwave, testflux, testferr, filt1, makeplot=0, logplot=1)
    nobs=len(testwave)
    # Fit probability for each model via the incomplete gamma function.
    probarray = scipy.special.gammaincc((nobs-2)/2.0, chisqarray/2.0)
    totprob = sum(probarray)
    fitwave_model = dlseds_fituse[nbest]['wave'] * (1+zobs)
    fitflux_model = fnormarray[nbest] * dlseds_fituse[nbest]['flux']
    fpredict = np.zeros(len(testwave))
    for i in range(len(fpredict)):
        fpredict[i] = fnormarray[nbest] * sedflux.sedflux(fitwave_model, dlseds_fituse[nbest]['flux'], filt1[i]['wave'], filt1[i]['response'])
    fitwave = testwave
    plt.clf()
    plt.plot(np.log10(sedchary[iobs]['wave']), np.log10(sedchary[iobs]['flux']), 'k-')
    plt.plot(np.log10(testwave), np.log10(testflux), 'ko')
    # 2.3026 = ln(10): converts fractional errors to log10 error bars.
    plt.errorbar(np.log10(testwave), np.log10(testflux), yerr=testferr/testflux/2.3026, fmt='ko')
    plt.plot(np.log10(fitwave), np.log10(fpredict), 'rx')
    plt.plot(np.log10(fitwave_model), np.log10(fitflux_model), 'r-')
    # plt.xlim(0.3,3.0)
    ax = plt.axis([0.3,3.0,0.0,4.0])
    plt.xlabel('log wavelength')
    plt.ylabel('log flux, Chary template + 1-model fit')
    plt.savefig(plotdir + plotname)
    # plt.savefig(plotdir + 'chary_lir11_onesed_fit.pdf')
#wobs2, fpredict2 = plot_data_sed.plot_data_sed(dlseds_composite, nbest, fnormarray[nbest], zobs, testwave, testflux, testferr, filt1, logplot=1)
#plt.savefig(plotdir + 'chary_lir11_onesed_fitv2.pdf')
##########
# monte carlo one model SED at a time to Chary templates
# Run nmonte Monte Carlo realizations of the single-SED fit for every
# template, perturbing the fluxes by the 10% errors each time.
# dlseds_fituse = dlseds_set2
dlseds_fituse = dlseds_composite_renorm
ztest = 0.003
# nsed = len(sedchary)
nsed = nsed_touse
testwave = filtwaves
zobs = 0.003
# nmonte = 100
nmonte = 20
result_norm_list = []
result_prob_list = []
result_mc_list = []
for iobs in range(nsed):
    testflux = flux1[iobs,0:]
    testferr = 0.1 * testflux
    # testferr = 0.2 * testflux
    result_norm, result_prob, result_mcfits = fitsedfamily_mc.fitsedfamily_mc(dlseds_fituse, zobs, testwave, testflux, testferr, filt1, nmonte, makeplot=0)
    result_norm_list.append(result_norm)
    result_prob_list.append(result_prob)
    result_mc_list.append(result_mcfits)
# Print the per-template normalization statistics from the MC runs.
print " best rms expect rms wtmean median-err"
for i in range(nsed):
    tmp1 = np.array(result_norm_list[i])
    print '%7.2f %7.2f %7.2f %7.2f %7.2f %7.2f' % tuple(tmp1)
result_mc_list_10 = copy.deepcopy(result_mc_list)
# Look at result_mc_list to see what SEDs are used
nbest10all = []
for i in range(len(result_mc_list_10)):
# nbest10all.append(result_mc_list_10[i]['nbest'])
nbest10all = nbest10all + result_mc_list_10[i]['nbest']
#nbest20all = []
#for i in range(len(result_mc_list_20)):
# # nbest20all.append(result_mc_list_20[i]['nbest'])
# nbest20all = nbest20all + result_mc_list_20[i]['nbest']
# plot histogram of which spectra get used as best fits
bestmodels10, counts10 = np.unique(np.array(nbest10all),return_counts=True)
#bestmodels20, counts20 = np.unique(np.array(nbest20all),return_counts=True)
counts10sort = sorted(counts10, reverse=True)
#counts20sort = sorted(counts20, reverse=True)
ntotmodels = sum(counts10)
counts10sortfrac = np.array(counts10sort)/float(ntotmodels)
#counts20sortfrac = np.array(counts20sort)/float(ntotmodels)
# print the indexes of the N most often used models
model_index_counts = zip(bestmodels10, counts10)
tmp1 = sorted(model_index_counts, key=lambda elem: elem[1])
# this undoes the zip, making two tuples sorted by the counts
tmp2 = zip(*tmp1)
model_index_sorted = tmp2[0]
models_10mostfrequent = model_index_sorted[0:10]
print "indexes of most used models: ", models_10mostfrequent
print "count frac of most used models: ", counts10sortfrac[0:10]
plt.clf()
plt.xlabel('DL07 SED models ordered by fit popularity')
plt.ylabel('Fraction of best fits that are model N')
ax1 = plt.step(range(len(counts10)),counts10sortfrac,'b-')
#ax2 = plt.step(range(len(counts20)),counts20sortfrac,'r-')
plt.text(7,0.25,'10% flux errors',color='blue')
#plt.text(7,0.2,'20% flux errors',color='red')
#plt.figlegend( (ax1[0], ax2[0]), ('10% flux errors', '20% flux errors'), 'upper right')
plt.savefig(plotdir + 'hist_modelcounts_fitonesed.pdf')
# Unpack the MC normalization results (best-fit value, MC scatter,
# probability-weighted expectation and its errors) into per-template arrays.
mass_result_best = np.zeros(nsed)
mass_result_bestrms = np.zeros(nsed)
mass_result_expect = np.zeros(nsed)
mass_result_expectrms = np.zeros(nsed)
mass_result_marginalrms = np.zeros(nsed)
for i in range(nsed):
    mass_result_best[i] = result_norm_list[i][0]
    mass_result_bestrms[i] = result_norm_list[i][1]
    mass_result_expect[i] = result_norm_list[i][2]
    mass_result_expectrms[i] = result_norm_list[i][3]
    mass_result_marginalrms[i] = result_norm_list[i][5]
#mass_result_best = result_norm_list[0:,0]
#mass_result_bestrms = result_norm_list[0:,1]
#mass_result_expect = result_norm_list[0:,2]
#mass_result_expectrms = result_norm_list[0:,3]
#mass_result_marginalrms = result_norm_list[0:,5]
# plot log Mdust estimate as fn of log LIR
# (+6 converts from units of 1e6 Msun back to log solar masses)
plt.clf()
plt.plot(chary_loglir[0:nsed], np.log10(mass_result_expect)+6, 'ko')
plt.errorbar(chary_loglir[0:nsed], np.log10(mass_result_expect)+6, yerr=mass_result_expectrms/mass_result_expect/2.3026,fmt='ko')
fig = plt.xlabel('Chary log IR luminosity, Lsun')
fig = plt.ylabel('log dust mass, Msun')
fig = plt.axis([9.55,loglir_uplim,7.0,9.5])
plt.savefig(plotdir + 'loglir_logmdust_onesed_expect.pdf')
# plot ratio of Mdust error from MC to median error est from marginalizing
# over probablilities in single sim
# this may not be meaningful if the MC realizations mostly get stuck
# on the same SED
# suppress the log lir = 12.75 and 13.0 points because the SEDs are
# extrapolations and the fit failed
# itoplot = range(len(chary_loglir))
# ntoplot = len(chary_loglir) - 2
ntoplot = nsed_touse
itoplot = range(ntoplot)
plt.clf()
plt.plot(chary_loglir[itoplot], mass_result_expectrms[itoplot]/mass_result_marginalrms[itoplot], 'ko')
plt.plot(chary_loglir[itoplot], mass_result_expectrms[itoplot]/mass_result_marginalrms[itoplot], 'k-')
fig = plt.xlabel('Chary log IR luminosity, Lsun')
fig = plt.ylabel('Mdust error: MC RMS / marginal RMS')
# plt.xlim(9.5,13.25)
fig = plt.axis([9.55,loglir_uplim,0.0,10.0])
plt.savefig(plotdir + 'loglir_mdust_onesed_error_ratio.pdf')
##########
# most frequently used SEDs in some fits I did earlier, for Rieke
#
# These are old.
# indexbest3 = [ 56, 76, 149]
# indexbest9 = [ 56, 76, 113, 116, 133, 136, 149, 181, 201]
# Use the indexes from the sorted list of most popular
indexbest3 = model_index_sorted[0:3]
indexbest9 = model_index_sorted[0:9]
# There's probably a better way but this works
dlseds_composite_best9 = []
for i in indexbest9:
    dlseds_composite_best9.append(dlseds_composite[i])
dlseds_composite_best3 = []
for i in indexbest3:
    dlseds_composite_best3.append(dlseds_composite[i])
# Renormalize the selected subsets to 1e6 Msun units, same as above.
dlseds_composite_renorm9 = copy.deepcopy(dlseds_composite_best9)
for i in range(len(dlseds_composite_renorm9)):
    tmp1 = 1e6 * dlseds_composite_renorm9[i]['flux']
    dlseds_composite_renorm9[i]['flux'] = tmp1
dlseds_composite_renorm3 = copy.deepcopy(dlseds_composite_best3)
for i in range(len(dlseds_composite_renorm3)):
    tmp1 = 1e6 * dlseds_composite_renorm3[i]['flux']
    dlseds_composite_renorm3[i]['flux'] = tmp1
# Plot the 9 most popular renormalized models.
dlseds_plot = dlseds_composite_renorm9
plt.clf()
for i in range(len(dlseds_plot)):
    plt.plot(np.log10(dlseds_plot[i]['wave']),np.log10(dlseds_plot[i]['flux']),'k-')
#plt.xlim(0.3,3.0)
#plt.axis([0.3,3.0,-11.0,-4.0])
plt.axis([0.3,3.0,-5.0,2.0])
fig = plt.xlabel('log wavelength, microns')
fig = plt.ylabel('DL07 model flux for 10^6 Msun')
plt.savefig(plotdir + 'dlseds_composite_renorm9_flux.pdf')
##########
# fit a combination to a single Chary SED and plot
# Fit a non-negative combination of the 9 most popular DL07 models to each
# selected template; plot each non-zero component (blue) and the sum (red).
dlseds_fituse = dlseds_composite_renorm9
iobs_array = [1,3,5,7,9]
lir_name = ['10','10.5','11','11.5','12']
for ii in range(len(iobs_array)):
    # iobs = 5
    iobs = iobs_array[ii]
    plotname = 'chary_lir' + lir_name[ii] + '_combine_fit.pdf'
    testwave = filtwaves
    zobs = 0.003
    testflux = flux1[iobs,0:]
    testferr = 0.1 * testflux
    fitcoeffs, fiterrors, chisq = fitsedcombine.fitsedcombine(dlseds_fituse, zobs, testwave, testflux, testferr, filt1, penalize=1.0, initguess=0.0, makeplot=0, logplot=1)
    nobs=len(testwave)
    plt.clf()
    plt.plot(np.log10(sedchary[iobs]['wave']), np.log10(sedchary[iobs]['flux']), 'k-')
    plt.plot(np.log10(testwave), np.log10(testflux), 'ko')
    plt.errorbar(np.log10(testwave), np.log10(testflux), yerr=testferr/testflux/2.3026, fmt='ko')
    fpredict = np.zeros(len(testwave))
    fsum = np.zeros(len(testwave))
    fsum_model = np.zeros(len(dlseds_fituse[0]['wave']))
    # Only plot/accumulate components with effectively non-zero coefficients.
    for j in range(len(dlseds_fituse)):
        if fitcoeffs[j] > 1.0e-6:
            fitwave_model = dlseds_fituse[j]['wave']
            fitflux_model = fitcoeffs[j] * dlseds_fituse[j]['flux']
            for k in range(nobs):
                fpredict[k] = fitcoeffs[j] * sedflux.sedflux(fitwave_model, dlseds_fituse[j]['flux'], filt1[k]['wave'], filt1[k]['response'])
            fitwave=testwave
            plt.plot(np.log10(fitwave), np.log10(fpredict), 'bx')
            plt.plot(np.log10(fitwave_model), np.log10(fitflux_model), 'b-')
            fsum = fsum + fpredict
            fsum_model = fsum_model + fitflux_model
    plt.plot(np.log10(fitwave), np.log10(fsum), 'rx')
    plt.plot(np.log10(fitwave_model), np.log10(fsum_model), 'r-')
    #plt.xlim(0.3,3.0)
    ax = plt.axis([0.3,3.0,0.0,4.0])
    plt.xlabel('log wavelength')
    plt.ylabel('log flux, Chary template + combined fit')
    plt.savefig(plotdir + plotname)
    # plt.savefig(plotdir + 'chary_lir11.5_combine_fit.pdf')
#stop
####
# monte carlo of fitting combination over renorm best9 modes to all
# Chary templates
# try penalize=1.0, initguess=0, and SLSQP
dlseds_fituse = dlseds_composite_renorm9
#nsed = len(sedchary)
nsed = nsed_touse
testwave = filtwaves
zobs = 0.003
# nmonte = 100
# nmonte = 20
nmonte = 40
result_coeffs_list = []
result_prob_list = []
result_mc_list = []
for iobs in range(nsed):
    testflux = flux1[iobs,0:]
    testferr = 0.1 * testflux
    # testferr = 0.2 * testflux
    result_coeffs, result_prob, result_mcfits = fitsedcombine_mc.fitsedcombine_mc(dlseds_fituse, zobs, testwave, testflux, testferr, filt1, nmonte, penalize=1.0, initguess=0, makeplot=0, logplot=0)
    result_coeffs_list.append(result_coeffs)
    result_prob_list.append(result_prob)
    result_mc_list.append(result_mcfits)
# NOTE: shallow copy; result_mc_list_10 shares the per-template entries
# with result_mc_list (it is not used again below).
result_mc_list_10 = result_mc_list[:]
# Print coefficient-sum and number-of-nonzero-component statistics.
for i in range(nsed):
    tmp1 = (result_coeffs_list[i]['meansum'], result_coeffs_list[i]['rmssum'],
            result_coeffs_list[i]['meannzero'], result_coeffs_list[i]['rmsnzero'])
    print '%6.2f %5.2f %5.2f %5.2f' % tmp1
mass_combine_mean = np.zeros(nsed)
mass_combine_rms = np.zeros(nsed)
for i in range(nsed):
    mass_combine_mean[i] = result_coeffs_list[i]['meansum']
    mass_combine_rms[i] = result_coeffs_list[i]['rmssum']
# plot log Mdust estimate as fn of log LIR
plt.clf()
plt.plot(chary_loglir[0:nsed], np.log10(mass_combine_mean)+6, 'ko')
plt.errorbar(chary_loglir[0:nsed], np.log10(mass_combine_mean)+6, yerr=mass_combine_rms/mass_combine_mean/2.3026,fmt='ko')
fig = plt.xlabel('log IR luminosity, Lsun')
fig = plt.ylabel('log combined dust mass, Msun')
fig = plt.axis([9.55,loglir_uplim,7.0,9.5])
plt.savefig(plotdir + 'loglir_logmdust_combine_mean.pdf')
# plot fit to one template showing all fitted components - see above
####
#stop
####
# plot comparing the log LIR estimates from combine and onesed
# Overlay single-SED (red) and combined-fit (blue) dust-mass estimates.
plt.clf()
plt.subplot(1,1,1)
plt.plot(chary_loglir[itoplot], np.log10(mass_result_expect[itoplot])+6, 'ro')
plt.plot(chary_loglir[itoplot], np.log10(mass_result_expect[itoplot])+6, 'r-')
plt.errorbar(chary_loglir[itoplot], np.log10(mass_result_expect[itoplot])+6, yerr=mass_result_expectrms[itoplot]/mass_result_expect[itoplot]/2.3026,fmt='ro')
fig = plt.xlabel('Chary log IR luminosity, Lsun')
fig = plt.ylabel('log dust mass, Msun')
fig = plt.text(9.8,9.1,'one SED fit',color='red')
#fig = plt.axis([9.55,loglir_uplim,7.0,9.5])
#plt.subplot(2,1,2)
plt.plot(chary_loglir[itoplot], np.log10(mass_combine_mean[itoplot])+6, 'bo')
plt.plot(chary_loglir[itoplot], np.log10(mass_combine_mean[itoplot])+6, 'b-')
plt.errorbar(chary_loglir[itoplot], np.log10(mass_combine_mean[itoplot])+6, yerr=mass_combine_rms[itoplot]/mass_combine_mean[itoplot]/2.3026,fmt='bo')
#fig = plt.xlabel('Chary log IR luminosity, Lsun')
#fig = plt.ylabel('log combined dust mass, Msun')
fig = plt.text(9.8,8.7,'combined SED fit',color='blue')
fig = plt.axis([9.55,loglir_uplim,7.0,9.5])
plt.savefig(plotdir + 'loglir_logmdust_combine_and_onesed.pdf')
# Offset between the two mass estimators, in dex.
logmassdiff = np.log10(mass_result_expect[itoplot]) - np.log10(mass_combine_mean[itoplot])
plt.subplot(1,1,1)
####
# plot comparing the error estimates
plt.clf()
plt.subplot(1,1,1)
#logerror_ratio = (mass_combine_rms/mass_combine_mean/2.3026) / (mass_result_expectrms/mass_result_expect/2.3026)
error_ratio = mass_result_expectrms / mass_combine_rms
error_ratio_dex = np.log10(error_ratio)
plt.plot(chary_loglir[itoplot], error_ratio, 'ko')
plt.plot(chary_loglir[itoplot], error_ratio, 'k-')
fig = plt.xlabel('Chary log IR luminosity, Lsun')
fig = plt.ylabel('error estimate ratio, 1 SED / combination')
fig = plt.axis([9.55,loglir_uplim,0.0,8.0])
plt.savefig(plotdir + 'loglir_errmdust_ratio.pdf')
#####
# Final console summary of model counts, popular model indexes and offsets.
print "Summary:"
print "number of DL models in U1 set and in large subset: ",len(dlmodels_set1),len(dlmodels_set2)
print "number of DL models, composite: ",len(dlseds_composite)
print "number of DL models plotted: ",len(dlseds_composite_renorm)," and",len(dlseds_plot)
print "number of DL models used in 1-model fits: ",len(dlseds_fituse)
print "indexes of most used models: ", models_10mostfrequent
print "count frac of most used models: ", counts10sortfrac[0:10]
print "indexes of models I was using in fit: ", indexbest9
print "number of galaxy templates fitted: ", nsed
print "log mass offset, onesed - combine: ", logmassdiff
print "mean log mass offset over the SEDs: ", np.mean(logmassdiff)
| 37.232422 | 196 | 0.734722 |
acf0df05f0d1b3ebf74c599182173f8a24de6754 | 3,496 | py | Python | tools/list_image_annotations_pairs.py | NiklasHoltmeyer/FashionDatasets | a9309f90abd6bff739ecffafd69cf52506f2cb97 | [
"MIT"
] | null | null | null | tools/list_image_annotations_pairs.py | NiklasHoltmeyer/FashionDatasets | a9309f90abd6bff739ecffafd69cf52506f2cb97 | [
"MIT"
] | null | null | null | tools/list_image_annotations_pairs.py | NiklasHoltmeyer/FashionDatasets | a9309f90abd6bff739ecffafd69cf52506f2cb97 | [
"MIT"
] | null | null | null | import argparse
import os
from pathlib import Path
from random import shuffle
def parse_args():
    """Parse command-line arguments for exporting image/annotation pairs.

    Returns:
        argparse.Namespace with dataset_path, split, image_dir_name,
        label_dir_name and sep attributes.
    """
    parser = argparse.ArgumentParser(
        description=
        'Export Image Annotations Pairs as TXT'
    )
    parser.add_argument(
        '--ds_path',
        dest='dataset_path',
        help='Base Dataset Path',
        type=str,
        required=True)
    # Fix: --split used to be required=True, which made the documented
    # default unreachable; it is now optional and defaults to 70/15/15.
    parser.add_argument(
        '--split',
        dest='split',
        help='Desired Split [Train, Validate, Test] e.g. default 0.7, 0.15, 0.15',
        nargs=3,
        type=float,
        required=False,
        default=[0.7, 0.15, 0.15]
    )
    parser.add_argument(
        '--image_dir_name',
        dest='image_dir_name',
        help='Name of Image (Input) Folder.',
        type=str,
        required=False,
        default="images"
    )
    parser.add_argument(
        '--label_dir_name',
        dest='label_dir_name',
        # Fix: the help text was copy-pasted from --image_dir_name.
        help='Name of Annotation (Label) Folder.',
        type=str,
        required=False,
        default="annotations"
    )
    parser.add_argument(
        '--sep',
        dest='sep',
        help='Separator',
        type=str,
        required=False,
        default=" "
    )
    return parser.parse_args()
def list_image_annotations_pairs(ds_path, image_dir_name, label_dir_name):
    """Return relative (image, annotation) path pairs for a dataset folder.

    Both directories must contain the same number of files with matching
    file stems (extension is ignored); an AssertionError is raised otherwise.
    """
    # Fix: sort both listings — os.listdir returns entries in arbitrary
    # order, so zipping the raw listings could pair mismatched files.
    image_file_names = sorted(os.listdir(Path(ds_path, image_dir_name)))
    label_file_names = sorted(os.listdir(Path(ds_path, label_dir_name)))

    assert len(image_file_names) == len(label_file_names), "Len(Images) != Len(Labels)"

    def same_file_name(img_lbl, IGNORE_FILE_FORMAT=True):
        # Compare file stems (drop the extension) by default.
        img, lbl = img_lbl
        if IGNORE_FILE_FORMAT:
            return img.split(".")[0] == lbl.split(".")[0]
        return img == lbl

    image_labels = list(zip(image_file_names, label_file_names))
    assert all(map(same_file_name, image_labels)), "Annotations != Imgs"

    def relative_paths(img_lbl):
        img, lbl = img_lbl
        return f"{image_dir_name}/{img}", f"{label_dir_name}/{lbl}"

    return [relative_paths(pair) for pair in image_labels]
def split_pairs(pairs, splits, shuffle_pairs=True):
    """Split `pairs` into train/val/test partitions.

    Args:
        pairs: list of items to split (shuffled in place when shuffle_pairs).
        splits: dict with float fractions under "train", "val" and "test"
            keys; the fractions must sum to 1.0.
        shuffle_pairs: shuffle before splitting (default True).

    Returns:
        dict with "train", "val" and "test" lists that together cover
        all of `pairs` (any rounding remainder goes to "train").
    """
    # Fix: use a tolerance instead of exact float equality — e.g.
    # 0.7 + 0.2 + 0.1 is not exactly 1.0 in binary floating point.
    assert abs(sum(splits.values()) - 1.0) < 1e-9

    if shuffle_pairs:
        shuffle(pairs)

    train_samples = int(splits["train"] * len(pairs))
    validate_samples = int(splits["val"] * len(pairs))
    test_samples = int(splits["test"] * len(pairs))
    # Give the rounding remainder to the training split.
    train_samples += (len(pairs) - train_samples - validate_samples - test_samples)

    # Fix: slice with explicit boundaries.  The previous negative-index
    # slicing gave "val" a test-sized chunk and "test" a val-sized chunk,
    # and pairs[-0:] returned the whole list when validate_samples == 0.
    ds = {
        "train": pairs[:train_samples],
        "val": pairs[train_samples:train_samples + validate_samples],
        "test": pairs[train_samples + validate_samples:],
    }
    assert (len(ds["train"]) + len(ds["val"]) + len(ds["test"])) == len(pairs)
    return ds
def save_pairings_to_txt(_args):
    """Build (image, annotation) pair lines, split them, and write one TXT
    file per split ("train.txt", "val.txt", "test.txt") into the dataset
    folder given by _args.dataset_path.

    _args: argparse.Namespace from parse_args() with dataset_path, split,
    image_dir_name, label_dir_name and sep attributes.
    """
    split = {
        "train": _args.split[0],
        "val": _args.split[1],
        "test": _args.split[2]
    }
    img_annotation_pairs = list_image_annotations_pairs(_args.dataset_path, _args.image_dir_name, _args.label_dir_name)
    # One "image<sep>annotation\n" line per pair.
    img_annotation_pairs = list(map(lambda x: _args.sep.join(x) + "\n", img_annotation_pairs))
    splitted_data = split_pairs(img_annotation_pairs, split)
    # NOTE: the loop variable shadows the `split` dict defined above.
    for split, pairs in splitted_data.items():
        with open(Path(_args.dataset_path, split + ".txt"), 'w+') as f:
            f.writelines(pairs)
        # Re-open the file to verify the written line count matches.
        with open(Path(_args.dataset_path, split + ".txt"), 'r') as f:
            assert (len(list(f.readlines()))) == len(pairs)
# CLI entry point: parse arguments and export the split TXT files.
if __name__ == "__main__":
    args = parse_args()
    save_pairings_to_txt(args)
| 26.892308 | 119 | 0.619279 |
acf0e01c7e446dc4f68d183fabde178fb7f39777 | 943 | py | Python | tyrell/venv/lib/python3.8/site-packages/rpy2/tests/robjects/test_translated_function.py | YuehanLee/CS190I | c5e3dca9f3b936a15b254abfd0c245c470e8c27e | [
"Apache-2.0"
] | null | null | null | tyrell/venv/lib/python3.8/site-packages/rpy2/tests/robjects/test_translated_function.py | YuehanLee/CS190I | c5e3dca9f3b936a15b254abfd0c245c470e8c27e | [
"Apache-2.0"
] | null | null | null | tyrell/venv/lib/python3.8/site-packages/rpy2/tests/robjects/test_translated_function.py | YuehanLee/CS190I | c5e3dca9f3b936a15b254abfd0c245c470e8c27e | [
"Apache-2.0"
] | null | null | null | import pytest
import rpy2.robjects as robjects
rinterface = robjects.rinterface
import array
# Convenience handles used throughout the tests below: R's base
# `identical` for object comparison, plus the two wrapper classes under test.
identical = rinterface.baseenv['identical']
Function = robjects.functions.Function
SignatureTranslatedFunction = robjects.functions.SignatureTranslatedFunction
def test_init_invalid():
    # Constructing from a non-Function (a plain string) must raise.
    with pytest.raises(ValueError):
        SignatureTranslatedFunction('a')
def test_init():
    # Wrapping an R function must leave it identical to the original.
    ri_f = rinterface.baseenv.find('rank')
    ro_f = SignatureTranslatedFunction(ri_f)
    assert identical(ri_f, ro_f)[0] is True
def test_init_with_translation():
    # Supplying a parameter-name translation map must not alter the
    # wrapped R function itself.
    ri_f = rinterface.baseenv.find('rank')
    ro_f = SignatureTranslatedFunction(
        ri_f,
        init_prm_translate = {'foo_bar': 'na.last'})
    assert identical(ri_f, ro_f)[0] is True
def test_call():
    # Calling R's `sum` through the wrapper: 1 + 2 + 3 == 6.
    ri_f = rinterface.baseenv.find('sum')
    ro_f = robjects.Function(ri_f)
    ro_v = robjects.IntVector(array.array('i', [1,2,3]))
    s = ro_f(ro_v)
    assert s[0] == 6
| 24.179487 | 76 | 0.709438 |
acf0e10f3f6bed2bf342e803fc0fc1d477c80869 | 725 | py | Python | cape_frontend/webapp/mocks/timeout/timeout_settings.py | edwardmjackson/cape-frontend | 4204f50304ee5cf8808a564b6f8bf969a5bf4043 | [
"Apache-2.0"
] | 5 | 2018-08-01T16:44:23.000Z | 2018-08-15T14:19:58.000Z | cape_frontend/webapp/mocks/timeout/timeout_settings.py | edwardmjackson/cape-frontend | 4204f50304ee5cf8808a564b6f8bf969a5bf4043 | [
"Apache-2.0"
] | null | null | null | cape_frontend/webapp/mocks/timeout/timeout_settings.py | edwardmjackson/cape-frontend | 4204f50304ee5cf8808a564b6f8bf969a5bf4043 | [
"Apache-2.0"
] | 7 | 2018-09-27T14:02:30.000Z | 2020-06-29T03:45:16.000Z | # Copyright 2018 BLEMUNDSBURY AI LIMITED
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from cape_frontend.webapp.mocks.mocks_settings import URL_MOCKS_BASE,API_VERSION
# Base URL for the timeout mock endpoints: <mocks base>/timeout/api/<version>.
URL_BASE = URL_MOCKS_BASE+'/timeout/api/'+API_VERSION
| 38.157895 | 80 | 0.78069 |
acf0e1628bf654e517da544f0db0141d9bb54aef | 1,811 | py | Python | pgweb/docs/migrations/0001_initial.py | WeilerWebServices/PostgreSQL | ae594ed077bebbad1be3c1d95c38b7c2c2683e8c | [
"PostgreSQL"
] | null | null | null | pgweb/docs/migrations/0001_initial.py | WeilerWebServices/PostgreSQL | ae594ed077bebbad1be3c1d95c38b7c2c2683e8c | [
"PostgreSQL"
] | null | null | null | pgweb/docs/migrations/0001_initial.py | WeilerWebServices/PostgreSQL | ae594ed077bebbad1be3c1d95c38b7c2c2683e8c | [
"PostgreSQL"
] | null | null | null | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import migrations, models
from django.conf import settings
class Migration(migrations.Migration):
    # Auto-generated initial migration for the docs app: creates the
    # DocComment and DocPage models plus a (file, version) uniqueness rule.
    dependencies = [
        migrations.swappable_dependency(settings.AUTH_USER_MODEL),
        ('core', '0001_initial'),
    ]
    operations = [
        # User-submitted comments on a documentation page, moderated via
        # the `approved` flag, newest first.
        migrations.CreateModel(
            name='DocComment',
            fields=[
                ('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
                ('version', models.DecimalField(max_digits=3, decimal_places=1)),
                ('file', models.CharField(max_length=64)),
                ('comment', models.TextField()),
                ('posted_at', models.DateTimeField(auto_now_add=True)),
                ('approved', models.BooleanField(default=False)),
                ('submitter', models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE)),
            ],
            options={
                'ordering': ('-posted_at',),
            },
        ),
        # One documentation page per (file, version) pair, stored in the
        # `docs` table.
        migrations.CreateModel(
            name='DocPage',
            fields=[
                ('id', models.AutoField(serialize=False, primary_key=True)),
                ('file', models.CharField(max_length=64)),
                ('title', models.CharField(max_length=256, null=True, blank=True)),
                ('content', models.TextField(null=True, blank=True)),
                ('version', models.ForeignKey(to='core.Version', db_column='version', to_field='tree', on_delete=models.CASCADE)),
            ],
            options={
                'db_table': 'docs',
            },
        ),
        migrations.AlterUniqueTogether(
            name='docpage',
            unique_together=set([('file', 'version')]),
        ),
    ]
| 36.959184 | 130 | 0.557151 |
acf0e1a2c694ee076a0c7665231b6d8afaa59339 | 11,005 | py | Python | compiler/sram_2bank.py | xinjie0831/OpenRAM | 76e2ab88fe4097ffa51e0387ba72165bcda49e68 | [
"BSD-3-Clause"
] | null | null | null | compiler/sram_2bank.py | xinjie0831/OpenRAM | 76e2ab88fe4097ffa51e0387ba72165bcda49e68 | [
"BSD-3-Clause"
] | null | null | null | compiler/sram_2bank.py | xinjie0831/OpenRAM | 76e2ab88fe4097ffa51e0387ba72165bcda49e68 | [
"BSD-3-Clause"
] | null | null | null | # See LICENSE for licensing information.
#
#Copyright (c) 2016-2019 Regents of the University of California and The Board
#of Regents for the Oklahoma Agricultural and Mechanical College
#(acting for and on behalf of Oklahoma State University)
#All rights reserved.
#
import sys
from tech import drc, spice
import debug
from math import log,sqrt,ceil
import datetime
import getpass
from vector import vector
from globals import OPTS, print_time
from sram_base import sram_base
from bank import bank
from dff_buf_array import dff_buf_array
from dff_array import dff_array
class sram_2bank(sram_base):
"""
Procedures specific to a two bank SRAM.
"""
    def __init__(self, name, sram_config):
        # Delegate all generic SRAM construction to the base class; this
        # subclass only overrides the two-bank placement/routing steps.
        sram_base.__init__(self, name, sram_config)
    def compute_bank_offsets(self):
        """ Compute the overall offsets for a two bank SRAM """
        # All positions below are vector(x, y) offsets in layout units
        # (same units as the bank/bus dimensions computed elsewhere).
        # In 2 bank SRAM, the height is determined by the control bus which is higher than the msb address
        self.vertical_bus_height = self.bank.height + 2*self.bank_to_bus_distance + self.data_bus_height + self.control_bus_height
        # The address bus extends down through the power rails, but control and bank_sel bus don't
        self.addr_bus_height = self.vertical_bus_height
        self.vertical_bus_offset = vector(self.bank.width + self.bank_to_bus_distance, 0)
        # Horizontal buses are stacked above the bank: data, then supply,
        # then control.
        self.data_bus_offset = vector(0, self.bank.height + self.bank_to_bus_distance)
        self.supply_bus_offset = vector(0, self.data_bus_offset.y + self.data_bus_height)
        self.control_bus_offset = vector(0, self.supply_bus_offset.y + self.supply_bus_height)
        self.bank_sel_bus_offset = self.vertical_bus_offset + vector(self.m2_pitch*self.control_size,0)
        self.addr_bus_offset = self.bank_sel_bus_offset.scale(1,0) + vector(self.m2_pitch*self.num_banks,0)
        # Control is placed at the top above the control bus and everything
        self.control_logic_position = vector(0, self.control_bus_offset.y + self.control_bus_height + self.m1_pitch)
        # Bank select flops get put to the right of control logic above bank1 and the buses
        # Leave a pitch to get the vdd rails up to M2
        self.msb_address_position = vector(self.bank_inst[1].lx() + 3*self.supply_rail_pitch,
                                           self.supply_bus_offset.y + self.supply_bus_height \
                                           + 2*self.m1_pitch + self.msb_address.width)
def add_modules(self):
""" Adds the modules and the buses to the top level """
self.compute_bus_sizes()
self.add_banks()
self.compute_bank_offsets()
self.add_busses()
self.add_logic()
self.width = self.bank_inst[1].ur().x
self.height = self.control_logic_inst.uy()
def add_banks(self):
# Placement of bank 0 (left)
bank_position_0 = vector(self.bank.width,
self.bank.height)
self.bank_inst=[self.add_bank(0, bank_position_0, -1, -1)]
# Placement of bank 1 (right)
x_off = self.bank.width + self.vertical_bus_width + 2*self.bank_to_bus_distance
bank_position_1 = vector(x_off, bank_position_0.y)
self.bank_inst.append(self.add_bank(1, bank_position_1, -1, 1))
def add_logic(self):
""" Add the control and MSB logic """
self.add_control_logic(position=self.control_logic_position)
self.msb_address_inst = self.add_inst(name="msb_address",
mod=self.msb_address,
offset=self.msb_address_position,
rotate=270)
self.msb_bank_sel_addr = "ADDR[{}]".format(self.addr_size-1)
self.connect_inst([self.msb_bank_sel_addr,"bank_sel[1]","bank_sel[0]","clk_buf", "vdd", "gnd"])
def route_shared_banks(self):
""" Route the shared signals for two and four bank configurations. """
# create the input control pins
for n in self.control_logic_inputs + ["clk"]:
self.copy_layout_pin(self.control_logic_inst, n)
# connect the control logic to the control bus
for n in self.control_logic_outputs + ["vdd", "gnd"]:
pins = self.control_logic_inst.get_pins(n)
for pin in pins:
if pin.layer=="metal2":
pin_pos = pin.bc()
break
rail_pos = vector(pin_pos.x,self.horz_control_bus_positions[n].y)
self.add_path("metal2",[pin_pos,rail_pos])
self.add_via_center(("metal1","via1","metal2"),rail_pos)
# connect the control logic cross bar
for n in self.control_logic_outputs:
cross_pos = vector(self.vert_control_bus_positions[n].x,self.horz_control_bus_positions[n].y)
self.add_via_center(("metal1","via1","metal2"),cross_pos)
# connect the bank select signals to the vertical bus
for i in range(self.num_banks):
pin = self.bank_inst[i].get_pin("bank_sel")
pin_pos = pin.rc() if i==0 else pin.lc()
rail_pos = vector(self.vert_control_bus_positions["bank_sel[{}]".format(i)].x,pin_pos.y)
self.add_path("metal3",[pin_pos,rail_pos])
self.add_via_center(("metal2","via2","metal3"),rail_pos)
def route_single_msb_address(self):
""" Route one MSB address bit for 2-bank SRAM """
# connect the bank MSB flop supplies
vdd_pins = self.msb_address_inst.get_pins("vdd")
for vdd_pin in vdd_pins:
if vdd_pin.layer != "metal1": continue
vdd_pos = vdd_pin.bc()
down_pos = vdd_pos - vector(0,self.m1_pitch)
rail_pos = vector(vdd_pos.x,self.horz_control_bus_positions["vdd"].y)
self.add_path("metal1",[vdd_pos,down_pos])
self.add_via_center(("metal1","via1","metal2"),down_pos,rotate=90)
self.add_path("metal2",[down_pos,rail_pos])
self.add_via_center(("metal1","via1","metal2"),rail_pos)
gnd_pins = self.msb_address_inst.get_pins("gnd")
# Only add the ground connection to the lowest metal2 rail in the flop array
# FIXME: SCMOS doesn't have a vertical rail in the cell, or we could use those
lowest_y = None
for gnd_pin in gnd_pins:
if gnd_pin.layer != "metal2": continue
if lowest_y==None or gnd_pin.by()<lowest_y:
lowest_y=gnd_pin.by()
gnd_pos = gnd_pin.ur()
rail_pos = vector(gnd_pos.x,self.horz_control_bus_positions["gnd"].y)
self.add_path("metal2",[gnd_pos,rail_pos])
self.add_via_center(("metal1","via1","metal2"),rail_pos)
# connect the MSB flop to the address input bus
msb_pins = self.msb_address_inst.get_pins("din[0]")
for msb_pin in msb_pins:
if msb_pin.layer == "metal3":
msb_pin_pos = msb_pin.lc()
break
rail_pos = vector(self.vert_control_bus_positions[self.msb_bank_sel_addr].x,msb_pin_pos.y)
self.add_path("metal3",[msb_pin_pos,rail_pos])
self.add_via_center(("metal2","via2","metal3"),rail_pos)
# Connect the output bar to select 0
msb_out_pin = self.msb_address_inst.get_pin("dout_bar[0]")
msb_out_pos = msb_out_pin.rc()
out_extend_right_pos = msb_out_pos + vector(2*self.m2_pitch,0)
out_extend_up_pos = out_extend_right_pos + vector(0,self.m2_width)
rail_pos = vector(self.vert_control_bus_positions["bank_sel[0]"].x,out_extend_up_pos.y)
self.add_path("metal2",[msb_out_pos,out_extend_right_pos,out_extend_up_pos])
self.add_wire(("metal3","via2","metal2"),[out_extend_right_pos,out_extend_up_pos,rail_pos])
self.add_via_center(("metal2","via2","metal3"),rail_pos)
# Connect the output to select 1
msb_out_pin = self.msb_address_inst.get_pin("dout[0]")
msb_out_pos = msb_out_pin.rc()
out_extend_right_pos = msb_out_pos + vector(2*self.m2_pitch,0)
out_extend_down_pos = out_extend_right_pos - vector(0,2*self.m1_pitch)
rail_pos = vector(self.vert_control_bus_positions["bank_sel[1]"].x,out_extend_down_pos.y)
self.add_path("metal2",[msb_out_pos,out_extend_right_pos,out_extend_down_pos])
self.add_wire(("metal3","via2","metal2"),[out_extend_right_pos,out_extend_down_pos,rail_pos])
self.add_via_center(("metal2","via2","metal3"),rail_pos)
# Connect clk
clk_pin = self.msb_address_inst.get_pin("clk")
clk_pos = clk_pin.bc()
rail_pos = self.horz_control_bus_positions["clk_buf"]
bend_pos = vector(clk_pos.x,self.horz_control_bus_positions["clk_buf"].y)
self.add_path("metal1",[clk_pos,bend_pos,rail_pos])
def route(self):
""" Route all of the signals for the two bank SRAM. """
self.route_shared_banks()
# connect the horizontal control bus to the vertical bus
# connect the data output to the data bus
for n in self.data_bus_names:
for i in [0,1]:
pin_pos = self.bank_inst[i].get_pin(n).uc()
rail_pos = vector(pin_pos.x,self.data_bus_positions[n].y)
self.add_path("metal2",[pin_pos,rail_pos])
self.add_via_center(("metal2","via2","metal3"),rail_pos)
self.route_single_msb_address()
# connect the banks to the vertical address bus
# connect the banks to the vertical control bus
for n in self.addr_bus_names + self.control_bus_names:
# Skip these from the horizontal bus
if n in ["vdd", "gnd"]: continue
# This will be the bank select, so skip it
if n == self.msb_bank_sel_addr: continue
pin0_pos = self.bank_inst[0].get_pin(n).rc()
pin1_pos = self.bank_inst[1].get_pin(n).lc()
rail_pos = vector(self.vert_control_bus_positions[n].x,pin0_pos.y)
self.add_path("metal3",[pin0_pos,pin1_pos])
self.add_via_center(("metal2","via2","metal3"),rail_pos)
def add_lvs_correspondence_points(self):
"""
This adds some points for easier debugging if LVS goes wrong.
These should probably be turned off by default though, since extraction
will show these as ports in the extracted netlist.
"""
if self.num_banks==1: return
for n in self.control_bus_names:
self.add_label(text=n,
layer="metal2",
offset=self.vert_control_bus_positions[n])
for n in self.bank_sel_bus_names:
self.add_label(text=n,
layer="metal2",
offset=self.vert_control_bus_positions[n])
| 45.6639 | 130 | 0.633439 |
acf0e5f93f43919ca8a537e46d570aa00d8144da | 1,639 | py | Python | backend/serv/online_data.py | Alliance-Of-Independent-Programmers/acc-book | 3a0f9fa1092d7eee54102e787e2233607c6922cf | [
"MIT"
] | null | null | null | backend/serv/online_data.py | Alliance-Of-Independent-Programmers/acc-book | 3a0f9fa1092d7eee54102e787e2233607c6922cf | [
"MIT"
] | 1 | 2021-11-02T22:22:57.000Z | 2021-11-02T22:22:57.000Z | backend/serv/online_data.py | Alliance-Of-Independent-Programmers/acc-book | 3a0f9fa1092d7eee54102e787e2233607c6922cf | [
"MIT"
] | null | null | null | import base64
import os.path
path = os.path.dirname(__file__)


def _encode_image(filename):
    """Return the named image from the sibling Pics directory, base64-encoded as text.

    Using a ``with`` block closes each handle deterministically; the original
    code leaked nine open file handles at import time.
    """
    with open(os.path.join(path, "../Pics/" + filename), "rb") as image_file:
        return base64.b64encode(image_file.read()).decode("UTF-8")


# Pre-encoded avatars, bound to the module-level names the rest of the app imports.
# NOTE(review): "Miahs.jpg" is the actual on-disk filename for misha (sic).
misha = _encode_image("Miahs.jpg")
yaroslav = _encode_image("Yaroslav.jpg")
goblin = _encode_image("Goblin.jpg")
sanya = _encode_image("Sanya.jpg")
artem = _encode_image("Artem.jpg")
slava = _encode_image("Slava.jpg")
andrew = _encode_image("Andrew.jpg")
killreal = _encode_image("KillReal.jpg")
mauri = _encode_image("Maury.jpg")

# Mock "currently online" user records served by the API.
online1 = {
    "login": "Artem",
    "img": artem,
}
online2 = {
    "login": "Slava",
    "img": slava,
}
online3 = {
    "login": "Misha",
    "img": misha,
}
online4 = {
    "login": "Andrew",
    "img": andrew,
}
online5 = {
    "login": "Goblin",
    "img": goblin,
}
online6 = {
    "login": "KillReal",
    "img": killreal,
}
online7 = {
    "login": "Mauri",
    "img": mauri,
}
online8 = {
    "login": "Sany0K",
    "img": sanya,
}
online9 = {
    "login": "Yaroslave",
    "img": yaroslav,
}

all_online = [
    online1,
    online2,
    online3,
    online4,
    online5,
    online6,
    online7,
    online8,
    online9,
]
| 21.565789 | 106 | 0.594875 |
acf0e6025cfbb7d5769c3487e7126644636cbdcf | 810 | py | Python | BioInformaticsStronghold/Computing_GC_Content.py | dmartmillan/rosalind-problems | 2b6e9073257ae2e5a701388caf3bbeff74960f45 | [
"MIT"
] | null | null | null | BioInformaticsStronghold/Computing_GC_Content.py | dmartmillan/rosalind-problems | 2b6e9073257ae2e5a701388caf3bbeff74960f45 | [
"MIT"
] | null | null | null | BioInformaticsStronghold/Computing_GC_Content.py | dmartmillan/rosalind-problems | 2b6e9073257ae2e5a701388caf3bbeff74960f45 | [
"MIT"
] | null | null | null | fileDNA = open("rosalind_gc.txt", "r")
linesFile = fileDNA.readlines()
maximGCname = ''
maximGCvalue = 0
countC, countG = 0, 0
seqDNA = ""
name = ""
for line in linesFile:
if line[0] == '>':
if len(seqDNA) > 0:
cgValue = (countC + countG) / len(seqDNA) * 100
if cgValue > maximGCvalue:
maximGCvalue = cgValue
maximGCname = name
name = line.replace('\n', '')
seqDNA = ""
countG, countC = 0, 0
else:
seqDNA += line.replace('\n', '')
countG += line.count('C')
countC += line.count('G')
if len(seqDNA) > 0:
cgValue = (countC + countG) / len(seqDNA) * 100
if cgValue > maximGCvalue:
maximGCvalue = cgValue
maximGCname = name
print(maximGCname)
print(maximGCvalue)
| 22.5 | 59 | 0.546914 |
acf0e620155faddeebca0ba5655a3010a4d1b34c | 13,269 | py | Python | lesson7.4/tensorflow/core/framework/tensor_pb2.py | magnusmel/Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda | cc226deb7b46852407900f9fec0caf62638defe2 | [
"MIT"
] | 21 | 2018-12-11T20:07:47.000Z | 2021-11-08T13:12:32.000Z | lesson7.4/tensorflow/core/framework/tensor_pb2.py | magnusmel/Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda | cc226deb7b46852407900f9fec0caf62638defe2 | [
"MIT"
] | 1 | 2020-07-07T21:30:02.000Z | 2020-07-08T18:16:03.000Z | lesson7.4/tensorflow/core/framework/tensor_pb2.py | magnusmel/Serverless-Deep-Learning-with-TensorFlow-and-AWS-Lambda | cc226deb7b46852407900f9fec0caf62638defe2 | [
"MIT"
] | 15 | 2018-12-12T02:32:28.000Z | 2021-11-05T20:40:10.000Z | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: tensorflow/core/framework/tensor.proto
# NOTE: protoc-generated module -- regeneration overwrites any hand edits.
import sys
# _b: identity on py2, latin1-encode on py3, so byte-string literals below work on both.
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from tensorflow.core.framework import resource_handle_pb2 as tensorflow_dot_core_dot_framework_dot_resource__handle__pb2
from tensorflow.core.framework import tensor_shape_pb2 as tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2
from tensorflow.core.framework import types_pb2 as tensorflow_dot_core_dot_framework_dot_types__pb2
# Top-level file descriptor; serialized_pb is the compiled tensor.proto schema.
DESCRIPTOR = _descriptor.FileDescriptor(
  name='tensorflow/core/framework/tensor.proto',
  package='tensorflow',
  syntax='proto3',
  serialized_pb=_b('\n&tensorflow/core/framework/tensor.proto\x12\ntensorflow\x1a/tensorflow/core/framework/resource_handle.proto\x1a,tensorflow/core/framework/tensor_shape.proto\x1a%tensorflow/core/framework/types.proto\"\xdc\x03\n\x0bTensorProto\x12#\n\x05\x64type\x18\x01 \x01(\x0e\x32\x14.tensorflow.DataType\x12\x32\n\x0ctensor_shape\x18\x02 \x01(\x0b\x32\x1c.tensorflow.TensorShapeProto\x12\x16\n\x0eversion_number\x18\x03 \x01(\x05\x12\x16\n\x0etensor_content\x18\x04 \x01(\x0c\x12\x14\n\x08half_val\x18\r \x03(\x05\x42\x02\x10\x01\x12\x15\n\tfloat_val\x18\x05 \x03(\x02\x42\x02\x10\x01\x12\x16\n\ndouble_val\x18\x06 \x03(\x01\x42\x02\x10\x01\x12\x13\n\x07int_val\x18\x07 \x03(\x05\x42\x02\x10\x01\x12\x12\n\nstring_val\x18\x08 \x03(\x0c\x12\x18\n\x0cscomplex_val\x18\t \x03(\x02\x42\x02\x10\x01\x12\x15\n\tint64_val\x18\n \x03(\x03\x42\x02\x10\x01\x12\x14\n\x08\x62ool_val\x18\x0b \x03(\x08\x42\x02\x10\x01\x12\x18\n\x0c\x64\x63omplex_val\x18\x0c \x03(\x01\x42\x02\x10\x01\x12<\n\x13resource_handle_val\x18\x0e \x03(\x0b\x32\x1f.tensorflow.ResourceHandleProto\x12\x37\n\x0bvariant_val\x18\x0f \x03(\x0b\x32\".tensorflow.VariantTensorDataProto\"g\n\x16VariantTensorDataProto\x12\x11\n\ttype_name\x18\x01 \x01(\t\x12\x10\n\x08metadata\x18\x02 \x01(\x0c\x12(\n\x07tensors\x18\x03 \x03(\x0b\x32\x17.tensorflow.TensorProtoB-\n\x18org.tensorflow.frameworkB\x0cTensorProtosP\x01\xf8\x01\x01\x62\x06proto3')
  ,
  dependencies=[tensorflow_dot_core_dot_framework_dot_resource__handle__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2.DESCRIPTOR,tensorflow_dot_core_dot_framework_dot_types__pb2.DESCRIPTOR,])
# Descriptor for the tensorflow.TensorProto message; one FieldDescriptor per
# field declared in tensor.proto (generated -- do not hand-edit).
_TENSORPROTO = _descriptor.Descriptor(
  name='TensorProto',
  full_name='tensorflow.TensorProto',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='dtype', full_name='tensorflow.TensorProto.dtype', index=0,
      number=1, type=14, cpp_type=8, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='tensor_shape', full_name='tensorflow.TensorProto.tensor_shape', index=1,
      number=2, type=11, cpp_type=10, label=1,
      has_default_value=False, default_value=None,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='version_number', full_name='tensorflow.TensorProto.version_number', index=2,
      number=3, type=5, cpp_type=1, label=1,
      has_default_value=False, default_value=0,
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='tensor_content', full_name='tensorflow.TensorProto.tensor_content', index=3,
      number=4, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='half_val', full_name='tensorflow.TensorProto.half_val', index=4,
      number=13, type=5, cpp_type=1, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
    _descriptor.FieldDescriptor(
      name='float_val', full_name='tensorflow.TensorProto.float_val', index=5,
      number=5, type=2, cpp_type=6, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
    _descriptor.FieldDescriptor(
      name='double_val', full_name='tensorflow.TensorProto.double_val', index=6,
      number=6, type=1, cpp_type=5, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
    _descriptor.FieldDescriptor(
      name='int_val', full_name='tensorflow.TensorProto.int_val', index=7,
      number=7, type=5, cpp_type=1, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
    _descriptor.FieldDescriptor(
      name='string_val', full_name='tensorflow.TensorProto.string_val', index=8,
      number=8, type=12, cpp_type=9, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='scomplex_val', full_name='tensorflow.TensorProto.scomplex_val', index=9,
      number=9, type=2, cpp_type=6, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
    _descriptor.FieldDescriptor(
      name='int64_val', full_name='tensorflow.TensorProto.int64_val', index=10,
      number=10, type=3, cpp_type=2, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
    _descriptor.FieldDescriptor(
      name='bool_val', full_name='tensorflow.TensorProto.bool_val', index=11,
      number=11, type=8, cpp_type=7, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
    _descriptor.FieldDescriptor(
      name='dcomplex_val', full_name='tensorflow.TensorProto.dcomplex_val', index=12,
      number=12, type=1, cpp_type=5, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=_descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))),
    _descriptor.FieldDescriptor(
      name='resource_handle_val', full_name='tensorflow.TensorProto.resource_handle_val', index=13,
      number=14, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='variant_val', full_name='tensorflow.TensorProto.variant_val', index=14,
      number=15, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=189,
  serialized_end=665,
)
# Descriptor for the tensorflow.VariantTensorDataProto message (generated).
_VARIANTTENSORDATAPROTO = _descriptor.Descriptor(
  name='VariantTensorDataProto',
  full_name='tensorflow.VariantTensorDataProto',
  filename=None,
  file=DESCRIPTOR,
  containing_type=None,
  fields=[
    _descriptor.FieldDescriptor(
      name='type_name', full_name='tensorflow.VariantTensorDataProto.type_name', index=0,
      number=1, type=9, cpp_type=9, label=1,
      has_default_value=False, default_value=_b("").decode('utf-8'),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='metadata', full_name='tensorflow.VariantTensorDataProto.metadata', index=1,
      number=2, type=12, cpp_type=9, label=1,
      has_default_value=False, default_value=_b(""),
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
    _descriptor.FieldDescriptor(
      name='tensors', full_name='tensorflow.VariantTensorDataProto.tensors', index=2,
      number=3, type=11, cpp_type=10, label=3,
      has_default_value=False, default_value=[],
      message_type=None, enum_type=None, containing_type=None,
      is_extension=False, extension_scope=None,
      options=None),
  ],
  extensions=[
  ],
  nested_types=[],
  enum_types=[
  ],
  options=None,
  is_extendable=False,
  syntax='proto3',
  extension_ranges=[],
  oneofs=[
  ],
  serialized_start=667,
  serialized_end=770,
)
# Resolve cross-references between descriptors (enum and message field types).
_TENSORPROTO.fields_by_name['dtype'].enum_type = tensorflow_dot_core_dot_framework_dot_types__pb2._DATATYPE
_TENSORPROTO.fields_by_name['tensor_shape'].message_type = tensorflow_dot_core_dot_framework_dot_tensor__shape__pb2._TENSORSHAPEPROTO
_TENSORPROTO.fields_by_name['resource_handle_val'].message_type = tensorflow_dot_core_dot_framework_dot_resource__handle__pb2._RESOURCEHANDLEPROTO
_TENSORPROTO.fields_by_name['variant_val'].message_type = _VARIANTTENSORDATAPROTO
_VARIANTTENSORDATAPROTO.fields_by_name['tensors'].message_type = _TENSORPROTO
DESCRIPTOR.message_types_by_name['TensorProto'] = _TENSORPROTO
DESCRIPTOR.message_types_by_name['VariantTensorDataProto'] = _VARIANTTENSORDATAPROTO
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
# Generate the concrete message classes from the descriptors and register them.
TensorProto = _reflection.GeneratedProtocolMessageType('TensorProto', (_message.Message,), dict(
  DESCRIPTOR = _TENSORPROTO,
  __module__ = 'tensorflow.core.framework.tensor_pb2'
  # @@protoc_insertion_point(class_scope:tensorflow.TensorProto)
  ))
_sym_db.RegisterMessage(TensorProto)
VariantTensorDataProto = _reflection.GeneratedProtocolMessageType('VariantTensorDataProto', (_message.Message,), dict(
  DESCRIPTOR = _VARIANTTENSORDATAPROTO,
  __module__ = 'tensorflow.core.framework.tensor_pb2'
  # @@protoc_insertion_point(class_scope:tensorflow.VariantTensorDataProto)
  ))
_sym_db.RegisterMessage(VariantTensorDataProto)
# File/field options: Java package naming plus packed encoding ('\020\001')
# for the repeated numeric value fields.
DESCRIPTOR.has_options = True
DESCRIPTOR._options = _descriptor._ParseOptions(descriptor_pb2.FileOptions(), _b('\n\030org.tensorflow.frameworkB\014TensorProtosP\001\370\001\001'))
_TENSORPROTO.fields_by_name['half_val'].has_options = True
_TENSORPROTO.fields_by_name['half_val']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
_TENSORPROTO.fields_by_name['float_val'].has_options = True
_TENSORPROTO.fields_by_name['float_val']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
_TENSORPROTO.fields_by_name['double_val'].has_options = True
_TENSORPROTO.fields_by_name['double_val']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
_TENSORPROTO.fields_by_name['int_val'].has_options = True
_TENSORPROTO.fields_by_name['int_val']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
_TENSORPROTO.fields_by_name['scomplex_val'].has_options = True
_TENSORPROTO.fields_by_name['scomplex_val']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
_TENSORPROTO.fields_by_name['int64_val'].has_options = True
_TENSORPROTO.fields_by_name['int64_val']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
_TENSORPROTO.fields_by_name['bool_val'].has_options = True
_TENSORPROTO.fields_by_name['bool_val']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
_TENSORPROTO.fields_by_name['dcomplex_val'].has_options = True
_TENSORPROTO.fields_by_name['dcomplex_val']._options = _descriptor._ParseOptions(descriptor_pb2.FieldOptions(), _b('\020\001'))
# @@protoc_insertion_point(module_scope)
acf0e66785a10d37a85d56a7adf43656ba552601 | 1,085 | py | Python | uranium/exceptions.py | toumorokoshi/uranium | 2d99deb7762c7a788966637157afcee171fcf6a8 | [
"MIT"
] | 21 | 2016-01-14T04:06:08.000Z | 2021-03-23T01:43:48.000Z | uranium/exceptions.py | toumorokoshi/uranium | 2d99deb7762c7a788966637157afcee171fcf6a8 | [
"MIT"
] | 45 | 2015-02-09T06:02:01.000Z | 2018-07-22T19:16:01.000Z | uranium/exceptions.py | toumorokoshi/uranium | 2d99deb7762c7a788966637157afcee171fcf6a8 | [
"MIT"
] | 10 | 2015-02-07T20:56:22.000Z | 2018-07-20T03:18:07.000Z | class UraniumException(Exception):
pass
class ExitCodeException(UraniumException):
    """Raised to abort execution with a specific process exit code.

    Bailout cases are handled far more cleanly with exceptions than with
    return codes, so non-zero statuses are propagated by raising this and
    letting the top level translate it back into an exit code.
    """

    def __init__(self, source, code):
        super(ExitCodeException, self).__init__("")
        self.source = source
        self.code = code

    def __str__(self):
        return "{0} returned exit code {1}".format(self.source, self.code)
class CacheException(UraniumException):
    """ exception with the cache object (raised for cache failures) """
    pass
class HistoryException(UraniumException):
    """ exception raised for failures in the history subsystem """
    pass
class HooksException(UraniumException):
    """ exception raised for failures while running hooks """
    pass
class PluginException(UraniumException):
    """ an exception that occurred with the plugin """
    pass
class ScriptException(UraniumException):
    """ base class for exceptions raised while executing a script """
    pass
class ConfigException(ScriptException):
    """ script exception caused by invalid configuration """
    pass
class NonZeroExitCodeException(ScriptException):
    """ script exception raised when a command exits with a non-zero status """
    pass
class PackageException(UraniumException):
    """ exceptions with the package object """
    pass
| 18.706897 | 74 | 0.703226 |
acf0e67ace5696209ac314d112ffd3feb9e3c278 | 3,600 | py | Python | sql_graphviz.py | valerio-vaccaro/sql_graphviz | 45169f10a9a766bb1ab871c120fa19819adae392 | [
"MIT"
] | null | null | null | sql_graphviz.py | valerio-vaccaro/sql_graphviz | 45169f10a9a766bb1ab871c120fa19819adae392 | [
"MIT"
] | null | null | null | sql_graphviz.py | valerio-vaccaro/sql_graphviz | 45169f10a9a766bb1ab871c120fa19819adae392 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
import html
import sys
from datetime import datetime
from pyparsing import alphas, alphanums, Literal, Word, Forward, OneOrMore, ZeroOrMore, CharsNotIn, Suppress, QuotedString, Optional
def field_act(s, loc, tok):
    """Parse action: render one column definition as a Graphviz HTML table row.

    The first token is the column name (surrounding quotes stripped); the
    remaining tokens form the type/constraint text, HTML-escaped for the label.
    """
    name = tok[0].replace('"', '')
    spec = html.escape(' '.join(tok[1::]).replace('"', '\\"'))
    row_template = '<tr><td bgcolor="grey96" align="left" port="{0}"><font face="Times-bold">{0}</font> <font color="#535353">{1}</font></td></tr>'
    return row_template.format(name, spec)
def field_list_act(s, loc, tok):
    """Parse action: join the rendered field rows for embedding in the table label."""
    separator = "\n "
    return separator.join(tok)
def create_table_act(s, loc, tok):
    """Parse action: render a parsed CREATE TABLE statement as a Graphviz node.

    Expects the named results ``tableName`` and ``fields`` (the latter already
    rendered to <tr> rows by field_list_act) on the token set.
    """
    return '''
  "{tableName}" [
    shape=none
    label=<
    <table border="0" cellspacing="0" cellborder="1">
      <tr><td bgcolor="lightblue2"><font face="Times-bold" point-size="20">{tableName}</font></td></tr>
      {fields}
    </table>
  >];'''.format(**tok)
def add_fkey_act(s, loc, tok):
    """Parse action: render a foreign-key constraint as a Graphviz edge between table ports."""
    return '  "{tableName}":{keyName} -> "{fkTable}":{fkCol}'.format(**tok)
def other_statement_act(s, loc, tok):
    """Parse action: discard statements we do not visualise (renders nothing)."""
    return ""
def join_string_act(s, loc, tok):
    """Parse action: collapse matched tokens into one string, escaping newlines for DOT."""
    merged = "".join(tok)
    return merged.replace('\n', '\\n')
def quoted_default_value_act(s, loc, tok):
    """Parse action: re-attach the DEFAULT keyword to its quoted value and trailing qualifiers."""
    remainder = "".join(tok[1::])
    return tok[0] + " " + remainder
def grammar():
    """Build the pyparsing grammar for the subset of SQL DDL we visualise.

    Recognises CREATE TABLE statements and ALTER TABLE ... ADD CONSTRAINT
    ... FOREIGN KEY statements; every other statement (and ``--`` comments)
    is matched and discarded.  Each rule's parse action renders the match
    directly to Graphviz DOT text.
    """
    # Nested parentheses (type arguments, CHECK expressions, ...), kept verbatim.
    parenthesis = Forward()
    parenthesis <<= "(" + ZeroOrMore(CharsNotIn("()") | parenthesis) + ")"
    parenthesis.setParseAction(join_string_act)
    # Single-quoted string literal, rejoined into one token.
    quoted_string = "'" + OneOrMore(CharsNotIn("'")) + "'"
    quoted_string.setParseAction(join_string_act)
    # DEFAULT 'value'<qualifiers> -- keeps the quoted value attached to the keyword.
    quoted_default_value = "DEFAULT" + quoted_string + OneOrMore(CharsNotIn(", \n\t"))
    quoted_default_value.setParseAction(quoted_default_value_act)
    # One column definition: name followed by type/constraint tokens.
    field_def = OneOrMore(quoted_default_value | Word(alphanums + "_\"'`:-/[].") | parenthesis)
    field_def.setParseAction(field_act)
    tablename_def = ( Word(alphas + "`_.") | QuotedString("\"") )
    field_list_def = field_def + ZeroOrMore(Suppress(",") + field_def)
    field_list_def.setParseAction(field_list_act)
    create_table_def = Literal("CREATE") + "TABLE" + tablename_def.setResultsName("tableName") + "(" + field_list_def.setResultsName("fields") + ")" + ";"
    create_table_def.setParseAction(create_table_act)
    # ALTER TABLE ONLY <t> ADD CONSTRAINT <c> FOREIGN KEY (<k>) REFERENCES <ft> (<fc>) [...];
    add_fkey_def = Literal("ALTER") + "TABLE" + "ONLY" + tablename_def.setResultsName("tableName") + "ADD" + "CONSTRAINT" + Word(alphanums + "_") + "FOREIGN" + "KEY" + "(" + Word(alphanums + "_").setResultsName("keyName") + ")" + "REFERENCES" + Word(alphanums + "._").setResultsName("fkTable") + "(" + Word(alphanums + "_").setResultsName("fkCol") + ")" + Optional(Literal("DEFERRABLE")) + Optional(Literal("INITIALLY")) + Optional(Literal("DEFERRED")) + Optional(Literal("ON") + "DELETE" + ( Literal("CASCADE") | Literal("RESTRICT") )) + ";"
    add_fkey_def.setParseAction(add_fkey_act)
    # Anything else up to a semicolon is consumed and dropped.
    other_statement_def = OneOrMore(CharsNotIn(";")) + ";"
    other_statement_def.setParseAction(other_statement_act)
    comment_def = "--" + ZeroOrMore(CharsNotIn("\n"))
    comment_def.setParseAction(other_statement_act)
    return OneOrMore(comment_def | create_table_def | add_fkey_def | other_statement_def)
def graphviz(filename):
    """Parse SQL DDL from *filename* and print a complete Graphviz digraph to stdout."""
    # Header comment and digraph preamble.
    print("/*")
    print(" * Graphviz of '%s', created %s" % (filename, datetime.now()))
    print(" * Generated from https://github.com/rm-hull/sql_graphviz")
    print(" */")
    print("digraph g { graph [ rankdir = \"LR\" ];")
    # Each parsed statement renders to a node/edge fragment; discarded
    # statements render as the empty string and are skipped.
    rendered = grammar().setDebug(False).parseFile(filename)
    for fragment in rendered:
        if fragment == "":
            continue
        print(fragment)
    print("}")
if __name__ == '__main__':
    # With no argument, read the DDL from standard input.
    if len(sys.argv) == 1:
        filename = sys.stdin
    else:
        filename = sys.argv[1]
    graphviz(filename)
| 37.5 | 542 | 0.649722 |
acf0e7e55a751afe32875890230bee1ab0dc9b3c | 8,363 | py | Python | api/tests/scheme/test_user.py | mingrammer/pyconkr-api | 3c9fc70ed26008a50d3b4c296a4da84a8f93babb | [
"Apache-2.0"
] | 1 | 2021-01-06T21:22:31.000Z | 2021-01-06T21:22:31.000Z | api/tests/scheme/test_user.py | mingrammer/pyconkr-api | 3c9fc70ed26008a50d3b4c296a4da84a8f93babb | [
"Apache-2.0"
] | null | null | null | api/tests/scheme/test_user.py | mingrammer/pyconkr-api | 3c9fc70ed26008a50d3b4c296a4da84a8f93babb | [
"Apache-2.0"
] | null | null | null | from datetime import timedelta
from json import loads, dumps
from unittest import mock
from django.contrib.auth import get_user_model
from django.utils.timezone import get_current_timezone
from django.utils.timezone import now
from graphql_extensions.exceptions import PermissionDenied
from graphql_jwt.testcases import JSONWebTokenTestCase
from api.tests.base import BaseTestCase
from api.tests.common import generate_mock_response
from api.tests.oauth_app_response import GITHUB_USER_RESPONSE
from api.tests.scheme.user_queries import ME, UPDATE_PROFILE, UPDATE_AGREEMENT, PATRONS
from ticket.models import TicketProduct, Ticket
# Module-level fixtures shared by the tests below: the active Django timezone
# and the project's configured user model.
TIMEZONE = get_current_timezone()
UserModel = get_user_model()
class UserTestCase(BaseTestCase, JSONWebTokenTestCase):
    @mock.patch('api.oauth_tokenbackend.OAuth2Session.fetch_token')
    @mock.patch('api.oauth_tokenbackend.OAuth2Session.get')
    def test_oauth_token_auth(self, mock_get, mock_fetch_token):
        """A GitHub OAuth code exchange through oAuthTokenAuth should yield a JWT.

        Decorators apply bottom-up, so mock_get is the patched ``get`` and
        mock_fetch_token the patched ``fetch_token``.  fetch_token is patched
        only to stub out the network token exchange; the mock itself is unused.
        """
        # Given: the provider's user-info endpoint returns a canned GitHub payload.
        mock_resp = generate_mock_response(
            status=200, json=GITHUB_USER_RESPONSE)
        mock_get.side_effect = [mock_resp]
        # Given
        mutation = '''
        mutation OAuthTokenAuth($oauthType: String!, $clientId: String!, $code: String!, $redirectUri: String!) {
            oAuthTokenAuth(oauthType: $oauthType, clientId: $clientId, code: $code, redirectUri: $redirectUri) {
                token
            }
        }
        '''
        variables = {
            'oauthType': 'github',
            'clientId': 'prod_github_client_id',
            'code': 'CODE',
            'redirectUri': 'REDIRECT_ME'
        }
        # When
        result = self.client.execute(mutation, variables)
        # Then: a token is issued for the freshly authenticated user.
        actual = loads(dumps(result.data))
        self.assertIsNotNone(actual['oAuthTokenAuth']['token'])
def test_update_profile(self):
# Given
variables = {
'data': {
'nameKo': '코니',
'nameEn': 'Coni',
'bioKo': '파이콘 한국을 참석하고 있지요',
'bioEn': 'PyCon Korea Good',
'phone': '010-1111-1111',
'email': 'pyconkr@pycon.kr',
'organization': '파이콘!',
'nationality': '미국',
}
}
user = UserModel(username='develop_github_123', email='me@pycon.kr')
user.save()
self.client.authenticate(user)
result = self.client.execute(
UPDATE_PROFILE, variables)
# Then
actual = loads(dumps(result.data))
self.assertIsNotNone(actual)
profile = actual['updateProfile']['profile']
self.assertEqual(profile['nameKo'], '코니')
self.assertEqual(profile['nameEn'], 'Coni')
self.assertEqual(profile['bioKo'], '파이콘 한국을 참석하고 있지요')
self.assertEqual(profile['bioEn'], 'PyCon Korea Good')
self.assertEqual(profile['phone'], '010-1111-1111')
self.assertEqual(profile['email'], 'pyconkr@pycon.kr')
self.assertEqual(profile['organization'], '파이콘!')
self.assertEqual(profile['nationality'], '미국')
def test_me(self):
# Given
user = UserModel(username='develop_github_123')
user.save()
user.profile.name_ko = '파이콘 천사'
user.profile.name_en = 'pycon_angel'
user.profile.bio_ko = '파이콘 천사입니다.'
user.profile.bio_en = "I'm pycon angel."
user.profile.email = 'me@pycon.kr'
user.profile.phone = '222-2222-2222'
user.profile.organization = '좋은회사'
user.profile.nationality = '우리나라'
user.save()
self.client.authenticate(user)
# When
result = self.client.execute(ME)
# Then
actual = loads(dumps(result.data))
self.assertIsNotNone(actual)
profile = actual['me']['profile']
self.assertEqual(profile['nameKo'], '파이콘 천사')
self.assertEqual(profile['nameEn'], 'pycon_angel')
self.assertEqual(profile['bioKo'], '파이콘 천사입니다.')
self.assertEqual(profile['bioEn'], 'I\'m pycon angel.')
self.assertEqual(profile['email'], 'me@pycon.kr')
self.assertEqual(profile['phone'], '222-2222-2222')
self.assertEqual(profile['organization'], '좋은회사')
self.assertEqual(profile['nationality'], '우리나라')
def test_me_anonymous(self):
# When
actual = self.client.execute(ME)
self.assertIsNotNone(actual.errors)
self.assertIsInstance(actual.errors[0].original_error, PermissionDenied)
def test_agreed_all(self):
# Given
user = UserModel.objects.create(username='develop_github_123')
self.client.authenticate(user)
variable = {
'isPrivacyPolicy': True,
'isTermsOfService': True,
}
result = self.client.execute(UPDATE_AGREEMENT, variable)
self.assertIsNotNone(result.data['updateAgreement'])
self.assertTrue(result.data['updateAgreement']['isAgreedAll'])
def test_WHEN_동의를_다_하지_않으면_THEN_is_agreed_all이_False_여야한다(self):
# Given
user = UserModel.objects.create(username='develop_github_123')
self.client.authenticate(user)
variable = {
'isPrivacyPolicy': False,
'isTermsOfService': True,
}
result = self.client.execute(UPDATE_AGREEMENT, variable)
self.assertIsNotNone(result.data['updateAgreement'])
self.assertFalse(result.data['updateAgreement']['isAgreedAll'])
def test_WHEN_최초에는_THEN_is_agreed_all이_False_여야한다(self):
# Given
user = UserModel.objects.create(username='develop_github_123')
self.assertFalse(user.agreement.is_agreed_all())
def test_patrons_without_patron_product_THEN_error(self):
result = self.client.execute(PATRONS)
self.assertIsNotNone(result.errors)
def test_patrons(self):
user1 = get_user_model().objects.create(
username='user1',
email='me@pycon.kr')
user1.profile.name = 'user1'
user1.save()
user2 = get_user_model().objects.create(
username='user2',
email='me@pycon.kr')
user2.profile.name = 'user2'
user2.save()
user3 = get_user_model().objects.create(
username='user3',
email='me@pycon.kr')
user3.profile.name = 'user3'
user3.save()
user4 = get_user_model().objects.create(
username='user4',
email='me@pycon.kr')
user4.profile.name = 'user4'
user4.save()
user5 = get_user_model().objects.create(
username='user5',
email='me@pycon.kr')
user5.profile.name = 'user5'
user5.save()
user6 = get_user_model().objects.create(
username='user6',
email='me@pycon.kr')
user6.profile.name = 'user6'
user6.save()
product = TicketProduct.objects.create(name='Patron', type=TicketProduct.TYPE_CONFERENCE,
is_editable_price=True, active=True)
Ticket.objects.create(owner=user1, product=product, status=Ticket.STATUS_PAID, amount=3000, paid_at=now())
Ticket.objects.create(owner=user2, product=product, status=Ticket.STATUS_PAID, amount=2000, paid_at=now())
Ticket.objects.create(
owner=user3, product=product, status=Ticket.STATUS_PAID, amount=4000, paid_at=now() - timedelta(days=2))
Ticket.objects.create(
owner=user4, product=product, status=Ticket.STATUS_PAID, amount=4000, paid_at=now() - timedelta(days=3))
Ticket.objects.create(
owner=user5, product=product, status=Ticket.STATUS_PAID, amount=4000, paid_at=now())
Ticket.objects.create(
owner=user6, product=product, status=Ticket.STATUS_CANCELLED, amount=4000, paid_at=now())
result = self.client.execute(PATRONS)
self.assertIsNone(result.errors)
self.assertIsNotNone(result.data['patrons'])
self.assertEqual(5, len(result.data['patrons']))
self.assertEqual('user4', result.data['patrons'][0]['name'])
self.assertEqual('user3', result.data['patrons'][1]['name'])
self.assertEqual('user5', result.data['patrons'][2]['name'])
self.assertEqual('user1', result.data['patrons'][3]['name'])
self.assertEqual('user2', result.data['patrons'][4]['name'])
| 39.448113 | 116 | 0.627646 |
acf0e86403bd1afbd48c4d4c74b4526314ec0b14 | 85,647 | py | Python | lib/galaxy/webapps/galaxy/controllers/admin.py | ClayBirkett/galaxy | b5afa3c1a90d269f1d438ffde481ff2e4178a72b | [
"CC-BY-3.0"
] | 1 | 2019-11-15T01:50:38.000Z | 2019-11-15T01:50:38.000Z | lib/galaxy/webapps/galaxy/controllers/admin.py | userssss/galaxy | 9662164ad68b39adf5a5606a7aa8e388f6a79f1e | [
"CC-BY-3.0"
] | 2 | 2019-04-03T15:37:17.000Z | 2019-04-03T19:37:09.000Z | lib/galaxy/webapps/galaxy/controllers/admin.py | userssss/galaxy | 9662164ad68b39adf5a5606a7aa8e388f6a79f1e | [
"CC-BY-3.0"
] | null | null | null | import imp
import logging
import os
from collections import OrderedDict
from datetime import datetime, timedelta
from string import punctuation as PUNCTUATION
import six
from sqlalchemy import and_, false, or_
from galaxy import (
model,
util,
web
)
from galaxy.actions.admin import AdminActions
from galaxy.exceptions import ActionInputError, MessageException
from galaxy.model import tool_shed_install as install_model
from galaxy.tool_shed.util.repository_util import get_ids_of_tool_shed_repositories_being_installed
from galaxy.util import (
nice_size,
sanitize_text,
url_get
)
from galaxy.util.tool_shed import common_util, encoding_util
from galaxy.web import url_for
from galaxy.web.framework.helpers import grids, time_ago
from galaxy.web.params import QuotaParamParser
from galaxy.webapps.base import controller
from galaxy.webapps.base.controller import UsesQuotaMixin
from tool_shed.util.web_util import escape
log = logging.getLogger(__name__)
class UserListGrid(grids.Grid):
    """Admin grid over ``model.User``: email/username, membership counts,
    login/activation status and API key, with delete/undelete/purge operations."""
    class EmailColumn(grids.TextColumn):
        """HTML-escaped user email."""
        def get_value(self, trans, grid, user):
            return escape(user.email)
    class UserNameColumn(grids.TextColumn):
        """HTML-escaped username, or a placeholder when unset."""
        def get_value(self, trans, grid, user):
            if user.username:
                return escape(user.username)
            return 'not set'
    class StatusColumn(grids.GridColumn):
        """'purged' / 'deleted' / empty for active users (purged implies deleted)."""
        def get_value(self, trans, grid, user):
            if user.purged:
                return "purged"
            elif user.deleted:
                return "deleted"
            return ""
    class GroupsColumn(grids.GridColumn):
        """Number of group associations."""
        def get_value(self, trans, grid, user):
            if user.groups:
                return len(user.groups)
            return 0
    class RolesColumn(grids.GridColumn):
        """Number of role associations."""
        def get_value(self, trans, grid, user):
            if user.roles:
                return len(user.roles)
            return 0
    class ExternalColumn(grids.GridColumn):
        """'yes' when the user is externally authenticated."""
        def get_value(self, trans, grid, user):
            if user.external:
                return 'yes'
            return 'no'
    class LastLoginColumn(grids.GridColumn):
        """Time of the most recent galaxy session, or 'never'."""
        def get_value(self, trans, grid, user):
            if user.galaxy_sessions:
                return self.format(user.galaxy_sessions[0].update_time)
            return 'never'
    class TimeCreatedColumn(grids.GridColumn):
        """Account creation date in locale short-date format."""
        def get_value(self, trans, grid, user):
            return user.create_time.strftime('%x')
    class ActivatedColumn(grids.GridColumn):
        """'Y'/'N' activation flag."""
        def get_value(self, trans, grid, user):
            if user.active:
                return 'Y'
            else:
                return 'N'
    class APIKeyColumn(grids.GridColumn):
        """The user's current API key, if any."""
        def get_value(self, trans, grid, user):
            if user.api_keys:
                return user.api_keys[0].key
            else:
                return ""
    # Grid definition
    title = "Users"
    title_id = "users-grid"
    model_class = model.User
    default_sort_key = "email"
    columns = [
        EmailColumn("Email",
                    key="email",
                    model_class=model.User,
                    link=(lambda item: dict(controller="user", action="information", id=item.id, webapp="galaxy")),
                    attach_popup=True,
                    filterable="advanced",
                    target="top"),
        UserNameColumn("User Name",
                       key="username",
                       model_class=model.User,
                       attach_popup=False,
                       filterable="advanced"),
        GroupsColumn("Groups", attach_popup=False),
        RolesColumn("Roles", attach_popup=False),
        ExternalColumn("External", attach_popup=False),
        LastLoginColumn("Last Login", format=time_ago),
        StatusColumn("Status", attach_popup=False),
        TimeCreatedColumn("Created", attach_popup=False),
        ActivatedColumn("Activated", attach_popup=False),
        APIKeyColumn("API Key", attach_popup=False),
        # Columns that are valid for filtering but are not visible.
        grids.DeletedColumn("Deleted", key="deleted", visible=False, filterable="advanced"),
        grids.PurgedColumn("Purged", key="purged", visible=False, filterable="advanced")
    ]
    columns.append(grids.MulticolFilterColumn("Search",
                                              cols_to_filter=[columns[0], columns[1]],
                                              key="free-text-search",
                                              visible=False,
                                              filterable="standard"))
    global_actions = [
        grids.GridAction("Create new user", url_args=dict(action="users/create"))
    ]
    operations = [
        grids.GridOperation("Manage Information",
                            condition=(lambda item: not item.deleted),
                            allow_multiple=False,
                            url_args=dict(controller="user", action="information", webapp="galaxy")),
        grids.GridOperation("Manage Roles and Groups",
                            condition=(lambda item: not item.deleted),
                            allow_multiple=False,
                            url_args=dict(action="form/manage_roles_and_groups_for_user")),
        grids.GridOperation("Reset Password",
                            condition=(lambda item: not item.deleted),
                            allow_multiple=True,
                            url_args=dict(action="form/reset_user_password"),
                            target="top"),
        grids.GridOperation("Recalculate Disk Usage",
                            condition=(lambda item: not item.deleted),
                            allow_multiple=False),
        grids.GridOperation("Generate New API Key",
                            allow_multiple=False,
                            async_compatible=True)
    ]
    standard_filters = [
        grids.GridColumnFilter("Active", args=dict(deleted=False)),
        grids.GridColumnFilter("Deleted", args=dict(deleted=True, purged=False)),
        grids.GridColumnFilter("Purged", args=dict(purged=True)),
        grids.GridColumnFilter("All", args=dict(deleted='All'))
    ]
    num_rows_per_page = 50
    use_paging = True
    default_filter = dict(purged="False")
    use_default_filter = True
    def get_current_item(self, trans, **kwargs):
        # Highlight the requesting admin's own row.
        return trans.user
class RoleListGrid(grids.Grid):
    """Admin grid over ``model.Role`` (private roles excluded) with rename,
    permission-edit and delete/undelete/purge operations."""
    class NameColumn(grids.TextColumn):
        """HTML-escaped role name."""
        def get_value(self, trans, grid, role):
            return escape(role.name)
    class DescriptionColumn(grids.TextColumn):
        """HTML-escaped description, or empty string."""
        def get_value(self, trans, grid, role):
            if role.description:
                return escape(role.description)
            return ''
    class TypeColumn(grids.TextColumn):
        """Role type (admin / sharing / etc.)."""
        def get_value(self, trans, grid, role):
            return role.type
    class StatusColumn(grids.GridColumn):
        """'deleted' marker, or empty for active roles."""
        def get_value(self, trans, grid, role):
            if role.deleted:
                return "deleted"
            return ""
    class GroupsColumn(grids.GridColumn):
        """Number of group associations."""
        def get_value(self, trans, grid, role):
            if role.groups:
                return len(role.groups)
            return 0
    class UsersColumn(grids.GridColumn):
        """Number of user associations."""
        def get_value(self, trans, grid, role):
            if role.users:
                return len(role.users)
            return 0
    # Grid definition
    title = "Roles"
    title_id = "roles-grid"
    model_class = model.Role
    default_sort_key = "name"
    columns = [
        NameColumn("Name",
                   key="name",
                   link=(lambda item: dict(action="form/manage_users_and_groups_for_role", id=item.id, webapp="galaxy")),
                   model_class=model.Role,
                   attach_popup=True,
                   filterable="advanced",
                   target="top"),
        DescriptionColumn("Description",
                          key='description',
                          model_class=model.Role,
                          attach_popup=False,
                          filterable="advanced"),
        TypeColumn("Type",
                   key='type',
                   model_class=model.Role,
                   attach_popup=False,
                   filterable="advanced"),
        GroupsColumn("Groups", attach_popup=False),
        UsersColumn("Users", attach_popup=False),
        StatusColumn("Status", attach_popup=False),
        # Columns that are valid for filtering but are not visible.
        grids.DeletedColumn("Deleted", key="deleted", visible=False, filterable="advanced"),
        grids.GridColumn("Last Updated", key="update_time", format=time_ago)
    ]
    columns.append(grids.MulticolFilterColumn("Search",
                                              cols_to_filter=[columns[0], columns[1], columns[2]],
                                              key="free-text-search",
                                              visible=False,
                                              filterable="standard"))
    global_actions = [
        grids.GridAction("Add new role", url_args=dict(action="form/create_role"))
    ]
    operations = [grids.GridOperation("Edit Name/Description",
                                      condition=(lambda item: not item.deleted),
                                      allow_multiple=False,
                                      url_args=dict(action="form/rename_role")),
                  grids.GridOperation("Edit Permissions",
                                      condition=(lambda item: not item.deleted),
                                      allow_multiple=False,
                                      url_args=dict(action="form/manage_users_and_groups_for_role", webapp="galaxy")),
                  grids.GridOperation("Delete",
                                      condition=(lambda item: not item.deleted),
                                      allow_multiple=True),
                  grids.GridOperation("Undelete",
                                      condition=(lambda item: item.deleted),
                                      allow_multiple=True),
                  grids.GridOperation("Purge",
                                      condition=(lambda item: item.deleted),
                                      allow_multiple=True)]
    standard_filters = [
        grids.GridColumnFilter("Active", args=dict(deleted=False)),
        grids.GridColumnFilter("Deleted", args=dict(deleted=True)),
        grids.GridColumnFilter("All", args=dict(deleted='All'))
    ]
    num_rows_per_page = 50
    use_paging = True
    def apply_query_filter(self, trans, query, **kwargs):
        # Private (per-user) roles are managed implicitly and never shown here.
        return query.filter(model.Role.type != model.Role.types.PRIVATE)
class GroupListGrid(grids.Grid):
    """Admin grid over ``model.Group`` with rename, permission-edit and
    delete/undelete/purge operations."""
    class NameColumn(grids.TextColumn):
        """HTML-escaped group name."""
        def get_value(self, trans, grid, group):
            return escape(group.name)
    class StatusColumn(grids.GridColumn):
        """'deleted' marker, or empty for active groups."""
        def get_value(self, trans, grid, group):
            if group.deleted:
                return "deleted"
            return ""
    class RolesColumn(grids.GridColumn):
        """Number of role associations."""
        def get_value(self, trans, grid, group):
            if group.roles:
                return len(group.roles)
            return 0
    class UsersColumn(grids.GridColumn):
        """Number of member users."""
        def get_value(self, trans, grid, group):
            if group.members:
                return len(group.members)
            return 0
    # Grid definition
    title = "Groups"
    title_id = "groups-grid"
    model_class = model.Group
    default_sort_key = "name"
    columns = [
        NameColumn("Name",
                   key="name",
                   link=(lambda item: dict(action="form/manage_users_and_roles_for_group", id=item.id, webapp="galaxy")),
                   model_class=model.Group,
                   attach_popup=True,
                   filterable="advanced"),
        UsersColumn("Users", attach_popup=False),
        RolesColumn("Roles", attach_popup=False),
        StatusColumn("Status", attach_popup=False),
        # Columns that are valid for filtering but are not visible.
        grids.DeletedColumn("Deleted", key="deleted", visible=False, filterable="advanced"),
        grids.GridColumn("Last Updated", key="update_time", format=time_ago)
    ]
    columns.append(grids.MulticolFilterColumn("Search",
                                              cols_to_filter=[columns[0]],
                                              key="free-text-search",
                                              visible=False,
                                              filterable="standard"))
    global_actions = [
        grids.GridAction("Add new group", url_args=dict(action="form/create_group"))
    ]
    operations = [grids.GridOperation("Edit Name",
                                      condition=(lambda item: not item.deleted),
                                      allow_multiple=False,
                                      url_args=dict(action="form/rename_group")),
                  grids.GridOperation("Edit Permissions",
                                      condition=(lambda item: not item.deleted),
                                      allow_multiple=False,
                                      url_args=dict(action="form/manage_users_and_roles_for_group", webapp="galaxy")),
                  grids.GridOperation("Delete",
                                      condition=(lambda item: not item.deleted),
                                      allow_multiple=True),
                  grids.GridOperation("Undelete",
                                      condition=(lambda item: item.deleted),
                                      allow_multiple=True),
                  grids.GridOperation("Purge",
                                      condition=(lambda item: item.deleted),
                                      allow_multiple=True)]
    standard_filters = [
        grids.GridColumnFilter("Active", args=dict(deleted=False)),
        grids.GridColumnFilter("Deleted", args=dict(deleted=True)),
        grids.GridColumnFilter("All", args=dict(deleted='All'))
    ]
    num_rows_per_page = 50
    use_paging = True
class QuotaListGrid(grids.Grid):
    """Admin grid over ``model.Quota``: amount, default-class status and
    user/group association counts, with rename/edit/default/delete operations."""
    class NameColumn(grids.TextColumn):
        """HTML-escaped quota name."""
        def get_value(self, trans, grid, quota):
            return escape(quota.name)
    class DescriptionColumn(grids.TextColumn):
        """HTML-escaped description, or empty string."""
        def get_value(self, trans, grid, quota):
            if quota.description:
                return escape(quota.description)
            return ''
    class AmountColumn(grids.TextColumn):
        """Operation symbol (=, +, -) concatenated with the human-readable amount."""
        def get_value(self, trans, grid, quota):
            return quota.operation + quota.display_amount
    class StatusColumn(grids.GridColumn):
        """'deleted', or a note about which user class this quota is the default for."""
        def get_value(self, trans, grid, quota):
            if quota.deleted:
                return "deleted"
            elif quota.default:
                return "<strong>default for %s users</strong>" % quota.default[0].type
            return ""
    class UsersColumn(grids.GridColumn):
        """Number of user associations."""
        def get_value(self, trans, grid, quota):
            if quota.users:
                return len(quota.users)
            return 0
    class GroupsColumn(grids.GridColumn):
        """Number of group associations."""
        def get_value(self, trans, grid, quota):
            if quota.groups:
                return len(quota.groups)
            return 0
    # Grid definition
    title = "Quotas"
    model_class = model.Quota
    default_sort_key = "name"
    columns = [
        NameColumn("Name",
                   key="name",
                   link=(lambda item: dict(action="form/edit_quota", id=item.id)),
                   model_class=model.Quota,
                   attach_popup=True,
                   filterable="advanced"),
        DescriptionColumn("Description",
                          key='description',
                          model_class=model.Quota,
                          attach_popup=False,
                          filterable="advanced"),
        AmountColumn("Amount",
                     key='amount',
                     model_class=model.Quota,
                     attach_popup=False),
        UsersColumn("Users", attach_popup=False),
        GroupsColumn("Groups", attach_popup=False),
        StatusColumn("Status", attach_popup=False),
        # Columns that are valid for filtering but are not visible.
        grids.DeletedColumn("Deleted", key="deleted", visible=False, filterable="advanced")
    ]
    columns.append(grids.MulticolFilterColumn("Search",
                                              cols_to_filter=[columns[0], columns[1]],
                                              key="free-text-search",
                                              visible=False,
                                              filterable="standard"))
    global_actions = [
        grids.GridAction("Add new quota", dict(action="form/create_quota"))
    ]
    operations = [grids.GridOperation("Rename",
                                      condition=(lambda item: not item.deleted),
                                      allow_multiple=False,
                                      url_args=dict(action="form/rename_quota")),
                  grids.GridOperation("Change amount",
                                      condition=(lambda item: not item.deleted),
                                      allow_multiple=False,
                                      url_args=dict(action="form/edit_quota")),
                  grids.GridOperation("Manage users and groups",
                                      condition=(lambda item: not item.default and not item.deleted),
                                      allow_multiple=False,
                                      url_args=dict(action="form/manage_users_and_groups_for_quota")),
                  grids.GridOperation("Set as different type of default",
                                      condition=(lambda item: item.default),
                                      allow_multiple=False,
                                      url_args=dict(action="form/set_quota_default")),
                  grids.GridOperation("Set as default",
                                      condition=(lambda item: not item.default and not item.deleted),
                                      allow_multiple=False,
                                      url_args=dict(action="form/set_quota_default")),
                  grids.GridOperation("Unset as default",
                                      condition=(lambda item: item.default and not item.deleted),
                                      allow_multiple=False),
                  grids.GridOperation("Delete",
                                      condition=(lambda item: not item.deleted and not item.default),
                                      allow_multiple=True),
                  grids.GridOperation("Undelete",
                                      condition=(lambda item: item.deleted),
                                      allow_multiple=True),
                  grids.GridOperation("Purge",
                                      condition=(lambda item: item.deleted),
                                      allow_multiple=True)]
    standard_filters = [
        grids.GridColumnFilter("Active", args=dict(deleted=False)),
        grids.GridColumnFilter("Deleted", args=dict(deleted=True)),
        grids.GridColumnFilter("Purged", args=dict(purged=True)),
        grids.GridColumnFilter("All", args=dict(deleted='All'))
    ]
    num_rows_per_page = 50
    use_paging = True
class ToolVersionListGrid(grids.Grid):
    """Read-only admin grid over ``install_model.ToolVersion`` showing each tool
    id (linked when loaded in the toolbox) and its version lineage."""
    class ToolIdColumn(grids.TextColumn):
        """Tool id, rendered as a tool-runner link when the tool is currently loaded."""
        def get_value(self, trans, grid, tool_version):
            toolbox = trans.app.toolbox
            if toolbox.has_tool(tool_version.tool_id, exact=True):
                link = url_for(controller='tool_runner', tool_id=tool_version.tool_id)
                link_str = '<a target="_blank" href="%s">' % link
                return '<div class="count-box state-color-ok">%s%s</a></div>' % (link_str, tool_version.tool_id)
            return tool_version.tool_id
    class ToolVersionsColumn(grids.TextColumn):
        """All tool ids in this tool's lineage, linked when loaded in the toolbox."""
        def get_value(self, trans, grid, tool_version):
            tool_ids_str = ''
            toolbox = trans.app.toolbox
            tool = toolbox._tools_by_id.get(tool_version.tool_id)
            if tool:
                for tool_id in tool.lineage.tool_ids:
                    if toolbox.has_tool(tool_id, exact=True):
                        link = url_for(controller='tool_runner', tool_id=tool_id)
                        link_str = '<a target="_blank" href="%s">' % link
                        tool_ids_str += '<div class="count-box state-color-ok">%s%s</a></div><br/>' % (link_str, tool_id)
                    else:
                        tool_ids_str += '%s<br/>' % tool_version.tool_id
            else:
                tool_ids_str += '%s<br/>' % tool_version.tool_id
            return tool_ids_str
    # Grid definition
    title = "Tool versions"
    model_class = install_model.ToolVersion
    default_sort_key = "tool_id"
    columns = [
        ToolIdColumn("Tool id",
                     key='tool_id',
                     attach_popup=False),
        ToolVersionsColumn("Version lineage by tool id (parent/child ordered)")
    ]
    columns.append(grids.MulticolFilterColumn("Search tool id",
                                              cols_to_filter=[columns[0]],
                                              key="free-text-search",
                                              visible=False,
                                              filterable="standard"))
    global_actions = []
    operations = []
    standard_filters = []
    default_filter = {}
    num_rows_per_page = 50
    use_paging = True
    def build_initial_query(self, trans, **kwd):
        # ToolVersion lives in the separate tool-shed install database.
        return trans.install_model.context.query(self.model_class)
class AdminGalaxy(controller.JSAppLauncher, AdminActions, UsesQuotaMixin, QuotaParamParser):
user_list_grid = UserListGrid()
role_list_grid = RoleListGrid()
group_list_grid = GroupListGrid()
quota_list_grid = QuotaListGrid()
tool_version_list_grid = ToolVersionListGrid()
delete_operation = grids.GridOperation("Delete", condition=(lambda item: not item.deleted and not item.purged), allow_multiple=True)
undelete_operation = grids.GridOperation("Undelete", condition=(lambda item: item.deleted and not item.purged), allow_multiple=True)
purge_operation = grids.GridOperation("Purge", condition=(lambda item: item.deleted and not item.purged), allow_multiple=True)
impersonate_operation = grids.GridOperation(
"Impersonate",
url_args=dict(controller="admin", action="impersonate"),
condition=(lambda item: not item.deleted and not item.purged),
allow_multiple=False
)
    @web.expose
    @web.require_admin
    def index(self, trans, **kwd):
        """Admin panel entry point; delegates to the clientside admin app."""
        return self.client(trans, **kwd)
@web.expose
@web.require_admin
def client(self, trans, **kwd):
"""
Endpoint for admin clientside routes.
"""
message = escape(kwd.get('message', ''))
status = kwd.get('status', 'done')
settings = {
'is_repo_installed': trans.install_model.context.query(trans.install_model.ToolShedRepository).first() is not None,
'installing_repository_ids': get_ids_of_tool_shed_repositories_being_installed(trans.app, as_string=True),
'is_tool_shed_installed': bool(trans.app.tool_shed_registry and trans.app.tool_shed_registry.tool_sheds)
}
return self._bootstrapped_client(trans, app_name='admin', settings=settings, message=message, status=status)
@web.expose
@web.json
@web.require_admin
def data_tables_list(self, trans, **kwd):
data = []
message = kwd.get('message', '')
status = kwd.get('status', 'done')
sorted_data_tables = sorted(
trans.app.tool_data_tables.get_tables().items()
)
for data_table_elem_name, data_table in sorted_data_tables:
for filename, file_dict in data_table.filenames.items():
file_missing = ['file missing'] \
if not file_dict.get('found') else []
data.append({
'name': data_table.name,
'filename': filename,
'tool_data_path': file_dict.get('tool_data_path'),
'errors': ', '.join(file_missing + [
error for error in file_dict.get('errors', [])
]),
})
return {'data': data, 'message': message, 'status': status}
@web.expose
@web.json
@web.require_admin
def data_types_list(self, trans, **kwd):
datatypes = []
keys = set()
message = kwd.get('message', '')
status = kwd.get('status', 'done')
for dtype in sorted(trans.app.datatypes_registry.datatype_elems,
key=lambda dtype: dtype.get('extension')):
datatypes.append(dtype.attrib)
keys |= set(dtype.attrib)
return {'keys': list(keys), 'data': datatypes, 'message': message, 'status': status}
    @web.expose
    @web.json
    @web.require_admin
    def users_list(self, trans, **kwd):
        """Render the user grid, first dispatching any bulk grid operation
        (delete / undelete / purge / recalculate disk usage / generate new api key).

        Operations gated by config flags (deletion, impersonation) are appended
        to the shared grid definition lazily on each request.
        """
        message = kwd.get('message', '')
        status = kwd.get('status', '')
        if 'operation' in kwd:
            id = kwd.get('id')
            if not id:
                message, status = ('Invalid user id (%s) received.' % str(id), 'error')
            ids = util.listify(id)
            operation = kwd['operation'].lower()
            if operation == 'delete':
                message, status = self._delete_user(trans, ids)
            elif operation == 'undelete':
                message, status = self._undelete_user(trans, ids)
            elif operation == 'purge':
                message, status = self._purge_user(trans, ids)
            elif operation == 'recalculate disk usage':
                # Single-user operations receive the raw id, not the list.
                message, status = self._recalculate_user(trans, id)
            elif operation == 'generate new api key':
                message, status = self._new_user_apikey(trans, id)
        # The membership checks keep the class-level operations list from
        # accumulating duplicates across requests.
        if trans.app.config.allow_user_deletion:
            if self.delete_operation not in self.user_list_grid.operations:
                self.user_list_grid.operations.append(self.delete_operation)
            if self.undelete_operation not in self.user_list_grid.operations:
                self.user_list_grid.operations.append(self.undelete_operation)
            if self.purge_operation not in self.user_list_grid.operations:
                self.user_list_grid.operations.append(self.purge_operation)
        if trans.app.config.allow_user_impersonation:
            if self.impersonate_operation not in self.user_list_grid.operations:
                self.user_list_grid.operations.append(self.impersonate_operation)
        if message and status:
            kwd['message'] = util.sanitize_text(message)
            kwd['status'] = status
        return self.user_list_grid(trans, **kwd)
    @web.legacy_expose_api
    @web.require_admin
    def quotas_list(self, trans, payload=None, **kwargs):
        """Render the quota grid, first dispatching any bulk grid operation
        (delete / undelete / purge / unset as default) on the selected quotas.
        """
        message = kwargs.get('message', '')
        status = kwargs.get('status', '')
        if 'operation' in kwargs:
            id = kwargs.get('id')
            if not id:
                return self.message_exception(trans, 'Invalid quota id (%s) received.' % str(id))
            # Resolve every selected id up front so a bad id aborts the whole batch.
            quotas = []
            for quota_id in util.listify(id):
                try:
                    quotas.append(get_quota(trans, quota_id))
                except MessageException as e:
                    return self.message_exception(trans, util.unicodify(e))
            operation = kwargs.pop('operation').lower()
            try:
                if operation == 'delete':
                    message = self._delete_quota(quotas)
                elif operation == 'undelete':
                    message = self._undelete_quota(quotas)
                elif operation == 'purge':
                    message = self._purge_quota(quotas)
                elif operation == 'unset as default':
                    # Only one quota can be a default; operate on the first selection.
                    message = self._unset_quota_default(quotas[0])
            except ActionInputError as e:
                message, status = (e.err_msg, 'error')
        if message:
            kwargs['message'] = util.sanitize_text(message)
            kwargs['status'] = status or 'done'
        return self.quota_list_grid(trans, **kwargs)
    @web.legacy_expose_api
    @web.require_admin
    def create_quota(self, trans, payload=None, **kwd):
        """GET: build the quota-creation form definition (name, description,
        amount, operation, default class, user/group selectors).
        Non-GET: create the quota from the submitted ``payload``.
        """
        if trans.request.method == 'GET':
            all_users = []
            all_groups = []
            # Offer every non-deleted user/group as an association candidate.
            for user in trans.sa_session.query(trans.app.model.User) \
                                        .filter(trans.app.model.User.table.c.deleted == false()) \
                                        .order_by(trans.app.model.User.table.c.email):
                all_users.append((user.email, trans.security.encode_id(user.id)))
            for group in trans.sa_session.query(trans.app.model.Group) \
                                         .filter(trans.app.model.Group.table.c.deleted == false()) \
                                         .order_by(trans.app.model.Group.table.c.name):
                all_groups.append((group.name, trans.security.encode_id(group.id)))
            default_options = [('No', 'no')]
            # One "Yes, <type>" entry per default-quota user class (e.g. registered/unregistered).
            for typ in trans.app.model.DefaultQuotaAssociation.types.__dict__.values():
                default_options.append(('Yes, ' + typ, typ))
            return {'title' : 'Create Quota',
                    'inputs' : [
                        {
                            'name' : 'name',
                            'label' : 'Name'
                        }, {
                            'name' : 'description',
                            'label' : 'Description'
                        }, {
                            'name' : 'amount',
                            'label' : 'Amount',
                            'help' : 'Examples: "10000MB", "99 gb", "0.2T", "unlimited"'
                        }, {
                            'name' : 'operation',
                            'label' : 'Assign, increase by amount, or decrease by amount?',
                            'options' : [('=', '='), ('+', '+'), ('-', '-')]
                        }, {
                            'name' : 'default',
                            'label' : 'Is this quota a default for a class of users (if yes, what type)?',
                            'options' : default_options,
                            'help' : 'Warning: Any users or groups associated with this quota will be disassociated.'
                        },
                        build_select_input('in_groups', 'Groups', all_groups, []),
                        build_select_input('in_users', 'Users', all_users, [])]}
        else:
            try:
                quota, message = self._create_quota(util.Params(payload), decode_id=trans.security.decode_id)
                return {'message': message}
            except ActionInputError as e:
                return self.message_exception(trans, e.err_msg)
@web.legacy_expose_api
@web.require_admin
def rename_quota(self, trans, payload=None, **kwd):
id = kwd.get('id')
if not id:
return self.message_exception(trans, 'No quota id received for renaming.')
quota = get_quota(trans, id)
if trans.request.method == 'GET':
return {
'title' : 'Change quota name and description for \'%s\'' % util.sanitize_text(quota.name),
'inputs' : [{
'name' : 'name',
'label' : 'Name',
'value' : quota.name
}, {
'name' : 'description',
'label' : 'Description',
'value' : quota.description
}]
}
else:
try:
return {'message': self._rename_quota(quota, util.Params(payload))}
except ActionInputError as e:
return self.message_exception(trans, e.err_msg)
    @web.legacy_expose_api
    @web.require_admin
    def manage_users_and_groups_for_quota(self, trans, payload=None, **kwd):
        """GET: build the association form listing all users/groups with the
        quota's current members pre-selected.
        Non-GET: replace the quota's user/group associations from ``payload``.
        """
        quota_id = kwd.get('id')
        if not quota_id:
            return self.message_exception(trans, 'Invalid quota id (%s) received' % str(quota_id))
        quota = get_quota(trans, quota_id)
        if trans.request.method == 'GET':
            in_users = []
            all_users = []
            in_groups = []
            all_groups = []
            for user in trans.sa_session.query(trans.app.model.User) \
                                        .filter(trans.app.model.User.table.c.deleted == false()) \
                                        .order_by(trans.app.model.User.table.c.email):
                # Pre-select users already associated with this quota.
                if user in [x.user for x in quota.users]:
                    in_users.append(trans.security.encode_id(user.id))
                all_users.append((user.email, trans.security.encode_id(user.id)))
            for group in trans.sa_session.query(trans.app.model.Group) \
                                         .filter(trans.app.model.Group.table.c.deleted == false()) \
                                         .order_by(trans.app.model.Group.table.c.name):
                if group in [x.group for x in quota.groups]:
                    in_groups.append(trans.security.encode_id(group.id))
                all_groups.append((group.name, trans.security.encode_id(group.id)))
            return {'title' : 'Quota \'%s\'' % quota.name,
                    'message': 'Quota \'%s\' is currently associated with %d user(s) and %d group(s).' %
                               (quota.name, len(in_users), len(in_groups)),
                    'status' : 'info',
                    'inputs' : [build_select_input('in_groups', 'Groups', all_groups, in_groups),
                                build_select_input('in_users', 'Users', all_users, in_users)]}
        else:
            try:
                return {'message': self._manage_users_and_groups_for_quota(quota, util.Params(payload), decode_id=trans.security.decode_id)}
            except ActionInputError as e:
                return self.message_exception(trans, e.err_msg)
@web.legacy_expose_api
@web.require_admin
def edit_quota(self, trans, payload=None, **kwd):
id = kwd.get('id')
if not id:
return self.message_exception(trans, 'No quota id received for renaming.')
quota = get_quota(trans, id)
if trans.request.method == 'GET':
return {
'title' : 'Edit quota size for \'%s\'' % util.sanitize_text(quota.name),
'inputs' : [{
'name' : 'amount',
'label' : 'Amount',
'value' : quota.display_amount,
'help' : 'Examples: "10000MB", "99 gb", "0.2T", "unlimited"'
}, {
'name' : 'operation',
'label' : 'Assign, increase by amount, or decrease by amount?',
'options' : [('=', '='), ('+', '+'), ('-', '-')],
'value' : quota.operation
}]
}
else:
try:
return {'message': self._edit_quota(quota, util.Params(payload))}
except ActionInputError as e:
return self.message_exception(trans, e.err_msg)
@web.legacy_expose_api
@web.require_admin
def set_quota_default(self, trans, payload=None, **kwd):
id = kwd.get('id')
if not id:
return self.message_exception(trans, 'No quota id received for renaming.')
quota = get_quota(trans, id)
if trans.request.method == 'GET':
default_value = quota.default[0].type if quota.default else 'no'
default_options = [('No', 'no')]
for typ in trans.app.model.DefaultQuotaAssociation.types.__dict__.values():
default_options.append(('Yes, ' + typ, typ))
return {
'title' : 'Set quota default for \'%s\'' % util.sanitize_text(quota.name),
'inputs' : [{
'name' : 'default',
'label' : 'Assign, increase by amount, or decrease by amount?',
'options' : default_options,
'value' : default_value,
'help' : 'Warning: Any users or groups associated with this quota will be disassociated.'
}]
}
else:
try:
return {'message': self._set_quota_default(quota, util.Params(payload))}
except ActionInputError as e:
return self.message_exception(trans, e.err_msg)
    @web.expose
    @web.require_admin
    def impersonate(self, trans, **kwd):
        """Log the admin out and log them in as the selected user.

        Guarded by the ``allow_user_impersonation`` config flag; any failure
        redirects back to the users page with an error message.
        """
        if not trans.app.config.allow_user_impersonation:
            return trans.show_error_message("User impersonation is not enabled in this instance of Galaxy.")
        user = None
        user_id = kwd.get('id', None)
        if user_id is not None:
            try:
                user = trans.sa_session.query(trans.app.model.User).get(trans.security.decode_id(user_id))
                if user:
                    # Order matters: end the admin's session before starting the target user's.
                    trans.handle_user_logout()
                    trans.handle_user_login(user)
                    return trans.show_message('You are now logged in as %s, <a target="_top" href="%s">return to the home page</a>' % (user.email, url_for(controller='root')), use_panels=True)
            except Exception:
                log.exception("Error fetching user for impersonation")
        return trans.response.send_redirect(web.url_for(controller='admin',
                                                        action='users',
                                                        message="Invalid user selected", status="error"))
    def check_for_tool_dependencies(self, trans, migration_stage):
        """Collect tool-shed dependency info for one tool migration stage.

        Parses the ``%04d_tools.xml`` file for *migration_stage* and, for each
        ``<repository>`` element, queries the configured tool shed for that
        repository's tool dependencies.

        :returns: list of ``(repository_name, [(name, version, type, readme), ...])`` tuples
        """
        # Get the 000x_tools.xml file associated with migration_stage.
        tools_xml_file_path = os.path.abspath(os.path.join(common_util.TOOL_MIGRATION_SCRIPTS_DIR, '%04d_tools.xml' % migration_stage))
        tree = util.parse_xml(tools_xml_file_path)
        root = tree.getroot()
        tool_shed = root.get('name')
        shed_url = common_util.get_tool_shed_url_from_tool_shed_registry(trans.app, tool_shed)
        repo_name_dependency_tups = []
        if shed_url:
            for elem in root:
                if elem.tag == 'repository':
                    tool_dependencies = []
                    tool_dependencies_dict = {}
                    repository_name = elem.get('name')
                    changeset_revision = elem.get('changeset_revision')
                    # NOTE(review): owner is hard-coded to 'devteam' — presumably all
                    # migration-stage repositories share that owner; confirm.
                    params = dict(name=repository_name, owner='devteam', changeset_revision=changeset_revision)
                    pathspec = ['repository', 'get_tool_dependencies']
                    text = url_get(shed_url, password_mgr=self.app.tool_shed_registry.url_auth(shed_url), pathspec=pathspec, params=params)
                    if text:
                        tool_dependencies_dict = encoding_util.tool_shed_decode(text)
                        for dependency_key, requirements_dict in tool_dependencies_dict.items():
                            tool_dependency_name = requirements_dict['name']
                            tool_dependency_version = requirements_dict['version']
                            tool_dependency_type = requirements_dict['type']
                            # readme is optional in the shed's response.
                            tool_dependency_readme = requirements_dict.get('readme', '')
                            tool_dependencies.append((tool_dependency_name, tool_dependency_version, tool_dependency_type, tool_dependency_readme))
                    repo_name_dependency_tups.append((repository_name, tool_dependencies))
        return repo_name_dependency_tups
    @web.expose
    @web.require_admin
    def review_tool_migration_stages(self, trans, **kwd):
        """Render an overview of tool migration stages, newest first.

        Each ``NNNN_tools.py`` migration script (except stage 0001) contributes
        its module docstring plus the tool dependencies reported by the tool
        shed via ``check_for_tool_dependencies``.
        """
        message = escape(util.restore_text(kwd.get('message', '')))
        status = util.restore_text(kwd.get('status', 'done'))
        migration_stages_dict = OrderedDict()
        # FIXME: this isn't valid in an installed context
        migration_scripts_dir = os.path.abspath(os.path.join(trans.app.config.root, 'lib', 'galaxy', 'tool_shed', 'galaxy_install', 'migrate', 'versions'))
        modules = os.listdir(migration_scripts_dir)
        # Sort then reverse so the newest migration stage appears first.
        modules.sort()
        modules.reverse()
        for item in modules:
            if not item.endswith('_tools.py') or item.startswith('0001_tools'):
                continue
            module = item.replace('.py', '')
            migration_stage = int(module.replace('_tools', ''))
            repo_name_dependency_tups = self.check_for_tool_dependencies(trans, migration_stage)
            # Load the migration script only to read its module docstring.
            # (imp is deprecated on Python 3; kept for compatibility here.)
            open_file_obj, file_name, description = imp.find_module(module, [migration_scripts_dir])
            imported_module = imp.load_module('upgrade', open_file_obj, file_name, description)
            migration_info = imported_module.__doc__
            open_file_obj.close()
            migration_stages_dict[migration_stage] = (migration_info, repo_name_dependency_tups)
        return trans.fill_template('admin/review_tool_migration_stages.mako',
                                   migration_stages_dict=migration_stages_dict,
                                   message=message,
                                   status=status)
@web.expose
@web.require_admin
def center(self, trans, **kwd):
message = escape(kwd.get('message', ''))
status = kwd.get('status', 'done')
is_repo_installed = trans.install_model.context.query(trans.install_model.ToolShedRepository).first() is not None
installing_repository_ids = get_ids_of_tool_shed_repositories_being_installed(trans.app, as_string=True)
return trans.fill_template('/webapps/galaxy/admin/center.mako',
is_repo_installed=is_repo_installed,
installing_repository_ids=installing_repository_ids,
message=message,
status=status)
    @web.legacy_expose_api
    @web.require_admin
    def tool_versions_list(self, trans, **kwd):
        """Delegate rendering of the tool-version grid."""
        return self.tool_version_list_grid(trans, **kwd)
    @web.expose
    @web.json
    @web.require_admin
    def roles_list(self, trans, **kwargs):
        """Render the roles grid, applying any requested bulk operation first.

        ``kwargs['operation']`` may request delete / undelete / purge of the
        roles named by ``kwargs['id']`` (single encoded id or list).
        """
        message = kwargs.get('message')
        status = kwargs.get('status')
        if 'operation' in kwargs:
            id = kwargs.get('id', None)
            if not id:
                # NOTE(review): unlike groups_list this does not return early,
                # so the operation proceeds with an empty id list — confirm intended.
                message, status = ('Invalid role id (%s) received.' % str(id), 'error')
            ids = util.listify(id)
            # Grid operations arrive URL-encoded (e.g. 'Mark+as+deleted').
            operation = kwargs['operation'].lower().replace('+', ' ')
            if operation == 'delete':
                message, status = self._delete_role(trans, ids)
            elif operation == 'undelete':
                message, status = self._undelete_role(trans, ids)
            elif operation == 'purge':
                message, status = self._purge_role(trans, ids)
        if message and status:
            kwargs['message'] = util.sanitize_text(message)
            kwargs['status'] = status
        return self.role_list_grid(trans, **kwargs)
@web.legacy_expose_api
@web.require_admin
def create_role(self, trans, payload=None, **kwd):
if trans.request.method == 'GET':
all_users = []
all_groups = []
for user in trans.sa_session.query(trans.app.model.User) \
.filter(trans.app.model.User.table.c.deleted == false()) \
.order_by(trans.app.model.User.table.c.email):
all_users.append((user.email, trans.security.encode_id(user.id)))
for group in trans.sa_session.query(trans.app.model.Group) \
.filter(trans.app.model.Group.table.c.deleted == false()) \
.order_by(trans.app.model.Group.table.c.name):
all_groups.append((group.name, trans.security.encode_id(group.id)))
return {
'title' : 'Create Role',
'inputs' : [{
'name' : 'name',
'label' : 'Name'
}, {
'name' : 'description',
'label' : 'Description'
},
build_select_input('in_groups', 'Groups', all_groups, []),
build_select_input('in_users', 'Users', all_users, []), {
'name' : 'auto_create',
'label' : 'Create a new group of the same name for this role:',
'type' : 'boolean'
}]}
else:
name = util.restore_text(payload.get('name', ''))
description = util.restore_text(payload.get('description', ''))
auto_create_checked = payload.get('auto_create') == 'true'
in_users = [trans.sa_session.query(trans.app.model.User).get(trans.security.decode_id(x)) for x in util.listify(payload.get('in_users'))]
in_groups = [trans.sa_session.query(trans.app.model.Group).get(trans.security.decode_id(x)) for x in util.listify(payload.get('in_groups'))]
if not name or not description:
return self.message_exception(trans, 'Enter a valid name and a description.')
elif trans.sa_session.query(trans.app.model.Role).filter(trans.app.model.Role.table.c.name == name).first():
return self.message_exception(trans, 'Role names must be unique and a role with that name already exists, so choose another name.')
elif None in in_users or None in in_groups:
return self.message_exception(trans, 'One or more invalid user/group id has been provided.')
else:
# Create the role
role = trans.app.model.Role(name=name, description=description, type=trans.app.model.Role.types.ADMIN)
trans.sa_session.add(role)
# Create the UserRoleAssociations
for user in in_users:
ura = trans.app.model.UserRoleAssociation(user, role)
trans.sa_session.add(ura)
# Create the GroupRoleAssociations
for group in in_groups:
gra = trans.app.model.GroupRoleAssociation(group, role)
trans.sa_session.add(gra)
if auto_create_checked:
# Check if role with same name already exists
if trans.sa_session.query(trans.app.model.Group).filter(trans.app.model.Group.table.c.name == name).first():
return self.message_exception(trans, 'A group with that name already exists, so choose another name or disable group creation.')
# Create the group
group = trans.app.model.Group(name=name)
trans.sa_session.add(group)
# Associate the group with the role
gra = trans.model.GroupRoleAssociation(group, role)
trans.sa_session.add(gra)
num_in_groups = len(in_groups) + 1
else:
num_in_groups = len(in_groups)
trans.sa_session.flush()
message = 'Role \'%s\' has been created with %d associated users and %d associated groups.' % (role.name, len(in_users), num_in_groups)
if auto_create_checked:
message += 'One of the groups associated with this role is the newly created group with the same name.'
return {'message' : message}
@web.legacy_expose_api
@web.require_admin
def rename_role(self, trans, payload=None, **kwd):
id = kwd.get('id')
if not id:
return self.message_exception(trans, 'No role id received for renaming.')
role = get_role(trans, id)
if trans.request.method == 'GET':
return {
'title' : 'Change role name and description for \'%s\'' % util.sanitize_text(role.name),
'inputs' : [{
'name' : 'name',
'label' : 'Name',
'value' : role.name
}, {
'name' : 'description',
'label' : 'Description',
'value' : role.description
}]
}
else:
old_name = role.name
new_name = util.restore_text(payload.get('name'))
new_description = util.restore_text(payload.get('description'))
if not new_name:
return self.message_exception(trans, 'Enter a valid role name.')
else:
existing_role = trans.sa_session.query(trans.app.model.Role).filter(trans.app.model.Role.table.c.name == new_name).first()
if existing_role and existing_role.id != role.id:
return self.message_exception(trans, 'A role with that name already exists.')
else:
if not (role.name == new_name and role.description == new_description):
role.name = new_name
role.description = new_description
trans.sa_session.add(role)
trans.sa_session.flush()
return {'message': 'Role \'%s\' has been renamed to \'%s\'.' % (old_name, new_name)}
    @web.legacy_expose_api
    @web.require_admin
    def manage_users_and_groups_for_role(self, trans, payload=None, **kwd):
        """Show (GET) or update the users and groups associated with a role.

        On update, users removed from the role also lose any default user /
        default history permissions that referenced the role, before the
        security agent rewrites the associations.
        """
        role_id = kwd.get('id')
        if not role_id:
            return self.message_exception(trans, 'Invalid role id (%s) received' % str(role_id))
        role = get_role(trans, role_id)
        if trans.request.method == 'GET':
            in_users = []
            all_users = []
            in_groups = []
            all_groups = []
            for user in trans.sa_session.query(trans.app.model.User) \
                                        .filter(trans.app.model.User.table.c.deleted == false()) \
                                        .order_by(trans.app.model.User.table.c.email):
                if user in [x.user for x in role.users]:
                    in_users.append(trans.security.encode_id(user.id))
                all_users.append((user.email, trans.security.encode_id(user.id)))
            for group in trans.sa_session.query(trans.app.model.Group) \
                                         .filter(trans.app.model.Group.table.c.deleted == false()) \
                                         .order_by(trans.app.model.Group.table.c.name):
                if group in [x.group for x in role.groups]:
                    in_groups.append(trans.security.encode_id(group.id))
                all_groups.append((group.name, trans.security.encode_id(group.id)))
            return {'title'  : 'Role \'%s\'' % role.name,
                    'message': 'Role \'%s\' is currently associated with %d user(s) and %d group(s).' %
                    (role.name, len(in_users), len(in_groups)),
                    'status' : 'info',
                    'inputs' : [build_select_input('in_groups', 'Groups', all_groups, in_groups),
                                build_select_input('in_users', 'Users', all_users, in_users)]}
        else:
            in_users = [trans.sa_session.query(trans.app.model.User).get(trans.security.decode_id(x)) for x in util.listify(payload.get('in_users'))]
            in_groups = [trans.sa_session.query(trans.app.model.Group).get(trans.security.decode_id(x)) for x in util.listify(payload.get('in_groups'))]
            if None in in_users or None in in_groups:
                return self.message_exception(trans, 'One or more invalid user/group id has been provided.')
            for ura in role.users:
                user = trans.sa_session.query(trans.app.model.User).get(ura.user_id)
                if user not in in_users:
                    # Delete DefaultUserPermissions for previously associated users that have been removed from the role
                    for dup in user.default_permissions:
                        if role == dup.role:
                            trans.sa_session.delete(dup)
                    # Delete DefaultHistoryPermissions for previously associated users that have been removed from the role
                    for history in user.histories:
                        for dhp in history.default_permissions:
                            if role == dhp.role:
                                trans.sa_session.delete(dhp)
                    # Flush per removed user so the permission deletions are persisted
                    # before the association rewrite below.
                    trans.sa_session.flush()
            trans.app.security_agent.set_entity_role_associations(roles=[role], users=in_users, groups=in_groups)
            trans.sa_session.refresh(role)
            return {'message' : 'Role \'%s\' has been updated with %d associated users and %d associated groups.' % (role.name, len(in_users), len(in_groups))}
def _delete_role(self, trans, ids):
message = 'Deleted %d roles: ' % len(ids)
for role_id in ids:
role = get_role(trans, role_id)
role.deleted = True
trans.sa_session.add(role)
trans.sa_session.flush()
message += ' %s ' % role.name
return (message, 'done')
def _undelete_role(self, trans, ids):
count = 0
undeleted_roles = ""
for role_id in ids:
role = get_role(trans, role_id)
if not role.deleted:
return ("Role '%s' has not been deleted, so it cannot be undeleted." % role.name, "error")
role.deleted = False
trans.sa_session.add(role)
trans.sa_session.flush()
count += 1
undeleted_roles += " %s" % role.name
return ("Undeleted %d roles: %s" % (count, undeleted_roles), "done")
    def _purge_role(self, trans, ids):
        """Purge previously-deleted roles; return a (message, status) tuple.

        Removes every record that references each role; the Role row itself
        is kept (still marked deleted).  Refuses to purge a role that has not
        first been deleted.
        """
        # This method should only be called for a Role that has previously been deleted.
        # Purging a deleted Role deletes all of the following from the database:
        # - UserRoleAssociations where role_id == Role.id
        # - DefaultUserPermissions where role_id == Role.id
        # - DefaultHistoryPermissions where role_id == Role.id
        # - GroupRoleAssociations where role_id == Role.id
        # - DatasetPermissionss where role_id == Role.id
        message = "Purged %d roles: " % len(ids)
        for role_id in ids:
            role = get_role(trans, role_id)
            if not role.deleted:
                return ("Role '%s' has not been deleted, so it cannot be purged." % role.name, "error")
            # Delete UserRoleAssociations
            for ura in role.users:
                user = trans.sa_session.query(trans.app.model.User).get(ura.user_id)
                # Delete DefaultUserPermissions for associated users
                for dup in user.default_permissions:
                    if role == dup.role:
                        trans.sa_session.delete(dup)
                # Delete DefaultHistoryPermissions for associated users
                for history in user.histories:
                    for dhp in history.default_permissions:
                        if role == dhp.role:
                            trans.sa_session.delete(dhp)
                trans.sa_session.delete(ura)
            # Delete GroupRoleAssociations
            for gra in role.groups:
                trans.sa_session.delete(gra)
            # Delete DatasetPermissionss
            for dp in role.dataset_actions:
                trans.sa_session.delete(dp)
            # Flush per role so each role's purge is committed as a unit.
            trans.sa_session.flush()
            message += " %s " % role.name
        return (message, "done")
    @web.legacy_expose_api
    @web.require_admin
    def groups_list(self, trans, **kwargs):
        """Render the groups grid, applying any requested bulk operation first.

        ``kwargs['operation']`` may request delete / undelete / purge of the
        groups named by ``kwargs['id']`` (single encoded id or list).
        """
        message = kwargs.get('message')
        status = kwargs.get('status')
        if 'operation' in kwargs:
            id = kwargs.get('id')
            if not id:
                return self.message_exception(trans, 'Invalid group id (%s) received.' % str(id))
            ids = util.listify(id)
            # Grid operations arrive URL-encoded (e.g. 'Mark+as+deleted').
            operation = kwargs['operation'].lower().replace('+', ' ')
            if operation == 'delete':
                message, status = self._delete_group(trans, ids)
            elif operation == 'undelete':
                message, status = self._undelete_group(trans, ids)
            elif operation == 'purge':
                message, status = self._purge_group(trans, ids)
        if message and status:
            kwargs['message'] = util.sanitize_text(message)
            kwargs['status'] = status
        return self.group_list_grid(trans, **kwargs)
@web.legacy_expose_api
@web.require_admin
def rename_group(self, trans, payload=None, **kwd):
id = kwd.get('id')
if not id:
return self.message_exception(trans, 'No group id received for renaming.')
group = get_group(trans, id)
if trans.request.method == 'GET':
return {
'title' : 'Change group name for \'%s\'' % util.sanitize_text(group.name),
'inputs' : [{
'name' : 'name',
'label' : 'Name',
'value' : group.name
}]
}
else:
old_name = group.name
new_name = util.restore_text(payload.get('name'))
if not new_name:
return self.message_exception(trans, 'Enter a valid group name.')
else:
existing_group = trans.sa_session.query(trans.app.model.Group).filter(trans.app.model.Group.table.c.name == new_name).first()
if existing_group and existing_group.id != group.id:
return self.message_exception(trans, 'A group with that name already exists.')
else:
if not (group.name == new_name):
group.name = new_name
trans.sa_session.add(group)
trans.sa_session.flush()
return {'message': 'Group \'%s\' has been renamed to \'%s\'.' % (old_name, new_name)}
    @web.legacy_expose_api
    @web.require_admin
    def manage_users_and_roles_for_group(self, trans, payload=None, **kwd):
        """Show (GET) or update the users and roles associated with a group.

        The GET form lists all non-deleted users/roles with the group's current
        members pre-selected; the update path rewrites the associations via
        the security agent.
        """
        group_id = kwd.get('id')
        if not group_id:
            return self.message_exception(trans, 'Invalid group id (%s) received' % str(group_id))
        group = get_group(trans, group_id)
        if trans.request.method == 'GET':
            in_users = []
            all_users = []
            in_roles = []
            all_roles = []
            for user in trans.sa_session.query(trans.app.model.User) \
                                        .filter(trans.app.model.User.table.c.deleted == false()) \
                                        .order_by(trans.app.model.User.table.c.email):
                if user in [x.user for x in group.users]:
                    in_users.append(trans.security.encode_id(user.id))
                all_users.append((user.email, trans.security.encode_id(user.id)))
            for role in trans.sa_session.query(trans.app.model.Role) \
                                        .filter(trans.app.model.Role.table.c.deleted == false()) \
                                        .order_by(trans.app.model.Role.table.c.name):
                if role in [x.role for x in group.roles]:
                    in_roles.append(trans.security.encode_id(role.id))
                all_roles.append((role.name, trans.security.encode_id(role.id)))
            return {'title'  : 'Group \'%s\'' % group.name,
                    'message': 'Group \'%s\' is currently associated with %d user(s) and %d role(s).' %
                    (group.name, len(in_users), len(in_roles)),
                    'status' : 'info',
                    'inputs' : [build_select_input('in_roles', 'Roles', all_roles, in_roles),
                                build_select_input('in_users', 'Users', all_users, in_users)]}
        else:
            in_users = [trans.sa_session.query(trans.app.model.User).get(trans.security.decode_id(x)) for x in util.listify(payload.get('in_users'))]
            in_roles = [trans.sa_session.query(trans.app.model.Role).get(trans.security.decode_id(x)) for x in util.listify(payload.get('in_roles'))]
            if None in in_users or None in in_roles:
                return self.message_exception(trans, 'One or more invalid user/role id has been provided.')
            trans.app.security_agent.set_entity_group_associations(groups=[group], users=in_users, roles=in_roles)
            trans.sa_session.refresh(group)
            return {'message' : 'Group \'%s\' has been updated with %d associated users and %d associated roles.' % (group.name, len(in_users), len(in_roles))}
@web.legacy_expose_api
@web.require_admin
def create_group(self, trans, payload=None, **kwd):
if trans.request.method == 'GET':
all_users = []
all_roles = []
for user in trans.sa_session.query(trans.app.model.User) \
.filter(trans.app.model.User.table.c.deleted == false()) \
.order_by(trans.app.model.User.table.c.email):
all_users.append((user.email, trans.security.encode_id(user.id)))
for role in trans.sa_session.query(trans.app.model.Role) \
.filter(trans.app.model.Role.table.c.deleted == false()) \
.order_by(trans.app.model.Role.table.c.name):
all_roles.append((role.name, trans.security.encode_id(role.id)))
return {
'title' : 'Create Group',
'title_id' : 'create-group',
'inputs' : [{
'name' : 'name',
'label' : 'Name'
},
build_select_input('in_roles', 'Roles', all_roles, []),
build_select_input('in_users', 'Users', all_users, []), {
'name' : 'auto_create',
'label' : 'Create a new role of the same name for this group:',
'type' : 'boolean'
}]
}
else:
name = util.restore_text(payload.get('name', ''))
auto_create_checked = payload.get('auto_create') == 'true'
in_users = [trans.sa_session.query(trans.app.model.User).get(trans.security.decode_id(x)) for x in util.listify(payload.get('in_users'))]
in_roles = [trans.sa_session.query(trans.app.model.Role).get(trans.security.decode_id(x)) for x in util.listify(payload.get('in_roles'))]
if not name:
return self.message_exception(trans, 'Enter a valid name.')
elif trans.sa_session.query(trans.app.model.Group).filter(trans.app.model.Group.table.c.name == name).first():
return self.message_exception(trans, 'Group names must be unique and a group with that name already exists, so choose another name.')
elif None in in_users or None in in_roles:
return self.message_exception(trans, 'One or more invalid user/role id has been provided.')
else:
# Create the role
group = trans.app.model.Group(name=name)
trans.sa_session.add(group)
# Create the UserRoleAssociations
for user in in_users:
uga = trans.app.model.UserGroupAssociation(user, group)
trans.sa_session.add(uga)
# Create the GroupRoleAssociations
for role in in_roles:
gra = trans.app.model.GroupRoleAssociation(group, role)
trans.sa_session.add(gra)
if auto_create_checked:
# Check if role with same name already exists
if trans.sa_session.query(trans.app.model.Role).filter(trans.app.model.Role.table.c.name == name).first():
return self.message_exception(trans, 'A role with that name already exists, so choose another name or disable role creation.')
# Create the role
role = trans.app.model.Role(name=name, description='Role for group %s' % name)
trans.sa_session.add(role)
# Associate the group with the role
gra = trans.model.GroupRoleAssociation(group, role)
trans.sa_session.add(gra)
num_in_roles = len(in_roles) + 1
else:
num_in_roles = len(in_roles)
trans.sa_session.flush()
message = 'Group \'%s\' has been created with %d associated users and %d associated roles.' % (group.name, len(in_users), num_in_roles)
if auto_create_checked:
message += 'One of the roles associated with this group is the newly created role with the same name.'
return {'message' : message}
def _delete_group(self, trans, ids):
message = 'Deleted %d groups: ' % len(ids)
for group_id in ids:
group = get_group(trans, group_id)
group.deleted = True
trans.sa_session.add(group)
trans.sa_session.flush()
message += ' %s ' % group.name
return (message, 'done')
def _undelete_group(self, trans, ids):
count = 0
undeleted_groups = ""
for group_id in ids:
group = get_group(trans, group_id)
if not group.deleted:
return ("Group '%s' has not been deleted, so it cannot be undeleted." % group.name, "error")
group.deleted = False
trans.sa_session.add(group)
trans.sa_session.flush()
count += 1
undeleted_groups += " %s" % group.name
return ("Undeleted %d groups: %s" % (count, undeleted_groups), "done")
def _purge_group(self, trans, ids):
message = "Purged %d groups: " % len(ids)
for group_id in ids:
group = get_group(trans, group_id)
if not group.deleted:
return ("Group '%s' has not been deleted, so it cannot be purged." % group.name, "error")
# Delete UserGroupAssociations
for uga in group.users:
trans.sa_session.delete(uga)
# Delete GroupRoleAssociations
for gra in group.roles:
trans.sa_session.delete(gra)
trans.sa_session.flush()
message += " %s " % group.name
return (message, "done")
    @web.expose
    @web.require_admin
    def create_new_user(self, trans, **kwd):
        """Redirect to the user controller's create form in the admin context."""
        return trans.response.send_redirect(web.url_for(controller='user',
                                                        action='create',
                                                        cntrller='admin'))
@web.legacy_expose_api
@web.require_admin
def reset_user_password(self, trans, payload=None, **kwd):
users = {user_id: get_user(trans, user_id) for user_id in util.listify(kwd.get('id'))}
if users:
if trans.request.method == 'GET':
return {
'message': 'Changes password(s) for: %s.' % ', '.join(user.email for user in users.values()),
'status' : 'info',
'inputs' : [{'name' : 'password', 'label' : 'New password', 'type' : 'password'},
{'name' : 'confirm', 'label' : 'Confirm password', 'type' : 'password'}]
}
else:
password = payload.get('password')
confirm = payload.get('confirm')
if len(password) < 6:
return self.message_exception(trans, 'Use a password of at least 6 characters.')
elif password != confirm:
return self.message_exception(trans, 'Passwords do not match.')
for user in users.values():
user.set_password_cleartext(password)
trans.sa_session.add(user)
trans.sa_session.flush()
return {'message': 'Passwords reset for %d user(s).' % len(users)}
else:
return self.message_exception(trans, 'Please specify user ids.')
def _delete_user(self, trans, ids):
message = 'Deleted %d users: ' % len(ids)
for user_id in ids:
user = get_user(trans, user_id)
# Actually do the delete
self.user_manager.delete(user)
# Accumulate messages for the return message
message += ' %s ' % user.email
return (message, 'done')
def _undelete_user(self, trans, ids):
count = 0
undeleted_users = ""
for user_id in ids:
user = get_user(trans, user_id)
# Actually do the undelete
self.user_manager.undelete(user)
# Count and accumulate messages to return to the admin panel
count += 1
undeleted_users += ' %s' % user.email
message = 'Undeleted %d users: %s' % (count, undeleted_users)
return (message, 'done')
def _purge_user(self, trans, ids):
# This method should only be called for a User that has previously been deleted.
# We keep the User in the database ( marked as purged ), and stuff associated
# with the user's private role in case we want the ability to unpurge the user
# some time in the future.
# Purging a deleted User deletes all of the following:
# - History where user_id = User.id
# - HistoryDatasetAssociation where history_id = History.id
# - UserGroupAssociation where user_id == User.id
# - UserRoleAssociation where user_id == User.id EXCEPT FOR THE PRIVATE ROLE
# - UserAddress where user_id == User.id
# Purging Histories and Datasets must be handled via the cleanup_datasets.py script
message = 'Purged %d users: ' % len(ids)
for user_id in ids:
user = get_user(trans, user_id)
self.user_manager.purge(user)
message += '\t%s\n ' % user.email
return (message, 'done')
def _recalculate_user(self, trans, user_id):
user = trans.sa_session.query(trans.model.User).get(trans.security.decode_id(user_id))
if not user:
return ('User not found for id (%s)' % sanitize_text(str(user_id)), 'error')
current = user.get_disk_usage()
user.calculate_and_set_disk_usage()
new = user.get_disk_usage()
if new in (current, None):
message = 'Usage is unchanged at %s.' % nice_size(current)
else:
message = 'Usage has changed by %s to %s.' % (nice_size(new - current), nice_size(new))
return (message, 'done')
def _new_user_apikey(self, trans, user_id):
user = trans.sa_session.query(trans.model.User).get(trans.security.decode_id(user_id))
if not user:
return ('User not found for id (%s)' % sanitize_text(str(user_id)), 'error')
new_key = trans.app.model.APIKeys(
user_id=trans.security.decode_id(user_id),
key=trans.app.security.get_new_guid()
)
trans.sa_session.add(new_key)
trans.sa_session.flush()
return ("New key '%s' generated for requested user '%s'." % (new_key.key, user.email), "done")
    @web.legacy_expose_api
    @web.require_admin
    def manage_roles_and_groups_for_user(self, trans, payload=None, **kwd):
        """Show (GET) or update the roles and groups associated with a user.

        The user's private role is always preserved: it is re-added if the
        submitted role list omits it, and the reported role counts subtract
        one so the private role is never surfaced.
        """
        user_id = kwd.get('id')
        if not user_id:
            return self.message_exception(trans, 'Invalid user id (%s) received' % str(user_id))
        user = get_user(trans, user_id)
        if trans.request.method == 'GET':
            in_roles = []
            all_roles = []
            in_groups = []
            all_groups = []
            for role in trans.sa_session.query(trans.app.model.Role).filter(trans.app.model.Role.table.c.deleted == false()) \
                                        .order_by(trans.app.model.Role.table.c.name):
                if role in [x.role for x in user.roles]:
                    in_roles.append(trans.security.encode_id(role.id))
                if role.type != trans.app.model.Role.types.PRIVATE:
                    # There is a 1 to 1 mapping between a user and a PRIVATE role, so private roles should
                    # not be listed in the roles form fields, except for the currently selected user's private
                    # role, which should always be in in_roles.  The check above is added as an additional
                    # precaution, since for a period of time we were including private roles in the form fields.
                    all_roles.append((role.name, trans.security.encode_id(role.id)))
            for group in trans.sa_session.query(trans.app.model.Group).filter(trans.app.model.Group.table.c.deleted == false()) \
                                         .order_by(trans.app.model.Group.table.c.name):
                if group in [x.group for x in user.groups]:
                    in_groups.append(trans.security.encode_id(group.id))
                all_groups.append((group.name, trans.security.encode_id(group.id)))
            # len(in_roles) - 1 hides the always-present private role from the count.
            return {'title'  : 'Roles and groups for \'%s\'' % user.email,
                    'message': 'User \'%s\' is currently associated with %d role(s) and is a member of %d group(s).' %
                    (user.email, len(in_roles) - 1, len(in_groups)),
                    'status' : 'info',
                    'inputs' : [build_select_input('in_roles', 'Roles', all_roles, in_roles),
                                build_select_input('in_groups', 'Groups', all_groups, in_groups)]}
        else:
            in_roles = [trans.sa_session.query(trans.app.model.Role).get(trans.security.decode_id(x)) for x in util.listify(payload.get('in_roles'))]
            in_groups = [trans.sa_session.query(trans.app.model.Group).get(trans.security.decode_id(x)) for x in util.listify(payload.get('in_groups'))]
            if None in in_groups or None in in_roles:
                return self.message_exception(trans, 'One or more invalid role/group id has been provided.')
            # make sure the user is not dis-associating himself from his private role
            private_role = trans.app.security_agent.get_private_user_role(user)
            if private_role not in in_roles:
                in_roles.append(private_role)
            trans.app.security_agent.set_entity_user_associations(users=[user], roles=in_roles, groups=in_groups)
            trans.sa_session.refresh(user)
            return {'message' : 'User \'%s\' has been updated with %d associated roles and %d associated groups (private roles are not displayed).' % (user.email, len(in_roles) - 1, len(in_groups))}
@web.expose
@web.json
@web.require_admin
def jobs_control(self, trans, job_lock=None, **kwd):
if job_lock is not None:
job_lock = True if job_lock == 'true' else False
trans.app.queue_worker.send_control_task('admin_job_lock', kwargs={'job_lock': job_lock}, get_response=True)
job_lock = trans.app.job_manager.job_lock
return {'job_lock': job_lock}
@web.expose
@web.json
@web.require_admin
def jobs_list(self, trans, stop=[], stop_msg=None, cutoff=180, **kwd):
deleted = []
message = kwd.get('message', '')
status = kwd.get('status', 'info')
job_ids = util.listify(stop)
if job_ids and stop_msg in [None, '']:
message = 'Please enter an error message to display to the user describing why the job was terminated'
return self.message_exception(trans, message)
elif job_ids:
if stop_msg[-1] not in PUNCTUATION:
stop_msg += '.'
for job_id in job_ids:
error_msg = "This job was stopped by an administrator: %s <a href='%s' target='_blank'>Contact support</a> for additional help." \
% (stop_msg, self.app.config.get("support_url", "https://galaxyproject.org/support/"))
if trans.app.config.track_jobs_in_database:
job = trans.sa_session.query(trans.app.model.Job).get(job_id)
job.job_stderr = error_msg
job.set_state(trans.app.model.Job.states.DELETED_NEW)
trans.sa_session.add(job)
else:
trans.app.job_manager.stop(job, message=error_msg)
deleted.append(str(job_id))
if deleted:
message = 'Queued job'
if len(deleted) > 1:
message += 's'
message += ' for deletion: '
message += ', '.join(deleted)
status = 'done'
trans.sa_session.flush()
job_lock = trans.app.job_manager.job_lock
cutoff_time = datetime.utcnow() - timedelta(seconds=int(cutoff))
jobs = trans.sa_session.query(trans.app.model.Job) \
.filter(and_(trans.app.model.Job.table.c.update_time < cutoff_time,
or_(trans.app.model.Job.state == trans.app.model.Job.states.NEW,
trans.app.model.Job.state == trans.app.model.Job.states.QUEUED,
trans.app.model.Job.state == trans.app.model.Job.states.RUNNING,
trans.app.model.Job.state == trans.app.model.Job.states.UPLOAD))) \
.order_by(trans.app.model.Job.table.c.update_time.desc()).all()
recent_jobs = trans.sa_session.query(trans.app.model.Job) \
.filter(and_(trans.app.model.Job.table.c.update_time > cutoff_time,
or_(trans.app.model.Job.state == trans.app.model.Job.states.ERROR,
trans.app.model.Job.state == trans.app.model.Job.states.OK))) \
.order_by(trans.app.model.Job.table.c.update_time.desc()).all()
def prepare_jobs_list(jobs):
res = []
for job in jobs:
delta = datetime.utcnow() - job.update_time
update_time = ""
if delta.days > 0:
update_time = '%s hours ago' % (delta.days * 24 + int(delta.seconds / 60 / 60))
elif delta > timedelta(minutes=59):
update_time = '%s hours ago' % int(delta.seconds / 60 / 60)
else:
update_time = '%s minutes ago' % int(delta.seconds / 60)
inputs = ""
try:
inputs = ", ".join(['{} {}'.format(da.dataset.id, da.dataset.state) for da in job.input_datasets])
except Exception:
inputs = 'Unable to determine inputs'
res.append({
'job_info': {
'id': job.id,
'info_url': "{}?jobid={}".format(web.url_for(controller="admin", action="job_info"), job.id)
},
'user': job.history.user.email if job.history and job.history.user else 'anonymous',
'update_time': update_time,
'tool_id': job.tool_id,
'state': job.state,
'input_dataset': inputs,
'command_line': job.command_line,
'job_runner_name': job.job_runner_name,
'job_runner_external_id': job.job_runner_external_id
})
return res
return {'jobs': prepare_jobs_list(jobs),
'recent_jobs': prepare_jobs_list(recent_jobs),
'cutoff': cutoff,
'message': message,
'status': status,
'job_lock': job_lock}
@web.expose
@web.require_admin
def job_info(self, trans, jobid=None):
job = None
if jobid is not None:
job = trans.sa_session.query(trans.app.model.Job).get(jobid)
return trans.fill_template('/webapps/reports/job_info.mako',
job=job,
message="<a href='jobs'>Back</a>")
    @web.expose
    @web.require_admin
    def manage_tool_dependencies(self,
                                 trans,
                                 install_dependencies=False,
                                 uninstall_dependencies=False,
                                 remove_unused_dependencies=False,
                                 selected_tool_ids=None,
                                 selected_environments_to_uninstall=None,
                                 viewkey='View tool-centric dependencies'):
        """Render the tool-dependency management page, first applying any
        requested install/uninstall of the selected tools' dependencies or
        removal of unused dependency environments.
        """
        if not selected_tool_ids:
            selected_tool_ids = []
        if not selected_environments_to_uninstall:
            selected_environments_to_uninstall = []
        tools_by_id = trans.app.toolbox.tools_by_id.copy()
        # Grab the dependency-manager view from an arbitrary tool
        # (presumably shared across all tools — confirm).
        view = six.next(six.itervalues(trans.app.toolbox.tools_by_id))._view
        if selected_tool_ids:
            # install the dependencies for the tools in the selected_tool_ids list
            if not isinstance(selected_tool_ids, list):
                selected_tool_ids = [selected_tool_ids]
            # A set ensures each distinct requirement set is processed once.
            requirements = set([tools_by_id[tid].tool_requirements for tid in selected_tool_ids])
            if install_dependencies:
                [view.install_dependencies(r) for r in requirements]
            elif uninstall_dependencies:
                [view.uninstall_dependencies(index=None, requirements=r) for r in requirements]
        if selected_environments_to_uninstall and remove_unused_dependencies:
            if not isinstance(selected_environments_to_uninstall, list):
                selected_environments_to_uninstall = [selected_environments_to_uninstall]
            view.remove_unused_dependency_paths(selected_environments_to_uninstall)
        return trans.fill_template('/webapps/galaxy/admin/manage_dependencies.mako',
                                   tools=tools_by_id,
                                   requirements_status=view.toolbox_requirements_status,
                                   tool_ids_by_requirements=view.tool_ids_by_requirements,
                                   unused_environments=view.unused_dependency_paths,
                                   viewkey=viewkey)
@web.expose
@web.require_admin
def sanitize_whitelist(self, trans, submit_whitelist=False, tools_to_whitelist=[]):
if submit_whitelist:
# write the configured sanitize_whitelist_file with new whitelist
# and update in-memory list.
with open(trans.app.config.sanitize_whitelist_file, 'wt') as f:
if isinstance(tools_to_whitelist, six.string_types):
tools_to_whitelist = [tools_to_whitelist]
new_whitelist = sorted([tid for tid in tools_to_whitelist if tid in trans.app.toolbox.tools_by_id])
f.write("\n".join(new_whitelist))
trans.app.config.sanitize_whitelist = new_whitelist
trans.app.queue_worker.send_control_task('reload_sanitize_whitelist', noop_self=True)
# dispatch a message to reload list for other processes
return trans.fill_template('/webapps/galaxy/admin/sanitize_whitelist.mako',
sanitize_all=trans.app.config.sanitize_all_html,
tools=trans.app.toolbox.tools_by_id)
# ---- Utility methods -------------------------------------------------------
def build_select_input(name, label, options, value):
    """Return the parameter dictionary describing a multi-select form input.

    :param name: form field name.
    :param label: human-readable label shown next to the field.
    :param options: available options for the select widget.
    :param value: currently selected value(s).
    """
    return {
        'type': 'select',
        'multiple': True,
        'optional': True,
        'individual': True,
        'name': name,
        'label': label,
        'options': options,
        'value': value,
    }
def get_user(trans, user_id):
    """Get a User from the database by encoded id.

    Returns an error-message page instead when no such user exists; the
    message echoes the original (encoded) id.
    """
    decoded_id = trans.security.decode_id(user_id)
    user = trans.sa_session.query(trans.model.User).get(decoded_id)
    if user:
        return user
    return trans.show_error_message("User not found for id (%s)" % str(user_id))
def get_role(trans, id):
    """Get a Role from the database by encoded id.

    Returns an error-message page (echoing the decoded id) when no such
    role exists.
    """
    decoded_id = trans.security.decode_id(id)
    role = trans.sa_session.query(trans.model.Role).get(decoded_id)
    if not role:
        return trans.show_error_message("Role not found for id (%s)" % str(decoded_id))
    return role
def get_group(trans, id):
    """Get a Group from the database by encoded id.

    Returns an error-message page (echoing the decoded id) when no such
    group exists.
    """
    decoded_id = trans.security.decode_id(id)
    group = trans.sa_session.query(trans.model.Group).get(decoded_id)
    if not group:
        return trans.show_error_message("Group not found for id (%s)" % str(decoded_id))
    return group
def get_quota(trans, id):
    """Get a Quota from the database by encoded id (None when absent)."""
    decoded_id = trans.security.decode_id(id)
    # NOTE(review): unlike get_role/get_group, no not-found error page is
    # produced here; callers receive None for unknown ids — confirm intended.
    return trans.sa_session.query(trans.model.Quota).get(decoded_id)
| 49.53557 | 198 | 0.554812 |
acf0e9b72550e3acf0fe5605b9db6b4ab37259c9 | 2,826 | py | Python | molecule/command/syntax.py | westurner/molecule | 1babb77a8785192be38ab122e8206a0e53777b83 | [
"MIT"
] | null | null | null | molecule/command/syntax.py | westurner/molecule | 1babb77a8785192be38ab122e8206a0e53777b83 | [
"MIT"
] | null | null | null | molecule/command/syntax.py | westurner/molecule | 1babb77a8785192be38ab122e8206a0e53777b83 | [
"MIT"
] | null | null | null | # Copyright (c) 2015-2018 Cisco Systems, Inc.
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER
# DEALINGS IN THE SOFTWARE.
"""Syntax Command Module."""
import click
from molecule import logger
from molecule.command import base
LOG = logger.get_logger(__name__)
class Syntax(base.Base):
    """
    Syntax Command Class.
    .. program:: molecule syntax
    .. option:: molecule syntax
    Target the default scenario.
    .. program:: molecule syntax --scenario-name foo
    .. option:: molecule syntax --scenario-name foo
    Targeting a specific scenario.
    .. program:: molecule --debug syntax
    .. option:: molecule --debug syntax
    Executing with `debug`.
    .. program:: molecule --base-config base.yml syntax
    .. option:: molecule --base-config base.yml syntax
    Executing with a `base-config`.
    .. program:: molecule --env-file foo.yml syntax
    .. option:: molecule --env-file foo.yml syntax
    Load an env file to read variables from when rendering
    molecule.yml.
    """
    def execute(self):
        """Execute the actions necessary to perform a `molecule syntax` and \
        returns None.
        :return: None
        """
        self.print_info()
        # Delegate the actual syntax check to the scenario's configured provisioner.
        self._config.provisioner.syntax()
@base.click_command_ex()
@click.pass_context
@click.option(
    '--scenario-name',
    '-s',
    default=base.MOLECULE_DEFAULT_SCENARIO_NAME,
    help='Name of the scenario to target. ({})'.format(
        base.MOLECULE_DEFAULT_SCENARIO_NAME
    ),
)
def syntax(ctx, scenario_name):  # pragma: no cover
    """Use the provisioner to syntax check the role."""
    # Global CLI arguments are shared through the click context object.
    args = ctx.obj.get('args')
    subcommand = base._get_subcommand(__name__)
    command_args = {'subcommand': subcommand}
    # base.execute_cmdline_scenarios drives the scenario sequence for this subcommand.
    base.execute_cmdline_scenarios(scenario_name, args, command_args)
| 30.387097 | 79 | 0.700283 |
acf0ea081196fdcaa8448d959385eacc3ae88049 | 202 | py | Python | profiles_api/serializers.py | parth-singh71/profiles-rest-api | c415d2fd6c1c6c51674bca601644bcedb67cf72c | [
"MIT"
] | null | null | null | profiles_api/serializers.py | parth-singh71/profiles-rest-api | c415d2fd6c1c6c51674bca601644bcedb67cf72c | [
"MIT"
] | 4 | 2020-04-15T07:14:27.000Z | 2021-06-04T22:31:09.000Z | profiles_api/serializers.py | parth-singh71/profiles-rest-api | c415d2fd6c1c6c51674bca601644bcedb67cf72c | [
"MIT"
] | null | null | null | from rest_framework import serializers
class HelloSerializer(serializers.Serializer):
    """Serializes a name field for testing our APIView."""
    # Free-form name; DRF validates the 10-character maximum.
    name = serializers.CharField(max_length= 10)
| 25.25 | 58 | 0.757426 |
acf0ea0d1a9e64a67024d9675783fa4b5fd5a254 | 266 | py | Python | src/api_v1/viewsets/nagroda.py | iplweb/django-bpp | 85f183a99d8d5027ae4772efac1e4a9f21675849 | [
"BSD-3-Clause"
] | 1 | 2017-04-27T19:50:02.000Z | 2017-04-27T19:50:02.000Z | src/api_v1/viewsets/nagroda.py | mpasternak/django-bpp | 434338821d5ad1aaee598f6327151aba0af66f5e | [
"BSD-3-Clause"
] | 41 | 2019-11-07T00:07:02.000Z | 2022-02-27T22:09:39.000Z | src/api_v1/viewsets/nagroda.py | iplweb/bpp | f027415cc3faf1ca79082bf7bacd4be35b1a6fdf | [
"BSD-3-Clause"
] | null | null | null | from rest_framework import viewsets
from api_v1.serializers.nagroda import NagrodaSerializer
from bpp.models.nagroda import Nagroda
class NagrodaViewSet(viewsets.ReadOnlyModelViewSet):
    """Read-only REST endpoint exposing all Nagroda records."""
    queryset = Nagroda.objects.all()
    serializer_class = NagrodaSerializer
| 26.6 | 56 | 0.830827 |
acf0eb26dd030ec27ae83e6d6018fcc48acbbbd2 | 6,174 | py | Python | Payload Computer/wp_trigger.py | km5es/Drone-Project-code | 72ef28df78b064b34f6449fa4accd63a5fbfe097 | [
"Apache-2.0"
] | null | null | null | Payload Computer/wp_trigger.py | km5es/Drone-Project-code | 72ef28df78b064b34f6449fa4accd63a5fbfe097 | [
"Apache-2.0"
] | null | null | null | Payload Computer/wp_trigger.py | km5es/Drone-Project-code | 72ef28df78b064b34f6449fa4accd63a5fbfe097 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
"""
This code will trigger when a WP is reached and when the linear velocity is below vel_threshold.
It will trigger a flag which in turn will begin the cal sequence at each WP which in turn will
begin saving metadata. When sequence is completed, another flag will trigger on write_WPs.py
which will update the WP table.
If the sequence does not complete within a specified time, the WP table will be updated anyway.
author: Krishna Makhija
rev: 25th April 2021
"""
import rospy, time, re
import numpy as np
from termcolor import colored
from threading import Event, Thread
from os.path import expanduser
from std_msgs.msg import String
from mavros_msgs.msg import *
from mavros_msgs.srv import *
from std_msgs.msg import Float32
from geometry_msgs.msg import PoseStamped
from mavros_msgs.msg import WaypointReached, WaypointList, PositionTarget
from sensor_msgs.msg import Imu, NavSatFix
from nav_msgs.msg import Odometry
n = 1
event = Event()
vel_threshold = 0.35 # linear vel threshold below which drone is considered "stationary" (m/s)
wp_num = 1
#rospy.set_param('trigger/waypoint', False)
rospy.set_param('trigger/sequence', False)
seq_timeout = 30
timeout_event = Event()
start = time.time()
error_tolerance = 1.0 ## distance in m from where to begin sequence
GPS_refresh = 10
wp_wait_timeout = 10
def get_velocity(data):
    """
    Update the module-global ``v`` with the magnitude of the drone's
    linear velocity taken from an Odometry message.
    """
    global v
    linear = data.twist.twist.linear
    v = (linear.x ** 2 + linear.y ** 2 + linear.z ** 2) ** 0.5
def wp_reached(data):
    """
    Set trigger when arrived at WP.

    Callback for /mavros/mission/reached. Tracks the mission sequence number
    across calls (module globals ``n``, ``sequence``, ``wp_num``) and flips the
    module-level ``event``/``timeout_event`` flags as the drone advances.
    """
    global n
    global sequence
    global wp_num
    # First callback: latch the initial sequence number reported by the FCU.
    if n == 1:
        sequence = data.header.seq
        n = 2
        print("The sequence value is set at %s" %sequence)
    print("The current drone sequence on the FCU is %s" %data.header.seq)
    # One WP past the latched sequence: start the WP-table update countdown.
    if data.header.seq == sequence + 1:
        print("Begin countdown to updating WP table: %s seconds." %seq_timeout)
        timeout_event.set()
    # Two WPs past: treat the WP as reached and re-latch the sequence.
    if data.header.seq == sequence + 2:
        print("WP reached: %s. Waiting for drone to be stationary." %wp_num)
        wp_num = wp_num + 1
        event.set()
        timeout_event.clear()
        sequence = data.header.seq
def haversine(lat1, long1, lat2, long2):
    """
    Calculate the great-circle distance between two points given
    lat/long coordinates in decimal degrees.

    REFERENCE: https://www.movable-type.co.uk/scripts/latlong.html

    :param lat1: latitude of the first point (degrees).
    :param long1: longitude of the first point (degrees).
    :param lat2: latitude of the second point (degrees).
    :param long2: longitude of the second point (degrees).
    :return: distance along the Earth's surface in meters.
    """
    r = 6.3781e6  # radius of earth (meters)
    phi1 = np.deg2rad(lat1)
    phi2 = np.deg2rad(lat2)
    lam1 = np.deg2rad(long1)
    lam2 = np.deg2rad(long2)
    delta_phi = phi2 - phi1
    delta_lam = lam2 - lam1
    # BUG FIX: the cosine terms must use the latitudes in RADIANS
    # (phi1/phi2); the previous code passed the raw degree values
    # (lat1/lat2) to np.cos, skewing distances off the equator.
    a = np.sin(delta_phi / 2) ** 2 + np.cos(phi1) * np.cos(phi2) * np.sin(delta_lam / 2) ** 2
    c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a))
    d = r * c  # distance
    return d
def haversine_3d(lat1, long1, alt1, lat2, long2, alt2):
    """
    Euclidean ("slant") distance between two points: the 2D haversine
    ground distance combined with the altitude difference.
    """
    ground = haversine(lat1, long1, lat2, long2)
    vertical = alt2 - alt1
    return (ground ** 2 + vertical ** 2) ** 0.5
def get_waypoints(data):
    """
    Look up waypoints in FC and "target" them.

    Callback for /mavros/mission/waypoints. Stores the coordinates of the
    first mission waypoint (after home and takeoff) in the module globals
    ``wp_x_lat``/``wp_y_long``/``wp_z_alt``.
    """
    global wp_x_lat
    global wp_y_long
    global wp_z_alt
    try:
        wp_list = data.waypoints
        # skip first two waypoints, i.e. home and takeoff
        target_wp = wp_list[2:]
        wp_x_lat = target_wp[0].x_lat
        wp_y_long = target_wp[0].y_long
        wp_z_alt = target_wp[0].z_alt
        print("Retrieved WP list.")
        print("The current target WP coords are: %s, %s, and %s" %(wp_x_lat, wp_y_long, wp_z_alt))
    except IndexError:
        # Fewer than three waypoints in the list: keep the previous targets.
        pass
def get_haversine(data):
    """
    Calculate 2D haversine distance to target using real-time GPS data.

    Callback for /mavros/global_position/global. Writes the ground distance
    into the module global ``h`` and sets ``event`` so get_distance() can
    consume it (get_distance clears the event once read).
    """
    global h
    # status 0 means a GPS fix is available; -1 means no fix (see message below).
    if data.status.status == 0:
        # Only recompute once the previous value has been consumed.
        if event.is_set() == False:
            h = haversine(data.latitude, data.longitude, wp_x_lat, wp_y_long)
            event.set()
    elif data.status.status == -1:
        print('GPS fix not available.')
def get_distance(data):
    """
    Calculate 3D haversine distance to target.

    Callback for /mavros/local_position/pose. Combines the ground distance
    ``h`` (produced by get_haversine) with the altitude difference and, when
    the drone is within ``error_tolerance`` of the target and nearly
    stationary, sets the 'trigger/sequence' ROS parameter.
    """
    global distance
    # Only run when get_haversine has published a fresh ground distance.
    if event.is_set():
        try:
            alt_diff = wp_z_alt - data.pose.position.z
            distance = (h**2 + alt_diff**2)**0.5
            #print('The closest WP is: %s m away.' %(distance))
            event.clear()
            # Close enough, slow enough, and no waypoint trigger pending.
            if distance <= error_tolerance and v <= vel_threshold and rospy.get_param('trigger/waypoint') == False:
                print(">>>>WP reached<<< ||| Drone is stable and (almost) not moving.")
                #rospy.set_param('trigger/waypoint', True)
                rospy.set_param('trigger/sequence', True)
                #FIXME: this is another open loop. what do? can't seem to avoid them
                # Block the callback for a while so the trigger is not re-fired
                # immediately for the same waypoint.
                time.sleep(wp_wait_timeout)
        except IndexError:
            print("index error")
            pass
        except NameError:
            # wp_z_alt / h not set yet — no waypoint data received from the FCU.
            print("Waypoints not received from FCU.")
            pass
def main():
    """Distance-based entry point.

    Registers the waypoint/GPS/pose/odometry subscribers and spins the
    ROS node until shutdown.
    """
    # NOTE: the previous version declared ``global get_mission`` here, but the
    # name was never assigned or used — the dead statement has been removed.
    try:
        rospy.init_node('wp_trigger', anonymous = True)
        rospy.Subscriber('/mavros/mission/waypoints', WaypointList, get_waypoints)
        rospy.Subscriber('/mavros/global_position/global', NavSatFix, get_haversine)
        rospy.Subscriber('/mavros/local_position/pose', PoseStamped, get_distance)
        rospy.Subscriber('/mavros/local_position/odom', Odometry, get_velocity)
        rospy.spin()
    except (rospy.ROSInterruptException):
        # Normal shutdown path when ROS interrupts the node.
        pass
def main_seq():
    """Alternative entry point driven by MAVROS mission-reached messages
    rather than computed distance to the target waypoint."""
    try:
        rospy.init_node('wp_trigger', anonymous = True)
        rospy.Subscriber('/mavros/mission/reached', WaypointReached, wp_reached)
        rospy.Subscriber('/mavros/local_position/odom', Odometry, get_velocity)
        rospy.spin()
    except (rospy.ROSInterruptException):
        # Normal shutdown path when ROS interrupts the node.
        pass
if __name__ == '__main__':
    # Distance-based triggering is the default entry point; main_seq() is the
    # mission-reached-message variant and is never invoked automatically.
    main()
acf0eb7b77200ab606cd70be935022f0da46e7d3 | 3,731 | py | Python | nitro-python/nssrc/com/citrix/netscaler/nitro/resource/config/network/ipset_binding.py | culbertm/NSttyPython | ff9f6aedae3fb8495342cd0fc4247c819cf47397 | [
"Apache-2.0"
] | null | null | null | nitro-python/nssrc/com/citrix/netscaler/nitro/resource/config/network/ipset_binding.py | culbertm/NSttyPython | ff9f6aedae3fb8495342cd0fc4247c819cf47397 | [
"Apache-2.0"
] | null | null | null | nitro-python/nssrc/com/citrix/netscaler/nitro/resource/config/network/ipset_binding.py | culbertm/NSttyPython | ff9f6aedae3fb8495342cd0fc4247c819cf47397 | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2008-2016 Citrix Systems, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License")
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_resource
from nssrc.com.citrix.netscaler.nitro.resource.base.base_resource import base_response
from nssrc.com.citrix.netscaler.nitro.service.options import options
from nssrc.com.citrix.netscaler.nitro.exception.nitro_exception import nitro_exception
from nssrc.com.citrix.netscaler.nitro.util.nitro_util import nitro_util
class ipset_binding(base_resource):
    """ Binding class showing the resources that can be bound to ipset_binding.
    """
    def __init__(self):
        self._name = None
        self.ipset_nsip_binding = []
        self.ipset_nsip6_binding = []

    @property
    def name(self):
        r"""Name of the IP set whose details you want to display.<br/>Minimum length = 1.
        """
        try:
            return self._name
        except Exception as e:
            raise e

    @name.setter
    def name(self, name):
        r"""Name of the IP set whose details you want to display.<br/>Minimum length = 1
        """
        try:
            self._name = name
        except Exception as e:
            raise e

    @property
    def ipset_nsip_bindings(self):
        r"""nsip that can be bound to ipset.
        """
        try:
            return self._ipset_nsip_binding
        except Exception as e:
            raise e

    @property
    def ipset_nsip6_bindings(self):
        r"""nsip6 that can be bound to ipset.
        """
        try:
            return self._ipset_nsip6_binding
        except Exception as e:
            raise e

    def _get_nitro_response(self, service, response):
        r""" converts nitro response into object and returns the object array in case of get request.

        Raises nitro_exception for API-level errors reported by the appliance.
        """
        try:
            result = service.payload_formatter.string_to_resource(ipset_binding_response, response, self.__class__.__name__)
            if(result.errorcode != 0):
                # 444 means the session is no longer valid on the appliance.
                if (result.errorcode == 444):
                    service.clear_session(self)
                if result.severity:
                    if (result.severity == "ERROR"):
                        raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
                else:
                    raise nitro_exception(result.errorcode, str(result.message), str(result.severity))
            return result.ipset_binding
        except Exception as e:
            raise e

    def _get_object_name(self):
        r""" Returns the value of object identifier argument
        """
        try:
            if self.name is not None:
                return str(self.name)
            return None
        except Exception as e:
            raise e

    @classmethod
    def get(self, service, name="", option_=""):
        r""" Use this API to fetch ipset_binding resource.

        ``name`` may be empty (fetch all bindings), a single ipset name, or a
        list of names (one request per name).
        """
        try:
            if not name:
                obj = ipset_binding()
                response = obj.get_resources(service, option_)
            elif type(name) is not list:
                obj = ipset_binding()
                obj.name = name
                response = obj.get_resource(service)
            else:
                # BUG FIX: ``response`` was previously used without being
                # initialized in this branch, raising NameError for list
                # arguments (and leaving it unbound for an empty list).
                response = [None] * len(name)
                if name and len(name) > 0:
                    obj = [ipset_binding() for _ in range(len(name))]
                    for i in range(len(name)):
                        obj[i].name = name[i]
                        response[i] = obj[i].get_resource(service)
            return response
        except Exception as e:
            raise e
class ipset_binding_response(base_response) :
    """Envelope the NITRO payload formatter deserializes ipset_binding GET
    responses into (error metadata plus the list of resources)."""
    def __init__(self, length=1) :
        # Initial empty list is immediately replaced by the sized list below.
        self.ipset_binding = []
        self.errorcode = 0
        self.message = ""
        self.severity = ""
        self.sessionid = ""
        self.ipset_binding = [ipset_binding() for _ in range(length)]
acf0eba9a4886210dcb8d28921c50b24fd44ea8c | 2,027 | py | Python | config/settings/local.py | brightparagon/instagram-clone-rn | 51d5fdb41e42cd4d5fd334141ba5dc06233495e4 | [
"MIT"
] | 1 | 2020-03-03T22:56:06.000Z | 2020-03-03T22:56:06.000Z | config/settings/local.py | brightparagon/instagram-clone-rn | 51d5fdb41e42cd4d5fd334141ba5dc06233495e4 | [
"MIT"
] | null | null | null | config/settings/local.py | brightparagon/instagram-clone-rn | 51d5fdb41e42cd4d5fd334141ba5dc06233495e4 | [
"MIT"
] | null | null | null | """
Local settings for Nomadgram project.
- Run in Debug mode
- Use console backend for emails
- Add Django Debug Toolbar
- Add django-extensions as app
"""
from .base import * # noqa
# DEBUG
# ------------------------------------------------------------------------------
DEBUG = env.bool('DJANGO_DEBUG', default=True)
TEMPLATES[0]['OPTIONS']['debug'] = DEBUG
# SECRET CONFIGURATION
# ------------------------------------------------------------------------------
# See: https://docs.djangoproject.com/en/dev/ref/settings/#secret-key
# Note: This key only used for development and testing.
SECRET_KEY = env('DJANGO_SECRET_KEY', default='p3.P/T{&3Sz8QR_u?(C)H;T5KEb*:X?#6a?6m|[bWzR^=]q8.z')
# Mail settings
# ------------------------------------------------------------------------------
EMAIL_PORT = 1025
EMAIL_HOST = 'localhost'
EMAIL_BACKEND = env('DJANGO_EMAIL_BACKEND',
default='django.core.mail.backends.console.EmailBackend')
# CACHING
# ------------------------------------------------------------------------------
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.locmem.LocMemCache',
'LOCATION': ''
}
}
# django-debug-toolbar
# ------------------------------------------------------------------------------
MIDDLEWARE += ['debug_toolbar.middleware.DebugToolbarMiddleware', ]
INSTALLED_APPS += ['debug_toolbar', ]
INTERNAL_IPS = ['127.0.0.1', '10.0.2.2', ]
DEBUG_TOOLBAR_CONFIG = {
'DISABLE_PANELS': [
'debug_toolbar.panels.redirects.RedirectsPanel',
],
'SHOW_TEMPLATE_CONTEXT': True,
}
# django-extensions
# ------------------------------------------------------------------------------
INSTALLED_APPS += ['django_extensions', ]
# TESTING
# ------------------------------------------------------------------------------
TEST_RUNNER = 'django.test.runner.DiscoverRunner'
# Your local stuff: Below this line define 3rd party library settings
# ------------------------------------------------------------------------------
| 29.808824 | 99 | 0.47558 |
acf0ec3140ef13dc6fefdf8d53c8a3ecdcd21cc2 | 5,207 | py | Python | Python/tdw/add_ons/resonance_audio_initializer.py | xf-zhao/tdw | 918b5b4c87ccf21738bd4f8c5f44e2fc8f73826d | [
"BSD-2-Clause"
] | null | null | null | Python/tdw/add_ons/resonance_audio_initializer.py | xf-zhao/tdw | 918b5b4c87ccf21738bd4f8c5f44e2fc8f73826d | [
"BSD-2-Clause"
] | null | null | null | Python/tdw/add_ons/resonance_audio_initializer.py | xf-zhao/tdw | 918b5b4c87ccf21738bd4f8c5f44e2fc8f73826d | [
"BSD-2-Clause"
] | null | null | null | from typing import List, Dict
from tdw.add_ons.audio_initializer_base import AudioInitializerBase
from tdw.physics_audio.audio_material import AudioMaterial
class ResonanceAudioInitializer(AudioInitializerBase):
    """
    Initialize Resonance Audio.
    This assumes that an avatar corresponding to `avatar_id` has already been added to the scene.
    """
    """:class_var
    A dictionary. Key = A Resonance Audio material string. Value = An [`AudioMaterial`](../physics_audio/audio_material.md).
    """
    AUDIO_MATERIALS: Dict[str, AudioMaterial] = {"roughPlaster": AudioMaterial.wood_soft,
                                                 "tile": AudioMaterial.ceramic,
                                                 "concrete": AudioMaterial.ceramic,
                                                 "wood": AudioMaterial.wood_soft,
                                                 "smoothPlaster": AudioMaterial.wood_soft,
                                                 "acousticTile": AudioMaterial.cardboard,
                                                 "glass": AudioMaterial.glass,
                                                 "parquet": AudioMaterial.wood_medium,
                                                 "marble": AudioMaterial.stone,
                                                 "grass": AudioMaterial.fabric,
                                                 "brick": AudioMaterial.ceramic,
                                                 "metal": AudioMaterial.metal}
    def __init__(self, avatar_id: str = "a", region_id: int = -1, floor: str = "parquet", ceiling: str = "acousticTile",
                 front_wall: str = "smoothPlaster", back_wall: str = "smoothPlaster", left_wall: str = "smoothPlaster",
                 right_wall: str = "smoothPlaster", framerate: int = 60):
        """
        :param avatar_id: The ID of the avatar.
        :param region_id: The ID of the scene region (room) to enable reverberation in. If -1, the reverb space will encapsulate the entire scene instead of a single room.
        :param floor: The floor material. [Read this for a list of options.](../../api/command_api.md#set_reverb_space_simple)
        :param ceiling: The ceiling material. [Read this for a list of options.](../../api/command_api.md#set_reverb_space_simple)
        :param front_wall: The front wall material. [Read this for a list of options.](../../api/command_api.md#set_reverb_space_simple)
        :param back_wall: The back wall material. [Read this for a list of options.](../../api/command_api.md#set_reverb_space_simple)
        :param left_wall: The left wall material. [Read this for a list of options.](../../api/command_api.md#set_reverb_space_simple)
        :param right_wall: The right wall material. [Read this for a list of options.](../../api/command_api.md#set_reverb_space_simple)
        :param framerate: The target simulation framerate.
        """
        super().__init__(avatar_id=avatar_id, framerate=framerate)
        """:field
        The ID of the scene region (room) to enable reverberation in. If -1, the reverb space will encapsulate the entire scene instead of a single room.
        """
        self.region_id = region_id
        """:field
        The floor material. [Read this for a list of options.](../../api/command_api.md#set_reverb_space_simple)
        """
        self.floor: str = floor
        """:field
        The ceiling material. [Read this for a list of options.](../../api/command_api.md#set_reverb_space_simple)
        """
        self.ceiling: str = ceiling
        """:field
        The front wall material. [Read this for a list of options.](../../api/command_api.md#set_reverb_space_simple)
        """
        self.front_wall: str = front_wall
        """:field
        The back wall material. [Read this for a list of options.](../../api/command_api.md#set_reverb_space_simple)
        """
        self.back_wall: str = back_wall
        """:field
        The left wall material. [Read this for a list of options.](../../api/command_api.md#set_reverb_space_simple)
        """
        self.left_wall: str = left_wall
        """:field
        The right wall material. [Read this for a list of options.](../../api/command_api.md#set_reverb_space_simple)
        """
        self.right_wall: str = right_wall
    def get_initialization_commands(self) -> List[dict]:
        commands = super().get_initialization_commands()
        # Insert at index 0 so the reverb space is configured before the
        # base class's audio-initialization commands run.
        commands.insert(0, {"$type": "set_reverb_space_simple",
                            "region_id": self.region_id,
                            "reverb_floor_material": self.floor,
                            "reverb_ceiling_material": self.ceiling,
                            "reverb_front_wall_material": self.front_wall,
                            "reverb_back_wall_material": self.back_wall,
                            "reverb_left_wall_material": self.left_wall,
                            "reverb_right_wall_material": self.right_wall})
        return commands
    def _get_sensor_command_name(self) -> str:
        # Resonance Audio uses the environment-aware audio sensor command.
        return "add_environ_audio_sensor"
    def _get_play_audio_command_name(self) -> str:
        return "play_point_source_data"
| 57.21978 | 171 | 0.5942 |
acf0ee1f9ba3a27b79c054467012dbb15823c406 | 3,458 | py | Python | ci/render_periodic_jobs_page.py | harana-oss/kubeinit | 9f4beb189b60741eba877d6e514896b811f923ff | [
"Apache-2.0"
] | null | null | null | ci/render_periodic_jobs_page.py | harana-oss/kubeinit | 9f4beb189b60741eba877d6e514896b811f923ff | [
"Apache-2.0"
] | null | null | null | ci/render_periodic_jobs_page.py | harana-oss/kubeinit | 9f4beb189b60741eba877d6e514896b811f923ff | [
"Apache-2.0"
] | null | null | null | #!/bin/python3
"""
Copyright kubeinit contributors.
Licensed under the Apache License, Version 2.0 (the "License"); you may
not use this file except in compliance with the License. You may obtain
a copy of the License at:
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
License for the specific language governing permissions and limitations
under the License.
"""
import os
import re
from jinja2 import Environment, FileSystemLoader
from kubeinit_ci_utils import get_periodic_jobs_labels
def main():
    """Render periodic_jobs.md from the CI periodic job labels.

    Fetches the periodic job labels, parses each label of the form
    ``<distro>-<driver>-<masters>-<workers>-<hypervisors>-<v|c>-<c|h>`` and
    renders them through the ``periodic_jobs.md.j2`` Jinja2 template next to
    this script.

    :raises Exception: if any label does not match the expected format.
    """
    # Human-readable names keyed by the label tokens; unknown tokens fall
    # through unchanged (matching the original if/elif chains, which had no
    # final else).
    distro_names = {
        'okd': "Origin Distribution of K8s",
        'kid': "KubeInit distro",
        'eks': "Amazon EKS Distro",
        'rke': "Rancher K8s Engine",
        'cdk': "Canonical Distribution of K8s",
        'k8s': "Vanilla K8s",
        'okd.rke': "OKD/RKE/Submariner",
    }
    services_names = {'c': "Containerized", 'v': "Virtualized"}
    launch_names = {'h': "Host", 'c': "Container"}
    labels = get_periodic_jobs_labels()
    jobs = []
    for label in labels:
        # NOTE(review): the '|' characters inside the character classes are
        # matched literally (surplus), but the pattern is kept as-is so the
        # set of accepted labels does not change.
        if not re.match(r"[a-z|0-9|\.]+-[a-z]+-\d+-\d+-\d+-[v|c]-[c|h]", label):
            print("'render_periodic_jobs_page.py' ==> This label do not match")
            print(label)
            raise Exception("'render_periodic_jobs_page.py' ==> This label do not match: %s" % (label))
        print("'render_periodic_jobs_page.py' ==> Matching a periodic job label")
        # The regex guarantees at least 7 '-'-separated tokens; extra trailing
        # tokens (if any) are ignored, as before.
        params = label.split("-")
        distro, driver, masters, workers, hypervisors, services_type, launch_from = params[:7]
        distro = distro_names.get(distro, distro)
        services_type = services_names.get(services_type, services_type)
        launch_from = launch_names.get(launch_from, launch_from)
        jobs.append({'distro': distro,
                     'driver': driver,
                     'masters': masters,
                     'workers': workers,
                     'hypervisors': hypervisors,
                     'services_type': services_type,
                     'launch_from': launch_from,
                     'url': "<a href='https://storage.googleapis.com/kubeinit-ci/jobs/" + label + "-periodic-pid-weekly-u/index.html'><img height='20px' src='https://storage.googleapis.com/kubeinit-ci/jobs/" + label + "-periodic-pid-weekly-u/badge_status.svg'/></a>"})
    # Render the Jinja2 template that lives in the same directory as this script.
    path = os.path.join(os.path.dirname(__file__))
    file_loader = FileSystemLoader(searchpath=path)
    env = Environment(loader=file_loader)
    template_index = "periodic_jobs.md.j2"
    print("'render_periodic_jobs_page.py' ==> The path for the template is: " + path)
    template = env.get_template(template_index)
    output = template.render(jobs=jobs)
    with open("periodic_jobs.md", "w+") as text_file:
        text_file.write(output)
if __name__ == "__main__":
    # Script entry point: regenerate periodic_jobs.md in the current directory.
    main()
| 35.285714 | 268 | 0.586177 |
acf0ef527845159b3dcd675d4b23a1d12f3bd65e | 1,715 | py | Python | banded_matrices/library.py | secondmind-labs/banded_matrices | b1c816e1fe8d4de9b251c95fc20045b12f0035fe | [
"Apache-2.0"
] | null | null | null | banded_matrices/library.py | secondmind-labs/banded_matrices | b1c816e1fe8d4de9b251c95fc20045b12f0035fe | [
"Apache-2.0"
] | null | null | null | banded_matrices/library.py | secondmind-labs/banded_matrices | b1c816e1fe8d4de9b251c95fc20045b12f0035fe | [
"Apache-2.0"
] | null | null | null | #
# Copyright (c) 2021 The banded_matrices Contributors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from pathlib import Path
import tensorflow as tf
from banded_matrices.platform import get_library_extension
_EXPECTED_LIBRARY_LOCATION = Path(__file__).parent / "lib"
_EXPECTED_LIBRARY_NAME = f"libbanded_matrices.{get_library_extension()}"
_EXPECTED_LIBRARY_PATH = _EXPECTED_LIBRARY_LOCATION / _EXPECTED_LIBRARY_NAME
class CompiledLibraryError(BaseException):
    """Raised when the compiled banded_matrices shared library is missing
    or cannot be loaded."""
    # NOTE(review): subclassing BaseException (not Exception) means generic
    # ``except Exception`` handlers will not catch this — confirm intended.
    pass
def _load_library():
    """Attempt to load the Banded Matrices library.

    :return: the TensorFlow op library handle exposing the custom banded ops.
    :raises CompiledLibraryError: if the shared library file does not exist at
        the expected path, or if ``tf.load_op_library`` fails (e.g. the library
        was compiled against a different TensorFlow version).
    """
    if not _EXPECTED_LIBRARY_PATH.exists():
        raise CompiledLibraryError(
            f"A compiled version of the Banded Matrices library was not found in the expected "
            f"location ({_EXPECTED_LIBRARY_PATH})"
        )
    try:
        return tf.load_op_library(str(_EXPECTED_LIBRARY_PATH))
    except Exception as e:
        # Chain the original failure as the cause for easier debugging.
        raise CompiledLibraryError(
            "An unknown error occurred when loading the Banded Matrices library. This can "
            "sometimes occur if the library was build against a different version of TensorFlow "
            "than you are currently running."
        ) from e
banded_ops = _load_library()
| 33.627451 | 97 | 0.738776 |
acf0efcd25917a68bc28ecf76bb51fb861683416 | 4,830 | py | Python | neutron/extensions/portbindings.py | yagosys/neutron | 005fec677c3bf8b2aa0df68c4aedc2b708ec7caf | [
"Apache-2.0"
] | 1 | 2016-01-13T14:29:07.000Z | 2016-01-13T14:29:07.000Z | neutron/extensions/portbindings.py | yagosys/neutron | 005fec677c3bf8b2aa0df68c4aedc2b708ec7caf | [
"Apache-2.0"
] | null | null | null | neutron/extensions/portbindings.py | yagosys/neutron | 005fec677c3bf8b2aa0df68c4aedc2b708ec7caf | [
"Apache-2.0"
] | 3 | 2015-04-03T08:47:02.000Z | 2020-02-05T10:40:45.000Z | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright (c) 2012 OpenStack Foundation.
# All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from neutron.api import extensions
from neutron.api.v2 import attributes
# The type of vnic that this port should be attached to
VNIC_TYPE = 'binding:vnic_type'
# The service will return the vif type for the specific port.
VIF_TYPE = 'binding:vif_type'
# The service may return a dictionary containing additional
# information needed by the interface driver. The set of items
# returned may depend on the value of VIF_TYPE.
VIF_DETAILS = 'binding:vif_details'
# In some cases different implementations may be run on different hosts.
# The host on which the port will be allocated.
HOST_ID = 'binding:host_id'
# The profile will be a dictionary that enables the application running
# on the specific host to pass and receive vif port specific information to
# the plugin.
PROFILE = 'binding:profile'

# The keys below are used in the VIF_DETAILS attribute to convey
# information to the VIF driver.

# TODO(rkukura): Replace CAP_PORT_FILTER, which nova no longer
# understands, with the new set of VIF security details to be used in
# the VIF_DETAILS attribute.
#
#  - port_filter : Boolean value indicating Neutron provides port filtering
#                  features such as security group and anti MAC/IP spoofing
CAP_PORT_FILTER = 'port_filter'

# Known VIF (virtual interface) type identifiers.
VIF_TYPE_UNBOUND = 'unbound'
VIF_TYPE_BINDING_FAILED = 'binding_failed'
VIF_TYPE_IOVISOR = 'iovisor'
VIF_TYPE_OVS = 'ovs'
VIF_TYPE_IVS = 'ivs'
VIF_TYPE_BRIDGE = 'bridge'
VIF_TYPE_802_QBG = '802.1qbg'
VIF_TYPE_802_QBH = '802.1qbh'
VIF_TYPE_HYPERV = 'hyperv'
VIF_TYPE_MIDONET = 'midonet'
VIF_TYPE_MLNX_DIRECT = 'mlnx_direct'
VIF_TYPE_MLNX_HOSTDEV = 'hostdev'
VIF_TYPE_OTHER = 'other'
# NOTE: VIF_TYPE_IOVISOR is intentionally absent from this list in the
# original source -- TODO confirm whether that is deliberate.
VIF_TYPES = [VIF_TYPE_UNBOUND, VIF_TYPE_BINDING_FAILED, VIF_TYPE_OVS,
             VIF_TYPE_IVS, VIF_TYPE_BRIDGE, VIF_TYPE_802_QBG,
             VIF_TYPE_802_QBH, VIF_TYPE_HYPERV, VIF_TYPE_MIDONET,
             VIF_TYPE_MLNX_DIRECT, VIF_TYPE_MLNX_HOSTDEV, VIF_TYPE_OTHER]

# Valid values for the VNIC_TYPE attribute.
VNIC_NORMAL = 'normal'
VNIC_DIRECT = 'direct'
VNIC_MACVTAP = 'macvtap'
VNIC_TYPES = [VNIC_NORMAL, VNIC_DIRECT, VNIC_MACVTAP]

# Attribute map merged into the 'ports' resource for API v2.0
# (see Portbindings.get_extended_resources below).
EXTENDED_ATTRIBUTES_2_0 = {
    'ports': {
        VIF_TYPE: {'allow_post': False, 'allow_put': False,
                   'default': attributes.ATTR_NOT_SPECIFIED,
                   'enforce_policy': True,
                   'is_visible': True},
        VIF_DETAILS: {'allow_post': False, 'allow_put': False,
                      'default': attributes.ATTR_NOT_SPECIFIED,
                      'enforce_policy': True,
                      'is_visible': True},
        VNIC_TYPE: {'allow_post': True, 'allow_put': True,
                    'default': VNIC_NORMAL,
                    'is_visible': True,
                    'validate': {'type:values': VNIC_TYPES},
                    'enforce_policy': True},
        HOST_ID: {'allow_post': True, 'allow_put': True,
                  'default': attributes.ATTR_NOT_SPECIFIED,
                  'is_visible': True,
                  'enforce_policy': True},
        PROFILE: {'allow_post': True, 'allow_put': True,
                  'default': attributes.ATTR_NOT_SPECIFIED,
                  'enforce_policy': True,
                  'validate': {'type:dict_or_none': None},
                  'is_visible': True},
    }
}
class Portbindings(extensions.ExtensionDescriptor):
    """Extension class supporting port bindings.

    Registered with neutron's extension framework so that external
    applications can read (and, with admin rights, update) the binding
    metadata attached to each virtual port.
    """

    @classmethod
    def get_name(cls):
        return "Port Binding"

    @classmethod
    def get_alias(cls):
        return "binding"

    @classmethod
    def get_description(cls):
        return "Expose port bindings of a virtual port to external application"

    @classmethod
    def get_namespace(cls):
        return "http://docs.openstack.org/ext/binding/api/v1.0"

    @classmethod
    def get_updated(cls):
        return "2014-02-03T10:00:00-00:00"

    def get_extended_resources(self, version):
        # The binding attributes exist only on the v2.0 API.
        return EXTENDED_ATTRIBUTES_2_0 if version == "2.0" else {}
| 36.315789 | 79 | 0.67971 |
acf0f05dd07e3d68609ccda5295083be48e3b3c9 | 7,116 | py | Python | xpsi/PostProcessing/_cache.py | DevarshiChoudhury/xpsi | 200b82b4ef4a4e7342fc30dd03c5821cff0031c2 | [
"MIT"
] | 14 | 2019-09-26T12:08:06.000Z | 2021-05-11T15:26:10.000Z | xpsi/PostProcessing/_cache.py | DevarshiChoudhury/xpsi | 200b82b4ef4a4e7342fc30dd03c5821cff0031c2 | [
"MIT"
] | 13 | 2020-01-10T11:03:28.000Z | 2021-10-04T14:44:01.000Z | xpsi/PostProcessing/_cache.py | DevarshiChoudhury/xpsi | 200b82b4ef4a4e7342fc30dd03c5821cff0031c2 | [
"MIT"
] | 9 | 2020-03-04T13:28:05.000Z | 2021-09-28T09:00:50.000Z | from __future__ import division, print_function
from .. import __version__
from ._global_imports import *
try:
import h5py
except ImportError:
print('Install h5py to enable signal caching.')
raise
class _Cache(object):
    """ Cache numerical model objects computed during likelihood evaluation.

    Backed by a single HDF5 file: a ``thetas`` dataset holding the sample
    matrix, plus one dataset per cached quantity under the ``/data`` group.

    :param str filename:
        Filename of cache.

    :param str cache_dir:
        Directory to write cache to.

    :param bool read_only:
        Do not write to cache file?

    :param bool archive:
        If not read-only, then archive an existing cache file found at the
        same path?

    """
    def __init__(self, filename, cache_dir='./',
                 read_only=False, archive=True):
        # NOTE(review): when ``filename`` is not a string none of the
        # attributes below get set, deferring the failure to first use
        # (AttributeError) -- confirm whether a TypeError should be raised
        # here instead.
        if isinstance(filename, _six.string_types):
            # Enforce the .h5 extension expected by h5py consumers.
            if filename[-3:] != '.h5':
                self._filename = filename + '.h5'
            else:
                self._filename = filename

            self._cache_dir = cache_dir
            self._path = _os.path.join(self._cache_dir, self._filename)
            self._read_only = read_only
            self._archive_if_incompatible = archive

    def __enter__(self):
        return self

    def __exit__(self, exc, exc_value, traceback):
        # Returning None propagates any exception; we only announce it.
        if exc:
            print('Encountered problem whilst caching:')

    def _open(self, mode='r'):
        """ Get the :mod:`h5py` context manager. """
        if self._read_only and mode != 'r':
            raise RuntimeError('The cache is in read-only mode.')

        return h5py.File(self._path, mode)

    def cache(self, data):
        """ Cache the computational data.

        :param dict data:
            Mapping from dataset name to an array, or to a tuple/list of
            equal-shape arrays (stored along a second axis).
        """
        with self._open('r+') as f:
            g = f['data']
            # Bug fix: ``dict.iteritems()`` exists only on Python 2;
            # ``items()`` behaves identically here on both 2 and 3.
            for key, value in data.items():
                if isinstance(value, tuple) or isinstance(value, list):
                    if key not in g.keys():
                        # Lazily create the dataset on first write, sized for
                        # all n samples recorded at initialisation.
                        shape = [f.attrs['n'], len(value)]
                        shape += [s for s in value[0].shape]
                        g.create_dataset(key, shape=shape, dtype='float64')
                    for j, v in enumerate(value):
                        g[key][self.i,j,...] = v
                else:
                    if key not in g.keys():
                        shape = [f.attrs['n']] + [s for s in value.shape]
                        g.create_dataset(key, shape=shape, dtype='float64')
                    g[key][self.i,...] = value

        self.i += 1

    def reset_iterator(self):
        """ Reset the counter for the cache iterator. """
        self.i = 0

    def __iter__(self):
        self.reset_iterator()
        return self

    def __next__(self):
        """ Read from the cache.

        :return: dict mapping each cached dataset name to the row for the
            current sample index.
        """
        cached = {}
        with self._open('r') as f:
            g = f['data']
            for key in g.keys():
                cached[key] = g[key][self.i,...]
        self.i += 1
        return cached

    def next(self):
        """ Python 2.x compatibility. """
        return self.__next__()

    @make_verbose('Checking whether an existing cache can be read:',
                  'Cache state determined')
    def do_caching(self, samples, force=False):
        """ Check whether a new cache is required or whether an exising
            cache can be read without additional computation.

        :return: Boolean indicating whether to read (``False``) or write.
        """
        if force:
            self._new(samples)
            return True

        try: # try reading file and checking keys
            with self._open('r') as f:
                if 'thetas' not in f.keys():
                    self._new(samples)
                    return True
        except IOError: # create new cache file
            self._new(samples)
            return True
        else: # can be read, so check if samples array are matching
            if self._changed(samples):
                self._new(samples)
                return True
            else:
                return False

    @make_verbose('Creating new cache file', 'Cache file created')
    def _new(self, samples):
        """ Prepare a new cache file, archiving any incompatible one. """
        if not _os.path.isdir(self._cache_dir):
            _os.mkdir(self._cache_dir)

        if self._archive_if_incompatible:
            try:
                # Probe for an existing readable file at the target path.
                with self._open('r'):
                    pass
            except IOError:
                self._initialise(samples)
            else:
                self._archive()
                self._initialise(samples)
        else:
            self._initialise(samples)

    @make_verbose('Initialising cache file', 'Cache file initialised')
    def _initialise(self, samples):
        """ Initialise the cache with version, sample count and samples. """
        with self._open('w') as f:
            f.attrs['version'] = __version__
            f.attrs['n'] = samples.shape[0]
            f.create_dataset('thetas', data=samples)
            f.create_group('/data')

        self.reset_iterator()

    def _changed(self, samples):
        """ Check whether software version or sample set has changed. """
        with self._open('r') as f:
            if f.attrs['version'] != __version__:
                return True
            if not _np.array_equal(f['thetas'], samples):
                return True
        return False

    @make_verbose('Attempting to archive existing cache file in '
                  'a subdirectory')
    def _archive(self):
        """ Archive an existing cache file.

        Implemented as a generator: the yielded strings are consumed by the
        :func:`make_verbose` decorator for progress reporting, so the exact
        yield sequence is part of the behaviour.
        """

        # to archive the existing cache file
        archive_dir = _os.path.join(self._cache_dir, 'archive')

        try:
            if not _os.path.isdir(archive_dir):
                _os.mkdir(archive_dir)
        except OSError:
            yield ('Archiving failed... cache file %s will be '
                   'overwritten.' % self._filename)
            yield
        else:
            yield 'Targeting subdirectory: %s.' % archive_dir

            try:
                from datetime import datetime
            except ImportError:
                yield ('Archiving failed... cache file %s will be '
                       'overwritten.' % self._filename)
                yield
            else:
                # Timestamped name so successive archives never collide.
                name_archived = self._filename[:-3] + '__archive__'
                name_archived += 'xpsi_version_%s__' % __version__

                obj = datetime.now()
                name_archived += 'datetime__%i.%i.%i__%i.%i.%i' % (obj.day,
                                                                   obj.month,
                                                                   obj.year,
                                                                   obj.hour,
                                                                   obj.minute,
                                                                   obj.second)
                try:
                    _os.rename(self._filename,
                               _os.path.join(archive_dir, name_archived + '.h5'))
                except OSError:
                    yield ('Archiving failed... cache file %s will be '
                           'overwritten.' % self._filename)
                else:
                    yield ('Exisiting cache file archived in '
                           'subdirectory %s.' % archive_dir)

        yield None
acf0f1d060d494ed1fe6726c19e31eabec75fd9d | 2,053 | py | Python | electrumsv_sdk/builtin_components/status_monitor/status_monitor.py | electrumsv/electrumsv-sdk | 2d4b9474b2e2fc5518bba10684c5d5130ffb6328 | [
"OML"
] | 4 | 2020-07-06T12:13:14.000Z | 2021-07-29T12:45:27.000Z | electrumsv_sdk/builtin_components/status_monitor/status_monitor.py | electrumsv/electrumsv-sdk | 2d4b9474b2e2fc5518bba10684c5d5130ffb6328 | [
"OML"
] | 62 | 2020-07-04T04:50:27.000Z | 2021-08-19T21:06:10.000Z | electrumsv_sdk/builtin_components/status_monitor/status_monitor.py | electrumsv/electrumsv-sdk | 2d4b9474b2e2fc5518bba10684c5d5130ffb6328 | [
"OML"
] | 3 | 2021-01-21T09:22:45.000Z | 2021-06-12T10:16:03.000Z | import logging
import os
import sys
from pathlib import Path
from typing import Optional, Set
from electrumsv_sdk.sdk_types import AbstractPlugin
from electrumsv_sdk.config import CLIInputs
from electrumsv_sdk.components import Component
from electrumsv_sdk.utils import get_directory_name, kill_process
from electrumsv_sdk.plugin_tools import PluginTools
from . import server_app
class Plugin(AbstractPlugin):
    """SDK component wrapper that runs the status monitor server."""

    SERVER_HOST = server_app.SERVER_HOST
    SERVER_PORT = server_app.SERVER_PORT
    RESERVED_PORTS: Set[int] = {SERVER_PORT}
    PING_URL = server_app.PING_URL

    COMPONENT_NAME = get_directory_name(__file__)
    COMPONENT_PATH = Path(os.path.dirname(os.path.abspath(__file__)))
    SCRIPT_PATH = COMPONENT_PATH / "server_app.py"

    def __init__(self, cli_inputs: CLIInputs):
        self.cli_inputs = cli_inputs
        self.plugin_tools = PluginTools(self, self.cli_inputs)
        self.logger = logging.getLogger(self.COMPONENT_NAME)
        self.src = self.COMPONENT_PATH
        # The fields below are allocated dynamically when the component runs.
        self.datadir = None
        self.id = None
        self.port = None
        self.component_info: Optional[Component] = None

    def install(self) -> None:
        # Nothing to install; the server script ships with the plugin.
        self.logger.debug(f"Installing {self.COMPONENT_NAME} is not applicable")

    def start(self) -> None:
        """Spawn the status monitor server as a child process."""
        self.id = self.plugin_tools.get_id(self.COMPONENT_NAME)
        log_path = self.plugin_tools.get_logfile_path(self.id)
        self.plugin_tools.spawn_process(
            f"{sys.executable} {self.SCRIPT_PATH}",
            env_vars={"PYTHONUNBUFFERED": "1"},
            id=self.id,
            component_name=self.COMPONENT_NAME,
            src=self.src,
            logfile=log_path,
        )

    def stop(self) -> None:
        self.logger.debug("Attempting to kill the process if it is even running")
        self.plugin_tools.call_for_component_id_or_type(
            self.COMPONENT_NAME, callable=kill_process)

    def reset(self) -> None:
        # The status monitor keeps no persistent state to reset.
        self.logger.info("resetting the status monitor is not applicable.")
acf0f22db50f9f02ce38123e2f01af2654d4a8c4 | 1,290 | py | Python | kittygram/urls.py | qwertyk06/kittygram | a77bc0d2b41b096538024ece6b3f4fa502225d14 | [
"MIT"
] | null | null | null | kittygram/urls.py | qwertyk06/kittygram | a77bc0d2b41b096538024ece6b3f4fa502225d14 | [
"MIT"
] | null | null | null | kittygram/urls.py | qwertyk06/kittygram | a77bc0d2b41b096538024ece6b3f4fa502225d14 | [
"MIT"
] | null | null | null | # Обновлённый urls.py
from django.urls import include, path
from cats.views import CatViewSet
from rest_framework.routers import SimpleRouter
# Create the router.
router = SimpleRouter()
# Call .register() with the required parameters.
router.register('cats', CatViewSet)
# Any number of "URL, viewset" pairs can be registered on the router,
# for example:
# router.register('owners', OwnerViewSet)
# But we do not need that yet.
# router.register('cats', CatViewSet, basename='tiger')

urlpatterns = [
    # Every path registered on the router is available in router.urls;
    # include them in the root urls.py.
    path('', include(router.urls)),
]

# Earlier wiring variants, kept commented out for reference:

# from rest_framework.routers import SimpleRouter
# from django.urls import path, include
# from posts.views import PostViewSet

# router = SimpleRouter()
# router.register('posts', PostViewSet)

# urlpatterns = [
#     path('api/v1/posts/', include(router.urls)),
# ]

# urlpatterns = [
#     path('cats/', CatList.as_view()),
#     path('cats/<int:pk>/', CatDetail.as_view()),
# ]

# from django.urls import include, path
# from cats.views import cat_list

# urlpatterns = [
#     path('cats/', cat_list),
# ]

# from django.urls import include, path
# from cats.views import APICat

# urlpatterns = [
#     path('cats/', APICat.as_view()),
# ]
acf0f27e8911d24cc785ecc2224ac37254a3186a | 2,128 | py | Python | IbavaSource/variable_methods.py | TanaySinghal/Ibava | 546a382999666e28dab6a8986a9e2608f4373e66 | [
"MIT"
] | null | null | null | IbavaSource/variable_methods.py | TanaySinghal/Ibava | 546a382999666e28dab6a8986a9e2608f4373e66 | [
"MIT"
] | null | null | null | IbavaSource/variable_methods.py | TanaySinghal/Ibava | 546a382999666e28dab6a8986a9e2608f4373e66 | [
"MIT"
] | null | null | null | from run import *
# Registry of all interpreter variables. Each entry is a dict of the form
# {'type': varType, 'name': varName, 'value': varValue}.
variables = []

# VARIABLE METHODS
def parseAndCreateOrSetVariable(_text):
    """Split an assignment of the form NAME=expr and apply it."""
    # Split only on the first '=' so the right-hand side may contain more.
    parts = _text.split("=", 1)
    createOrSetVariable(parts[0], parts[1])
def createOrSetVariable(varName, rightSide):
    """Create a new variable, or overwrite one that already exists."""
    # Guard clause: variable names must be all-uppercase alphabetic words.
    if not (varName.isupper() and varName.isalpha()):
        printError("ERROR: Invalid variable name. Names must be capital words")
        return

    newType, newValue = interpretVarType(rightSide)
    existing = getVariable(varName)
    if existing is None:
        # Unknown name: append a fresh entry to the registry.
        variables.append({'type': newType, 'name': varName, 'value': newValue})
    else:
        # Known name: update the stored type and value in place.
        existing['type'] = newType
        existing['value'] = newValue
def interpretVarType(_value):
    """Classify the right-hand side of an assignment.

    :param _value: the raw right-hand-side expression string.
    :return: a ``(type, value)`` pair, where type is one of "BOOLEAN",
        "INTEGER" or "STRING". Returns ``(None, None)`` and prints an
        error when the expression cannot be interpreted.
    """
    from run import howManyComparison
    from run import howManyOperator
    from run import Interpreter

    # A single comparison operator means a boolean expression.
    if howManyComparison(_value) == 1:
        return "BOOLEAN", comparison(_value)

    # A single arithmetic operator means an integer expression.
    if howManyOperator(_value) == 1:
        _interpreter = Interpreter(_value)
        return "INTEGER", _interpreter.expr()

    # Plain integer literal.
    if _value.isdigit():
        return "INTEGER", int(_value)

    # Plain boolean literal.
    if _value == "true" or _value == "false":
        return "BOOLEAN", _value

    # Plain string literal. The emptiness guard prevents an IndexError on
    # _value[0] for an empty right-hand side.
    if _value and _value[0] == '"' and _value[-1] == '"':
        return "STRING", _value

    # Only remaining possibility is that it is a variable name:
    # look it up in the registry.
    var = getVariable(_value)
    if var is not None:
        return var['type'], var['value']

    printError("ERROR: Failed to set variable")
    # Bug fix: every caller unpacks two values (varType, varValue = ...),
    # so a bare None raised TypeError; return a pair instead.
    return None, None
def getVariable(_myVariableName):
    """Return the registry entry with the given name, or None if absent."""
    return next(
        (entry for entry in variables if entry['name'] == _myVariableName),
        None,
    )
#END VARIABLE METHODS | 27.636364 | 79 | 0.652256 |
acf0f286a35c113b43ccaca86a23dc3d39e3bb0b | 491 | py | Python | python/pip_package/setup.py | xuyanbo03/lab | cf2f5250e1a00ecce37b3480df28c3a5dcd08b57 | [
"CC-BY-4.0"
] | 7,407 | 2016-12-06T08:40:58.000Z | 2022-03-31T12:19:09.000Z | python/pip_package/setup.py | xuyanbo03/lab | cf2f5250e1a00ecce37b3480df28c3a5dcd08b57 | [
"CC-BY-4.0"
] | 227 | 2016-12-06T22:05:33.000Z | 2022-03-29T09:47:06.000Z | python/pip_package/setup.py | xuyanbo03/lab | cf2f5250e1a00ecce37b3480df28c3a5dcd08b57 | [
"CC-BY-4.0"
] | 1,594 | 2016-12-06T08:44:13.000Z | 2022-03-31T12:19:12.000Z | """Setup for the deepmind_lab module."""
import setuptools
setuptools.setup(
    name='deepmind-lab',
    version='1.0',
    description='DeepMind Lab: A 3D learning environment',
    long_description='',
    url='https://github.com/deepmind/lab',
    author='DeepMind',
    # Discover all packages under the current directory automatically.
    packages=setuptools.find_packages(),
    install_requires=[
        'numpy >= 1.13.3',
        'six >= 1.10.0',
    ],
    extras_require={
        # Optional dm-env based adapter: pip install deepmind-lab[dmenv_module]
        'dmenv_module': ['dm-env'],
    },
    # Include non-Python data files declared via package data / MANIFEST.in.
    include_package_data=True)
| 23.380952 | 58 | 0.619145 |
acf0f3a127da35edf3b1f42998c7ca6458482ad8 | 2,064 | py | Python | proper_parenthetics/proper_parenthetics.py | philipwerner/code-katas | 3bdce2b5d12df612e7c8f2e2b8b5ebe16a653712 | [
"MIT"
] | null | null | null | proper_parenthetics/proper_parenthetics.py | philipwerner/code-katas | 3bdce2b5d12df612e7c8f2e2b8b5ebe16a653712 | [
"MIT"
] | null | null | null | proper_parenthetics/proper_parenthetics.py | philipwerner/code-katas | 3bdce2b5d12df612e7c8f2e2b8b5ebe16a653712 | [
"MIT"
] | null | null | null | """Proper paranthetics code kata module."""
class Node(object):
    """Node class for parens.

    A doubly linked list element: holds a value plus references to the
    previous and next nodes.
    """

    def __init__(self, data, previous):
        """Create a new Node.

        :param data: the payload stored on this node.
        :param previous: the node this one follows (None for the first node).
        """
        self.data = data
        self.previous = previous
        # The forward link is wired up later, when a successor is appended.
        self.next_node = None
class Queue(object):
    """FIFO queue backed by a doubly linked list of Nodes."""

    def __init__(self):
        """Create an empty queue."""
        self.head = None
        self.tail = None
        self._counter = 0

    def enqueue(self, val):
        """Append a value at the tail of the queue."""
        new_tail = Node(val, self.tail)
        if self.tail is None:
            # Empty queue: the new node is both head and tail.
            self.head = new_tail
            self.tail = new_tail
        else:
            self.tail.next_node = new_tail
            self.tail = new_tail
        self._counter += 1

    def dequeue(self):
        """Remove and return the value at the head of the queue.

        :raises IndexError: if the queue is empty.
        """
        if not self.head:
            raise IndexError("There is nothing to remove.")
        removed = self.head.data
        if self.head.next_node:
            self.head.next_node.previous = None
            self.head = self.head.next_node
        else:
            # Bug fix: when the last node is removed, the tail must be
            # cleared too. Previously it kept pointing at the removed node,
            # so a later enqueue linked onto the stale tail and head was
            # never restored, permanently breaking the queue.
            self.head = None
            self.tail = None
        self._counter -= 1
        return removed

    def size(self):
        """Return the number of items in the queue."""
        return self._counter

    def __len__(self):
        """Return the number of items in the queue."""
        return self._counter
def parens(data):
    """
    Will check the string for open, closed or even parens.
    If (), will return 0
    If )(, will return -1
    if ((, will return 1
    """
    characters = list(data)
    if not characters:
        raise ValueError("The string needs at least 1 paren.")

    balance = 0
    for char in characters:
        if char == "(":
            balance += 1
        elif char == ")":
            balance -= 1
        # Stop at the first point the string is broken, exactly like the
        # original loop condition did.
        if balance < 0:
            break

    if balance > 1:
        return 1
    if balance < -1:
        return -1
    return balance
| 24.86747 | 66 | 0.49564 |
acf0f713be1f7fe60b62375235f6507029e2e57c | 11,317 | py | Python | tests/components/test_dialogue.py | dyoshiha/mindmeld | 95f0e8482594f00040766a2ee687e9c9338f5a74 | [
"Apache-2.0"
] | 1 | 2019-12-12T12:44:33.000Z | 2019-12-12T12:44:33.000Z | tests/components/test_dialogue.py | AravindR7/mindmeld | 470bba73ac56b6388146212ddaf697097e81cec3 | [
"Apache-2.0"
] | null | null | null | tests/components/test_dialogue.py | AravindR7/mindmeld | 470bba73ac56b6388146212ddaf697097e81cec3 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
test_dialogue
----------------------------------
Tests for dialogue module.
These tests apply regardless of async/await support.
"""
# pylint: disable=locally-disabled,redefined-outer-name
import pytest
from mindmeld.components import Conversation, DialogueManager, DialogueResponder
from mindmeld.components.request import Request, Params
from mindmeld.components.dialogue import DialogueStateRule
from mindmeld.system_entity_recognizer import SystemEntityRecognizer
def create_request(domain, intent, entities=None):
    """Build a Request for dialogue-manager tests; entities default to ()."""
    return Request(domain=domain, intent=intent,
                   entities=entities or (), text='')
def create_responder(request):
    """Wrap *request* in a fresh DialogueResponder for the dialogue manager."""
    responder = DialogueResponder(request=request)
    return responder
@pytest.fixture
def dm():
    """Dialogue manager preloaded with rules spanning every complexity tier."""
    manager = DialogueManager()
    rule_specs = [
        ('domain', {'domain': 'domain'}),
        ('intent', {'intent': 'intent'}),
        ('domain_intent', {'domain': 'domain', 'intent': 'intent'}),
        ('intent_entity_1', {'intent': 'intent', 'has_entity': 'entity_1'}),
        ('intent_entity_2', {'intent': 'intent', 'has_entity': 'entity_2'}),
        ('intent_entities', {'intent': 'intent',
                             'has_entities': ('entity_1', 'entity_2', 'entity_3')}),
        ('targeted_only', {'targeted_only': True}),
        ('dummy_ruleless', {}),  # defined to exercise default-state selection
        ('default', {'default': True}),
    ]
    for state, kwargs in rule_specs:
        # A fresh no-op handler per rule, as in the original registration.
        manager.add_dialogue_rule(state, lambda x, y: None, **kwargs)
    return manager
def test_dialogue_state_rule_equal():
    """Two rules built from identical keyword arguments compare equal."""
    kwargs = {'dialogue_state': 'some-state', 'domain': 'some-domain'}
    assert DialogueStateRule(**kwargs) == DialogueStateRule(**kwargs)
def test_dialogue_state_rule_not_equal():
    """Rules differing in state, a missing domain, or an extra intent are unequal."""
    base = DialogueStateRule(dialogue_state='some-state', domain='some-domain')
    variants = [
        DialogueStateRule(dialogue_state='some-state-2', domain='some-domain'),
        DialogueStateRule(dialogue_state='some-state'),
        DialogueStateRule(dialogue_state='some-state', domain='some-domain',
                          intent='some-intent'),
    ]
    for other in variants:
        assert base != other
def test_dialogue_state_rule_unexpected_keyword():
    """An unknown keyword argument raises TypeError with a precise message."""
    expected = "DialogueStateRule() got an unexpected keyword argument 'new_key'"
    with pytest.raises(TypeError) as excinfo:
        DialogueStateRule(dialogue_state='some-state', domain='some-domain',
                          new_key='some-key')
    assert expected in str(excinfo)
def test_dialogue_state_rule_targeted_only():
    """A targeted_only rule never matches by itself and rejects extra kwargs."""
    incoming = create_request('some-domain', 'some-intent')
    targeted_rule = DialogueStateRule(dialogue_state='some-state',
                                      targeted_only=True)
    assert not targeted_rule.apply(incoming)

    expected_msg = ("For a dialogue state rule, if targeted_only is True, "
                    "domain, intent, and has_entity must be omitted")
    with pytest.raises(ValueError) as excinfo:
        DialogueStateRule(dialogue_state='some-state', domain='some-domain',
                          targeted_only=True)
    assert expected_msg in str(excinfo)
def test_dialogue_state_rule_exception():
    """Entity-kwarg validation, entity_types contents, repr and comparisons."""
    # Non-string entity names are rejected.
    with pytest.raises(ValueError):
        DialogueStateRule(dialogue_state='some-state', has_entities=[1, 2])

    single_rule = DialogueStateRule(dialogue_state='some-state',
                                    has_entity="entity_1")
    assert single_rule.entity_types == {"entity_1"}

    multi_rule = DialogueStateRule(dialogue_state='some-state',
                                   has_entities=["entity_2", "entity_3"])
    assert multi_rule.entity_types == {"entity_2", "entity_3"}

    # has_entity and has_entities are mutually exclusive.
    with pytest.raises(ValueError):
        DialogueStateRule(dialogue_state='some-state', has_entity="entity_1",
                          has_entities=["entity_2", "entity_3"])

    # Comparisons with non-rule objects are not implemented.
    with pytest.raises(NotImplementedError):
        assert single_rule == 1
    with pytest.raises(NotImplementedError):
        assert single_rule != 1

    assert repr(single_rule) == "<DialogueStateRule 'some-state'>"

    with pytest.raises(NotImplementedError):
        assert DialogueStateRule.compare(single_rule, 1)
class TestDialogueManager:
    """Tests for the dialogue manager"""

    def test_default(self, dm):
        """Default dialogue state when no rules match

        This will select the rule with default=True"""
        request = create_request('other', 'other')
        response = create_responder(request)
        result = dm.apply_handler(request, response)
        assert result.dialogue_state == 'default'

    def test_default_uniqueness(self, dm):
        # Registering a second default rule must be rejected.
        with pytest.raises(AssertionError):
            dm.add_dialogue_rule('default2', lambda x, y: None, default=True)

    def test_default_kwarg_exclusion(self, dm):
        # A default rule may not also constrain domain/intent.
        with pytest.raises(ValueError):
            dm.add_dialogue_rule('default3', lambda x, y: None,
                                 intent='intent', default=True)

    def test_domain(self, dm):
        """Correct dialogue state is found for a domain"""
        request = create_request('domain', 'other')
        response = create_responder(request)
        result = dm.apply_handler(request, response)
        assert result.dialogue_state == 'domain'

    def test_domain_intent(self, dm):
        """Correct state should be found for domain and intent"""
        request = create_request('domain', 'intent')
        response = create_responder(request)
        result = dm.apply_handler(request, response)
        assert result.dialogue_state == 'domain_intent'

    def test_intent(self, dm):
        """Correct state should be found for intent"""
        request = create_request('other', 'intent')
        response = create_responder(request)
        result = dm.apply_handler(request, response)
        assert result.dialogue_state == 'intent'

    def test_intent_entity(self, dm):
        """Correctly match intent and entity"""
        request = create_request('domain', 'intent', [{'type': 'entity_2'}])
        response = create_responder(request)
        result = dm.apply_handler(request, response)
        assert result.dialogue_state == 'intent_entity_2'

    def test_intent_entity_tiebreak(self, dm):
        """Correctly break ties between rules of equal complexity"""
        # Both single-entity rules match; the tie resolves to
        # 'intent_entity_1' -- presumably registration order; confirm in
        # DialogueManager's rule-ordering logic.
        request = create_request('domain', 'intent', [{'type': 'entity_1'}, {'type': 'entity_2'}])
        response = create_responder(request)
        result = dm.apply_handler(request, response)
        assert result.dialogue_state == 'intent_entity_1'

    def test_intent_entities(self, dm):
        """Correctly break ties between rules of equal complexity"""
        # With all three entities present, the multi-entity rule wins.
        request = create_request('domain', 'intent', [{'type': 'entity_1'}, {'type': 'entity_2'},
                                                      {'type': 'entity_3'}])
        response = create_responder(request)
        result = dm.apply_handler(request, response)
        assert result.dialogue_state == 'intent_entities'

    def test_target_dialogue_state_management(self, dm):
        """Correctly sets the dialogue state based on the target_dialogue_state"""
        request = create_request('domain', 'intent')
        response = create_responder(request)
        result = dm.apply_handler(request, response, target_dialogue_state='intent_entity_2')
        assert result.dialogue_state == 'intent_entity_2'

    def test_target_dialogue_state_management_targeted_only(self, dm):
        """Correctly sets the dialogue state based on the target_dialogue_state"""
        request = create_request('domain', 'intent')
        response = create_responder(request)
        result = dm.apply_handler(request, response, target_dialogue_state='targeted_only')
        assert result.dialogue_state == 'targeted_only'

    def test_targeted_only_kwarg_exclusion(self, dm):
        # A targeted_only rule may not also constrain domain/intent.
        with pytest.raises(ValueError):
            dm.add_dialogue_rule('targeted_only2', lambda x, y: None,
                                 intent='intent', targeted_only=True)

    def test_middleware_single(self, dm):
        """Adding a single middleware works"""
        def _middle(request, responder, handler):
            # Middleware runs before the handler and may mutate the responder.
            responder.flag = True
            handler(request, responder)

        def _handler(request, responder):
            assert responder.flag

        dm.add_middleware(_middle)
        dm.add_dialogue_rule('middleware_test', _handler, intent='middle')
        request = create_request('domain', 'middle')
        response = create_responder(request)
        result = dm.apply_handler(request, response)
        assert result.dialogue_state == 'middleware_test'

    def test_middleware_multiple(self, dm):
        """Adding multiple middleware works"""
        def _first(request, responder, handler):
            responder.middles = vars(responder).get('middles', []) + ['first']
            handler(request, responder)

        def _second(request, responder, handler):
            responder.middles = vars(responder).get('middles', []) + ['second']
            handler(request, responder)

        def _handler(request, responder):
            # '_first' should have been called first, then '_second'
            assert responder.middles == ['first', 'second']

        dm.add_middleware(_first)
        dm.add_middleware(_second)
        dm.add_dialogue_rule('middleware_test', _handler, intent='middle')
        request = create_request('domain', 'middle')
        response = create_responder(request)
        result = dm.apply_handler(request, response)
        assert result.dialogue_state == 'middleware_test'
def test_convo_params_are_cleared(kwik_e_mart_nlp, kwik_e_mart_app_path):
    """Tests that the params are cleared in one trip from app to mm."""
    conversation = Conversation(nlp=kwik_e_mart_nlp,
                                app_path=kwik_e_mart_app_path)
    one_shot_params = Params(
        allowed_intents=['store_info.find_nearest_store'],
        target_dialogue_state='greeting',
    )
    conversation.params = one_shot_params
    conversation.say('close door')
    # After a single turn the custom params must be reset to defaults.
    assert conversation.params == Params()
@pytest.mark.parametrize(
    "language, locale, expected_ser_call",
    [
        ('en', 'en_GB', {'lang': 'EN', 'latent': True, 'locale': 'en_GB'}),
        ('es', 'en_US', {'latent': True, 'locale': 'en_US'}),
        (None, None, {'latent': True, 'locale': 'en_US', 'lang': 'EN'}),
        ('INVALID_LANG_CODE', 'en_GB', {'latent': True, 'locale': 'en_GB'}),
        ('es', 'INVALID_LOCALE_CODE', {'lang': 'ES', 'latent': True}),
        ('eng', 'en_GB', {'lang': 'EN', 'latent': True, 'locale': 'en_GB'}),
    ]
)
def test_convo_language_and_locales(mocker, kwik_e_mart_nlp,
                                    kwik_e_mart_app_path, language, locale, expected_ser_call):
    """Tests the language/locale payload passed to the system entity recognizer."""
    convo = Conversation(nlp=kwik_e_mart_nlp, app_path=kwik_e_mart_app_path)
    convo.params = Params(language=language, locale=locale)
    # Stub the SER HTTP call so no network request is made; the 400 status
    # keeps the pipeline on its error path without raising here.
    mock1 = mocker.patch.object(SystemEntityRecognizer, 'get_response', return_value=({}, 400))
    convo.say('set alarm for 4pm tomorrow')
    # Drop the free-text field from the first call's payload, then compare
    # the remaining language/locale keys exactly.
    mock1.call_args_list[0][0][0].pop('text')
    assert mock1.call_args_list[0][0][0] == expected_ser_call
| 41.454212 | 98 | 0.669789 |
acf0f753fe8968fc81fcda060848c356d982de07 | 8,663 | py | Python | coinrun/policies (conv idea alt).py | mchldann/CoinRun | a9cc33d1b93c2e78219528d9d4383271ad4a4ff5 | [
"MIT"
] | null | null | null | coinrun/policies (conv idea alt).py | mchldann/CoinRun | a9cc33d1b93c2e78219528d9d4383271ad4a4ff5 | [
"MIT"
] | null | null | null | coinrun/policies (conv idea alt).py | mchldann/CoinRun | a9cc33d1b93c2e78219528d9d4383271ad4a4ff5 | [
"MIT"
] | null | null | null | import numpy as np
import tensorflow as tf
from baselines.a2c.utils import conv, fc, conv_to_fc, batch_to_seq, seq_to_batch, lstm
from baselines.common.distributions import make_pdtype
from baselines.common.input import observation_input
from coinrun.config import Config
def impala_cnn(images, depths=(16, 32, 32)):
    """
    Model used in the paper "IMPALA: Scalable Distributed Deep-RL with
    Importance Weighted Actor-Learner Architectures" https://arxiv.org/abs/1802.01561

    :param images: input image tensor, batch-first.
    :param depths: channel count for each conv sequence. (Changed from a
        list to a tuple default to avoid the mutable-default-argument
        pitfall; it is only iterated, so callers are unaffected.)
    :return: ``(features, dropout_assign_ops)`` -- a dense 256-unit feature
        tensor, plus the ops that re-randomise the persistent dropout masks
        when run.
    """
    use_batch_norm = Config.USE_BATCH_NORM == 1

    # One-element list acts as a mutable counter shared with the nested
    # function, so each dropout layer gets a uniquely named mask variable.
    dropout_layer_num = [0]
    dropout_assign_ops = []

    def dropout_layer(out):
        # Multiply by a persistent binary keep-mask (inverted dropout).
        # The mask stays fixed until the collected assign ops are run.
        if Config.DROPOUT > 0:
            out_shape = out.get_shape().as_list()

            var_name = 'mask_' + str(dropout_layer_num[0])
            batch_seed_shape = out_shape[1:]
            batch_seed = tf.get_variable(var_name, shape=batch_seed_shape, initializer=tf.random_uniform_initializer(minval=0, maxval=1), trainable=False)
            batch_seed_assign = tf.assign(batch_seed, tf.random_uniform(batch_seed_shape, minval=0, maxval=1))
            dropout_assign_ops.append(batch_seed_assign)

            # sign(relu(seed - p)) is 1 where seed > p, else 0; rescale the
            # kept activations by 1 / (1 - p).
            curr_mask = tf.sign(tf.nn.relu(batch_seed[None,...] - Config.DROPOUT))
            curr_mask = curr_mask * (1.0 / (1.0 - Config.DROPOUT))

            out = out * curr_mask

        dropout_layer_num[0] += 1

        return out

    def conv_layer(out, depth):
        # 3x3 same-padded conv, then dropout, then optional batch norm.
        out = tf.layers.conv2d(out, depth, 3, padding='same')
        out = dropout_layer(out)

        if use_batch_norm:
            out = tf.contrib.layers.batch_norm(out, center=True, scale=True, is_training=True)

        return out

    def residual_block(inputs):
        # Pre-activation residual block: relu -> conv -> relu -> conv + skip.
        depth = inputs.get_shape()[-1].value

        out = tf.nn.relu(inputs)

        out = conv_layer(out, depth)
        out = tf.nn.relu(out)
        out = conv_layer(out, depth)
        return out + inputs

    def conv_sequence(inputs, depth):
        # Conv + stride-2 max-pool downsampling, followed by two residual blocks.
        out = conv_layer(inputs, depth)
        out = tf.layers.max_pooling2d(out, pool_size=3, strides=2, padding='same')
        out = residual_block(out)
        out = residual_block(out)
        return out

    out = images
    for depth in depths:
        out = conv_sequence(out, depth)

    out = tf.layers.flatten(out)
    out = tf.nn.relu(out)
    out = tf.layers.dense(out, 256, activation=tf.nn.relu)

    return out, dropout_assign_ops
def nature_cnn(scaled_images, **conv_kwargs):
    """
    Model used in the paper "Human-level control through deep reinforcement learning"
    https://www.nature.com/articles/nature14236

    NOTE(review): despite the name, this variant builds TWO parallel conv
    branches (a Nature-DQN-style stack and an IMPALA-like 3x3 stack), flattens
    both, concatenates them and projects to 512 features. `conv`, `fc` and
    `conv_to_fc` are external helpers (presumably baselines' a2c utils) --
    confirm against their definitions.
    """
    # Branch-1 activation: plain ReLU.
    def activ_1(curr):
        return tf.nn.relu(curr)
    # Branch-2 activation: ReLU followed by 3x3/stride-2 max pooling.
    def activ_2(curr):
        out = tf.nn.relu(curr)
        out = tf.layers.max_pooling2d(out, pool_size=3, strides=2, padding='VALID') # Maybe try pool_size = 3, also padding = 'SAME', as per the IMPALA architecture.
        return out
        #return tf.nn.max_pool(tf.nn.relu(curr), [1, 2, 2, 1], [1, 2, 2, 1], 'VALID')
    #out = tf.layers.max_pooling2d(out, pool_size=3, strides=2, padding='same')
    #self.pool_old = nn.MaxPool2d(2, 2) # kernel size, stride
    # FOR NATURE CNN:
    # total num params: 604840
    # FOR ARCH BELOW:
    # total num params: 598780
    #h = activ(conv(scaled_images, 'c1', nf=32, rf=8, stride=4, init_scale=np.sqrt(2), **conv_kwargs))
    #h2 = activ(conv(h, 'c2', nf=64, rf=4, stride=2, init_scale=np.sqrt(2), **conv_kwargs))
    #h3 = activ(conv(h2, 'c3', nf=64, rf=3, stride=1, init_scale=np.sqrt(2), **conv_kwargs))
    #h3 = conv_to_fc(h3)
    # Branch 1: Nature-DQN-style stack (8x8/4, 4x4/2, 3x3/1) at reduced widths.
    h11 = activ_1(conv(scaled_images, 'c11', nf=16, rf=8, stride=4, init_scale=np.sqrt(2), **conv_kwargs))
    h12 = activ_1(conv(h11, 'c12', nf=32, rf=4, stride=2, init_scale=np.sqrt(2), **conv_kwargs))
    h13 = activ_1(conv(h12, 'c13', nf=32, rf=3, stride=1, init_scale=np.sqrt(2), **conv_kwargs))
    h13 = conv_to_fc(h13)
    # Branch 2: IMPALA-like 3x3 convolutions, each followed by ReLU + max-pool.
    h21 = activ_2(conv(scaled_images, 'c21', nf=12, rf=3, stride=1, init_scale=np.sqrt(2), **conv_kwargs)) # I *think* IMPALA uses rf = 3 everywhere
    h22 = activ_2(conv(h21, 'c22', nf=12, rf=3, stride=1, init_scale=np.sqrt(2), **conv_kwargs))
    h23 = activ_2(conv(h22, 'c23', nf=24, rf=3, stride=1, init_scale=np.sqrt(2), **conv_kwargs))
    #h24 = activ_2(conv(h23, 'c24', nf=32, rf=3, stride=1, init_scale=np.sqrt(2), **conv_kwargs))
    h24 = conv_to_fc(h23)
    # out_channels = nf
    # kernel_size = rf
    # stride = stride
    #self.conv11 = nn.Conv2d(in_channels=in_channels, out_channels=21, kernel_size=8, stride=4)
    #self.conv12 = nn.Conv2d(in_channels=21, out_channels=42, kernel_size=4, stride=2)
    #self.conv13 = nn.Conv2d(in_channels=42, out_channels=42, kernel_size=3, stride=1)
    # Architecture taken from here: https://github.com/Nasdin/ReinforcementLearning-AtariGame
    #self.conv21 = nn.Conv2d(in_channels=in_channels, out_channels=32, kernel_size=5, stride=1, padding=2)
    #self.conv22 = nn.Conv2d(in_channels=32, out_channels=32, kernel_size=5, stride=1, padding=1)
    #self.conv23 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=4, stride=1, padding=1)
    #self.conv24 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1, padding=1)
    #self.fc1 = nn.Linear(in_features=7*7*42 + 4*4*64, out_features=512)
    #print('scaled_images shape = ', scaled_images.get_shape()) # for some reason it's 4*4*42 + 1*1*64
    #print('h13 shape = ', h13.get_shape())
    #print('h24 shape = ', h24.get_shape())
    #print('a' + 0)
    # Concatenate the two flattened branches and project to 512 features.
    h_cat = tf.concat([h13, h24], 1)
    meh1 = fc(h_cat, 'fc1', nh=512, init_scale=np.sqrt(2))
    #meh2 = fc(h24, 'fc21', nh=256, init_scale=np.sqrt(2))
    #print('shape = ', meh1.get_shape())
    #meh_concat = tf.concat([meh1, meh2], 1)
    return activ_1(meh1)
def choose_cnn(images):
    """Build the CNN feature extractor selected by ``Config.ARCHITECTURE``.

    Args:
        images: image tensor; cast to float32 and scaled to [0, 1] before
            being fed to the conv stack.

    Returns:
        Tuple ``(out, dropout_assign_ops)`` where ``out`` is the feature
        tensor and ``dropout_assign_ops`` is the list of dropout-mask assign
        ops (empty for the 'nature' architecture, which defines none).

    Raises:
        ValueError: if ``Config.ARCHITECTURE`` is not a recognized name.
    """
    arch = Config.ARCHITECTURE
    scaled_images = tf.cast(images, tf.float32) / 255.
    dropout_assign_ops = []
    if arch == 'nature':
        out = nature_cnn(scaled_images)
    elif arch == 'impala':
        out, dropout_assign_ops = impala_cnn(scaled_images)
    elif arch == 'impalalarge':
        out, dropout_assign_ops = impala_cnn(scaled_images, depths=[32, 64, 64, 64, 64])
    else:
        # Raise instead of assert(False): asserts are stripped under `python -O`,
        # and the message tells the user what was actually configured.
        raise ValueError('Unknown Config.ARCHITECTURE: {!r}'.format(arch))
    return out, dropout_assign_ops
class LstmPolicy(object):
    """Recurrent actor-critic policy: CNN features feeding a single LSTM."""

    def __init__(self, sess, ob_space, ac_space, nbatch, nsteps, nlstm=256):
        nenv = nbatch // nsteps  # number of parallel environments
        self.pdtype = make_pdtype(ac_space)
        X, processed_x = observation_input(ob_space, nbatch)
        M = tf.placeholder(tf.float32, [nbatch]) #mask (done t-1)
        S = tf.placeholder(tf.float32, [nenv, nlstm*2]) #states
        with tf.variable_scope("model", reuse=tf.AUTO_REUSE):
            h, self.dropout_assign_ops = choose_cnn(processed_x)
            # Reshape the flat (nenv*nsteps) batch into per-env sequences
            # before running the LSTM, then flatten back.
            xs = batch_to_seq(h, nenv, nsteps)
            ms = batch_to_seq(M, nenv, nsteps)
            h5, snew = lstm(xs, ms, S, 'lstm1', nh=nlstm)
            h5 = seq_to_batch(h5)
            vf = fc(h5, 'v', 1)[:,0]
            self.pd, self.pi = self.pdtype.pdfromlatent(h5)
        a0 = self.pd.sample()
        neglogp0 = self.pd.neglogp(a0)
        # LSTM cell+hidden state per environment, zero-initialized.
        self.initial_state = np.zeros((nenv, nlstm*2), dtype=np.float32)

        def step(ob, state, mask):
            # Sample an action; also return value estimate, next LSTM state
            # and -log p(action) for the PPO objective.
            return sess.run([a0, vf, snew, neglogp0], {X:ob, S:state, M:mask})

        def value(ob, state, mask):
            return sess.run(vf, {X:ob, S:state, M:mask})

        self.X = X
        self.M = M
        self.S = S
        self.vf = vf
        self.step = step
        self.value = value
class CnnPolicy(object):
    """Feed-forward actor-critic policy over CNN features (no recurrence)."""

    def __init__(self, sess, ob_space, ac_space, nbatch, nsteps, **conv_kwargs): #pylint: disable=W0613
        self.pdtype = make_pdtype(ac_space)
        X, processed_x = observation_input(ob_space, nbatch)
        with tf.variable_scope("model", reuse=tf.AUTO_REUSE):
            h, self.dropout_assign_ops = choose_cnn(processed_x)
            vf = fc(h, 'v', 1)[:,0]
            self.pd, self.pi = self.pdtype.pdfromlatent(h, init_scale=0.01)
        a0 = self.pd.sample()
        neglogp0 = self.pd.neglogp(a0)
        # Stateless policy: no recurrent state to carry between steps.
        self.initial_state = None

        def step(ob, *_args, **_kwargs):
            # Sample action and return (action, value, state, -log p(action)).
            a, v, neglogp = sess.run([a0, vf, neglogp0], {X:ob})
            return a, v, self.initial_state, neglogp

        def value(ob, *_args, **_kwargs):
            return sess.run(vf, {X:ob})

        self.X = X
        self.vf = vf
        self.step = step
        self.value = value
def get_policy():
    """Return the policy class selected by ``Config.USE_LSTM``.

    Returns:
        ``LstmPolicy`` when ``Config.USE_LSTM == 1``, ``CnnPolicy`` when it
        is ``0``.

    Raises:
        ValueError: for any other value of ``Config.USE_LSTM``.
    """
    use_lstm = Config.USE_LSTM
    if use_lstm == 1:
        return LstmPolicy
    if use_lstm == 0:
        return CnnPolicy
    # Raise instead of assert(False): asserts are stripped under `python -O`.
    raise ValueError('Config.USE_LSTM must be 0 or 1, got {!r}'.format(use_lstm))
| 35.946058 | 165 | 0.628651 |
acf0f8a6b4362a86def177fc4d6512ad9d469fd9 | 16,265 | py | Python | client/team07/tytus-flask/venv/Lib/site-packages/pip/_vendor/resolvelib/resolvers.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | 54 | 2019-10-30T19:32:23.000Z | 2022-03-16T13:40:40.000Z | client/team07/tytus-flask/venv/Lib/site-packages/pip/_vendor/resolvelib/resolvers.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | 61 | 2021-01-10T12:59:01.000Z | 2021-06-24T09:19:20.000Z | client/team07/tytus-flask/venv/Lib/site-packages/pip/_vendor/resolvelib/resolvers.py | webdev188/tytus | 847071edb17b218f51bb969d335a8ec093d13f94 | [
"MIT"
] | 556 | 2020-12-07T03:13:31.000Z | 2021-06-17T17:41:10.000Z | import collections
from .providers import AbstractResolver
from .structs import DirectedGraph, build_iter_view
# A (requirement, parent) pair: a requirement contributing to a criterion,
# and the candidate whose dependencies introduced it (None for requirements
# supplied directly by the user).
RequirementInformation = collections.namedtuple(
    "RequirementInformation", ["requirement", "parent"]
)
class ResolverException(Exception):
    """A base class for all exceptions raised by this module.

    Exceptions derived by this class should all be handled in this module. Any
    bubbling past the resolver should be treated as a bug.
    """
class RequirementsConflicted(ResolverException):
    """Raised when a criterion ends up with no satisfying candidates.

    The offending (empty-candidate) criterion is stored on ``self.criterion``.
    """

    def __init__(self, criterion):
        super(RequirementsConflicted, self).__init__(criterion)
        self.criterion = criterion

    def __str__(self):
        joined = ", ".join(
            repr(requirement)
            for requirement in self.criterion.iter_requirement()
        )
        return "Requirements conflict: {}".format(joined)
class InconsistentCandidate(ResolverException):
    """Raised when a provider-returned candidate fails its own criterion.

    Indicates a faulty provider: ``find_matches()`` produced a candidate that
    ``is_satisfied_by()`` rejects.
    """

    def __init__(self, candidate, criterion):
        super(InconsistentCandidate, self).__init__(candidate, criterion)
        self.candidate = candidate
        self.criterion = criterion

    def __str__(self):
        joined = ", ".join(
            repr(requirement)
            for requirement in self.criterion.iter_requirement()
        )
        return "Provided candidate {!r} does not satisfy {}".format(
            self.candidate, joined
        )
class Criterion(object):
    """Representation of possible resolution results of a package.

    This holds three attributes:

    * `information` is a collection of `RequirementInformation` pairs.
      Each pair is a requirement contributing to this criterion, and the
      candidate that provides the requirement.
    * `incompatibilities` is a collection of all known not-to-work candidates
      to exclude from consideration.
    * `candidates` is a collection containing all possible candidates deducted
      from the union of contributing requirements and known incompatibilities.
      It should never be empty, except when the criterion is an attribute of a
      raised `RequirementsConflicted` (in which case it is always empty).

    .. note::
        This class is intended to be externally immutable. **Do not** mutate
        any of its attribute containers.
    """

    def __init__(self, candidates, information, incompatibilities):
        self.candidates = candidates
        self.information = information
        self.incompatibilities = incompatibilities

    def __repr__(self):
        # Show each contributing requirement with the candidate that pulled
        # it in, e.g. "Criterion((<req>, via=<candidate>))".
        requirements = ", ".join(
            "({!r}, via={!r})".format(req, parent)
            for req, parent in self.information
        )
        return "Criterion({})".format(requirements)

    @classmethod
    def from_requirement(cls, provider, requirement, parent):
        """Build an instance from a requirement."""
        cands = build_iter_view(provider.find_matches([requirement]))
        infos = [RequirementInformation(requirement, parent)]
        criterion = cls(cands, infos, incompatibilities=[])
        if not cands:
            # Nothing can satisfy the requirement at all.
            raise RequirementsConflicted(criterion)
        return criterion

    def iter_requirement(self):
        """Iterate over all contributing requirements."""
        return (i.requirement for i in self.information)

    def iter_parent(self):
        """Iterate over parent candidates (None for user requirements)."""
        return (i.parent for i in self.information)

    def merged_with(self, provider, requirement, parent):
        """Build a new instance from this and a new requirement."""
        infos = list(self.information)
        infos.append(RequirementInformation(requirement, parent))
        cands = build_iter_view(provider.find_matches([r for r, _ in infos]))
        criterion = type(self)(cands, infos, list(self.incompatibilities))
        if not cands:
            raise RequirementsConflicted(criterion)
        return criterion

    def excluded_of(self, candidates):
        """Build a new instance from this, but excluding specified candidates.

        Returns the new instance, or None if we still have no valid candidates.
        """
        cands = self.candidates.excluding(candidates)
        if not cands:
            return None
        incompats = self.incompatibilities + candidates
        return type(self)(cands, list(self.information), incompats)
class ResolutionError(ResolverException):
    """Base class for errors raised during the resolution loop itself."""
    pass
class ResolutionImpossible(ResolutionError):
    """No combination of candidates can satisfy all the requirements."""

    def __init__(self, causes):
        super(ResolutionImpossible, self).__init__(causes)
        # causes is a list of RequirementInformation objects
        self.causes = causes
class ResolutionTooDeep(ResolutionError):
    """Resolution did not converge within the allowed number of rounds."""

    def __init__(self, round_count):
        super(ResolutionTooDeep, self).__init__(round_count)
        # The max_rounds value that was exhausted.
        self.round_count = round_count
# Resolution state in a round: `mapping` holds pinned candidates keyed by
# requirement identifier (ordered by pin time -- backtracking relies on
# popitem() returning the most recent pin); `criteria` maps identifiers to
# Criterion instances.
State = collections.namedtuple("State", "mapping criteria")
class Resolution(object):
    """Stateful resolution object.

    This is designed as a one-off object that holds information to kick start
    the resolution process, and holds the results afterwards.
    """

    def __init__(self, provider, reporter):
        self._p = provider  # AbstractProvider implementation
        self._r = reporter  # reporter receiving progress callbacks
        self._states = []   # stack of State snapshots; last entry is current

    @property
    def state(self):
        # The current (most recent) state. Raises AttributeError instead of
        # IndexError before resolve() has pushed the first state, so that
        # hasattr(resolution, "state") behaves as expected.
        try:
            return self._states[-1]
        except IndexError:
            raise AttributeError("state")

    def _push_new_state(self):
        """Push a new state into history.

        This new state will be used to hold resolution results of the next
        coming round.
        """
        base = self._states[-1]
        state = State(
            mapping=base.mapping.copy(),
            criteria=base.criteria.copy(),
        )
        self._states.append(state)

    def _merge_into_criterion(self, requirement, parent):
        # Fold `requirement` (introduced by candidate `parent`, or None for a
        # user-supplied root requirement) into the criterion for its name.
        # Raises RequirementsConflicted if the merge leaves no candidates.
        self._r.adding_requirement(requirement, parent)
        name = self._p.identify(requirement)
        try:
            crit = self.state.criteria[name]
        except KeyError:
            crit = Criterion.from_requirement(self._p, requirement, parent)
        else:
            crit = crit.merged_with(self._p, requirement, parent)
        return name, crit

    def _get_criterion_item_preference(self, item):
        # Sort key for (name, criterion) items; delegates to the provider so
        # it can decide which requirement should be attempted next.
        name, criterion = item
        return self._p.get_preference(
            self.state.mapping.get(name),
            criterion.candidates.for_preference(),
            criterion.information,
        )

    def _is_current_pin_satisfying(self, name, criterion):
        # True iff a candidate is currently pinned for `name` and it satisfies
        # every requirement collected in `criterion`.
        try:
            current_pin = self.state.mapping[name]
        except KeyError:
            return False
        return all(
            self._p.is_satisfied_by(r, current_pin)
            for r in criterion.iter_requirement()
        )

    def _get_criteria_to_update(self, candidate):
        # Merge each of `candidate`'s dependencies into the matching
        # criterion; returns the updated criteria keyed by identifier.
        criteria = {}
        for r in self._p.get_dependencies(candidate):
            name, crit = self._merge_into_criterion(r, parent=candidate)
            criteria[name] = crit
        return criteria

    def _attempt_to_pin_criterion(self, name, criterion):
        # Try each candidate in preference order. On success, mutate the
        # current state and return []; on total failure, return the list of
        # conflicting criteria so the caller can backtrack.
        causes = []
        for candidate in criterion.candidates:
            try:
                criteria = self._get_criteria_to_update(candidate)
            except RequirementsConflicted as e:
                causes.append(e.criterion)
                continue

            # Check the newly-pinned candidate actually works. This should
            # always pass under normal circumstances, but in the case of a
            # faulty provider, we will raise an error to notify the implementer
            # to fix find_matches() and/or is_satisfied_by().
            satisfied = all(
                self._p.is_satisfied_by(r, candidate)
                for r in criterion.iter_requirement()
            )
            if not satisfied:
                raise InconsistentCandidate(candidate, criterion)

            # Put newly-pinned candidate at the end. This is essential because
            # backtracking looks at this mapping to get the last pin.
            self._r.pinning(candidate)
            self.state.mapping.pop(name, None)
            self.state.mapping[name] = candidate
            self.state.criteria.update(criteria)

            return []

        # All candidates tried, nothing works. This criterion is a dead
        # end, signal for backtracking.
        return causes

    def _backtrack(self):
        """Perform backtracking.

        When we enter here, the stack is like this::

            [ state Z ]
            [ state Y ]
            [ state X ]
            .... earlier states are irrelevant.

        1. No pins worked for Z, so it does not have a pin.
        2. We want to reset state Y to unpinned, and pin another candidate.
        3. State X holds what state Y was before the pin, but does not
           have the incompatibility information gathered in state Y.

        Each iteration of the loop will:

        1. Discard Z.
        2. Discard Y but remember its incompatibility information gathered
           previously, and the failure we're dealing with right now.
        3. Push a new state Y' based on X, and apply the incompatibility
           information from Y to Y'.
        4a. If this causes Y' to conflict, we need to backtrack again. Make Y'
            the new Z and go back to step 2.
        4b. If the incompatibilites apply cleanly, end backtracking.
        """
        while len(self._states) >= 3:
            # Remove the state that triggered backtracking.
            del self._states[-1]

            # Retrieve the last candidate pin and known incompatibilities.
            broken_state = self._states.pop()
            name, candidate = broken_state.mapping.popitem()
            incompatibilities_from_broken = [
                (k, v.incompatibilities)
                for k, v in broken_state.criteria.items()
            ]

            self._r.backtracking(candidate)

            # Create a new state from the last known-to-work one, and apply
            # the previously gathered incompatibility information.
            self._push_new_state()
            for k, incompatibilities in incompatibilities_from_broken:
                try:
                    crit = self.state.criteria[k]
                except KeyError:
                    continue
                self.state.criteria[k] = crit.excluded_of(incompatibilities)

            # Mark the newly known incompatibility.
            criterion = self.state.criteria[name].excluded_of([candidate])

            # It works! Let's work on this new state.
            if criterion:
                self.state.criteria[name] = criterion
                return True

            # State does not work after adding the new incompatibility
            # information. Try the still previous state.

        # No way to backtrack anymore.
        return False

    def resolve(self, requirements, max_rounds):
        # Main resolution loop: merge root requirements, then repeatedly pin
        # the most-preferred unsatisfied criterion, backtracking on conflict.
        if self._states:
            raise RuntimeError("already resolved")

        self._r.starting()

        # Initialize the root state.
        self._states = [State(mapping=collections.OrderedDict(), criteria={})]
        for r in requirements:
            try:
                name, crit = self._merge_into_criterion(r, parent=None)
            except RequirementsConflicted as e:
                raise ResolutionImpossible(e.criterion.information)
            self.state.criteria[name] = crit

        # The root state is saved as a sentinel so the first ever pin can have
        # something to backtrack to if it fails. The root state is basically
        # pinning the virtual "root" package in the graph.
        self._push_new_state()

        for round_index in range(max_rounds):
            self._r.starting_round(round_index)

            unsatisfied_criterion_items = [
                item
                for item in self.state.criteria.items()
                if not self._is_current_pin_satisfying(*item)
            ]

            # All criteria are accounted for. Nothing more to pin, we are done!
            if not unsatisfied_criterion_items:
                self._r.ending(self.state)
                return self.state

            # Choose the most preferred unpinned criterion to try.
            name, criterion = min(
                unsatisfied_criterion_items,
                key=self._get_criterion_item_preference,
            )
            failure_causes = self._attempt_to_pin_criterion(name, criterion)

            if failure_causes:
                # Backtrack if pinning fails. The backtrack process puts us in
                # an unpinned state, so we can work on it in the next round.
                success = self._backtrack()

                # Dead ends everywhere. Give up.
                if not success:
                    causes = [i for c in failure_causes for i in c.information]
                    raise ResolutionImpossible(causes)
            else:
                # Pinning was successful. Push a new state to do another pin.
                self._push_new_state()

            self._r.ending_round(round_index, self.state)

        raise ResolutionTooDeep(max_rounds)
def _has_route_to_root(criteria, key, all_keys, connected):
if key in connected:
return True
if key not in criteria:
return False
for p in criteria[key].iter_parent():
try:
pkey = all_keys[id(p)]
except KeyError:
continue
if pkey in connected:
connected.add(key)
return True
if _has_route_to_root(criteria, pkey, all_keys, connected):
connected.add(key)
return True
return False
# Final resolution result: `mapping` (identifier -> pinned candidate),
# `graph` (DirectedGraph of why each key is included; None is the root),
# and `criteria` (identifier -> Criterion).
Result = collections.namedtuple("Result", "mapping graph criteria")
def _build_result(state):
    # Convert the final State into a user-facing Result: drop criteria that
    # cannot be traced back to a user requirement, and build the dependency
    # graph with `None` as the sentinel root vertex.
    mapping = state.mapping
    all_keys = {id(v): k for k, v in mapping.items()}
    all_keys[id(None)] = None

    graph = DirectedGraph()
    graph.add(None)  # Sentinel as root dependencies' parent.

    connected = {None}
    for key, criterion in state.criteria.items():
        if not _has_route_to_root(state.criteria, key, all_keys, connected):
            continue
        if key not in graph:
            graph.add(key)
        for p in criterion.iter_parent():
            try:
                pkey = all_keys[id(p)]
            except KeyError:
                continue
            if pkey not in graph:
                graph.add(pkey)
            graph.connect(pkey, key)

    return Result(
        # Only keep pins that are actually reachable from the root.
        mapping={k: v for k, v in mapping.items() if k in connected},
        graph=graph,
        criteria=state.criteria,
    )
class Resolver(AbstractResolver):
    """The thing that performs the actual resolution work."""

    # Callers can catch this to handle any error raised by this module.
    base_exception = ResolverException

    def resolve(self, requirements, max_rounds=100):
        """Take a collection of constraints, spit out the resolution result.

        The return value is a representation to the final resolution result. It
        is a tuple subclass with three public members:

        * `mapping`: A dict of resolved candidates. Each key is an identifier
            of a requirement (as returned by the provider's `identify` method),
            and the value is the resolved candidate.
        * `graph`: A `DirectedGraph` instance representing the dependency tree.
            The vertices are keys of `mapping`, and each edge represents *why*
            a particular package is included. A special vertex `None` is
            included to represent parents of user-supplied requirements.
        * `criteria`: A dict of "criteria" that hold detailed information on
            how edges in the graph are derived. Each key is an identifier of a
            requirement, and the value is a `Criterion` instance.

        The following exceptions may be raised if a resolution cannot be found:

        * `ResolutionImpossible`: A resolution cannot be found for the given
            combination of requirements. The `causes` attribute of the
            exception is a list of (requirement, parent), giving the
            requirements that could not be satisfied.
        * `ResolutionTooDeep`: The dependency tree is too deeply nested and
            the resolver gave up. This is usually caused by a circular
            dependency, but you can try to resolve this by increasing the
            `max_rounds` argument.
        """
        resolution = Resolution(self.provider, self.reporter)
        state = resolution.resolve(requirements, max_rounds=max_rounds)
        return _build_result(state)
| 36.387025 | 79 | 0.630618 |
acf0f97ed50fee8829bf6915db341812929f9c87 | 74 | py | Python | pass.py | theGreenJedi/practicepy | 330da97b0c79c3c8792ebb4166ecf2609545e127 | [
"MIT"
] | null | null | null | pass.py | theGreenJedi/practicepy | 330da97b0c79c3c8792ebb4166ecf2609545e127 | [
"MIT"
] | null | null | null | pass.py | theGreenJedi/practicepy | 330da97b0c79c3c8792ebb4166ecf2609545e127 | [
"MIT"
] | null | null | null | bool = True
if bool :
print( 'Python in easy steps' )
else :
pass | 12.333333 | 33 | 0.594595 |
acf0fa0f42937c92e8479db335f4f6b6e27ae971 | 97 | py | Python | lang/py/cookbook/v2/source/cb2_8_10_sol_3.py | ch1huizong/learning | 632267634a9fd84a5f5116de09ff1e2681a6cc85 | [
"MIT"
] | null | null | null | lang/py/cookbook/v2/source/cb2_8_10_sol_3.py | ch1huizong/learning | 632267634a9fd84a5f5116de09ff1e2681a6cc85 | [
"MIT"
] | null | null | null | lang/py/cookbook/v2/source/cb2_8_10_sol_3.py | ch1huizong/learning | 632267634a9fd84a5f5116de09ff1e2681a6cc85 | [
"MIT"
] | null | null | null | .
----------------------------------------------------------------------
Ran 1 test in 0.003s
OK
| 19.4 | 70 | 0.175258 |
acf0fa4080a95ac1e084aa64e0264256781608ec | 1,003 | py | Python | setup.py | jdlabsco/wagtail-themes | 35c39cd17b44c0476c3fd5b45277e383963bdbb2 | [
"MIT"
] | null | null | null | setup.py | jdlabsco/wagtail-themes | 35c39cd17b44c0476c3fd5b45277e383963bdbb2 | [
"MIT"
] | null | null | null | setup.py | jdlabsco/wagtail-themes | 35c39cd17b44c0476c3fd5b45277e383963bdbb2 | [
"MIT"
] | null | null | null | from setuptools import find_packages, setup
# Runtime dependencies: Django 2.0+ and Wagtail 2.0+.
install_requires = [
    'django>=2.0',
    'wagtail>=2.0'
]

# Extra dependencies for tests/linting, installed via the 'test' extra
# (e.g. `pip install wagtail-themes[test]`).
test_require = [
    'flake8',
    'isort',
    'pytest',
    'pytest-cov',
    'pytest-django',
    'wagtail',
]

setup(
    name='wagtail-themes',
    version='0.3.0',
    description='Site specific theme loader for Django Wagtail.',
    author='Rob Moorman',
    author_email='rob@moori.nl',
    url='https://github.com/moorinteractive/wagtail-themes',
    license='MIT',
    install_requires=install_requires,
    extras_require={
        'test': test_require,
    },
    # src-layout: importable packages live under src/.
    package_dir={'': 'src'},
    packages=find_packages('src'),
    include_package_data=True,
    classifiers=[
        'Environment :: Web Environment',
        'Framework :: Django',
        'Operating System :: Unix',
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.5',
        'Programming Language :: Python :: 3.6',
        'Programming Language :: Python :: 3.7'
    ]
)
| 23.325581 | 65 | 0.602193 |
acf0fa929f124e7620ccfe3ad697e9ed1473a262 | 2,180 | py | Python | src/spinnaker_ros_lsm/venv/lib/python2.7/site-packages/data_specification/__init__.py | Roboy/LSM_SpiNNaker_MyoArm | 04fa1eaf78778edea3ba3afa4c527d20c491718e | [
"BSD-3-Clause"
] | 2 | 2020-11-01T13:22:11.000Z | 2020-11-01T13:22:20.000Z | src/spinnaker_ros_lsm/venv/lib/python2.7/site-packages/data_specification/__init__.py | Roboy/LSM_SpiNNaker_MyoArm | 04fa1eaf78778edea3ba3afa4c527d20c491718e | [
"BSD-3-Clause"
] | null | null | null | src/spinnaker_ros_lsm/venv/lib/python2.7/site-packages/data_specification/__init__.py | Roboy/LSM_SpiNNaker_MyoArm | 04fa1eaf78778edea3ba3afa4c527d20c491718e | [
"BSD-3-Clause"
] | null | null | null | """ Used to generate memory images from a set of instructions.
The main part of this package is the\
:py:class:`data_specification.data_specification_generator.DataSpecificationGenerator`\
class. This is used to generate a "Data Specification", which can then be\
executed to produce a memory image. This package also handles this function\
if required, through the\
:py:class:`data_specification.data_specification_executor.DataSpecificationExecutor`\
class.
Functional Requirements
=======================
* Creation of a Data Specification Language file which can be executed\
to produce a memory image.
* Any errors that can be checked during the creation of the\
specification should throw an exception.
* It will be impossible to detect all errors at creation time.
* There should be no assumption of where the data specification is\
be stored, although a default provision of a way to write the\
specification to a file is acceptable.
* Execution of a Data Specification Language file, producing a\
memory image.
* This should detect any errors during execution and report them,\
halting the execution.
* There should be no assumption of where the data specification is\
read from, although a default provision of a way to read the\
specification from a file is acceptable.
Use Cases
=========
There are a number of use-cases of this library:
* :py:class:`~data_specification.data_specification_generator.DataSpecificationGenerator`\
is used to create a compressed memory image which can be expanded\
later, to reduce the amount of data that needs to be transferred over\
a slow connection
* :py:class:`~data_specification.data_specification_executor.DataSpecificationExecutor`\
is used to execute a previously generated specification at the\
receiving end of a slow connection.
""" | 46.382979 | 98 | 0.653211 |
acf0fadf2f1fea3394ef74414747f25fd50eb456 | 734 | py | Python | var/spack/repos/builtin/packages/bpp-suite/package.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 2 | 2018-11-27T03:39:44.000Z | 2021-09-06T15:50:35.000Z | var/spack/repos/builtin/packages/bpp-suite/package.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2019-01-11T20:11:52.000Z | 2019-01-11T20:11:52.000Z | var/spack/repos/builtin/packages/bpp-suite/package.py | HaochengLIU/spack | 26e51ff1705a4d6234e2a0cf734f93f7f95df5cb | [
"ECL-2.0",
"Apache-2.0",
"MIT"
] | 1 | 2020-10-14T14:20:17.000Z | 2020-10-14T14:20:17.000Z | # Copyright 2013-2018 Lawrence Livermore National Security, LLC and other
# Spack Project Developers. See the top-level COPYRIGHT file for details.
#
# SPDX-License-Identifier: (Apache-2.0 OR MIT)
from spack import *
class BppSuite(CMakePackage):
    """BppSuite is a suite of ready-to-use programs for phylogenetic and
    sequence analysis."""

    homepage = "http://biopp.univ-montp2.fr/wiki/index.php/BppSuite"
    url = "http://biopp.univ-montp2.fr/repos/sources/bppsuite/bppsuite-2.2.0.tar.gz"

    version('2.2.0', 'd8b29ad7ccf5bd3a7beb701350c9e2a4')

    # Build-time-only tools.
    depends_on('cmake@2.6:', type='build')
    depends_on('texinfo', type='build')
    # Bio++ component libraries linked at build and run time.
    depends_on('bpp-core')
    depends_on('bpp-seq')
    depends_on('bpp-phyl')
acf0fb76c995df571efc78d9e8e0991819266c9b | 2,947 | py | Python | budget-rnn/src/data_preparation/pen_digits/tokenize_dataset.py | tejaskannan/ml-models | ad5acad2c0ce75773062ffcdff088a6fbe5ffc17 | [
"Apache-2.0"
] | 1 | 2021-06-28T15:40:41.000Z | 2021-06-28T15:40:41.000Z | budget-rnn/src/data_preparation/pen_digits/tokenize_dataset.py | tejaskannan/ml-models | ad5acad2c0ce75773062ffcdff088a6fbe5ffc17 | [
"Apache-2.0"
] | 5 | 2021-03-04T19:42:15.000Z | 2022-02-10T05:46:15.000Z | budget-rnn/src/data_preparation/pen_digits/tokenize_dataset.py | tejaskannan/budget-rnn | ad5acad2c0ce75773062ffcdff088a6fbe5ffc17 | [
"Apache-2.0"
] | null | null | null | import os
import random
from argparse import ArgumentParser
from collections import Counter
from typing import Iterable, Dict, Any, List
from utils.data_writer import DataWriter
from utils.file_utils import make_dir
from utils.constants import TRAIN, VALID, TEST, SAMPLE_ID, INPUTS, OUTPUT
def read_dataset(input_path: str) -> Iterable[Dict[str, Any]]:
    """Yield samples parsed from a .ts (sktime-format) file.

    Lines before the '@data' marker are header and skipped. Each data line
    has the form 'x1,..,xn:y1,..,yn:label'; the xs and ys are zipped into
    [[x, y], ...] feature pairs.
    """
    sample_id = 0
    in_data_section = False
    with open(input_path, 'r') as input_file:
        for raw_line in input_file:
            line = raw_line.strip().lower()
            if line == '@data':
                in_data_section = True
                continue
            if not in_data_section:
                continue
            tokens = line.split(':')
            xs = [float(t) for t in tokens[0].split(',')]
            ys = [float(t) for t in tokens[1].split(',')]
            yield {
                SAMPLE_ID: sample_id,
                INPUTS: [[x, y] for x, y in zip(xs, ys)],
                OUTPUT: int(tokens[-1]),
            }
            sample_id += 1
def get_partition(partitions: List[str], fractions: List[float]) -> str:
    """Sample one partition name with probabilities given by `fractions`.

    Draws a single uniform value and walks the cumulative distribution.
    If the fractions sum to less than 1 (or the draw exceeds the total),
    the last partition is returned as the fallback.
    """
    draw = random.random()
    cumulative = 0.0
    for fraction, partition in zip(fractions, partitions):
        cumulative += fraction
        if draw < cumulative:
            return partition
    return partitions[-1]
def write_dataset(dataset: List[Dict[str, Any]], partitions: List[str], fractions: List[float], output_folder: str):
    """Randomly split `dataset` into `partitions` (by `fractions`) and write
    each partition as chunked jsonl.gz files under `output_folder`.

    Also counts labels per partition and prints the counters at the end.
    """
    # Initialize writers and counters
    writers: Dict[str, DataWriter] = dict()
    label_counters: Dict[str, Counter] = dict()
    for partition in partitions:
        writer = DataWriter(os.path.join(output_folder, partition), chunk_size=5000, file_prefix='data', file_suffix='jsonl.gz')
        writers[partition] = writer
        label_counters[partition] = Counter()

    # Write all samples, assigning each one to a random partition.
    for sample in dataset:
        partition = get_partition(partitions, fractions)
        writers[partition].add(sample)
        label_counters[partition][sample[OUTPUT]] += 1

    # Close all writers
    for writer in writers.values():
        writer.close()

    # Per-partition label distribution, useful as a sanity check on the split.
    print(label_counters)
if __name__ == '__main__':
    parser = ArgumentParser()
    parser.add_argument('--input-folder', type=str, required=True)
    parser.add_argument('--output-folder', type=str, required=True)
    args = parser.parse_args()

    # Expected UEA/UCR PenDigits file names inside the input folder.
    train_file = os.path.join(args.input_folder, 'PenDigits_TRAIN.ts')
    test_file = os.path.join(args.input_folder, 'PenDigits_TEST.ts')

    train_dataset = list(read_dataset(train_file))
    test_dataset = list(read_dataset(test_file))

    # Set random seed for reproducible results
    random.seed(42)

    make_dir(args.output_folder)
    # Official train file is split 80/20 into train/validation; the official
    # test split is written out unchanged.
    write_dataset(train_dataset, partitions=[TRAIN, VALID], fractions=[0.8, 0.2], output_folder=args.output_folder)
    write_dataset(test_dataset, partitions=[TEST], fractions=[1.0], output_folder=args.output_folder)
acf0fbb004d6514b8a3749af5a8a4e243561035f | 8,642 | py | Python | src/oci/load_balancer/models/backend_set_details.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/load_balancer/models/backend_set_details.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | src/oci/load_balancer/models/backend_set_details.py | LaudateCorpus1/oci-python-sdk | b0d3ce629d5113df4d8b83b7a6502b2c5bfa3015 | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | # coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class BackendSetDetails(object):
"""
The configuration details for a load balancer backend set.
For more information on backend set configuration, see
`Managing Backend Sets`__.
**Note:** The `sessionPersistenceConfiguration` (application cookie stickiness) and `lbCookieSessionPersistenceConfiguration`
(LB cookie stickiness) attributes are mutually exclusive. To avoid returning an error, configure only one of these two
attributes per backend set.
__ https://docs.cloud.oracle.com/Content/Balance/Tasks/managingbackendsets.htm
"""
def __init__(self, **kwargs):
"""
Initializes a new BackendSetDetails object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param policy:
The value to assign to the policy property of this BackendSetDetails.
:type policy: str
:param backends:
The value to assign to the backends property of this BackendSetDetails.
:type backends: list[oci.load_balancer.models.BackendDetails]
:param health_checker:
The value to assign to the health_checker property of this BackendSetDetails.
:type health_checker: oci.load_balancer.models.HealthCheckerDetails
:param ssl_configuration:
The value to assign to the ssl_configuration property of this BackendSetDetails.
:type ssl_configuration: oci.load_balancer.models.SSLConfigurationDetails
:param session_persistence_configuration:
The value to assign to the session_persistence_configuration property of this BackendSetDetails.
:type session_persistence_configuration: oci.load_balancer.models.SessionPersistenceConfigurationDetails
:param lb_cookie_session_persistence_configuration:
The value to assign to the lb_cookie_session_persistence_configuration property of this BackendSetDetails.
:type lb_cookie_session_persistence_configuration: oci.load_balancer.models.LBCookieSessionPersistenceConfigurationDetails
"""
self.swagger_types = {
'policy': 'str',
'backends': 'list[BackendDetails]',
'health_checker': 'HealthCheckerDetails',
'ssl_configuration': 'SSLConfigurationDetails',
'session_persistence_configuration': 'SessionPersistenceConfigurationDetails',
'lb_cookie_session_persistence_configuration': 'LBCookieSessionPersistenceConfigurationDetails'
}
self.attribute_map = {
'policy': 'policy',
'backends': 'backends',
'health_checker': 'healthChecker',
'ssl_configuration': 'sslConfiguration',
'session_persistence_configuration': 'sessionPersistenceConfiguration',
'lb_cookie_session_persistence_configuration': 'lbCookieSessionPersistenceConfiguration'
}
self._policy = None
self._backends = None
self._health_checker = None
self._ssl_configuration = None
self._session_persistence_configuration = None
self._lb_cookie_session_persistence_configuration = None
@property
def policy(self):
"""
**[Required]** Gets the policy of this BackendSetDetails.
The load balancer policy for the backend set. To get a list of available policies, use the
:func:`list_policies` operation.
Example: `LEAST_CONNECTIONS`
:return: The policy of this BackendSetDetails.
:rtype: str
"""
return self._policy
@policy.setter
def policy(self, policy):
"""
Sets the policy of this BackendSetDetails.
The load balancer policy for the backend set. To get a list of available policies, use the
:func:`list_policies` operation.
Example: `LEAST_CONNECTIONS`
:param policy: The policy of this BackendSetDetails.
:type: str
"""
self._policy = policy
@property
def backends(self):
"""
Gets the backends of this BackendSetDetails.
:return: The backends of this BackendSetDetails.
:rtype: list[oci.load_balancer.models.BackendDetails]
"""
return self._backends
@backends.setter
def backends(self, backends):
"""
Sets the backends of this BackendSetDetails.
:param backends: The backends of this BackendSetDetails.
:type: list[oci.load_balancer.models.BackendDetails]
"""
self._backends = backends
@property
def health_checker(self):
"""
**[Required]** Gets the health_checker of this BackendSetDetails.
:return: The health_checker of this BackendSetDetails.
:rtype: oci.load_balancer.models.HealthCheckerDetails
"""
return self._health_checker
@health_checker.setter
def health_checker(self, health_checker):
"""
Sets the health_checker of this BackendSetDetails.
:param health_checker: The health_checker of this BackendSetDetails.
:type: oci.load_balancer.models.HealthCheckerDetails
"""
self._health_checker = health_checker
@property
def ssl_configuration(self):
"""
Gets the ssl_configuration of this BackendSetDetails.
:return: The ssl_configuration of this BackendSetDetails.
:rtype: oci.load_balancer.models.SSLConfigurationDetails
"""
return self._ssl_configuration
@ssl_configuration.setter
def ssl_configuration(self, ssl_configuration):
"""
Sets the ssl_configuration of this BackendSetDetails.
:param ssl_configuration: The ssl_configuration of this BackendSetDetails.
:type: oci.load_balancer.models.SSLConfigurationDetails
"""
self._ssl_configuration = ssl_configuration
@property
def session_persistence_configuration(self):
"""
Gets the session_persistence_configuration of this BackendSetDetails.
:return: The session_persistence_configuration of this BackendSetDetails.
:rtype: oci.load_balancer.models.SessionPersistenceConfigurationDetails
"""
return self._session_persistence_configuration
@session_persistence_configuration.setter
def session_persistence_configuration(self, session_persistence_configuration):
"""
Sets the session_persistence_configuration of this BackendSetDetails.
:param session_persistence_configuration: The session_persistence_configuration of this BackendSetDetails.
:type: oci.load_balancer.models.SessionPersistenceConfigurationDetails
"""
self._session_persistence_configuration = session_persistence_configuration
@property
def lb_cookie_session_persistence_configuration(self):
"""
Gets the lb_cookie_session_persistence_configuration of this BackendSetDetails.
:return: The lb_cookie_session_persistence_configuration of this BackendSetDetails.
:rtype: oci.load_balancer.models.LBCookieSessionPersistenceConfigurationDetails
"""
return self._lb_cookie_session_persistence_configuration
@lb_cookie_session_persistence_configuration.setter
def lb_cookie_session_persistence_configuration(self, lb_cookie_session_persistence_configuration):
"""
Sets the lb_cookie_session_persistence_configuration of this BackendSetDetails.
:param lb_cookie_session_persistence_configuration: The lb_cookie_session_persistence_configuration of this BackendSetDetails.
:type: oci.load_balancer.models.LBCookieSessionPersistenceConfigurationDetails
"""
self._lb_cookie_session_persistence_configuration = lb_cookie_session_persistence_configuration
    def __repr__(self):
        # Delegate to the SDK helper for a readable key/value dump of all attributes.
        return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
    def __ne__(self, other):
        # Python 2 does not derive __ne__ from __eq__, so define it explicitly.
        return not self == other
| 39.281818 | 245 | 0.714649 |
acf0fc7a061af769f3a63e4f7180e78f1b1bf450 | 25 | py | Python | kyh_test.py | pengjeck/ici-Backend | 176240d242196c75a66dbd59a0e4e8f3af0e0f07 | [
"MIT"
] | null | null | null | kyh_test.py | pengjeck/ici-Backend | 176240d242196c75a66dbd59a0e4e8f3af0e0f07 | [
"MIT"
] | null | null | null | kyh_test.py | pengjeck/ici-Backend | 176240d242196c75a66dbd59a0e4e8f3af0e0f07 | [
"MIT"
] | null | null | null | import os
print("Hello")
| 8.333333 | 14 | 0.72 |
acf0fe0919bbf3a8cdb7dfdd2d5987509d517b80 | 10,657 | py | Python | app/face_recognition.py | XPPGX/mask_face_recognition_system | b07b7a6f0aceae95502419a9b42891eb0ebc28d9 | [
"MIT"
] | null | null | null | app/face_recognition.py | XPPGX/mask_face_recognition_system | b07b7a6f0aceae95502419a9b42891eb0ebc28d9 | [
"MIT"
] | null | null | null | app/face_recognition.py | XPPGX/mask_face_recognition_system | b07b7a6f0aceae95502419a9b42891eb0ebc28d9 | [
"MIT"
] | null | null | null | #3Data_Preprocessing.py
import os
import random
import numpy as np
from six import string_types
from sklearn.model_selection import train_test_split
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Sequential
from keras.layers import Dense, Activation , Flatten, Dropout
from keras.layers import Conv2D, MaxPool2D
#from keras.optimizers import SGD
from tensorflow.keras.optimizers import SGD
from keras.utils import np_utils
from keras.models import load_model
from keras import backend as K
import cv2
from tensorflow.python.keras.engine.training import Model
Image_size = 64
def resize_image(image, height=Image_size, width=Image_size):
    """Pad ``image`` with black borders into a square, then resize it.

    :param image: BGR image array of shape (h, w, channels)
    :param height: output height in pixels
    :param width: output width in pixels
    :return: the padded-and-resized image

    Padding is split across both sides of the short dimension so the content
    stays centred.  Using ``d - d // 2`` for the second side (instead of
    ``d // 2`` on both sides) keeps the full difference even when it is odd;
    the old code silently dropped one row/column of padding in that case.
    """
    top, bottom, left, right = 0, 0, 0, 0
    h, w, _ = image.shape
    longest_edge = max(h, w)
    if h < longest_edge:
        d = longest_edge - h
        top = d // 2
        bottom = d - top    # was d // 2: lost one row of padding when d was odd
    elif w < longest_edge:
        d = longest_edge - w
        left = d // 2
        right = d - left    # was d // 2: lost one column of padding when d was odd
    BLACK = [0, 0, 0]
    constant = cv2.copyMakeBorder(image, top, bottom, left, right,
                                  cv2.BORDER_CONSTANT, value=BLACK)
    return cv2.resize(constant, (height, width))
def read_dataset(dir_path):
    """Load every ``.jpg`` image in ``dir_path``, resized to Image_size squares.

    :param dir_path: directory path ending with '/'; its final path component
        (via deal_dir_str) is used as the label for every image it contains
    :return: (images, labels) parallel lists
    """
    images, labels = [], []
    dir_name = deal_dir_str(dir_path)
    for dir_item in os.listdir(dir_path):
        # Only .jpg files are treated as samples; everything else is skipped.
        if dir_item.endswith('.jpg'):
            full_path = os.path.abspath(os.path.join(dir_path, dir_item))
            image = cv2.imread(full_path)
            image = resize_image(image, Image_size, Image_size)
            images.append(image)
            labels.append(dir_name)
    # The original printed the growing label list once per file (quadratic
    # debug spam); print the final list a single time instead.
    print(labels)
    return images, labels
def deal_dir_str(dir_path):
    """Return the last path component of ``dir_path``.

    ``dir_path`` is expected to end with '/'; the trailing character is
    dropped and the text after the last remaining '/' is returned.
    """
    trimmed = dir_path[:-1]                  # drop the trailing '/'
    return trimmed[trimmed.rfind('/') + 1:]  # rfind == -1 -> whole string
####################################
def load_dataset(dir_name):
    """Read one image directory and return (images, binary labels) as numpy arrays.

    Label 1 marks images whose directory name ends with 'FaceData_wong';
    every other directory is labelled 0.
    """
    images, labels = read_dataset(dir_name)
    images = np.array(images)
    print("(圖片檔案數量,長,寬,色彩通道)={}".format(images.shape))
    data_labels = np.array(
        [1 if label.endswith('FaceData_wong') else 0 for label in labels])
    return images, data_labels
#####################################
#####################################
####### 測試區 ######
def data_list(dir_path):
    """Read one image directory and return (image list, binary label list).

    Label 1 marks images from a directory whose name ends with 'Trump_test';
    everything else is labelled 0.
    """
    images, labels = read_dataset(dir_path)
    data_labels = [1 if label.endswith('Trump_test') else 0 for label in labels]
    return images, data_labels
def load_multi_dataset(dir_list):
    """Concatenate the images and labels from every directory in ``dir_list``.

    :param dir_list: list of directory paths (each ending with '/')
    :return: (images, labels) as numpy arrays
    """
    all_imgs, all_labels = [], []
    for dir_path in dir_list:
        images, labels = data_list(dir_path)
        all_imgs.extend(images)
        all_labels.extend(labels)
    return np.array(all_imgs), np.array(all_labels)
####### 測試區 ######
######################################
#4face_train.py
IMAGE_SIZE = 64
class Dataset:
    """Holds the train/test split for a two-class face data set.

    The commented-out alternative constructor and the unused path_name3..5
    slots from the original have been removed as dead code.
    """

    def __init__(self, path_name1, path_name2):
        """
        :param path_name1: image directory of the first class (ends with '/')
        :param path_name2: image directory of the second class (ends with '/')
        """
        # All four arrays are populated by load().
        self.train_images = None
        self.train_labels = None
        self.test_images = None
        self.test_labels = None
        # Source directories.
        self.path_name1 = path_name1
        self.path_name2 = path_name2
        self.dir_list = [self.path_name1, self.path_name2]
        # (rows, cols, channels) once load() has run.
        self.input_shape = None

    def load(self, img_rows=IMAGE_SIZE, img_cols=IMAGE_SIZE,
             img_channels=3, nb_classes=2):
        """Read the images, split 70/30 into train/test, one-hot encode the
        labels and normalise pixel values into [0, 1]."""
        images, labels = load_multi_dataset(self.dir_list)
        train_images, test_images, train_labels, test_labels = train_test_split(
            images, labels, test_size=0.3, random_state=random.randint(0, 10))
        print(train_images.shape[0], 'train samples')
        print(test_images.shape[0], 'test samples')
        # One-hot encode so categorical_crossentropy can be used.
        train_labels = np_utils.to_categorical(train_labels, nb_classes)
        test_labels = np_utils.to_categorical(test_labels, nb_classes)
        # Normalise pixel values into [0, 1].
        train_images = train_images.astype('float32') / 255.0
        test_images = test_images.astype('float32') / 255.0
        self.input_shape = (img_rows, img_cols, img_channels)
        self.train_images = train_images
        self.test_images = test_images
        self.train_labels = train_labels
        self.test_labels = test_labels
class MODEL:
    """Convolutional network for two-class face recognition.

    Wraps building, training, saving/loading, evaluating and single-image
    prediction of a Keras Sequential model.
    """
    def __init__(self):
        self.model = None        # keras Sequential; created by build_model()/load_model()
        self.history = object()  # keras History; filled in by train()
    def build_model(self,dataset,nb_classes = 2):
        """Assemble the CNN: three conv/pool stages -> dense(512) -> softmax.

        Input shape is hard-coded to 64x64x3; ``dataset`` is accepted for
        interface symmetry but not used here.
        """
        self.model = Sequential()
        # Layers below come from the first reference implementation.
        self.model.add(Conv2D(32,kernel_size=(3,3),padding = "same", input_shape = (64,64,3),activation = "relu"))
        self.model.add(MaxPool2D(pool_size=(2,2)))
        self.model.add(Conv2D(32,kernel_size = (3,3),padding = "same",activation="relu"))
        self.model.add(MaxPool2D(pool_size = (2,2)))
        self.model.add(Dropout(0.25))
        self.model.add(Conv2D(64,3,3,padding="same",activation="relu"))
        self.model.add(MaxPool2D(pool_size=(2,2)))
        self.model.add(Dropout(0.25))
        self.model.add(Flatten())
        self.model.add(Dense(512,activation="relu"))
        self.model.add(Dropout(0.5))
        self.model.add(Dense(nb_classes,activation = "softmax"))
        self.model.summary()
    def train(self, dataset, batch_size = 20, epochs = 20, data_augmentation = False):
        """Compile with SGD + categorical cross-entropy and fit on ``dataset``.

        With ``data_augmentation`` the training images are randomly rotated,
        shifted and flipped on the fly via ImageDataGenerator.
        """
        sgd = SGD(learning_rate = 0.01, momentum = 0.9, nesterov = False, decay = 1e-6)
        self.model.compile(loss='categorical_crossentropy',optimizer = sgd, metrics = ['accuracy'])
        if not data_augmentation:
            self.history = self.model.fit(dataset.train_images, dataset.train_labels,
                                          batch_size = batch_size, epochs = epochs,
                                          validation_data = (dataset.test_images, dataset.test_labels),
                                          shuffle = True)
        else:
            datagen = ImageDataGenerator(
                featurewise_center = False,             # no dataset-wide zero-centering
                samplewise_center = False,              # no per-sample zero-centering
                featurewise_std_normalization = False,  # no division by dataset std
                samplewise_std_normalization = False,   # no division by per-sample std
                zca_whitening = False,                  # no ZCA whitening
                rotation_range = 20,                    # random rotation up to 20 degrees (0-180 allowed)
                width_shift_range = 0.2,                # horizontal shift, fraction of width
                height_shift_range = 0.2,               # vertical shift, fraction of height
                horizontal_flip = True,                 # random horizontal flips
                vertical_flip = False)                  # no vertical flips
            datagen.fit(dataset.train_images)
            self.history = self.model.fit_generator(datagen.flow(dataset.train_images,dataset.train_labels,batch_size = batch_size),
                                                    steps_per_epoch = None,
                                                    epochs = epochs,
                                                    validation_data = (dataset.test_images,dataset.test_labels))
    MODEL_PATH = './face_model.h5'  # default location used by load_model()
    def save_model(self,file_path):
        """Persist the model (architecture + weights) to an HDF5 file."""
        self.model.save(file_path)
    def load_model(self,file_path = MODEL_PATH):
        """Load a previously saved model from ``file_path``."""
        self.model = load_model(file_path)
    def evaluate(self,dataset):
        """Print accuracy on the dataset's held-out test split."""
        score = self.model.evaluate(dataset.test_images, dataset.test_labels, verbose = 1)
        print(f'{self.model.metrics_names[1]}:{score[1] * 100}%')
    def face_predict(self,image):
        """Classify one BGR image.

        The image is squared/resized, normalised into [0, 1] and pushed
        through the network; returns an array holding the predicted class
        index (argmax over the softmax output).
        """
        image = resize_image(image)
        image = image.reshape((1,IMAGE_SIZE,IMAGE_SIZE,3))
        image = image.astype('float32')
        image = image / 255.0
        result = self.model.predict(image)
        result = np.argmax(result,axis =1)
        return result
# def show_img(img):
# cv2.imshow('test',img)
# cv2.waitKey(0)
# cv2.destroyAllWindows()
def face_recognition_api(filename):
    """Classify the face image stored at ``filename``.

    Loads the pre-trained model from ./model/only_face_trump.h5 and returns
    a Chinese result string (class 1 = Trump, otherwise Kim Jong-un).
    """
    print(filename)
    classifier = MODEL()
    classifier.load_model('./model/only_face_trump.h5')
    prediction = classifier.face_predict(cv2.imread(filename))
    return "這張圖片辨識為川普" if prediction[0] == 1 else "這張圖片辨識為金正恩"
# if __name__ == '__main__':
# model = MODEL()
# model.load_model('./model/only_face_trump.h5')
# img = cv2.imread('./test/trump_test1.png')
# result = model.face_predict(img)
# if result[0] == 1:
# print("這張圖片辨識為川普")
# else:
# print("這張圖片辨識為金正恩")
#cv2.imwrite("./static/predict_"+filename+".jpg", img)
#show_img(img)
| 36.125424 | 149 | 0.60289 |
acf0fe5edbaafb6375268afbaa305878602530c2 | 6,327 | py | Python | backend/config.example.py | codebyravi/flask-unchained-react-spa | eef0ee00d3a23bcb26377a5d8bfdfabeaa76eb1d | [
"MIT"
] | 5 | 2018-10-15T15:33:32.000Z | 2021-01-13T23:03:48.000Z | backend/config.example.py | briancappello/flask-unchained-react-spa | 5aaac045f4537660bebd9814c5e91166cdb17ead | [
"MIT"
] | 15 | 2018-10-15T20:14:21.000Z | 2022-03-15T19:15:09.000Z | backend/config.example.py | codebyravi/flask-unchained-react-spa | eef0ee00d3a23bcb26377a5d8bfdfabeaa76eb1d | [
"MIT"
] | 4 | 2018-10-15T15:59:25.000Z | 2020-04-11T17:48:35.000Z | import os
import redis
from appdirs import AppDirs
from datetime import timedelta
from flask_unchained import BundleConfig, get_boolean_env, url_for
from werkzeug.local import LocalProxy
class Config(BundleConfig):
    """Base configuration shared by every environment.

    Values are read from FLASK_* environment variables where available so
    deployments can override them without code changes.
    """
    ##########################################################################
    # flask                                                                  #
    ##########################################################################
    DEBUG = get_boolean_env('FLASK_DEBUG', False)
    FLASH_MESSAGES = False
    SECRET_KEY = os.getenv('FLASK_SECRET_KEY', 'not-secret-key')  # FIXME: insecure default; must be overridden outside development
    # Per-user cache/data directories resolved by appdirs.
    app_dirs = AppDirs('flask-unchained-react-spa')
    APP_CACHE_FOLDER = app_dirs.user_cache_dir
    APP_DATA_FOLDER = app_dirs.user_data_dir
    # Icon CSS classes for the admin UI's category menus.
    ADMIN_CATEGORY_ICON_CLASSES = {
        'Security': 'glyphicon glyphicon-lock',
        'Mail': 'glyphicon glyphicon-envelope',
    }
    ##########################################################################
    # celery                                                                 #
    ##########################################################################
    CELERY_BROKER_URL = 'redis://{host}:{port}/0'.format(
        host=os.getenv('FLASK_REDIS_HOST', '127.0.0.1'),
        port=os.getenv('FLASK_REDIS_PORT', 6379),
    )
    CELERY_RESULT_BACKEND = CELERY_BROKER_URL
    ##########################################################################
    # mail                                                                   #
    ##########################################################################
    MAIL_ADMINS = ['admin@example.com']  # FIXME: placeholder admin address
    MAIL_DEFAULT_SENDER = (
        os.environ.get('FLASK_MAIL_DEFAULT_SENDER_NAME', 'Flask Unchained React SPA'),
        os.environ.get('FLASK_MAIL_DEFAULT_SENDER_EMAIL',
                       f"noreply@{os.environ.get('FLASK_DOMAIN', 'localhost')}")
    )
    ##########################################################################
    # session/cookies                                                        #
    ##########################################################################
    # Sessions are stored server-side in redis.
    SESSION_TYPE = 'redis'
    SESSION_REDIS = redis.Redis(
        host=os.getenv('FLASK_REDIS_HOST', '127.0.0.1'),
        port=int(os.getenv('FLASK_REDIS_PORT', 6379)),
    )
    SESSION_PROTECTION = 'strong'
    SESSION_COOKIE_HTTPONLY = True
    SESSION_COOKIE_SECURE = True
    REMEMBER_COOKIE_HTTPONLY = True

    # SECURITY_TOKEN_MAX_AGE is fixed from time of token generation;
    # it does not update on refresh like a session timeout would. for that,
    # we set (the ironically named) PERMANENT_SESSION_LIFETIME
    PERMANENT_SESSION_LIFETIME = timedelta(minutes=60)
    ##########################################################################
    # security                                                               #
    ##########################################################################
    SECURITY_PASSWORD_SALT = 'security-password-salt'
    # Enabled flask-security features.
    SECURITY_CONFIRMABLE = True
    SECURITY_REGISTERABLE = True
    SECURITY_RECOVERABLE = True
    SECURITY_CHANGEABLE = True

    ADMIN_LOGIN_ENDPOINT = 'admin.login'
    ADMIN_LOGOUT_ENDPOINT = 'admin.logout'
    SECURITY_POST_LOGIN_REDIRECT_ENDPOINT = 'admin.index'
    # LocalProxy defers url_for until an app/request context exists.
    ADMIN_POST_LOGOUT_ENDPOINT = LocalProxy(
        lambda: url_for('frontend.index', _external=True))
    SECURITY_FORGOT_PASSWORD_ENDPOINT = 'frontend.forgot_password'
    SECURITY_API_RESET_PASSWORD_HTTP_GET_REDIRECT = 'frontend.reset_password'
    SECURITY_INVALID_RESET_TOKEN_REDIRECT = LocalProxy(
        lambda: url_for('frontend.forgot_password', _external=True) + '?invalid')
    SECURITY_EXPIRED_RESET_TOKEN_REDIRECT = LocalProxy(
        lambda: url_for('frontend.forgot_password', _external=True) + '?expired')
    SECURITY_POST_CONFIRM_REDIRECT_ENDPOINT = LocalProxy(
        lambda: url_for('frontend.index', _external=True) + '?welcome')
    SECURITY_CONFIRM_ERROR_REDIRECT_ENDPOINT = LocalProxy(
        lambda: url_for('frontend.resend_confirmation_email', _external=True))
    ##########################################################################
    # database                                                               #
    ##########################################################################
    SQLALCHEMY_DATABASE_URI = '{engine}://{user}:{pw}@{host}:{port}/{db}'.format(
        engine=os.getenv('FLASK_DATABASE_ENGINE', 'postgresql+psycopg2'),
        user=os.getenv('FLASK_DATABASE_USER', 'flask_api'),
        pw=os.getenv('FLASK_DATABASE_PASSWORD', 'flask_api'),
        host=os.getenv('FLASK_DATABASE_HOST', '127.0.0.1'),
        port=os.getenv('FLASK_DATABASE_PORT', 5432),
        db=os.getenv('FLASK_DATABASE_NAME', 'flask_api'))
class DevConfig(Config):
    """Local development: debug on, MailHog for outgoing mail, insecure
    cookies because dev runs over plain http."""
    DEBUG = get_boolean_env('FLASK_DEBUG', True)
    # EXPLAIN_TEMPLATE_LOADING = True
    # SQLALCHEMY_ECHO = True
    SERVER_NAME = '{host}:5000'.format(host=os.getenv('API_HOST', 'localhost'))
    EXTERNAL_SERVER_NAME = 'http://localhost:8888'
    SESSION_COOKIE_SECURE = False  # dev server is plain http
    ##########################################################################
    # mail                                                                   #
    ##########################################################################
    MAIL_PORT = 1025  # MailHog
    MAIL_DEFAULT_SENDER = ('Flask Unchained React SPA', 'noreply@localhost')  # FIXME
    ##########################################################################
    # security                                                               #
    ##########################################################################
    SECURITY_CONFIRM_EMAIL_WITHIN = '1 minutes'  # for easier manual testing
class ProdConfig(Config):
    """Production settings; currently inherits everything from Config."""
    pass
class StagingConfig(ProdConfig):
    """Staging mirrors production; override here when staging must differ."""
    pass
class TestConfig(Config):
    """Automated test suite settings: TESTING mode and a separate *_test
    database so tests never touch development data."""
    TESTING = True
    SQLALCHEMY_DATABASE_URI = '{engine}://{user}:{pw}@{host}:{port}/{db}'.format(
        engine=os.getenv('FLASK_DATABASE_ENGINE', 'postgresql+psycopg2'),
        user=os.getenv('FLASK_DATABASE_USER', 'flask_api_test'),
        pw=os.getenv('FLASK_DATABASE_PASSWORD', 'flask_api_test'),
        host=os.getenv('FLASK_DATABASE_HOST', '127.0.0.1'),
        port=os.getenv('FLASK_DATABASE_PORT', 5432),
        db=os.getenv('FLASK_DATABASE_NAME', 'flask_api_test'))
| 44.87234 | 86 | 0.507508 |
acf0fedbc4f4d49e3747ff11418ed85988314cf8 | 6,126 | py | Python | kubernetes/client/models/v1beta1_pod_disruption_budget_spec.py | jashandeep-sohi/kubernetes-python | e057f273069de445a2d5a250ac5fe37d79671f3b | [
"Apache-2.0"
] | 1 | 2020-05-08T12:41:04.000Z | 2020-05-08T12:41:04.000Z | kubernetes/client/models/v1beta1_pod_disruption_budget_spec.py | jashandeep-sohi/kubernetes-python | e057f273069de445a2d5a250ac5fe37d79671f3b | [
"Apache-2.0"
] | null | null | null | kubernetes/client/models/v1beta1_pod_disruption_budget_spec.py | jashandeep-sohi/kubernetes-python | e057f273069de445a2d5a250ac5fe37d79671f3b | [
"Apache-2.0"
] | 2 | 2021-07-09T08:49:05.000Z | 2021-08-03T18:08:36.000Z | # coding: utf-8
"""
Kubernetes
No description provided (generated by Swagger Codegen https://github.com/swagger-api/swagger-codegen)
OpenAPI spec version: v1.10.0
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from pprint import pformat
from six import iteritems
import re
class V1beta1PodDisruptionBudgetSpec(object):
    """
    NOTE: This class is auto generated by the swagger code generator program.
    Do not edit the class manually.
    """

    """
    Attributes:
      swagger_types (dict): The key is attribute name
                            and the value is attribute type.
      attribute_map (dict): The key is attribute name
                            and the value is json key in definition.
    """
    # Maps python attribute name -> swagger type string.
    swagger_types = {
        'max_unavailable': 'object',
        'min_available': 'object',
        'selector': 'V1LabelSelector'
    }

    # Maps python attribute name -> camelCase JSON key in the API definition.
    attribute_map = {
        'max_unavailable': 'maxUnavailable',
        'min_available': 'minAvailable',
        'selector': 'selector'
    }

    def __init__(self, max_unavailable=None, min_available=None, selector=None):
        """
        V1beta1PodDisruptionBudgetSpec - a model defined in Swagger
        """
        self._max_unavailable = None
        self._min_available = None
        self._selector = None
        self.discriminator = None

        # Assign through the property setters only when a value was supplied.
        if max_unavailable is not None:
            self.max_unavailable = max_unavailable
        if min_available is not None:
            self.min_available = min_available
        if selector is not None:
            self.selector = selector

    @property
    def max_unavailable(self):
        """
        Gets the max_unavailable of this V1beta1PodDisruptionBudgetSpec.
        An eviction is allowed if at most \"maxUnavailable\" pods selected by \"selector\" are unavailable after the eviction, i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. This is a mutually exclusive setting with \"minAvailable\".

        :return: The max_unavailable of this V1beta1PodDisruptionBudgetSpec.
        :rtype: object
        """
        return self._max_unavailable

    @max_unavailable.setter
    def max_unavailable(self, max_unavailable):
        """
        Sets the max_unavailable of this V1beta1PodDisruptionBudgetSpec.
        An eviction is allowed if at most \"maxUnavailable\" pods selected by \"selector\" are unavailable after the eviction, i.e. even in absence of the evicted pod. For example, one can prevent all voluntary evictions by specifying 0. This is a mutually exclusive setting with \"minAvailable\".

        :param max_unavailable: The max_unavailable of this V1beta1PodDisruptionBudgetSpec.
        :type: object
        """
        self._max_unavailable = max_unavailable

    @property
    def min_available(self):
        """
        Gets the min_available of this V1beta1PodDisruptionBudgetSpec.
        An eviction is allowed if at least \"minAvailable\" pods selected by \"selector\" will still be available after the eviction, i.e. even in the absence of the evicted pod.  So for example you can prevent all voluntary evictions by specifying \"100%\".

        :return: The min_available of this V1beta1PodDisruptionBudgetSpec.
        :rtype: object
        """
        return self._min_available

    @min_available.setter
    def min_available(self, min_available):
        """
        Sets the min_available of this V1beta1PodDisruptionBudgetSpec.
        An eviction is allowed if at least \"minAvailable\" pods selected by \"selector\" will still be available after the eviction, i.e. even in the absence of the evicted pod.  So for example you can prevent all voluntary evictions by specifying \"100%\".

        :param min_available: The min_available of this V1beta1PodDisruptionBudgetSpec.
        :type: object
        """
        self._min_available = min_available

    @property
    def selector(self):
        """
        Gets the selector of this V1beta1PodDisruptionBudgetSpec.
        Label query over pods whose evictions are managed by the disruption budget.

        :return: The selector of this V1beta1PodDisruptionBudgetSpec.
        :rtype: V1LabelSelector
        """
        return self._selector

    @selector.setter
    def selector(self, selector):
        """
        Sets the selector of this V1beta1PodDisruptionBudgetSpec.
        Label query over pods whose evictions are managed by the disruption budget.

        :param selector: The selector of this V1beta1PodDisruptionBudgetSpec.
        :type: V1LabelSelector
        """
        self._selector = selector

    def to_dict(self):
        """
        Returns the model properties as a dict
        """
        result = {}

        for attr, _ in iteritems(self.swagger_types):
            value = getattr(self, attr)
            if isinstance(value, list):
                # Recursively serialize model objects held in lists.
                result[attr] = list(map(
                    lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
                    value
                ))
            elif hasattr(value, "to_dict"):
                result[attr] = value.to_dict()
            elif isinstance(value, dict):
                # Recursively serialize model objects held as dict values.
                result[attr] = dict(map(
                    lambda item: (item[0], item[1].to_dict())
                    if hasattr(item[1], "to_dict") else item,
                    value.items()
                ))
            else:
                result[attr] = value

        return result

    def to_str(self):
        """
        Returns the string representation of the model
        """
        return pformat(self.to_dict())

    def __repr__(self):
        """
        For `print` and `pprint`
        """
        return self.to_str()

    def __eq__(self, other):
        """
        Returns true if both objects are equal
        """
        if not isinstance(other, V1beta1PodDisruptionBudgetSpec):
            return False

        return self.__dict__ == other.__dict__

    def __ne__(self, other):
        """
        Returns true if both objects are not equal
        """
        return not self == other
| 33.47541 | 297 | 0.629285 |
acf0ff15e60c37ccacbad0cf250f798fd8b383c7 | 4,514 | py | Python | kubernetes/models/v1/PersistentVolumeClaimSpec.py | riconnon/kubernetes-py | 42a4537876985ed105ee44b6529763ba5d57c179 | [
"Apache-2.0"
] | null | null | null | kubernetes/models/v1/PersistentVolumeClaimSpec.py | riconnon/kubernetes-py | 42a4537876985ed105ee44b6529763ba5d57c179 | [
"Apache-2.0"
] | null | null | null | kubernetes/models/v1/PersistentVolumeClaimSpec.py | riconnon/kubernetes-py | 42a4537876985ed105ee44b6529763ba5d57c179 | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# This file is subject to the terms and conditions defined in
# file 'LICENSE.md', which is part of this source code package.
#
from kubernetes.models.v1.PersistentVolumeSpec import PersistentVolumeSpec
from kubernetes.models.v1.ResourceRequirements import ResourceRequirements
from kubernetes.models.v1beta1.LabelSelector import LabelSelector
from kubernetes.utils import is_valid_list, is_valid_string
class PersistentVolumeClaimSpec(object):
    """
    http://kubernetes.io/docs/api-reference/v1/definitions/#_v1_persistentvolumeclaimspec
    """

    # Resource names accepted in requests; for PVCs only 'storage' is meaningful.
    VALID_RESOURCES = ['storage']

    def __init__(self, model=None):
        # model: optional deserialized K8s API dict (camelCase keys) to build from.
        super(PersistentVolumeClaimSpec, self).__init__()

        self._access_modes = []
        self._selector = LabelSelector()
        self._resources = ResourceRequirements()
        self._volume_name = None
        self._storage_class_name = ""

        # Defaults: a single-node read/write claim requesting 10Gi of storage.
        self.access_modes = ['ReadWriteOnce']
        self.resources.requests = {'storage': '10Gi'}

        if model is not None:
            self._build_with_model(model)

    def _build_with_model(self, model=None):
        # Copy the known camelCase API keys into the typed attributes.
        if 'accessModes' in model:
            self.access_modes = model['accessModes']
        if 'resources' in model:
            self.resources = ResourceRequirements(model['resources'])
        if 'storageClassName' in model:
            self.storage_class_name = model['storageClassName']
        if 'selector' in model:
            self.selector = LabelSelector(model['selector'])
        if 'volumeName' in model:
            self.volume_name = model['volumeName']

    # ------------------------------------------------------------------------------------- accessModes

    @property
    def access_modes(self):
        return self._access_modes

    @access_modes.setter
    def access_modes(self, modes=None):
        if not is_valid_list(modes, str):
            raise SyntaxError('PersistentVolumeClaimSpec: access_modes: [ {} ] is invalid.'.format(modes))
        # NOTE(review): modes not in PersistentVolumeSpec.VALID_ACCESS_MODES are
        # silently dropped rather than raising — confirm this is intended.
        filtered = list(filter(lambda x: x in PersistentVolumeSpec.VALID_ACCESS_MODES, modes))
        self._access_modes = filtered

    # ------------------------------------------------------------------------------------- resources

    @property
    def resources(self):
        return self._resources

    @resources.setter
    def resources(self, res=None):
        if not isinstance(res, ResourceRequirements):
            raise SyntaxError('PersistentVolumeClaimSpec: resources: [ {} ] is invalid.'.format(res))
        self._resources = res

    # ------------------------------------------------------------------------------------- selector

    @property
    def selector(self):
        return self._selector

    @selector.setter
    def selector(self, sel=None):
        if not isinstance(sel, LabelSelector):
            raise SyntaxError('PersistentVolumeClaimSpec: selector: [ {} ] is invalid.'.format(sel))
        self._selector = sel

    # ------------------------------------------------------------------------------------- storage_class_name

    @property
    def storage_class_name(self):
        return self._storage_class_name

    @storage_class_name.setter
    def storage_class_name(self, name=None):
        if not is_valid_string(name):
            raise SyntaxError('PersistentVolumeClaimSpec: storage_class_name: [ {} ] is invalid.'.format(name))
        self._storage_class_name = name

    # ------------------------------------------------------------------------------------- volumeName

    @property
    def volume_name(self):
        return self._volume_name

    @volume_name.setter
    def volume_name(self, name=None):
        if not is_valid_string(name):
            raise SyntaxError('PersistentVolumeClaimSpec: volume_name: [ {} ] is invalid.'.format(name))
        self._volume_name = name

    # ------------------------------------------------------------------------------------- serialize

    def serialize(self):
        # Emit the camelCase API representation; only set attributes are included.
        data = {}
        if self.access_modes is not None:
            data['accessModes'] = self.access_modes
        if self.selector is not None:
            data['selector'] = self.selector.serialize()
        if self.storage_class_name is not None:
            data['storageClassName'] = self.storage_class_name
        if self.resources is not None:
            data['resources'] = self.resources.serialize()
        if self.volume_name is not None:
            data['volumeName'] = self.volume_name
        return data
| 36.112 | 111 | 0.587506 |
acf0ff3fa33c0d55a305a0b84c20a6ef9e712468 | 1,649 | py | Python | sgp/GraphUtil.py | arongdari/sparse-graph-prior | 01bbe59d356b24e9967851d3ab5d7195c3bcd790 | [
"MIT"
] | 1 | 2016-12-08T19:04:31.000Z | 2016-12-08T19:04:31.000Z | sgp/GraphUtil.py | dongwookim-ml/sparse-graph-prior | 01bbe59d356b24e9967851d3ab5d7195c3bcd790 | [
"MIT"
] | 1 | 2016-07-10T05:20:44.000Z | 2016-07-10T05:20:44.000Z | sgp/GraphUtil.py | dongwookim-ml/sparse-graph-prior | 01bbe59d356b24e9967851d3ab5d7195c3bcd790 | [
"MIT"
] | null | null | null | import numpy as np
import networkx as nx
from collections import defaultdict
from scipy.sparse import csr_matrix, csc_matrix, triu
def sparse_to_networkx(G):
    """Build an undirected networkx graph from a scipy sparse adjacency matrix.

    Every stored (row, col) entry of ``G`` becomes an edge; weights are ignored.
    """
    rows, cols = G.nonzero()
    graph = nx.Graph()
    graph.add_edges_from(zip(rows, cols))
    return graph
def compute_growth_rate(G, n_repeat=10):
    """
    Compute the growth rate of graph G: for every prefix size k, the number
    of links among the first k+1 nodes, averaged over ``n_repeat`` random
    orderings of the nodes.

    :param G: sparse matrix (csc_matrix or csr_matrix)
    :param n_repeat: int, number of random node permutations to average over
    :return: numpy array of length G.shape[0]; entry k is the mean cumulative
             link count for the prefix ending at node k
    """
    n_n = G.shape[0]
    nnz = G.nonzero()
    n_link = defaultdict(list)
    for si in range(n_repeat):
        # Relabel all nodes with a fresh random permutation.
        rnd_nodes = np.arange(n_n, dtype=int)
        np.random.shuffle(rnd_nodes)
        node_dic = {i: n for i, n in enumerate(rnd_nodes)}
        row_idx = list(map(lambda x: node_dic[x], nnz[0]))
        col_idx = list(map(lambda x: node_dic[x], nnz[1]))
        # Keep the permuted matrix in both row- and column-oriented storage
        # so getrow() and getcol() below are both cheap.
        rnd_row = csr_matrix((G.data, (row_idx, col_idx)), shape=G.shape)
        rnd_col = csc_matrix((G.data, (row_idx, col_idx)), shape=G.shape)
        n_link[0].append(0)
        for i in range(1, n_n):
            # Count links between node i and the nodes before it, then add
            # the running total of the previous prefix.
            cnt = 0
            cnt += rnd_row.getrow(i)[:, :i].nnz
            # NOTE(review): the row slice uses :i but the column slice uses
            # :i - 1 — confirm whether this asymmetry (presumably avoiding
            # a double count) is intended.
            cnt += rnd_col.getcol(i)[:i - 1, :].nnz
            n_link[i].append(cnt + n_link[i - 1][-1])
    return np.array([np.mean(n_link[x]) for x in range(n_n)])
def degree_distribution(G):
    """Return the degree distribution of a symmetric sparse adjacency matrix.

    :param G: scipy sparse matrix with both triangles stored
    :return: (defaultdict mapping degree -> node count,
              list where index d holds the number of nodes of degree d,
              for d = 0 .. max degree)

    Fixes over the previous version:
    - ``G.sum(0) + G.sum(1)`` broadcast a (1, n) and an (n, 1) matrix into an
      (n, n) matrix, so per-node degrees mixed in node 0's degree; transposing
      the column sums keeps the result (1, n).
    - Plain division instead of ``/=`` so integer-dtype matrices don't raise
      a casting TypeError.
    - ``range(int(max_d) + 1)`` so the histogram includes the bucket for the
      maximum degree (previously dropped).
    """
    d = defaultdict(int)
    # Row sums + column sums double-count each incident edge, hence the halving.
    degree = (G.sum(0) + G.sum(1).T) / 2
    max_d = degree.max()
    for _d in degree.tolist()[0]:
        d[int(_d)] += 1
    return d, [d[i] for i in range(int(max_d) + 1)]
def degree_one_nodes(G):
    """Count nodes whose halved column-sum degree equals exactly one."""
    halved = G.sum(0) / 2
    return np.sum(halved == 1)
| 25.369231 | 73 | 0.59248 |
acf0ff9b5151f96fa5633454890b6589682201fa | 4,605 | py | Python | nornir_salt/plugins/tasks/pyats_send_config.py | dmulyalin/nornir-salt | 184002995515dddc802b578400370c2219e94957 | [
"MIT"
] | 5 | 2021-01-22T09:34:55.000Z | 2021-12-22T08:12:34.000Z | nornir_salt/plugins/tasks/pyats_send_config.py | dmulyalin/nornir-salt | 184002995515dddc802b578400370c2219e94957 | [
"MIT"
] | 2 | 2022-01-27T14:46:40.000Z | 2022-02-28T16:59:01.000Z | nornir_salt/plugins/tasks/pyats_send_config.py | dmulyalin/nornir-salt | 184002995515dddc802b578400370c2219e94957 | [
"MIT"
] | 1 | 2021-01-10T04:37:08.000Z | 2021-01-10T04:37:08.000Z | """
pyats_send_config
#######################
This task plugin relies on Genie device conection ``config`` method
to send configuration commands to devices over SSH or Telnet.
This task plugin applies device configuration following this sequence:
- Retrieve and use, if any, per-host configuration rendered by SaltStack from host's
inventory data ``task.host.data["__task__"]["commands"]`` or
``task.host.data["__task__"]["filename"]`` locations, use configuration provided
by ``config`` argument otherwise
- If configuration is a multi-line string, split it to a list of commands
- Check if device in enable mode, if not enter device enabled mode if device supports it
- Push configuration commands to device using ``send_config_set`` Netmiko connection's method,
if ``batch`` argument given, pushes commands in batches
- If ``commit`` argument provided, perform configuration commit if device supports it
- If ``commit_final_delay`` argument provided, wait for a given timer and perform final commit
- Exit device configuration mode and return configuration results
Dependencies:
* `PyATS library <https://pypi.org/project/pyats/>`_ required
* `Genie library <https://pypi.org/project/genie/>`_ required
Sample Usage
============
Code to invoke ``pyats_send_config`` task::
from nornir_salt import pyats_send_config
output = nr.run(
task=pyats_send_config,
commands=["sinterface loopback 0", "description 'configured by script'"]
)
``pyats_send_config`` returns Nornir results object with task name set
to ``pyats_send_config`` and results containing configuration commands
applied to device.
API Reference
=============
.. autofunction:: nornir_salt.plugins.tasks.pyats_send_config.pyats_send_config
"""
import logging
import traceback
from nornir.core.task import Result, Task
log = logging.getLogger(__name__)
# define connection name for RetryRunner to properly detect it using:
# connection_name = task.task.__globals__.get("CONNECTION_NAME", None)
CONNECTION_NAME = "pyats"
def pyats_send_config(task: Task, config: str = None, **kwargs):
    """
    Salt-nornir Task function to send configuration to devices using
    the PyATS/Genie device connection's ``configure`` method.

    :param task: (Task) Nornir task object for the current host
    :param config: (str or list) configuration string or list of commands to
        send to device; overridden by per-host data rendered by SaltStack in
        ``task.host.data["__task__"]`` when present
    :param kwargs: (dict) any additional ``**kwargs`` for device connection
        ``configure`` method (reply, timeout, error_pattern, target,
        lock_retries, lock_retry_sleep, bulk, bulk_chunk_lines,
        bulk_chunk_sleep -- see module docstring)
    :raises ValueError: if ``dry_run=True`` is requested (unsupported)
    :return result: Nornir Result object; ``failed``/``exception`` are set and
        ``changed`` reset to False if the device raises during configure
    """
    # run sanity check -- this plugin always pushes config for real
    if kwargs.get("dry_run"):
        raise ValueError("pyats_send_config does not support dry_run")

    task.name = "pyats_send_config"
    task_result = Result(host=task.host, result=[], changed=True)

    # get PyATS testbed, device object
    testbed = task.host.get_connection(CONNECTION_NAME, task.nornir.config)
    device = testbed.devices[task.host.name]

    # per-host config rendered by SaltStack takes precedence over `config`
    if "commands" in task.host.data.get("__task__", {}):
        config = task.host.data["__task__"]["commands"]
    elif "filename" in task.host.data.get("__task__", {}):
        config = task.host.data["__task__"]["filename"]

    # transform configuration to a list if string given
    if isinstance(config, str):
        config = config.splitlines()

    # send config
    try:
        task_result.result = device.configure(config, **kwargs)
    except Exception:
        # was a bare `except:` -- that also swallowed SystemExit and
        # KeyboardInterrupt; Exception keeps the same device-error handling
        log.exception("nornir-salt:pyats_send_config configure error")
        task_result.failed = True
        task_result.exception = traceback.format_exc()
        task_result.changed = False

    return task_result
| 39.358974 | 96 | 0.727904 |
acf0ffb84bc04da776050188917b212353a89f22 | 1,670 | py | Python | test/PR_test/unit_test/backend/test_hinge.py | TortoiseHam/fastestimator | 97b9fe134a8b5cc3cf21e84c782d1149eecfa3cc | [
"Apache-2.0"
] | 57 | 2019-05-21T21:29:26.000Z | 2022-02-23T05:55:21.000Z | test/PR_test/unit_test/backend/test_hinge.py | vbvg2008/fastestimator | 6061a4fbbeb62a2194ef82ba8017f651710d0c65 | [
"Apache-2.0"
] | 93 | 2019-05-23T18:36:07.000Z | 2022-03-23T17:15:55.000Z | test/PR_test/unit_test/backend/test_hinge.py | vbvg2008/fastestimator | 6061a4fbbeb62a2194ef82ba8017f651710d0c65 | [
"Apache-2.0"
] | 47 | 2019-05-09T15:41:37.000Z | 2022-03-26T17:00:08.000Z | # Copyright 2020 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import unittest
import tensorflow as tf
import torch
import fastestimator as fe
from fastestimator.test.unittest_util import is_equal
class TestHinge(unittest.TestCase):
    """Checks fe.backend.hinge against hand-computed values on both backends."""

    def test_tf(self):
        """TensorFlow inputs: per-row hinge loss must match expected values."""
        y_true = tf.constant([[-1, 1, 1, -1], [1, 1, 1, 1], [-1, -1, 1, -1], [1, -1, -1, -1]])
        y_pred = tf.constant([[0.1, 0.9, 0.05, 0.05], [0.1, -0.2, 0.0, -0.7], [0.0, 0.15, 0.8, 0.05],
                              [1.0, -1.0, -1.0, -1.0]])
        loss = fe.backend.hinge(y_pred=y_pred, y_true=y_true)
        expected = tf.constant([0.8, 1.2, 0.85, 0.0])
        self.assertTrue(is_equal(loss, expected))

    def test_torch(self):
        """PyTorch inputs: same data, same expected per-row losses."""
        y_true = torch.tensor([[-1, 1, 1, -1], [1, 1, 1, 1], [-1, -1, 1, -1], [1, -1, -1, -1]])
        y_pred = torch.tensor([[0.1, 0.9, 0.05, 0.05], [0.1, -0.2, 0.0, -0.7], [0.0, 0.15, 0.8, 0.05],
                               [1.0, -1.0, -1.0, -1.0]])
        loss = fe.backend.hinge(y_pred=y_pred, y_true=y_true)
        expected = torch.tensor([0.8, 1.2, 0.85, 0.0])
        self.assertTrue(is_equal(loss, expected))
acf0ffe6ad163da2a8bcb79bb18cfe03b8dd2298 | 837 | py | Python | web-app/ride/migrations/0008_auto_20190206_1710.py | kayzhang/Ride-Sharing-Service | 9ef63203a899ca78aac5732de68ccb77d3041a0e | [
"MIT"
] | null | null | null | web-app/ride/migrations/0008_auto_20190206_1710.py | kayzhang/Ride-Sharing-Service | 9ef63203a899ca78aac5732de68ccb77d3041a0e | [
"MIT"
] | null | null | null | web-app/ride/migrations/0008_auto_20190206_1710.py | kayzhang/Ride-Sharing-Service | 9ef63203a899ca78aac5732de68ccb77d3041a0e | [
"MIT"
] | null | null | null | # Generated by Django 2.1.5 on 2019-02-06 22:10
from django.db import migrations, models
class Migration(migrations.Migration):
    """Auto-generated: add vehicle/driver fields to the Ride model."""

    dependencies = [
        ('ride', '0007_auto_20190203_2052'),
    ]

    operations = [
        # Plate of the serving vehicle; blank until a driver accepts the ride.
        migrations.AddField(
            model_name='ride',
            name='license_plate_number',
            field=models.CharField(blank=True, max_length=20, verbose_name='License Plate Number'),
        ),
        # Seating capacity; default 0 means not yet known.
        # NOTE(review): "Maxmium" typo lives in the stored verbose_name --
        # fixing it requires a follow-up migration, not an edit here.
        migrations.AddField(
            model_name='ride',
            name='max_pas_num',
            field=models.IntegerField(default=0, verbose_name='Maxmium Number of Passengers'),
        ),
        migrations.AddField(
            model_name='ride',
            name='vehicle_type',
            field=models.CharField(blank=True, max_length=200, verbose_name='Vehicle Type'),
        ),
    ]
| 28.862069 | 99 | 0.603345 |
acf100b8eaacba13b5ea52da8dedf4d0762e3393 | 6,875 | py | Python | experiments/plot_test_results.py | vene/marseille | c86faf3d97fd9063a6fe0ee74b302f09250e36c5 | [
"BSD-3-Clause"
] | 65 | 2017-04-25T01:14:03.000Z | 2022-03-22T06:11:48.000Z | experiments/plot_test_results.py | vene/marseille | c86faf3d97fd9063a6fe0ee74b302f09250e36c5 | [
"BSD-3-Clause"
] | 9 | 2017-07-18T16:47:51.000Z | 2021-03-15T20:25:27.000Z | experiments/plot_test_results.py | vene/marseille | c86faf3d97fd9063a6fe0ee74b302f09250e36c5 | [
"BSD-3-Clause"
] | 34 | 2017-04-25T14:38:18.000Z | 2021-12-20T12:50:56.000Z | """Assuming test predictions are available, compute and display scores."""
import os
import sys
import warnings
import numpy as np
import dill
from sklearn.metrics import precision_recall_fscore_support, f1_score
from marseille.datasets import get_dataset_loader
from marseille.custom_logging import logging
def arg_p_r_f(Y_true, Y_pred, labels, **kwargs):
    """Precision/recall/F1 over a collection of per-document label sequences.

    Macro scores average per-document results; micro scores pool every
    prediction first.  ``kwargs`` are forwarded to sklearn's
    ``precision_recall_fscore_support`` and must contain ``average``.
    """
    macro_p = []
    macro_r = []
    macro_f = []
    micro_true = []
    micro_pred = []
    for y_true, y_pred in zip(Y_true, Y_pred):
        p, r, f, _ = precision_recall_fscore_support(y_true, y_pred,
                                                     **kwargs)
        macro_p.append(p)
        macro_r.append(r)
        macro_f.append(f)
        micro_true.extend(y_true)
        micro_pred.extend(y_pred)
    micro_p, micro_r, micro_f, _ = precision_recall_fscore_support(
        micro_true, micro_pred, **kwargs
    )
    # 'average' must not be passed alongside average=None in the call below
    kwargs.pop('average')
    per_class_fs = f1_score(micro_true, micro_pred, average=None, **kwargs)
    res = {
        'p_macro': np.mean(macro_p),
        'r_macro': np.mean(macro_r),
        'f_macro': np.mean(macro_f),
        'p_micro': micro_p,
        'r_micro': micro_r,
        'f_micro': micro_f
    }
    # NOTE(review): assumes sklearn returns per-class scores in sorted label
    # order matching sorted(labels) -- confirm all labels occur in the data
    for label, per_class_f in zip(sorted(labels), per_class_fs):
        res['f_class_{}'.format(label)] = per_class_f
    return res
def compute_scores(Y_true, Y_pred, prop_labels, link_labels):
    """Aggregate link/prop/accuracy metrics for argument-mining predictions.

    Each element of Y_true/Y_pred must expose ``.links`` and ``.nodes``.
    Returns a flat dict with ``link_*``, ``prop_*``, ``avg_f_micro`` and
    ``accuracy`` keys.
    """
    # hard accuracy: a document counts only if ALL links and nodes match
    acc = sum(1 for y_true, y_pred in zip(Y_true, Y_pred)
              if np.all(y_true.links == y_pred.links) and
              np.all(y_true.nodes == y_pred.nodes))
    acc /= len(Y_true)

    # `as w` binds None here (catch_warnings() without record=True); the
    # context still restores the filter state after simplefilter('ignore')
    with warnings.catch_warnings() as w:
        warnings.simplefilter('ignore')
        link_results = arg_p_r_f(
            (y.links for y in Y_true),
            (y.links for y in Y_pred),
            labels=link_labels,
            average='binary',
            pos_label=True
        )

        prop_results = arg_p_r_f(
            (y.nodes for y in Y_true),
            (y.nodes for y in Y_pred),
            labels=prop_labels,
            average='macro',
        )

    scores = {"prop_{}".format(key): val for key, val in prop_results.items()}
    scores.update({"link_{}".format(key): val for key, val in
                   link_results.items()})
    # unweighted mean of the two micro-F1s
    scores['avg_f_micro'] = 0.5 * (scores['link_f_micro'] +
                                   scores['prop_f_micro'])
    scores['accuracy'] = acc

    return scores
# tpl = os.path.join("test_results", "{}_{}_{}.predictions.dill")
tpl = os.path.join("test_results",
                   # "exact_predictions",
                   "exact=True_{}_{}_{}.predictions.dill")


if __name__ == '__main__':

    dataset = sys.argv[1]

    if dataset not in ('cdcp', 'ukp'):
        raise ValueError("Unknown dataset {}. "
                         "Supported: ukp|cdcp.".format(dataset))

    link_labels = [False, True]
    prop_labels = (['MajorClaim', 'Claim', 'Premise'] if dataset == 'ukp'
                   else ['value', 'policy', 'testimony', 'fact', 'reference'])

    # get true test labels
    load_te, ids_te = get_dataset_loader(dataset, split='test')
    Y_true = [doc.label for doc in load_te(ids_te)]

    print("dataset={}".format(dataset))

    scores = dict()

    for method in ("linear", "linear-struct", "rnn", "rnn-struct"):
        scores[method] = dict()
        for model in ("bare", "full", "strict"):
            scores[method][model] = dict()
            fn = tpl.format(dataset, method, model)
            if not os.path.isfile(fn):
                logging.info("Could not find {}".format(fn))
                continue
            with open(fn, "rb") as f:
                Y_pred = dill.load(f)
            # compute test scores for this (method, model) combination
            scores[method][model] = compute_scores(Y_true,
                                                   Y_pred,
                                                   prop_labels,
                                                   link_labels)

    # Human-readable row labels for the LaTeX table.  The original defined a
    # longer `pretty` mapping and immediately shadowed it with this one, and
    # also defined `keys` and `_row` twice verbatim; the dead duplicates
    # were removed -- behavior is unchanged.
    pretty = {'avg_f_micro': 'Average',
              'link_f_micro': '{\Link}',
              'prop_f_micro': '{\Prop}',
              'prop_f_class_MajorClaim': '{\quad}MajorClaim',
              'prop_f_class_Claim': '{\quad}Claim',
              'prop_f_class_Premise': '{\quad}Premise',
              'prop_f_class_fact': '{\quad}Fact',
              'prop_f_class_value': '{\quad}Value',
              'prop_f_class_policy': '{\quad}Policy',
              'prop_f_class_testimony': '{\quad}Testimony',
              'prop_f_class_reference': '{\quad}Reference'}

    keys = ['avg_f_micro', 'link_f_micro', 'prop_f_micro']
    keys += ['prop_f_class_{}'.format(lbl) for lbl in prop_labels]

    def _row(numbers):
        """Format one LaTeX table row, bolding the best (max) entry."""
        argmax = np.argmax(numbers)
        strs = ["{:.1f} ".format(100 * x) for x in numbers]
        strs[argmax] = "{\\bf %s}" % strs[argmax][:-1]
        strs = [s.rjust(10) for s in strs]
        return " & ".join(strs)

    for key in keys:
        print("{:>20}".format(pretty[key]), "&", _row([
            scores[method][model].get(key, -1)
            for method in ('linear', 'rnn', 'linear-struct', 'rnn-struct')
            for model in ('bare', 'full', 'strict')]),
              r"\\")
| 34.722222 | 78 | 0.5456 |
acf1024c9345280c49a3678e66864c401e6ef977 | 4,518 | py | Python | neutron/tests/unit/services/qos/notification_drivers/test_manager.py | ISCAS-VDI/neutron-base | 687f03d7131839ae8bc324d5823194d1245bb050 | [
"Apache-2.0"
] | 1 | 2016-03-25T21:13:13.000Z | 2016-03-25T21:13:13.000Z | neutron/tests/unit/services/qos/notification_drivers/test_manager.py | ISCAS-VDI/neutron-base | 687f03d7131839ae8bc324d5823194d1245bb050 | [
"Apache-2.0"
] | 3 | 2015-02-27T00:48:55.000Z | 2015-04-21T05:29:37.000Z | neutron/tests/unit/services/qos/notification_drivers/test_manager.py | ISCAS-VDI/neutron-base | 687f03d7131839ae8bc324d5823194d1245bb050 | [
"Apache-2.0"
] | 3 | 2015-02-26T00:55:17.000Z | 2020-03-01T17:05:40.000Z | # Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo_config import cfg
from oslo_utils import uuidutils
from neutron.api.rpc.callbacks import events
from neutron import context
from neutron.objects.qos import policy as policy_object
from neutron.services.qos.notification_drivers import manager as driver_mgr
from neutron.services.qos.notification_drivers import message_queue
from neutron.tests.unit.services.qos import base
DUMMY_DRIVER = ("neutron.tests.unit.services.qos.notification_drivers."
                "dummy.DummyQosServiceNotificationDriver")


def _load_multiple_drivers():
    """Point the qos 'notification_drivers' option at both RPC and dummy drivers."""
    drivers = ["message_queue", DUMMY_DRIVER]
    cfg.CONF.set_override("notification_drivers", drivers, "qos")
class TestQosDriversManagerBase(base.BaseQosTestCase):
    """Shared fixture: registers qos options and builds a test QosPolicy."""

    def setUp(self):
        super(TestQosDriversManagerBase, self).setUp()
        self.config_parse()
        self.setup_coreplugin()
        # make sure the qos plugin options exist before tests read/override them
        config = cfg.ConfigOpts()
        config.register_opts(driver_mgr.QOS_PLUGIN_OPTS, "qos")
        self.policy_data = {'policy': {
            'id': uuidutils.generate_uuid(),
            'tenant_id': uuidutils.generate_uuid(),
            'name': 'test-policy',
            'description': 'test policy description',
            'shared': True}}
        self.context = context.get_admin_context()
        self.policy = policy_object.QosPolicy(self.context,
                                              **self.policy_data['policy'])
        ctxt = None
        self.kwargs = {'context': ctxt}
class TestQosDriversManager(TestQosDriversManagerBase):
    """Default configuration: only the RPC (message_queue) driver is loaded."""

    def setUp(self):
        super(TestQosDriversManager, self).setUp()
        #TODO(Qos): Fix this unittest to test manager and not message_queue
        # notification driver
        # mock out the RPC push API so tests can assert on it
        rpc_api_cls = mock.patch('neutron.api.rpc.handlers.resources_rpc'
                                 '.ResourcesPushRpcApi').start()
        self.rpc_api = rpc_api_cls.return_value
        self.driver_manager = driver_mgr.QosServiceNotificationDriverManager()

    def _validate_registry_params(self, event_type, policy):
        # the driver must have pushed `policy` with the given event type
        self.rpc_api.push.assert_called_with(self.context, policy,
                                             event_type)

    def test_create_policy_default_configuration(self):
        #RPC driver should be loaded by default
        # create does not notify -- no RPC push expected
        self.driver_manager.create_policy(self.context, self.policy)
        self.assertFalse(self.rpc_api.push.called)

    def test_update_policy_default_configuration(self):
        #RPC driver should be loaded by default
        self.driver_manager.update_policy(self.context, self.policy)
        self._validate_registry_params(events.UPDATED, self.policy)

    def test_delete_policy_default_configuration(self):
        #RPC driver should be loaded by default
        self.driver_manager.delete_policy(self.context, self.policy)
        self._validate_registry_params(events.DELETED, self.policy)
class TestQosDriversManagerMulti(TestQosDriversManagerBase):
    """With two drivers configured, every operation must reach both of them."""

    def _test_multi_drivers_configuration_op(self, op):
        _load_multiple_drivers()
        driver_manager = driver_mgr.QosServiceNotificationDriverManager()
        handler = '%s_policy' % op
        # patch the handler on both drivers, then fire the manager once
        with mock.patch('.'.join([DUMMY_DRIVER, handler])) as dummy_mock:
            rpc_driver = message_queue.RpcQosServiceNotificationDriver
            with mock.patch.object(rpc_driver, handler) as rpc_mock:
                getattr(driver_manager, handler)(self.context, self.policy)
        for mock_ in (dummy_mock, rpc_mock):
            mock_.assert_called_with(self.context, self.policy)

    def test_multi_drivers_configuration_create(self):
        self._test_multi_drivers_configuration_op('create')

    def test_multi_drivers_configuration_update(self):
        self._test_multi_drivers_configuration_op('update')

    def test_multi_drivers_configuration_delete(self):
        self._test_multi_drivers_configuration_op('delete')
| 41.449541 | 78 | 0.694776 |
acf10275fc283d71ed2ba80851d8fbd94ebf6959 | 81 | py | Python | qnapstats/__init__.py | M4v3r1cK87/python-qnapstats | 9ff63a8353fa882a102d84efac1f9955de3391ed | [
"MIT"
] | 42 | 2017-04-28T13:35:43.000Z | 2022-02-03T06:53:36.000Z | qnapstats/__init__.py | M4v3r1cK87/python-qnapstats | 9ff63a8353fa882a102d84efac1f9955de3391ed | [
"MIT"
] | 60 | 2017-02-12T09:09:36.000Z | 2022-03-26T11:59:57.000Z | qnapstats/__init__.py | M4v3r1cK87/python-qnapstats | 9ff63a8353fa882a102d84efac1f9955de3391ed | [
"MIT"
] | 17 | 2017-02-12T08:12:50.000Z | 2021-12-26T09:52:36.000Z | """Main module for QNAPStats."""
from .qnap_stats import QNAPStats # noqa: F401
| 27 | 47 | 0.728395 |
acf103887f377db63f74b488c85e680299025f9d | 453 | py | Python | records/migrations/0007_auto_20191014_1420.py | heitorchang/students | ba5d6ca721d85aacb5f1563fff6c7d1c4b021d54 | [
"MIT"
] | null | null | null | records/migrations/0007_auto_20191014_1420.py | heitorchang/students | ba5d6ca721d85aacb5f1563fff6c7d1c4b021d54 | [
"MIT"
] | 1 | 2020-06-05T23:35:40.000Z | 2020-06-05T23:35:40.000Z | records/migrations/0007_auto_20191014_1420.py | heitorchang/students | ba5d6ca721d85aacb5f1563fff6c7d1c4b021d54 | [
"MIT"
] | null | null | null | # Generated by Django 2.2.6 on 2019-10-14 17:20
from django.db import migrations
import django.db.models.functions.text
class Migration(migrations.Migration):
    """Auto-generated: order students by teacher, then case-insensitively by name."""

    dependencies = [
        ('records', '0006_auto_20191014_0943'),
    ]

    operations = [
        migrations.AlterModelOptions(
            name='student',
            # Lower() makes the name ordering case-insensitive at the DB level
            options={'ordering': ['teacher', django.db.models.functions.text.Lower('name')]},
        ),
    ]
| 23.842105 | 94 | 0.609272 |
acf1042875f4f5abe06a173a273fecf18046d515 | 3,318 | py | Python | fairseq/modules/__init__.py | fengpeng-yue/speech-to-speech-translation | 099aa326f29c51a882532952186e329a87d2c4d5 | [
"MIT"
] | 2 | 2022-03-30T08:20:16.000Z | 2022-03-30T08:25:48.000Z | fairseq/modules/__init__.py | fengpeng-yue/speech-to-speech-translation | 099aa326f29c51a882532952186e329a87d2c4d5 | [
"MIT"
] | null | null | null | fairseq/modules/__init__.py | fengpeng-yue/speech-to-speech-translation | 099aa326f29c51a882532952186e329a87d2c4d5 | [
"MIT"
] | null | null | null | # Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
"""isort:skip_file"""
from .adaptive_input import AdaptiveInput
from .adaptive_softmax import AdaptiveSoftmax
from .base_layer import BaseLayer
from .beamable_mm import BeamableMM
from .character_token_embedder import CharacterTokenEmbedder
from .conv_tbc import ConvTBC
from .cross_entropy import cross_entropy
from .convolution import ConvolutionModule
from .conformer_layer import ConformerEncoderLayer
from .downsampled_multihead_attention import DownsampledMultiHeadAttention
from .dynamic_convolution import DynamicConv, DynamicConv1dTBC
from .dynamic_crf_layer import DynamicCRF
from .fairseq_dropout import FairseqDropout
from .fp32_group_norm import Fp32GroupNorm
from .gelu import gelu, gelu_accurate
from .grad_multiply import GradMultiply
from .gumbel_vector_quantizer import GumbelVectorQuantizer
from .kmeans_vector_quantizer import KmeansVectorQuantizer
from .layer_drop import LayerDropModuleList
from .layer_norm import Fp32LayerNorm, LayerNorm
from .learned_positional_embedding import LearnedPositionalEmbedding
from .lightweight_convolution import LightweightConv, LightweightConv1dTBC
from .linearized_convolution import LinearizedConvolution
from .location_attention import LocationAttention
from .lstm_cell_with_zoneout import LSTMCellWithZoneOut
from .multihead_attention import MultiheadAttention
from .positional_embedding import PositionalEmbedding
#from .reduced_multihead_attention import ReducedMultiheadAttention
from .rel_position_multihead_attention import RelPositionMultiheadAttention
from .relative_multihead_attention import RelativeMultiheadAttention
from .same_pad import SamePad
from .scalar_bias import ScalarBias
from .sinusoidal_positional_embedding import SinusoidalPositionalEmbedding
from .transformer_sentence_encoder_layer import TransformerSentenceEncoderLayer
from .transformer_sentence_encoder import TransformerSentenceEncoder
from .transpose_last import TransposeLast
from .unfold import unfold1d
from .transformer_layer import TransformerDecoderLayer, TransformerEncoderLayer
from .vggblock import VGGBlock
# Public API of fairseq.modules; every name below is imported above.
# NOTE(review): ReducedMultiheadAttention is commented out in both the import
# section and here -- keep the two lists in sync when re-enabling it.
__all__ = [
    "AdaptiveInput",
    "AdaptiveSoftmax",
    "BaseLayer",
    "BeamableMM",
    "CharacterTokenEmbedder",
    "ConvTBC",
    "cross_entropy",
    "ConformerEncoderLayer",
    "ConvolutionModule",
    "DownsampledMultiHeadAttention",
    "DynamicConv1dTBC",
    "DynamicConv",
    "DynamicCRF",
    "FairseqDropout",
    "Fp32GroupNorm",
    "Fp32LayerNorm",
    "gelu",
    "gelu_accurate",
    "GradMultiply",
    "GumbelVectorQuantizer",
    "KmeansVectorQuantizer",
    "LayerDropModuleList",
    "LayerNorm",
    "LearnedPositionalEmbedding",
    "LightweightConv1dTBC",
    "LightweightConv",
    "LinearizedConvolution",
    "LocationAttention",
    "LSTMCellWithZoneOut",
    "MultiheadAttention",
    "PositionalEmbedding",
    "RelPositionMultiheadAttention",
    "RelativeMultiheadAttention",
    "SamePad",
    "ScalarBias",
    "SinusoidalPositionalEmbedding",
    "TransformerSentenceEncoderLayer",
    "TransformerSentenceEncoder",
    "TransformerDecoderLayer",
    "TransformerEncoderLayer",
    "TransposeLast",
    "VGGBlock",
    "unfold1d",
]
| 36.065217 | 79 | 0.816154 |
acf1058826e50342f322bf8e4b82233049ac73b7 | 6,438 | py | Python | models/network/models_utils.py | SohamChattopadhyayEE/Multi-class-semantic-segmentation | 122bd6c340207bb003110ecc37416b88c33c59e9 | [
"MIT"
] | null | null | null | models/network/models_utils.py | SohamChattopadhyayEE/Multi-class-semantic-segmentation | 122bd6c340207bb003110ecc37416b88c33c59e9 | [
"MIT"
] | null | null | null | models/network/models_utils.py | SohamChattopadhyayEE/Multi-class-semantic-segmentation | 122bd6c340207bb003110ecc37416b88c33c59e9 | [
"MIT"
] | 2 | 2022-02-03T08:34:13.000Z | 2022-02-03T08:48:17.000Z |
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
class DoubleConv(nn.Module):
    """(3x3 conv -> BatchNorm -> ReLU) applied twice.

    `mid_channels` controls the width between the two convolutions and
    defaults to `out_channels`.
    """

    def __init__(self, in_channels, out_channels, mid_channels=None):
        super().__init__()
        mid_channels = mid_channels if mid_channels else out_channels
        layers = [
            nn.Conv2d(in_channels, mid_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(mid_channels),
            nn.ReLU(inplace=True),
            nn.Conv2d(mid_channels, out_channels, kernel_size=3, padding=1),
            nn.BatchNorm2d(out_channels),
            nn.ReLU(inplace=True),
        ]
        self.double_conv = nn.Sequential(*layers)

    def forward(self, x):
        return self.double_conv(x)
class Down(nn.Module):
    """Downscaling block: 2x2 max-pool followed by a DoubleConv."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.maxpool_conv = nn.Sequential(
            nn.MaxPool2d(2),
            DoubleConv(in_channels, out_channels)
        )

    def forward(self, x):
        # halves spatial resolution, maps channels in -> out
        return self.maxpool_conv(x)
class Up(nn.Module):
    """Upscaling block: upsample x1, pad it to x2's size, concat, DoubleConv.

    With `bilinear=True` the upsampling is parameter-free and DoubleConv
    narrows through in_channels // 2; otherwise a transposed conv halves the
    channel count before concatenation.
    """

    def __init__(self, in_channels, out_channels, bilinear=True):
        super().__init__()
        if bilinear:
            self.up = nn.Upsample(scale_factor=2, mode='bilinear', align_corners=True)
            self.conv = DoubleConv(in_channels, out_channels, in_channels // 2)
        else:
            self.up = nn.ConvTranspose2d(in_channels, in_channels // 2, kernel_size=2, stride=2)
            self.conv = DoubleConv(in_channels, out_channels)

    def forward(self, x1, x2):
        x1 = self.up(x1)
        # pad x1 so its spatial dims match the skip connection x2
        dy = x2.size()[2] - x1.size()[2]
        dx = x2.size()[3] - x1.size()[3]
        x1 = F.pad(x1, [dx // 2, dx - dx // 2,
                        dy // 2, dy - dy // 2])
        merged = torch.cat([x2, x1], dim=1)
        return self.conv(merged)
class OutConv(nn.Module):
    """Final 1x1 convolution projecting to the desired output channel count."""

    def __init__(self, in_channels, out_channels):
        super().__init__()
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1)

    def forward(self, x):
        return self.conv(x)
def init_weights(net, init_type='normal', gain=0.02):
    """Initialize all Conv*/Linear/BatchNorm2d parameters of ``net`` in place.

    init_type: one of 'normal', 'xavier', 'kaiming', 'orthogonal'.
    gain: std for 'normal' and for BatchNorm weights; gain for
    xavier/orthogonal.  An unknown init_type raises NotImplementedError,
    but only when a matching layer is actually visited (the check runs
    inside the per-module hook).
    """
    def init_func(m):
        classname = m.__class__.__name__
        # name-based match covers Conv1d/2d/3d, ConvTranspose*, Linear
        if hasattr(m, 'weight') and (classname.find('Conv') != -1 or classname.find('Linear') != -1):
            if init_type == 'normal':
                init.normal_(m.weight.data, 0.0, gain)
            elif init_type == 'xavier':
                init.xavier_normal_(m.weight.data, gain=gain)
            elif init_type == 'kaiming':
                init.kaiming_normal_(m.weight.data, a=0, mode='fan_in')
            elif init_type == 'orthogonal':
                init.orthogonal_(m.weight.data, gain=gain)
            else:
                raise NotImplementedError('initialization method [%s] is not implemented' % init_type)
            if hasattr(m, 'bias') and m.bias is not None:
                init.constant_(m.bias.data, 0.0)
        elif classname.find('BatchNorm2d') != -1:
            # BatchNorm: weight ~ N(1, gain), bias = 0
            init.normal_(m.weight.data, 1.0, gain)
            init.constant_(m.bias.data, 0.0)
    print('initialize network with %s' % init_type)
    net.apply(init_func)
class conv_block(nn.Module):
    """Two consecutive (3x3 conv -> BatchNorm -> ReLU) stages, ch_in -> ch_out."""

    def __init__(self, ch_in, ch_out):
        super().__init__()
        stages = [
            nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm2d(ch_out),
            nn.ReLU(inplace=True),
            nn.Conv2d(ch_out, ch_out, kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm2d(ch_out),
            nn.ReLU(inplace=True),
        ]
        self.conv = nn.Sequential(*stages)

    def forward(self, x):
        return self.conv(x)
class up_conv(nn.Module):
    """2x nearest-neighbour upsample followed by (3x3 conv -> BN -> ReLU)."""

    def __init__(self, ch_in, ch_out):
        super().__init__()
        stages = [
            nn.Upsample(scale_factor=2),
            nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm2d(ch_out),
            nn.ReLU(inplace=True),
        ]
        self.up = nn.Sequential(*stages)

    def forward(self, x):
        return self.up(x)
class Recurrent_block(nn.Module):
    """Recurrent conv unit (R2U-Net style): re-applies one shared conv stage.

    One seeding pass produces the state, then conv(x + state) runs on EVERY
    loop step (including step 0), i.e. t updates after the seed.
    """

    def __init__(self, ch_out, t=2):
        super().__init__()
        self.t = t
        self.ch_out = ch_out
        self.conv = nn.Sequential(
            nn.Conv2d(ch_out, ch_out, kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm2d(ch_out),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        for step in range(self.t):
            if step == 0:
                state = self.conv(x)
            # intentionally also runs at step 0, right after seeding
            state = self.conv(x + state)
        return state
class RRCNN_block(nn.Module):
    """1x1 projection, two recurrent conv blocks, plus a residual skip."""

    def __init__(self, ch_in, ch_out, t=2):
        super().__init__()
        self.RCNN = nn.Sequential(
            Recurrent_block(ch_out, t=t),
            Recurrent_block(ch_out, t=t),
        )
        self.Conv_1x1 = nn.Conv2d(ch_in, ch_out, kernel_size=1, stride=1, padding=0)

    def forward(self, x):
        projected = self.Conv_1x1(x)
        return projected + self.RCNN(projected)
class single_conv(nn.Module):
    """One (3x3 conv -> BatchNorm -> ReLU) stage, ch_in -> ch_out."""

    def __init__(self, ch_in, ch_out):
        super().__init__()
        self.conv = nn.Sequential(
            nn.Conv2d(ch_in, ch_out, kernel_size=3, stride=1, padding=1, bias=True),
            nn.BatchNorm2d(ch_out),
            nn.ReLU(inplace=True),
        )

    def forward(self, x):
        return self.conv(x)
class Attention_block(nn.Module):
    """Additive attention gate (Attention U-Net).

    Projects the gating signal g and features x to F_int channels, combines
    them additively, and produces a per-pixel sigmoid mask that scales x.
    """

    def __init__(self, F_g, F_l, F_int):
        super().__init__()
        self.W_g = nn.Sequential(
            nn.Conv2d(F_g, F_int, kernel_size=1, stride=1, padding=0, bias=True),
            nn.BatchNorm2d(F_int),
        )
        self.W_x = nn.Sequential(
            nn.Conv2d(F_l, F_int, kernel_size=1, stride=1, padding=0, bias=True),
            nn.BatchNorm2d(F_int),
        )
        self.psi = nn.Sequential(
            nn.Conv2d(F_int, 1, kernel_size=1, stride=1, padding=0, bias=True),
            nn.BatchNorm2d(1),
            nn.Sigmoid(),
        )
        self.relu = nn.ReLU(inplace=True)

    def forward(self, g, x):
        gate = self.W_g(g)
        feat = self.W_x(x)
        attn = self.psi(self.relu(gate + feat))
        return x * attn
| 32.029851 | 103 | 0.561976 |
acf105cca7f23ea78c000d9299f82ddd7e668652 | 1,880 | py | Python | image_process.py | Prof-Iz/Solar_Path_App | d8203d3123c6f6539cdf1fd01acba56927289b26 | [
"MIT"
] | null | null | null | image_process.py | Prof-Iz/Solar_Path_App | d8203d3123c6f6539cdf1fd01acba56927289b26 | [
"MIT"
] | null | null | null | image_process.py | Prof-Iz/Solar_Path_App | d8203d3123c6f6539cdf1fd01acba56927289b26 | [
"MIT"
] | null | null | null | import cv2
from PIL import Image
import os
def overlay_graph(route_base, graph):
    """Overlay a solar-path graph image onto a base photo, scaled to the
    largest bright contour (the open-sky region).

    route_base -- path (string) to the base image
    graph      -- path to the graph image generated at the coordinates

    Side effects: chdir's into the test_pics directory, writes combined.png
    there and opens the result in the default viewer.
    NOTE(review): directory and output paths are machine-specific absolute
    Windows paths -- consider parameterizing them.
    """
    directory = "C:\\Users\\User\\Documents\\GitHub\\Solar_Path_App\\test_pics"
    os.chdir(directory)

    img = cv2.imread(route_base, 1)
    gray_image = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    # binarize; THRESH_BINARY replaces the original cv2.CV_8UC1, a depth
    # constant that only worked by accident because both equal 0
    ret, thresh = cv2.threshold(gray_image, 50, 255, cv2.THRESH_BINARY)

    # pick the largest external contour -- assumed to be the sky region
    contours, hierarchy = cv2.findContours(thresh, cv2.RETR_EXTERNAL,
                                           cv2.CHAIN_APPROX_NONE)
    max_countour = contours[0]
    for i in range(1, len(contours)):
        if cv2.contourArea(contours[i]) > cv2.contourArea(max_countour):
            max_countour = contours[i]

    center, radius = cv2.minEnclosingCircle(max_countour)
    print(center, radius)

    im2 = Image.open(graph)
    w, h = im2.size
    mf = (radius / 300)  # 300 if width is 800, 370 if 960
    im2_large = im2.resize((int(w * mf), int(h * mf)))
    w, h = im2_large.size

    # top-left corner that centres the (slightly offset) graph on the circle
    x_of_graph = int(center[0] - w / 2 - 20)
    y_of_graph = int(center[1] - h / 2 - 5)
    center_int = (x_of_graph, y_of_graph)

    im1 = Image.open(route_base)
    im1.paste(im2_large, center_int, mask=im2_large)
    im1.save("C:\\Users\\User\\Documents\\GitHub\\Solar_Path_App\\test_pics\\combined.png")
    im1.show()
    # bug fix: the original called temp_base.close()/temp_graph.close() on
    # names that were never defined, raising NameError right after im1.show()
    im1.close()
im2.close() | 25.753425 | 91 | 0.626064 |
acf10846a00553e0d082e4ce1bff7ab229e93fee | 1,499 | py | Python | mysite/polls/views.py | mweeden2/django_tutorial | 6c477ffef6fb7effa552084cca028948e957b81a | [
"MIT"
] | null | null | null | mysite/polls/views.py | mweeden2/django_tutorial | 6c477ffef6fb7effa552084cca028948e957b81a | [
"MIT"
] | null | null | null | mysite/polls/views.py | mweeden2/django_tutorial | 6c477ffef6fb7effa552084cca028948e957b81a | [
"MIT"
] | null | null | null | from django.http import HttpResponseRedirect # , HttpResponse
from django.shortcuts import get_object_or_404, render
from django.urls import reverse
from django.views import generic
# from django.http import Http404
from .models import Choice, Question
class IndexView(generic.ListView):
    """Index page listing the five most recently published questions."""

    template_name = 'polls/index.html'
    # Name under which the queryset is exposed to the template context.
    context_object_name = 'latest_question_list'

    def get_queryset(self):
        """Return the last five published questions."""
        return Question.objects.order_by('-pub_date')[:5]
class DetailView(generic.DetailView):
    """Detail page for a single question (shows the voting form)."""

    model = Question
    template_name = 'polls/detail.html'
class ResultsView(generic.DetailView):
    """Results page showing vote tallies for a single question."""

    model = Question
    template_name = 'polls/results.html'
def vote(request, question_id):
    """Record a vote for one of the question's choices and redirect.

    Re-renders the detail page with an error message when no choice
    was submitted in the POST data.
    """
    question = get_object_or_404(Question, pk=question_id)
    try:
        selected_choice = question.choice_set.get(pk=request.POST['choice'])
    except (KeyError, Choice.DoesNotExist):
        # Redisplay the question voting form with an error message.
        return render(request, 'polls/detail.html', {
            'question': question,
            'error_message': "You didn't select a choice.",
        })
    else:
        selected_choice.votes += 1
        selected_choice.save()
        # Always return an HttpResponseRedirect after successfully dealing
        # with POST data. This prevents data from being posted twice if a
        # user hits the Back button.
        return HttpResponseRedirect(reverse('polls:results', args=(question.id,)))
acf1090b3f49db6f496d5bd86c8a63c5047096bd | 2,436 | py | Python | tests/test_views.py | zmrenwu/django-mptt-comments | 14c9b949d93a43c36357660282033f391195f629 | [
"MIT"
] | 32 | 2018-11-06T04:10:19.000Z | 2020-08-26T02:34:48.000Z | tests/test_views.py | alice314272/django-mptt-comments | 14c9b949d93a43c36357660282033f391195f629 | [
"MIT"
] | 2 | 2019-05-16T08:16:51.000Z | 2020-05-14T14:43:07.000Z | tests/test_views.py | alice314272/django-mptt-comments | 14c9b949d93a43c36357660282033f391195f629 | [
"MIT"
] | 7 | 2018-11-06T04:15:04.000Z | 2020-09-09T10:26:58.000Z | from django.conf import settings
from django.contrib.auth.models import AnonymousUser, User
from django.contrib.contenttypes.models import ContentType
from django.contrib.sites.models import Site
from django.test import RequestFactory, TestCase, modify_settings, override_settings
from django.urls import reverse
from django_mptt_comments.models import MPTTComment
from django_mptt_comments.views import ReplySuccessView, ReplyView, post_mptt_comment
class MPTTCommentsPostCommentTestCase(TestCase):
    """Tests for the comment-posting endpoint."""

    def setUp(self):
        self.factory = RequestFactory()
        self.user = User.objects.create_user(
            username='test', email='test@test.com', password='test')

    def test_authenticated_user_post_comment(self):
        # Posting an empty body while authenticated should be rejected.
        self.client.login(username='test', password='test')
        response = self.client.post(reverse('mptt-comments-post-comment'), data={})
        self.assertEqual(response.status_code, 400)

    # TODO: override_settings doesn't work as control is module level.
    # see: https://docs.djangoproject.com/en/2.1/topics/testing/tools/#overriding-settings
    # @override_settings(MPTT_COMMENTS_ALLOW_ANONYMOUS=False)
    # def test_doesnt_allow_anonymous_user_post_comment(self):
    #     response = self.client.post(reverse('django_mptt_comments:mptt-comments-post-comment'), data={})
    #     self.assertEqual(response.status_code, 302)
    #     self.assertEqual(response.url, settings.LOGIN_URL + '?next=/post/')
class ReplyViewTestCase(TestCase):
    """Tests for the reply view that renders a pre-filled reply form."""

    def setUp(self):
        self.factory = RequestFactory()
        self.user = User.objects.create_user(username='test', email='test@test.com', password='test')
        site = Site.objects.create(name='test', domain='test.com')
        # A comment attached to the Site object itself, to serve as the
        # parent of the reply under test.
        self.comment = MPTTComment.objects.create(**{
            'content_type': ContentType.objects.get_for_model(site),
            'object_pk': site.pk,
            'site': site,
            'user': self.user,
            'comment': 'test comment',
        })

    def test_reply(self):
        url = reverse('mptt_comments_reply', kwargs={'parent': self.comment.pk})
        request = self.factory.get(url)
        request.user = self.user
        response = ReplyView.as_view()(request, parent=self.comment.pk)
        self.assertEqual(response.status_code, 200)
        self.assertIn('form', response.context_data)
        # The form should be pre-filled with the parent comment's pk.
        self.assertEqual(response.context_data['form'].initial['parent'], self.comment.pk)
acf10a733335350dc02a26ba8d31e50645e86ae2 | 823 | py | Python | examples/run_agede.py | rhododendrom/NiaPy | 873037e4337474bb75714f1c2be273c97de3eded | [
"MIT"
] | 1 | 2020-03-16T11:15:43.000Z | 2020-03-16T11:15:43.000Z | examples/run_agede.py | rhododendrom/NiaPy | 873037e4337474bb75714f1c2be273c97de3eded | [
"MIT"
] | null | null | null | examples/run_agede.py | rhododendrom/NiaPy | 873037e4337474bb75714f1c2be273c97de3eded | [
"MIT"
] | null | null | null | # encoding=utf8
# This is temporary fix to import module from parent folder
# It will be removed when package is published on PyPI
import sys
sys.path.append('../')
# End of fix
import random
from NiaPy.algorithms.basic import AgingNpDifferentialEvolution
from NiaPy.algorithms.basic.de import bilinear
from NiaPy.task.task import StoppingTask, OptimizationType
from NiaPy.benchmarks import Sphere
# we will run Differential Evolution for 5 independent runs
for i in range(5):
    # Fresh 10-dimensional Sphere minimization task, 10000 evaluations budget.
    task = StoppingTask(D=10, nFES=10000, optType=OptimizationType.MINIMIZATION, benchmark=Sphere())
    algo = AgingNpDifferentialEvolution(NP=40, F=0.63, CR=0.9, Lt_min=3, Lt_max=7, omega=0.2, delta_np=0.1, age=bilinear)
    best = algo.run(task=task)
    # best is (solution, fitness) — print the coordinates and the value.
    print('%s -> %s' % (best[0].x, best[1]))
# vim: tabstop=3 noexpandtab shiftwidth=3 softtabstop=3
| 37.409091 | 118 | 0.767922 |
acf10acb7fd1b7718b33c922523e7fca191ac7f4 | 1,167 | py | Python | scripts/dd-algorithm-example.py | grimm-co/delta-debugging | 17d8f7d6a7ed1d62f06b1625ae9274849af8f41c | [
"WTFPL"
] | 26 | 2018-06-22T02:13:29.000Z | 2022-03-17T10:24:11.000Z | scripts/dd-algorithm-example.py | grimm-co/delta-debugging | 17d8f7d6a7ed1d62f06b1625ae9274849af8f41c | [
"WTFPL"
] | null | null | null | scripts/dd-algorithm-example.py | grimm-co/delta-debugging | 17d8f7d6a7ed1d62f06b1625ae9274849af8f41c | [
"WTFPL"
] | 3 | 2018-08-01T23:05:02.000Z | 2020-10-17T11:40:33.000Z | #!/usr/bin/env python3
# This test script illustrates the
try:
from delta_debugging.DD import DD
except ImportError as e:
print("Unable to import delta debugging library. Please ensure it is "
"installed. https://github.com/grimm-co/delta-debugging")
from sys import exit
exit(-1)
class TestDD(DD):
    """Toy delta-debugging driver: an input "fails" exactly when it
    contains precisely one '1', one '7' and one '8'."""

    def __init__(self):
        DD.__init__(self)
        self.debug_dd = 0
        self.verbose = 0

    def _test(self, deltas):
        # Keep only the characters that matter for the failure condition.
        interesting = [ch for _, ch in deltas if ch in ("1", "7", "8")]

        outcome = self.PASS
        if interesting.count("1") == 1 and interesting.count("7") == 1 and interesting.count("8") == 1:
            outcome = self.FAIL

        candidate = "".join(ch for _, ch in deltas)
        print('Testing case {:11}: {}'.format('"' + candidate + '"', str(outcome)))
        return outcome
if __name__ == '__main__':
    test_input = "12345678"
    print('Minimizing input: "{}"'.format(test_input))
    # Convert string into the delta format: a list of (index, character) pairs.
    deltas = list(map(lambda x: (x, test_input[x]), range(len(test_input))))
    mydd = TestDD()
    c = mydd.ddmin(deltas)  # Invoke DDMIN
    # Reassemble the minimized deltas back into a string.
    minimal = "".join([x[1] for x in c])
    print('Found minimal test case: "{}"'.format(minimal))
acf10cb3bbdbbe5653ab80bddf0f91e102d8ad0a | 792 | py | Python | wiki/urls.py | krushilnaik/Wikipedia-Clone | 996c1d4071db0258d52376267cfb6c414ef554c1 | [
"MIT"
] | null | null | null | wiki/urls.py | krushilnaik/Wikipedia-Clone | 996c1d4071db0258d52376267cfb6c414ef554c1 | [
"MIT"
] | null | null | null | wiki/urls.py | krushilnaik/Wikipedia-Clone | 996c1d4071db0258d52376267cfb6c414ef554c1 | [
"MIT"
] | null | null | null | """wiki URL Configuration
The `urlpatterns` list routes URLs to views. For more information please see:
https://docs.djangoproject.com/en/3.0/topics/http/urls/
Examples:
Function views
1. Add an import: from my_app import views
2. Add a URL to urlpatterns: path('', views.home, name='home')
Class-based views
1. Add an import: from other_app.views import Home
2. Add a URL to urlpatterns: path('', Home.as_view(), name='home')
Including another URLconf
1. Import the include() function: from django.urls import include, path
2. Add a URL to urlpatterns: path('blog/', include('blog.urls'))
"""
from django.contrib import admin
from django.urls import include, path
urlpatterns = [
    # Django admin site.
    path('admin/', admin.site.urls),
    # Everything else is delegated to the encyclopedia app's URLconf.
    path('', include("encyclopedia.urls"))
]
| 34.434783 | 77 | 0.709596 |
acf10cbf61f7fc4b1ffe56d0206ecc203d715284 | 3,535 | py | Python | reflex/repo.py | kenichi/Reflex | f21d502ec5b46b48818f09369d788093c71871a0 | [
"MIT"
] | null | null | null | reflex/repo.py | kenichi/Reflex | f21d502ec5b46b48818f09369d788093c71871a0 | [
"MIT"
] | null | null | null | reflex/repo.py | kenichi/Reflex | f21d502ec5b46b48818f09369d788093c71871a0 | [
"MIT"
] | 1 | 2020-10-30T00:14:19.000Z | 2020-10-30T00:14:19.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
from shutil import rmtree
from subprocess import Popen, PIPE
from tempfile import mkdtemp
from reflex.error import GitCommandError
class PrestineRepo():
    """
    Creates a context with a temporary clone of a given repository.

    This repo may be manipulated safely without worrying about performing
    actions on a real local copy of the repo. The repo has the 'origin' remote
    configured for the clone uri which is passed in during initialization. It
    also provides some useful helper methods that can be performed on the repo
    itself.
    """

    def __init__(self, clone_uri, prod_branch=None, dev_branches=None):
        # Default branch names when none are supplied.
        if not prod_branch:
            prod_branch = 'main'
        if not dev_branches:
            dev_branches = ['develop']

        # Temporary working directory; removed again in __exit__.
        self.dir = mkdtemp()
        self.clone_uri = clone_uri
        self.production_branch = prod_branch
        self.development_branches = dev_branches

    def __enter__(self):
        # Clone into the temp dir and make sure all remote refs are present.
        self.git('clone', self.clone_uri, self.dir)
        self.git('fetch', 'origin')
        return self

    def __exit__(self, *exc):
        # Always discard the temporary clone, even on error.
        rmtree(self.dir)

    def git(self, *args):
        """ Git command helper.

        Runs `git <args>` inside the temporary clone and returns the Popen
        object (callers read `result.stdout` afterwards). Raises
        GitCommandError with the captured stderr lines on non-zero exit.

        NOTE(review): wait() with stdout/stderr=PIPE can deadlock if git
        produces more output than the pipe buffer holds — consider
        communicate(); verify with large repositories.
        """
        command = ['git'] + list(args)
        result = Popen(command, cwd=self.dir, stdout=PIPE, stderr=PIPE)
        result.wait()
        if result.returncode != 0:
            err = result.stderr.readlines()
            raise GitCommandError(
                "Failed to run '{}'.".format(' '.join(command)),
                err
            )
        return result

    def branches(self, match=None):
        """ List all branches matching an optional pattern in a repo.

        Returns the remote branch names as decoded, stripped strings.
        """
        args = ['--list', '--remote']
        if match:
            args.append(match)
        result = self.git('branch', *args)
        branches = [branch.strip() for branch in result.stdout.readlines()]
        return [branch.decode() for branch in branches]

    def branch_exists(self, full_branch_name):
        """ Returns True or False depending on if a branch exists or not.
        """
        return full_branch_name in self.branches()

    def checkout(self, branch_name, reset_sha=None):
        """ Checks out a git reference in a repo with the option to hard reset.
        """
        # Track the remote branch if it exists; otherwise create it locally.
        if self.branch_exists('origin/{}'.format(branch_name)):
            self.git('checkout', branch_name)
        else:
            self.git('checkout', '-b', branch_name)
        if reset_sha:
            self.git('reset', '--hard', reset_sha)

    def tag(self, tag, message, sha=None):
        """ Creates an annotated tag on the repo at the provided sha (Or HEAD).
        """
        args = []
        if sha:
            args.append(sha)
        return self.git('tag', '--annotate', '--message', message, tag, *args)

    def get_last_release(self, sha):
        """
        Returns the latest release tag on a given tree by calling get_last_tag
        with the match argument specified in order to filter non-release tags.
        """
        return self.get_last_tag(sha, 'release-*')

    def get_last_tag(self, sha=None, match=None):
        """
        Returns the latest tag on a given tree. Can also filter by tags
        matching the match argument.
        """
        # --abbrev=0 makes `git describe` print just the tag name.
        options = ['--abbrev=0']
        if match:
            options += ['--match', match]
        if sha:
            options.append(sha)
        tag = self.git('describe', *options).stdout.read()
        return tag.decode().strip()
acf10d94a6973606b770c0ce6eb00393d591dbf3 | 7,993 | py | Python | sendfriends.py | tangqipeng/auto_wechat_python | a55204519f9f6132e92173ee42933e369cbd03f9 | [
"Apache-2.0"
] | 1 | 2020-11-08T15:31:07.000Z | 2020-11-08T15:31:07.000Z | sendfriends.py | tangqipeng/auto_wechat_python | a55204519f9f6132e92173ee42933e369cbd03f9 | [
"Apache-2.0"
] | null | null | null | sendfriends.py | tangqipeng/auto_wechat_python | a55204519f9f6132e92173ee42933e369cbd03f9 | [
"Apache-2.0"
] | null | null | null | #!/usr/local/bin/python
# -*- coding:utf-8 -*-
import time
import glob
class Sendfriends:
    """Posts a WeChat "Moments" update (images plus optional text) for a
    list of WeChat accounts by driving the phone UI through an adb helper.

    NOTE(review): relies on fixed screen coordinates and Chinese UI labels;
    presumably tuned for one specific device/locale — verify before reuse.
    """

    def __init__(self, adb, wechat_list, image_num, strlist, startwechat, main):
        self._adb = adb
        # WeChat account display names to process.
        self._wechat_list = wechat_list
        # Number of images to select from the album (1..9).
        self._imagenum = image_num
        # Index of the WeChat account to start from.
        self.wechats_index = startwechat
        # self.imagelist = sorted(glob.glob(self._image))
        self._strlist = strlist
        print(len(self._strlist))
        print(len(strlist))
        for _str in self._strlist:
            print(_str)
        # Callback object used to report success/failure results
        # (original comment: "output the result to memory or a file").
        self._main = main

    # Kill the WeChat app so the next account starts from a clean state.
    def clean_wechat(self):
        time.sleep(5)
        self._adb.adb_put_back()
        time.sleep(1)
        # Send keycode 82 (menu key) to show the background/recent apps.
        self._adb.adb_keyboard(82)
        time.sleep(1)
        # Tap the "clean" button (label '清理').
        self._adb.click_by_text_do_not_refresh0('清理')
        time.sleep(2)

    def send_msg(self):
        # Tap the publish button ('发表') and verify the post went out.
        print('发送')
        self._adb.click_by_text_after_refresh('发表')
        time.sleep(5)
        self._adb.refresh_nodes()
        time.sleep(2)
        if self._adb.find_nodes_by_content('拍照分享'):
            # Back on the Moments timeline: the post was published.
            self._main.push('success_circle', self._wechat_list[self.wechats_index].strip() + ' 已经发送')
            self._adb.adb_put_back()
            self._adb.adb_put_back()
            self._adb.adb_put_back()
            self._adb.adb_put_back()
            self._adb.adb_put_back()
            self.clean_wechat()
            self.wechats_index += 1
            self.find_wechat()
        elif self._adb.find_nodes_by_text('发表'):
            # Publish button still on screen: retry.
            self.send_msg()
        else:
            self._main.push('failed_circle', self._wechat_list[self.wechats_index].strip() + ' 发送失败')

    def choice_images(self):
        # Select self._imagenum thumbnails from the album grid by tapping
        # fixed coordinates (rows of four), then confirm with "完成(n/9)".
        self._adb.refresh_nodes()
        time.sleep(1)
        print(self._imagenum)
        for num in range(self._imagenum):
            print(num)
            if num < 4:
                # First row of thumbnails.
                self._adb.adb_click(250 * (num + 1), 300)
            elif num >= 4 and num < 8:
                print('4-8')
                # Second row.
                self._adb.adb_click(250 * (num - 3), 300 + 250)
            else:
                # Third row.
                self._adb.adb_click(250 * (num - 7), 300 + 500)
            time.sleep(1)
        time.sleep(1)
        self._adb.refresh_nodes()
        if self._adb.find_nodes_by_text('完成(' + str(self._imagenum) + '/9)'):
            self._adb.click(0)
        else:
            # Some taps may not have registered: fall back through smaller
            # selection counts until a matching confirm button is found.
            if self._adb.find_nodes_by_text('完成(' + str(self._imagenum - 1) + '/9)'):
                self._adb.click(0)
            elif self._adb.find_nodes_by_text('完成(' + str(self._imagenum - 2) + '/9)'):
                self._adb.click(0)
            elif self._adb.find_nodes_by_text('完成(' + str(self._imagenum - 3) + '/9)'):
                self._adb.click(0)
            elif self._adb.find_nodes_by_text('完成(' + str(self._imagenum - 4) + '/9)'):
                self._adb.click(0)
            elif self._adb.find_nodes_by_text('完成(' + str(self._imagenum - 5) + '/9)'):
                self._adb.click(0)
            elif self._adb.find_nodes_by_text('完成(' + str(self._imagenum - 6) + '/9)'):
                self._adb.click(0)
            elif self._adb.find_nodes_by_text('完成(' + str(self._imagenum - 7) + '/9)'):
                self._adb.click(0)
            elif self._adb.find_nodes_by_text('完成(' + str(self._imagenum - 8) + '/9)'):
                self._adb.click(0)

    # Locate and open the WeChat account that should post next.
    def find_wechat(self):
        self._adb.adb_put_back()
        self._adb.adb_put_back()
        self._adb.adb_put_back()
        self._adb.adb_put_back()
        self._adb.adb_put_back()
        self._adb.refresh_nodes()
        time.sleep(2)
        if self.wechats_index < len(self._wechat_list):
            if self._adb.find_nodes_by_text(self._wechat_list[self.wechats_index].strip()):
                print('找到' + self._wechat_list[self.wechats_index].strip())
                self._adb.click(0)
                time.sleep(15)
                self._adb.refresh_nodes()
                if self._adb.find_nodes_by_text(' 取消 '):
                    # A dialog is showing: dismiss it, then navigate
                    # Discover ('发现') -> Moments ('朋友圈') and post.
                    self._adb.click(0)
                    time.sleep(1)
                    self._adb.click_by_text_after_refresh('发现')
                    time.sleep(1)
                    self._adb.click_by_text_after_refresh('朋友圈')
                    time.sleep(1)
                    self._adb.refresh_nodes()
                    time.sleep(1)
                    if self._adb.find_nodes_by_content('拍照分享'):
                        print('分享')
                        self._adb.click(0)
                        time.sleep(1)
                        self._adb.refresh_nodes()
                        time.sleep(1)
                        if self._adb.find_nodes_by_text('从相册选择'):
                            self._adb.click(0)
                            time.sleep(3)
                            self.choice_images()
                            time.sleep(3)
                            if len(self._strlist) > 0:
                                # Type the caption text before publishing.
                                self._adb.click_by_text_after_refresh('这一刻的想法...')
                                time.sleep(1)
                                for _str in self._strlist:
                                    string = _str + '\n'
                                    self._adb.adb_input_chinese(string)
                                    time.sleep(1)
                                self.send_msg()
                            else:
                                print(len(self._strlist))
                        else:
                            print('没找到')
                    else:
                        print('未找到分享')
                elif self._adb.find_nodes_by_text('找回密码'):
                    # Account is logged out (password-recovery screen):
                    # skip it and move on to the next account.
                    self._adb.adb_put_back()
                    self._adb.adb_put_back()
                    self._adb.adb_put_back()
                    self._adb.adb_put_back()
                    self._adb.adb_put_back()
                    self.clean_wechat()
                    self.wechats_index += 1
                    self.find_wechat()
                else:
                    # No dialog: navigate straight to Moments and post.
                    self._adb.click_by_text_after_refresh('发现')
                    time.sleep(1)
                    self._adb.click_by_text_after_refresh('朋友圈')
                    time.sleep(1)
                    self._adb.refresh_nodes()
                    time.sleep(1)
                    if self._adb.find_nodes_by_content('拍照分享'):
                        print('分享')
                        self._adb.click(0)
                        time.sleep(1)
                        self._adb.refresh_nodes()
                        time.sleep(1)
                        if self._adb.find_nodes_by_text('从相册选择'):
                            self._adb.click(0)
                            time.sleep(3)
                            self.choice_images()
                            time.sleep(3)
                            if len(self._strlist) > 0:
                                self._adb.click_by_text_after_refresh('这一刻的想法...')
                                time.sleep(1)
                                for _str in self._strlist:
                                    string = _str + '\n'
                                    self._adb.adb_input_chinese(string)
                                    time.sleep(1)
                                self.send_msg()
                        else:
                            print('没找到')
                    else:
                        print('未找到分享')
            else:
                print('未找到' + self._wechat_list[self.wechats_index].strip())
        else:
            print('已添加完')

    def test(self):
        self.choice_images()

    def main(self):
        # Entry point: switch the IME to ADB Keyboard, then start posting.
        try:
            # self.test()
            if self._imagenum >= 1:
                self._adb.adb_keyboard(63)
                self._adb.click_by_text_after_refresh("ADB Keyboard")
                self.find_wechat()
            else:
                print('choicenum设置错误')
        except KeyboardInterrupt as e:
            print('e', e)
| 37.350467 | 103 | 0.463405 |
acf10e340330248397515e1698d1849c1f867c42 | 775 | py | Python | zhihu/crawl/login_zhihu.py | githubao/xiao-awesome-zhihu | 120dd16c731ec610e68dc94eff923e878a71e00e | [
"Apache-2.0"
] | null | null | null | zhihu/crawl/login_zhihu.py | githubao/xiao-awesome-zhihu | 120dd16c731ec610e68dc94eff923e878a71e00e | [
"Apache-2.0"
] | null | null | null | zhihu/crawl/login_zhihu.py | githubao/xiao-awesome-zhihu | 120dd16c731ec610e68dc94eff923e878a71e00e | [
"Apache-2.0"
] | null | null | null | #!/usr/bin/env python
# encoding: utf-8
"""
@description: //TODO
@version: 1.0
@author: BaoQiang
@license: Apache Licence
@contact: mailbaoqiang@gmail.com
@site: http://www.github.com/githubao
@software: PyCharm
@file: login_zhihu.py
@time: 2016/10/5 23:12
"""
import os
from client import ZhihuClient
import logging
from settings import TOKEN_FILE
def log_in():
    """Log in to Zhihu, reusing a cached token when available.

    Returns the authenticated ZhihuClient on success, or None when
    loading the cached token or the interactive login fails.
    """
    client = ZhihuClient()
    if os.path.isfile(TOKEN_FILE):
        # Reuse the previously saved token instead of prompting again.
        if not client.load_token(TOKEN_FILE):
            return None
    else:
        # No cached token: fall back to interactive terminal login.
        if not client.login_in_terminal():
            logging.error('log_in_terminal failed')
            return None
        # Persist the token so future runs can skip the interactive login.
        client.save_token(TOKEN_FILE)
    # Fix: previously returned True on the cached-token path and the client
    # on the fresh-login path. Always return the client so callers can use
    # it; the client is truthy, so existing boolean checks keep working.
    return client
def main():
    """Script entry point: perform the login (result is ignored here)."""
    log_in()


if __name__ == '__main__':
    main()
acf10e4fdda0a0d3706e0ad4f35b67f8cc4604d7 | 56,636 | py | Python | boost/boost_1_56_0/tools/build/src/build/targets.py | cooparation/caffe-android | cd91078d1f298c74fca4c242531989d64a32ba03 | [
"BSD-2-Clause-FreeBSD"
] | 39 | 2015-01-16T09:17:05.000Z | 2021-12-15T23:02:00.000Z | boost/boost_1_56_0/tools/build/src/build/targets.py | cooparation/caffe-android | cd91078d1f298c74fca4c242531989d64a32ba03 | [
"BSD-2-Clause-FreeBSD"
] | 26 | 2015-01-03T20:26:27.000Z | 2019-12-30T22:46:15.000Z | boost/boost_1_56_0/tools/build/src/build/targets.py | cooparation/caffe-android | cd91078d1f298c74fca4c242531989d64a32ba03 | [
"BSD-2-Clause-FreeBSD"
] | 14 | 2015-10-23T08:46:01.000Z | 2022-03-24T18:08:24.000Z | # Status: ported.
# Base revision: 64488
# Copyright Vladimir Prus 2002-2007.
# Copyright Rene Rivera 2006.
#
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Supports 'abstract' targets, which are targets explicitly defined in Jamfile.
#
# Abstract targets are represented by classes derived from 'AbstractTarget' class.
# The first abstract target is 'project_target', which is created for each
# Jamfile, and can be obtained by the 'target' rule in the Jamfile's module.
# (see project.jam).
#
# Project targets keep a list of 'MainTarget' instances.
# A main target is what the user explicitly defines in a Jamfile. It is
# possible to have several definitions for a main target, for example to have
# different lists of sources for different platforms. So, main targets
# keep a list of alternatives.
#
# Each alternative is an instance of 'AbstractTarget'. When a main target
# subvariant is defined by some rule, that rule will decide what class to
# use, create an instance of that class and add it to the list of alternatives
# for the main target.
#
# Rules supplied by the build system will use only targets derived
# from 'BasicTarget' class, which will provide some default behaviour.
# There will be two classes derived from it, 'make-target', created by the
# 'make' rule, and 'TypedTarget', created by rules such as 'exe' and 'dll'.
#
# +------------------------+
# |AbstractTarget |
# +========================+
# |name |
# |project |
# | |
# |generate(properties) = 0|
# +-----------+------------+
# |
# ^
# / \
# +-+-+
# |
# |
# +------------------------+------+------------------------------+
# | | |
# | | |
# +----------+-----------+ +------+------+ +------+-------+
# | project_target | | MainTarget | | BasicTarget |
# +======================+ 1 * +=============+ alternatives +==============+
# | generate(properties) |o-----------+ generate |<>------------->| generate |
# | main-target | +-------------+ | construct = 0|
# +----------------------+ +--------------+
# |
# ^
# / \
# +-+-+
# |
# |
# ...--+----------------+------------------+----------------+---+
# | | | |
# | | | |
# ... ---+-----+ +------+-------+ +------+------+ +--------+-----+
# | | TypedTarget | | make-target | | stage-target |
# . +==============+ +=============+ +==============+
# . | construct | | construct | | construct |
# +--------------+ +-------------+ +--------------+
import re
import os.path
import sys
from b2.manager import get_manager
from b2.util.utility import *
import property, project, virtual_target, property_set, feature, generators, toolset
from virtual_target import Subvariant
from b2.exceptions import *
from b2.util.sequence import unique
from b2.util import path, bjam_signature
from b2.build.errors import user_error_checkpoint
import b2.build.build_request as build_request
import b2.util.set
_re_separate_target_from_properties = re.compile (r'^([^<]*)(/(<.*))?$')
class TargetRegistry:
    """Registry of main targets: tracks targets currently being built
    (to detect cyclic references) and provides helpers used when
    declaring main targets from Jamfiles. (Python 2 code.)"""

    def __init__ (self):
        # All targets that are currently being built.
        # Only the key is id (target), the value is the actual object.
        self.targets_being_built_ = {}

        # Current indent for debugging messages
        self.indent_ = ""

        # True when --debug-building was passed on the command line.
        self.debug_building_ = "--debug-building" in bjam.variable("ARGV")

        # Stack of targets currently being generated (see push/pop below).
        self.targets_ = []

    def main_target_alternative (self, target):
        """ Registers the specified target as a main target alternative.
            Returns 'target'.
        """
        target.project ().add_alternative (target)
        return target

    def main_target_sources (self, sources, main_target_name, no_renaming=0):
        """Return the list of sources to use, if main target rule is invoked
        with 'sources'. If there are any objects in 'sources', they are treated
        as main target instances, and the names of such targets are adjusted to
        be '<name_of_this_target>__<name_of_source_target>'. Such renaming
        is disabled if a non-empty value is passed for the 'no_renaming'
        parameter."""
        result = []

        for t in sources:

            t = b2.util.jam_to_value_maybe(t)

            if isinstance (t, AbstractTarget):
                name = t.name ()

                if not no_renaming:
                    name = main_target_name + '__' + name
                    t.rename (name)

                # Inline targets are not built by default.
                p = t.project()
                p.mark_targets_as_explicit([name])
                result.append(name)

            else:
                result.append (t)

        return result

    def main_target_requirements(self, specification, project):
        """Returns the requirements to use when declaring a main target,
        which are obtained by
        - translating all specified property paths, and
        - refining project requirements with the ones specified for the target

        'specification' are the properties explicitly specified for a
        main target
        'project' is the project where the main target is to be declared."""

        # Global toolset requirements apply to every main target.
        specification.extend(toolset.requirements())

        requirements = property_set.refine_from_user_input(
            project.get("requirements"), specification,
            project.project_module(), project.get("location"))

        return requirements

    def main_target_usage_requirements (self, specification, project):
        """ Returns the usage requirements to use when declaring a main target,
            which are obtained by
            - translating all specified property paths, and
            - adding the project's usage requirements

            specification:  Use-properties explicitly specified for a main target
            project:        Project where the main target is to be declared
        """
        project_usage_requirements = project.get ('usage-requirements')

        # We don't use 'refine-from-user-input' because I'm not sure if:
        # - removing of parent's usage requirements makes sense
        # - refining of usage requirements is not needed, since usage
        #   requirements are always free.
        usage_requirements = property_set.create_from_user_input(
            specification, project.project_module(), project.get("location"))

        return project_usage_requirements.add (usage_requirements)

    def main_target_default_build (self, specification, project):
        """ Return the default build value to use when declaring a main target,
            which is obtained by using the specified value if not empty and the
            parent's default build attribute otherwise.

            specification:  Default build explicitly specified for a main target
            project:        Project where the main target is to be declared
        """
        if specification:
            return property_set.create_with_validation(specification)
        else:
            return project.get ('default-build')

    def start_building (self, main_target_instance):
        """ Helper rule to detect cycles in main target references.

            Records 'main_target_instance' as being built; errors out if it
            is already on the in-progress list (i.e. a cyclic reference).
        """
        if self.targets_being_built_.has_key(id(main_target_instance)):
            names = []
            for t in self.targets_being_built_.values() + [main_target_instance]:
                names.append (t.full_name())

            # NOTE(review): 'names' is collected but never included in the
            # error message — presumably intended for diagnostics; confirm.
            get_manager().errors()("Recursion in main target references\n")

        self.targets_being_built_[id(main_target_instance)] = main_target_instance

    def end_building (self, main_target_instance):
        """Removes 'main_target_instance' from the in-progress list."""
        assert (self.targets_being_built_.has_key (id (main_target_instance)))
        del self.targets_being_built_ [id (main_target_instance)]

    def create_typed_target (self, type, project, name, sources, requirements, default_build, usage_requirements):
        """ Creates a TypedTarget with the specified properties.
            The 'name', 'sources', 'requirements', 'default_build' and
            'usage_requirements' are assumed to be in the form specified
            by the user in the Jamfile corresponding to 'project'.
        """
        return self.main_target_alternative (TypedTarget (name, project, type,
            self.main_target_sources (sources, name),
            self.main_target_requirements (requirements, project),
            self.main_target_default_build (default_build, project),
            self.main_target_usage_requirements (usage_requirements, project)))

    def increase_indent(self):
        """Increases the indent used for debug-building log messages."""
        self.indent_ += "    "

    def decrease_indent(self):
        """Decreases the indent used for debug-building log messages."""
        self.indent_ = self.indent_[0:-4]

    def logging(self):
        """Returns true if debug-building logging is enabled."""
        return self.debug_building_

    def log(self, message):
        """Prints 'message' at the current indent if logging is enabled."""
        if self.debug_building_:
            print self.indent_ + message

    def push_target(self, target):
        """Pushes 'target' onto the stack of targets being generated."""
        self.targets_.append(target)

    def pop_target(self):
        """Pops the most recently pushed target from the stack."""
        self.targets_ = self.targets_[:-1]

    def current(self):
        """Returns the target at the bottom of the generation stack."""
        return self.targets_[0]
class GenerateResult:
    """Result of generating a target: a usage-requirements property set
    plus the list of produced virtual targets."""

    def __init__ (self, ur=None, targets=None):
        if not targets:
            targets = []

        self.__usage_requirements = ur
        self.__targets = targets
        assert all(isinstance(t, virtual_target.VirtualTarget) for t in targets)

        # Default to an empty property set when no usage requirements given.
        if not self.__usage_requirements:
            self.__usage_requirements = property_set.empty ()

    def usage_requirements (self):
        """Returns the usage-requirements property set."""
        return self.__usage_requirements

    def targets (self):
        """Returns the list of produced virtual targets (may be empty)."""
        return self.__targets

    def extend (self, other):
        """Merges another GenerateResult into this one: usage requirements
        are combined and target lists concatenated."""
        assert (isinstance (other, GenerateResult))
        self.__usage_requirements = self.__usage_requirements.add (other.usage_requirements ())
        self.__targets.extend (other.targets ())
class AbstractTarget:
    """ Base class for all abstract targets.
    """
    def __init__ (self, name, project, manager = None):
        """ name:    name of the target
            project: the project target to which this one belongs
            manager: the Manager object. If None, uses project.manager ()
        """
        assert (isinstance (project, ProjectTarget))

        # Note: it might seem that we don't need either name or project at all.
        # However, there are places where we really need it. One example is error
        # messages which should name problematic targets. Another is setting correct
        # paths for sources and generated files.

        # Why allow manager to be specified? Because otherwise project target could not derive
        # from this class.
        if manager:
            self.manager_ = manager
        else:
            self.manager_ = project.manager ()

        self.name_ = name
        self.project_ = project

    def manager (self):
        """ Returns the Manager object for this target.
        """
        return self.manager_

    def name (self):
        """ Returns the name of this target.
        """
        return self.name_

    def project (self):
        """ Returns the project for this target.
        """
        return self.project_

    def location (self):
        """ Return the location where the target was declared.
        """
        return self.location_

    def full_name (self):
        """ Returns a user-readable name for this target.
        """
        location = self.project ().get ('location')
        return location + '/' + self.name_

    def generate (self, property_set):
        """ Takes a property set.  Generates virtual targets for this abstract
            target, using the specified properties, unless a different value of some
            feature is required by the target.
            On success, returns a GenerateResult instance with:
                - a property_set with the usage requirements to be
                  applied to dependents
                - a list of produced virtual targets, which may be
                  empty.
            If 'property_set' is empty, performs default build of this
            target, in a way specific to derived class.
        """
        # NOTE(review): raising BaseException here is unusual —
        # NotImplementedError would be conventional; confirm no caller
        # relies on the exception type before changing it.
        raise BaseException ("method should be defined in derived classes")

    def rename (self, new_name):
        """Changes the name of this target."""
        self.name_ = new_name
class ProjectTarget (AbstractTarget):
""" Project target class (derived from 'AbstractTarget')
This class these responsibilities:
- maintaining a list of main target in this project and
building it
Main targets are constructed in two stages:
- When Jamfile is read, a number of calls to 'add_alternative' is made.
At that time, alternatives can also be renamed to account for inline
targets.
- The first time 'main-target' or 'has-main-target' rule is called,
all alternatives are enumerated an main targets are created.
"""
    def __init__ (self, manager, name, project_module, parent_project, requirements, default_build):
        """Initializes a project target for the Jamfile module
        'project_module', inheriting attributes from 'parent_project'
        when one is given."""
        AbstractTarget.__init__ (self, name, self, manager)

        self.project_module_ = project_module
        self.location_ = manager.projects().attribute (project_module, 'location')
        self.requirements_ = requirements
        self.default_build_ = default_build

        self.build_dir_ = None

        # A cache of IDs
        self.ids_cache_ = {}

        # True if main targets have already been built.
        self.built_main_targets_ = False

        # A list of the registered alternatives for this project.
        self.alternatives_ = []

        # A map from main target name to the target corresponding
        # to it.
        self.main_target_ = {}

        # Targets marked as explicit.
        self.explicit_targets_ = set()

        # Targets marked as always
        self.always_targets_ = set()

        # The constants defined for this project.
        self.constants_ = {}

        # Whether targets for all main target are already created.
        # NOTE(review): re-initializes built_main_targets_ (set to False
        # above) as 0 — redundant duplicate assignment.
        self.built_main_targets_ = 0

        if parent_project:
            self.inherit (parent_project)
    # TODO: This is needed only by the 'make' rule. Need to find the
    # way to make 'make' work without this method.
    def project_module (self):
        """Returns the Jamfile module this project target was created for."""
        return self.project_module_
    def get (self, attribute):
        """Returns the value of the named project attribute."""
        return self.manager().projects().attribute(
            self.project_module_, attribute)
    def build_dir (self):
        """Returns the build directory for this project: the 'build-dir'
        attribute when set, otherwise '<project location>/bin'.
        The result is cached after the first call."""
        if not self.build_dir_:
            self.build_dir_ = self.get ('build-dir')
            if not self.build_dir_:
                self.build_dir_ = os.path.join(self.project_.get ('location'), 'bin')

        return self.build_dir_
    def generate (self, ps):
        """ Generates all possible targets contained in this project.

            Delegates to each target returned by targets_to_build() and
            merges the individual GenerateResults into one.
        """
        self.manager_.targets().log(
            "Building project '%s' with '%s'" % (self.name (), str(ps)))
        self.manager_.targets().increase_indent ()

        result = GenerateResult ()

        for t in self.targets_to_build ():
            g = t.generate (ps)
            result.extend (g)

        self.manager_.targets().decrease_indent ()

        return result
    def targets_to_build (self):
        """ Computes and returns a list of AbstractTarget instances which
            must be built when this project is built.
        """
        result = []

        if not self.built_main_targets_:
            self.build_main_targets ()

        # Collect all main targets here, except for "explicit" ones.
        for n, t in self.main_target_.iteritems ():
            if not t.name () in self.explicit_targets_:
                result.append (t)

        # Collect all projects referenced via "projects-to-build" attribute.
        self_location = self.get ('location')
        for pn in self.get ('projects-to-build'):
            result.append (self.find(pn + "/"))

        return result
def mark_targets_as_explicit(self, target_names):
    """Record the given names as targets of this project that should be
    built only on explicit request."""
    # Names, not instances, are recorded: this runs before the main
    # target instances are created.
    for name in target_names:
        self.explicit_targets_.add(name)
def mark_targets_as_always(self, target_names):
    """Record the given names as targets that must always be rebuilt."""
    for name in target_names:
        self.always_targets_.add(name)
def add_alternative(self, target_instance):
    """Register a new target alternative for this project.

    Raises IllegalOperation once the main targets have already been
    instantiated, since alternatives can no longer be attached then.
    """
    if self.built_main_targets_:
        raise IllegalOperation(
            "add-alternative called when main targets are already created"
            " for project '%s'" % self.full_name())
    self.alternatives_.append(target_instance)
def main_target(self, name):
    """Return the main target registered under 'name', building the
    main-target map on first use."""
    self.built_main_targets_ or self.build_main_targets()
    return self.main_target_[name]
def has_main_target(self, name):
    """Tell whether a main target with the specified name exists."""
    if not self.built_main_targets_:
        self.build_main_targets()

    # 'in' replaces dict.has_key, which only exists on Python 2; 'in'
    # behaves identically on both Python 2 and 3.
    return name in self.main_target_
def create_main_target(self, name):
    """Return the 'MainTarget' instance corresponding to 'name', or
    None when no main target with that name exists.
    """
    if not self.built_main_targets_:
        self.build_main_targets()

    # Bug fix: the map attribute is 'main_target_' (set in __init__ and
    # populated by build_main_targets); 'main_targets_' is never assigned
    # anywhere, so the old code raised AttributeError here.
    return self.main_target_.get(name, None)
def find_really(self, id):
    """Find and return the target with the specified id, treated
    relative to self.

    'id' may be 'project//target', a main-target name, a file in this
    project's source tree, or a project id; they are tried in that
    order.  Returns None when nothing matches.
    """
    result = None
    current_location = self.get('location')

    # Split 'project//target' references into their two halves.
    __re_split_project_target = re.compile(r'(.*)//(.*)')
    split = __re_split_project_target.match(id)

    project_part = None
    target_part = None
    if split:
        project_part = split.group(1)
        target_part = split.group(2)

    project_registry = self.project_.manager().projects()

    extra_error_message = ''
    if project_part:
        # There's explicit project part in id. Looks up the
        # project and pass the request to it.
        pm = project_registry.find(project_part, current_location)
        if pm:
            project_target = project_registry.target(pm)
            result = project_target.find(target_part, no_error=1)
        else:
            # NOTE(review): '$(project_part)' is bjam-style interpolation and
            # is emitted literally here; 'extra_error_message' is also never
            # read below -- confirm whether it should be folded into the
            # error raised by the caller.
            extra_error_message = "error: could not find project '$(project_part)'"
    else:
        # Interpret target-name as name of main target
        # Need to do this before checking for file. Consider this:
        #
        #     exe test : test.cpp ;
        #     install s : test : <location>. ;
        #
        # After first build we'll have target 'test' in Jamfile and file
        # 'test' on the disk. We need target to override the file.
        result = None
        if self.has_main_target(id):
            result = self.main_target(id)

        if not result:
            # Fall back to a reference to a file in this project's sources.
            result = FileReference(self.manager_, id, self.project_)
            if not result.exists():
                # File actually does not exist.
                # Reset 'target' so that an error is issued.
                result = None

        if not result:
            # Interpret id as project-id
            project_module = project_registry.find(id, current_location)
            if project_module:
                result = project_registry.target(project_module)

    return result
def find(self, id, no_error=False):
    """Look up 'id' relative to this project, caching the result.

    Raises unless something is found or 'no_error' is set.
    """
    cached = self.ids_cache_.get(id, None)
    if not cached:
        # Note: falsy cache entries are deliberately recomputed.
        cached = self.find_really(id)
        self.ids_cache_[id] = cached

    if cached or no_error:
        return cached

    raise BaseException("Unable to find file or target named '%s'\nreferred from project at '%s'" % (id, self.get('location')))
def build_main_targets(self):
    """Instantiate a MainTarget for every registered alternative and
    attach each alternative to its main target."""
    self.built_main_targets_ = True

    for a in self.alternatives_:
        name = a.name()
        # 'not in' replaces dict.has_key, which only exists on Python 2.
        if name not in self.main_target_:
            t = MainTarget(name, self.project_)
            self.main_target_[name] = t

        if name in self.always_targets_:
            a.always()

        self.main_target_[name].add_alternative(a)
def add_constant(self, name, value, path=0):
    """Adds a new constant for this project.

    The constant will be available for use in Jamfile
    module for this project. If 'path' is true,
    the constant will be interpreted relatively
    to the location of project.
    """
    if path:
        l = self.location_
        if not l:
            # Project corresponding to config files do not have
            # 'location' attribute, but do have source location.
            # It might be more reasonable to make every project have
            # a location and use some other approach to prevent buildable
            # targets in config files, but that's for later.
            # Bug fix: this previously read a bare 'get(...)', which raised
            # NameError -- the attribute accessor is a method on self.
            l = self.get('source-location')

        value = os.path.join(l, value)
        # Now make the value absolute path. Constants should be in
        # platform-native form.
        value = os.path.normpath(os.path.join(os.getcwd(), value))

    self.constants_[name] = value
    bjam.call("set-variable", self.project_module(), name, value)
def inherit(self, parent_project):
    """Inherit constants and user-defined rules from 'parent_project'."""
    # No need to pass the type: path constants were already converted
    # to absolute paths by the parent.
    for name in parent_project.constants_:
        self.add_constant(name, parent_project.constants_[name])

    # Import rules from the parent's bjam module, skipping the rules
    # that are part of the standard project-rule set.
    this_module = self.project_module()
    parent_module = parent_project.project_module()

    rules = bjam.call("RULENAMES", parent_module) or []
    builtin_names = self.manager().projects().project_rules().all_names()
    user_rules = [r for r in rules if r not in builtin_names]
    if user_rules:
        bjam.call("import-rules-from-parent", parent_module, this_module, user_rules)
class MainTarget (AbstractTarget):
    """ A named top-level target in Jamfile.
    """

    def __init__ (self, name, project):
        AbstractTarget.__init__ (self, name, project)
        # All registered alternatives for this main target.
        self.alternatives_ = []
        # Default build properties; must be identical across alternatives.
        self.default_build_ = property_set.empty ()

    def add_alternative (self, target):
        """ Add a new alternative for this target.
        """
        d = target.default_build ()

        # The first alternative establishes the default build; every later
        # one must agree with it.
        if self.alternatives_ and self.default_build_ != d:
            get_manager().errors()("default build must be identical in all alternatives\n"
              "main target is '%s'\n"
              "with '%s'\n"
              "differing from previous default build: '%s'" % (self.full_name (), d.raw (), self.default_build_.raw ()))
        else:
            self.default_build_ = d

        self.alternatives_.append (target)

    def __select_alternatives (self, property_set, debug):
        """ Returns the best viable alternative for this property_set
            See the documentation for selection rules.
            # TODO: shouldn't this be 'alternative' (singular)?
        """
        # When selecting alternatives we have to consider defaults,
        # for example:
        #    lib l : l.cpp : <variant>debug ;
        #    lib l : l_opt.cpp : <variant>release ;
        # won't work unless we add default value <variant>debug.
        property_set = property_set.add_defaults ()

        # The algorithm: we keep the current best viable alternative.
        # When we've got new best viable alternative, we compare it
        # with the current one.
        best = None
        best_properties = None

        if len (self.alternatives_) == 0:
            return None

        if len (self.alternatives_) == 1:
            return self.alternatives_ [0]

        if debug:
            print "Property set for selection:", property_set

        for v in self.alternatives_:
            properties = v.match (property_set, debug)

            if properties is not None:
                if not best:
                    best = v
                    best_properties = properties
                else:
                    if b2.util.set.equal (properties, best_properties):
                        # Two alternatives match equally well: ambiguous.
                        return None
                    elif b2.util.set.contains (properties, best_properties):
                        # Do nothing, this alternative is worse
                        pass
                    elif b2.util.set.contains (best_properties, properties):
                        # This alternative's condition is a superset: better.
                        best = v
                        best_properties = properties
                    else:
                        # Unrelated condition sets: ambiguous.
                        return None

        return best

    def apply_default_build (self, property_set):
        # Delegates to the module-level apply_default_build helper using
        # this target's own default-build properties.
        return apply_default_build(property_set, self.default_build_)

    def generate (self, ps):
        """ Select an alternative for this main target, by finding all alternatives
            which requirements are satisfied by 'properties' and picking the one with
            longest requirements set.
            Returns the result of calling 'generate' on that alternative.
        """
        self.manager_.targets ().start_building (self)

        # We want composite properties in build request act as if
        # all the properties it expands too are explicitly specified.
        ps = ps.expand ()

        all_property_sets = self.apply_default_build (ps)

        result = GenerateResult ()

        for p in all_property_sets:
            result.extend (self.__generate_really (p))

        self.manager_.targets ().end_building (self)

        return result

    def __generate_really (self, prop_set):
        """ Generates the main target with the given property set
            and returns a list which first element is property_set object
            containing usage_requirements of generated target and with
            generated virtual target in other elements. It's possible
            that no targets are generated.
        """
        best_alternative = self.__select_alternatives (prop_set, debug=0)

        if not best_alternative:
            # FIXME: revive.
            # self.__select_alternatives(prop_set, debug=1)
            self.manager_.errors()(
                "No best alternative for '%s'.\n"
                  % (self.full_name(),))

        result = best_alternative.generate (prop_set)

        # Now return virtual targets for the only alternative
        return result

    def rename(self, new_name):
        # Renaming a main target renames every alternative as well.
        AbstractTarget.rename(self, new_name)
        for a in self.alternatives_:
            a.rename(new_name)
class FileReference(AbstractTarget):
    """Abstract target which refers to a source file.

    This is an artificial creature; it's useful so that sources to
    a target can be represented as a list of abstract target instances.
    """

    def __init__(self, manager, file, project):
        AbstractTarget.__init__(self, file, project)
        # Cached source directory that actually contains the file.
        self.file_location_ = None

    def generate(self, properties):
        """Wrap the referred file as a virtual target."""
        file_target = self.manager_.virtual_targets().from_file(
            self.name_, self.location(), self.project_)
        return GenerateResult(None, [file_target])

    def exists(self):
        """ Returns true if the referred file really exists.
        """
        return True if self.location() else False

    def location(self):
        # Returns the location of target. Needed by 'testing.jam'
        if not self.file_location_:
            # Probe each configured source directory for the file,
            # caching the first hit.
            for src_dir in self.project_.get('source-location'):
                candidate = os.path.join(src_dir, self.name())
                if os.path.isfile(candidate):
                    self.file_location_ = src_dir
                    self.file_path = candidate
                    break
        return self.file_location_
def resolve_reference(target_reference, project):
    """ Given a target_reference, made in context of 'project',
    returns the AbstractTarget instance that is referred to, as well
    as properties explicitly specified for this reference.
    """
    # Separate the target name from the properties override.
    match = _re_separate_target_from_properties.match(target_reference)
    if not match:
        raise BaseException("Invalid reference: '%s'" % target_reference)

    id = match.group(1)

    sproperties = []
    if match.group(3):
        raw_properties = property.create_from_strings(feature.split(match.group(3)))
        sproperties = feature.expand_composites(raw_properties)

    # Find the target.
    target = project.find(id)

    return (target, property_set.create(sproperties))
def generate_from_reference(target_reference, project, property_set):
    """ Attempts to generate the target given by target reference, which
    can refer both to a main target or to a file.

    Returns a list consisting of
    - usage requirements
    - generated virtual targets, if any

    target_reference:  Target reference
    project:           Project where the reference is made
    property_set:      Properties of the main target that makes the reference
    """
    target, sproperties = resolve_reference(target_reference, project)

    # Refine the propagated properties with the source-specific
    # requirements attached to the reference.
    build_properties = property_set.propagated().refine(sproperties)
    return target.generate(build_properties)
class BasicTarget (AbstractTarget):
    """ Implements the most standard way of constructing main target
        alternative from sources. Allows sources to be either file or
        other main target and handles generation of those dependency
        targets.
    """
    def __init__ (self, name, project, sources, requirements = None, default_build = None, usage_requirements = None):
        AbstractTarget.__init__ (self, name, project)

        for s in sources:
            if get_grist (s):
                raise InvalidSource ("property '%s' found in the 'sources' parameter for '%s'" % (s, name))

        self.sources_ = sources

        # Missing property sets default to the empty property set.
        if not requirements: requirements = property_set.empty ()
        self.requirements_ = requirements

        if not default_build: default_build = property_set.empty ()
        self.default_build_ = default_build

        if not usage_requirements: usage_requirements = property_set.empty ()
        self.usage_requirements_ = usage_requirements

        # A cache for resolved references
        self.source_targets_ = None

        # A cache for generated targets
        self.generated_ = {}

        # A cache for build requests
        self.request_cache = {}

        # Result of 'capture_user_context' has everything. For example, if this
        # target is declared as result of loading Jamfile which was loaded when
        # building target B which was requested from A, then we'll have A, B and
        # Jamroot location in context. We only care about Jamroot location, most
        # of the time.
        self.user_context_ = self.manager_.errors().capture_user_context()[-1:]

        self.always_ = False

    def always(self):
        # Mark this target as always rebuilt.
        self.always_ = True

    def sources (self):
        """ Returns the list of AbstractTargets which are used as sources.
            The extra properties specified for sources are not represented.
            The only use of this rule at the moment is the '--dump-tests'
            feature of the test system.
        """
        if self.source_targets_ == None:
            self.source_targets_ = []
            for s in self.sources_:
                self.source_targets_.append(resolve_reference(s, self.project_)[0])

        return self.source_targets_

    def requirements (self):
        # Accessor for this alternative's requirements property set.
        return self.requirements_

    def default_build (self):
        # Accessor for this alternative's default-build property set.
        return self.default_build_

    def common_properties (self, build_request, requirements):
        """ Given build request and requirements, return properties
            common to dependency build request and target build
            properties.
        """
        # For optimization, we add free unconditional requirements directly,
        # without using complex algorithms.
        # This gives the complex algorithm better chance of caching results.
        # The exact effect of this "optimization" is no longer clear
        free_unconditional = []
        other = []
        for p in requirements.all():
            if p.feature().free() and not p.condition() and p.feature().name() != 'conditional':
                free_unconditional.append(p)
            else:
                other.append(p)
        other = property_set.create(other)

        # Cache the expensive refinement keyed on (request, requirements).
        # NOTE: dict.has_key is Python 2 only.
        key = (build_request, other)
        if not self.request_cache.has_key(key):
            self.request_cache[key] = self.__common_properties2 (build_request, other)

        return self.request_cache[key].add_raw(free_unconditional)

    # Given 'context' -- a set of already present properties, and 'requirements',
    # decide which extra properties should be applied to 'context'.
    # For conditional requirements, this means evaluating condition. For
    # indirect conditional requirements, this means calling a rule. Ordinary
    # requirements are always applied.
    #
    # Handles situation where evaluating one conditional requirements affects
    # condition of another conditional requirements, for example:
    #
    #     <toolset>gcc:<variant>release <variant>release:<define>RELEASE
    #
    # If 'what' is 'refined' returns context refined with new requirements.
    # If 'what' is 'added' returns just the requirements that must be applied.
    def evaluate_requirements(self, requirements, context, what):
        # Apply non-conditional requirements.
        # It's possible that further conditional requirement change
        # a value set by non-conditional requirements. For example:
        #
        #    exe a : a.cpp : <threading>single <toolset>foo:<threading>multi ;
        #
        # I'm not sure if this should be an error, or not, especially given that
        #
        #    <threading>single
        #
        # might come from project's requirements.
        unconditional = feature.expand(requirements.non_conditional())

        context = context.refine(property_set.create(unconditional))

        # We've collected properties that surely must be present in common
        # properties. We now try to figure out what other properties
        # should be added in order to satisfy rules (4)-(6) from the docs.

        conditionals = property_set.create(requirements.conditional())

        # It's supposed that #conditionals iterations
        # should be enough for properties to propagate along conditions in any
        # direction.
        max_iterations = len(conditionals.all()) +\
                         len(requirements.get("<conditional>")) + 1

        added_requirements = []
        current = context

        # It's assumed that ordinary conditional requirements can't add
        # <indirect-conditional> properties, and that rules referred
        # by <indirect-conditional> properties can't add new
        # <indirect-conditional> properties. So the list of indirect conditionals
        # does not change.
        indirect = requirements.get("<conditional>")

        ok = 0
        for i in range(0, max_iterations):

            e = conditionals.evaluate_conditionals(current).all()[:]

            # Evaluate indirect conditionals.
            # NOTE(review): the inner loop reuses the name 'i', shadowing the
            # iteration counter; harmless here because the counter is unused.
            for i in indirect:
                i = b2.util.jam_to_value_maybe(i)
                if callable(i):
                    # This is Python callable, yeah.
                    e.extend(i(current))
                else:
                    # Name of bjam function. Because bjam is unable to handle
                    # list of Property, pass list of strings.
                    br = b2.util.call_jam_function(i[1:], [str(p) for p in current.all()])
                    if br:
                        e.extend(property.create_from_strings(br))

            if e == added_requirements:
                # If we got the same result, we've found final properties.
                ok = 1
                break
            else:
                # Oops, results of evaluation of conditionals has changed.
                # Also 'current' contains leftover from previous evaluation.
                # Recompute 'current' using initial properties and conditional
                # requirements.
                added_requirements = e
                current = context.refine(property_set.create(feature.expand(e)))

        if not ok:
            self.manager().errors()("Can't evaluate conditional properties "
                + str(conditionals))

        if what == "added":
            return property_set.create(unconditional + added_requirements)
        elif what == "refined":
            return current
        else:
            # NOTE(review): other error paths call self.manager().errors()(...);
            # the missing second call here means no error is actually raised --
            # confirm intended behaviour.
            self.manager().errors("Invalid value of the 'what' parameter")

    def __common_properties2(self, build_request, requirements):
        # This guarantees that default properties are present
        # in result, unless they are overridden by some requirement.
        # TODO: There is possibility that we've added <foo>bar, which is composite
        # and expands to <foo2>bar2, but default value of <foo2> is not bar2,
        # in which case it's not clear what to do.
        #
        build_request = build_request.add_defaults()
        # Features added by 'add-default' can be composite and expand
        # to features without default values -- so they are not added yet.
        # It could be clearer/faster to expand only newly added properties
        # but that's not critical.
        build_request = build_request.expand()

        return self.evaluate_requirements(requirements, build_request,
                                          "refined")

    def match (self, property_set, debug):
        """ Returns the alternative condition for this alternative, if
            the condition is satisfied by 'property_set'.
        """
        # The condition is composed of all base non-conditional properties.
        # It's not clear if we should expand 'self.requirements_' or not.
        # For one thing, it would be nice to be able to put
        #    <toolset>msvc-6.0
        # in requirements.
        # On the other hand, if we have <variant>release in condition it
        # does not make sense to require <optimization>full to be in
        # build request just to select this variant.
        bcondition = self.requirements_.base ()
        ccondition = self.requirements_.conditional ()
        condition = b2.util.set.difference (bcondition, ccondition)

        if debug:
            print " next alternative: required properties:", [str(p) for p in condition]

        if b2.util.set.contains (condition, property_set.all()):

            if debug:
                print " matched"

            return condition

        else:
            return None

    def generate_dependency_targets (self, target_ids, property_set):
        # Generate every referenced target, collecting the produced virtual
        # targets and their usage requirements.
        targets = []
        usage_requirements = []
        for id in target_ids:

            result = generate_from_reference(id, self.project_, property_set)
            targets += result.targets()
            usage_requirements += result.usage_requirements().all()

        return (targets, usage_requirements)

    def generate_dependency_properties(self, properties, ps):
        """ Takes a target reference, which might be either target id
            or a dependency property, and generates that target using
            'property_set' as build request.

            Returns a tuple (result, usage_requirements).
        """
        result_properties = []
        usage_requirements = []
        for p in properties:

            result = generate_from_reference(p.value(), self.project_, ps)

            for t in result.targets():
                result_properties.append(property.Property(p.feature(), t))

            usage_requirements += result.usage_requirements().all()

        return (result_properties, usage_requirements)

    @user_error_checkpoint
    def generate (self, ps):
        """ Determines final build properties, generates sources,
        and calls 'construct'. This method should not be
        overridden.
        """
        self.manager_.errors().push_user_context(
            "Generating target " + self.full_name(), self.user_context_)

        if self.manager().targets().logging():
            self.manager().targets().log(
                "Building target '%s'" % self.name_)
            self.manager().targets().increase_indent ()
            self.manager().targets().log(
                "Build request: '%s'" % str (ps.raw ()))
            cf = self.manager().command_line_free_features()
            self.manager().targets().log(
                "Command line free features: '%s'" % str (cf.raw ()))
            self.manager().targets().log(
                "Target requirements: %s'" % str (self.requirements().raw ()))

        self.manager().targets().push_target(self)

        # Results are cached per expanded build request.
        if not self.generated_.has_key(ps):

            # Apply free features from the command line. If user
            # said
            #   define=FOO
            # he most likely wants this define to be set for all compiles.
            ps = ps.refine(self.manager().command_line_free_features())
            rproperties = self.common_properties (ps, self.requirements_)

            self.manager().targets().log(
                "Common properties are '%s'" % str (rproperties))

            if rproperties.get("<build>") != ["no"]:

                result = GenerateResult ()

                properties = rproperties.non_dependency ()

                (p, u) = self.generate_dependency_properties (rproperties.dependency (), rproperties)
                properties += p
                assert all(isinstance(p, property.Property) for p in properties)
                usage_requirements = u

                (source_targets, u) = self.generate_dependency_targets (self.sources_, rproperties)
                usage_requirements += u

                self.manager_.targets().log(
                    "Usage requirements for '%s' are '%s'" % (self.name_, usage_requirements))

                # FIXME:
                rproperties = property_set.create(properties + usage_requirements)
                usage_requirements = property_set.create (usage_requirements)

                self.manager_.targets().log(
                    "Build properties: '%s'" % str(rproperties))

                source_targets += rproperties.get('<source>')

                # We might get duplicate sources, for example if
                # we link to two libraries which have the same <library> in
                # usage requirements.
                # Use stable sort, since for some targets the order is
                # important. E.g. RUN_PY target needs python source to come
                # first.
                source_targets = unique(source_targets, stable=True)

                # FIXME: figure why this call messes up source_targets in-place
                result = self.construct (self.name_, source_targets[:], rproperties)

                if result:
                    assert len(result) == 2
                    gur = result [0]
                    result = result [1]

                    if self.always_:
                        for t in result:
                            t.always()

                    s = self.create_subvariant (
                        result,
                        self.manager().virtual_targets().recent_targets(), ps,
                        source_targets, rproperties, usage_requirements)
                    self.manager().virtual_targets().clear_recent_targets()

                    ur = self.compute_usage_requirements (s)
                    ur = ur.add (gur)
                    s.set_usage_requirements (ur)

                    self.manager_.targets().log (
                        "Usage requirements from '%s' are '%s'" %
                        (self.name(), str(rproperties)))

                    self.generated_[ps] = GenerateResult (ur, result)
                else:
                    self.generated_[ps] = GenerateResult (property_set.empty(), [])
            else:
                # If we just see <build>no, we cannot produce any reasonable
                # diagnostics. The code that adds this property is expected
                # to explain why a target is not built, for example using
                # the configure.log-component-configuration function.

                # If this target fails to build, add <build>no to properties
                # to cause any parent target to fail to build. Except that it
                # - does not work now, since we check for <build>no only in
                #   common properties, but not in properties that came from
                #   dependencies
                # - it's not clear if that's a good idea anyway. The alias
                #   target, for example, should not fail to build if a dependency
                #   fails.
                self.generated_[ps] = GenerateResult(
                    property_set.create(["<build>no"]), [])
        else:
            self.manager().targets().log ("Already built")

        self.manager().targets().pop_target()
        self.manager().targets().decrease_indent()

        return self.generated_[ps]

    def compute_usage_requirements (self, subvariant):
        """ Given the set of generated targets, and refined build
            properties, determines and sets appropriate usage requirements
            on those targets.
        """
        rproperties = subvariant.build_properties ()
        xusage_requirements = self.evaluate_requirements(
            self.usage_requirements_, rproperties, "added")

        # We generate all dependency properties and add them,
        # as well as their usage requirements, to result.
        (r1, r2) = self.generate_dependency_properties(xusage_requirements.dependency (), rproperties)
        extra = r1 + r2

        result = property_set.create (xusage_requirements.non_dependency () + extra)

        # Propagate usage requirements we've got from sources, except
        # for the <pch-header> and <pch-file> features.
        #
        # That feature specifies which pch file to use, and should apply
        # only to direct dependents. Consider:
        #
        #   pch pch1 : ...
        #   lib lib1 : ..... pch1 ;
        #   pch pch2 :
        #   lib lib2 : pch2 lib1 ;
        #
        # Here, lib2 should not get <pch-header> property from pch1.
        #
        # Essentially, when those two features are in usage requirements,
        # they are propagated only to direct dependents. We might need
        # a more general mechanism, but for now, only those two
        # features are special.
        removed_pch = filter(lambda prop: prop.feature().name() not in ['<pch-header>', '<pch-file>'], subvariant.sources_usage_requirements().all())
        result = result.add(property_set.PropertySet(removed_pch))

        return result

    def create_subvariant (self, root_targets, all_targets,
                           build_request, sources,
                           rproperties, usage_requirements):
        """Creates a new subvariant-dg instances for 'targets'
          - 'root-targets' the virtual targets will be returned to dependents
          - 'all-targets' all virtual
             targets created while building this main target
          - 'build-request' is property-set instance with
             requested build properties"""

        for e in root_targets:
            e.root (True)

        s = Subvariant (self, build_request, sources,
                        rproperties, usage_requirements, all_targets)

        for v in all_targets:
            if not v.creating_subvariant():
                v.creating_subvariant(s)

        return s

    def construct (self, name, source_targets, properties):
        """ Constructs the virtual targets for this abstract target and
            the dependency graph. Returns a tuple consisting of the properties and the list of virtual targets.
            Should be overridden in derived classes.
        """
        raise BaseException ("method should be defined in derived classes")
class TypedTarget (BasicTarget):
    # Generator machinery used to construct targets of self.type_.
    import generators

    def __init__ (self, name, project, type, sources, requirements, default_build, usage_requirements):
        BasicTarget.__init__ (self, name, project, sources, requirements, default_build, usage_requirements)
        # Target type (e.g. EXE, LIB) used to pick viable generators.
        self.type_ = type

    def __jam_repr__(self):
        # Representation used when the value crosses into bjam.
        return b2.util.value_to_jam(self)

    def type (self):
        # Accessor for the target type.
        return self.type_

    def construct (self, name, source_targets, prop_set):
        # Ask the generator machinery to build a target of self.type_ from
        # the sources; the basename (without extension) names the target.
        r = generators.construct (self.project_, os.path.splitext(name)[0],
                                  self.type_,
                                  prop_set.add_raw(['<main-target-type>' + self.type_]),
                                  source_targets, True)

        if not r:
            print "warning: Unable to construct '%s'" % self.full_name ()

            # Are there any top-level generators for this type/property set.
            if not generators.find_viable_generators (self.type_, prop_set):
                print "error: no generators were found for type '" + self.type_ + "'"
                print "error: and the requested properties"
                print "error: make sure you've configured the needed tools"
                print "See http://boost.org/boost-build2/doc/html/bbv2/advanced/configuration.html"

                print "To debug this problem, try the --debug-generators option."
                sys.exit(1)

        return r
def apply_default_build(property_set, default_build):
    """Merge 'default_build' into 'property_set' and return the list of
    resulting build requests."""
    # 1. Determine which features from default_build are not already
    #    present in property_set.
    specified_features = set(p.feature() for p in property_set.all())
    defaults_to_apply = [d for d in default_build.all()
                         if d.feature() not in specified_features]

    # 2. If there's nothing to apply, the request passes through unchanged.
    if not defaults_to_apply:
        return [property_set]

    # Otherwise form the new build request and pass it through
    # 'expand-no-defaults', since default_build might contain
    # "release debug", which will result in two property_sets.
    #
    # We have to compress subproperties here to prevent
    # property lists like:
    #
    #   <toolset>msvc <toolset-msvc:version>7.1 <threading>multi
    #
    # from being expanded into:
    #
    #   <toolset-msvc:version>7.1/<threading>multi
    #   <toolset>msvc/<toolset-msvc:version>7.1/<threading>multi
    #
    # due to cross-product property combination. That may
    # be an indication that
    # build_request.expand-no-defaults is the wrong rule
    # to use here.
    compressed = feature.compress_subproperties(property_set.all())

    return build_request.expand_no_defaults(
        b2.build.property_set.create(feature.expand([p]))
        for p in (compressed + defaults_to_apply))
def create_typed_metatarget(name, type, sources, requirements, default_build, usage_requirements):
    """Declare a TypedTarget alternative in the current project."""
    from b2.manager import get_manager
    t = get_manager().targets()

    project = get_manager().projects().current()

    metatarget = TypedTarget(name, project, type,
                             t.main_target_sources(sources, name),
                             t.main_target_requirements(requirements, project),
                             t.main_target_default_build(default_build, project),
                             t.main_target_usage_requirements(usage_requirements, project))
    return t.main_target_alternative(metatarget)
def create_metatarget(klass, name, sources, requirements=None, default_build=None, usage_requirements=None):
    """Instantiate metatarget class 'klass' in the current project and
    register it as a main-target alternative.

    Fix: the property-list parameters previously used mutable default
    arguments ([]), which Python shares across calls; None sentinels are
    used instead and replaced with a fresh list per call.
    """
    from b2.manager import get_manager
    if requirements is None:
        requirements = []
    if default_build is None:
        default_build = []
    if usage_requirements is None:
        usage_requirements = []

    t = get_manager().targets()

    project = get_manager().projects().current()

    return t.main_target_alternative(
        klass(name, project,
              t.main_target_sources(sources, name),
              t.main_target_requirements(requirements, project),
              t.main_target_default_build(default_build, project),
              t.main_target_usage_requirements(usage_requirements, project)))
def metatarget_function_for_class(class_):
    # Return a declaration function (installable as a bjam rule via the
    # bjam_signature decorator) that registers main-target alternatives
    # of 'class_' in the current project.

    @bjam_signature((["name"], ["sources", "*"], ["requirements", "*"],
                     ["default_build", "*"], ["usage_requirements", "*"]))
    def create_metatarget(name, sources, requirements = [], default_build = None, usage_requirements = []):
        # NOTE(review): the [] defaults are mutable and shared across calls;
        # harmless only if never mutated downstream -- confirm.
        from b2.manager import get_manager
        t = get_manager().targets()

        project = get_manager().projects().current()

        return t.main_target_alternative(
            class_(name, project,
                   t.main_target_sources(sources, name),
                   t.main_target_requirements(requirements, project),
                   t.main_target_default_build(default_build, project),
                   t.main_target_usage_requirements(usage_requirements, project)))

    return create_metatarget
| 40.396576 | 149 | 0.5758 |
acf10ef3e550da9f0909d8cb6a4a837f2b90060e | 65 | py | Python | example_snippets/multimenus_snippets/Snippets/SciPy/Physical and mathematical constants/CODATA physical constants/I/inverse meter-kelvin relationship.py | kuanpern/jupyterlab-snippets-multimenus | 477f51cfdbad7409eab45abe53cf774cd70f380c | [
"BSD-3-Clause"
] | null | null | null | example_snippets/multimenus_snippets/Snippets/SciPy/Physical and mathematical constants/CODATA physical constants/I/inverse meter-kelvin relationship.py | kuanpern/jupyterlab-snippets-multimenus | 477f51cfdbad7409eab45abe53cf774cd70f380c | [
"BSD-3-Clause"
] | null | null | null | example_snippets/multimenus_snippets/Snippets/SciPy/Physical and mathematical constants/CODATA physical constants/I/inverse meter-kelvin relationship.py | kuanpern/jupyterlab-snippets-multimenus | 477f51cfdbad7409eab45abe53cf774cd70f380c | [
"BSD-3-Clause"
] | 1 | 2021-02-04T04:51:48.000Z | 2021-02-04T04:51:48.000Z | constants.physical_constants["inverse meter-kelvin relationship"] | 65 | 65 | 0.876923 |
acf10f2f26ff39f5ba9b70658a064d3faba14c9a | 45,602 | py | Python | python/cssbeautifier/tests/generated/tests.py | royriojas/js-beautify | 9f2aa0445667b13b474ab973c464b74fc566e795 | [
"MIT"
] | 54 | 2018-07-30T11:47:21.000Z | 2022-02-11T06:19:44.000Z | python/cssbeautifier/tests/generated/tests.py | royriojas/js-beautify | 9f2aa0445667b13b474ab973c464b74fc566e795 | [
"MIT"
] | 3 | 2018-07-27T03:58:11.000Z | 2020-09-08T13:39:43.000Z | python/cssbeautifier/tests/generated/tests.py | royriojas/js-beautify | 9f2aa0445667b13b474ab973c464b74fc566e795 | [
"MIT"
] | 23 | 2018-09-04T12:54:28.000Z | 2020-11-26T01:25:09.000Z | #!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
AUTO-GENERATED. DO NOT MODIFY.
Script: test/generate-tests.js
Template: test/data/css/python.mustache
Data: test/data/css/tests.js
The MIT License (MIT)
Copyright (c) 2007-2017 Einar Lielmanis, Liam Newman, and contributors.
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation files
(the "Software"), to deal in the Software without restriction,
including without limitation the rights to use, copy, modify, merge,
publish, distribute, sublicense, and/or sell copies of the Software,
and to permit persons to whom the Software is furnished to do so,
subject to the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
import unittest
import cssbeautifier
import copy
class CSSBeautifierTest(unittest.TestCase):
    # Shared per-test option object; every test refreshes it via reset_options().
    options = None

    @classmethod
    def setUpClass(cls):
        """Build the baseline cssbeautifier options that every test starts from.

        NOTE(review): this file is generated by test/generate-tests.js; the
        duplicated default assignments removed below should also be fixed in
        test/data/css/python.mustache (or tests.js) so regeneration keeps
        this cleanup.
        """
        false = False
        true = True

        default_options = cssbeautifier.default_options()
        # The first five defaults were previously assigned twice with
        # identical values; the redundant second pass has been dropped.
        default_options.indent_size = 1
        default_options.indent_char = '\t'
        default_options.selector_separator_newline = true
        default_options.end_with_newline = false
        default_options.newline_between_rules = false
        default_options.space_around_combinator = false
        default_options.preserve_newlines = false
        default_options.space_around_selector_separator = false

        cls.default_options = default_options
    def reset_options(self):
        """Reset self.options to a fresh shallow copy of the class-wide defaults."""
        self.options = copy.copy(self.default_options)
def testGenerated(self):
self.reset_options()
test_fragment = self.decodesto
t = self.decodesto
false = False
true = True
#============================================================
# End With Newline - (eof = "\n")
self.reset_options();
self.options.end_with_newline = true
test_fragment('', '\n')
test_fragment(' .tabs{}', ' .tabs {}\n')
test_fragment(
' \n' +
'\n' +
'.tabs{}\n' +
'\n' +
'\n' +
'\n',
# -- output --
' .tabs {}\n')
test_fragment('\n')
# End With Newline - (eof = "")
self.reset_options();
self.options.end_with_newline = false
test_fragment('')
test_fragment(' .tabs{}', ' .tabs {}')
test_fragment(
' \n' +
'\n' +
'.tabs{}\n' +
'\n' +
'\n' +
'\n',
# -- output --
' .tabs {}')
test_fragment('\n', '')
#============================================================
# Empty braces
self.reset_options();
t('.tabs{}', '.tabs {}')
t('.tabs { }', '.tabs {}')
t('.tabs { }', '.tabs {}')
t(
'.tabs \n' +
'{\n' +
' \n' +
' }',
# -- output --
'.tabs {}')
#============================================================
#
self.reset_options();
t(
'#cboxOverlay {\n' +
'\tbackground: url(images/overlay.png) repeat 0 0;\n' +
'\topacity: 0.9;\n' +
'\tfilter: alpha(opacity = 90);\n' +
'}',
# -- output --
'#cboxOverlay {\n' +
'\tbackground: url(images/overlay.png) repeat 0 0;\n' +
'\topacity: 0.9;\n' +
'\tfilter: alpha(opacity=90);\n' +
'}')
#============================================================
# Support simple language specific option inheritance/overriding - (c = " ")
self.reset_options();
self.options.indent_char = ' '
self.options.indent_size = 4
self.options.js = { 'indent_size': 3 }
self.options.css = { 'indent_size': 5 }
t(
'.selector {\n' +
' font-size: 12px;\n' +
'}')
# Support simple language specific option inheritance/overriding - (c = " ")
self.reset_options();
self.options.indent_char = ' '
self.options.indent_size = 4
self.options.html = { 'js': { 'indent_size': 3 }, 'css': { 'indent_size': 5 } }
t(
'.selector {\n' +
' font-size: 12px;\n' +
'}')
# Support simple language specific option inheritance/overriding - (c = " ")
self.reset_options();
self.options.indent_char = ' '
self.options.indent_size = 9
self.options.html = { 'js': { 'indent_size': 3 }, 'css': { 'indent_size': 8 }, 'indent_size': 2}
self.options.js = { 'indent_size': 5 }
self.options.css = { 'indent_size': 3 }
t(
'.selector {\n' +
' font-size: 12px;\n' +
'}')
#============================================================
# Space Around Combinator - (space = " ")
self.reset_options();
self.options.space_around_combinator = true
t('a>b{}', 'a > b {}')
t('a~b{}', 'a ~ b {}')
t('a+b{}', 'a + b {}')
t('a+b>c{}', 'a + b > c {}')
t('a > b{}', 'a > b {}')
t('a ~ b{}', 'a ~ b {}')
t('a + b{}', 'a + b {}')
t('a + b > c{}', 'a + b > c {}')
t(
'a > b{width: calc(100% + 45px);}',
# -- output --
'a > b {\n' +
'\twidth: calc(100% + 45px);\n' +
'}')
t(
'a ~ b{width: calc(100% + 45px);}',
# -- output --
'a ~ b {\n' +
'\twidth: calc(100% + 45px);\n' +
'}')
t(
'a + b{width: calc(100% + 45px);}',
# -- output --
'a + b {\n' +
'\twidth: calc(100% + 45px);\n' +
'}')
t(
'a + b > c{width: calc(100% + 45px);}',
# -- output --
'a + b > c {\n' +
'\twidth: calc(100% + 45px);\n' +
'}')
# Space Around Combinator - (space = "")
self.reset_options();
self.options.space_around_combinator = false
t('a>b{}', 'a>b {}')
t('a~b{}', 'a~b {}')
t('a+b{}', 'a+b {}')
t('a+b>c{}', 'a+b>c {}')
t('a > b{}', 'a>b {}')
t('a ~ b{}', 'a~b {}')
t('a + b{}', 'a+b {}')
t('a + b > c{}', 'a+b>c {}')
t(
'a > b{width: calc(100% + 45px);}',
# -- output --
'a>b {\n' +
'\twidth: calc(100% + 45px);\n' +
'}')
t(
'a ~ b{width: calc(100% + 45px);}',
# -- output --
'a~b {\n' +
'\twidth: calc(100% + 45px);\n' +
'}')
t(
'a + b{width: calc(100% + 45px);}',
# -- output --
'a+b {\n' +
'\twidth: calc(100% + 45px);\n' +
'}')
t(
'a + b > c{width: calc(100% + 45px);}',
# -- output --
'a+b>c {\n' +
'\twidth: calc(100% + 45px);\n' +
'}')
# Space Around Combinator - (space = " ")
self.reset_options();
self.options.space_around_selector_separator = true
t('a>b{}', 'a > b {}')
t('a~b{}', 'a ~ b {}')
t('a+b{}', 'a + b {}')
t('a+b>c{}', 'a + b > c {}')
t('a > b{}', 'a > b {}')
t('a ~ b{}', 'a ~ b {}')
t('a + b{}', 'a + b {}')
t('a + b > c{}', 'a + b > c {}')
t(
'a > b{width: calc(100% + 45px);}',
# -- output --
'a > b {\n' +
'\twidth: calc(100% + 45px);\n' +
'}')
t(
'a ~ b{width: calc(100% + 45px);}',
# -- output --
'a ~ b {\n' +
'\twidth: calc(100% + 45px);\n' +
'}')
t(
'a + b{width: calc(100% + 45px);}',
# -- output --
'a + b {\n' +
'\twidth: calc(100% + 45px);\n' +
'}')
t(
'a + b > c{width: calc(100% + 45px);}',
# -- output --
'a + b > c {\n' +
'\twidth: calc(100% + 45px);\n' +
'}')
#============================================================
# Selector Separator - (separator = " ", separator1 = " ")
self.reset_options();
self.options.selector_separator_newline = false
self.options.selector_separator = " "
t(
'#bla, #foo{color:green}',
# -- output --
'#bla, #foo {\n' +
'\tcolor: green\n' +
'}')
t(
'@media print {.tab{}}',
# -- output --
'@media print {\n' +
'\t.tab {}\n' +
'}')
t(
'@media print {.tab,.bat{}}',
# -- output --
'@media print {\n' +
'\t.tab, .bat {}\n' +
'}')
t(
'#bla, #foo{color:black}',
# -- output --
'#bla, #foo {\n' +
'\tcolor: black\n' +
'}')
t(
'a:first-child,a:first-child{color:red;div:first-child,div:hover{color:black;}}',
# -- output --
'a:first-child, a:first-child {\n' +
'\tcolor: red;\n' +
'\tdiv:first-child, div:hover {\n' +
'\t\tcolor: black;\n' +
'\t}\n' +
'}')
# Selector Separator - (separator = " ", separator1 = " ")
self.reset_options();
self.options.selector_separator_newline = false
self.options.selector_separator = " "
t(
'#bla, #foo{color:green}',
# -- output --
'#bla, #foo {\n' +
'\tcolor: green\n' +
'}')
t(
'@media print {.tab{}}',
# -- output --
'@media print {\n' +
'\t.tab {}\n' +
'}')
t(
'@media print {.tab,.bat{}}',
# -- output --
'@media print {\n' +
'\t.tab, .bat {}\n' +
'}')
t(
'#bla, #foo{color:black}',
# -- output --
'#bla, #foo {\n' +
'\tcolor: black\n' +
'}')
t(
'a:first-child,a:first-child{color:red;div:first-child,div:hover{color:black;}}',
# -- output --
'a:first-child, a:first-child {\n' +
'\tcolor: red;\n' +
'\tdiv:first-child, div:hover {\n' +
'\t\tcolor: black;\n' +
'\t}\n' +
'}')
# Selector Separator - (separator = "\n", separator1 = "\n\t")
self.reset_options();
self.options.selector_separator_newline = true
self.options.selector_separator = " "
t(
'#bla, #foo{color:green}',
# -- output --
'#bla,\n#foo {\n' +
'\tcolor: green\n' +
'}')
t(
'@media print {.tab{}}',
# -- output --
'@media print {\n' +
'\t.tab {}\n' +
'}')
t(
'@media print {.tab,.bat{}}',
# -- output --
'@media print {\n' +
'\t.tab,\n\t.bat {}\n' +
'}')
t(
'#bla, #foo{color:black}',
# -- output --
'#bla,\n#foo {\n' +
'\tcolor: black\n' +
'}')
t(
'a:first-child,a:first-child{color:red;div:first-child,div:hover{color:black;}}',
# -- output --
'a:first-child,\na:first-child {\n' +
'\tcolor: red;\n' +
'\tdiv:first-child,\n\tdiv:hover {\n' +
'\t\tcolor: black;\n' +
'\t}\n' +
'}')
# Selector Separator - (separator = "\n", separator1 = "\n\t")
self.reset_options();
self.options.selector_separator_newline = true
self.options.selector_separator = " "
t(
'#bla, #foo{color:green}',
# -- output --
'#bla,\n#foo {\n' +
'\tcolor: green\n' +
'}')
t(
'@media print {.tab{}}',
# -- output --
'@media print {\n' +
'\t.tab {}\n' +
'}')
t(
'@media print {.tab,.bat{}}',
# -- output --
'@media print {\n' +
'\t.tab,\n\t.bat {}\n' +
'}')
t(
'#bla, #foo{color:black}',
# -- output --
'#bla,\n#foo {\n' +
'\tcolor: black\n' +
'}')
t(
'a:first-child,a:first-child{color:red;div:first-child,div:hover{color:black;}}',
# -- output --
'a:first-child,\na:first-child {\n' +
'\tcolor: red;\n' +
'\tdiv:first-child,\n\tdiv:hover {\n' +
'\t\tcolor: black;\n' +
'\t}\n' +
'}')
#============================================================
# Preserve Newlines - (separator_input = "\n\n", separator_output = "\n\n")
self.reset_options();
self.options.preserve_newlines = true
t('.div {}\n\n.span {}')
t(
'#bla, #foo{\n' +
'\tcolor:black;\n\n\tfont-size: 12px;\n' +
'}',
# -- output --
'#bla,\n' +
'#foo {\n' +
'\tcolor: black;\n\n\tfont-size: 12px;\n' +
'}')
# Preserve Newlines - (separator_input = "\n\n", separator_output = "\n")
self.reset_options();
self.options.preserve_newlines = false
t('.div {}\n\n.span {}', '.div {}\n.span {}')
t(
'#bla, #foo{\n' +
'\tcolor:black;\n\n\tfont-size: 12px;\n' +
'}',
# -- output --
'#bla,\n' +
'#foo {\n' +
'\tcolor: black;\n\tfont-size: 12px;\n' +
'}')
#============================================================
# Preserve Newlines and newline_between_rules
self.reset_options();
self.options.preserve_newlines = true
self.options.newline_between_rules = true
t(
'.div {}.span {}',
# -- output --
'.div {}\n' +
'\n' +
'.span {}')
t(
'#bla, #foo{\n' +
'\tcolor:black;\n' +
'\tfont-size: 12px;\n' +
'}',
# -- output --
'#bla,\n' +
'#foo {\n' +
'\tcolor: black;\n' +
'\tfont-size: 12px;\n' +
'}')
t(
'#bla, #foo{\n' +
'\tcolor:black;\n' +
'\n' +
'\n' +
'\tfont-size: 12px;\n' +
'}',
# -- output --
'#bla,\n' +
'#foo {\n' +
'\tcolor: black;\n' +
'\n' +
'\n' +
'\tfont-size: 12px;\n' +
'}')
t(
'#bla,\n' +
'\n' +
'#foo {\n' +
'\tcolor: black;\n' +
'\tfont-size: 12px;\n' +
'}')
t(
'a {\n' +
'\tb: c;\n' +
'\n' +
'\n' +
'\td: {\n' +
'\t\te: f;\n' +
'\t}\n' +
'}')
t(
'.div {}\n' +
'\n' +
'.span {}')
t(
'.div {\n' +
'\ta: 1;\n' +
'\n' +
'\n' +
'\tb: 2;\n' +
'}\n' +
'\n' +
'\n' +
'\n' +
'.span {\n' +
'\ta: 1;\n' +
'}')
t(
'.div {\n' +
'\n' +
'\n' +
'\ta: 1;\n' +
'\n' +
'\n' +
'\tb: 2;\n' +
'}\n' +
'\n' +
'\n' +
'\n' +
'.span {\n' +
'\ta: 1;\n' +
'}')
t(
'@media screen {\n' +
'\t.div {\n' +
'\t\ta: 1;\n' +
'\n' +
'\n' +
'\t\tb: 2;\n' +
'\t}\n' +
'\n' +
'\n' +
'\n' +
'\t.span {\n' +
'\t\ta: 1;\n' +
'\t}\n' +
'}\n' +
'\n' +
'.div {}\n' +
'\n' +
'.span {}')
#============================================================
# Preserve Newlines and add tabs
self.reset_options();
self.options.preserve_newlines = true
t(
'.tool-tip {\n' +
'\tposition: relative;\n' +
'\n' +
'\t\t\n' +
'\t.tool-tip-content {\n' +
'\t\t&>* {\n' +
'\t\t\tmargin-top: 0;\n' +
'\t\t}\n' +
'\t\t\n' +
'\n' +
'\t\t.mixin-box-shadow(.2rem .2rem .5rem rgba(0, 0, 0, .15));\n' +
'\t\tpadding: 1rem;\n' +
'\t\tposition: absolute;\n' +
'\t\tz-index: 10;\n' +
'\t}\n' +
'}',
# -- output --
'.tool-tip {\n' +
'\tposition: relative;\n' +
'\n' +
'\n' +
'\t.tool-tip-content {\n' +
'\t\t&>* {\n' +
'\t\t\tmargin-top: 0;\n' +
'\t\t}\n' +
'\n\n\t\t.mixin-box-shadow(.2rem .2rem .5rem rgba(0, 0, 0, .15));\n' +
'\t\tpadding: 1rem;\n' +
'\t\tposition: absolute;\n' +
'\t\tz-index: 10;\n' +
'\t}\n' +
'}')
#============================================================
# Newline Between Rules - (separator = "\n")
self.reset_options();
self.options.newline_between_rules = true
t(
'.div {}\n' +
'.span {}',
# -- output --
'.div {}\n' +
'\n.span {}')
t(
'.div{}\n' +
' \n' +
'.span{}',
# -- output --
'.div {}\n' +
'\n.span {}')
t(
'.div {} \n' +
' \n' +
'.span { } \n',
# -- output --
'.div {}\n' +
'\n.span {}')
t(
'.div {\n' +
' \n' +
'} \n' +
' .span {\n' +
' } ',
# -- output --
'.div {}\n' +
'\n.span {}')
t(
'.selector1 {\n' +
'\tmargin: 0; /* This is a comment including an url http://domain.com/path/to/file.ext */\n' +
'}\n' +
'.div{height:15px;}',
# -- output --
'.selector1 {\n' +
'\tmargin: 0;\n' +
'\t/* This is a comment including an url http://domain.com/path/to/file.ext */\n' +
'}\n' +
'\n.div {\n' +
'\theight: 15px;\n' +
'}')
t(
'.tabs{width:10px;//end of line comment\n' +
'height:10px;//another\n' +
'}\n' +
'.div{height:15px;}',
# -- output --
'.tabs {\n' +
'\twidth: 10px; //end of line comment\n' +
'\theight: 10px; //another\n' +
'}\n' +
'\n.div {\n' +
'\theight: 15px;\n' +
'}')
t(
'#foo {\n' +
'\tbackground-image: url(foo@2x.png);\n' +
'\t@font-face {\n' +
'\t\tfont-family: "Bitstream Vera Serif Bold";\n' +
'\t\tsrc: url("http://developer.mozilla.org/@api/deki/files/2934/=VeraSeBd.ttf");\n' +
'\t}\n' +
'}\n' +
'.div{height:15px;}',
# -- output --
'#foo {\n' +
'\tbackground-image: url(foo@2x.png);\n' +
'\t@font-face {\n' +
'\t\tfont-family: "Bitstream Vera Serif Bold";\n' +
'\t\tsrc: url("http://developer.mozilla.org/@api/deki/files/2934/=VeraSeBd.ttf");\n' +
'\t}\n' +
'}\n' +
'\n.div {\n' +
'\theight: 15px;\n' +
'}')
t(
'@media screen {\n' +
'\t#foo:hover {\n' +
'\t\tbackground-image: url(foo@2x.png);\n' +
'\t}\n' +
'\t@font-face {\n' +
'\t\tfont-family: "Bitstream Vera Serif Bold";\n' +
'\t\tsrc: url("http://developer.mozilla.org/@api/deki/files/2934/=VeraSeBd.ttf");\n' +
'\t}\n' +
'}\n' +
'.div{height:15px;}',
# -- output --
'@media screen {\n' +
'\t#foo:hover {\n' +
'\t\tbackground-image: url(foo@2x.png);\n' +
'\t}\n' +
'\t@font-face {\n' +
'\t\tfont-family: "Bitstream Vera Serif Bold";\n' +
'\t\tsrc: url("http://developer.mozilla.org/@api/deki/files/2934/=VeraSeBd.ttf");\n' +
'\t}\n' +
'}\n' +
'\n.div {\n' +
'\theight: 15px;\n' +
'}')
t(
'@font-face {\n' +
'\tfont-family: "Bitstream Vera Serif Bold";\n' +
'\tsrc: url("http://developer.mozilla.org/@api/deki/files/2934/=VeraSeBd.ttf");\n' +
'}\n' +
'@media screen {\n' +
'\t#foo:hover {\n' +
'\t\tbackground-image: url(foo.png);\n' +
'\t}\n' +
'\t@media screen and (min-device-pixel-ratio: 2) {\n' +
'\t\t@font-face {\n' +
'\t\t\tfont-family: "Helvetica Neue"\n' +
'\t\t}\n' +
'\t\t#foo:hover {\n' +
'\t\t\tbackground-image: url(foo@2x.png);\n' +
'\t\t}\n' +
'\t}\n' +
'}',
# -- output --
'@font-face {\n' +
'\tfont-family: "Bitstream Vera Serif Bold";\n' +
'\tsrc: url("http://developer.mozilla.org/@api/deki/files/2934/=VeraSeBd.ttf");\n' +
'}\n' +
'\n@media screen {\n' +
'\t#foo:hover {\n' +
'\t\tbackground-image: url(foo.png);\n' +
'\t}\n' +
'\t@media screen and (min-device-pixel-ratio: 2) {\n' +
'\t\t@font-face {\n' +
'\t\t\tfont-family: "Helvetica Neue"\n' +
'\t\t}\n' +
'\t\t#foo:hover {\n' +
'\t\t\tbackground-image: url(foo@2x.png);\n' +
'\t\t}\n' +
'\t}\n' +
'}')
t(
'a:first-child{color:red;div:first-child{color:black;}}\n' +
'.div{height:15px;}',
# -- output --
'a:first-child {\n' +
'\tcolor: red;\n' +
'\tdiv:first-child {\n' +
'\t\tcolor: black;\n' +
'\t}\n' +
'}\n' +
'\n.div {\n' +
'\theight: 15px;\n' +
'}')
t(
'a:first-child{color:red;div:not(.peq){color:black;}}\n' +
'.div{height:15px;}',
# -- output --
'a:first-child {\n' +
'\tcolor: red;\n' +
'\tdiv:not(.peq) {\n' +
'\t\tcolor: black;\n' +
'\t}\n' +
'}\n' +
'\n.div {\n' +
'\theight: 15px;\n' +
'}')
# Newline Between Rules - (separator = "")
self.reset_options();
self.options.newline_between_rules = false
t(
'.div {}\n' +
'.span {}')
t(
'.div{}\n' +
' \n' +
'.span{}',
# -- output --
'.div {}\n' +
'.span {}')
t(
'.div {} \n' +
' \n' +
'.span { } \n',
# -- output --
'.div {}\n' +
'.span {}')
t(
'.div {\n' +
' \n' +
'} \n' +
' .span {\n' +
' } ',
# -- output --
'.div {}\n' +
'.span {}')
t(
'.selector1 {\n' +
'\tmargin: 0; /* This is a comment including an url http://domain.com/path/to/file.ext */\n' +
'}\n' +
'.div{height:15px;}',
# -- output --
'.selector1 {\n' +
'\tmargin: 0;\n' +
'\t/* This is a comment including an url http://domain.com/path/to/file.ext */\n' +
'}\n' +
'.div {\n' +
'\theight: 15px;\n' +
'}')
t(
'.tabs{width:10px;//end of line comment\n' +
'height:10px;//another\n' +
'}\n' +
'.div{height:15px;}',
# -- output --
'.tabs {\n' +
'\twidth: 10px; //end of line comment\n' +
'\theight: 10px; //another\n' +
'}\n' +
'.div {\n' +
'\theight: 15px;\n' +
'}')
t(
'#foo {\n' +
'\tbackground-image: url(foo@2x.png);\n' +
'\t@font-face {\n' +
'\t\tfont-family: "Bitstream Vera Serif Bold";\n' +
'\t\tsrc: url("http://developer.mozilla.org/@api/deki/files/2934/=VeraSeBd.ttf");\n' +
'\t}\n' +
'}\n' +
'.div{height:15px;}',
# -- output --
'#foo {\n' +
'\tbackground-image: url(foo@2x.png);\n' +
'\t@font-face {\n' +
'\t\tfont-family: "Bitstream Vera Serif Bold";\n' +
'\t\tsrc: url("http://developer.mozilla.org/@api/deki/files/2934/=VeraSeBd.ttf");\n' +
'\t}\n' +
'}\n' +
'.div {\n' +
'\theight: 15px;\n' +
'}')
t(
'@media screen {\n' +
'\t#foo:hover {\n' +
'\t\tbackground-image: url(foo@2x.png);\n' +
'\t}\n' +
'\t@font-face {\n' +
'\t\tfont-family: "Bitstream Vera Serif Bold";\n' +
'\t\tsrc: url("http://developer.mozilla.org/@api/deki/files/2934/=VeraSeBd.ttf");\n' +
'\t}\n' +
'}\n' +
'.div{height:15px;}',
# -- output --
'@media screen {\n' +
'\t#foo:hover {\n' +
'\t\tbackground-image: url(foo@2x.png);\n' +
'\t}\n' +
'\t@font-face {\n' +
'\t\tfont-family: "Bitstream Vera Serif Bold";\n' +
'\t\tsrc: url("http://developer.mozilla.org/@api/deki/files/2934/=VeraSeBd.ttf");\n' +
'\t}\n' +
'}\n' +
'.div {\n' +
'\theight: 15px;\n' +
'}')
t(
'@font-face {\n' +
'\tfont-family: "Bitstream Vera Serif Bold";\n' +
'\tsrc: url("http://developer.mozilla.org/@api/deki/files/2934/=VeraSeBd.ttf");\n' +
'}\n' +
'@media screen {\n' +
'\t#foo:hover {\n' +
'\t\tbackground-image: url(foo.png);\n' +
'\t}\n' +
'\t@media screen and (min-device-pixel-ratio: 2) {\n' +
'\t\t@font-face {\n' +
'\t\t\tfont-family: "Helvetica Neue"\n' +
'\t\t}\n' +
'\t\t#foo:hover {\n' +
'\t\t\tbackground-image: url(foo@2x.png);\n' +
'\t\t}\n' +
'\t}\n' +
'}')
t(
'a:first-child{color:red;div:first-child{color:black;}}\n' +
'.div{height:15px;}',
# -- output --
'a:first-child {\n' +
'\tcolor: red;\n' +
'\tdiv:first-child {\n' +
'\t\tcolor: black;\n' +
'\t}\n' +
'}\n' +
'.div {\n' +
'\theight: 15px;\n' +
'}')
t(
'a:first-child{color:red;div:not(.peq){color:black;}}\n' +
'.div{height:15px;}',
# -- output --
'a:first-child {\n' +
'\tcolor: red;\n' +
'\tdiv:not(.peq) {\n' +
'\t\tcolor: black;\n' +
'\t}\n' +
'}\n' +
'.div {\n' +
'\theight: 15px;\n' +
'}')
#============================================================
# Functions braces
self.reset_options();
t('.tabs(){}', '.tabs() {}')
t('.tabs (){}', '.tabs () {}')
t(
'.tabs (pa, pa(1,2)), .cols { }',
# -- output --
'.tabs (pa, pa(1, 2)),\n' +
'.cols {}')
t(
'.tabs(pa, pa(1,2)), .cols { }',
# -- output --
'.tabs(pa, pa(1, 2)),\n' +
'.cols {}')
t('.tabs ( ) { }', '.tabs () {}')
t('.tabs( ) { }', '.tabs() {}')
t(
'.tabs (t, t2) \n' +
'{\n' +
' key: val(p1 ,p2); \n' +
' }',
# -- output --
'.tabs (t, t2) {\n' +
'\tkey: val(p1, p2);\n' +
'}')
t(
'.box-shadow(@shadow: 0 1px 3px rgba(0, 0, 0, .25)) {\n' +
'\t-webkit-box-shadow: @shadow;\n' +
'\t-moz-box-shadow: @shadow;\n' +
'\tbox-shadow: @shadow;\n' +
'}')
#============================================================
# Comments
self.reset_options();
t('/* test */')
t(
'.tabs{/* test */}',
# -- output --
'.tabs {\n' +
'\t/* test */\n' +
'}')
t(
'.tabs{/* test */}',
# -- output --
'.tabs {\n' +
'\t/* test */\n' +
'}')
t(
'/* header */.tabs {}',
# -- output --
'/* header */\n' +
'\n' +
'.tabs {}')
t(
'.tabs {\n' +
'/* non-header */\n' +
'width:10px;}',
# -- output --
'.tabs {\n' +
'\t/* non-header */\n' +
'\twidth: 10px;\n' +
'}')
t('/* header')
t('// comment')
t(
'.selector1 {\n' +
'\tmargin: 0; /* This is a comment including an url http://domain.com/path/to/file.ext */\n' +
'}',
# -- output --
'.selector1 {\n' +
'\tmargin: 0;\n' +
'\t/* This is a comment including an url http://domain.com/path/to/file.ext */\n' +
'}')
# single line comment support (less/sass)
t(
'.tabs{\n' +
'// comment\n' +
'width:10px;\n' +
'}',
# -- output --
'.tabs {\n' +
'\t// comment\n' +
'\twidth: 10px;\n' +
'}')
t(
'.tabs{// comment\n' +
'width:10px;\n' +
'}',
# -- output --
'.tabs {\n' +
'\t// comment\n' +
'\twidth: 10px;\n' +
'}')
t(
'//comment\n' +
'.tabs{width:10px;}',
# -- output --
'//comment\n' +
'.tabs {\n' +
'\twidth: 10px;\n' +
'}')
t(
'.tabs{//comment\n' +
'//2nd single line comment\n' +
'width:10px;}',
# -- output --
'.tabs {\n' +
'\t//comment\n' +
'\t//2nd single line comment\n' +
'\twidth: 10px;\n' +
'}')
t(
'.tabs{width:10px;//end of line comment\n' +
'}',
# -- output --
'.tabs {\n' +
'\twidth: 10px; //end of line comment\n' +
'}')
t(
'.tabs{width:10px;//end of line comment\n' +
'height:10px;}',
# -- output --
'.tabs {\n' +
'\twidth: 10px; //end of line comment\n' +
'\theight: 10px;\n' +
'}')
t(
'.tabs{width:10px;//end of line comment\n' +
'height:10px;//another\n' +
'}',
# -- output --
'.tabs {\n' +
'\twidth: 10px; //end of line comment\n' +
'\theight: 10px; //another\n' +
'}')
#============================================================
# Handle LESS property name interpolation
self.reset_options();
t(
'tag {\n' +
'\t@{prop}: none;\n' +
'}')
t(
'tag{@{prop}:none;}',
# -- output --
'tag {\n' +
'\t@{prop}: none;\n' +
'}')
t(
'tag{ @{prop}: none;}',
# -- output --
'tag {\n' +
'\t@{prop}: none;\n' +
'}')
# can also be part of property name
t(
'tag {\n' +
'\tdynamic-@{prop}: none;\n' +
'}')
t(
'tag{dynamic-@{prop}:none;}',
# -- output --
'tag {\n' +
'\tdynamic-@{prop}: none;\n' +
'}')
t(
'tag{ dynamic-@{prop}: none;}',
# -- output --
'tag {\n' +
'\tdynamic-@{prop}: none;\n' +
'}')
#============================================================
# Handle LESS property name interpolation, test #631
self.reset_options();
t(
'.generate-columns(@n, @i: 1) when (@i =< @n) {\n' +
'\t.column-@{i} {\n' +
'\t\twidth: (@i * 100% / @n);\n' +
'\t}\n' +
'\t.generate-columns(@n, (@i + 1));\n' +
'}')
t(
'.generate-columns(@n,@i:1) when (@i =< @n){.column-@{i}{width:(@i * 100% / @n);}.generate-columns(@n,(@i + 1));}',
# -- output --
'.generate-columns(@n, @i: 1) when (@i =< @n) {\n' +
'\t.column-@{i} {\n' +
'\t\twidth: (@i * 100% / @n);\n' +
'\t}\n' +
'\t.generate-columns(@n, (@i + 1));\n' +
'}')
#============================================================
# Psuedo-classes vs Variables
self.reset_options();
t('@page :first {}')
# Assume the colon goes with the @name. If we're in LESS, this is required regardless of the at-string.
t('@page:first {}', '@page: first {}')
t('@page: first {}')
#============================================================
# SASS/SCSS
self.reset_options();
# Basic Interpolation
t(
'p {\n' +
'\t$font-size: 12px;\n' +
'\t$line-height: 30px;\n' +
'\tfont: #{$font-size}/#{$line-height};\n' +
'}')
t('p.#{$name} {}')
t(
'@mixin itemPropertiesCoverItem($items, $margin) {\n' +
'\twidth: calc((100% - ((#{$items} - 1) * #{$margin}rem)) / #{$items});\n' +
'\tmargin: 1.6rem #{$margin}rem 1.6rem 0;\n' +
'}')
# Multiple filed issues in LESS due to not(:blah)
t('&:first-of-type:not(:last-child) {}')
t(
'div {\n' +
'\t&:not(:first-of-type) {\n' +
'\t\tbackground: red;\n' +
'\t}\n' +
'}')
#============================================================
# Proper handling of colon in selectors
self.reset_options();
self.options.selector_separator_newline = false
t('a :b {}')
t('a ::b {}')
t('a:b {}')
t('a::b {}')
t(
'a {}, a::b {}, a ::b {}, a:b {}, a :b {}',
# -- output --
'a {}\n' +
', a::b {}\n' +
', a ::b {}\n' +
', a:b {}\n' +
', a :b {}')
t(
'.card-blue ::-webkit-input-placeholder {\n' +
'\tcolor: #87D1FF;\n' +
'}')
t(
'div [attr] :not(.class) {\n' +
'\tcolor: red;\n' +
'}')
#============================================================
# Regresssion Tests
self.reset_options();
self.options.selector_separator_newline = false
t(
'@media(min-width:768px) {\n' +
'\t.selector::after {\n' +
'\t\t/* property: value */\n' +
'\t}\n' +
'\t.other-selector {\n' +
'\t\t/* property: value */\n' +
'\t}\n' +
'}')
t(
'.fa-rotate-270 {\n' +
'\tfilter: progid:DXImageTransform.Microsoft.BasicImage(rotation=3);\n' +
'}')
#============================================================
#
self.reset_options();
def testNewline(self):
self.reset_options()
t = self.decodesto
self.options.end_with_newline = True
t("", "\n")
t("\n", "\n")
t(".tabs{}\n", ".tabs {}\n")
t(".tabs{}", ".tabs {}\n")
    def testBasics(self):
        """Core beautification: braces, declarations, selectors, lead-in indent."""
        self.reset_options()
        t = self.decodesto

        # Empty / whitespace-only input and simple rule bodies.
        t("", "")
        t("\n", "")
        t(".tabs{}\n", ".tabs {}")
        t(".tabs{}", ".tabs {}")
        t(".tabs{color:red}", ".tabs {\n\tcolor: red\n}")
        t(".tabs{color:rgb(255, 255, 0)}", ".tabs {\n\tcolor: rgb(255, 255, 0)\n}")
        t(".tabs{background:url('back.jpg')}", ".tabs {\n\tbackground: url('back.jpg')\n}")
        t("#bla, #foo{color:red}", "#bla,\n#foo {\n\tcolor: red\n}")
        t("@media print {.tab{}}", "@media print {\n\t.tab {}\n}")
        t("@media print {.tab{background-image:url(foo@2x.png)}}", "@media print {\n\t.tab {\n\t\tbackground-image: url(foo@2x.png)\n\t}\n}")

        # Braces/quotes inside a string value must not be treated as structure.
        t("a:before {\n" +
          "\tcontent: 'a{color:black;}\"\"\\'\\'\"\\n\\n\\na{color:black}\';\n" +
          "}");

        # may not eat the space before "["
        t('html.js [data-custom="123"] {\n\topacity: 1.00;\n}')
        t('html.js *[data-custom="123"] {\n\topacity: 1.00;\n}')

        # lead-in whitespace determines base-indent.
        # lead-in newlines are stripped.
        t("\n\na, img {padding: 0.2px}", "a,\nimg {\n\tpadding: 0.2px\n}")
        t(" a, img {padding: 0.2px}", " a,\n img {\n \tpadding: 0.2px\n }")
        t(" \t \na, img {padding: 0.2px}", " \t a,\n \t img {\n \t \tpadding: 0.2px\n \t }")
        t("\n\n a, img {padding: 0.2px}", "a,\nimg {\n\tpadding: 0.2px\n}")
def testSeperateSelectors(self):
self.reset_options()
t = self.decodesto
t("#bla, #foo{color:red}", "#bla,\n#foo {\n\tcolor: red\n}")
t("a, img {padding: 0.2px}", "a,\nimg {\n\tpadding: 0.2px\n}")
    def testBlockNesting(self):
        """Nested @-rules and rule blocks: already-formatted input round-trips."""
        self.reset_options()
        t = self.decodesto

        # @font-face nested inside a rule, and inside @media, stays intact.
        t("#foo {\n\tbackground-image: url(foo@2x.png);\n\t@font-face {\n\t\tfont-family: 'Bitstream Vera Serif Bold';\n\t\tsrc: url('http://developer.mozilla.org/@api/deki/files/2934/=VeraSeBd.ttf');\n\t}\n}")
        t("@media screen {\n\t#foo:hover {\n\t\tbackground-image: url(foo@2x.png);\n\t}\n\t@font-face {\n\t\tfont-family: 'Bitstream Vera Serif Bold';\n\t\tsrc: url('http://developer.mozilla.org/@api/deki/files/2934/=VeraSeBd.ttf');\n\t}\n}")

        # Readable expansion of the one-line literal tested below:
        # @font-face {
        # font-family: 'Bitstream Vera Serif Bold';
        # src: url('http://developer.mozilla.org/@api/deki/files/2934/=VeraSeBd.ttf');
        # }
        # @media screen {
        # #foo:hover {
        # background-image: url(foo.png);
        # }
        # @media screen and (min-device-pixel-ratio: 2) {
        # @font-face {
        # font-family: 'Helvetica Neue'
        # }
        # #foo:hover {
        # background-image: url(foo@2x.png);
        # }
        # }
        # }
        t("@font-face {\n\tfont-family: 'Bitstream Vera Serif Bold';\n\tsrc: url('http://developer.mozilla.org/@api/deki/files/2934/=VeraSeBd.ttf');\n}\n@media screen {\n\t#foo:hover {\n\t\tbackground-image: url(foo.png);\n\t}\n\t@media screen and (min-device-pixel-ratio: 2) {\n\t\t@font-face {\n\t\t\tfont-family: 'Helvetica Neue'\n\t\t}\n\t\t#foo:hover {\n\t\t\tbackground-image: url(foo@2x.png);\n\t\t}\n\t}\n}")
    def testOptions(self):
        """Non-default options: 2-space indent, no newline between grouped selectors."""
        self.reset_options()
        self.options.indent_size = 2
        self.options.indent_char = ' '
        self.options.selector_separator_newline = False
        t = self.decodesto

        # pseudo-classes and pseudo-elements
        t("#foo:hover {\n background-image: url(foo@2x.png)\n}")
        t("#foo *:hover {\n color: purple\n}")
        t("::selection {\n color: #ff0000;\n}")

        # TODO: don't break nested pseduo-classes
        t("@media screen {.tab,.bat:hover {color:red}}", "@media screen {\n .tab, .bat:hover {\n color: red\n }\n}")

        # particular edge case with braces and semicolons inside tags that allows custom text
        t( "a:not(\"foobar\\\";{}omg\"){\ncontent: 'example\\';{} text';\ncontent: \"example\\\";{} text\";}",
           "a:not(\"foobar\\\";{}omg\") {\n content: 'example\\';{} text';\n content: \"example\\\";{} text\";\n}")
    def testLessCss(self):
        """LESS/SASS constructs: @variables, nested rules, and '&' parent references."""
        self.reset_options()
        t = self.decodesto

        # LESS variables declared inside a rule body.
        t('.well{ \n @well-bg:@bg-color;@well-fg:@fg-color;}','.well {\n\t@well-bg: @bg-color;\n\t@well-fg: @fg-color;\n}')
        t('.well {&.active {\nbox-shadow: 0 1px 1px @border-color, 1px 0 1px @border-color;}}',
          '.well {\n' +
          '\t&.active {\n' +
          '\t\tbox-shadow: 0 1px 1px @border-color, 1px 0 1px @border-color;\n' +
          '\t}\n' +
          '}')
        t('a {\n' +
          '\tcolor: blue;\n' +
          '\t&:hover {\n' +
          '\t\tcolor: green;\n' +
          '\t}\n' +
          '\t& & &&&.active {\n' +
          '\t\tcolor: green;\n' +
          '\t}\n' +
          '}')

        # Not sure if this is sensible
        # but I believe it is correct to not remove the space in "&: hover".
        t('a {\n' +
          '\t&: hover {\n' +
          '\t\tcolor: green;\n' +
          '\t}\n' +
          '}');

        # import
        t('@import "test";');

        # don't break nested pseudo-classes
        t("a:first-child{color:red;div:first-child{color:black;}}",
          "a:first-child {\n\tcolor: red;\n\tdiv:first-child {\n\t\tcolor: black;\n\t}\n}");

        # handle SASS/LESS parent reference
        t("div{&:first-letter {text-transform: uppercase;}}",
          "div {\n\t&:first-letter {\n\t\ttext-transform: uppercase;\n\t}\n}");

        # nested modifiers (&:hover etc)
        t(".tabs{&:hover{width:10px;}}", ".tabs {\n\t&:hover {\n\t\twidth: 10px;\n\t}\n}")
        t(".tabs{&.big{width:10px;}}", ".tabs {\n\t&.big {\n\t\twidth: 10px;\n\t}\n}")
        t(".tabs{&>big{width:10px;}}", ".tabs {\n\t&>big {\n\t\twidth: 10px;\n\t}\n}")
        t(".tabs{&+.big{width:10px;}}", ".tabs {\n\t&+.big {\n\t\twidth: 10px;\n\t}\n}")

        # nested rules
        t(".tabs{.child{width:10px;}}", ".tabs {\n\t.child {\n\t\twidth: 10px;\n\t}\n}")

        # variables
        t("@myvar:10px;.tabs{width:10px;}", "@myvar: 10px;\n.tabs {\n\twidth: 10px;\n}")
        t("@myvar:10px; .tabs{width:10px;}", "@myvar: 10px;\n.tabs {\n\twidth: 10px;\n}")
def decodesto(self, input, expectation=None):
if expectation == None:
expectation = input
self.assertMultiLineEqual(
cssbeautifier.beautify(input, self.options), expectation)
# if the expected is different from input, run it again
# expected output should be unchanged when run twice.
if not expectation != input:
self.assertMultiLineEqual(
cssbeautifier.beautify(expectation, self.options), expectation)
# Everywhere we do newlines, they should be replaced with opts.eol
self.options.eol = '\r\\n';
expectation = expectation.replace('\n', '\r\n')
self.assertMultiLineEqual(
cssbeautifier.beautify(input, self.options), expectation)
if input.find('\n') != -1:
input = input.replace('\n', '\r\n')
self.assertMultiLineEqual(
cssbeautifier.beautify(input, self.options), expectation)
# Ensure support for auto eol detection
self.options.eol = 'auto'
self.assertMultiLineEqual(
cssbeautifier.beautify(input, self.options), expectation)
self.options.eol = '\n'
# Allow running this suite directly: ``python tests.py``.
if __name__ == '__main__':
    unittest.main()
| 32.689606 | 416 | 0.380839 |
acf110204cf547867a354ad4e6213032dfb22fe8 | 25,721 | py | Python | .pc/hg-updates.diff/Lib/test/test_posixpath.py | Hadron/python | 73137f499ed658169f49273eee46845e3b53e800 | [
"PSF-2.0"
] | 3 | 2016-12-26T18:35:07.000Z | 2021-08-24T22:49:40.000Z | .pc/hg-updates.diff/Lib/test/test_posixpath.py | Hadron/python | 73137f499ed658169f49273eee46845e3b53e800 | [
"PSF-2.0"
] | null | null | null | .pc/hg-updates.diff/Lib/test/test_posixpath.py | Hadron/python | 73137f499ed658169f49273eee46845e3b53e800 | [
"PSF-2.0"
] | 1 | 2016-11-05T05:26:18.000Z | 2016-11-05T05:26:18.000Z | import itertools
import os
import posixpath
import sys
import unittest
import warnings
from posixpath import realpath, abspath, dirname, basename
from test import support, test_genericpath
try:
import posix
except ImportError:
posix = None
# An absolute path to a temporary filename for testing. We can't rely on TESTFN
# being an absolute path, so we need this.
# (abspath here is posixpath.abspath, imported above.)
ABSTFN = abspath(support.TESTFN)
def skip_if_ABSTFN_contains_backslash(test):
    """
    On Windows, posixpath.abspath still returns paths with backslashes
    instead of posix forward slashes. If this is the case, several tests
    fail, so skip them.
    """
    # Previously written as "[test, unittest.skip(msg)(test)][found_backslash]",
    # which indexed a list by a bool and eagerly built the skip-wrapped test
    # even when it was not used.  A plain conditional is clearer and lazier.
    if '\\' in ABSTFN:
        return unittest.skip("ABSTFN is not a posix path - tests fail")(test)
    return test
def safe_rmdir(dirname):
    # Best-effort cleanup helper: remove the directory if possible, swallowing
    # OSError (directory missing, never created, or not empty).
    # NOTE(review): the parameter shadows posixpath.dirname imported above;
    # kept as-is because renaming would change the keyword-argument interface.
    try:
        os.rmdir(dirname)
    except OSError:
        pass
class PosixPathTest(unittest.TestCase):

    def setUp(self):
        # Clean first so leftovers from a crashed previous run cannot
        # interfere with this test.
        self.tearDown()

    def tearDown(self):
        # Remove TESTFN, TESTFN1 and TESTFN2 whether each was created as a
        # file (unlink) or as a directory (safe_rmdir); both helpers ignore
        # "does not exist".
        for suffix in ["", "1", "2"]:
            support.unlink(support.TESTFN + suffix)
            safe_rmdir(support.TESTFN + suffix)
def test_join(self):
self.assertEqual(posixpath.join("/foo", "bar", "/bar", "baz"),
"/bar/baz")
self.assertEqual(posixpath.join("/foo", "bar", "baz"), "/foo/bar/baz")
self.assertEqual(posixpath.join("/foo/", "bar/", "baz/"),
"/foo/bar/baz/")
self.assertEqual(posixpath.join(b"/foo", b"bar", b"/bar", b"baz"),
b"/bar/baz")
self.assertEqual(posixpath.join(b"/foo", b"bar", b"baz"),
b"/foo/bar/baz")
self.assertEqual(posixpath.join(b"/foo/", b"bar/", b"baz/"),
b"/foo/bar/baz/")
def test_split(self):
self.assertEqual(posixpath.split("/foo/bar"), ("/foo", "bar"))
self.assertEqual(posixpath.split("/"), ("/", ""))
self.assertEqual(posixpath.split("foo"), ("", "foo"))
self.assertEqual(posixpath.split("////foo"), ("////", "foo"))
self.assertEqual(posixpath.split("//foo//bar"), ("//foo", "bar"))
self.assertEqual(posixpath.split(b"/foo/bar"), (b"/foo", b"bar"))
self.assertEqual(posixpath.split(b"/"), (b"/", b""))
self.assertEqual(posixpath.split(b"foo"), (b"", b"foo"))
self.assertEqual(posixpath.split(b"////foo"), (b"////", b"foo"))
self.assertEqual(posixpath.split(b"//foo//bar"), (b"//foo", b"bar"))
def splitextTest(self, path, filename, ext):
self.assertEqual(posixpath.splitext(path), (filename, ext))
self.assertEqual(posixpath.splitext("/" + path), ("/" + filename, ext))
self.assertEqual(posixpath.splitext("abc/" + path),
("abc/" + filename, ext))
self.assertEqual(posixpath.splitext("abc.def/" + path),
("abc.def/" + filename, ext))
self.assertEqual(posixpath.splitext("/abc.def/" + path),
("/abc.def/" + filename, ext))
self.assertEqual(posixpath.splitext(path + "/"),
(filename + ext + "/", ""))
path = bytes(path, "ASCII")
filename = bytes(filename, "ASCII")
ext = bytes(ext, "ASCII")
self.assertEqual(posixpath.splitext(path), (filename, ext))
self.assertEqual(posixpath.splitext(b"/" + path),
(b"/" + filename, ext))
self.assertEqual(posixpath.splitext(b"abc/" + path),
(b"abc/" + filename, ext))
self.assertEqual(posixpath.splitext(b"abc.def/" + path),
(b"abc.def/" + filename, ext))
self.assertEqual(posixpath.splitext(b"/abc.def/" + path),
(b"/abc.def/" + filename, ext))
self.assertEqual(posixpath.splitext(path + b"/"),
(filename + ext + b"/", b""))
def test_splitext(self):
self.splitextTest("foo.bar", "foo", ".bar")
self.splitextTest("foo.boo.bar", "foo.boo", ".bar")
self.splitextTest("foo.boo.biff.bar", "foo.boo.biff", ".bar")
self.splitextTest(".csh.rc", ".csh", ".rc")
self.splitextTest("nodots", "nodots", "")
self.splitextTest(".cshrc", ".cshrc", "")
self.splitextTest("...manydots", "...manydots", "")
self.splitextTest("...manydots.ext", "...manydots", ".ext")
self.splitextTest(".", ".", "")
self.splitextTest("..", "..", "")
self.splitextTest("........", "........", "")
self.splitextTest("", "", "")
def test_isabs(self):
self.assertIs(posixpath.isabs(""), False)
self.assertIs(posixpath.isabs("/"), True)
self.assertIs(posixpath.isabs("/foo"), True)
self.assertIs(posixpath.isabs("/foo/bar"), True)
self.assertIs(posixpath.isabs("foo/bar"), False)
self.assertIs(posixpath.isabs(b""), False)
self.assertIs(posixpath.isabs(b"/"), True)
self.assertIs(posixpath.isabs(b"/foo"), True)
self.assertIs(posixpath.isabs(b"/foo/bar"), True)
self.assertIs(posixpath.isabs(b"foo/bar"), False)
def test_basename(self):
self.assertEqual(posixpath.basename("/foo/bar"), "bar")
self.assertEqual(posixpath.basename("/"), "")
self.assertEqual(posixpath.basename("foo"), "foo")
self.assertEqual(posixpath.basename("////foo"), "foo")
self.assertEqual(posixpath.basename("//foo//bar"), "bar")
self.assertEqual(posixpath.basename(b"/foo/bar"), b"bar")
self.assertEqual(posixpath.basename(b"/"), b"")
self.assertEqual(posixpath.basename(b"foo"), b"foo")
self.assertEqual(posixpath.basename(b"////foo"), b"foo")
self.assertEqual(posixpath.basename(b"//foo//bar"), b"bar")
def test_dirname(self):
self.assertEqual(posixpath.dirname("/foo/bar"), "/foo")
self.assertEqual(posixpath.dirname("/"), "/")
self.assertEqual(posixpath.dirname("foo"), "")
self.assertEqual(posixpath.dirname("////foo"), "////")
self.assertEqual(posixpath.dirname("//foo//bar"), "//foo")
self.assertEqual(posixpath.dirname(b"/foo/bar"), b"/foo")
self.assertEqual(posixpath.dirname(b"/"), b"/")
self.assertEqual(posixpath.dirname(b"foo"), b"")
self.assertEqual(posixpath.dirname(b"////foo"), b"////")
self.assertEqual(posixpath.dirname(b"//foo//bar"), b"//foo")
def test_islink(self):
self.assertIs(posixpath.islink(support.TESTFN + "1"), False)
self.assertIs(posixpath.lexists(support.TESTFN + "2"), False)
f = open(support.TESTFN + "1", "wb")
try:
f.write(b"foo")
f.close()
self.assertIs(posixpath.islink(support.TESTFN + "1"), False)
if support.can_symlink():
os.symlink(support.TESTFN + "1", support.TESTFN + "2")
self.assertIs(posixpath.islink(support.TESTFN + "2"), True)
os.remove(support.TESTFN + "1")
self.assertIs(posixpath.islink(support.TESTFN + "2"), True)
self.assertIs(posixpath.exists(support.TESTFN + "2"), False)
self.assertIs(posixpath.lexists(support.TESTFN + "2"), True)
finally:
if not f.close():
f.close()
def test_ismount(self):
self.assertIs(posixpath.ismount("/"), True)
with warnings.catch_warnings():
warnings.simplefilter("ignore", DeprecationWarning)
self.assertIs(posixpath.ismount(b"/"), True)
def test_ismount_non_existent(self):
# Non-existent mountpoint.
self.assertIs(posixpath.ismount(ABSTFN), False)
try:
os.mkdir(ABSTFN)
self.assertIs(posixpath.ismount(ABSTFN), False)
finally:
safe_rmdir(ABSTFN)
@unittest.skipUnless(support.can_symlink(),
"Test requires symlink support")
def test_ismount_symlinks(self):
# Symlinks are never mountpoints.
try:
os.symlink("/", ABSTFN)
self.assertIs(posixpath.ismount(ABSTFN), False)
finally:
os.unlink(ABSTFN)
@unittest.skipIf(posix is None, "Test requires posix module")
def test_ismount_different_device(self):
# Simulate the path being on a different device from its parent by
# mocking out st_dev.
save_lstat = os.lstat
def fake_lstat(path):
st_ino = 0
st_dev = 0
if path == ABSTFN:
st_dev = 1
st_ino = 1
return posix.stat_result((0, st_ino, st_dev, 0, 0, 0, 0, 0, 0, 0))
try:
os.lstat = fake_lstat
self.assertIs(posixpath.ismount(ABSTFN), True)
finally:
os.lstat = save_lstat
def test_expanduser(self):
self.assertEqual(posixpath.expanduser("foo"), "foo")
self.assertEqual(posixpath.expanduser(b"foo"), b"foo")
with support.EnvironmentVarGuard() as env:
for home in '/', '', '//', '///':
with self.subTest(home=home):
env['HOME'] = home
self.assertEqual(posixpath.expanduser("~"), "/")
self.assertEqual(posixpath.expanduser("~/"), "/")
self.assertEqual(posixpath.expanduser("~/foo"), "/foo")
try:
import pwd
except ImportError:
pass
else:
self.assertIsInstance(posixpath.expanduser("~/"), str)
self.assertIsInstance(posixpath.expanduser(b"~/"), bytes)
# if home directory == root directory, this test makes no sense
if posixpath.expanduser("~") != '/':
self.assertEqual(
posixpath.expanduser("~") + "/",
posixpath.expanduser("~/")
)
self.assertEqual(
posixpath.expanduser(b"~") + b"/",
posixpath.expanduser(b"~/")
)
self.assertIsInstance(posixpath.expanduser("~root/"), str)
self.assertIsInstance(posixpath.expanduser("~foo/"), str)
self.assertIsInstance(posixpath.expanduser(b"~root/"), bytes)
self.assertIsInstance(posixpath.expanduser(b"~foo/"), bytes)
with support.EnvironmentVarGuard() as env:
# expanduser should fall back to using the password database
del env['HOME']
home = pwd.getpwuid(os.getuid()).pw_dir
# $HOME can end with a trailing /, so strip it (see #17809)
home = home.rstrip("/") or '/'
self.assertEqual(posixpath.expanduser("~"), home)
def test_normpath(self):
self.assertEqual(posixpath.normpath(""), ".")
self.assertEqual(posixpath.normpath("/"), "/")
self.assertEqual(posixpath.normpath("//"), "//")
self.assertEqual(posixpath.normpath("///"), "/")
self.assertEqual(posixpath.normpath("///foo/.//bar//"), "/foo/bar")
self.assertEqual(posixpath.normpath("///foo/.//bar//.//..//.//baz"),
"/foo/baz")
self.assertEqual(posixpath.normpath("///..//./foo/.//bar"), "/foo/bar")
self.assertEqual(posixpath.normpath(b""), b".")
self.assertEqual(posixpath.normpath(b"/"), b"/")
self.assertEqual(posixpath.normpath(b"//"), b"//")
self.assertEqual(posixpath.normpath(b"///"), b"/")
self.assertEqual(posixpath.normpath(b"///foo/.//bar//"), b"/foo/bar")
self.assertEqual(posixpath.normpath(b"///foo/.//bar//.//..//.//baz"),
b"/foo/baz")
self.assertEqual(posixpath.normpath(b"///..//./foo/.//bar"),
b"/foo/bar")
@skip_if_ABSTFN_contains_backslash
def test_realpath_curdir(self):
self.assertEqual(realpath('.'), os.getcwd())
self.assertEqual(realpath('./.'), os.getcwd())
self.assertEqual(realpath('/'.join(['.'] * 100)), os.getcwd())
self.assertEqual(realpath(b'.'), os.getcwdb())
self.assertEqual(realpath(b'./.'), os.getcwdb())
self.assertEqual(realpath(b'/'.join([b'.'] * 100)), os.getcwdb())
@skip_if_ABSTFN_contains_backslash
def test_realpath_pardir(self):
self.assertEqual(realpath('..'), dirname(os.getcwd()))
self.assertEqual(realpath('../..'), dirname(dirname(os.getcwd())))
self.assertEqual(realpath('/'.join(['..'] * 100)), '/')
self.assertEqual(realpath(b'..'), dirname(os.getcwdb()))
self.assertEqual(realpath(b'../..'), dirname(dirname(os.getcwdb())))
self.assertEqual(realpath(b'/'.join([b'..'] * 100)), b'/')
@unittest.skipUnless(hasattr(os, "symlink"),
"Missing symlink implementation")
@skip_if_ABSTFN_contains_backslash
def test_realpath_basic(self):
# Basic operation.
try:
os.symlink(ABSTFN+"1", ABSTFN)
self.assertEqual(realpath(ABSTFN), ABSTFN+"1")
finally:
support.unlink(ABSTFN)
@unittest.skipUnless(hasattr(os, "symlink"),
"Missing symlink implementation")
@skip_if_ABSTFN_contains_backslash
def test_realpath_relative(self):
try:
os.symlink(posixpath.relpath(ABSTFN+"1"), ABSTFN)
self.assertEqual(realpath(ABSTFN), ABSTFN+"1")
finally:
support.unlink(ABSTFN)
@unittest.skipUnless(hasattr(os, "symlink"),
"Missing symlink implementation")
@skip_if_ABSTFN_contains_backslash
def test_realpath_symlink_loops(self):
# Bug #930024, return the path unchanged if we get into an infinite
# symlink loop.
try:
os.symlink(ABSTFN, ABSTFN)
self.assertEqual(realpath(ABSTFN), ABSTFN)
os.symlink(ABSTFN+"1", ABSTFN+"2")
os.symlink(ABSTFN+"2", ABSTFN+"1")
self.assertEqual(realpath(ABSTFN+"1"), ABSTFN+"1")
self.assertEqual(realpath(ABSTFN+"2"), ABSTFN+"2")
self.assertEqual(realpath(ABSTFN+"1/x"), ABSTFN+"1/x")
self.assertEqual(realpath(ABSTFN+"1/.."), dirname(ABSTFN))
self.assertEqual(realpath(ABSTFN+"1/../x"), dirname(ABSTFN) + "/x")
os.symlink(ABSTFN+"x", ABSTFN+"y")
self.assertEqual(realpath(ABSTFN+"1/../" + basename(ABSTFN) + "y"),
ABSTFN + "y")
self.assertEqual(realpath(ABSTFN+"1/../" + basename(ABSTFN) + "1"),
ABSTFN + "1")
os.symlink(basename(ABSTFN) + "a/b", ABSTFN+"a")
self.assertEqual(realpath(ABSTFN+"a"), ABSTFN+"a/b")
os.symlink("../" + basename(dirname(ABSTFN)) + "/" +
basename(ABSTFN) + "c", ABSTFN+"c")
self.assertEqual(realpath(ABSTFN+"c"), ABSTFN+"c")
# Test using relative path as well.
with support.change_cwd(dirname(ABSTFN)):
self.assertEqual(realpath(basename(ABSTFN)), ABSTFN)
finally:
support.unlink(ABSTFN)
support.unlink(ABSTFN+"1")
support.unlink(ABSTFN+"2")
support.unlink(ABSTFN+"y")
support.unlink(ABSTFN+"c")
support.unlink(ABSTFN+"a")
@unittest.skipUnless(hasattr(os, "symlink"),
"Missing symlink implementation")
@skip_if_ABSTFN_contains_backslash
def test_realpath_repeated_indirect_symlinks(self):
# Issue #6975.
try:
os.mkdir(ABSTFN)
os.symlink('../' + basename(ABSTFN), ABSTFN + '/self')
os.symlink('self/self/self', ABSTFN + '/link')
self.assertEqual(realpath(ABSTFN + '/link'), ABSTFN)
finally:
support.unlink(ABSTFN + '/self')
support.unlink(ABSTFN + '/link')
safe_rmdir(ABSTFN)
@unittest.skipUnless(hasattr(os, "symlink"),
"Missing symlink implementation")
@skip_if_ABSTFN_contains_backslash
def test_realpath_deep_recursion(self):
depth = 10
try:
os.mkdir(ABSTFN)
for i in range(depth):
os.symlink('/'.join(['%d' % i] * 10), ABSTFN + '/%d' % (i + 1))
os.symlink('.', ABSTFN + '/0')
self.assertEqual(realpath(ABSTFN + '/%d' % depth), ABSTFN)
# Test using relative path as well.
with support.change_cwd(ABSTFN):
self.assertEqual(realpath('%d' % depth), ABSTFN)
finally:
for i in range(depth + 1):
support.unlink(ABSTFN + '/%d' % i)
safe_rmdir(ABSTFN)
@unittest.skipUnless(hasattr(os, "symlink"),
"Missing symlink implementation")
@skip_if_ABSTFN_contains_backslash
def test_realpath_resolve_parents(self):
# We also need to resolve any symlinks in the parents of a relative
# path passed to realpath. E.g.: current working directory is
# /usr/doc with 'doc' being a symlink to /usr/share/doc. We call
# realpath("a"). This should return /usr/share/doc/a/.
try:
os.mkdir(ABSTFN)
os.mkdir(ABSTFN + "/y")
os.symlink(ABSTFN + "/y", ABSTFN + "/k")
with support.change_cwd(ABSTFN + "/k"):
self.assertEqual(realpath("a"), ABSTFN + "/y/a")
finally:
support.unlink(ABSTFN + "/k")
safe_rmdir(ABSTFN + "/y")
safe_rmdir(ABSTFN)
@unittest.skipUnless(hasattr(os, "symlink"),
"Missing symlink implementation")
@skip_if_ABSTFN_contains_backslash
def test_realpath_resolve_before_normalizing(self):
# Bug #990669: Symbolic links should be resolved before we
# normalize the path. E.g.: if we have directories 'a', 'k' and 'y'
# in the following hierarchy:
# a/k/y
#
# and a symbolic link 'link-y' pointing to 'y' in directory 'a',
# then realpath("link-y/..") should return 'k', not 'a'.
try:
os.mkdir(ABSTFN)
os.mkdir(ABSTFN + "/k")
os.mkdir(ABSTFN + "/k/y")
os.symlink(ABSTFN + "/k/y", ABSTFN + "/link-y")
# Absolute path.
self.assertEqual(realpath(ABSTFN + "/link-y/.."), ABSTFN + "/k")
# Relative path.
with support.change_cwd(dirname(ABSTFN)):
self.assertEqual(realpath(basename(ABSTFN) + "/link-y/.."),
ABSTFN + "/k")
finally:
support.unlink(ABSTFN + "/link-y")
safe_rmdir(ABSTFN + "/k/y")
safe_rmdir(ABSTFN + "/k")
safe_rmdir(ABSTFN)
@unittest.skipUnless(hasattr(os, "symlink"),
"Missing symlink implementation")
@skip_if_ABSTFN_contains_backslash
def test_realpath_resolve_first(self):
# Bug #1213894: The first component of the path, if not absolute,
# must be resolved too.
try:
os.mkdir(ABSTFN)
os.mkdir(ABSTFN + "/k")
os.symlink(ABSTFN, ABSTFN + "link")
with support.change_cwd(dirname(ABSTFN)):
base = basename(ABSTFN)
self.assertEqual(realpath(base + "link"), ABSTFN)
self.assertEqual(realpath(base + "link/k"), ABSTFN + "/k")
finally:
support.unlink(ABSTFN + "link")
safe_rmdir(ABSTFN + "/k")
safe_rmdir(ABSTFN)
def test_relpath(self):
(real_getcwd, os.getcwd) = (os.getcwd, lambda: r"/home/user/bar")
try:
curdir = os.path.split(os.getcwd())[-1]
self.assertRaises(ValueError, posixpath.relpath, "")
self.assertEqual(posixpath.relpath("a"), "a")
self.assertEqual(posixpath.relpath(posixpath.abspath("a")), "a")
self.assertEqual(posixpath.relpath("a/b"), "a/b")
self.assertEqual(posixpath.relpath("../a/b"), "../a/b")
self.assertEqual(posixpath.relpath("a", "../b"), "../"+curdir+"/a")
self.assertEqual(posixpath.relpath("a/b", "../c"),
"../"+curdir+"/a/b")
self.assertEqual(posixpath.relpath("a", "b/c"), "../../a")
self.assertEqual(posixpath.relpath("a", "a"), ".")
self.assertEqual(posixpath.relpath("/foo/bar/bat", "/x/y/z"), '../../../foo/bar/bat')
self.assertEqual(posixpath.relpath("/foo/bar/bat", "/foo/bar"), 'bat')
self.assertEqual(posixpath.relpath("/foo/bar/bat", "/"), 'foo/bar/bat')
self.assertEqual(posixpath.relpath("/", "/foo/bar/bat"), '../../..')
self.assertEqual(posixpath.relpath("/foo/bar/bat", "/x"), '../foo/bar/bat')
self.assertEqual(posixpath.relpath("/x", "/foo/bar/bat"), '../../../x')
self.assertEqual(posixpath.relpath("/", "/"), '.')
self.assertEqual(posixpath.relpath("/a", "/a"), '.')
self.assertEqual(posixpath.relpath("/a/b", "/a/b"), '.')
finally:
os.getcwd = real_getcwd
def test_relpath_bytes(self):
(real_getcwdb, os.getcwdb) = (os.getcwdb, lambda: br"/home/user/bar")
try:
curdir = os.path.split(os.getcwdb())[-1]
self.assertRaises(ValueError, posixpath.relpath, b"")
self.assertEqual(posixpath.relpath(b"a"), b"a")
self.assertEqual(posixpath.relpath(posixpath.abspath(b"a")), b"a")
self.assertEqual(posixpath.relpath(b"a/b"), b"a/b")
self.assertEqual(posixpath.relpath(b"../a/b"), b"../a/b")
self.assertEqual(posixpath.relpath(b"a", b"../b"),
b"../"+curdir+b"/a")
self.assertEqual(posixpath.relpath(b"a/b", b"../c"),
b"../"+curdir+b"/a/b")
self.assertEqual(posixpath.relpath(b"a", b"b/c"), b"../../a")
self.assertEqual(posixpath.relpath(b"a", b"a"), b".")
self.assertEqual(posixpath.relpath(b"/foo/bar/bat", b"/x/y/z"), b'../../../foo/bar/bat')
self.assertEqual(posixpath.relpath(b"/foo/bar/bat", b"/foo/bar"), b'bat')
self.assertEqual(posixpath.relpath(b"/foo/bar/bat", b"/"), b'foo/bar/bat')
self.assertEqual(posixpath.relpath(b"/", b"/foo/bar/bat"), b'../../..')
self.assertEqual(posixpath.relpath(b"/foo/bar/bat", b"/x"), b'../foo/bar/bat')
self.assertEqual(posixpath.relpath(b"/x", b"/foo/bar/bat"), b'../../../x')
self.assertEqual(posixpath.relpath(b"/", b"/"), b'.')
self.assertEqual(posixpath.relpath(b"/a", b"/a"), b'.')
self.assertEqual(posixpath.relpath(b"/a/b", b"/a/b"), b'.')
self.assertRaises(TypeError, posixpath.relpath, b"bytes", "str")
self.assertRaises(TypeError, posixpath.relpath, "str", b"bytes")
finally:
os.getcwdb = real_getcwdb
def test_commonpath(self):
def check(paths, expected):
self.assertEqual(posixpath.commonpath(paths), expected)
self.assertEqual(posixpath.commonpath([os.fsencode(p) for p in paths]),
os.fsencode(expected))
def check_error(exc, paths):
self.assertRaises(exc, posixpath.commonpath, paths)
self.assertRaises(exc, posixpath.commonpath,
[os.fsencode(p) for p in paths])
self.assertRaises(ValueError, posixpath.commonpath, [])
check_error(ValueError, ['/usr', 'usr'])
check_error(ValueError, ['usr', '/usr'])
check(['/usr/local'], '/usr/local')
check(['/usr/local', '/usr/local'], '/usr/local')
check(['/usr/local/', '/usr/local'], '/usr/local')
check(['/usr/local/', '/usr/local/'], '/usr/local')
check(['/usr//local', '//usr/local'], '/usr/local')
check(['/usr/./local', '/./usr/local'], '/usr/local')
check(['/', '/dev'], '/')
check(['/usr', '/dev'], '/')
check(['/usr/lib/', '/usr/lib/python3'], '/usr/lib')
check(['/usr/lib/', '/usr/lib64/'], '/usr')
check(['/usr/lib', '/usr/lib64'], '/usr')
check(['/usr/lib/', '/usr/lib64'], '/usr')
check(['spam'], 'spam')
check(['spam', 'spam'], 'spam')
check(['spam', 'alot'], '')
check(['and/jam', 'and/spam'], 'and')
check(['and//jam', 'and/spam//'], 'and')
check(['and/./jam', './and/spam'], 'and')
check(['and/jam', 'and/spam', 'alot'], '')
check(['and/jam', 'and/spam', 'and'], 'and')
check([''], '')
check(['', 'spam/alot'], '')
check_error(ValueError, ['', '/spam/alot'])
self.assertRaises(TypeError, posixpath.commonpath,
[b'/usr/lib/', '/usr/lib/python3'])
self.assertRaises(TypeError, posixpath.commonpath,
[b'/usr/lib/', 'usr/lib/python3'])
self.assertRaises(TypeError, posixpath.commonpath,
[b'usr/lib/', '/usr/lib/python3'])
self.assertRaises(TypeError, posixpath.commonpath,
['/usr/lib/', b'/usr/lib/python3'])
self.assertRaises(TypeError, posixpath.commonpath,
['/usr/lib/', b'usr/lib/python3'])
self.assertRaises(TypeError, posixpath.commonpath,
['usr/lib/', b'/usr/lib/python3'])
class PosixCommonTest(test_genericpath.CommonTest, unittest.TestCase):
pathmodule = posixpath
attributes = ['relpath', 'samefile', 'sameopenfile', 'samestat']
if __name__=="__main__":
unittest.main()
| 44.270224 | 100 | 0.552234 |
acf1106421329b45dc1ec1f2fd10e673b03beb3c | 581 | py | Python | hypergan/__init__.py | Darkar25/HyperGAN | 76ef7e0c20569ceece88dc76396d92c77050692b | [
"MIT"
] | 1 | 2020-01-02T06:29:56.000Z | 2020-01-02T06:29:56.000Z | hypergan/__init__.py | KonradLinkowski/HyperGAN | 3153daee838dbb8e8d8926b1e81419682a24f2fe | [
"MIT"
] | 218 | 2021-05-25T01:46:15.000Z | 2022-02-11T01:08:52.000Z | hypergan/__init__.py | KonradLinkowski/HyperGAN | 3153daee838dbb8e8d8926b1e81419682a24f2fe | [
"MIT"
] | null | null | null | """
# HyperGAN
A composable GAN API and CLI. Built for developers, researchers, and artists.
HyperGAN is currently in open beta.

Please see [https://github.com/255BITS/HyperGAN](https://github.com/255BITS/HyperGAN) for an introduction, usage and API examples.
## License
MIT - https://opensource.org/licenses/MIT
"""
import hypergan
from .gan import GAN
from .cli import CLI
from .configuration import Configuration
import tensorflow as tf
import hypergan.cli
import hypergan as hg
| 24.208333 | 130 | 0.767642 |
acf110cb2ead8ba2ab4bdc52c885d5b1e3ace71b | 649 | py | Python | 01-DesenvolvimentoDeSistemas/02-LinguagensDeProgramacao/01-Python/01-ListaDeExercicios/01-Gabarito/077.py | moacirsouza/nadas | ad98d73b4281d1581fd2b2a9d29001acb426ee56 | [
"MIT"
] | 1 | 2020-07-03T13:54:18.000Z | 2020-07-03T13:54:18.000Z | 01-DesenvolvimentoDeSistemas/02-LinguagensDeProgramacao/01-Python/01-ListaDeExercicios/01-Gabarito/077.py | moacirsouza/nadas | ad98d73b4281d1581fd2b2a9d29001acb426ee56 | [
"MIT"
] | null | null | null | 01-DesenvolvimentoDeSistemas/02-LinguagensDeProgramacao/01-Python/01-ListaDeExercicios/01-Gabarito/077.py | moacirsouza/nadas | ad98d73b4281d1581fd2b2a9d29001acb426ee56 | [
"MIT"
] | null | null | null | print("""
077) Crie um programa que tenha uma tupla com várias palavras (não usar
acentos). Depois disso, você deve mostrar, para cada palavra, quais são
as suas vogais.
""")
listaDePalavras = ('mongoloide', 'egregios', 'assincrona', 'mitigar',
'sincrona', 'confinamento', 'zaragatoa',
'comorbidade', 'inferir', 'dicotomia', 'connosco',
'inerente', 'moratoria', 'corroborar', 'conquanto')
for palavra in listaDePalavras:
print(f'\nAs vogais da palavra {palavra.upper()}, são: ', end='')
for letra in palavra:
if letra in 'aeiou':
print(letra, end=' ')
print('')
| 36.055556 | 71 | 0.613251 |
acf111098ac78cd8789a3c11389ad9a906926944 | 397 | py | Python | mekavita_hu/wsgi.py | aavkvard/mekavita.hu | d793d2aecac0513cff8ac5d09d4b1260e36b93c2 | [
"BSD-2-Clause"
] | null | null | null | mekavita_hu/wsgi.py | aavkvard/mekavita.hu | d793d2aecac0513cff8ac5d09d4b1260e36b93c2 | [
"BSD-2-Clause"
] | null | null | null | mekavita_hu/wsgi.py | aavkvard/mekavita.hu | d793d2aecac0513cff8ac5d09d4b1260e36b93c2 | [
"BSD-2-Clause"
] | null | null | null | """
WSGI config for mekavita_hu project.
It exposes the WSGI callable as a module-level variable named ``application``.
For more information on this file, see
https://docs.djangoproject.com/en/1.6/howto/deployment/wsgi/
"""
import os
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "mekavita_hu.settings")
from django.core.wsgi import get_wsgi_application
application = get_wsgi_application()
| 26.466667 | 78 | 0.793451 |
acf111135ac73fcbb32855ed3d234058909aaaa9 | 5,873 | py | Python | reckoner/kube.py | LynRodWS/reckoner | e477af228d04968ed64e2ccce6e943172ffd654f | [
"Apache-2.0"
] | null | null | null | reckoner/kube.py | LynRodWS/reckoner | e477af228d04968ed64e2ccce6e943172ffd654f | [
"Apache-2.0"
] | null | null | null | reckoner/kube.py | LynRodWS/reckoner | e477af228d04968ed64e2ccce6e943172ffd654f | [
"Apache-2.0"
] | null | null | null | # Copyright 2019 FairwindsOps Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
import logging
import traceback
from .config import Config
from kubernetes import client, config
class NamespaceManager(object):
    """Creates and maintains the Kubernetes namespace for a chart release.

    Accepts:
    - namespace_name: name of the namespace to manage
    - namespace_management: dict with optional 'metadata' (annotations and
      labels to apply) and 'settings' ('overwrite': whether existing
      metadata values may be replaced; defaults to False)
    """

    def __init__(self, namespace_name, namespace_management) -> None:
        self._namespace_name = namespace_name
        self._metadata = namespace_management.get('metadata', {})
        # settings.overwrite: replace existing metadata values (True) or
        # only fill in missing ones (False, the default).
        self._overwrite = namespace_management.get(
            'settings',
            {}
        ).get(
            'overwrite',
            False
        )
        self.__load_config()
        self.config = Config()

    @property
    def namespace_name(self) -> str:
        """ Name of the namespace we are managing """
        return self._namespace_name

    @property
    def namespace(self):
        """ Namespace object we are managing; set by create_and_manage().
        https://github.com/kubernetes-client/python/blob/master/kubernetes/docs/V1Namespace.md"""
        return self._namespace

    @property
    def metadata(self) -> dict:
        """ Metadata (annotations/labels) parsed from the chart and course """
        return self._metadata

    @property
    def overwrite(self) -> bool:
        """ Whether existing namespace metadata values may be overwritten """
        return self._overwrite

    def __load_config(self):
        """ Protected method to load kubernetes config"""
        try:
            config.load_kube_config()
            self.v1client = client.CoreV1Api()
        except Exception as e:
            logging.error('Unable to load kubernetes configuration')
            logging.debug(traceback.format_exc())
            raise e

    def create_and_manage(self):
        """ Create namespace and patch metadata (no-op under --dry-run) """
        if self.config.dryrun:
            logging.warning(
                "Namespace not created or patched due to "
                "--dry-run: {}".format(self.namespace_name)
            )
            return
        self._namespace = self.create()
        self.patch_metadata()

    def patch_metadata(self):
        """ Patch the namespace with metadata, respecting the overwrite
        setting. Raises on failure.
        """
        if self.overwrite:
            patch_metadata = self.metadata
            logging.info("Overwriting Namespace '{}' Metadata".format(self.namespace_name))
        else:
            # Only add annotations/labels that are not already set on the
            # namespace; keep (and log) any existing value that differs.
            annotations = {}
            for annotation_name, annotation_value in self.metadata.get('annotations', {}).items():
                try:
                    current_annotation_value = self.namespace.metadata.annotations[annotation_name]
                    if current_annotation_value != annotation_value:
                        logging.info("Not Overwriting Metadata Annotation '{}' in Namespace '{}'".format(annotation_name, self.namespace_name))
                except (TypeError, KeyError):
                    # TypeError: the namespace has no annotations at all
                    # (attribute is None); KeyError: this one is not set yet.
                    annotations[annotation_name] = annotation_value

            labels = {}
            for label_name, label_value in self.metadata.get('labels', {}).items():
                try:
                    current_label_value = self.namespace.metadata.labels[label_name]
                    # BUGFIX: compare against this label's own value and log
                    # its own name. The original used annotation_value /
                    # annotation_name here, which compared labels against the
                    # last annotation and raised NameError when the course had
                    # labels but no annotations.
                    if current_label_value != label_value:
                        logging.info("Not Overwriting Metadata Label '{}' in Namespace '{}'".format(label_name, self.namespace_name))
                except (TypeError, KeyError):
                    labels[label_name] = label_value

            patch_metadata = {'annotations': annotations, 'labels': labels}

        logging.debug("Patch Metadata: {}".format(patch_metadata))
        patch = {'metadata': patch_metadata}
        self.v1client.patch_namespace(self.namespace_name, patch)

    def create(self):
        """ Create the namespace in the configured kubernetes cluster if it
        does not already exist.

        Arguments:
            None

        Returns the namespace object (V1Namespace).
        Raises error in case of failure.
        """
        existing = [namespace for namespace in self.cluster_namespaces
                    if namespace.metadata.name == self.namespace_name]
        if not existing:
            logging.info('Namespace {} not found. Creating it now.'.format(self.namespace_name))
            try:
                return self.v1client.create_namespace(
                    client.V1Namespace(
                        metadata=client.V1ObjectMeta(name=self.namespace_name)
                    )
                )
            except Exception as e:
                logging.error("Unable to create namespace in cluster! {}".format(e))
                logging.debug(traceback.format_exc())
                raise e
        return existing[0]

    @property
    def cluster_namespaces(self) -> list:
        """ Lists namespaces in the configured kubernetes cluster.

        No arguments

        Returns list of namespace objects
        """
        try:
            namespaces = self.v1client.list_namespace()
            return [namespace for namespace in namespaces.items]
        except Exception as e:
            logging.error("Unable to get namespaces in cluster! {}".format(e))
            logging.debug(traceback.format_exc())
            raise e
| 36.70625 | 142 | 0.613826 |
acf1117cf7f34fe46afcbb14c78a727dd7f6a611 | 1,487 | py | Python | tests/src/api/test_set_password.py | DinithHerath/drf-registration | 7cd0e48d125061c126765f7946401aa5363cef7f | [
"MIT"
] | 35 | 2020-09-23T02:22:48.000Z | 2022-03-25T10:09:48.000Z | tests/src/api/test_set_password.py | DinithHerath/drf-registration | 7cd0e48d125061c126765f7946401aa5363cef7f | [
"MIT"
] | 8 | 2020-11-17T06:56:04.000Z | 2022-03-29T23:40:23.000Z | tests/src/api/test_set_password.py | DinithHerath/drf-registration | 7cd0e48d125061c126765f7946401aa5363cef7f | [
"MIT"
] | 8 | 2020-10-05T14:56:25.000Z | 2022-03-28T14:13:26.000Z | from drf_registration.utils.users import get_user_model
from tests.utils import BaseAPITestCase
class SetPasswordAPITestCase(BaseAPITestCase):
    """Tests for the set-password endpoint."""

    def setUp(self):
        super().setUp()
        # A user without a password yet (e.g. account created via social login).
        self.user_1 = get_user_model().objects.create(
            username='user1',
            email='user1@example.com'
        )

    def test_set_password_unauthorized(self):
        # Anonymous requests must be rejected.
        self.put_json_unauthorized('set-password/', {})

    def test_set_password_invalid_new_password(self):
        self.client.force_authenticate(user=self.user_1)
        response = self.put_json_bad_request(
            'set-password/', {'password': 'short'}
        )
        self.assertHasErrorDetail(
            response.data['password'],
            'This password is too short. It must contain at least 8 characters.'
        )

    def test_set_password_existed_password(self):
        # self.user (from BaseAPITestCase) already has a password, so
        # setting one again must fail.
        self.client.force_authenticate(user=self.user)
        response = self.put_json_bad_request(
            'set-password/', {'password': 'abcABC@123'}
        )
        self.assertHasErrorDetail(
            response.data['password'], 'Your password is already existed.'
        )

    def test_set_password_ok(self):
        self.client.force_authenticate(user=self.user_1)
        self.put_json_ok('set-password/', {'password': 'abcABC@123'})
acf1134c84dabedc97ea9be62e39b64ff51dbcd8 | 1,633 | py | Python | management/admin.py | folse/MTS | 183f7d479d5f6f90ad1bdd6a20d7ec334476dce1 | [
"MIT"
] | null | null | null | management/admin.py | folse/MTS | 183f7d479d5f6f90ad1bdd6a20d7ec334476dce1 | [
"MIT"
] | null | null | null | management/admin.py | folse/MTS | 183f7d479d5f6f90ad1bdd6a20d7ec334476dce1 | [
"MIT"
] | null | null | null | from django.contrib import admin
from management import models
from parse_rest.connection import register
from parse_rest.datatypes import Object, GeoPoint
class Photo(Object):
    # parse_rest model bound to the 'Photo' Parse class; fields (e.g. url)
    # are assigned dynamically before save().
    pass
class Category_Place(Object):
    # parse_rest model bound to the 'Category_Place' Parse class.
    pass
class Place(Object):
    # parse_rest model bound to the 'Place' Parse class.
    # NOTE(review): hard-coded Parse application/REST keys are left below in
    # comments — rotate these credentials and load them from settings/env
    # instead of source control.
    #register('MQRrReTdb9c82PETy0BfUoL0ck6xGpwaZqelPWX5','44mp6LNgEmYEfZMYZQz16ncu7oqcnncGFtz762nC')
    #print 'parse register'
    pass
class PlaceAdmin(admin.ModelAdmin):
    """Admin for Place that mirrors each saved instance into Parse."""

    def save_model(self, request, obj, form, change):
        """Push the Place to Parse: upload its photo, reuse or create its
        category, create the Place object, then wire up the relations.

        NOTE(review): super(...).save_model is never called, so the Django
        row itself is not persisted here — confirm that is intentional.
        """
        photo = Photo()
        photo.url = obj.photo
        photo.save()

        # Query.filter returns a (possibly empty) sequence. The original
        # indexed [0] unconditionally, which raised IndexError for a brand
        # new category and made the creation branch unreachable.
        matches = Category_Place.Query.filter(name=obj.category)
        if matches:
            category = matches[0]
        else:
            category = Category_Place()
            category.name = obj.category
            category.save()

        place = Place()
        place.name = obj.name
        place.phone = obj.phone
        place.news = obj.news
        place.open_hour = obj.open_hour
        place.description = obj.description
        place.location = GeoPoint(latitude=obj.latitude, longitude=obj.longitude)
        place.save()

        # Relations require both sides to have objectIds, i.e. be saved.
        place.addRelation('photos', 'Photo', [photo.objectId])
        place.addRelation('category', 'Category_Place', [category.objectId])
class PlaceCategoryAdmin(admin.ModelAdmin):
    """Admin that mirrors saved categories into Parse (Category_Place)."""

    # Changelist columns; refers to the method below.
    list_display = ('get_username',)

    def get_username(self, obj):
        """Changelist column callable.

        BUGFIX: Django invokes list_display methods with the row instance,
        so the missing ``obj`` parameter raised TypeError when the
        changelist rendered.
        """
        return 'abc'

    def save_model(self, request, obj, form, change):
        """Create the matching Parse Category_Place unless one exists."""
        if Category_Place.Query.filter(name=obj.name):
            # Duplicate: skip creation. (Parenthesized print works on both
            # Python 2 and 3; the module elsewhere uses py2 print syntax.)
            print('already have this category')
        else:
            category = Category_Place()
            category.name = obj.name
            category.save()
#admin.site.register(models.Place_Category,PlaceCategoryAdmin)
#admin.site.register(models.Place,PlaceAdmin)
| 25.123077 | 97 | 0.755052 |
acf113c559adb1ab47bb621c77f319d9ff845765 | 1,148 | py | Python | tests/models/generators/image_to_image/test_unet_generators.py | tlatkowski/gans-2.0 | 974efc5bbcea39c0a7dec9405ba4514ada6dc39c | [
"MIT"
] | 78 | 2019-09-25T15:09:18.000Z | 2022-02-09T09:56:15.000Z | tests/models/generators/image_to_image/test_unet_generators.py | tlatkowski/gans-2.0 | 974efc5bbcea39c0a7dec9405ba4514ada6dc39c | [
"MIT"
] | 23 | 2019-10-09T21:24:39.000Z | 2022-03-12T00:00:53.000Z | tests/models/generators/image_to_image/test_unet_generators.py | tlatkowski/gans-2.0 | 974efc5bbcea39c0a7dec9405ba4514ada6dc39c | [
"MIT"
] | 18 | 2020-01-24T13:13:57.000Z | 2022-02-15T18:58:12.000Z | import tensorflow as tf
from easydict import EasyDict as edict
from gans.models.generators.image_to_image import unet
class TestUNetGenerators(tf.test.TestCase):
    """Shape sanity tests for the UNet generator variants."""

    def _assert_output_shape(self, generator_cls):
        """Build *generator_cls* with a standard 256x256x3 config and check
        that a batch of ones maps to an output of identical shape.

        Extracted because both tests were byte-for-byte duplicates apart
        from the generator class under test.
        """
        model_parameters = edict({
            'latent_size': 100,
            'img_height': 256,
            'img_width': 256,
            'num_channels': 3,
        })
        g = generator_cls(model_parameters)
        z = tf.ones(shape=[4, 256, 256, 3])
        output_img = g(z)
        actual_shape = output_img.shape
        expected_shape = (4, 256, 256, 3)
        self.assertEqual(actual_shape, expected_shape)

    def test_unet_generator_output_shape(self):
        self._assert_output_shape(unet.UNetGenerator)

    def test_unet_subpixel_generator_output_shape(self):
        self._assert_output_shape(unet.UNetSubpixelGenerator)
| 28.7 | 56 | 0.601916 |
acf1141aa5e46588b0270e0f5a28e53b72024a60 | 5,432 | py | Python | Support Vector Machine/supportVectorMachine.py | madscientist98/Deep-Learning | 4a5f27437224dec589623f3e4e621323fb1462bc | [
"MIT"
] | null | null | null | Support Vector Machine/supportVectorMachine.py | madscientist98/Deep-Learning | 4a5f27437224dec589623f3e4e621323fb1462bc | [
"MIT"
] | null | null | null | Support Vector Machine/supportVectorMachine.py | madscientist98/Deep-Learning | 4a5f27437224dec589623f3e4e621323fb1462bc | [
"MIT"
] | null | null | null | import matplotlib.pyplot as plt
from matplotlib import style
import numpy as np
style.use('ggplot')
class Support_Vector_Machine:
    """Hard-margin linear SVM trained by brute-force search over (w, b).

    The fit() search assumes the optimization landscape is convex and scans
    candidate weight vectors at progressively finer step sizes, keeping every
    (w, b) that satisfies the margin constraint yi*(xi.w+b) >= 1 for all
    training samples, then picking the one with the smallest ||w||.
    """
    def __init__(self, visualizacion=True):
        # visualizacion: when True, prepare a matplotlib figure for plotting.
        self.visualizacion = visualizacion
        # class label -> plot color (label +1 red, label -1 blue)
        self.colors = {1:'r',-1:'b'}
        if self.visualizacion:
            self.fig = plt.figure()
            self.ax = self.fig.add_subplot(1,1,1)
    # train
    def fit(self, data):
        """Train on *data*, a dict mapping class label (+1/-1) to an array
        of 2-D feature vectors. Sets self.w and self.b on success; raises
        IndexError if no feasible (w, b) is found at the scanned steps."""
        self.data = data
        # { ||w||: [w,b] }
        opt_dict = {}
        # Sign flips applied to w so all four quadrants are explored.
        transforms = [[1,1],
                      [-1,1],
                      [-1,-1],
                      [1,-1]]
        all_data = []
        for yi in self.data:
            for set_caracteristicas in self.data[yi]:
                for caracteristica in set_caracteristicas:
                    all_data.append(caracteristica)
        self.max_caracteristica_valor = max(all_data)
        self.min_caracteristica_valor = min(all_data)
        all_data = None
        # support vectors yi(xi.w+b) = 1
        # Step sizes from coarse to fine, scaled by the data magnitude.
        size_paso = [self.max_caracteristica_valor * 0.1,
                      self.max_caracteristica_valor * 0.01,
                      # point of expense:
                      self.max_caracteristica_valor * 0.001,
                      ]
        # extremely expensive
        b_range_multiple = 2
        # we dont need to take as small of steps
        # with b as we do w
        b_multiple = 5
        latest_optimum = self.max_caracteristica_valor*10
        for step in size_paso:
            w = np.array([latest_optimum,latest_optimum])
            # we can do this because convex
            optimized = False
            while not optimized:
                for b in np.arange(-1*(self.max_caracteristica_valor*b_range_multiple),
                                   self.max_caracteristica_valor*b_range_multiple,
                                   step*b_multiple):
                    for transformation in transforms:
                        w_t = w*transformation
                        found_option = True
                        # weakest link in the SVM fundamentally
                        # SMO attempts to fix this a bit
                        # yi(xi.w+b) >= 1
                        #
                        # #### add a break here later..
                        for i in self.data:
                            for xi in self.data[i]:
                                yi=i
                                if not yi*(np.dot(w_t,xi)+b) >= 1:
                                    found_option = False
                                    #print(xi,':',yi*(np.dot(w_t,xi)+b))
                        if found_option:
                            # Feasible candidate: record it keyed by ||w||.
                            opt_dict[np.linalg.norm(w_t)] = [w_t,b]
                if w[0] < 0:
                    # Scanned past zero: finest point for this step reached.
                    optimized = True
                    print('Optimized a step.')
                else:
                    w = w - step
        # Smallest ||w|| among feasible candidates is the max-margin choice.
        norms = sorted([n for n in opt_dict])
        #||w|| : [w,b]
        opt_choice = opt_dict[norms[0]]
        self.w = opt_choice[0]
        self.b = opt_choice[1]
        latest_optimum = opt_choice[0][0]+step*2
        for i in self.data:
            for xi in self.data[i]:
                yi=i
                print(xi,':',yi*(np.dot(self.w,xi)+self.b))
    def predict(self,caracteristicas):
        """Classify a feature vector; returns sign(x.w + b) and, when
        visualization is on, plots the point in the class color."""
        # sign( x.w+b )
        clasificacion = np.sign(np.dot(np.array(caracteristicas),self.w)+self.b)
        if clasificacion !=0 and self.visualizacion:
            self.ax.scatter(caracteristicas[0], caracteristicas[1], s=200, marker='*', c=self.colors[clasificacion])
        return clasificacion
    def visualize(self):
        """Plot the training data, both support-vector hyperplanes and the
        decision boundary, then show the figure.

        NOTE(review): reads the module-level ``data_dict`` rather than
        self.data — works only for the demo script below; confirm intent.
        """
        [[self.ax.scatter(x[0],x[1],s=100,color=self.colors[i]) for x in data_dict[i]] for i in data_dict]
        # hyperplane = x.w+b
        # v = x.w+b
        # psv = 1
        # nsv = -1
        # dec = 0
        def hyperplane(x,w,b,v):
            # x-coordinate on the line w.x + b = v, solved for the y part.
            return (-w[0]*x-b+v) / w[1]
        datarange = (self.min_caracteristica_valor*0.9,self.max_caracteristica_valor*1.1)
        hyp_x_min = datarange[0]
        hyp_x_max = datarange[1]
        # (w.x+b) = 1
        # positive support vector hyperplane
        psv1 = hyperplane(hyp_x_min, self.w, self.b, 1)
        psv2 = hyperplane(hyp_x_max, self.w, self.b, 1)
        self.ax.plot([hyp_x_min,hyp_x_max],[psv1,psv2], 'k')
        # (w.x+b) = -1
        # negative support vector hyperplane
        nsv1 = hyperplane(hyp_x_min, self.w, self.b, -1)
        nsv2 = hyperplane(hyp_x_max, self.w, self.b, -1)
        self.ax.plot([hyp_x_min,hyp_x_max],[nsv1,nsv2], 'k')
        # (w.x+b) = 0
        # decision boundary
        db1 = hyperplane(hyp_x_min, self.w, self.b, 0)
        db2 = hyperplane(hyp_x_max, self.w, self.b, 0)
        self.ax.plot([hyp_x_min,hyp_x_max],[db1,db2], 'y--')
        plt.show()
# Toy training set: class -1 in the upper-left, class +1 in the lower-right.
data_dict = {-1:np.array([[1,7],
                          [2,8],
                          [3,8],]),
             1:np.array([[5,1],
                         [6,-1],
                         [7,3],])}
# Train the brute-force SVM on the toy data.
svm = Support_Vector_Machine()
svm.fit(data=data_dict)
# Points to classify (and plot as stars) after training.
predict_us = [[0,10],
              [1,3],
              [3,4],
              [3,5],
              [5,5],
              [5,6],
              [6,-5],
              [5,8]]
for p in predict_us:
    svm.predict(p)
svm.visualize() | 33.73913 | 116 | 0.470545 |
acf114b452c4516c289be287424149e4839fddd1 | 1,330 | py | Python | tests/common.py | Tarkiyah/kaotlin | 97374f648a53f6532f2348ca3f9ace943c4e2a4c | [
"ECL-2.0",
"Apache-2.0"
] | 2 | 2019-11-18T05:22:15.000Z | 2020-02-12T15:23:14.000Z | tests/common.py | AOE-khkhan/kaolin | ed132736421ee723d14d59eaeb0286a8916a159d | [
"ECL-2.0",
"Apache-2.0"
] | null | null | null | tests/common.py | AOE-khkhan/kaolin | ed132736421ee723d14d59eaeb0286a8916a159d | [
"ECL-2.0",
"Apache-2.0"
] | 1 | 2019-11-18T13:03:53.000Z | 2019-11-18T13:03:53.000Z | # Copyright (c) 2019, NVIDIA CORPORATION. All rights reserved.
# Kornia components Copyright (c) 2019 Kornia project authors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import torch
import pytest
# From kornia
# https://github.com/arraiyopensource/kornia/
def get_test_devices():
    """Return the device names available for running the unit tests.

    The CPU is assumed to always be present; 'cuda' is added when a
    CUDA-capable device is detected.

    Return:
        list (str): list of device names
    """
    has_cuda = torch.cuda.is_available()
    return ['cpu', 'cuda'] if has_cuda else ['cpu']
# Setup devices to run unit tests
TEST_DEVICES = get_test_devices()  # evaluated once at import time
@pytest.fixture()
def device_type(request):
    """Pytest fixture exposing the value of the --typetest CLI option."""
    typ = request.config.getoption('--typetest')
    return typ
| 27.708333 | 74 | 0.726316 |
acf1162dd1e73e0b023c75fee635bb13852415b4 | 399 | py | Python | test/markdowntest.py | jfveronelli/sqink | 5e9e6bc6c5c6c00abbc07099bc1fa1ab6cf79577 | [
"Unlicense"
] | 32 | 2015-11-06T02:59:41.000Z | 2021-02-12T02:44:42.000Z | test/markdowntest.py | jfveronelli/sqink | 5e9e6bc6c5c6c00abbc07099bc1fa1ab6cf79577 | [
"Unlicense"
] | 6 | 2017-04-26T02:30:16.000Z | 2017-10-13T16:53:08.000Z | test/markdowntest.py | jfveronelli/sqink | 5e9e6bc6c5c6c00abbc07099bc1fa1ab6cf79577 | [
"Unlicense"
] | 4 | 2016-02-01T09:15:05.000Z | 2020-04-30T03:41:04.000Z | # coding:utf-8
from crossknight.sqink.domain import Note
from crossknight.sqink.markdown import renderHtml
from unittest import TestCase
class ModuleTest(TestCase):
    """Smoke test for the markdown-to-HTML rendering helper."""

    def testRenderHtmlShouldSucceed(self):
        """Rendering a note's markdown text should populate note.html."""
        sample_note = Note(title="Some title", tags=["one", "two"], text="Hello, **world**!")
        renderHtml(sample_note)
        expected_fragment = "<p>Hello, <strong>world</strong>!</p>"
        self.assertIn(expected_fragment, sample_note.html)
| 26.6 | 86 | 0.694236 |
acf1168ed2182f8a3bb532a66eb7268436938f69 | 2,117 | py | Python | Gds/src/fprime_gds/wxgui/src/GDSStatusPanelImpl.py | dgfmj/sfdghgmj | c30c61a6cb0f63d70d29c04ac31a60d53147947a | [
"Apache-2.0"
] | null | null | null | Gds/src/fprime_gds/wxgui/src/GDSStatusPanelImpl.py | dgfmj/sfdghgmj | c30c61a6cb0f63d70d29c04ac31a60d53147947a | [
"Apache-2.0"
] | null | null | null | Gds/src/fprime_gds/wxgui/src/GDSStatusPanelImpl.py | dgfmj/sfdghgmj | c30c61a6cb0f63d70d29c04ac31a60d53147947a | [
"Apache-2.0"
] | null | null | null | import wx
from . import GDSStatusPanelGUI
###########################################################################
## Class StatusImpl
###########################################################################
class StatusImpl(GDSStatusPanelGUI.Status):
    """Implementation of the status panel tab.

    Buffers hex dumps of sent/received packets and flushes them into the
    wx text controls every 500ms on the GUI thread.
    """

    def __init__(self, parent, config=None):
        GDSStatusPanelGUI.Status.__init__(self, parent)
        self._send_msg_buffer = []
        self._recv_msg_buffer = []
        # Start text control updating service
        self.update_text_ctrl()

    def __del__(self):
        pass

    @staticmethod
    def _format_packet(data):
        """Render a binary packet as a bracketed hex dump like "[00 12 34]\\n\\n".

        Shared by send() and on_recv(), which previously duplicated this
        logic. Also fixes the format spec: "{:2x}" space-padded single-digit
        bytes, while the documented sample output ("[00 12 34 ...]") is
        zero-padded, so "{:02x}" is used.
        """
        return (
            "["
            + " ".join(
                [
                    # str elements occur when iterating py2-style byte strings.
                    "{:02x}".format(byte if type(byte) != str else ord(byte))
                    for byte in data
                ]
            )
            + "]\n\n"
        )

    def update_text_ctrl(self):
        """Called to update the status panel with new raw output. Called every 500ms on the GUI thread."""
        for m in self._recv_msg_buffer:
            self.StatusTabRecvTextCtl.AppendText(m)
        for m in self._send_msg_buffer:
            self.StatusTabSendTextCtl.AppendText(m)
        self._send_msg_buffer = []
        self._recv_msg_buffer = []
        # Re-schedule itself; wx.CallLater keeps this on the GUI thread.
        wx.CallLater(500, self.update_text_ctrl)

    # [00 12 34 ...]
    # Some data was sent
    def send(self, data, dest):
        """Send callback for the encoder

        Arguments:
            data {bin} -- binary data packet
            dest {string} -- where the data will be sent by the server
        """
        self._send_msg_buffer.append(self._format_packet(data))

    # Some data was recvd
    def on_recv(self, data):
        """Data was recved on the socket server

        Arguments:
            data {bin} -- binary data string that was recved
        """
        self._recv_msg_buffer.append(self._format_packet(data))
| 28.226667 | 106 | 0.490789 |
acf116cba8ca402c93e2f50d8b945e775c955a47 | 2,193 | py | Python | test_autolens/integration/tests/imaging/lens__source_inversion/rectangular/lens_mass__source__hyper.py | harshitjindal/PyAutoLens | f1d3f08f12a61f6634e1b7a0ccf8f5cfe0252035 | [
"MIT"
] | 1 | 2020-04-06T20:07:56.000Z | 2020-04-06T20:07:56.000Z | test_autolens/integration/tests/imaging/lens__source_inversion/rectangular/lens_mass__source__hyper.py | harshitjindal/PyAutoLens | f1d3f08f12a61f6634e1b7a0ccf8f5cfe0252035 | [
"MIT"
] | null | null | null | test_autolens/integration/tests/imaging/lens__source_inversion/rectangular/lens_mass__source__hyper.py | harshitjindal/PyAutoLens | f1d3f08f12a61f6634e1b7a0ccf8f5cfe0252035 | [
"MIT"
] | null | null | null | import autofit as af
import autolens as al
from test_autolens.integration.tests.imaging import runner
# Integration-test identifiers consumed by the runner module below.
test_type = "lens__source_inversion"
test_name = "lens_mass__source_rectangular__hyper"
data_type = "lens_sie__source_smooth"  # simulated dataset profile name
data_resolution = "lsst"  # instrument resolution of the simulated imaging
def make_pipeline(name, phase_folders, optimizer_class=af.MultiNest):
    """Build the two-phase lens-modelling pipeline for this integration test.

    Phase 1 fits an isothermal lens mass plus a rectangular-pixelization
    source (with some lens priors pinned); phase 2 refits with hyper-galaxy
    components seeded from phase 1's results.
    """
    class SourcePix(al.PhaseImaging):
        def customize_priors(self, results):
            # Pin the lens centre to the origin and fix the Einstein radius
            # so phase 1 only searches the remaining mass parameters.
            self.galaxies.lens.mass.centre.centre_0 = 0.0
            self.galaxies.lens.mass.centre.centre_1 = 0.0
            self.galaxies.lens.mass.einstein_radius = 1.6
    phase1 = SourcePix(
        phase_name="phase_1",
        phase_folders=phase_folders,
        galaxies=dict(
            lens=al.GalaxyModel(redshift=0.5, mass=al.mp.EllipticalIsothermal),
            source=al.GalaxyModel(
                redshift=1.0,
                pixelization=al.pix.Rectangular,
                regularization=al.reg.Constant,
            ),
        ),
        optimizer_class=optimizer_class,
    )
    # MultiNest sampler settings for phase 1.
    phase1.optimizer.const_efficiency_mode = True
    phase1.optimizer.n_live_points = 60
    phase1.optimizer.sampling_efficiency = 0.8
    phase1.extend_with_multiple_hyper_phases(hyper_galaxy=True)
    phase2 = al.PhaseImaging(
        phase_name="phase_2",
        phase_folders=phase_folders,
        galaxies=dict(
            lens=al.GalaxyModel(
                redshift=0.5,
                # Carry phase 1's fitted mass model forward as the prior.
                mass=phase1.result.model.galaxies.lens.mass,
                hyper_galaxy=al.HyperGalaxy,
            ),
            source=al.GalaxyModel(
                redshift=1.0,
                pixelization=phase1.result.model.galaxies.source.pixelization,
                regularization=phase1.result.model.galaxies.source.regularization,
                hyper_galaxy=phase1.result.hyper_combined.instance.galaxies.source.hyper_galaxy,
            ),
        ),
        optimizer_class=optimizer_class,
    )
    # MultiNest sampler settings for phase 2.
    phase2.optimizer.const_efficiency_mode = True
    phase2.optimizer.n_live_points = 40
    phase2.optimizer.sampling_efficiency = 0.8
    return al.PipelineDataset(name, phase1, phase2)
# Script entry point: hand this module to the integration-test runner.
if __name__ == "__main__":
    import sys
    runner.run(sys.modules[__name__])
| 31.782609 | 96 | 0.660283 |
acf116e6e8b25d76e7a52a3f7d419f625e006cba | 65 | py | Python | src/scheduler/models/__init__.py | monosidev/monosi | a88b689fc74010b10dbabb32f4b2bdeae865f4d5 | [
"Apache-2.0"
] | 156 | 2021-11-19T18:50:14.000Z | 2022-03-31T19:48:59.000Z | src/scheduler/models/__init__.py | monosidev/monosi | a88b689fc74010b10dbabb32f4b2bdeae865f4d5 | [
"Apache-2.0"
] | 30 | 2021-12-27T19:30:56.000Z | 2022-03-30T17:49:00.000Z | src/scheduler/models/__init__.py | monosidev/monosi | a88b689fc74010b10dbabb32f4b2bdeae865f4d5 | [
"Apache-2.0"
] | 14 | 2022-01-17T23:24:34.000Z | 2022-03-29T09:27:47.000Z | from sqlalchemy.orm import registry
mapper_registry = registry() | 21.666667 | 35 | 0.830769 |
acf1184e0595d1ee0bb662e7131989dde8585a17 | 4,449 | py | Python | tests/kafkatest/sanity_checks/test_console_consumer.py | BoYiZhang/kafka-2.4.0-src | 752b76f7f48ca4c5ea20770fd990293b1b28fce4 | [
"Apache-2.0"
] | 126 | 2018-08-31T21:47:30.000Z | 2022-03-11T10:01:31.000Z | tests/kafkatest/sanity_checks/test_console_consumer.py | BoYiZhang/kafka-2.4.0-src | 752b76f7f48ca4c5ea20770fd990293b1b28fce4 | [
"Apache-2.0"
] | 75 | 2019-03-07T20:24:18.000Z | 2022-03-31T02:14:37.000Z | tests/kafkatest/sanity_checks/test_console_consumer.py | BoYiZhang/kafka-2.4.0-src | 752b76f7f48ca4c5ea20770fd990293b1b28fce4 | [
"Apache-2.0"
] | 46 | 2018-09-13T07:27:19.000Z | 2022-03-23T17:49:13.000Z | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import time
from ducktape.mark import matrix
from ducktape.mark import parametrize
from ducktape.mark.resource import cluster
from ducktape.tests.test import Test
from ducktape.utils.util import wait_until
from kafkatest.services.console_consumer import ConsoleConsumer
from kafkatest.services.kafka import KafkaService
from kafkatest.services.verifiable_producer import VerifiableProducer
from kafkatest.services.zookeeper import ZookeeperService
from kafkatest.utils.remote_account import line_count, file_exists
from kafkatest.version import LATEST_0_8_2
class ConsoleConsumerTest(Test):
    """Sanity checks on console consumer service class."""
    def __init__(self, test_context):
        super(ConsoleConsumerTest, self).__init__(test_context)
        # One-node Zookeeper + one-node Kafka with a single-partition topic.
        self.topic = "topic"
        self.zk = ZookeeperService(test_context, num_nodes=1)
        self.kafka = KafkaService(self.test_context, num_nodes=1, zk=self.zk, zk_chroot="/kafka",
                                  topics={self.topic: {"partitions": 1, "replication-factor": 1}})
        self.consumer = ConsoleConsumer(self.test_context, num_nodes=1, kafka=self.kafka, topic=self.topic)
    def setUp(self):
        # Zookeeper must be up before Kafka is started inside each test.
        self.zk.start()
    @cluster(num_nodes=3)
    @matrix(security_protocol=['PLAINTEXT', 'SSL'])
    @cluster(num_nodes=4)
    @matrix(security_protocol=['SASL_SSL'], sasl_mechanism=['PLAIN', 'SCRAM-SHA-256', 'SCRAM-SHA-512'])
    @matrix(security_protocol=['SASL_PLAINTEXT', 'SASL_SSL'])
    def test_lifecycle(self, security_protocol, sasl_mechanism='GSSAPI'):
        """Check that console consumer starts/stops properly, and that we are capturing log output."""
        self.kafka.security_protocol = security_protocol
        self.kafka.client_sasl_mechanism = sasl_mechanism
        self.kafka.interbroker_sasl_mechanism = sasl_mechanism
        self.kafka.start()
        self.consumer.security_protocol = security_protocol
        t0 = time.time()
        self.consumer.start()
        node = self.consumer.nodes[0]
        # Fail fast if the consumer process takes too long to come alive.
        wait_until(lambda: self.consumer.alive(node),
                   timeout_sec=20, backoff_sec=.2, err_msg="Consumer was too slow to start")
        self.logger.info("consumer started in %s seconds " % str(time.time() - t0))
        # Verify that log output is happening
        wait_until(lambda: file_exists(node, ConsoleConsumer.LOG_FILE), timeout_sec=10,
                   err_msg="Timed out waiting for consumer log file to exist.")
        wait_until(lambda: line_count(node, ConsoleConsumer.LOG_FILE) > 0, timeout_sec=1,
                   backoff_sec=.25, err_msg="Timed out waiting for log entries to start.")
        # Verify no consumed messages
        assert line_count(node, ConsoleConsumer.STDOUT_CAPTURE) == 0
        self.consumer.stop_node(node)
    @cluster(num_nodes=4)
    def test_version(self):
        """Check that console consumer v0.8.2.X successfully starts and consumes messages."""
        self.kafka.start()
        # Produce a known number of messages, then consume with the old client.
        num_messages = 1000
        self.producer = VerifiableProducer(self.test_context, num_nodes=1, kafka=self.kafka, topic=self.topic,
                                           max_messages=num_messages, throughput=1000)
        self.producer.start()
        self.producer.wait()
        # Pin the consumer to the legacy 0.8.2 client before starting it.
        self.consumer.nodes[0].version = LATEST_0_8_2
        self.consumer.new_consumer = False
        self.consumer.consumer_timeout_ms = 1000
        self.consumer.start()
        self.consumer.wait()
        num_consumed = len(self.consumer.messages_consumed[1])
        num_produced = self.producer.num_acked
        assert num_produced == num_consumed, "num_produced: %d, num_consumed: %d" % (num_produced, num_consumed)
| 44.49 | 112 | 0.71207 |
acf119e7c277821bbc64ba71171fddd1c61cd7ed | 1,234 | py | Python | multiprocessingTest.py | lakshay1296/python-multiprocessing-sample | c42788686168b95b3d98edb417d9071ef3e7eccd | [
"Unlicense"
] | null | null | null | multiprocessingTest.py | lakshay1296/python-multiprocessing-sample | c42788686168b95b3d98edb417d9071ef3e7eccd | [
"Unlicense"
] | null | null | null | multiprocessingTest.py | lakshay1296/python-multiprocessing-sample | c42788686168b95b3d98edb417d9071ef3e7eccd | [
"Unlicense"
] | null | null | null | from multiprocessing import Process, Manager
''' Custom Module Imports '''
from calculator.add import addition
from calculator.subtract import subtraction
from calculator.multiply import multiplication
from calculator.divide import division
class Main:
    """Demo driver: runs the four calculator operations in parallel
    processes for each operand pair, collecting results in a shared dict."""

    def __init__(self) -> None:
        pass

    def calculatorFunction(self):
        """For each operand pair, spawn one process per arithmetic
        operation, wait for all four, then print the shared result dict."""
        operand_pairs = [[1, 2], [3, 4], [5, 6]]
        with Manager() as manager:
            result = manager.dict()
            for left, right in operand_pairs:
                workers = [
                    Process(target=addition(left, right, result).add),
                    Process(target=subtraction(left, right, result).subtract),
                    Process(target=multiplication(left, right, result).multiply),
                    Process(target=division(left, right, result).divide),
                ]
                for worker in workers:
                    worker.start()
                for worker in workers:
                    worker.join()
            print(result)
# Script entry point: run the multiprocessing calculator demo.
if __name__ == '__main__':
    main = Main()
main.calculatorFunction() | 28.697674 | 57 | 0.508914 |
acf119f7561dd4c3b1521eddaf9df56b9be6411e | 2,471 | py | Python | homeassistant/components/rfxtrx.py | davidedmundson/home-assistant | cd02563552ffc28239fa17c79a5d9bc0013bd5ac | [
"MIT"
] | null | null | null | homeassistant/components/rfxtrx.py | davidedmundson/home-assistant | cd02563552ffc28239fa17c79a5d9bc0013bd5ac | [
"MIT"
] | null | null | null | homeassistant/components/rfxtrx.py | davidedmundson/home-assistant | cd02563552ffc28239fa17c79a5d9bc0013bd5ac | [
"MIT"
] | 1 | 2018-11-20T17:44:08.000Z | 2018-11-20T17:44:08.000Z | """
homeassistant.components.rfxtrx
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Provides support for RFXtrx components.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/rfxtrx/
"""
import logging
from homeassistant.util import slugify
# Home Assistant dependency spec: pinned pyRFXtrx build fetched from GitHub.
REQUIREMENTS = ['https://github.com/Danielhiversen/pyRFXtrx/archive/0.4.zip' +
                '#pyRFXtrx==0.4']
DOMAIN = "rfxtrx"  # component/config-section name
# Configuration and event attribute keys.
ATTR_DEVICE = 'device'
ATTR_DEBUG = 'debug'
ATTR_STATE = 'state'
ATTR_NAME = 'name'
ATTR_PACKETID = 'packetid'
ATTR_FIREEVENT = 'fire_event'
ATTR_DATA_TYPE = 'data_type'
EVENT_BUTTON_PRESSED = 'button_pressed'
# Callbacks invoked for every event received from the RFXtrx gateway.
RECEIVED_EVT_SUBSCRIBERS = []
# Known RFX devices, populated by the platform modules.
RFX_DEVICES = {}
_LOGGER = logging.getLogger(__name__)
# Handle to the pyRFXtrx Core gateway object, set by setup().
RFXOBJECT = None
def setup(hass, config):
    """Setup the RFXtrx component.

    Opens the configured gateway device via pyRFXtrx and registers a
    receive callback that fans events out to RECEIVED_EVT_SUBSCRIBERS.
    Returns False when the required 'device' option is missing.
    """
    # Declare the Handle event
    def handle_receive(event):
        """ Callback all subscribers for RFXtrx gateway. """
        # Log RFXCOM event
        if not event.device.id_string:
            return
        entity_id = slugify(event.device.id_string.lower())
        packet_id = "".join("{0:02x}".format(x) for x in event.data)
        entity_name = "%s : %s" % (entity_id, packet_id)
        _LOGGER.info("Receive RFXCOM event from %s => %s",
                     event.device, entity_name)
        # Callback to HA registered components
        for subscriber in RECEIVED_EVT_SUBSCRIBERS:
            subscriber(event)
    # Try to load the RFXtrx module (imported lazily so HA can install it).
    import RFXtrx as rfxtrxmod
    # Init the rfxtrx module
    global RFXOBJECT
    if ATTR_DEVICE not in config[DOMAIN]:
        # Bug fix: this is plain validation, not an exception handler, so use
        # error() (exception() logs a traceback and requires an active
        # exception). The message grammar is also corrected.
        _LOGGER.error(
            "Could not find device parameter in %s YAML configuration section",
            DOMAIN
        )
        return False
    device = config[DOMAIN][ATTR_DEVICE]
    debug = config[DOMAIN].get(ATTR_DEBUG, False)
    RFXOBJECT = rfxtrxmod.Core(device, handle_receive, debug=debug)
    return True
def get_rfx_object(packetid):
    """Return the pyRFXtrx event for a hex packet id, or None if unparsable."""
    import RFXtrx as rfxtrxmod

    packet = rfxtrxmod.lowlevel.parse(bytearray.fromhex(packetid))
    if packet is None:
        return None
    if isinstance(packet, rfxtrxmod.lowlevel.SensorPacket):
        return rfxtrxmod.SensorEvent(packet)
    if isinstance(packet, rfxtrxmod.lowlevel.Status):
        return rfxtrxmod.StatusEvent(packet)
    return rfxtrxmod.ControlEvent(packet)
| 26.569892 | 78 | 0.658438 |
acf11c660009ac1f41fb324f123c455f7acd690d | 5,271 | py | Python | or_shifty/model.py | aayaffe/or-shifty | d7530c1ceabd92708271207dec38478e8b56b243 | [
"MIT"
] | 5 | 2020-01-15T23:34:22.000Z | 2020-08-28T07:51:19.000Z | or_shifty/model.py | aayaffe/or-shifty | d7530c1ceabd92708271207dec38478e8b56b243 | [
"MIT"
] | 5 | 2020-01-10T22:14:59.000Z | 2022-01-21T19:00:28.000Z | or_shifty/model.py | aayaffe/or-shifty | d7530c1ceabd92708271207dec38478e8b56b243 | [
"MIT"
] | 2 | 2020-09-01T11:27:29.000Z | 2021-12-16T10:16:17.000Z | import logging
from typing import List
from ortools.sat.python import cp_model
from ortools.sat.python.cp_model import INFEASIBLE
from or_shifty.config import Config
from or_shifty.constraints import (
EVALUATION_CONSTRAINT,
FIXED_CONSTRAINTS,
Constraint,
)
from or_shifty.objective import Objective, RankingWeight
from or_shifty.shift import AssignedShift
log = logging.getLogger(__name__)
class Infeasible(Exception):
    """Raised when the constraint model admits no feasible assignment."""
def solve(
    config: Config,
    objective: Objective = RankingWeight(),
    constraints: List[Constraint] = tuple(),
) -> List[AssignedShift]:
    """Solve the rota model, relaxing low-priority constraints on infeasibility,
    and return the assigned shifts sorted by (day, name)."""
    # Merge caller constraints with the fixed ones, ordered by priority.
    constraints = _constraints(constraints)
    log.info(str(config.history_metrics))
    solver, assignments = _run_with_retries(config, objective, list(constraints))
    # Report (but do not fail on) violations of the full constraint set,
    # since some constraints may have been dropped during retries.
    _validate_constraints_against_solution(solver, constraints, config, assignments)
    _display_objective_function_score(solver)
    solution = sorted(
        list(_solution(solver, config, assignments)), key=lambda s: (s.day, s.name)
    )
    log.info("Solution\n%s", "\n".join(f">>>> {shift}" for shift in solution))
    return solution
def evaluate(
    config: Config,
    objective: Objective,
    constraints: List[Constraint],
    solution: List[AssignedShift],
) -> None:
    """Score an existing *solution*: pin the model to it via the evaluation
    constraint, then log objective score and any constraint violations."""
    constraints = _constraints(constraints)
    # Forces the solver to reproduce exactly the supplied assignments.
    evaluation_constraint = EVALUATION_CONSTRAINT(priority=0, assigned_shifts=solution)
    log.info(str(config.history_metrics))
    solver, assignments = _run(config, objective, [evaluation_constraint])
    _validate_constraints_against_solution(solver, constraints, config, assignments)
    _display_objective_function_score(solver)
    solution = sorted(
        list(_solution(solver, config, assignments)), key=lambda s: (s.day, s.name)
    )
    log.info("Solution\n%s", "\n".join(f">>>> {shift}" for shift in solution))
def _constraints(constraints: List[Constraint]) -> List[Constraint]:
    """Combine caller constraints with the fixed set, ordered by priority."""
    merged = [*constraints, *FIXED_CONSTRAINTS]
    merged.sort(key=lambda constraint: constraint.priority)
    return merged
def _run_with_retries(config, objective, constraints):
    """Run the model, progressively dropping the least important constraints
    each time it proves infeasible; re-raises Infeasible once only
    priority-0 (mandatory) constraints remain."""
    log.info("Running model...")
    while True:
        try:
            result = _run(config, objective, constraints)
            log.info("Solution found")
            return result
        except Infeasible:
            log.warning("Failed to find solution with current constraints")
            # Returns None when only priority-0 constraints are left.
            constraints = _drop_least_important_constraints(constraints)
            if constraints is None:
                raise
            log.info("Retrying model...")
def _drop_least_important_constraints(constraints):
    """Remove all constraints at the highest (least important) priority level.

    Returns the remaining constraints, or None when only priority-0
    (mandatory) constraints are left and nothing more can be dropped.
    """
    priority_to_drop = max(constraint.priority for constraint in constraints)
    if priority_to_drop == 0:
        return None
    log.debug("Dropping constraints with priority %s", priority_to_drop)
    dropped, kept = [], []
    for constraint in constraints:
        if constraint.priority == priority_to_drop:
            dropped.append(constraint)
        else:
            kept.append(constraint)
    log.info("Dropping constraints %s", ", ".join(str(c) for c in dropped))
    return kept
def _run(data, objective, constraints):
    """Build and solve one CP-SAT model; returns (solver, assignments) or
    raises Infeasible when no assignment satisfies the constraints."""
    model = cp_model.CpModel()
    # One boolean decision variable per (person, person-shift, day, day-shift).
    assignments = init_assignments(model, data)
    for constraint in constraints:
        log.debug("Adding constraint %s", constraint)
        for expression, _ in constraint.generate(assignments, data):
            model.Add(expression)
    model.Maximize(objective.objective(assignments, data))
    solver = cp_model.CpSolver()
    status = solver.Solve(model)
    if status is INFEASIBLE:
        raise Infeasible()
    return solver, assignments
def init_assignments(model, data):
    """Create one boolean model variable per assignment index, keyed by idx."""
    return {
        index.idx: model.NewBoolVar(
            f"shift_{index.person.name}_{index.person_shift}_{index.day}_{index.day_shift.name}"
        )
        for index in data.indexer.iter()
    }
def _solution(solver, data, assignments):
    """Yield an AssignedShift for every decision variable the solver set to 1."""
    for day, shifts_for_day in data.shifts_by_day.items():
        for shift in shifts_for_day:
            for index in data.indexer.iter(day_filter=day, day_shift_filter=shift):
                chosen = solver.Value(assignments[index.idx]) == 1
                if chosen:
                    yield index.day_shift.assign(index.person)
def _validate_constraints_against_solution(solver, constraints, data, assignments):
    """Re-check every constraint against the solved assignment and warn about
    violations (possible when constraints were dropped during retries)."""
    for constraint in constraints:
        log.debug("Evaluating constraint %s against solution", constraint)
        for expression, impact in constraint.generate(assignments, data):
            # Evaluate the linear expression under the solved variable values
            # and compare it against the bounds the constraint imposed.
            expr = expression.Expression()
            bounds = expression.Bounds()
            value = solver.Value(expr)
            expr_valid = bounds[0] <= value <= bounds[1]
            if not expr_valid:
                log.debug(
                    "Solution violates constraint %s, expr %s, value %s, bounds %s, impact %s",
                    constraint,
                    expr,
                    value,
                    bounds,
                    impact,
                )
                log.warning("Solution violates constraint %s %s", constraint, impact)
def _display_objective_function_score(solver):
    """Log the final value of the maximized objective for the solved model."""
    score = solver.ObjectiveValue()
    log.info("Objective function score was %s", score)
| 31.945455 | 96 | 0.674066 |
acf11cebaaec3bae79df4d2e12ced03424beebcc | 23,543 | py | Python | grgen/kohonen.py | dzilles/grgen | 7c80f1e6c7903355ac6cc427a1f526942110bff4 | [
"MIT"
] | 2 | 2021-05-18T13:25:42.000Z | 2021-06-23T14:36:13.000Z | grgen/kohonen.py | dzilles/grgen | 7c80f1e6c7903355ac6cc427a1f526942110bff4 | [
"MIT"
] | null | null | null | grgen/kohonen.py | dzilles/grgen | 7c80f1e6c7903355ac6cc427a1f526942110bff4 | [
"MIT"
] | 1 | 2020-12-11T12:39:57.000Z | 2020-12-11T12:39:57.000Z | # Copyright (c) 2020 Daniel Zilles
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import tensorflow as tf
import numpy as np
import scipy.spatial
import random
from shapely.geometry import Point, Polygon
from shapely.ops import nearest_points
from grgen.auxiliary import Timer
from grgen.auxiliary import Plotter
"""
Implementation of the Kohonen self-organizing map where a grid is trained to represent some input geometry.
"""
class Kohonen:
""" The class of the self-organizing map """
    def __init__(self, spacing, geometry, dim=2, s=0.1, iterations=None, iterationsFactor=1, minRadius=None, maxRadius=None, batchSize=None, vertexType="triangular"):
        """ Initialization of the Kohonen class
        :param spacing: approximation of the grid spacing used to build the initial grid
        :param geometry: geometry as sets of vertices inside a list. First entry is the outer boundary, second
                         is the inner boundary. Only one inner boundary is supported.
        :param dim: currently only 2D
        :param s: constant for the lateral connection of two neurons
        :param iterations: maximum number of iterations
        :param iterationsFactor: Factor to increase/decrease default iteration number
        :param minRadius: minimum radius
        :param maxRadius: maximum radius
        :param batchSize: size of the training data for mini-batch learning
        :param vertexType: "triangular", "rectangular" TODO implement rectangular
        """
        self.spacing = spacing
        self.geometry = geometry
        self.dim = dim
        self.s = s
        self.iterations = iterations
        self.minRadius = minRadius
        self.maxRadius = maxRadius
        self.batchSize = batchSize
        self.vertexType = vertexType
        # Weights of the Kohonen network. Also the grid coordinates of all cells.
        self.weights = None
        self.startWeights = None
        # The position of coordinates can be fixed by using this array of booleans.
        self.noPoints = None
        self.noInternalPoints = None
        self.noBoundaryPoints = None
        self.noCells = None
        # Minimum and maximum coordinates of the geometry
        self.boundingBox = None
        # Numerical tolerance and floating-point precision for tf/np arrays.
        self.eps = 10e-12
        self.dataType = np.float32
        # Storage for the learning operation
        self.tmpWeight = None
        self.geometryProbability = None
        self.vertexProbability = None
        # Fixed topology of the grid
        self.connection = None
        self.neighbors = None
        self.boundary = None
        self.boundaryIdx = None
        self.innerIdx = None
        self.boundaryId = None
        self.boundaryFace = None
        # auxiliary
        self.timer = Timer()
        self.plotter = None
        # Initialize som algorithm
        # 1) Calculate bounding box and radius
        self.calculateBoundingBox()
        if maxRadius == None:
            # Default: the largest bounding-box extent plus a margin.
            delta = np.subtract(self.boundingBox[:,1], self.boundingBox[:,0])
            self.maxRadius = np.max(delta) + 10*spacing
        if minRadius == None:
            self.minRadius = 2*spacing
        # 2) Initialize weights of the network
        self.buildWeights()
        # 3) Remove coordinates inside inner geometry or outside outer boundary
        self.removeGridCoordinates()
        # 4) Build the grid topology (connections, cell neighbors, ...)
        self.buildGridTopology()
        if iterations == None:
            # Default iteration count: one per remaining grid point.
            self.iterations = self.noPoints
        self.iterations = int(iterationsFactor*self.iterations)
        self.calculateBoundaryProbability()
    def maskCornerPoints(self):
        """ move the corner points on the corner of the outer geometry and fix their positions """
        removeIndices = list()
        for c in self.geometry[0]:
            tmp=tf.cast(c, dtype=self.dataType)
            # Nearest boundary grid point to this geometry corner.
            neighbor = self.findNN(tf.gather(self.weights, self.boundaryIdx), tmp)
            # Snap that grid point exactly onto the corner coordinate.
            tf.compat.v1.scatter_update(self.weights, tf.Variable(self.boundaryIdx[neighbor], dtype=np.int64), tmp)
            removeIndices.append(neighbor)
        # Removing the snapped points from boundaryIdx fixes their positions
        # for the remaining training.
        self.boundaryIdx = np.delete(self.boundaryIdx, removeIndices)
def findNN(self, searchSet, coordinates):
    """ find the nearest neighbor and return its index

    :param searchSet: set where the neighbor is searched
    :param coordinates: the point that is searched for
    """
    # squared Euclidean distance from every candidate to the query point
    difference = searchSet - tf.expand_dims(coordinates, axis=0)
    squaredDistance = tf.reduce_sum(difference**2, axis=1)
    # index of the best matching unit (smallest distance)
    return tf.argmin(squaredDistance, axis=0)
def calculateBoundingBox(self):
    """ Calculate the bounding box of the input geometry

    Computes per-geometry min/max per axis, then stores the joint
    bounding box in self.boundingBox as shape (dim, 2): column 0 = min,
    column 1 = max.
    """
    self.timer.startTimer("calculateBoundingBox")
    perGeometry = np.zeros((self.dim, 2, len(self.geometry)))
    for idx, geom in enumerate(self.geometry):
        perGeometry[0, 0, idx] = np.min(geom[:, 0])
        perGeometry[0, 1, idx] = np.max(geom[:, 0])
        perGeometry[1, 0, idx] = np.min(geom[:, 1])
        perGeometry[1, 1, idx] = np.max(geom[:, 1])
    # joint bounds over all geometries
    lower = np.min(perGeometry[:, 0, :], axis=1).reshape(-1, 1)
    upper = np.max(perGeometry[:, 1, :], axis=1).reshape(-1, 1)
    self.boundingBox = np.concatenate((lower, upper), axis=1)
    self.timer.stopTimer("calculateBoundingBox")
def buildWeights(self):
    """ Calculate weights (the initial coordinates of the grid)

    Lays a regular (rectangular or triangular) grid over the bounding box,
    padded by three rows/columns on every side, and stores it as a
    tf.Variable of shape (noPoints, 2).
    """
    self.timer.startTimer("buildWeights")
    minX, maxX = self.boundingBox[0, 0], self.boundingBox[0, 1]
    minY, maxY = self.boundingBox[1, 0], self.boundingBox[1, 1]
    # triangular grids use a compressed row spacing (equilateral cells)
    if self.vertexType == "triangular":
        spacingY = np.sqrt(self.spacing**2 - (self.spacing/2)**2)
    else:
        spacingY = self.spacing
    ticksX = np.arange(minX - 3*self.spacing, maxX + 3*self.spacing, self.spacing)
    ticksY = np.arange(minY - 3*spacingY, maxY + 3*spacingY, spacingY)
    gridX, gridY = np.meshgrid(ticksX, ticksY)
    if self.vertexType == "triangular":
        # shift every second row by half a spacing to form triangles
        gridX[::2, :] += self.spacing/2
    coords = np.concatenate((gridX.reshape(-1, 1), gridY.reshape(-1, 1)), axis=1)
    self.weights = tf.Variable(coords, dtype=self.dataType)
    self.noPoints = tf.shape(self.weights)[0]
    self.timer.stopTimer("buildWeights")
def removeGridCoordinates(self):
    """ Remove coordinates inside geometry

    Keeps only grid points that lie between the inner hole
    (self.geometry[1]) and the outer contour (self.geometry[0]).
    """
    self.timer.startTimer("removeGridCoordinates")
    inner = Polygon(self.geometry[1])
    outer = Polygon(self.geometry[0])
    keep = np.ones((tf.shape(self.weights)[0]), dtype=bool)
    for i in range(0, np.shape(self.weights)[0]):
        point = Point(self.weights[i, 0], self.weights[i, 1])
        # keep a point iff it is inside the outer contour but not in the hole
        keep[i] = (not inner.contains(point)) and outer.contains(point)
    self.weights = tf.Variable(tf.boolean_mask(self.weights, keep), dtype=self.dataType)
    self.startWeights = self.weights
    self.noPoints = tf.shape(self.weights)[0]
    self.timer.stopTimer("removeGridCoordinates")
def buildGridTopology(self):
    """ Grid topology

    Builds the fixed cell connectivity from a Delaunay triangulation of the
    current weights, removes oversized triangles that bridge removed regions,
    and derives the boundary / inner point index sets from cells that lack a
    neighbor on one of their faces.
    """
    self.timer.startTimer("buildGridTopology")
    triangulation = scipy.spatial.Delaunay(self.weights.numpy())
    self.connection = triangulation.simplices
    self.neighbors = triangulation.neighbors
    # drop triangles whose bounding-box area exceeds the expected cell area
    # (those span regions that were removed from the grid)
    it = 0
    remove = list()
    for x in self.connection:
        vertex = tf.gather(self.weights, x, axis=0)
        minimum = tf.math.reduce_min(vertex, axis=0)
        maximum = tf.math.reduce_max(vertex, axis=0)
        if((maximum[0]-minimum[0])*(maximum[1]-minimum[1])/2 > self.spacing**2/2+self.eps):
            remove.append(it)
        it+=1
    self.connection = np.delete(self.connection, remove, axis=0)
    # neighbors pointing at removed cells become boundary markers (-1),
    # then the removed cells' own rows are dropped
    self.neighbors[np.isin(self.neighbors, remove)] = -1
    self.neighbors = np.delete(self.neighbors, remove, axis=0)
    # (cell, face) pairs without a neighbor lie on the boundary
    self.boundary = np.argwhere(self.neighbors < 0)
    tmpBndry = list()
    for b in self.boundary:
        # face i of a simplex is opposite vertex i; collect its two end points
        if (b[1]==0):
            tmpBndry.append(self.connection[b[0],1])
            tmpBndry.append(self.connection[b[0],2])
        if (b[1]==1):
            tmpBndry.append(self.connection[b[0],2])
            tmpBndry.append(self.connection[b[0],0])
        if (b[1]==2):
            tmpBndry.append(self.connection[b[0],0])
            tmpBndry.append(self.connection[b[0],1])
    self.boundaryIdx = np.unique(np.array(tmpBndry))
    # inner points = all points minus the boundary points
    self.innerIdx = np.arange(0, self.noPoints, 1, dtype=np.int32)
    self.innerIdx = np.delete(self.innerIdx, self.boundaryIdx)
    self.noCells = np.shape(self.connection)[0]
    self.noInternalPoints = np.shape(self.innerIdx)[0]
    self.noBoundaryPoints = np.shape(self.boundaryIdx)[0]
    self.timer.stopTimer("buildGridTopology")
def produceRandomInput(self, tensorflow=True):
    """ produce random point for the learning step

    Rejection-samples a uniformly random point inside the meshed region
    (inside the outer contour, outside the inner hole).

    :param tensorflow: return as tensorflow or numpy variable
    """
    self.timer.startTimer("produceRandomInput")
    minX = self.boundingBox[0,0]
    minY = self.boundingBox[1,0]
    maxX = self.boundingBox[0,1]
    maxY = self.boundingBox[1,1]
    inner = Polygon(self.geometry[1])
    outer = Polygon(self.geometry[0])
    while(True):
        randomCoordinate = np.array([random.uniform(minX, maxX), random.uniform(minY, maxY)])
        point = Point(randomCoordinate[0], randomCoordinate[1])
        # reject points in the hole or outside the outer contour
        if inner.contains(point) or not outer.contains(point):
            continue
        # BUG FIX: stopTimer() used to sit after the infinite loop and was
        # unreachable, so the timer was never stopped; stop it before returning.
        self.timer.stopTimer("produceRandomInput")
        if (tensorflow):
            return tf.Variable(randomCoordinate, dtype=self.dataType)
        else:
            return randomCoordinate
def calculateBoundaryProbability(self):
    """ helper function for produceRandomInputBoundary()

    Precomputes, per geometry, the per-edge selection probability
    (proportional to edge length) and, over all geometries, the
    selection probability proportional to total perimeter.
    """
    self.geometryProbability = list()
    self.vertexProbability = list()
    for geom in self.geometry:
        # edge lengths between consecutive vertices (polygon treated as closed)
        edgeLengths = np.sqrt(np.sum((geom - np.roll(geom, 1, axis=0))**2, axis=1))
        self.geometryProbability.append(np.sum(edgeLengths, axis=0))
        self.vertexProbability.append(edgeLengths/np.sum(edgeLengths))
    self.geometryProbability = self.geometryProbability/np.sum(self.geometryProbability)
def produceRandomInputBoundary(self, tensorflow=True):
    """ produce random point for the learning step on the boundary

    Picks a geometry proportional to its perimeter, then an edge
    proportional to its length, then a random coordinate spanned by the
    edge's two end points.

    :param tensorflow: return as tensorflow or numpy variable
    """
    self.timer.startTimer("produceRandomInputBoundary")
    idx = int(np.random.choice(len(self.geometry), size = 1, p=self.geometryProbability))
    nbr = np.shape(self.geometry[idx])[0]
    v = np.random.choice(nbr, size = 1, p=self.vertexProbability[idx])
    # hoisted: the rolled vertex array was previously computed twice per call
    rolled = np.roll(self.geometry[idx], 1, axis=0)
    minX = self.geometry[idx][v,0]
    minY = self.geometry[idx][v,1]
    maxX = rolled[v,0]
    maxY = rolled[v,1]
    # NOTE(review): x and y are drawn independently, so the sample lies in the
    # edge's bounding rectangle, not necessarily on the edge itself — confirm
    # this is intended before changing it.
    randomCoordinate = np.array([random.uniform(minX, maxX), random.uniform(minY, maxY)]).reshape(-1,)
    self.timer.stopTimer("produceRandomInputBoundary")
    if (tensorflow):
        return tf.Variable(randomCoordinate, dtype=self.dataType)
    else:
        return randomCoordinate
def produceRandomBatch(self):
    """ produce a batch of random points

    Draws self.batchSize boundary samples and stacks them into a
    (batchSize, dim) tf.Variable.
    """
    batch = np.zeros((self.batchSize, self.dim))
    for row in range(self.batchSize):
        batch[row, :] = self.produceRandomInputBoundary(False)
    return tf.Variable(batch, dtype=self.dataType)
def moveBoundaryPoints(self):
    """ move boundary weights/points on the geometry boundary

    Each boundary weight is projected onto the closest point of whichever
    contour (inner hole or outer boundary) is nearer to it.
    """
    self.timer.startTimer("moveBoundaryPoints")
    inner = Polygon(self.geometry[1])
    outer = Polygon(self.geometry[0])
    movement = np.zeros((np.shape(self.boundaryIdx)[0], 2))
    weightsBoundary = tf.Variable(tf.gather(self.weights, self.boundaryIdx), dtype=self.dataType).numpy()
    for idx in range(0, np.shape(self.boundaryIdx)[0]):
        point = Point(weightsBoundary[idx, 0], weightsBoundary[idx, 1])
        pOuter, p = nearest_points(outer.boundary, point)
        pInner, p = nearest_points(inner.boundary, point)
        # snap to whichever contour is closer
        if(point.distance(pInner) > point.distance(pOuter)):
            movement[idx, 0] = pOuter.x
            movement[idx, 1] = pOuter.y
        else:
            movement[idx, 0] = pInner.x
            movement[idx, 1] = pInner.y
    # FIX: removed two leftover duplicate debug print() statements that dumped
    # np.shape(self.boundaryIdx) on every call.
    tf.compat.v1.scatter_update(self.weights, self.boundaryIdx, movement)
    self.timer.stopTimer("moveBoundaryPoints")
def trainingOperation(self, inputData, searchSet, searchSetStart, trainingSetStart, delta, radius, k=0, boundaryTraining = False):
    """ ordering stage for all cells

    :param inputData: random training data
    :param searchSet: set for nearest neighbor search
    :param searchSetStart: coordinate of the bmu is stored here
    :param trainingSetStart: set for neighborhood calculation
    :param delta: learning rate
    :param radius: learning radius
    :param k: parameter to eliminate the border effect
    :param boundaryTraining: exchange random coordinate with nearest boundary point
    """
    # find the best matching unit
    bmuIndex = self.findNN(searchSet, inputData)
    if(k > 0 or boundaryTraining):
        # replace the random sample by the bmu's start coordinate
        inputData = searchSetStart[bmuIndex, :]
    # calculate the neighbourhood and connectivity: distances are measured in
    # the fixed start-coordinate space, not in the current weight space
    squaredDistanceStart = tf.reduce_sum(
        (trainingSetStart - tf.expand_dims(searchSetStart[bmuIndex,:], axis=0))**2, axis = 1)
    # lateral connection strength decays with distance; k*spacing shifts the
    # kernel to counteract the border effect
    lateralConnection = self.s**((tf.math.sqrt(squaredDistanceStart) + k*self.spacing)**2/(radius**2))
    # update neighborhood: pull tmpWeights towards the input, scaled by the
    # learning rate and lateral connection
    self.tmpWeights = self.tmpWeights + (
        tf.expand_dims(delta*lateralConnection*(1 + k*tf.math.sqrt(squaredDistanceStart)), axis=1)
        *(tf.expand_dims(inputData, axis=0) - self.tmpWeights))
def train(self):
    """ train the grid

    Two phases: an "adaption" phase that trains all weights from boundary
    samples, followed (after snapping corners and boundary points onto the
    geometry) by a "smoothing" phase that only updates the inner points,
    randomly alternating between boundary-driven and interior-driven steps.
    """
    self.timer.startTimer("train")
    print("adaption")
    #self.moveBoundaryPoints()
    #self.startWeights = self.weights
    #self.tmpWeights = self.weights#tf.Variable(tf.gather(self.weights, self.boundaryIdx), dtype=self.dataType)
    self.tmpWeights = tf.Variable(self.weights, dtype=self.dataType)
    searchSetStart = tf.gather(self.startWeights, self.boundaryIdx)
    searchSet = tf.gather(self.startWeights, self.boundaryIdx)
    trainingSetStart = self.startWeights #tf.gather(self.weights, self.boundaryIdx)
    timeIt = 1
    # phase 1: adaption — iteration count scales with the boundary fraction
    for it in range(1, int(10*self.noBoundaryPoints/self.noPoints*self.iterations)):
        # X ramps the learning rate down towards the end of training
        X = tf.cast(1 - tf.exp(5*(it-self.iterations)/self.iterations), dtype=self.dataType)
        delta = 0.225*tf.cast((it)**(-0.2) * X, dtype=self.dataType)
        #radius = 0.02*tf.cast(self.minRadius + X*(self.maxRadius*1.05**(it/self.iterations) - self.minRadius)*(it**(-0.25)),dtype=self.dataType)
        radius = 2*self.spacing
        self.trainingOperation(self.produceRandomInputBoundary(),
                               searchSet,
                               searchSetStart,
                               trainingSetStart,
                               delta,
                               radius,
                               boundaryTraining = False)
        #tf.compat.v1.scatter_update(self.weights, self.boundaryIdx, self.tmpWeights)
        if(not self.plotter == None):
            self.plotter.plot(timeIt, self.weights[:,0], self.weights[:,1], self.connection)
            timeIt += 1
    # commit the adapted weights, then fix corners and project boundary points
    self.weights = tf.Variable(self.tmpWeights, dtype=self.dataType)
    self.maskCornerPoints()
    self.moveBoundaryPoints()
    # phase 2: smoothing — only inner points are trained from here on
    self.tmpWeights = tf.Variable(tf.gather(self.weights, self.innerIdx), dtype=self.dataType)
    searchSetStartCase1 = tf.gather(self.startWeights, self.boundaryIdx)
    searchSetStartCase2 = self.startWeights
    trainingSetStart = tf.gather(self.startWeights, self.innerIdx)
    delta = 0.04*0.05
    k = 10
    radius = k*self.spacing
    # probability of an interior-driven step vs. a boundary-driven step
    alpha_prob = self.noInternalPoints/(self.noBoundaryPoints*k + self.noInternalPoints)
    print("smoothing")
    for it in range(1, int(self.noInternalPoints/self.noPoints*self.iterations)):
        alpha = np.random.uniform(0, 1, 1)
        if(alpha > alpha_prob):
            # boundary-driven smoothing step (border-effect correction via k)
            searchSetCase1 = tf.cast(tf.gather(self.weights, self.boundaryIdx), dtype=self.dataType)
            self.trainingOperation(self.produceRandomInputBoundary(),
                                   searchSetCase1,
                                   searchSetStartCase1,
                                   trainingSetStart,
                                   delta,
                                   radius,
                                   k)
            tf.compat.v1.scatter_update(self.weights, self.innerIdx, self.tmpWeights)
            if(not self.plotter == None):
                self.plotter.plot(timeIt, self.weights[:,0], self.weights[:,1], self.connection)
                timeIt += 1
        else:
            # interior-driven smoothing step
            searchSetCase2 = self.weights
            self.trainingOperation(self.produceRandomInput(),
                                   searchSetCase2,
                                   searchSetStartCase2,
                                   trainingSetStart,
                                   delta,
                                   radius,
                                   boundaryTraining=True)
            tf.compat.v1.scatter_update(self.weights, self.innerIdx, self.tmpWeights)
            if(not self.plotter == None):
                self.plotter.plot(timeIt, self.weights[:,0], self.weights[:,1], self.connection)
                timeIt += 1
    self.timer.stopTimer("train")
def summary(self):
    """Print a short report of the grid configuration and topology sizes."""
    rule = "_________________________________________________________"
    print(rule)
    print(" ")
    print("Summary of the grid")
    print(rule)
    print("spacing: ", self.spacing)
    print("dimension: ", self.dim)
    print("minimum x: ", self.boundingBox[0,0])
    print("maximum x: ", self.boundingBox[0,1])
    print("minimum y: ", self.boundingBox[1,0])
    print("maximum y: ", self.boundingBox[1,1])
    print("s: ", self.s)
    print("iterations: ", self.iterations)
    print("minRadius : ", self.minRadius)
    print("maxRadius: ", self.maxRadius)
    print("noPoints ", self.noPoints)
    print("noCells: ", np.shape(self.connection)[0])
    print("noBoundaryCells: ", np.shape(self.boundary)[0])
    print(rule)
# def trainingOperationBatch(self, inputData, searchSet, searchSetStart, trainingSetStart):
# """ ordering stage for all cells batch learning (not working)
#
# :param inputData: random training data
# :param searchSet: set for nearest neighbor search
# :param searchSetStart: coordinate of the bmu is stored here
# :param trainingSetStart: set for neighborhood calculation
# """
#
# radius = self.minRadius
#
# self.squaredDistance = tf.reduce_sum(tf.pow(tf.subtract(tf.expand_dims(searchSet, axis=0),tf.expand_dims(inputData, axis=1)), 2), 2)
#
# bmuIndex = tf.argmin(self.squaredDistance, axis=1)
#
#
# self.squaredDistanceStart = tf.cast(tf.math.sqrt(tf.reduce_sum(tf.expand_dims(searchSetStart, axis=0) - tf.expand_dims(tf.gather(searchSetStart, bmuIndex), axis=1), 2)**2), dtype=self.dataType)
#
# self.squaredDistanceStart = self.squaredDistanceStart/(self.spacing)
#
#
# self.lateralConnection = tf.math.exp(self.squaredDistanceStart/(radius))
#
# self.numerator = tf.reduce_sum(tf.expand_dims(self.lateralConnection, axis=-1) * tf.expand_dims(inputData, axis=1), axis=0)
#
# self.denominator = tf.expand_dims(tf.reduce_sum(self.lateralConnection,axis=0)+self.eps, axis=-1)
#
# self.tmpWeights = self.numerator / self.denominator
#
#
# def batchTrain(self):
# """ batch training of the grid (not working)"""
#
# self.timer.startTimer("bacthTrain")
#
# self.weights = tf.Variable(self.weights, dtype=self.dataType)
# self.tmpWeights = tf.gather(self.weights, self.boundaryIdx)
# searchSetStart = self.startWeights
# trainingSetStart = self.startWeights
#
# for it in range(1, int(self.iterations)):
#
# searchSet = tf.cast(tf.gather(self.weights, self.boundaryIdx), dtype=self.dataType)
#
# self.trainingOperationBatch(self.produceRandomBatch(),
# searchSet,
# searchSetStart,
# trainingSetStart)
#
# self.weights = self.tmpWeights
#
# if(it%1==0):
# plot(self.weights[:,0], self.weights[:,1], self.connection)
# print(it, " ", self.iterations)
#
# self.timer.stopTimer("batchTrain")
#
#
| 38.095469 | 202 | 0.606805 |
acf11dc3c3f8171adf27dc9f0b3431af4797a5e4 | 31,595 | py | Python | Tests/test_methodbinder2.py | cwensley/ironpython2 | f854444e1e08afc8850cb7c1a739a7dd2d10d32a | [
"Apache-2.0"
] | 1,078 | 2016-07-19T02:48:30.000Z | 2022-03-30T21:22:34.000Z | Tests/test_methodbinder2.py | cwensley/ironpython2 | f854444e1e08afc8850cb7c1a739a7dd2d10d32a | [
"Apache-2.0"
] | 576 | 2017-05-21T12:36:48.000Z | 2022-03-30T13:47:03.000Z | Tests/test_methodbinder2.py | cwensley/ironpython2 | f854444e1e08afc8850cb7c1a739a7dd2d10d32a | [
"Apache-2.0"
] | 269 | 2017-05-21T04:44:47.000Z | 2022-03-31T16:18:13.000Z | # Licensed to the .NET Foundation under one or more agreements.
# The .NET Foundation licenses this file to you under the Apache 2.0 License.
# See the LICENSE file in the project root for more information.
#
# PART 2. how IronPython choose the overload methods
#
import unittest
from iptest import IronPythonTestCase, is_cli, run_test, skipUnlessIronPython
from iptest.type_util import array_int, array_byte, array_object, myint, mystr, types
class PT_int_old:
    """Old-style class whose __int__ conversion yields 200."""

    def __int__(self):
        return 200
class PT_int_new(object):
    """New-style class whose __int__ conversion yields 300."""

    def __int__(self):
        return 300
def _self_defined_method(name): return len(name) == 4 and name[0] == "M"
def _result_pair(s, offset=0):
fn = s.split()
val = [int(x[1:]) + offset for x in fn]
return dict(zip(fn, val))
def _first(s): return _result_pair(s, 0)
def _second(s): return _result_pair(s, 100)
def _merge(*args):
    """Merge any number of dicts into a new dict; later dicts win on key clashes."""
    combined = {}
    for mapping in args:
        for key, value in mapping.iteritems():
            combined[key] = value
    return combined
def _my_call(func, arg):
if isinstance(arg, tuple):
l = len(arg)
if l == 0: func()
elif l == 1: func(arg[0])
elif l == 2: func(arg[0], arg[1])
elif l == 3: func(arg[0], arg[1], arg[2])
elif l == 4: func(arg[0], arg[1], arg[2], arg[3])
elif l == 5: func(arg[0], arg[1], arg[2], arg[3], arg[4])
elif l == 6: func(arg[0], arg[1], arg[2], arg[3], arg[4], arg[5])
else: func(*arg)
else:
func(arg)
# CLR-only fixtures: System numeric constants, clr.Reference arguments and
# typed arrays used by the overload tests below. Only available when running
# under IronPython (is_cli).
if is_cli:
    import clr
    import System
    clrRefInt = clr.Reference[int](0)
    UInt32Max = System.UInt32.MaxValue
    Byte10 = System.Byte.Parse('10')
    SBytem10 = System.SByte.Parse('-10')
    Int1610 = System.Int16.Parse('10')
    Int16m20 = System.Int16.Parse('-20')
    UInt163 = System.UInt16.Parse('3')
    arrayInt = array_int((10, 20))
    tupleInt = ((10, 20), )
    listInt = ([10, 20], )
    # tupleLong2's second element deliberately overflows Int32
    tupleLong1, tupleLong2 = ((10L, 20L), ), ((System.Int64.MaxValue, System.Int32.MaxValue * 2),)
    arrayByte = array_byte((10, 20))
    arrayObj = array_object(['str', 10])
@skipUnlessIronPython()
class MethodBinder2Test(IronPythonTestCase):
def setUp(self):
    """Load the IronPython test assembly and build Python subclasses of the
    CLR test types (with and without __int__ conversions)."""
    super(MethodBinder2Test, self).setUp()
    self.load_iron_python_test()
    import System
    from IronPythonTest.BinderTest import I, C1, C3

    class PT_I(I): pass
    class PT_C1(C1): pass
    # subclasses that additionally define an __int__ conversion
    class PT_C3_int(C3):
        def __int__(self): return 1
    class PT_I_int(I):
        def __int__(self): return 100

    self.pt_i = PT_I()
    self.pt_c1 = PT_C1()
    self.pt_i_int = PT_I_int()
    self.pt_int_old = PT_int_old()
    self.pt_int_new = PT_int_new()
def _try_arg(self, target, arg, mapping, funcTypeError, funcOverflowError, verbose=False):
    '''try the pass-in argument 'arg' on all methods 'target' has.

    mapping specifies (method-name, flag-value): methods listed there are
    expected to succeed and set Flag.Value to the mapped value.
    funcTypeError contains method-names expected to raise TypeError.
    funcOverflowError contains method-names, which will cause OverflowError
    when passing in 'arg'.
    '''
    from IronPythonTest.BinderTest import Flag
    if verbose: print arg,
    for funcname in dir(target):
        # only probe the generated overload methods (M1xx)
        if not _self_defined_method(funcname) : continue
        if verbose: print funcname,
        func = getattr(target, funcname)
        if funcname in funcOverflowError: expectError = OverflowError
        elif funcname in funcTypeError: expectError = TypeError
        else: expectError = None
        # lambda arguments are evaluated lazily so every method gets a fresh value
        if isinstance(arg, types.lambdaType):
            arg = arg()
        try:
            _my_call(func, arg)
        except Exception, e:
            if expectError == None:
                self.fail("unexpected exception %s when func %s with arg %s (%s)\n%s" % (e, funcname, arg, type(arg), func.__doc__))
            if funcname in mapping.keys(): # No exception expected:
                self.fail("unexpected exception %s when func %s with arg %s (%s)\n%s" % (e, funcname, arg, type(arg), func.__doc__))
            if not isinstance(e, expectError):
                self.fail("expect '%s', but got '%s' (flag %s) when func %s with arg %s (%s)\n%s" % (expectError, e, Flag.Value, funcname, arg, type(arg), func.__doc__))
        else:
            if not funcname in mapping.keys(): # Expecting exception
                self.fail("expect %s, but got no exception (flag %s) when func %s with arg %s (%s)\n%s" % (expectError, Flag.Value, funcname, arg, type(arg), func.__doc__))
            # the called overload records its id in Flag.Value
            left, right = Flag.Value, mapping[funcname]
            if left != right:
                self.fail("left %s != right %s when func %s on arg %s (%s)\n%s" % (left, right, funcname, arg, type(arg), func.__doc__))
            Flag.Value = -99 # reset
    if verbose: print
def test_other_concerns(self):
    """Overload-resolution corner cases: private overloads, static vs. instance
    methods, generic methods, and narrowing via user-defined __int__."""
    from IronPythonTest.BinderTest import C1, C3, COtherOverloadConcern, Flag
    target = COtherOverloadConcern()

    # the one asking for Int32 is private
    target.M100(100)
    self.assertEqual(Flag.Value, 200); Flag.Value = 99

    # static / instance
    target.M110(target, 100)
    self.assertEqual(Flag.Value, 110); Flag.Value = 99
    COtherOverloadConcern.M110(100)
    self.assertEqual(Flag.Value, 210); Flag.Value = 99
    self.assertRaises(TypeError, COtherOverloadConcern.M110, target, 100)

    # static / instance 2
    target.M111(100)
    self.assertEqual(Flag.Value, 111); Flag.Value = 99
    COtherOverloadConcern.M111(target, 100)
    self.assertEqual(Flag.Value, 211); Flag.Value = 99
    self.assertRaises(TypeError, target.M111, target, 100)
    self.assertRaises(TypeError, COtherOverloadConcern.M111, 100)

    # statics
    target.M120(target, 100)
    self.assertEqual(Flag.Value, 120); Flag.Value = 99
    target.M120(100)
    self.assertEqual(Flag.Value, 220); Flag.Value = 99
    COtherOverloadConcern.M120(target, 100)
    self.assertEqual(Flag.Value, 120); Flag.Value = 99
    COtherOverloadConcern.M120(100)
    self.assertEqual(Flag.Value, 220); Flag.Value = 99

    # generic
    target.M130(100)
    self.assertEqual(Flag.Value, 130); Flag.Value = 99
    target.M130(100.1234)
    self.assertEqual(Flag.Value, 230); Flag.Value = 99
    target.M130(C1())
    self.assertEqual(Flag.Value, 230); Flag.Value = 99
    # explicit generic instantiation always picks the generic overload
    for x in [100, 100.1234]:
        target.M130[int](x)
        self.assertEqual(Flag.Value, 230); Flag.Value = 99

    class PT_C3_int(C3):
        def __int__(self): return 1

    # narrowing levels and __int__ conversion
    target.M140(PT_C3_int(), PT_C3_int())
    self.assertEqual(Flag.Value, 140); Flag.Value = 99
######### generated python code below #########
def test_arg_ClrReference(self):
    """Overload selection for clr.Reference (byref) arguments of various element types."""
    import clr
    from IronPythonTest.BinderTest import C1, C2, COverloads_ClrReference
    target = COverloads_ClrReference()
    # rows: (argument-factory, expected method->flag mapping, TypeError methods, OverflowError methods)
    for (arg, mapping, funcTypeError, funcOverflowError) in [
        (lambda : None, _merge(_first('M100 M101 M107 '), _second('M102 M104 M105 M106 ')), 'M103 ', '', ),
        (lambda : clr.Reference[object](None), _second('M100 M104 M105 M107 '), 'M101 M102 M103 M104 M106 ', '', ),
        (lambda : clr.Reference[object](None), _second('M100 M104 M105 M107 '), 'M101 M102 M103 M106 ', '', ),
        (lambda : clr.Reference[int](9), _merge(_first('M100 M102 M103 M104 '), _second('M105 M107 ')), 'M101 M106 ', '', ),
        (lambda : clr.Reference[bool](True), _merge(_first('M100 M105 '), _second('M101 M102 M104 M107 ')), 'M103 M106 ', '', ),
        (lambda : clr.Reference[type](complex), _merge(_first('M100 '), _second('M104 M105 M107 ')), 'M101 M102 M103 M106 ', '', ),
        (lambda : clr.Reference[C1](C1()), _merge(_first('M100 M106 M107 '), _second('M104 M105 ')), 'M101 M102 M103 ', '', ),
        (lambda : clr.Reference[C1](C2()), _merge(_first('M100 M106 M107 '), _second('M104 M105 ')), 'M101 M102 M103 ', '', ),
    ]:
        self._try_arg(target, arg, mapping, funcTypeError, funcOverflowError)
def test_arg_NoArgNecessary(self):
    """Overload selection when a call with zero arguments is possible (defaults/params)."""
    from IronPythonTest.BinderTest import COverloads_NoArgNecessary
    target = COverloads_NoArgNecessary()
    # rows: (argument, expected method->flag mapping, TypeError methods, OverflowError methods)
    for (arg, mapping, funcTypeError, funcOverflowError) in [
        ( tuple(), _merge(_first('M100 M101 M102 M105 '), _second('M103 M104 M106 ')), '', '', ),
        ( 100, _merge(_first('M105 M106 '), _second('M101 M102 M103 M104 ')), 'M100 ', '', ),
        ( (100, 200), _second('M102 M104 M105 M106 '), 'M100 M101 M103 ', '', ),
        ( clrRefInt, _merge(_first('M103 M104 '), _second('M100 ')), 'M101 M102 M105 M106 ', '', ),
    ]:
        self._try_arg(target, arg, mapping, funcTypeError, funcOverflowError)
def test_arg_OneArg_NormalArg(self):
    """Overload selection for methods taking one normal (by-value) argument."""
    from IronPythonTest.BinderTest import COverloads_OneArg_NormalArg
    target = COverloads_OneArg_NormalArg()
    # rows: (argument, expected method->flag mapping, TypeError methods, OverflowError methods)
    for (arg, mapping, funcTypeError, funcOverflowError) in [
        ( tuple(), dict(), 'M100 M101 M102 M103 M104 M105 M106 M107 M108 M109 ', '', ),
        ( 100, _first('M100 M101 M102 M103 M104 M105 M106 M107 M108 M109 '), '', '', ),
        ( (100, 200), _second('M102 M107 M108 '), 'M100 M101 M103 M104 M105 M106 M109 ', '', ),
        ( clrRefInt, _second('M100 '), 'M101 M102 M103 M104 M105 M106 M107 M108 M109 ', '', ),
    ]:
        self._try_arg(target, arg, mapping, funcTypeError, funcOverflowError)
def test_arg_OneArg_RefArg(self):
    """Overload selection for methods taking one ref (byref) argument."""
    from IronPythonTest.BinderTest import COverloads_OneArg_RefArg
    target = COverloads_OneArg_RefArg()
    # rows: (argument, expected method->flag mapping, TypeError methods, OverflowError methods)
    for (arg, mapping, funcTypeError, funcOverflowError) in [
        ( tuple(), dict(), 'M100 M101 M102 M103 M104 M105 M106 M107 M108 ', '', ),
        ( 100, _merge(_first('M100 M101 M103 M105 M108 '), _second('M106 M107 ')), 'M102 M104 ', '', ),
        ( (100, 200), _second('M101 M106 M107 '), 'M100 M102 M103 M104 M105 M108 ', '', ),
        ( clrRefInt, _first('M100 M101 M102 M103 M104 M105 M106 M107 M108 '), '', '', ),
    ]:
        self._try_arg(target, arg, mapping, funcTypeError, funcOverflowError)
def test_arg_OneArg_NullableArg(self):
    """Overload selection for methods taking one Nullable<T> argument."""
    from IronPythonTest.BinderTest import COverloads_OneArg_NullableArg
    target = COverloads_OneArg_NullableArg()
    # rows: (argument, expected method->flag mapping, TypeError methods, OverflowError methods)
    for (arg, mapping, funcTypeError, funcOverflowError) in [
        ( tuple(), dict(), 'M100 M101 M102 M103 M104 M105 M106 M107 ', '', ),
        ( 100, _merge(_first('M100 M107 '), _second('M101 M102 M103 M104 M105 M106 ')), '', '', ),
        ( (100, 200), _second('M100 M105 M106 '), 'M101 M102 M103 M104 M107 ', '', ),
    ]:
        self._try_arg(target, arg, mapping, funcTypeError, funcOverflowError)
def test_arg_OneArg_TwoArgs(self):
    """Overload selection between one-argument and two-argument signatures."""
    from IronPythonTest.BinderTest import COverloads_OneArg_TwoArgs
    target = COverloads_OneArg_TwoArgs()
    # rows: (argument, expected method->flag mapping, TypeError methods, OverflowError methods)
    for (arg, mapping, funcTypeError, funcOverflowError) in [
        ( tuple(), dict(), 'M100 M101 M102 M103 M104 M105 ', '', ),
        ( 100, _second('M100 M101 M102 M103 M104 '), 'M105 ', '', ),
        ( (100, 200), _first('M100 M101 M102 M103 M104 M105 '), '', '', ),
    ]:
        self._try_arg(target, arg, mapping, funcTypeError, funcOverflowError)
def test_arg_OneArg_NormalOut(self):
    """Overload selection for signatures mixing a normal argument with an out parameter (normal first)."""
    from IronPythonTest.BinderTest import COverloads_OneArg_NormalOut
    target = COverloads_OneArg_NormalOut()
    # rows: (argument, expected method->flag mapping, TypeError methods, OverflowError methods)
    for (arg, mapping, funcTypeError, funcOverflowError) in [
        ( tuple(), dict(), 'M100 M101 M102 M103 M104 M105 ', '', ),
        ( 100, _merge(_first('M100 M102 M105 '), _second('M103 M104 ')), 'M101 ', '', ),
        ( (100, 200), _second('M103 M104 '), 'M100 M101 M102 M105 ', '', ),
    ]:
        self._try_arg(target, arg, mapping, funcTypeError, funcOverflowError)
def test_arg_OneArg_RefOut(self):
    """Overload selection for signatures mixing a ref argument with an out parameter (ref first)."""
    from IronPythonTest.BinderTest import COverloads_OneArg_RefOut
    target = COverloads_OneArg_RefOut()
    # rows: (argument, expected method->flag mapping, TypeError methods, OverflowError methods)
    for (arg, mapping, funcTypeError, funcOverflowError) in [
        ( tuple(), dict(), 'M100 M101 M102 M103 ', '', ),
        ( 100, _merge(_first('M103 '), _second('M100 M101 M102 ')), '', '', ),
        ( (100, 200), _second('M101 M102 '), 'M100 M103 ', '', ),
    ]:
        self._try_arg(target, arg, mapping, funcTypeError, funcOverflowError)
def test_arg_OneArg_OutNormal(self):
    """Overload selection for signatures mixing an out parameter with a normal argument (out first)."""
    from IronPythonTest.BinderTest import COverloads_OneArg_OutNormal
    target = COverloads_OneArg_OutNormal()
    # rows: (argument, expected method->flag mapping, TypeError methods, OverflowError methods)
    for (arg, mapping, funcTypeError, funcOverflowError) in [
        ( tuple(), dict(), 'M100 M101 M102 M103 ', '', ),
        ( 100, _merge(_first('M100 M103 '), _second('M101 M102 ')), '', '', ),
        ( (100, 200), _second('M101 M102 '), 'M100 M103 ', '', ),
    ]:
        self._try_arg(target, arg, mapping, funcTypeError, funcOverflowError)
def test_arg_OneArg_OutRef(self):
    """Overload selection for signatures mixing an out parameter with a ref argument (out first)."""
    from IronPythonTest.BinderTest import COverloads_OneArg_OutRef
    target = COverloads_OneArg_OutRef()
    # rows: (argument, expected method->flag mapping, TypeError methods, OverflowError methods)
    for (arg, mapping, funcTypeError, funcOverflowError) in [
        ( tuple(), dict(), 'M100 M101 M102 ', '', ),
        ( 100, _merge(_first('M102 '), _second('M100 M101 ')), '', '', ),
        ( (100, 200), _second('M100 M101 '), 'M102 ', '', ),
    ]:
        self._try_arg(target, arg, mapping, funcTypeError, funcOverflowError)
def test_arg_OneArg_NormalDefault(self):
    """Overload selection for signatures with a normal argument plus a defaulted argument."""
    from IronPythonTest.BinderTest import COverloads_OneArg_NormalDefault
    target = COverloads_OneArg_NormalDefault()
    # rows: (argument, expected method->flag mapping, TypeError methods, OverflowError methods)
    for (arg, mapping, funcTypeError, funcOverflowError) in [
        ( tuple(), dict(), 'M100 M101 ', '', ),
        ( 100, _first('M100 M101 '), '', '', ),
        ( (100, 200), _first('M100 M101 '), '', '', ),
    ]:
        self._try_arg(target, arg, mapping, funcTypeError, funcOverflowError)
def test_arg_String(self):
    """Overload selection between System.String / System.Char parameters for
    str and str-subclass arguments of length 1 and >1."""
    # FIX: COverloads_String was imported twice back-to-back; the duplicate
    # import statement has been removed.
    from IronPythonTest.BinderTest import COverloads_String
    target = COverloads_String()
    # rows: (argument, expected method->flag mapping, TypeError methods, OverflowError methods)
    for (arg, mapping, funcTypeError, funcOverflowError) in [
        ( 'a', _merge(_first('M100 M101 '), _second('M102 ')), '', '', ),
        ( 'abc', _merge(_first('M100 M101 '), _second('M102 ')), '', '', ),
        ( mystr('a'), _merge(_first('M100 M101 '), _second('M102 ')), '', '', ),
        (mystr('abc'), _merge(_first('M100 M101 '), _second('M102 ')), '', '', ),
        ( 1, _first('M101 M102 '), 'M100 ', '', ),
    ]:
        self._try_arg(target, arg, mapping, funcTypeError, funcOverflowError)
def test_arg_Enum(self):
    """Overload selection between two distinct CLR enum parameter types."""
    from IronPythonTest.BinderTest import COverloads_Enum, E1, E2
    target = COverloads_Enum()
    # rows: (argument, expected method->flag mapping, TypeError methods, OverflowError methods)
    for (arg, mapping, funcTypeError, funcOverflowError) in [
        ( E1.A, _first('M100 '), 'M101 ', '', ),
        ( E2.A, _first('M101 '), 'M100 ', '', ),
        ( 1, _second('M100 M101 '), '', '', ),
        ( UInt163, _second('M101 '), 'M100 ', '', ),
    ]:
        self._try_arg(target, arg, mapping, funcTypeError, funcOverflowError)
def test_arg_UserDefined(self):
    """Overload selection for user-defined CLR types, Python subclasses of them,
    and Python classes with __int__ conversions."""
    from IronPythonTest.BinderTest import C1, C2, C3, C6, S1, COverloads_UserDefined
    target = COverloads_UserDefined()
    # rows: (argument, expected method->flag mapping, TypeError methods, OverflowError methods)
    for (arg, mapping, funcTypeError, funcOverflowError) in [
        ( C1(), _merge(_first('M101 M102 M103 M104 '), _second('M100 ')), 'M105 ', '', ),
        ( C2(), _merge(_first('M102 M103 '), _second('M100 M101 M104 ')), 'M105 ', '', ),
        ( C3(), _second('M103 '), 'M100 M101 M102 M104 M105 ', '', ),
        ( S1(), _first('M100 M101 M102 M103 '), 'M104 M105 ', '', ),
        ( C6(), _second('M103 M105 '), 'M100 M101 M102 M104 ', '', ),
        ( self.pt_i, _first('M100 M101 M102 M103 '), 'M104 M105 ', '', ),
        ( self.pt_c1, _merge(_first('M101 M102 M103 M104 '), _second('M100 ')), 'M105 ', '', ),
        ( self.pt_i_int, _first('M100 M101 M102 M103 '), 'M104 M105 ', '', ),
        (self.pt_int_old, _second('M102 M103 '), 'M100 M101 M104 M105 ', '', ),
        (self.pt_int_new, _second('M102 M103 '), 'M100 M101 M104 M105 ', '', ),
    ]:
        self._try_arg(target, arg, mapping, funcTypeError, funcOverflowError)
def test_arg_Derived_Number(self):
    """Overload selection across the numeric-type hierarchy (None, bool, int,
    long, Byte, float arguments)."""
    from IronPythonTest.BinderTest import COverloads_Derived_Number
    target = COverloads_Derived_Number()
    # rows: (argument, expected method->flag mapping, TypeError methods, OverflowError methods)
    for (arg, mapping, funcTypeError, funcOverflowError) in [
        ( None, _merge(_first('M106 '), _second('M102 M103 ')), 'M100 M101 M104 M105 ', '', ),
        ( True, _merge(_first('M100 M103 '), _second('M104 M105 M106 ')), 'M101 M102 ', '', ),
        ( -100, _merge(_first('M100 '), _second('M104 M105 M106 ')), 'M101 M102 M103 ', '', ),
        ( 200L, _merge(_first('M106 M105 '), _second('M100 M102 M101 ')), 'M103 M104 ', '', ),
        ( Byte10, _merge(_first('M103 '), _second('M100 M105 M106 ')), 'M101 M102 M104 ', '', ),
        ( 12.34, _merge(_first('M105 M106 '), _second('M101 M102 M100 ')), 'M103 M104 ', '', ),
    ]:
        self._try_arg(target, arg, mapping, funcTypeError, funcOverflowError)
def test_arg_Collections(self):
    """Overload selection for array, tuple and list arguments, including
    element-conversion overflow cases."""
    from IronPythonTest.BinderTest import COverloads_Collections
    target = COverloads_Collections()
    # rows: (argument, expected method->flag mapping, TypeError methods, OverflowError methods)
    for (arg, mapping, funcTypeError, funcOverflowError) in [
        ( arrayInt, _merge(_first('M100 '), _second('M101 M102 M103 M104 ')), '', '', ),
        ( tupleInt, _merge(_first(''), _second('M100 M101 M102 M103 M104 ')), '', '', ),
        ( listInt, _merge(_first('M102 M104 '), _second('M100 M103 ')), 'M101 ', '', ),
        ( tupleLong1, _merge(_first(''), _second('M100 M101 M102 M103 M104 ')), '', '', ),
        # tupleLong2 holds values too large for Int32 elements
        ( tupleLong2, _merge(_first(''), _second('M100 M103 ')), '', 'M101 M102 M104 ', ),
        ( arrayByte, _first('M101 M103 M104 '), 'M100 M102 ', '', ),
        ( arrayObj, _merge(_first('M101 M102 M104 '), _second('M100 M103 ')), '', '', ),
    ]:
        self._try_arg(target, arg, mapping, funcTypeError, funcOverflowError)
#------------------------------------------------------------------------------
#--Boolean
def test_arg_boolean_overload(self):
    '''
    Readable variant of test_arg_Boolean: maps each argument to the list of
    methods expected to accept it, and checks each method sets Flag.Value to
    its own number.

    TODO:
    In addition to test_arg_boolean_overload, we need to split up test_arg_Boolean
    into two more functions as well - test_arg_boolean_overload_typeerror and
    test_arg_boolean_overload_overflowerror. This should be done for all of these
    types of tests to make them more readable and maintainable.
    '''
    from IronPythonTest.BinderTest import COverloads_Boolean, Flag
    o = COverloads_Boolean()
    # argument -> methods expected to succeed with that argument
    param_method_map = {
        None : [ o.M100, o.M101, o.M102, o.M103, o.M104, o.M105, o.M106,
                 o.M107, o.M108, o.M109, o.M110, o.M111],
        True : [ o.M100, o.M101, o.M102, o.M103, o.M104, o.M105, o.M106, o.M107, o.M108, o.M109, o.M110, o.M111, o.M112],
        False : [ o.M100, o.M101, o.M102, o.M103, o.M104, o.M105, o.M106, o.M107, o.M108, o.M109, o.M110, o.M111, o.M112],
        100 : [ o.M100],
        myint(100): [ o.M100],
        -100 : [ o.M100],
        UInt32Max: [ o.M100, o.M106],
        200L : [ o.M100, o.M106, o.M109],
        -200L : [ o.M100, o.M106, o.M109],
        Byte10 : [ o.M100],
        SBytem10 : [ o.M100],
        Int1610 : [ o.M100],
        Int16m20 : [ o.M100],
        12.34 : [ o.M100, o.M101, o.M102, o.M103, o.M104, o.M105, o.M106, o.M107, o.M108, o.M109, o.M110],
    }
    for param in param_method_map.keys():
        for meth in param_method_map[param]:
            # each method Mxxx records xxx in Flag.Value when chosen
            expected_flag = int(meth.__name__[1:])
            meth(param)
            self.assertEqual(expected_flag, Flag.Value)
def test_arg_Boolean(self):
    """Exhaustive overload-selection table for boolean-related parameter types."""
    from IronPythonTest.BinderTest import COverloads_Boolean
    target = COverloads_Boolean()
    # rows: (argument, expected method->flag mapping, TypeError methods, OverflowError methods)
    for (arg, mapping, funcTypeError, funcOverflowError) in [
        ( None, _merge(_first('M100 M101 M102 M103 M104 M105 M106 M107 M108 M109 M110 M111 '), _second('M112 ')), '', '', ),
        ( True, _merge(_first('M100 M101 M102 M103 M104 M105 M106 M107 M108 M109 M110 M111 M112 '), _second('')), '', '', ),
        ( False, _merge(_first('M100 M101 M102 M103 M104 M105 M106 M107 M108 M109 M110 M111 M112 '), _second('')), '', '', ),
        ( 100, _merge(_first('M100 '), _second('M106 M108 M109 M110 M111 M112 ')), 'M101 M102 M103 M104 M105 M107 ', '', ),
        ( myint(100), _merge(_first('M100 '), _second('M106 M108 M109 M110 M111 M112 ')), 'M101 M102 M103 M104 M105 M107 ', '', ),
        ( -100, _merge(_first('M100 '), _second('M106 M108 M109 M110 M111 M112 ')), 'M101 M102 M103 M104 M105 M107 ', '', ),
        ( UInt32Max, _merge(_first('M100 M106 '), _second('M105 M107 M108 M109 M110 M111 M112 ')), 'M101 M102 M103 M104 ', '', ),
        ( 200L, _merge(_first('M100 M106 M109 '), _second('M108 M112 M110 M111 ')), 'M101 M102 M103 M104 M105 M107 ', '', ),
        ( -200L, _merge(_first('M100 M106 M109 '), _second('M108 M112 M110 M111 ')), 'M101 M102 M103 M104 M105 M107 ', '', ),
        ( Byte10, _merge(_first('M100 '), _second('M101 M103 M104 M105 M106 M107 M108 M109 M110 M111 M112 ')), 'M102 ', '', ),
        ( SBytem10, _merge(_first('M100 '), _second('M102 M104 M106 M108 M109 M110 M111 M112 ')), 'M101 M103 M105 M107 ', '', ),
        ( Int1610, _merge(_first('M100 '), _second('M104 M106 M108 M109 M110 M111 M112 ')), 'M101 M102 M103 M105 M107 ', '', ),
        ( Int16m20, _merge(_first('M100 '), _second('M104 M106 M108 M109 M110 M111 M112 ')), 'M101 M102 M103 M105 M107 ', '', ),
        ( 12.34, _merge(_first('M100 M101 M102 M103 M104 M105 M106 M107 M108 M109 M110 '), _second('M111 M112 ')), '', '', ),
    ]:
        self._try_arg(target, arg, mapping, funcTypeError, funcOverflowError)
    def test_arg_Byte(self):
        '''Table-driven overload-resolution checks for COverloads_Byte; row
        format matches test_arg_Boolean (arg, winners, TypeError set,
        OverflowError set).'''
        from IronPythonTest.BinderTest import COverloads_Byte
        target = COverloads_Byte()
        for (arg, mapping, funcTypeError, funcOverflowError) in [
            ( None, _merge(_first(''), _second('M100 M112 ')), 'M101 M102 M103 M104 M105 M106 M107 M108 M109 M110 M111 ', '', ),
            ( True, _merge(_first('M101 M102 M103 M104 M105 M107 '), _second('M100 M106 M108 M109 M110 M112 M111 ')), '', '', ),
            ( False, _merge(_first('M101 M102 M103 M104 M105 M107 '), _second('M100 M106 M108 M109 M110 M112 M111 ')), '', '', ),
            ( 100, _merge(_first('M101 M102 M103 M104 M105 M107 '), _second('M106 M108 M109 M110 M111 M112 ')), 'M100 ', '', ),
            ( myint(100), _merge(_first('M101 M102 M103 M104 M105 M107 '), _second('M106 M108 M109 M110 M111 M112 ')), 'M100 ', '', ),
            ( -100, _merge(_first(''), _second('M106 M108 M109 M110 M111 M112 ')), 'M100 ', 'M101 M102 M103 M104 M105 M107 ', ),
            ( UInt32Max, _merge(_first(''), _second('M105 M107 M108 M109 M110 M111 M112 ')), 'M100 ', 'M101 M102 M103 M104 M106 ', ),
            ( 200L, _merge(_first('M101 M102 M103 M104 M105 M106 M107 M109 '), _second('M108 M112 M110 M111 ')), 'M100 ', '', ),
            ( -200L, _merge(_first(''), _second('M108 M112 M110 M111 ')), 'M100 ', 'M101 M102 M103 M104 M105 M106 M107 M109 ', ),
            ( Byte10, _first('M100 M101 M102 M103 M104 M105 M106 M107 M108 M109 M110 M111 M112 '), '', '', ),
            ( SBytem10, _merge(_first(''), _second('M102 M104 M106 M108 M109 M110 M111 M112 ')), 'M100 ', 'M101 M103 M105 M107 ', ),
            ( Int1610, _merge(_first('M101 M102 M103 M105 M107 '), _second('M104 M106 M108 M109 M110 M111 M112 ')), 'M100 ', '', ),
            ( Int16m20, _merge(_first(''), _second('M104 M106 M108 M109 M110 M111 M112 ')), 'M100 ', 'M101 M102 M103 M105 M107 ', ),
            ( 12.34, _merge(_first('M101 M102 M103 M104 M105 M106 M107 M108 M109 M110 '), _second('M100 M111 M112 ')), '', '', ),
            ]:
            self._try_arg(target, arg, mapping, funcTypeError, funcOverflowError)
    def test_arg_Int16(self):
        '''Table-driven overload-resolution checks for COverloads_Int16; row
        format matches test_arg_Boolean (arg, winners, TypeError set,
        OverflowError set).'''
        from IronPythonTest.BinderTest import COverloads_Int16
        target = COverloads_Int16()
        for (arg, mapping, funcTypeError, funcOverflowError) in [
            ( None, _merge(_first(''), _second('M100 M112 ')), 'M101 M102 M103 M104 M105 M106 M107 M108 M109 M110 M111 ', '', ),
            ( True, _merge(_first('M101 '), _second('M100 M102 M103 M104 M105 M107 M106 M108 M109 M110 M112 M111 ')), '', '', ),
            ( False, _merge(_first('M101 '), _second('M100 M102 M103 M104 M105 M107 M106 M108 M109 M110 M112 M111 ')), '', '', ),
            ( 100, _merge(_first('M101 '), _second('M102 M103 M104 M105 M107 M106 M108 M109 M110 M111 M112 ')), 'M100 ', '', ),
            ( myint(100), _merge(_first('M101 '), _second('M102 M103 M104 M105 M107 M106 M108 M109 M110 M111 M112 ')), 'M100 ', '', ),
            ( -100, _merge(_first('M101 '), _second('M103 M106 M108 M109 M110 M111 M112 ')), 'M100 ', 'M102 M104 M105 M107 ', ),
            ( UInt32Max, _merge(_first(''), _second('M105 M107 M108 M109 M110 M111 M112 ')), 'M100 ', 'M101 M102 M103 M104 M106 ', ),
            ( 200L, _merge(_first('M101 M106 M109 '), _second('M102 M104 M105 M107 M108 M110 M111 M112 ')), 'M100 ', 'M103 ', ),
            ( -200L, _merge(_first('M101 M106 M109 '), _second('M108 M110 M111 M112 ')), 'M100 ', 'M102 M103 M104 M105 M107 ', ),
            ( Byte10, _merge(_first('M100 M101 M103 M106 M108 M109 M110 M111 M112'), _second('M102 M104 M105 M107 ')), '', '', ),
            ( SBytem10, _merge(_first('M100 M101 M102 M104 M105 M106 M107 M108 M109 M110 M111 M112 '), _second('M103 ')), '', '', ),
            ( Int1610, _first('M100 M101 M102 M103 M104 M105 M106 M107 M108 M109 M110 M111 M112 '), '', '', ),
            ( Int16m20, _first('M100 M101 M102 M103 M104 M105 M106 M107 M108 M109 M110 M111 M112 '), '', '', ),
            ( 12.34, _merge(_first('M101 M106 M108 M109 M110 '), _second('M100 M111 M112 M102 M103 M104 M105 M107 ')), '', '', ),
            ]:
            self._try_arg(target, arg, mapping, funcTypeError, funcOverflowError)
    def test_arg_Int32(self):
        '''Table-driven overload-resolution checks for COverloads_Int32; row
        format matches test_arg_Boolean (arg, winners, TypeError set,
        OverflowError set).'''
        from IronPythonTest.BinderTest import COverloads_Int32
        target = COverloads_Int32()
        for (arg, mapping, funcTypeError, funcOverflowError) in [
            ( None, _merge(_first(''), _second('M100 M112 ')), 'M101 M102 M103 M104 M105 M106 M107 M108 M109 M110 M111 ', '', ),
            ( True, _merge(_first('M101 M102 M103 M104 M105 M107 M106 M108 M109 M110 M112 M111 '), _second('M100 ')), '', '', ),
            ( False, _merge(_first('M101 M102 M103 M104 M105 M107 M106 M108 M109 M110 M112 M111 '), _second('M100 ')), '', '', ),
            ( 100, _first('M100 M101 M102 M103 M104 M105 M106 M107 M108 M109 M110 M111 M112 '), '', '', ),
            ( myint(100), _first('M100 M101 M102 M103 M104 M105 M106 M107 M108 M109 M110 M111 M112 '), '', '', ),
            ( -100, _first('M100 M101 M102 M103 M104 M105 M106 M107 M108 M109 M110 M111 M112 '), '', '', ),
            ( UInt32Max, _merge(_first(''), _second('M100 M106 M107 M108 M109 M110 M111 M112 ')), '', 'M101 M102 M103 M104 M105 ', ),
            ( 200L, _merge(_first('M101 M109 '), _second('M100 M102 M104 M105 M106 M107 M108 M110 M111 M112 ')), '', 'M103 ', ),
            ( -200L, _merge(_first('M101 M109 '), _second('M100 M105 M108 M110 M111 M112 ')), '', 'M102 M103 M104 M106 M107 ', ),
            ( Byte10, _merge(_first('M100 M101 M103 M108 M109 M110 M111 M112'), _second('M102 M104 M105 M106 M107 ')), '', '', ),
            ( SBytem10, _merge(_first('M100 M101 M102 M104 M106 M107 M108 M109 M110 M111 M112 '), _second('M103 M105 ')), '', '', ),
            ( Int1610, _merge(_first('M100 M101 M102 M103 M104 M106 M107 M108 M109 M110 M111 M112 '), _second('M105 ')), '', '', ),
            ( Int16m20, _merge(_first('M100 M101 M102 M103 M104 M106 M107 M108 M109 M110 M111 M112 '), _second('M105 ')), '', '', ),
            ( 12.34, _merge(_first('M101 M108 M109 M110 '), _second('M100 M106 M111 M112 M102 M103 M104 M105 M107 ')), '', '', ),
            ]:
            self._try_arg(target, arg, mapping, funcTypeError, funcOverflowError)
    def test_arg_Double(self):
        '''Table-driven overload-resolution checks for COverloads_Double; row
        format matches test_arg_Boolean (arg, winners, TypeError set,
        OverflowError set).'''
        from IronPythonTest.BinderTest import COverloads_Double
        target = COverloads_Double()
        for (arg, mapping, funcTypeError, funcOverflowError) in [
            ( None, _merge(_first(''), _second('M100 M112 ')), 'M101 M102 M103 M104 M105 M106 M107 M108 M109 M110 M111 ', '', ),
            ( True, _merge(_first('M101 M102 M103 M104 M105 M106 M108 M112 '), _second('M100 M107 M109 M111 ')), 'M110 ', '', ),
            ( False, _merge(_first('M101 M102 M103 M104 M105 M106 M108 M112 '), _second('M100 M107 M109 M111 ')), 'M110 ', '', ),
            ( 100, _merge(_first('M100 M101 M102 M103 M104 M105 M106 M108 M112 '), _second('M107 M109 M111 ')), 'M110 ', '', ),
            ( myint(100), _merge(_first('M100 M101 M102 M103 M104 M105 M106 M108 M112 '), _second('M107 M109 M111 ')), 'M110 ', '', ),
            ( -100, _merge(_first('M100 M101 M102 M103 M104 M105 M106 M108 M112 '), _second('M107 M109 M111 ')), 'M110 ', '', ),
            ( UInt32Max, _merge(_first('M100 M101 M102 M103 M104 M105 M107 M112 '), _second('M106 M108 M109 M111 ')), 'M110 ', '', ),
            ( 200L, _merge(_first('M101 M100 M102 M103 M104 M105 M106 M107 M108 M109 M110 M112 '), _second('M111 ')), '', '', ),
            ( -200L, _merge(_first('M101 M100 M102 M103 M104 M105 M106 M107 M108 M109 M110 M112 '), _second('M111 ')), '', '', ),
            ( Byte10, _merge(_first('M100 M101 M103 M112 '), _second('M102 M104 M105 M106 M107 M108 M109 M111 ')), 'M110 ', '', ),
            ( SBytem10, _merge(_first('M100 M101 M102 M104 M106 M108 M112 '), _second('M103 M105 M107 M109 M111 ')), 'M110 ', '', ),
            ( Int1610, _merge(_first('M100 M101 M102 M103 M104 M106 M108 M112 '), _second('M105 M107 M109 M111 ')), 'M110 ', '', ),
            ( Int16m20, _merge(_first('M100 M101 M102 M103 M104 M106 M108 M112 '), _second('M105 M107 M109 M111 ')), 'M110 ', '', ),
            ( 12.34, _first('M100 M101 M102 M103 M104 M105 M106 M107 M108 M109 M110 M111 M112 '), '', '', ),
            ]:
            self._try_arg(target, arg, mapping, funcTypeError, funcOverflowError)
# Hand this module over to the shared IronPython test runner.
run_test(__name__)
| 59.500942 | 176 | 0.571673 |
acf11ea6db546eeb71d8485cbd88d3fc25ba8c50 | 1,125 | py | Python | common/queue_utils.py | sears-s/fuzzbench | fbed13638497cec46da66d7b0cebe294e0e01ff5 | [
"Apache-2.0"
] | 800 | 2020-03-02T18:14:07.000Z | 2022-03-29T05:04:46.000Z | common/queue_utils.py | sears-s/fuzzbench | fbed13638497cec46da66d7b0cebe294e0e01ff5 | [
"Apache-2.0"
] | 995 | 2020-03-02T19:21:51.000Z | 2022-03-31T13:52:59.000Z | common/queue_utils.py | sears-s/fuzzbench | fbed13638497cec46da66d7b0cebe294e0e01ff5 | [
"Apache-2.0"
] | 292 | 2020-03-02T19:07:30.000Z | 2022-03-30T09:38:12.000Z | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for setting up a work queue with rq."""
import redis
import rq
import rq.job
from common import experiment_utils
def initialize_queue(redis_host):
    """Return an rq work queue backed by the Redis server at *redis_host*.

    The queue is named after the current experiment so multiple experiments
    can share one Redis instance without interfering.
    """
    name = experiment_utils.get_experiment_name()
    connection = redis.Redis(host=redis_host)
    return rq.Queue(name, connection=connection)
def get_all_jobs(queue):
    """Fetch every job currently known to *queue* in a single round trip."""
    return rq.job.Job.fetch_many(queue.get_job_ids(), queue.connection)
| 33.088235 | 74 | 0.750222 |
acf11fa3d7e7f965ad5b2ff2bc8b32d6c04f171c | 3,426 | py | Python | src/vehicle.py | DenMaslov/dz | 84ebf41f654d7010aee6f8346c842d910d6cdb86 | [
"MIT"
] | null | null | null | src/vehicle.py | DenMaslov/dz | 84ebf41f654d7010aee6f8346c842d910d6cdb86 | [
"MIT"
] | null | null | null | src/vehicle.py | DenMaslov/dz | 84ebf41f654d7010aee6f8346c842d910d6cdb86 | [
"MIT"
] | null | null | null | import random
from statistics import geometric_mean as gavg
from soldier import Soldier
from config import Config
from base import BaseUnit
class Vehicle(BaseUnit, Config):
    """A combat vehicle crewed by up to ``MAX_OPERATORS`` Soldier instances.

    Health is a single blended value covering hull and crew; the unit is
    considered alive only while at least one operator survives and the
    blended health stays above ``MIN_HEALTH``.
    """

    def __init__(self) -> None:
        # Seed once per instance so simulations are reproducible.
        random.seed(self.seed)
        self.__operators = []
        self.__health = self.MAX_HEALTH
        self.__is_alive = False

    def add_operator(self, operator: Soldier) -> None:
        """Add *operator* to the crew (silently ignored once the crew is full).

        Raises:
            TypeError: if *operator* is not a ``Soldier`` instance.
        """
        if not isinstance(operator, Soldier):
            raise TypeError("argument must be a Soldier")
        if len(self.__operators) < self.MAX_OPERATORS:
            self.__operators.append(operator)
            self.__is_alive = True

    @property
    def recharge(self) -> int:
        """A random recharge time (ms) within the configured vehicle range."""
        return random.randint(self.MIN_RECHARGE_VEHICLE,
                              self.MAX_RECHARGE)

    @property
    def is_alive(self) -> bool:
        """Recompute and report whether the vehicle is still operational."""
        return self.check_is_alive()

    @property
    def health(self) -> float:
        """Current blended health value."""
        return self.__health

    @health.setter
    def health(self, hp: float) -> None:
        # Clamp at MIN_HEALTH and keep the alive flag in sync with the value.
        alive = hp > self.MIN_HEALTH
        self.__health = hp if alive else self.MIN_HEALTH
        self.__is_alive = alive

    @property
    def attack_success(self) -> float:
        """Attack success: crew geometric mean scaled by vehicle health."""
        crew_success = gavg(op.attack_success for op in self.__operators)
        return 0.5 * (1 + self.health / 100) * crew_success

    def estimate_total_health(self) -> None:
        """Re-blend health as the mean of hull health and every crew member."""
        total = self.__health + sum(op.health for op in self.__operators)
        self.__health = total / (len(self.__operators) + 1)

    def do_damage(self) -> float:
        """Return the damage dealt; surviving crew gain experience first."""
        experience_bonus = 0
        for op in self.__operators:
            if op.is_alive:
                op.experience += 1
                experience_bonus += op.experience / 100
        return 0.1 + experience_bonus

    def get_damage(self, amount: float) -> None:
        """Split incoming *amount* between hull, one random operator, and
        the remaining crew, then refresh blended health and alive state."""
        self.health = self.health - amount * self.DMG_TO_VEHICLE
        # One randomly chosen crew member takes the brunt of the hit.
        unlucky = random.choice(self.__operators)
        unlucky.get_damage(amount * self.DMG_TO_ONE_OPER)
        for op in self.__operators:
            if op != unlucky:
                op.get_damage(amount * self.DMG_TO_OPER)
        self.estimate_total_health()
        self.check_is_alive()

    def check_is_alive(self) -> bool:
        """Refresh and return the alive flag: a living operator is required
        and blended health must exceed the minimum."""
        crew_alive = any(op.is_alive for op in self.__operators)
        self.__is_alive = bool(crew_alive and self.health > self.MIN_HEALTH)
        return self.__is_alive
acf1203cc6e41b2861c2868e9d27ded1b5cf00db | 1,038 | py | Python | seaworthy/fixtures.py | praekeltfoundation/ndoh-hub | 91d834ff8fe43b930a73d8debdaa0e6af78c5efc | [
"BSD-3-Clause"
] | null | null | null | seaworthy/fixtures.py | praekeltfoundation/ndoh-hub | 91d834ff8fe43b930a73d8debdaa0e6af78c5efc | [
"BSD-3-Clause"
] | 126 | 2016-07-12T19:39:44.000Z | 2022-03-24T13:39:38.000Z | seaworthy/fixtures.py | praekeltfoundation/ndoh-hub | 91d834ff8fe43b930a73d8debdaa0e6af78c5efc | [
"BSD-3-Clause"
] | 3 | 2016-09-28T13:16:11.000Z | 2020-11-07T15:32:37.000Z | import pytest
from seaworthy.containers.postgresql import PostgreSQLContainer
from seaworthy.definitions import ContainerDefinition
HUB_IMAGE = pytest.config.getoption("--hub-image")
class HubContainer(ContainerDefinition):
    """Seaworthy container definition for the ndoh-hub service."""

    # Log line that signals the container is ready to serve requests.
    WAIT_PATTERNS = (r"Listening at: unix:/run/gunicorn/gunicorn.sock",)

    def __init__(self, name, db_url, image=HUB_IMAGE):
        """Delegate to the base definition, remembering the database URL."""
        super().__init__(name, image, self.WAIT_PATTERNS)
        self.db_url = db_url

    def base_kwargs(self):
        """Docker run kwargs: publish port 8000 and point the hub at its DB."""
        ports = {"8000/tcp": None}
        environment = {"HUB_DATABASE": self.db_url}
        return {"ports": ports, "environment": environment}
# PostgreSQL backing container plus its pytest fixtures: one plain fixture
# and one that cleans the database between tests.
postgresql_container = PostgreSQLContainer("postgresql")
postgresql_fixture, clean_postgresql_fixture = (
    postgresql_container.pytest_clean_fixtures("postgresql_container")
)
# Hub container wired up against the PostgreSQL container defined above.
hub_container = HubContainer("ndoh-hub", postgresql_container.database_url())
hub_fixture = hub_container.pytest_fixture(
    "hub_container", dependencies=["postgresql_container"]
)
__all__ = ["clean_postgresql_fixture", "hub_fixture", "postgresql_fixture"]
acf1208cb12be615edb84e7b82a025345d315042 | 62,768 | py | Python | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_03_01/aio/operations/_private_link_services_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 3 | 2020-06-23T02:25:27.000Z | 2021-09-07T18:48:11.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_03_01/aio/operations/_private_link_services_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 510 | 2019-07-17T16:11:19.000Z | 2021-08-02T08:38:32.000Z | sdk/network/azure-mgmt-network/azure/mgmt/network/v2020_03_01/aio/operations/_private_link_services_operations.py | rsdoherty/azure-sdk-for-python | 6bba5326677468e6660845a703686327178bb7b1 | [
"MIT"
] | 5 | 2019-09-04T12:51:37.000Z | 2020-09-16T07:28:40.000Z | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PrivateLinkServicesOperations:
"""PrivateLinkServicesOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.mgmt.network.v2020_03_01.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
    def __init__(self, client, config, serializer, deserializer) -> None:
        """Store the pipeline client, configuration and (de)serializers
        supplied by the generated service client."""
        # Pipeline client used to build and send HTTP requests.
        self._client = client
        # Serializer/deserializer pair shared with the generated models.
        self._serialize = serializer
        self._deserialize = deserializer
        # Holds subscription_id, polling_interval, credentials, etc.
        self._config = config
    async def _delete_initial(
        self,
        resource_group_name: str,
        service_name: str,
        **kwargs
    ) -> None:
        """Issue the initial DELETE request for the long-running delete
        operation; ``begin_delete`` polls the result to completion."""
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-03-01"
        accept = "application/json"
        # Construct URL
        url = self._delete_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serviceName': self._serialize.url("service_name", service_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.delete(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200/202/204 are all valid initial responses for an ARM delete LRO.
        if response.status_code not in [200, 202, 204]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.Error, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if cls:
            return cls(pipeline_response, None, {})
    _delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateLinkServices/{serviceName}'}  # type: ignore
    async def begin_delete(
        self,
        resource_group_name: str,
        service_name: str,
        **kwargs
    ) -> AsyncLROPoller[None]:
        """Deletes the specified private link service.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param service_name: The name of the private link service.
        :type service_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial DELETE when not resuming from a saved poller.
        if cont_token is None:
            raw_result = await self._delete_initial(
                resource_group_name=resource_group_name,
                service_name=service_name,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            # Delete has no body; only invoke the custom response hook if any.
            if cls:
                return cls(pipeline_response, None, {})
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serviceName': self._serialize.url("service_name", service_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # Final state of this LRO is read from the Location header.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateLinkServices/{serviceName}'}  # type: ignore
    async def get(
        self,
        resource_group_name: str,
        service_name: str,
        expand: Optional[str] = None,
        **kwargs
    ) -> "_models.PrivateLinkService":
        """Gets the specified private link service by resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param service_name: The name of the private link service.
        :type service_name: str
        :param expand: Expands referenced resources.
        :type expand: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: PrivateLinkService, or the result of cls(response)
        :rtype: ~azure.mgmt.network.v2020_03_01.models.PrivateLinkService
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateLinkService"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-03-01"
        accept = "application/json"
        # Construct URL
        url = self.get.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serviceName': self._serialize.url("service_name", service_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # $expand is only sent when the caller asked for referenced resources.
        if expand is not None:
            query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        request = self._client.get(url, query_parameters, header_parameters)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        if response.status_code not in [200]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.Error, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        deserialized = self._deserialize('PrivateLinkService', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateLinkServices/{serviceName}'}  # type: ignore
    async def _create_or_update_initial(
        self,
        resource_group_name: str,
        service_name: str,
        parameters: "_models.PrivateLinkService",
        **kwargs
    ) -> "_models.PrivateLinkService":
        """Issue the initial PUT request for the long-running create-or-update
        operation; ``begin_create_or_update`` polls the result to completion."""
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateLinkService"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-03-01"
        content_type = kwargs.pop("content_type", "application/json")
        accept = "application/json"
        # Construct URL
        url = self._create_or_update_initial.metadata['url']  # type: ignore
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serviceName': self._serialize.url("service_name", service_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        url = self._client.format_url(url, **path_format_arguments)
        # Construct parameters
        query_parameters = {}  # type: Dict[str, Any]
        query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
        # Construct headers
        header_parameters = {}  # type: Dict[str, Any]
        header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
        header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
        body_content_kwargs = {}  # type: Dict[str, Any]
        body_content = self._serialize.body(parameters, 'PrivateLinkService')
        body_content_kwargs['content'] = body_content
        request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
        pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
        response = pipeline_response.http_response
        # 200 = updated existing resource, 201 = created a new one.
        if response.status_code not in [200, 201]:
            map_error(status_code=response.status_code, response=response, error_map=error_map)
            error = self._deserialize.failsafe_deserialize(_models.Error, response)
            raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
        if response.status_code == 200:
            deserialized = self._deserialize('PrivateLinkService', pipeline_response)
        if response.status_code == 201:
            deserialized = self._deserialize('PrivateLinkService', pipeline_response)
        if cls:
            return cls(pipeline_response, deserialized, {})
        return deserialized
    _create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateLinkServices/{serviceName}'}  # type: ignore
    async def begin_create_or_update(
        self,
        resource_group_name: str,
        service_name: str,
        parameters: "_models.PrivateLinkService",
        **kwargs
    ) -> AsyncLROPoller["_models.PrivateLinkService"]:
        """Creates or updates an private link service in the specified resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param service_name: The name of the private link service.
        :type service_name: str
        :param parameters: Parameters supplied to the create or update private link service operation.
        :type parameters: ~azure.mgmt.network.v2020_03_01.models.PrivateLinkService
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either PrivateLinkService or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_03_01.models.PrivateLinkService]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateLinkService"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        # Only issue the initial PUT when not resuming from a saved poller.
        if cont_token is None:
            raw_result = await self._create_or_update_initial(
                resource_group_name=resource_group_name,
                service_name=service_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)
        def get_long_running_output(pipeline_response):
            deserialized = self._deserialize('PrivateLinkService', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serviceName': self._serialize.url("service_name", service_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        # Final state of this LRO is read from the Azure-AsyncOperation header.
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'azure-async-operation'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateLinkServices/{serviceName}'}  # type: ignore
    def list(
        self,
        resource_group_name: str,
        **kwargs
    ) -> AsyncIterable["_models.PrivateLinkServiceListResult"]:
        """Gets all private link services in a resource group.
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PrivateLinkServiceListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_03_01.models.PrivateLinkServiceListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateLinkServiceListResult"]
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-03-01"
        accept = "application/json"
        def prepare_request(next_link=None):
            # Builds either the first-page request or a follow-up request
            # from the server-provided next_link URL.
            # Construct headers
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL
                url = self.list.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct parameters
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already contains the full query string.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request
        async def extract_data(pipeline_response):
            # Deserialize one page and hand back (next page link, items).
            deserialized = self._deserialize('PrivateLinkServiceListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)
        async def get_next(next_link=None):
            request = prepare_request(next_link)
            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response
            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.Error, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
            return pipeline_response
        return AsyncItemPaged(
            get_next, extract_data
        )
    list.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateLinkServices'}  # type: ignore
    def list_by_subscription(
        self,
        **kwargs
    ) -> AsyncIterable["_models.PrivateLinkServiceListResult"]:
        """Gets all private link service in a subscription.

        Returns a lazy async pager; no request is sent until iteration starts.

        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PrivateLinkServiceListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_03_01.models.PrivateLinkServiceListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateLinkServiceListResult"]
        # Well-known failure statuses -> azure-core exceptions; extendable via kwargs.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-03-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # First page: format the URL template; later pages: use next_link verbatim.
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL from the metadata template.
                url = self.list_by_subscription.metadata['url']  # type: ignore
                path_format_arguments = {
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct query parameters.
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already carries its own query string.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # One page -> (next_link or None, async iterable of items).
            deserialized = self._deserialize('PrivateLinkServiceListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch a single page; only 200 is a success.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.Error, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_by_subscription.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/privateLinkServices'}  # type: ignore
async def get_private_endpoint_connection(
self,
resource_group_name: str,
service_name: str,
pe_connection_name: str,
expand: Optional[str] = None,
**kwargs
) -> "_models.PrivateEndpointConnection":
"""Get the specific private end point connection by specific private link service in the resource
group.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the private link service.
:type service_name: str
:param pe_connection_name: The name of the private end point connection.
:type pe_connection_name: str
:param expand: Expands referenced resources.
:type expand: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.PrivateEndpointConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self.get_private_endpoint_connection.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'peConnectionName': self._serialize.url("pe_connection_name", pe_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
if expand is not None:
query_parameters['$expand'] = self._serialize.query("expand", expand, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get_private_endpoint_connection.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateLinkServices/{serviceName}/privateEndpointConnections/{peConnectionName}'} # type: ignore
async def update_private_endpoint_connection(
self,
resource_group_name: str,
service_name: str,
pe_connection_name: str,
parameters: "_models.PrivateEndpointConnection",
**kwargs
) -> "_models.PrivateEndpointConnection":
"""Approve or reject private end point connection for a private link service in a subscription.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param service_name: The name of the private link service.
:type service_name: str
:param pe_connection_name: The name of the private end point connection.
:type pe_connection_name: str
:param parameters: Parameters supplied to approve or reject the private end point connection.
:type parameters: ~azure.mgmt.network.v2020_03_01.models.PrivateEndpointConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection, or the result of cls(response)
:rtype: ~azure.mgmt.network.v2020_03_01.models.PrivateEndpointConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.PrivateEndpointConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.update_private_endpoint_connection.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'peConnectionName': self._serialize.url("pe_connection_name", pe_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'PrivateEndpointConnection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
update_private_endpoint_connection.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateLinkServices/{serviceName}/privateEndpointConnections/{peConnectionName}'} # type: ignore
async def _delete_private_endpoint_connection_initial(
self,
resource_group_name: str,
service_name: str,
pe_connection_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Construct URL
url = self._delete_private_endpoint_connection_initial.metadata['url'] # type: ignore
path_format_arguments = {
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'serviceName': self._serialize.url("service_name", service_name, 'str'),
'peConnectionName': self._serialize.url("pe_connection_name", pe_connection_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize.failsafe_deserialize(_models.Error, response)
raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_private_endpoint_connection_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateLinkServices/{serviceName}/privateEndpointConnections/{peConnectionName}'} # type: ignore
    async def begin_delete_private_endpoint_connection(
        self,
        resource_group_name: str,
        service_name: str,
        pe_connection_name: str,
        **kwargs
    ) -> AsyncLROPoller[None]:
        """Delete private end point connection for a private link service in a subscription.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param service_name: The name of the private link service.
        :type service_name: str
        :param pe_connection_name: The name of the private end point connection.
        :type pe_connection_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[None]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType[None]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh start: issue the initial DELETE. cls=lambda returns the raw
            # pipeline response so the poller can read the LRO headers from it.
            raw_result = await self._delete_private_endpoint_connection_initial(
                resource_group_name=resource_group_name,
                service_name=service_name,
                pe_connection_name=pe_connection_name,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # These kwargs were consumed by the initial request and must not be
        # forwarded into the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Final-result hook: the operation returns no body, so only the
            # optional custom 'cls' callback is applied.
            if cls:
                return cls(pipeline_response, None, {})

        # Path arguments let the polling method re-format relative LRO URLs.
        path_format_arguments = {
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'serviceName': self._serialize.url("service_name", service_name, 'str'),
            'peConnectionName': self._serialize.url("pe_connection_name", pe_connection_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its saved token.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_delete_private_endpoint_connection.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateLinkServices/{serviceName}/privateEndpointConnections/{peConnectionName}'}  # type: ignore
    def list_private_endpoint_connections(
        self,
        resource_group_name: str,
        service_name: str,
        **kwargs
    ) -> AsyncIterable["_models.PrivateEndpointConnectionListResult"]:
        """Gets all private end point connections for a specific private link service.

        Returns a lazy async pager; no request is sent until iteration starts.

        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param service_name: The name of the private link service.
        :type service_name: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either PrivateEndpointConnectionListResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_03_01.models.PrivateEndpointConnectionListResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateEndpointConnectionListResult"]
        # Known failure statuses -> azure-core exceptions; extendable via kwargs.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-03-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # First page: format the URL template; later pages: use next_link verbatim.
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL from the metadata template.
                url = self.list_private_endpoint_connections.metadata['url']  # type: ignore
                path_format_arguments = {
                    'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
                    'serviceName': self._serialize.url("service_name", service_name, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct query parameters.
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already carries its own query string.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # One page -> (next_link or None, async iterable of items).
            deserialized = self._deserialize('PrivateEndpointConnectionListResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch a single page; only 200 is a success.
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                error = self._deserialize.failsafe_deserialize(_models.Error, response)
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, model=error, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_private_endpoint_connections.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/privateLinkServices/{serviceName}/privateEndpointConnections'}  # type: ignore
async def _check_private_link_service_visibility_initial(
self,
location: str,
parameters: "_models.CheckPrivateLinkServiceVisibilityRequest",
**kwargs
) -> Optional["_models.PrivateLinkServiceVisibility"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.PrivateLinkServiceVisibility"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._check_private_link_service_visibility_initial.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'CheckPrivateLinkServiceVisibilityRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PrivateLinkServiceVisibility', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_check_private_link_service_visibility_initial.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/checkPrivateLinkServiceVisibility'} # type: ignore
    async def begin_check_private_link_service_visibility(
        self,
        location: str,
        parameters: "_models.CheckPrivateLinkServiceVisibilityRequest",
        **kwargs
    ) -> AsyncLROPoller["_models.PrivateLinkServiceVisibility"]:
        """Checks whether the subscription is visible to private link service.

        :param location: The location of the domain name.
        :type location: str
        :param parameters: The request body of CheckPrivateLinkService API call.
        :type parameters: ~azure.mgmt.network.v2020_03_01.models.CheckPrivateLinkServiceVisibilityRequest
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either PrivateLinkServiceVisibility or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_03_01.models.PrivateLinkServiceVisibility]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateLinkServiceVisibility"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh start: issue the initial POST. cls=lambda returns the raw
            # pipeline response so the poller can read the LRO headers from it.
            raw_result = await self._check_private_link_service_visibility_initial(
                location=location,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # These kwargs were consumed by the initial request and must not be
        # forwarded into the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final response body once polling completes.
            deserialized = self._deserialize('PrivateLinkServiceVisibility', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # Path arguments let the polling method re-format relative LRO URLs.
        path_format_arguments = {
            'location': self._serialize.url("location", location, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its saved token.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_check_private_link_service_visibility.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/checkPrivateLinkServiceVisibility'}  # type: ignore
async def _check_private_link_service_visibility_by_resource_group_initial(
self,
location: str,
resource_group_name: str,
parameters: "_models.CheckPrivateLinkServiceVisibilityRequest",
**kwargs
) -> Optional["_models.PrivateLinkServiceVisibility"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["_models.PrivateLinkServiceVisibility"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._check_private_link_service_visibility_by_resource_group_initial.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'CheckPrivateLinkServiceVisibilityRequest')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PrivateLinkServiceVisibility', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_check_private_link_service_visibility_by_resource_group_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/locations/{location}/checkPrivateLinkServiceVisibility'} # type: ignore
    async def begin_check_private_link_service_visibility_by_resource_group(
        self,
        location: str,
        resource_group_name: str,
        parameters: "_models.CheckPrivateLinkServiceVisibilityRequest",
        **kwargs
    ) -> AsyncLROPoller["_models.PrivateLinkServiceVisibility"]:
        """Checks whether the subscription is visible to private link service in the specified resource
        group.

        :param location: The location of the domain name.
        :type location: str
        :param resource_group_name: The name of the resource group.
        :type resource_group_name: str
        :param parameters: The request body of CheckPrivateLinkService API call.
        :type parameters: ~azure.mgmt.network.v2020_03_01.models.CheckPrivateLinkServiceVisibilityRequest
        :keyword callable cls: A custom type or function that will be passed the direct response
        :keyword str continuation_token: A continuation token to restart a poller from a saved state.
        :keyword polling: Pass in True if you'd like the AsyncARMPolling polling method,
         False for no polling, or your own initialized polling object for a personal polling strategy.
        :paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
        :keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
        :return: An instance of AsyncLROPoller that returns either PrivateLinkServiceVisibility or the result of cls(response)
        :rtype: ~azure.core.polling.AsyncLROPoller[~azure.mgmt.network.v2020_03_01.models.PrivateLinkServiceVisibility]
        :raises ~azure.core.exceptions.HttpResponseError:
        """
        polling = kwargs.pop('polling', True)  # type: Union[bool, AsyncPollingMethod]
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.PrivateLinkServiceVisibility"]
        lro_delay = kwargs.pop(
            'polling_interval',
            self._config.polling_interval
        )
        cont_token = kwargs.pop('continuation_token', None)  # type: Optional[str]
        if cont_token is None:
            # Fresh start: issue the initial POST. cls=lambda returns the raw
            # pipeline response so the poller can read the LRO headers from it.
            raw_result = await self._check_private_link_service_visibility_by_resource_group_initial(
                location=location,
                resource_group_name=resource_group_name,
                parameters=parameters,
                cls=lambda x,y,z: x,
                **kwargs
            )

        # These kwargs were consumed by the initial request and must not be
        # forwarded into the polling method below.
        kwargs.pop('error_map', None)
        kwargs.pop('content_type', None)

        def get_long_running_output(pipeline_response):
            # Deserialize the final response body once polling completes.
            deserialized = self._deserialize('PrivateLinkServiceVisibility', pipeline_response)
            if cls:
                return cls(pipeline_response, deserialized, {})
            return deserialized

        # Path arguments let the polling method re-format relative LRO URLs.
        path_format_arguments = {
            'location': self._serialize.url("location", location, 'str'),
            'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
            'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
        }
        if polling is True: polling_method = AsyncARMPolling(lro_delay, lro_options={'final-state-via': 'location'}, path_format_arguments=path_format_arguments, **kwargs)
        elif polling is False: polling_method = AsyncNoPolling()
        else: polling_method = polling
        if cont_token:
            # Resume a previously started operation from its saved token.
            return AsyncLROPoller.from_continuation_token(
                polling_method=polling_method,
                continuation_token=cont_token,
                client=self._client,
                deserialization_callback=get_long_running_output
            )
        else:
            return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
    begin_check_private_link_service_visibility_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/locations/{location}/checkPrivateLinkServiceVisibility'}  # type: ignore
    def list_auto_approved_private_link_services(
        self,
        location: str,
        **kwargs
    ) -> AsyncIterable["_models.AutoApprovedPrivateLinkServicesResult"]:
        """Returns all of the private link service ids that can be linked to a Private Endpoint with auto
        approved in this subscription in this region.

        Returns a lazy async pager; no request is sent until iteration starts.

        :param location: The location of the domain name.
        :type location: str
        :keyword callable cls: A custom type or function that will be passed the direct response
        :return: An iterator like instance of either AutoApprovedPrivateLinkServicesResult or the result of cls(response)
        :rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_03_01.models.AutoApprovedPrivateLinkServicesResult]
        :raises: ~azure.core.exceptions.HttpResponseError
        """
        cls = kwargs.pop('cls', None)  # type: ClsType["_models.AutoApprovedPrivateLinkServicesResult"]
        # Known failure statuses -> azure-core exceptions; extendable via kwargs.
        error_map = {
            401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
        }
        error_map.update(kwargs.pop('error_map', {}))
        api_version = "2020-03-01"
        accept = "application/json"

        def prepare_request(next_link=None):
            # First page: format the URL template; later pages: use next_link verbatim.
            header_parameters = {}  # type: Dict[str, Any]
            header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
            if not next_link:
                # Construct URL from the metadata template.
                url = self.list_auto_approved_private_link_services.metadata['url']  # type: ignore
                path_format_arguments = {
                    'location': self._serialize.url("location", location, 'str'),
                    'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
                }
                url = self._client.format_url(url, **path_format_arguments)
                # Construct query parameters.
                query_parameters = {}  # type: Dict[str, Any]
                query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
                request = self._client.get(url, query_parameters, header_parameters)
            else:
                # next_link already carries its own query string.
                url = next_link
                query_parameters = {}  # type: Dict[str, Any]
                request = self._client.get(url, query_parameters, header_parameters)
            return request

        async def extract_data(pipeline_response):
            # One page -> (next_link or None, async iterable of items).
            deserialized = self._deserialize('AutoApprovedPrivateLinkServicesResult', pipeline_response)
            list_of_elem = deserialized.value
            if cls:
                list_of_elem = cls(list_of_elem)
            return deserialized.next_link or None, AsyncList(list_of_elem)

        async def get_next(next_link=None):
            # Fetch a single page; only 200 is a success. Unlike the other list
            # operations here, no error model is attached to the failure path
            # (presumably this operation defines no default error schema).
            request = prepare_request(next_link)

            pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
            response = pipeline_response.http_response

            if response.status_code not in [200]:
                map_error(status_code=response.status_code, response=response, error_map=error_map)
                raise HttpResponseError(response=response, error_format=ARMErrorFormat)

            return pipeline_response

        return AsyncItemPaged(
            get_next, extract_data
        )
    list_auto_approved_private_link_services.metadata = {'url': '/subscriptions/{subscriptionId}/providers/Microsoft.Network/locations/{location}/autoApprovedPrivateLinkServices'}  # type: ignore
# NOTE(review): leading indentation appears to have been stripped from this
# file chunk; the code lines below are preserved verbatim, with review
# comments added at the same (flattened) column.
def list_auto_approved_private_link_services_by_resource_group(
self,
location: str,
resource_group_name: str,
**kwargs
) -> AsyncIterable["_models.AutoApprovedPrivateLinkServicesResult"]:
"""Returns all of the private link service ids that can be linked to a Private Endpoint with auto
approved in this subscription in this region.
:param location: The location of the domain name.
:type location: str
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either AutoApprovedPrivateLinkServicesResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~azure.mgmt.network.v2020_03_01.models.AutoApprovedPrivateLinkServicesResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
# Optional response transformer supplied by the caller via kwargs.
cls = kwargs.pop('cls', None) # type: ClsType["_models.AutoApprovedPrivateLinkServicesResult"]
# Map well-known HTTP status codes to typed azure-core exceptions; callers
# may extend or override this mapping through the 'error_map' kwarg.
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2020-03-01"
accept = "application/json"
# Build one GET request: the first page uses the templated metadata URL
# below (scoped to the resource group); subsequent pages follow next_link.
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_auto_approved_private_link_services_by_resource_group.metadata['url'] # type: ignore
path_format_arguments = {
'location': self._serialize.url("location", location, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
# Deserialize one page into (next_link, async iterable of page elements).
async def extract_data(pipeline_response):
deserialized = self._deserialize('AutoApprovedPrivateLinkServicesResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
# Fetch one page through the client pipeline; only HTTP 200 is success.
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
# AsyncItemPaged lazily walks the pages using the two callbacks above.
return AsyncItemPaged(
get_next, extract_data
)
list_auto_approved_private_link_services_by_resource_group.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Network/locations/{location}/autoApprovedPrivateLinkServices'} # type: ignore
| 51.960265 | 260 | 0.675535 |
acf1226ddddfc7887f46e322b6ba641465792947 | 5,243 | py | Python | mflowgen/core/run.py | jbrzozo24/mflowgen | fe168e1ea2311feb35588333aa5d7d7c6ba79625 | [
"BSD-3-Clause"
] | 53 | 2020-11-05T20:13:03.000Z | 2022-03-31T14:51:56.000Z | mflowgen/core/run.py | jbrzozo24/mflowgen | fe168e1ea2311feb35588333aa5d7d7c6ba79625 | [
"BSD-3-Clause"
] | 27 | 2020-11-04T19:52:38.000Z | 2022-03-17T17:11:01.000Z | mflowgen/core/run.py | jbrzozo24/mflowgen | fe168e1ea2311feb35588333aa5d7d7c6ba79625 | [
"BSD-3-Clause"
] | 26 | 2020-11-02T18:43:57.000Z | 2022-03-31T14:52:52.000Z | #=========================================================================
# run_handler
#=========================================================================
# Primary handler for generating build system files for a given graph
#
# Author : Christopher Torng
# Date : June 2, 2019
#
import importlib
import os
import sys
import yaml
from mflowgen.core.build_orchestrator import BuildOrchestrator
from mflowgen.backends import MakeBackend, NinjaBackend
from mflowgen.utils import bold
from mflowgen.utils import read_yaml, write_yaml
class RunHandler:
def __init__( s ):
pass
#-----------------------------------------------------------------------
# helpers
#-----------------------------------------------------------------------
# find_construct_path
#
# Locate the construct script
#
# - If --update is given, use the saved path
# - Otherwise..
# - Read from the .mflowgen.yml metadata in the design directory
# - If it does not exist, then use "construct.py" as default
#
def find_construct_path( s, design, update ):
# Check for --update first
if update:
try:
data = read_yaml( '.mflowgen.yml' ) # get metadata
construct_path = data['construct']
except Exception:
print()
print( bold( 'Error:' ), 'No pre-existing build in current',
'directory for running --update' )
print()
sys.exit( 1 )
return construct_path
# Search in the design directory
if not os.path.exists( design ):
print()
print( bold( 'Error:' ), 'Directory not found at path',
'"{}"'.format( design ) )
print()
sys.exit( 1 )
yaml_path = os.path.abspath( design + '/.mflowgen.yml' )
if not os.path.exists( yaml_path ):
construct_path = design + '/construct.py'
else:
data = read_yaml( yaml_path )
try:
construct_path = data['construct']
except KeyError:
raise KeyError(
'YAML file "{}" must have key "construct"'.format( yaml_path ) )
if not construct_path.startswith( '/' ): # check if absolute path
construct_path = design + '/' + construct_path
construct_path = os.path.abspath( construct_path )
if not os.path.exists( construct_path ):
raise ValueError(
'Construct script not found at "{}"'.format( construct_path ) )
return construct_path
# save_construct_path
#
# Save the path to the construct script for future use of --update
#
def save_construct_path( s, construct_path ):
yaml_path = '.mflowgen.yml'
try:
data = read_yaml( yaml_path )
except Exception:
data = {}
data['construct'] = construct_path
write_yaml( data = data, path = yaml_path )
#-----------------------------------------------------------------------
# launch
#-----------------------------------------------------------------------
# Dispatch function for commands
#
def launch( s, help_, design, update=False, backend='make' ):
# Check that this design directory exists
if not design and not update:
print( ' Error: argument --design required',
'unless using --update or --demo' )
sys.exit( 1 )
s.launch_run( design, update, backend )
#-----------------------------------------------------------------------
# launch_run
#-----------------------------------------------------------------------
# Generates the backend build files (e.g., the Makefile) from the python
# graph description.
#
def launch_run( s, design, update, backend ):
# Find the construct script (and check for --update) and save the path
# to the construct script for future use of --update
construct_path = s.find_construct_path( design, update )
s.save_construct_path( construct_path )
# Import the graph for this design
c_dirname = os.path.dirname( construct_path )
c_basename = os.path.splitext( os.path.basename( construct_path ) )[0]
sys.path.append( c_dirname )
try:
construct = importlib.import_module( c_basename )
except ModuleNotFoundError:
print()
print( bold( 'Error:' ), 'Could not open construct script at',
'"{}"'.format( construct_path ) )
print()
sys.exit( 1 )
try:
construct.construct
except AttributeError:
print()
print( bold( 'Error:' ), 'No module named "construct" in',
'"{}"'.format( construct_path ) )
print()
sys.exit( 1 )
# Construct the graph
g = construct.construct()
# Generate the build files (e.g., Makefile) for the selected backend
# build system
if backend == 'make':
backend_cls = MakeBackend
elif backend == 'ninja':
backend_cls = NinjaBackend
b = BuildOrchestrator( g, backend_cls )
b.build()
# Done
list_target = backend + " list"
status_target = backend + " status"
print( "Targets: run \"" + list_target + "\" and \""
+ status_target + "\"" )
print()
| 28.340541 | 74 | 0.53519 |
acf1242a15c2d66b6c86e4713c915b64271bd03f | 4,464 | py | Python | p1_navigation/p1_navigation_submission/agent.py | hogansung/deep-reinforcement-learning | 5170ca42bdfdb16cc5c2b86c61bee304015a6254 | [
"MIT"
] | null | null | null | p1_navigation/p1_navigation_submission/agent.py | hogansung/deep-reinforcement-learning | 5170ca42bdfdb16cc5c2b86c61bee304015a6254 | [
"MIT"
] | null | null | null | p1_navigation/p1_navigation_submission/agent.py | hogansung/deep-reinforcement-learning | 5170ca42bdfdb16cc5c2b86c61bee304015a6254 | [
"MIT"
] | null | null | null | import random
from typing import List
import numpy as np
import torch
import torch.nn.functional as F
import torch.optim as optim
from torch import nn
from model import QNetwork
from replay_buffer import ReplayBuffer
BUFFER_SIZE = int(1e5) # replay buffer size
BATCH_SIZE = 64 # minibatch size
GAMMA = 0.99 # discount factor
TAU = 1e-3 # for soft update of target parameters
LR = 5e-4 # learning rate
UPDATE_EVERY = 4 # how often to update the network
device = torch.device("cuda:0" if torch.cuda.is_available() else "cpu")
class Agent:
"""Interacts with and learns from the environment."""
def __init__(
self, state_size: int, action_size: int, seed: int,
):
"""Initialize an Agent object.
Params
======
state_size (int): dimension of each state
action_size (int): dimension of each action
seed (int): random seed
"""
self.state_size = state_size
self.action_size = action_size
random.seed(seed)
# Q-Network
self.qnetwork_local = QNetwork(state_size, action_size, seed).to(device)
self.qnetwork_target = QNetwork(state_size, action_size, seed).to(device)
self.optimizer = optim.Adam(self.qnetwork_local.parameters(), lr=LR)
# Replay memory
self.memory = ReplayBuffer(action_size, BUFFER_SIZE, BATCH_SIZE, seed)
# Initialize time step (for updating every UPDATE_EVERY steps)
self.t_step = 0
def step(
self,
state: np.ndarray,
action: int,
reward: float,
next_state: List[int],
done: bool,
):
# Save experience in replay memory
self.memory.add(state, action, reward, next_state, done)
# Learn every UPDATE_EVERY time steps.
self.t_step = (self.t_step + 1) % UPDATE_EVERY
if self.t_step == 0:
# If enough samples are available in memory, get random subset and learn
if len(self.memory) > BATCH_SIZE:
experiences = self.memory.sample()
self.learn(experiences, GAMMA)
def act(
self, state: np.ndarray, eps: float = 0.0,
):
"""Returns actions for given state as per current policy.
Params
======
state (array_like): current state
eps (float): epsilon, for epsilon-greedy action selection
"""
state = torch.from_numpy(state).float().unsqueeze(0).to(device)
self.qnetwork_local.eval()
with torch.no_grad():
action_values = self.qnetwork_local(state)
self.qnetwork_local.train()
# Epsilon-greedy action selection
if random.random() > eps:
return np.argmax(action_values.cpu().data.numpy())
else:
return random.choice(np.arange(self.action_size))
def learn(self, experiences, gamma):
"""Update value parameters using given batch of experience tuples.
Params
======
experiences (Tuple[torch.Tensor]): tuple of (s, a, r, s', done) tuples
gamma (float): discount factor
"""
states, actions, rewards, next_states, dones = experiences
max_q_values_for_next_state, _ = (
self.qnetwork_target(next_states).detach().max(dim=1)
)
q_target_values = rewards + gamma * max_q_values_for_next_state.unsqueeze(1)
q_expected_values = self.qnetwork_local(states).gather(1, actions)
loss = F.mse_loss(q_expected_values, q_target_values)
# Minimize the loss
self.optimizer.zero_grad()
loss.backward()
self.optimizer.step()
# ------------------- update target network ------------------- #
self.soft_update(self.qnetwork_local, self.qnetwork_target, TAU)
def soft_update(self, local_model: nn.Module, target_model: nn.Module, tau: float):
"""Soft update model parameters.
θ_target = τ*θ_local + (1 - τ)*θ_target
Params
======
local_model (PyTorch model): weights will be copied from
target_model (PyTorch model): weights will be copied to
tau (float): interpolation parameter
"""
for target_param, local_param in zip(
target_model.parameters(), local_model.parameters()
):
target_param.data.copy_(
tau * local_param.data + (1.0 - tau) * target_param.data
)
| 33.56391 | 87 | 0.614247 |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.