**Schema** (113 columns; name and dtype):

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| effective | string |
| hits | int64 |

Between `qsc_codepython_frac_lines_print_quality_signal` and `effective`, the same 41 `qsc_code_*`/`qsc_codepython_*` names repeat once more without the `_quality_signal` suffix (hit counters), typed int64 throughout except `qsc_code_frac_words_unique` and `qsc_code_cate_encoded_data`, which are null-typed.
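Because each row embeds a full source file next to its quality signals, a programmatic look is often easier than reading the dump. A minimal sketch, assuming the rows are available as a Parquet/pandas table with exactly the column names above; the file path and the thresholds are placeholders, not values used by any particular pipeline:

```python
import pandas as pd

# Placeholder path; point this at wherever the rows with the schema above live.
df = pd.read_parquet("stack_rows.parquet")

# Keep files that look like real, non-repetitive code (illustrative cutoffs).
mask = (
    (df["alphanum_fraction"] > 0.25)
    & (df["max_line_length"] < 1000)
    & (df["qsc_code_frac_chars_dupe_5grams_quality_signal"] < 0.5)
)
print(df.loc[mask, ["hexsha", "size", "lang", "max_stars_repo_name"]])
```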
**Row 1:** hexsha `7c381806df4f9a8daef26e21cae152813d0d29b1` · size 1,548 · ext `py` · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | `predict.py` | `faroit/deep-fireball` | `b37d08cb5b15359c363e7816fc7c163c1709a5ac` | ["MIT"] | null | null | null |
| max_issues | `predict.py` | `faroit/deep-fireball` | `b37d08cb5b15359c363e7816fc7c163c1709a5ac` | ["MIT"] | null | null | null |
| max_forks | `predict.py` | `faroit/deep-fireball` | `b37d08cb5b15359c363e7816fc7c163c1709a5ac` | ["MIT"] | null | null | null |

`content`:
```python
# elsewhere...
import pandas as pd
from keras.models import model_from_json
import random
import sys
import numpy as np

maxlen = 15
step = 3

df = pd.read_pickle('articles.pandas')
text = str.join(' ', df.text.tolist())
chars = set(text)
print('total chars:', len(chars))
char_indices = dict((c, i) for i, c in enumerate(chars))
indices_char = dict((i, c) for i, c in enumerate(chars))

start_index = random.randint(0, len(text) - maxlen - 1)

model = model_from_json(open('model.json').read())
model.compile(loss='categorical_crossentropy', optimizer='rmsprop')
model.load_weights('weights.h5')


def sample(a, temperature=1.0):
    # helper function to sample an index from a probability array
    a = np.log(a) / temperature
    a = np.exp(a) / np.sum(np.exp(a))
    return np.argmax(np.random.multinomial(1, a, 1))


for diversity in [0.25]:
    print()
    print('----- diversity:', diversity)

    generated = ''
    sentence = text[start_index: start_index + maxlen]
    generated += sentence
    print('----- Generating with seed: "' + sentence + '"')
    sys.stdout.write(generated)

    for i in range(200):
        x = np.zeros((1, maxlen, len(chars)))
        for t, char in enumerate(sentence):
            x[0, t, char_indices[char]] = 1.

        preds = model.predict(x, verbose=0)[0]
        next_index = sample(preds, diversity)
        next_char = indices_char[next_index]

        generated += next_char
        sentence = sentence[1:] + next_char

        sys.stdout.write(next_char)
        sys.stdout.flush()
    print()
```
Stats: `avg_line_length` 25.8 · `max_line_length` 67 · `alphanum_fraction` 0.646641.

Quality signals (`qsc_code_*`, `_quality_signal` suffix dropped): num_words 222, num_chars 1,548, mean_word_length 4.418919, frac_words_unique 0.400901, frac_chars_top_2grams 0.03262, frac_chars_top_3grams 0.026504, frac_chars_top_4grams 0.014271, frac_chars_dupe_5grams 0.042813, frac_chars_dupe_6grams 0.042813, frac_chars_dupe_7grams 0, frac_chars_dupe_8grams 0, frac_chars_dupe_9grams 0, frac_chars_dupe_10grams 0, frac_chars_replacement_symbols 0, frac_chars_digital 0.01793, frac_chars_whitespace 0.207364, size_file_byte 1,548, num_lines 59, num_chars_line_max 68, num_chars_line_mean 26.237288, frac_chars_alphabet 0.781581, frac_chars_comments 0.046512, cate_xml_start 0, frac_lines_dupe_lines 0.04878, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.084861, frac_chars_long_word_length 0.016293, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0.

Python-specific (`qsc_codepython_*`): cate_ast 1, frac_lines_func_ratio 0.02439, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.121951, frac_lines_simplefunc 0, score_lines_no_logic 0.170732, frac_lines_print 0.121951.

Hit counters: all 41 repeated `qsc_*` columns are 0 (null in the two null-typed columns); effective 1, hits 0.
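The three leading stats can be recomputed from `content`, which is a useful sanity check on how they are defined. A sketch under the assumption that `avg_line_length` is characters divided by line count (1,548 / 25.8 = 60 lines for this row) and `alphanum_fraction` is the share of alphanumeric characters; the producing pipeline may differ in details such as newline handling:

```python
def basic_stats(content: str) -> dict:
    # Assumed definitions, for cross-checking the stored row stats.
    lines = content.splitlines()
    return {
        "avg_line_length": len(content) / len(lines),
        "max_line_length": max(len(line) for line in lines),
        "alphanum_fraction": sum(c.isalnum() for c in content) / len(content),
    }
```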
**Row 2:** hexsha `7c3890eac8b3049a655feef5c632c8c9d2d8f1d4` · size 4,200 · ext `py` · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | `tests/simulation/test_container.py` | `Zavioer/SIR-simulation-IBM-ESI` | `45a7b1d4f0e3cec8dcd8284e00f25386b6e77c58` | ["MIT"] | null | null | null |
| max_issues | `tests/simulation/test_container.py` | `Zavioer/SIR-simulation-IBM-ESI` | `45a7b1d4f0e3cec8dcd8284e00f25386b6e77c58` | ["MIT"] | 37 | 2020-05-19T20:11:53.000Z | 2020-06-19T11:26:41.000Z |
| max_forks | `tests/simulation/test_container.py` | `Zavioer/SIR-simulation-IBM-ESI` | `45a7b1d4f0e3cec8dcd8284e00f25386b6e77c58` | ["MIT"] | 1 | 2020-05-25T08:10:21.000Z | 2020-05-25T08:10:21.000Z |

`content`:
```python
import unittest

from simulation import container
from simulation import person


class ContainerTestCase(unittest.TestCase):
    def setUp(self) -> None:
        self.box = container.Container(100, 1000, 300, 1, 0.5)
        self.s_instance = person.Person(x=0, y=0, infection_probability=0.25,
                                        recover_probability=0.2, dead_probability=0.05,
                                        infection_range=0.8)

    def test_01__check_if_dimensions_was_set_correctly(self):
        width = 100
        height = 100
        self.assertEqual(self.box.width, width,
                         msg="Container width was set incorrect.")
        self.assertEqual(self.box.height, height,
                         msg="Container height was set incorrect.")
        print("> (test_01) Container dimensions are set correctly.")

    def test_02__check_if_new_object_added_correctly_to_objects_list(self):
        self.box.add_instances(1, "susceptible", infection_probability=0.4,
                               recover_probability=0.2,
                               dead_probability=0.05, infection_range=1.25)
        self.assertEqual(len(self.box.object_list), 1,
                         msg="New instance was not correctly added to"
                             "objects list.")
        print("> (test_02) New instance correctly added to object_list.")

    def test_03__check_if_container_time_to_live_not_elapsed__return_bool(self):
        self.assertIsInstance(self.box.is_alive(), bool,
                              msg="Box method is_alive was not return bool.")
        print("> (test_03) Method is_alive() returns bool type.")

    def test_04__check_if_container_lives_in_elapsed_time(self):
        self.box.time_to_live = 0
        self.assertFalse(self.box.is_alive(), msg="Container instance lives longer"
                                                  "than time_to_live attribute.")
        print("> (test_04) Container can not have more cycles than time_to_live "
              "attribute specified.")

    def test_05__check_if_action_time_interval_is_positive(self):
        self.assertGreater(self.box.action_interval, 0,
                           msg="action_interval parameters allows to insert"
                               "negative values.")
        print("> (test_05) Parameter action_interval can not allows to insert "
              "negative values.")

    def test_06__check_if_container_can_lives(self):
        self.box.time_to_live = 100
        self.assertTrue(self.box.is_alive(), msg="Container does not live in "
                                                 "correctly specified time_to_live.")
        print("> (test_06) Container live correctly base on time_to_live"
              " parameter.")

    def test_07__check_if_possible_move_distance_is_positive(self):
        self.assertGreater(self.box.move_distance_length, 0,
                           msg="move_distance parameter value can be negative.")
        print("> (test_07) Parameter move_distance can not be negative.")

    def test_08__check_if_possible_move_distance_is_less_than_container_size(self):
        self.assertLess(self.box.move_distance_length, self.box.width,
                        msg="Parameter move_distance can be longer than"
                            "container size.")
        print("> (test_08) Parameter move_distance is smaller than container size.")

    def test_09__check_if_action_time_interval_is_less_than_minute(self):
        self.assertLessEqual(self.box.action_interval, 60,
                             msg="action_time_interval could be greater than"
                                 "minute.")
        print("> (test_09) Parameter time_interval could not be greater than minute.")

    def test_10__check_if_group_could_be_grater_than_population(self):
        self.assertRaises(ValueError, self.box.initial_set_up, 900, 100, 10, 0,
                          infection_probability=0.4, recover_probability=0.2,
                          dead_probability=0.05, infection_range=1.25)
        print("> (test_10) All specified groups can not be greater than population.")


if __name__ == '__main__':
    unittest.main()
```
Stats: `avg_line_length` 49.411765 · `max_line_length` 87 · `alphanum_fraction` 0.627143.

Quality signals (`qsc_code_*`, `_quality_signal` suffix dropped): num_words 513, num_chars 4,200, mean_word_length 4.808967, frac_words_unique 0.255361, frac_chars_top_2grams 0.045399, frac_chars_top_3grams 0.028375, frac_chars_top_4grams 0.024321, frac_chars_dupe_5grams 0.254966, frac_chars_dupe_6grams 0.19619, frac_chars_dupe_7grams 0.114309, frac_chars_dupe_8grams 0.083502, frac_chars_dupe_9grams 0.083502, frac_chars_dupe_10grams 0.083502, frac_chars_replacement_symbols 0, frac_chars_digital 0.036938, frac_chars_whitespace 0.290952, size_file_byte 4,200, num_lines 84, num_chars_line_max 88, num_chars_line_mean 50, frac_chars_alphabet 0.791471, frac_chars_comments 0, cate_xml_start 0, frac_lines_dupe_lines 0.058824, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.275476, frac_chars_long_word_length 0, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0.161765.

Python-specific (`qsc_codepython_*`): cate_ast 1, frac_lines_func_ratio 0.161765, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.044118, frac_lines_simplefunc 0, score_lines_no_logic 0.220588, frac_lines_print 0.147059.

Hit counters: all 41 repeated `qsc_*` columns are 0 (null in the two null-typed columns); effective 1, hits 0.
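This row scores far higher on the duplicate-n-gram signals than row 1 (frac_chars_dupe_5grams 0.254966 vs. 0.042813), which fits its repetitive assert/print boilerplate. A rough sketch of what such a signal measures, assuming the usual duplicated-word-n-gram recipe; the producing pipeline's exact character weighting is not shown in this dump:

```python
from collections import Counter

def frac_dupe_ngrams(text: str, n: int = 5) -> float:
    # Fraction of word n-grams that occur more than once (assumed definition).
    words = text.split()
    grams = [tuple(words[i:i + n]) for i in range(len(words) - n + 1)]
    if not grams:
        return 0.0
    counts = Counter(grams)
    return sum(c for c in counts.values() if c > 1) / len(grams)
```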
**Row 3:** hexsha `7c38a782a768dd6c26c320e977f3ea8c8bc5e836` · size 1,403 · ext `py` · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | `pontoon/base/migrations/0007_auto_20150710_0944.py` | `Tratty/pontoon` | `ecb903d72f9274f02137b16669cc3c5859f6329c` | ["BSD-3-Clause"] | 3 | 2020-01-27T12:26:20.000Z | 2022-02-03T09:56:02.000Z |
| max_issues | `pontoon/base/migrations/0007_auto_20150710_0944.py` | `texnoman/pontoon-src` | `6b40ac229605e99966c3bdd1510b772c89d4de24` | ["BSD-3-Clause"] | 1 | 2021-03-24T12:33:03.000Z | 2021-03-24T12:50:19.000Z |
| max_forks | `pontoon/base/migrations/0007_auto_20150710_0944.py` | `texnoman/pontoon-src` | `6b40ac229605e99966c3bdd1510b772c89d4de24` | ["BSD-3-Clause"] | 4 | 2020-01-26T21:28:43.000Z | 2021-06-10T15:25:19.000Z |

`content`:
```python
# -*- coding: utf-8 -*-
from __future__ import unicode_literals

from django.db import models, migrations
import pontoon.base.models


class Migration(migrations.Migration):

    dependencies = [
        ("base", "0006_auto_20150602_0616"),
    ]

    operations = [
        migrations.AddField(
            model_name="locale",
            name="cldr_plurals",
            field=models.CommaSeparatedIntegerField(
                blank=True,
                max_length=11,
                verbose_name=b"CLDR Plurals",
                validators=[pontoon.base.models.validate_cldr],
            ),
        ),
        migrations.AlterField(
            model_name="resource",
            name="format",
            field=models.CharField(
                blank=True,
                max_length=20,
                verbose_name=b"Format",
                choices=[
                    (b"po", b"po"),
                    (b"xliff", b"xliff"),
                    (b"properties", b"properties"),
                    (b"dtd", b"dtd"),
                    (b"inc", b"inc"),
                    (b"ini", b"ini"),
                    (b"lang", b"lang"),
                    (b"l20n", b"l20n"),
                ],
            ),
        ),
        migrations.AlterField(
            model_name="translation",
            name="date",
            field=models.DateTimeField(auto_now_add=True),
        ),
    ]
```
Stats: `avg_line_length` 28.06 · `max_line_length` 63 · `alphanum_fraction` 0.459729.

Quality signals (`qsc_code_*`, `_quality_signal` suffix dropped): num_words 122, num_chars 1,403, mean_word_length 5.131148, frac_words_unique 0.47541, frac_chars_top_2grams 0.043131, frac_chars_top_3grams 0.054313, frac_chars_top_4grams 0.057508, frac_chars_dupe_5grams 0, frac_chars_dupe_6grams 0, frac_chars_dupe_7grams 0, frac_chars_dupe_8grams 0, frac_chars_dupe_9grams 0, frac_chars_dupe_10grams 0, frac_chars_replacement_symbols 0, frac_chars_digital 0.030525, frac_chars_whitespace 0.416251, size_file_byte 1,403, num_lines 49, num_chars_line_max 64, num_chars_line_mean 28.632653, frac_chars_alphabet 0.733822, frac_chars_comments 0.014968, cate_xml_start 0, frac_lines_dupe_lines 0.209302, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.115942, frac_chars_long_word_length 0.016667, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0.

Python-specific (`qsc_codepython_*`): cate_ast 1, frac_lines_func_ratio 0, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.069767, frac_lines_simplefunc 0, score_lines_no_logic 0.139535, frac_lines_print 0.

Hit counters: all 41 repeated `qsc_*` columns are 0 (null in the two null-typed columns); effective 1, hits 0.
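Notably, this file is an auto-generated Django migration yet stores cate_autogen 0, so whatever rule the pipeline applies is narrower than "generated file". A purely hypothetical marker-based heuristic, for illustration only (not the dataset's actual `qsc_code_cate_autogen` rule):

```python
# Hypothetical heuristic: flag files whose header carries a generator marker.
AUTOGEN_MARKERS = ("auto-generated", "autogenerated", "generated by", "do not edit")

def looks_autogenerated(content: str) -> bool:
    head = "\n".join(content.splitlines()[:5]).lower()
    return any(marker in head for marker in AUTOGEN_MARKERS)
```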
**Row 4:** hexsha `7c3b77cba219a97b12762ac1a37f632c5f68d380` · size 11,331 · ext `py` · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | `platformio/project/commands/init.py` | `ufo2011/platformio-core` | `0ceae62701731f8b32c34d7993a34dea34aea59c` | ["Apache-2.0"] | null | null | null |
| max_issues | `platformio/project/commands/init.py` | `ufo2011/platformio-core` | `0ceae62701731f8b32c34d7993a34dea34aea59c` | ["Apache-2.0"] | null | null | null |
| max_forks | `platformio/project/commands/init.py` | `ufo2011/platformio-core` | `0ceae62701731f8b32c34d7993a34dea34aea59c` | ["Apache-2.0"] | null | null | null |

`content`:
````python
# Copyright (c) 2014-present PlatformIO <contact@platformio.org>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#    http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

# pylint: disable=line-too-long,too-many-arguments,too-many-locals

import json
import os

import click

from platformio import fs
from platformio.package.commands.install import install_project_dependencies
from platformio.package.manager.platform import PlatformPackageManager
from platformio.platform.exception import UnknownBoard
from platformio.project.config import ProjectConfig
from platformio.project.generator import ProjectGenerator
from platformio.project.helpers import is_platformio_project


def validate_boards(ctx, param, value):  # pylint: disable=W0613
    pm = PlatformPackageManager()
    for id_ in value:
        try:
            pm.board_config(id_)
        except UnknownBoard:
            raise click.BadParameter(
                "`%s`. Please search for board ID using `platformio boards` "
                "command" % id_
            )
    return value


@click.command("init", short_help="Initialize a project or update existing")
@click.option(
    "--project-dir",
    "-d",
    default=os.getcwd,
    type=click.Path(
        exists=True, file_okay=False, dir_okay=True, writable=True, resolve_path=True
    ),
)
@click.option("-b", "--board", multiple=True, metavar="ID", callback=validate_boards)
@click.option("--ide", type=click.Choice(ProjectGenerator.get_supported_ides()))
@click.option("-e", "--environment", help="Update existing environment")
@click.option("-O", "--project-option", multiple=True)
@click.option("--env-prefix", default="")
@click.option("--no-install-dependencies", is_flag=True)
@click.option("-s", "--silent", is_flag=True)
def project_init_cmd(
    project_dir,
    board,
    ide,
    environment,
    project_option,
    env_prefix,
    no_install_dependencies,
    silent,
):
    is_new_project = not is_platformio_project(project_dir)
    if is_new_project:
        if not silent:
            print_header(project_dir)
        init_base_project(project_dir)

    if environment:
        update_project_env(project_dir, environment, project_option)
    elif board:
        update_board_envs(project_dir, board, project_option, env_prefix)

    # resolve project dependencies
    if not no_install_dependencies and (environment or board):
        install_project_dependencies(
            options=dict(
                project_dir=project_dir,
                environments=[environment] if environment else [],
                silent=silent,
            )
        )

    if ide:
        if not silent:
            click.echo(
                "Updating metadata for the %s IDE..." % click.style(ide, fg="cyan")
            )
        with fs.cd(project_dir):
            config = ProjectConfig.get_instance(
                os.path.join(project_dir, "platformio.ini")
            )
            config.validate()
            ProjectGenerator(config, environment, ide, board).generate()

    if is_new_project:
        init_cvs_ignore(project_dir)

    if not silent:
        print_footer(is_new_project)


def print_header(project_dir):
    if project_dir == os.getcwd():
        click.secho("\nThe current working directory ", fg="yellow", nl=False)
        try:
            click.secho(project_dir, fg="cyan", nl=False)
        except UnicodeEncodeError:
            click.secho(json.dumps(project_dir), fg="cyan", nl=False)
        click.secho(" will be used for the project.", fg="yellow")
        click.echo("")

    click.echo("The next files/directories have been created in ", nl=False)
    try:
        click.secho(project_dir, fg="cyan")
    except UnicodeEncodeError:
        click.secho(json.dumps(project_dir), fg="cyan")
    click.echo("%s - Put project header files here" % click.style("include", fg="cyan"))
    click.echo(
        "%s - Put here project specific (private) libraries"
        % click.style("lib", fg="cyan")
    )
    click.echo("%s - Put project source files here" % click.style("src", fg="cyan"))
    click.echo(
        "%s - Project Configuration File" % click.style("platformio.ini", fg="cyan")
    )


def print_footer(is_new_project):
    if is_new_project:
        return click.secho(
            "\nProject has been successfully initialized! Useful commands:\n"
            "`pio run` - process/build project from the current directory\n"
            "`pio run --target upload` or `pio run -t upload` "
            "- upload firmware to a target\n"
            "`pio run --target clean` - clean project (remove compiled files)"
            "\n`pio run --help` - additional information",
            fg="green",
        )
    return click.secho(
        "Project has been successfully updated!",
        fg="green",
    )


def init_base_project(project_dir):
    with fs.cd(project_dir):
        config = ProjectConfig()
        config.save()
        dir_to_readme = [
            (config.get("platformio", "src_dir"), None),
            (config.get("platformio", "include_dir"), init_include_readme),
            (config.get("platformio", "lib_dir"), init_lib_readme),
            (config.get("platformio", "test_dir"), init_test_readme),
        ]
        for (path, cb) in dir_to_readme:
            if os.path.isdir(path):
                continue
            os.makedirs(path)
            if cb:
                cb(path)


def init_include_readme(include_dir):
    with open(os.path.join(include_dir, "README"), mode="w", encoding="utf8") as fp:
        fp.write(
            """
This directory is intended for project header files.

A header file is a file containing C declarations and macro definitions
to be shared between several project source files. You request the use of a
header file in your project source file (C, C++, etc) located in `src` folder
by including it, with the C preprocessing directive `#include'.

```src/main.c

#include "header.h"

int main (void)
{
 ...
}
```

Including a header file produces the same results as copying the header file
into each source file that needs it. Such copying would be time-consuming
and error-prone. With a header file, the related declarations appear
in only one place. If they need to be changed, they can be changed in one
place, and programs that include the header file will automatically use the
new version when next recompiled. The header file eliminates the labor of
finding and changing all the copies as well as the risk that a failure to
find one copy will result in inconsistencies within a program.

In C, the usual convention is to give header files names that end with `.h'.
It is most portable to use only letters, digits, dashes, and underscores in
header file names, and at most one dot.

Read more about using header files in official GCC documentation:
* Include Syntax
* Include Operation
* Once-Only Headers
* Computed Includes

https://gcc.gnu.org/onlinedocs/cpp/Header-Files.html
""",
        )


def init_lib_readme(lib_dir):
    with open(os.path.join(lib_dir, "README"), mode="w", encoding="utf8") as fp:
        fp.write(
            """
This directory is intended for project specific (private) libraries.
PlatformIO will compile them to static libraries and link into executable file.

The source code of each library should be placed in a an own separate directory
("lib/your_library_name/[here are source files]").

For example, see a structure of the following two libraries `Foo` and `Bar`:

|--lib
|  |
|  |--Bar
|  |  |--docs
|  |  |--examples
|  |  |--src
|  |     |- Bar.c
|  |     |- Bar.h
|  |  |- library.json (optional, custom build options, etc) https://docs.platformio.org/page/librarymanager/config.html
|  |
|  |--Foo
|  |  |- Foo.c
|  |  |- Foo.h
|  |
|  |- README --> THIS FILE
|
|- platformio.ini
|--src
   |- main.c

and a contents of `src/main.c`:
```
#include <Foo.h>
#include <Bar.h>

int main (void)
{
  ...
}

```

PlatformIO Library Dependency Finder will find automatically dependent
libraries scanning project source files.

More information about PlatformIO Library Dependency Finder
- https://docs.platformio.org/page/librarymanager/ldf.html
""",
        )


def init_test_readme(test_dir):
    with open(os.path.join(test_dir, "README"), mode="w", encoding="utf8") as fp:
        fp.write(
            """
This directory is intended for PlatformIO Test Runner and project tests.

Unit Testing is a software testing method by which individual units of
source code, sets of one or more MCU program modules together with associated
control data, usage procedures, and operating procedures, are tested to
determine whether they are fit for use. Unit testing finds problems early
in the development cycle.

More information about PlatformIO Unit Testing:
- https://docs.platformio.org/en/latest/advanced/unit-testing/index.html
""",
        )


def init_cvs_ignore(project_dir):
    conf_path = os.path.join(project_dir, ".gitignore")
    if os.path.isfile(conf_path):
        return
    with open(conf_path, mode="w", encoding="utf8") as fp:
        fp.write(".pio\n")


def update_board_envs(project_dir, board_ids, project_option, env_prefix):
    config = ProjectConfig(
        os.path.join(project_dir, "platformio.ini"), parse_extra=False
    )
    used_boards = []
    for section in config.sections():
        cond = [section.startswith("env:"), config.has_option(section, "board")]
        if all(cond):
            used_boards.append(config.get(section, "board"))

    pm = PlatformPackageManager()
    modified = False
    for id_ in board_ids:
        board_config = pm.board_config(id_)
        if id_ in used_boards:
            continue
        used_boards.append(id_)
        modified = True

        envopts = {"platform": board_config["platform"], "board": id_}
        # find default framework for board
        frameworks = board_config.get("frameworks")
        if frameworks:
            envopts["framework"] = frameworks[0]

        for item in project_option:
            if "=" not in item:
                continue
            _name, _value = item.split("=", 1)
            envopts[_name.strip()] = _value.strip()

        section = "env:%s%s" % (env_prefix, id_)
        config.add_section(section)

        for option, value in envopts.items():
            config.set(section, option, value)

    if modified:
        config.save()


def update_project_env(project_dir, environment, project_option):
    if not project_option:
        return

    config = ProjectConfig(
        os.path.join(project_dir, "platformio.ini"), parse_extra=False
    )

    section = "env:%s" % environment
    if not config.has_section(section):
        config.add_section(section)

    for item in project_option:
        if "=" not in item:
            continue
        _name, _value = item.split("=", 1)
        config.set(section, _name.strip(), _value.strip())

    config.save()
````
Stats: `avg_line_length` 31.828652 · `max_line_length` 119 · `alphanum_fraction` 0.662519.

Quality signals (`qsc_code_*`, `_quality_signal` suffix dropped): num_words 1,472, num_chars 11,331, mean_word_length 4.991848, frac_words_unique 0.280571, frac_chars_top_2grams 0.035384, frac_chars_top_3grams 0.009526, frac_chars_top_4grams 0.009254, frac_chars_dupe_5grams 0.189303, frac_chars_dupe_6grams 0.15841, frac_chars_dupe_7grams 0.126293, frac_chars_dupe_8grams 0.105471, frac_chars_dupe_9grams 0.088051, frac_chars_dupe_10grams 0.078253, frac_chars_replacement_symbols 0, frac_chars_digital 0.002177, frac_chars_whitespace 0.2299, size_file_byte 11,331, num_lines 355, num_chars_line_max 120, num_chars_line_mean 31.91831, frac_chars_alphabet 0.839904, frac_chars_comments 0.064602, cate_xml_start 0, frac_lines_dupe_lines 0.228856, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.159669, frac_chars_long_word_length 0.00328, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0.

Python-specific (`qsc_codepython_*`): cate_ast 1, frac_lines_func_ratio 0.054726, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.049751, frac_lines_simplefunc 0, score_lines_no_logic 0.129353, frac_lines_print 0.019901.

Hit counters: all 41 repeated `qsc_*` columns are 0 (null in the two null-typed columns); effective 1, hits 0.
**Row 5:** hexsha `7c3f358dbfca775c5fb0e3b7866f5656395f8320` · size 8,749 · ext `py` · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | `WebIOPi-0.7.1/python/webiopi/devices/analog/__init__.py` | `MORIMOTO520212/Arm-crawler` | `95dca0ea9485e4c20a0910687362010604331b55` | ["MIT"] | 1 | 2020-04-25T00:55:45.000Z | 2020-04-25T00:55:45.000Z |
| max_issues | `WebIOPi-0.7.1/python/webiopi/devices/analog/__init__.py` | `MORIMOTO520212/Arm-crawler` | `95dca0ea9485e4c20a0910687362010604331b55` | ["MIT"] | 4 | 2015-05-28T23:20:13.000Z | 2015-05-28T23:24:01.000Z |
| max_forks | `services/webiopi/src/python/webiopi/devices/analog/__init__.py` | `creative-workflow/pi-setup` | `d6d28cb8d34ef71b1e8ac95dd94099bfad08837a` | ["MIT"] | 1 | 2022-03-29T01:58:02.000Z | 2022-03-29T01:58:02.000Z |

`content`:
```python
# Copyright 2012-2013 Eric Ptak - trouch.com
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

from webiopi.decorators.rest import request, response
from webiopi.utils.types import M_JSON


class ADC():
    def __init__(self, channelCount, resolution, vref):
        self._analogCount = channelCount
        self._analogResolution = resolution
        self._analogMax = 2**resolution - 1
        self._analogRef = vref

    def __family__(self):
        return "ADC"

    def checkAnalogChannel(self, channel):
        if not 0 <= channel < self._analogCount:
            raise ValueError("Channel %d out of range [%d..%d]" % (channel, 0, self._analogCount-1))

    def checkAnalogValue(self, value):
        if not 0 <= value <= self._analogMax:
            raise ValueError("Value %d out of range [%d..%d]" % (value, 0, self._analogMax))

    @request("GET", "analog/count")
    @response("%d")
    def analogCount(self):
        return self._analogCount

    @request("GET", "analog/resolution")
    @response("%d")
    def analogResolution(self):
        return self._analogResolution

    @request("GET", "analog/max")
    @response("%d")
    def analogMaximum(self):
        return int(self._analogMax)

    @request("GET", "analog/vref")
    @response("%.2f")
    def analogReference(self):
        return self._analogRef

    def __analogRead__(self, channel, diff):
        raise NotImplementedError

    @request("GET", "analog/%(channel)d/integer")
    @response("%d")
    def analogRead(self, channel, diff=False):
        self.checkAnalogChannel(channel)
        return self.__analogRead__(channel, diff)

    @request("GET", "analog/%(channel)d/float")
    @response("%.2f")
    def analogReadFloat(self, channel, diff=False):
        return self.analogRead(channel, diff) / float(self._analogMax)

    @request("GET", "analog/%(channel)d/volt")
    @response("%.2f")
    def analogReadVolt(self, channel, diff=False):
        if self._analogRef == 0:
            raise NotImplementedError
        return self.analogReadFloat(channel, diff) * self._analogRef

    @request("GET", "analog/*/integer")
    @response(contentType=M_JSON)
    def analogReadAll(self):
        values = {}
        for i in range(self._analogCount):
            values[i] = self.analogRead(i)
        return values

    @request("GET", "analog/*/float")
    @response(contentType=M_JSON)
    def analogReadAllFloat(self):
        values = {}
        for i in range(self._analogCount):
            values[i] = float("%.2f" % self.analogReadFloat(i))
        return values

    @request("GET", "analog/*/volt")
    @response(contentType=M_JSON)
    def analogReadAllVolt(self):
        values = {}
        for i in range(self._analogCount):
            values[i] = float("%.2f" % self.analogReadVolt(i))
        return values


class DAC(ADC):
    def __init__(self, channelCount, resolution, vref):
        ADC.__init__(self, channelCount, resolution, vref)

    def __family__(self):
        return "DAC"

    def __analogWrite__(self, channel, value):
        raise NotImplementedError

    @request("POST", "analog/%(channel)d/integer/%(value)d")
    @response("%d")
    def analogWrite(self, channel, value):
        self.checkAnalogChannel(channel)
        self.checkAnalogValue(value)
        self.__analogWrite__(channel, value)
        return self.analogRead(channel)

    @request("POST", "analog/%(channel)d/float/%(value)f")
    @response("%.2f")
    def analogWriteFloat(self, channel, value):
        self.analogWrite(channel, int(value * self._analogMax))
        return self.analogReadFloat(channel)

    @request("POST", "analog/%(channel)d/volt/%(value)f")
    @response("%.2f")
    def analogWriteVolt(self, channel, value):
        self.analogWriteFloat(channel, value / self._analogRef)
        return self.analogReadVolt(channel)


class PWM():
    def __init__(self, channelCount, resolution, frequency):
        self._pwmCount = channelCount
        self._pwmResolution = resolution
        self._pwmMax = 2**resolution - 1
        self.frequency = frequency
        self.period = 1.0/frequency

        # Futaba servos standard
        self.servo_neutral = 0.00152
        self.servo_travel_time = 0.0004
        self.servo_travel_angle = 45.0

        self.reverse = [False for i in range(channelCount)]

    def __family__(self):
        return "PWM"

    def checkPWMChannel(self, channel):
        if not 0 <= channel < self._pwmCount:
            raise ValueError("Channel %d out of range [%d..%d]" % (channel, 0, self._pwmCount-1))

    def checkPWMValue(self, value):
        if not 0 <= value <= self._pwmMax:
            raise ValueError("Value %d out of range [%d..%d]" % (value, 0, self._pwmMax))

    def __pwmRead__(self, channel):
        raise NotImplementedError

    def __pwmWrite__(self, channel, value):
        raise NotImplementedError

    @request("GET", "pwm/count")
    @response("%d")
    def pwmCount(self):
        return self._pwmCount

    @request("GET", "pwm/resolution")
    @response("%d")
    def pwmResolution(self):
        return self._pwmResolution

    @request("GET", "pwm/max")
    @response("%d")
    def pwmMaximum(self):
        return int(self._pwmMax)

    @request("GET", "pwm/%(channel)d/integer")
    @response("%d")
    def pwmRead(self, channel):
        self.checkPWMChannel(channel)
        return self.__pwmRead__(channel)

    @request("GET", "pwm/%(channel)d/float")
    @response("%.2f")
    def pwmReadFloat(self, channel):
        return self.pwmRead(channel) / float(self._pwmMax)

    @request("POST", "pwm/%(channel)d/integer/%(value)d")
    @response("%d")
    def pwmWrite(self, channel, value):
        self.checkPWMChannel(channel)
        self.checkPWMValue(value)
        self.__pwmWrite__(channel, value)
        return self.pwmRead(channel)

    @request("POST", "pwm/%(channel)d/float/%(value)f")
    @response("%.2f")
    def pwmWriteFloat(self, channel, value):
        self.pwmWrite(channel, int(value * self._pwmMax))
        return self.pwmReadFloat(channel)

    def getReverse(self, channel):
        self.checkChannel(channel)
        return self.reverse[channel]

    def setReverse(self, channel, value):
        self.checkChannel(channel)
        self.reverse[channel] = value
        return value

    def RatioToAngle(self, value):
        f = value
        f *= self.period
        f -= self.servo_neutral
        f *= self.servo_travel_angle
        f /= self.servo_travel_time
        return f

    def AngleToRatio(self, value):
        f = value
        f *= self.servo_travel_time
        f /= self.servo_travel_angle
        f += self.servo_neutral
        f /= self.period
        return f

    @request("GET", "pwm/%(channel)d/angle")
    @response("%.2f")
    def pwmReadAngle(self, channel):
        f = self.pwmReadFloat(channel)
        f = self.RatioToAngle(f)
        if self.reverse[channel]:
            f = -f
        else:
            f = f
        return f

    @request("POST", "pwm/%(channel)d/angle/%(value)f")
    @response("%.2f")
    def pwmWriteAngle(self, channel, value):
        if self.reverse[channel]:
            f = -value
        else:
            f = value
        f = self.AngleToRatio(f)
        self.pwmWriteFloat(channel, f)
        return self.pwmReadAngle(channel)

    @request("GET", "pwm/*")
    @response(contentType=M_JSON)
    def pwmWildcard(self):
        values = {}
        for i in range(self._pwmCount):
            val = self.pwmReadFloat(i)
            values[i] = {}
            values[i]["float"] = float("%.2f" % val)
            values[i]["angle"] = float("%.2f" % self.RatioToAngle(val))
        return values


DRIVERS = {}
DRIVERS["ads1x1x"] = ["ADS1014", "ADS1015", "ADS1114", "ADS1115"]
DRIVERS["mcp3x0x"] = ["MCP3002", "MCP3004", "MCP3008", "MCP3204", "MCP3208"]
DRIVERS["mcp4725"] = ["MCP4725"]
DRIVERS["mcp48XX"] = ["MCP4802", "MCP4812", "MCP4822"]
DRIVERS["mcp492X"] = ["MCP4921", "MCP4922"]
DRIVERS["pca9685"] = ["PCA9685"]
DRIVERS["pcf8591"] = ["PCF8591"]
```
Stats: `avg_line_length` 32.403704 · `max_line_length` 100 · `alphanum_fraction` 0.606927.

Quality signals (`qsc_code_*`, `_quality_signal` suffix dropped): num_words 957, num_chars 8,749, mean_word_length 5.430512, frac_words_unique 0.196447, frac_chars_top_2grams 0.042332, frac_chars_top_3grams 0.030787, frac_chars_top_4grams 0.02309, frac_chars_dupe_5grams 0.374062, frac_chars_dupe_6grams 0.214547, frac_chars_dupe_7grams 0.14085, frac_chars_dupe_8grams 0.100635, frac_chars_dupe_9grams 0.063691, frac_chars_dupe_10grams 0.063691, frac_chars_replacement_symbols 0, frac_chars_digital 0.022366, frac_chars_whitespace 0.259001, size_file_byte 8,749, num_lines 269, num_chars_line_max 101, num_chars_line_mean 32.524164, frac_chars_alphabet 0.779269, frac_chars_comments 0.069151, cate_xml_start 0, frac_lines_dupe_lines 0.287081, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.113449, frac_chars_long_word_length 0.041344, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0.

Python-specific (`qsc_codepython_*`): cate_ast 1, frac_lines_func_ratio 0.196172, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.009569, frac_lines_simplefunc 0.057416, score_lines_no_logic 0.363636, frac_lines_print 0.

Hit counters: all 41 repeated `qsc_*` columns are 0 (null in the two null-typed columns); effective 1, hits 0.
**Row 6:** hexsha `7c3f8ea43badcc3a68b54f56814ef9f940a1de25` · size 3,142 · ext `py` · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | `osc_choochoo/tests/v1/test_train.py` | `dtroyer/osc-loco` | `57119ab84528933da9cbcd57dcd4f5b842a58186` | ["Apache-2.0"] | 1 | 2019-01-15T10:02:06.000Z | 2019-01-15T10:02:06.000Z |
| max_issues | `osc_choochoo/tests/v1/test_train.py` | `dtroyer/osc-loco` | `57119ab84528933da9cbcd57dcd4f5b842a58186` | ["Apache-2.0"] | 1 | 2018-03-03T13:28:09.000Z | 2018-03-03T13:28:09.000Z |
| max_forks | `osc_choochoo/tests/v1/test_train.py` | `dtroyer/osc-loco` | `57119ab84528933da9cbcd57dcd4f5b842a58186` | ["Apache-2.0"] | 1 | 2018-03-03T12:31:24.000Z | 2018-03-03T12:31:24.000Z |

`content`:
```python
# Copyright 2013 Nebula Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#

import mock
import os

from osc_choochoo.tests import base
from osc_choochoo.tests import fakes
from osc_choochoo.v1 import train

# Load the plugin init module for the plugin list and show commands
plugin_name = 'osc_choochoo'
plugin_client = 'osc_choochoo.plugin'


class FakeTrainV1Client(object):
    def __init__(self, **kwargs):
        self.auth_token = kwargs['token']
        self.management_url = kwargs['endpoint']


class TestTrainV1(base.TestCommand):

    def setUp(self):
        super(TestTrainV1, self).setUp()
        self.app.client_manager.osc_choochoo = FakeTrainV1Client(
            endpoint=fakes.AUTH_URL,
            token=fakes.AUTH_TOKEN,
        )


class TestTrainList(TestTrainV1):

    def setUp(self):
        super(TestTrainList, self).setUp()
        # Get the command object to test
        self.cmd = train.TrainList(self.app, None)

    def test_train_list(self):
        arglist = []
        verifylist = []
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        collist = ('Name', )
        datalist = ['1.txt', '2.txt']
        with mock.patch('os.listdir') as mock_list:
            mock_list.return_value = datalist
            # DisplayCommandBase.take_action() returns two tuples
            columns, data = self.cmd.take_action(parsed_args)
        self.assertEqual(collist, columns)
        for d in data:
            self.assertTrue(d[0] + '.txt' in datalist)


class TestTrainShow(TestTrainV1):

    def setUp(self):
        super(TestTrainShow, self).setUp()
        # Get the command object to test
        self.cmd = train.TrainShow(self.app, None)

    def test_train_show(self):
        arglist = [
            plugin_name,
        ]
        verifylist = [
            ('name', plugin_name),
        ]
        parsed_args = self.check_parser(self.cmd, arglist, verifylist)
        collist = ['name', 'data']
        datalist = [
            plugin_name,
            'dummy',
        ]
        with mock.patch('io.open') as mock_open:
            mock_open.return_value = mock.MagicMock()
            m_file = mock_open.return_value.__enter__.return_value
            m_file.read.return_value = 'dummy'
            columns, data = self.cmd.take_action(parsed_args)
            mock_open.assert_called_once_with(
                os.path.join(
                    train.DATA_PATH,
                    plugin_name + '.txt',
                )
            )
        self.assertEqual(collist, columns)
        self.assertEqual(datalist, data)
```
Stats: `avg_line_length` 29.092593 · `max_line_length` 77 · `alphanum_fraction` 0.624761.

Quality signals (`qsc_code_*`, `_quality_signal` suffix dropped): num_words 375, num_chars 3,142, mean_word_length 5.088, frac_words_unique 0.389333, frac_chars_top_2grams 0.031447, frac_chars_top_3grams 0.023585, frac_chars_top_4grams 0.02673, frac_chars_dupe_5grams 0.231656, frac_chars_dupe_6grams 0.175052, frac_chars_dupe_7grams 0.150943, frac_chars_dupe_8grams 0.150943, frac_chars_dupe_9grams 0.111111, frac_chars_dupe_10grams 0.111111, frac_chars_replacement_symbols 0, frac_chars_digital 0.007996, frac_chars_whitespace 0.283577, size_file_byte 3,142, num_lines 107, num_chars_line_max 78, num_chars_line_mean 29.364486, frac_chars_alphabet 0.839627, frac_chars_comments 0.237747, cate_xml_start 0, frac_lines_dupe_lines 0.171875, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.044229, frac_chars_long_word_length 0, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0.078125.

Python-specific (`qsc_codepython_*`): cate_ast 1, frac_lines_func_ratio 0.09375, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.078125, frac_lines_simplefunc 0, score_lines_no_logic 0.234375, frac_lines_print 0.

Hit counters: all 41 repeated `qsc_*` columns are 0 (null in the two null-typed columns); effective 1, hits 0.
**Row 7:** hexsha `7c4069a9b6ece4c3708be4cbcbdf02893a94dc6d` · size 1,134 · ext `py` · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | `scripts/firefox-wrapper.py` | `darioncassel/OmniCrawl` | `62317e07340df7eb758a1b8de80679b6d4293d49` | ["MIT"] | 2 | 2021-12-02T20:30:23.000Z | 2022-01-05T01:38:45.000Z |
| max_issues | `scripts/firefox-wrapper.py` | `darioncassel/OmniCrawl` | `62317e07340df7eb758a1b8de80679b6d4293d49` | ["MIT"] | null | null | null |
| max_forks | `scripts/firefox-wrapper.py` | `darioncassel/OmniCrawl` | `62317e07340df7eb758a1b8de80679b6d4293d49` | ["MIT"] | 4 | 2021-09-16T01:28:05.000Z | 2022-03-20T21:38:06.000Z |

`content`:
```python
#!/usr/bin/env python3
import sys
from os.path import dirname, abspath, join
import subprocess

# Note this does not resolve symbolic links
# https://stackoverflow.com/a/17806123
FIREFOX_BINARY = join(dirname(abspath(__file__)), 'firefox')

argvs = list(sys.argv)
argvs[0] = FIREFOX_BINARY

# geckdriver will run `firefox -version` first to check the version
if len(sys.argv) == 2 and sys.argv[1] == '-version':
    subprocess.check_call(argvs)
    exit(0)

# First search for the -tmpprofile option
new_profile_path = None
for idx, argv in enumerate(sys.argv):
    if argv == '-tmpprofile':
        new_profile_path = sys.argv[idx + 1]
        break

# If it's present, replace profile with tmp_profile
if new_profile_path:
    for idx, argv in enumerate(sys.argv):
        if argv == '-profile':
            old_profile_path = sys.argv[idx + 1]
            subprocess.check_call(['rm', '-r', new_profile_path])
            subprocess.check_call(['cp', '-r', old_profile_path, new_profile_path])
            argvs[idx+1] = new_profile_path
            break

# Firefox will ignore the -tmpprofile option
subprocess.check_call(argvs)
```
Stats: `avg_line_length` 30.648649 · `max_line_length` 83 · `alphanum_fraction` 0.686067.

Quality signals (`qsc_code_*`, `_quality_signal` suffix dropped): num_words 162, num_chars 1,134, mean_word_length 4.635802, frac_words_unique 0.425926, frac_chars_top_2grams 0.117177, frac_chars_top_3grams 0.111851, frac_chars_top_4grams 0.063915, frac_chars_dupe_5grams 0.149134, frac_chars_dupe_6grams 0.149134, frac_chars_dupe_7grams 0.090546, frac_chars_dupe_8grams 0.090546, frac_chars_dupe_9grams 0.090546, frac_chars_dupe_10grams 0, frac_chars_replacement_symbols 0, frac_chars_digital 0.017641, frac_chars_whitespace 0.200176, size_file_byte 1,134, num_lines 36, num_chars_line_max 84, num_chars_line_mean 31.5, frac_chars_alphabet 0.810364, frac_chars_comments 0.263668, cate_xml_start 0, frac_lines_dupe_lines 0.26087, cate_autogen 0, frac_lines_long_string 0, frac_chars_string_length 0.050725, frac_chars_long_word_length 0, frac_lines_string_concat 0, cate_encoded_data 0, frac_chars_hex_words 0, frac_lines_prompt_comments 0, frac_lines_assert 0.

Python-specific (`qsc_codepython_*`): cate_ast 1, frac_lines_func_ratio 0, cate_var_zero false, frac_lines_pass 0, frac_lines_import 0.130435, frac_lines_simplefunc 0, score_lines_no_logic 0.130435, frac_lines_print 0.

Hit counters: all 41 repeated `qsc_*` columns are 0 (null in the two null-typed columns); effective 1, hits 0.
**Row 8:** hexsha `7c409487091269b7e314c05627f47667f44be8cd` · size 37,177 · ext `py` · lang Python

| | repo_path | repo_name | head_hexsha | licenses | count | event_min_datetime | event_max_datetime |
|---|---|---|---|---|---|---|---|
| max_stars | `src/pretix/base/payment.py` | `whiteyhat/pretix` | `34d1fcf077a92765cd796d81d1aa6695d4801a9a` | ["ECL-2.0", "Apache-2.0"] | null | null | null |
| max_issues | `src/pretix/base/payment.py` | `whiteyhat/pretix` | `34d1fcf077a92765cd796d81d1aa6695d4801a9a` | ["ECL-2.0", "Apache-2.0"] | null | null | null |
| max_forks | `src/pretix/base/payment.py` | `whiteyhat/pretix` | `34d1fcf077a92765cd796d81d1aa6695d4801a9a` | ["ECL-2.0", "Apache-2.0"] | null | null | null |

`content` (the row is cut off mid-file in this dump):
import json
import logging
from collections import OrderedDict
from decimal import ROUND_HALF_UP, Decimal
from typing import Any, Dict, Union
import pytz
from django import forms
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from django.dispatch import receiver
from django.forms import Form
from django.http import HttpRequest
from django.template.loader import get_template
from django.utils.timezone import now
from django.utils.translation import pgettext_lazy, ugettext_lazy as _
from django_countries import Countries
from i18nfield.forms import I18nFormField, I18nTextarea, I18nTextInput
from i18nfield.strings import LazyI18nString
from pretix.base.forms import PlaceholderValidator
from pretix.base.models import (
CartPosition, Event, InvoiceAddress, Order, OrderPayment, OrderRefund,
Quota,
)
from pretix.base.reldate import RelativeDateField, RelativeDateWrapper
from pretix.base.settings import SettingsSandbox
from pretix.base.signals import register_payment_providers
from pretix.base.templatetags.money import money_filter
from pretix.base.templatetags.rich_text import rich_text
from pretix.helpers.money import DecimalTextInput
from pretix.presale.views import get_cart_total
from pretix.presale.views.cart import cart_session, get_or_create_cart_id
logger = logging.getLogger(__name__)
class PaymentProviderForm(Form):
def clean(self):
cleaned_data = super().clean()
for k, v in self.fields.items():
val = cleaned_data.get(k)
if v._required and not val:
self.add_error(k, _('This field is required.'))
class BasePaymentProvider:
"""
This is the base class for all payment providers.
"""
def __init__(self, event: Event):
self.event = event
self.settings = SettingsSandbox('payment', self.identifier, event)
# Default values
if self.settings.get('_fee_reverse_calc') is None:
self.settings.set('_fee_reverse_calc', True)
def __str__(self):
return self.identifier
@property
def is_implicit(self) -> bool:
"""
Returns whether or whether not this payment provider is an "implicit" payment provider that will
*always* and unconditionally be used if is_allowed() returns True and does not require any input.
This is intended to be used by the FreePaymentProvider, which skips the payment choice page.
By default, this returns ``False``. Please do not set this if you don't know exactly what you are doing.
"""
return False
@property
def is_meta(self) -> bool:
"""
Returns whether or whether not this payment provider is a "meta" payment provider that only
works as a settings holder for other payment providers and should never be used directly. This
is a trick to implement payment gateways with multiple payment methods but unified payment settings.
Take a look at the built-in stripe provider to see how this might be used.
By default, this returns ``False``.
"""
return False
@property
def is_enabled(self) -> bool:
"""
Returns whether or whether not this payment provider is enabled.
By default, this is determined by the value of the ``_enabled`` setting.
"""
return self.settings.get('_enabled', as_type=bool)
@property
def test_mode_message(self) -> str:
"""
If this property is set to a string, this will be displayed when this payment provider is selected
while the event is in test mode. You should use it to explain to your user how your plugin behaves,
e.g. if it falls back to a test mode automatically as well or if actual payments will be performed.
If you do not set this (or, return ``None``), pretix will show a default message warning the user
that this plugin does not support test mode payments.
"""
return None
def calculate_fee(self, price: Decimal) -> Decimal:
"""
Calculate the fee for this payment provider which will be added to
final price before fees (but after taxes). It should include any taxes.
The default implementation makes use of the setting ``_fee_abs`` for an
absolute fee and ``_fee_percent`` for a percentage.
:param price: The total value without the payment method fee, after taxes.
"""
fee_abs = self.settings.get('_fee_abs', as_type=Decimal, default=0)
fee_percent = self.settings.get('_fee_percent', as_type=Decimal, default=0)
fee_reverse_calc = self.settings.get('_fee_reverse_calc', as_type=bool, default=True)
places = settings.CURRENCY_PLACES.get(self.event.currency, 2)
if fee_reverse_calc:
return ((price + fee_abs) * (1 / (1 - fee_percent / 100)) - price).quantize(
Decimal('1') / 10 ** places, ROUND_HALF_UP
)
else:
return (price * fee_percent / 100 + fee_abs).quantize(
Decimal('1') / 10 ** places, ROUND_HALF_UP
)
@property
def verbose_name(self) -> str:
"""
A human-readable name for this payment provider. This should
be short but self-explaining. Good examples include 'Bank transfer'
and 'Credit card via Stripe'.
"""
raise NotImplementedError() # NOQA
@property
def public_name(self) -> str:
"""
A human-readable name for this payment provider to be shown to the public.
This should be short but self-explaining. Good examples include 'Bank transfer'
and 'Credit card', but 'Credit card via Stripe' might be to explicit. By default,
this is the same as ``verbose_name``
"""
return self.verbose_name
@property
def identifier(self) -> str:
"""
A short and unique identifier for this payment provider.
This should only contain lowercase letters and in most
cases will be the same as your package name.
"""
raise NotImplementedError() # NOQA
@property
def abort_pending_allowed(self) -> bool:
"""
Whether or not a user can abort a payment in pending start to switch to another
payment method. This returns ``False`` by default which is no guarantee that
aborting a pending payment can never happen, it just hides the frontend button
to avoid users accidentally committing double payments.
"""
return False
@property
def settings_form_fields(self) -> dict:
"""
When the event's administrator visits the event configuration
page, this method is called to return the configuration fields available.
It should therefore return a dictionary where the keys should be (unprefixed)
settings keys and the values should be corresponding Django form fields.
The default implementation returns the appropriate fields for the ``_enabled``,
``_fee_abs``, ``_fee_percent`` and ``_availability_date`` settings mentioned above.
We suggest that you return an ``OrderedDict`` object instead of a dictionary
and make use of the default implementation. Your implementation could look
like this::
@property
def settings_form_fields(self):
return OrderedDict(
list(super().settings_form_fields.items()) + [
('bank_details',
forms.CharField(
widget=forms.Textarea,
label=_('Bank account details'),
required=False
))
]
)
.. WARNING:: It is highly discouraged to alter the ``_enabled`` field of the default
implementation.
"""
places = settings.CURRENCY_PLACES.get(self.event.currency, 2)
d = OrderedDict([
('_enabled',
forms.BooleanField(
label=_('Enable payment method'),
required=False,
)),
('_availability_date',
RelativeDateField(
label=_('Available until'),
help_text=_('Users will not be able to choose this payment provider after the given date.'),
required=False,
)),
('_invoice_text',
I18nFormField(
label=_('Text on invoices'),
help_text=_('Will be printed just below the payment figures and above the closing text on invoices. '
'This will only be used if the invoice is generated before the order is paid. If the '
'invoice is generated later, it will show a text stating that it has already been paid.'),
required=False,
widget=I18nTextarea,
widget_kwargs={'attrs': {'rows': '2'}}
)),
('_total_min',
forms.DecimalField(
label=_('Minimum order total'),
help_text=_('This payment will be available only if the order total is equal to or exceeds the given '
'value. The order total for this purpose may be computed without taking the fees imposed '
'by this payment method into account.'),
localize=True,
required=False,
decimal_places=places,
widget=DecimalTextInput(places=places)
)),
('_total_max',
forms.DecimalField(
label=_('Maximum order total'),
help_text=_('This payment will be available only if the order total is equal to or below the given '
'value. The order total for this purpose may be computed without taking the fees imposed '
'by this payment method into account.'),
localize=True,
required=False,
decimal_places=places,
widget=DecimalTextInput(places=places)
)),
('_fee_abs',
forms.DecimalField(
label=_('Additional fee'),
help_text=_('Absolute value'),
localize=True,
required=False,
decimal_places=places,
widget=DecimalTextInput(places=places)
)),
('_fee_percent',
forms.DecimalField(
label=_('Additional fee'),
help_text=_('Percentage of the order total.'),
localize=True,
required=False,
)),
('_fee_reverse_calc',
forms.BooleanField(
label=_('Calculate the fee from the total value including the fee.'),
help_text=_('We recommend to enable this if you want your users to pay the payment fees of your '
'payment provider. <a href="{docs_url}" target="_blank" rel="noopener">Click here '
'for detailed information on what this does.</a> Don\'t forget to set the correct fees '
'above!').format(docs_url='https://docs.pretix.eu/en/latest/user/payments/fees.html'),
required=False
)),
('_restricted_countries',
forms.MultipleChoiceField(
label=_('Restrict to countries'),
choices=Countries(),
help_text=_('Only allow choosing this payment provider for invoice addresses in the selected '
'countries. If you don\'t select any country, all countries are allowed. This is only '
'enabled if the invoice address is required.'),
widget=forms.CheckboxSelectMultiple(
attrs={'class': 'scrolling-multiple-choice'}
),
required=False,
disabled=not self.event.settings.invoice_address_required
)),
])
d['_restricted_countries']._as_type = list
return d
def settings_form_clean(self, cleaned_data):
"""
Overriding this method allows you to inject custom validation into the settings form.
:param cleaned_data: Form data as per previous validations.
:return: Please return the modified cleaned_data
"""
return cleaned_data
def settings_content_render(self, request: HttpRequest) -> str:
"""
When the event's administrator visits the event configuration
page, this method is called. It may return HTML containing additional information
that is displayed below the form fields configured in ``settings_form_fields``.
"""
return ""
def render_invoice_text(self, order: Order, payment: OrderPayment) -> str:
"""
This is called when an invoice for an order with this payment provider is generated.
The default implementation returns the content of the _invoice_text configuration
variable (an I18nString), or an empty string if unconfigured. For paid orders, the
default implementation always renders a string stating that the invoice is already paid.
"""
if order.status == Order.STATUS_PAID:
return pgettext_lazy('invoice', 'The payment for this invoice has already been received.')
return self.settings.get('_invoice_text', as_type=LazyI18nString, default='')
@property
def payment_form_fields(self) -> dict:
"""
This is used by the default implementation of :py:meth:`payment_form`.
It should return an object similar to :py:attr:`settings_form_fields`.
The default implementation returns an empty dictionary.
"""
return {}
def payment_form(self, request: HttpRequest) -> Form:
"""
This is called by the default implementation of :py:meth:`payment_form_render`
to obtain the form that is displayed to the user during the checkout
process. The default implementation constructs the form using
:py:attr:`payment_form_fields` and sets appropriate prefixes for the form
and all fields and fills the form with data form the user's session.
If you overwrite this, we strongly suggest that you inherit from
``PaymentProviderForm`` (from this module) that handles some nasty issues about
required fields for you.
"""
form = PaymentProviderForm(
data=(request.POST if request.method == 'POST' and request.POST.get("payment") == self.identifier else None),
prefix='payment_%s' % self.identifier,
initial={
k.replace('payment_%s_' % self.identifier, ''): v
for k, v in request.session.items()
if k.startswith('payment_%s_' % self.identifier)
}
)
form.fields = self.payment_form_fields
for k, v in form.fields.items():
v._required = v.required
v.required = False
v.widget.is_required = False
return form
def _is_still_available(self, now_dt=None, cart_id=None, order=None):
now_dt = now_dt or now()
tz = pytz.timezone(self.event.settings.timezone)
availability_date = self.settings.get('_availability_date', as_type=RelativeDateWrapper)
if availability_date:
if self.event.has_subevents and cart_id:
availability_date = min([
availability_date.datetime(se).date()
for se in self.event.subevents.filter(
id__in=CartPosition.objects.filter(
cart_id=cart_id, event=self.event
).values_list('subevent', flat=True)
)
])
elif self.event.has_subevents and order:
availability_date = min([
availability_date.datetime(se).date()
for se in self.event.subevents.filter(
id__in=order.positions.values_list('subevent', flat=True)
)
])
elif self.event.has_subevents:
logger.error('Payment provider is not subevent-ready.')
return False
else:
availability_date = availability_date.datetime(self.event).date()
return availability_date >= now_dt.astimezone(tz).date()
return True
def is_allowed(self, request: HttpRequest, total: Decimal=None) -> bool:
"""
You can use this method to disable this payment provider for certain groups
of users, products or other criteria. If this method returns ``False``, the
user will not be able to select this payment method. This will only be called
during checkout, not on retrying.
The default implementation checks for the _availability_date setting to be either unset or in the future
and for the _total_max and _total_min requirements to be met. It also checks the ``_restrict_countries``
setting.
:param total: The total value without the payment method fee, after taxes.
.. versionchanged:: 1.17.0
The ``total`` parameter has been added. For backwards compatibility, this method is called again
without this parameter if it raises a ``TypeError`` on first try.
"""
timing = self._is_still_available(cart_id=get_or_create_cart_id(request))
pricing = True
if (self.settings._total_max is not None or self.settings._total_min is not None) and total is None:
raise ImproperlyConfigured('This payment provider does not support maximum or minimum amounts.')
if self.settings._total_max is not None:
pricing = pricing and total <= Decimal(self.settings._total_max)
if self.settings._total_min is not None:
pricing = pricing and total >= Decimal(self.settings._total_min)
def get_invoice_address():
if not hasattr(request, '_checkout_flow_invoice_address'):
cs = cart_session(request)
iapk = cs.get('invoice_address')
if not iapk:
request._checkout_flow_invoice_address = InvoiceAddress()
else:
try:
request._checkout_flow_invoice_address = InvoiceAddress.objects.get(pk=iapk, order__isnull=True)
except InvoiceAddress.DoesNotExist:
request._checkout_flow_invoice_address = InvoiceAddress()
return request._checkout_flow_invoice_address
if self.event.settings.invoice_address_required:
restricted_countries = self.settings.get('_restricted_countries', as_type=list)
if restricted_countries:
ia = get_invoice_address()
if str(ia.country) not in restricted_countries:
return False
return timing and pricing
def payment_form_render(self, request: HttpRequest, total: Decimal) -> str:
"""
When the user selects this provider as their preferred payment method,
they will be shown the HTML you return from this method.
The default implementation will call :py:meth:`payment_form`
and render the returned form. If your payment method doesn't require
the user to fill out form fields, you should just return a paragraph
of explanatory text.
"""
form = self.payment_form(request)
template = get_template('pretixpresale/event/checkout_payment_form_default.html')
ctx = {'request': request, 'form': form}
return template.render(ctx)
def checkout_confirm_render(self, request) -> str:
"""
If the user has successfully filled in their payment data, they will be redirected
to a confirmation page which lists all details of their order for a final review.
This method should return the HTML which should be displayed inside the
'Payment' box on this page.
In most cases, this should include a short summary of the user's input and
a short explanation on how the payment process will continue.
"""
raise NotImplementedError() # NOQA
def payment_pending_render(self, request: HttpRequest, payment: OrderPayment) -> str:
"""
Render customer-facing instructions on how to proceed with a pending payment
:return: HTML
"""
return ""
def checkout_prepare(self, request: HttpRequest, cart: Dict[str, Any]) -> Union[bool, str]:
"""
Will be called after the user selects this provider as their payment method.
If you provided a form to the user to enter payment data, this method should
at least store the user's input into their session.
This method should return ``False`` if the user's input was invalid, ``True``
if the input was valid and the frontend should continue with default behavior
or a string containing a URL if the user should be redirected somewhere else.
On errors, you should use Django's message framework to display an error message
to the user (or the normal form validation error messages).
The default implementation stores the input into the form returned by
:py:meth:`payment_form` in the user's session.
If your payment method requires you to redirect the user to an external provider,
this might be the place to do so.
.. IMPORTANT:: If this is called, the user has not yet confirmed their order.
You may NOT do anything which actually moves money.
:param cart: This dictionary contains at least the following keys:
positions:
A list of ``CartPosition`` objects that are annotated with the special
attributes ``count`` and ``total`` because multiple objects of the
same content are grouped into one.
raw:
The raw list of ``CartPosition`` objects in the user's cart
total:
The overall total *including* the fee for the payment method.
payment_fee:
The fee for the payment method.
"""
form = self.payment_form(request)
if form.is_valid():
for k, v in form.cleaned_data.items():
request.session['payment_%s_%s' % (self.identifier, k)] = v
return True
else:
return False
def payment_is_valid_session(self, request: HttpRequest) -> bool:
"""
This is called at the time the user tries to place the order. It should return
``True`` if the user's session is valid and all data your payment provider requires
in future steps is present.
"""
raise NotImplementedError() # NOQA
def execute_payment(self, request: HttpRequest, payment: OrderPayment) -> str:
"""
After the user has confirmed their purchase, this method will be called to complete
the payment process. This is the place to actually move the money if applicable.
You will be passed an :py:class:`pretix.base.models.OrderPayment` object that contains
the amount of money that should be paid.
If you need any special behavior, you can return a string
containing the URL the user will be redirected to. If you are done with your process
you should return the user to the order's detail page.
If the payment is completed, you should call ``payment.confirm()``. Please note that this might
raise a ``Quota.QuotaExceededException`` if (and only if) the payment term of this order is over and
some of the items are sold out. You should use the exception message to display a meaningful error
to the user.
The default implementation just returns ``None`` and therefore leaves the
order unpaid. The user will be redirected to the order's detail page by default.
On errors, you should raise a ``PaymentException``.
:param request: The HTTP request
:param payment: An ``OrderPayment`` instance
"""
return None
def order_pending_mail_render(self, order: Order, payment: OrderPayment) -> str:
"""
After the user has submitted their order, they will receive a confirmation
email. You can return a string from this method if you want to add additional
information to this email.
:param order: The order object
:param payment: The payment object
"""
return ""
def order_change_allowed(self, order: Order) -> bool:
"""
Will be called to check whether it is allowed to change the payment method of
an order to this one.
The default implementation checks for the _availability_date setting to be either unset or in the future,
as well as for the _total_max, _total_min and _restricted_countries settings.
:param order: The order object
"""
ps = order.pending_sum
if self.settings._total_max is not None and ps > Decimal(self.settings._total_max):
return False
if self.settings._total_min is not None and ps < Decimal(self.settings._total_min):
return False
restricted_countries = self.settings.get('_restricted_countries', as_type=list)
if restricted_countries:
try:
ia = order.invoice_address
except InvoiceAddress.DoesNotExist:
return True
else:
if str(ia.country) not in restricted_countries:
return False
return self._is_still_available(order=order)
def payment_prepare(self, request: HttpRequest, payment: OrderPayment) -> Union[bool, str]:
"""
Will be called if the user retries paying an unpaid order (e.g. after filling in
the form returned by :py:meth:`payment_form`) or if the user changes the payment
method.
It should return and report errors the same way as :py:meth:`checkout_prepare`, but
receives an ``Order`` object instead of a cart object.
Note: The ``Order`` object given to this method might be different from the version
stored in the database as its total will already contain the payment fee for the
new payment method.
"""
form = self.payment_form(request)
if form.is_valid():
for k, v in form.cleaned_data.items():
request.session['payment_%s_%s' % (self.identifier, k)] = v
return True
else:
return False
def payment_control_render(self, request: HttpRequest, payment: OrderPayment) -> str:
"""
Will be called if the *event administrator* views the details of a payment.
It should return HTML code containing information regarding the current payment
status and, if applicable, next steps.
The default implementation returns the verbose name of the payment provider.
:param payment: The payment object
"""
return ''
def payment_refund_supported(self, payment: OrderPayment) -> bool:
"""
Will be called to check if the provider supports automatic refunding for this
payment.
"""
return False
def payment_partial_refund_supported(self, payment: OrderPayment) -> bool:
"""
Will be called to check if the provider supports automatic partial refunding for this
payment.
"""
return False
def execute_refund(self, refund: OrderRefund):
"""
Will be called to execute a refund. Note that refunds have an ``amount`` property and can be partial.
This should transfer the money back (if possible).
On success, you should call ``refund.done()``.
On failure, you should raise a PaymentException.
"""
raise PaymentException(_('Automatic refunds are not supported by this payment provider.'))
def shred_payment_info(self, obj: Union[OrderPayment, OrderRefund]):
"""
When personal data is removed from an event, this method is called to scrub payment-related data
from a payment or refund. By default, it removes all info from the ``info`` attribute. You can override
this behavior if you want to retain attributes that are not personal data on their own, e.g. a
reference to a transaction in an external system. You can also override this to scrub more data, e.g.
data from external sources that is saved in LogEntry objects or other places.
:param obj: The ``OrderPayment`` or ``OrderRefund`` object to scrub
"""
obj.info = '{}'
obj.save(update_fields=['info'])
class PaymentException(Exception):
pass
class FreeOrderProvider(BasePaymentProvider):
is_implicit = True
is_enabled = True
identifier = "free"
def checkout_confirm_render(self, request: HttpRequest) -> str:
return _("No payment is required as this order only includes products which are free of charge.")
def payment_is_valid_session(self, request: HttpRequest) -> bool:
return True
@property
def verbose_name(self) -> str:
return _("Free of charge")
def execute_payment(self, request: HttpRequest, payment: OrderPayment):
try:
payment.confirm(send_mail=False)
except Quota.QuotaExceededException as e:
raise PaymentException(str(e))
@property
def settings_form_fields(self) -> dict:
return {}
def is_allowed(self, request: HttpRequest, total: Decimal=None) -> bool:
from .services.cart import get_fees
total = get_cart_total(request)
total += sum([f.value for f in get_fees(self.event, request, total, None, None)])
return total == 0
def order_change_allowed(self, order: Order) -> bool:
return False
class BoxOfficeProvider(BasePaymentProvider):
is_implicit = True
is_enabled = True
identifier = "boxoffice"
verbose_name = _("Box office")
def execute_payment(self, request: HttpRequest, payment: OrderPayment):
try:
payment.confirm(send_mail=False)
except Quota.QuotaExceededException as e:
raise PaymentException(str(e))
@property
def settings_form_fields(self) -> dict:
return {}
def is_allowed(self, request: HttpRequest, total: Decimal=None) -> bool:
return False
def order_change_allowed(self, order: Order) -> bool:
return False
def payment_control_render(self, request, payment) -> str:
if not payment.info:
    return ''
payment_info = json.loads(payment.info)
template = get_template('pretixcontrol/boxoffice/payment.html')
ctx = {
'request': request,
'event': self.event,
'settings': self.settings,
'payment_info': payment_info,
'payment': payment,
'provider': self,
}
return template.render(ctx)
class ManualPayment(BasePaymentProvider):
identifier = 'manual'
verbose_name = _('Manual payment')
@property
def test_mode_message(self):
return _('In test mode, you can just manually mark this order as paid in the backend after it has been '
'created.')
@property
def is_implicit(self):
return 'pretix.plugins.manualpayment' not in self.event.plugins
def is_allowed(self, request: HttpRequest, total: Decimal=None):
return 'pretix.plugins.manualpayment' in self.event.plugins and super().is_allowed(request, total)
def order_change_allowed(self, order: Order):
return 'pretix.plugins.manualpayment' in self.event.plugins and super().order_change_allowed(order)
@property
def public_name(self):
return str(self.settings.get('public_name', as_type=LazyI18nString))
@property
def settings_form_fields(self):
d = OrderedDict(
[
('public_name', I18nFormField(
label=_('Payment method name'),
widget=I18nTextInput,
)),
('checkout_description', I18nFormField(
label=_('Payment process description during checkout'),
help_text=_('This text will be shown during checkout when the user selects this payment method. '
'It should give a short explanation on this payment method.'),
widget=I18nTextarea,
)),
('email_instructions', I18nFormField(
label=_('Payment process description in order confirmation emails'),
help_text=_('This text will be included for the {payment_info} placeholder in order confirmation '
            'mails. It should instruct the user on how to proceed with the payment. You can use '
            'the placeholders {order}, {total}, {currency} and {total_with_currency}'),
widget=I18nTextarea,
validators=[PlaceholderValidator(['{order}', '{total}', '{currency}', '{total_with_currency}'])],
)),
('pending_description', I18nFormField(
label=_('Payment process description for pending orders'),
help_text=_('This text will be shown on the order confirmation page for pending orders. '
            'It should instruct the user on how to proceed with the payment. You can use '
            'the placeholders {order}, {total}, {currency} and {total_with_currency}'),
widget=I18nTextarea,
validators=[PlaceholderValidator(['{order}', '{total}', '{currency}', '{total_with_currency}'])],
)),
] + list(super().settings_form_fields.items())
)
d.move_to_end('_enabled', last=False)
return d
def payment_form_render(self, request) -> str:
return rich_text(
str(self.settings.get('checkout_description', as_type=LazyI18nString))
)
def checkout_prepare(self, request, cart):
return True
def payment_is_valid_session(self, request):
return True
def checkout_confirm_render(self, request):
return self.payment_form_render(request)
def format_map(self, order):
return {
'order': order.code,
'total': order.total,
'currency': self.event.currency,
'total_with_currency': money_filter(order.total, self.event.currency)
}
def order_pending_mail_render(self, order) -> str:
msg = str(self.settings.get('email_instructions', as_type=LazyI18nString)).format_map(self.format_map(order))
return msg
def payment_pending_render(self, request, payment) -> str:
return rich_text(
str(self.settings.get('pending_description', as_type=LazyI18nString)).format_map(self.format_map(payment.order))
)
class OffsettingProvider(BasePaymentProvider):
is_enabled = True
identifier = "offsetting"
verbose_name = _("Offsetting")
is_implicit = True
def execute_payment(self, request: HttpRequest, payment: OrderPayment):
try:
payment.confirm()
except Quota.QuotaExceededException as e:
raise PaymentException(str(e))
def execute_refund(self, refund: OrderRefund):
code = refund.info_data['orders'][0]
try:
order = Order.objects.get(code=code, event__organizer=self.event.organizer)
except Order.DoesNotExist:
raise PaymentException(_('You entered an order that could not be found.'))
p = order.payments.create(
state=OrderPayment.PAYMENT_STATE_PENDING,
amount=refund.amount,
payment_date=now(),
provider='offsetting',
info=json.dumps({'orders': [refund.order.code]})
)
p.confirm()
@property
def settings_form_fields(self) -> dict:
return {}
def is_allowed(self, request: HttpRequest, total: Decimal=None) -> bool:
return False
def order_change_allowed(self, order: Order) -> bool:
return False
def payment_control_render(self, request: HttpRequest, payment: OrderPayment) -> str:
return _('Balanced against orders: %s') % ', '.join(payment.info_data['orders'])
@receiver(register_payment_providers, dispatch_uid="payment_free")
def register_payment_provider(sender, **kwargs):
return [FreeOrderProvider, BoxOfficeProvider, OffsettingProvider, ManualPayment]
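# ---------------------------------------------------------------------------
# A minimal sketch of a third-party provider built on the API documented above.
# It assumes only the module-level names already in scope here
# (BasePaymentProvider, PaymentException, Quota, OrderPayment,
# register_payment_providers, receiver); the class name, identifier and
# _charge_gateway() stub are hypothetical, not part of pretix itself.
# ---------------------------------------------------------------------------
def _charge_gateway(amount):
    """Hypothetical stand-in for a call to an external payment gateway."""
    pass


class ExampleGatewayProvider(BasePaymentProvider):
    identifier = 'examplegateway'
    verbose_name = 'Example gateway'

    def payment_is_valid_session(self, request) -> bool:
        # The default checkout_prepare() already stored everything we need.
        return True

    def checkout_confirm_render(self, request) -> str:
        return 'You will be charged once you confirm this order.'

    def execute_payment(self, request, payment: OrderPayment):
        try:
            _charge_gateway(amount=payment.amount)
            payment.confirm()  # may raise Quota.QuotaExceededException
        except Quota.QuotaExceededException as e:
            raise PaymentException(str(e))


@receiver(register_payment_providers, dispatch_uid='payment_examplegateway')
def register_example_provider(sender, **kwargs):
    return [ExampleGatewayProvider]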
=== 7c41415e2c7a8ce5f2d75904be89b903c2cdfef0 | tests/AssertFail/run.py | sag-tgo/EPL_assert_demo @ a43541e4472dfab7da6538ae9f220b5e042d158c | Apache-2.0 | 633 bytes | Python ===
from pysys.basetest import BaseTest
from apama.correlator import CorrelatorHelper
import os
class PySysTest(BaseTest):
def execute(self):
corr = CorrelatorHelper(self, name='correlator')
corr.start(logfile='correlator.log')
corr.injectEPL(os.getenv('APAMA_HOME','') + '/monitors/ManagementImpl.mon')
corr.injectEPL(os.getenv('APAMA_HOME','') + '/monitors/Management.mon')
corr.injectEPL('../../../src/Assert.mon')
corr.injectEPL('TestAssertFail.mon')
self.waitForGrep('correlator.log', 'Removed monitor TestAssertFail')
def validate(self):
self.assertGrep('correlator.log', r' (ERROR|WARN) .*', contains=False)
=== 7c419b5717a91fec1bbd9b0db3fdfc3ceb131303 | src/beast/python/beast/env/ReadEnvFile_test.py | Ziftr/stellard @ 626514cbbb2c6c2b6844315ca98a2bfcbca0b43d | BSL-1.0 | 1,231 bytes | Python | 58 stars, 12 issues, 23 forks ===
from __future__ import absolute_import, division, print_function, unicode_literals
from unittest import TestCase
from beast.env.ReadEnvFile import read_env_file
from beast.util import Terminal
Terminal.CAN_CHANGE_COLOR = False
JSON = """
{
"FOO": "foo",
"BAR": "bar bar bar",
"CPPFLAGS": "-std=c++11 -frtti -fno-strict-aliasing -DWOMBAT"
}"""
ENV = """
# An env file.
FOO=foo
export BAR="bar bar bar"
CPPFLAGS=-std=c++11 -frtti -fno-strict-aliasing -DWOMBAT
# export BAZ=baz should be ignored.
"""
RESULT = {
'FOO': 'foo',
'BAR': 'bar bar bar',
'CPPFLAGS': '-std=c++11 -frtti -fno-strict-aliasing -DWOMBAT',
}
BAD_ENV = ENV + """
This line isn't right.
NO SPACES IN NAMES="valid value"
"""
class test_ReadEnvFile(TestCase):
def test_read_json(self):
self.assertEqual(read_env_file(JSON), RESULT)
def test_read_env(self):
self.assertEqual(read_env_file(ENV), RESULT)
def test_read_env_error(self):
errors = []
self.assertEqual(read_env_file(BAD_ENV, errors.append), RESULT)
self.assertEqual(errors, [
"WARNING: Didn't understand the following environment file lines:",
"11. >>> This line isn't right.",
'12. >>> NO SPACES IN NAMES="valid value"'])
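# A small standalone sketch of read_env_file outside the TestCase, under the
# same assumptions the tests above encode: it accepts either JSON or
# shell-style env text and returns a plain dict.
if __name__ == '__main__':
    parsed = read_env_file('FOO=foo\nexport BAR="bar bar bar"\n')
    assert parsed == {'FOO': 'foo', 'BAR': 'bar bar bar'}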
=== 7c46de4fdbd3dc2c58b659d8b01a5d17658d1622 | aiida/cmdline/params/options/test_interactive.py | tomzhang/aiida_core @ 949810e9f3daff0f748c5c9aa1dde4f5222bb49b | BSD-2-Clause | 14,736 bytes | Python | 1 star ===
"""Unit tests for the InteractiveOption."""
from __future__ import absolute_import
import unittest
import click
from click.testing import CliRunner
from click.types import IntParamType
from aiida.cmdline.params.options.interactive import InteractiveOption
from aiida.cmdline.params.options import NON_INTERACTIVE
class Only42IntParamType(IntParamType):
"""
Param type that only accepts 42 as valid value
"""
name = 'only42int'
def convert(self, value, param, ctx):
newval = super(Only42IntParamType, self).convert(value, param, ctx)
if newval != 42:
self.fail("Type validation: invalid, should be 42")
return newval
def __repr__(self):
return 'ONLY42INT'
class InteractiveOptionTest(unittest.TestCase):
"""Unit tests for InteractiveOption."""
# pylint: disable=too-many-public-methods, missing-docstring
def simple_command(self, **kwargs):
"""Return a simple command with one InteractiveOption, kwargs get relayed to the option."""
# pylint: disable=no-self-use
@click.command()
@click.option('--opt', prompt='Opt', cls=InteractiveOption, **kwargs)
@NON_INTERACTIVE()
def cmd(opt, non_interactive):
"""test command for InteractiveOption"""
# pylint: disable=unused-argument
click.echo(str(opt))
return cmd
@classmethod
def setUpClass(cls):
cls.runner = CliRunner()
def prompt_output(self, cli_input, converted=None):
"""Return expected output of simple_command, given a commandline cli_input string."""
# pylint: disable=no-self-use
return "Opt: {}\n{}\n".format(cli_input, converted or cli_input)
def test_prompt_str(self):
"""
scenario: using InteractiveOption with type=str
behaviour: giving no option prompts, accepts a string
"""
cmd = self.simple_command(type=str)
runner = CliRunner()
result = runner.invoke(cmd, [], input='TEST\n')
expected = self.prompt_output('TEST')
self.assertIsNone(result.exception)
self.assertIn(expected, result.output)
def test_prompt_empty_input(self):
"""
scenario: using InteractiveOption with type=str and invoking without options
behaviour: pressing enter on empty line at prompt repeats the prompt without a message
"""
cmd = self.simple_command(type=str)
runner = CliRunner()
result = runner.invoke(cmd, [], input='\nTEST\n')
expected = "Opt: \nOpt: TEST\nTEST\n"
self.assertIsNone(result.exception)
self.assertIn(expected, result.output)
def test_prompt_help_default(self):
"""
scenario: using InteractiveOption with type=str and no help parameter and invoking without options
behaviour: entering '?' leads to a default help message being printed and prompt repeated
"""
cmd = self.simple_command(type=str)
runner = CliRunner()
result = runner.invoke(cmd, [], input='?\nTEST\n')
expected_1 = 'Opt: ?\n'
expected_2 = 'Expecting text\n'
expected_3 = 'Opt: TEST\nTEST\n'
self.assertIsNone(result.exception)
self.assertIn(expected_1, result.output)
self.assertIn(expected_2, result.output)
self.assertIn(expected_3, result.output)
def test_prompt_help_custom(self):
"""
scenario: using InteractiveOption with type=str and help message and invoking without options
behaviour: entering '?' leads to the given help message being printed and the prompt repeated
"""
cmd = self.simple_command(type=str, help='Please enter some text')
runner = CliRunner()
result = runner.invoke(cmd, [], input='?\nTEST\n')
expected_1 = 'Opt: ?\n'
expected_2 = 'Please enter some text\n'
expected_3 = 'Opt: TEST\nTEST\n'
self.assertIsNone(result.exception)
self.assertIn(expected_1, result.output)
self.assertIn(expected_2, result.output)
self.assertIn(expected_3, result.output)
def test_prompt_simple(self):
"""
scenario: using InteractiveOption with simple types (bool, int, float)
behaviour: giving no option prompts, accepts valid input for each type
"""
params = [(bool, 'true', 'True'), (int, '98', '98'), (float, '3.14e-7', '3.14e-07')]
for ptype, cli_input, output in params:
cmd = self.simple_command(type=ptype, help='help msg')
runner = CliRunner()
result = runner.invoke(cmd, [], input='\n?\n{}\n'.format(cli_input))
expected_1 = 'Opt: \nOpt: ?\n'
expected_2 = 'help msg\n'
expected_2 += self.prompt_output(cli_input, output)
self.assertIsNone(result.exception)
self.assertIn(expected_1, result.output)
self.assertIn(expected_2, result.output)
@staticmethod
def strip_line(text):
"""returns text without the last line"""
return text.rsplit('\n')[0]
def test_prompt_complex(self):
"""
scenario: using InteractiveOption with complex types (click.File, click.Path)
behaviour: giving no option prompts, accepts a valid file path
"""
params = [(click.File(), __file__), (click.Path(exists=True), __file__)]
for ptype, cli_input in params:
cmd = self.simple_command(type=ptype, help='help msg')
runner = CliRunner()
result = runner.invoke(cmd, [], input='\n?\n{}\n'.format(cli_input))
expected_1 = 'Opt: \nOpt: ?\n'
expected_2 = 'help msg\n'
expected_2 += self.strip_line(self.prompt_output(cli_input))
self.assertIsNone(result.exception)
self.assertIn(expected_1, result.output)
self.assertIn(expected_2, result.output)
def test_default_value_prompt(self):
"""
scenario: using InteractiveOption with a default value, invoke without options
behaviour: prompt, showing the default value, take default on empty cli_input.
"""
returns = []
cmd = self.simple_command(default='default')
result = self.runner.invoke(cmd, [], input='\n')
returns.append(result)
expected = 'Opt [default]: \ndefault\n'
self.assertIsNone(result.exception)
self.assertIn(expected, result.output)
result = self.runner.invoke(cmd, [], input='TEST\n')
returns.append(result)
expected = 'Opt [default]: TEST\nTEST\n'
self.assertIsNone(result.exception)
self.assertIn(expected, result.output)
return returns
def test_default_value_empty_opt(self):
"""
scenario: InteractiveOption with default value, invoke with empty option (--opt=)
behaviour: accept empty string as input
"""
cmd = self.simple_command(default='default')
runner = CliRunner()
result = runner.invoke(cmd, ['--opt='])
expected = '\n'
self.assertIsNone(result.exception)
self.assertEqual(result.output, expected)
def test_opt_given_valid(self):
"""
scenario: InteractiveOption, invoked with a valid value on the cmdline
behaviour: accept valid value
"""
cmd = self.simple_command(type=int)
runner = CliRunner()
result = runner.invoke(cmd, ['--opt=4'])
expected = '4\n'
self.assertIsNone(result.exception)
self.assertEqual(result.output, expected)
def test_opt_given_invalid(self):
"""
scenario: InteractiveOption, invoked with an invalid value on the cmdline
behaviour: reject invalid value
"""
cmd = self.simple_command(type=int)
runner = CliRunner()
result = runner.invoke(cmd, ['--opt=foo'])
self.assertIsNotNone(result.exception)
self.assertIn('Invalid value', result.output)
def test_non_interactive(self):
"""
scenario: InteractiveOption, invoked with only --non-interactive (and the option is required)
behaviour: fail
"""
cmd = self.simple_command(required=True)
runner = CliRunner()
result = runner.invoke(cmd, ['--non-interactive'])
self.assertIsNotNone(result.exception)
self.assertIn('Usage: ', result.output)
self.assertIn('Missing option', result.output)
def test_non_interactive_default(self):
"""
scenario: InteractiveOption, invoked with only --non-interactive
behaviour: use the default value without prompting
"""
cmd = self.simple_command(default='default')
runner = CliRunner()
result = runner.invoke(cmd, ['--non-interactive'])
self.assertIsNone(result.exception)
self.assertEqual(result.output, 'default\n')
@staticmethod
def user_callback(_ctx, param, value):
"""
A fake user callback used for testing.
:param _ctx: The click context
:param param: The parameter name
:param value: The parameter value
:return: The validated parameter
"""
if not value:
return -1
elif value != 42:
raise click.BadParameter('invalid', param=param)
else:
return value
def test_after_callback_valid(self):
"""
scenario: InteractiveOption with a user callback
action: invoke with valid value
behaviour: user callback runs & succeeds
"""
cmd = self.simple_command(callback=self.user_callback, type=int)
result = self.runner.invoke(cmd, ['--opt=42'])
self.assertIsNone(result.exception)
self.assertEqual(result.output, '42\n')
def test_after_callback_invalid(self):
"""
scenario: InteractiveOption with a user callback
action: invoke with invalid value of right type
behaviour: user callback runs & rejects the value
"""
cmd = self.simple_command(callback=self.user_callback, type=int)
result = self.runner.invoke(cmd, ['--opt=234234'])
self.assertIsNotNone(result.exception)
self.assertIn('Invalid value', result.output)
self.assertIn('invalid', result.output)
def test_after_callback_wrong_type(self):
"""
scenario: InteractiveOption with a user callback
action: invoke with invalid value of wrong type
behaviour: user callback does not run
"""
cmd = self.simple_command(callback=self.user_callback, type=int)
result = self.runner.invoke(cmd, ['--opt=bla'])
self.assertIsNotNone(result.exception)
self.assertIn('Invalid value', result.output)
self.assertIn('bla', result.output)
def test_after_callback_empty(self):
"""
scenario: InteractiveOption with a user callback
action: invoke with an empty value
behaviour: user callback does not run
"""
cmd = self.simple_command(callback=self.user_callback, type=int)
result = self.runner.invoke(cmd, ['--opt='])
self.assertIsNotNone(result.exception)
self.assertIn('Invalid value', result.output)
self.assertNotIn('empty', result.output)
def test_after_validation_interactive(self):
"""
Test that the type validation gets called on values entered at a prompt.
Scenario:
* InteractiveOption with custom type and prompt set
* invoked without passing the options
* on prompt: first enter an invalid value, then a valid one
Behaviour:
* Prompt for the value
* reject invalid value, prompt again
* accept valid value
"""
cmd = self.simple_command(callback=self.user_callback, type=Only42IntParamType())
result = self.runner.invoke(cmd, [], input='23\n42\n')
self.assertIsNone(result.exception)
self.assertIn('Opt: 23\n', result.output)
self.assertIn('Type validation: invalid', result.output)
self.assertIn('Opt: 42\n42\n', result.output)
def test_after_callback_default_noninteractive(self):
"""
Test that the callback gets called on the default, in line with click 6 behaviour.
Scenario:
* InteractiveOption with user callback and invalid default
* invoke with no options and --non-interactive
Behaviour:
* the default value gets passed through the callback and rejected
"""
# pylint: disable=invalid-name
cmd = self.simple_command(callback=self.user_callback, type=int, default=23)
result = self.runner.invoke(cmd, ['--non-interactive'])
self.assertIsNotNone(result.exception)
self.assertIn('Invalid value', result.output)
def test_default_empty_empty_cli(self):
"""Test that default="" allows to pass an empty cli option."""
cmd = self.simple_command(default="", type=str)
result = self.runner.invoke(cmd, ['--opt='])
self.assertIsNone(result.exception)
self.assertEqual(result.output, '\n')
def test_default_empty_prompt(self):
"""Test that default="" allows to pass an empty cli option."""
cmd = self.simple_command(default="", type=str)
result = self.runner.invoke(cmd, input='\n')
expected = 'Opt []: \n\n'
self.assertIsNone(result.exception)
self.assertIn(expected, result.output)
def test_prompt_dynamic_default(self):
"""Test that dynamic defaults for prompting still work."""
def test_not_required_noninteractive(self):
cmd = self.simple_command(required=False)
result = self.runner.invoke(cmd, ['--non-interactive'])
self.assertIsNone(result.exception)
# I strip, there is typically a \n at the end
self.assertEqual(result.output, 'None\n')
def test_not_required_interactive(self):
cmd = self.simple_command(required=False)
result = self.runner.invoke(cmd, input='value\n')
expected = 'Opt: value\nvalue\n'
self.assertIsNone(result.exception)
self.assertIn(expected, result.output)
def test_not_required_noninteractive_default(self):
cmd = self.simple_command(required=False, default='')
result = self.runner.invoke(cmd, ['--non-interactive'])
self.assertIsNone(result.exception)
self.assertEqual(result.output, '\n')
def test_not_required_interactive_default(self):
cmd = self.simple_command(required=False, default='')
result = self.runner.invoke(cmd, input='\nnot needed\n')
expected = 'Opt []: \n\n'
self.assertIsNone(result.exception)
self.assertIn(expected, result.output)
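# For reference, a standalone command using InteractiveOption directly,
# mirroring what simple_command() builds inside the test case above. The
# option name, prompt and help text are illustrative only.
@click.command()
@click.option('--label', prompt='Label', cls=InteractiveOption, type=str,
              help='Entering ? at the prompt prints this message')
def label_cmd(label):
    """Echo the label, prompting for it when it is not passed on the cmdline."""
    click.echo(label)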
=== 7c4747750d4783fac45721046885fc982b92a07c | scripts/migration/migrate_registered_meta.py | fabmiz/osf.io @ 8d86af3f0a6e5388bd5b18383e68e27b65a66247 | Apache-2.0 | 3,195 bytes | Python ===
"""
Changes existing registered_meta on a node to new schema layout
required for the prereg-prize
"""
import json
import sys
import logging
from modularodm import Q
from framework.mongo import database as db
from framework.mongo.utils import from_mongo
from framework.transactions.context import TokuTransaction
from website.models import MetaSchema
from website.app import init_app
from website.project.metadata.schemas import _id_to_name
from scripts import utils as scripts_utils
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def prepare_nodes(_db=None):
_db = _db or db
_db['node'].update(
{},
{
'$set': {
'registered_schema': []
}
},
multi=True
)
def from_json_or_fail(schema):
# Unstringify stored metadata
try:
schema = json.loads(schema) if schema else {}
except TypeError as e:
if isinstance(schema, dict):
pass
else:
raise e
return schema
def main(dev=False, _db=None):
_db = _db or db
init_app(routes=False)
count = 0
skipped = 0
scripts_utils.add_file_logger(logger, __file__)
logger.info("Iterating over all registrations")
# convert registered_schema to list field
prepare_nodes()
node_documents = _db['node'].find({'is_registration': True})
for node in node_documents:
registered_schemas = []
registered_meta = {}
schemas = node['registered_meta']
if not schemas:
logger.info('Node: {0} is registered but has no registered_meta'.format(node['_id']))
continue
for schema_id, schema in schemas.iteritems():
name = _id_to_name(from_mongo(schema_id))
schema = from_json_or_fail(schema)
# append matching schema to node.registered_schema
try:
meta_schema = MetaSchema.find(
Q('name', 'eq', name)
).sort('-schema_version')[0]
except IndexError as e:
logger.error('No MetaSchema matching name: {0} found for node: {1}.'.format(name, node['_id']))
# Skip over missing schemas
skipped += 1
if dev:
continue
else:
raise e
else:
registered_meta[meta_schema._id] = {
key: {
'value': value
}
for key, value in schema.items()
}
registered_schemas.append(meta_schema._id)
db['node'].update(
{'_id': node['_id']},
{'$set': {
'registered_meta': registered_meta,
'registered_schema': registered_schemas
}}
)
count = count + 1
logger.info('Done with {0} nodes migrated and {1} nodes skipped.'.format(count, skipped))
if __name__ == '__main__':
dry_run = 'dry' in sys.argv
dev = 'dev' in sys.argv
with TokuTransaction():
main(dev=dev)
if dry_run:
raise RuntimeError('Dry run, rolling back transaction.')
=== 7c484eb9bf609790d2ba9c1adb147528492648ab | graph/tsp.py | pingrunhuang/CodeChallenge @ a8e5274e04c47d851836197907266418af4f1a22 | MIT | 2,108 bytes | Python ===
"""
given a fully connected undirected graph(If no path exists between two cities, adding an arbitrarily long edge will complete the graph without affecting the optimal tour),
find the path with the lowest cost in total for a salesman to travel from a given start vertex
"""
import time
class Edge:
def __init__(self, target, weight):
self.target = target
self.weight = weight
def __repr__(self):
return self.target
class TSP(object):
"""
This is a fully connected graph with edge weight value positive
"""
def __init__(self):
self.graph = {}
self.prev = {}
self.start = None
def add_vertex(self, name, edges):
self.graph[name] = edges
def permutation(self, edge, result=None):
    # Guard against the shared mutable-default pitfall: create a fresh list per call.
    if result is None:
        result = []
if edge.target == self.start:
return result
for x in result:
if x.target == edge.target:
return result
result.append(edge)
for next_edge in self.graph[edge.target]:
self.permutation(next_edge, result)
return result
def tsp_recursive(self, start):
"""
Essentially, the tsp problem is a permutation problem
"""
self.start = start
result = []
for edge in self.graph[start]:
result.append(self.permutation(edge, [Edge(start, 0)]))
smallest_val = float('inf')
print(result)
path = []
for solution in result:
total_cost = sum(map(lambda x: x.weight, solution))
if smallest_val > total_cost:
path = solution
smallest_val = total_cost
return (smallest_val, path)
def tsp_dp(self, graph, start):
pass
if __name__ == "__main__":
tsp = TSP()
tsp.add_vertex('w', [Edge('y', 1), Edge('x', 6), Edge('z', 3)])
tsp.add_vertex('x', [Edge('w', 6), Edge('z', 3), Edge('y', 4)])
tsp.add_vertex('z', [Edge('y', 2), Edge('w', 3), Edge('x', 3)])
tsp.add_vertex('y', [Edge('w', 1), Edge('x', 3), Edge('z', 2)])
result = tsp.tsp_recursive('x')
print(result)
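# A sketch of the dynamic-programming (Held-Karp) approach that tsp_dp leaves
# unimplemented above: O(n^2 * 2^n) instead of factorial time. It assumes the
# same structure as TSP.graph (dict mapping vertex -> list of Edge) and returns
# the cost of the cheapest tour starting and ending at `start`.
import itertools

def held_karp(graph, start):
    weight = {(u, e.target): e.weight for u, edges in graph.items() for e in edges}
    others = [v for v in graph if v != start]
    # dp[(visited, last)]: min cost from `start` through `visited`, ending at `last`
    dp = {(frozenset([v]), v): weight[(start, v)] for v in others}
    for size in range(2, len(others) + 1):
        for subset in itertools.combinations(others, size):
            visited = frozenset(subset)
            for last in subset:
                rest = visited - {last}
                dp[(visited, last)] = min(
                    dp[(rest, prev)] + weight[(prev, last)] for prev in rest)
    full = frozenset(others)
    return min(dp[(full, last)] + weight[(last, start)] for last in others)

# e.g. held_karp(tsp.graph, 'x') on the graph constructed above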
=== 7c487be5e97c4669f698df37e679a53c19c84c61 | firstBadVersion.py | pflun/learningAlgorithms @ 3101e989488dfc8a56f1bf256a1c03a837fe7d97 | MIT | 515 bytes | Python ===
# The isBadVersion API is already defined for you.
# @param version, an integer
# @return a bool
# def isBadVersion(version):
class Solution(object):
def firstBadVersion(self, n):
start = 1
end = n
while start + 1 < end:
mid = start + (end - start) // 2
if isBadVersion(mid):
end = mid
else:
start = mid
if isBadVersion(start):
return start
elif isBadVersion(end):
return end
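# A quick self-contained check of the bisection above, with a stub
# isBadVersion (in this made-up run, versions >= 7 are "bad").
if __name__ == '__main__':
    def isBadVersion(version):
        return version >= 7

    assert Solution().firstBadVersion(20) == 7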
=== 7c4ae67259cc4af329dd35bc62e3ceddb69e3a57 | cogs/remind.py | LoganHaug/reminder-bot @ 1bb1853b79e0299240a214e947e8bc29ed34e46e | MIT | 7,190 bytes | Python | 2 stars, 8 issues, 2 forks ===
import asyncio
from typing import Union
import datetime
import time
from discord.ext import commands
import yaml
from cogs import checks
import database
import utils
# Loads the repeating interval dictionary
with open("conversions.yml", "r") as conversion_file:
conversion_dict = yaml.load(conversion_file, Loader=yaml.Loader)
prefix = utils.get_prefix()
class Remind(commands.Cog):
def __init__(self, bot):
self.bot = bot
self.reminders = []
self.tasks = []
asyncio.create_task(self.update_schedule())
async def update_schedule(self):
"""Updates the schedule"""
reminders = database.get_reminders()
new_reminders = []
for reminder in reminders:
if reminder["date"] - time.time() < 0:
database.remove_reminder(reminder)
else:
new_reminders.append(reminder)
self.reminders.clear()
self.reminders.extend(new_reminders)
async def setup_reminders(self):
"""Sets up the reminders"""
await self.clear_tasks()
await self.update_schedule()
scheduled_reminders = []
for task in self.tasks:
if task.get_coro().cr_frame is not None:
scheduled_reminders.append(
task.get_coro().cr_frame.f_locals["reminder"]
)
# Create tasks for all reminders, call the remind function
for reminder in self.reminders:
if reminder not in scheduled_reminders:
task = asyncio.create_task(self.remind(reminder))
self.tasks.append(task)
scheduled_reminders.append(
task.get_coro().cr_frame.f_locals["reminder"]
)
# Run the tasks
asyncio.gather(*self.tasks)
async def clear_tasks(self):
for task in self.tasks:
if task.done():  # avoid poking the private _state attribute
self.tasks.remove(task)
async def remind(self, reminder: dict):
"""Execute one reminder"""
# Check if the reminder is in the future and if it exists in the database
if reminder["date"] > time.time() and database.get_reminders(**reminder) != []:
await asyncio.sleep(reminder["date"] - time.time())
# Checks if the reminder is still exists, in case of deletion
if database.get_reminders(**reminder) != [] and reminder in self.reminders:
await self.bot.get_channel(reminder["channel"]).send(
f"Reminder:\n{reminder['reminder_text']}"
)
if reminder["repeating"] != False:
await self.schedule_repeat(reminder)
self.reminders.remove(reminder)
# Remove the reminder
database.remove_reminder(reminder)
# Remove a reminder that has passed
else:
database.remove_reminder(reminder)
async def schedule_repeat(self, reminder: dict):
"""Schedules a repeating reminder"""
if reminder["repeating"] and database.get_reminders(**reminder) != []:
# Calculate when the next reminder should be
reminder_date = datetime.datetime.fromtimestamp(
reminder["date"] + conversion_dict[reminder["repeating"]]
)
# Remove the old reminder
database.remove_reminder(reminder)
# Add the new reminder
database.insert_reminder(
reminder["guild"],
reminder["channel"],
reminder_date.year,
reminder_date.month,
reminder_date.day,
reminder_date.hour,
reminder_date.minute,
reminder["reminder_text"],
reminder["repeating"],
)
asyncio.create_task(self.setup_reminders())
@commands.command(
help="Date should be in month/day/year format, either with slashes or dashes (ex. month/day/year or month-day-year)\n\nRepeating is an interval of time after which the reminder should be sent again, must be either daily, weekly, biweekly, or triweekly\n\nText is the text the reminder will be sent with, wrap with quotations if this contains whitespace",
aliases=["reminder", "add_r", "ar"],
)
@commands.check(checks.is_operator)
async def add_reminder(
self,
ctx,
date: str,
user_time: str,
text: str,
repeating: Union[str, bool] = False,
):
"""Attempts to add a reminder"""
# Checks if the reminder should repeat, and if it is a valid interval
try:
_date = utils.split_date(date)
_time = utils.split_time(user_time)
except UnboundLocalError:
raise commands.UserInputError("Date or time was not in the correct format.")
if repeating and repeating not in conversion_dict:
raise commands.UserInputError()
# Tries to insert the reminder
result = database.insert_reminder(
ctx.guild.id,
ctx.channel.id,
_date["year"],
_date["month"],
_date["day"],
_time["hour"],
_time["minute"],
text,
repeating,
)
# Sends a status message, and restarts the reminders
if result:
await asyncio.create_task(self.setup_reminders())
await ctx.send(
embed=utils.generate_embed(
"Reminder Stored",
f"{date}\n{user_time}\n{text}\nrepeating: {repeating}",
)
)
# This means the insertion of the reminder failed
else:
await ctx.send(
embed=utils.generate_embed(
"Error",
"`This reminder already exists in the database or is not in the future`",
)
)
@add_reminder.error
async def add_reminder_error(self, ctx, error):
"""Called when add_reminder() errors"""
print(error)
if isinstance(error, commands.errors.MissingRequiredArgument):
await ctx.send(
embed=utils.generate_embed(
"Error", f"`{error} Run {prefix}help add_reminder`"
)
)
elif isinstance(error, commands.errors.UserInputError):
await ctx.send(
embed=utils.generate_embed(
"Error", f"`{error} Run {prefix}help add_reminder`"
)
)
elif isinstance(error, commands.errors.CheckFailure):
await ctx.send(
embed=utils.generate_embed(
"Error", "`You do not have permissions for this command`"
)
)
else:
await ctx.send(
embed=utils.generate_embed(
"Error",
f"`An unexpected error has occured, run {prefix}help add_reminder`",
)
)
def setup(bot):
cog = Remind(bot)
bot.add_cog(cog)
asyncio.create_task(cog.setup_reminders())
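# The repeating intervals come from conversions.yml, which is not shown here.
# Inferred from the add_reminder help text ("daily, weekly, biweekly, or
# triweekly"), a plausible shape of that file maps each interval name to the
# number of seconds added to the previous date:
#
#   daily: 86400
#   weekly: 604800
#   biweekly: 1209600
#   triweekly: 1814400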
=== 7c4b0703a1999f5fa6b05313d2f3c64b1a7c6c84 | setup.py | csengor/toraman_py @ 5cb7b39ae073ecc2adcb7cea83b79492ac5aa485 | MIT | 948 bytes | Python | 2 stars ===
import setuptools
from toraman.version import __version__
with open('README.md', 'r') as input_file:
long_description = input_file.read()
setuptools.setup(
name='toraman',
version=__version__,
author='Çağatay Onur Şengör',
author_email='contact@csengor.com',
description='A computer-assisted translation tool package',
keywords=['CAT', 'computer-assisted translation', 'computer-aided translation', 'translation', 'free-to-use'],
long_description=long_description,
long_description_content_type='text/markdown',
url='https://github.com/csengor/toraman-py',
packages=setuptools.find_packages(),
install_requires=[
'lxml',
'python-levenshtein',
'regex'
],
classifiers=[
'Development Status :: 3 - Alpha',
'License :: OSI Approved :: MIT License',
'Operating System :: OS Independent',
'Programming Language :: Python :: 3',
],
)
=== 7c4b2f5648b8aa2f586e693897cf20f646266eed | declarations_site/cms_pages/migrations/0015_auto_20150615_0201.py | li-ar/declarations.com.ua @ 343cd86cc5a4bd895f2859ed896728f6416ac223 | MIT | 457 bytes | Python | 32 stars, 52 issues, 18 forks ===
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cms_pages', '0014_homepage_news_count'),
]
operations = [
migrations.AlterField(
model_name='newspage',
name='lead',
field=models.TextField(blank=True, verbose_name='Лід'),
preserve_default=True,
),
]
=== 7c4b6fc1c38e59243da6f002769c9090efca9c53 | tests/models/test_grad_norm.py | nightlessbaron/pytorch-lightning @ 239bea5c29cef0d1a0cfb319de5dbc9227aa2a53 | Apache-2.0 | 4,112 bytes | Python | 3 stars, 8 issues, 1 fork ===
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from unittest import mock
from unittest.mock import patch
import numpy as np
import pytest
from pytorch_lightning import Trainer
from tests.base import EvalModelTemplate
from tests.base.develop_utils import reset_seed
class ModelWithManualGradTracker(EvalModelTemplate):
def __init__(self, norm_type, *args, **kwargs):
super().__init__(*args, **kwargs)
self.stored_grad_norms, self.norm_type = [], float(norm_type)
# validation spoils logger's metrics with `val_loss` records
validation_step = None
val_dataloader = None
def training_step(self, batch, batch_idx, optimizer_idx=None):
# just return a loss, no log or progress bar meta
x, y = batch
loss_val = self.loss(y, self(x.flatten(1, -1)))
return {'loss': loss_val}
def on_after_backward(self):
out, norms = {}, []
prefix = f'grad_{self.norm_type}_norm_'
for name, p in self.named_parameters():
if p.grad is None:
continue
# `np.linalg.norm` implementation likely uses fp64 intermediates
flat = p.grad.data.cpu().numpy().ravel()
norm = np.linalg.norm(flat, self.norm_type)
norms.append(norm)
out[prefix + name] = round(norm, 4)
# handle total norm
norm = np.linalg.norm(norms, self.norm_type)
out[prefix + 'total'] = round(norm, 4)
self.stored_grad_norms.append(out)
@mock.patch.dict(os.environ, {"PL_DEV_DEBUG": "1"})
@pytest.mark.parametrize("norm_type", [1., 1.25, 2, 3, 5, 10, 'inf'])
def test_grad_tracking(tmpdir, norm_type, rtol=5e-3):
# rtol=5e-3 respects the 3 decimals rounding in `.grad_norms` and above
reset_seed()
# use a custom grad tracking module and a list logger
model = ModelWithManualGradTracker(norm_type)
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=3,
track_grad_norm=norm_type,
log_every_n_steps=1, # request grad_norms every batch
)
result = trainer.fit(model)
assert result == 1, "Training failed"
logged_metrics = trainer.dev_debugger.logged_metrics
assert len(logged_metrics) == len(model.stored_grad_norms)
# compare the logged metrics against tracked norms on `.backward`
for mod, log in zip(model.stored_grad_norms, logged_metrics):
common = mod.keys() & log.keys()
log, mod = [log[k] for k in common], [mod[k] for k in common]
assert np.allclose(log, mod, rtol=rtol)
@pytest.mark.parametrize("log_every_n_steps", [1, 2, 3])
def test_grad_tracking_interval(tmpdir, log_every_n_steps):
""" Test that gradient norms get tracked in the right interval and that everytime the same keys get logged. """
trainer = Trainer(
default_root_dir=tmpdir,
track_grad_norm=2,
log_every_n_steps=log_every_n_steps,
max_steps=10,
)
with patch.object(trainer.logger, "log_metrics") as mocked:
model = EvalModelTemplate()
trainer.fit(model)
expected = trainer.global_step // log_every_n_steps
grad_norm_dicts = []
for _, kwargs in mocked.call_args_list:
metrics = kwargs.get("metrics", {})
grad_norm_dict = {k: v for k, v in metrics.items() if k.startswith("grad_")}
if grad_norm_dict:
grad_norm_dicts.append(grad_norm_dict)
assert len(grad_norm_dicts) == expected
assert all(grad_norm_dicts[0].keys() == g.keys() for g in grad_norm_dicts)
=== 7c4b952636f3e94167bbd00880673a8dc5635803 | deep_speech_2/decoder.py | Canpio/models @ 72874de98fba93592edee42b776e3d876b1d5504 | Apache-2.0 | 2,278 bytes | Python | 1 star ===
"""
CTC-like decoder utilities.
"""
from itertools import groupby
import numpy as np
def ctc_best_path_decode(probs_seq, vocabulary):
"""
Best path decoding, also called argmax decoding or greedy decoding.
Path consisting of the most probable tokens are further post-processed to
remove consecutive repetitions and all blanks.
:param probs_seq: 2-D list of probabilities over the vocabulary for each
character. Each element is a list of float probabilities
for one character.
:type probs_seq: list
:param vocabulary: Vocabulary list.
:type vocabulary: list
:return: Decoding result string.
:rtype: basestring
"""
# dimension verification
for probs in probs_seq:
if not len(probs) == len(vocabulary) + 1:
raise ValueError("probs_seq dimension mismatchedd with vocabulary")
# argmax to get the best index for each time step
max_index_list = list(np.array(probs_seq).argmax(axis=1))
# remove consecutive duplicate indexes
index_list = [index_group[0] for index_group in groupby(max_index_list)]
# remove blank indexes
blank_index = len(vocabulary)
index_list = [index for index in index_list if index != blank_index]
# convert index list to string
return ''.join([vocabulary[index] for index in index_list])
def ctc_decode(probs_seq, vocabulary, method):
"""
CTC-like sequence decoding from a sequence of likelihood probablilites.
:param probs_seq: 2-D list of probabilities over the vocabulary for each
character. Each element is a list of float probabilities
for one character.
:type probs_seq: list
:param vocabulary: Vocabulary list.
:type vocabulary: list
:param method: Decoding method name, with options: "best_path".
:type method: basestring
:return: Decoding result string.
:rtype: basestring
"""
for prob_list in probs_seq:
if not len(prob_list) == len(vocabulary) + 1:
raise ValueError("probs dimension mismatchedd with vocabulary")
if method == "best_path":
return ctc_best_path_decode(probs_seq, vocabulary)
else:
raise ValueError("Decoding method [%s] is not supported.")
=== 7c4d61daea2ec370e51d0a70c14c812f08cd827f | setup.py | swtwsk/dbt-airflow-manifest-parser @ fae0049fb8ff3bc7a78488a48a31023f67fbeef3 | Apache-2.0 | 1,491 bytes | Python ===
"""dbt_airflow_factory module."""
from setuptools import find_packages, setup
with open("README.md") as f:
README = f.read()
# Runtime Requirements.
INSTALL_REQUIRES = ["pytimeparse==1.1.8"]
# Dev Requirements
EXTRA_REQUIRE = {
"tests": [
"pytest>=6.2.2, <7.0.0",
"pytest-cov>=2.8.0, <3.0.0",
"tox==3.21.1",
"pre-commit==2.9.3",
"pandas==1.2.5",
"apache-airflow[kubernetes]==2.2.0",
],
"docs": [
"sphinx==4.3.1",
"sphinx-rtd-theme==1.0.0",
"sphinx-click>=3.0,<3.1",
"myst-parser>=0.16, <0.17",
"docutils>=0.17,<0.18",
],
}
setup(
name="dbt-airflow-factory",
version="0.18.0",
description="Library to convert DBT manifest metadata to Airflow tasks",
long_description=README,
long_description_content_type="text/markdown",
license="Apache Software License (Apache 2.0)",
python_requires=">=3",
classifiers=[
"Development Status :: 3 - Alpha",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
],
keywords="dbt airflow manifest parser python",
author=u"Piotr Pekala",
author_email="piotr.pekala@getindata.com",
url="https://github.com/getindata/dbt-airflow-factory/",
packages=find_packages(exclude=["ez_setup", "examples", "tests", "docs"]),
include_package_data=True,
zip_safe=False,
install_requires=INSTALL_REQUIRES,
extras_require=EXTRA_REQUIRE,
)
=== 7c4f383bc1d3e78bec978541f8102910be2e6494 | karabo_bridge/tests/test_serialize.py | European-XFEL/karabo-bridge-py @ c4b2847b837ae7156640cb8f787fcf96ac7f632e | BSD-3-Clause | 1,489 bytes | Python | 6 stars, 43 issues, 4 forks ===
import numpy as np
import pytest
from karabo_bridge import serialize, deserialize
from .utils import compare_nested_dict
def test_serialize(data, protocol_version):
msg = serialize(data, protocol_version=protocol_version)
assert isinstance(msg, list)
d, m = deserialize(msg)
compare_nested_dict(data, d)
assert m['source1'] == {'timestamp.tid': 9876543210, 'timestamp': 12345678}
assert m['XMPL/DET/MOD0'] == {}
def test_serialize_with_metadata(data, metadata, protocol_version):
msg = serialize(data, metadata, protocol_version=protocol_version)
d, m = deserialize(msg)
compare_nested_dict(metadata, m)
def test_serialize_with_dummy_timestamps(data, protocol_version):
msg = serialize(data, protocol_version=protocol_version,
dummy_timestamps=True)
d, m = deserialize(msg)
assert set(m['XMPL/DET/MOD0']) == {'timestamp', 'timestamp.sec', 'timestamp.frac'}
assert set(m['source1']) == {'timestamp', 'timestamp.tid'}
assert m['source1']['timestamp.tid'] == 9876543210
assert m['source1']['timestamp'] == 12345678
def test_serialize_with_metadata_and_dummy_timestamp(data, metadata, protocol_version):
msg = serialize(data, metadata, protocol_version=protocol_version,
dummy_timestamps=True)
d, m = deserialize(msg)
compare_nested_dict(metadata, m)
def test_wrong_version(data):
with pytest.raises(ValueError):
serialize(data, protocol_version='3.0')
| 31.020833
| 87
| 0.715917
| 182
| 1,489
| 5.637363
| 0.258242
| 0.190058
| 0.092593
| 0.109162
| 0.589669
| 0.535088
| 0.464912
| 0.432749
| 0.432749
| 0.432749
| 0
| 0.035513
| 0.167898
| 1,489
| 47
| 88
| 31.680851
| 0.792575
| 0
| 0
| 0.258065
| 0
| 0
| 0.106783
| 0
| 0
| 0
| 0
| 0
| 0.225806
| 1
| 0.16129
| false
| 0
| 0.129032
| 0
| 0.290323
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c4fc346cb91cacd807ee64d79b21152c687d93c
| 2,029
|
py
|
Python
|
indico/testing/fixtures/util.py
|
bpedersen2/indico
|
8410ee5f8f8530a8692f3dd2d4015c3074b0aa30
|
[
"MIT"
] | 1
|
2021-12-27T17:51:27.000Z
|
2021-12-27T17:51:27.000Z
|
indico/testing/fixtures/util.py
|
bpedersen2/indico
|
8410ee5f8f8530a8692f3dd2d4015c3074b0aa30
|
[
"MIT"
] | 5
|
2021-04-08T19:26:47.000Z
|
2022-01-24T16:30:18.000Z
|
indico/testing/fixtures/util.py
|
bpedersen2/indico
|
8410ee5f8f8530a8692f3dd2d4015c3074b0aa30
|
[
"MIT"
] | 2
|
2019-02-24T17:29:10.000Z
|
2021-04-08T19:23:27.000Z
|
# This file is part of Indico.
# Copyright (C) 2002 - 2021 CERN
#
# Indico is free software; you can redistribute it and/or
# modify it under the terms of the MIT License; see the
# LICENSE file for more details.
import inspect
from datetime import datetime
import freezegun
import pytest
from sqlalchemy import DateTime, cast
from sqlalchemy.sql.functions import _FunctionGenerator
@pytest.fixture
def monkeypatch_methods(monkeypatch):
"""Monkeypatch all methods from `cls` onto `target`.
This utility lets you easily mock multiple methods in an existing class.
In case of classmethods the binding will not be changed, i.e. `cls` will
keep pointing to the source class and not the target class.
"""
def _monkeypatch_methods(target, cls):
for name, method in inspect.getmembers(cls, inspect.ismethod):
if method.__self__ is None:
# For unbound methods we need to copy the underlying function
method = method.__func__
monkeypatch.setattr(f'{target}.{name}', method)
return _monkeypatch_methods
@pytest.fixture
def freeze_time(monkeypatch):
"""Return a function that freezes the current time.
It affects datetime.now, date.today, etc. and also SQLAlchemy's `func.now()`
which simply returns the current time from `datetime.now()` instead of
retrieving it using the actual `now()` function of PostgreSQL.
"""
freezers = []
orig_call = _FunctionGenerator.__call__
def FunctionGenerator_call(self, *args, **kwargs):
if self._FunctionGenerator__names == ['now']:
return cast(datetime.now().isoformat(), DateTime)
return orig_call(self, *args, **kwargs)
monkeypatch.setattr(_FunctionGenerator, '__call__', FunctionGenerator_call)
def _freeze_time(time_to_freeze):
freezer = freezegun.freeze_time(time_to_freeze)
freezer.start()
freezers.append(freezer)
yield _freeze_time
for freezer in reversed(freezers):
freezer.stop()
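# ---- Editor's usage sketch (not part of the original file). A test using the
# fixture above would look roughly like this (kept as a comment so this module
# is not collected as a test file):
#
#   def test_frozen_clock(freeze_time):
#       freeze_time(datetime(2020, 1, 1, 12, 0))
#       assert datetime.now() == datetime(2020, 1, 1, 12, 0)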
| 32.725806
| 80
| 0.706752
| 258
| 2,029
| 5.399225
| 0.468992
| 0.028715
| 0.022972
| 0.040201
| 0.041637
| 0.041637
| 0
| 0
| 0
| 0
| 0
| 0.005009
| 0.212913
| 2,029
| 61
| 81
| 33.262295
| 0.867251
| 0.383933
| 0
| 0.066667
| 0
| 0
| 0.021685
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.2
| 0
| 0.466667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c508b5e90ac0bb6b42082e2791baf6ee6cd6d24
| 704
|
py
|
Python
|
config.py
|
RomashkaGang/Update_Checker
|
1763ec5d8110462a72f5015abdc5c5be3e3c9498
|
[
"MIT"
] | null | null | null |
config.py
|
RomashkaGang/Update_Checker
|
1763ec5d8110462a72f5015abdc5c5be3e3c9498
|
[
"MIT"
] | null | null | null |
config.py
|
RomashkaGang/Update_Checker
|
1763ec5d8110462a72f5015abdc5c5be3e3c9498
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# encoding: utf-8
import os
# Whether to enable debugging. If enabled, exceptions raised during checks are no longer ignored.
# Recommended: enable in development, disable in production.
DEBUG_ENABLE = False
# SQLite database file name
SQLITE_FILE = "saved.db"
# Log file name
LOG_FILE = "log.txt"
# Whether to enable logging
ENABLE_LOGGER = True
# Interval between check loops (default: 180 minutes)
LOOP_CHECK_INTERVAL = 180 * 60
# Proxy server
PROXIES = "127.0.0.1:1080"
# Request timeout
TIMEOUT = 20
# Whether the proxy is SOCKS5
IS_SOCKS = False
# Whether to enable sending messages via the Telegram bot
ENABLE_SENDMESSAGE = False
# TG BOT TOKEN
TG_TOKEN = os.environ.get("TG_TOKEN", "")
# Send messages to...
TG_SENDTO = os.environ.get("TG_SENDTO", "")
if IS_SOCKS:
_PROXIES_DIC = {"http": "socks5h://%s" % PROXIES, "https": "socks5h://%s" % PROXIES}
else:
_PROXIES_DIC = {"http": PROXIES, "https": PROXIES}
| 16
| 88
| 0.681818
| 101
| 704
| 4.584158
| 0.663366
| 0.030238
| 0.051836
| 0.060475
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.042808
| 0.170455
| 704
| 43
| 89
| 16.372093
| 0.75
| 0.285511
| 0
| 0
| 0
| 0
| 0.179959
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.0625
| 0
| 0.0625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c51d615d63f8eb8639b0e23a11927b8ddf8f7ce
| 567
|
py
|
Python
|
scripts/count.py
|
hellocit/kadai2
|
896acc2394ea522d4b0d32db31321aea5b5f5dbd
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/count.py
|
hellocit/kadai2
|
896acc2394ea522d4b0d32db31321aea5b5f5dbd
|
[
"BSD-3-Clause"
] | null | null | null |
scripts/count.py
|
hellocit/kadai2
|
896acc2394ea522d4b0d32db31321aea5b5f5dbd
|
[
"BSD-3-Clause"
] | null | null | null |
#!/usr/bin/env python3
import rospy
from std_msgs.msg import Int32
import time
rospy.init_node('count')  # set the node name to "count"
pub = rospy.Publisher('count_up', Int32, queue_size=1)  # create the "count_up" publisher
rate = rospy.Rate(10)  # run at 10 Hz
n = 0
time.sleep(2)
while not rospy.is_shutdown():
n += 1
if n % 3 == 0:
print("これは%d" % n)
pub.publish(n)
else:
pub.publish(n)
if n == 227:
print("\nThis is unko\n")
rate.sleep()
| 24.652174
| 77
| 0.502646
| 76
| 567
| 3.671053
| 0.605263
| 0.050179
| 0.078853
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.050704
| 0.373898
| 567
| 22
| 78
| 25.772727
| 0.735211
| 0.112875
| 0
| 0.111111
| 0
| 0
| 0.068136
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c52d32a4a5ef2c93209163ebb29e7bf07a94aa5
| 2,028
|
py
|
Python
|
rx/concurrency/timeoutscheduler.py
|
yutiansut/RxPY
|
c3bbba77f9ebd7706c949141725e220096deabd4
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2018-11-16T09:07:13.000Z
|
2018-11-16T09:07:13.000Z
|
rx/concurrency/timeoutscheduler.py
|
yutiansut/RxPY
|
c3bbba77f9ebd7706c949141725e220096deabd4
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
rx/concurrency/timeoutscheduler.py
|
yutiansut/RxPY
|
c3bbba77f9ebd7706c949141725e220096deabd4
|
[
"ECL-2.0",
"Apache-2.0"
] | 1
|
2020-05-08T08:23:08.000Z
|
2020-05-08T08:23:08.000Z
|
import logging
from threading import Timer
from datetime import timedelta
from rx.core import Scheduler, Disposable
from rx.disposables import SingleAssignmentDisposable, CompositeDisposable
from .schedulerbase import SchedulerBase
log = logging.getLogger("Rx")
class TimeoutScheduler(SchedulerBase):
"""A scheduler that schedules work via a timed callback based upon platform."""
def schedule(self, action, state=None):
"""Schedules an action to be executed."""
disposable = SingleAssignmentDisposable()
def interval():
disposable.disposable = self.invoke_action(action, state)
timer = Timer(0, interval)
timer.setDaemon(True)
timer.start()
def dispose():
timer.cancel()
return CompositeDisposable(disposable, Disposable.create(dispose))
def schedule_relative(self, duetime, action, state=None):
"""Schedules an action to be executed after duetime."""
scheduler = self
timespan = self.to_timedelta(duetime)
if timespan == timedelta(0):
return scheduler.schedule(action, state)
disposable = SingleAssignmentDisposable()
def interval():
disposable.disposable = self.invoke_action(action, state)
seconds = timespan.total_seconds()
log.debug("timeout: %s", seconds)
timer = Timer(seconds, interval)
timer.setDaemon(True)
timer.start()
def dispose():
timer.cancel()
return CompositeDisposable(disposable, Disposable.create(dispose))
def schedule_absolute(self, duetime, action, state=None):
"""Schedules an action to be executed after duetime."""
duetime = self.to_datetime(duetime)
return self.schedule_relative(duetime - self.now, action, state)
def _start_timer(self, period, action):
timer = Timer(period, action)
timer.setDaemon(True)
timer.start()
return timer
timeout_scheduler = TimeoutScheduler()
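# ---- Editor's usage sketch (not part of the original file). Assumes the
# RxPY 1.x action signature `action(scheduler, state)` expected by
# `invoke_action` above.
if __name__ == "__main__":
    import time

    def action(scheduler, state):
        print("fired with state:", state)

    timeout_scheduler.schedule_relative(0.1, action, state=42)
    time.sleep(0.5)  # keep the main thread alive until the daemon timer fires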
| 28.971429
| 83
| 0.668146
| 208
| 2,028
| 6.461538
| 0.298077
| 0.057292
| 0.033482
| 0.053571
| 0.480655
| 0.459821
| 0.459821
| 0.459821
| 0.459821
| 0.427083
| 0
| 0.001299
| 0.240631
| 2,028
| 69
| 84
| 29.391304
| 0.871429
| 0.103057
| 0
| 0.418605
| 0
| 0
| 0.00723
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.186047
| false
| 0
| 0.139535
| 0
| 0.465116
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c53272867356ba8303ec22e79720d622e10756c
| 2,330
|
py
|
Python
|
vgazer/version/custom_checker/inputproto.py
|
edomin/vgazer
|
3ffe64f2517cbfbe0b0292bacc9fbf7391687e76
|
[
"CC0-1.0"
] | 2
|
2020-10-09T13:51:04.000Z
|
2020-11-11T12:29:41.000Z
|
vgazer/version/custom_checker/inputproto.py
|
edomin/vgazer
|
3ffe64f2517cbfbe0b0292bacc9fbf7391687e76
|
[
"CC0-1.0"
] | null | null | null |
vgazer/version/custom_checker/inputproto.py
|
edomin/vgazer
|
3ffe64f2517cbfbe0b0292bacc9fbf7391687e76
|
[
"CC0-1.0"
] | null | null | null |
import requests
from bs4 import BeautifulSoup
def Check(auth, mirrors):
response = requests.get("https://www.x.org/releases/individual/proto/")
html = response.content.decode("utf-8")
parsedHtml = BeautifulSoup(html, "html.parser")
links = parsedHtml.find_all("a")
maxVersionMajor = -1
maxVersionMinor = -1
maxVersionPatch = -1
maxVersionSubpatch = -1
for link in links:
if ("inputproto-" in link.text and ".tar.gz" in link.text
and ".sig" not in link.text):
version = link.text.split("-")[1].split(".tar.gz")[0].split(".")
versionMajor = int(version[0])
versionMinor = int(version[1])
if len(version) == 3:
versionPatch = int(version[2])
versionSubpatch = 0
elif len(version) == 2:
versionPatch = 0
versionSubpatch = 0
else:
versionPatch = int(version[2])
versionSubpatch = int(version[3])
if versionMajor > maxVersionMajor:
maxVersionMajor = versionMajor
maxVersionMinor = versionMinor
maxVersionPatch = versionPatch
maxVersionSubpatch = versionSubpatch
versionText = link.text.split("-")[1].split(".tar.gz")[0]
elif (versionMajor == maxVersionMajor
and versionMinor > maxVersionMinor):
maxVersionMinor = versionMinor
maxVersionPatch = versionPatch
maxVersionSubpatch = versionSubpatch
versionText = link.text.split("-")[1].split(".tar.gz")[0]
elif (versionMajor == maxVersionMajor
and versionMinor == maxVersionMinor
and versionPatch > maxVersionPatch):
maxVersionPatch = versionPatch
maxVersionSubpatch = versionSubpatch
versionText = link.text.split("-")[1].split(".tar.gz")[0]
elif (versionMajor == maxVersionMajor
and versionMinor == maxVersionMinor
and versionPatch == maxVersionPatch
and versionSubpatch > maxVersionSubpatch):
maxVersionSubpatch = versionSubpatch
versionText = link.text.split("-")[1].split(".tar.gz")[0]
return versionText
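# ---- Editor's note (equivalent sketch, not part of the original file): the
# comparison cascade above can be expressed by padding each parsed version to
# four integers and letting Python compare the tuples lexicographically.
def _best_version(version_strings):
    def key(text):
        parts = tuple(int(p) for p in text.split("."))
        return parts + (0,) * (4 - len(parts))  # pad, e.g. "2.3" -> (2, 3, 0, 0)
    return max(version_strings, key=key)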
| 40.877193
| 76
| 0.574249
| 198
| 2,330
| 6.752525
| 0.30303
| 0.047868
| 0.048616
| 0.052356
| 0.564697
| 0.507853
| 0.507853
| 0.507853
| 0.489155
| 0.489155
| 0
| 0.016466
| 0.322318
| 2,330
| 56
| 77
| 41.607143
| 0.830272
| 0
| 0
| 0.431373
| 0
| 0
| 0.053219
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019608
| false
| 0
| 0.039216
| 0
| 0.078431
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c53ef04b71561a704af8d84b7b218d0cc32e017
| 11,781
|
py
|
Python
|
src/pandas_profiling/model/summary_helpers.py
|
briangrahamww/pandas-profiling
|
62f8e3fd81720d444041069191c4aacd03d79ad5
|
[
"MIT"
] | null | null | null |
src/pandas_profiling/model/summary_helpers.py
|
briangrahamww/pandas-profiling
|
62f8e3fd81720d444041069191c4aacd03d79ad5
|
[
"MIT"
] | 4
|
2021-11-01T15:17:07.000Z
|
2022-01-26T15:22:15.000Z
|
src/pandas_profiling/model/summary_helpers.py
|
briangrahamww/pandas-profiling
|
62f8e3fd81720d444041069191c4aacd03d79ad5
|
[
"MIT"
] | null | null | null |
import os
import string
from collections import Counter
from datetime import datetime
from functools import partial
from pathlib import Path
from typing import Optional
import numpy as np
import pandas as pd
from scipy.stats.stats import chisquare
from tangled_up_in_unicode import block, block_abbr, category, category_long, script
from pandas_profiling.config import Settings
from pandas_profiling.model.summary_helpers_image import (
extract_exif,
hash_image,
is_image_truncated,
open_image,
)
def mad(arr: np.ndarray) -> np.ndarray:
"""Median Absolute Deviation: a "Robust" version of standard deviation.
Indices variability of the sample.
https://en.wikipedia.org/wiki/Median_absolute_deviation
"""
return np.median(np.abs(arr - np.median(arr)))
def named_aggregate_summary(series: pd.Series, key: str) -> dict:
summary = {
f"max_{key}": np.max(series),
f"mean_{key}": np.mean(series),
f"median_{key}": np.median(series),
f"min_{key}": np.min(series),
}
return summary
def length_summary(series: pd.Series, summary: dict = None) -> dict:
if summary is None:
summary = {}
length = series.str.len()
summary.update({"length": length})
summary.update(named_aggregate_summary(length, "length"))
return summary
def file_summary(series: pd.Series) -> dict:
"""
Args:
series: series to summarize
Returns:
A dict of series: file size and created/accessed/modified timestamps.
"""
# Transform
stats = series.map(lambda x: os.stat(x))
def convert_datetime(x: float) -> str:
return datetime.fromtimestamp(x).strftime("%Y-%m-%d %H:%M:%S")
# Transform some more
summary = {
"file_size": stats.map(lambda x: x.st_size),
"file_created_time": stats.map(lambda x: x.st_ctime).map(convert_datetime),
"file_accessed_time": stats.map(lambda x: x.st_atime).map(convert_datetime),
"file_modified_time": stats.map(lambda x: x.st_mtime).map(convert_datetime),
}
return summary
def path_summary(series: pd.Series) -> dict:
"""
Args:
series: series to summarize
Returns:
A dict with the common prefix, value counts for stems, suffixes, names,
parents and anchors, and their unique counts.
"""
# TODO: optimize using value counts
summary = {
"common_prefix": os.path.commonprefix(series.values.tolist())
or "No common prefix",
"stem_counts": series.map(lambda x: os.path.splitext(x)[0]).value_counts(),
"suffix_counts": series.map(lambda x: os.path.splitext(x)[1]).value_counts(),
"name_counts": series.map(lambda x: os.path.basename(x)).value_counts(),
"parent_counts": series.map(lambda x: os.path.dirname(x)).value_counts(),
"anchor_counts": series.map(lambda x: os.path.splitdrive(x)[0]).value_counts(),
}
summary["n_stem_unique"] = len(summary["stem_counts"])
summary["n_suffix_unique"] = len(summary["suffix_counts"])
summary["n_name_unique"] = len(summary["name_counts"])
summary["n_parent_unique"] = len(summary["parent_counts"])
summary["n_anchor_unique"] = len(summary["anchor_counts"])
return summary
def url_summary(series: pd.Series) -> dict:
"""
Args:
series: series to summarize
Returns:
A dict of value-count series for scheme, netloc, path, query and fragment.
"""
summary = {
"scheme_counts": series.map(lambda x: x.scheme).value_counts(),
"netloc_counts": series.map(lambda x: x.netloc).value_counts(),
"path_counts": series.map(lambda x: x.path).value_counts(),
"query_counts": series.map(lambda x: x.query).value_counts(),
"fragment_counts": series.map(lambda x: x.fragment).value_counts(),
}
return summary
def count_duplicate_hashes(image_descriptions: dict) -> int:
"""
Args:
image_descriptions: per-image dicts that may contain a "hash" entry
Returns:
The number of image hashes that duplicate an earlier occurrence.
"""
counts = pd.Series(
[x["hash"] for x in image_descriptions if "hash" in x]
).value_counts()
return counts.sum() - len(counts)
def extract_exif_series(image_exifs: list) -> dict:
"""
Args:
image_exifs: list of per-image exif dicts
Returns:
A dict with exif key counts plus value counts per exif key.
"""
exif_keys = []
exif_values: dict = {}
for image_exif in image_exifs:
# Extract key
exif_keys.extend(list(image_exif.keys()))
# Extract values per key
for exif_key, exif_val in image_exif.items():
if exif_key not in exif_values:
exif_values[exif_key] = []
exif_values[exif_key].append(exif_val)
series = {"exif_keys": pd.Series(exif_keys, dtype=object).value_counts().to_dict()}
for k, v in exif_values.items():
series[k] = pd.Series(v).value_counts()
return series
def extract_image_information(
path: Path, exif: bool = False, hash: bool = False
) -> dict:
"""Extracts all image information per file, as opening files is slow
Args:
path: Path to the image
exif: extract exif information
hash: calculate hash (for duplicate detection)
Returns:
A dict containing image information
"""
information: dict = {}
image = open_image(path)
information["opened"] = image is not None
if image is not None:
information["truncated"] = is_image_truncated(image)
if not information["truncated"]:
information["size"] = image.size
if exif:
information["exif"] = extract_exif(image)
if hash:
information["hash"] = hash_image(image)
return information
def image_summary(series: pd.Series, exif: bool = False, hash: bool = False) -> dict:
"""
Args:
series: series to summarize
exif: extract exif information
hash: calculate hash (for duplicate detection)
Returns:
A dict with the truncated-image count, image dimensions, and width/height/area
summaries (plus hash/exif information when requested).
"""
image_information = series.apply(
partial(extract_image_information, exif=exif, hash=hash)
)
summary = {
"n_truncated": sum(
[1 for x in image_information if "truncated" in x and x["truncated"]]
),
"image_dimensions": pd.Series(
[x["size"] for x in image_information if "size" in x],
name="image_dimensions",
),
}
image_widths = summary["image_dimensions"].map(lambda x: x[0])
summary.update(named_aggregate_summary(image_widths, "width"))
image_heights = summary["image_dimensions"].map(lambda x: x[1])
summary.update(named_aggregate_summary(image_heights, "height"))
image_areas = image_widths * image_heights
summary.update(named_aggregate_summary(image_areas, "area"))
if hash:
summary["n_duplicate_hash"] = count_duplicate_hashes(image_information)
if exif:
exif_series = extract_exif_series(
[x["exif"] for x in image_information if "exif" in x]
)
summary["exif_keys_counts"] = exif_series["exif_keys"]
summary["exif_data"] = exif_series
return summary
def get_character_counts(series: pd.Series) -> Counter:
"""Function to return the character counts
Args:
series: the Series to process
Returns:
A dict with character counts
"""
return Counter(series.str.cat())
def counter_to_series(counter: Counter) -> pd.Series:
if not counter:
return pd.Series([], dtype=object)
counter_as_tuples = counter.most_common()
items, counts = zip(*counter_as_tuples)
return pd.Series(counts, index=items)
def unicode_summary(series: pd.Series) -> dict:
# Unicode Character Summaries (category and script name)
character_counts = get_character_counts(series)
character_counts_series = counter_to_series(character_counts)
char_to_block = {key: block(key) for key in character_counts.keys()}
char_to_category_short = {key: category(key) for key in character_counts.keys()}
char_to_script = {key: script(key) for key in character_counts.keys()}
summary = {
"n_characters": len(character_counts_series),
"character_counts": character_counts_series,
"category_alias_values": {
key: category_long(value) for key, value in char_to_category_short.items()
},
"block_alias_values": {
key: block_abbr(value) for key, value in char_to_block.items()
},
}
# Retrieve original distribution
block_alias_counts: Counter = Counter()
per_block_char_counts: dict = {
k: Counter() for k in summary["block_alias_values"].values()
}
for char, n_char in character_counts.items():
block_name = summary["block_alias_values"][char]
block_alias_counts[block_name] += n_char
per_block_char_counts[block_name][char] = n_char
summary["block_alias_counts"] = counter_to_series(block_alias_counts)
summary["block_alias_char_counts"] = {
k: counter_to_series(v) for k, v in per_block_char_counts.items()
}
script_counts: Counter = Counter()
per_script_char_counts: dict = {k: Counter() for k in char_to_script.values()}
for char, n_char in character_counts.items():
script_name = char_to_script[char]
script_counts[script_name] += n_char
per_script_char_counts[script_name][char] = n_char
summary["script_counts"] = counter_to_series(script_counts)
summary["script_char_counts"] = {
k: counter_to_series(v) for k, v in per_script_char_counts.items()
}
category_alias_counts: Counter = Counter()
per_category_alias_char_counts: dict = {
k: Counter() for k in summary["category_alias_values"].values()
}
for char, n_char in character_counts.items():
category_alias_name = summary["category_alias_values"][char]
category_alias_counts[category_alias_name] += n_char
per_category_alias_char_counts[category_alias_name][char] += n_char
summary["category_alias_counts"] = counter_to_series(category_alias_counts)
summary["category_alias_char_counts"] = {
k: counter_to_series(v) for k, v in per_category_alias_char_counts.items()
}
# Unique counts
summary["n_category"] = len(summary["category_alias_counts"])
summary["n_scripts"] = len(summary["script_counts"])
summary["n_block_alias"] = len(summary["block_alias_counts"])
if len(summary["category_alias_counts"]) > 0:
summary["category_alias_counts"].index = summary[
"category_alias_counts"
].index.str.replace("_", " ")
return summary
def histogram_compute(
config: Settings,
finite_values: np.ndarray,
n_unique: int,
name: str = "histogram",
weights: Optional[np.ndarray] = None,
) -> dict:
stats = {}
bins = config.plot.histogram.bins
bins_arg = "auto" if bins == 0 else min(bins, n_unique)
stats[name] = np.histogram(finite_values, bins=bins_arg, weights=weights)
max_bins = config.plot.histogram.max_bins
if bins_arg == "auto" and len(stats[name][1]) > max_bins:
stats[name] = np.histogram(finite_values, bins=max_bins, weights=None)
return stats
def chi_square(
values: Optional[np.ndarray] = None, histogram: Optional[np.ndarray] = None
) -> dict:
if histogram is None:
histogram, _ = np.histogram(values, bins="auto")
return dict(chisquare(histogram)._asdict())
def word_summary(series: pd.Series) -> dict:
# TODO: preprocess (stopwords)
# TODO: configurable lowercase/punctuation etc.
word_lists = series.str.lower().str.split()
words = word_lists.explode()
words = words.str.strip(string.punctuation)
return {"word_counts": words.value_counts()}
| 31.5
| 88
| 0.641881
| 1,473
| 11,781
| 4.898846
| 0.154786
| 0.032428
| 0.023559
| 0.02439
| 0.301414
| 0.221868
| 0.173503
| 0.114468
| 0.110588
| 0.075111
| 0
| 0.001011
| 0.244037
| 11,781
| 373
| 89
| 31.58445
| 0.80923
| 0.097275
| 0
| 0.111607
| 0
| 0
| 0.108726
| 0.021665
| 0
| 0
| 0
| 0.005362
| 0
| 1
| 0.075893
| false
| 0
| 0.058036
| 0.004464
| 0.214286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c54e5deea62f99049023a90de0d70c094863c10
| 10,143
|
py
|
Python
|
inverse_warp.py
|
ZephyrII/competitive_colaboration
|
a557d1e23ef2c0b8e3794f085a79bfffb860f9df
|
[
"MIT"
] | 357
|
2019-03-12T07:17:32.000Z
|
2022-03-24T14:13:24.000Z
|
inverse_warp.py
|
DevLooptt/SJTU-CS386-2021Fall-DIP-Project
|
2167e089be80ca01911ba55c07b83c9f26f147e7
|
[
"MIT"
] | 27
|
2019-03-11T19:16:11.000Z
|
2021-05-30T13:30:19.000Z
|
inverse_warp.py
|
DevLooptt/SJTU-CS386-2021Fall-DIP-Project
|
2167e089be80ca01911ba55c07b83c9f26f147e7
|
[
"MIT"
] | 66
|
2019-03-27T14:16:22.000Z
|
2021-11-11T12:40:33.000Z
|
# Author: Anurag Ranjan
# Copyright (c) 2019, Anurag Ranjan
# All rights reserved.
# based on github.com/ClementPinard/SfMLearner-Pytorch
from __future__ import division
import torch
from torch.autograd import Variable
pixel_coords = None
def set_id_grid(depth):
global pixel_coords
b, h, w = depth.size()
i_range = Variable(torch.arange(0, h).view(1, h, 1).expand(1,h,w)).type_as(depth) # [1, H, W]
j_range = Variable(torch.arange(0, w).view(1, 1, w).expand(1,h,w)).type_as(depth) # [1, H, W]
ones = Variable(torch.ones(1,h,w)).type_as(depth)
pixel_coords = torch.stack((j_range, i_range, ones), dim=1) # [1, 3, H, W]
def check_sizes(input, input_name, expected):
condition = [input.ndimension() == len(expected)]
for i,size in enumerate(expected):
if size.isdigit():
condition.append(input.size(i) == int(size))
assert(all(condition)), "wrong size for {}, expected {}, got {}".format(input_name, 'x'.join(expected), list(input.size()))
def pixel2cam(depth, intrinsics_inv):
global pixel_coords
"""Transform coordinates in the pixel frame to the camera frame.
Args:
depth: depth maps -- [B, H, W]
intrinsics_inv: intrinsics_inv matrix for each element of batch -- [B, 3, 3]
Returns:
array of (u,v,1) cam coordinates -- [B, 3, H, W]
"""
b, h, w = depth.size()
if (pixel_coords is None) or pixel_coords.size(2) != h or pixel_coords.size(3) != w:
set_id_grid(depth)
current_pixel_coords = pixel_coords[:,:,:h,:w].expand(b,3,h,w).contiguous().view(b, 3, -1) # [B, 3, H*W]
cam_coords = intrinsics_inv.bmm(current_pixel_coords).view(b, 3, h, w)
return cam_coords * depth.unsqueeze(1)
def cam2pixel(cam_coords, proj_c2p_rot, proj_c2p_tr, padding_mode):
"""Transform coordinates in the camera frame to the pixel frame.
Args:
cam_coords: pixel coordinates defined in the first camera coordinates system -- [B, 4, H, W]
proj_c2p_rot: rotation matrix of cameras -- [B, 3, 4]
proj_c2p_tr: translation vectors of cameras -- [B, 3, 1]
Returns:
array of [-1,1] coordinates -- [B, 2, H, W]
"""
b, _, h, w = cam_coords.size()
cam_coords_flat = cam_coords.view(b, 3, -1) # [B, 3, H*W]
if proj_c2p_rot is not None:
pcoords = proj_c2p_rot.bmm(cam_coords_flat)
else:
pcoords = cam_coords_flat
if proj_c2p_tr is not None:
pcoords = pcoords + proj_c2p_tr # [B, 3, H*W]
X = pcoords[:, 0]
Y = pcoords[:, 1]
Z = pcoords[:, 2].clamp(min=1e-3)
X_norm = 2*(X / Z)/(w-1) - 1 # Normalized, -1 if on extreme left, 1 if on extreme right (x = w-1) [B, H*W]
Y_norm = 2*(Y / Z)/(h-1) - 1 # Idem [B, H*W]
if padding_mode == 'zeros':
X_mask = ((X_norm > 1)+(X_norm < -1)).detach()
X_norm[X_mask] = 2 # make sure that no point in warped image is a combination of im and gray
Y_mask = ((Y_norm > 1)+(Y_norm < -1)).detach()
Y_norm[Y_mask] = 2
pixel_coords = torch.stack([X_norm, Y_norm], dim=2) # [B, H*W, 2]
return pixel_coords.view(b,h,w,2)
def euler2mat(angle):
"""Convert euler angles to rotation matrix.
Reference: https://github.com/pulkitag/pycaffe-utils/blob/master/rot_utils.py#L174
Args:
angle: rotation angle along 3 axis (in radians) -- size = [B, 3]
Returns:
Rotation matrix corresponding to the euler angles -- size = [B, 3, 3]
"""
B = angle.size(0)
x, y, z = angle[:,0], angle[:,1], angle[:,2]
cosz = torch.cos(z)
sinz = torch.sin(z)
zeros = z.detach()*0
ones = zeros.detach()+1
zmat = torch.stack([cosz, -sinz, zeros,
sinz, cosz, zeros,
zeros, zeros, ones], dim=1).view(B, 3, 3)
cosy = torch.cos(y)
siny = torch.sin(y)
ymat = torch.stack([cosy, zeros, siny,
zeros, ones, zeros,
-siny, zeros, cosy], dim=1).view(B, 3, 3)
cosx = torch.cos(x)
sinx = torch.sin(x)
xmat = torch.stack([ones, zeros, zeros,
zeros, cosx, -sinx,
zeros, sinx, cosx], dim=1).view(B, 3, 3)
rotMat = xmat.bmm(ymat).bmm(zmat)
return rotMat
def quat2mat(quat):
"""Convert quaternion coefficients to rotation matrix.
Args:
quat: first three coefficients of the rotation quaternion; the fourth is then computed so the quaternion has norm 1 -- size = [B, 3]
Returns:
Rotation matrix corresponding to the quaternion -- size = [B, 3, 3]
"""
norm_quat = torch.cat([quat[:,:1].detach()*0 + 1, quat], dim=1)
norm_quat = norm_quat/norm_quat.norm(p=2, dim=1, keepdim=True)
w, x, y, z = norm_quat[:,0], norm_quat[:,1], norm_quat[:,2], norm_quat[:,3]
B = quat.size(0)
w2, x2, y2, z2 = w.pow(2), x.pow(2), y.pow(2), z.pow(2)
wx, wy, wz = w*x, w*y, w*z
xy, xz, yz = x*y, x*z, y*z
rotMat = torch.stack([w2 + x2 - y2 - z2, 2*xy - 2*wz, 2*wy + 2*xz,
2*wz + 2*xy, w2 - x2 + y2 - z2, 2*yz - 2*wx,
2*xz - 2*wy, 2*wx + 2*yz, w2 - x2 - y2 + z2], dim=1).view(B, 3, 3)
return rotMat
def pose_vec2mat(vec, rotation_mode='euler'):
"""
Convert 6DoF parameters to transformation matrix.
Args:
vec: 6DoF parameters in the order of tx, ty, tz, rx, ry, rz -- [B, 6]
Returns:
A transformation matrix -- [B, 3, 4]
"""
translation = vec[:, :3].unsqueeze(-1) # [B, 3, 1]
rot = vec[:,3:]
if rotation_mode == 'euler':
rot_mat = euler2mat(rot) # [B, 3, 3]
elif rotation_mode == 'quat':
rot_mat = quat2mat(rot) # [B, 3, 3]
transform_mat = torch.cat([rot_mat, translation], dim=2) # [B, 3, 4]
return transform_mat
def flow_warp(img, flow, padding_mode='zeros'):
"""
Inverse warp a source image to the target image plane.
Args:
img: the source image (where to sample pixels) -- [B, 3, H, W]
flow: flow map of the target image -- [B, 2, H, W]
Returns:
Source image warped to the target image plane
"""
check_sizes(img, 'img', 'BCHW')
check_sizes(flow, 'flow', 'B2HW')
bs, _, h, w = flow.size()
u = flow[:,0,:,:]
v = flow[:,1,:,:]
grid_x = Variable(torch.arange(0, w).view(1, 1, w).expand(1,h,w), requires_grad=False).type_as(u).expand_as(u) # [bs, H, W]
grid_y = Variable(torch.arange(0, h).view(1, h, 1).expand(1,h,w), requires_grad=False).type_as(v).expand_as(v) # [bs, H, W]
X = grid_x + u
Y = grid_y + v
X = 2*(X/(w-1.0) - 0.5)
Y = 2*(Y/(h-1.0) - 0.5)
grid_tf = torch.stack((X,Y), dim=3)
img_tf = torch.nn.functional.grid_sample(img, grid_tf, padding_mode=padding_mode)
return img_tf
def pose2flow(depth, pose, intrinsics, intrinsics_inv, rotation_mode='euler', padding_mode=None):
"""
Converts pose parameters to rigid optical flow
"""
check_sizes(depth, 'depth', 'BHW')
check_sizes(pose, 'pose', 'B6')
check_sizes(intrinsics, 'intrinsics', 'B33')
check_sizes(intrinsics_inv, 'intrinsics', 'B33')
assert(intrinsics_inv.size() == intrinsics.size())
bs, h, w = depth.size()
grid_x = Variable(torch.arange(0, w).view(1, 1, w).expand(1,h,w), requires_grad=False).type_as(depth).expand_as(depth) # [bs, H, W]
grid_y = Variable(torch.arange(0, h).view(1, h, 1).expand(1,h,w), requires_grad=False).type_as(depth).expand_as(depth) # [bs, H, W]
cam_coords = pixel2cam(depth, intrinsics_inv) # [B,3,H,W]
pose_mat = pose_vec2mat(pose, rotation_mode) # [B,3,4]
# Get projection matrix for tgt camera frame to source pixel frame
proj_cam_to_src_pixel = intrinsics.bmm(pose_mat) # [B, 3, 4]
src_pixel_coords = cam2pixel(cam_coords, proj_cam_to_src_pixel[:,:,:3], proj_cam_to_src_pixel[:,:,-1:], padding_mode) # [B,H,W,2]
X = (w-1)*(src_pixel_coords[:,:,:,0]/2.0 + 0.5) - grid_x
Y = (h-1)*(src_pixel_coords[:,:,:,1]/2.0 + 0.5) - grid_y
return torch.stack((X,Y), dim=1)
def flow2oob(flow):
check_sizes(flow, 'flow', 'B2HW')
bs, _, h, w = flow.size()
u = flow[:,0,:,:]
v = flow[:,1,:,:]
grid_x = Variable(torch.arange(0, w).view(1, 1, w).expand(1,h,w), requires_grad=False).type_as(u).expand_as(u) # [bs, H, W]
grid_y = Variable(torch.arange(0, h).view(1, h, 1).expand(1,h,w), requires_grad=False).type_as(v).expand_as(v) # [bs, H, W]
X = grid_x + u
Y = grid_y + v
X = 2*(X/(w-1.0) - 0.5)
Y = 2*(Y/(h-1.0) - 0.5)
oob = (X.abs()>1).add(Y.abs()>1)>0
return oob
def occlusion_mask(grid, depth):
check_sizes(grid, 'grid', 'BHW2')
check_sizes(depth, 'depth', 'BHW')
mask = grid
return mask
def inverse_warp(img, depth, pose, intrinsics, intrinsics_inv, rotation_mode='euler', padding_mode='zeros'):
"""
Inverse warp a source image to the target image plane.
Args:
img: the source image (where to sample pixels) -- [B, 3, H, W]
depth: depth map of the target image -- [B, H, W]
pose: 6DoF pose parameters from target to source -- [B, 6]
intrinsics: camera intrinsic matrix -- [B, 3, 3]
intrinsics_inv: inverse of the intrinsic matrix -- [B, 3, 3]
Returns:
Source image warped to the target image plane
"""
check_sizes(img, 'img', 'B3HW')
check_sizes(depth, 'depth', 'BHW')
check_sizes(pose, 'pose', 'B6')
check_sizes(intrinsics, 'intrinsics', 'B33')
check_sizes(intrinsics_inv, 'intrinsics', 'B33')
assert(intrinsics_inv.size() == intrinsics.size())
batch_size, _, img_height, img_width = img.size()
cam_coords = pixel2cam(depth, intrinsics_inv) # [B,3,H,W]
pose_mat = pose_vec2mat(pose, rotation_mode) # [B,3,4]
# Get projection matrix for tgt camera frame to source pixel frame
proj_cam_to_src_pixel = intrinsics.bmm(pose_mat) # [B, 3, 4]
src_pixel_coords = cam2pixel(cam_coords, proj_cam_to_src_pixel[:,:,:3], proj_cam_to_src_pixel[:,:,-1:], padding_mode) # [B,H,W,2]
projected_img = torch.nn.functional.grid_sample(img, src_pixel_coords, padding_mode=padding_mode)
return projected_img
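# ---- Editor's usage sketch (not part of the original file; random tensors,
# assumes a reasonably recent PyTorch where Variable is a no-op wrapper). ----
if __name__ == "__main__":
    B, H, W = 2, 8, 8
    img = torch.randn(B, 3, H, W)
    depth = torch.rand(B, H, W) + 0.5   # strictly positive depths
    pose = torch.zeros(B, 6)            # identity pose
    K = torch.eye(3).repeat(B, 1, 1)    # dummy intrinsics
    warped = inverse_warp(img, depth, pose, K, K.inverse())
    print(warped.shape)  # torch.Size([2, 3, 8, 8])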
| 35.714789
| 136
| 0.602484
| 1,638
| 10,143
| 3.592796
| 0.159341
| 0.015633
| 0.005607
| 0.006797
| 0.425998
| 0.383857
| 0.357009
| 0.357009
| 0.353271
| 0.337978
| 0
| 0.036207
| 0.234842
| 10,143
| 283
| 137
| 35.840989
| 0.722072
| 0.242729
| 0
| 0.296053
| 0
| 0
| 0.028659
| 0
| 0
| 0
| 0
| 0
| 0.019737
| 1
| 0.078947
| false
| 0
| 0.019737
| 0
| 0.164474
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c5685982f284836ad84a3186b0e3af7e951a8fa
| 7,853
|
py
|
Python
|
lingvo/core/egdd.py
|
ramonsanabria/lingvo
|
f38dc3801d36ed08a4117d4a66e6f1f10f76909d
|
[
"Apache-2.0"
] | null | null | null |
lingvo/core/egdd.py
|
ramonsanabria/lingvo
|
f38dc3801d36ed08a4117d4a66e6f1f10f76909d
|
[
"Apache-2.0"
] | null | null | null |
lingvo/core/egdd.py
|
ramonsanabria/lingvo
|
f38dc3801d36ed08a4117d4a66e6f1f10f76909d
|
[
"Apache-2.0"
] | null | null | null |
# Lint as: python2, python3
# Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Exponentiated Gradient Delta-Delta optimizer."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import clip_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import linalg_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import state_ops
from tensorflow.python.training import optimizer
# pylint: enable=g-direct-tensorflow-import
class EGDD(optimizer.Optimizer):
"""A version of GD Momentum with adaptive gain and learning rate.
Exponentiated Gradient Delta-delta optimizer starts with a local gain of 1.0
for every weight and a lr_scale of 1.0 for all weights. The EGDD update rule
applies:
momentum <- mu * momentum + learning_rate * gain * grad
var <- var - lr_scale * momentum
The gain as well as the lr_scale are updated using the unnormalized
exponentiated gradient algorithm [KW97].
Reference: TBA
[KW97] Kivinen, J., & Warmuth, M. K. Exponentiated gradient versus gradient
descent for linear predictors. Information and Computation, 1997.
"""
def __init__(self,
learning_rate,
momentum,
beta=0.9,
gain_learning_rate=0.01,
scale_learning_rate=0.001,
initial_gain=1.0,
min_gain=1e-2,
max_gain=1e2,
initial_scale=1.0,
min_scale=1e-1,
max_scale=1e1,
use_directions=True,
use_signs=True,
name="EGDD"):
"""Construct a new EG-DD optimizer.
Args:
learning_rate: A `Tensor` or a floating point value. The learning rate.
momentum: A `Tensor` or a floating point value.
beta: `float` decay rate of the gradient EMA.
gain_learning_rate: `float` gain learning rate.
scale_learning_rate: `float` scale learning rate.
initial_gain: `float` initial gain.
min_gain: `float` minimum gain.
max_gain: `float` maximum gain.
initial_scale: `float` initial scale.
min_scale: `float` minimum learning rate scale.
max_scale: `float` maximum learning rate scale.
use_directions: `bool` whether to use directions only for scale updates.
use_signs: `bool` whether to use the signs for updating gains.
name: Optional name prefix for the operations created when applying
gradients.
Raises:
ValueError: If the `initial_accumulator_value` is invalid.
"""
super(EGDD, self).__init__(False, name)
self._learning_rate = learning_rate
self._momentum = momentum
self._beta = beta
self._gain_learning_rate = gain_learning_rate
self._scale_learning_rate = scale_learning_rate
self._initial_gain = initial_gain
self._min_gain = min_gain
self._max_gain = max_gain
self._initial_scale = initial_scale
self._min_scale = min_scale
self._max_scale = max_scale
self._use_directions = use_directions
self._use_signs = use_signs
def _create_slots(self, var_list):
for v in var_list:
self._zeros_slot(v, "momentum", self._name)
self._zeros_slot(v, "gbar", self._name)
g_tensor = ops.convert_to_tensor(v)
gain_init = self._initial_gain * array_ops.ones_like(g_tensor)
_ = self._get_or_make_slot(v, self._initial_scale * array_ops.ones((1)),
"lr_scale", self._name)
_ = self._get_or_make_slot(v, gain_init, "gain", self._name)
_ = self._get_or_make_slot(v, array_ops.zeros((1)), "counter", self._name)
def _prepare(self):
learning_rate = self._call_if_callable(self._learning_rate)
self._learning_rate_tensor = ops.convert_to_tensor(
learning_rate, name="learning_rate")
momentum = self._call_if_callable(self._momentum)
self._momentum_tensor = ops.convert_to_tensor(momentum, name="momentum")
def _apply_dense(self, grad, var):
lr_scale = self.get_slot(var, "lr_scale")
momentum = self.get_slot(var, "momentum")
gbar = self.get_slot(var, "gbar")
gain = self.get_slot(var, "gain")
counter = self.get_slot(var, "counter")
counter_updated = state_ops.assign(counter, counter + 1)
# lr_scale update uses normalized grad and momentum to be independent of dim
normalized_grad = grad / (linalg_ops.norm(grad) + 1e-10)
normalized_momentum = momentum / (linalg_ops.norm(momentum) + 1e-10)
# Apply EG updates on lr_scale:
# grad_lr_scale = -inner_product(current_grad, old_momentum)
# lr_scale <- lr_scale * exp(-scale_learning_rate * grad_lr_scale)
lr_scale_unnormalized_updated = clip_ops.clip_by_value(
lr_scale * math_ops.exp(
self._scale_learning_rate * math_ops.reduce_sum(grad * momentum)),
self._min_scale, self._max_scale)
lr_scale_normalized_updated = clip_ops.clip_by_value(
lr_scale * math_ops.exp(self._scale_learning_rate * math_ops.reduce_sum(
normalized_grad * normalized_momentum)), self._min_scale,
self._max_scale)
lr_scale_updated = state_ops.assign(
lr_scale,
array_ops.where(self._use_directions, lr_scale_normalized_updated,
lr_scale_unnormalized_updated))
# remove the bias of zero initialization in gbar
corrected_gbar = gbar / (
1.0 - self._beta**math_ops.maximum(counter_updated - 1, 1))
# Apply EG updates on gain:
# grad_gain = - current_grad * old_gbar
# gain <- gain * exp(-gain_learning_rate * grad_gain)
gain_unnormalized_updated = clip_ops.clip_by_value(
gain * math_ops.exp(self._gain_learning_rate * grad * corrected_gbar),
self._min_gain, self._max_gain)
# Normalized update uses sign(grad) * sign(gbar) as a proxy for grad_gain.
gain_normalized_updated = clip_ops.clip_by_value(
gain * math_ops.exp(self._gain_learning_rate * math_ops.sign(grad) *
math_ops.sign(gbar)), self._min_gain,
self._max_gain)
gain_updated = state_ops.assign(
gain,
array_ops.where(self._use_signs, gain_normalized_updated,
gain_unnormalized_updated))
scaled_g = self._learning_rate_tensor * gain_updated * grad
with ops.control_dependencies([lr_scale_updated, scaled_g]):
momentum_updated = state_ops.assign(
momentum, self._momentum_tensor * momentum + scaled_g)
gbar_updated = state_ops.assign(
gbar, self._beta * gbar + (1.0 - self._beta) * grad)
with ops.control_dependencies([gbar_updated]):
return state_ops.assign_sub(var, lr_scale_updated * momentum_updated)
def _resource_apply_dense(self, grad, var):
return self._apply_dense(grad, var)
# Sparse gradients are not handled currently and is part of future work.
def _resource_apply_sparse(self, grad_values, var, grad_indices):
return control_flow_ops.no_op()
def _apply_sparse(self, grad, var):
return control_flow_ops.no_op()
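# ---- Editor's usage sketch (not part of the original file; assumes a
# TF1-style graph/session workflow, kept as a comment):
#
#   opt = EGDD(learning_rate=0.1, momentum=0.9)
#   train_op = opt.minimize(loss)
#   with tf.compat.v1.Session() as sess:
#       sess.run(tf.compat.v1.global_variables_initializer())
#       sess.run(train_op)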
| 42.448649
| 80
| 0.696676
| 1,078
| 7,853
| 4.765306
| 0.227273
| 0.072416
| 0.031147
| 0.031341
| 0.248783
| 0.158848
| 0.108624
| 0.079034
| 0.068912
| 0.053728
| 0
| 0.009359
| 0.210875
| 7,853
| 184
| 81
| 42.679348
| 0.81959
| 0.356042
| 0
| 0.019048
| 0
| 0
| 0.017705
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.104762
| 0.028571
| 0.219048
| 0.009524
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c56c7d2316646a3222e3085d34d2f51b63f5dac
| 3,556
|
py
|
Python
|
examples/nn_cudamat.py
|
cloudspectatordevelopment/cudamat
|
d26cf019a7855077b7d4344ae1a3202a156c5170
|
[
"BSD-3-Clause"
] | 526
|
2015-01-05T14:33:10.000Z
|
2022-03-09T12:41:37.000Z
|
examples/nn_cudamat.py
|
cloudspectatordevelopment/cudamat
|
d26cf019a7855077b7d4344ae1a3202a156c5170
|
[
"BSD-3-Clause"
] | 71
|
2015-01-01T01:03:09.000Z
|
2021-10-01T06:57:07.000Z
|
examples/nn_cudamat.py
|
cloudspectatordevelopment/cudamat
|
d26cf019a7855077b7d4344ae1a3202a156c5170
|
[
"BSD-3-Clause"
] | 139
|
2015-01-13T21:23:38.000Z
|
2022-02-24T03:26:34.000Z
|
# This file shows how to implement a single hidden layer neural network for
# performing binary classification on the GPU using cudamat.
from __future__ import division
import pdb
import time
import numpy as np
import cudamat as cm
from cudamat import learn as cl
import util
# initialize CUDA
cm.cublas_init()
# load data
util.load('mnist49.dat', globals())
# Put training data onto the GPU.
dat_train = dat_train/255.
dat_train = dat_train - (np.mean(dat_train, 1)+10**-8)[:, np.newaxis]
dev_train = cm.CUDAMatrix(dat_train)
dev_lbl = cm.CUDAMatrix(lbl_train)
# training parameters
epsilon = 0.01
momentum = 0.9
num_epochs = 30
batch_size = 128
num_batches = dat_train.shape[1]//batch_size
# model parameters
dim_in = dat_train.shape[0]
dim_out = 1
num_hid = 1024
# initialize weights
w_w1 = cm.CUDAMatrix(dim_in ** -0.5 * np.random.randn(dim_in, num_hid))
w_b1 = cm.CUDAMatrix(np.zeros((num_hid, 1)))
w_w2 = cm.CUDAMatrix(num_hid ** -0.5 * np.random.randn(num_hid, dim_out))
w_b2 = cm.CUDAMatrix(np.zeros((dim_out, 1)))
# initialize weight update matrices
wu_w1 = cm.empty(w_w1.shape).assign(0)
wu_b1 = cm.empty(w_b1.shape).assign(0)
wu_w2 = cm.empty(w_w2.shape).assign(0)
wu_b2 = cm.empty(w_b2.shape).assign(0)
# initialize temporary storage
h = cm.empty((num_hid, batch_size))
out = cm.empty((dim_out, batch_size))
delta = cm.empty((num_hid, batch_size))
# Train neural network.
start_time = time.time()
for epoch in range(num_epochs):
print("Epoch %i" % (epoch + 1))
err = []
for batch in range(num_batches):
# get current minibatch
inp = dev_train.slice(batch*batch_size,(batch + 1)*batch_size)
target = dev_lbl.slice(batch*batch_size,(batch + 1)*batch_size)
# forward pass
cm.dot(w_w1.T, inp, target = h)
h.add_col_vec(w_b1)
h.apply_sigmoid()
cm.dot(w_w2.T, h, target = out)
out.add_col_vec(w_b2)
out.apply_sigmoid()
# back prop errors
out.subtract(target) # compute error
# gradients for w_w2 and w_b2
wu_w2.add_dot(h, out.T, beta = momentum)
wu_b2.add_sums(out, axis = 1, beta = momentum)
# compute delta
cm.dot(w_w2, out, target = delta)
# delta = delta * h * (1 - h)
cl.mult_by_sigmoid_deriv(delta, h)
# gradients for w_w1 and w_b1
wu_w1.add_dot(inp, delta.T, beta = momentum)
wu_b1.add_sums(delta, axis = 1, beta = momentum)
# update weights
w_w1.subtract_mult(wu_w1, epsilon/batch_size)
w_b1.subtract_mult(wu_b1, epsilon/batch_size)
w_w2.subtract_mult(wu_w2, epsilon/batch_size)
w_b2.subtract_mult(wu_b2, epsilon/batch_size)
# calculate error on current minibatch
err.append(np.abs(out.asarray())>0.5)
print("Training misclassification rate: %f" % np.mean(err))
print("Time: %f" % (time.time() - start_time))
# Evaluate neural network on test data.
# Load test data onto the GPU.
dat_test = dat_test/255.
dat_test = dat_test - np.mean(dat_test, 1)[:, np.newaxis]
dev_test = cm.CUDAMatrix(dat_test)
dev_lbl = cm.CUDAMatrix(lbl_test)
# Initialize temporary storage.
h = cm.empty((num_hid, dat_test.shape[1]))
out = cm.empty((dim_out, dat_test.shape[1]))
# forward pass
cm.dot(w_w1.T, dev_test, target = h)
h.add_col_vec(w_b1)
h.apply_sigmoid()
cm.dot(w_w2.T, h, target = out)
out.add_col_vec(w_b2)
out.apply_sigmoid()
# compute error
out.subtract(dev_lbl)
print("Testing misclassification rate: %f" % np.mean(np.abs(out.asarray())>0.5))
cm.cublas_shutdown()
| 26.537313
| 80
| 0.683071
| 594
| 3,556
| 3.875421
| 0.240741
| 0.050825
| 0.013032
| 0.017376
| 0.256733
| 0.172459
| 0.144222
| 0.100782
| 0.071242
| 0.071242
| 0
| 0.030892
| 0.18982
| 3,556
| 133
| 81
| 26.736842
| 0.768136
| 0.188695
| 0
| 0.135135
| 0
| 0
| 0.03359
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.094595
| 0
| 0.094595
| 0.054054
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c57f754fa08c4237dd780441aaf7916aa4b730c
| 3,530
|
py
|
Python
|
tests/test_publish.py
|
oarepo/oarepo-references-draft
|
7e5ad4225c4ace9781d5de952c3765a65b33fd8e
|
[
"MIT"
] | null | null | null |
tests/test_publish.py
|
oarepo/oarepo-references-draft
|
7e5ad4225c4ace9781d5de952c3765a65b33fd8e
|
[
"MIT"
] | null | null | null |
tests/test_publish.py
|
oarepo/oarepo-references-draft
|
7e5ad4225c4ace9781d5de952c3765a65b33fd8e
|
[
"MIT"
] | null | null | null |
import uuid
from invenio_indexer.api import RecordIndexer
from invenio_pidstore.models import PersistentIdentifier, PIDStatus
from invenio_records_draft.api import RecordContext
from invenio_records_draft.proxies import current_drafts
from invenio_search import RecordsSearch, current_search, current_search_client
from sample.records.config import DraftRecord, PublishedRecord
from tests.helpers import disable_test_authenticated
def test_publish(app, db, schemas, mappings, prepare_es):
with disable_test_authenticated():
with db.session.begin_nested():
draft_uuid = uuid.uuid4()
rec1 = DraftRecord.create({
'id': '1',
'title': 'rec1'
}, id_=draft_uuid)
draft1_pid = PersistentIdentifier.create(
pid_type='drecid', pid_value='1', status=PIDStatus.REGISTERED,
object_type='rec', object_uuid=draft_uuid
)
published_uuid = uuid.uuid4()
published = PublishedRecord.create({
'id': '3',
'title': 'rec1a'
}, id_=published_uuid)
published_pid = PersistentIdentifier.create(
pid_type='recid', pid_value='3', status=PIDStatus.REGISTERED,
object_type='rec', object_uuid=published_uuid
)
draft2_uuid = uuid.uuid4()
rec2 = DraftRecord.create({
'id': '2',
'title': 'rec2',
'ref': {'$ref': 'http://localhost/drafts/records/1'},
'ref_pub': {'$ref': 'http://localhost/records/3'}
}, id_=draft2_uuid)
draft2_pid = PersistentIdentifier.create(
pid_type='drecid', pid_value='2', status=PIDStatus.REGISTERED,
object_type='rec', object_uuid=draft2_uuid
)
RecordIndexer().index(rec2)
current_search_client.indices.refresh()
current_search_client.indices.flush()
es_draft2 = RecordsSearch(index='draft-records-record-v1.0.0').\
get_record(draft2_pid.object_uuid).execute()
assert len(es_draft2.hits) == 1
current_drafts.publish(RecordContext(record=rec2, record_pid=draft2_pid))
published2_pid = PersistentIdentifier.get(pid_type='recid', pid_value=draft2_pid.pid_value)
pr = PublishedRecord.get_record(published2_pid.object_uuid)
assert pr.dumps() == {
'$schema': 'https://localhost/schemas/records/record-v1.0.0.json',
'id': '2',
'ref': {'$ref': 'http://localhost/records/1'},
'ref_pub': {'$ref': 'http://localhost/records/3'},
'title': 'rec2'
}
current_search_client.indices.refresh()
current_search_client.indices.flush()
es_published2 = RecordsSearch(index='records-record-v1.0.0').\
get_record(published2_pid.object_uuid).execute()
assert len(es_published2.hits) == 1
es_published2 = es_published2.hits[0].to_dict()
es_published2.pop('_created')
es_published2.pop('_updated')
assert es_published2 == {
'$schema': 'https://localhost/schemas/records/record-v1.0.0.json',
'id': '2',
'ref': {'published': '1'},
'ref_pub': {'published': '3'},
'title': 'rec2'}
es_draft2 = RecordsSearch(index='draft-records-record-v1.0.0').\
get_record(draft2_pid.object_uuid).execute()
assert len(es_draft2.hits) == 0
| 40.574713
| 99
| 0.608215
| 380
| 3,530
| 5.413158
| 0.236842
| 0.03403
| 0.046184
| 0.038892
| 0.457948
| 0.424404
| 0.403986
| 0.376276
| 0.257657
| 0.22071
| 0
| 0.026235
| 0.265722
| 3,530
| 86
| 100
| 41.046512
| 0.767361
| 0
| 0
| 0.178082
| 0
| 0
| 0.136261
| 0.021246
| 0
| 0
| 0
| 0
| 0.068493
| 1
| 0.013699
| false
| 0
| 0.109589
| 0
| 0.123288
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c591440c5e8dee3c070bc7ca52d3ba19f2b4743
| 5,580
|
py
|
Python
|
examples/ROS/tiscamera.py
|
xiaotiansf/tiscamera
|
8451449788f7429621240e2bbce065d65c5ac10e
|
[
"Apache-2.0"
] | null | null | null |
examples/ROS/tiscamera.py
|
xiaotiansf/tiscamera
|
8451449788f7429621240e2bbce065d65c5ac10e
|
[
"Apache-2.0"
] | null | null | null |
examples/ROS/tiscamera.py
|
xiaotiansf/tiscamera
|
8451449788f7429621240e2bbce065d65c5ac10e
|
[
"Apache-2.0"
] | null | null | null |
import os
import subprocess
from collections import namedtuple
import gi
gi.require_version("Gst", "1.0")
gi.require_version("Tcam", "0.1")
from gi.repository import Tcam, Gst, GLib, GObject
DeviceInfo = namedtuple("DeviceInfo", "status name identifier connection_type")
CameraProperty = namedtuple("CameraProperty", "status value min max default step type flags category group")
# Disable pylint false positives
# pylint:disable=E0712
class Camera:
""""""
def __init__(self, serial, width, height, framerate, color, liveview):
""" Constructor.
Creates the sink pipeline and the source pipeline.
:param serial: Serial number of the camera to use.
:param width: Width of the video format, e.g. 640, 1920 etc,
:param height: Height of the video format, e.g. 480, 1080
:param framerate: Numerator of the frame rate, e.g. 15, 30, 60 etc
:param color: If True, color is used, else gray scale
:param liveview: If True an own live window is opened.
"""
Gst.init([])
self.height = height
self.width = width
self.sample = None
self.samplelocked = False
self.newsample = False
self.pid = -1
self.__remove_tmp_file()
pixelformat = "BGRx"
if not color:
pixelformat = "GRAY8"
if liveview:
p = 'tcambin serial="%s" name=source ! video/x-raw,format=%s,width=%d,height=%d,framerate=%d/1' % (serial, pixelformat, width, height, framerate,)
p += ' ! tee name=t'
p += ' t. ! queue ! videoconvert ! video/x-raw,format=RGB ,width=%d,height=%d,framerate=%d/1! shmsink socket-path=/tmp/ros_mem' % (width, height, framerate,)
p += ' t. ! queue ! videoconvert ! ximagesink'
else:
p = 'tcambin serial="%s" name=source ! video/x-raw,format=%s,width=%d,height=%d,framerate=%d/1' % (
serial, pixelformat, width, height, framerate,)
p += ' ! videoconvert ! video/x-raw,format=RGB ,width=%d,height=%d,framerate=%d/1! shmsink socket-path=/tmp/ros_mem' % (width, height, framerate,)
print(p)
try:
self.pipeline = Gst.parse_launch(p)
except GLib.Error as error:
raise RuntimeError("Error creating pipeline: {0}".format(error))
self.pipeline.set_state(Gst.State.READY)
if self.pipeline.get_state(10 * Gst.SECOND)[0] != Gst.StateChangeReturn.SUCCESS:
raise RuntimeError("Failed to start video stream.")
# Query a pointer to our source, so we can set properties.
self.source = self.pipeline.get_by_name("source")
# Create gscam_config variable with content
gscam = 'shmsrc socket-path=/tmp/ros_mem ! video/x-raw-rgb, width=%d,height=%d,framerate=%d/1' % (width, height, framerate,)
gscam += ',bpp=24,depth=24,blue_mask=16711680, green_mask=65280, red_mask=255 ! ffmpegcolorspace'
os.environ["GSCAM_CONFIG"] = gscam
def start_pipeline(self):
""" Starts the camera sink pipeline and the rosrun process
:return:
"""
try:
self.pipeline.set_state(Gst.State.PLAYING)
self.pid = subprocess.Popen(["rosrun", "gscam", "gscam"])
except GLib.Error as error:
print("Error starting pipeline: {0}".format(error))
raise
def stop_pipeline(self):
""" Stops the camera pipeline. Should also kill the rosrun process, but is not implemented
:return:
"""
self.pipeline.set_state(Gst.State.PAUSED)
self.pipeline.set_state(Gst.State.READY)
self.pipeline.set_state(Gst.State.NULL)
self.pid.kill()
def list_properties(self):
""" Helper function. List available properties
:return:
"""
for name in self.source.get_tcam_property_names():
print(name)
def get_property(self, property_name):
""" Return the value of the passed property.
Use list_properties for querying names of available properties.
:param property_name: Name of the property, e.g. Gain, Exposure, Gain Auto.
:return: Current value of the property.
"""
try:
return CameraProperty(*self.source.get_tcam_property(property_name))
except GLib.Error as error:
raise RuntimeError("Error get Property {0}: {1}", property_name, format(error))
def set_property(self, property_name, value):
""" Set a property. Use list_properties for querying names of available properties.
:param property_name: Name of the property, e.g. Gain, Exposure, Gain Auto.
:param value: Value to be set.
:return:
"""
try:
self.source.set_tcam_property(property_name, value)
except GLib.Error as error:
raise RuntimeError("Error set Property {0}: {1}", property_name, format(error))
def push_property(self, property_name):
""" Simplify push properties, like Auto Focus one push
:param property_name: Name of the property to be pushed
:return:
"""
try:
self.source.set_tcam_property(property_name, True)
except GLib.Error as error:
raise RuntimeError("Error set Property {0}: {1}", property_name, format(error))
def __remove_tmp_file(self):
""" Delete the memory file used by the pipelines to share memory
:return:
"""
try:
os.remove('/tmp/ros_mem')
except OSError:
pass
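# ---- Editor's usage sketch (not part of the original file; the serial number
# below is a placeholder). ----
if __name__ == "__main__":
    cam = Camera("12345678", 640, 480, 30, color=True, liveview=False)
    cam.start_pipeline()
    cam.list_properties()
    cam.stop_pipeline()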
| 36.953642
| 170
| 0.624014
| 707
| 5,580
| 4.837341
| 0.292786
| 0.042105
| 0.035088
| 0.019006
| 0.398246
| 0.371637
| 0.33655
| 0.30731
| 0.263158
| 0.236257
| 0
| 0.01609
| 0.264875
| 5,580
| 150
| 171
| 37.2
| 0.81765
| 0.248208
| 0
| 0.192308
| 0
| 0.076923
| 0.251469
| 0.093279
| 0
| 0
| 0
| 0
| 0
| 1
| 0.102564
| false
| 0.012821
| 0.064103
| 0
| 0.192308
| 0.038462
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c59c1fafc0db31d12d2731c296964f8cac7b7ce
| 274
|
py
|
Python
|
helpers/config.py
|
bertrand-caron/cv_blog_flask
|
ce779db31805f0b1a7bbc9a6f09a7d3fe1af74b2
|
[
"MIT"
] | null | null | null |
helpers/config.py
|
bertrand-caron/cv_blog_flask
|
ce779db31805f0b1a7bbc9a6f09a7d3fe1af74b2
|
[
"MIT"
] | null | null | null |
helpers/config.py
|
bertrand-caron/cv_blog_flask
|
ce779db31805f0b1a7bbc9a6f09a7d3fe1af74b2
|
[
"MIT"
] | null | null | null |
from typing import Dict, Any
from yaml import safe_load
def get_config() -> Dict[str, Any]:
try:
with open('config/config.yml') as fh:
    return safe_load(fh)
except Exception as e:
raise Exception('ERROR: Missing config/config.yml file.') from e
CONFIG = get_config()
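# Usage sketch: CONFIG is the parsed contents of config/config.yml as a plain
# dict. The key below is hypothetical -- the real schema is not shown here.
#
#     from helpers.config import CONFIG
#     debug = CONFIG.get("debug", False)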
| 24.909091
| 72
| 0.671533
| 40
| 274
| 4.55
| 0.6
| 0.098901
| 0.164835
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.208029
| 274
| 10
| 73
| 27.4
| 0.83871
| 0
| 0
| 0
| 0
| 0
| 0.20073
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.125
| false
| 0
| 0.25
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c59df650fcdcb09e11e3c4ab2f95de326942e41
| 4,758
|
py
|
Python
|
raman/unmixing.py
|
falckt/raman
|
8f9fae0e211dd49cebaba98e71787bb663be8fcf
|
[
"BSD-3-Clause"
] | 1
|
2020-05-21T11:56:32.000Z
|
2020-05-21T11:56:32.000Z
|
raman/unmixing.py
|
falckt/raman
|
8f9fae0e211dd49cebaba98e71787bb663be8fcf
|
[
"BSD-3-Clause"
] | null | null | null |
raman/unmixing.py
|
falckt/raman
|
8f9fae0e211dd49cebaba98e71787bb663be8fcf
|
[
"BSD-3-Clause"
] | null | null | null |
# Author: Tillmann Falck <tf-raman@lucidus.de>
#
# License: BSD 3 clause
#
# SPDX-License-Identifier: BSD-3-Clause
import collections.abc
from itertools import product
import cvxpy as cp
import numpy as np
def sunsal_tv(A, Y, lambda_1, lambda_tv, sweep='prod', tv_type='iso', additional_constraint='none'):
r"""
Sparse unmixing via variable splitting and augmented Lagrangian and total variation (SUnSAL-TV)
solves the following optimization problem
min || Y - A * X ||_F + lambda_1 || X ||_1 + lambda_TV || X ||_TV
X
subject to X >= 0 # if additional_constraint is 'positive'
sum(X, axis=0) == 1 # if additional_constraint is 'sum_to_one'
with
|| X ||_1 = \sum_i | x_i | # for a flattened array X
|| X ||_TV = \sum_i (\sum_j |X_ij|^p)^(1/p) # p = 1 for non-isotropic and p = 2 for isotropic
Parameters
----------
A: array - N x L, spectral library, where L is the number of library elements and N the number of points in each spectrum
Y: array - N x m_1 x ... x m_d, target spectra, m_1, ..., m_d are spatial dimensions
lambda_1: float - regularization constant for elementwise sparsity inducing term
lambda_TV: float - regularization constant for TV regularizer (sparse changes along spatial dimensions)
sweep: {'prod', 'zip'} - how sequences of lambda_1 and lambda_tv values are combined: Cartesian product ('prod') or pairwise ('zip')
tv_type: {'iso', 'non-iso'} - type of total variation norm, isotropic or non-isotropic
additional_constraint: {'none', 'positive', 'sum_to_one'} - additional constraint on solution
Returns
-------
X: array - L x m_1 x ... x m_d
References
----------
[1] M. Iordache, J. M. Bioucas-Dias and A. Plaza, "Total Variation Spatial Regularization for
Sparse Hyperspectral Unmixing," in IEEE Transactions on Geoscience and Remote Sensing,
vol. 50, no. 11, pp. 4484-4502, Nov. 2012.
[2] Matlab implementation, downloaded from
https://github.com/ricardoborsoi/MUA_SparseUnmixing/blob/57802d5b2f77649fb32c2e4c75258f8d91084f7d/sunsal_tv.m
[3] https://dsp.stackexchange.com/questions/57977/isotropic-and-anisotropic-in-the-total-variation-framework
"""
# get dimensions
num_spectra, lib_size = A.shape
sample_dims = Y.shape[1:]
assert Y.shape[0] == num_spectra, 'Size of library does not match size of target variables'
# reshape Y from [spectra x Xpos x Ypos x ...] --> [spectra x (Xpos * Ypos * ...)]
Y = Y.reshape((num_spectra, -1))
num_samples = Y.shape[1]
# create optimization variables
positive_solution = (additional_constraint == 'positive')
X = cp.Variable((lib_size, num_samples), nonneg=positive_solution)
p_lambda_1 = cp.Parameter(1, nonneg=True)
p_lambda_tv = cp.Parameter(1, nonneg=True)
# calculate first differences in each direction
idx = np.r_[:num_samples]
idx_s = idx.reshape(sample_dims)
differences = []
for n, d in enumerate(sample_dims):
ia = np.ravel(idx_s.take(indices=np.r_[np.r_[1:d], 0], axis=n))
ib = np.ravel(idx_s.take(indices=np.r_[:d], axis=n))
differences.append(X[:, ia] - X[:, ib])
# compute TV norm
if tv_type == 'iso':
    # elementwise 2-norm across the spatial directions, summed over all entries
    stacked = cp.vstack([cp.vec(x) for x in differences])
    tv = cp.sum(cp.norm(stacked, 2, axis=0))
elif tv_type == 'non-iso':
D = [cp.sum(cp.abs(x)) for x in differences]
tv = cp.sum(D)
else:
raise ValueError(f'TV norm type `{tv_type}` is not defined')
# define objective function
obj = cp.norm(Y - A @ X, p='fro') + p_lambda_1 * cp.pnorm(X, p=1) + p_lambda_tv * tv
# constraints
constr = []
if additional_constraint == 'sum_to_one':
constr.append(cp.sum(X, axis=0) == 1)
# optimization problem
prob = cp.Problem(cp.Minimize(obj), constr)
# init parameter sweep
# if lambda_1 and lambda_tv are scalar return result
# otherwise return a dict with (lambda_1, lambda_tv): result
lambda_scalar = True
if not isinstance(lambda_1, collections.abc.Iterable):
lambda_1 = [lambda_1]
else:
lambda_scalar = False
if not isinstance(lambda_tv, collections.abc.Iterable):
lambda_tv = [lambda_tv]
else:
lambda_scalar = False
if sweep == 'prod':
l_iter = product(lambda_1, lambda_tv)
elif sweep == 'zip':
l_iter = zip(lambda_1, lambda_tv)
else:
raise ValueError(f'Parameter sweep `{sweep}` not supported')
results = {}
for l_1, l_tv in l_iter:
p_lambda_1.value = l_1
p_lambda_tv.value = l_tv
# solution
prob.solve(solver=cp.SCS, verbose=True)
results[(l_1, l_tv)] = X.value.reshape((lib_size, ) + sample_dims)
if lambda_scalar:
return results.popitem()[1]
else:
return results
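# Minimal usage sketch for sunsal_tv with synthetic data; the sizes and
# regularization weights below are arbitrary and only illustrate the expected
# shapes, not meaningful unmixing settings.
if __name__ == "__main__":
    rng = np.random.default_rng(0)
    A_lib = rng.random((50, 8))                 # N = 50 spectral points, L = 8 library members
    X_true = rng.random((8, 4, 4))              # L x m_1 x m_2 true abundances
    Y_obs = np.tensordot(A_lib, X_true, axes=1) # N x 4 x 4 target spectra
    X_hat = sunsal_tv(A_lib, Y_obs, lambda_1=0.1, lambda_tv=0.05,
                      tv_type='non-iso', additional_constraint='positive')
    print(X_hat.shape)                          # -> (8, 4, 4)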
| 34.985294
| 125
| 0.640395
| 692
| 4,758
| 4.254335
| 0.316474
| 0.03091
| 0.022079
| 0.02038
| 0.071332
| 0.021739
| 0.021739
| 0.016984
| 0
| 0
| 0
| 0.025719
| 0.240017
| 4,758
| 135
| 126
| 35.244444
| 0.78844
| 0.464061
| 0
| 0.147541
| 0
| 0
| 0.07315
| 0
| 0
| 0
| 0
| 0
| 0.016393
| 1
| 0.016393
| false
| 0
| 0.065574
| 0
| 0.114754
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c5c4475e9ffb17b4a7ed0975ab0f7646445b8ba
| 4,011
|
py
|
Python
|
account.py
|
MaherClinc/stockly-bs
|
4a2c5741673b85bee9100afef0b404520cb10b5d
|
[
"MIT"
] | null | null | null |
account.py
|
MaherClinc/stockly-bs
|
4a2c5741673b85bee9100afef0b404520cb10b5d
|
[
"MIT"
] | null | null | null |
account.py
|
MaherClinc/stockly-bs
|
4a2c5741673b85bee9100afef0b404520cb10b5d
|
[
"MIT"
] | null | null | null |
from sqlalchemy import exc
from sqlalchemy.sql.expression import func
from models import Watchlist, Portfolio, Activity
from app import db
import metric
def buy_stock(ticker, units):
unit_price = metric.get_price(ticker)
total_price = units * unit_price
max_id = db.session.query(func.max(Activity.activity_id)).scalar()
if max_id is None:
old_buying_power = 100000
else:
old_buying_power = Activity.query.filter(Activity.activity_id == max_id).all()[0].buying_power
new_buying_power = old_buying_power - total_price
if new_buying_power > 0:
try:
db.session.add(Activity(ticker=ticker, units=units, order_type="b",
    unit_price=unit_price, total_price=total_price, buying_power=new_buying_power))
update_portfolio_buy(ticker, units, total_price)
db.session.commit()
return { 'status': True, 'error': None }
except exc.SQLAlchemyError:
return { 'status': False, 'error': 'database error' }
else:
return { 'status': False, 'error': 'Insufficient Funds' }
def sell_stock(ticker, units):
unit_price = metric.get_price(ticker)
row = Portfolio.query.filter(Portfolio.ticker == ticker).all()
if len(row):
available_units = int(row[0].total_units)
units = min(available_units, units) if units >= 1 else int(available_units*units)
total_price = units * unit_price
max_id = db.session.query(func.max(Activity.activity_id)).scalar()
old_buying_power = Activity.query.filter(Activity.activity_id == max_id).all()[0].buying_power
new_buying_power = old_buying_power + total_price
try:
db.session.add(Activity(ticker=ticker, units=units, order_type="s",
    unit_price=unit_price, total_price=total_price, buying_power=new_buying_power))
update_portfolio_sell(ticker, units, total_price)
db.session.commit()
return { 'status': True, 'amount': units, 'error': None }
except exc.SQLAlchemyError:
return { 'status': False, 'error': 'database error' }
else:
return { 'status': False, 'error': 'No Stock by this name' }
def update_portfolio_buy(ticker, units, total_price):
row = Portfolio.query.filter(Portfolio.ticker == ticker).all()
if len(row):
row[0].total_units = int(row[0].total_units) + units
row[0].total_invested = int(row[0].total_invested) + total_price
else:
db.session.add( Portfolio(ticker=ticker, total_units=units, total_invested=total_price) )
def update_portfolio_sell(ticker, units, total_price):
row = Portfolio.query.filter(Portfolio.ticker == ticker).all()
if len(row):
row[0].total_invested = int(row[0].total_invested) - ((int(row[0].total_invested)/int(row[0].total_units)) * units)
row[0].total_units = int(row[0].total_units) - units
Portfolio.query.filter(Portfolio.total_units == 0).delete()
def get_watchlist():
rows = Watchlist.query.all()
if len(rows):
watchlist = [row.ticker for row in rows]
else:
watchlist = []
return watchlist
def get_portfolio():
rows = Portfolio.query.all()
portfolio = [{'ticker':row.ticker, 'total_units':row.total_units, 'total_invested':row.total_invested} for row in rows]
return portfolio
def is_stock_in_watchlist(ticker):
rows = Watchlist.query.filter(Watchlist.ticker == ticker).all()
return len(rows) > 0
def add_to_watchlist(ticker):
industry = metric.get_company(ticker)["industry"]
try:
db.session.add( Watchlist(ticker=ticker, industry=industry) )
db.session.commit()
return True
except exc.SQLAlchemyError:
return False
def remove_from_watchlist(ticker):
try:
Watchlist.query.filter(Watchlist.ticker == ticker).delete()
db.session.commit()
return True
except exc.SQLAlchemyError:
return False
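# Usage sketch (requires the Flask app and SQLAlchemy session from `app` to be
# configured; the ticker is illustrative):
#     result = buy_stock("AAPL", 10)
#     if not result["status"]:
#         print(result["error"])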
| 36.463636
| 126
| 0.667913
| 518
| 4,011
| 4.978764
| 0.15251
| 0.054285
| 0.038387
| 0.032571
| 0.651415
| 0.651415
| 0.61962
| 0.59209
| 0.59209
| 0.538969
| 0
| 0.006993
| 0.215657
| 4,011
| 109
| 127
| 36.798165
| 0.812778
| 0
| 0
| 0.448276
| 0
| 0
| 0.044933
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.103448
| false
| 0
| 0.057471
| 0
| 0.310345
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c5c6bcf1d0acee591337a1dbb0080fdcf270c1f
| 3,175
|
py
|
Python
|
scripts/addons/kekit/ke_fit2grid.py
|
Tilapiatsu/blender-custom_conf
|
05592fedf74e4b7075a6228b8448a5cda10f7753
|
[
"MIT"
] | 2
|
2020-04-16T22:12:40.000Z
|
2022-01-22T17:18:45.000Z
|
scripts/addons/kekit/ke_fit2grid.py
|
Tilapiatsu/blender-custom_conf
|
05592fedf74e4b7075a6228b8448a5cda10f7753
|
[
"MIT"
] | null | null | null |
scripts/addons/kekit/ke_fit2grid.py
|
Tilapiatsu/blender-custom_conf
|
05592fedf74e4b7075a6228b8448a5cda10f7753
|
[
"MIT"
] | 2
|
2019-05-16T04:01:09.000Z
|
2020-08-25T11:42:26.000Z
|
bl_info = {
"name": "ke_fit2grid",
"author": "Kjell Emanuelsson",
"category": "Modeling",
"version": (1, 0, 2),
"blender": (2, 80, 0),
}
import bpy
import bmesh
from .ke_utils import get_loops, correct_normal, average_vector
from mathutils import Vector, Matrix
def fit_to_grid(co, grid):
x, y, z = round(co[0] / grid) * grid, round(co[1] / grid) * grid, round(co[2] / grid) * grid
return round(x, 5), round(y, 5), round(z, 5)
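# Example of the snapping arithmetic: with a 0.5 grid each coordinate is
# rounded to the nearest multiple of 0.5, e.g.
#     fit_to_grid((0.26, 1.74, -0.1), 0.5)  ->  (0.5, 1.5, 0.0)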
class VIEW3D_OT_ke_fit2grid(bpy.types.Operator):
bl_idname = "view3d.ke_fit2grid"
bl_label = "Fit2Grid"
bl_description = "EDIT: Snaps verts of selected VERTS/EDGES/FACES to nearest set world grid step."
bl_options = {'REGISTER', 'UNDO'}
set_grid: bpy.props.FloatProperty()
@classmethod
def poll(cls, context):
return context.object is not None
def execute(self, context):
if not self.set_grid:
grid_setting = bpy.context.scene.kekit.fit2grid
else:
grid_setting = self.set_grid
obj = context.object
if obj.type == 'MESH' and obj.data.is_editmode:
od = obj.data
bm = bmesh.from_edit_mesh(od)
obj_mtx = obj.matrix_world.copy()
verts = [v for v in bm.verts if v.select]
if verts:
vert_cos = [obj_mtx @ v.co for v in verts]
modified = []
for v,co in zip(verts, vert_cos):
new_coords = fit_to_grid(co, grid_setting)
old_coords = tuple([round(i, 5) for i in co])
if new_coords != old_coords:
v.co = obj_mtx.inverted() @ Vector(new_coords)
modified.append(v)
bpy.ops.mesh.select_all(action='DESELECT')
if modified:
for v in modified:
v.select = True
bmesh.update_edit_mesh(od)
bm.free()
bpy.ops.object.mode_set(mode="OBJECT")
bpy.ops.object.mode_set(mode='EDIT')
if modified:
bpy.ops.mesh.select_mode(type="VERT")
self.report({"INFO"}, "Fit2Grid: %i vert(s) not on grid" % len(modified))
else:
self.report({"INFO"}, "Fit2Grid: On grid - All good!")
else:
self.report({"INFO"}, "Fit2Grid: Nothing Selected?")
elif context.mode == "OBJECT":
new_loc = fit_to_grid(obj.location, grid_setting)
obj.location = new_loc
else:
self.report({"INFO"}, "Fit2Grid: Invalid object/mode - Aborted")
return {'FINISHED'}
# -------------------------------------------------------------------------------------------------
# Class Registration & Unregistration
# -------------------------------------------------------------------------------------------------
def register():
bpy.utils.register_class(VIEW3D_OT_ke_fit2grid)
def unregister():
bpy.utils.unregister_class(VIEW3D_OT_ke_fit2grid)
if __name__ == "__main__":
register()
| 31.75
| 102
| 0.521575
| 370
| 3,175
| 4.294595
| 0.345946
| 0.031466
| 0.035242
| 0.055381
| 0.14034
| 0.028949
| 0
| 0
| 0
| 0
| 0
| 0.013236
| 0.309921
| 3,175
| 99
| 103
| 32.070707
| 0.712004
| 0.072756
| 0
| 0.084507
| 0
| 0
| 0.127891
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070423
| false
| 0
| 0.056338
| 0.014085
| 0.253521
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c5e0af3e6fbbe4ea83ab673bc82739437ec8f74
| 453
|
py
|
Python
|
python/day5-1.py
|
Aerdan/adventcode-2020
|
83120aa8c7fc9d1f2d34780610401e3c6d4f583b
|
[
"BSD-1-Clause"
] | null | null | null |
python/day5-1.py
|
Aerdan/adventcode-2020
|
83120aa8c7fc9d1f2d34780610401e3c6d4f583b
|
[
"BSD-1-Clause"
] | null | null | null |
python/day5-1.py
|
Aerdan/adventcode-2020
|
83120aa8c7fc9d1f2d34780610401e3c6d4f583b
|
[
"BSD-1-Clause"
] | null | null | null |
#!/usr/bin/env python3
def binary(code, max, bits):
ret = []
for i in range(max):
ret.append(bits[code[i]])
return int(''.join(ret), base=2)
mid = 0
with open('input5.txt') as f:
for line in f.readlines():
line = line[:-1]
row = binary(line[:7], 7, {'F': '0', 'B': '1'})
col = binary(line[7:], 3, {'R': '1', 'L': '0'})
sid = row * 8 + col
mid = sid if sid > mid else mid
print(mid)
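# Worked example (the sample boarding pass from the puzzle statement):
#     binary("FBFBBFF", 7, {'F': '0', 'B': '1'})  ->  0b0101100 = 44  (row)
#     binary("RLR", 3, {'R': '1', 'L': '0'})      ->  0b101     = 5   (column)
#     seat id = 44 * 8 + 5 = 357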
| 19.695652
| 55
| 0.487859
| 73
| 453
| 3.027397
| 0.589041
| 0.090498
| 0.099548
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.044025
| 0.298013
| 453
| 22
| 56
| 20.590909
| 0.650943
| 0.046358
| 0
| 0
| 0
| 0
| 0.04186
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0
| 0
| 0.142857
| 0.071429
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c5fa2ddc156126b4dccbe0c281c6059666eccf4
| 501
|
py
|
Python
|
dummy_server.py
|
dpmkl/heimdall
|
184f169f0be9f6b6b708364725f5db8b1f249d9c
|
[
"MIT"
] | null | null | null |
dummy_server.py
|
dpmkl/heimdall
|
184f169f0be9f6b6b708364725f5db8b1f249d9c
|
[
"MIT"
] | null | null | null |
dummy_server.py
|
dpmkl/heimdall
|
184f169f0be9f6b6b708364725f5db8b1f249d9c
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
import SimpleHTTPServer
import SocketServer
import logging
import threading
PORT = 8000
class GetHandler(SimpleHTTPServer.SimpleHTTPRequestHandler):
def do_GET(self):
self.send_response(200)
self.send_header('Content-type','text/html')
self.end_headers()
self.wfile.write("Hello World ! '{}'".format(self.path))
return
# serve_forever() blocks, so the original loop only ever served the first port;
# run the first three servers in daemon threads and the last in the main thread.
servers = [SocketServer.TCPServer(("", PORT + i), GetHandler) for i in range(4)]
for httpd in servers[:-1]:
    t = threading.Thread(target=httpd.serve_forever)
    t.daemon = True
    t.start()
servers[-1].serve_forever()
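# Quick manual check from a shell once the script is running:
#     curl http://localhost:8000/anything
#     -> Hello World ! '/anything'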
| 25.05
| 64
| 0.682635
| 59
| 501
| 5.711864
| 0.745763
| 0.047478
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019802
| 0.193613
| 501
| 20
| 65
| 25.05
| 0.814356
| 0.03992
| 0
| 0
| 0
| 0
| 0.081081
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.2
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c605332955c4b043be9f4d88d8eb7ca6bb505c8
| 934
|
py
|
Python
|
scripts/49-cat-logs.py
|
jmviz/xd
|
f905e5c61b2835073b19cc3fa0d6917432fa7ece
|
[
"MIT"
] | 179
|
2016-03-05T03:14:56.000Z
|
2022-02-12T22:48:55.000Z
|
scripts/49-cat-logs.py
|
jmviz/xd
|
f905e5c61b2835073b19cc3fa0d6917432fa7ece
|
[
"MIT"
] | 24
|
2016-02-14T07:43:42.000Z
|
2021-12-14T01:09:54.000Z
|
scripts/49-cat-logs.py
|
jmviz/xd
|
f905e5c61b2835073b19cc3fa0d6917432fa7ece
|
[
"MIT"
] | 25
|
2016-02-19T20:35:03.000Z
|
2022-01-31T09:15:44.000Z
|
#!/usr/bin/env python3
# Usage:
# $0 -o log.txt products/
#
# concatenates .log files (even those in subdirs or .zip) and combines into a single combined.log
from xdfile.utils import find_files_with_time, open_output, get_args
import boto3
# from boto.s3.connection import S3Connection
import os
def main():
args = get_args('aggregates all .log files')
outf = open_output()
s3 = boto3.resource('s3')
s3path = "logs/"
# bucket = conn.get_bucket(s3path)
bucket = s3.Bucket(os.environ['DOMAIN'])
for obj in sorted(bucket.objects.all(), key=lambda x: x.last_modified):
# last_modified
if s3path in obj.key:
print("Name: %s LastModified:%s" % (obj.key.encode('utf-8'), obj.last_modified))
for fn, contents, dt in sorted(find_files_with_time(*args.inputs, ext=".log"), key=lambda x: x[2]): # earliest first
outf.write_file(fn, contents.decode("utf-8"))
if __name__ == "__main__":
    main()
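# Invocation sketch: DOMAIN must name the S3 bucket (my-bucket is a
# placeholder) and AWS credentials come from the usual boto3 sources:
#     DOMAIN=my-bucket scripts/49-cat-logs.py -o log.txt products/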
| 29.1875
| 121
| 0.671306
| 140
| 934
| 4.371429
| 0.578571
| 0.058824
| 0.042484
| 0.055556
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019894
| 0.192719
| 934
| 31
| 122
| 30.129032
| 0.791777
| 0.275161
| 0
| 0
| 0
| 0
| 0.113772
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.2
| 0
| 0.266667
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c62e1ba59e97f238e09a86895f6c890c24d960e
| 5,819
|
py
|
Python
|
CLIP-ViL-Direct/vqa/pythia_clip_grid_feature.py
|
HermannLiang/CLIP-ViL
|
49c28bc5ece1aacfcbfd9c8810db70663ca0516a
|
[
"MIT"
] | null | null | null |
CLIP-ViL-Direct/vqa/pythia_clip_grid_feature.py
|
HermannLiang/CLIP-ViL
|
49c28bc5ece1aacfcbfd9c8810db70663ca0516a
|
[
"MIT"
] | null | null | null |
CLIP-ViL-Direct/vqa/pythia_clip_grid_feature.py
|
HermannLiang/CLIP-ViL
|
49c28bc5ece1aacfcbfd9c8810db70663ca0516a
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
"""
Grid features extraction script.
"""
import argparse
import os
import torch
import tqdm
from fvcore.common.file_io import PathManager
from detectron2.checkpoint import DetectionCheckpointer
from detectron2.config import get_cfg
from detectron2.engine import default_setup
from detectron2.evaluation import inference_context
from detectron2.modeling import build_model
import numpy as np
from clip.clip import load
import torch.nn as nn
from torchvision.transforms import Compose, Resize, CenterCrop, ToTensor, Normalize
from grid_feats import (
add_attribute_config,
build_detection_test_loader_with_attributes,
)
# resize_pos_embed is imported lazily in the ViT-B/32 branch below (requires timm)
# A simple mapper from object detection dataset to VQA dataset names
dataset_to_folder_mapper = {}
dataset_to_folder_mapper['coco_2014_train'] = 'train2014'
dataset_to_folder_mapper['coco_2014_val'] = 'val2014'
#dataset_to_folder_mapper['coco_2014_val'] = 'trainval2014'
#dataset_to_folder_mapper['coco_2014_train'] = 'trainval2014'
# One may need to change the Detectron2 code to support coco_2015_test
# insert "coco_2015_test": ("coco/test2015", "coco/annotations/image_info_test2015.json"),
# at: https://github.com/facebookresearch/detectron2/blob/master/detectron2/data/datasets/builtin.py#L36
dataset_to_folder_mapper['coco_2015_test'] = 'test2015'
dataset_to_folder_mapper['coco_2015_test-dev'] = 'test-dev2015'
def extract_grid_feature_argument_parser():
parser = argparse.ArgumentParser(description="Grid feature extraction")
parser.add_argument("--config-file", default="", metavar="FILE", help="path to config file")
parser.add_argument("--dataset", help="name of the dataset", default="coco_2014_train",
choices=['coco_2014_train', 'coco_2014_val', 'coco_2015_test', 'coco_2015_test-dev'])
parser.add_argument('--model_type', default='RN50', type=str, help='RN50, RN101, RN50x4, ViT-B/32, vit_base_patch32_224_in21k')
parser.add_argument(
"opts",
help="Modify config options using the command-line",
default=None,
nargs=argparse.REMAINDER,
)
return parser
def extract_grid_feature_on_dataset(model, data_loader, dump_folder):
for idx, inputs in enumerate(tqdm.tqdm(data_loader)):
with torch.no_grad():
image_id = inputs[0]['image_id']
file_name = '%d.pth' % image_id
# compute features
images = model.preprocess_image(inputs)
features = model.backbone(images.tensor)
outputs = model.roi_heads.get_conv5_features(features)
# modify the filename
file_name = inputs[0]['file_name'].split("/")[-1].replace("jpg", "npy")
outputs = outputs.permute(0, 2, 3, 1)
with PathManager.open(os.path.join(dump_folder, file_name), "wb") as f:
np.save(f, outputs.cpu().numpy())
def do_feature_extraction(cfg, model, dataset_name, args):
with inference_context(model):
dump_folder = os.path.join(cfg.OUTPUT_DIR, "features", dataset_to_folder_mapper[dataset_name])
PathManager.mkdirs(dump_folder)
data_loader = build_detection_test_loader_with_attributes(cfg, dataset_name, model_type='clip')
extract_clip_feature_on_dataset(model, data_loader, dump_folder, args)
def setup(args):
"""
Create configs and perform basic setups.
"""
cfg = get_cfg()
add_attribute_config(cfg)
cfg.merge_from_file(args.config_file)
cfg.merge_from_list(args.opts)
# force the final residual block to have dilations 1
cfg.MODEL.RESNETS.RES5_DILATION = 1
cfg.freeze()
default_setup(cfg, args)
return cfg
def extract_clip_feature_on_dataset(model, data_loader, dump_folder, args):
model_type = args.model_type.split("-")[0]
mean = torch.Tensor([0.48145466, 0.4578275, 0.40821073]).to("cuda").reshape(3, 1, 1)
std = torch.Tensor([0.26862954, 0.26130258, 0.27577711]).to("cuda").reshape(3, 1, 1)
dump_folder = f"clip/{save_args.model_type}/" + dump_folder.split("/")[-1]
if args.model_type == "ViT-B/32":
num_patches = 558 #600 * 1000 // 32 // 32
print(num_patches)
pos_embed = nn.Parameter(torch.zeros(num_patches + 1, 768, device='cuda'),)
pos_embed.data = resize_pos_embed(model.visual.positional_embedding.unsqueeze(0), pos_embed.unsqueeze(0)).squeeze(0)
model.visual.positional_embedding = pos_embed
print(model.visual.positional_embedding.device)
# pass
dump_folder = dump_folder.replace("rscratch", "dnn")
dump_folder = "/dnn/sheng.s/clip_boi/grid-feats-vqa/" + dump_folder
if not os.path.exists(dump_folder):
os.makedirs(dump_folder)
for idx, inputs in enumerate(tqdm.tqdm(data_loader)):
with torch.no_grad():
image_id = inputs[0]['image_id']
file_name = '%d.pth' % image_id
# compute features
image = inputs[0]['image'].to("cuda").float() / 255.0
image = (image - mean) / std
image = image.unsqueeze(0)
outputs = model.encode_image(image)
if "RN" in args.model_type:
outputs = outputs.permute(0, 2, 3, 1)
else:
outputs = outputs[:, :, :].reshape(1, 13, 43, 768)
with PathManager.open(os.path.join(dump_folder, file_name), "wb") as f:
# save as CPU tensors
torch.save(outputs.cpu(), f)
def main(args):
cfg = setup(args)
model, transform = load(args.model_type, jit=False)
do_feature_extraction(cfg, model, args.dataset, args)
if __name__ == "__main__":
args = extract_grid_feature_argument_parser().parse_args()
print("Command Line Args:", args)
main(args)
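# Invocation sketch; the config path is a placeholder for a real detectron2
# grid-feature config file:
#     python pythia_clip_grid_feature.py --config-file configs/<grid-feats>.yaml \
#         --dataset coco_2014_train --model_type RN50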
| 40.978873
| 131
| 0.687231
| 781
| 5,819
| 4.8758
| 0.318822
| 0.036765
| 0.031513
| 0.044118
| 0.265756
| 0.221901
| 0.19354
| 0.128414
| 0.120273
| 0.120273
| 0
| 0.048473
| 0.195223
| 5,819
| 141
| 132
| 41.269504
| 0.764681
| 0.129232
| 0
| 0.117647
| 0
| 0
| 0.114138
| 0.018095
| 0
| 0
| 0
| 0
| 0
| 1
| 0.058824
| false
| 0
| 0.147059
| 0
| 0.22549
| 0.029412
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c6577b07dcade6abb36fc14d4e83aa262bb9bef
| 2,514
|
py
|
Python
|
src/node.py
|
aerendon/blockchain-basics
|
e3168afd097b26d23a09fd30e74e07b695e577d1
|
[
"MIT"
] | 6
|
2018-08-09T14:36:35.000Z
|
2021-03-23T06:53:01.000Z
|
src/node.py
|
aerendon/blockchain-basics
|
e3168afd097b26d23a09fd30e74e07b695e577d1
|
[
"MIT"
] | null | null | null |
src/node.py
|
aerendon/blockchain-basics
|
e3168afd097b26d23a09fd30e74e07b695e577d1
|
[
"MIT"
] | null | null | null |
from flask import Flask, request
import time
import requests
import json
from blockchain import Blockchain
from block import Block
app = Flask(__name__)
blockchain = Blockchain()
peers = set()
@app.route('/add_nodes', methods=['POST'])
def register_new_peers():
nodes = request.get_json()
if not nodes:
return "Invalid data", 400
for node in nodes:
peers.add(node)
return "Success", 201
@app.route('/new_transaction', methods=['POST'])
def new_transaction():
tx_data = request.get_json()
required_fields = ["author", "content"]
for field in required_fields:
if not tx_data.get(field):
return "Invalid transaction data", 404
tx_data["timestamp"] = time.time()
blockchain.add_new_transaction(tx_data)
return "Sucess", 201
@app.route('/chain', methods=['GET'])
def get_chain():
chain_data = []
for block in blockchain.chain:
chain_data.append(block.__dict__)
return json.dumps({ "length": len(chain_data), "chain": chain_data })
@app.route('/mine', methods=['GET'])
def mine_unconfirmed_transactions():
result = blockchain.mine()
if not result:
return "No transactions to mine"
return "Block #{} is mined.".format(result)
@app.route('/pending_tx')
def get_pending_tx():
return json.dumps(blockchain.unconfirmed_transactions)
def consensus():
global blockchain
longest_chain = None
current_len = len(blockchain)
for node in peers:
response = requests.get('http://{}/chain'.format(node))
length = response.json()['length']
chain = response.json()['chain']
if length > current_len and blockchain.check_chain_validity(chain):
current_len = length
longest_chain = chain
if longest_chain:
blockchain = longest_chain
return True
return False
@app.route('/add_block', methods=['POST'])
def validate_and_add_block():
block_data = request.get_json()
block = Block(block_data["index"], block_data["transactions"], block_data["timestamp", block_data["previous_hash"]])
proof = block_data['hash']
added = blockchain.add_block(block, proof)
if not added:
return "The block was discarded by the node", 400
return "Block added to the chain", 201
def announce_new_block(block):
for peer in peers:
url = "http://{}/add_block".format(peer)
requests.post(url, data=json.dumps(block.__dict__, sort_keys=True))
app.run(debug=True, port=8000)
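# API sketch against the default port; the payload fields match the
# required_fields checked in new_transaction():
#     curl -X POST http://localhost:8000/new_transaction \
#          -H 'Content-Type: application/json' \
#          -d '{"author": "alice", "content": "hello"}'
#     curl http://localhost:8000/mine
#     curl http://localhost:8000/chain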
| 25.917526
| 120
| 0.668258
| 323
| 2,514
| 5.003096
| 0.278638
| 0.029703
| 0.02599
| 0.024752
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011022
| 0.206046
| 2,514
| 96
| 121
| 26.1875
| 0.798597
| 0
| 0
| 0
| 0
| 0
| 0.138027
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.112676
| false
| 0
| 0.084507
| 0.014085
| 0.366197
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c6a104628420af03301492fef43b77ba98e1a64
| 6,840
|
py
|
Python
|
examples/pytorch/mnist/plot.py
|
ThomasRot/rational_activations
|
1fa26d1ee5f3c916eda00c899afa96eccb960143
|
[
"MIT"
] | null | null | null |
examples/pytorch/mnist/plot.py
|
ThomasRot/rational_activations
|
1fa26d1ee5f3c916eda00c899afa96eccb960143
|
[
"MIT"
] | null | null | null |
examples/pytorch/mnist/plot.py
|
ThomasRot/rational_activations
|
1fa26d1ee5f3c916eda00c899afa96eccb960143
|
[
"MIT"
] | null | null | null |
import torch
import numpy as np
import pickle
torch.manual_seed(17)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(17)
import argparse
import torch.nn as nn
import torch.nn.functional as F
import matplotlib
import os
from rational.torch import Rational, RecurrentRational, RecurrentRationalModule
from torchvision import datasets, transforms
from torch.utils.tensorboard import SummaryWriter
from mnist import VGG, LeNet5, actfvs
from matplotlib import pyplot as plt
font = {'family': 'normal',
'weight': 'bold',
'size': 22}
matplotlib.rc('font', **font)
torch.set_anomaly_enabled(True)
def test(args, model, device, test_loader, epoch):
model.eval()
test_loss = 0
correct = 0
with torch.no_grad():
for data, target in test_loader:
data, target = data.to(device), target.to(device)
output = model(data)
test_loss += F.nll_loss(output, target, reduction='sum').item() # sum up batch loss
pred = output.argmax(dim=1, keepdim=True) # get the index of the max log-probability
correct += pred.eq(target.view_as(pred)).sum().item()
test_loss /= len(test_loader.dataset)
acc = 100. * correct / len(test_loader.dataset)
print('\nTest set: Epoch: {}, Average loss: {:.4f}, Accuracy: {}/{} ({:.2f}%)\n'.format(epoch, test_loss,
correct,
len(test_loader.dataset),
acc))
return acc
def main():
# Training settings
parser = argparse.ArgumentParser(description='PyTorch MNIST Example')
parser.add_argument('--batch-size', type=int, default=64, metavar='N',
help='input batch size for training (default: 64)')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='disables CUDA training')
parser.add_argument('--seed', type=int, default=17, metavar='S',
help='random seed (default: 1)')
parser.add_argument('--log-interval', type=int, default=10, metavar='N',
help='how many batches to wait before logging training status')
parser.add_argument('--dataset', type=str, default='mnist',
help='dataset to use')
parser.add_argument('--arch', type=str, required=True)
parser.add_argument('--init', type=str, default="", choices=["", "xavier", "he"])
args = parser.parse_args()
networks = dict({
"vgg": VGG,
"lenet": LeNet5,
})
network = networks[args.arch]
# activation_function_keys = [x for x in list(actfvs.keys()) if 'pau' in x]
# activation_function_keys = ['pau']
# activation_function_keys = ['recurrent_pau']
activation_function_keys = ['pau', 'recurrent_pau']
optimizer = 'sgd'
epochs = ['final']
for activation_function_key in activation_function_keys:
for epoch in epochs:
print("---" * 42)
print("Starting with dataset: {}, activation function: {}".format(args.dataset, activation_function_key))
print("---" * 42)
load_path = 'examples/runs/mnist/paper_{}_{}_{}{}_seed{}/'.format(args.dataset, args.arch, optimizer,
"_init_{}".format(args.init) if args.init != "" else "",
args.seed) + activation_function_key
use_cuda = not args.no_cuda and torch.cuda.is_available()
torch.manual_seed(args.seed)
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
np.random.seed(args.seed)
device = torch.device("cuda" if use_cuda else "cpu")
kwargs = {'num_workers': 1, 'pin_memory': True} if use_cuda else {}
if args.dataset == 'mnist':
test_loader = torch.utils.data.DataLoader(
datasets.MNIST('../data', train=False, transform=transforms.Compose([
transforms.Resize((32, 32)),
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
lr_scheduler_milestones = [30, 60, 90] # Simple CNN with 3 Conv
# lr_scheduler_milestones = [40, 80] # VGG
elif args.dataset == 'fmnist':
test_loader = torch.utils.data.DataLoader(
datasets.FashionMNIST('../data', train=False, transform=transforms.Compose([
transforms.Resize((32, 32)),
transforms.ToTensor(),
transforms.Normalize((0.1307,), (0.3081,))
])),
batch_size=args.batch_size, shuffle=True, **kwargs)
lr_scheduler_milestones = [40, 80]
else:
raise ValueError('dataset error')
model = network(activation_func=activation_function_key).to(device)
model.load_state_dict(torch.load(os.path.join(load_path, 'model_{}.pt'.format(epoch))))
paus = list()
for name, layer in model.named_modules():
if isinstance(layer, Rational):
layer.input_retrieve_mode(max_saves=10)
paus.append(('rational', name, layer))
if isinstance(layer, RecurrentRationalModule):
layer.input_retrieve_mode(max_saves=10)
paus.append(('recurrent_rational', name, layer))
if len(paus) > 0:
os.makedirs(os.path.join(load_path, 'plots'), exist_ok=True)
# dict(model.named_parameters())["features.3.0.bias"][0]
# dict(model.named_parameters())["features.4.2.numerator"][0]
print("Starting model eval")
acc = test(args, model, device, test_loader, epoch)
print("Finished model eval -> Plot")
# fig = plt.figure(1, figsize=(6*len(paus),6))
fig_dicts = []
for i, p in enumerate(paus):
fig = p[2].show(display=False)
print(fig)
fig_dicts.append(fig)
pickle.dump(fig_dicts, open(f'{args.dataset}_{args.arch}_{activation_function_key}_(acc{acc}%).fig.pkl', "wb"))
else:
print("No Rational Activations found. Exit without plotting")
if __name__ == '__main__':
main()
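# Invocation sketch using the argparse choices defined above:
#     python plot.py --dataset mnist --arch lenet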
| 45.90604
| 134
| 0.55424
| 739
| 6,840
| 4.987821
| 0.339648
| 0.053717
| 0.032284
| 0.016278
| 0.254205
| 0.197504
| 0.197504
| 0.156267
| 0.156267
| 0.133478
| 0
| 0.018986
| 0.322368
| 6,840
| 148
| 135
| 46.216216
| 0.776268
| 0.066374
| 0
| 0.178862
| 0
| 0.00813
| 0.12553
| 0.018202
| 0
| 0
| 0
| 0
| 0
| 1
| 0.01626
| false
| 0
| 0.105691
| 0
| 0.130081
| 0.065041
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c6a234a8099f1c0f0c886e2b520d9f41e36c635
| 7,093
|
py
|
Python
|
fts/fluxrss.py
|
AetherBlack/Veille-Informatique
|
e80451c5eb21f43ac1a9baac3342ad0d4102d18b
|
[
"Linux-OpenIB"
] | null | null | null |
fts/fluxrss.py
|
AetherBlack/Veille-Informatique
|
e80451c5eb21f43ac1a9baac3342ad0d4102d18b
|
[
"Linux-OpenIB"
] | null | null | null |
fts/fluxrss.py
|
AetherBlack/Veille-Informatique
|
e80451c5eb21f43ac1a9baac3342ad0d4102d18b
|
[
"Linux-OpenIB"
] | null | null | null |
#!/usr/bin/python3
from urllib.parse import urlparse
import feedparser
import requests
import asyncio
import discord
import hashlib
import os
from const import CHANNEL_RSS, WAIT_UNTIL_NEW_CHECK, \
SQLITE_FOLDER_NAME, SQLITE_FILE_NAME
from fts.database import Database
from fts.cleandatabase import CleanDatabase
class FluxRSS:
"""
Class FluxRSS.
Fetches news from the RSS feeds described by the `json_rss` mapping.
"""
def __init__(self, bot, cwd):
"""
Initialize class
@param => DiscordBot: `bot`: Discord Bot Instance.
@param => str: `cwd`: Current Working Directory of main.py file.
"""
# Discord
self.bot = bot
self.bot_username = self.bot.user.name
self.rss_channel = self.bot.get_channel(CHANNEL_RSS)
# Path
self.cwd = cwd
# Database
self.db_path = os.path.join(self.cwd, SQLITE_FOLDER_NAME)
self.database = Database(self.db_path, SQLITE_FILE_NAME)
def get_news(self, url):
"""
Get the news of the rss feed.
@param => str: `url`: url of the rss feed.
Return a dict mapping a stringified index to a
[title, description, link] list.
"""
dict_news = dict()
# Get the content of the requests
content = requests.get(url).text
# Parse the content
parser = feedparser.parse(content)
# Set the root
parser = parser["entries"]
# Get the number of news
news_number = len(parser)
# Construct the dict
for index in range(news_number):
# Get the title
title = parser[index]["title"]
# Get the description
description = parser[index]["description"]
# Get the link
link = parser[index]["links"][0]["href"]
# Set list
args = [
title, description, link
]
# Add the list to the dict
dict_news[str(index)] = args
# Return the dict
return dict_news
def is_new(self, root, name, title, description, link):
"""
Return True if the news in the feed is new.
@param => str: `title`: Title of the news.
@param => str: `description`: Description of the news.
@param => str: `link`: Link of the rss feed.
"""
# Hash description
hash_description = hashlib.sha256(bytes(description, "utf-8", errors="ignore")).hexdigest()
# Return the check of the query
return not self.database.isNewsExists(root, name, title, hash_description, link)
def embeded_msg(self, root, name, title, content, link, color):
"""
Create the embeded message and send it to discord.
@param => str: `root`: Name of the Website.
@param => str: `name`: Name set in const. Category of the news.
@param => str: `title`: Title of the news.
@param => str: `content`: Content description of the news.
@param => str: `link`: Link of the news.
@param => discord.Color: `color`: Color for the left panel.
"""
# Set the Name, description and color on the left
news = discord.Embed(title="{0} - {1}".format(root, name), description="News :", color=(color or 0x00ff00))
# Set bot name and profile picture
news.set_author(name=self.bot_username, icon_url=self.bot.user.avatar_url)
#Set the description and the link for the main message
content = content + "\n" + link
news.add_field(name=title, value=content[:1024], inline=False)
#Show the bot username in footer
news.set_footer(text="Generate by @{0}".format(self.bot_username))
# Return the final Discord embeded message
return news
async def feedrss(self, json_rss):
"""
Get the news and send it to the channel.
@param => dict: `json_rss`: JSON data of the RSS Flux.
"""
# Show const for the format
self.json_rss = json_rss
# While the connection is not closed
while not self.bot.is_closed():
# For each key
for key, sections in self.json_rss.items():
# Get the root name set in const
root = key
# For each sections
for index_section, section in enumerate(sections):
# Check customization of the section
if "custom" in section.keys():
# Check color
if "color" in section["custom"].keys():
color = getattr(discord.Color, section["custom"]["color"])()
else:
color = False
else:
color = False
# Get the name of the section
name = section["name"]
# Get the time until the cleaning of the database for the root and name given
wait_time = section["clean"]
# Check if the cleaning database is already launched
if isinstance(wait_time, str):
# Launch the function to clean the database
Thread = CleanDatabase(root, name, wait_time, self.db_path, SQLITE_FILE_NAME)
Thread.start()
# Mark the clean entry in json_rss as scheduled so the cleaning thread is not relaunched
self.json_rss[root][index_section]["clean"] = True
# For each link in the section
for link in section["link"]:
# Get title, description and link in a dict
dict_news = self.get_news(link)
# Verify if the news already exists
for value in dict_news.values():
# Get title
title = value[0]
# Get description
description = value[1]
# Get link
link = value[2]
# Check if the news is new
if self.is_new(root, name, title, description, link):
# Hash the description
hash_description = hashlib.sha256(bytes(description, "utf-8", errors="ignore")).hexdigest()
# write the news into the database
self.database.AddNews(root, name, title, hash_description, link)
#Create the discord message
message = self.embeded_msg(root, name, title, description, link, color)
#Send to discord
await self.rss_channel.send(embed=message)
# Wait until the next verification
await asyncio.sleep(WAIT_UNTIL_NEW_CHECK)
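# Shape of the `json_rss` argument, reconstructed from how feedrss() reads it;
# the site name, section name, interval string and URL are illustrative:
#     json_rss = {
#         "ExampleSite": [
#             {
#                 "name": "Security",
#                 "clean": "24h",             # replaced by True once the cleaner is scheduled
#                 "link": ["https://example.com/feed.xml"],
#                 "custom": {"color": "red"}  # optional; must name a discord.Color classmethod
#             }
#         ]
#     }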
| 36.188776
| 123
| 0.537572
| 814
| 7,093
| 4.603194
| 0.219902
| 0.024019
| 0.020817
| 0.022418
| 0.151855
| 0.127035
| 0.083267
| 0.083267
| 0.083267
| 0.064585
| 0
| 0.005692
| 0.380798
| 7,093
| 195
| 124
| 36.374359
| 0.84745
| 0.302552
| 0
| 0.08
| 0
| 0
| 0.029224
| 0
| 0
| 0
| 0.001758
| 0
| 0
| 1
| 0.053333
| false
| 0
| 0.133333
| 0
| 0.24
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c6a29fe050821e14428d8ec0b7f5f5436d84fcb
| 11,691
|
py
|
Python
|
src/poke_env/player/player_network_interface.py
|
kiyohiro8/poke-env
|
7a1a4b155e8a73bd712d44e70c4192f8032d7e6f
|
[
"MIT"
] | null | null | null |
src/poke_env/player/player_network_interface.py
|
kiyohiro8/poke-env
|
7a1a4b155e8a73bd712d44e70c4192f8032d7e6f
|
[
"MIT"
] | null | null | null |
src/poke_env/player/player_network_interface.py
|
kiyohiro8/poke-env
|
7a1a4b155e8a73bd712d44e70c4192f8032d7e6f
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""This module defines a base class for communicating with showdown servers.
"""
import json
import logging
import requests
import websockets # pyre-ignore
from abc import ABC
from abc import abstractmethod
from asyncio import CancelledError
from asyncio import ensure_future
from asyncio import Event
from asyncio import Lock
from asyncio import sleep
from time import perf_counter
from typing import List
from typing import Optional
from aiologger import Logger # pyre-ignore
from poke_env.exceptions import ShowdownException
from poke_env.player_configuration import PlayerConfiguration
from poke_env.server_configuration import ServerConfiguration
class PlayerNetwork(ABC):
"""
Network interface of a player.
Responsible for communicating with showdown servers. Also implements some higher
level methods for basic tasks, such as changing avatar and low-level message
handling.
"""
def __init__(
self,
player_configuration: PlayerConfiguration,
*,
avatar: Optional[int] = None,
log_level: Optional[int] = None,
server_configuration: ServerConfiguration,
start_listening: bool = True,
) -> None:
"""
:param player_configuration: Player configuration.
:type player_configuration: PlayerConfiguration
:param avatar: Player avatar id. Optional.
:type avatar: int, optional
:param log_level: The player's logger level.
:type log_level: int. Defaults to logging's default level.
:param server_configuration: Server configuration.
:type server_configuration: ServerConfiguration
:param start_listening: Whether to start listening to the server. Defaults to
True.
:type start_listening: bool
"""
self._authentication_url = server_configuration.authentication_url
self._avatar = avatar
self._password = player_configuration.password
self._username = player_configuration.username
self._server_url = server_configuration.server_url
self._logged_in: Event = Event()
self._sending_lock = Lock()
self._websocket: websockets.client.WebSocketClientProtocol # pyre-ignore
self._logger: Logger = self._create_player_logger(log_level) # pyre-ignore
if start_listening:
self._listening_coroutine = ensure_future(self.listen())
async def _accept_challenge(self, username: str) -> None:
assert self.logged_in.is_set()
await self._set_team()
await self._send_message("/accept %s" % username)
async def _challenge(self, username: str, format_: str):
assert self.logged_in.is_set()
await self._set_team()
await self._send_message(f"/challenge {username}, {format_}")
async def _change_avatar(self, avatar_id: Optional[int]) -> None:
"""Changes the player's avatar.
:param avatar_id: The new avatar id. If None, nothing happens.
:type avatar_id: int
"""
await self._wait_for_login()
if avatar_id is not None:
await self._send_message(f"/avatar {avatar_id}")
def _create_player_logger(self, log_level: Optional[int]) -> Logger: # pyre-ignore
"""Creates a logger for the player.
Returns a Logger displaying asctime and the player's username before messages.
:param log_level: The logger's level.
:type log_level: int
:return: The logger.
:rtype: Logger
"""
logger = logging.getLogger(self._username)
stream_handler = logging.StreamHandler()
if log_level is not None:
logger.setLevel(log_level)
formatter = logging.Formatter(
"%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
stream_handler.setFormatter(formatter)
logger.addHandler(stream_handler)
return logger
async def _handle_message(self, message: str) -> None:
"""Handle received messages.
:param message: The message to parse.
:type message: str
"""
try:
self.logger.debug("Received message to handle: %s", message)
# Showdown websocket messages are pipe-separated sequences
split_message = message.split("|")
assert len(split_message) > 1
# The type of message is determined by the first entry in the message
# For battles, this is the zero-th entry
# Otherwise it is the one-th entry
if split_message[1] == "challstr":
# Confirms connection to the server: we can login
await self._log_in(split_message)
elif split_message[1] == "updateuser":
if split_message[2] == " " + self._username:
# Confirms successful login
self.logged_in.set()
elif not split_message[2].startswith(" Guest "):
self.logger.warning(
"""Trying to login as %s, showdown returned %s """
"""- this might prevent future actions from this agent. """
"""Changing the agent's username might solve this problem.""",
self.username,
split_message[2],
)
elif "updatechallenges" in split_message[1]:
# Contain information about current challenge
await self._update_challenges(split_message)
elif split_message[0].startswith(">battle"):
# Battle update
await self._handle_battle_message(message)
elif split_message[1] == "updatesearch":
self.logger.debug("Ignored message: %s", message)
pass
elif split_message[1] == "popup":
self.logger.warning("Popup message received: %s", message)
elif split_message[1] in ["nametaken"]:
self.logger.critical("Error message received: %s", message)
raise ShowdownException("Error message received: %s", message)
elif split_message[1] == "pm":
self.logger.info("Received pm: %s", split_message)
else:
self.logger.critical("Unhandled message: %s", message)
raise NotImplementedError("Unhandled message: %s" % message)
except CancelledError as e:
self.logger.critical("CancelledError intercepted. %s", e)
except Exception as exception:
self.logger.exception(
"Unhandled exception raised while handling message:\n%s", message
)
raise exception
async def _log_in(self, split_message: List[str]) -> None:
"""Log the player with specified username and password.
Split message contains information sent by the server. This information is
necessary to log in.
:param split_message: Message received from the server that triggers logging in.
:type split_message: List[str]
"""
if self._password:
log_in_request = requests.post(
self._authentication_url,
data={
"act": "login",
"name": self._username,
"pass": self._password,
"challstr": split_message[2] + "%7C" + split_message[3],
},
)
self.logger.info("Sending authentication request")
assertion = json.loads(log_in_request.text[1:])["assertion"]
else:
self.logger.info("Bypassing authentication request")
assertion = ""
await self._send_message(f"/trn {self._username},0,{assertion}")
await self._change_avatar(self._avatar)
async def _search_ladder_game(self, format_):
await self._set_team()
await self._send_message(f"/search {format_}")
async def _send_message(
self, message: str, room: str = "", message_2: Optional[str] = None
) -> None:
"""Sends a message to the specified room.
`message_2` can be used to send a sequence of length 2.
:param message: The message to send.
:type message: str
:param room: The room to which the message should be sent.
:type room: str
:param message_2: Second element of the sequence to be sent. Optional.
:type message_2: str, optional
"""
if message_2:
to_send = "|".join([room, message, message_2])
else:
to_send = "|".join([room, message])
await self._websocket.send(to_send)
self.logger.info(">>> %s", to_send)
async def _set_team(self):
if self._team is not None:
await self._send_message("/utm %s" % self._team.yield_team())
async def _wait_for_login(
self, checking_interval: float = 0.001, wait_for: int = 5
) -> None:
start = perf_counter()
while perf_counter() - start < wait_for:
await sleep(checking_interval)
if self.logged_in.is_set():
    return
assert self.logged_in.is_set()
async def listen(self) -> None:
"""Listen to a showdown websocket and dispatch messages to be handled."""
self.logger.info("Starting listening to showdown websocket")
coroutines = []
try:
async with websockets.connect(
self.websocket_url, max_queue=None
) as websocket:
self._websocket = websocket
async for message in websocket:
self.logger.info("<<< %s", message)
coroutines.append(ensure_future(self._handle_message(message)))
except websockets.exceptions.ConnectionClosedOK:
self.logger.warning(
"Websocket connection with %s closed", self.websocket_url
)
except (CancelledError, RuntimeError) as e:
self.logger.critical("Listen interrupted by %s", e)
except Exception as e:
self.logger.exception(e)
finally:
for coroutine in coroutines:
coroutine.cancel()
async def stop_listening(self) -> None:
if self._listening_coroutine is not None:
self._listening_coroutine.cancel()
await self._websocket.close()
@abstractmethod
async def _handle_battle_message(self, message: str) -> None:
"""Abstract method.
Implementation should redirect messages to corresponding battles.
"""
@abstractmethod
async def _update_challenges(self, split_message: List[str]) -> None:
"""Abstract method.
Implementation should keep track of current challenges.
"""
@property
def logged_in(self) -> Event:
"""Event object associated with user login.
:return: The logged-in event
:rtype: Event
"""
return self._logged_in
@property
def logger(self) -> Logger: # pyre-ignore
"""Logger associated with the player.
:return: The logger.
:rtype: Logger
"""
return self._logger
@property
def username(self) -> str:
"""The player's username.
:return: The player's username.
:rtype: str
"""
return self._username
@property
def websocket_url(self) -> str:
"""The websocket url.
It is derived from the server url.
:return: The websocket url.
:rtype: str
"""
return f"ws://{self._server_url}/showdown/websocket"
| 36.307453
| 88
| 0.610042
| 1,295
| 11,691
| 5.34749
| 0.214672
| 0.039856
| 0.015018
| 0.017329
| 0.134874
| 0.061083
| 0.042455
| 0.034079
| 0.022527
| 0.01704
| 0
| 0.003677
| 0.302113
| 11,691
| 321
| 89
| 36.420561
| 0.845079
| 0.146694
| 0
| 0.112903
| 0
| 0
| 0.09367
| 0.008747
| 0
| 0
| 0
| 0
| 0.037634
| 1
| 0.032258
| false
| 0.026882
| 0.096774
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c6cbf4764a2e4e9b78da1978c82aa4f5d7862ce
| 3,637
|
py
|
Python
|
tests/conftest.py
|
priyatharsan/beyond
|
1061b870407d316d43e4d1351a7ec026629685ae
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
priyatharsan/beyond
|
1061b870407d316d43e4d1351a7ec026629685ae
|
[
"MIT"
] | null | null | null |
tests/conftest.py
|
priyatharsan/beyond
|
1061b870407d316d43e4d1351a7ec026629685ae
|
[
"MIT"
] | null | null | null |
import numpy as np
from pytest import fixture, mark, skip
from unittest.mock import patch
from pathlib import Path
from beyond.config import config
from beyond.dates.eop import Eop
from beyond.frames.stations import create_station
from beyond.io.tle import Tle
from beyond.propagators.keplernum import KeplerNum
from beyond.dates import Date, timedelta
from beyond.env.solarsystem import get_body
np.set_printoptions(linewidth=200)
@fixture(autouse=True, scope="session")
def config_override():
"""Create a dummy config dict containing basic data
"""
config.update({
"eop": {
"missing_policy": "pass",
}
})
@fixture
def common_env():
with patch('beyond.dates.date.EopDb.get') as m:
m.return_value = Eop(
x=-0.00951054166666622, y=0.31093590624999734, dpsi=-94.19544791666682,
deps=-10.295645833333051, dy=-0.10067361111115315, dx=-0.06829513888889051,
lod=1.6242802083331438, ut1_utc=0.01756018472222477, tai_utc=36.0
)
yield
@fixture
def station(common_env):
return create_station('Toulouse', (43.604482, 1.443962, 172.))
@fixture
def iss_tle(common_env):
return Tle("""ISS (ZARYA)
1 25544U 98067A 18124.55610684 .00001524 00000-0 30197-4 0 9997
2 25544 51.6421 236.2139 0003381 47.8509 47.6767 15.54198229111731""")
@fixture
def molniya_tle(common_env):
return Tle("""MOLNIYA 1-90
1 24960U 97054A 18123.22759647 .00000163 00000-0 24467-3 0 9999
2 24960 62.6812 182.7824 6470982 294.8616 12.8538 3.18684355160009""")
@fixture(params=["tle", "ephem"])
def orbit(request, iss_tle):
orb = iss_tle.orbit()
if request.param == "tle":
return orb
elif request.param == "ephem":
start = Date(2018, 4, 5, 16, 50)
stop = timedelta(hours=6)
step = timedelta(seconds=15)
return orb.ephem(start=start, stop=stop, step=step)
elif request.param == "kepler":
orb.propagator = KeplerNum(
timedelta(seconds=60),
get_body('Earth')
)
return orb
@fixture(params=["tle", "ephem"])
def molniya(request, molniya_tle):
orb = molniya_tle.orbit()
if request.param == "tle":
return orb
elif request.param == "ephem":
start = Date(2018, 4, 5, 16, 50)
stop = timedelta(hours=15)
step = timedelta(minutes=1)
return orb.ephem(start=start, stop=stop, step=step)
@fixture
def jplfiles():
config['env'] = {
'jpl': [
str(Path(__file__).parent / "data" / "jpl" / "de403_2000-2020.bsp"),
str(Path(__file__).parent / "data" / "jpl" / "pck00010.tpc"),
str(Path(__file__).parent / "data" / "jpl" / "gm_de431.tpc"),
]
}
def _skip_if_no_mpl():
"""Specific for dynamically skipping the test if matplotlib is not present
as it is not a dependency of the library, but merely a convenience
"""
try:
import matplotlib.pyplot as plt
except ImportError:
return True
else:
return False
def pytest_configure(config):
"""Declare the skip_if_no_mpl marker in pytest's '--markers' helper option
This has no actual effect on the tests
"""
config.addinivalue_line(
"markers", "skip_if_no_mpl: skip if matplotlib is not installed"
)
def pytest_runtest_setup(item):
"""This function is called for each test case.
It looks if the test case has the skip_if_no_mpl decorator. If so, skip the test case
"""
if _skip_if_no_mpl() and list(item.iter_markers(name="skip_if_no_mpl")):
skip("matplotlib not installed")
| 27.141791
| 89
| 0.653011
| 495
| 3,637
| 4.678788
| 0.452525
| 0.030225
| 0.020725
| 0.028497
| 0.208981
| 0.145078
| 0.11399
| 0.11399
| 0.11399
| 0.079447
| 0
| 0.150788
| 0.232334
| 3,637
| 133
| 90
| 27.345865
| 0.678725
| 0.119054
| 0
| 0.2
| 0
| 0
| 0.182193
| 0.008555
| 0
| 0
| 0
| 0
| 0
| 1
| 0.122222
| false
| 0.011111
| 0.144444
| 0.033333
| 0.377778
| 0.011111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c6d185f736a9be6f5e0a171cd9fc68f8a4ce031
| 12,105
|
py
|
Python
|
kornia/color/adjust.py
|
carlosb1/kornia
|
a2b34d497314e7ed65f114401efdd3cc9ba2077c
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
kornia/color/adjust.py
|
carlosb1/kornia
|
a2b34d497314e7ed65f114401efdd3cc9ba2077c
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
kornia/color/adjust.py
|
carlosb1/kornia
|
a2b34d497314e7ed65f114401efdd3cc9ba2077c
|
[
"ECL-2.0",
"Apache-2.0"
] | null | null | null |
from typing import Union
import torch
import torch.nn as nn
from kornia.color.hsv import rgb_to_hsv, hsv_to_rgb
from kornia.constants import pi
def adjust_saturation_raw(input: torch.Tensor, saturation_factor: Union[float, torch.Tensor]) -> torch.Tensor:
r"""Adjust color saturation of an image. Expecting input to be in hsv format already.
See :class:`~kornia.color.AdjustSaturation` for details.
"""
if not torch.is_tensor(input):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")
if not isinstance(saturation_factor, (float, torch.Tensor,)):
raise TypeError(f"The saturation_factor should be a float number or torch.Tensor."
f"Got {type(saturation_factor)}")
if isinstance(saturation_factor, float):
saturation_factor = torch.tensor([saturation_factor])
saturation_factor = saturation_factor.to(input.device).to(input.dtype)
if (saturation_factor < 0).any():
raise ValueError(f"Saturation factor must be non-negative. Got {saturation_factor}")
for _ in input.shape[1:]:
saturation_factor = torch.unsqueeze(saturation_factor, dim=-1)
# unpack the hsv values
h, s, v = torch.chunk(input, chunks=3, dim=-3)
# scale the saturation channel and clamp to the valid range
s_out: torch.Tensor = torch.clamp(s * saturation_factor, min=0, max=1)
# pack back the corrected saturation
out: torch.Tensor = torch.cat([h, s_out, v], dim=-3)
return out
def adjust_saturation(input: torch.Tensor, saturation_factor: Union[float, torch.Tensor]) -> torch.Tensor:
r"""Adjust color saturation of an image.
See :class:`~kornia.color.AdjustSaturation` for details.
"""
# convert the rgb image to hsv
x_hsv: torch.Tensor = rgb_to_hsv(input)
# perform the conversion
x_adjusted: torch.Tensor = adjust_saturation_raw(x_hsv, saturation_factor)
# convert back to rgb
out: torch.Tensor = hsv_to_rgb(x_adjusted)
return out
def adjust_hue_raw(input: torch.Tensor, hue_factor: Union[float, torch.Tensor]) -> torch.Tensor:
r"""Adjust hue of an image. Expecting input to be in hsv format already.
See :class:`~kornia.color.AdjustHue` for details.
"""
if not torch.is_tensor(input):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")
if not isinstance(hue_factor, (float, torch.Tensor)):
raise TypeError(f"The hue_factor should be a float number or torch.Tensor in the range between"
f" [-PI, PI]. Got {type(hue_factor)}")
if isinstance(hue_factor, float):
hue_factor = torch.tensor([hue_factor])
hue_factor = hue_factor.to(input.device).to(input.dtype)
if ((hue_factor < -pi) | (hue_factor > pi)).any():
raise ValueError(f"Hue-factor must be in the range [-PI, PI]. Got {hue_factor}")
for _ in input.shape[1:]:
hue_factor = torch.unsqueeze(hue_factor, dim=-1)
# unpack the hsv values
h, s, v = torch.chunk(input, chunks=3, dim=-3)
# shift the hue channel, wrapping around at 2*pi
divisor: float = 2 * pi.item()
h_out: torch.Tensor = torch.fmod(h + hue_factor, divisor)
# pack the corrected hue back together with saturation and value
out: torch.Tensor = torch.cat([h_out, s, v], dim=-3)
return out
def adjust_hue(input: torch.Tensor, hue_factor: Union[float, torch.Tensor]) -> torch.Tensor:
r"""Adjust hue of an image.
See :class:`~kornia.color.AdjustHue` for details.
"""
# convert the rgb image to hsv
x_hsv: torch.Tensor = rgb_to_hsv(input)
# perform the conversion
x_adjusted: torch.Tensor = adjust_hue_raw(x_hsv, hue_factor)
# convert back to rgb
out: torch.Tensor = hsv_to_rgb(x_adjusted)
return out
def adjust_gamma(input: torch.Tensor, gamma: Union[float, torch.Tensor],
gain: Union[float, torch.Tensor] = 1.) -> torch.Tensor:
r"""Perform gamma correction on an image.
See :class:`~kornia.color.AdjustGamma` for details.
"""
if not torch.is_tensor(input):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")
if not isinstance(gamma, (float, torch.Tensor)):
raise TypeError(f"The gamma should be a positive float or torch.Tensor. Got {type(gamma)}")
if not isinstance(gain, (float, torch.Tensor)):
raise TypeError(f"The gain should be a positive float or torch.Tensor. Got {type(gain)}")
if isinstance(gamma, float):
gamma = torch.tensor([gamma])
if isinstance(gain, float):
gain = torch.tensor([gain])
gamma = gamma.to(input.device).to(input.dtype)
gain = gain.to(input.device).to(input.dtype)
if (gamma < 0.0).any():
raise ValueError(f"Gamma must be non-negative. Got {gamma}")
if (gain < 0.0).any():
raise ValueError(f"Gain must be non-negative. Got {gain}")
for _ in input.shape[1:]:
gamma = torch.unsqueeze(gamma, dim=-1)
gain = torch.unsqueeze(gain, dim=-1)
# Apply the gamma correction
x_adjust: torch.Tensor = gain * torch.pow(input, gamma)
# Clamp to the valid pixel range [0, 1]
out: torch.Tensor = torch.clamp(x_adjust, 0.0, 1.0)
return out
def adjust_contrast(input: torch.Tensor,
contrast_factor: Union[float, torch.Tensor]) -> torch.Tensor:
r"""Adjust Contrast of an image.
See :class:`~kornia.color.AdjustContrast` for details.
"""
if not torch.is_tensor(input):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")
if not isinstance(contrast_factor, (float, torch.Tensor,)):
raise TypeError(f"The factor should be either a float or torch.Tensor. "
f"Got {type(contrast_factor)}")
if isinstance(contrast_factor, float):
contrast_factor = torch.tensor([contrast_factor])
contrast_factor = contrast_factor.to(input.device).to(input.dtype)
if (contrast_factor < 0).any():
raise ValueError(f"Contrast factor must be non-negative. Got {contrast_factor}")
for _ in input.shape[1:]:
contrast_factor = torch.unsqueeze(contrast_factor, dim=-1)
# Apply contrast factor to each channel
x_adjust: torch.Tensor = input * contrast_factor
# Clamp to the valid pixel range [0, 1]
out: torch.Tensor = torch.clamp(x_adjust, 0.0, 1.0)
return out
def adjust_brightness(input: torch.Tensor,
brightness_factor: Union[float, torch.Tensor]) -> torch.Tensor:
r"""Adjust Brightness of an image.
See :class:`~kornia.color.AdjustBrightness` for details.
"""
if not torch.is_tensor(input):
raise TypeError(f"Input type is not a torch.Tensor. Got {type(input)}")
if not isinstance(brightness_factor, (float, torch.Tensor,)):
raise TypeError(f"The factor should be either a float or torch.Tensor. "
f"Got {type(brightness_factor)}")
if isinstance(brightness_factor, float):
brightness_factor = torch.tensor([brightness_factor])
brightness_factor = brightness_factor.to(input.device).to(input.dtype)
for _ in input.shape[1:]:
brightness_factor = torch.unsqueeze(brightness_factor, dim=-1)
# Apply brightness factor to each channel
x_adjust: torch.Tensor = input + brightness_factor
# Clamp to the valid pixel range [0, 1]
out: torch.Tensor = torch.clamp(x_adjust, 0.0, 1.0)
return out
class AdjustSaturation(nn.Module):
r"""Adjust color saturation of an image.
The input image is expected to be an RGB image in the range of [0, 1].
Args:
input (torch.Tensor): Image/Tensor to be adjusted in the shape of (\*, N).
saturation_factor (float): How much to adjust the saturation. 0 will give a black
and white image, 1 will give the original image while 2 will enhance the saturation
by a factor of 2.
Returns:
torch.Tensor: Adjusted image.
"""
def __init__(self, saturation_factor: Union[float, torch.Tensor]) -> None:
super(AdjustSaturation, self).__init__()
self.saturation_factor: Union[float, torch.Tensor] = saturation_factor
def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore
return adjust_saturation(input, self.saturation_factor)
class AdjustHue(nn.Module):
r"""Adjust hue of an image.
The input image is expected to be an RGB image in the range of [0, 1].
Args:
input (torch.Tensor): Image/Tensor to be adjusted in the shape of (\*, N).
hue_factor (float): How much to shift the hue channel. Must be in [-PI, PI].
Both -PI and PI give a complete reversal of the hue channel in HSV space
(complementary colors), while 0 leaves the image unchanged.
Returns:
torch.Tensor: Adjusted image.
"""
def __init__(self, hue_factor: Union[float, torch.Tensor]) -> None:
super(AdjustHue, self).__init__()
self.hue_factor: Union[float, torch.Tensor] = hue_factor
def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore
return adjust_hue(input, self.hue_factor)
class AdjustGamma(nn.Module):
r"""Perform gamma correction on an image.
The input image is expected to be in the range of [0, 1].
Args:
input (torch.Tensor): Image/Tensor to be adjusted in the shape of (\*, N).
gamma (float): Non-negative real number, the \gamma in the gamma-correction
equation. A gamma larger than 1 makes the shadows darker, while a gamma
smaller than 1 makes dark regions lighter.
gain (float, optional): The constant multiplier. Default 1.
Returns:
torch.Tensor: Adjusted image.
"""
def __init__(self, gamma: Union[float, torch.Tensor], gain: Union[float, torch.Tensor] = 1.) -> None:
super(AdjustGamma, self).__init__()
self.gamma: Union[float, torch.Tensor] = gamma
self.gain: Union[float, torch.Tensor] = gain
def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore
return adjust_gamma(input, self.gamma, self.gain)
class AdjustContrast(nn.Module):
r"""Adjust Contrast of an image. This implementation aligns OpenCV, not PIL. Hence,
the output differs from TorchVision.
The input image is expected to be in the range of [0, 1].
Args:
input (torch.Tensor): Image to be adjusted in the shape of (\*, N).
contrast_factor (Union[float, torch.Tensor]): Contrast adjustment factor per
element in the batch. 0 generates a completely black image, 1 leaves the
input unchanged, and any other non-negative number scales the pixel
intensities by that factor.
Returns:
torch.Tensor: Adjusted image.
"""
def __init__(self, contrast_factor: Union[float, torch.Tensor]) -> None:
super(AdjustContrast, self).__init__()
self.contrast_factor: Union[float, torch.Tensor] = contrast_factor
def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore
return adjust_contrast(input, self.contrast_factor)
class AdjustBrightness(nn.Module):
r"""Adjust Brightness of an image. This implementation aligns OpenCV, not PIL. Hence,
the output differs from TorchVision.
The input image is expected to be in the range of [0, 1].
Args:
input (torch.Tensor): Image/Input to be adjusted in the shape of (\*, N).
brightness_factor (Union[float, torch.Tensor]): Brightness adjustment factor
per element in the batch. 0 leaves the input unchanged, while any other
number shifts the brightness by that amount.
Returns:
torch.Tensor: Adjusted image.
"""
def __init__(self, brightness_factor: Union[float, torch.Tensor]) -> None:
super(AdjustBrightness, self).__init__()
self.brightness_factor: Union[float, torch.Tensor] = brightness_factor
def forward(self, input: torch.Tensor) -> torch.Tensor: # type: ignore
return adjust_brightness(input, self.brightness_factor)
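# A minimal usage sketch of the functional and module APIs above.
# The sample tensor values are illustrative; any float RGB batch in [0, 1] works.
import torch
from kornia.color.adjust import adjust_brightness, AdjustSaturation

img = torch.rand(1, 3, 2, 2)          # fake 1x3x2x2 RGB batch in [0, 1]
bright = adjust_brightness(img, 0.2)  # functional API: shift and clamp
desat = AdjustSaturation(0.5)(img)    # module API: move halfway to grayscale
assert bright.shape == img.shape and desat.shape == img.shape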
| 34.884726
| 110
| 0.671871
| 1,695
| 12,105
| 4.694985
| 0.107965
| 0.131314
| 0.056296
| 0.058055
| 0.677934
| 0.656446
| 0.613471
| 0.541468
| 0.439055
| 0.396708
| 0
| 0.006588
| 0.222553
| 12,105
| 346
| 111
| 34.985549
| 0.839018
| 0.316563
| 0
| 0.255034
| 0
| 0
| 0.12825
| 0.009215
| 0
| 0
| 0
| 0
| 0
| 1
| 0.114094
| false
| 0
| 0.033557
| 0.033557
| 0.261745
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c6ea33d579371cc05a40f107c83af6d179fcd7a
| 1,418
|
py
|
Python
|
pommerman/__init__.py
|
rmccann01/playground
|
354041cd1d9b70ffe82c18fb5b4035fab721eb92
|
[
"Apache-2.0"
] | 725
|
2018-02-14T09:48:18.000Z
|
2022-03-29T03:04:28.000Z
|
pommerman/__init__.py
|
rmccann01/playground
|
354041cd1d9b70ffe82c18fb5b4035fab721eb92
|
[
"Apache-2.0"
] | 214
|
2018-02-16T22:00:41.000Z
|
2022-03-11T23:26:20.000Z
|
pommerman/__init__.py
|
rmccann01/playground
|
354041cd1d9b70ffe82c18fb5b4035fab721eb92
|
[
"Apache-2.0"
] | 265
|
2018-02-15T05:33:46.000Z
|
2022-03-11T03:04:17.000Z
|
'''Entry point into the pommerman module'''
import gym
import inspect
from . import agents
from . import configs
from . import constants
from . import forward_model
from . import helpers
from . import utility
from . import network
gym.logger.set_level(40)
REGISTRY = None
def _register():
global REGISTRY
REGISTRY = []
for name, f in inspect.getmembers(configs, inspect.isfunction):
if not name.endswith('_env'):
continue
config = f()
gym.envs.registration.register(
id=config['env_id'],
entry_point=config['env_entry_point'],
kwargs=config['env_kwargs']
)
REGISTRY.append(config['env_id'])
# Register environments with gym
_register()
def make(config_id, agent_list, game_state_file=None, render_mode='human'):
'''Instantiate a pommerman env (all configs are registered with gym at import time) and attach the agents'''
assert config_id in REGISTRY, "Unknown configuration '{}'. " \
"Possible values: {}".format(config_id, REGISTRY)
env = gym.make(config_id)
for id_, agent in enumerate(agent_list):
assert isinstance(agent, agents.BaseAgent)
# NOTE: This is IMPORTANT so that the agent character is initialized
agent.init_agent(id_, env.spec._kwargs['game_type'])
env.set_agents(agent_list)
env.set_init_game_state(game_state_file)
env.set_render_mode(render_mode)
return env
from . import cli
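# A minimal sketch of the intended entry point. The config id is an
# assumption: any id registered by _register() from a '*_env' config works.
import pommerman
from pommerman import agents

agent_list = [agents.SimpleAgent() for _ in range(4)]
env = pommerman.make('PommeFFACompetition-v0', agent_list)
state = env.reset()
actions = env.act(state)                       # ask each agent for an action
state, reward, done, info = env.step(actions)
env.close()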
| 26.754717
| 76
| 0.682652
| 185
| 1,418
| 5.043243
| 0.437838
| 0.085745
| 0.02358
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001805
| 0.218618
| 1,418
| 52
| 77
| 27.269231
| 0.840253
| 0.131171
| 0
| 0
| 0
| 0
| 0.083607
| 0
| 0
| 0
| 0
| 0
| 0.054054
| 1
| 0.054054
| false
| 0
| 0.27027
| 0
| 0.351351
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c71eb8f52ad23f62b8d9e0d27dc37cf322f70c3
| 3,148
|
py
|
Python
|
tensorflow_datasets/structured/dart/dart_test.py
|
harsh020/datasets
|
b4ad3617b279ec65356e696c4c860458621976f6
|
[
"Apache-2.0"
] | 1
|
2020-12-10T06:37:27.000Z
|
2020-12-10T06:37:27.000Z
|
tensorflow_datasets/structured/dart/dart_test.py
|
Jinwook-shim/datasets
|
815037e87150e3c8a557d91a68b07e8ffb6a2a86
|
[
"Apache-2.0"
] | null | null | null |
tensorflow_datasets/structured/dart/dart_test.py
|
Jinwook-shim/datasets
|
815037e87150e3c8a557d91a68b07e8ffb6a2a86
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# Copyright 2020 The TensorFlow Datasets Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Dart dataset tests."""
import json
import mock
import tensorflow.compat.v2 as tf
import tensorflow_datasets.public_api as tfds
from tensorflow_datasets.structured.dart import dart
class DartTest(tfds.testing.DatasetBuilderTestCase):
DATASET_CLASS = dart.Dart
SPLITS = {
'train': 2,
'validation': 1,
'test': 2,
}
def test_split_generators(self):
json_str = """
[
{
"tripleset": [
[
"Mars Hill College",
"JOINED",
"1973"
],
[
"Mars Hill College",
"LOCATION",
"Mars Hill, North Carolina"
]
],
"subtree_was_extended": true,
"annotations": [
{
"source": "WikiSQL_decl_sents",
"text": "A school from Mars Hill, North Carolina, joined in 1973."
}
]
}
]
"""
expected_examples = [{
'input_text': {
'table': [
{
'column_header': 'subject',
'row_number': 0,
'content': 'Mars Hill College',
},
{
'column_header': 'predicate',
'row_number': 0,
'content': 'JOINED',
},
{
'column_header': 'object',
'row_number': 0,
'content': '1973',
},
{
'column_header': 'subject',
'row_number': 1,
'content': 'Mars Hill College',
},
{
'column_header': 'predicate',
'row_number': 1,
'content': 'LOCATION',
},
{
'column_header': 'object',
'row_number': 1,
'content': 'Mars Hill, North Carolina',
},
]
},
'target_text':
'A school from Mars Hill, North Carolina, joined in 1973.'
}]
dart_dataset = dart.Dart()
with mock.patch.object(
json, 'load',
return_value=json.loads(json_str)), mock.patch.object(
tf, 'io'):
for i, (_, example) in enumerate(dart_dataset._generate_examples('')):
self.assertCountEqual(example, expected_examples[i])
if __name__ == '__main__':
tfds.testing.test_main()
| 28.880734
| 80
| 0.493011
| 294
| 3,148
| 5.132653
| 0.482993
| 0.042412
| 0.039761
| 0.055666
| 0.2167
| 0.155732
| 0.132538
| 0.132538
| 0.132538
| 0.063618
| 0
| 0.018568
| 0.401207
| 3,148
| 108
| 81
| 29.148148
| 0.781963
| 0.19155
| 0
| 0.211765
| 0
| 0
| 0.38361
| 0.017419
| 0
| 0
| 0
| 0
| 0.011765
| 1
| 0.011765
| false
| 0
| 0.058824
| 0
| 0.105882
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c72f8f31e7cf39a7edd3dbce8585cf8da069b38
| 9,085
|
py
|
Python
|
exp/exp_informer_dad.py
|
AdamLohSg/GTA
|
bf6a745a6e28e365466e76360a15ca10ce61e009
|
[
"Apache-2.0"
] | 8
|
2022-01-19T20:47:36.000Z
|
2022-03-20T05:11:04.000Z
|
exp/exp_informer_dad.py
|
AdamLohSg/GTA
|
bf6a745a6e28e365466e76360a15ca10ce61e009
|
[
"Apache-2.0"
] | 2
|
2022-02-17T06:14:25.000Z
|
2022-02-17T08:43:57.000Z
|
exp/exp_informer_dad.py
|
AdamLohSg/GTA
|
bf6a745a6e28e365466e76360a15ca10ce61e009
|
[
"Apache-2.0"
] | 5
|
2022-02-15T04:16:27.000Z
|
2022-03-29T01:21:41.000Z
|
from data.data_loader_dad import (
NASA_Anomaly,
WADI
)
from exp.exp_basic import Exp_Basic
from models.model import Informer
from utils.tools import EarlyStopping, adjust_learning_rate
from utils.metrics import metric
from sklearn.metrics import classification_report
import numpy as np
import torch
import torch.nn as nn
from torch import optim
from torch.utils.data import DataLoader
import os
import time
import warnings
warnings.filterwarnings('ignore')
class Exp_Informer_DAD(Exp_Basic):
def __init__(self, args):
super(Exp_Informer_DAD, self).__init__(args)
def _build_model(self):
model_dict = {
'informer':Informer,
}
if self.args.model=='informer':
model = model_dict[self.args.model](
self.args.enc_in,
self.args.dec_in,
self.args.c_out,
self.args.seq_len,
self.args.label_len,
self.args.pred_len,
self.args.factor,
self.args.d_model,
self.args.n_heads,
self.args.e_layers,
self.args.d_layers,
self.args.d_ff,
self.args.dropout,
self.args.attn,
self.args.embed,
self.args.data[:-1],
self.args.activation,
self.device
)
return model.double()
def _get_data(self, flag):
args = self.args
data_dict = {
'SMAP':NASA_Anomaly,
'MSL':NASA_Anomaly,
'WADI':WADI,
}
Data = data_dict[self.args.data]
shuffle_flag = flag != 'test'
drop_last = True
batch_size = args.batch_size
data_set = Data(
root_path=args.root_path,
data_path=args.data_path,
flag=flag,
size=[args.seq_len, args.label_len, args.pred_len],
features=args.features,
target=args.target
)
print(flag, len(data_set))
data_loader = DataLoader(
data_set,
batch_size=batch_size,
shuffle=shuffle_flag,
num_workers=args.num_workers,
drop_last=drop_last)
return data_set, data_loader
def _select_optimizer(self):
model_optim = optim.Adam(self.model.parameters(), lr=self.args.learning_rate)
return model_optim
def _select_criterion(self):
criterion = nn.MSELoss()
return criterion
def vali(self, vali_data, vali_loader, criterion):
self.model.eval()
total_loss = []
for i, (batch_x,batch_y,batch_x_mark,batch_y_mark,batch_label) in enumerate(vali_loader):
batch_x = batch_x.double().to(self.device)
batch_y = batch_y.double()
batch_x_mark = batch_x_mark.double().to(self.device)
batch_y_mark = batch_y_mark.double().to(self.device)
# decoder input
dec_inp = torch.zeros_like(batch_y[:,-self.args.pred_len:,:]).double()
dec_inp = torch.cat([batch_y[:,:self.args.label_len,:], dec_inp], dim=1).double().to(self.device)
# encoder - decoder
outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)
batch_y = batch_y[:,-self.args.pred_len:,:].to(self.device)
pred = outputs.detach().cpu()
true = batch_y.detach().cpu()
loss = criterion(pred, true)
total_loss.append(loss)
total_loss = np.average(total_loss)
self.model.train()
return total_loss
def train(self, setting):
train_data, train_loader = self._get_data(flag = 'train')
vali_data, vali_loader = self._get_data(flag = 'val')
test_data, test_loader = self._get_data(flag = 'test')
path = './checkpoints/'+setting
if not os.path.exists(path):
os.makedirs(path)
time_now = time.time()
train_steps = len(train_loader)
early_stopping = EarlyStopping(patience=self.args.patience, verbose=True)
model_optim = self._select_optimizer()
criterion = self._select_criterion()
for epoch in range(self.args.train_epochs):
iter_count = 0
train_loss = []
self.model.train()
for i, (batch_x,batch_y,batch_x_mark,batch_y_mark) in enumerate(train_loader):
iter_count += 1
model_optim.zero_grad()
batch_x = batch_x.double().to(self.device)
batch_y = batch_y.double()
batch_x_mark = batch_x_mark.double().to(self.device)
batch_y_mark = batch_y_mark.double().to(self.device)
# decoder input
dec_inp = torch.zeros_like(batch_y[:,-self.args.pred_len:,:]).double()
dec_inp = torch.cat([batch_y[:,:self.args.label_len,:], dec_inp], dim=1).double().to(self.device)
# encoder - decoder
outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)
batch_y = batch_y[:,-self.args.pred_len:,:].to(self.device)
loss = criterion(outputs, batch_y)
train_loss.append(loss.item())
if (i+1) % 100==0:
print("\titers: {0}, epoch: {1} | loss: {2:.7f}".format(i + 1, epoch + 1, loss.item()))
speed = (time.time()-time_now)/iter_count
left_time = speed*((self.args.train_epochs - epoch)*train_steps - i)
print('\tspeed: {:.4f}s/iter; left time: {:.4f}s'.format(speed, left_time))
iter_count = 0
time_now = time.time()
loss.backward()
model_optim.step()
train_loss = np.average(train_loss)
vali_loss = self.vali(vali_data, vali_loader, criterion)
test_loss = self.vali(test_data, test_loader, criterion)
print("Epoch: {0}, Steps: {1} | Train Loss: {2:.7f} Vali Loss: {3:.7f} Test Loss: {4:.7f}".format(
epoch + 1, train_steps, train_loss, vali_loss, test_loss))
early_stopping(vali_loss, self.model, path)
if early_stopping.early_stop:
print("Early stopping")
break
adjust_learning_rate(model_optim, epoch+1, self.args)
best_model_path = path+'/'+'checkpoint.pth'
self.model.load_state_dict(torch.load(best_model_path))
return self.model
def test(self, setting):
test_data, test_loader = self._get_data(flag='test')
self.model.eval()
preds = []
trues = []
labels = []
with torch.no_grad():
for i, (batch_x,batch_y,batch_x_mark,batch_y_mark,batch_label) in enumerate(test_loader):
batch_x = batch_x.double().to(self.device)
batch_y = batch_y.double()
batch_x_mark = batch_x_mark.double().to(self.device)
batch_y_mark = batch_y_mark.double().to(self.device)
# decoder input
dec_inp = torch.zeros_like(batch_y[:,-self.args.pred_len:,:]).double()
dec_inp = torch.cat([batch_y[:,:self.args.label_len,:], dec_inp], dim=1).double().to(self.device)
# encoder - decoder
outputs = self.model(batch_x, batch_x_mark, dec_inp, batch_y_mark)
batch_y = batch_y[:,-self.args.pred_len:,:].to(self.device)
pred = outputs.detach().cpu().numpy()#.squeeze()
true = batch_y.detach().cpu().numpy()#.squeeze()
batch_label = batch_label.long().detach().numpy()
preds.append(pred)
trues.append(true)
labels.append(batch_label)
preds = np.array(preds)
trues = np.array(trues)
labels = np.array(labels)
print('test shape:', preds.shape, trues.shape)
preds = preds.reshape(-1, preds.shape[-2], preds.shape[-1])
trues = trues.reshape(-1, trues.shape[-2], trues.shape[-1])
labels = labels.reshape(-1, labels.shape[-1])
print('test shape:', preds.shape, trues.shape)
# result save
folder_path = './results/' + setting +'/'
if not os.path.exists(folder_path):
os.makedirs(folder_path)
mae, mse, rmse, mape, mspe = metric(preds, trues)
print('mse:{}, mae:{}'.format(mse, mae))
np.save(folder_path+'metrics.npy', np.array([mae, mse, rmse, mape, mspe]))
np.save(folder_path+'pred.npy', preds)
np.save(folder_path+'true.npy', trues)
np.save(folder_path+'label.npy', labels)
return
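# A rough sketch of how this experiment class is typically driven. The
# Namespace below lists only an illustrative subset of the fields the
# constructor and data loaders read; the real entry script builds the full
# set via argparse.
from argparse import Namespace
from exp.exp_informer_dad import Exp_Informer_DAD

args = Namespace(model='informer', data='SMAP', root_path='./data/',
                 data_path='SMAP.csv', features='M', target='OT',
                 seq_len=96, label_len=48, pred_len=24, enc_in=25, dec_in=25,
                 c_out=25, d_model=512, n_heads=8, e_layers=2, d_layers=1,
                 d_ff=2048, factor=5, dropout=0.05, attn='prob', embed='timeF',
                 activation='gelu', batch_size=32, num_workers=0,
                 learning_rate=1e-4, train_epochs=6, patience=3)

exp = Exp_Informer_DAD(args)     # builds the Informer model on the chosen device
exp.train('informer_smap_demo')  # checkpoints land under ./checkpoints/<setting>
exp.test('informer_smap_demo')   # metrics/preds land under ./results/<setting>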
| 36.051587
| 113
| 0.557292
| 1,115
| 9,085
| 4.295067
| 0.165022
| 0.060138
| 0.037586
| 0.045103
| 0.349551
| 0.314053
| 0.30403
| 0.289831
| 0.275632
| 0.26018
| 0
| 0.006192
| 0.324491
| 9,085
| 252
| 114
| 36.051587
| 0.774157
| 0.013979
| 0
| 0.188482
| 0
| 0.005236
| 0.038217
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.041885
| false
| 0
| 0.073298
| 0
| 0.157068
| 0.041885
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c73ce1a389f347a8681ff6c30c8fe84612d252e
| 9,270
|
py
|
Python
|
tests/components/mysensors/conftest.py
|
liangleslie/core
|
cc807b4d597daaaadc92df4a93c6e30da4f570c6
|
[
"Apache-2.0"
] | 30,023
|
2016-04-13T10:17:53.000Z
|
2020-03-02T12:56:31.000Z
|
tests/components/mysensors/conftest.py
|
liangleslie/core
|
cc807b4d597daaaadc92df4a93c6e30da4f570c6
|
[
"Apache-2.0"
] | 24,710
|
2016-04-13T08:27:26.000Z
|
2020-03-02T12:59:13.000Z
|
tests/components/mysensors/conftest.py
|
liangleslie/core
|
cc807b4d597daaaadc92df4a93c6e30da4f570c6
|
[
"Apache-2.0"
] | 11,956
|
2016-04-13T18:42:31.000Z
|
2020-03-02T09:32:12.000Z
|
"""Provide common mysensors fixtures."""
from __future__ import annotations
from collections.abc import AsyncGenerator, Callable, Generator
import json
from typing import Any
from unittest.mock import AsyncMock, MagicMock, patch
from mysensors import BaseSyncGateway
from mysensors.persistence import MySensorsJSONDecoder
from mysensors.sensor import Sensor
import pytest
from homeassistant.components.device_tracker.legacy import Device
from homeassistant.components.mqtt import DOMAIN as MQTT_DOMAIN
from homeassistant.components.mysensors.config_flow import DEFAULT_BAUD_RATE
from homeassistant.components.mysensors.const import (
CONF_BAUD_RATE,
CONF_DEVICE,
CONF_GATEWAY_TYPE,
CONF_GATEWAY_TYPE_SERIAL,
CONF_VERSION,
DOMAIN,
)
from homeassistant.core import HomeAssistant
from homeassistant.setup import async_setup_component
from tests.common import MockConfigEntry, load_fixture
@pytest.fixture(autouse=True)
def device_tracker_storage(mock_device_tracker_conf: list[Device]) -> list[Device]:
"""Mock out device tracker known devices storage."""
devices = mock_device_tracker_conf
return devices
@pytest.fixture(name="mqtt")
def mock_mqtt_fixture(hass: HomeAssistant) -> None:
"""Mock the MQTT integration."""
hass.config.components.add(MQTT_DOMAIN)
@pytest.fixture(name="is_serial_port")
def is_serial_port_fixture() -> Generator[MagicMock, None, None]:
"""Patch the serial port check."""
with patch("homeassistant.components.mysensors.gateway.cv.isdevice") as is_device:
is_device.side_effect = lambda device: device
yield is_device
@pytest.fixture(name="gateway_nodes")
def gateway_nodes_fixture() -> dict[int, Sensor]:
"""Return the gateway nodes dict."""
return {}
@pytest.fixture(name="serial_transport")
async def serial_transport_fixture(
gateway_nodes: dict[int, Sensor],
is_serial_port: MagicMock,
) -> AsyncGenerator[dict[int, Sensor], None]:
"""Mock a serial transport."""
with patch(
"mysensors.gateway_serial.AsyncTransport", autospec=True
) as transport_class, patch("mysensors.task.OTAFirmware", autospec=True), patch(
"mysensors.task.load_fw", autospec=True
), patch(
"mysensors.task.Persistence", autospec=True
) as persistence_class:
persistence = persistence_class.return_value
mock_gateway_features(persistence, transport_class, gateway_nodes)
yield transport_class
def mock_gateway_features(
persistence: MagicMock, transport_class: MagicMock, nodes: dict[int, Sensor]
) -> None:
"""Mock the gateway features."""
async def mock_schedule_save_sensors() -> None:
"""Load nodes from via persistence."""
gateway = transport_class.call_args[0][0]
gateway.sensors.update(nodes)
persistence.schedule_save_sensors = AsyncMock(
side_effect=mock_schedule_save_sensors
)
# For some reason autospeccing does not recognize these methods.
persistence.safe_load_sensors = MagicMock()
persistence.save_sensors = MagicMock()
async def mock_connect() -> None:
"""Mock the start method."""
transport.connect_task = MagicMock()
gateway = transport_class.call_args[0][0]
gateway.on_conn_made(gateway)
transport = transport_class.return_value
transport.connect_task = None
transport.connect.side_effect = mock_connect
@pytest.fixture(name="transport")
def transport_fixture(serial_transport: MagicMock) -> MagicMock:
"""Return the default mocked transport."""
return serial_transport
@pytest.fixture
def transport_write(transport: MagicMock) -> MagicMock:
"""Return the transport mock that accepts string messages."""
return transport.return_value.send
@pytest.fixture(name="serial_entry")
async def serial_entry_fixture(hass: HomeAssistant) -> MockConfigEntry:
"""Create a config entry for a serial gateway."""
entry = MockConfigEntry(
domain=DOMAIN,
data={
CONF_GATEWAY_TYPE: CONF_GATEWAY_TYPE_SERIAL,
CONF_VERSION: "2.3",
CONF_DEVICE: "/test/device",
CONF_BAUD_RATE: DEFAULT_BAUD_RATE,
},
)
return entry
@pytest.fixture(name="config_entry")
def config_entry_fixture(serial_entry: MockConfigEntry) -> MockConfigEntry:
"""Provide the config entry used for integration set up."""
return serial_entry
@pytest.fixture(name="integration")
async def integration_fixture(
hass: HomeAssistant, transport: MagicMock, config_entry: MockConfigEntry
) -> AsyncGenerator[MockConfigEntry, None]:
"""Set up the mysensors integration with a config entry."""
config: dict[str, Any] = {}
config_entry.add_to_hass(hass)
with patch("homeassistant.components.mysensors.device.UPDATE_DELAY", new=0):
await async_setup_component(hass, DOMAIN, config)
await hass.async_block_till_done()
yield config_entry
@pytest.fixture
def receive_message(
transport: MagicMock, integration: MockConfigEntry
) -> Callable[[str], None]:
"""Receive a message for the gateway."""
def receive_message_callback(message_string: str) -> None:
"""Receive a message with the transport.
The message_string parameter is a string in the MySensors message format.
"""
gateway = transport.call_args[0][0]
# node_id;child_id;command;ack;type;payload\n
gateway.logic(message_string)
return receive_message_callback
@pytest.fixture(name="gateway")
def gateway_fixture(
transport: MagicMock, integration: MockConfigEntry
) -> BaseSyncGateway:
"""Return a setup gateway."""
return transport.call_args[0][0]
def load_nodes_state(fixture_path: str) -> dict:
"""Load mysensors nodes fixture."""
return json.loads(load_fixture(fixture_path), cls=MySensorsJSONDecoder)
def update_gateway_nodes(
gateway_nodes: dict[int, Sensor], nodes: dict[int, Sensor]
) -> dict:
"""Update the gateway nodes."""
gateway_nodes.update(nodes)
return nodes
@pytest.fixture(name="gps_sensor_state", scope="session")
def gps_sensor_state_fixture() -> dict:
"""Load the gps sensor state."""
return load_nodes_state("mysensors/gps_sensor_state.json")
@pytest.fixture
def gps_sensor(gateway_nodes: dict[int, Sensor], gps_sensor_state: dict) -> Sensor:
"""Load the gps sensor."""
nodes = update_gateway_nodes(gateway_nodes, gps_sensor_state)
node = nodes[1]
return node
@pytest.fixture(name="power_sensor_state", scope="session")
def power_sensor_state_fixture() -> dict:
"""Load the power sensor state."""
return load_nodes_state("mysensors/power_sensor_state.json")
@pytest.fixture
def power_sensor(gateway_nodes: dict[int, Sensor], power_sensor_state: dict) -> Sensor:
"""Load the power sensor."""
nodes = update_gateway_nodes(gateway_nodes, power_sensor_state)
node = nodes[1]
return node
@pytest.fixture(name="energy_sensor_state", scope="session")
def energy_sensor_state_fixture() -> dict:
"""Load the energy sensor state."""
return load_nodes_state("mysensors/energy_sensor_state.json")
@pytest.fixture
def energy_sensor(
gateway_nodes: dict[int, Sensor], energy_sensor_state: dict
) -> Sensor:
"""Load the energy sensor."""
nodes = update_gateway_nodes(gateway_nodes, energy_sensor_state)
node = nodes[1]
return node
@pytest.fixture(name="sound_sensor_state", scope="session")
def sound_sensor_state_fixture() -> dict:
"""Load the sound sensor state."""
return load_nodes_state("mysensors/sound_sensor_state.json")
@pytest.fixture
def sound_sensor(gateway_nodes: dict[int, Sensor], sound_sensor_state: dict) -> Sensor:
"""Load the sound sensor."""
nodes = update_gateway_nodes(gateway_nodes, sound_sensor_state)
node = nodes[1]
return node
@pytest.fixture(name="distance_sensor_state", scope="session")
def distance_sensor_state_fixture() -> dict:
"""Load the distance sensor state."""
return load_nodes_state("mysensors/distance_sensor_state.json")
@pytest.fixture
def distance_sensor(
gateway_nodes: dict[int, Sensor], distance_sensor_state: dict
) -> Sensor:
"""Load the distance sensor."""
nodes = update_gateway_nodes(gateway_nodes, distance_sensor_state)
node = nodes[1]
return node
@pytest.fixture(name="temperature_sensor_state", scope="session")
def temperature_sensor_state_fixture() -> dict:
"""Load the temperature sensor state."""
return load_nodes_state("mysensors/temperature_sensor_state.json")
@pytest.fixture
def temperature_sensor(
gateway_nodes: dict[int, Sensor], temperature_sensor_state: dict
) -> Sensor:
"""Load the temperature sensor."""
nodes = update_gateway_nodes(gateway_nodes, temperature_sensor_state)
node = nodes[1]
return node
@pytest.fixture(name="text_node_state", scope="session")
def text_node_state_fixture() -> dict:
"""Load the text node state."""
return load_nodes_state("mysensors/text_node_state.json")
@pytest.fixture
def text_node(gateway_nodes: dict[int, Sensor], text_node_state: dict) -> Sensor:
"""Load the text child node."""
nodes = update_gateway_nodes(gateway_nodes, text_node_state)
node = nodes[1]
return node
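# A sketch of a test consuming the fixtures above; it relies on Home
# Assistant's pytest plugins being active. The message follows the
# node_id;child_id;command;ack;type;payload format noted earlier; the node,
# child, and payload values are assumptions matching the gps fixture.
async def test_gps_sensor_update(hass, gps_sensor, integration, receive_message):
    """Feed one MySensors set-message and let the gateway process it."""
    receive_message("1;1;1;0;49;40.741894,-73.989311,12\n")
    await hass.async_block_till_done()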
| 31.530612
| 87
| 0.73247
| 1,147
| 9,270
| 5.681779
| 0.145597
| 0.060764
| 0.041737
| 0.030382
| 0.369649
| 0.268835
| 0.144852
| 0.070278
| 0.058616
| 0.044192
| 0
| 0.002317
| 0.161812
| 9,270
| 293
| 88
| 31.638225
| 0.836422
| 0.114132
| 0
| 0.163043
| 0
| 0
| 0.095554
| 0.063957
| 0
| 0
| 0
| 0
| 0
| 1
| 0.146739
| false
| 0
| 0.086957
| 0
| 0.36413
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c76c121d957b364e4b6f2fa9125b58b9c909aee
| 4,086
|
py
|
Python
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/course_groups/migrations/0001_initial.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 3
|
2021-12-15T04:58:18.000Z
|
2022-02-06T12:15:37.000Z
|
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/course_groups/migrations/0001_initial.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | null | null | null |
Part-03-Understanding-Software-Crafting-Your-Own-Tools/models/edx-platform/openedx/core/djangoapps/course_groups/migrations/0001_initial.py
|
osoco/better-ways-of-thinking-about-software
|
83e70d23c873509e22362a09a10d3510e10f6992
|
[
"MIT"
] | 1
|
2019-01-02T14:38:50.000Z
|
2019-01-02T14:38:50.000Z
|
from django.db import migrations, models
from django.conf import settings
from opaque_keys.edx.django.models import CourseKeyField
class Migration(migrations.Migration):
dependencies = [
migrations.swappable_dependency(settings.AUTH_USER_MODEL),
]
operations = [
migrations.CreateModel(
name='CohortMembership',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('course_id', CourseKeyField(max_length=255)),
],
),
migrations.CreateModel(
name='CourseCohort',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('assignment_type', models.CharField(default='manual', max_length=20, choices=[('random', 'Random'), ('manual', 'Manual')])),
],
),
migrations.CreateModel(
name='CourseCohortsSettings',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('is_cohorted', models.BooleanField(default=False)),
('course_id', CourseKeyField(help_text='Which course are these settings associated with?', unique=True, max_length=255, db_index=True)),
('_cohorted_discussions', models.TextField(null=True, db_column='cohorted_discussions', blank=True)),
('always_cohort_inline_discussions', models.BooleanField(default=True)),
],
),
migrations.CreateModel(
name='CourseUserGroup',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('name', models.CharField(help_text='What is the name of this group? Must be unique within a course.', max_length=255)),
('course_id', CourseKeyField(help_text='Which course is this group associated with?', max_length=255, db_index=True)),
('group_type', models.CharField(max_length=20, choices=[('cohort', 'Cohort')])),
('users', models.ManyToManyField(help_text='Who is in this group?', related_name='course_groups', to=settings.AUTH_USER_MODEL, db_index=True)),
],
),
migrations.CreateModel(
name='CourseUserGroupPartitionGroup',
fields=[
('id', models.AutoField(verbose_name='ID', serialize=False, auto_created=True, primary_key=True)),
('partition_id', models.IntegerField(help_text='contains the id of a cohorted partition in this course')),
('group_id', models.IntegerField(help_text='contains the id of a specific group within the cohorted partition')),
('created_at', models.DateTimeField(auto_now_add=True)),
('updated_at', models.DateTimeField(auto_now=True)),
('course_user_group', models.OneToOneField(to='course_groups.CourseUserGroup', on_delete=models.CASCADE)),
],
),
migrations.AddField(
model_name='coursecohort',
name='course_user_group',
field=models.OneToOneField(related_name='cohort', to='course_groups.CourseUserGroup', on_delete=models.CASCADE),
),
migrations.AddField(
model_name='cohortmembership',
name='course_user_group',
field=models.ForeignKey(to='course_groups.CourseUserGroup', on_delete=models.CASCADE),
),
migrations.AddField(
model_name='cohortmembership',
name='user',
field=models.ForeignKey(to=settings.AUTH_USER_MODEL, on_delete=models.CASCADE),
),
migrations.AlterUniqueTogether(
name='courseusergroup',
unique_together={('name', 'course_id')},
),
migrations.AlterUniqueTogether(
name='cohortmembership',
unique_together={('user', 'course_id')},
),
]
| 49.829268
| 159
| 0.618698
| 405
| 4,086
| 6.041975
| 0.259259
| 0.022885
| 0.051083
| 0.046996
| 0.437679
| 0.383327
| 0.341643
| 0.308132
| 0.308132
| 0.308132
| 0
| 0.00526
| 0.255507
| 4,086
| 81
| 160
| 50.444444
| 0.799145
| 0
| 0
| 0.506494
| 0
| 0
| 0.21953
| 0.0465
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.038961
| 0
| 0.077922
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c77100c5bc822f15ee0cc031b607fff7a7b2f70
| 899
|
py
|
Python
|
parsl/tests/test_error_handling/test_resource_spec.py
|
MatthewBM/parsl
|
f11417a0255ed290fd0d78ffa1bc52cfe7a06301
|
[
"Apache-2.0"
] | null | null | null |
parsl/tests/test_error_handling/test_resource_spec.py
|
MatthewBM/parsl
|
f11417a0255ed290fd0d78ffa1bc52cfe7a06301
|
[
"Apache-2.0"
] | null | null | null |
parsl/tests/test_error_handling/test_resource_spec.py
|
MatthewBM/parsl
|
f11417a0255ed290fd0d78ffa1bc52cfe7a06301
|
[
"Apache-2.0"
] | null | null | null |
import parsl
from parsl.app.app import python_app
from parsl.tests.configs.local_threads import config
from parsl.executors.errors import UnsupportedFeatureError
from parsl.executors import WorkQueueExecutor
@python_app
def double(x, parsl_resource_specification={}):
return x * 2
def test_resource(n=2):
spec = {'cores': 2, 'memory': '1GiB'}
fut = double(n, parsl_resource_specification=spec)
try:
fut.result()
except Exception as e:
assert isinstance(e, UnsupportedFeatureError)
else:
executors = parsl.dfk().executors
executor = None
for label in executors:
if label != 'data_manager':
executor = executors[label]
break
assert isinstance(executor, WorkQueueExecutor)
if __name__ == '__main__':
local_config = config
parsl.load(local_config)
x = test_resource(2)
| 26.441176
| 58
| 0.676307
| 104
| 899
| 5.653846
| 0.480769
| 0.061224
| 0.061224
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007299
| 0.238042
| 899
| 33
| 59
| 27.242424
| 0.851095
| 0
| 0
| 0
| 0
| 0
| 0.038932
| 0
| 0
| 0
| 0
| 0
| 0.074074
| 1
| 0.074074
| false
| 0
| 0.185185
| 0.037037
| 0.296296
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c78f1b09da753afd4fbe81d818781bc202c7f29
| 9,565
|
py
|
Python
|
cincan/file_tool.py
|
cincanproject/cincan-command
|
b8cde81931b1c8583ac7daa1327520fb9f06856e
|
[
"MIT"
] | 1
|
2022-03-11T02:37:42.000Z
|
2022-03-11T02:37:42.000Z
|
cincan/file_tool.py
|
cincanproject/cincan-command
|
b8cde81931b1c8583ac7daa1327520fb9f06856e
|
[
"MIT"
] | null | null | null |
cincan/file_tool.py
|
cincanproject/cincan-command
|
b8cde81931b1c8583ac7daa1327520fb9f06856e
|
[
"MIT"
] | null | null | null |
import pathlib
import re
from typing import List, Optional, Dict, Set, Tuple, Iterable
import shlex
class FileMatcher:
"""Match files based on a pattern"""
def __init__(self, match_string: str, include: bool):
self.match_string = match_string
self.exact = '*' not in match_string
self.absolute_path = match_string.startswith('/')
self.include = include
@classmethod
def parse(cls, match_strings: List[str]) -> List['FileMatcher']:
"""Parse pattens from a list"""
res = []
for m in match_strings:
if m.startswith('^'):
res.append(FileMatcher(m[1:], include=False))
else:
res.append(FileMatcher(m, include=True))
return res
def filter_upload_files(self, files: List[pathlib.Path]) -> List[pathlib.Path]:
"""Filter uploaded files by this pattern"""
return list(filter(lambda f: self.__match(f.as_posix()) == self.include, files))
def filter_download_files(self, files: List[str], work_dir: str) -> List[str]:
"""Filter downloaded files by this pattern"""
if self.absolute_path:
# matching absolute files
res = []
for file in files:
if self.__match(file) == self.include:
res.append(file)
return res
else:
# matching files relative to working directory
res = []
for file in files:
try:
rel_file = pathlib.Path(file).relative_to(work_dir).as_posix()
except ValueError:
if not self.include:
res.append(file)
continue
if self.__match(rel_file) == self.include:
res.append(file)
return res
def __match(self, value: str) -> bool:
"""Match value with this pattern"""
if self.exact:
return self.match_string == value
split = self.match_string.split("*")
i = 0
off = 0
len_v = len(value)
s = split[0]
len_s = len(s)
if len_s > 0:
if len_v < i + len_s or value[i:i + len_s] != s:
return False
off += len_s
i += 1
while i < len(split):
s = split[i]
len_s = len(s)
if len_s > 0:
off = value.find(s, off)
if off < 0:
return False
i += 1
off += len_s
if split[-1] != '' and off != len_v:
return False
return True
class FileResolver:
"""Resolve files from command line arguments"""
def __init__(self, args: List[str], directory: pathlib.Path, output_dirs: List[str] = None,
do_resolve: bool = True, input_filters: List[FileMatcher] = None):
self.original_args = args
self.directory = directory
self.host_files: List[pathlib.Path] = []
self.command_args = args.copy()
# Additional punctuation chars on which the command might be split (on top of shlex defaults)
self.additional_punc_chars = "=,"
# these are output directories, upload them without contents
for dir in output_dirs or []:
self.host_files.append(pathlib.Path(dir))
self.output_dirs = set([pathlib.Path(d) for d in (output_dirs or [])])
if do_resolve:
# autodetect input files
self.__analyze()
# exclude files matched by the input filters, if any
for filth in input_filters or []:
self.host_files = filth.filter_upload_files(self.host_files)
def __file_exists(self, path: str, already_listed: Set[pathlib.Path], parent_check: bool = True) -> Optional[str]:
"""
Method for evaluating the possible existence of input files and potential output directories.
If there is local match for file/directory, it is marked as uploadable file into container, and path is changed
to be relative of working directory of container, when command is passed into container.
Special case: when possible argument is coming from first layer (not quoted) of arguments, is valid path
and has no whitespace in arguments, we are processing this part later, because we can support special markups
such as % and & in here.
"""
o_file = pathlib.Path(path)
# does the file/dir exist? Never attempt to copy '/', leave it as it is...
file_exists = o_file.exists() and not all([c == '/' for c in path])
# When the filename may contain spaces, we are only interested in absolute paths
# and do not check parents
if not file_exists and not parent_check and not " " in path:
return None
if not file_exists and not o_file.is_absolute() and '..' not in o_file.as_posix():
# the file does not exist, but it is a relative path to a file/directory...
o_parent = o_file.parent
while not file_exists and o_parent and o_parent.as_posix() != '.':
if o_parent.is_dir() and o_parent not in self.host_files:
file_exists = True # ...and there is an existing parent directory, perhaps for output
o_parent = o_parent.parent
if file_exists:
h_file, a_name = self.__archive_name_for(o_file)
if h_file not in already_listed:
self.host_files.append(h_file)
already_listed.add(h_file)
# a trailing '/' gets eaten away... restore it
for p in range(len(path) - 1, 0, -1):
if path[p] != '/':
break
a_name += '/'
if file_exists and o_file.is_dir() and o_file not in self.output_dirs:
# include files in sub directories
self.__include_sub_dirs(o_file.iterdir(), already_listed)
if file_exists:
return a_name
else:
return None
def __analyze(self):
"""Analyze the command line"""
self.command_args = []
already_listed: Set[pathlib.Path] = self.output_dirs.copy()
for o_arg in self.original_args:
a_name = self.__file_exists(o_arg, already_listed, parent_check=False)
# Potential path as argument; do not split it into pieces yet for further analysis
if a_name:
self.command_args.append(a_name)
continue
# NOTE: Shlex not Windows compatible!
lex = shlex.shlex(o_arg, posix=True, punctuation_chars=self.additional_punc_chars)
split = list(lex)
modified_paths = []
for part in split:
a_name = self.__file_exists(part, already_listed)
if a_name:
modified_paths.append((part, a_name))
for m_part, m_name in modified_paths:
o_arg = o_arg.replace(m_part, m_name)
self.command_args.append(o_arg)
def __include_sub_dirs(self, files: Iterable[pathlib.Path], file_set: Set[pathlib.Path]):
"""Include files from sub directories"""
for f in files:
if f not in file_set:
self.host_files.append(f)
file_set.add(f)
if f.is_dir():
self.__include_sub_dirs(f.iterdir(), file_set)
def resolve_upload_files(self, upload_files: Dict[pathlib.Path, str]):
"""Resolve the files to upload"""
for up_file in self.detect_upload_files():
host_file, arc_name = self.__archive_name_for(up_file)
upload_files[host_file] = arc_name
cmd_args = self.command_args
return cmd_args
def detect_upload_files(self, files: Optional[Iterable[pathlib.Path]] = None) -> List[pathlib.Path]:
"""Detect files to upload"""
it_files = sorted(self.host_files) if files is None else files
res = []
# filter out files which do not exist nor should exists
for file in it_files:
if file.exists() or file in self.output_dirs:
res.append(file)
if files is None:
# make sure also paths leading to output files are uploaded
all_dirs = set()
for file in res:
all_dirs.add(file)
for p in file.parents:
all_dirs.add(p)
for file in filter(lambda f: not f.exists(), it_files):
# file does not exist but is marked for upload - must be a sub directory for output
p = file.parent
while not p.exists():
p = p.parent
if p not in all_dirs:
res.append(p)
return res
@classmethod
def __archive_name_for(cls, file: pathlib.Path) -> Tuple[pathlib.Path, str]:
"""Resolve host file and archive name for uploaded file"""
if cls.__use_absolute_path(file):
h_file = file.resolve()
a_file = file.resolve().as_posix()
a_file = a_file[1:] if a_file.startswith('/') else a_file
else:
h_file = file
a_file = file.as_posix()
return h_file, a_file
@classmethod
def __use_absolute_path(cls, file: pathlib.Path) -> bool:
"""Should use absolute path to refer a file path?"""
# - use absolute paths if '..' appears in the path (admittedly a weak heuristic)
return file.is_absolute() or (".." in file.as_posix())
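# A small sketch of the matcher in isolation; the paths are illustrative.
# Patterns prefixed with '^' exclude, everything else includes.
import pathlib
from cincan.file_tool import FileMatcher

matchers = FileMatcher.parse(["*.txt", "^secret*"])
files = [pathlib.Path("notes.txt"), pathlib.Path("secret.txt")]
for m in matchers:
    files = m.filter_upload_files(files)
print(files)  # -> [PosixPath('notes.txt')]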
| 41.228448
| 119
| 0.576477
| 1,240
| 9,565
| 4.258065
| 0.187903
| 0.0375
| 0.019697
| 0.011364
| 0.082955
| 0.0375
| 0.019697
| 0.019697
| 0
| 0
| 0
| 0.002198
| 0.334135
| 9,565
| 231
| 120
| 41.406926
| 0.826817
| 0.205332
| 0
| 0.235294
| 0
| 0
| 0.00362
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076471
| false
| 0
| 0.023529
| 0
| 0.205882
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c7958cdc1aac4d3672c25246775beb5da7fc72d
| 997
|
py
|
Python
|
aws_interface/cloud/auth/set_me.py
|
hubaimaster/aws-interface
|
162dd056546d58b6eb29afcae1c3c2d78e4309b2
|
[
"Apache-2.0"
] | 53
|
2018-10-02T05:58:54.000Z
|
2020-09-15T08:58:26.000Z
|
aws_interface/cloud/auth/set_me.py
|
hubaimaster/aws-interface
|
162dd056546d58b6eb29afcae1c3c2d78e4309b2
|
[
"Apache-2.0"
] | 52
|
2018-09-26T05:16:09.000Z
|
2022-03-11T23:51:14.000Z
|
aws_interface/cloud/auth/set_me.py
|
hubaimaster/aws-interface
|
162dd056546d58b6eb29afcae1c3c2d78e4309b2
|
[
"Apache-2.0"
] | 10
|
2019-03-11T16:35:14.000Z
|
2019-10-23T08:03:54.000Z
|
from cloud.permission import Permission, NeedPermission
from cloud.message import error
# Define the input/output format of the function.
# This information is used when generating the *SDK*.
info = {
'input_format': {
'session_id': 'str',
'field': 'str',
'value?': 'str',
},
'output_format': {
'user_id?': 'str',
},
'description': 'Set my information'
}
@NeedPermission(Permission.Run.Auth.set_me)
def do(data, resource):
body = {}
params = data['params']
user = data['user']
user_id = user['id']
field = params.get('field')
value = params.get('value', None)
user = resource.db_get_item(user_id)
# For security
if field in ['id', 'email', 'password_hash', 'salt', 'groups', 'login_method']:
body['error'] = error.FORBIDDEN_MODIFICATION
return body
else:
user[field] = value
resource.db_update_item(user_id, user)
body['user_id'] = user_id
return body
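# A sketch of the request/response shape implied by the info block above.
# The field name and value are illustrative; session resolution and the
# permission check are handled by the surrounding framework and decorator,
# and the import path is assumed from this file's location.
from cloud.auth.set_me import do

data = {
    'params': {'field': 'nickname', 'value': 'alice'},
    'user': {'id': 'user-1234'},
}
body = do(data, resource)  # resource is the framework-supplied DB adapter
# -> {'user_id': 'user-1234'} on success,
#    {'error': error.FORBIDDEN_MODIFICATION} for protected fields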
| 24.317073
| 83
| 0.608826
| 121
| 997
| 4.876033
| 0.495868
| 0.071186
| 0.050847
| 0.040678
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.250752
| 997
| 40
| 84
| 24.925
| 0.789826
| 0.110331
| 0
| 0.066667
| 0
| 0
| 0.193878
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033333
| false
| 0.033333
| 0.066667
| 0
| 0.166667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c79e8c0feadf546c1f7ffb56f2c6aded823808d
| 4,647
|
py
|
Python
|
arcade/examples/sprite_bullets_enemy_aims.py
|
LiorAvrahami/arcade
|
fce254a9eb89629de1f99d57a63759a2953184e9
|
[
"MIT"
] | 1
|
2020-01-18T04:48:38.000Z
|
2020-01-18T04:48:38.000Z
|
arcade/examples/sprite_bullets_enemy_aims.py
|
LiorAvrahami/arcade
|
fce254a9eb89629de1f99d57a63759a2953184e9
|
[
"MIT"
] | 1
|
2019-08-11T18:47:27.000Z
|
2019-08-12T03:02:11.000Z
|
arcade/examples/sprite_bullets_enemy_aims.py
|
LiorAvrahami/arcade
|
fce254a9eb89629de1f99d57a63759a2953184e9
|
[
"MIT"
] | null | null | null |
"""
Show how to have enemies shoot bullets aimed at the player.
If Python and Arcade are installed, this example can be run from the command line with:
python -m arcade.examples.sprite_bullets_enemy_aims
"""
import arcade
import math
import os
SCREEN_WIDTH = 800
SCREEN_HEIGHT = 600
SCREEN_TITLE = "Sprites and Bullets Enemy Aims Example"
BULLET_SPEED = 4
class MyGame(arcade.Window):
""" Main application class """
def __init__(self, width, height, title):
super().__init__(width, height, title)
# Set the working directory (where we expect to find files) to the same
# directory this .py file is in. You can leave this out of your own
# code, but it is needed to easily run the examples using "python -m"
# as mentioned at the top of this program.
file_path = os.path.dirname(os.path.abspath(__file__))
os.chdir(file_path)
arcade.set_background_color(arcade.color.BLACK)
self.frame_count = 0
self.enemy_list = None
self.bullet_list = None
self.player_list = None
self.player = None
def setup(self):
self.enemy_list = arcade.SpriteList()
self.bullet_list = arcade.SpriteList()
self.player_list = arcade.SpriteList()
# Add player ship
self.player = arcade.Sprite(":resources:images/space_shooter/playerShip1_orange.png", 0.5)
self.player_list.append(self.player)
# Add top-left enemy ship
enemy = arcade.Sprite(":resources:images/space_shooter/playerShip1_green.png", 0.5)
enemy.center_x = 120
enemy.center_y = SCREEN_HEIGHT - enemy.height
enemy.angle = 180
self.enemy_list.append(enemy)
# Add top-right enemy ship
enemy = arcade.Sprite(":resources:images/space_shooter/playerShip1_green.png", 0.5)
enemy.center_x = SCREEN_WIDTH - 120
enemy.center_y = SCREEN_HEIGHT - enemy.height
enemy.angle = 180
self.enemy_list.append(enemy)
def on_draw(self):
"""Render the screen. """
arcade.start_render()
self.enemy_list.draw()
self.bullet_list.draw()
self.player_list.draw()
def on_update(self, delta_time):
"""All the logic to move, and the game logic goes here. """
self.frame_count += 1
# Loop through each enemy that we have
for enemy in self.enemy_list:
# First, calculate the angle to the player. We could do this
# only when the bullet fires, but in this case we will rotate
# the enemy to face the player each frame, so we'll do this
# each frame.
# Position the start at the enemy's current location
start_x = enemy.center_x
start_y = enemy.center_y
# Get the destination location for the bullet
dest_x = self.player.center_x
dest_y = self.player.center_y
# Do the math to get the bullet to the destination.
# Calculate the angle in radians between the start point
# and the end point. This is the angle the bullet will travel.
x_diff = dest_x - start_x
y_diff = dest_y - start_y
angle = math.atan2(y_diff, x_diff)
# Set the enemy to face the player.
enemy.angle = math.degrees(angle)-90
# Shoot every 60 frames instead of every frame
if self.frame_count % 60 == 0:
bullet = arcade.Sprite(":resources:images/space_shooter/laserBlue01.png")
bullet.center_x = start_x
bullet.center_y = start_y
# Angle the bullet sprite
bullet.angle = math.degrees(angle)
# Taking into account the angle, calculate our change_x
# and change_y. Velocity is how fast the bullet travels.
bullet.change_x = math.cos(angle) * BULLET_SPEED
bullet.change_y = math.sin(angle) * BULLET_SPEED
self.bullet_list.append(bullet)
# Get rid of the bullet when it flies off-screen
for bullet in self.bullet_list:
if bullet.top < 0:
bullet.remove_from_sprite_lists()
self.bullet_list.update()
def on_mouse_motion(self, x, y, delta_x, delta_y):
"""Called whenever the mouse moves. """
self.player.center_x = x
self.player.center_y = y
def main():
""" Main method """
window = MyGame(SCREEN_WIDTH, SCREEN_HEIGHT, SCREEN_TITLE)
window.setup()
arcade.run()
if __name__ == "__main__":
main()
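# The aiming math above in isolation, with illustrative numbers.
import math

angle = math.atan2(300 - 540, 400 - 120)       # enemy (120, 540) -> player (400, 300)
change_x = math.cos(angle) * 4                 # BULLET_SPEED = 4
change_y = math.sin(angle) * 4
print(round(change_x, 2), round(change_y, 2))  # roughly 3.04 -2.6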
| 32.957447
| 98
| 0.624919
| 631
| 4,647
| 4.434231
| 0.304279
| 0.039314
| 0.027877
| 0.038599
| 0.163331
| 0.163331
| 0.132952
| 0.115082
| 0.115082
| 0.115082
| 0
| 0.012515
| 0.295029
| 4,647
| 140
| 99
| 33.192857
| 0.841575
| 0.306004
| 0
| 0.112676
| 0
| 0
| 0.079861
| 0.065341
| 0
| 0
| 0
| 0
| 0
| 1
| 0.084507
| false
| 0
| 0.042254
| 0
| 0.140845
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c7e4ec9d240f0bbb6bcb11b797135aad6a43254
| 1,342
|
py
|
Python
|
amnesia/modules/mime/model.py
|
silenius/amnesia
|
ba5e3ac79a89da599c22206ad1fd17541855f74c
|
[
"BSD-2-Clause"
] | 4
|
2015-05-08T10:57:56.000Z
|
2021-05-17T04:32:11.000Z
|
amnesia/modules/mime/model.py
|
silenius/amnesia
|
ba5e3ac79a89da599c22206ad1fd17541855f74c
|
[
"BSD-2-Clause"
] | 6
|
2019-12-26T16:43:41.000Z
|
2022-02-28T11:07:54.000Z
|
amnesia/modules/mime/model.py
|
silenius/amnesia
|
ba5e3ac79a89da599c22206ad1fd17541855f74c
|
[
"BSD-2-Clause"
] | 1
|
2019-09-23T14:08:11.000Z
|
2019-09-23T14:08:11.000Z
|
# -*- coding: utf-8 -*-
# pylint: disable=E1101
from sqlalchemy import sql
from sqlalchemy import orm
from sqlalchemy.orm.exc import NoResultFound
from .. import Base
# http://www.iana.org/assignments/media-types/media-types.xhtml
class MimeMajor(Base):
"""Mime major"""
def __init__(self, name):
super().__init__()
self.name = name
class Mime(Base):
def __init__(self, name, template, major):
super().__init__()
self.name = name
self.template = template
self.major = major
@property
def full(self):
return '{0}/{1}'.format(self.major.name, self.name)
@staticmethod
def q_major_minor(dbsession, major, minor):
cond = sql.and_(
MimeMajor.name == major,
Mime.name == minor
)
result = dbsession.execute(
sql.select(Mime).join(Mime.major).options(
orm.contains_eager(Mime.major)
).filter(cond)
).scalar_one_or_none()
return result
###########
# Filters #
###########
@classmethod
def filter_mime(cls, value):
(major, minor) = value.split('/')
cond = sql.and_()
cond.append(MimeMajor.name == major)
if minor and minor != '*':
cond.append(Mime.name == minor)
return cond
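# A sketch of the filter helper; the dbsession comes from the application's
# SQLAlchemy setup and is an assumption here, as is the import path.
from sqlalchemy import sql
from amnesia.modules.mime.model import Mime

cond = Mime.filter_mime('image/*')  # MimeMajor.name == 'image', any minor
stmt = sql.select(Mime).join(Mime.major).filter(cond)
images = dbsession.execute(stmt).scalars().all()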
| 21.645161
| 63
| 0.568554
| 150
| 1,342
| 4.92
| 0.42
| 0.054201
| 0.065041
| 0.04065
| 0.056911
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007361
| 0.291356
| 1,342
| 61
| 64
| 22
| 0.768665
| 0.093145
| 0
| 0.108108
| 0
| 0
| 0.007601
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.135135
| false
| 0
| 0.108108
| 0.027027
| 0.378378
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c7ea1a87be56599bff87dd5b87938ba5b672c0b
| 14,385
|
py
|
Python
|
launcher/src/main/scripts/bin/launcher.py
|
iyersathya/airlift
|
27e981a50cee655ff4e1e13801ba5a55991f93ce
|
[
"Apache-2.0"
] | null | null | null |
launcher/src/main/scripts/bin/launcher.py
|
iyersathya/airlift
|
27e981a50cee655ff4e1e13801ba5a55991f93ce
|
[
"Apache-2.0"
] | 35
|
2019-09-27T23:27:54.000Z
|
2021-10-06T14:57:28.000Z
|
launcher/src/main/scripts/bin/launcher.py
|
iyersathya/airlift
|
27e981a50cee655ff4e1e13801ba5a55991f93ce
|
[
"Apache-2.0"
] | 21
|
2019-09-21T06:13:58.000Z
|
2021-08-10T20:05:09.000Z
|
#!/usr/bin/env python
import errno
import os
import platform
import sys
import traceback
from fcntl import flock, LOCK_EX, LOCK_NB
from optparse import OptionParser
from os import O_RDWR, O_CREAT, O_WRONLY, O_APPEND
from os.path import basename, dirname, exists, realpath
from os.path import join as pathjoin
from signal import SIGTERM, SIGKILL
from stat import S_ISLNK
from time import sleep
COMMANDS = ['run', 'start', 'stop', 'restart', 'kill', 'status']
LSB_NOT_RUNNING = 3
LSB_STATUS_UNKNOWN = 4
def find_install_path(f):
"""Find canonical parent of bin/launcher.py"""
if basename(f) != 'launcher.py':
raise Exception("Expected file '%s' to be 'launcher.py' not '%s'" % (f, basename(f)))
p = realpath(dirname(f))
if basename(p) != 'bin':
raise Exception("Expected file '%s' directory to be 'bin' not '%s" % (f, basename(p)))
return dirname(p)
def makedirs(p):
"""Create directory and all intermediate ones"""
try:
os.makedirs(p)
except OSError as e:
if e.errno != errno.EEXIST:
raise
def load_properties(f):
"""Load key/value pairs from a file"""
properties = {}
for line in load_lines(f):
k, v = line.split('=', 1)
properties[k.strip()] = v.strip()
return properties
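# Example (illustrative, not in the original): a properties file containing
#   node.environment=production
#   node.data-dir=/var/data
# yields {'node.environment': 'production', 'node.data-dir': '/var/data'};
# blank lines and '#' comments are dropped by load_lines() below.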
def load_lines(f):
"""Load lines from a file, ignoring blank or comment lines"""
lines = []
for line in open(f, 'r').readlines():
line = line.strip()
if len(line) > 0 and not line.startswith('#'):
lines.append(line)
return lines
def try_lock(f):
"""Try to open an exclusive lock (inheritable) on a file"""
try:
flock(f, LOCK_EX | LOCK_NB)
return True
except (IOError, OSError): # IOError in Python 2, OSError in Python 3.
return False
def open_read_write(f, mode):
"""Open file in read/write mode (without truncating it)"""
return os.fdopen(os.open(f, O_RDWR | O_CREAT, mode), 'r+')
class Process:
def __init__(self, path):
makedirs(dirname(path))
self.path = path
self.pid_file = open_read_write(path, 0o600)
self.refresh()
def refresh(self):
self.locked = try_lock(self.pid_file)
def clear_pid(self):
assert self.locked, 'pid file not locked by us'
self.pid_file.seek(0)
self.pid_file.truncate()
def write_pid(self, pid):
self.clear_pid()
self.pid_file.write(str(pid) + '\n')
self.pid_file.flush()
def alive(self):
self.refresh()
if self.locked:
return False
pid = self.read_pid()
try:
os.kill(pid, 0)
return True
except OSError as e:
raise Exception('Signaling pid %s failed: %s' % (pid, e))
def read_pid(self):
assert not self.locked, 'pid file is locked by us'
self.pid_file.seek(0)
line = self.pid_file.readline().strip()
if len(line) == 0:
raise Exception("Pid file '%s' is empty" % self.path)
try:
pid = int(line)
except ValueError:
raise Exception("Pid file '%s' contains garbage: %s" % (self.path, line))
if pid <= 0:
raise Exception("Pid file '%s' contains an invalid pid: %s" % (self.path, pid))
return pid
def redirect_stdin_to_devnull():
"""Redirect stdin to /dev/null"""
fd = os.open(os.devnull, O_RDWR)
os.dup2(fd, sys.stdin.fileno())
os.close(fd)
def open_append(f):
"""Open a raw file descriptor in append mode"""
# noinspection PyTypeChecker
return os.open(f, O_WRONLY | O_APPEND | O_CREAT, 0o644)
def redirect_output(fd):
"""Redirect stdout and stderr to a file descriptor"""
os.dup2(fd, sys.stdout.fileno())
os.dup2(fd, sys.stderr.fileno())
def symlink_exists(p):
"""Check if symlink exists and raise if another type of file exists"""
try:
st = os.lstat(p)
if not S_ISLNK(st.st_mode):
raise Exception('Path exists and is not a symlink: %s' % p)
return True
except OSError as e:
if e.errno != errno.ENOENT:
raise
return False
def create_symlink(source, target):
"""Create a symlink, removing the target first if it is a symlink"""
if symlink_exists(target):
os.remove(target)
if exists(source):
os.symlink(source, target)
def create_app_symlinks(options):
"""
Symlink the 'etc' and 'plugin' directory into the data directory.
This is needed to support programs that reference 'etc/xyz' from within
their config files: log.levels-file=etc/log.properties
"""
if options.etc_dir != pathjoin(options.data_dir, 'etc'):
create_symlink(
options.etc_dir,
pathjoin(options.data_dir, 'etc'))
if options.install_path != options.data_dir:
create_symlink(
pathjoin(options.install_path, 'plugin'),
pathjoin(options.data_dir, 'plugin'))
def build_java_execution(options, daemon):
if not exists(options.config_path):
raise Exception('Config file is missing: %s' % options.config_path)
if not exists(options.jvm_config):
raise Exception('JVM config file is missing: %s' % options.jvm_config)
if not exists(options.launcher_config):
raise Exception('Launcher config file is missing: %s' % options.launcher_config)
if options.log_levels_set and not exists(options.log_levels):
raise Exception('Log levels file is missing: %s' % options.log_levels)
properties = options.properties.copy()
if exists(options.log_levels):
properties['log.levels-file'] = options.log_levels
if daemon:
properties['log.output-file'] = options.server_log
properties['log.enable-console'] = 'false'
jvm_properties = load_lines(options.jvm_config)
launcher_properties = load_properties(options.launcher_config)
try:
main_class = launcher_properties['main-class']
except KeyError:
raise Exception("Launcher config is missing 'main-class' property")
properties['config'] = options.config_path
system_properties = ['-D%s=%s' % i for i in properties.items()]
classpath = pathjoin(options.install_path, 'lib', '*')
command = ['java', '-cp', classpath]
command += jvm_properties + system_properties
command += [main_class]
command += options.arguments
if options.verbose:
print(command)
print("")
env = os.environ.copy()
# set process name: https://github.com/electrum/procname
process_name = launcher_properties.get('process-name', '')
if len(process_name) > 0:
system = platform.system() + '-' + platform.machine()
shim = pathjoin(options.install_path, 'bin', 'procname', system, 'libprocname.so')
if exists(shim):
env['LD_PRELOAD'] = (env.get('LD_PRELOAD', '') + ':' + shim).strip()
env['PROCNAME'] = process_name
return command, env
def run(process, options):
if process.alive():
print('Already running as %s' % process.read_pid())
return
create_app_symlinks(options)
args, env = build_java_execution(options, False)
makedirs(options.data_dir)
os.chdir(options.data_dir)
process.write_pid(os.getpid())
redirect_stdin_to_devnull()
os.execvpe(args[0], args, env)
def start(process, options):
if process.alive():
print('Already running as %s' % process.read_pid())
return
create_app_symlinks(options)
args, env = build_java_execution(options, True)
makedirs(dirname(options.launcher_log))
log = open_append(options.launcher_log)
makedirs(options.data_dir)
os.chdir(options.data_dir)
pid = os.fork()
if pid > 0:
process.write_pid(pid)
print('Started as %s' % pid)
return
if hasattr(os, "set_inheritable"):
# See https://docs.python.org/3/library/os.html#inheritance-of-file-descriptors
# Since Python 3.4
os.set_inheritable(process.pid_file.fileno(), True)
os.setsid()
redirect_stdin_to_devnull()
redirect_output(log)
os.close(log)
os.execvpe(args[0], args, env)
def terminate(process, signal, message):
if not process.alive():
print('Not running')
return
pid = process.read_pid()
while True:
try:
os.kill(pid, signal)
except OSError as e:
if e.errno != errno.ESRCH:
raise Exception('Signaling pid %s failed: %s' % (pid, e))
if not process.alive():
process.clear_pid()
break
sleep(0.1)
print('%s %s' % (message, pid))
def stop(process):
terminate(process, SIGTERM, 'Stopped')
def kill(process):
terminate(process, SIGKILL, 'Killed')
def status(process):
if not process.alive():
print('Not running')
sys.exit(LSB_NOT_RUNNING)
print('Running as %s' % process.read_pid())
def handle_command(command, options):
process = Process(options.pid_file)
if command == 'run':
run(process, options)
elif command == 'start':
start(process, options)
elif command == 'stop':
stop(process)
elif command == 'restart':
stop(process)
start(process, options)
elif command == 'kill':
kill(process)
elif command == 'status':
status(process)
else:
raise AssertionError('Unhandled command: ' + command)
def create_parser():
commands = 'Commands: ' + ', '.join(COMMANDS)
parser = OptionParser(prog='launcher', usage='usage: %prog [options] command', description=commands)
parser.add_option('-v', '--verbose', action='store_true', default=False, help='Run verbosely')
parser.add_option('--etc-dir', metavar='DIR', help='Defaults to INSTALL_PATH/etc')
parser.add_option('--launcher-config', metavar='FILE', help='Defaults to INSTALL_PATH/bin/launcher.properties')
parser.add_option('--node-config', metavar='FILE', help='Defaults to ETC_DIR/node.properties')
parser.add_option('--jvm-config', metavar='FILE', help='Defaults to ETC_DIR/jvm.config')
parser.add_option('--config', metavar='FILE', help='Defaults to ETC_DIR/config.properties')
parser.add_option('--log-levels-file', metavar='FILE', help='Defaults to ETC_DIR/log.properties')
parser.add_option('--data-dir', metavar='DIR', help='Defaults to INSTALL_PATH')
parser.add_option('--pid-file', metavar='FILE', help='Defaults to DATA_DIR/var/run/launcher.pid')
parser.add_option('--arg', action='append', metavar='ARG', dest='arguments', help='Add a program argument of the Java application')
parser.add_option('--launcher-log-file', metavar='FILE', help='Defaults to DATA_DIR/var/log/launcher.log (only in daemon mode)')
parser.add_option('--server-log-file', metavar='FILE', help='Defaults to DATA_DIR/var/log/server.log (only in daemon mode)')
parser.add_option('-D', action='append', metavar='NAME=VALUE', dest='properties', help='Set a Java system property')
return parser
def parse_properties(parser, args):
properties = {}
for arg in args:
if '=' not in arg:
parser.error('property is malformed: %s' % arg)
key, value = [i.strip() for i in arg.split('=', 1)]
if key == 'config':
parser.error('cannot specify config using -D option (use --config)')
if key == 'log.output-file':
parser.error('cannot specify server log using -D option (use --server-log-file)')
if key == 'log.levels-file':
parser.error('cannot specify log levels using -D option (use --log-levels-file)')
properties[key] = value
return properties
def print_options(options):
if options.verbose:
for i in sorted(vars(options)):
print("%-15s = %s" % (i, getattr(options, i)))
print("")
class Options:
pass
def main():
parser = create_parser()
(options, args) = parser.parse_args()
if len(args) != 1:
if len(args) == 0:
parser.error('command name not specified')
else:
parser.error('too many arguments')
command = args[0]
if command not in COMMANDS:
parser.error('unsupported command: %s' % command)
try:
install_path = find_install_path(sys.argv[0])
except Exception as e:
print('ERROR: %s' % e)
sys.exit(LSB_STATUS_UNKNOWN)
o = Options()
o.verbose = options.verbose
o.install_path = install_path
o.launcher_config = realpath(options.launcher_config or pathjoin(o.install_path, 'bin/launcher.properties'))
o.etc_dir = realpath(options.etc_dir or pathjoin(o.install_path, 'etc'))
o.node_config = realpath(options.node_config or pathjoin(o.etc_dir, 'node.properties'))
o.jvm_config = realpath(options.jvm_config or pathjoin(o.etc_dir, 'jvm.config'))
o.config_path = realpath(options.config or pathjoin(o.etc_dir, 'config.properties'))
o.log_levels = realpath(options.log_levels_file or pathjoin(o.etc_dir, 'log.properties'))
o.log_levels_set = bool(options.log_levels_file)
if options.node_config and not exists(o.node_config):
parser.error('Node config file is missing: %s' % o.node_config)
node_properties = {}
if exists(o.node_config):
node_properties = load_properties(o.node_config)
data_dir = node_properties.get('node.data-dir')
o.data_dir = realpath(options.data_dir or data_dir or o.install_path)
o.pid_file = realpath(options.pid_file or pathjoin(o.data_dir, 'var/run/launcher.pid'))
o.launcher_log = realpath(options.launcher_log_file or pathjoin(o.data_dir, 'var/log/launcher.log'))
o.server_log = realpath(options.server_log_file or pathjoin(o.data_dir, 'var/log/server.log'))
o.properties = parse_properties(parser, options.properties or {})
for k, v in node_properties.items():
if k not in o.properties:
o.properties[k] = v
o.arguments = options.arguments or []
if o.verbose:
print_options(o)
try:
handle_command(command, o)
except SystemExit:
raise
except Exception as e:
if o.verbose:
traceback.print_exc()
else:
print('ERROR: %s' % e)
sys.exit(LSB_STATUS_UNKNOWN)
if __name__ == '__main__':
main()
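A short sketch of driving the option parser above directly (illustrative only; the path and the property are hypothetical):
# Illustrative only: parse a command line without executing a command.
parser = create_parser()
(options, args) = parser.parse_args(['--data-dir', '/tmp/demo', '-Dlog.path=var/log', 'status'])
assert args == ['status'] and options.data_dir == '/tmp/demo'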
| 31.136364
| 135
| 0.639694
| 1,929
| 14,385
| 4.646449
| 0.161223
| 0.01562
| 0.021756
| 0.020529
| 0.241883
| 0.190115
| 0.146491
| 0.127747
| 0.07531
| 0.038157
| 0
| 0.003436
| 0.231213
| 14,385
| 461
| 136
| 31.203905
| 0.807035
| 0.066597
| 0
| 0.230303
| 0
| 0
| 0.165118
| 0.01604
| 0.00303
| 0
| 0
| 0
| 0.009091
| 1
| 0.090909
| false
| 0.00303
| 0.039394
| 0
| 0.193939
| 0.048485
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c7f557e50cc992f1ad5414b88efb2c8bf4f59f5
| 1,213
|
py
|
Python
|
code/sim/test.py
|
vectorcrumb/Ballbot_IEE2913
|
5ab54825b2bfadae251e2c6bfaaa7f8fcdae77a0
|
[
"MIT"
] | null | null | null |
code/sim/test.py
|
vectorcrumb/Ballbot_IEE2913
|
5ab54825b2bfadae251e2c6bfaaa7f8fcdae77a0
|
[
"MIT"
] | null | null | null |
code/sim/test.py
|
vectorcrumb/Ballbot_IEE2913
|
5ab54825b2bfadae251e2c6bfaaa7f8fcdae77a0
|
[
"MIT"
] | null | null | null |
from direct.showbase.ShowBase import ShowBase
from direct.task import Task
from direct.actor.Actor import Actor
import numpy as np
class MyApp(ShowBase):
def __init__(self):
ShowBase.__init__(self)
# Load environment model
self.scene = self.loader.loadModel("models/environment")
# Reparent model to render
self.scene.reparentTo(self.render)
# Scale and position model
self.scene.setScale(0.25, 0.25, 0.25)
self.scene.setPos(-8, 42, 0)
# Add spinCameraTask to task manager to execute
self.taskMgr.add(self.spinCameraTask, "SpinCameraTask")
# Load and transform panda actor
self.pandaActor = Actor("models/panda-model", {"walk": "models/panda-walk4"})
self.pandaActor.setScale(0.005, 0.005, 0.005)
self.pandaActor.reparentTo(self.render)
# Loop animation
self.pandaActor.loop("walk")
def spinCameraTask(self, task):
angleDegs = task.time * 6.0
angleRads = angleDegs * (np.pi / 180.0)
self.camera.setPos(20*np.sin(angleRads), -20.0 * np.cos(angleRads), 3)
self.camera.setHpr(angleDegs, 0, 0)
return Task.cont
app = MyApp()
app.run()
| 32.783784
| 85
| 0.649629
| 157
| 1,213
| 4.968153
| 0.401274
| 0.046154
| 0.035897
| 0.015385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.043103
| 0.234955
| 1,213
| 37
| 86
| 32.783784
| 0.797414
| 0.135202
| 0
| 0
| 0
| 0
| 0.072867
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.166667
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c80d22f73704982f5f02b4193bf4d13e0699eda
| 5,914
|
py
|
Python
|
src/pandas_profiling/model/describe.py
|
briangrahamww/pandas-profiling
|
62f8e3fd81720d444041069191c4aacd03d79ad5
|
[
"MIT"
] | null | null | null |
src/pandas_profiling/model/describe.py
|
briangrahamww/pandas-profiling
|
62f8e3fd81720d444041069191c4aacd03d79ad5
|
[
"MIT"
] | 4
|
2021-11-01T15:17:07.000Z
|
2022-01-26T15:22:15.000Z
|
src/pandas_profiling/model/describe.py
|
briangrahamww/pandas-profiling
|
62f8e3fd81720d444041069191c4aacd03d79ad5
|
[
"MIT"
] | null | null | null |
"""Organize the calculation of statistics for each series in this DataFrame."""
import warnings
from datetime import datetime
from typing import Optional
import pandas as pd
from tqdm.auto import tqdm
from visions import VisionsTypeset
from pandas_profiling.config import Settings
from pandas_profiling.model.correlations import calculate_correlation
from pandas_profiling.model.duplicates import get_duplicates
from pandas_profiling.model.sample import Sample, get_sample
from pandas_profiling.model.summarizer import BaseSummarizer
from pandas_profiling.model.summary import (
get_messages,
get_missing_diagrams,
get_scatter_matrix,
get_series_descriptions,
get_table_stats,
)
from pandas_profiling.version import __version__
def describe(
config: Settings,
df: pd.DataFrame,
summarizer: BaseSummarizer,
typeset: VisionsTypeset,
sample: Optional[dict] = None,
) -> dict:
"""Calculate the statistics for each series in this DataFrame.
Args:
config: report Settings object
df: DataFrame.
sample: optional, dict with custom sample
Returns:
This function returns a dictionary containing:
- table: overall statistics.
- variables: descriptions per series.
- correlations: correlation matrices.
- missing: missing value diagrams.
- messages: direct special attention to these patterns in your data.
- package: package details.
"""
if df is None:
raise ValueError("Can not describe a `lazy` ProfileReport without a DataFrame.")
if not isinstance(df, pd.DataFrame):
warnings.warn("df is not of type pandas.DataFrame")
disable_progress_bar = not config.progress_bar
date_start = datetime.utcnow()
correlation_names = [
correlation_name
for correlation_name in [
"pearson",
"spearman",
"kendall",
"phi_k",
"cramers",
]
if config.correlations[correlation_name].calculate
]
number_of_tasks = 8 + len(df.columns) + len(correlation_names)
with tqdm(
total=number_of_tasks, desc="Summarize dataset", disable=disable_progress_bar
) as pbar:
series_description = get_series_descriptions(
config, df, summarizer, typeset, pbar
)
pbar.set_postfix_str("Get variable types")
variables = {
column: description["type"]
for column, description in series_description.items()
}
supported_columns = [
column
for column, type_name in variables.items()
if type_name != "Unsupported"
]
interval_columns = [
column for column, type_name in variables.items() if type_name == "Numeric"
]
pbar.update()
# Get correlations
correlations = {}
for correlation_name in correlation_names:
pbar.set_postfix_str(f"Calculate {correlation_name} correlation")
correlations[correlation_name] = calculate_correlation(
config, df, correlation_name, series_description
)
pbar.update()
# make sure correlations is not None
correlations = {
key: value for key, value in correlations.items() if value is not None
}
# Scatter matrix
pbar.set_postfix_str("Get scatter matrix")
scatter_matrix = get_scatter_matrix(config, df, interval_columns)
pbar.update()
# Table statistics
pbar.set_postfix_str("Get table statistics")
table_stats = get_table_stats(config, df, series_description)
pbar.update()
# missing diagrams
pbar.set_postfix_str("Get missing diagrams")
missing = get_missing_diagrams(config, df, table_stats)
pbar.update()
# Sample
pbar.set_postfix_str("Take sample")
if sample is None:
samples = get_sample(config, df)
else:
if "name" not in sample:
sample["name"] = None
if "caption" not in sample:
sample["caption"] = None
samples = [
Sample(
id="custom",
data=sample["data"],
name=sample["name"],
caption=sample["caption"],
)
]
pbar.update()
# Duplicates
pbar.set_postfix_str("Locating duplicates")
metrics, duplicates = get_duplicates(config, df, supported_columns)
table_stats.update(metrics)
pbar.update()
# Messages
pbar.set_postfix_str("Get messages/warnings")
messages = get_messages(config, table_stats, series_description, correlations)
pbar.update()
pbar.set_postfix_str("Get reproduction details")
package = {
"pandas_profiling_version": __version__,
"pandas_profiling_config": config.json(),
}
pbar.update()
pbar.set_postfix_str("Completed")
date_end = datetime.utcnow()
analysis = {
"title": config.title,
"date_start": date_start,
"date_end": date_end,
"duration": date_end - date_start,
}
return {
# Analysis metadata
"analysis": analysis,
# Overall dataset description
"table": table_stats,
# Per variable descriptions
"variables": series_description,
# Bivariate relations
"scatter": scatter_matrix,
# Correlation matrices
"correlations": correlations,
# Missing values
"missing": missing,
# Warnings
"messages": messages,
# Package
"package": package,
# Sample
"sample": samples,
# Duplicates
"duplicates": duplicates,
}
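For context, this module is normally driven through the public ProfileReport entry point rather than by calling describe() directly; a minimal sketch, assuming pandas-profiling is installed:
# Illustrative only: the usual public entry point that ends up calling describe().
import pandas as pd
from pandas_profiling import ProfileReport

report = ProfileReport(pd.DataFrame({"a": [1, 2, 3], "b": [4.0, 5.0, 6.0]}))
report.to_file("report.html")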
| 30.328205
| 88
| 0.615996
| 599
| 5,914
| 5.894825
| 0.248748
| 0.019824
| 0.039649
| 0.048145
| 0.097706
| 0.068536
| 0.053243
| 0.031719
| 0.031719
| 0.031719
| 0
| 0.000243
| 0.305208
| 5,914
| 194
| 89
| 30.484536
| 0.85909
| 0.148461
| 0
| 0.067669
| 0
| 0
| 0.11443
| 0.009485
| 0
| 0
| 0
| 0
| 0
| 1
| 0.007519
| false
| 0
| 0.097744
| 0
| 0.112782
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c813b2cc84c9caa5444e2c87441c4626db990da
| 1,114
|
py
|
Python
|
maxOfferNum.py
|
Ruanxingzhi/King-of-Pigeon
|
38d6191c93c2d485b2e5cf163f06b9f2a5dacbec
|
[
"MIT"
] | null | null | null |
maxOfferNum.py
|
Ruanxingzhi/King-of-Pigeon
|
38d6191c93c2d485b2e5cf163f06b9f2a5dacbec
|
[
"MIT"
] | null | null | null |
maxOfferNum.py
|
Ruanxingzhi/King-of-Pigeon
|
38d6191c93c2d485b2e5cf163f06b9f2a5dacbec
|
[
"MIT"
] | null | null | null |
import operator
class Std(object):
def __init__(self):
self.name = ''
self.offerNum = 0
self.offers = []
stds = []
stdsDict = {}
index = 0
def readStd(name,camper):
global stds
global stdsDict
global index
if name not in stdsDict:
newStd = Std()
newStd.name = name
stds.append(newStd)
stdsDict[name] = index
index += 1
if camper not in stds[stdsDict[name]].offers:
stds[stdsDict[name]].offers.append(camper)
stds[stdsDict[name]].offerNum += 1
if __name__ == "__main__":
campers = ['PKUxk','THUsz_ai','THUsz_cs','THUsz_data','USTC_cs']
for camper in campers:
filename = camper + '.txt'
with open('data/%s'%(filename), "r") as f:
data = f.readlines()
for std in data:
readStd(std,camper)
cmpfun = operator.attrgetter('offerNum','name')
stds.sort(key = cmpfun,reverse = True)
for std in stds:
if std.name[-1] == '\n':
std.name = std.name[:-1]
        print(f'{std.name} got {std.offerNum} offers: {std.offers}')
| 26.52381
| 68
| 0.56553
| 140
| 1,114
| 4.385714
| 0.378571
| 0.078176
| 0.078176
| 0.071661
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007614
| 0.292639
| 1,114
| 42
| 69
| 26.52381
| 0.771574
| 0
| 0
| 0
| 0
| 0
| 0.109417
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.027778
| 0
| 0.111111
| 0.027778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c81a099c1328ddb836ac7f6bc808bcec8ce85e6
| 5,525
|
py
|
Python
|
tabnine-vim/third_party/ycmd/third_party/python-future/setup.py
|
MrMonk3y/vimrc
|
950230fb3fd7991d1234c2ab516ec03245945677
|
[
"MIT"
] | 2
|
2018-04-16T03:08:42.000Z
|
2021-01-06T10:21:49.000Z
|
tabnine-vim/third_party/ycmd/third_party/python-future/setup.py
|
MrMonk3y/vimrc
|
950230fb3fd7991d1234c2ab516ec03245945677
|
[
"MIT"
] | null | null | null |
tabnine-vim/third_party/ycmd/third_party/python-future/setup.py
|
MrMonk3y/vimrc
|
950230fb3fd7991d1234c2ab516ec03245945677
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from __future__ import absolute_import, print_function
import os
import os.path
import sys
try:
from setuptools import setup
except ImportError:
from distutils.core import setup
if sys.argv[-1] == 'publish':
os.system('python setup.py sdist upload')
sys.exit()
NAME = "future"
PACKAGES = ["future",
"future.builtins",
"future.types",
"future.standard_library",
"future.backports",
"future.backports.email",
"future.backports.email.mime",
"future.backports.html",
"future.backports.http",
"future.backports.test",
"future.backports.urllib",
"future.backports.xmlrpc",
"future.moves",
"future.moves.dbm",
"future.moves.html",
"future.moves.http",
"future.moves.test",
"future.moves.tkinter",
"future.moves.urllib",
"future.moves.xmlrpc",
"future.tests", # for future.tests.base
# "future.tests.test_email",
"future.utils",
"past",
"past.builtins",
"past.types",
"past.utils",
# "past.tests",
"past.translation",
"libfuturize",
"libfuturize.fixes",
"libpasteurize",
"libpasteurize.fixes",
]
# PEP 3108 stdlib moves:
if sys.version_info[:2] < (3, 0):
PACKAGES += [
"builtins",
"configparser",
"copyreg",
"html",
"http",
"queue",
"reprlib",
"socketserver",
"tkinter",
"winreg",
"xmlrpc",
"_dummy_thread",
"_markupbase",
"_thread",
]
PACKAGE_DATA = {'': [
'README.rst',
'LICENSE.txt',
'futurize.py',
'pasteurize.py',
'discover_tests.py',
'check_rst.sh',
'TESTING.txt',
],
'tests': ['*.py'],
}
REQUIRES = []
TEST_REQUIRES = []
if sys.version_info[:2] == (2, 6):
REQUIRES += ['importlib', 'argparse']
TEST_REQUIRES += ['unittest2']
import src.future
VERSION = src.future.__version__
DESCRIPTION = "Clean single-source support for Python 3 and 2"
LONG_DESC = src.future.__doc__
AUTHOR = "Ed Schofield"
AUTHOR_EMAIL = "ed@pythoncharmers.com"
URL="https://python-future.org"
LICENSE = "MIT"
KEYWORDS = "future past python3 migration futurize backport six 2to3 modernize pasteurize 3to2"
CLASSIFIERS = [
"Programming Language :: Python",
"Programming Language :: Python :: 2.6",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.3",
"Programming Language :: Python :: 3.4",
"License :: OSI Approved",
"License :: OSI Approved :: MIT License",
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
]
setup_kwds = {}
# * Important *
# We forcibly remove the build folder to avoid breaking the
# user's Py3 installation if they run "python2 setup.py
# build" and then "python3 setup.py install".
try:
# If the user happens to run:
# python2 setup.py build
# python3 setup.py install
# then folders like "configparser" will be in build/lib.
# If so, we CANNOT let the user install this, because
# this may break his/her Python 3 install, depending on the folder order in
# sys.path. (Running "import configparser" etc. may pick up our Py2
# substitute packages, instead of the intended system stdlib modules.)
SYSTEM_MODULES = set([
'_dummy_thread',
'_markupbase',
'_thread',
'builtins',
'configparser',
'copyreg',
'html',
'http',
'queue',
'reprlib',
'socketserver',
'tkinter',
'winreg',
'xmlrpc'
])
if sys.version_info[0] >= 3:
# Do any of the above folders exist in build/lib?
files = os.listdir(os.path.join('build', 'lib'))
if len(set(files) & set(SYSTEM_MODULES)) > 0:
print('ERROR: Your build folder is in an inconsistent state for '
'a Python 3.x install. Please remove it manually and run '
'setup.py again.', file=sys.stderr)
sys.exit(1)
except OSError:
pass
setup(name=NAME,
version=VERSION,
author=AUTHOR,
author_email=AUTHOR_EMAIL,
url=URL,
description=DESCRIPTION,
long_description=LONG_DESC,
license=LICENSE,
keywords=KEYWORDS,
entry_points={
'console_scripts': [
'futurize = libfuturize.main:main',
'pasteurize = libpasteurize.main:main'
]
},
package_dir={'': 'src'},
packages=PACKAGES,
package_data=PACKAGE_DATA,
include_package_data=True,
install_requires=REQUIRES,
classifiers=CLASSIFIERS,
test_suite = "discover_tests",
tests_require=TEST_REQUIRES,
**setup_kwds
)
| 29.864865
| 95
| 0.523439
| 531
| 5,525
| 5.344633
| 0.40113
| 0.042283
| 0.052854
| 0.016913
| 0.10148
| 0.054968
| 0.054968
| 0.054968
| 0.054968
| 0.054968
| 0
| 0.011638
| 0.362353
| 5,525
| 184
| 96
| 30.027174
| 0.793926
| 0.131041
| 0
| 0.013333
| 0
| 0
| 0.343508
| 0.051432
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.006667
| 0.06
| 0
| 0.06
| 0.013333
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c82276d6def1d1d6f137aa1788b787b2da8110f
| 3,009
|
py
|
Python
|
python-百度翻译调用/Baidu_translate/com/translate/baidu/stackoverflow_question_handler.py
|
wangchuanli001/Project-experience
|
b563c5c3afc07c913c2e1fd25dff41c70533f8de
|
[
"Apache-2.0"
] | 12
|
2019-12-07T01:44:55.000Z
|
2022-01-27T14:13:30.000Z
|
python-百度翻译调用/Baidu_translate/com/translate/baidu/stackoverflow_question_handler.py
|
hujiese/Project-experience
|
b563c5c3afc07c913c2e1fd25dff41c70533f8de
|
[
"Apache-2.0"
] | 23
|
2020-05-23T03:56:33.000Z
|
2022-02-28T07:54:45.000Z
|
python-百度翻译调用/Baidu_translate/com/translate/baidu/stackoverflow_question_handler.py
|
hujiese/Project-experience
|
b563c5c3afc07c913c2e1fd25dff41c70533f8de
|
[
"Apache-2.0"
] | 7
|
2019-12-20T04:48:56.000Z
|
2021-11-19T02:23:45.000Z
|
import requests
from bs4 import BeautifulSoup
import urllib.request
import os
import random
import time
def html(url):
user_agents = [
'Mozilla/5.0 (Windows; U; Windows NT 5.1; it; rv:1.8.1.11) Gecko/20071127 Firefox/2.0.0.11',
'Opera/9.25 (Windows NT 5.1; U; en)',
'Mozilla/4.0 (compatible; MSIE 6.0; Windows NT 5.1; SV1; .NET CLR 1.1.4322; .NET CLR 2.0.50727)',
'Mozilla/5.0 (compatible; Konqueror/3.5; Linux) KHTML/3.5.5 (like Gecko) (Kubuntu)',
'Mozilla/5.0 (X11; U; Linux i686; en-US; rv:1.8.0.12) Gecko/20070731 Ubuntu/dapper-security Firefox/1.5.0.12',
'Lynx/2.8.5rel.1 libwww-FM/2.14 SSL-MM/1.4.1 GNUTLS/1.2.9',
"Mozilla/5.0 (X11; Linux i686) AppleWebKit/535.7 (KHTML, like Gecko) Ubuntu/11.04 Chromium/16.0.912.77 Chrome/16.0.912.77 Safari/535.7",
"Mozilla/5.0 (X11; Ubuntu; Linux i686; rv:10.0) Gecko/20100101 Firefox/10.0 "]
user_agent = random.choice(user_agents)
headers = {
'User-Agent': user_agent,
'Accept-Encoding': 'gzip'}
req = requests.get(url=url, headers=headers)
html_doc = req.text
soup = BeautifulSoup(html_doc, "html.parser")
times = soup.select("time")
views = soup.select("p.label-key > b")
active_str = str(views[2])
active = active_str[active_str.find("title=\"") + 7:active_str.find("Z")]
answers = soup.select("#answers-header > div > h2 >span")
question_content = soup.select("div.post-text")
tags = soup.select("#question > div.post-layout > div.postcell.post-layout--right > "
"div.post-taglist.grid.gs4.gsy.fd-column > div >a")
title = soup.select("h1 >a")
tags_str = ""
item = []
for tag in tags:
tags_str += tag.get_text() + ","
    answer_contents = []
    for i in range(1, len(question_content)):
        answer_contents.append(question_content[i])
for i in range(len(times)):
if len(times[i].get_text()) > 1:
asked_time = times[i].get("datetime").replace("T", " ")
    # item order: title, views, answers_num, asked_time, tags_str, active_time, question_content, answer_contents
    item.append(title[0].get_text())
item.append(views[1].get_text())
item.append(answers[0].get_text())
item.append(asked_time)
item.append(tags_str)
item.append(active)
item.append(question_content[0])
    item.append(answer_contents)
print(item)
# updatetosql(item)
def updatetosql(item):
ansers_text = "[split]".join(item[7])
updatesql = "UPDATE `t_stackoverflow_question` " \
"SET `tags`='%s', `views`='%s', `answers_num`='%s', `asked_time`='%s', `last_active_time`='%s', `question_content`='%s', `answers_contetnt`='%s' " \
"WHERE (`question_id`='%s') " \
% (item[4], item[1], item[2], item[3], item[5], item[6], ansers_text, item[0],)
pass
if __name__ == '__main__':
html("https://stackoverflow.com/questions/50119673/nginx-fast-cgi-cache-on-error-page-404")
| 42.985714
| 164
| 0.623463
| 453
| 3,009
| 4.024283
| 0.373068
| 0.043884
| 0.024685
| 0.018102
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.074802
| 0.204719
| 3,009
| 69
| 165
| 43.608696
| 0.687004
| 0.03988
| 0
| 0
| 0
| 0.131148
| 0.427235
| 0.065489
| 0
| 0
| 0
| 0
| 0
| 1
| 0.032787
| false
| 0.016393
| 0.098361
| 0
| 0.131148
| 0.016393
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c82c0f597ec23a15334ec51934c9484615b1b1f
| 2,541
|
py
|
Python
|
Research/data_loader.py
|
ALEXKIRNAS/Kaggle-C-CORE-Iceberg-Classifier-Challenge
|
d8b06969c9393cfce6d9ac96b58c9d365ff4369d
|
[
"MIT"
] | null | null | null |
Research/data_loader.py
|
ALEXKIRNAS/Kaggle-C-CORE-Iceberg-Classifier-Challenge
|
d8b06969c9393cfce6d9ac96b58c9d365ff4369d
|
[
"MIT"
] | null | null | null |
Research/data_loader.py
|
ALEXKIRNAS/Kaggle-C-CORE-Iceberg-Classifier-Challenge
|
d8b06969c9393cfce6d9ac96b58c9d365ff4369d
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
import pandas as pd
from keras.utils import to_categorical
from sklearn.model_selection import KFold, train_test_split
def load_data(path):
train = pd.read_json(os.path.join(path, "./train.json"))
test = pd.read_json(os.path.join(path, "./test.json"))
return (train, test)
def preprocess(df,
means=(-22.159262, -24.953745, 40.021883465782651),
stds=(5.33146, 4.5463958, 4.0815391476694414)):
X_band_1 = np.array([np.array(band).astype(np.float32).reshape(75, 75)
for band in df["band_1"]])
X_band_2 = np.array([np.array(band).astype(np.float32).reshape(75, 75)
for band in df["band_2"]])
    angl = df['inc_angle'].map(lambda x: np.cos(x * np.pi / 180) if x != 'na' else means[2])  # 'na' falls back to the channel mean so it normalizes to zero
angl = np.array([np.full(shape=(75, 75), fill_value=angel).astype(np.float32)
for angel in angl])
X_band_1 = (X_band_1 - means[0]) / stds[0]
X_band_2 = (X_band_2 - means[1]) / stds[1]
angl = (angl - means[2]) / stds[2]
images = np.concatenate([X_band_1[:, :, :, np.newaxis],
X_band_2[:, :, :, np.newaxis],
angl[:, :, :, np.newaxis]],
axis=-1)
return images
def prepare_data_cv(path):
train, test = load_data(path)
X_train, y_train = (preprocess(train),
to_categorical(train['is_iceberg'].as_matrix().reshape(-1, 1)))
kfold_data = []
kf = KFold(n_splits=5, shuffle=True, random_state=0xCAFFE)
for train_indices, val_indices in kf.split(y_train):
X_train_cv = X_train[train_indices]
y_train_cv = y_train[train_indices]
X_val = X_train[val_indices]
y_val = y_train[val_indices]
kfold_data.append((X_train_cv, y_train_cv, X_val, y_val))
X_test = preprocess(test)
return (kfold_data, X_test)
def prepare_data(path):
train, test = load_data(path)
X_train, y_train = (preprocess(train),
to_categorical(train['is_iceberg'].as_matrix().reshape(-1, 1)))
X_train_cv, X_valid, y_train_cv, y_valid = train_test_split(X_train,
y_train,
random_state=0xCAFFE,
train_size=0.8)
X_test = preprocess(test)
return ([(X_train_cv, y_train_cv, X_valid, y_valid)], X_test)
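A usage sketch for the loaders above (illustrative only; the path is hypothetical and must contain the Kaggle train.json and test.json files):
# Illustrative only: iterate over the five CV folds produced by prepare_data_cv().
kfold_data, X_test = prepare_data_cv('data/iceberg')  # hypothetical path
for fold, (X_tr, y_tr, X_val, y_val) in enumerate(kfold_data):
    print('fold %d: train %s, val %s' % (fold, X_tr.shape, X_val.shape))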
| 34.337838
| 92
| 0.562377
| 354
| 2,541
| 3.776836
| 0.268362
| 0.044877
| 0.017951
| 0.026926
| 0.34555
| 0.293194
| 0.293194
| 0.231862
| 0.231862
| 0.231862
| 0
| 0.063673
| 0.307753
| 2,541
| 73
| 93
| 34.808219
| 0.696418
| 0
| 0
| 0.156863
| 0
| 0
| 0.025974
| 0
| 0
| 0
| 0.00551
| 0
| 0
| 1
| 0.078431
| false
| 0
| 0.098039
| 0
| 0.254902
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c839f4dc74ac86e89c284ecfbdaf987fd07d858
| 554
|
py
|
Python
|
Problem_09.py
|
Habbo3/Project-Euler
|
1a01d67f72b9cfb606d13df91af89159b588216e
|
[
"MIT"
] | null | null | null |
Problem_09.py
|
Habbo3/Project-Euler
|
1a01d67f72b9cfb606d13df91af89159b588216e
|
[
"MIT"
] | null | null | null |
Problem_09.py
|
Habbo3/Project-Euler
|
1a01d67f72b9cfb606d13df91af89159b588216e
|
[
"MIT"
] | null | null | null |
"""
A Pythagorean triplet is a set of three natural numbers, a < b < c, for which,
a^2 + b^2 = c^2
For example, 3^2 + 4^2 = 9 + 16 = 25 = 5^2.
There exists exactly one Pythagorean triplet for which a + b + c = 1000.
Find the product abc.
"""
solved = False
for a in range(1, 1000):
for b in range(1, 1000):
for c in range(1, 1000):
if a < b < c:
if a + b + c == 1000:
if a**2 + b**2 == c**2:
solved = True
break
if solved:
break
if solved:
break
product = a*b*c
print("The product of only triplet who exists is : ", product)
| 24.086957
| 78
| 0.601083
| 101
| 554
| 3.29703
| 0.435644
| 0.03003
| 0.045045
| 0.108108
| 0.09009
| 0
| 0
| 0
| 0
| 0
| 0
| 0.099751
| 0.276173
| 554
| 23
| 79
| 24.086957
| 0.730673
| 0.409747
| 0
| 0.333333
| 0
| 0
| 0.1375
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c83aa67c0a65ae58c0709d1dc148cd1d75e4a56
| 2,862
|
py
|
Python
|
fanscribed/apps/transcripts/tests/test_transcripts.py
|
fanscribed/fanscribed
|
89b14496459f81a152df38ed5098fba2b087a1d7
|
[
"MIT"
] | 8
|
2015-01-05T07:04:02.000Z
|
2016-07-19T17:56:46.000Z
|
fanscribed/apps/transcripts/tests/test_transcripts.py
|
fanscribed/fanscribed
|
89b14496459f81a152df38ed5098fba2b087a1d7
|
[
"MIT"
] | 32
|
2015-03-18T18:51:00.000Z
|
2021-06-10T20:37:33.000Z
|
fanscribed/apps/transcripts/tests/test_transcripts.py
|
fanscribed/fanscribed
|
89b14496459f81a152df38ed5098fba2b087a1d7
|
[
"MIT"
] | 5
|
2015-02-10T21:15:32.000Z
|
2016-06-02T17:26:14.000Z
|
from decimal import Decimal
import os
from django.test import TestCase
from unipath import Path
from ....utils import refresh
from ...media import tests
from ..models import Transcript, TranscriptMedia
MEDIA_TESTDATA_PATH = Path(tests.__file__).parent.child('testdata')
RAW_MEDIA_PATH = MEDIA_TESTDATA_PATH.child('raw').child(
'NA-472-2012-12-23-Final-excerpt.mp3').absolute()
class TranscriptsTestCase(TestCase):
def test_transcript_starts_out_with_unknown_length(self):
transcript = Transcript.objects.create(title='test')
self.assertEqual(transcript.length, None)
def test_setting_transcript_length_creates_fragments_and_stitches(self):
t = Transcript.objects.create(title='test')
t.set_length('3.33')
f0, = t.fragments.all()
self.assertEqual(f0.start, Decimal('0.00'))
self.assertEqual(f0.end, Decimal('3.33'))
self.assertEqual(t.stitches.count(), 0)
t = Transcript.objects.create(title='test')
t.set_length('7.77')
f0, = t.fragments.all()
self.assertEqual(f0.start, Decimal('0.00'))
self.assertEqual(f0.end, Decimal('7.77'))
self.assertEqual(t.stitches.count(), 0)
t = Transcript.objects.create(title='test')
t.set_length('17.77')
f0, f1, f2 = t.fragments.all()
self.assertEqual(f0.start, Decimal('0.00'))
self.assertEqual(f0.end, Decimal('5.00'))
self.assertEqual(f1.start, Decimal('5.00'))
self.assertEqual(f1.end, Decimal('10.00'))
self.assertEqual(f2.start, Decimal('10.00'))
self.assertEqual(f2.end, Decimal('17.77'))
s0, s1 = t.stitches.all()
self.assertEqual(s0.left, f0)
self.assertEqual(s0.right, f1)
self.assertEqual(s0.state, 'notready')
self.assertEqual(s1.left, f1)
self.assertEqual(s1.right, f2)
self.assertEqual(s1.state, 'notready')
if os.environ.get('FAST_TEST') != '1':
from django.core.files import File
class SlowTranscriptsTestCase(TestCase):
def test_transcript_with_processed_media_has_length(self):
transcript = Transcript.objects.create(
title='test transcript',
)
raw_media = TranscriptMedia(
transcript=transcript,
is_processed=False,
is_full_length=True,
)
with open(RAW_MEDIA_PATH, 'rb') as f:
raw_media.file.save('{transcript.id}_raw.mp3'.format(**locals()), File(f))
raw_media.save()
# Process raw media.
raw_media.create_processed_task()
transcript = refresh(transcript)
# Check length.
expected_length = 5 * 60 # 5 minutes.
self.assertAlmostEqual(
transcript.length, expected_length, delta=0.2)
| 33.27907
| 90
| 0.628931
| 347
| 2,862
| 5.051873
| 0.299712
| 0.162578
| 0.067884
| 0.079863
| 0.351398
| 0.351398
| 0.292641
| 0.292641
| 0.233314
| 0.208785
| 0
| 0.042416
| 0.242138
| 2,862
| 85
| 91
| 33.670588
| 0.765791
| 0.015024
| 0
| 0.15873
| 0
| 0
| 0.065364
| 0.020604
| 0
| 0
| 0
| 0
| 0.31746
| 1
| 0.047619
| false
| 0
| 0.126984
| 0
| 0.206349
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c83fd89c702ba9d9dcb725c78535f9419ea8d70
| 2,771
|
py
|
Python
|
buildAncestryFeats.py
|
BurcinSayin/pf2
|
bcd362dc0a750b8ee59cd19ecff9cf5be4f34b19
|
[
"MIT"
] | null | null | null |
buildAncestryFeats.py
|
BurcinSayin/pf2
|
bcd362dc0a750b8ee59cd19ecff9cf5be4f34b19
|
[
"MIT"
] | null | null | null |
buildAncestryFeats.py
|
BurcinSayin/pf2
|
bcd362dc0a750b8ee59cd19ecff9cf5be4f34b19
|
[
"MIT"
] | null | null | null |
from bs4 import BeautifulSoup
import requests
import json
import datetime
import codecs
import re
featHolder = {}
featHolder['name'] = 'Pathfinder 2.0 Ancestry feat list'
featHolder['date'] = datetime.date.today().strftime("%B %d, %Y")
def get_details(link):
res = requests.get(link)
res.raise_for_status()
soup = BeautifulSoup(res.text, 'lxml')
feat = soup.find_all("div", {'class':'main'})
detailraw = soup.find("meta", {'name':'description'})['content'] #First we grab the content from the meta tag
    detailsplit = re.split('<(.*?)>', detailraw) #Now we split it into groups of strings separated by < >, to pull out any links
detail = ''.join(detailsplit[::2]) #Finally, we join every other group together (passing over the link groups) into one string
#print(detail)
return detail
def get_feats(link):
feats = []
res = requests.get(link)
res.raise_for_status()
soup = BeautifulSoup(res.text, 'lxml')
table = soup.find(lambda tag: tag.name=='table' and tag.has_attr('id') and tag['id']=="ctl00_MainContent_TableElement")
rows = table.findAll(lambda tag: tag.name=='tr')
t = 0
for row in rows:
t += 1
#print(row)
#print("-----------------------------------")
feat = {}
entries = row.find_all(lambda tag: tag.name=='td')
if entries is not None:
if len(entries) > 0:
name = entries[0].find("a").next_sibling.text #We do next_sibling here because the source puts PFS links first, which we want to skip over.
link = entries[0].find("a").next_sibling.a['href']
#for entry in entries:
# print(entry)
# print("row---------------")
level = entries[1].text
traits = entries[2].text
prereq = entries[3].text
source = entries[4].text
feat['name'] = name
feat['level'] = level
feat['traits'] = traits.split(", ")
feat['link'] = "https://2e.aonprd.com/" +link
feat['prereq'] = prereq
feat['benefits'] = source
details = get_details(feat['link'])
feat['text'] = details
feats.append(feat)
#if t > 5:
#break
return feats
listOfPages = codecs.open("ancestryFeats.csv", encoding='utf-8')
for line in listOfPages:
featMD = line.split(",")
print("Getting feats for :", featMD[0],"This url:", featMD[2])
featHolder[featMD[1]] = get_feats(featMD[2].strip('\n'))
json_data = json.dumps(featHolder, indent=4)
#print(json_data)
filename = "ancestry-feats-pf2.json"
f = open(filename, "w")
f.write(json_data)
f.close()
| 34.209877
| 155
| 0.572717
| 347
| 2,771
| 4.518732
| 0.432277
| 0.013393
| 0.022959
| 0.030612
| 0.110969
| 0.110969
| 0.080357
| 0.080357
| 0.080357
| 0.080357
| 0
| 0.011846
| 0.268856
| 2,771
| 80
| 156
| 34.6375
| 0.762093
| 0.16817
| 0
| 0.103448
| 0
| 0
| 0.128272
| 0.023124
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0
| 0.103448
| 0
| 0.172414
| 0.017241
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c84005ad03ff1fb7961f46195db1060fc63cb16
| 861
|
py
|
Python
|
Random_item_selector_module.py
|
Jahronimo/public_question_book_framework
|
812bd11b104de013e930536713b8134d046642d5
|
[
"MIT"
] | null | null | null |
Random_item_selector_module.py
|
Jahronimo/public_question_book_framework
|
812bd11b104de013e930536713b8134d046642d5
|
[
"MIT"
] | null | null | null |
Random_item_selector_module.py
|
Jahronimo/public_question_book_framework
|
812bd11b104de013e930536713b8134d046642d5
|
[
"MIT"
] | 1
|
2020-03-07T10:53:30.000Z
|
2020-03-07T10:53:30.000Z
|
import secrets


def Randomise(questions_lists, num_question_to_display):
    """Print a random sample of questions, each followed by a numbered answer blank."""
    secure_random = secrets.SystemRandom()  # creates a secure random object
    list_of_random_items = secure_random.sample(questions_lists, num_question_to_display)
    # Number each selected question and print a matching answer line under it.
    for each_question in range(num_question_to_display):
        print("Q.", each_question + 1, list_of_random_items[each_question])
        print("A.", each_question + 1, "_______________________")
        print("\n")
| 47.833333
| 86
| 0.682927
| 111
| 861
| 4.81982
| 0.522523
| 0.11215
| 0.061682
| 0.095327
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004594
| 0.24158
| 861
| 17
| 87
| 50.647059
| 0.814701
| 0.283391
| 0
| 0.166667
| 0
| 0
| 0.047463
| 0.037643
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.25
| 0
| 0.333333
| 0.25
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c8421979f69cbc7cf5cd9ec5a87a153ab3efc74
| 1,228
|
py
|
Python
|
python_scrape/test_functions.py
|
jose-marquez89/tech-job-landscape
|
0b509536e7ba22885f50c82da8cf990b65373090
|
[
"MIT"
] | null | null | null |
python_scrape/test_functions.py
|
jose-marquez89/tech-job-landscape
|
0b509536e7ba22885f50c82da8cf990b65373090
|
[
"MIT"
] | null | null | null |
python_scrape/test_functions.py
|
jose-marquez89/tech-job-landscape
|
0b509536e7ba22885f50c82da8cf990b65373090
|
[
"MIT"
] | null | null | null |
import unittest
import scrape
class TestScrapeFunctions(unittest.TestCase):
def test_build_url(self):
url = scrape.build_url("indeed",
"/jobs?q=Data+Scientist&l=Texas&start=10",
join_next=True)
expected = ("https://www.indeed.com/"
"jobs?q=Data+Scientist&l=Texas&start=10")
url2 = scrape.build_url("indeed", job="Data Scientist", state="Texas")
expected2 = ("https://www.indeed.com/"
"jobs?q=Data%20Scientist&l=Texas&start=0")
self.assertEqual(url, expected)
self.assertEqual(url2, expected2)
def test_fetch_page(self):
fpl = scrape.fetch_page_listings
job_data = fpl("indeed",
job="Data Scientist",
state="Texas")
self.assertNotEqual(len(job_data), 0)
self.assertIsInstance(job_data, tuple)
self.assertIsInstance(job_data[0][0], dict)
self.assertIsInstance(job_data[1], str)
job_data = fpl("indeed",
next_page="/jobs?q=Data+Scientist"
"&l=Texas&start=10")
if __name__ == '__main__':
unittest.main()
| 34.111111
| 78
| 0.556189
| 135
| 1,228
| 4.881481
| 0.355556
| 0.084977
| 0.054628
| 0.081942
| 0.30349
| 0.30349
| 0.206373
| 0.141123
| 0
| 0
| 0
| 0.020238
| 0.315961
| 1,228
| 35
| 79
| 35.085714
| 0.764286
| 0
| 0
| 0.071429
| 0
| 0
| 0.220684
| 0.112378
| 0
| 0
| 0
| 0
| 0.214286
| 1
| 0.071429
| false
| 0
| 0.071429
| 0
| 0.178571
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c85f5102089b2dbe1aa3c33bc6b5354992888f4
| 466
|
py
|
Python
|
pybook/ch10/DeckOfCards.py
|
YanhaoXu/python-learning
|
856687a71635a2ca67dab49d396c238f128e5ec0
|
[
"MIT"
] | 2
|
2021-12-06T13:29:48.000Z
|
2022-01-20T11:39:45.000Z
|
pybook/ch10/DeckOfCards.py
|
YanhaoXu/python-learning
|
856687a71635a2ca67dab49d396c238f128e5ec0
|
[
"MIT"
] | null | null | null |
pybook/ch10/DeckOfCards.py
|
YanhaoXu/python-learning
|
856687a71635a2ca67dab49d396c238f128e5ec0
|
[
"MIT"
] | null | null | null |
import random
# Create a deck of cards
deck = [x for x in range(52)]
# Create suits and ranks lists
suits = ["Spades", "Hearts", "Diamonds", "Clubs"]
ranks = ["Ace", "2", "3", "4", "5", "6", "7", "8", "9",
"10", "Jack", "Queen", "King"]
# Shuffle the cards
random.shuffle(deck)
# Display the first four cards
for i in range(4):
suit = suits[deck[i] // 13]
rank = ranks[deck[i] % 13]
print("Card number", deck[i], "is the", rank, "of", suit)
| 24.526316
| 61
| 0.575107
| 74
| 466
| 3.621622
| 0.621622
| 0.05597
| 0.052239
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.046448
| 0.214592
| 466
| 18
| 62
| 25.888889
| 0.685792
| 0.2103
| 0
| 0
| 0
| 0
| 0.192837
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.1
| 0
| 0.1
| 0.1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c8673116b02c8c1dd21b123ad5da8653dbefe4c
| 3,410
|
py
|
Python
|
nlpgnn/gnn/RGCNConv.py
|
ojipadeson/NLPGNN
|
7c43d2f0cb2b16c046c930037fd505c5c4f36db4
|
[
"MIT"
] | 263
|
2020-05-19T10:40:26.000Z
|
2022-03-25T05:22:49.000Z
|
nlpgnn/gnn/RGCNConv.py
|
Kuan-Louis/NLPGNN
|
b9ecec2c6df1b3e40a54511366dcb6085cf90c34
|
[
"MIT"
] | 7
|
2020-05-18T23:02:55.000Z
|
2021-04-29T18:27:43.000Z
|
nlpgnn/gnn/RGCNConv.py
|
Kuan-Louis/NLPGNN
|
b9ecec2c6df1b3e40a54511366dcb6085cf90c34
|
[
"MIT"
] | 56
|
2020-05-19T05:59:36.000Z
|
2022-03-14T06:21:33.000Z
|
#! usr/bin/env python3
# -*- coding:utf-8 -*-
"""
@Author:Kaiyin Zhou
Usage:
node_embeddings = tf.random.normal(shape=(5, 3))
adjacency_lists = [
tf.constant([[0, 1], [2, 4], [2, 4]], dtype=tf.int32),
tf.constant([[0, 1], [2, 4], [2, 4]], dtype=tf.int32)
]
layer = RGraphConvolution(out_features=12)
x = layer(GNNInput(node_embeddings, adjacency_lists))
"""
import tensorflow as tf
from nlpgnn.gnn.messagepassing import MessagePassing
class RGraphConvolution(MessagePassing):
def __init__(self,
out_features,
epsion=1e-7,
aggr="sum",
normalize=True,
kernel_initializer='glorot_uniform',
bias_initializer='zeros',
use_bias=True,
**kwargs):
super(RGraphConvolution, self).__init__(aggr, **kwargs)
self.kernel_initializer = tf.keras.initializers.get(kernel_initializer)
self.bias_initializer = tf.keras.initializers.get(bias_initializer)
self.use_bias = use_bias
self.normalize = normalize
self.out_features = out_features
self.epsion = epsion
def build(self, input_shapes):
node_embedding_shapes = input_shapes.node_embeddings
adjacency_list_shapes = input_shapes.adjacency_lists
num_edge_type = len(adjacency_list_shapes)
in_features = node_embedding_shapes[-1]
self._edge_type_weights = []
self._edge_type_bias = []
for i in range(num_edge_type):
weight = self.add_weight(
shape=(in_features, self.out_features),
initializer=self.kernel_initializer,
name='wt_{}'.format(i),
)
self._edge_type_weights.append(weight)
if self.use_bias:
self.bias = self.add_weight(
shape=(self.out_features),
initializer=self.bias_initializer,
name='b',
)
else:
self.bias = None
self.weight_o = self.add_weight(
shape=(in_features, self.out_features),
initializer=self.kernel_initializer,
name='wo',
)
self.built = True
def message_function(self, edge_source_states,
edge_target_states,
num_incoming_to_node_per_message,
num_outing_to_node_per_message,
edge_type_idx):
"""
:param edge_source_states: [M,H]
:param edge_target_states: [M,H]
:param num_incoming_to_node_per_message:[M]
:param edge_type_idx:
:param training:
:return:
"""
weight_r = self._edge_type_weights[edge_type_idx]
messages = tf.linalg.matmul(edge_source_states, weight_r)
if self.normalize:
messages = (
tf.expand_dims(1.0 / (tf.cast(num_incoming_to_node_per_message,
tf.float32) + self.epsion), axis=-1) * messages
)
return messages
def call(self, inputs):
aggr_out = self.propagate(inputs) # message_passing + update
aggr_out += tf.linalg.matmul(inputs.node_embeddings, self.weight_o)
if self.bias is not None:
aggr_out += self.bias
return aggr_out
| 35.894737
| 97
| 0.575367
| 378
| 3,410
| 4.880952
| 0.325397
| 0.039024
| 0.04065
| 0.034688
| 0.211924
| 0.159892
| 0.115989
| 0.115989
| 0.115989
| 0.115989
| 0
| 0.013089
| 0.327859
| 3,410
| 94
| 98
| 36.276596
| 0.791885
| 0.172141
| 0
| 0.060606
| 0
| 0
| 0.010909
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.060606
| false
| 0.030303
| 0.030303
| 0
| 0.136364
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c87af0c38dbd1633d14f5192f2da57d1ebe0d89
| 73,923
|
py
|
Python
|
addons/project/models/project.py
|
SHIVJITH/Odoo_Machine_Test
|
310497a9872db7844b521e6dab5f7a9f61d365a4
|
[
"Apache-2.0"
] | null | null | null |
addons/project/models/project.py
|
SHIVJITH/Odoo_Machine_Test
|
310497a9872db7844b521e6dab5f7a9f61d365a4
|
[
"Apache-2.0"
] | null | null | null |
addons/project/models/project.py
|
SHIVJITH/Odoo_Machine_Test
|
310497a9872db7844b521e6dab5f7a9f61d365a4
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import ast
from datetime import timedelta, datetime
from random import randint
from odoo import api, fields, models, tools, SUPERUSER_ID, _
from odoo.exceptions import UserError, AccessError, ValidationError, RedirectWarning
from odoo.tools.misc import format_date, get_lang
from odoo.osv.expression import OR
from .project_task_recurrence import DAYS, WEEKS
class ProjectTaskType(models.Model):
_name = 'project.task.type'
_description = 'Task Stage'
_order = 'sequence, id'
def _get_default_project_ids(self):
default_project_id = self.env.context.get('default_project_id')
return [default_project_id] if default_project_id else None
active = fields.Boolean('Active', default=True)
name = fields.Char(string='Stage Name', required=True, translate=True)
description = fields.Text(translate=True)
sequence = fields.Integer(default=1)
project_ids = fields.Many2many('project.project', 'project_task_type_rel', 'type_id', 'project_id', string='Projects',
default=_get_default_project_ids)
legend_blocked = fields.Char(
'Red Kanban Label', default=lambda s: _('Blocked'), translate=True, required=True,
help='Override the default value displayed for the blocked state for kanban selection, when the task or issue is in that stage.')
legend_done = fields.Char(
'Green Kanban Label', default=lambda s: _('Ready'), translate=True, required=True,
help='Override the default value displayed for the done state for kanban selection, when the task or issue is in that stage.')
legend_normal = fields.Char(
'Grey Kanban Label', default=lambda s: _('In Progress'), translate=True, required=True,
help='Override the default value displayed for the normal state for kanban selection, when the task or issue is in that stage.')
mail_template_id = fields.Many2one(
'mail.template',
string='Email Template',
domain=[('model', '=', 'project.task')],
help="If set an email will be sent to the customer when the task or issue reaches this step.")
fold = fields.Boolean(string='Folded in Kanban',
help='This stage is folded in the kanban view when there are no records in that stage to display.')
rating_template_id = fields.Many2one(
'mail.template',
string='Rating Email Template',
domain=[('model', '=', 'project.task')],
help="If set and if the project's rating configuration is 'Rating when changing stage', then an email will be sent to the customer when the task reaches this step.")
auto_validation_kanban_state = fields.Boolean('Automatic kanban status', default=False,
help="Automatically modify the kanban state when the customer replies to the feedback for this stage.\n"
" * A good feedback from the customer will update the kanban state to 'ready for the new stage' (green bullet).\n"
" * A medium or a bad feedback will set the kanban state to 'blocked' (red bullet).\n")
is_closed = fields.Boolean('Closing Stage', help="Tasks in this stage are considered as closed.")
disabled_rating_warning = fields.Text(compute='_compute_disabled_rating_warning')
def unlink_wizard(self, stage_view=False):
self = self.with_context(active_test=False)
# retrieves all the projects with a least 1 task in that stage
# a task can be in a stage even if the project is not assigned to the stage
readgroup = self.with_context(active_test=False).env['project.task'].read_group([('stage_id', 'in', self.ids)], ['project_id'], ['project_id'])
project_ids = list(set([project['project_id'][0] for project in readgroup] + self.project_ids.ids))
wizard = self.with_context(project_ids=project_ids).env['project.task.type.delete.wizard'].create({
'project_ids': project_ids,
'stage_ids': self.ids
})
context = dict(self.env.context)
context['stage_view'] = stage_view
return {
'name': _('Delete Stage'),
'view_mode': 'form',
'res_model': 'project.task.type.delete.wizard',
'views': [(self.env.ref('project.view_project_task_type_delete_wizard').id, 'form')],
'type': 'ir.actions.act_window',
'res_id': wizard.id,
'target': 'new',
'context': context,
}
def write(self, vals):
if 'active' in vals and not vals['active']:
self.env['project.task'].search([('stage_id', 'in', self.ids)]).write({'active': False})
return super(ProjectTaskType, self).write(vals)
@api.depends('project_ids', 'project_ids.rating_active')
def _compute_disabled_rating_warning(self):
for stage in self:
disabled_projects = stage.project_ids.filtered(lambda p: not p.rating_active)
if disabled_projects:
stage.disabled_rating_warning = '\n'.join('- %s' % p.name for p in disabled_projects)
else:
stage.disabled_rating_warning = False
class Project(models.Model):
_name = "project.project"
_description = "Project"
_inherit = ['portal.mixin', 'mail.alias.mixin', 'mail.thread', 'rating.parent.mixin']
_order = "sequence, name, id"
_rating_satisfaction_days = False # takes all existing ratings
_check_company_auto = True
def _compute_attached_docs_count(self):
Attachment = self.env['ir.attachment']
for project in self:
project.doc_count = Attachment.search_count([
'|',
'&',
('res_model', '=', 'project.project'), ('res_id', '=', project.id),
'&',
('res_model', '=', 'project.task'), ('res_id', 'in', project.task_ids.ids)
])
def _compute_task_count(self):
task_data = self.env['project.task'].read_group([('project_id', 'in', self.ids), '|', '&', ('stage_id.is_closed', '=', False), ('stage_id.fold', '=', False), ('stage_id', '=', False)], ['project_id'], ['project_id'])
result = dict((data['project_id'][0], data['project_id_count']) for data in task_data)
for project in self:
project.task_count = result.get(project.id, 0)
def attachment_tree_view(self):
action = self.env['ir.actions.act_window']._for_xml_id('base.action_attachment')
action['domain'] = str([
'|',
'&',
('res_model', '=', 'project.project'),
('res_id', 'in', self.ids),
'&',
('res_model', '=', 'project.task'),
('res_id', 'in', self.task_ids.ids)
])
action['context'] = "{'default_res_model': '%s','default_res_id': %d}" % (self._name, self.id)
return action
def _compute_is_favorite(self):
for project in self:
project.is_favorite = self.env.user in project.favorite_user_ids
def _inverse_is_favorite(self):
favorite_projects = not_fav_projects = self.env['project.project'].sudo()
for project in self:
if self.env.user in project.favorite_user_ids:
favorite_projects |= project
else:
not_fav_projects |= project
# Project User has no write access for project.
not_fav_projects.write({'favorite_user_ids': [(4, self.env.uid)]})
favorite_projects.write({'favorite_user_ids': [(3, self.env.uid)]})
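# Note on the command tuples above: in Odoo's x2many write protocol,
# (4, id) links an existing record and (3, id) unlinks it without deleting it,
# which is why favorites can be toggled through sudo() without full write
# access on the project. A minimal sketch, assuming an `env` from an Odoo shell:
#
#     project.write({'favorite_user_ids': [(4, env.uid)]})  # mark as favorite
#     project.write({'favorite_user_ids': [(3, env.uid)]})  # unmark again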
def _get_default_favorite_user_ids(self):
return [(6, 0, [self.env.uid])]
name = fields.Char("Name", index=True, required=True, tracking=True)
description = fields.Html()
active = fields.Boolean(default=True,
help="If the active field is set to False, it will allow you to hide the project without removing it.")
sequence = fields.Integer(default=10, help="Gives the sequence order when displaying a list of Projects.")
partner_id = fields.Many2one('res.partner', string='Customer', auto_join=True, tracking=True, domain="['|', ('company_id', '=', False), ('company_id', '=', company_id)]")
partner_email = fields.Char(
compute='_compute_partner_email', inverse='_inverse_partner_email',
string='Email', readonly=False, store=True, copy=False)
partner_phone = fields.Char(
compute='_compute_partner_phone', inverse='_inverse_partner_phone',
string="Phone", readonly=False, store=True, copy=False)
company_id = fields.Many2one('res.company', string='Company', required=True, default=lambda self: self.env.company)
currency_id = fields.Many2one('res.currency', related="company_id.currency_id", string="Currency", readonly=True)
analytic_account_id = fields.Many2one('account.analytic.account', string="Analytic Account", copy=False, ondelete='set null',
domain="['|', ('company_id', '=', False), ('company_id', '=', company_id)]", check_company=True,
help="Analytic account to which this project is linked for financial management. "
"Use an analytic account to record cost and revenue on your project.")
favorite_user_ids = fields.Many2many(
'res.users', 'project_favorite_user_rel', 'project_id', 'user_id',
default=_get_default_favorite_user_ids,
string='Members')
is_favorite = fields.Boolean(compute='_compute_is_favorite', inverse='_inverse_is_favorite', string='Show Project on dashboard',
help="Whether this project should be displayed on your dashboard.")
label_tasks = fields.Char(string='Use Tasks as', default='Tasks', help="Label used for the tasks of the project.", translate=True)
tasks = fields.One2many('project.task', 'project_id', string="Task Activities")
resource_calendar_id = fields.Many2one(
'resource.calendar', string='Working Time',
related='company_id.resource_calendar_id')
type_ids = fields.Many2many('project.task.type', 'project_task_type_rel', 'project_id', 'type_id', string='Tasks Stages')
task_count = fields.Integer(compute='_compute_task_count', string="Task Count")
task_ids = fields.One2many('project.task', 'project_id', string='Tasks',
domain=['|', ('stage_id.fold', '=', False), ('stage_id', '=', False)])
color = fields.Integer(string='Color Index')
user_id = fields.Many2one('res.users', string='Project Manager', default=lambda self: self.env.user, tracking=True)
alias_enabled = fields.Boolean(string='Use email alias', compute='_compute_alias_enabled', readonly=False)
alias_id = fields.Many2one('mail.alias', string='Alias', ondelete="restrict", required=True,
help="Internal email associated with this project. Incoming emails are automatically synchronized "
"with Tasks (or optionally Issues if the Issue Tracker module is installed).")
privacy_visibility = fields.Selection([
('followers', 'Invited internal users'),
('employees', 'All internal users'),
('portal', 'Invited portal users and all internal users'),
],
string='Visibility', required=True,
default='portal',
help="Defines the visibility of the tasks of the project:\n"
"- Invited internal users: employees may only see the followed project and tasks.\n"
"- All internal users: employees may see all project and tasks.\n"
"- Invited portal and all internal users: employees may see everything."
" Portal users may see project and tasks followed by\n"
" them or by someone of their company.")
allowed_user_ids = fields.Many2many('res.users', compute='_compute_allowed_users', inverse='_inverse_allowed_user')
allowed_internal_user_ids = fields.Many2many('res.users', 'project_allowed_internal_users_rel',
string="Allowed Internal Users", default=lambda self: self.env.user, domain=[('share', '=', False)])
allowed_portal_user_ids = fields.Many2many('res.users', 'project_allowed_portal_users_rel', string="Allowed Portal Users", domain=[('share', '=', True)])
doc_count = fields.Integer(compute='_compute_attached_docs_count', string="Number of documents attached")
date_start = fields.Date(string='Start Date')
date = fields.Date(string='Expiration Date', index=True, tracking=True)
subtask_project_id = fields.Many2one('project.project', string='Sub-task Project', ondelete="restrict",
help="Project in which sub-tasks of the current project will be created. It can be the current project itself.")
allow_subtasks = fields.Boolean('Sub-tasks', default=lambda self: self.env.user.has_group('project.group_subtask_project'))
allow_recurring_tasks = fields.Boolean('Recurring Tasks', default=lambda self: self.env.user.has_group('project.group_project_recurring_tasks'))
# rating fields
rating_request_deadline = fields.Datetime(compute='_compute_rating_request_deadline', store=True)
rating_active = fields.Boolean('Customer Ratings', default=lambda self: self.env.user.has_group('project.group_project_rating'))
rating_status = fields.Selection(
[('stage', 'Rating when changing stage'),
('periodic', 'Periodical Rating')
], 'Customer Ratings Status', default="stage", required=True,
help="How to get customer feedback?\n"
"- Rating when changing stage: an email will be sent when a task is pulled in another stage.\n"
"- Periodical Rating: email will be sent periodically.\n\n"
"Don't forget to set up the mail templates on the stages for which you want to get the customer's feedbacks.")
rating_status_period = fields.Selection([
('daily', 'Daily'),
('weekly', 'Weekly'),
('bimonthly', 'Twice a Month'),
('monthly', 'Once a Month'),
('quarterly', 'Quarterly'),
('yearly', 'Yearly')], 'Rating Frequency', required=True, default='monthly')
_sql_constraints = [
('project_date_greater', 'check(date >= date_start)', 'Error! The project start date must be earlier than the project end date.')
]
@api.depends('partner_id.email')
def _compute_partner_email(self):
for project in self:
if project.partner_id and project.partner_id.email != project.partner_email:
project.partner_email = project.partner_id.email
def _inverse_partner_email(self):
for project in self:
if project.partner_id and project.partner_email != project.partner_id.email:
project.partner_id.email = project.partner_email
@api.depends('partner_id.phone')
def _compute_partner_phone(self):
for project in self:
if project.partner_id and project.partner_phone != project.partner_id.phone:
project.partner_phone = project.partner_id.phone
def _inverse_partner_phone(self):
for project in self:
if project.partner_id and project.partner_phone != project.partner_id.phone:
project.partner_id.phone = project.partner_phone
@api.onchange('alias_enabled')
def _onchange_alias_name(self):
if not self.alias_enabled:
self.alias_name = False
def _compute_alias_enabled(self):
for project in self:
project.alias_enabled = project.alias_domain and project.alias_id.alias_name
@api.depends('allowed_internal_user_ids', 'allowed_portal_user_ids')
def _compute_allowed_users(self):
for project in self:
users = project.allowed_internal_user_ids | project.allowed_portal_user_ids
project.allowed_user_ids = users
def _inverse_allowed_user(self):
for project in self:
allowed_users = project.allowed_user_ids
project.allowed_portal_user_ids = allowed_users.filtered('share')
project.allowed_internal_user_ids = allowed_users - project.allowed_portal_user_ids
def _compute_access_url(self):
super(Project, self)._compute_access_url()
for project in self:
project.access_url = '/my/project/%s' % project.id
def _compute_access_warning(self):
super(Project, self)._compute_access_warning()
for project in self.filtered(lambda x: x.privacy_visibility != 'portal'):
project.access_warning = _(
"The project cannot be shared with the recipient(s) because the privacy of the project is too restricted. Set the privacy to 'Visible by following customers' in order to make it accessible by the recipient(s).")
@api.depends('rating_status', 'rating_status_period')
def _compute_rating_request_deadline(self):
periods = {'daily': 1, 'weekly': 7, 'bimonthly': 15, 'monthly': 30, 'quarterly': 90, 'yearly': 365}
for project in self:
project.rating_request_deadline = fields.datetime.now() + timedelta(days=periods.get(project.rating_status_period, 0))
@api.model
def _map_tasks_default_values(self, task, project):
""" get the default values for a copied task on project duplication """
return {
'stage_id': task.stage_id.id,
'name': task.name,
'company_id': project.company_id.id,
}
def map_tasks(self, new_project_id):
""" copy and map tasks from old to new project """
project = self.browse(new_project_id)
tasks = self.env['project.task']
# We want to copy archived tasks, but without propagating an active_test context key
task_ids = self.env['project.task'].with_context(active_test=False).search([('project_id', '=', self.id)], order='parent_id').ids
old_to_new_tasks = {}
for task in self.env['project.task'].browse(task_ids):
# preserve task name and stage, normally altered during copy
defaults = self._map_tasks_default_values(task, project)
if task.parent_id:
# set the parent to the duplicated task
defaults['parent_id'] = old_to_new_tasks.get(task.parent_id.id, False)
new_task = task.copy(defaults)
old_to_new_tasks[task.id] = new_task.id
tasks += new_task
return project.write({'tasks': [(6, 0, tasks.ids)]})
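# Note: the search above uses `order='parent_id'` so that, in the common case,
# a parent task is copied before its subtasks; by the time a subtask is
# handled, `old_to_new_tasks` already maps its old parent to the duplicate,
# and `defaults['parent_id']` falls back to False otherwise. Sketch of the
# mapping built for a two-level tree (ids are hypothetical):
#
#     # old_to_new_tasks == {10: 42, 11: 43}  # task 11 is a child of 10, 43 of 42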
@api.returns('self', lambda value: value.id)
def copy(self, default=None):
if default is None:
default = {}
if not default.get('name'):
default['name'] = _("%s (copy)") % (self.name)
project = super(Project, self).copy(default)
if self.subtask_project_id == self:
project.subtask_project_id = project
for follower in self.message_follower_ids:
project.message_subscribe(partner_ids=follower.partner_id.ids, subtype_ids=follower.subtype_ids.ids)
if 'tasks' not in default:
self.map_tasks(project.id)
return project
@api.model
def create(self, vals):
# Prevent double project creation
self = self.with_context(mail_create_nosubscribe=True)
project = super(Project, self).create(vals)
if not vals.get('subtask_project_id'):
project.subtask_project_id = project.id
if project.privacy_visibility == 'portal' and project.partner_id.user_ids:
project.allowed_user_ids |= project.partner_id.user_ids
return project
def write(self, vals):
allowed_users_changed = 'allowed_portal_user_ids' in vals or 'allowed_internal_user_ids' in vals
if allowed_users_changed:
allowed_users = {project: project.allowed_user_ids for project in self}
# compute is_favorite directly so users without write access on the project can still toggle it
if 'is_favorite' in vals:
vals.pop('is_favorite')
self._fields['is_favorite'].determine_inverse(self)
res = super(Project, self).write(vals) if vals else True
if allowed_users_changed:
for project in self:
permission_removed = allowed_users.get(project) - project.allowed_user_ids
allowed_portal_users_removed = permission_removed.filtered('share')
project.message_unsubscribe(allowed_portal_users_removed.partner_id.commercial_partner_id.ids)
for task in project.task_ids:
task.allowed_user_ids -= permission_removed
if 'allow_recurring_tasks' in vals and not vals.get('allow_recurring_tasks'):
self.env['project.task'].search([('project_id', 'in', self.ids), ('recurring_task', '=', True)]).write({'recurring_task': False})
if 'active' in vals:
# archiving/unarchiving a project does it on its tasks, too
self.with_context(active_test=False).mapped('tasks').write({'active': vals['active']})
if vals.get('partner_id') or vals.get('privacy_visibility'):
for project in self.filtered(lambda project: project.privacy_visibility == 'portal'):
project.allowed_user_ids |= project.partner_id.user_ids
return res
def action_unlink(self):
wizard = self.env['project.delete.wizard'].create({
'project_ids': self.ids
})
return {
'name': _('Confirmation'),
'view_mode': 'form',
'res_model': 'project.delete.wizard',
'views': [(self.env.ref('project.project_delete_wizard_form').id, 'form')],
'type': 'ir.actions.act_window',
'res_id': wizard.id,
'target': 'new',
'context': self.env.context,
}
def unlink(self):
# Check project is empty
for project in self.with_context(active_test=False):
if project.tasks:
raise UserError(_('You cannot delete a project containing tasks. You can either archive it or first delete all of its tasks.'))
# Delete the empty related analytic account
analytic_accounts_to_delete = self.env['account.analytic.account']
for project in self:
if project.analytic_account_id and not project.analytic_account_id.line_ids:
analytic_accounts_to_delete |= project.analytic_account_id
result = super(Project, self).unlink()
analytic_accounts_to_delete.unlink()
return result
def message_subscribe(self, partner_ids=None, channel_ids=None, subtype_ids=None):
"""
Subscribe to all existing active tasks when subscribing to a project
and add the subscribed portal users to the allowed portal users
"""
res = super(Project, self).message_subscribe(partner_ids=partner_ids, channel_ids=channel_ids, subtype_ids=subtype_ids)
project_subtypes = self.env['mail.message.subtype'].browse(subtype_ids) if subtype_ids else None
task_subtypes = (project_subtypes.mapped('parent_id') | project_subtypes.filtered(lambda sub: sub.internal or sub.default)).ids if project_subtypes else None
if not subtype_ids or task_subtypes:
self.mapped('tasks').message_subscribe(
partner_ids=partner_ids, channel_ids=channel_ids, subtype_ids=task_subtypes)
if partner_ids:
all_users = self.env['res.partner'].browse(partner_ids).user_ids
portal_users = all_users.filtered('share')
internal_users = all_users - portal_users
self.allowed_portal_user_ids |= portal_users
self.allowed_internal_user_ids |= internal_users
return res
def message_unsubscribe(self, partner_ids=None, channel_ids=None):
""" Unsubscribe from all tasks when unsubscribing from a project """
self.mapped('tasks').message_unsubscribe(partner_ids=partner_ids, channel_ids=channel_ids)
return super(Project, self).message_unsubscribe(partner_ids=partner_ids, channel_ids=channel_ids)
def _alias_get_creation_values(self):
values = super(Project, self)._alias_get_creation_values()
values['alias_model_id'] = self.env['ir.model']._get('project.task').id
if self.id:
values['alias_defaults'] = defaults = ast.literal_eval(self.alias_defaults or "{}")
defaults['project_id'] = self.id
return values
# ---------------------------------------------------
# Actions
# ---------------------------------------------------
def toggle_favorite(self):
favorite_projects = not_fav_projects = self.env['project.project'].sudo()
for project in self:
if self.env.user in project.favorite_user_ids:
favorite_projects |= project
else:
not_fav_projects |= project
# Project User has no write access for project.
not_fav_projects.write({'favorite_user_ids': [(4, self.env.uid)]})
favorite_projects.write({'favorite_user_ids': [(3, self.env.uid)]})
def action_view_tasks(self):
action = self.with_context(active_id=self.id, active_ids=self.ids) \
.env.ref('project.act_project_project_2_project_task_all') \
.sudo().read()[0]
action['display_name'] = self.name
return action
def action_view_account_analytic_line(self):
""" return the action to see all the analytic lines of the project's analytic account """
action = self.env["ir.actions.actions"]._for_xml_id("analytic.account_analytic_line_action")
action['context'] = {'default_account_id': self.analytic_account_id.id}
action['domain'] = [('account_id', '=', self.analytic_account_id.id)]
return action
def action_view_all_rating(self):
""" return the action to see all the rating of the project and activate default filters"""
action = self.env['ir.actions.act_window']._for_xml_id('project.rating_rating_action_view_project_rating')
action['name'] = _('Ratings of %s') % (self.name,)
action_context = ast.literal_eval(action['context']) if action['context'] else {}
action_context.update(self._context)
action_context['search_default_parent_res_name'] = self.name
action_context.pop('group_by', None)
return dict(action, context=action_context)
# ---------------------------------------------------
# Business Methods
# ---------------------------------------------------
@api.model
def _create_analytic_account_from_values(self, values):
analytic_account = self.env['account.analytic.account'].create({
'name': values.get('name', _('Unknown Analytic Account')),
'company_id': values.get('company_id') or self.env.company.id,
'partner_id': values.get('partner_id'),
'active': True,
})
return analytic_account
def _create_analytic_account(self):
for project in self:
analytic_account = self.env['account.analytic.account'].create({
'name': project.name,
'company_id': project.company_id.id,
'partner_id': project.partner_id.id,
'active': True,
})
project.write({'analytic_account_id': analytic_account.id})
# ---------------------------------------------------
# Rating business
# ---------------------------------------------------
# This method should be called once a day by the scheduler
@api.model
def _send_rating_all(self):
projects = self.search([
('rating_active', '=', True),
('rating_status', '=', 'periodic'),
('rating_request_deadline', '<=', fields.Datetime.now())
])
for project in projects:
project.task_ids._send_task_rating_mail()
project._compute_rating_request_deadline()
self.env.cr.commit()
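# This method is designed to be run once a day by a scheduled action. A minimal
# sketch of such a cron job (identifiers are illustrative, not the module's
# actual XML ids):
#
#     env['ir.cron'].create({
#         'name': 'Project: send periodic rating requests',
#         'model_id': env['ir.model']._get_id('project.project'),
#         'state': 'code',
#         'code': 'model._send_rating_all()',
#         'interval_number': 1,
#         'interval_type': 'days',
#     })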
class Task(models.Model):
_name = "project.task"
_description = "Task"
_date_name = "date_assign"
_inherit = ['portal.mixin', 'mail.thread.cc', 'mail.activity.mixin', 'rating.mixin']
_mail_post_access = 'read'
_order = "priority desc, sequence, id desc"
_check_company_auto = True
def _get_default_stage_id(self):
""" Gives default stage_id """
project_id = self.env.context.get('default_project_id')
if not project_id:
return False
return self.stage_find(project_id, [('fold', '=', False), ('is_closed', '=', False)])
@api.model
def _default_company_id(self):
if self._context.get('default_project_id'):
return self.env['project.project'].browse(self._context['default_project_id']).company_id
return self.env.company
@api.model
def _read_group_stage_ids(self, stages, domain, order):
search_domain = [('id', 'in', stages.ids)]
if 'default_project_id' in self.env.context:
search_domain = ['|', ('project_ids', '=', self.env.context['default_project_id'])] + search_domain
stage_ids = stages._search(search_domain, order=order, access_rights_uid=SUPERUSER_ID)
return stages.browse(stage_ids)
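# `_read_group_stage_ids` backs the `group_expand` attribute of `stage_id`
# (declared below): when tasks are grouped by stage in the kanban view, Odoo
# calls it so that every stage of the current project is returned as a column,
# including stages that currently contain no task.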
active = fields.Boolean(default=True)
name = fields.Char(string='Title', tracking=True, required=True, index=True)
description = fields.Html(string='Description')
priority = fields.Selection([
('0', 'Normal'),
('1', 'Important'),
], default='0', index=True, string="Priority")
sequence = fields.Integer(string='Sequence', index=True, default=10,
help="Gives the sequence order when displaying a list of tasks.")
stage_id = fields.Many2one('project.task.type', string='Stage', compute='_compute_stage_id',
store=True, readonly=False, ondelete='restrict', tracking=True, index=True,
default=_get_default_stage_id, group_expand='_read_group_stage_ids',
domain="[('project_ids', '=', project_id)]", copy=False)
tag_ids = fields.Many2many('project.tags', string='Tags')
kanban_state = fields.Selection([
('normal', 'In Progress'),
('done', 'Ready'),
('blocked', 'Blocked')], string='Kanban State',
copy=False, default='normal', required=True)
kanban_state_label = fields.Char(compute='_compute_kanban_state_label', string='Kanban State Label', tracking=True)
create_date = fields.Datetime("Created On", readonly=True, index=True)
write_date = fields.Datetime("Last Updated On", readonly=True, index=True)
date_end = fields.Datetime(string='Ending Date', index=True, copy=False)
date_assign = fields.Datetime(string='Assigning Date', index=True, copy=False, readonly=True)
date_deadline = fields.Date(string='Deadline', index=True, copy=False, tracking=True)
date_last_stage_update = fields.Datetime(string='Last Stage Update',
index=True,
copy=False,
readonly=True)
project_id = fields.Many2one('project.project', string='Project',
compute='_compute_project_id', store=True, readonly=False,
index=True, tracking=True, check_company=True, change_default=True)
planned_hours = fields.Float("Initially Planned Hours", help='Time planned to achieve this task (including its sub-tasks).', tracking=True)
subtask_planned_hours = fields.Float("Sub-tasks Planned Hours", compute='_compute_subtask_planned_hours', help="Sum of the time planned of all the sub-tasks linked to this task. Usually less than or equal to the time initially planned for this task.")
user_id = fields.Many2one('res.users',
string='Assigned to',
default=lambda self: self.env.uid,
index=True, tracking=True)
partner_id = fields.Many2one('res.partner',
string='Customer',
compute='_compute_partner_id', store=True, readonly=False,
domain="['|', ('company_id', '=', False), ('company_id', '=', company_id)]")
partner_is_company = fields.Boolean(related='partner_id.is_company', readonly=True)
commercial_partner_id = fields.Many2one(related='partner_id.commercial_partner_id')
partner_email = fields.Char(
compute='_compute_partner_email', inverse='_inverse_partner_email',
string='Email', readonly=False, store=True, copy=False)
partner_phone = fields.Char(
compute='_compute_partner_phone', inverse='_inverse_partner_phone',
string="Phone", readonly=False, store=True, copy=False)
ribbon_message = fields.Char('Ribbon message', compute='_compute_ribbon_message')
partner_city = fields.Char(related='partner_id.city', readonly=False)
manager_id = fields.Many2one('res.users', string='Project Manager', related='project_id.user_id', readonly=True)
company_id = fields.Many2one(
'res.company', string='Company', compute='_compute_company_id', store=True, readonly=False,
required=True, copy=True, default=_default_company_id)
color = fields.Integer(string='Color Index')
user_email = fields.Char(related='user_id.email', string='User Email', readonly=True, related_sudo=False)
attachment_ids = fields.One2many('ir.attachment', compute='_compute_attachment_ids', string="Main Attachments",
help="Attachment that don't come from message.")
# In the domain of displayed_image_id, we couldn't use attachment_ids because a one2many is represented as a list of commands, so we use res_model & res_id instead
displayed_image_id = fields.Many2one('ir.attachment', domain="[('res_model', '=', 'project.task'), ('res_id', '=', id), ('mimetype', 'ilike', 'image')]", string='Cover Image')
legend_blocked = fields.Char(related='stage_id.legend_blocked', string='Kanban Blocked Explanation', readonly=True, related_sudo=False)
legend_done = fields.Char(related='stage_id.legend_done', string='Kanban Valid Explanation', readonly=True, related_sudo=False)
legend_normal = fields.Char(related='stage_id.legend_normal', string='Kanban Ongoing Explanation', readonly=True, related_sudo=False)
is_closed = fields.Boolean(related="stage_id.is_closed", string="Closing Stage", readonly=True, related_sudo=False)
parent_id = fields.Many2one('project.task', string='Parent Task', index=True)
child_ids = fields.One2many('project.task', 'parent_id', string="Sub-tasks", context={'active_test': False})
subtask_project_id = fields.Many2one('project.project', related="project_id.subtask_project_id", string='Sub-task Project', readonly=True)
allow_subtasks = fields.Boolean(string="Allow Sub-tasks", related="project_id.allow_subtasks", readonly=True)
subtask_count = fields.Integer("Sub-task count", compute='_compute_subtask_count')
email_from = fields.Char(string='Email From', help="These people will receive email.", index=True,
compute='_compute_email_from', store="True", readonly=False)
allowed_user_ids = fields.Many2many('res.users', string="Visible to", groups='project.group_project_manager', compute='_compute_allowed_user_ids', store=True, readonly=False, copy=False)
project_privacy_visibility = fields.Selection(related='project_id.privacy_visibility', string="Project Visibility")
# Computed field about working time elapsed between record creation and assignation/closing.
working_hours_open = fields.Float(compute='_compute_elapsed', string='Working hours to assign', store=True, group_operator="avg")
working_hours_close = fields.Float(compute='_compute_elapsed', string='Working hours to close', store=True, group_operator="avg")
working_days_open = fields.Float(compute='_compute_elapsed', string='Working days to assign', store=True, group_operator="avg")
working_days_close = fields.Float(compute='_compute_elapsed', string='Working days to close', store=True, group_operator="avg")
# customer portal: include comment and incoming emails in communication history
website_message_ids = fields.One2many(domain=lambda self: [('model', '=', self._name), ('message_type', 'in', ['email', 'comment'])])
# recurrence fields
allow_recurring_tasks = fields.Boolean(related='project_id.allow_recurring_tasks')
recurring_task = fields.Boolean(string="Recurrent")
recurring_count = fields.Integer(string="Tasks in Recurrence", compute='_compute_recurring_count')
recurrence_id = fields.Many2one('project.task.recurrence', copy=False)
recurrence_update = fields.Selection([
('this', 'This task'),
('subsequent', 'This and following tasks'),
('all', 'All tasks'),
], default='this', store=False)
recurrence_message = fields.Char(string='Next Recurrencies', compute='_compute_recurrence_message')
repeat_interval = fields.Integer(string='Repeat Every', default=1, compute='_compute_repeat', readonly=False)
repeat_unit = fields.Selection([
('day', 'Days'),
('week', 'Weeks'),
('month', 'Months'),
('year', 'Years'),
], default='week', compute='_compute_repeat', readonly=False)
repeat_type = fields.Selection([
('forever', 'Forever'),
('until', 'End Date'),
('after', 'Number of Repetitions'),
], default="forever", string="Until", compute='_compute_repeat', readonly=False)
repeat_until = fields.Date(string="End Date", compute='_compute_repeat', readonly=False)
repeat_number = fields.Integer(string="Repetitions", default=1, compute='_compute_repeat', readonly=False)
repeat_on_month = fields.Selection([
('date', 'Date of the Month'),
('day', 'Day of the Month'),
], default='date', compute='_compute_repeat', readonly=False)
repeat_on_year = fields.Selection([
('date', 'Date of the Year'),
('day', 'Day of the Year'),
], default='date', compute='_compute_repeat', readonly=False)
mon = fields.Boolean(string="Mon", compute='_compute_repeat', readonly=False)
tue = fields.Boolean(string="Tue", compute='_compute_repeat', readonly=False)
wed = fields.Boolean(string="Wed", compute='_compute_repeat', readonly=False)
thu = fields.Boolean(string="Thu", compute='_compute_repeat', readonly=False)
fri = fields.Boolean(string="Fri", compute='_compute_repeat', readonly=False)
sat = fields.Boolean(string="Sat", compute='_compute_repeat', readonly=False)
sun = fields.Boolean(string="Sun", compute='_compute_repeat', readonly=False)
repeat_day = fields.Selection([
(str(i), str(i)) for i in range(1, 32)
], compute='_compute_repeat', readonly=False)
repeat_week = fields.Selection([
('first', 'First'),
('second', 'Second'),
('third', 'Third'),
('last', 'Last'),
], default='first', compute='_compute_repeat', readonly=False)
repeat_weekday = fields.Selection([
('mon', 'Monday'),
('tue', 'Tuesday'),
('wed', 'Wednesday'),
('thu', 'Thursday'),
('fri', 'Friday'),
('sat', 'Saturday'),
('sun', 'Sunday'),
], string='Day Of The Week', compute='_compute_repeat', readonly=False)
repeat_month = fields.Selection([
('january', 'January'),
('february', 'February'),
('march', 'March'),
('april', 'April'),
('may', 'May'),
('june', 'June'),
('july', 'July'),
('august', 'August'),
('september', 'September'),
('october', 'October'),
('november', 'November'),
('december', 'December'),
], compute='_compute_repeat', readonly=False)
repeat_show_dow = fields.Boolean(compute='_compute_repeat_visibility')
repeat_show_day = fields.Boolean(compute='_compute_repeat_visibility')
repeat_show_week = fields.Boolean(compute='_compute_repeat_visibility')
repeat_show_month = fields.Boolean(compute='_compute_repeat_visibility')
@api.model
def _get_recurrence_fields(self):
return ['repeat_interval', 'repeat_unit', 'repeat_type', 'repeat_until', 'repeat_number',
'repeat_on_month', 'repeat_on_year', 'mon', 'tue', 'wed', 'thu', 'fri', 'sat',
'sun', 'repeat_day', 'repeat_week', 'repeat_month', 'repeat_weekday']
@api.depends('recurring_task', 'repeat_unit', 'repeat_on_month', 'repeat_on_year')
def _compute_repeat_visibility(self):
for task in self:
task.repeat_show_day = task.recurring_task and ((task.repeat_unit == 'month' and task.repeat_on_month == 'date') or (task.repeat_unit == 'year' and task.repeat_on_year == 'date'))
task.repeat_show_week = task.recurring_task and ((task.repeat_unit == 'month' and task.repeat_on_month == 'day') or (task.repeat_unit == 'year' and task.repeat_on_year == 'day'))
task.repeat_show_dow = task.recurring_task and task.repeat_unit == 'week'
task.repeat_show_month = task.recurring_task and task.repeat_unit == 'year'
@api.depends('recurring_task')
def _compute_repeat(self):
rec_fields = self._get_recurrence_fields()
defaults = self.default_get(rec_fields)
for task in self:
for f in rec_fields:
if task.recurrence_id:
task[f] = task.recurrence_id[f]
else:
if task.recurring_task:
task[f] = defaults.get(f)
else:
task[f] = False
def _get_weekdays(self, n=1):
self.ensure_one()
if self.repeat_unit == 'week':
return [fn(n) for day, fn in DAYS.items() if self[day]]
return [DAYS.get(self.repeat_weekday)(n)]
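# Sketch of what `DAYS` is assumed to contain (it is defined next to the
# project.task.recurrence model): dateutil weekday constructors, so that
# `fn(n)` means "the n-th such weekday" in relativedelta computations:
#
#     from dateutil.relativedelta import MO, TU, WE, TH, FR, SA, SU
#     DAYS = {'mon': MO, 'tue': TU, 'wed': WE, 'thu': TH, 'fri': FR, 'sat': SA, 'sun': SU}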
@api.depends(
'recurring_task', 'repeat_interval', 'repeat_unit', 'repeat_type', 'repeat_until',
'repeat_number', 'repeat_on_month', 'repeat_on_year', 'mon', 'tue', 'wed', 'thu', 'fri',
'sat', 'sun', 'repeat_day', 'repeat_week', 'repeat_month', 'repeat_weekday')
def _compute_recurrence_message(self):
self.recurrence_message = False
for task in self.filtered(lambda t: t.recurring_task and t._is_recurrence_valid()):
date = fields.Date.today()
number_occurrences = min(5, task.repeat_number if task.repeat_type == 'after' else 5)
delta = task.repeat_interval if task.repeat_unit == 'day' else 1
recurring_dates = self.env['project.task.recurrence']._get_next_recurring_dates(
date + timedelta(days=delta),
task.repeat_interval,
task.repeat_unit,
task.repeat_type,
task.repeat_until,
task.repeat_on_month,
task.repeat_on_year,
task._get_weekdays(WEEKS.get(task.repeat_week)),
task.repeat_day,
task.repeat_week,
task.repeat_month,
count=number_occurrences)
date_format = self.env['res.lang']._lang_get(self.env.user.lang).date_format
task.recurrence_message = '<ul>'
for date in recurring_dates[:5]:
task.recurrence_message += '<li>%s</li>' % date.strftime(date_format)
if task.repeat_type == 'after' and task.repeat_number > 5 or task.repeat_type == 'forever' or len(recurring_dates) > 5:
task.recurrence_message += '<li>...</li>'
task.recurrence_message += '</ul>'
if task.repeat_type == 'until':
task.recurrence_message += _('<p><em>Number of tasks: %(tasks_count)s</em></p>') % {'tasks_count': len(recurring_dates)}
def _is_recurrence_valid(self):
self.ensure_one()
return self.repeat_interval > 0 and\
(not self.repeat_show_dow or self._get_weekdays()) and\
(self.repeat_type != 'after' or self.repeat_number) and\
(self.repeat_type != 'until' or self.repeat_until and self.repeat_until > fields.Date.today())
@api.depends('recurrence_id')
def _compute_recurring_count(self):
self.recurring_count = 0
recurring_tasks = self.filtered(lambda l: l.recurrence_id)
count = self.env['project.task'].read_group([('recurrence_id', 'in', recurring_tasks.recurrence_id.ids)], ['id'], 'recurrence_id')
tasks_count = {c.get('recurrence_id')[0]: c.get('recurrence_id_count') for c in count}
for task in recurring_tasks:
task.recurring_count = tasks_count.get(task.recurrence_id.id, 0)
@api.depends('partner_id.email')
def _compute_partner_email(self):
for task in self:
if task.partner_id and task.partner_id.email != task.partner_email:
task.partner_email = task.partner_id.email
def _inverse_partner_email(self):
for task in self:
if task.partner_id and task.partner_email != task.partner_id.email:
task.partner_id.email = task.partner_email
@api.depends('partner_id.phone')
def _compute_partner_phone(self):
for task in self:
if task.partner_id and task.partner_phone != task.partner_id.phone:
task.partner_phone = task.partner_id.phone
def _inverse_partner_phone(self):
for task in self:
if task.partner_id and task.partner_phone != task.partner_id.phone:
task.partner_id.phone = task.partner_phone
@api.depends('partner_email', 'partner_phone', 'partner_id')
def _compute_ribbon_message(self):
for task in self:
will_write_email = task.partner_id and task.partner_email != task.partner_id.email
will_write_phone = task.partner_id and task.partner_phone != task.partner_id.phone
if will_write_email and will_write_phone:
task.ribbon_message = _('By saving this change, the customer email and phone number will also be updated.')
elif will_write_email:
task.ribbon_message = _('By saving this change, the customer email will also be updated.')
elif will_write_phone:
task.ribbon_message = _('By saving this change, the customer phone number will also be updated.')
else:
task.ribbon_message = False
@api.constrains('allowed_user_ids')
def _check_no_portal_allowed(self):
for task in self.filtered(lambda t: t.project_id.privacy_visibility != 'portal'):
portal_users = task.allowed_user_ids.filtered('share')
if portal_users:
user_names = ', '.join(portal_users[:10].mapped('name'))
raise ValidationError(_("The project visibility setting doesn't allow portal users to see the project's tasks. (%s)", user_names))
def _compute_attachment_ids(self):
for task in self:
attachment_ids = self.env['ir.attachment'].search([('res_id', '=', task.id), ('res_model', '=', 'project.task')]).ids
message_attachment_ids = task.mapped('message_ids.attachment_ids').ids # from mail_thread
task.attachment_ids = [(6, 0, list(set(attachment_ids) - set(message_attachment_ids)))]
@api.depends('project_id.allowed_user_ids', 'project_id.privacy_visibility')
def _compute_allowed_user_ids(self):
for task in self:
portal_users = task.allowed_user_ids.filtered('share')
internal_users = task.allowed_user_ids - portal_users
if task.project_id.privacy_visibility == 'followers':
task.allowed_user_ids |= task.project_id.allowed_internal_user_ids
task.allowed_user_ids -= portal_users
elif task.project_id.privacy_visibility == 'portal':
task.allowed_user_ids |= task.project_id.allowed_portal_user_ids
if task.project_id.privacy_visibility != 'portal':
task.allowed_user_ids -= portal_users
elif task.project_id.privacy_visibility != 'followers':
task.allowed_user_ids -= internal_users
@api.depends('create_date', 'date_end', 'date_assign')
def _compute_elapsed(self):
task_linked_to_calendar = self.filtered(
lambda task: task.project_id.resource_calendar_id and task.create_date
)
for task in task_linked_to_calendar:
dt_create_date = fields.Datetime.from_string(task.create_date)
if task.date_assign:
dt_date_assign = fields.Datetime.from_string(task.date_assign)
duration_data = task.project_id.resource_calendar_id.get_work_duration_data(dt_create_date, dt_date_assign, compute_leaves=True)
task.working_hours_open = duration_data['hours']
task.working_days_open = duration_data['days']
else:
task.working_hours_open = 0.0
task.working_days_open = 0.0
if task.date_end:
dt_date_end = fields.Datetime.from_string(task.date_end)
duration_data = task.project_id.resource_calendar_id.get_work_duration_data(dt_create_date, dt_date_end, compute_leaves=True)
task.working_hours_close = duration_data['hours']
task.working_days_close = duration_data['days']
else:
task.working_hours_close = 0.0
task.working_days_close = 0.0
(self - task_linked_to_calendar).update(dict.fromkeys(
['working_hours_open', 'working_hours_close', 'working_days_open', 'working_days_close'], 0.0))
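# `get_work_duration_data` is provided by resource.calendar and returns a
# dict of the form {'days': float, 'hours': float}, counting only working
# time (leaves are deducted when compute_leaves=True). A rough sketch in an
# Odoo shell, using the standard 40h demo calendar:
#
#     from datetime import datetime
#     calendar = env.ref('resource.resource_calendar_std')
#     calendar.get_work_duration_data(
#         datetime(2021, 1, 4, 8, 0), datetime(2021, 1, 5, 12, 0),
#         compute_leaves=True)
#     # -> approximately {'days': 1.5, 'hours': 12.0}, depending on attendances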
@api.depends('stage_id', 'kanban_state')
def _compute_kanban_state_label(self):
for task in self:
if task.kanban_state == 'normal':
task.kanban_state_label = task.legend_normal
elif task.kanban_state == 'blocked':
task.kanban_state_label = task.legend_blocked
else:
task.kanban_state_label = task.legend_done
def _compute_access_url(self):
super(Task, self)._compute_access_url()
for task in self:
task.access_url = '/my/task/%s' % task.id
def _compute_access_warning(self):
super(Task, self)._compute_access_warning()
for task in self.filtered(lambda x: x.project_id.privacy_visibility != 'portal'):
task.access_warning = _(
"The task cannot be shared with the recipient(s) because the privacy of the project is too restricted. Set the privacy of the project to 'Visible by following customers' in order to make it accessible by the recipient(s).")
@api.depends('child_ids.planned_hours')
def _compute_subtask_planned_hours(self):
for task in self:
task.subtask_planned_hours = sum(child_task.planned_hours + child_task.subtask_planned_hours for child_task in task.child_ids)
@api.depends('child_ids')
def _compute_subtask_count(self):
for task in self:
task.subtask_count = len(task._get_all_subtasks())
@api.onchange('company_id')
def _onchange_task_company(self):
if self.project_id.company_id != self.company_id:
self.project_id = False
@api.depends('project_id.company_id')
def _compute_company_id(self):
for task in self.filtered(lambda task: task.project_id):
task.company_id = task.project_id.company_id
@api.depends('project_id')
def _compute_stage_id(self):
for task in self:
if task.project_id:
if task.project_id not in task.stage_id.project_ids:
task.stage_id = task.stage_find(task.project_id.id, [
('fold', '=', False), ('is_closed', '=', False)])
else:
task.stage_id = False
@api.returns('self', lambda value: value.id)
def copy(self, default=None):
if default is None:
default = {}
if not default.get('name'):
default['name'] = _("%s (copy)", self.name)
if self.recurrence_id:
default['recurrence_id'] = self.recurrence_id.copy().id
return super(Task, self).copy(default)
@api.constrains('parent_id')
def _check_parent_id(self):
for task in self:
if not task._check_recursion():
raise ValidationError(_('Error! You cannot create a recursive hierarchy of tasks.'))
@api.model
def get_empty_list_help(self, help):
tname = _("task")
project_id = self.env.context.get('default_project_id', False)
if project_id:
name = self.env['project.project'].browse(project_id).label_tasks
if name: tname = name.lower()
self = self.with_context(
empty_list_help_id=self.env.context.get('default_project_id'),
empty_list_help_model='project.project',
empty_list_help_document_name=tname,
)
return super(Task, self).get_empty_list_help(help)
def message_subscribe(self, partner_ids=None, channel_ids=None, subtype_ids=None):
"""
Add the subscribed portal users to the allowed portal users
"""
res = super(Task, self).message_subscribe(partner_ids=partner_ids, channel_ids=channel_ids, subtype_ids=subtype_ids)
if partner_ids:
new_allowed_users = self.env['res.partner'].browse(partner_ids).user_ids.filtered('share')
tasks = self.filtered(lambda task: task.project_id.privacy_visibility == 'portal')
tasks.sudo().write({'allowed_user_ids': [(4, user.id) for user in new_allowed_users]})
return res
# ----------------------------------------
# Case management
# ----------------------------------------
def stage_find(self, section_id, domain=[], order='sequence'):
""" Override of the base.stage method
Parameter of the stage search taken from the lead:
- section_id: if set, stages must belong to this section or
be a default stage; if not set, stages must be default
stages
"""
# collect all section_ids
section_ids = []
if section_id:
section_ids.append(section_id)
section_ids.extend(self.mapped('project_id').ids)
search_domain = []
if section_ids:
search_domain = ['|'] * (len(section_ids) - 1)
for section_id in section_ids:
search_domain.append(('project_ids', '=', section_id))
search_domain += list(domain)
# perform search, return the first found
return self.env['project.task.type'].search(search_domain, order=order, limit=1).id
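# Odoo domains use prefix notation, so OR-ing N leaves requires N-1 '|'
# operators up front. For three hypothetical section ids the loop above builds:
#
#     # ['|', '|', ('project_ids', '=', 1), ('project_ids', '=', 2),
#     #  ('project_ids', '=', 3)] + domain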
# ------------------------------------------------
# CRUD overrides
# ------------------------------------------------
@api.model
def default_get(self, default_fields):
vals = super(Task, self).default_get(default_fields)
days = list(DAYS.keys())
week_start = fields.Datetime.today().weekday()
if all(d in default_fields for d in days):
vals[days[week_start]] = True
if 'repeat_day' in default_fields:
vals['repeat_day'] = str(fields.Datetime.today().day)
if 'repeat_month' in default_fields:
vals['repeat_month'] = self._fields.get('repeat_month').selection[fields.Datetime.today().month - 1][0]
if 'repeat_until' in default_fields:
vals['repeat_until'] = fields.Date.today() + timedelta(days=7)
if 'repeat_weekday' in default_fields:
vals['repeat_weekday'] = self._fields.get('repeat_weekday').selection[week_start][0]
return vals
@api.model_create_multi
def create(self, vals_list):
default_stage = dict()
for vals in vals_list:
project_id = vals.get('project_id') or self.env.context.get('default_project_id')
if project_id and not "company_id" in vals:
vals["company_id"] = self.env["project.project"].browse(
project_id
).company_id.id or self.env.company.id
if project_id and "stage_id" not in vals:
# 1) Allows keeping the batch creation of tasks
# 2) Ensure the defaults are correct (and computed once by project),
# by using default get (instead of _get_default_stage_id or _stage_find),
if project_id not in default_stage:
default_stage[project_id] = self.with_context(
default_project_id=project_id
).default_get(['stage_id']).get('stage_id')
vals["stage_id"] = default_stage[project_id]
# user_id change: update date_assign
if vals.get('user_id'):
vals['date_assign'] = fields.Datetime.now()
# Stage change: Update date_end if folded stage and date_last_stage_update
if vals.get('stage_id'):
vals.update(self.update_date_end(vals['stage_id']))
vals['date_last_stage_update'] = fields.Datetime.now()
# recurrence
rec_fields = vals.keys() & self._get_recurrence_fields()
if rec_fields and vals.get('recurring_task') is True:
rec_values = {rec_field: vals[rec_field] for rec_field in rec_fields}
rec_values['next_recurrence_date'] = fields.Datetime.today()
recurrence = self.env['project.task.recurrence'].create(rec_values)
vals['recurrence_id'] = recurrence.id
tasks = super().create(vals_list)
for task in tasks:
if task.project_id.privacy_visibility == 'portal':
task._portal_ensure_token()
return tasks
def write(self, vals):
now = fields.Datetime.now()
if 'parent_id' in vals and vals['parent_id'] in self.ids:
raise UserError(_("Sorry. You can't set a task as its parent task."))
if 'active' in vals and not vals.get('active') and any(self.mapped('recurrence_id')):
# TODO: show a dialog to stop the recurrence
raise UserError(_('You cannot archive recurring tasks. Please, disable the recurrence first.'))
# stage change: update date_last_stage_update
if 'stage_id' in vals:
vals.update(self.update_date_end(vals['stage_id']))
vals['date_last_stage_update'] = now
# reset kanban state when changing stage
if 'kanban_state' not in vals:
vals['kanban_state'] = 'normal'
# user_id change: update date_assign
if vals.get('user_id') and 'date_assign' not in vals:
vals['date_assign'] = now
# recurrence fields
rec_fields = vals.keys() & self._get_recurrence_fields()
if rec_fields:
rec_values = {rec_field: vals[rec_field] for rec_field in rec_fields}
for task in self:
if task.recurrence_id:
task.recurrence_id.write(rec_values)
elif vals.get('recurring_task'):
rec_values['next_recurrence_date'] = fields.Datetime.today()
recurrence = self.env['project.task.recurrence'].create(rec_values)
task.recurrence_id = recurrence.id
if 'recurring_task' in vals and not vals.get('recurring_task'):
self.recurrence_id.unlink()
tasks = self
recurrence_update = vals.pop('recurrence_update', 'this')
if recurrence_update != 'this':
recurrence_domain = []
if recurrence_update == 'subsequent':
for task in self:
recurrence_domain = OR([recurrence_domain, ['&', ('recurrence_id', '=', task.recurrence_id.id), ('create_date', '>=', task.create_date)]])
else:
recurrence_domain = [('recurrence_id', 'in', self.recurrence_id.ids)]
tasks |= self.env['project.task'].search(recurrence_domain)
result = super(Task, tasks).write(vals)
# rating on stage
if 'stage_id' in vals and vals.get('stage_id'):
self.filtered(lambda x: x.project_id.rating_active and x.project_id.rating_status == 'stage')._send_task_rating_mail(force_send=True)
return result
def update_date_end(self, stage_id):
project_task_type = self.env['project.task.type'].browse(stage_id)
if project_task_type.fold or project_task_type.is_closed:
return {'date_end': fields.Datetime.now()}
return {'date_end': False}
def unlink(self):
if any(self.mapped('recurrence_id')):
# TODO: show a dialog to stop the recurrence
raise UserError(_('You cannot delete recurring tasks. Please, disable the recurrence first.'))
return super().unlink()
# ---------------------------------------------------
# Subtasks
# ---------------------------------------------------
@api.depends('parent_id.partner_id', 'project_id.partner_id')
def _compute_partner_id(self):
"""
If a task has no partner_id, use the project partner_id if any, or else the parent task partner_id.
Once the task partner_id has been set:
1) if the project partner_id changes, the task partner_id is automatically changed also.
2) if the parent task partner_id changes, the task partner_id remains the same.
"""
for task in self:
if task.partner_id:
if task.project_id.partner_id:
task.partner_id = task.project_id.partner_id
else:
task.partner_id = task.project_id.partner_id or task.parent_id.partner_id
@api.depends('partner_id.email', 'parent_id.email_from')
def _compute_email_from(self):
for task in self:
task.email_from = task.partner_id.email or ((task.partner_id or task.parent_id) and task.email_from) or task.parent_id.email_from
@api.depends('parent_id.project_id.subtask_project_id')
def _compute_project_id(self):
for task in self:
if not task.project_id:
task.project_id = task.parent_id.project_id.subtask_project_id
# ---------------------------------------------------
# Mail gateway
# ---------------------------------------------------
def _track_template(self, changes):
res = super(Task, self)._track_template(changes)
test_task = self[0]
if 'stage_id' in changes and test_task.stage_id.mail_template_id:
res['stage_id'] = (test_task.stage_id.mail_template_id, {
'auto_delete_message': True,
'subtype_id': self.env['ir.model.data'].xmlid_to_res_id('mail.mt_note'),
'email_layout_xmlid': 'mail.mail_notification_light'
})
return res
def _creation_subtype(self):
return self.env.ref('project.mt_task_new')
def _track_subtype(self, init_values):
self.ensure_one()
if 'kanban_state_label' in init_values and self.kanban_state == 'blocked':
return self.env.ref('project.mt_task_blocked')
elif 'kanban_state_label' in init_values and self.kanban_state == 'done':
return self.env.ref('project.mt_task_ready')
elif 'stage_id' in init_values:
return self.env.ref('project.mt_task_stage')
return super(Task, self)._track_subtype(init_values)
def _notify_get_groups(self, msg_vals=None):
""" Handle project users and managers recipients that can assign
tasks and create new ones directly from notification emails. Also give
access button to portal users and portal customers. If they are notified
they should probably have access to the document. """
groups = super(Task, self)._notify_get_groups(msg_vals=msg_vals)
local_msg_vals = dict(msg_vals or {})
self.ensure_one()
project_user_group_id = self.env.ref('project.group_project_user').id
group_func = lambda pdata: pdata['type'] == 'user' and project_user_group_id in pdata['groups']
if self.project_id.privacy_visibility == 'followers':
allowed_user_ids = self.project_id.allowed_internal_user_ids.partner_id.ids
group_func = lambda pdata: pdata['type'] == 'user' and project_user_group_id in pdata['groups'] and pdata['id'] in allowed_user_ids
new_group = ('group_project_user', group_func, {})
if not self.user_id and not self.stage_id.fold:
take_action = self._notify_get_action_link('assign', **local_msg_vals)
project_actions = [{'url': take_action, 'title': _('I take it')}]
new_group[2]['actions'] = project_actions
groups = [new_group] + groups
if self.project_id.privacy_visibility == 'portal':
allowed_user_ids = self.project_id.allowed_portal_user_ids.partner_id.ids
groups.insert(0, (
'allowed_portal_users',
lambda pdata: pdata['type'] == 'portal' and pdata['id'] in allowed_user_ids,
{}
))
portal_privacy = self.project_id.privacy_visibility == 'portal'
for group_name, group_method, group_data in groups:
if group_name in ('customer', 'user') or (group_name == 'portal_customer' and not portal_privacy):
group_data['has_button_access'] = False
elif group_name == 'portal_customer' and portal_privacy:
group_data['has_button_access'] = True
return groups
def _notify_get_reply_to(self, default=None, records=None, company=None, doc_names=None):
""" Override to set alias of tasks to their project if any. """
aliases = self.sudo().mapped('project_id')._notify_get_reply_to(default=default, records=None, company=company, doc_names=None)
res = {task.id: aliases.get(task.project_id.id) for task in self}
leftover = self.filtered(lambda rec: not rec.project_id)
if leftover:
res.update(super(Task, leftover)._notify_get_reply_to(default=default, records=None, company=company, doc_names=doc_names))
return res
def email_split(self, msg):
email_list = tools.email_split((msg.get('to') or '') + ',' + (msg.get('cc') or ''))
# check left-part is not already an alias
aliases = self.mapped('project_id.alias_name')
return [x for x in email_list if x.split('@')[0] not in aliases]
@api.model
def message_new(self, msg, custom_values=None):
""" Overrides mail_thread message_new that is called by the mailgateway
through message_process.
This override updates the document according to the email.
"""
# remove default author when going through the mail gateway. Indeed we
# do not want to explicitly set user_id to False; however we do not
# want the gateway user to be responsible if no other responsible is
# found.
create_context = dict(self.env.context or {})
create_context['default_user_id'] = False
if custom_values is None:
custom_values = {}
defaults = {
'name': msg.get('subject') or _("No Subject"),
'email_from': msg.get('from'),
'planned_hours': 0.0,
'partner_id': msg.get('author_id')
}
defaults.update(custom_values)
task = super(Task, self.with_context(create_context)).message_new(msg, custom_values=defaults)
email_list = task.email_split(msg)
partner_ids = [p.id for p in self.env['mail.thread']._mail_find_partner_from_emails(email_list, records=task, force_create=False) if p]
task.message_subscribe(partner_ids)
return task
def message_update(self, msg, update_vals=None):
""" Override to update the task according to the email. """
email_list = self.email_split(msg)
partner_ids = [p.id for p in self.env['mail.thread']._mail_find_partner_from_emails(email_list, records=self, force_create=False) if p]
self.message_subscribe(partner_ids)
return super(Task, self).message_update(msg, update_vals=update_vals)
def _message_get_suggested_recipients(self):
recipients = super(Task, self)._message_get_suggested_recipients()
for task in self:
if task.partner_id:
reason = _('Customer Email') if task.partner_id.email else _('Customer')
task._message_add_suggested_recipient(recipients, partner=task.partner_id, reason=reason)
elif task.email_from:
task._message_add_suggested_recipient(recipients, email=task.email_from, reason=_('Customer Email'))
return recipients
def _notify_email_header_dict(self):
headers = super(Task, self)._notify_email_header_dict()
if self.project_id:
current_objects = [h for h in headers.get('X-Odoo-Objects', '').split(',') if h]
current_objects.insert(0, 'project.project-%s' % self.project_id.id)
headers['X-Odoo-Objects'] = ','.join(current_objects)
if self.tag_ids:
headers['X-Odoo-Tags'] = ','.join(self.tag_ids.mapped('name'))
return headers
def _message_post_after_hook(self, message, msg_vals):
if message.attachment_ids and not self.displayed_image_id:
image_attachments = message.attachment_ids.filtered(lambda a: a.mimetype == 'image')
if image_attachments:
self.displayed_image_id = image_attachments[0]
if self.email_from and not self.partner_id:
# we consider that posting a message with a specified recipient (not a follower, a specific one)
# on a document without customer means that it was created through the chatter using
# suggested recipients. This heuristic allows to avoid ugly hacks in JS.
new_partner = message.partner_ids.filtered(lambda partner: partner.email == self.email_from)
if new_partner:
self.search([
('partner_id', '=', False),
('email_from', '=', new_partner.email),
('stage_id.fold', '=', False)]).write({'partner_id': new_partner.id})
return super(Task, self)._message_post_after_hook(message, msg_vals)
def action_assign_to_me(self):
self.write({'user_id': self.env.user.id})
# If depth == 1, return only direct children
# If depth == 3, return children to third generation
# If depth <= 0, return all children without depth limit
def _get_all_subtasks(self, depth=0):
children = self.mapped('child_ids').filtered(lambda child: child.active)
if not children:
return self.env['project.task']
if depth == 1:
return children
return children + children._get_all_subtasks(depth - 1)
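# Sketch of the depth semantics documented above, for a hypothetical
# parent -> child -> grandchild tree:
#
#     parent._get_all_subtasks(depth=1)  # child only
#     parent._get_all_subtasks(depth=2)  # child + grandchild
#     parent._get_all_subtasks()         # depth=0: all descendants, no limit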
def action_open_parent_task(self):
return {
'name': _('Parent Task'),
'view_mode': 'form',
'res_model': 'project.task',
'res_id': self.parent_id.id,
'type': 'ir.actions.act_window',
'context': dict(self._context, create=False)
}
def action_subtask(self):
action = self.env["ir.actions.actions"]._for_xml_id("project.project_task_action_sub_task")
# display all subtasks of current task
action['domain'] = [('id', 'child_of', self.id), ('id', '!=', self.id)]
# update the context with all default values, as 'quick_create' does not contain all fields in its view
if self._context.get('default_project_id'):
default_project = self.env['project.project'].browse(self.env.context['default_project_id'])
else:
default_project = self.project_id.subtask_project_id or self.project_id
ctx = dict(self.env.context)
ctx = {k: v for k, v in ctx.items() if not k.startswith('search_default_')}
ctx.update({
'default_name': self.env.context.get('name', self.name) + ':',
'default_parent_id': self.id, # will give default subtask field in `default_get`
'default_company_id': default_project.company_id.id if default_project else self.env.company.id,
})
action['context'] = ctx
return action
def action_recurring_tasks(self):
return {
'name': 'Tasks in Recurrence',
'type': 'ir.actions.act_window',
'res_model': 'project.task',
'view_mode': 'tree,form',
'domain': [('recurrence_id', 'in', self.recurrence_id.ids)],
}
# ---------------------------------------------------
# Rating business
# ---------------------------------------------------
def _send_task_rating_mail(self, force_send=False):
for task in self:
rating_template = task.stage_id.rating_template_id
if rating_template:
task.rating_send_request(rating_template, lang=task.partner_id.lang, force_send=force_send)
def rating_get_partner_id(self):
res = super(Task, self).rating_get_partner_id()
if not res and self.project_id.partner_id:
return self.project_id.partner_id
return res
def rating_apply(self, rate, token=None, feedback=None, subtype_xmlid=None):
return super(Task, self).rating_apply(rate, token=token, feedback=feedback, subtype_xmlid="project.mt_task_rating")
def _rating_get_parent_field_name(self):
return 'project_id'
class ProjectTags(models.Model):
""" Tags of project's tasks """
_name = "project.tags"
_description = "Project Tags"
def _get_default_color(self):
return randint(1, 11)
name = fields.Char('Name', required=True)
color = fields.Integer(string='Color', default=_get_default_color)
_sql_constraints = [
('name_uniq', 'unique (name)', "Tag name already exists!"),
]
---- app/config.py | Maethorin/pivocram | MIT | 5 stars | 1,670 bytes | hexsha 7c8849369fcbb1dad3eb48e7b50645532c6e90e9 | (numeric quality-signal columns elided) ----
# -*- coding: utf-8 -*-
"""
Config File for environment variables
"""
import os
from importlib import import_module
class Config(object):
"""
Base class for all config variables
"""
DEBUG = False
TESTING = False
DEVELOPMENT = False
CSRF_ENABLED = True
SQLALCHEMY_DATABASE_URI = os.environ['DATABASE_URL']
SECRET_KEY = os.environ['SECRET_KEY']
class ProductionConfig(Config):
"""
Production Config... this is the real thing
"""
DEBUG = False
class StagingConfig(Config):
"""
Staging Config is for... staging things
"""
DEBUG = True
class DevelopmentConfig(Config):
"""
Development Config... this is your home developer!
"""
DEVELOPMENT = True
DEBUG = True
class TestingConfig(Config):
"""
    Test Config... You should be testing right now instead of reading docs!!!
"""
TESTING = True
KEY_ON_TEST = 'KEY ON TEST'
class ConfigClassNotFound(Exception):
"""
    Raised when the APP_SETTINGS environment variable has a value that does not point to an importable config class.
"""
pass
def get_config():
"""
Get the Config Class instance defined in APP_SETTINGS environment variable
    :return: The config class instance
:rtype: Config
"""
config_imports = os.environ['APP_SETTINGS'].split('.')
config_class_name = config_imports[-1]
config_module = import_module('.'.join(config_imports[:-1]))
config_class = getattr(config_module, config_class_name, None)
if not config_class:
raise ConfigClassNotFound('Unable to find a config class in {}'.format(os.environ['APP_SETTINGS']))
return config_class()
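A minimal usage sketch of get_config() (this assumes the module is importable as app.config; the environment values below are placeholders for the sketch, not part of the original file):

    # Hypothetical driver code. The variables must be set before the first
    # import, because Config reads os.environ at class-definition time.
    import os
    os.environ['DATABASE_URL'] = 'sqlite://'
    os.environ['SECRET_KEY'] = 'dev-secret'
    os.environ['APP_SETTINGS'] = 'app.config.DevelopmentConfig'

    from app.config import get_config
    cfg = get_config()
    assert cfg.DEBUG and cfg.DEVELOPMENT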
---- pytests/docs/docs.py | ramalingam-cb/testrunner | Apache-2.0 | 6,306 bytes | hexsha 7c88cdba00ccf459ff19909681f6bd97e0741c61 | (numeric quality-signal columns elided) ----
import time
import logger
from basetestcase import BaseTestCase
from couchbase_helper.documentgenerator import DocumentGenerator
from membase.api.rest_client import RestConnection
from couchbase_helper.documentgenerator import BlobGenerator
class DocsTests(BaseTestCase):
def setUp(self):
super(DocsTests, self).setUp()
def tearDown(self):
super(DocsTests, self).tearDown()
def test_docs_int_big_values(self):
degree = self.input.param("degree", 53)
error = self.input.param("error", False)
number = 2**degree
first = ['james', 'sharon']
template = '{{ "number": {0}, "first_name": "{1}" }}'
gen_load = DocumentGenerator('test_docs', template, [number,], first,
start=0, end=self.num_items)
self.log.info("create %s documents..." % (self.num_items))
try:
self._load_all_buckets(self.master, gen_load, "create", 0)
self._verify_stats_all_buckets([self.master])
except Exception as e:
if error:
self.log.info("Unable to create documents as expected: %s" % str(e))
else:
raise e
else:
if error:
self.fail("Able to create documents with value: %s" % str(number))
#docs.docs.DocsTests.test_load_memory,nodes_init=3,standard_buckets=3,memcached_buckets=1,replicas=2,quota_percent=75
"""
1) Configure a cluster with 4 Couchbase Buckets and 1 Memcached Buckets.
2) Total memory quota allocated for Couchbase should be approx. 75% (12G) of total RAM.
    3) Load initial data on all buckets up to 60% of each memory quota
    4) Pick one bucket and do the following (5) to (8)
    5) Insert new items up to high_wat_mark (75% of memory quota)
6) Expire/Delete/update random items (ratio of expiration vs delete ~= 8:2)
7) Repeat (6) until "ep_total_del_items" is ~= (3 X # of items being loaded in (3))
8) Expire 90% of remaining items
9) Insert new items or update existing items across buckets
10) See if we can run into "Hard out of Memory" error (UI)
"""
def test_load_memory(self):
        num_items = int(self.quota * 1024 * 0.6 / self.value_size)
        num_items = num_items // len(self.buckets)
        self.log.info("Load initial data on all buckets up to 60% of each memory quota")
gen_load = BlobGenerator('mike', 'mike-', self.value_size, start=0,
end=num_items)
self._load_all_buckets(self.master, gen_load, "create", 0)
self.log.info("Insert new items upto high_wat_mark (75% of memory quota)")
for bucket in self.buckets:
if bucket.type != 'memcached':
bucket_to_load = bucket
break
        new_num_items = int(self.quota * 1024 * 0.15 / self.value_size)
gen_load = BlobGenerator('mike', 'mike-', self.value_size, start=num_items,
end=new_num_items + num_items)
load = self.cluster.async_load_gen_docs(self.master, bucket_to_load.name, gen_load,
bucket_to_load.kvs[1], 'create', compression=self.sdk_compression)
load.result()
end_time = time.time() + 60*60*3
while time.time() < end_time:
self.log.info("check memUsed")
rest = RestConnection(self.master)
for bucket in rest.get_buckets():
self.log.info("*****************************\
bucket %s: memUsed %s\
****************************" % (bucket.name,
bucket.stats.memUsed))
self.log.info("Expire/Delete/update random items (ratio \
of expiration vs delete ~= 8:2)")
current_num = 0
wait_task = self.cluster.async_wait_for_stats(self.servers[:self.nodes_init], bucket_to_load,
'all', 'ep_total_del_items', '==', num_items * 3)
while wait_task.state != "FINISHED":
gen_update = BlobGenerator('mike', 'mike-', self.value_size, start=current_num,
end=current_num + 5000)
gen_expire = BlobGenerator('mike', 'mike-', self.value_size, start=current_num + 5000,
end=current_num + 6600)
gen_delete = BlobGenerator('mike', 'mike-', self.value_size, start=current_num + 6600,
end=current_num + 7000)
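            # Each pass through this loop touches a 7000-key window: 5000
            # updates, 1600 expirations and 400 deletes, i.e. the 8:2
            # expiration-vs-delete ratio promised in the docstring (1600:400).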
tasks = []
tasks.append(self.cluster.async_load_gen_docs(self.master, bucket_to_load.name,
gen_update, bucket_to_load.kvs[1], 'update', compression=self.sdk_compression))
tasks.append(self.cluster.async_load_gen_docs(self.master, bucket_to_load.name,
gen_expire, bucket_to_load.kvs[1], 'update', exp=1,
compression=self.sdk_compression))
tasks.append(self.cluster.async_load_gen_docs(self.master, bucket_to_load.name,
gen_delete, bucket_to_load.kvs[1], 'delete', compression=self.sdk_compression))
for task in tasks:
task.result()
current_num += 7000
self.log.info("Expire 90% of remaining items")
remain_keys, _ = bucket_to_load.kvs[1].key_set()
        last_key_to_expire = remain_keys[int(0.9 * len(remain_keys))][4:]
gen_expire = BlobGenerator('mike', 'mike-', self.value_size, start=0,
end=last_key_to_expire)
load = self.cluster.async_load_gen_docs(self.master, bucket_to_load.name,
gen_expire, bucket_to_load.kvs[1], 'update', exp=1, compression=self.sdk_compression)
load.result()
self.log.info("Insert new items or update existing items across buckets")
gen_load = BlobGenerator('mike', 'mike-', self.value_size, start=new_num_items + num_items,
end=new_num_items * 2 + num_items)
self._load_all_buckets(self.master, gen_load, "create", 0)
---- lichthi.py | truongaxin123/lichthidtu | MIT | 1,890 bytes | hexsha 7c898d721c85859465a77ce43f10791adda1d063 | (numeric quality-signal columns elided) ----
from bs4 import BeautifulSoup
import requests
from urllib.request import urlretrieve
ROOT = 'http://pdaotao.duytan.edu.vn'
def get_url_sub(sub, id_, page):
all_td_tag = []
    for i in range(1, page+1):
        page_url = 'http://pdaotao.duytan.edu.vn/EXAM_LIST/?page={}&lang=VN'.format(i)
        print(page_url)
        r = requests.get(page_url)
soup = BeautifulSoup(r.text, 'lxml')
list_td_tag = soup.find_all('td', attrs={'style': 'padding-top:10px'})
all_td_tag = all_td_tag + list_td_tag
for td_tag in all_td_tag:
if (((sub+id_) in str(td_tag.a.contents[0])) or
((sub+' '+id_) in str(td_tag.a.contents[0])) or
((sub+'_'+id_) in str(td_tag.a.contents[0]))):
print('\nComplete!!!')
print(' '.join(str(td_tag.a.string).split()))
print(str(td_tag.a['href']).replace('..', ROOT))
return str(td_tag.a['href']).replace('..', ROOT)
def get_excel_url(url):
r = requests.get(url)
soup = BeautifulSoup(r.text,'lxml')
list_span_tags = soup.find_all('span',class_='txt_l4')
excel_url = list_span_tags[1].a['href'].replace('..',ROOT)
return excel_url
# a = get_excel_url('http://pdaotao.duytan.edu.vn/EXAM_LIST_Detail/?ID=52289&lang=VN')
def main():
    sub = input('Enter subject name: ')
    id_ = input('Enter subject id: ')
    url = get_url_sub(sub, id_, 4)
    if url is None:
        print('No subject matching ({} {}) was found :('.format(sub, id_))
return
else:
print('get excel URL!!!')
excel_url = get_excel_url(url)
excel_url = excel_url.replace(' ','%20')
print('Download excel file!!!')
save_at = 'C:/Users/truon/Desktop/'
filename = save_at + excel_url.split('/')[-1].replace('%20',' ')
urlretrieve(excel_url,filename)
print('Done!')
if __name__ == '__main__':
    main()
---- appengine/uploader/main.py | isabella232/feedloader | Apache-2.0 | 5 stars | 10,998 bytes | hexsha 7c8a2cc8e8cd0ae17cdb81c0889eb3b2e10339c2 | (numeric quality-signal columns elided) ----
# coding=utf-8
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Uploader module that handles batch jobs sent from Task Queue.
This module receives batch jobs from TaskQueue. For each job, the module loads
data from BigQuery and sends it to Merchant Center.
"""
import http
import json
import logging
import socket
from typing import List, Tuple
import flask
from google.cloud import bigquery
from google.cloud import logging as cloud_logging
from googleapiclient import errors
import batch_creator
import bigquery_client
import constants
import content_api_client
import result_recorder
import shoptimizer_client
from models import failure
from models import process_result
from models import upload_task
app = flask.Flask(__name__)
_logging_client = cloud_logging.Client()
_logging_client.setup_logging(log_level=logging.DEBUG)
_SHOPTIMIZER_CONFIG_FILE_PATH = 'config/shoptimizer_config.json'
OPERATION_TO_METHOD = {
constants.Operation.UPSERT: constants.Method.INSERT,
constants.Operation.DELETE: constants.Method.DELETE,
constants.Operation.PREVENT_EXPIRING: constants.Method.INSERT
}
# Used to check if this is the last retry for alerting purposes.
# Should match task_retry_limit in appengine/initiator/queue.yaml.
TASK_RETRY_LIMIT = 5
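# A hypothetical Task Queue payload for the routes below; the field names
# mirror the UploadTask attributes read in _run_process, and the values are
# purely illustrative:
#
#   POST /insert_items
#   {"start_index": 0, "batch_size": 1000, "timestamp": "20210215_124912"}
#
# which yields batch_number = int(0 / 1000) + 1 = 1.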
@app.route('/insert_items', methods=['POST'])
def run_insert_process() -> Tuple[str, http.HTTPStatus]:
"""Handles uploading tasks pushed from Task Queue."""
return _run_process(constants.Operation.UPSERT)
@app.route('/delete_items', methods=['POST'])
def run_delete_process() -> Tuple[str, http.HTTPStatus]:
"""Handles deleting tasks pushed from Task Queue."""
return _run_process(constants.Operation.DELETE)
@app.route('/prevent_expiring_items', methods=['POST'])
def run_prevent_expiring_process() -> Tuple[str, http.HTTPStatus]:
"""Handles prevent expiring tasks pushed from Task Queue."""
return _run_process(constants.Operation.PREVENT_EXPIRING)
def _run_process(operation: constants.Operation) -> Tuple[str, http.HTTPStatus]:
"""Handles tasks pushed from Task Queue.
When tasks are enqueued to Task Queue by initiator, this method will be
called. It extracts necessary information from a Task Queue message. The
following processes are executed in this function:
  - Loads items to process from BigQuery.
  - Converts items into a batch that can be sent to Content API for Shopping.
  - Sends items to Content API for Shopping (Merchant Center).
  - Records the results of the Content API for Shopping call.
Args:
operation: Type of operation to perform on the items.
Returns:
The result of HTTP request.
"""
request_body = json.loads(flask.request.data.decode('utf-8'))
task = upload_task.UploadTask.from_json(request_body)
if task.batch_size == 0:
return 'OK', http.HTTPStatus.OK
batch_number = int(task.start_index / task.batch_size) + 1
logging.info(
      '%s started. Batch #%d info: start_index: %d, batch_size: %d, '
'initiation timestamp: %s', operation.value, batch_number,
task.start_index, task.batch_size, task.timestamp)
try:
items = _load_items_from_bigquery(operation, task)
except errors.HttpError:
return 'Error loading items from BigQuery', http.HTTPStatus.INTERNAL_SERVER_ERROR
result = process_result.ProcessResult([], [], [])
try:
if not items:
logging.error(
          'Batch #%d, operation %s: 0 items loaded from BigQuery so batch not sent to Content API. Start_index: %d, batch_size: %d, '
'initiation timestamp: %s', batch_number, operation.value,
task.start_index, task.batch_size, task.timestamp)
return 'No items to process', http.HTTPStatus.OK
method = OPERATION_TO_METHOD.get(operation)
# Creates batch from items loaded from BigQuery
original_batch, skipped_item_ids, batch_id_to_item_id = batch_creator.create_batch(
batch_number, items, method)
# Optimizes batch via Shoptimizer for upsert/prevent_expiring operations
if operation != constants.Operation.DELETE and constants.SHOPTIMIZER_API_INTEGRATION_ON:
batch_to_send_to_content_api = _create_optimized_batch(
original_batch, batch_number, operation)
else:
batch_to_send_to_content_api = original_batch
# Sends batch of items to Content API for Shopping
api_client = content_api_client.ContentApiClient()
successful_item_ids, item_failures = api_client.process_items(
batch_to_send_to_content_api, batch_number, batch_id_to_item_id, method)
result = process_result.ProcessResult(
successfully_processed_item_ids=successful_item_ids,
content_api_failures=item_failures,
skipped_item_ids=skipped_item_ids)
except errors.HttpError as http_error:
error_status_code = http_error.resp.status
error_reason = http_error.resp.reason
result = _handle_content_api_error(error_status_code, error_reason,
batch_number, http_error, items,
operation, task)
return error_reason, error_status_code
except socket.timeout as timeout_error:
error_status_code = http.HTTPStatus.REQUEST_TIMEOUT
error_reason = 'Socket timeout'
result = _handle_content_api_error(error_status_code, error_reason,
batch_number, timeout_error, items,
operation, task)
return error_reason, error_status_code
else:
logging.info(
'Batch #%d with operation %s and initiation timestamp %s successfully processed %s items, failed to process %s items and skipped %s items.',
batch_number, operation.value, task.timestamp,
result.get_success_count(), result.get_failure_count(),
result.get_skipped_count())
finally:
recorder = result_recorder.ResultRecorder.from_service_account_json(
constants.GCP_SERVICE_ACCOUNT_PATH, constants.DATASET_ID_FOR_MONITORING,
constants.TABLE_ID_FOR_RESULT_COUNTS_MONITORING,
constants.TABLE_ID_FOR_ITEM_RESULTS_MONITORING)
recorder.insert_result(operation.value, result, task.timestamp,
batch_number)
return 'OK', http.HTTPStatus.OK
def _load_items_from_bigquery(
operation: constants.Operation,
task: upload_task.UploadTask) -> List[bigquery.Row]:
"""Loads items from BigQuery.
Args:
operation: The operation to be performed on this batch of items.
task: The Cloud Task object that initiated this request.
Returns:
The list of items loaded from BigQuery.
"""
table_id = f'process_items_to_{operation.value}_{task.timestamp}'
bq_client = bigquery_client.BigQueryClient.from_service_account_json(
constants.GCP_SERVICE_ACCOUNT_PATH, constants.DATASET_ID_FOR_PROCESSING,
table_id)
try:
items_iterator = bq_client.load_items(task.start_index, task.batch_size)
except errors.HttpError as http_error:
logging.exception(
'Error loading items from %s.%s. HTTP status: %s. Error: %s',
constants.DATASET_ID_FOR_PROCESSING, table_id, http_error.resp.status,
http_error.resp.reason)
raise
return list(items_iterator)
def _create_optimized_batch(batch: constants.Batch, batch_number: int,
operation: constants.Operation) -> constants.Batch:
"""Creates an optimized batch by calling the Shoptimizer API.
Args:
batch: The batch of product data to be optimized.
batch_number: The number that identifies this batch.
operation: The operation to be performed on this batch (upsert, delete,
prevent_expiring).
Returns:
The batch returned from the Shoptimizer API Client.
"""
try:
optimization_client = shoptimizer_client.ShoptimizerClient(
batch_number, operation)
except (OSError, ValueError):
return batch
return optimization_client.shoptimize(batch)
def _handle_content_api_error(
error_status_code: int, error_reason: str, batch_num: int, error: Exception,
item_rows: List[bigquery.Row], operation: constants.Operation,
task: upload_task.UploadTask) -> process_result.ProcessResult:
"""Logs network related errors returned from Content API and returns a list of item failures.
Args:
error_status_code: HTTP status code from Content API.
error_reason: The reason for the error.
batch_num: The batch number.
error: The error thrown by Content API.
item_rows: The items being processed in this batch.
operation: The operation to be performed on this batch of items.
task: The Cloud Task object that initiated this request.
Returns:
The list of items that failed due to the error, wrapped in a
process_result.
"""
logging.warning(
'Batch #%d with operation %s and initiation timestamp %s failed. HTTP status: %s. Error: %s',
batch_num, operation.value, task.timestamp, error_status_code,
error_reason)
# If the batch API call received an HttpError, mark every id as failed.
item_failures = [
failure.Failure(str(item_row.get('item_id', 'Missing ID')), error_reason)
for item_row in item_rows
]
api_result = process_result.ProcessResult([], item_failures, [])
if content_api_client.suggest_retry(
error_status_code) and _get_execution_attempt() < TASK_RETRY_LIMIT:
logging.warning(
'Batch #%d with operation %s and initiation timestamp %s will be requeued for retry',
batch_num, operation.value, task.timestamp)
else:
logging.error(
'Batch #%d with operation %s and initiation timestamp %s failed and will not be retried. Error: %s',
batch_num, operation.value, task.timestamp, error)
return api_result
def _get_execution_attempt() -> int:
"""Returns the number of times this task has previously been executed.
If the execution count header does not exist, it means the request did not
come from Cloud Tasks.
In this case, there will be no retry, so set execution attempt to the retry
limit.
Returns:
int, the number of times this task has previously been executed.
"""
execution_attempt = flask.request.headers.get(
'X-AppEngine-TaskExecutionCount', '')
if execution_attempt:
return int(execution_attempt)
else:
return TASK_RETRY_LIMIT
if __name__ == '__main__':
# This is used when running locally. Gunicorn is used to run the
# application on Google App Engine. See entrypoint in app.yaml.
app.run(host='127.0.0.1', port=8080, debug=True)
---- dev/Gems/CloudGemMetric/v1/AWS/python/windows/Lib/numba/transforms.py | BadDevCode/lumberyard | AML | 1,738 stars | 17,730 bytes | hexsha 7c8a6aee7b7a77f1d1c85df07a12dedc044587d5 | (numeric quality-signal columns elided) ----
"""
Implement transformation on Numba IR
"""
from __future__ import absolute_import, print_function
from collections import namedtuple, defaultdict
import logging
from numba.analysis import compute_cfg_from_blocks, find_top_level_loops
from numba import ir, errors, ir_utils
from numba.analysis import compute_use_defs
_logger = logging.getLogger(__name__)
def _extract_loop_lifting_candidates(cfg, blocks):
"""
Returns a list of loops that are candidate for loop lifting
"""
# check well-formed-ness of the loop
def same_exit_point(loop):
"all exits must point to the same location"
outedges = set()
for k in loop.exits:
succs = set(x for x, _ in cfg.successors(k))
if not succs:
                # If the exit point has no successor, it contains a return
                # statement, which is not handled by the looplifting code.
                # Thus, this loop is not a candidate.
_logger.debug("return-statement in loop.")
return False
outedges |= succs
ok = len(outedges) == 1
_logger.debug("same_exit_point=%s (%s)", ok, outedges)
return ok
def one_entry(loop):
"there is one entry"
ok = len(loop.entries) == 1
_logger.debug("one_entry=%s", ok)
return ok
def cannot_yield(loop):
"cannot have yield inside the loop"
insiders = set(loop.body) | set(loop.entries) | set(loop.exits)
for blk in map(blocks.__getitem__, insiders):
for inst in blk.body:
if isinstance(inst, ir.Assign):
if isinstance(inst.value, ir.Yield):
_logger.debug("has yield")
return False
_logger.debug("no yield")
return True
_logger.info('finding looplift candidates')
# the check for cfg.entry_point in the loop.entries is to prevent a bad
# rewrite where a prelude for a lifted loop would get written into block -1
# if a loop entry were in block 0
candidates = []
for loop in find_top_level_loops(cfg):
_logger.debug("top-level loop: %s", loop)
if (same_exit_point(loop) and one_entry(loop) and cannot_yield(loop) and
cfg.entry_point() not in loop.entries):
candidates.append(loop)
_logger.debug("add candidate: %s", loop)
return candidates
def find_region_inout_vars(blocks, livemap, callfrom, returnto, body_block_ids):
"""Find input and output variables to a block region.
"""
inputs = livemap[callfrom]
outputs = livemap[returnto]
# ensure live variables are actually used in the blocks, else remove,
# saves having to create something valid to run through postproc
# to achieve similar
loopblocks = {}
for k in body_block_ids:
loopblocks[k] = blocks[k]
used_vars = set()
def_vars = set()
defs = compute_use_defs(loopblocks)
for vs in defs.usemap.values():
used_vars |= vs
for vs in defs.defmap.values():
def_vars |= vs
used_or_defined = used_vars | def_vars
# note: sorted for stable ordering
inputs = sorted(set(inputs) & used_or_defined)
outputs = sorted(set(outputs) & used_or_defined & def_vars)
return inputs, outputs
_loop_lift_info = namedtuple('loop_lift_info',
'loop,inputs,outputs,callfrom,returnto')
def _loop_lift_get_candidate_infos(cfg, blocks, livemap):
"""
Returns information on looplifting candidates.
"""
loops = _extract_loop_lifting_candidates(cfg, blocks)
loopinfos = []
for loop in loops:
[callfrom] = loop.entries # requirement checked earlier
        an_exit = next(iter(loop.exits))  # any one of the exit blocks
if len(loop.exits) > 1:
# Pre-Py3.8 may have multiple exits
[(returnto, _)] = cfg.successors(an_exit) # requirement checked earlier
else:
# Post-Py3.8 DO NOT have multiple exits
returnto = an_exit
local_block_ids = set(loop.body) | set(loop.entries)
inputs, outputs = find_region_inout_vars(
blocks=blocks,
livemap=livemap,
callfrom=callfrom,
returnto=returnto,
body_block_ids=local_block_ids,
)
lli = _loop_lift_info(loop=loop, inputs=inputs, outputs=outputs,
callfrom=callfrom, returnto=returnto)
loopinfos.append(lli)
return loopinfos
def _loop_lift_modify_call_block(liftedloop, block, inputs, outputs, returnto):
"""
Transform calling block from top-level function to call the lifted loop.
"""
scope = block.scope
loc = block.loc
blk = ir.Block(scope=scope, loc=loc)
ir_utils.fill_block_with_call(
newblock=blk,
callee=liftedloop,
label_next=returnto,
inputs=inputs,
outputs=outputs,
)
return blk
def _loop_lift_prepare_loop_func(loopinfo, blocks):
"""
Inplace transform loop blocks for use as lifted loop.
"""
entry_block = blocks[loopinfo.callfrom]
scope = entry_block.scope
loc = entry_block.loc
# Lowering assumes the first block to be the one with the smallest offset
firstblk = min(blocks) - 1
blocks[firstblk] = ir_utils.fill_callee_prologue(
block=ir.Block(scope=scope, loc=loc),
inputs=loopinfo.inputs,
label_next=loopinfo.callfrom,
)
blocks[loopinfo.returnto] = ir_utils.fill_callee_epilogue(
block=ir.Block(scope=scope, loc=loc),
outputs=loopinfo.outputs,
)
def _loop_lift_modify_blocks(func_ir, loopinfo, blocks,
typingctx, targetctx, flags, locals):
"""
Modify the block inplace to call to the lifted-loop.
Returns a dictionary of blocks of the lifted-loop.
"""
from numba.dispatcher import LiftedLoop
# Copy loop blocks
loop = loopinfo.loop
loopblockkeys = set(loop.body) | set(loop.entries)
if len(loop.exits) > 1:
# Pre-Py3.8 may have multiple exits
loopblockkeys |= loop.exits
loopblocks = dict((k, blocks[k].copy()) for k in loopblockkeys)
# Modify the loop blocks
_loop_lift_prepare_loop_func(loopinfo, loopblocks)
# Create a new IR for the lifted loop
lifted_ir = func_ir.derive(blocks=loopblocks,
arg_names=tuple(loopinfo.inputs),
arg_count=len(loopinfo.inputs),
force_non_generator=True)
liftedloop = LiftedLoop(lifted_ir,
typingctx, targetctx, flags, locals)
# modify for calling into liftedloop
callblock = _loop_lift_modify_call_block(liftedloop, blocks[loopinfo.callfrom],
loopinfo.inputs, loopinfo.outputs,
loopinfo.returnto)
# remove blocks
for k in loopblockkeys:
del blocks[k]
# update main interpreter callsite into the liftedloop
blocks[loopinfo.callfrom] = callblock
return liftedloop
def loop_lifting(func_ir, typingctx, targetctx, flags, locals):
"""
Loop lifting transformation.
    Given an interpreter `func_ir`, returns a 2-tuple of
`(toplevel_interp, [loop0_interp, loop1_interp, ....])`
"""
blocks = func_ir.blocks.copy()
cfg = compute_cfg_from_blocks(blocks)
loopinfos = _loop_lift_get_candidate_infos(cfg, blocks,
func_ir.variable_lifetime.livemap)
loops = []
if loopinfos:
_logger.debug('loop lifting this IR with %d candidates:\n%s',
len(loopinfos), func_ir.dump_to_string())
for loopinfo in loopinfos:
lifted = _loop_lift_modify_blocks(func_ir, loopinfo, blocks,
typingctx, targetctx, flags, locals)
loops.append(lifted)
# Make main IR
main = func_ir.derive(blocks=blocks)
return main, loops
def canonicalize_cfg_single_backedge(blocks):
"""
Rewrite loops that have multiple backedges.
"""
cfg = compute_cfg_from_blocks(blocks)
newblocks = blocks.copy()
def new_block_id():
return max(newblocks.keys()) + 1
def has_multiple_backedges(loop):
count = 0
for k in loop.body:
blk = blocks[k]
edges = blk.terminator.get_targets()
# is a backedge?
if loop.header in edges:
count += 1
if count > 1:
# early exit
return True
return False
def yield_loops_with_multiple_backedges():
for lp in cfg.loops().values():
if has_multiple_backedges(lp):
yield lp
def replace_target(term, src, dst):
def replace(target):
return (dst if target == src else target)
if isinstance(term, ir.Branch):
return ir.Branch(cond=term.cond,
truebr=replace(term.truebr),
falsebr=replace(term.falsebr),
loc=term.loc)
elif isinstance(term, ir.Jump):
return ir.Jump(target=replace(term.target), loc=term.loc)
else:
assert not term.get_targets()
return term
def rewrite_single_backedge(loop):
"""
Add new tail block that gathers all the backedges
"""
header = loop.header
tailkey = new_block_id()
for blkkey in loop.body:
blk = newblocks[blkkey]
if header in blk.terminator.get_targets():
newblk = blk.copy()
# rewrite backedge into jumps to new tail block
newblk.body[-1] = replace_target(blk.terminator, header,
tailkey)
newblocks[blkkey] = newblk
# create new tail block
entryblk = newblocks[header]
tailblk = ir.Block(scope=entryblk.scope, loc=entryblk.loc)
# add backedge
tailblk.append(ir.Jump(target=header, loc=tailblk.loc))
newblocks[tailkey] = tailblk
for loop in yield_loops_with_multiple_backedges():
rewrite_single_backedge(loop)
return newblocks
def canonicalize_cfg(blocks):
"""
Rewrite the given blocks to canonicalize the CFG.
Returns a new dictionary of blocks.
"""
return canonicalize_cfg_single_backedge(blocks)
def with_lifting(func_ir, typingctx, targetctx, flags, locals):
"""With-lifting transformation
Rewrite the IR to extract all withs.
Only the top-level withs are extracted.
Returns the (the_new_ir, the_lifted_with_ir)
"""
from numba import postproc
def dispatcher_factory(func_ir, objectmode=False, **kwargs):
from numba.dispatcher import LiftedWith, ObjModeLiftedWith
myflags = flags.copy()
if objectmode:
# Lifted with-block cannot looplift
myflags.enable_looplift = False
# Lifted with-block uses object mode
myflags.enable_pyobject = True
myflags.force_pyobject = True
myflags.no_cpython_wrapper = False
cls = ObjModeLiftedWith
else:
cls = LiftedWith
return cls(func_ir, typingctx, targetctx, myflags, locals, **kwargs)
postproc.PostProcessor(func_ir).run() # ensure we have variable lifetime
assert func_ir.variable_lifetime
vlt = func_ir.variable_lifetime
blocks = func_ir.blocks.copy()
# find where with-contexts regions are
withs = find_setupwiths(blocks)
cfg = vlt.cfg
_legalize_withs_cfg(withs, cfg, blocks)
# For each with-regions, mutate them according to
# the kind of contextmanager
sub_irs = []
for (blk_start, blk_end) in withs:
body_blocks = []
for node in _cfg_nodes_in_region(cfg, blk_start, blk_end):
body_blocks.append(node)
_legalize_with_head(blocks[blk_start])
# Find the contextmanager
cmkind, extra = _get_with_contextmanager(func_ir, blocks, blk_start)
# Mutate the body and get new IR
sub = cmkind.mutate_with_body(func_ir, blocks, blk_start, blk_end,
body_blocks, dispatcher_factory,
extra)
sub_irs.append(sub)
if not sub_irs:
# Unchanged
new_ir = func_ir
else:
new_ir = func_ir.derive(blocks)
return new_ir, sub_irs
def _get_with_contextmanager(func_ir, blocks, blk_start):
"""Get the global object used for the context manager
"""
_illegal_cm_msg = "Illegal use of context-manager."
def get_var_dfn(var):
"""Get the definition given a variable"""
return func_ir.get_definition(var)
def get_ctxmgr_obj(var_ref):
"""Return the context-manager object and extra info.
The extra contains the arguments if the context-manager is used
as a call.
"""
# If the contextmanager used as a Call
dfn = func_ir.get_definition(var_ref)
if isinstance(dfn, ir.Expr) and dfn.op == 'call':
args = [get_var_dfn(x) for x in dfn.args]
kws = {k: get_var_dfn(v) for k, v in dfn.kws}
extra = {'args': args, 'kwargs': kws}
var_ref = dfn.func
else:
extra = None
ctxobj = ir_utils.guard(ir_utils.find_global_value, func_ir, var_ref)
# check the contextmanager object
if ctxobj is ir.UNDEFINED:
raise errors.CompilerError(
"Undefined variable used as context manager",
loc=blocks[blk_start].loc,
)
if ctxobj is None:
raise errors.CompilerError(_illegal_cm_msg, loc=dfn.loc)
return ctxobj, extra
# Scan the start of the with-region for the contextmanager
for stmt in blocks[blk_start].body:
if isinstance(stmt, ir.EnterWith):
var_ref = stmt.contextmanager
ctxobj, extra = get_ctxmgr_obj(var_ref)
if not hasattr(ctxobj, 'mutate_with_body'):
raise errors.CompilerError(
"Unsupported context manager in use",
loc=blocks[blk_start].loc,
)
return ctxobj, extra
# No contextmanager found?
raise errors.CompilerError(
"malformed with-context usage",
loc=blocks[blk_start].loc,
)
def _legalize_with_head(blk):
"""Given *blk*, the head block of the with-context, check that it doesn't
do anything else.
"""
counters = defaultdict(int)
for stmt in blk.body:
counters[type(stmt)] += 1
if counters.pop(ir.EnterWith) != 1:
raise errors.CompilerError(
"with's head-block must have exactly 1 ENTER_WITH",
loc=blk.loc,
)
if counters.pop(ir.Jump) != 1:
raise errors.CompilerError(
"with's head-block must have exactly 1 JUMP",
loc=blk.loc,
)
# Can have any number of del
counters.pop(ir.Del, None)
# There MUST NOT be any other statements
if counters:
raise errors.CompilerError(
"illegal statements in with's head-block",
loc=blk.loc,
)
def _cfg_nodes_in_region(cfg, region_begin, region_end):
"""Find the set of CFG nodes that are in the given region
"""
region_nodes = set()
stack = [region_begin]
while stack:
tos = stack.pop()
succs, _ = zip(*cfg.successors(tos))
nodes = set([node for node in succs
if node not in region_nodes and
node != region_end])
stack.extend(nodes)
region_nodes |= nodes
return region_nodes
def _legalize_withs_cfg(withs, cfg, blocks):
"""Verify the CFG of the with-context(s).
"""
doms = cfg.dominators()
postdoms = cfg.post_dominators()
# Verify that the with-context has no side-exits
for s, e in withs:
loc = blocks[s].loc
if s not in doms[e]:
# Not sure what condition can trigger this error.
msg = "Entry of with-context not dominating the exit."
raise errors.CompilerError(msg, loc=loc)
if e not in postdoms[s]:
msg = (
"Does not support with-context that contain branches "
"(i.e. break/return/raise) that can leave the with-context. "
"Details: exit of with-context not post-dominating the entry. "
)
raise errors.CompilerError(msg, loc=loc)
def find_setupwiths(blocks):
"""Find all top-level with.
Returns a list of ranges for the with-regions.
"""
def find_ranges(blocks):
for blk in blocks.values():
for ew in blk.find_insts(ir.EnterWith):
yield ew.begin, ew.end
def previously_occurred(start, known_ranges):
for a, b in known_ranges:
            if start >= a and start < b:
return True
return False
known_ranges = []
for s, e in sorted(find_ranges(blocks)):
if not previously_occurred(s, known_ranges):
if e not in blocks:
                # this is possible if there's an exit path in the with-block
raise errors.CompilerError(
'unsupported controlflow due to return/raise '
'statements inside with block'
)
assert s in blocks, 'starting offset is not a label'
known_ranges.append((s, e))
return known_ranges
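For orientation, a minimal sketch of what these transforms enable, assuming a numba install (the function and values are hypothetical): in object mode, the dispatcher tries to lift the nopython-compilable loop out of the surrounding object-mode code.

    from numba import jit

    @jit(forceobj=True)           # force object mode for the outer function
    def total_plus_len(pairs, n):
        blob = dict(pairs)        # arbitrary-object dict keeps this in object mode
        acc = 0
        for i in range(n):        # pure-integer loop: a looplifting candidate
            acc += i
        return acc + len(blob)

    total_plus_len([('a', 1)], 10)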
---- tests/test_masked_inference_wsi_dataset.py | HabibMrad/MONAI | Apache-2.0 | 1 star | 6,453 bytes | hexsha 7c8a815c2ee01b343fc690c138951a4c479fece7 | (numeric quality-signal columns elided) ----
import os
import unittest
from unittest import skipUnless
import numpy as np
from numpy.testing import assert_array_equal
from parameterized import parameterized
from monai.apps.pathology.datasets import MaskedInferenceWSIDataset
from monai.apps.utils import download_url
from monai.utils import optional_import
from tests.utils import skip_if_quick
_, has_cim = optional_import("cucim")
_, has_osl = optional_import("openslide")
FILE_URL = "http://openslide.cs.cmu.edu/download/openslide-testdata/Generic-TIFF/CMU-1.tiff"
FILE_PATH = os.path.join(os.path.dirname(__file__), "testing_data", os.path.basename(FILE_URL))
MASK1 = os.path.join(os.path.dirname(__file__), "testing_data", "tissue_mask1.npy")
MASK2 = os.path.join(os.path.dirname(__file__), "testing_data", "tissue_mask2.npy")
MASK4 = os.path.join(os.path.dirname(__file__), "testing_data", "tissue_mask4.npy")
HEIGHT = 32914
WIDTH = 46000
def prepare_data():
mask = np.zeros((WIDTH // 2, HEIGHT // 2))
mask[100, 100] = 1
np.save(MASK1, mask)
mask[100, 100:102] = 1
np.save(MASK2, mask)
mask[100:102, 100:102] = 1
np.save(MASK4, mask)
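# Note: the masks are saved at half the slide resolution (WIDTH // 2 by
# HEIGHT // 2), and the nonzero indices set above are exactly what the test
# cases below expect back as "mask_location".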
TEST_CASE_0 = [
{
"data": [
{"image": FILE_PATH, "mask": MASK1},
],
"patch_size": 1,
"image_reader_name": "cuCIM",
},
[
{
"image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8),
"name": "CMU-1",
"mask_location": [100, 100],
},
],
]
TEST_CASE_1 = [
{
"data": [{"image": FILE_PATH, "mask": MASK2}],
"patch_size": 1,
"image_reader_name": "cuCIM",
},
[
{
"image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8),
"name": "CMU-1",
"mask_location": [100, 100],
},
{
"image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8),
"name": "CMU-1",
"mask_location": [101, 100],
},
],
]
TEST_CASE_2 = [
{
"data": [{"image": FILE_PATH, "mask": MASK4}],
"patch_size": 1,
"image_reader_name": "cuCIM",
},
[
{
"image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8),
"name": "CMU-1",
"mask_location": [100, 100],
},
{
"image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8),
"name": "CMU-1",
"mask_location": [100, 101],
},
{
"image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8),
"name": "CMU-1",
"mask_location": [101, 100],
},
{
"image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8),
"name": "CMU-1",
"mask_location": [101, 101],
},
],
]
TEST_CASE_3 = [
{
"data": [
{"image": FILE_PATH, "mask": MASK1},
],
"patch_size": 2,
"image_reader_name": "cuCIM",
},
[
{
"image": np.array(
[
[[243, 243], [243, 243]],
[[243, 243], [243, 243]],
[[243, 243], [243, 243]],
],
dtype=np.uint8,
),
"name": "CMU-1",
"mask_location": [100, 100],
},
],
]
TEST_CASE_4 = [
{
"data": [
{"image": FILE_PATH, "mask": MASK1},
{"image": FILE_PATH, "mask": MASK2},
],
"patch_size": 1,
"image_reader_name": "cuCIM",
},
[
{
"image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8),
"name": "CMU-1",
"mask_location": [100, 100],
},
{
"image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8),
"name": "CMU-1",
"mask_location": [100, 100],
},
{
"image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8),
"name": "CMU-1",
"mask_location": [101, 100],
},
],
]
TEST_CASE_OPENSLIDE_0 = [
{
"data": [
{"image": FILE_PATH, "mask": MASK1},
],
"patch_size": 1,
"image_reader_name": "OpenSlide",
},
[
{
"image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8),
"name": "CMU-1",
"mask_location": [100, 100],
},
],
]
TEST_CASE_OPENSLIDE_1 = [
{
"data": [{"image": FILE_PATH, "mask": MASK2}],
"patch_size": 1,
"image_reader_name": "OpenSlide",
},
[
{
"image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8),
"name": "CMU-1",
"mask_location": [100, 100],
},
{
"image": np.array([[[243]], [[243]], [[243]]], dtype=np.uint8),
"name": "CMU-1",
"mask_location": [101, 100],
},
],
]
class TestMaskedInferenceWSIDataset(unittest.TestCase):
def setUp(self):
prepare_data()
download_url(FILE_URL, FILE_PATH, "5a3cfd4fd725c50578ddb80b517b759f")
@parameterized.expand(
[
TEST_CASE_0,
TEST_CASE_1,
TEST_CASE_2,
TEST_CASE_3,
TEST_CASE_4,
]
)
@skipUnless(has_cim, "Requires CuCIM")
@skip_if_quick
def test_read_patches_cucim(self, input_parameters, expected):
dataset = MaskedInferenceWSIDataset(**input_parameters)
self.compare_samples_expected(dataset, expected)
@parameterized.expand(
[
TEST_CASE_OPENSLIDE_0,
TEST_CASE_OPENSLIDE_1,
]
)
@skipUnless(has_osl, "Requires OpenSlide")
@skip_if_quick
def test_read_patches_openslide(self, input_parameters, expected):
dataset = MaskedInferenceWSIDataset(**input_parameters)
self.compare_samples_expected(dataset, expected)
def compare_samples_expected(self, dataset, expected):
for i in range(len(dataset)):
self.assertTupleEqual(dataset[i][0]["image"].shape, expected[i]["image"].shape)
self.assertIsNone(assert_array_equal(dataset[i][0]["image"], expected[i]["image"]))
self.assertEqual(dataset[i][0]["name"], expected[i]["name"])
self.assertListEqual(dataset[i][0]["mask_location"], expected[i]["mask_location"])
if __name__ == "__main__":
unittest.main()
---- neural_network/backup_casestudy/denbigh/tf_RNN.py | acceleratedmaterials/AMDworkshop_demo | MIT | 5 stars | 1,408 bytes | hexsha 7c8eb61b685c469f781463c9f7be05e90e8308c7 | (numeric quality-signal columns elided) ----
# -*- coding: utf-8 -*-
'''
Framework: Tensorflow
Training samples: 1600
Validation samples: 400
RNN with 128 units
Optimizer: Adam
Epoch: 100
Loss: Cross Entropy
Activation function: Relu for network and Soft-max for regression
Regularization: Drop-out, keep_prob = 0.8
Accuracy of Validation set: 95%
'''
from __future__ import division, print_function, absolute_import
import tflearn
from tflearn.data_utils import to_categorical, pad_sequences
from data_denbigh import *
X, Y = getDenbighData()
#Hyperparams
neurons_num = 128 # Number of neurons in the RNN layer
keep_prob = 0.5 # Keep probability for the drop-out regularization
learning_rate = 0.001 # Learning rate for the Adam optimizer
batch_size = 32 # Batch size
n_epoch = 100 # Number of epoch
# Data preprocessing: pad the sequences into fixed-length vectors for the network
X = pad_sequences(X, maxlen=5, value=0.)
Y = to_categorical(Y, 2)
#Build the network
net = tflearn.input_data([None, 5])
net = tflearn.embedding(net, input_dim=10000, output_dim=128)
net = tflearn.simple_rnn(net, neurons_num, dropout=keep_prob)
net = tflearn.fully_connected(net, 2, activation='softmax')
net = tflearn.regression(net, optimizer='adam', learning_rate=learning_rate,
loss='categorical_crossentropy')
model = tflearn.DNN(net, tensorboard_verbose=0)
model.fit(X, Y, validation_set=0.2, show_metric=True,
batch_size=batch_size, n_epoch=n_epoch)
model.save('./model.tfl')
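A short, assumed continuation showing how the saved model could be reloaded for inference with the same tflearn API (the slice of X is illustrative):

    # Hypothetical inference step; tflearn requires the same `net` graph to be
    # rebuilt before loading the weights saved above.
    model.load('./model.tfl')
    probabilities = model.predict(X[:5])  # per-class probabilities for 5 samples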
---- code/tests/test_tile_tf.py | Nocty-chan/cs224n-squad | Apache-2.0 | 2 stars | 5,569 bytes | hexsha 7c8eba30a07960e7e0f748300f8823eed9acd88c | (numeric quality-signal columns elided) ----
import numpy as np
import tensorflow as tf
H = 2
N = 2
M = 3
BS = 10
def my_softmax(arr):
    max_elements = np.reshape(np.max(arr, axis=2), (BS, N, 1))
    arr = arr - max_elements
    exp_array = np.exp(arr)
    print(exp_array)
    sum_array = np.reshape(np.sum(exp_array, axis=2), (BS, N, 1))
    return exp_array / sum_array
def masked_softmax(logits, mask, dim):
"""
Takes masked softmax over given dimension of logits.
    Inputs:
      logits: Tensor. We want to take softmax over dimension dim.
      mask: Tensor of same shape as logits.
        Has 1s where there's real data in logits, 0 where there's padding.
      dim: int. Dimension over which to take softmax.
    Returns:
      masked_logits: Tensor of same shape as logits.
        This is the same as logits, but with 1e30 subtracted
        (i.e. a very large negative number) in the padding locations.
      prob_dist: Tensor of same shape as logits.
        The result of taking softmax over masked_logits in the given dimension.
        Should be 0 in padding locations.
        Should sum to 1 over the given dimension.
"""
exp_mask = (1 - tf.cast(mask, 'float64')) * (-1e30) # -large where there's padding, 0 elsewhere
print (exp_mask)
masked_logits = tf.add(logits, exp_mask) # where there's padding, set logits to -large
prob_dist = tf.nn.softmax(masked_logits, dim)
return masked_logits, prob_dist
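# For intuition: logits [1., 2., 3.] with mask [1, 1, 0] get -1e30 added to the
# last slot, so the softmax assigns it ~0 probability and the first two slots
# match softmax([1., 2.]) (values illustrative).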
def test_build_similarity(contexts, questions):
w_sim_1 = tf.get_variable('w_sim_1',
initializer=w_1) # 2 * H
w_sim_2 = tf.get_variable('w_sim_2',
initializer=w_2) # 2 * self.hidden_size
w_sim_3 = tf.get_variable('w_sim_3',
initializer=w_3) # 2 * self.hidden_size
q_tile = tf.tile(tf.expand_dims(questions, 0), [N, 1, 1, 1]) # N x BS x M x 2H
q_tile = tf.transpose(q_tile, (1, 0, 3, 2)) # BS x N x 2H x M
contexts = tf.expand_dims(contexts, -1) # BS x N x 2H x 1
result = (contexts * q_tile) # BS x N x 2H x M
tf.assert_equal(tf.shape(result), [BS, N, 2 * H, M])
result = tf.transpose(result, (0, 1, 3, 2)) # BS x N x M x 2H
result = tf.reshape(result, (-1, N * M, 2 * H)) # BS x (NxM) x 2H
tf.assert_equal(tf.shape(result), [BS, N*M, 2*H])
# w_sim_1 = tf.tile(tf.expand_dims(w_sim_1, 0), [BS, 1])
# w_sim_2 = tf.tile(tf.expand_dims(w_sim_2, 0), [BS, 1])
# w_sim_3 = tf.tile(tf.expand_dims(w_sim_3, 0), [BS, 1])
term1 = tf.matmul(tf.reshape(contexts, (BS * N, 2*H)), tf.expand_dims(w_sim_1, -1)) # BS x N
term1 = tf.reshape(term1, (-1, N))
term2 = tf.matmul(tf.reshape(questions, (BS * M, 2*H)), tf.expand_dims(w_sim_2, -1)) # BS x M
term2 = tf.reshape(term2, (-1, M))
term3 = tf.matmul(tf.reshape(result, (BS * N * M, 2* H)), tf.expand_dims(w_sim_3, -1))
term3 = tf.reshape(term3, (-1, N, M)) # BS x N x M
S = tf.reshape(term1,(-1, N, 1)) + term3 + tf.reshape(term2, (-1, 1, M))
return S
def test_build_sim_mask():
context_mask = np.array([True, True]) # BS x N
question_mask = np.array([True, True, False]) # BS x M
context_mask = np.tile(context_mask, [BS, 1])
question_mask = np.tile(question_mask, [BS, 1])
context_mask = tf.get_variable('context_mask', initializer=context_mask)
question_mask = tf.get_variable('question_mask', initializer=question_mask)
context_mask = tf.expand_dims(context_mask, -1) # BS x N x 1
question_mask = tf.expand_dims(question_mask, -1) # BS x M x 1
question_mask = tf.transpose(question_mask, (0, 2, 1)) # BS x 1 x M
sim_mask = tf.matmul(tf.cast(context_mask, dtype=tf.int32),
tf.cast(question_mask, dtype=tf.int32)) # BS x N x M
return sim_mask
def test_build_c2q(S, S_mask, questions):
    _, alpha = masked_softmax(S, S_mask, 2) # BS x N x M
return tf.matmul(alpha, questions)
def test_build_q2c(S, S_mask, contexts):
# S = BS x N x M
# contexts = BS x N x 2H
m = tf.reduce_max(S * tf.cast(S_mask, dtype=tf.float64), axis=2) # BS x N
beta = tf.expand_dims(tf.nn.softmax(m), -1) # BS x N x 1
beta = tf.transpose(beta, (0, 2, 1))
q2c = tf.matmul(beta, contexts)
return m, beta, q2c
def test_concatenation(c2q, q2c):
q2c = tf.tile(q2c, (1, N, 1))
output = tf.concat([c2q, q2c], axis=2)
tf.assert_equal(tf.shape(output), [BS, N, 4*H])
return output
if __name__ == "__main__":
w_1 = np.array([1., 2., 3., 4.])
w_2 = np.array([5., 6., 7., 8.])
w_3 = np.array([13., 12., 11., 10.])
c = np.array([[[1., 2., 3., 4.], [5., 6., 7., 8.]]]) # BS x N x 2H
q = np.array([[[1., 2., 3., 0.], [5., 6., 7., 4.], [8., 9. , 10., 11.]]]) # BS x M x 2H
c = np.tile(c, [BS, 1, 1])
q = np.tile(q, [BS, 1, 1])
questions = tf.get_variable('questions', initializer=q)
contexts = tf.get_variable('contexts', initializer=c)
S = test_build_similarity(contexts, questions)
mask = test_build_sim_mask()
c2q = test_build_c2q(S, mask, questions)
m, beta, q2c = test_build_q2c(S, mask, contexts)
output = test_concatenation(c2q, q2c)
init = tf.global_variables_initializer()
with tf.Session() as sess:
sess.run(init)
S_result, mask_result, c2q_r = sess.run([S, mask, c2q])
actual_result = np.tile(np.array([[228, 772, 1372], [548, 1828, 3140]]), [BS, 1, 1])
assert np.array_equal(actual_result, S_result), 'Arrays are not equal'
print ("Building similarity matrix is successful!")
print ("Context 2 Question attention")
m_r, beta_r, q2c_r = sess.run([m, beta, q2c])
output_r = sess.run(output)
---- specutils/tests/test_smoothing.py | hamogu/specutils | BSD-3-Clause | 7,174 bytes | hexsha 7c8edd5a1cedfd0895ce2bb9c6148ce0241c7af7 | (numeric quality-signal columns elided) ----
import numpy as np
import pytest
from astropy import convolution
from scipy.signal import medfilt
import astropy.units as u
from ..spectra.spectrum1d import Spectrum1D
from ..tests.spectral_examples import simulated_spectra
from ..manipulation.smoothing import (convolution_smooth, box_smooth,
gaussian_smooth, trapezoid_smooth,
median_smooth)
def compare_flux(flux_smooth1, flux_smooth2, flux_original, rtol=0.01):
"""
There are two things to compare for each set of smoothing:
1. Compare the smoothed flux from the astropy machinery vs
the smoothed flux from specutils. This is done by
comparing flux_smooth1 and flux_smooth2.
2. Next we want to compare the smoothed flux to the original
flux. This is a little more difficult as smoothing will
make a difference for median filter, but less so for
convolution based smoothing if the kernel is normalized
(area under the kernel = 1).
In this second case the rtol (relative tolerance) is used
judiciously.
"""
# Compare, element by element, the two smoothed fluxes.
assert np.allclose(flux_smooth1, flux_smooth2)
# Compare the total spectral flux of the smoothed to the original.
assert np.allclose(sum(flux_smooth1), sum(flux_original), rtol=rtol)
def test_smooth_custom_kernel(simulated_spectra):
"""
    Test CustomKernel smoothing with correct parameters.
"""
# Create the original spectrum
spec1 = simulated_spectra.s1_um_mJy_e1
flux_original = spec1.flux
# Create a custom kernel (some weird asymmetric-ness)
numpy_kernel = np.array([0.5, 1, 2, 0.5, 0.2])
numpy_kernel = numpy_kernel / np.sum(numpy_kernel)
custom_kernel = convolution.CustomKernel(numpy_kernel)
flux_smoothed_astropy = convolution.convolve(flux_original, custom_kernel)
# Calculate the custom smoothed
spec1_smoothed = convolution_smooth(spec1, custom_kernel)
compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value)
@pytest.mark.parametrize("width", [1, 2.3])
def test_smooth_box_good(simulated_spectra, width):
"""
    Test Box1DKernel smoothing with correct parameters.
Width values need to be a number greater than 0.
"""
# Create the original spectrum
spec1 = simulated_spectra.s1_um_mJy_e1
flux_original = spec1.flux
# Calculate the smoothed flux using Astropy
box_kernel = convolution.Box1DKernel(width)
flux_smoothed_astropy = convolution.convolve(flux_original, box_kernel)
# Calculate the box smoothed
spec1_smoothed = box_smooth(spec1, width)
compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value)
# Check the input and output units
assert spec1.wavelength.unit == spec1_smoothed.wavelength.unit
assert spec1.flux.unit == spec1_smoothed.flux.unit
@pytest.mark.parametrize("width", [-1, 0, 'a'])
def test_smooth_box_bad(simulated_spectra, width):
"""
    Test Box1DKernel smoothing with incorrect parameters.
Width values need to be a number greater than 0.
"""
# Create the spectrum
spec1 = simulated_spectra.s1_um_mJy_e1
# Test bad input parameters
with pytest.raises(ValueError):
box_smooth(spec1, width)
@pytest.mark.parametrize("stddev", [1, 2.3])
def test_smooth_gaussian_good(simulated_spectra, stddev):
"""
    Test Gaussian1DKernel smoothing with correct parameters.
Standard deviation values need to be a number greater than 0.
"""
# Create the spectrum
spec1 = simulated_spectra.s1_um_mJy_e1
flux_original = spec1.flux
# Calculate the smoothed flux using Astropy
gaussian_kernel = convolution.Gaussian1DKernel(stddev)
flux_smoothed_astropy = convolution.convolve(flux_original, gaussian_kernel)
# Test gaussian smoothing
spec1_smoothed = gaussian_smooth(spec1, stddev)
compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value, rtol=0.02)
# Check the input and output units
assert spec1.wavelength.unit == spec1_smoothed.wavelength.unit
assert spec1.flux.unit == spec1_smoothed.flux.unit
@pytest.mark.parametrize("stddev", [-1, 0, 'a'])
def test_smooth_gaussian_bad(simulated_spectra, stddev):
"""
    Test Gaussian1DKernel smoothing with incorrect parameters.
Standard deviation values need to be a number greater than 0.
"""
# Create the spectrum
spec1 = simulated_spectra.s1_um_mJy_e1
    # Test bad input parameters
with pytest.raises(ValueError):
gaussian_smooth(spec1, stddev)
@pytest.mark.parametrize("stddev", [1, 2.3])
def test_smooth_trapezoid_good(simulated_spectra, stddev):
"""
    Test Trapezoid1DKernel smoothing with correct parameters.
Standard deviation values need to be a number greater than 0.
"""
# Create the spectrum
spec1 = simulated_spectra.s1_um_mJy_e1
flux_original = spec1.flux
# Create the flux_smoothed which is what we want to compare to
trapezoid_kernel = convolution.Trapezoid1DKernel(stddev)
flux_smoothed_astropy = convolution.convolve(flux_original, trapezoid_kernel)
# Test trapezoid smoothing
spec1_smoothed = trapezoid_smooth(spec1, stddev)
compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value)
# Check the input and output units
assert spec1.wavelength.unit == spec1_smoothed.wavelength.unit
assert spec1.flux.unit == spec1_smoothed.flux.unit
@pytest.mark.parametrize("stddev", [-1, 0, 'a'])
def test_smooth_trapezoid_bad(simulated_spectra, stddev):
"""
    Test Trapezoid1DKernel smoothing with incorrect parameters.
Standard deviation values need to be a number greater than 0.
"""
# Create the spectrum
spec1 = simulated_spectra.s1_um_mJy_e1
# Test bad parameters
with pytest.raises(ValueError):
trapezoid_smooth(spec1, stddev)
@pytest.mark.parametrize("width", [1, 3, 9])
def test_smooth_median_good(simulated_spectra, width):
"""
    Test Median smoothing with correct parameters.
Width values need to be a number greater than 0.
"""
# Create the spectrum
spec1 = simulated_spectra.s1_um_mJy_e1
flux_original = spec1.flux
# Create the flux_smoothed which is what we want to compare to
flux_smoothed_astropy = medfilt(flux_original, width)
# Test median smoothing
spec1_smoothed = median_smooth(spec1, width)
compare_flux(spec1_smoothed.flux.value, flux_smoothed_astropy, flux_original.value, rtol=0.15)
# Check the input and output units
assert spec1.wavelength.unit == spec1_smoothed.wavelength.unit
assert spec1.flux.unit == spec1_smoothed.flux.unit
@pytest.mark.parametrize("width", [-1, 0, 'a'])
def test_smooth_median_bad(simulated_spectra, width):
"""
    Test Median smoothing with incorrect parameters.
Width values need to be a number greater than 0.
"""
# Create the spectrum
spec1 = simulated_spectra.s1_um_mJy_e1
# Test bad parameters
with pytest.raises(ValueError):
median_smooth(spec1, width)
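# --- Illustrative usage sketch (not part of the test file above). It assumes
# a specutils installation exposing Spectrum1D and box_smooth via the public
# API; the call pattern mirrors test_smooth_box_good.
import numpy as np
import astropy.units as u
from specutils import Spectrum1D
from specutils.manipulation import box_smooth
wavelengths = np.linspace(1.0, 2.0, 50) * u.um
flux = np.random.default_rng(0).normal(1.0, 0.1, 50) * u.mJy
spec = Spectrum1D(spectral_axis=wavelengths, flux=flux)
smoothed = box_smooth(spec, width=3)
assert smoothed.flux.unit == spec.flux.unit  # units survive smoothing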
| 32.609091
| 98
| 0.730694
| 940
| 7,174
| 5.393617
| 0.156383
| 0.059961
| 0.037475
| 0.051479
| 0.63787
| 0.608679
| 0.594675
| 0.531558
| 0.509467
| 0.509467
| 0
| 0.023034
| 0.195149
| 7,174
| 219
| 99
| 32.757991
| 0.85504
| 0.340117
| 0
| 0.432099
| 0
| 0
| 0.01071
| 0
| 0
| 0
| 0
| 0
| 0.123457
| 1
| 0.123457
| false
| 0
| 0.098765
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c909452f19de7c50d60c569038b33d1b55f15c0
| 909
|
py
|
Python
|
modules/interpolator.py
|
buulikduong/1d_sgl_solver
|
03ce0b362d45acbbd3bb35e7b604ba97982eea92
|
[
"BSD-2-Clause"
] | null | null | null |
modules/interpolator.py
|
buulikduong/1d_sgl_solver
|
03ce0b362d45acbbd3bb35e7b604ba97982eea92
|
[
"BSD-2-Clause"
] | null | null | null |
modules/interpolator.py
|
buulikduong/1d_sgl_solver
|
03ce0b362d45acbbd3bb35e7b604ba97982eea92
|
[
"BSD-2-Clause"
] | 2
|
2020-09-01T13:02:49.000Z
|
2021-08-15T09:10:17.000Z
|
"""Module interpolating mathematical functions out of support points"""
from scipy.interpolate import interp1d, lagrange, CubicSpline
def interpolator(x_sup, y_sup, method):
"""Interpolates a mathematical function from a given set of
points using either linear, polynomial or cubic spline for the
interpolation.
Args:
x_sup (list): x-coordinates of the function
y_sup (list): y-coordinates of the function
method (string): name of the interpolation method to be used
Returns:
intfunc: interpolated function
"""
if method == "linear":
intfunc = interp1d(x_sup, y_sup, kind="linear")
return intfunc
elif method == "polynomial":
intfunc = lagrange(x_sup, y_sup)
return intfunc
elif method == "cspline":
intfunc = CubicSpline(x_sup, y_sup, bc_type="natural")
return intfunc
    return None  # unrecognized interpolation method name
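# --- Minimal usage sketch for interpolator() above, with hypothetical
# support points; each branch returns a callable evaluated at new abscissas.
import numpy as np
x_sup = np.linspace(0.0, np.pi, 8)
y_sup = np.sin(x_sup)
f_lin = interpolator(x_sup, y_sup, "linear")
f_csp = interpolator(x_sup, y_sup, "cspline")
print(f_lin(1.0), f_csp(1.0), np.sin(1.0))  # cspline should track sin closely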
| 29.322581
| 71
| 0.672167
| 113
| 909
| 5.309735
| 0.495575
| 0.033333
| 0.033333
| 0.053333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002933
| 0.249725
| 909
| 30
| 72
| 30.3
| 0.876833
| 0.454345
| 0
| 0.25
| 0
| 0
| 0.080357
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.083333
| false
| 0
| 0.083333
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c9227a3cbdbdfda32f8e1f7af19e23d5f84fca1
| 946
|
py
|
Python
|
games.py
|
cpratim/DSA-Research-Paper
|
ebb856ef62f8a04aa72380e39afdde958eed529a
|
[
"MIT"
] | null | null | null |
games.py
|
cpratim/DSA-Research-Paper
|
ebb856ef62f8a04aa72380e39afdde958eed529a
|
[
"MIT"
] | null | null | null |
games.py
|
cpratim/DSA-Research-Paper
|
ebb856ef62f8a04aa72380e39afdde958eed529a
|
[
"MIT"
] | null | null | null |
import json
import matplotlib.pyplot as plt
from pprint import pprint
import numpy as np
from scipy.stats import linregress
from util.stats import *
with open('data/game_stats.json', 'r') as f:
df = json.load(f)
X, y = [], []
for match, stats in df.items():
home, away = stats['home'], stats['away']
    # NB: chained comparison: this expands to
    # (home['mp'] != away['mp']) and (away['mp'] != '240'),
    # so games are kept whenever the away minutes equal '240'.
    if home['mp'] != away['mp'] != '240': continue
try:
ft_dif = float(home['fta']) - float(away['fta'])
pt_dif = float(home['pts']) - float(away['pts'])
if abs(pt_dif) > 10: continue
    except (KeyError, ValueError, TypeError):
        # missing or non-numeric box-score fields: skip this game
        continue
X.append(ft_dif)
y.append(pt_dif)
c = 0
for f, p in zip(X, y):
if f * p > 0:
c += 1
print(c / len(X))
slope, intercept, r, p, std = linregress(X, y)
f = lambda x: x*slope + intercept
fit_y = [f(min(X)), f(max(X))]
plt.xlabel('Free Throw Attempts')
plt.ylabel('Point Differential')
plt.title('FTA vs Point Differential')
print(correlation(X, y))
plt.plot([min(X), max(X)], fit_y, color = 'red')
plt.scatter(X, y)
plt.show()
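# --- Side note (sketch): the chained comparison used in the filter above
# expands as (home['mp'] != away['mp']) and (away['mp'] != '240');
# home['mp'] is never compared against '240' directly, as shown here.
home_mp, away_mp = '240', '265'
print(home_mp != away_mp != '240')               # True -> the game is skipped
print(home_mp != away_mp and away_mp != '240')   # identical expansion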
| 22
| 51
| 0.64482
| 167
| 946
| 3.60479
| 0.443114
| 0.016611
| 0.039867
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010152
| 0.167019
| 946
| 43
| 52
| 22
| 0.753807
| 0
| 0
| 0
| 0
| 0
| 0.119324
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.171429
| 0
| 0.171429
| 0.085714
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c924b0af1eb750ce0d3f38bab21b79619b4ba48
| 6,255
|
py
|
Python
|
src/generate_data.py
|
gycggd/leaf-classification
|
b37dd4a6a262562c454038218c1472329e54128b
|
[
"MIT"
] | null | null | null |
src/generate_data.py
|
gycggd/leaf-classification
|
b37dd4a6a262562c454038218c1472329e54128b
|
[
"MIT"
] | null | null | null |
src/generate_data.py
|
gycggd/leaf-classification
|
b37dd4a6a262562c454038218c1472329e54128b
|
[
"MIT"
] | null | null | null |
import os
import numpy as np
import pandas as pd
import tensorflow as tf
from keras.preprocessing.image import ImageDataGenerator
from keras.preprocessing.image import img_to_array, load_img
from keras.utils.np_utils import to_categorical
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.preprocessing import LabelEncoder, StandardScaler
def load_numeric_training(standardize=True):
data = pd.read_csv('../train.csv')
ID = data.pop('id')
y = data.pop('species')
y = LabelEncoder().fit(y).transform(y)
X = StandardScaler().fit(data).transform(data) if standardize else data.values
return ID.values, X, y
def load_numeric_test(standardize=True):
data = pd.read_csv('../test.csv')
ID = data.pop('id')
test = StandardScaler().fit(data).transform(data) if standardize else data.values
return ID.values, test
def resize_img(img, max_dim=96):
max_axis = np.argmax(img.size)
scale = max_dim / img.size[max_axis]
return img.resize((int(img.size[0] * scale), int(img.size[1] * scale)))
def load_img_data(ids, max_dim=96, center=True):
X = np.empty((len(ids), max_dim, max_dim, 1))
for i, id in enumerate(ids):
img = load_img('../images/{}.jpg'.format(id), grayscale=True)
img = resize_img(img, max_dim=max_dim)
x = img_to_array(img)
h, w = x.shape[:2]
if center:
h1 = (max_dim - h) >> 1
h2 = h1 + h
w1 = (max_dim - w) >> 1
w2 = w1 + w
else:
h1, h2, w1, w2 = 0, h, 0, w
X[i][h1:h2, w1:w2][:] = x
return np.around(X / 255)
def load_train_data(split=0.9, random_state=7):
ID, X_num_train, y = load_numeric_training()
X_img_train = load_img_data(ID)
sss = StratifiedShuffleSplit(n_splits=1, train_size=split, test_size=1 - split, random_state=random_state)
train_idx, val_idx = next(sss.split(X_num_train, y))
ID_tr, X_num_tr, X_img_tr, y_tr = ID[train_idx], X_num_train[train_idx], X_img_train[train_idx], y[train_idx]
ID_val, X_num_val, X_img_val, y_val = ID[val_idx], X_num_train[val_idx], X_img_train[val_idx], y[val_idx]
return (ID_tr, X_num_tr, X_img_tr, y_tr), (ID_val, X_num_val, X_img_val, y_val)
def load_test_data():
ID, X_num_test = load_numeric_test()
X_img_test = load_img_data(ID)
return ID, X_num_test, X_img_test
print('Loading train data ...')
(ID_train, X_num_tr, X_img_tr, y_tr), (ID_val, X_num_val, X_img_val, y_val) = load_train_data()
# Prepare ID-to-label and ID-to-numerical dictionary
ID_y_dic, ID_num_dic = {}, {}
for i in range(len(ID_train)):
ID_y_dic[ID_train[i]] = y_tr[i]
ID_num_dic[ID_train[i]] = X_num_tr[i, :]
print('Loading test data ...')
ID_test, X_num_test, X_img_test = load_test_data()
# Convert label to categorical/one-hot
ID_train, y_tr, y_val = to_categorical(ID_train), to_categorical(y_tr), to_categorical(y_val)
def _bytes_feature(value):
return tf.train.Feature(bytes_list=tf.train.BytesList(value=[value]))
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
def _float32_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
def write_val_data():
val_data_path = '../tfrecords/val_data_1.tfrecords'
if os.path.exists(val_data_path):
print('Warning: old file exists, removed.')
os.remove(val_data_path)
    # np.bool was removed in NumPy 1.24; the builtin bool works as the dtype
    val_image, val_num, val_label = X_img_val.astype(bool), X_num_val.astype(np.float64), y_val.astype(bool)
print(val_image.shape, val_num.shape, val_label.shape)
val_writer = tf.python_io.TFRecordWriter(val_data_path)
print('Writing data into tfrecord ...')
for i in range(len(val_image)):
image, num, label = val_image[i], val_num[i], val_label[i]
feature = {'image': _bytes_feature(image.tostring()),
'num': _bytes_feature(num.tostring()),
'label': _bytes_feature(label.tostring())}
example = tf.train.Example(features=tf.train.Features(feature=feature))
val_writer.write(example.SerializeToString())
print('Done!')
def write_train_data():
imgen = ImageDataGenerator(rotation_range=20, zoom_range=0.2, horizontal_flip=True,
vertical_flip=True, fill_mode='nearest')
imgen_train = imgen.flow(X_img_tr, ID_train, batch_size=32, seed=7)
print('Generating augmented images')
all_images = []
all_ID = []
p = True
for i in range(28 * 200):
print('Generating augmented images for epoch {}, batch {}'.format(i // 28, i % 28))
X, ID = imgen_train.next()
all_images.append(X)
all_ID.append(np.argmax(ID, axis=1))
    all_images = np.concatenate(all_images).astype(bool)  # builtin bool, not the removed np.bool
all_ID = np.concatenate(all_ID)
all_y = np.zeros(all_ID.shape)
all_nums = np.zeros((all_ID.shape[0], X_num_tr.shape[1]))
for i in range(len(all_ID)):
all_nums[i, :] = ID_num_dic[all_ID[i]]
all_y[i] = ID_y_dic[all_ID[i]]
    all_y = to_categorical(all_y).astype(bool)
print('Data shapes:')
print('Image:', all_images.shape)
print('Label:', all_y.shape)
print('Numerical:', all_nums.shape)
train_data_path = '../tfrecords/train_data_1.tfrecords'
if os.path.exists(train_data_path):
print('Warning: old file exists, removed.')
os.remove(train_data_path)
# compression = tf.python_io.TFRecordCompressionType.GZIP
# train_writer = tf.python_io.TFRecordWriter(train_data_path, options=tf.python_io.TFRecordOptions(compression))
train_writer = tf.python_io.TFRecordWriter(train_data_path)
print('Writing data into tfrecord ...')
for i in range(len(all_images)):
if i % 891 == 0:
print('Writing {} th epoch data ...'.format(i // 891))
image, num, label = all_images[i], all_nums[i], all_y[i]
feature = {'image': _bytes_feature(image.tostring()),
'num': _bytes_feature(num.tostring()),
'label': _bytes_feature(label.tostring())}
example = tf.train.Example(features=tf.train.Features(feature=feature))
train_writer.write(example.SerializeToString())
print('Done!')
write_val_data()
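# --- Sketch: reading one record back. This assumes the same TF1-era
# tf.python_io API used above; the image shape must be supplied by the
# caller because ndarray.tostring() stores raw bytes without shape info.
def read_first_record(path, img_shape=(96, 96, 1)):
    raw = next(tf.python_io.tf_record_iterator(path))
    example = tf.train.Example.FromString(raw)
    feats = example.features.feature
    image = np.frombuffer(feats['image'].bytes_list.value[0], dtype=np.bool_)
    return image.reshape(img_shape)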
| 36.794118
| 116
| 0.672422
| 976
| 6,255
| 4.036885
| 0.17623
| 0.016244
| 0.007614
| 0.013959
| 0.371574
| 0.313706
| 0.232995
| 0.218782
| 0.218782
| 0.196447
| 0
| 0.014229
| 0.191047
| 6,255
| 169
| 117
| 37.011834
| 0.764427
| 0.040608
| 0
| 0.125984
| 0
| 0
| 0.078552
| 0.011341
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086614
| false
| 0
| 0.070866
| 0.023622
| 0.228346
| 0.125984
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c93f115e357ee6abe4ee6a425a0e90b87246382
| 1,834
|
py
|
Python
|
setup.py
|
Parquery/pynumenc
|
f14abab40b7d08c55824bf1da5b2a7026c0a7282
|
[
"MIT"
] | 1
|
2018-11-09T16:16:08.000Z
|
2018-11-09T16:16:08.000Z
|
setup.py
|
Parquery/numenc-py
|
f14abab40b7d08c55824bf1da5b2a7026c0a7282
|
[
"MIT"
] | 2
|
2018-11-09T12:51:40.000Z
|
2018-11-09T12:53:55.000Z
|
setup.py
|
Parquery/pynumenc
|
f14abab40b7d08c55824bf1da5b2a7026c0a7282
|
[
"MIT"
] | 2
|
2019-02-26T12:40:11.000Z
|
2019-06-17T07:42:35.000Z
|
"""A setuptools based setup module.
See:
https://packaging.python.org/en/latest/distributing.html
https://github.com/pypa/sampleproject
"""
import os
from setuptools import setup, find_packages, Extension
import pynumenc_meta
# pylint: disable=redefined-builtin
here = os.path.abspath(os.path.dirname(__file__)) # pylint: disable=invalid-name
with open(os.path.join(here, 'README.rst'), encoding='utf-8') as f:
long_description = f.read() # pylint: disable=invalid-name
setup(
name=pynumenc_meta.__title__,
version=pynumenc_meta.__version__,
description=pynumenc_meta.__description__,
long_description=long_description,
url=pynumenc_meta.__url__,
author=pynumenc_meta.__author__,
author_email=pynumenc_meta.__author_email__,
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: End Users/Desktop',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6'
],
license='License :: OSI Approved :: MIT License',
keywords='C++ encode decode bytes encoding decoding sorted',
packages=find_packages(exclude=['docs', 'tests']),
install_requires=[],
extras_require={
'dev': [
# yapf: disable,
'docutils>=0.14,<1',
'mypy==0.641',
'hypothesis==3.82.1',
'pygments>=2.2.0,<3',
'pydocstyle>=3.0.0,<4',
'pylint==2.1.1',
'yapf==0.24.0'
# yapf: enable
]
},
ext_modules=[
Extension('numenc', sources=['numenc-cpp/encoder_decoder.cpp'])
],
scripts=['bin/pynumenc'],
py_modules=['pynumenc_meta'],
package_data={'pynumenc': ['py.typed']},
data_files=[('.', ['LICENSE.txt', 'README.rst'])])
| 31.084746
| 81
| 0.630316
| 211
| 1,834
| 5.241706
| 0.563981
| 0.086799
| 0.036166
| 0.0434
| 0.050633
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022933
| 0.215376
| 1,834
| 58
| 82
| 31.62069
| 0.745657
| 0.138495
| 0
| 0.046512
| 0
| 0
| 0.327597
| 0.01912
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.069767
| 0
| 0.069767
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c95786ebe742f8164fbbe85994a95220ade7338
| 3,074
|
py
|
Python
|
Models/License-Plate-Recognition-Nigerian-vehicles-master/License-Plate-Recognition-Nigerian-vehicles-master/ocr.py
|
nipunjain099/AutoGuard
|
8217cd03af7927590ef3a160ecb7d9bc9f50d101
|
[
"MIT"
] | 147
|
2018-12-23T09:44:36.000Z
|
2022-03-03T15:38:33.000Z
|
Models/License-Plate-Recognition-Nigerian-vehicles-master/License-Plate-Recognition-Nigerian-vehicles-master/ocr.py
|
nipunjain099/AutoGuard
|
8217cd03af7927590ef3a160ecb7d9bc9f50d101
|
[
"MIT"
] | 17
|
2018-12-25T16:04:34.000Z
|
2022-01-13T00:44:21.000Z
|
Models/License-Plate-Recognition-Nigerian-vehicles-master/License-Plate-Recognition-Nigerian-vehicles-master/ocr.py
|
nipunjain099/AutoGuard
|
8217cd03af7927590ef3a160ecb7d9bc9f50d101
|
[
"MIT"
] | 77
|
2018-12-19T03:03:14.000Z
|
2022-03-13T17:00:38.000Z
|
import numpy as np
from skimage.transform import resize
from skimage import measure
from skimage.measure import regionprops
class OCROnObjects():
def __init__(self, license_plate):
character_objects = self.identify_boundary_objects(license_plate)
self.get_regions(character_objects, license_plate)
def identify_boundary_objects(self, a_license_plate):
labelImage = measure.label(a_license_plate)
character_dimensions = (0.4*a_license_plate.shape[0], 0.85*a_license_plate.shape[0], 0.04*a_license_plate.shape[1], 0.15*a_license_plate.shape[1])
minHeight, maxHeight, minWidth, maxWidth = character_dimensions
regionLists = regionprops(labelImage)
return regionLists
def get_regions(self, character_objects, a_license_plate):
"""
used to map out regions where the license plate charcters are
the principle of connected component analysis and labelling
were used
Parameters:
-----------
a_license_plate: 2D numpy binary image of the license plate
Returns:
--------
a dictionary containing the index
fullscale: 3D array containig 2D array of each character
columnsVal: 1D array the starting column of each character
coordinates:
"""
cord = []
counter=0
column_list = []
character_dimensions = (0.35*a_license_plate.shape[0], 0.60*a_license_plate.shape[0], 0.05*a_license_plate.shape[1], 0.15*a_license_plate.shape[1])
minHeight, maxHeight, minWidth, maxWidth = character_dimensions
for regions in character_objects:
minimumRow, minimumCol, maximumRow, maximumCol = regions.bbox
character_height = maximumRow - minimumRow
character_width = maximumCol - minimumCol
roi = a_license_plate[minimumRow:maximumRow, minimumCol:maximumCol]
if character_height > minHeight and character_height < maxHeight and character_width > minWidth and character_width < maxWidth:
if counter == 0:
samples = resize(roi, (20,20))
cord.append(regions.bbox)
counter += 1
elif counter == 1:
roismall = resize(roi, (20,20))
samples = np.concatenate((samples[None,:,:], roismall[None,:,:]), axis=0)
cord.append(regions.bbox)
counter+=1
else:
roismall = resize(roi, (20,20))
samples = np.concatenate((samples[:,:,:], roismall[None,:,:]), axis=0)
cord.append(regions.bbox)
column_list.append(minimumCol)
if len(column_list) == 0:
self.candidates = {}
else:
self.candidates = {
'fullscale': samples,
'coordinates': np.array(cord),
'columnsVal': column_list
}
return self.candidates
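# --- Sketch of the stacking pattern in get_regions() above: the first two
# 20x20 characters gain a leading axis via samples[None, :, :], after which
# each further character is appended along axis 0.
import numpy as np
a, b, c = np.zeros((20, 20)), np.ones((20, 20)), np.full((20, 20), 2.0)
stack = np.concatenate((a[None, :, :], b[None, :, :]), axis=0)  # (2, 20, 20)
stack = np.concatenate((stack, c[None, :, :]), axis=0)          # (3, 20, 20)
print(stack.shape)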
| 43.914286
| 155
| 0.59987
| 327
| 3,074
| 5.464832
| 0.311927
| 0.120873
| 0.094572
| 0.080582
| 0.266928
| 0.266928
| 0.201455
| 0.201455
| 0.158926
| 0.105204
| 0
| 0.026005
| 0.311971
| 3,074
| 70
| 156
| 43.914286
| 0.818913
| 0.130124
| 0
| 0.22449
| 0
| 0
| 0.011705
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061224
| false
| 0
| 0.081633
| 0
| 0.204082
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c974ea9b476fd86b7ac61a4ae4dbd0512a02f64
| 1,711
|
py
|
Python
|
letsencrypt/setup.py
|
ccppuu/certbot
|
9fead41aaf93dde0d36d4aef6fded8dd306c1ddc
|
[
"Apache-2.0"
] | 1
|
2017-12-20T20:06:11.000Z
|
2017-12-20T20:06:11.000Z
|
letsencrypt/setup.py
|
cpu/certbot
|
9fead41aaf93dde0d36d4aef6fded8dd306c1ddc
|
[
"Apache-2.0"
] | null | null | null |
letsencrypt/setup.py
|
cpu/certbot
|
9fead41aaf93dde0d36d4aef6fded8dd306c1ddc
|
[
"Apache-2.0"
] | null | null | null |
import codecs
import os
import sys
from setuptools import setup
from setuptools import find_packages
def read_file(filename, encoding='utf8'):
"""Read unicode from given file."""
with codecs.open(filename, encoding=encoding) as fd:
return fd.read()
here = os.path.abspath(os.path.dirname(__file__))
readme = read_file(os.path.join(here, 'README.rst'))
# This package is a simple shim around certbot
install_requires = ['certbot']
version = '0.7.0.dev0'
setup(
name='letsencrypt',
version=version,
description="ACME client",
long_description=readme,
url='https://github.com/letsencrypt/letsencrypt',
author="Certbot Project",
author_email='client-dev@letsencrypt.org',
license='Apache License 2.0',
classifiers=[
'Development Status :: 3 - Alpha',
'Environment :: Console',
'Environment :: Console :: Curses',
'Intended Audience :: System Administrators',
'License :: OSI Approved :: Apache Software License',
'Operating System :: POSIX :: Linux',
'Programming Language :: Python',
'Programming Language :: Python :: 2',
'Programming Language :: Python :: 2.6',
'Programming Language :: Python :: 2.7',
'Topic :: Internet :: WWW/HTTP',
'Topic :: Security',
'Topic :: System :: Installation/Setup',
'Topic :: System :: Networking',
'Topic :: System :: Systems Administration',
'Topic :: Utilities',
],
packages=find_packages(),
include_package_data=True,
install_requires=install_requires,
entry_points={
'console_scripts': [
'letsencrypt = certbot.main:main',
],
},
)
| 27.15873
| 61
| 0.630625
| 180
| 1,711
| 5.9
| 0.555556
| 0.071563
| 0.094162
| 0.073446
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.009946
| 0.236119
| 1,711
| 62
| 62
| 27.596774
| 0.802601
| 0.043834
| 0
| 0.041667
| 0
| 0
| 0.442331
| 0.015951
| 0
| 0
| 0
| 0
| 0
| 1
| 0.020833
| false
| 0
| 0.104167
| 0
| 0.145833
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c9af51ba1243be5af3bd0e724c771174bb964d2
| 1,007
|
py
|
Python
|
problem_solving/python/algorithms/greedy/marcs_cakewalk.py
|
kcc3/hackerrank-solutions
|
f862b44b840bd447d99dc148f6bb5e2f5bfb8a86
|
[
"MIT"
] | null | null | null |
problem_solving/python/algorithms/greedy/marcs_cakewalk.py
|
kcc3/hackerrank-solutions
|
f862b44b840bd447d99dc148f6bb5e2f5bfb8a86
|
[
"MIT"
] | null | null | null |
problem_solving/python/algorithms/greedy/marcs_cakewalk.py
|
kcc3/hackerrank-solutions
|
f862b44b840bd447d99dc148f6bb5e2f5bfb8a86
|
[
"MIT"
] | 1
|
2020-06-04T09:23:19.000Z
|
2020-06-04T09:23:19.000Z
|
def marcs_cakewalk(calorie):
"""Hackerrank Problem: https://www.hackerrank.com/challenges/marcs-cakewalk/problem
Marc loves cupcakes, but he also likes to stay fit. Each cupcake has a calorie count, and Marc can walk a distance
to expend those calories. If Marc has eaten j cupcakes so far, after eating a cupcake with c calories he must walk
at least 2**j x c miles to maintain his weight.
Solve:
To calculate the minimum miles, you solve based on the highest calorie to lowest calorie cupcake
Args:
calorie (list): List of integers denoting the calories for each cupcake
Returns:
        int: The minimum number of miles Marc must walk to maintain his weight
"""
calories = 0
for i, c in enumerate(sorted(calorie, reverse=True)):
calories += (2 ** i * c)
return calories
if __name__ == "__main__":
assert marcs_cakewalk([5, 10, 7]) == 44
assert marcs_cakewalk([1, 3, 2]) == 11
assert marcs_cakewalk([7, 4, 9, 6]) == 79
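    # --- Worked example for marcs_cakewalk([5, 10, 7]): eating the richest
    # cupcakes first gives them the smallest 2**j multiplier. Sorted
    # descending the calories are [10, 7, 5], so the minimum mileage is:
    print(2**0 * 10 + 2**1 * 7 + 2**2 * 5)  # 44, matching the first assert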
| 37.296296
| 118
| 0.683217
| 154
| 1,007
| 4.38961
| 0.590909
| 0.096154
| 0.08432
| 0.056213
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026008
| 0.236346
| 1,007
| 26
| 119
| 38.730769
| 0.853056
| 0.631579
| 0
| 0
| 0
| 0
| 0.024691
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.111111
| false
| 0
| 0
| 0
| 0.222222
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c9bc57e7e9891072399e9288ee87401c640bfb4
| 1,583
|
py
|
Python
|
coronaindiatracker/coronatracker/views.py
|
ankitgoswami23/CoronaIndiaTracker
|
b2e116a595b3c69ccefa93b60833c09aa07b5eed
|
[
"Unlicense"
] | 2
|
2020-07-26T05:57:27.000Z
|
2020-07-26T07:12:15.000Z
|
coronaindiatracker/coronatracker/views.py
|
ankee23/CoronaIndiaTracker
|
b2e116a595b3c69ccefa93b60833c09aa07b5eed
|
[
"Unlicense"
] | null | null | null |
coronaindiatracker/coronatracker/views.py
|
ankee23/CoronaIndiaTracker
|
b2e116a595b3c69ccefa93b60833c09aa07b5eed
|
[
"Unlicense"
] | 1
|
2020-11-26T08:52:11.000Z
|
2020-11-26T08:52:11.000Z
|
from django.shortcuts import render
import requests
from bs4 import BeautifulSoup
def corona_data(request):
"Testaaaa"
corona_html = requests.get("https://www.mygov.in/covid-19")
soup = BeautifulSoup(corona_html.content, 'html.parser')
state_wise_data = soup.find_all('div', class_='views-row')
information = soup.find('div', class_='information_row')
info = {
'update_data': information.find('div', class_='info_title').find('span').string,
'active_case': information.find('div', class_='active-case').find('span', class_='icount').string,
'discharge': information.find('div', class_='discharge').find('span', class_='icount').string,
'death': information.find('div', class_='death_case').find('span', class_='icount').string
}
corona_info = [
{
"state_name": state.find_all('span', class_='st_name')[0].string,
"confirm_case": state.find_all('div', class_='tick-confirmed')[0].find_all('small')[0].string,
"active_case": state.find_all('div', class_='tick-active')[0].find_all('small')[0].string,
"discharge": state.find_all('div', class_='tick-discharged')[0].find_all('small')[0].string,
"death": state.find_all('div', class_='tick-death')[0].find_all('small')[0].string
} for state in state_wise_data
]
context = {
'corona_info': info,
        'data': sorted(corona_info, key=lambda i: int(i['confirm_case'].replace(',', '')), reverse=True)
}
return render(request, 'coronainfo/index.html', context)
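# --- Sketch of the sort key in `context` above: the scraped counts are
# comma-grouped strings (hypothetical values here) that must become ints
# before sorting, otherwise '98,765' would sort above '1,23,456'.
rows = [{'confirm_case': '1,23,456'}, {'confirm_case': '98,765'}]
key = lambda i: int(i['confirm_case'].replace(',', ''))
print(sorted(rows, key=key, reverse=True))  # largest confirmed count first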
| 45.228571
| 113
| 0.642451
| 201
| 1,583
| 4.840796
| 0.323383
| 0.071942
| 0.051387
| 0.077081
| 0.274409
| 0.248715
| 0.057554
| 0
| 0
| 0
| 0
| 0.009112
| 0.168035
| 1,583
| 34
| 114
| 46.558824
| 0.729689
| 0.005054
| 0
| 0
| 0
| 0
| 0.245736
| 0.013266
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034483
| false
| 0
| 0.103448
| 0
| 0.172414
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c9c1524555fded271e617bca48b5b1e6a1e9ace
| 6,082
|
py
|
Python
|
compare.py
|
geohackweek/ghw2019_wiggles
|
9b636db8d97986e038a301e36b808e820ccc525f
|
[
"BSD-3-Clause"
] | 3
|
2019-10-09T19:42:12.000Z
|
2021-05-28T00:10:54.000Z
|
compare.py
|
geohackweek/ghw2019_wiggles
|
9b636db8d97986e038a301e36b808e820ccc525f
|
[
"BSD-3-Clause"
] | 1
|
2019-09-11T16:37:59.000Z
|
2019-09-11T16:37:59.000Z
|
compare.py
|
geohackweek/ghw2019_wiggles
|
9b636db8d97986e038a301e36b808e820ccc525f
|
[
"BSD-3-Clause"
] | 3
|
2019-09-10T20:41:59.000Z
|
2019-09-10T20:42:57.000Z
|
# Script tests GPD model using UW truth data
# Test outputs:
# - type of event tested [EQS, EQP, SUS, SUP, THS, THP, SNS, SNP, PXS, PXP]
# - phase [P, S, N] Note: N - not detected
# - model time offset (t_truth - t_model_pick)
import numpy
import math
import string
import datetime
import sys
import os
import csv
from datetime import datetime
from datetime import timedelta
# params
padding_time = 10
fudge_factor = timedelta(seconds=27)
time_diff = timedelta(seconds=10)
# file dirs
parsed_arrivals = []
model_in = []
model_out = []
comp_out = []
for etype in ['EQS','EQP','SUS','SUP','THS','THP','SNS','SNP','PXS','PXP']:
arrival = "parsed_arrivals/" + etype + ".arrivals.txt"
infile = "input_files/GPD." + etype + ".in"
outfile = "output_files/GPD." + etype + ".out"
parsed_arrivals.append(arrival)
model_in.append(infile)
model_out.append(outfile)
comp_out.append("comparison_out/comp." + etype + ".out")
# ------------------
# read in UW arrival times as an array
def read_arrivals_to_arr(filename):
model_list = []
with open(filename) as f:
for ln in f:
row = ln.split()
line = []
line.extend([row[0].strip(), row[1].strip(), row[2].strip()])
formatted_time = datetime.strptime(row[3], "%Y-%m-%dT%H:%M:%S.%f") - fudge_factor
line.extend([formatted_time, row[4].strip(), row[5].strip()])
model_list.append(line)
return model_list
def arrivals_to_dictionary(arrivals):
picks = {}
for arr in arrivals:
key = datetime.strftime(arr[3], "%Y-%m-%dT%H:%M:%S.%f")
key = key[0:-7]
picks[key] = arr
return picks
def model_in_to_array(file):
timestamps = []
with open(file) as f:
for ln in f:
entry = ln.split()
entry = entry[0].strip()
entry = entry[len(entry)-20:len(entry)-6]
entry = entry[0:4] + "-" + entry[4:6] + "-" + entry[6:8] + "T" + entry[8:10] + ":" + entry[10:12] + ":" + entry[12:14]
# ------------- TIME STAMP ISSUES --------------------
# case 1: run if .mseed files have correct timestamps
"""
time = datetime.strptime(entry, "%Y-%m-%dT%H:%M:%S") - fudge_factor # + time_diff (might need to add this)
time = datetime.strftime(time, "%Y-%m-%dT%H:%M:%S")
"""
# case 2: run if .mseed files have buggy minutes in the timestamps
time = datetime.strptime(entry, "%Y-%m-%dT%H:%M:%S")
if time.second >=37 and time.second <=51:
time = time + timedelta(seconds=23) + time_diff
time = datetime.strftime(time, "%Y-%m-%dT%H:%M:%S")
else:
sec_int = time.second + 23
if sec_int > 59:
sec_int = sec_int - 60
sec_int = str(sec_int).zfill(2)
time = datetime.strftime(time, "%Y-%m-%dT%H:%M:%S")
time = time[:-2] + sec_int
time = datetime.strptime(time, "%Y-%m-%dT%H:%M:%S") + time_diff
time = datetime.strftime(time, "%Y-%m-%dT%H:%M:%S")
# -----------------------------------------------------
timestamps.append(time)
return timestamps
def filter_times(arrivals, model_in):
filtered = []
for key in model_in:
if key in arrivals:
filtered.append(arrivals[key])
return filtered
# read in Caltech model output and create a dictionary
def read_output_to_dict(filename):
model_dict = {}
with open(filename) as f:
for line in f:
tmp = line.split()
key = tmp[0] + "-" + tmp[1] + "-" + tmp[2]
try: # fails if date is missing floating point numbers
formatted_time = datetime.strptime(tmp[3], "%Y-%m-%dT%H:%M:%S.%f")
if key not in model_dict:
model_dict[key] = []
model_dict[key].append(formatted_time)
            except (IndexError, ValueError):
                # missing fields or an unparsable floating-point timestamp
                pass
return model_dict
# lookup time in the dictionary
def key_lookup(event, phase, model_dict):
key = event[0] + "-" + event[1] + "-" + phase
times = []
if key in model_dict.keys():
times = model_dict[key]
times = time_lookup(event[3], times)
return times
# search for arrivals within the padding time window
def time_lookup(t, time_arr):
t_lower = t - timedelta(seconds=padding_time)
t_upper = t + timedelta(seconds=padding_time)
offsets = []
for time in time_arr:
if time > t_lower and time < t_upper:
offset = t - time # or format time to absolute value: abs(t - time)
offset = offset.total_seconds()
offsets.append('{:.6f}'.format(offset))
return offsets
def execute_script(arrival, inf, outf, comp_out):
# write outputs to file
outp_file = open(comp_out, 'w')
truth_arr = read_arrivals_to_arr(arrival) # read in the arrival times to a list
truth_dict = arrivals_to_dictionary(truth_arr) # convert arrivals to a dictionary (key=truncated timestamp)
model_in = model_in_to_array(inf) # read in model .in file as a list
truth_arr = filter_times(truth_dict, model_in) # filter arrivals to picks that were passed to the model (.in file)
model_dict = read_output_to_dict(outf) # read output file
for event in truth_arr:
phase = event[2]
times = key_lookup(event, phase, model_dict)
if len(times) == 0:
if phase == 'P':
phase = 'S'
else:
phase = 'P'
times = key_lookup(event, phase, model_dict)
if len(times) == 0:
phase = 'N'
times = ['nan']
outp_file.write(str(event[5]) + " " + phase)
for offset in times:
outp_file.write(" " + str(offset))
outp_file.write('\n')
outp_file.close()
for i in range(len(model_out)):
execute_script(parsed_arrivals[i], model_in[i], model_out[i], comp_out[i])
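# --- Sketch of the padding window in time_lookup() above, with hypothetical
# timestamps and the padding_time = 10 seconds configured at the top.
t = datetime(2019, 1, 1, 0, 0, 30)
candidates = [t + timedelta(seconds=s) for s in (-12, -3, 0, 8, 15)]
print(time_lookup(t, candidates))  # only offsets strictly within +/-10 s survive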
| 37.312883
| 132
| 0.561822
| 824
| 6,082
| 4.01335
| 0.231796
| 0.023284
| 0.012096
| 0.015119
| 0.180224
| 0.151799
| 0.125189
| 0.125189
| 0.112489
| 0.112489
| 0
| 0.014822
| 0.290036
| 6,082
| 162
| 133
| 37.54321
| 0.751042
| 0.162775
| 0
| 0.101563
| 0
| 0
| 0.06065
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0.007813
| 0.070313
| 0
| 0.1875
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c9c7b65355934d322e4085f42e442dbe2ee0d7d
| 7,012
|
py
|
Python
|
ultitrackerapi/ultitrackerapi/extract_and_upload_video.py
|
atheheath/ultitracker-api
|
5d7ea7ae97c53faf02416f17baf11ed09fd55276
|
[
"MIT"
] | null | null | null |
ultitrackerapi/ultitrackerapi/extract_and_upload_video.py
|
atheheath/ultitracker-api
|
5d7ea7ae97c53faf02416f17baf11ed09fd55276
|
[
"MIT"
] | 7
|
2020-03-27T03:33:52.000Z
|
2020-03-30T02:33:04.000Z
|
ultitrackerapi/ultitrackerapi/extract_and_upload_video.py
|
atheheath/ultitracker-api
|
5d7ea7ae97c53faf02416f17baf11ed09fd55276
|
[
"MIT"
] | null | null | null |
import argparse
import boto3
import datetime
import json
import os
import posixpath
import re
import shutil
import tempfile
import uuid
from concurrent import futures
from multiprocessing import Pool
from ultitrackerapi import get_backend, get_logger, get_s3Client, video
backend_instance = get_backend()
logger = get_logger(__name__, level="DEBUG")
s3Client = get_s3Client()
def update_game_video_length(game_id, video_length):
command = """
UPDATE ultitracker.game_metadata
SET data = jsonb_set(data, '{{length}}', '"{video_length}"', true)
WHERE game_id = '{game_id}'
""".format(
video_length=video_length,
game_id=game_id
)
backend_instance.client.execute(command)
def get_frame_number(key, chunk_multiplier=60):
frame_number = int(posixpath.splitext(posixpath.basename(key))[0].split("_")[1])
chunk_number = int(posixpath.basename(posixpath.dirname(key)).split("_")[1])
return chunk_number * chunk_multiplier + frame_number
def insert_images(
img_raw_paths,
img_types,
img_metadatas,
game_id,
frame_numbers
):
command = """
INSERT INTO ultitracker.img_location (img_id, img_raw_path, img_type, img_metadata, game_id, frame_number) VALUES
"""
for i, (img_raw_path, img_type, img_metadata, frame_number) in enumerate(zip(img_raw_paths, img_types, img_metadatas, frame_numbers)):
command += """('{img_id}', '{img_raw_path}', '{img_type}', '{img_metadata}', '{game_id}', {frame_number}){include_comma}
""".format(
img_id=uuid.uuid4(),
img_raw_path=img_raw_path,
img_type=img_type,
img_metadata=json.dumps(img_metadata),
game_id=game_id,
frame_number=frame_number,
include_comma="," if i < (len(img_raw_paths) - 1) else ""
)
backend_instance.client.execute(command)
def extract_and_upload_video(
bucket,
video_filename,
thumbnail_filename,
video_key,
thumbnail_key,
game_id
):
logger.debug("extract_and_upload_video: Getting video length")
video_length_seconds = int(video.get_video_duration(video_filename))
video_length = str(datetime.timedelta(seconds=video_length_seconds))
logger.debug("extract_and_upload_video: Finished getting video length")
logger.debug("extract_and_upload_video: Getting video height and width")
video_height_width = video.get_video_height_width(video_filename)
logger.debug("extract_and_upload_video: Finished getting height and width")
logger.debug("extract_and_upload_video: Updating length in db")
update_game_video_length(game_id, video_length)
logger.debug("extract_and_upload_video: Finished updating length in db")
logger.debug("extract_and_upload_video: Extracting thumbnail")
video.get_thumbnail(video_filename, thumbnail_filename, time=video_length_seconds // 2)
logger.debug("extract_and_upload_video: Finished extracting thumbnail")
logger.debug("extract_and_upload_video: Uploading thumbnail")
s3Client.upload_file(
thumbnail_filename,
bucket,
thumbnail_key
)
logger.debug("extract_and_upload_video: Finished uploading thumbnail")
logger.debug("extract_and_upload_video: Uploading video to S3")
s3Client.upload_file(
video_filename,
bucket,
video_key
)
logger.debug("extract_and_upload_video: Finished uploading video to S3")
logger.debug("extract_and_upload_video: Chunking video")
chunked_video_dir = tempfile.mkdtemp()
video.chunk_video(video_filename, chunked_video_dir, chunk_size=60)
logger.debug("extract_and_upload_video: Finished chunking video")
logger.debug("extract_and_upload_video: Uploading video chunks")
with futures.ThreadPoolExecutor(8) as ex:
for vid in os.listdir(chunked_video_dir):
ex.submit(
s3Client.upload_file,
os.path.join(chunked_video_dir, vid),
bucket,
posixpath.join(
posixpath.dirname(video_key),
"chunks",
vid
)
)
logger.debug("extract_and_upload_video: Finished uploading video chunks")
logger.debug("extract_and_upload_video: Submitting lambda frame extraction")
aws_lambda_payloads = [
json.dumps({
"s3_bucket_path": bucket,
"s3_video_path": posixpath.join(posixpath.dirname(video_key), "chunks", basename),
"s3_output_frames_path": posixpath.join(posixpath.dirname(video_key), "frames", posixpath.splitext(basename)[0]),
"video_metadata": video_height_width
}).encode()
for basename in os.listdir(chunked_video_dir)
]
client = boto3.client('lambda')
aws_lambda_responses = []
with futures.ThreadPoolExecutor(max_workers=16) as ex:
result_futures = []
for payload in aws_lambda_payloads:
result_futures.append(ex.submit(
client.invoke,
FunctionName="extractFrames",
# InvocationType="Event",
Payload=payload
))
logger.debug("extract_and_upload_video: Submitted lambda frame extraction")
for result_future in futures.as_completed(result_futures):
aws_lambda_response = json.loads(result_future.result()["Payload"].read().decode("utf-8"))
aws_lambda_responses.append(aws_lambda_response)
raw_paths = ["s3://" + posixpath.join(frame["bucket"], frame["key"]) for frame in aws_lambda_response["frames"]]
img_types = ["png" for frame in aws_lambda_response["frames"]]
metadatas = [
{"bucket": bucket}
for frame in aws_lambda_response["frames"]
]
frame_numbers = [-1 for frame in aws_lambda_response["frames"]]
insert_images(
raw_paths,
img_types,
metadatas,
game_id,
frame_numbers
)
logger.debug("extract_and_upload_video: Received all lambda responses")
logger.debug("extract_and_upload_video: Finished inserting image metadata")
os.remove(video_filename)
os.remove(thumbnail_filename)
shutil.rmtree(chunked_video_dir)
def main():
parser = argparse.ArgumentParser()
parser.add_argument("bucket")
parser.add_argument("video_filename")
parser.add_argument("thumbnail_filename")
parser.add_argument("video_key")
parser.add_argument("thumbnail_key")
parser.add_argument("game_id")
args = parser.parse_args()
extract_and_upload_video(
bucket=args.bucket,
video_filename=args.video_filename,
thumbnail_filename=args.thumbnail_filename,
video_key=args.video_key,
thumbnail_key=args.thumbnail_key,
game_id=args.game_id
)
if __name__ == "__main__":
main()
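# --- Sketch of get_frame_number() above: with chunk_multiplier=60 a key such
# as '.../chunks/chunk_3/frame_07.png' (hypothetical) maps to global frame
# 3 * 60 + 7 = 187.
print(get_frame_number('bucket/frames/chunk_3/frame_07.png'))  # 187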
| 33.711538
| 138
| 0.673987
| 829
| 7,012
| 5.359469
| 0.19421
| 0.049516
| 0.079226
| 0.103984
| 0.367995
| 0.345712
| 0.269187
| 0.160927
| 0.059194
| 0.022507
| 0
| 0.005557
| 0.230034
| 7,012
| 208
| 139
| 33.711538
| 0.817374
| 0.00328
| 0
| 0.143713
| 0
| 0
| 0.23941
| 0.085861
| 0
| 0
| 0
| 0
| 0
| 1
| 0.02994
| false
| 0
| 0.077844
| 0
| 0.113772
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7c9c87d15e24804e84e87528b8a9f5ba5b08422f
| 3,265
|
py
|
Python
|
Chapter03/scikit_soft_voting_2knn.py
|
PacktPublishing/Hands-On-Ensemble-Learning-with-Python
|
db9b90189dbebbc6ab5ebba0e2e173ba80197c35
|
[
"MIT"
] | 31
|
2019-07-21T00:36:52.000Z
|
2022-02-25T15:38:21.000Z
|
Chapter03/scikit_soft_voting_2knn.py
|
tokiran/Hands-On-Ensemble-Learning-with-Python
|
739ecda33fb75dc1df1366abf4a79c34cc0c2026
|
[
"MIT"
] | null | null | null |
Chapter03/scikit_soft_voting_2knn.py
|
tokiran/Hands-On-Ensemble-Learning-with-Python
|
739ecda33fb75dc1df1366abf4a79c34cc0c2026
|
[
"MIT"
] | 30
|
2019-07-06T00:22:44.000Z
|
2022-02-04T02:44:17.000Z
|
# --- SECTION 1 ---
# Import the required libraries
from sklearn import datasets, naive_bayes, svm, neighbors
from sklearn.ensemble import VotingClassifier
from sklearn.metrics import accuracy_score
# Load the dataset
breast_cancer = datasets.load_breast_cancer()
x, y = breast_cancer.data, breast_cancer.target
# Split the train and test samples
test_samples = 100
x_train, y_train = x[:-test_samples], y[:-test_samples]
x_test, y_test = x[-test_samples:], y[-test_samples:]
# --- SECTION 2 ---
# Instantiate the learners (classifiers)
learner_1 = neighbors.KNeighborsClassifier(n_neighbors=5)
learner_2 = naive_bayes.GaussianNB()
learner_3 = neighbors.KNeighborsClassifier(n_neighbors=50)
# --- SECTION 3 ---
# Instantiate the voting classifier
voting = VotingClassifier([('5NN', learner_1),
('NB', learner_2),
('50NN', learner_3)],
voting='soft')
# --- SECTION 4 ---
# Fit classifier with the training data
voting.fit(x_train, y_train)
learner_1.fit(x_train, y_train)
learner_2.fit(x_train, y_train)
learner_3.fit(x_train, y_train)
# --- SECTION 5 ---
# Predict the most probable class
soft_predictions = voting.predict(x_test)
# --- SECTION 6 ---
# Get the base learner predictions
predictions_1 = learner_1.predict(x_test)
predictions_2 = learner_2.predict(x_test)
predictions_3 = learner_3.predict(x_test)
# --- SECTION 7 ---
# Accuracies of base learners
print('L1:', accuracy_score(y_test, predictions_1))
print('L2:', accuracy_score(y_test, predictions_2))
print('L3:', accuracy_score(y_test, predictions_3))
# Accuracy of soft voting (voting='soft' above, despite the original labels)
print('-'*30)
print('Soft Voting:', accuracy_score(y_test, soft_predictions))
# --- SECTION 1 ---
# Import the required libraries
import matplotlib as mpl
import matplotlib.pyplot as plt
mpl.style.use('seaborn-paper')
# --- SECTION 2 ---
# Get the wrongly predicted instances
# and the predicted probabilities for the whole test set
errors = y_test - soft_predictions
probabilities_1 = learner_1.predict_proba(x_test)
probabilities_2 = learner_2.predict_proba(x_test)
probabilities_3 = learner_3.predict_proba(x_test)
# --- SECTION 2 ---
# Store the predicted probability for
# each wrongly predicted instance, for each base learner
# as well as the average predicted probability
#
x=[]
y_1=[]
y_2=[]
y_3=[]
y_avg=[]
for i in range(len(errors)):
if not errors[i] == 0:
x.append(i)
y_1.append(probabilities_1[i][0])
y_2.append(probabilities_2[i][0])
y_3.append(probabilities_3[i][0])
y_avg.append((probabilities_1[i][0]+probabilities_2[i][0]+probabilities_3[i][0])/3)
# --- SECTION 3 ---
# Plot the predicted probability of each base learner as
# a bar and the average probability as an X
plt.bar(x, y_1, 3, label='5NN')
plt.bar(x, y_2, 2, label='NB')
plt.bar(x, y_3, 1, label='50NN')
plt.scatter(x, y_avg, marker='x', c='k', s=150, label='Average Positive', zorder=10)
y = [0.5 for x in range(len(errors))]
plt.plot(y, c='k', linestyle='--')
plt.title('Positive Probability')
plt.xlabel('Test sample')
plt.ylabel('probability')
plt.legend()
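# --- Sketch: with equal weights, scikit-learn's soft voting is the argmax of
# the mean of the base learners' predict_proba outputs, which the averaged
# probabilities computed above reproduce directly.
import numpy as np
manual = np.argmax((probabilities_1 + probabilities_2 + probabilities_3) / 3,
                   axis=1)
assert (manual == voting.predict(x_test)).all()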
| 28.640351
| 92
| 0.67902
| 471
| 3,265
| 4.5138
| 0.26327
| 0.023518
| 0.016463
| 0.028222
| 0.182502
| 0.085607
| 0
| 0
| 0
| 0
| 0
| 0.032148
| 0.190199
| 3,265
| 113
| 93
| 28.893805
| 0.771936
| 0.266156
| 0
| 0
| 0
| 0
| 0.053452
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.087719
| 0
| 0.087719
| 0.087719
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7ca170e48f979878209316e327d77080c8c15058
| 2,662
|
py
|
Python
|
qiskit/ml/datasets/iris.py
|
stefan-woerner/aqua
|
12e1b867e254977d9c5992612a7919d8fe016cb4
|
[
"Apache-2.0"
] | 504
|
2018-12-15T16:34:03.000Z
|
2022-03-26T11:24:53.000Z
|
qiskit/ml/datasets/iris.py
|
stefan-woerner/aqua
|
12e1b867e254977d9c5992612a7919d8fe016cb4
|
[
"Apache-2.0"
] | 746
|
2018-12-16T16:44:42.000Z
|
2021-07-10T16:59:43.000Z
|
qiskit/ml/datasets/iris.py
|
stefan-woerner/aqua
|
12e1b867e254977d9c5992612a7919d8fe016cb4
|
[
"Apache-2.0"
] | 421
|
2018-12-22T14:49:00.000Z
|
2022-03-04T09:47:07.000Z
|
# This code is part of Qiskit.
#
# (C) Copyright IBM 2018, 2020.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""
iris dataset
"""
import numpy as np
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.decomposition import PCA
from qiskit.aqua import MissingOptionalLibraryError
def iris(training_size, test_size, n, plot_data=False):
""" returns iris dataset """
class_labels = [r'A', r'B', r'C']
data, target = datasets.load_iris(return_X_y=True)
sample_train, sample_test, label_train, label_test = \
train_test_split(data, target, test_size=1, random_state=42)
# Now we standardize for gaussian around 0 with unit variance
std_scale = StandardScaler().fit(sample_train)
sample_train = std_scale.transform(sample_train)
sample_test = std_scale.transform(sample_test)
# Now reduce number of features to number of qubits
pca = PCA(n_components=n).fit(sample_train)
sample_train = pca.transform(sample_train)
sample_test = pca.transform(sample_test)
# Scale to the range (-1,+1)
samples = np.append(sample_train, sample_test, axis=0)
minmax_scale = MinMaxScaler((-1, 1)).fit(samples)
sample_train = minmax_scale.transform(sample_train)
sample_test = minmax_scale.transform(sample_test)
# Pick training size number of samples from each distro
training_input = {key: (sample_train[label_train == k, :])[:training_size]
for k, key in enumerate(class_labels)}
test_input = {key: (sample_test[label_test == k, :])[:test_size]
for k, key in enumerate(class_labels)}
if plot_data:
try:
import matplotlib.pyplot as plt
except ImportError as ex:
raise MissingOptionalLibraryError(
libname='Matplotlib',
name='iris',
pip_install='pip install matplotlib') from ex
for k in range(0, 3):
plt.scatter(sample_train[label_train == k, 0][:training_size],
sample_train[label_train == k, 1][:training_size])
plt.title("Iris dataset")
plt.show()
return sample_train, training_input, test_input, class_labels
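# --- Minimal usage sketch for iris() above (assumes the qiskit.aqua-era
# environment the imports require): reduce to n=2 PCA features and build the
# per-class training/test dictionaries.
sample_train, training_input, test_input, class_labels = iris(
    training_size=20, test_size=5, n=2, plot_data=False)
print(class_labels, {k: v.shape for k, v in training_input.items()})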
| 38.028571
| 78
| 0.696093
| 369
| 2,662
| 4.850949
| 0.403794
| 0.086034
| 0.06648
| 0.058659
| 0.157542
| 0.075978
| 0.036872
| 0.036872
| 0
| 0
| 0
| 0.011967
| 0.215252
| 2,662
| 69
| 79
| 38.57971
| 0.844902
| 0.258077
| 0
| 0.051282
| 0
| 0
| 0.026194
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.025641
| false
| 0
| 0.205128
| 0
| 0.25641
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7ca18b95086348a6dec0e89454f15ffded086574
| 16,864
|
py
|
Python
|
tests/h/views/api_auth_test.py
|
discodavey/h
|
7bff8478b3a5b936de82ac9fcd89b355f4afd3aa
|
[
"MIT"
] | null | null | null |
tests/h/views/api_auth_test.py
|
discodavey/h
|
7bff8478b3a5b936de82ac9fcd89b355f4afd3aa
|
[
"MIT"
] | 5
|
2017-12-26T14:22:20.000Z
|
2018-04-02T02:56:38.000Z
|
tests/h/views/api_auth_test.py
|
discodavey/h
|
7bff8478b3a5b936de82ac9fcd89b355f4afd3aa
|
[
"MIT"
] | 1
|
2021-03-12T09:45:04.000Z
|
2021-03-12T09:45:04.000Z
|
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
import datetime
import json
import mock
import pytest
from oauthlib.oauth2 import InvalidRequestFatalError
from oauthlib.common import Request as OAuthRequest
from pyramid import httpexceptions
from h._compat import urlparse
from h.exceptions import OAuthTokenError
from h.models.auth_client import ResponseType
from h.services.auth_token import auth_token_service_factory
from h.services.oauth_provider import OAuthProviderService
from h.services.oauth_validator import DEFAULT_SCOPES
from h.services.user import user_service_factory
from h.util.datetime import utc_iso8601
from h.views import api_auth as views
@pytest.mark.usefixtures('routes', 'oauth_provider', 'user_svc')
class TestOAuthAuthorizeController(object):
@pytest.mark.usefixtures('authenticated_user')
@pytest.mark.parametrize('view_name', ['get', 'get_web_message'])
def test_get_validates_request(self, controller, pyramid_request, view_name):
view = getattr(controller, view_name)
view()
controller.oauth.validate_authorization_request.assert_called_once_with(
pyramid_request.url)
@pytest.mark.parametrize('view_name', ['get', 'get_web_message'])
def test_get_raises_for_invalid_request(self, controller, view_name):
controller.oauth.validate_authorization_request.side_effect = InvalidRequestFatalError('boom!')
with pytest.raises(InvalidRequestFatalError) as exc:
view = getattr(controller, view_name)
view()
assert exc.value.description == 'boom!'
@pytest.mark.parametrize('view_name', ['get', 'get_web_message'])
def test_get_redirects_to_login_when_not_authenticated(self, controller, pyramid_request, view_name):
with pytest.raises(httpexceptions.HTTPFound) as exc:
view = getattr(controller, view_name)
view()
parsed_url = urlparse.urlparse(exc.value.location)
assert parsed_url.path == '/login'
assert urlparse.parse_qs(parsed_url.query) == {'next': [pyramid_request.url],
'for_oauth': ['True']}
@pytest.mark.parametrize('response_mode,view_name', [
(None, 'get'),
('web_message', 'get_web_message'),
])
def test_get_returns_expected_context(self, controller, auth_client, authenticated_user, oauth_request, response_mode, view_name):
oauth_request.response_mode = response_mode
view = getattr(controller, view_name)
assert view() == {
'client_id': auth_client.id,
'client_name': auth_client.name,
'response_mode': response_mode,
'response_type': auth_client.response_type.value,
'state': 'foobar',
'username': authenticated_user.username,
}
@pytest.mark.parametrize('view_name', ['get', 'get_web_message'])
def test_get_creates_authorization_response_for_trusted_clients(self, controller, auth_client, authenticated_user, pyramid_request, view_name):
auth_client.trusted = True
view = getattr(controller, view_name)
view()
controller.oauth.create_authorization_response.assert_called_once_with(
pyramid_request.url,
credentials={'user': authenticated_user},
scopes=DEFAULT_SCOPES)
def test_get_returns_redirect_immediately_for_trusted_clients(self, controller, auth_client, authenticated_user, pyramid_request):
auth_client.trusted = True
response = controller.get()
expected = '{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri)
assert response.location == expected
@pytest.mark.usefixtures('authenticated_user')
def test_get_web_message_renders_template_for_trusted_clients(self, controller, auth_client):
auth_client.trusted = True
assert controller.request.override_renderer is None
controller.get_web_message()
assert controller.request.override_renderer == 'h:templates/oauth/authorize_web_message.html.jinja2'
@pytest.mark.usefixtures('authenticated_user')
def test_get_web_message_returns_context_for_trusted_clients(self, controller, auth_client):
auth_client.trusted = True
response = controller.get_web_message()
assert response == {
'code': 'abcdef123456',
'origin': 'http://client.com',
'state': 'foobar',
}
@pytest.mark.usefixtures('authenticated_user')
def test_get_web_message_allows_empty_state_in_context_for_trusted_clients(self, controller, auth_client, oauth_provider):
auth_client.trusted = True
headers = {'Location': '{}?code=abcdef123456'.format(auth_client.redirect_uri)}
oauth_provider.create_authorization_response.return_value = (headers, None, 302)
response = controller.get_web_message()
assert response['state'] is None
@pytest.mark.parametrize('view_name', ['post', 'post_web_message'])
def test_post_creates_authorization_response(self, controller, pyramid_request, authenticated_user, view_name):
pyramid_request.url = 'http://example.com/auth?client_id=the-client-id' + \
'&response_type=code' + \
'&state=foobar' + \
'&scope=exploit'
view = getattr(controller, view_name)
view()
controller.oauth.create_authorization_response.assert_called_once_with(
pyramid_request.url,
credentials={'user': authenticated_user},
scopes=DEFAULT_SCOPES)
@pytest.mark.usefixtures('authenticated_user')
@pytest.mark.parametrize('view_name', ['post', 'post_web_message'])
def test_post_raises_for_invalid_request(self, controller, view_name):
controller.oauth.create_authorization_response.side_effect = InvalidRequestFatalError('boom!')
with pytest.raises(InvalidRequestFatalError) as exc:
view = getattr(controller, view_name)
view()
assert exc.value.description == 'boom!'
def test_post_redirects_to_client(self, controller, auth_client):
response = controller.post()
expected = '{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri)
assert response.location == expected
def test_post_web_message_returns_expected_context(self, controller, auth_client):
response = controller.post_web_message()
assert response == {
'code': 'abcdef123456',
'origin': 'http://client.com',
'state': 'foobar',
}
def test_post_web_message_allows_empty_state_in_context(self, controller, auth_client, oauth_provider):
auth_client.trusted = True
headers = {'Location': '{}?code=abcdef123456'.format(auth_client.redirect_uri)}
oauth_provider.create_authorization_response.return_value = (headers, None, 302)
response = controller.post_web_message()
assert response['state'] is None
@pytest.fixture
def controller(self, pyramid_request):
pyramid_request.override_renderer = None
return views.OAuthAuthorizeController(None, pyramid_request)
@pytest.fixture
def oauth_request(self):
return OAuthRequest('/')
@pytest.fixture
def oauth_provider(self, pyramid_config, auth_client, oauth_request):
svc = mock.create_autospec(OAuthProviderService, instance=True)
scopes = ['annotation:read', 'annotation:write']
credentials = {'client_id': auth_client.id, 'state': 'foobar', 'request': oauth_request}
svc.validate_authorization_request.return_value = (scopes, credentials)
headers = {'Location': '{}?code=abcdef123456&state=foobar'.format(auth_client.redirect_uri)}
body = None
status = 302
svc.create_authorization_response.return_value = (headers, body, status)
pyramid_config.register_service(svc, name='oauth_provider')
return svc
@pytest.fixture
def auth_client(self, factories):
return factories.AuthClient(name='Test Client',
redirect_uri='http://client.com/auth/callback',
response_type=ResponseType.code)
@pytest.fixture
def user_svc(self, pyramid_config, pyramid_request):
svc = mock.Mock(spec_set=user_service_factory(None, pyramid_request))
pyramid_config.register_service(svc, name='user')
return svc
@pytest.fixture
def pyramid_request(self, pyramid_request):
pyramid_request.url = 'http://example.com/auth?client_id=the-client-id&response_type=code&state=foobar'
return pyramid_request
@pytest.fixture
def authenticated_user(self, factories, pyramid_config, user_svc):
user = factories.User.build()
pyramid_config.testing_securitypolicy(user.userid)
def fake_fetch(userid):
if userid == user.userid:
return user
user_svc.fetch.side_effect = fake_fetch
return user
@pytest.fixture
def routes(self, pyramid_config):
pyramid_config.add_route('login', '/login')
@pytest.mark.usefixtures('oauth_provider')
class TestOAuthAccessTokenController(object):
def test_it_creates_token_response(self, pyramid_request, controller, oauth_provider):
controller.post()
oauth_provider.create_token_response.assert_called_once_with(
pyramid_request.url, pyramid_request.method, pyramid_request.POST, pyramid_request.headers)
def test_it_returns_correct_response_on_success(self, controller, oauth_provider):
body = json.dumps({'access_token': 'the-access-token'})
oauth_provider.create_token_response.return_value = ({}, body, 200)
assert controller.post() == {'access_token': 'the-access-token'}
def test_it_raises_when_error(self, controller, oauth_provider):
body = json.dumps({'error': 'invalid_request'})
oauth_provider.create_token_response.return_value = ({}, body, 400)
with pytest.raises(httpexceptions.HTTPBadRequest) as exc:
controller.post()
assert exc.value.body == body
@pytest.fixture
def controller(self, pyramid_request):
pyramid_request.method = 'POST'
pyramid_request.POST['grant_type'] = 'authorization_code'
pyramid_request.POST['code'] = 'the-authz-code'
pyramid_request.headers = {'X-Test-ID': '1234'}
return views.OAuthAccessTokenController(pyramid_request)
@pytest.fixture
def oauth_provider(self, pyramid_config):
svc = mock.Mock(spec_set=['create_token_response'])
svc.create_token_response.return_value = ({}, '{}', 200)
pyramid_config.register_service(svc, name='oauth_provider')
return svc
@pytest.mark.usefixtures('oauth_provider')
class TestOAuthRevocationController(object):
def test_it_creates_revocation_response(self, pyramid_request, controller, oauth_provider):
controller.post()
oauth_provider.create_revocation_response.assert_called_once_with(
pyramid_request.url, pyramid_request.method, pyramid_request.POST, pyramid_request.headers)
def test_it_returns_empty_response_on_success(self, controller):
response = controller.post()
assert response == {}
def test_it_raises_when_error(self, controller, oauth_provider):
body = json.dumps({'error': 'invalid_request'})
oauth_provider.create_revocation_response.return_value = ({}, body, 400)
with pytest.raises(httpexceptions.HTTPBadRequest) as exc:
controller.post()
assert exc.value.body == body
@pytest.fixture
def controller(self, pyramid_request):
pyramid_request.method = 'POST'
pyramid_request.POST['token'] = 'the-token'
pyramid_request.headers = {'X-Test-ID': '1234'}
return views.OAuthRevocationController(pyramid_request)
@pytest.fixture
def oauth_provider(self, pyramid_config):
svc = mock.Mock(spec_set=['create_revocation_response'])
svc.create_revocation_response.return_value = ({}, '{}', 200)
pyramid_config.register_service(svc, name='oauth_provider')
return svc
class TestDebugToken(object):
def test_it_raises_error_when_token_is_missing(self, pyramid_request):
pyramid_request.auth_token = None
with pytest.raises(OAuthTokenError) as exc:
views.debug_token(pyramid_request)
assert exc.value.type == 'missing_token'
assert 'Bearer token is missing' in exc.value.message
def test_it_raises_error_when_token_is_empty(self, pyramid_request):
pyramid_request.auth_token = ''
with pytest.raises(OAuthTokenError) as exc:
views.debug_token(pyramid_request)
assert exc.value.type == 'missing_token'
assert 'Bearer token is missing' in exc.value.message
def test_it_validates_token(self, pyramid_request, token_service):
pyramid_request.auth_token = 'the-access-token'
views.debug_token(pyramid_request)
token_service.validate.assert_called_once_with('the-access-token')
def test_it_raises_error_when_token_is_invalid(self, pyramid_request, token_service):
pyramid_request.auth_token = 'the-token'
token_service.validate.return_value = None
with pytest.raises(OAuthTokenError) as exc:
views.debug_token(pyramid_request)
assert exc.value.type == 'missing_token'
assert 'Bearer token does not exist or is expired' in exc.value.message
def test_returns_debug_data_for_oauth_token(self, pyramid_request, token_service, oauth_token):
pyramid_request.auth_token = oauth_token.value
token_service.fetch.return_value = oauth_token
result = views.debug_token(pyramid_request)
assert result == {'userid': oauth_token.userid,
'client': {'id': oauth_token.authclient.id,
'name': oauth_token.authclient.name},
'issued_at': utc_iso8601(oauth_token.created),
'expires_at': utc_iso8601(oauth_token.expires),
'expired': oauth_token.expired}
def test_returns_debug_data_for_developer_token(self, pyramid_request, token_service, developer_token):
pyramid_request.auth_token = developer_token.value
token_service.fetch.return_value = developer_token
result = views.debug_token(pyramid_request)
assert result == {'userid': developer_token.userid,
'issued_at': utc_iso8601(developer_token.created),
'expires_at': None,
'expired': False}
@pytest.fixture
def token_service(self, pyramid_config, pyramid_request):
svc = mock.Mock(spec_set=auth_token_service_factory(None, pyramid_request))
pyramid_config.register_service(svc, name='auth_token')
return svc
@pytest.fixture
def oauth_token(self, factories):
authclient = factories.AuthClient(name='Example Client')
expires = datetime.datetime.utcnow() + datetime.timedelta(minutes=10)
return factories.DeveloperToken(authclient=authclient, expires=expires)
@pytest.fixture
def developer_token(self, factories):
return factories.DeveloperToken()
class TestAPITokenError(object):
def test_it_sets_the_response_status_code(self, pyramid_request):
context = OAuthTokenError('the error message', 'error_type', status_code=403)
views.api_token_error(context, pyramid_request)
assert pyramid_request.response.status_code == 403
def test_it_returns_the_error(self, pyramid_request):
context = OAuthTokenError('', 'error_type')
result = views.api_token_error(context, pyramid_request)
assert result['error'] == 'error_type'
def test_it_returns_error_description(self, pyramid_request):
context = OAuthTokenError('error description', 'error_type')
result = views.api_token_error(context, pyramid_request)
assert result['error_description'] == 'error description'
def test_it_skips_description_when_missing(self, pyramid_request):
context = OAuthTokenError(None, 'invalid_request')
result = views.api_token_error(context, pyramid_request)
assert 'error_description' not in result
def test_it_skips_description_when_empty(self, pyramid_request):
context = OAuthTokenError('', 'invalid_request')
result = views.api_token_error(context, pyramid_request)
assert 'error_description' not in result
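# In short, the contract exercised above: views.api_token_error renders an
# OAuthTokenError(description, type) as {'error': type, 'error_description':
# description}, omits 'error_description' when the description is falsy, and
# copies the error's status code onto the response.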
| 41.131707
| 147
| 0.694141
| 1,895
| 16,864
| 5.864908
| 0.111346
| 0.089437
| 0.027533
| 0.019435
| 0.684002
| 0.610041
| 0.560464
| 0.511337
| 0.474087
| 0.456271
| 0
| 0.007584
| 0.21027
| 16,864
| 409
| 148
| 41.232274
| 0.826926
| 0.001245
| 0
| 0.446254
| 0
| 0.003257
| 0.10801
| 0.013063
| 0
| 0
| 0
| 0
| 0.117264
| 1
| 0.153094
| false
| 0
| 0.055375
| 0.009772
| 0.273616
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7ca1d5b32a32a25d088eb63410921b9a5e64742f
| 1,306
|
py
|
Python
|
tools/build/v2/test/conditionals.py
|
juslee/boost-svn
|
6d5a03c1f5ed3e2b23bd0f3ad98d13ff33d4dcbb
|
[
"BSL-1.0"
] | 1
|
2018-12-15T19:55:56.000Z
|
2018-12-15T19:55:56.000Z
|
tools/build/v2/test/conditionals.py
|
smart-make/boost
|
46509a094f8a844eefd5bb8a0030b739a04d79e1
|
[
"BSL-1.0"
] | null | null | null |
tools/build/v2/test/conditionals.py
|
smart-make/boost
|
46509a094f8a844eefd5bb8a0030b739a04d79e1
|
[
"BSL-1.0"
] | null | null | null |
#!/usr/bin/python
# Copyright 2003 Dave Abrahams
# Copyright 2002, 2003, 2004 Vladimir Prus
# Distributed under the Boost Software License, Version 1.0.
# (See accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
# Test conditional properties.
import BoostBuild
t = BoostBuild.Tester()
# Arrange a project which will build only if 'a.cpp' is compiled with "STATIC"
# define.
t.write("a.cpp", """\
#ifdef STATIC
int main() {}
#endif
""")
# Test conditionals in target requirements.
t.write("jamroot.jam", "exe a : a.cpp : <link>static:<define>STATIC ;")
t.run_build_system(["link=static"])
t.expect_addition("bin/$toolset/debug/link-static/a.exe")
t.rm("bin")
# Test conditionals in project requirements.
t.write("jamroot.jam", """
project : requirements <link>static:<define>STATIC ;
exe a : a.cpp ;
""")
t.run_build_system(["link=static"])
t.expect_addition("bin/$toolset/debug/link-static/a.exe")
t.rm("bin")
# Regression test for a bug found by Ali Azarbayejani. Conditionals inside
# usage requirement were not being evaluated.
t.write("jamroot.jam", """
lib l : l.cpp : : : <link>static:<define>STATIC ;
exe a : a.cpp l ;
""")
t.write("l.cpp", "int i;")
t.run_build_system(["link=static"])
t.expect_addition("bin/$toolset/debug/link-static/a.exe")
t.cleanup()
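# A rough sketch of the property-evaluation rule these cases exercise
# (illustrative only -- not Boost.Build's actual implementation). A
# conditional property such as "<link>static:<define>STATIC" contributes its
# consequent only when the build request contains its condition:
def evaluate_conditional(conditional, build_properties):
    condition, _, consequent = conditional.partition(":")
    return [consequent] if condition in build_properties else []

assert evaluate_conditional("<link>static:<define>STATIC", ["<link>static"]) == ["<define>STATIC"]
assert evaluate_conditional("<link>static:<define>STATIC", ["<link>shared"]) == []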
| 26.653061
| 78
| 0.712864
| 204
| 1,306
| 4.5
| 0.436275
| 0.098039
| 0.042484
| 0.052288
| 0.396514
| 0.305011
| 0.305011
| 0.305011
| 0.239651
| 0.239651
| 0
| 0.019147
| 0.120214
| 1,306
| 48
| 79
| 27.208333
| 0.779809
| 0.420368
| 0
| 0.5
| 0
| 0
| 0.560484
| 0.254032
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.038462
| 0
| 0.038462
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7ca44058ba24c0424d8558e54e0f3abd230491fa
| 12,813
|
py
|
Python
|
spiders/juejin_spider.py
|
sunhailin-Leo/TeamLeoX_BlogsCrawler
|
389ff31e02bdff415c8bc470a3a48da1acb14c4c
|
[
"MIT"
] | null | null | null |
spiders/juejin_spider.py
|
sunhailin-Leo/TeamLeoX_BlogsCrawler
|
389ff31e02bdff415c8bc470a3a48da1acb14c4c
|
[
"MIT"
] | null | null | null |
spiders/juejin_spider.py
|
sunhailin-Leo/TeamLeoX_BlogsCrawler
|
389ff31e02bdff415c8bc470a3a48da1acb14c4c
|
[
"MIT"
] | null | null | null |
import time
from typing import Dict, List, Tuple, Optional
from utils.logger_utils import LogManager
from utils.str_utils import check_is_json
from config import LOG_LEVEL, PROCESS_STATUS_FAIL
from utils.time_utils import datetime_str_change_fmt
from utils.exception_utils import LoginException, ParseDataException
from spiders import BaseSpider, BaseSpiderParseMethodType, CookieUtils
from utils.str_utils import check_is_phone_number, check_is_email_address
logger = LogManager(__name__).get_logger_and_add_handlers(
formatter_template=5, log_level_int=LOG_LEVEL
)
class JuejinSpider(BaseSpider):
def __init__(self, task_id: str, username: str, password: str):
self._main_url = "https://juejin.im/auth/type"
self._blogs_url = "https://timeline-merger-ms.juejin.im/v1/get_entry_by_self"
self._like_blogs_url = "https://user-like-wrapper-ms.juejin.im/v1/user"
self._task_id = task_id
self._login_username = username
self._login_password = password
self._spider_name: str = f"juejin:{self._login_username}"
self._login_cookies: Optional[str] = None
self._login_token: Optional[str] = None
self._login_uid: Optional[str] = None
self._login_client_id: Optional[str] = None
self._response_data = None
self._blogs_data: List = []
self._like_blogs_data: List = []
self._like_blogs_total_page: int = 0
super().__init__()
self._login_cookies = self.get_cookies(spider_name=self._spider_name)
def _check_username(self) -> Optional[Tuple[str, Dict]]:
"""
解析用户名
:return: 结果
"""
phone_login = check_is_phone_number(data=self._login_username)
email_login = check_is_email_address(data=self._login_username)
login_data: Dict = {"password": self._login_password}
if phone_login is None and email_login is None:
raise ValueError("Your login username is illegal!")
if phone_login is not None:
login_data.update(phoneNumber=self._login_username)
return f"{self._main_url}/phoneNumber", login_data
if email_login is not None:
login_data.update(email=self._login_username)
return f"{self._main_url}/email", login_data
return None
def parse_data_with_method(self, method: str):
if method == BaseSpiderParseMethodType.LoginResult:
self._parse_login_data()
elif method == BaseSpiderParseMethodType.PersonalBlogs:
self._parse_personal_blogs()
self._parse_personal_like_blogs()
elif method == BaseSpiderParseMethodType.Finish:
self.send_data()
def login(self):
if self._login_cookies is None:
login_url, login_data = self._check_username()
response = self.make_request(
url=login_url,
headers=self._common_headers,
method="POST",
json=login_data,
)
if response.content.decode() != "":
logger.info("登录成功!")
self._response_data = response.json()
self._login_cookies = CookieUtils(
cookie_list=response.cookies.items()
).to_str()
logger.debug(self._login_cookies)
self.set_cookies(
spider_name=self._spider_name, cookies=self._login_cookies
)
self.parse_data_with_method(
method=BaseSpiderParseMethodType.LoginResult
)
else:
logger.error("登录失败!")
raise LoginException()
else:
get_result: str = self.get_data(spider_name=f"{self._spider_name}:params")
if get_result is None:
self.parse_data_with_method(
method=BaseSpiderParseMethodType.LoginResult
)
else:
try:
login_params = get_result.split("&")[1:-1]
self._login_uid = [d for d in login_params if "uid" in d][
0
].replace("uid=", "")
self._login_token = [d for d in login_params if "token" in d][
0
].replace("token=", "")
self._login_client_id = [
d for d in login_params if "device_id" in d
][0].replace("device_id=", "")
self.parse_data_with_method(
method=BaseSpiderParseMethodType.PersonalBlogs
)
except Exception as err:
logger.error(f"解析 Redis 返回数据失败! 错误原因: {err}")
self.parse_data_with_method(
method=BaseSpiderParseMethodType.LoginResult
)
def _parse_login_data(self):
# Common request parameters
self._login_token = self._response_data["token"]
self._login_uid = self._response_data["userId"]
self._login_client_id = self._response_data["clientId"]
# Persist the important parameters
params: str = f"?src=web&uid={self._login_uid}" f"&token={self._login_token}" f"&device_id={self._login_client_id}" f"&current_uid={self._login_uid}"
self.set_data(spider_name=f"{self._spider_name}:params", data=params)
# Personal profile data
username = self._response_data["user"]["username"]
description = self._response_data["user"]["selfDescription"]
avatar_img = self._response_data["user"]["avatarLarge"]
followee = self._response_data["user"]["followeesCount"]
follower = self._response_data["user"]["followersCount"]
like_blogs = self._response_data["user"]["collectedEntriesCount"]
personal_data: Dict = {
"username": username,
"description": description,
"avatarImg": avatar_img,
"followee": followee,
"follower": follower,
"likeBlogs": like_blogs,
}
logger.debug(personal_data)
self.data_model.set_personal_data(data=personal_data)
self.parse_data_with_method(method=BaseSpiderParseMethodType.PersonalBlogs)
def _parse_personal_blogs(self, next_params: Optional[str] = None):
req_data: dict = {
"src": "web",
"uid": self._login_uid,
"device_id": self._login_client_id,
"token": self._login_token,
"targetUid": self._login_uid,
"type": "post",
"limit": "20",
"order": "createdAt",
}
if next_params is not None:
req_data.update(before=next_params)
url_params: str = ""
for index, data in enumerate(req_data.items()):
if index == 0:
url_params += f"?{data[0]}={data[1]}"
else:
url_params += f"&{data[0]}={data[1]}"
blogs_url: str = f"{self._blogs_url}{url_params}"
response = self.make_request(url=blogs_url, headers=self._common_headers)
if response.content.decode() != "":
self._response_data = response.json()
if self._response_data is not None and self._response_data["m"] == "ok":
next_page_variable = None
entry_list = self._response_data["d"]["entrylist"]
if len(entry_list) > 0:
for personal_blog in entry_list:
blog_create_time = datetime_str_change_fmt(
time_str=personal_blog["createdAt"],
prev_fmt="%Y-%m-%dT%H:%M:%S.%fZ",
)
blog_data: Dict = {
"blogId": personal_blog["objectId"],
"blogTitle": personal_blog["title"],
"blogHref": personal_blog["originalUrl"],
"blogViewers": personal_blog["viewsCount"],
"blogCreateTime": blog_create_time,
}
self._blogs_data.append(blog_data)
next_page_variable = personal_blog["verifyCreatedAt"]
if self._response_data["d"]["total"] > 20:
time.sleep(0.5)
self._parse_personal_blogs(next_params=next_page_variable)
else:
logger.debug(self._blogs_data)
self.data_model.set_personal_blogs_data(data=self._blogs_data)
logger.info("获取个人博客数据成功!")
else:
logger.error("查询个人博客失败!")
self.update_task_status(
task_id=self._task_id, data=str(PROCESS_STATUS_FAIL)
)
raise LoginException()
def _parse_personal_like_blogs(self, page_no: int = 0):
like_blogs_url: str = f"{self._like_blogs_url}/{self._login_uid}/like/entry?page={page_no}&pageSize=20"
self._common_headers.update(
{
"X-Juejin-Client": str(self._login_client_id),
"X-Juejin-Src": "web",
"X-Juejin-Token": self._login_token,
"X-Juejin-Uid": self._login_uid,
}
)
response = self.make_request(url=like_blogs_url, headers=self._common_headers)
if response.content.decode() != "":
self._response_data = response.json()
if (
self._response_data is not None
and self._response_data["m"] == "success"
):
logger.info(f"当前正在获取第{page_no + 1}页的数据!")
if page_no == 0:
total_count = self._response_data["d"]["total"]
total_pages = total_count // 20
rest_count = total_count % 20
if rest_count != 0:
total_pages += 1
self._like_blogs_total_page = total_pages
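# e.g. total_count = 45 -> 45 // 20 = 2 full pages plus a remainder, so
# total_pages becomes 3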
entry_list = self._response_data["d"]["entryList"]
if len(entry_list) > 0:
for entry_data in entry_list:
if entry_data is None:
continue
blog_data: Dict = {
"blogId": entry_data["objectId"],
"blogTitle": entry_data["title"],
"blogHref": entry_data["originalUrl"],
"blogViewers": entry_data["viewsCount"],
"blogCreateTime": datetime_str_change_fmt(
time_str=entry_data["createdAt"],
prev_fmt="%Y-%m-%dT%H:%M:%S.%fZ",
),
}
self._like_blogs_data.append(blog_data)
page_no += 1
if page_no <= self._like_blogs_total_page:
# TODO: consider splitting this task across threads and fetching pages concurrently
time.sleep(0.5)
self._parse_personal_like_blogs(page_no=page_no)
else:
# logger.debug(self._like_blogs_data)
logger.debug(f"获取到 {len(self._like_blogs_data)} 条个人点赞博客")
self.data_model.set_personal_like_blogs_data(
data=self._like_blogs_data
)
logger.info("获取个人点赞博客成功!")
# 任务末尾
self.parse_data_with_method(method=BaseSpiderParseMethodType.Finish)
else:
logger.error("查询个人点赞博客失败!")
self.update_task_status(
task_id=self._task_id, data=str(PROCESS_STATUS_FAIL)
)
raise ParseDataException()
def _test_cookies(self, cookies: Optional[str] = None) -> bool:
params = self.get_data(spider_name=f"{self._spider_name}:params")
if params is None:
return False
test_user_url: str = f"https://user-storage-api-ms.juejin.im/v1/getUserInfo{params}"
test_request_headers: Dict = self.get_default_headers()
test_response = self.make_request(
url=test_user_url, headers=test_request_headers
)
if (
test_response.status_code != 200
or check_is_json(test_response.content.decode()) is not True
):
logger.error(f"当前掘金账号登录状态: 已退出!")
self._async_task.remove_async_scheduler(job_id=self._spider_name)
return False
test_json_response = test_response.json()
if test_json_response["s"] == 1:
logger.info(f"当前掘金账号为: {self._login_username}, 状态: 已登录")
return True
else:
logger.error(f"当前掘金账号登录状态: 已退出!")
return False
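if __name__ == "__main__":
    # Minimal usage sketch with hypothetical credentials; the real task runner
    # that drives this spider lives elsewhere in the project.
    spider = JuejinSpider(task_id="demo-task", username="user@example.com", password="secret")
    spider.login()  # on success this cascades into the parse_* methods above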
| 42.287129
| 157
| 0.558807
| 1,371
| 12,813
| 4.873085
| 0.170678
| 0.048496
| 0.050292
| 0.019907
| 0.340668
| 0.256698
| 0.210148
| 0.161054
| 0.115552
| 0.094896
| 0
| 0.004866
| 0.342387
| 12,813
| 302
| 158
| 42.427152
| 0.788037
| 0.008039
| 0
| 0.172414
| 0
| 0
| 0.117652
| 0.039296
| 0
| 0
| 0
| 0.003311
| 0
| 1
| 0.030651
| false
| 0.011494
| 0.034483
| 0
| 0.095785
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7ca486af10b1cca3904ea233b441a3077ec0bb6b
| 3,653
|
py
|
Python
|
NAS/PaddleSlim/train_supernet.py
|
naviocean/SimpleCVReproduction
|
61b43e3583977f42e6f91ef176ec5e1701e98d33
|
[
"Apache-2.0"
] | 923
|
2020-01-11T06:36:53.000Z
|
2022-03-31T00:26:57.000Z
|
NAS/PaddleSlim/train_supernet.py
|
Twenty3hree/SimpleCVReproduction
|
9939f8340c54dbd69b0017cecad875dccf428f26
|
[
"Apache-2.0"
] | 25
|
2020-02-27T08:35:46.000Z
|
2022-01-25T08:54:19.000Z
|
NAS/PaddleSlim/train_supernet.py
|
Twenty3hree/SimpleCVReproduction
|
9939f8340c54dbd69b0017cecad875dccf428f26
|
[
"Apache-2.0"
] | 262
|
2020-01-02T02:19:40.000Z
|
2022-03-23T04:56:16.000Z
|
from paddle.vision.transforms import (
ToTensor, RandomHorizontalFlip, RandomResizedCrop, SaturationTransform, Compose,
HueTransform, BrightnessTransform, ContrastTransform, RandomCrop, Normalize, RandomRotation
)
from paddle.vision.datasets import Cifar100
from paddle.io import DataLoader
from paddle.optimizer.lr import CosineAnnealingDecay, MultiStepDecay, LinearWarmup
import random
from resnet20 import *
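# NOTE (assumption): besides ResNet20, the wildcard import above is relied on
# to provide the ToArray, RandomApply, LRSchedulerM and CrossEntropyLoss
# helpers used below; they are not defined in this file.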
import paddle
# Supernet training, based on the PaddleSlim model compression toolkit
# https://github.com/PaddlePaddle/PaddleSlim -- stars welcome
from paddleslim.nas.ofa.convert_super import Convert, supernet
from paddleslim.nas.ofa import OFA, RunConfig, DistillConfig
from paddleslim.nas.ofa.utils import utils
channel_list = []
for i in range(1, 21):
if 0 < i <= 7:
# channel_list.append(random.choice([ 4, 8, 12, 16]))
channel_list.append(16)
elif 7 < i <= 13:
# channel_list.append(random.choice([ 4, 8, 12, 16, 20, 24, 28, 32]))
channel_list.append(32)
elif 13 < i <= 19:
# channel_list.append(random.choice([ 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56,60, 64]))
channel_list.append(64)
else:
# channel_list.append(random.choice([ 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56,60, 64]))
channel_list.append(64)
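# The loop above yields the full-width baseline:
# channel_list == [16] * 7 + [32] * 6 + [64] * 7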
net = ResNet20(100, channel_list)
net2 = ResNet20(100, channel_list)
net2.set_state_dict(paddle.load('./pretrained_model/resnet20.pdparams'))
channel_optional = []
for i in range(0, 23):
if i <= 7:
channel_optional.append([4, 8, 12, 16])
# channel_optional.append([12, 16])
elif 7 < i <= 14:
channel_optional.append([4, 8, 12, 16, 20, 24, 28, 32])
# channel_optional.append([20, 24, 28, 32])
elif 14 < i <= 21:
channel_optional.append(
[4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64])
# channel_optional.append([36, 40, 44, 48, 52, 56,60, 64])
else:
channel_optional.append(
[4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52, 56, 60, 64])
# channel_optional.append([36, 40, 44, 48, 52, 56,60, 64])
distill_config = DistillConfig(teacher_model=net2)
sp_net_config = supernet(channel=channel_optional)
sp_model = Convert(sp_net_config).convert(net)
ofa_net = OFA(sp_model, distill_config=distill_config)
ofa_net.set_task('channel')
model = paddle.Model(ofa_net)
MAX_EPOCH = 300
LR = 0.1
WEIGHT_DECAY = 5e-4
MOMENTUM = 0.9
BATCH_SIZE = 128
CIFAR_MEAN = [0.5071, 0.4865, 0.4409]
CIFAR_STD = [0.1942, 0.1918, 0.1958]
DATA_FILE = './data/data76994/cifar-100-python.tar.gz'
model.prepare(
paddle.optimizer.Momentum(
learning_rate=LinearWarmup(
CosineAnnealingDecay(LR, MAX_EPOCH), 2000, 0., LR),
momentum=MOMENTUM,
parameters=model.parameters(),
weight_decay=WEIGHT_DECAY),
CrossEntropyLoss(),
paddle.metric.Accuracy(topk=(1, 5)))
transforms = Compose([
RandomCrop(32, padding=4),
RandomApply(BrightnessTransform(0.1)),
RandomApply(ContrastTransform(0.1)),
RandomHorizontalFlip(),
RandomRotation(15),
ToArray(),
Normalize(CIFAR_MEAN, CIFAR_STD),
])
val_transforms = Compose([ToArray(), Normalize(CIFAR_MEAN, CIFAR_STD)])
train_set = Cifar100(DATA_FILE, mode='train', transform=transforms)
test_set = Cifar100(DATA_FILE, mode='test', transform=val_transforms)
callbacks = [LRSchedulerM(), paddle.callbacks.VisualDL('vis_logs/ofa_resnet20')]  # fully qualified: the bare name "callbacks" is undefined at this point
model.fit(
train_set,
test_set,
epochs=MAX_EPOCH,
batch_size=BATCH_SIZE,
save_dir='checkpoints',
save_freq=100,
shuffle=True,
num_workers=4,
verbose=1,
callbacks=callbacks,
)
| 33.209091
| 108
| 0.680537
| 505
| 3,653
| 4.786139
| 0.306931
| 0.050062
| 0.056268
| 0.019859
| 0.274721
| 0.228382
| 0.201076
| 0.189905
| 0.189905
| 0.169632
| 0
| 0.112231
| 0.185327
| 3,653
| 109
| 109
| 33.513761
| 0.699933
| 0.163701
| 0
| 0.093023
| 0
| 0
| 0.040736
| 0.031866
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.116279
| 0
| 0.116279
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7ca4b5308f48cb161081920789f0cfaed577f79d
| 28,560
|
py
|
Python
|
slashtags/mixins/commands.py
|
Myst1c-a/phen-cogs
|
672f9022ddbbd9a84b0a05357347e99e64a776fc
|
[
"MIT"
] | null | null | null |
slashtags/mixins/commands.py
|
Myst1c-a/phen-cogs
|
672f9022ddbbd9a84b0a05357347e99e64a776fc
|
[
"MIT"
] | null | null | null |
slashtags/mixins/commands.py
|
Myst1c-a/phen-cogs
|
672f9022ddbbd9a84b0a05357347e99e64a776fc
|
[
"MIT"
] | null | null | null |
"""
MIT License
Copyright (c) 2020-present phenom4n4n
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import asyncio
import logging
import re
import types
from collections import Counter
from copy import copy
from typing import Dict, List, Union
import discord
from redbot.core import commands
from redbot.core.utils.chat_formatting import box, humanize_list, inline, pagify
from redbot.core.utils.menus import DEFAULT_CONTROLS, menu
from redbot.core.utils.predicates import MessagePredicate
from tabulate import tabulate
from ..abc import MixinMeta
from ..converters import (
GlobalTagConverter,
GuildTagConverter,
PastebinConverter,
TagConverter,
TagName,
TagScriptConverter,
)
from ..http import ApplicationOptionChoice, SlashOptionType
from ..objects import ApplicationCommand, ApplicationCommandType, SlashOption, SlashTag
from ..testing.button_menus import menu as button_menu
from ..utils import ARGUMENT_NAME_DESCRIPTION, chunks, dev_check
TAG_RE = re.compile(r"(?i)(\[p\])?\b(slash\s?)?tag'?s?\b")
CHOICE_RE = re.compile(r".{1,100}:.{1,100}")
CHOICE_LIMIT = 25
log = logging.getLogger("red.phenom4n4n.slashtags.commands")
def _sub(match: re.Match) -> str:
if match.group(1):
return "[p]slashtag global"
repl = "global "
name = match.group(0)
repl += name
if name.istitle():
repl = repl.title()
return repl
def copy_doc(original: Union[commands.Command, types.FunctionType]):
def decorator(overriden: Union[commands.Command, types.FunctionType]):
doc = original.help if isinstance(original, commands.Command) else original.__doc__
doc = TAG_RE.sub(_sub, doc)
if isinstance(overriden, commands.Command):
overriden._help_override = doc
else:
overriden.__doc__ = doc
return overriden
return decorator
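# A quick illustration of the rewrite performed by TAG_RE/_sub above
# (hypothetical docstrings; results checked against the regex):
#   TAG_RE.sub(_sub, "Delete a slash tag.")  ->  "Delete a global slash tag."
#   TAG_RE.sub(_sub, "[p]slashtag usage")    ->  "[p]slashtag global usage"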
class Commands(MixinMeta):
@commands.guild_only()
@commands.group(aliases=["st"])
async def slashtag(self, ctx: commands.Context):
"""
Slash Tag management with TagScript.
These commands use TagScriptEngine.
[This site](https://phen-cogs.readthedocs.io/en/latest/index.html) has documentation on how to use TagScript blocks.
"""
@commands.mod_or_permissions(manage_guild=True)
@slashtag.command("add", aliases=["create", "+"])
async def slashtag_add(
self,
ctx: commands.Context,
tag_name: TagName(check_global=False),
*,
tagscript: TagScriptConverter,
):
"""
Add a slash tag with TagScript.
[Slash tag usage guide](https://phen-cogs.readthedocs.io/en/latest/slashtags/slashtags.html)
"""
await self.create_slash_tag(ctx, tag_name, tagscript, is_global=False)
async def create_slash_tag(
self,
ctx: commands.Context,
tag_name: str,
tagscript: str,
*,
is_global: bool = False,
command_type: ApplicationCommandType = ApplicationCommandType.CHAT_INPUT,
):
options: List[SlashOption] = []
guild_id = None if is_global else ctx.guild.id
if command_type == ApplicationCommandType.CHAT_INPUT:
try:
description = await self.send_and_query_response(
ctx,
"What should the tag description to be? (maximum 100 characters)",
pred=MessagePredicate.length_less(101, ctx),
)
except asyncio.TimeoutError:
return await ctx.send("Tag addition timed out.")
else:
description = ""
if command_type == ApplicationCommandType.CHAT_INPUT:
pred = MessagePredicate.yes_or_no(ctx)
try:
await self.send_and_query_response(
ctx, "Would you like to add arguments to this tag? (Y/n)", pred
)
except asyncio.TimeoutError:
await ctx.send("Query timed out, not adding arguments.")
else:
if pred.result is True:
await self.get_options(ctx, options)
command = ApplicationCommand(
self,
name=tag_name,
description=description,
guild_id=guild_id,
options=options,
type=command_type,
)
try:
await command.register()
except discord.Forbidden as error:
log.error(
"Failed to create command {command!r} on guild {ctx.guild!r}", exc_info=error
)
text = (
"Looks like I don't have permission to add Slash Commands here. Reinvite me "
"with this invite link and try again: <https://discordapp.com/oauth2/authorize"
f"?client_id={self.bot.user.id}&scope=bot%20applications.commands>"
)
return await ctx.send(text)
except Exception:
log.error("Failed to create command {command!r} on guild {ctx.guild!r}")
# exc info unneeded since error handler should print it, however info on the command options is needed
raise
tag = SlashTag(
self,
tagscript,
guild_id=guild_id,
author_id=ctx.author.id,
command=command,
)
await ctx.send(await tag.initialize())
async def get_options(
self, ctx: commands.Context, options: List[SlashOption]
) -> List[SlashOption]:
added_required = False
for i in range(1, 11):
try:
option = await self.get_option(ctx, added_required=added_required)
if not option.required:
added_required = True
except asyncio.TimeoutError:
await ctx.send("Adding this argument timed out.", delete_after=15)
break
options.append(option)
if i == 10:
break
pred = MessagePredicate.yes_or_no(ctx)
try:
await self.send_and_query_response(
ctx, "Would you like to add another argument? (Y/n)", pred
)
except asyncio.TimeoutError:
await ctx.send("Query timed out, not adding additional arguments.")
break
if pred.result is False:
break
return options
async def send_and_query_response(
self,
ctx: commands.Context,
query: str,
pred: MessagePredicate = None,
*,
timeout: int = 60,
) -> str:
if pred is None:
pred = MessagePredicate.same_context(ctx)
ask = await ctx.send(query)
try:
message = await self.bot.wait_for("message", check=pred, timeout=timeout)
except asyncio.TimeoutError:
await self.delete_quietly(ask)
raise
await self.delete_quietly(ask)
await self.delete_quietly(message)
return message.content
async def get_choices(self, ctx: commands.Context) -> List[ApplicationOptionChoice]:
query = (
"Send the list of choice names and values you would like to add as choices to "
"the tag. Choice names and values should be seperated by `:`, and each choice "
"should be seperated by `|`. Example:\n`dog:Doggo|cat:Catto`"
)
response = await self.send_and_query_response(ctx, query)
choices = []
for choice_text in response.split("|"):
if ":" not in choice_text:
await ctx.send(
f"Failed to parse `{choice_text}` to a choice as its name and value "
"weren't seperated by a `:`.",
delete_after=15,
)
continue
if not CHOICE_RE.match(choice_text):
await ctx.send(
f"Failed to parse `{choice_text}` to a choice as "
"its name or value exceeded the 100 character limit.",
delete_after=15,
)
continue
choice = ApplicationOptionChoice(*choice_text.split(":", 1))
choices.append(choice)
if len(choices) >= CHOICE_LIMIT:
await ctx.send(f"Reached max choices ({CHOICE_LIMIT}).")
break
return choices
async def get_option(
self, ctx: commands.Context, *, added_required: bool = False
) -> SlashOption:
name_desc = [
"What should the argument name be and description be?",
"The argument name and description should be split by a `:`.",
"Example: `member:A member of this server.`\n",
"*Slash argument names may not exceed 32 characters and can only contain characters "
"that are alphanumeric or '_' or '-'.",
"The argument description must be less than or equal to 100 characters.*",
]
name_pred = MessagePredicate.regex(ARGUMENT_NAME_DESCRIPTION, ctx)
await self.send_and_query_response(ctx, "\n".join(name_desc), name_pred)
match = name_pred.result
name, description = match.group(1), match.group(2)
valid_option_types = [
name.lower()
for name in SlashOptionType.__members__.keys()
if not name.startswith("SUB")
]
valid_option_types.append("choices")
option_query = [
"What should the argument type be?",
f"Valid option types: {humanize_list([inline(n) for n in valid_option_types])}",
"(select `string` if you don't understand)",
]
option_type = await self.send_and_query_response(
ctx,
"\n".join(option_query),
MessagePredicate.lower_contained_in(valid_option_types, ctx),
)
if option_type.lower() == "choices":
choices = await self.get_choices(ctx)
option_type = "STRING"
else:
choices = []
option_type = SlashOptionType[option_type.upper()]
if not added_required:
pred = MessagePredicate.yes_or_no(ctx)
await self.send_and_query_response(
ctx,
"Is this argument required? (Y/n)\n*Keep in mind that if you choose to make this argument optional, all following arguments must also be optional.*",
pred,
)
required = pred.result
else:
await ctx.send(
"This argument was automatically made optional as the previous one was optional.",
delete_after=15,
)
required = False
return SlashOption(
name=name.lower(),
description=description,
option_type=option_type,
required=required,
choices=choices,
)
@commands.mod_or_permissions(manage_guild=True)
@slashtag.command("message")
async def slashtag_message(
self,
ctx: commands.Context,
tag_name: TagName(check_global=False, check_regex=False),
*,
tagscript: TagScriptConverter,
):
"""
Add a message command tag with TagScript.
[Slash tag usage guide](https://phen-cogs.readthedocs.io/en/latest/slashtags/slashtags.html)
"""
await self.create_slash_tag(
ctx, tag_name, tagscript, is_global=False, command_type=ApplicationCommandType.MESSAGE
)
@commands.mod_or_permissions(manage_guild=True)
@slashtag.command("user")
async def slashtag_user(
self,
ctx: commands.Context,
tag_name: TagName(check_global=False, check_regex=False),
*,
tagscript: TagScriptConverter,
):
"""
Add a user command tag with TagScript.
[Slash tag usage guide](https://phen-cogs.readthedocs.io/en/latest/slashtags/slashtags.html)
"""
await self.create_slash_tag(
ctx, tag_name, tagscript, is_global=False, command_type=ApplicationCommandType.USER
)
@commands.mod_or_permissions(manage_guild=True)
@slashtag.command("pastebin", aliases=["++"])
async def slashtag_pastebin(
self,
ctx: commands.Context,
tag_name: TagName(check_global=False),
*,
link: PastebinConverter,
):
"""
Add a slash tag with a Pastebin link.
"""
await self.create_slash_tag(ctx, tag_name, link, is_global=False)
@commands.mod_or_permissions(manage_guild=True)
@slashtag.group("edit", aliases=["e"], invoke_without_command=True)
async def slashtag_edit(
self, ctx: commands.Context, tag: GuildTagConverter, *, tagscript: TagScriptConverter
):
"""Edit a slash tag."""
await ctx.send(await tag.edit_tagscript(tagscript))
@slashtag_edit.command("tagscript")
async def slashtag_edit_tagscript(
self, ctx: commands.Context, tag: GuildTagConverter, *, tagscript: TagScriptConverter
):
"""Edit a slash tag's TagScript."""
await self.slashtag_edit(ctx, tag, tagscript=tagscript)
@slashtag_edit.command("name")
async def slashtag_edit_name(
self, ctx: commands.Context, tag: GuildTagConverter, *, name: TagName(check_global=False)
):
"""Edit a slash tag's name."""
await ctx.send(await tag.edit_name(name))
@slashtag_edit.command("description")
async def slashtag_edit_description(
self, ctx: commands.Context, tag: GuildTagConverter, *, description: str
):
"""Edit a slash tag's description."""
await ctx.send(await tag.edit_description(description))
@slashtag_edit.command("arguments", aliases=["options"])
async def slashtag_edit_arguments(self, ctx: commands.Context, tag: GuildTagConverter):
"""
Edit a slash tag's arguments.
See [this documentation page](https://phen-cogs.readthedocs.io/en/latest/slashtags/slash_arguments.html) for more information on slash tag arguments.
"""
await tag.edit_options(ctx)
@slashtag_edit.command("argument", aliases=["option"])
async def slashtag_edit_argument(
self, ctx: commands.Context, tag: GuildTagConverter, argument: str
):
"""Edit a single slash tag's argument by name."""
await tag.edit_single_option(ctx, argument)
@commands.mod_or_permissions(manage_guild=True)
@slashtag.command("remove", aliases=["delete", "-"])
async def slashtag_remove(self, ctx: commands.Context, *, tag: GuildTagConverter):
"""Delete a slash tag."""
await ctx.send(await tag.delete())
@slashtag.command("info")
async def slashtag_info(self, ctx: commands.Context, *, tag: TagConverter):
"""Get info about a slash tag that is stored on this server."""
await tag.send_info(ctx)
@slashtag.command("raw")
async def slashtag_raw(self, ctx: commands.Context, *, tag: GuildTagConverter):
"""Get a slash tag's raw content."""
await tag.send_raw_tagscript(ctx)
@classmethod
def format_tagscript(cls, tag: SlashTag, limit: int = 60) -> str:
title = f"`{tag.type.get_prefix()}{tag.name}` - "
limit -= len(title)
tagscript = tag.tagscript
if len(tagscript) > limit - 3:
tagscript = tagscript[:limit] + "..."
tagscript = tagscript.replace("\n", " ")
return f"{title}{discord.utils.escape_markdown(tagscript)}"
async def view_slash_tags(
self,
ctx: commands.Context,
tags: Dict[int, SlashTag],
*,
is_global: bool,
):
description = [
self.format_tagscript(tag) for tag in sorted(tags.values(), key=lambda t: t.name)
]
description = "\n".join(description)
e = discord.Embed(color=await ctx.embed_color())
if is_global:
slash_tags = "global slash tags"
e.set_author(name="Global Slash Tags", icon_url=ctx.me.avatar_url)
else:
slash_tags = "slash tags"
e.set_author(name="Stored Slash Tags", icon_url=ctx.guild.icon_url)
embeds = []
pages = list(pagify(description))
for index, page in enumerate(pages, 1):
embed = e.copy()
embed.description = page
embed.set_footer(text=f"{index}/{len(pages)} | {len(tags)} {slash_tags}")
embeds.append(embed)
# await menu(ctx, embeds, DEFAULT_CONTROLS)
await button_menu(ctx, embeds)
@slashtag.command("list")
async def slashtag_list(self, ctx: commands.Context):
"""View stored slash tags."""
tags = self.guild_tag_cache[ctx.guild.id]
if not tags:
return await ctx.send("There are no slash tags on this server.")
await self.view_slash_tags(ctx, tags, is_global=False)
async def show_slash_tag_usage(self, ctx: commands.Context, guild: discord.Guild = None):
tags = self.guild_tag_cache[guild.id] if guild else self.global_tag_cache
if not tags:
message = (
"This server has no slash tags." if guild else "There are no global slash tags."
)
return await ctx.send(message)
counter = Counter({tag.name: tag.uses for tag in tags.copy().values()})
e = discord.Embed(title="Slash Tag Stats", color=await ctx.embed_color())
embeds = []
for usage_data in chunks(counter.most_common(), 10):
usage_chart = box(tabulate(usage_data, headers=("Tag", "Uses")), "prolog")
embed = e.copy()
embed.description = usage_chart
embeds.append(embed)
await menu(ctx, embeds, DEFAULT_CONTROLS)
@slashtag.command("usage", aliases=["stats"])
async def slashtag_usage(self, ctx: commands.Context):
"""
See slash tag usage stats for this server.
**Example:**
`[p]slashtag usage`
"""
await self.show_slash_tag_usage(ctx, ctx.guild)
@commands.is_owner()
@slashtag.command("restore", hidden=True)
async def slashtag_restore(self, ctx: commands.Context):
"""Restore all slash tags from the database."""
await self.restore_tags(ctx, ctx.guild)
@commands.is_owner()
@slashtag.command("clear", hidden=True)
async def slashtag_clear(self, ctx: commands.Context):
"""Clear all slash tags for this server."""
pred = MessagePredicate.yes_or_no(ctx)
try:
await self.send_and_query_response(
ctx, "Are you sure you want to delete all slash tags on this server? (Y/n)", pred
)
except asyncio.TimeoutError:
return await ctx.send("Timed out, not deleting slash tags.")
if not pred.result:
return await ctx.send("Ok, not deleting slash tags.")
guild: discord.Guild = ctx.guild
await self.http.put_guild_slash_commands(guild.id, [])
for tag in copy(self.guild_tag_cache[guild.id]).values():
tag.remove_from_cache()
tag.command.remove_from_cache()
del tag
self.guild_tag_cache[guild.id].clear()
await self.config.guild(guild).tags.clear()
await ctx.send("Tags deleted.")
@commands.is_owner()
@slashtag.group("global")
@copy_doc(slashtag)
async def slashtag_global(self, ctx: commands.Context):
pass
@slashtag_global.command("add")
@copy_doc(slashtag_add)
async def slashtag_global_add(
self,
ctx: commands.Context,
tag_name: TagName(global_priority=True),
*,
tagscript: TagScriptConverter,
):
await self.create_slash_tag(ctx, tag_name, tagscript, is_global=True)
@commands.mod_or_permissions(manage_guild=True)
@slashtag_global.command("message")
@copy_doc(slashtag_message)
async def slashtag_global_message(
self,
ctx: commands.Context,
tag_name: TagName(global_priority=True, check_regex=False),
*,
tagscript: TagScriptConverter,
):
await self.create_slash_tag(
ctx, tag_name, tagscript, is_global=True, command_type=ApplicationCommandType.MESSAGE
)
@commands.mod_or_permissions(manage_guild=True)
@slashtag_global.command("user")
@copy_doc(slashtag_user)
async def slashtag_global_user(
self,
ctx: commands.Context,
tag_name: TagName(global_priority=True, check_regex=False),
*,
tagscript: TagScriptConverter,
):
await self.create_slash_tag(
ctx, tag_name, tagscript, is_global=True, command_type=ApplicationCommandType.USER
)
@slashtag_global.command("pastebin", aliases=["++"])
@copy_doc(slashtag_pastebin)
async def slashtag_global_pastebin(
self,
ctx: commands.Context,
tag_name: TagName(check_global=False),
*,
link: PastebinConverter,
):
await self.create_slash_tag(ctx, tag_name, link, is_global=True)
@slashtag_global.group("edit", aliases=["e"], invoke_without_command=True)
@copy_doc(slashtag_edit)
async def slashtag_global_edit(
self, ctx: commands.Context, tag: GlobalTagConverter, *, tagscript: TagScriptConverter
):
await ctx.send(await tag.edit_tagscript(tagscript))
@slashtag_global_edit.command("tagscript")
@copy_doc(slashtag_edit_tagscript)
async def slashtag_global_edit_tagscript(
self, ctx: commands.Context, tag: GlobalTagConverter, *, tagscript: TagScriptConverter
):
await self.slashtag_global_edit(ctx, tag, tagscript=tagscript)
@slashtag_global_edit.command("name")
@copy_doc(slashtag_edit_name)
async def slashtag_global_edit_name(
self,
ctx: commands.Context,
tag: GlobalTagConverter,
*,
name: TagName(global_priority=True),
):
await ctx.send(await tag.edit_name(name))
@slashtag_global_edit.command("description")
@copy_doc(slashtag_edit_description)
async def slashtag_global_edit_description(
self, ctx: commands.Context, tag: GlobalTagConverter, *, description: str
):
await ctx.send(await tag.edit_description(description))
@slashtag_global_edit.command("arguments", aliases=["options"])
@copy_doc(slashtag_edit_arguments)
async def slashtag_global_edit_arguments(self, ctx: commands.Context, tag: GlobalTagConverter):
await tag.edit_options(ctx)
@slashtag_global_edit.command("argument", aliases=["option"])
@copy_doc(slashtag_edit_argument)
async def slashtag_global_edit_argument(
self, ctx: commands.Context, tag: GuildTagConverter, argument: str
):
await tag.edit_single_option(ctx, argument)
@slashtag_global.command("remove", aliases=["delete", "-"])
@copy_doc(slashtag_remove)
async def slashtag_global_remove(self, ctx: commands.Context, *, tag: GlobalTagConverter):
await ctx.send(await tag.delete())
@slashtag_global.command("raw")
@copy_doc(slashtag_raw)
async def slashtag_global_raw(self, ctx: commands.Context, *, tag: GlobalTagConverter):
await tag.send_raw_tagscript(ctx)
@slashtag_global.command("list")
@copy_doc(slashtag_list)
async def slashtag_global_list(self, ctx: commands.Context):
tags = self.global_tag_cache
if not tags:
return await ctx.send("There are no global slash tags.")
await self.view_slash_tags(ctx, tags, is_global=True)
@slashtag_global.command("usage", aliases=["stats"])
@copy_doc(slashtag_usage)
async def slashtag_global_usage(self, ctx: commands.Context):
await self.show_slash_tag_usage(ctx)
@slashtag_global.command("restore", hidden=True)
@copy_doc(slashtag_restore)
async def slashtag_global_restore(self, ctx: commands.Context):
await self.restore_tags(ctx, None)
@commands.is_owner()
@commands.group(aliases=["slashset"])
async def slashtagset(self, ctx: commands.Context):
"""Manage SlashTags settings."""
@slashtagset.command("settings")
async def slashtagset_settings(self, ctx: commands.Context):
"""View SlashTags settings."""
eval_command = f"✅ (**{self.eval_command}**)" if self.eval_command else "❎"
testing_enabled = "✅" if self.testing_enabled else "❎"
description = [
f"Application ID: **{self.application_id}**",
f"Eval command: {eval_command}",
f"Test cog loaded: {testing_enabled}",
]
embed = discord.Embed(
color=0xC9C9C9, title="SlashTags Settings", description="\n".join(description)
)
await ctx.send(embed=embed)
@slashtagset.command("appid")
async def slashtagset_appid(self, ctx: commands.Context, id: int = None):
"""
Manually set the application ID for [botname] slash commands if it differs from the bot user ID.
This only applies to legacy bots. If you don't know what this means, you don't need to worry about it.
"""
app_id = id or self.bot.user.id
await self.config.application_id.set(app_id)
self.application_id = app_id
await ctx.send(f"Application ID set to `{id}`.")
@commands.check(dev_check)
@slashtagset.command("addeval")
async def slashtagset_addeval(self, ctx: commands.Context):
"""Add a slash eval command for debugging."""
if self.eval_command:
return await ctx.send("An eval command is already registered.")
slasheval = ApplicationCommand(
self,
name="eval",
description="SlashTags debugging eval command. Only bot owners can use this.",
options=[
SlashOption(name="body", description="Code body to evaluate.", required=True)
],
)
await slasheval.register()
await self.config.eval_command.set(slasheval.id)
self.eval_command = slasheval.id
await ctx.send("`/eval` has been registered.")
@commands.check(dev_check)
@slashtagset.command("rmeval")
async def slashtagset_rmeval(self, ctx: commands.Context):
"""Remove the slash eval command."""
if not self.eval_command:
return await ctx.send("The eval command hasn't been registered.")
try:
await self.http.remove_slash_command(self.eval_command)
except discord.HTTPException:
pass
await self.config.eval_command.clear()
self.eval_command = None
await ctx.send("`/eval` has been deleted.")
@slashtagset.command("testing")
async def slashtagset_testing(self, ctx: commands.Context, true_or_false: bool = None):
"""
Load or unload the SlashTag interaction development test cog.
"""
target_state = (
true_or_false if true_or_false is not None else not await self.config.testing_enabled()
)
if target_state is self.testing_enabled:
loaded = "loaded" if target_state else "unloaded"
return await ctx.send(f"The SlashTag interaction testing cog is already {loaded}.")
await self.config.testing_enabled.set(target_state)
if target_state:
loaded = "Loaded"
self.add_test_cog()
else:
loaded = "Unloaded"
self.remove_test_cog()
await ctx.send(f"{loaded} the SlashTag interaction testing cog.")
| 37.777778
| 165
| 0.630882
| 3,365
| 28,560
| 5.211293
| 0.144131
| 0.021442
| 0.040203
| 0.058964
| 0.401175
| 0.324133
| 0.271499
| 0.229642
| 0.203923
| 0.152372
| 0
| 0.003117
| 0.269818
| 28,560
| 755
| 166
| 37.827815
| 0.837585
| 0.042647
| 0
| 0.305369
| 0
| 0.001678
| 0.129653
| 0.013486
| 0
| 0
| 0.000315
| 0
| 0
| 1
| 0.006711
| false
| 0.003356
| 0.031879
| 0
| 0.072148
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7ca53e88f3dd2e94b942b008b7daf5b989a2b7df
| 3,493
|
py
|
Python
|
product_spider/spiders/jk_spider.py
|
Pandaaaa906/product_spider
|
cc7f865f53fd3ed68f4869be3ba917c8373dfcf2
|
[
"MIT"
] | null | null | null |
product_spider/spiders/jk_spider.py
|
Pandaaaa906/product_spider
|
cc7f865f53fd3ed68f4869be3ba917c8373dfcf2
|
[
"MIT"
] | null | null | null |
product_spider/spiders/jk_spider.py
|
Pandaaaa906/product_spider
|
cc7f865f53fd3ed68f4869be3ba917c8373dfcf2
|
[
"MIT"
] | null | null | null |
import json
import re
from string import ascii_uppercase
from time import time
from urllib.parse import urljoin
import scrapy
from more_itertools import first
from scrapy import Request
from product_spider.items import JkProduct, JKPackage
from product_spider.utils.functions import strip
class JkPrdSpider(scrapy.Spider):
name = "jk"
allowed_domains = ["jkchemical.com"]
base_url = "http://www.jkchemical.com"
start_urls = map(lambda x: "http://www.jkchemical.com/CH/products/index/ProductName/{0}.html".format(x),
ascii_uppercase)
prd_size_url = "http://www.jkchemical.com/Controls/Handler/GetPackAgeJsonp.ashx?callback=py27&value={value}&cid={cid}&type=product&_={ts}"
def parse(self, response):
for xp_url in response.xpath("//div[@class='yy toa']//a/@href"):
tmp_url = self.base_url + xp_url.extract()
yield Request(tmp_url.replace("EN", "CH"), callback=self.parse_list)
def parse_list(self, response):
xp_boxes = response.xpath("//table[@id]//div[@class='PRODUCT_box']")
for xp_box in xp_boxes:
div = xp_box.xpath(".//div[2][@class='left_right mulu_text']")
brand = strip(div.xpath('.//li[@id="ctl00_cph_Content_li_lt_Brand"]/text()').get(), '')
rel_url = div.xpath('.//a[@class="name"]/@href').get()
img_url = div.xpath('.//img/@src').get()
d = {
'brand': brand.replace('-', '') or None,
"purity": div.xpath(".//li[1]/text()").get('').split(u":")[-1].strip(),
"cas": strip(div.xpath(".//li[2]//a/text()").get()),
"cat_no": div.xpath(".//li[4]/text()").get().split(u":")[-1].strip(),
"en_name": strip(xp_box.xpath(".//a[@class='name']/text()").get()),
"cn_name": strip(xp_box.xpath(".//a[@class='name']//span[1]/text()").get()),
'prd_url': rel_url and urljoin(response.url, rel_url),
'img_url': img_url and urljoin(response.url, img_url),
}
data_jkid = xp_box.xpath(".//div[@data-jkid]/@data-jkid").get()
data_cid = xp_box.xpath(".//div[@data-cid]/@data-cid").get()
yield Request(self.prd_size_url.format(value=data_jkid, cid=data_cid, ts=int(time())),
body=u"",
meta={"prd_data": d},
callback=self.parse_package)
next_page = response.xpath('//a[contains(text(), "下一页")]/@href').get()
if next_page:
yield Request(urljoin(response.url, next_page), callback=self.parse_list)
def parse_package(self, response):
s = re.findall(r"(?<=\().+(?=\))", response.text)[0]
packages = json.loads(s)
d = response.meta.get('prd_data', {})
package = first(packages, {})
if package:
d['brand'] = d['brand'] or package.get('Product', {}).get('BrandName')
yield JkProduct(**d)
for package_obj in packages:
catalog_price = package_obj.get("CatalogPrice", {})
dd = {
'brand': d.get('brand'),
'cat_no': d.get('cat_no'),
'package': package_obj.get("stringFormat"),
'price': catalog_price and catalog_price.get('Value'),
'currency': catalog_price and strip(catalog_price.get('Currency')),
'attrs': json.dumps(package_obj),
}
yield JKPackage(**dd)
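# Run with the standard Scrapy CLI (assuming the usual project layout):
#   scrapy crawl jk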
| 45.960526
| 142
| 0.566562
| 441
| 3,493
| 4.328798
| 0.310658
| 0.015715
| 0.026192
| 0.03143
| 0.147721
| 0.080671
| 0.030382
| 0.030382
| 0
| 0
| 0
| 0.004958
| 0.249356
| 3,493
| 75
| 143
| 46.573333
| 0.723112
| 0
| 0
| 0
| 0
| 0.015152
| 0.23361
| 0.073862
| 0
| 0
| 0
| 0
| 0
| 1
| 0.045455
| false
| 0
| 0.151515
| 0
| 0.287879
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
7ca69cf8d56aa0d6b0aa339f56249d7cdfe3ab0c
| 15,717
|
py
|
Python
|
env/LaneChangeEnv_v2.py
|
byq-luo/Lane_change_RL
|
3409238db939e6722441219b4c2dc66033611069
|
[
"MIT"
] | 4
|
2021-03-11T03:05:31.000Z
|
2022-03-22T08:45:20.000Z
|
env/LaneChangeEnv_v2.py
|
byq-luo/Lane_change_RL
|
3409238db939e6722441219b4c2dc66033611069
|
[
"MIT"
] | null | null | null |
env/LaneChangeEnv_v2.py
|
byq-luo/Lane_change_RL
|
3409238db939e6722441219b4c2dc66033611069
|
[
"MIT"
] | 5
|
2021-06-18T11:32:34.000Z
|
2021-12-31T08:10:51.000Z
|
import os
import sys
import random
import datetime
import gym
from gym import spaces
import numpy as np
from env.IDM import IDM
from env.Road import Road
from env.Vehicle import Vehicle
import math
# add sumo/tools into python environment
if 'SUMO_HOME' in os.environ:
tools = os.path.join(os.environ['SUMO_HOME'], 'tools')
sys.path.append(tools)
print('success')
else:
sys.exit("please declare environment variable 'SUMO_HOME'")
import traci
######################################################################
# simulation environments
class LaneChangeEnv(gym.Env):
def __init__(self, id=None, traffic=1, gui=False, seed=None):
# todo check traffic flow density
if traffic == 0:
# average 9 vehicles
self.cfg = 'C:/Users/Fei Ye/Desktop/map/ramp3/mapFree.sumo.cfg'
elif traffic == 2:
# average 19 vehicles
self.cfg = 'C:/Users/Fei Ye/Desktop/map/ramp3/mapDense.sumo.cfg'
else:
# average 14 vehicles
self.cfg = 'C:/Users/Fei Ye/Desktop/map/ramp3/map.sumo.cfg'
# SUMO command-line arguments must be strings; numeric values have to be wrapped in str(), e.g. str(0.1) rather than 0.1
self.sumoBinary = "/usr/local/Cellar/sumo/1.2.0/bin/sumo"
self.sumoCmd = ['-c', self.cfg,
# '--lanechange.duration', str(3), # using 'Simple Continuous lane-change model'
'--lateral-resolution', str(0.8), # using 'Sublane-Model'
'--step-length', str(0.1),
'--default.action-step-length', str(0.1)]
# randomness
if seed is None:
self.sumoCmd += ['--random']
else:
self.sumoCmd += ['--seed', str(seed)]
# gui
if gui is True:
self.sumoBinary += '-gui'
self.sumoCmd = [self.sumoBinary] + self.sumoCmd + ['--quit-on-end', str(True),
'--start', str(True)]
else:
self.sumoCmd = [self.sumoBinary] + self.sumoCmd
traci.start(self.sumoCmd)
self.rd = Road()
self.timestep = 0
self.dt = traci.simulation.getDeltaT()
self.randomseed = None
self.sumoseed = None
self.veh_dict = {}
self.vehID_tuple_all = ()
self.egoID = id
self.ego = None
# self.tgtLane = tgtlane
self.is_success = False
self.collision_num = 0
self.lateral_action = 2
# self.observation = [[0, 0, 0], # ego lane position and speed
# [0, 0, 0], # leader
# [0, 0, 0], # target lane leader
# [0, 0, 0]] # target lane follower
self.observation = np.empty(20)
self.reward = None # (float) : amount of reward returned after previous action
self.done = True # (bool): whether the episode has ended, in which case further step() calls will return undefined results
self.info = {
'resetFlag': 0} # (dict): contains auxiliary diagnostic information (helpful for debugging, and sometimes learning)
self.action_space = spaces.Discrete(6)
self.observation_space = spaces.Box(low=-np.inf, high=np.inf, shape=(20,))
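# Observation layout (20 floats): indices 0-3 hold the ego vehicle's
# (lanePos, speed, pos_lat, acce); slots 4*k .. 4*k+3 for k = 1..4 hold the
# same four features for the original-lane leader, original-lane follower,
# target-lane leader and target-lane follower (see updateObservation below),
# with fallback values when a slot is empty.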
def update_veh_dict(self, veh_id_tuple):
for veh_id in veh_id_tuple:
if veh_id not in self.veh_dict.keys():
self.veh_dict[veh_id] = Vehicle(veh_id, self.rd)
for veh_id in list(self.veh_dict.keys()):
if veh_id not in veh_id_tuple:
self.veh_dict.pop(veh_id)
for veh_id in list(self.veh_dict.keys()):
self.veh_dict[veh_id].update_info(self.rd, self.veh_dict)
def _updateObservationSingle(self, name, veh):
"""
:param name: 0:ego; 1:leader; 2:target leader; 3:target follower
:param id: vehicle id corresponding to name
:return:
"""
if veh is not None:
self.observation[name * 4 + 0] = veh.lanePos
self.observation[name * 4 + 1] = veh.speed
self.observation[name * 4 + 2] = veh.pos_lat
self.observation[name * 4 + 3] = veh.acce
else:
self.observation[name * 4 + 0] = self.observation[0] + 300.
self.observation[name * 4 + 1] = self.observation[1]
self.observation[name * 4 + 2] = 4.8
self.observation[name * 4 + 3] = 0
# todo: check whether these default values are reasonable
def updateObservation(self):
self.observation[0] = self.ego.lanePos
self.observation[1] = self.ego.speed
self.observation[2] = self.ego.pos_lat
self.observation[3] = self.ego.acce
self._updateObservationSingle(1, self.ego.orig_leader)
self._updateObservationSingle(2, self.ego.orig_follower)
self._updateObservationSingle(3, self.ego.trgt_leader)
self._updateObservationSingle(4, self.ego.trgt_follower)
# self.observation = np.array(self.observation).flatten()
# print(self.observation.shape)
def updateReward(self):
return -self.ego.dis2tgtLane
def updateReward2(self):
wc1 = 1
wc2 = 1
wt = 1
ws = 1
we = 1
# reward related to comfort
r_comf = wc1 * self.ego.acce ** 2 + wc2 * self.ego.delta_acce ** 2
# reward related to efficiency
r_time = - wt * self.timestep
r_speed = ws * (self.ego.speed - self.ego_speedLimit)
r_effi = we * self.ego.dis2tgtLane / self.ego.dis2entrance
r_effi_all = r_time + r_speed + r_effi
# reward related to safety
        w_lateral = 1
        w_longi = 1
        # default the four safety terms to zero so r_safe below is always defined,
        # even when no leader / target-lane leader exists
        r_long_c = r_lat_c = r_long_t = r_lat_t = 0
        if self.ego.leaderID is not None:
            # compute longitudinal time-to-collision w.r.t. the current leader
            delta_V = self.veh_dict[self.ego.leaderID].speed - self.ego.speed
            delta_A = self.veh_dict[self.ego.leaderID].acce - self.ego.acce
            if delta_A == 0:
                # constant relative speed; guard against division by zero
                TTC = -abs(self.ego.leaderDis) / delta_V if delta_V != 0 else float('inf')
            else:
                disc = delta_V ** 2 + 2 * delta_A * self.ego.leaderDis
                if disc < 0:
                    TTC = float('inf')  # no real root: the gap never closes
                else:
                    TTC = (-delta_V - math.sqrt(disc)) / delta_A
            if self.lateral_action != 1 and 0 < TTC < 2:
                r_long_c = -math.exp(-2 * TTC + 5)
            if self.lateral_action == 0:  # abort lane change
                alpha = abs(self.ego.pos_lat - self.veh_dict[self.ego.leaderID].pos_lat) / 3.2
                assert 0 <= alpha <= 1.1
                r_lat_c = -math.exp(-4 * alpha + 5)
        if self.ego.targetLeaderID is not None:
            # compute longitudinal time-to-collision w.r.t. the target-lane leader
            delta_V2 = self.veh_dict[self.ego.targetLeaderID].speed - self.ego.speed
            delta_A2 = self.veh_dict[self.ego.targetLeaderID].acce - self.ego.acce
            delta_D2 = self.veh_dict[self.ego.targetLeaderID].lanePos - self.ego.lanePos
            if delta_A2 == 0:
                TTC2 = -abs(delta_D2) / delta_V2 if delta_V2 != 0 else float('inf')
            else:
                disc2 = delta_V2 ** 2 + 2 * delta_A2 * delta_D2
                if disc2 < 0:
                    TTC2 = float('inf')
                else:
                    TTC2 = (-delta_V2 - math.sqrt(disc2)) / delta_A2
            if self.lateral_action == 1 and 0 < TTC2 < 2:
                r_long_t = -math.exp(-2 * TTC2 + 5)
            if self.lateral_action == 1:  # lane change
                alpha = abs(self.ego.pos_lat - self.veh_dict[self.ego.targetLeaderID].pos_lat) / 3.2
                assert 0 <= alpha <= 1.1
                r_lat_t = -math.exp(-4 * alpha + 5)
        r_safe = w_lateral * (r_lat_c + r_lat_t) + w_longi * (r_long_c + r_long_t)
# total reward
r_total = r_comf + r_effi_all + r_safe
return r_total
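    # Worked example for the TTC branch above (illustrative, not from the original
    # source): with a leader 20 m ahead, closing speed delta_V = -5 m/s and
    # delta_A = 0, TTC = -|20| / -5 = 4 s, so no penalty fires; the exponential
    # penalty only activates for 0 < TTC < 2 s, i.e. an imminent collision.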
def is_done(self):
# lane change successfully executed, episode ends, reset env
# todo modify
if self.is_success:
self.done = True
# print('reset on: successfully lane change, dis2targetlane:',
# self.ego.dis2tgtLane)
# too close to ramp entrance
if self.ego.dis2entrance < 10.0:
self.done = True
# print('reset on: too close to ramp entrance, dis2targetlane:',
# self.ego.dis2tgtLane)
# ego vehicle out of env
if self.egoID not in self.vehID_tuple_all:
self.done = True
# print('reset on: self.ego not in env:', self.egoID not in self.vehID_tuple_all)
# collision occurs
self.collision_num = traci.simulation.getCollidingVehiclesNumber()
if self.collision_num > 0:
self.done = True
# print('reset on: self.collision_num:', self.collision_num)
    def preStep(self):
        # advance SUMO one step without RL control, e.g. before the ego appears
        traci.simulationStep()
        self.vehID_tuple_all = traci.edge.getLastStepVehicleIDs(self.rd.entranceEdgeID)
        self.update_veh_dict(self.vehID_tuple_all)
def step(self, action=2):
"""Run one timestep of the environment's dynamics. When end of
episode is reached, call `reset()` outside env!! to reset this
environment's state.
Accepts an action and returns a tuple (observation, reward, done, info).
Args:
action (object): longitudinal0: action[0] = 1: accelerate
action[0] = -1: decelerate
action[0] = 0: use SUMO default
action[0] = others: acce = 0.0
longitudinal1: action[0] = 0: follow original lane leader
action[0] = 1: follow closer leader
longitudinal2: action[0] = 0: follow original lane leader
action[0] = 1: follow target lane leader
**important**: orginal/target lane leader will not change despite the lateral position of
the ego may change
lateral: action[1] = 1: lane change
action[1] = 0: abort lane change, change back to original lane
action[1] = 2: keep in current lateral position
Returns:
described in __init__
"""
action_longi = action // 3
action_lateral = action % 3
self.lateral_action = action_lateral
# action_longi = action[0]
# action_lateral = action[1]
assert self.done is False, 'self.done is not False'
assert action is not None, 'action is None'
assert self.egoID in self.vehID_tuple_all, 'vehicle not in env'
self.timestep += 1
# lateral control-------------------------
# episode in progress; 0:change back to original line; 1:lane change to target lane; 2:keep current
# lane change to target lane
if not self.is_success:
if action_lateral == 1: # and abs(self.ego.pos_lat - (0.5+self.ego.targetLane)*self.rd.laneWidth) > 0.01:
self.is_success = self.ego.changeLane(True, self.ego.trgt_laneIndex, self.rd)
# print('posLat', self.ego.pos_lat, 'lane', self.ego.curr_laneIndex, 'rdWdith', self.rd.laneWidth)
# print('right', -(self.ego.pos_lat - 0.5*self.rd.laneWidth))
# abort lane change, change back to ego's original lane
if action_lateral == 0: # and abs(self.ego.pos_lat - (0.5+self.ego.origLane)*self.rd.laneWidth) > 0.01:
self.is_success = self.ego.changeLane(True, self.ego.orig_laneIndex, self.rd)
# print('left', 1.5 * self.rd.laneWidth - self.ego.pos_lat)
# keep current lateral position
if action_lateral == 2:
self.is_success = self.ego.changeLane(True, -1, self.rd)
        # longitudinal control2 --------------------------------------------
        acceNext = self.ego.updateLongitudinalSpeedIDM(action_longi)
        # integrate the IDM acceleration over one simulation step
        vNext = self.ego.speed + acceNext * self.dt
        traci.vehicle.setSpeed(self.egoID, vNext)
# update info------------------------------
traci.simulationStep()
self.vehID_tuple_all = traci.edge.getLastStepVehicleIDs(self.rd.entranceEdgeID)
self.update_veh_dict(self.vehID_tuple_all)
# check if episode ends
self.is_done()
if self.done is True:
self.info['resetFlag'] = True
return self.observation, 0.0, self.done, self.info
else:
self.updateObservation()
self.reward = self.updateReward()
return self.observation, self.reward, self.done, self.info
def seed(self, seed=None):
if seed is None:
self.randomseed = datetime.datetime.now().microsecond
else:
self.randomseed = seed
random.seed(self.randomseed)
    def reset(self, egoid, tlane=0, tfc=1, is_gui=True, sumoseed=None, randomseed=None):
        """
        Reset the environment by restarting SUMO.
        :param egoid: ego vehicle id
        :param tlane: target lane index
        :param tfc: traffic level; 0: light, 1: medium, 2: dense
        :param is_gui: whether to launch the SUMO GUI
        :param sumoseed: seed for SUMO; falls back to the python random seed
        :param randomseed: seed for python's random module
        :return: initial observation
        """
        self.seed(randomseed)
        self.sumoseed = self.randomseed if sumoseed is None else sumoseed
        traci.close()
        self.__init__(id=egoid, traffic=tfc, gui=is_gui, seed=self.sumoseed)
        # keep stepping until the ego vehicle appears in the env
        if self.egoID is not None:
            while self.egoID not in self.veh_dict.keys():
                # safety during these warm-up steps is left to SUMO's defaults in preStep()
                self.preStep()
                if self.timestep > 5000:
                    raise Exception('cannot find ego after 5000 timesteps')
assert self.egoID in self.vehID_tuple_all, "cannot start training while ego is not in env"
self.done = False
self.ego = self.veh_dict[self.egoID]
self.ego.trgt_laneIndex = tlane
self.ego.is_ego = 1
# set ego vehicle speed mode
traci.vehicle.setSpeedMode(self.ego.veh_id, 0)
self.ego_speedFactor = traci.vehicle.getSpeedFactor(egoid)
self.ego_speedLimit = self.ego_speedFactor * traci.lane.getMaxSpeed(traci.vehicle.getLaneID(self.egoID))
            self.ego.idm_obj = IDM(self.ego_speedLimit)
self.ego.update_info(self.rd, self.veh_dict)
self.updateObservation()
return self.observation
return
def close(self):
traci.close()
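
# Minimal usage sketch (illustrative only; `LaneChangeEnv` is a hypothetical name
# for the enclosing class, and a matching SUMO config with an ego vehicle id
# 'ego' is assumed to exist):
#
#   env = LaneChangeEnv(id='ego', traffic=1, gui=False, seed=42)
#   obs = env.reset(egoid='ego', tlane=0, tfc=1, is_gui=False)
#   done = False
#   while not done:
#       action = env.action_space.sample()  # stand-in for a learned policy
#       obs, reward, done, info = env.step(action)
#   env.close()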
| 40.507732
| 131
| 0.56283
| 1,965
| 15,717
| 4.38117
| 0.17659
| 0.062609
| 0.029388
| 0.026135
| 0.324196
| 0.261819
| 0.220815
| 0.176559
| 0.140434
| 0.128238
| 0
| 0.023418
| 0.326207
| 15,717
| 387
| 132
| 40.612403
| 0.789518
| 0.306674
| 0
| 0.153846
| 0
| 0
| 0.049113
| 0.016498
| 0
| 0
| 0
| 0.005168
| 0.027149
| 1
| 0.054299
| false
| 0
| 0.054299
| 0.004525
| 0.140271
| 0.004525
| 0
| 0
| 0
| null
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 7ca6e02f50310d3c489fa5cfd14dac9866b27eaa
| 5,466
| py
| Python
| cidr/o365/o365.py
| jblukach/distillery
| 4087debb496d7dfc4c425c2e68246e1b0726168b
| [ "Apache-2.0" ]
| 1
| 2021-11-10T12:56:09.000Z
| 2021-11-10T12:56:09.000Z
| cidr/o365/o365.py
| jblukach/distillery
| 4087debb496d7dfc4c425c2e68246e1b0726168b
| [ "Apache-2.0" ]
| null
| null
| null
| cidr/o365/o365.py
| jblukach/distillery
| 4087debb496d7dfc4c425c2e68246e1b0726168b
| [ "Apache-2.0" ]
| 1
| 2021-11-05T03:16:32.000Z
| 2021-11-05T03:16:32.000Z
|
import boto3
import ipaddress
import json
import logging
import os
import requests
import uuid
logger = logging.getLogger()
logger.setLevel(logging.INFO)
dynamodb = boto3.resource('dynamodb')
table = dynamodb.Table(os.environ['DYNAMODB_TABLE'])
client = boto3.client('ssm')
def downloader(instance, latest, parameter, link):
    r = requests.get(link)
    if r.status_code == 200:
        cidrs = r.json()  # parse the body only after confirming a 200 response
for cidr in cidrs:
try:
if len(cidr['ips']) != 0:
for ip in cidr['ips']:
sortkey = 'O365#'+instance+'#'+cidr['serviceArea']+'#'+ip
hostmask = ip.split('/')
iptype = ipaddress.ip_address(hostmask[0])
nametype = 'IPv'+str(iptype.version)+'#'
if nametype == 'IPv4#':
netrange = ipaddress.IPv4Network(ip)
first, last = netrange[0], netrange[-1]
firstip = int(ipaddress.IPv4Address(first))
lastip = int(ipaddress.IPv4Address(last))
elif nametype == 'IPv6#':
netrange = ipaddress.IPv6Network(ip)
first, last = netrange[0], netrange[-1]
firstip = int(ipaddress.IPv6Address(first))
lastip = int(ipaddress.IPv6Address(last))
table.put_item(
Item= {
'pk': nametype,
'sk': sortkey,
'service': cidr['serviceArea'],
'cidr': ip,
'created': latest,
'endpoint': instance,
'firstip': firstip,
'lastip': lastip
}
)
            except Exception:
                # skip malformed entries, but leave a trace instead of failing silently
                logger.exception('failed to process o365 endpoint entry')
logger.info('o365 '+instance+' IP Ranges Updated')
response = client.put_parameter(
Name = parameter,
Value = str(latest),
Type = 'String',
Overwrite = True
)
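
# Illustrative check (not part of the original file): how firstip/lastip are
# derived for one CIDR, mirroring the ipaddress logic inside downloader():
#
#   >>> import ipaddress
#   >>> net = ipaddress.IPv4Network('10.0.0.0/30')
#   >>> int(ipaddress.IPv4Address(net[0])), int(ipaddress.IPv4Address(net[-1]))
#   (167772160, 167772163)
#
# Storing the first/last addresses as integers lets a range query test whether
# an arbitrary IP falls inside any stored o365 CIDR.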
def handler(event, context):
r = requests.get('https://endpoints.office.com/version?clientrequestid='+str(uuid.uuid4()))
logger.info('Link Status Code: '+str(r.status_code))
if r.status_code == 200:
versions = r.json()
logger.info(versions)
        # map each o365 instance to its SSM parameter env var and endpoint slug;
        # equivalent to handling each instance in its own elif branch
        endpoints = {
            'Worldwide': ('WORLD_PARAMETER', 'worldwide'),
            'USGovDoD': ('DOD_PARAMETER', 'USGOVDoD'),
            'USGovGCCHigh': ('HIGH_PARAMETER', 'USGOVGCCHigh'),
            'China': ('CHINA_PARAMETER', 'China'),
            'Germany': ('GERMANY_PARAMETER', 'Germany'),
        }
        for version in versions:
            if version['instance'] in endpoints:
                param, slug = endpoints[version['instance']]
                response = client.get_parameter(Name=os.environ[param])
                prevtoken = response['Parameter']['Value']
                if prevtoken != str(version['latest']):
                    logger.info('Updating o365 '+version['instance']+' IP Ranges')
                    link = 'https://endpoints.office.com/endpoints/'+slug+'?clientrequestid='+str(uuid.uuid4())
                    downloader(version['instance'], version['latest'], os.environ[param], link)
            else:
                logger.info('No o365 IP Range Updates')
return {
'statusCode': 200,
'body': json.dumps('Download o365 IP Ranges')
}
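
# Local smoke test (hypothetical; requires AWS credentials plus the
# DYNAMODB_TABLE and *_PARAMETER environment variables referenced above):
#
#   if __name__ == '__main__':
#       print(handler({}, None))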
| 47.12069
| 115
| 0.51921
| 486
| 5,466
| 5.794239
| 0.228395
| 0.035156
| 0.042614
| 0.049006
| 0.527699
| 0.470881
| 0.470881
| 0.401634
| 0.323509
| 0.323509
| 0
| 0.016733
| 0.354921
| 5,466
| 115
| 116
| 47.530435
| 0.781906
| 0
| 0
| 0.135922
| 0
| 0
| 0.212221
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019417
| false
| 0.009709
| 0.067961
| 0
| 0.097087
| 0
| 0
| 0
| 0
| null
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|