hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
19f86b537c0bae3f03cb054ba2a00a84283387fd
| 264
|
py
|
Python
|
Dataset/Leetcode/test/26/20.py
|
kkcookies99/UAST
|
fff81885aa07901786141a71e5600a08d7cb4868
|
[
"MIT"
] | null | null | null |
Dataset/Leetcode/test/26/20.py
|
kkcookies99/UAST
|
fff81885aa07901786141a71e5600a08d7cb4868
|
[
"MIT"
] | null | null | null |
Dataset/Leetcode/test/26/20.py
|
kkcookies99/UAST
|
fff81885aa07901786141a71e5600a08d7cb4868
|
[
"MIT"
] | null | null | null |
class Solution:
def XXX(self, nums: List[int]) -> int:
lenth = len(nums)-1
if lenth > 0:
for i in range(lenth):
if nums[lenth-i] == nums[lenth-i-1]:
del nums[lenth-i-1]
return len(nums)
| 26.4
| 52
| 0.469697
| 37
| 264
| 3.351351
| 0.513514
| 0.217742
| 0.241935
| 0.177419
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025157
| 0.397727
| 264
| 9
| 53
| 29.333333
| 0.754717
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
19fcc623262d860189e8189235d583ae4e65f09f
| 4,567
|
py
|
Python
|
reports/migrations/0001_initial.py
|
prabinrs/surveilance-system
|
1a9f118737d1043133dbb7247573b4616a680c2d
|
[
"BSD-3-Clause"
] | null | null | null |
reports/migrations/0001_initial.py
|
prabinrs/surveilance-system
|
1a9f118737d1043133dbb7247573b4616a680c2d
|
[
"BSD-3-Clause"
] | 2
|
2020-06-05T21:39:21.000Z
|
2021-06-10T21:40:18.000Z
|
reports/migrations/0001_initial.py
|
prabinrs/surveilance-system
|
1a9f118737d1043133dbb7247573b4616a680c2d
|
[
"BSD-3-Clause"
] | 1
|
2020-02-26T15:06:32.000Z
|
2020-02-26T15:06:32.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.9.1 on 2016-07-10 15:25
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
('location', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='Group',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('group_code', models.CharField(blank=True, max_length=100)),
('name', models.CharField(blank=True, max_length=100)),
],
options={
'verbose_name': 'Group',
'verbose_name_plural': 'Groups',
},
),
migrations.CreateModel(
name='ICD',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('icd_code', models.CharField(blank=True, max_length=100)),
('morbidity_code', models.CharField(blank=True, max_length=100)),
('name', models.CharField(blank=True, max_length=100)),
],
options={
'verbose_name': 'ICD',
'verbose_name_plural': 'ICDs',
},
),
migrations.CreateModel(
name='Morbidity',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('morbidity_code', models.CharField(blank=True, max_length=100)),
('name', models.CharField(blank=True, max_length=100)),
],
options={
'verbose_name': 'Morbidity',
'verbose_name_plural': 'Morbidities',
},
),
migrations.CreateModel(
name='Outreach',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('outreach_code', models.CharField(blank=True, max_length=100)),
('name', models.CharField(blank=True, max_length=100)),
],
options={
'verbose_name': 'Outreach',
'verbose_name_plural': 'Outreaches',
},
),
migrations.CreateModel(
name='PatientHA',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('visit_date', models.DateField()),
('sent_date', models.DateField()),
('ha_provider_id', models.CharField(blank=True, max_length=100)),
('patient_id', models.CharField(blank=True, max_length=100)),
('age', models.CharField(blank=True, max_length=100)),
('unit', models.CharField(blank=True, max_length=100)),
('gender', models.CharField(blank=True, max_length=100)),
('ward', models.IntegerField()),
('obs_k', models.CharField(blank=True, max_length=100)),
('obs_l', models.CharField(blank=True, max_length=100)),
('obs_m', models.CharField(blank=True, max_length=100)),
('derived_n', models.CharField(blank=True, max_length=100)),
('derived_o', models.CharField(blank=True, max_length=100)),
('derived_p', models.CharField(blank=True, max_length=100)),
('derived_q', models.CharField(blank=True, max_length=100)),
('derived_r', models.CharField(blank=True, max_length=100)),
('derived_s', models.CharField(blank=True, max_length=100)),
('district', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='patient_has', to='location.District')),
('group', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='patient_has', to='reports.Group')),
('icd', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='patient_has', to='reports.ICD')),
('outreach', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='patient_has', to='reports.Outreach')),
('vdc', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='patient_has', to='location.VDC')),
],
),
]
| 47.572917
| 145
| 0.567988
| 466
| 4,567
| 5.373391
| 0.197425
| 0.13778
| 0.183706
| 0.220447
| 0.726038
| 0.726038
| 0.726038
| 0.66853
| 0.488019
| 0.488019
| 0
| 0.027259
| 0.285089
| 4,567
| 95
| 146
| 48.073684
| 0.739663
| 0.01467
| 0
| 0.402299
| 1
| 0
| 0.135424
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.034483
| 0
| 0.08046
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
c239f5c3d4947e1407e55108ddd39325d57ce40b
| 144
|
py
|
Python
|
tests/examples-bad/classdup.py
|
JohannesBuchner/pystrict3
|
f442a89ac6a23f4323daed8ef829d8e9e1197f90
|
[
"BSD-2-Clause"
] | 1
|
2020-06-05T08:53:26.000Z
|
2020-06-05T08:53:26.000Z
|
tests/examples-bad/classdup.py
|
JohannesBuchner/pystrict3
|
f442a89ac6a23f4323daed8ef829d8e9e1197f90
|
[
"BSD-2-Clause"
] | 1
|
2020-06-04T13:47:19.000Z
|
2020-06-04T13:47:57.000Z
|
tests/examples-bad/classdup.py
|
JohannesBuchner/pystrict3
|
f442a89ac6a23f4323daed8ef829d8e9e1197f90
|
[
"BSD-2-Clause"
] | 1
|
2020-11-07T17:02:46.000Z
|
2020-11-07T17:02:46.000Z
|
class Foo0():
def __init__(self):
pass
foo1 = Foo0()
class Foo0(): ## error: redefined class
def __init__(self, a):
pass
foo2 = Foo0()
| 12
| 39
| 0.638889
| 20
| 144
| 4.2
| 0.55
| 0.214286
| 0.261905
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052632
| 0.208333
| 144
| 11
| 40
| 13.090909
| 0.684211
| 0.152778
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0.25
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
df99b03a9d330f18fb903fd9f572a6f8a7b50ba7
| 2,264
|
py
|
Python
|
tests/test_models/test_sum_aggregate_by_user_report.py
|
wikimedia/analytics-wikimetrics
|
1d2036657b06ccd16ecfc76edd3f9a6119ff75f4
|
[
"MIT"
] | 6
|
2015-01-28T05:59:08.000Z
|
2018-01-09T07:48:57.000Z
|
tests/test_models/test_sum_aggregate_by_user_report.py
|
wikimedia/analytics-wikimetrics
|
1d2036657b06ccd16ecfc76edd3f9a6119ff75f4
|
[
"MIT"
] | 2
|
2020-05-09T16:36:43.000Z
|
2020-05-09T16:52:35.000Z
|
tests/test_models/test_sum_aggregate_by_user_report.py
|
wikimedia/analytics-wikimetrics
|
1d2036657b06ccd16ecfc76edd3f9a6119ff75f4
|
[
"MIT"
] | 1
|
2016-01-13T07:19:44.000Z
|
2016-01-13T07:19:44.000Z
|
from nose.tools import assert_equals, assert_true
from wikimetrics.metrics import metric_classes
from wikimetrics.models import SumAggregateByUserReport
from wikimetrics.models.storage.wikiuser import WikiUserKey
from wikimetrics.enums import Aggregation
from ..fixtures import DatabaseTest
class SumAggregateByUserReportWithoutQueueTest(DatabaseTest):
def setUp(self):
DatabaseTest.setUp(self)
self.common_cohort_1()
def test_finish_positive(self):
metric = metric_classes['RollingActiveEditor']()
report = SumAggregateByUserReport(self.cohort, metric)
report.usernames = {
WikiUserKey(1, 'enwiki', 12): 'John',
WikiUserKey(2, 'dewiki', 12): 'John',
WikiUserKey(3, 'frwiki', 12): 'John',
WikiUserKey(4, 'ptwiki', 12): 'Kate',
}
finished = report.finish([{
'1|enwiki|12': {'rolling_active_editor': 0},
'2|dewiki|12': {'rolling_active_editor': 1},
'3|frwiki|12': {'rolling_active_editor': 0},
'4|ptwiki|12': {'rolling_active_editor': 1},
}])
assert_equals(len(finished), 1)
assert_true(Aggregation.SUM in finished)
assert_true('rolling_active_editor' in finished[Aggregation.SUM])
assert_equals(finished[Aggregation.SUM]['rolling_active_editor'], 2)
def test_finish_negative(self):
metric = metric_classes['RollingActiveEditor']()
report = SumAggregateByUserReport(self.cohort, metric)
report.usernames = {
WikiUserKey(1, 'enwiki', 12): 'John',
WikiUserKey(2, 'dewiki', 12): 'John',
WikiUserKey(3, 'frwiki', 12): 'John',
WikiUserKey(4, 'ptwiki', 12): 'Kate',
}
finished = report.finish([{
'1|enwiki|12': {'rolling_active_editor': 0},
'2|dewiki|12': {'rolling_active_editor': 0},
'3|frwiki|12': {'rolling_active_editor': 0},
'4|ptwiki|12': {'rolling_active_editor': 0},
}])
assert_equals(len(finished), 1)
assert_true(Aggregation.SUM in finished)
assert_true('rolling_active_editor' in finished[Aggregation.SUM])
assert_equals(finished[Aggregation.SUM]['rolling_active_editor'], 0)
| 42.716981
| 76
| 0.638251
| 240
| 2,264
| 5.841667
| 0.220833
| 0.11127
| 0.162625
| 0.119829
| 0.71184
| 0.708987
| 0.708987
| 0.708987
| 0.708987
| 0.708987
| 0
| 0.035078
| 0.23189
| 2,264
| 52
| 77
| 43.538462
| 0.771133
| 0
| 0
| 0.583333
| 0
| 0
| 0.202297
| 0.111307
| 0
| 0
| 0
| 0
| 0.1875
| 1
| 0.0625
| false
| 0
| 0.125
| 0
| 0.208333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
df9f3c0e92bffcb38e5adfe2df0bb08a894807c7
| 159
|
py
|
Python
|
generated-libraries/python/netapp/lun/san_size.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | 2
|
2017-03-28T15:31:26.000Z
|
2018-08-16T22:15:18.000Z
|
generated-libraries/python/netapp/lun/san_size.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | null | null | null |
generated-libraries/python/netapp/lun/san_size.py
|
radekg/netapp-ontap-lib-get
|
6445ebb071ec147ea82a486fbe9f094c56c5c40d
|
[
"MIT"
] | null | null | null |
class SanSize(int):
"""
Size in bytes
Range : [0..2^63-1].
"""
@staticmethod
def get_api_name():
return "san-size"
| 14.454545
| 27
| 0.477987
| 19
| 159
| 3.894737
| 0.947368
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.05
| 0.371069
| 159
| 10
| 28
| 15.9
| 0.69
| 0.213836
| 0
| 0
| 0
| 0
| 0.07619
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0
| 0.25
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
a048820ace4a4fa970d9206512dd93ff746c73a2
| 216
|
py
|
Python
|
consensual/core/raft/__init__.py
|
lycantropos/consensual
|
0dcb850a39a81bbbb7b79fe6e7f8ce2fc4588c69
|
[
"MIT"
] | 2
|
2022-02-15T08:10:35.000Z
|
2022-02-15T15:22:16.000Z
|
consensual/core/raft/__init__.py
|
lycantropos/consensual
|
0dcb850a39a81bbbb7b79fe6e7f8ce2fc4588c69
|
[
"MIT"
] | null | null | null |
consensual/core/raft/__init__.py
|
lycantropos/consensual
|
0dcb850a39a81bbbb7b79fe6e7f8ce2fc4588c69
|
[
"MIT"
] | null | null | null |
from . import communication
from .hints import Processor
from .messages import MessageKind
from .node import Node
from .receiver import Receiver
from .sender import (ReceiverUnavailable,
Sender)
| 27
| 41
| 0.74537
| 24
| 216
| 6.708333
| 0.458333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.212963
| 216
| 7
| 42
| 30.857143
| 0.947059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.857143
| 0
| 0.857143
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
a0910b6d70baafe08cdfdb9f5b4f8e619a106d8c
| 166
|
py
|
Python
|
groups/rules.py
|
sylae/pctnet
|
fde46f4e97a293a4ecee5fd2ebd1b526b2003a7b
|
[
"MIT"
] | 1
|
2018-11-19T04:43:03.000Z
|
2018-11-19T04:43:03.000Z
|
groups/rules.py
|
sylae/pctnet
|
fde46f4e97a293a4ecee5fd2ebd1b526b2003a7b
|
[
"MIT"
] | 50
|
2018-11-19T03:35:26.000Z
|
2021-06-10T18:01:21.000Z
|
groups/rules.py
|
sylae/pctnet
|
fde46f4e97a293a4ecee5fd2ebd1b526b2003a7b
|
[
"MIT"
] | 4
|
2018-12-22T22:10:40.000Z
|
2020-09-17T03:44:08.000Z
|
import rules
@rules.predicate
def can_edit_grouppage(user, group):
return user in group.admins.all()
rules.add_rule('can_edit_grouppage', can_edit_grouppage)
| 16.6
| 56
| 0.783133
| 25
| 166
| 4.92
| 0.6
| 0.170732
| 0.390244
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.120482
| 166
| 9
| 57
| 18.444444
| 0.842466
| 0
| 0
| 0
| 0
| 0
| 0.108434
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0.2
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
a0937a86cc9be338f969fe520f3c6568c88edc97
| 12,262
|
py
|
Python
|
tests/sender/send_lookup.py
|
angel-devo/python-sdk
|
18e72bce07a3932093973f433d540d5c37cbd684
|
[
"MIT"
] | null | null | null |
tests/sender/send_lookup.py
|
angel-devo/python-sdk
|
18e72bce07a3932093973f433d540d5c37cbd684
|
[
"MIT"
] | null | null | null |
tests/sender/send_lookup.py
|
angel-devo/python-sdk
|
18e72bce07a3932093973f433d540d5c37cbd684
|
[
"MIT"
] | null | null | null |
import unittest
from ssl import CERT_NONE
from unittest import mock
from devo.sender import Sender, SenderConfigSSL, Lookup
from .load_certs import *
class TestLookup(unittest.TestCase):
def setUp(self):
self.server = os.getenv("DEVO_SENDER_SERVER", "127.0.0.1")
self.port = int(os.getenv("DEVO_SENDER_PORT", 4488))
self.key = os.getenv("DEVO_SENDER_KEY", CLIENT_KEY)
self.cert = os.getenv("DEVO_SENDER_CERT", CLIENT_CERT)
self.chain = os.getenv("DEVO_SENDER_CHAIN", CLIENT_CHAIN)
self.lookup_name = "Test_Lookup_of_today"
self.lookup_file = "".join(
(
os.path.dirname(os.path.abspath(__file__)),
os.sep,
"testfile_lookup.csv",
)
)
self.lookup_key = "KEY"
def test_ssl_lookup_csv_send(self):
engine_config = SenderConfigSSL(
address=(self.server, self.port),
key=self.key,
cert=self.cert,
chain=self.chain,
check_hostname=False,
verify_mode=CERT_NONE,
)
con = Sender(engine_config)
lookup = Lookup(name=self.lookup_name, historic_tag=None, con=con)
with open(self.lookup_file) as f:
line = f.readline()
lookup.send_csv(
self.lookup_file,
headers=line.rstrip().split(","),
key=self.lookup_key,
)
con.socket.shutdown(0)
# Add new line to lookup
def test_ssl_lookup_new_line(self):
engine_config = SenderConfigSSL(
address=(self.server, self.port),
key=self.key,
cert=self.cert,
chain=self.chain,
check_hostname=False,
verify_mode=CERT_NONE,
)
con = Sender(engine_config)
lookup = Lookup(name=self.lookup_name, historic_tag=None, con=con)
p_headers = Lookup.list_to_headers(["KEY", "HEX", "COLOR"], "KEY")
lookup.send_control("START", p_headers, "INC")
if len(con.socket.recv(1000)) == 0:
raise Exception("Not msg sent!")
lookup.send_data_line(key="11", fields=["11", "HEX12", "COLOR12"])
if len(con.socket.recv(1000)) == 0:
raise Exception("Not msg sent!")
lookup.send_control("END", p_headers, "INC")
if len(con.socket.recv(1000)) == 0:
raise Exception("Not msg sent!")
con.socket.shutdown(0)
def test_create_lookup_key_index_preserves_structure(self):
engine_config = SenderConfigSSL(
address=(self.server, self.port),
key=self.key,
cert=self.cert,
chain=self.chain,
check_hostname=False,
verify_mode=CERT_NONE,
)
con = Sender(engine_config)
lookup = Lookup(name=self.lookup_name, con=con)
headers = ["col1", "col2", "col3"]
fields = ["a", "b", "c"]
expected_headers = '[{"col1":{"type":"str","key":true}},{"col2":{"type":"str"}},{"col3":{"type":"str"}}]'
with mock.patch.object(
lookup, "send_control", wraps=lookup.send_control
) as lookup_spy:
lookup.send_headers(
headers=headers, key_index=0, event="START", action="FULL"
)
lookup_spy.assert_called_with(
action="FULL", event="START", headers=expected_headers
)
lookup.send_data_line(key_index=0, fields=fields)
lookup.send_headers(
headers=headers, key_index=0, event="END", action="FULL"
)
lookup_spy.assert_called_with(
action="FULL", event="END", headers=expected_headers
)
con.socket.shutdown(0)
def test_send_headers_with_type_of_key(self):
engine_config = SenderConfigSSL(
address=(self.server, self.port),
key=self.key,
cert=self.cert,
chain=self.chain,
check_hostname=False,
verify_mode=CERT_NONE,
)
con = Sender(engine_config)
lookup = Lookup(name=self.lookup_name, con=con)
headers = ["col1", "col2", "col3"]
expected_headers = '[{"col1":{"type":"int4","key":true}},{"col2":{"type":"str"}},{"col3":{"type":"str"}}]'
with mock.patch.object(
lookup, "send_control", wraps=lookup.send_control
) as lookup_spy:
lookup.send_headers(
headers=headers,
key_index=0,
type_of_key="int4",
event="START",
action="FULL",
)
lookup_spy.assert_called_with(
action="FULL", event="START", headers=expected_headers
)
con.socket.shutdown(0)
# add new line deleting previous data
def test_ssl_lookup_override(self):
engine_config = SenderConfigSSL(
address=(self.server, self.port),
key=self.key,
cert=self.cert,
chain=self.chain,
check_hostname=False,
verify_mode=CERT_NONE,
)
con = Sender(engine_config)
lookup = Lookup(name=self.lookup_name, historic_tag=None, con=con)
p_headers = Lookup.list_to_headers(["KEY", "HEX", "COLOR"], "KEY")
lookup.send_control("START", p_headers, "FULL")
if len(con.socket.recv(1000)) == 0:
raise Exception("Not msg sent!")
lookup.send_data_line(key="11", fields=["11", "HEX12", "COLOR12"])
if len(con.socket.recv(1000)) == 0:
raise Exception("Not msg sent!")
lookup.send_control("END", p_headers, "FULL")
if len(con.socket.recv(1000)) == 0:
raise Exception("Not msg sent!")
con.socket.shutdown(0)
# delete a line from lookup
def test_ssl_lookup_delete_line(self):
engine_config = SenderConfigSSL(
address=(self.server, self.port),
key=self.key,
cert=self.cert,
chain=self.chain,
check_hostname=False,
verify_mode=CERT_NONE,
)
con = Sender(engine_config)
lookup = Lookup(name=self.lookup_name, historic_tag=None, con=con)
p_headers = Lookup.list_to_headers(["KEY", "HEX", "COLOR"], "KEY")
lookup.send_control("START", p_headers, "INC")
if len(con.socket.recv(1000)) == 0:
raise Exception("Not msg sent!")
lookup.send_data_line(
key="11", fields=["11", "HEX12", "COLOR12"], delete=True
)
if len(con.socket.recv(1000)) == 0:
raise Exception("Not msg sent!")
lookup.send_control("END", p_headers, "INC")
if len(con.socket.recv(1000)) == 0:
raise Exception("Not msg sent!")
con.socket.shutdown(0)
def test_ssl_lookup_simplify(self):
engine_config = SenderConfigSSL(
address=(self.server, self.port),
key=self.key,
cert=self.cert,
chain=self.chain,
check_hostname=False,
verify_mode=CERT_NONE,
)
con = Sender(engine_config)
lookup = Lookup(name=self.lookup_name, historic_tag=None, con=con)
lookup.send_headers(
headers=["KEY", "HEX", "COLOR"], key="KEY", action="START"
)
if len(con.socket.recv(1000)) == 0:
raise Exception("Not msg sent!")
lookup.send_data_line(key="11", fields=["11", "HEX12", "COLOR12"])
if len(con.socket.recv(1000)) == 0:
raise Exception("Not msg sent!")
lookup.send_headers(
headers=["KEY", "HEX", "COLOR"], key="KEY", action="END"
)
if len(con.socket.recv(1000)) == 0:
raise Exception("Not msg sent!")
con.socket.shutdown(0)
def test_check_is_number(self):
self.assertTrue(Lookup.is_number("5"))
self.assertTrue(Lookup.is_number("5.0"))
def test_check_is_not_a_number(self):
self.assertFalse(
Lookup.is_number(
"5551,HNBId=001D4C-1213120051,"
"Fsn=1213120051,bSRName=,"
"manualPscUsed=false"
)
)
self.assertFalse(Lookup.is_number("5."))
self.assertFalse(Lookup.is_number("5,0"))
def test_process_fields_does_not_modify_arguments(self):
fields = ["a", "b", "c"]
processed_fields = Lookup.process_fields(fields, key_index=1)
self.assertEqual(fields, ["a", "b", "c"])
self.assertEqual(processed_fields, '"b","a","c"')
# Clean field
def test_clean_field_parametrized(self):
test_params = [
("No double quotes", False, '"No double quotes"'),
("No double quotes", True, '"No double quotes"'),
('Double quotes"', False, '"Double quotes""'),
('Double quotes"', True, '"Double quotes"""')
]
for field, escape_quotes, expected_result in test_params:
with self.subTest(
field=field,
escape_quotes=escape_quotes,
expected_result=expected_result
):
result = Lookup.clean_field(field, escape_quotes)
self.assertEqual(result, expected_result)
# Test to make sure escape_quotes is propagated correctly
def test_escape_quotes_in_send_data_line_key(self):
engine_config = SenderConfigSSL(
address=(self.server, self.port),
key=self.key,
cert=self.cert,
)
con = Sender(engine_config)
lookup = Lookup(name=self.lookup_name, historic_tag=None, con=con,
escape_quotes=True)
with mock.patch.object(Lookup, 'clean_field',
wraps=Lookup.clean_field) as clean_field:
lookup.send_data_line(key="11", fields=["11", 'Double quotes"'])
clean_field.assert_called_with('Double quotes"', True)
# Test to make sure escape_quotes is propagated correctly
def test_escape_quotes_in_send_data_line(self):
engine_config = SenderConfigSSL(
address=(self.server, self.port),
key=self.key,
cert=self.cert,
)
con = Sender(engine_config)
lookup = Lookup(name=self.lookup_name, historic_tag=None, con=con,
escape_quotes=True)
with mock.patch.object(Lookup, 'clean_field',
wraps=Lookup.clean_field) as clean_field:
lookup.send_data_line(fields=["11", 'Double quotes"'])
clean_field.assert_called_with('Double quotes"', True)
# Test to make sure escape_quotes is propagated correctly
def test_escape_quotes_in_send_csv(self):
engine_config = SenderConfigSSL(
address=(self.server, self.port),
key=self.key,
cert=self.cert,
)
con = Sender(engine_config)
lookup = Lookup(name=self.lookup_name, historic_tag=None, con=con,
escape_quotes=True)
with mock.patch.object(Lookup, 'clean_field',
wraps=Lookup.clean_field) as clean_field:
lookup.send_csv(path=self.lookup_file,
has_header=True,
key=self.lookup_key)
clean_field.assert_called_with('ffffff', True)
# Test to make sure escape_quotes is propagated correctly
def test_escape_quotes_in_send_csv_delete_index(self):
engine_config = SenderConfigSSL(
address=(self.server, self.port),
key=self.key,
cert=self.cert,
)
con = Sender(engine_config)
lookup = Lookup(name=self.lookup_name, historic_tag=None, con=con,
escape_quotes=True)
with mock.patch.object(Lookup, 'clean_field',
wraps=Lookup.clean_field) as clean_field:
lookup.send_csv(path=self.lookup_file,
has_header=True,
key=self.lookup_key, delete_field="Green")
clean_field.assert_called_with('ffffff', True)
if __name__ == "__main__":
unittest.main()
| 36.171091
| 114
| 0.569728
| 1,418
| 12,262
| 4.715092
| 0.11213
| 0.037392
| 0.025127
| 0.025127
| 0.771014
| 0.754861
| 0.740652
| 0.709243
| 0.708495
| 0.701765
| 0
| 0.019816
| 0.308596
| 12,262
| 338
| 115
| 36.278107
| 0.768813
| 0.026097
| 0
| 0.595745
| 0
| 0.007092
| 0.092852
| 0.018604
| 0
| 0
| 0
| 0
| 0.053191
| 1
| 0.056738
| false
| 0
| 0.017731
| 0
| 0.078014
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
39fee5475d1ba8dd4165cb59811377a68405b96c
| 874
|
py
|
Python
|
ezotv/utils/luna_source.py
|
marcsello/ezotv-frontend
|
405c440a567e8a0f1577f10d45385f3171398afe
|
[
"CC0-1.0"
] | null | null | null |
ezotv/utils/luna_source.py
|
marcsello/ezotv-frontend
|
405c440a567e8a0f1577f10d45385f3171398afe
|
[
"CC0-1.0"
] | 7
|
2020-01-23T00:50:39.000Z
|
2020-04-18T20:34:40.000Z
|
ezotv/utils/luna_source.py
|
marcsello/ezotv-frontend
|
405c440a567e8a0f1577f10d45385f3171398afe
|
[
"CC0-1.0"
] | null | null | null |
#!/usr/bin/env python3
from cache_tools import CachedBaseHttpSession
# TODO: Schema checking
class LunaSource:
def __init__(self, api_url: str, api_key: str):
self._session = CachedBaseHttpSession("LUNA", api_url)
self._session.headers.update({
"Authorization": api_key
})
def _get_json(self, path: str):
r = self._session.get(path)
r.raise_for_status()
return r.json()
@property
def latest_backup(self):
return self._get_json("backups/$latest")
@property
def backup_list(self):
return self._get_json("backups")
@property
def server_status(self):
return self._get_json("status")
@property
def players_data(self):
return self._get_json("playerdata")
@property
def map_status(self):
return self._get_json("maprender")
| 22.410256
| 62
| 0.640732
| 105
| 874
| 5.038095
| 0.428571
| 0.079395
| 0.132325
| 0.160681
| 0.247637
| 0.20794
| 0
| 0
| 0
| 0
| 0
| 0.001527
| 0.250572
| 874
| 38
| 63
| 23
| 0.806107
| 0.049199
| 0
| 0.192308
| 0
| 0
| 0.077201
| 0
| 0
| 0
| 0
| 0.026316
| 0
| 1
| 0.269231
| false
| 0
| 0.038462
| 0.192308
| 0.576923
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
2619da9e173692bb28d942c9b9d91d59ee58f847
| 10,181
|
py
|
Python
|
Utils/Logistic.py
|
Golden-Slumber/AirFL-2nd
|
d362de22f41058cd793b4b2d6e94ef0c22ffc988
|
[
"MIT"
] | 1
|
2022-03-31T02:56:33.000Z
|
2022-03-31T02:56:33.000Z
|
Utils/Logistic.py
|
Golden-Slumber/AirFL-2nd
|
d362de22f41058cd793b4b2d6e94ef0c22ffc988
|
[
"MIT"
] | null | null | null |
Utils/Logistic.py
|
Golden-Slumber/AirFL-2nd
|
d362de22f41058cd793b4b2d6e94ef0c22ffc988
|
[
"MIT"
] | null | null | null |
"""
This module is used to calculate the global optimal solution for the logistic regression loss function
"""
import numpy
from scipy import optimize
from Utils.conjugate_gradient_method import conjugate_solver
import sys
from tqdm import tqdm
# import numba
home_dir = '../'
sys.path.append(home_dir)
class LogisticSolver:
    """Compute the global optimum of L2-regularized logistic regression.

    Objective, for labels y_i in {-1, +1} and an (n, d) sample matrix X:

        f(w) = (1/n) * sum_i log(1 + exp(-y_i * x_i^T w)) + (gamma / 2) * ||w||^2

    `x_mat` is the (n, d) feature matrix and `y_vec` the (n, 1) label vector.
    All weight vectors are handled with shape (d, 1).
    """

    def __init__(self, x_mat=None, y_vec=None):
        # Data may be attached later via fit(); only store when both are given.
        if (x_mat is not None) and (y_vec is not None):
            self.n, self.d = x_mat.shape
            self.x_mat = x_mat
            self.y_vec = y_vec

    def fit(self, x_mat, y_vec):
        """Attach (or replace) the training data."""
        self.n, self.d = x_mat.shape
        self.x_mat = x_mat
        self.y_vec = y_vec

    def obj_fun(self, w_vec, *args):
        """Regularized logistic loss at `w_vec`; args[0] is gamma."""
        gamma = args[0]
        z_vec = numpy.dot(self.x_mat, w_vec.reshape(self.d, 1))
        z_vec = numpy.multiply(z_vec, self.y_vec)
        l_vec = numpy.log(1 + numpy.exp(-z_vec))
        return numpy.mean(l_vec) + (gamma / 2) * (numpy.linalg.norm(w_vec) ** 2)

    def grad(self, w_vec, *args):
        """Gradient of obj_fun at `w_vec`, returned with shape (d, 1)."""
        gamma = args[0]
        z_vec = numpy.dot(self.x_mat, w_vec.reshape(self.d, 1))
        z_vec = numpy.multiply(z_vec, self.y_vec)
        # d/dz log(1 + exp(-z)) = -1 / (1 + exp(z)); chain rule brings in y.
        coef_vec = numpy.multiply(-1 / (1 + numpy.exp(z_vec)), self.y_vec)
        grad = numpy.dot(self.x_mat.T, coef_vec)
        return grad / self.n + gamma * w_vec.reshape(self.d, 1)

    def _margins(self, w_vec):
        """Return the (n, 1) margin vector z = y .* (X w)."""
        return numpy.multiply(numpy.dot(self.x_mat, w_vec), self.y_vec)

    def _sqrt_hessian_factor(self, w_vec):
        """(n, d) matrix A such that A^T A / n + gamma*I is the Hessian.

        Rows of X are scaled by sqrt(sigma(z) * sigma(-z)) = exp(z/2) / (1 + exp(z)).
        """
        exp_z_vec = numpy.exp(self._margins(w_vec))
        return numpy.multiply(self.x_mat, numpy.sqrt(exp_z_vec) / (1 + exp_z_vec))

    def _line_search(self, w_vec, p_vec, grad, grad_norm, obj_val, args, tol):
        """Backtracking (Armijo-style) line search along -p_vec; returns eta.

        Mirrors the original behavior: if no candidate step satisfies the
        decrease condition the smallest candidate is kept, and a vanishing
        gradient falls back to eta = 0.5.
        """
        eta_list = 1 / (2 ** numpy.arange(0, 10))
        eta = 0
        if grad_norm > tol:
            # Sufficient-decrease threshold: half the local linear decrease.
            pg = -0.5 * numpy.sum(numpy.multiply(p_vec, grad))
            for eta in eta_list:
                if self.obj_fun(w_vec - eta * p_vec, *args) < obj_val + eta * pg:
                    break
        else:
            eta = 0.5
        return eta

    def _condition_number(self, w_vec, gamma):
        """Condition number of the regularized Hessian at `w_vec` (printed)."""
        a_mat = self._sqrt_hessian_factor(w_vec)
        hessian = numpy.dot(a_mat.T, a_mat) / self.n + gamma * numpy.eye(self.d)
        sig = numpy.linalg.svd(hessian, compute_uv=False)
        cond_num = sig[0] / sig[-1]
        # BUGFIX: numpy scalars must be cast with str() before concatenation;
        # the original 'L: ' + sig[0] raised TypeError.
        print('L: ' + str(sig[0]) + ', u: ' + str(sig[-1]) + ', condition number: ' + str(cond_num))
        return cond_num

    def exact_newton(self, gamma, max_iter=50, tol=1e-15):
        """Damped Newton's method with an explicitly formed (d, d) Hessian.

        Memory-heavy for large d (original note: "NO ENOUGH MEMORY").
        Returns (w_vec, cond_num), where cond_num is the condition number of
        the regularized Hessian at the returned iterate.
        """
        w_vec = numpy.zeros((self.d, 1))
        eye_mat = gamma * numpy.eye(self.d)
        args = (gamma,)
        for t in range(max_iter):
            # BUGFIX: unpack args so grad() receives gamma, not the tuple.
            grad = self.grad(w_vec, *args)
            grad_norm = numpy.linalg.norm(grad)
            print('Logistic Solver: Iter ' + str(t) + ', L2 norm of gradient = ' + str(grad_norm))
            if grad_norm < tol:
                print('The change of objective value is smaller than ' + str(tol))
                break
            # BUGFIX: the Hessian of the mean logistic loss is
            #   X^T diag(sigma(z) * sigma(-z)) X / n + gamma * I.
            # The original built a dense (n, n) matrix 1/((1+e^z_i)(1+e^-z_j))
            # (wrong off-diagonal terms, missing 1/n) and added
            # gamma * (gamma * eye) = gamma^2 * I.
            z_vec = self._margins(w_vec)
            d_vec = 1 / numpy.multiply(1 + numpy.exp(z_vec), 1 + numpy.exp(-z_vec))
            hessian = numpy.dot(self.x_mat.T, numpy.multiply(d_vec, self.x_mat)) / self.n + eye_mat
            try:
                # solve() is cheaper and more stable than forming the inverse.
                p_vec = numpy.linalg.solve(hessian, grad)
            except numpy.linalg.LinAlgError:
                # Singular Hessian (cannot happen for gamma > 0): least-squares fallback.
                p_vec = numpy.dot(numpy.linalg.pinv(hessian), grad)
            obj_val = self.obj_fun(w_vec, *args)
            eta = self._line_search(w_vec, p_vec, grad, grad_norm, obj_val, args, tol)
            w_vec = w_vec - eta * p_vec
        # BUGFIX: computed after the loop, so an immediate convergence break no
        # longer leaves the Hessian (and hence the SVD input) undefined.
        cond_num = self._condition_number(w_vec, gamma)
        return w_vec, cond_num

    def conjugate_newton(self, gamma, max_iter=50, tol=1e-15):
        """Newton's method with the Newton system solved by conjugate gradient.

        Avoids forming the (d, d) Hessian: CG works directly with the (n, d)
        factor A / sqrt(n), where A^T A / n + gamma*I is the Hessian.
        Returns (w_vec, cond_num).
        """
        w_vec = numpy.zeros((self.d, 1))
        args = (gamma,)
        for t in range(max_iter):
            # BUGFIX: unpack args so grad() receives gamma, not the tuple.
            grad = self.grad(w_vec, *args)
            grad_norm = numpy.linalg.norm(grad)
            print('Logistic Solver: Iter ' + str(t) + ', L2 norm of gradient = ' + str(grad_norm))
            if grad_norm < tol:
                print('The change of objective value is smaller than ' + str(tol))
                break
            a_mat = self._sqrt_hessian_factor(w_vec)
            p_vec = conjugate_solver(a_mat / numpy.sqrt(self.n), grad, gamma, tol=tol, max_iter=100)
            obj_val = self.obj_fun(w_vec, *args)
            eta = self._line_search(w_vec, p_vec, grad, grad_norm, obj_val, args, tol)
            w_vec = w_vec - eta * p_vec
        cond_num = self._condition_number(w_vec, gamma)
        return w_vec, cond_num

    def conjugate_newton_simplified(self, gamma, max_iter=50, tol=1e-15):
        """Like conjugate_newton, but exp(z) is computed once per iteration and
        shared between the objective, the gradient and the Hessian factor to
        reduce per-iteration cost."""
        w_vec = numpy.zeros((self.d, 1))
        args = (gamma,)
        for t in range(max_iter):
            exp_z_vec = numpy.exp(self._margins(w_vec))
            # log(1 + exp(-z)) written via exp(z) to reuse the shared vector.
            loss = numpy.log(1 + 1 / exp_z_vec)
            obj_val = numpy.mean(loss) + (numpy.linalg.norm(w_vec) ** 2) * gamma / 2
            vec_for_grad = numpy.multiply(-1 / (1 + exp_z_vec), self.y_vec)
            grad = numpy.dot(self.x_mat.T, vec_for_grad) / self.n + gamma * w_vec
            grad_norm = numpy.linalg.norm(grad)
            print('Logistic Solver: Iter ' + str(t) + ', L2 norm of gradient = ' + str(grad_norm))
            if grad_norm < tol:
                print('The change of objective value is smaller than ' + str(tol))
                break
            a_mat = numpy.multiply(self.x_mat, numpy.sqrt(exp_z_vec) / (1 + exp_z_vec))
            p_vec = conjugate_solver(a_mat / numpy.sqrt(self.n), grad, gamma, tol=tol, max_iter=100)
            eta = self._line_search(w_vec, p_vec, grad, grad_norm, obj_val, args, tol)
            w_vec = numpy.subtract(w_vec, eta * p_vec)
        cond_num = self._condition_number(w_vec, gamma)
        return w_vec, cond_num

    def centralized_conjugate_newton_simplified(self, gamma, max_iter=50, tol=1e-15):
        """Run max_iter Newton-CG iterations, recording per-iteration metrics.

        Requires set_test_data() to have been called first (accuracy() reads
        self.x_test / self.y_test).  Returns (err_list, acc_list), each of
        length max_iter + 1 (the initial point is included); err_list holds
        suboptimality gaps measured against the final iterate's objective.
        """
        w_vec = numpy.zeros((self.d, 1))
        args = (gamma,)
        err_list = [self.obj_fun(w_vec, *args)]
        acc_list = [self.accuracy(w_vec)]
        for t in tqdm(range(max_iter)):
            exp_z_vec = numpy.exp(self._margins(w_vec))
            loss = numpy.log(1 + 1 / exp_z_vec)
            obj_val = numpy.mean(loss) + (numpy.linalg.norm(w_vec) ** 2) * gamma / 2
            vec_for_grad = numpy.multiply(-1 / (1 + exp_z_vec), self.y_vec)
            grad = numpy.dot(self.x_mat.T, vec_for_grad) / self.n + gamma * w_vec
            grad_norm = numpy.linalg.norm(grad)
            print('Logistic Solver: Iter ' + str(t) + ', L2 norm of gradient = ' + str(grad_norm))
            # No early break on convergence: every iteration is recorded so the
            # returned curves always have exactly max_iter + 1 points.
            a_mat = numpy.multiply(self.x_mat, numpy.sqrt(exp_z_vec) / (1 + exp_z_vec))
            p_vec = conjugate_solver(a_mat / numpy.sqrt(self.n), grad, gamma, tol=tol, max_iter=100)
            eta = self._line_search(w_vec, p_vec, grad, grad_norm, obj_val, args, tol)
            w_vec = numpy.subtract(w_vec, eta * p_vec)
            err_list.append(self.obj_fun(w_vec, *args))
            acc_list.append(self.accuracy(w_vec))
        opt_obj = self.obj_fun(w_vec, *args)
        # Convert the recorded objective values into suboptimality gaps.
        err_list = [err - opt_obj for err in err_list]
        print(err_list)
        print(acc_list)
        return err_list, acc_list

    def set_test_data(self, x_test, y_test):
        """Attach the held-out data used by accuracy()."""
        self.x_test = x_test
        self.y_test = y_test

    def accuracy(self, w):
        """Fraction of test samples whose predicted sign matches the label.

        y_test is expected to be (num, 1) with entries in {-1, +1}; a score of
        exactly 0 counts as a mismatch, matching the original sign comparison.
        """
        scores = numpy.dot(self.x_test, w.reshape(self.d, 1))
        return float(numpy.mean(numpy.sign(scores) == self.y_test))
| 38.564394
| 102
| 0.54366
| 1,533
| 10,181
| 3.369863
| 0.093933
| 0.038715
| 0.039295
| 0.025165
| 0.774681
| 0.763453
| 0.740805
| 0.729772
| 0.715254
| 0.692606
| 0
| 0.019504
| 0.335232
| 10,181
| 263
| 103
| 38.711027
| 0.743794
| 0.054808
| 0
| 0.693467
| 0
| 0
| 0.042791
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.050251
| false
| 0
| 0.025126
| 0
| 0.115578
| 0.060302
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
263dad54265c25f260e91028ab13d19124c2d3dd
| 195
|
py
|
Python
|
test_bot.py
|
AlexHLinS/EndeavourBot
|
7b42789c5aa0084ab920837a02a8fb918ded3dea
|
[
"Apache-2.0"
] | 1
|
2021-09-23T08:36:34.000Z
|
2021-09-23T08:36:34.000Z
|
test_bot.py
|
AlexHLinS/EndeavourBot
|
7b42789c5aa0084ab920837a02a8fb918ded3dea
|
[
"Apache-2.0"
] | null | null | null |
test_bot.py
|
AlexHLinS/EndeavourBot
|
7b42789c5aa0084ab920837a02a8fb918ded3dea
|
[
"Apache-2.0"
] | null | null | null |
from bot import botToken
import unittest
def test_bot():
    """The token read from the fixture file must equal the known test token."""
    loaded_token = botToken(token_file='test_bot.token').getToken()
    assert str(loaded_token) == '1111111111:TeStTaPiToKeN', 'Loading token from file is wrong'
| 24.375
| 72
| 0.738462
| 26
| 195
| 5.423077
| 0.653846
| 0.099291
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.060241
| 0.148718
| 195
| 7
| 73
| 27.857143
| 0.789157
| 0
| 0
| 0
| 0
| 0
| 0.358974
| 0.123077
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.2
| true
| 0
| 0.4
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
2687ab5977e2096daf830040e8b20502ebecf47f
| 86
|
py
|
Python
|
src/blog_vi/core/translations/providers/__init__.py
|
LikaloLLC/BlogVi
|
49a51d7b4ce1686e0784b064914365ea63bb1e38
|
[
"BSD-3-Clause"
] | null | null | null |
src/blog_vi/core/translations/providers/__init__.py
|
LikaloLLC/BlogVi
|
49a51d7b4ce1686e0784b064914365ea63bb1e38
|
[
"BSD-3-Clause"
] | 2
|
2021-07-02T14:31:09.000Z
|
2021-07-19T18:07:28.000Z
|
src/blog_vi/core/translations/providers/__init__.py
|
NikBelyaev/BlogVi
|
9244e1815e34472017203afeaaecb30fa5981d43
|
[
"BSD-3-Clause"
] | 2
|
2021-03-30T16:51:17.000Z
|
2021-05-03T22:22:41.000Z
|
from .deepl import DeeplTranslateProvider
from .google import GoogleTranslateProvider
| 28.666667
| 43
| 0.883721
| 8
| 86
| 9.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.093023
| 86
| 2
| 44
| 43
| 0.974359
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
cd02f4328cae3c134fc6e082641f16a4db4cdfb8
| 231
|
py
|
Python
|
tests/matchers/test_match_any.py
|
emou/pdbuddy
|
5708c44803e46d06aca02a0402ebaec0c5ae4634
|
[
"MIT"
] | null | null | null |
tests/matchers/test_match_any.py
|
emou/pdbuddy
|
5708c44803e46d06aca02a0402ebaec0c5ae4634
|
[
"MIT"
] | null | null | null |
tests/matchers/test_match_any.py
|
emou/pdbuddy
|
5708c44803e46d06aca02a0402ebaec0c5ae4634
|
[
"MIT"
] | null | null | null |
from __future__ import absolute_import
from pdbuddy.matchers.match_any import AnyMatcher
from pdbuddy.trace_context import TraceContext
def test_matches_any():
    """AnyMatcher must accept any TraceContext, whatever its contents."""
    context = TraceContext(object(), object(), object())
    matcher = AnyMatcher()
    assert matcher(context)
| 25.666667
| 67
| 0.805195
| 28
| 231
| 6.321429
| 0.607143
| 0.124294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.108225
| 231
| 8
| 68
| 28.875
| 0.859223
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 1
| 0.2
| true
| 0
| 0.6
| 0
| 0.8
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
cd365a584b6e7c79496310b1f0fbc840397e1733
| 71
|
py
|
Python
|
Main.py
|
durgarao641/TulsiClientLinux
|
7ce3d835841ccacc156e98376882c2c52b2c5f8c
|
[
"Apache-2.0"
] | null | null | null |
Main.py
|
durgarao641/TulsiClientLinux
|
7ce3d835841ccacc156e98376882c2c52b2c5f8c
|
[
"Apache-2.0"
] | null | null | null |
Main.py
|
durgarao641/TulsiClientLinux
|
7ce3d835841ccacc156e98376882c2c52b2c5f8c
|
[
"Apache-2.0"
] | null | null | null |
import os
# Launch the Tulsi client in the background, detached from the terminal:
# nohup keeps it alive after the parent exits, stdout/stderr are appended to
# nohup.out, and the trailing '&' returns control to this script immediately.
os.system("nohup python src/Tulsi.py >> nohup.out 2>&1 &")
| 17.75
| 59
| 0.661972
| 13
| 71
| 3.615385
| 0.846154
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033333
| 0.15493
| 71
| 3
| 60
| 23.666667
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0.647887
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
cd5be0db1fa3273c99de563af1dd986b4ce17d5d
| 20
|
py
|
Python
|
hstore_flattenfields/forms/__init__.py
|
modohash/django-hstore-flattenfields
|
09626a638b9ef85d28fa5bfef1b040f9926bb95b
|
[
"BSD-3-Clause"
] | 5
|
2015-09-18T16:35:56.000Z
|
2020-12-24T11:46:17.000Z
|
hstore_flattenfields/forms/__init__.py
|
modohash/django-hstore-flattenfields
|
09626a638b9ef85d28fa5bfef1b040f9926bb95b
|
[
"BSD-3-Clause"
] | 9
|
2020-02-11T22:01:06.000Z
|
2021-06-10T17:46:04.000Z
|
hstore_flattenfields/forms/__init__.py
|
modohash/django-hstore-flattenfields
|
09626a638b9ef85d28fa5bfef1b040f9926bb95b
|
[
"BSD-3-Clause"
] | 2
|
2015-10-20T10:21:30.000Z
|
2016-03-23T09:54:54.000Z
|
from forms import *
| 10
| 19
| 0.75
| 3
| 20
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 20
| 1
| 20
| 20
| 0.9375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
cd664ecd8de549a5fd3d3d80d32c2101bdea9c17
| 189
|
py
|
Python
|
views/events.py
|
Usamaiqbal789/Flask
|
b0a3c0be63fb88cfe020e116b37d73261c7bcab1
|
[
"MIT"
] | null | null | null |
views/events.py
|
Usamaiqbal789/Flask
|
b0a3c0be63fb88cfe020e116b37d73261c7bcab1
|
[
"MIT"
] | null | null | null |
views/events.py
|
Usamaiqbal789/Flask
|
b0a3c0be63fb88cfe020e116b37d73261c7bcab1
|
[
"MIT"
] | 1
|
2021-10-14T19:14:09.000Z
|
2021-10-14T19:14:09.000Z
|
from flask import Blueprint, render_template
# Blueprint grouping the event-related routes; presumably registered on the
# Flask app elsewhere -- confirm against the application factory.
events = Blueprint('events', __name__)
@events.route('/events', methods=['GET'])
def events_page():
    # GET /events: render the static events listing template.
    return render_template("events.html")
| 23.625
| 44
| 0.740741
| 23
| 189
| 5.782609
| 0.652174
| 0.210526
| 0.300752
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 189
| 8
| 45
| 23.625
| 0.791667
| 0
| 0
| 0
| 0
| 0
| 0.142105
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0.2
| 0.6
| 0.4
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
cd75c6fc123823d3d4ae9b06568c1922615dd9b9
| 96
|
py
|
Python
|
app/models/__init__.py
|
cyber-chuvash/todolist-API
|
44a1accdc0e19207283b724a645e87c22c0db882
|
[
"MIT"
] | null | null | null |
app/models/__init__.py
|
cyber-chuvash/todolist-API
|
44a1accdc0e19207283b724a645e87c22c0db882
|
[
"MIT"
] | null | null | null |
app/models/__init__.py
|
cyber-chuvash/todolist-API
|
44a1accdc0e19207283b724a645e87c22c0db882
|
[
"MIT"
] | null | null | null |
from .base import Base
from .user import User
from .todolist import List
from .card import Card
| 19.2
| 26
| 0.791667
| 16
| 96
| 4.75
| 0.4375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 96
| 4
| 27
| 24
| 0.95
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
cd87f26b19d8b9e330da62c15f4ea0636bcf46b1
| 1,483
|
py
|
Python
|
Core/core/views.py
|
Gelatito/4Fun-Games
|
c40bc2f0d9007a2fda0ed9ca5ea5f80adf7100cc
|
[
"MIT"
] | 1
|
2021-09-30T00:44:31.000Z
|
2021-09-30T00:44:31.000Z
|
Core/core/views.py
|
Gelatito/4Fun-Games
|
c40bc2f0d9007a2fda0ed9ca5ea5f80adf7100cc
|
[
"MIT"
] | null | null | null |
Core/core/views.py
|
Gelatito/4Fun-Games
|
c40bc2f0d9007a2fda0ed9ca5ea5f80adf7100cc
|
[
"MIT"
] | null | null | null |
from django.contrib.auth.decorators import login_required
from django.shortcuts import render
from django.shortcuts import render
from .models import Pag1juegos,Aventura,Lucha,Rol,MundoAbierto
from django.http import HttpResponse
# Create your views here.
def home(request):
    """Render the landing page with every Pag1juegos entry."""
    context = {'juegos': Pag1juegos.objects.all()}
    return render(request, 'core/home.html', context)
def Adventures(request):
    """Render the adventure-games page with every Aventura entry."""
    context = {'juegosad': Aventura.objects.all()}
    return render(request, 'core/Adventures.html', context)
def Luchas(request):
    """Render the fighting-games page with every Lucha entry."""
    context = {'juegoslu': Lucha.objects.all()}
    return render(request, 'core/Luchas.html', context)
def rol(request):
    """Render the role-playing-games page with every Rol entry."""
    context = {'juegosro': Rol.objects.all()}
    return render(request, 'core/rol.html', context)
def openWorld(request):
    """Render the open-world-games page with every MundoAbierto entry."""
    context = {'juegosmu': MundoAbierto.objects.all()}
    return render(request, 'core/openWorld.html', context)
def principal(request):
    # NOTE(review): django.shortcuts.render() requires a template name as its
    # second argument, so this stub (and each one below) raises TypeError when
    # hit.  These look like unfinished placeholders -- confirm the intended
    # templates before wiring these views into urls.py.
    return render(request)
def Lista(request):
    # NOTE(review): placeholder -- see the note on principal() above.
    return render(request)
def Modificar(request):
    # NOTE(review): placeholder -- see the note on principal() above.
    return render(request)
def Borrar(request):
    # NOTE(review): placeholder -- see the note on principal() above.
    return render(request)
def CAT(request):
    # NOTE(review): placeholder -- see the note on principal() above.
    return render(request)
def ModificarCAT(request):
    # NOTE(review): placeholder -- see the note on principal() above.
    return render(request)
def ListaCAT(request):
    # NOTE(review): placeholder -- see the note on principal() above.
    return render(request)
| 19.513158
| 63
| 0.651382
| 162
| 1,483
| 5.95679
| 0.296296
| 0.149223
| 0.236269
| 0.188601
| 0.248705
| 0.068394
| 0
| 0
| 0
| 0
| 0
| 0.001779
| 0.242077
| 1,483
| 75
| 64
| 19.773333
| 0.856762
| 0.015509
| 0
| 0.285714
| 0
| 0
| 0.082361
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.244898
| false
| 0
| 0.102041
| 0.142857
| 0.591837
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
cd9412792c27c2923536d166280b326a7d0e2e78
| 114
|
py
|
Python
|
WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/mini-scripts/python_Multinomial_Distribution.txt.py
|
webdevhub42/Lambda
|
b04b84fb5b82fe7c8b12680149e25ae0d27a0960
|
[
"MIT"
] | 5
|
2021-06-02T23:44:25.000Z
|
2021-12-27T16:21:57.000Z
|
WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/mini-scripts/python_Multinomial_Distribution.txt.py
|
webdevhub42/Lambda
|
b04b84fb5b82fe7c8b12680149e25ae0d27a0960
|
[
"MIT"
] | 22
|
2021-05-31T01:33:25.000Z
|
2021-10-18T18:32:39.000Z
|
WEEKS/CD_Sata-Structures/_RESOURCES/python-prac/mini-scripts/python_Multinomial_Distribution.txt.py
|
webdevhub42/Lambda
|
b04b84fb5b82fe7c8b12680149e25ae0d27a0960
|
[
"MIT"
] | 3
|
2021-06-19T03:37:47.000Z
|
2021-08-31T00:49:51.000Z
|
from numpy import random
# Draw one multinomial sample: 6 trials over a fair six-sided distribution.
fair_die = [1 / 6] * 6
x = random.multinomial(n=6, pvals=fair_die)
print(x)
| 19
| 77
| 0.561404
| 24
| 114
| 2.666667
| 0.458333
| 0.1875
| 0.234375
| 0.3125
| 0.1875
| 0.1875
| 0.1875
| 0.1875
| 0.1875
| 0.1875
| 0
| 0.151163
| 0.245614
| 114
| 5
| 78
| 22.8
| 0.593023
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
26ed3e1675a119db13ed696fded3e86bd70a8662
| 109
|
py
|
Python
|
model/__init__.py
|
KazukiChiyo/localization
|
55df875d72519d6111e3dd37c7f19ef8cbf7bb9c
|
[
"MIT"
] | null | null | null |
model/__init__.py
|
KazukiChiyo/localization
|
55df875d72519d6111e3dd37c7f19ef8cbf7bb9c
|
[
"MIT"
] | null | null | null |
model/__init__.py
|
KazukiChiyo/localization
|
55df875d72519d6111e3dd37c7f19ef8cbf7bb9c
|
[
"MIT"
] | null | null | null |
from .base import Anchor, Localizer, BaggingRegressor
# Explicit public API of the model package (mirrors the import above).
__all__ = ['Anchor', 'Localizer', 'BaggingRegressor']
| 27.25
| 53
| 0.761468
| 10
| 109
| 7.9
| 0.7
| 0.379747
| 0.78481
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.110092
| 109
| 3
| 54
| 36.333333
| 0.814433
| 0
| 0
| 0
| 0
| 0
| 0.284404
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
f81711303a76cdfe3561b2df1f04d3bfac24317c
| 190
|
py
|
Python
|
brainspace/vtk_interface/__init__.py
|
josemariamoreira/BrainSpace
|
d7e8e65c6463a81146e7fcfcca902feef04d329d
|
[
"BSD-3-Clause"
] | null | null | null |
brainspace/vtk_interface/__init__.py
|
josemariamoreira/BrainSpace
|
d7e8e65c6463a81146e7fcfcca902feef04d329d
|
[
"BSD-3-Clause"
] | null | null | null |
brainspace/vtk_interface/__init__.py
|
josemariamoreira/BrainSpace
|
d7e8e65c6463a81146e7fcfcca902feef04d329d
|
[
"BSD-3-Clause"
] | null | null | null |
from .wrappers import wrap_vtk
from .pipeline import serial_connect, to_data, get_output
# Explicit public API of the vtk_interface package (re-exports from the
# wrappers and pipeline submodules).
__all__ = ['serial_connect',
           'to_data',
           'get_output',
           'wrap_vtk']
| 21.111111
| 57
| 0.636842
| 23
| 190
| 4.73913
| 0.565217
| 0.12844
| 0.275229
| 0.348624
| 0.513761
| 0.513761
| 0
| 0
| 0
| 0
| 0
| 0
| 0.268421
| 190
| 8
| 58
| 23.75
| 0.784173
| 0
| 0
| 0
| 0
| 0
| 0.205263
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
f87afdbfc94714407b46cdc0d73a41ba2ab3aa68
| 188
|
py
|
Python
|
tgdm/__init__.py
|
fankib/TGDM
|
baaacb4a2f267fbb8dedff2e001ebdb84366ca6f
|
[
"MIT"
] | null | null | null |
tgdm/__init__.py
|
fankib/TGDM
|
baaacb4a2f267fbb8dedff2e001ebdb84366ca6f
|
[
"MIT"
] | null | null | null |
tgdm/__init__.py
|
fankib/TGDM
|
baaacb4a2f267fbb8dedff2e001ebdb84366ca6f
|
[
"MIT"
] | null | null | null |
#from .tgdm_base import TGDMBase
from .tgdm import TGDM
from .tgdm_hd import TGDM_HD, TGDM_HDC
from .tgdm_t1t2 import TGDM_T1T2
from .pytorch_sgd import PYTORCH_SGD_STEP, PYTORCH_SGD_DEC
| 26.857143
| 58
| 0.835106
| 33
| 188
| 4.424242
| 0.363636
| 0.219178
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024242
| 0.12234
| 188
| 6
| 59
| 31.333333
| 0.860606
| 0.164894
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
f892dbbd3882dc44e0867b89d5f99f50ba743ee0
| 157
|
py
|
Python
|
flowjs/signals.py
|
nelsonmonteiro/django-flowjs
|
7f2c88df12a9e8f19bb7a3fa21a628d07849d61e
|
[
"MIT"
] | 16
|
2015-01-02T16:41:17.000Z
|
2022-03-09T00:15:56.000Z
|
flowjs/signals.py
|
nelsonmonteiro/django-flowjs
|
7f2c88df12a9e8f19bb7a3fa21a628d07849d61e
|
[
"MIT"
] | 2
|
2015-10-03T18:00:20.000Z
|
2016-08-02T07:07:21.000Z
|
flowjs/signals.py
|
nelsonmonteiro/django-flowjs
|
7f2c88df12a9e8f19bb7a3fa21a628d07849d61e
|
[
"MIT"
] | 18
|
2015-01-07T14:46:19.000Z
|
2018-07-22T22:56:05.000Z
|
import django.dispatch
# Custom signals for the flowjs upload workflow.  Semantics are inferred from
# the names (file fully assembled / chunk upload failed / final join failed);
# confirm against the code that sends them.
file_is_ready = django.dispatch.Signal()
file_upload_failed = django.dispatch.Signal()
file_joining_failed = django.dispatch.Signal()
| 31.4
| 46
| 0.828025
| 21
| 157
| 5.904762
| 0.47619
| 0.451613
| 0.483871
| 0.387097
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.070064
| 157
| 5
| 46
| 31.4
| 0.849315
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 0.25
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
f8cde8faf8a82c62bf47ddf2faf44f2099f5a8a3
| 109
|
py
|
Python
|
python/testData/optimizeImports/commentsInsideParenthesesInCombinedFromImports.after.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 2
|
2019-04-28T07:48:50.000Z
|
2020-12-11T14:18:08.000Z
|
python/testData/optimizeImports/commentsInsideParenthesesInCombinedFromImports.after.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 173
|
2018-07-05T13:59:39.000Z
|
2018-08-09T01:12:03.000Z
|
python/testData/optimizeImports/commentsInsideParenthesesInCombinedFromImports.after.py
|
truthiswill/intellij-community
|
fff88cfb0dc168eea18ecb745d3e5b93f57b0b95
|
[
"Apache-2.0"
] | 2
|
2020-03-15T08:57:37.000Z
|
2020-04-07T04:48:14.000Z
|
from datetime import timedelta as name, time as bbb, datetime as ccc # bcc; cbc; abc
print(name, bbb, ccc)
| 27.25
| 85
| 0.724771
| 19
| 109
| 4.157895
| 0.684211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.192661
| 109
| 3
| 86
| 36.333333
| 0.897727
| 0.119266
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
3e4d1a17dd83dc05c813e4182435d4b40d426f77
| 324
|
py
|
Python
|
psi/token/api.py
|
NCRAR/psiexperiment
|
c3f8580b2b155ce42ebb936019d862c4343b545c
|
[
"MIT"
] | 5
|
2016-05-26T13:46:00.000Z
|
2020-03-03T13:07:47.000Z
|
psi/token/api.py
|
NCRAR/psiexperiment
|
c3f8580b2b155ce42ebb936019d862c4343b545c
|
[
"MIT"
] | 2
|
2018-04-17T15:06:35.000Z
|
2019-03-25T18:13:10.000Z
|
psi/token/api.py
|
NCRAR/psiexperiment
|
c3f8580b2b155ce42ebb936019d862c4343b545c
|
[
"MIT"
] | 3
|
2020-04-17T15:03:36.000Z
|
2022-01-14T23:19:29.000Z
|
import enaml
# enaml.imports() installs an import hook so the .enaml declarative modules
# below can be imported like regular Python modules.
with enaml.imports():
    from .primitives import (
        BandlimitedNoise, BandlimitedNoiseFactory, Chirp, ChirpFactory,
        Cos2Envelope, Cos2EnvelopeFactory, Gate, GateFactory, SAMEnvelope,
        SAMEnvelopeFactory, Silence, SilenceFactory, SquareWave,
        SquareWaveFactory, Tone, ToneFactory)
| 36
| 74
| 0.743827
| 24
| 324
| 10.041667
| 0.916667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007605
| 0.188272
| 324
| 8
| 75
| 40.5
| 0.908745
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.428571
| 0
| 0.428571
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
3e74b801a73e5c8b6d26c87462fd3f312ccd0886
| 222
|
py
|
Python
|
tests/calculations/test_geochem.py
|
drew026/geo_calcs
|
e2d5aba7e5c7fe6cef81adb45978d3fea874868e
|
[
"MIT"
] | 1
|
2021-11-26T04:32:09.000Z
|
2021-11-26T04:32:09.000Z
|
tests/calculations/test_geochem.py
|
drew026/geo_calcs
|
e2d5aba7e5c7fe6cef81adb45978d3fea874868e
|
[
"MIT"
] | null | null | null |
tests/calculations/test_geochem.py
|
drew026/geo_calcs
|
e2d5aba7e5c7fe6cef81adb45978d3fea874868e
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
"""Tests for `geochem` subpackage."""
import pytest
from geo_calcs.calculations.geochem import *
def test_get_atomic_weight():
    """SiO2 (one Si, two O) must weigh exactly 60.0843 u."""
    silica = ["Si", "O", "O"]
    assert get_atomic_weight(silica) == 60.0843
| 22.2
| 57
| 0.666667
| 30
| 222
| 4.733333
| 0.8
| 0.126761
| 0.211268
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032787
| 0.175676
| 222
| 10
| 58
| 22.2
| 0.743169
| 0.234234
| 0
| 0
| 0
| 0
| 0.025641
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| true
| 0
| 0.5
| 0
| 0.75
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
3e7c306972b2c6543cfa0f287d553ff77e5b6677
| 18
|
py
|
Python
|
GoodBye.py
|
Silvio622/pands-problems-2020
|
7a9c93b513c881eaf80f20ed4e6e7d2969883596
|
[
"MIT"
] | null | null | null |
GoodBye.py
|
Silvio622/pands-problems-2020
|
7a9c93b513c881eaf80f20ed4e6e7d2969883596
|
[
"MIT"
] | null | null | null |
GoodBye.py
|
Silvio622/pands-problems-2020
|
7a9c93b513c881eaf80f20ed4e6e7d2969883596
|
[
"MIT"
] | null | null | null |
print("Good Bye")
| 9
| 17
| 0.666667
| 3
| 18
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 18
| 1
| 18
| 18
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
3e92827fb0a41de11c30fed6a9450ccfd0de8690
| 62
|
py
|
Python
|
core/src/Peer/__init__.py
|
mkg20001/Fuzium
|
d424cd42a92272563fcba2290028c036cb7ce4a1
|
[
"MIT"
] | null | null | null |
core/src/Peer/__init__.py
|
mkg20001/Fuzium
|
d424cd42a92272563fcba2290028c036cb7ce4a1
|
[
"MIT"
] | null | null | null |
core/src/Peer/__init__.py
|
mkg20001/Fuzium
|
d424cd42a92272563fcba2290028c036cb7ce4a1
|
[
"MIT"
] | null | null | null |
from Peer import Peer
from PeerHashfield import PeerHashfield
| 20.666667
| 39
| 0.870968
| 8
| 62
| 6.75
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 62
| 2
| 40
| 31
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
e4a29add88a990b168f41a3863aa1becf96aed36
| 205
|
py
|
Python
|
lightnet/__init__.py
|
rversaw/lightnet
|
a98865abc83dbb9965b75ce0c5dedc69ca86cf1d
|
[
"MIT"
] | 345
|
2017-11-22T03:43:42.000Z
|
2021-05-18T16:07:12.000Z
|
lightnet/__init__.py
|
sachadee/lightnet
|
e7283d95367ed2288a26f2744ad015f6dc0f17bd
|
[
"MIT"
] | 14
|
2017-11-23T10:50:36.000Z
|
2018-09-24T09:50:00.000Z
|
lightnet/__init__.py
|
sachadee/lightnet
|
e7283d95367ed2288a26f2744ad015f6dc0f17bd
|
[
"MIT"
] | 51
|
2017-11-22T08:29:21.000Z
|
2022-01-29T22:42:55.000Z
|
# coding: utf8
from __future__ import unicode_literals
from .lightnet import Network, Image, BoxLabels
from .about import __version__
def load(name, path=None):
    """Convenience wrapper: load a pretrained network via Network.load."""
    network = Network.load(name, path=path)
    return network
| 20.5
| 47
| 0.77561
| 28
| 205
| 5.357143
| 0.678571
| 0.106667
| 0.16
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005714
| 0.146341
| 205
| 9
| 48
| 22.777778
| 0.851429
| 0.058537
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.6
| 0.2
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
e4ca1d3ef93b9f360a62d90b989edf6e704baefd
| 53
|
py
|
Python
|
examples/get-started/job2.py
|
hongkunyoo/jupyterflow
|
b4391529cf7c27adb97a272403c322e75adbffae
|
[
"BSD-3-Clause"
] | 66
|
2020-11-12T12:41:58.000Z
|
2022-03-21T15:46:56.000Z
|
examples/get-started/job2.py
|
hongkunyoo/jupyterflow
|
b4391529cf7c27adb97a272403c322e75adbffae
|
[
"BSD-3-Clause"
] | 4
|
2021-03-08T11:44:46.000Z
|
2022-03-29T13:24:26.000Z
|
examples/get-started/job2.py
|
hongkunyoo/jupyterflow
|
b4391529cf7c27adb97a272403c322e75adbffae
|
[
"BSD-3-Clause"
] | 9
|
2021-03-03T10:43:15.000Z
|
2022-03-31T01:13:16.000Z
|
# job2.py
import sys
# Echo a greeting containing the first CLI argument.
# NOTE(review): raises IndexError when the script is run without arguments.
print('world %s!' % sys.argv[1])
| 17.666667
| 32
| 0.641509
| 10
| 53
| 3.4
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.043478
| 0.132075
| 53
| 3
| 32
| 17.666667
| 0.695652
| 0.132075
| 0
| 0
| 0
| 0
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
e4e4825fad09202de9fb4dd27c183168d78b45cf
| 88
|
py
|
Python
|
miso/modules/seq2seq_encoders/__init__.py
|
pitrack/arglinking
|
5f4677efe580e2d22915d66be26ceff331a3b2c2
|
[
"Apache-2.0"
] | 21
|
2020-07-09T14:01:26.000Z
|
2022-02-04T20:49:23.000Z
|
miso/modules/seq2seq_encoders/__init__.py
|
pitrack/arglinking
|
5f4677efe580e2d22915d66be26ceff331a3b2c2
|
[
"Apache-2.0"
] | 5
|
2020-07-30T15:08:01.000Z
|
2022-03-02T20:06:40.000Z
|
miso/modules/seq2seq_encoders/__init__.py
|
pitrack/arglinking
|
5f4677efe580e2d22915d66be26ceff331a3b2c2
|
[
"Apache-2.0"
] | 4
|
2020-08-14T13:49:45.000Z
|
2021-07-28T01:37:44.000Z
|
from miso.modules.seq2seq_encoders.pytorch_seq2seq_wrapper import PytorchSeq2SeqWrapper
| 44
| 87
| 0.920455
| 10
| 88
| 7.8
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035714
| 0.045455
| 88
| 1
| 88
| 88
| 0.892857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
90144e69db5b3f5e0c3e28d447852c18d0d98f6a
| 71
|
py
|
Python
|
zarya/manifolds/__init__.py
|
kefirski/zarya
|
db1f84cef1c4ffa28aa7adb5dea6cf9f2ebf2f84
|
[
"MIT"
] | null | null | null |
zarya/manifolds/__init__.py
|
kefirski/zarya
|
db1f84cef1c4ffa28aa7adb5dea6cf9f2ebf2f84
|
[
"MIT"
] | null | null | null |
zarya/manifolds/__init__.py
|
kefirski/zarya
|
db1f84cef1c4ffa28aa7adb5dea6cf9f2ebf2f84
|
[
"MIT"
] | null | null | null |
from .manifold import Manifold
from .poincare_ball import PoincareBall
| 23.666667
| 39
| 0.859155
| 9
| 71
| 6.666667
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.112676
| 71
| 2
| 40
| 35.5
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
902d6ddb82220c418b31b613afe5a1ef5dd47065
| 790
|
py
|
Python
|
cowin_project/controllers/controllers.py
|
shangdinvxu/cowinaddons
|
4e9d69894cd80e5427ccc9bac6c37b8bd67cadd0
|
[
"MIT"
] | null | null | null |
cowin_project/controllers/controllers.py
|
shangdinvxu/cowinaddons
|
4e9d69894cd80e5427ccc9bac6c37b8bd67cadd0
|
[
"MIT"
] | null | null | null |
cowin_project/controllers/controllers.py
|
shangdinvxu/cowinaddons
|
4e9d69894cd80e5427ccc9bac6c37b8bd67cadd0
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
# from odoo import http
# class CowinProject(http.Controller):
# @http.route('/cowin_project/cowin_project/', auth='public')
# def index(self, **kw):
# return "Hello, world"
# @http.route('/cowin_project/cowin_project/objects/', auth='public')
# def list(self, **kw):
# return http.request.render('cowin_project.listing', {
# 'root': '/cowin_project/cowin_project',
# 'objects': http.request.env['cowin_project.cowin_project'].search([]),
# })
# @http.route('/cowin_project/cowin_project/objects/<model("cowin_project.cowin_project"):obj>/', auth='public')
# def object(self, obj, **kw):
# return http.request.render('cowin_project.object', {
# 'object': obj
# })
| 39.5
| 116
| 0.607595
| 89
| 790
| 5.235955
| 0.370787
| 0.360515
| 0.218884
| 0.309013
| 0.467811
| 0.401288
| 0.330472
| 0
| 0
| 0
| 0
| 0.001592
| 0.205063
| 790
| 20
| 117
| 39.5
| 0.740446
| 0.897468
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
903033eefd0a3eb9d1acb75cce84f1cba870cef0
| 72
|
py
|
Python
|
landlab/components/fracture_grid/__init__.py
|
cctrunz/landlab
|
4e4ef12f4bae82bc5194f1dcc9af8ff1a7c20939
|
[
"MIT"
] | null | null | null |
landlab/components/fracture_grid/__init__.py
|
cctrunz/landlab
|
4e4ef12f4bae82bc5194f1dcc9af8ff1a7c20939
|
[
"MIT"
] | 1
|
2016-03-16T02:34:08.000Z
|
2016-04-20T19:31:30.000Z
|
landlab/components/fracture_grid/__init__.py
|
cctrunz/landlab
|
4e4ef12f4bae82bc5194f1dcc9af8ff1a7c20939
|
[
"MIT"
] | null | null | null |
from .fracture_grid import make_frac_grid
__all__ = ["make_frac_grid"]
| 18
| 41
| 0.805556
| 11
| 72
| 4.454545
| 0.636364
| 0.326531
| 0.489796
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 72
| 3
| 42
| 24
| 0.765625
| 0
| 0
| 0
| 0
| 0
| 0.194444
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
903999c3121dd1b6662b42efde7b3bef500fa90a
| 305
|
py
|
Python
|
tests/common/test_generate_training_data.py
|
chanind/hanzi-font-deconstructor
|
ce41b2a5c0e66b8a83d6c734678446d1d32a18b7
|
[
"MIT"
] | null | null | null |
tests/common/test_generate_training_data.py
|
chanind/hanzi-font-deconstructor
|
ce41b2a5c0e66b8a83d6c734678446d1d32a18b7
|
[
"MIT"
] | null | null | null |
tests/common/test_generate_training_data.py
|
chanind/hanzi-font-deconstructor
|
ce41b2a5c0e66b8a83d6c734678446d1d32a18b7
|
[
"MIT"
] | null | null | null |
from hanzi_font_deconstructor.common.generate_training_data import (
get_training_input_and_mask_tensors,
)
def test_get_training_input_and_mask_tensors():
input, mask = get_training_input_and_mask_tensors(size_px=256)
assert input.shape == (1, 256, 256)
assert mask.shape == (256, 256)
| 30.5
| 68
| 0.780328
| 45
| 305
| 4.822222
| 0.488889
| 0.152074
| 0.221198
| 0.262673
| 0.414747
| 0.414747
| 0
| 0
| 0
| 0
| 0
| 0.060606
| 0.134426
| 305
| 9
| 69
| 33.888889
| 0.761364
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.285714
| 1
| 0.142857
| true
| 0
| 0.142857
| 0
| 0.285714
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
5fb7759dde94197c98d174bfe895d8c03a5a3321
| 38
|
py
|
Python
|
Testes/teste13.py
|
JefferMarcelino/Python
|
bf2ebf4f110b1fa1a6226cb98cd16ce18108eb03
|
[
"MIT"
] | 2
|
2021-01-27T19:30:02.000Z
|
2022-01-10T20:34:47.000Z
|
Testes/teste13.py
|
JefferMarcelino/Python
|
bf2ebf4f110b1fa1a6226cb98cd16ce18108eb03
|
[
"MIT"
] | null | null | null |
Testes/teste13.py
|
JefferMarcelino/Python
|
bf2ebf4f110b1fa1a6226cb98cd16ce18108eb03
|
[
"MIT"
] | null | null | null |
import os
os.startfile("teste08.py")
| 9.5
| 26
| 0.736842
| 6
| 38
| 4.666667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 0.105263
| 38
| 3
| 27
| 12.666667
| 0.764706
| 0
| 0
| 0
| 0
| 0
| 0.263158
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
5fc65bf6e1519b4ac9b196a58594c5549b582a63
| 95
|
py
|
Python
|
Types/Enums/Segmentation_Mode.py
|
SBCV/PythonUtility
|
0062e1e60dc151776b963d13bc4c1763eb90d333
|
[
"MIT"
] | 2
|
2019-02-20T14:56:13.000Z
|
2020-05-19T12:31:53.000Z
|
Types/Enums/Segmentation_Mode.py
|
SBCV/PythonUtility
|
0062e1e60dc151776b963d13bc4c1763eb90d333
|
[
"MIT"
] | null | null | null |
Types/Enums/Segmentation_Mode.py
|
SBCV/PythonUtility
|
0062e1e60dc151776b963d13bc4c1763eb90d333
|
[
"MIT"
] | 1
|
2021-01-07T08:32:07.000Z
|
2021-01-07T08:32:07.000Z
|
class SegmentationMode:
category_mode = 'CATEGORY_MODE'
instance_mode = 'INSTANCE_MODE'
| 31.666667
| 35
| 0.768421
| 10
| 95
| 6.9
| 0.5
| 0.347826
| 0.463768
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.157895
| 95
| 3
| 36
| 31.666667
| 0.8625
| 0
| 0
| 0
| 0
| 0
| 0.270833
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
39bf4515845c4283f4dbf37ca9019348c332d65c
| 112
|
py
|
Python
|
wsgi.py
|
ml-workgroup/image-segmentation-study-manager
|
1f5b0bcf707ebbcc9d22cbd18761f1f7521f09ae
|
[
"BSD-2-Clause"
] | null | null | null |
wsgi.py
|
ml-workgroup/image-segmentation-study-manager
|
1f5b0bcf707ebbcc9d22cbd18761f1f7521f09ae
|
[
"BSD-2-Clause"
] | 11
|
2020-02-14T14:02:34.000Z
|
2020-02-24T09:29:41.000Z
|
wsgi.py
|
ml-workgroup/image-segmentation-study-manager
|
1f5b0bcf707ebbcc9d22cbd18761f1f7521f09ae
|
[
"BSD-2-Clause"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import os
from app import create_app
application = create_app()
| 14
| 26
| 0.6875
| 17
| 112
| 4.411765
| 0.764706
| 0.24
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.010638
| 0.160714
| 112
| 7
| 27
| 16
| 0.787234
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
39bfc7218cf71f619dcc4c6487dc7eecb9091b03
| 25
|
py
|
Python
|
gpio.py
|
icasper/rpi4iotest
|
98ec033807bfda0fab952bc17005144b0e771e47
|
[
"MIT"
] | null | null | null |
gpio.py
|
icasper/rpi4iotest
|
98ec033807bfda0fab952bc17005144b0e771e47
|
[
"MIT"
] | null | null | null |
gpio.py
|
icasper/rpi4iotest
|
98ec033807bfda0fab952bc17005144b0e771e47
|
[
"MIT"
] | null | null | null |
from gpiozero import LED
| 12.5
| 24
| 0.84
| 4
| 25
| 5.25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.16
| 25
| 1
| 25
| 25
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
39ce94d1fd7f068dea0f364bada7e934d4273751
| 1,574
|
py
|
Python
|
app.py
|
rmudingay/backup
|
26a19644b9f09268f2b2c1b1df2e17d2d8e43f83
|
[
"BSD-2-Clause"
] | 1
|
2019-07-04T03:50:22.000Z
|
2019-07-04T03:50:22.000Z
|
app.py
|
rmudingay/backup
|
26a19644b9f09268f2b2c1b1df2e17d2d8e43f83
|
[
"BSD-2-Clause"
] | null | null | null |
app.py
|
rmudingay/backup
|
26a19644b9f09268f2b2c1b1df2e17d2d8e43f83
|
[
"BSD-2-Clause"
] | null | null | null |
from flask import Flask, request, render_template, session, url_for
#from flask_simpleldap import LDAP
app = Flask(__name__)
@app.route('/')
def index():
login = request.form.get('username')
return render_template('home.html', login=login)
@app.route('/login')
def login():
return render_template('login.html')
@app.route('/jobs')
def jobs():
return render_template('jobs.html')
@app.route('/addjob')
def addjobs():
return render_template('forms/add_jobs.html')
@app.route('/addlocation')
def addlocation():
return render_template('forms/add_location.html')
@app.route('/policy')
def policy():
return render_template('policy.html')
@app.route('/addpolicy')
def addpolicy():
return render_template('forms/add_policy.html')
@app.route('/scripts')
def scripts():
return render_template('scripts.html')
@app.route('/addscript')
def addscript():
return render_template('forms/add_script.html')
@app.route('/events')
def backup():
return render_template('events.html')
@app.route('/settings', methods = ['POST', 'GET'])
def config():
login = request.form.get('username')
return render_template('settings.html', login=login)
@app.route('/sshkeys')
def sshkey():
return render_template('sshkeys.html')
@app.route('/addkey')
def addkey():
return render_template('forms/add_key.html')
@app.route('/accounts')
def account():
return render_template('accounts.html')
@app.route('/adduser')
def adduser():
return render_template('forms/add_user.html')
if __name__ == '__main__':
app.run(debug=True)
| 22.485714
| 67
| 0.700762
| 203
| 1,574
| 5.256158
| 0.26601
| 0.209934
| 0.281162
| 0.140581
| 0.286785
| 0.088097
| 0.088097
| 0.088097
| 0
| 0
| 0
| 0
| 0.124524
| 1,574
| 69
| 68
| 22.811594
| 0.774311
| 0.020966
| 0
| 0.039216
| 0
| 0
| 0.237662
| 0.042208
| 0
| 0
| 0
| 0
| 0
| 1
| 0.294118
| false
| 0
| 0.019608
| 0.254902
| 0.607843
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
8425d50da1c3a7f0dd5a8e7d48ca58a54d31cc3d
| 119
|
py
|
Python
|
torchpack/__init__.py
|
hellock/torchpack
|
8d7363ff683c8aec5af57e5d53518a22c7e0a807
|
[
"MIT"
] | 25
|
2017-12-16T09:53:14.000Z
|
2021-11-26T14:19:38.000Z
|
torchpack/__init__.py
|
nd1511/torchpack
|
8d7363ff683c8aec5af57e5d53518a22c7e0a807
|
[
"MIT"
] | null | null | null |
torchpack/__init__.py
|
nd1511/torchpack
|
8d7363ff683c8aec5af57e5d53518a22c7e0a807
|
[
"MIT"
] | 9
|
2018-01-17T14:08:05.000Z
|
2021-08-31T14:48:25.000Z
|
from .config import *
from .io import *
from .parallel import *
from .runner import *
from .version import __version__
| 19.833333
| 32
| 0.756303
| 16
| 119
| 5.375
| 0.4375
| 0.465116
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.168067
| 119
| 5
| 33
| 23.8
| 0.868687
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
ffbe4ec906f51d9589691a3999a5031f11a5769b
| 23,843
|
py
|
Python
|
classes/SymEntry.py
|
nanohedra/nanohedra
|
3921b7f5ce10e0e3393c3b675bb97ccbecb96663
|
[
"MIT"
] | 2
|
2020-12-07T00:38:32.000Z
|
2021-05-13T19:36:17.000Z
|
classes/SymEntry.py
|
nanohedra/nanohedra
|
3921b7f5ce10e0e3393c3b675bb97ccbecb96663
|
[
"MIT"
] | null | null | null |
classes/SymEntry.py
|
nanohedra/nanohedra
|
3921b7f5ce10e0e3393c3b675bb97ccbecb96663
|
[
"MIT"
] | 1
|
2021-05-13T19:36:18.000Z
|
2021-05-13T19:36:18.000Z
|
# Copyright 2020 Joshua Laniado and Todd O. Yeates.
__author__ = "Joshua Laniado and Todd O. Yeates"
__copyright__ = "Copyright 2020, Nanohedra"
__version__ = "1.0"
# SYMMETRY COMBINATION MATERIAL TABLE (T.O.Y and J.L, 2020)
sym_comb_dict = {
1: [1, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 2, '<0,0,0>', 'C2', 1, ['r:<0,0,1,c>', 't:<0,0,d>'], 1, '<0,0,0>', 'D2', 'D2', 0, 'N/A', 4, 2],
2: [2, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<e,0,0>', 'C3', 2, ['r:<0,0,1,c>'], 1, '<e,0.577350*e,0>', 'C6', 'p6', 2, '(2*e, 2*e), 120', 4, 6],
3: [3, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 2, '<0,0,0>', 'C3', 2, ['r:<0,0,1,c>', 't:<0,0,d>'], 1, '<0,0,0>', 'D3', 'D3', 0, 'N/A', 4, 2],
4: [4, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 6, '<e,0,0>', 'C3', 2, ['r:<0,0,1,c>', 't:<0,0,d>'], 1, '<0,0,0>', 'D3', 'p312', 2, '(2*e, 2*e), 120', 5, 6],
5: [5, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<0,0,0>', 'C3', 2, ['r:<0,0,1,c>', 't:<0,0,d>'], 4, '<0,0,0>', 'T', 'T', 0, 'N/A', 4, 3],
6: [6, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<0,e,0>', 'C3', 2, ['r:<0,0,1,c>', 't:<0,0,d>'], 4, '<0,0,0>', 'T', 'I213', 3, '(4*e, 4*e, 4*e), (90, 90, 90)', 5, 10],
7: [7, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 3, '<0,0,0>', 'C3', 2, ['r:<0,0,1,c>', 't:<0,0,d>'], 4, '<0,0,0>', 'O', 'O', 0, 'N/A', 4, 4],
8: [8, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 3, '<2*e,e,0>', 'C3', 2, ['r:<0,0,1,c>', 't:<0,0,d>'], 4, '<0,0,0>', 'O', 'P4132', 3, '(8*e, 8*e, 8*e), (90, 90, 90)', 5, 10],
9: [9, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<0,0,0>', 'C3', 2, ['r:<0,0,1,c>', 't:<0,0,d>'], 7, '<0,0,0>', 'I', 'I', 0, 'N/A', 4, 5],
10: [10, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<e,0,0>', 'C4', 3, ['r:<0,0,1,c>'], 1, '<0,0,0>', 'C4', 'p4', 2, '(2*e, 2*e), 90', 4, 4],
11: [11, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 2, '<0,0,0>', 'C4', 3, ['r:<0,0,1,c>', 't:<0,0,d>'], 1, '<0,0,0>', 'D4', 'D4', 0, 'N/A', 4, 2],
12: [12, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 8, '<0,0,0>', 'C4', 3, ['r:<0,0,1,c>', 't:<0,0,d>'], 1, '<e,0,0>', 'D4', 'p4212', 2, '(2*e, 2*e), 90', 5, 4],
13: [13, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 3, '<0,0,0>', 'C4', 3, ['r:<0,0,1,c>', 't:<0,0,d>'], 1, '<0,0,0>', 'O', 'O', 0, 'N/A', 4, 3],
14: [14, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 3, '<2*e,e,0>', 'C4', 3, ['r:<0,0,1,c>', 't:<0,0,d>'], 1, '<0,0,0>', 'O', 'I432', 3, '(4*e, 4*e, 4*e), (90, 90, 90)', 5, 8],
15: [15, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 2, '<0,0,0>', 'C5', 4, ['r:<0,0,1,c>', 't:<0,0,d>'], 1, '<0,0,0>', 'D5', 'D5', 0, 'N/A', 4, 2],
16: [16, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<0,0,0>', 'C5', 4, ['r:<0,0,1,c>', 't:<0,0,d>'], 9, '<0,0,0>', 'I', 'I', 0, 'N/A', 4, 3],
17: [17, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<e,0,0>', 'C6', 5, ['r:<0,0,1,c>'], 1, '<0,0,0>', 'C6', 'p6', 2, '(2*e, 2*e), 120', 4, 3],
18: [18, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 2, '<0,0,0>', 'C6', 5, ['r:<0,0,1,c>', 't:<0,0,d>'], 1, '<0,0,0>', 'D6', 'D6', 0, 'N/A', 4, 2],
19: [19, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 6, '<e,0,0>', 'C6', 5, ['r:<0,0,1,c>', 't:<0,0,d>'], 1, '<0,0,0>', 'D6', 'p622', 2, '(2*e, 2*e), 120', 5, 4],
20: [20, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<e,f,0>', 'D2', 6, ['None'], 1, '<0,0,0>', 'D2', 'c222', 2, '(4*e, 4*f), 90', 4, 4],
21: [21, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 8, '<0,0,0>', 'D2', 6, ['None'], 1, '<e,0,0>', 'D4', 'p422', 2, '(2*e, 2*e), 90', 3, 4],
22: [22, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 2, '<0,e,f>', 'D2', 6, ['None'], 5, '<0,0,0>', 'D4', 'I4122', 3, '(4*e, 4*e, 8*f), (90, 90, 90)', 4, 6],
23: [23, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 10, '<0,0,0>', 'D2', 6, ['None'], 1, '<e,0,0>', 'D6', 'p622', 2, '(2*e, 2*e), 120', 3, 3],
24: [24, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 10, '<0,0,e>', 'D2', 6, ['None'], 1, '<f,0,0>', 'D6', 'P6222', 3, '(2*f, 2*f, 6*e), (90, 90, 120)', 4, 6],
25: [25, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 3, '<0,0,0>', 'D2', 6, ['None'], 5, '<2*e,0,e>', 'O', 'I432', 3, '(4*e, 4*e, 4*e), (90, 90, 90)', 3, 4],
26: [26, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 3, '<-2*e,3*e,0>', 'D2', 6, ['None'], 5, '<0,2*e,e>', 'O', 'I4132', 3, '(8*e, 8*e, 8*e), (90, 90, 90)', 3, 3],
27: [27, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 6, '<e,0,0>', 'D3', 7, ['None'], 11, '<0,0,0>', 'D3', 'p312', 2, '(2*e, 2*e), 120', 3, 3],
28: [28, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 2, '<0,e,f>', 'D3', 7, ['None'], 1, '<0,0,0>', 'D3', 'R32', 3, '(3.4641*e, 3.4641*e, 3*f), (90, 90, 120)', 4, 4],
29: [29, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<e,0,0>', 'D3', 7, ['None'], 11, '<e,0.57735*e,0>', 'D6', 'p622', 2, '(2*e, 2*e), 120', 3, 2],
30: [30, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 2, '<0,0,0>', 'D3', 7, ['None'], 11, '<e,0.57735*e,0>', 'D6', 'p622', 2, '(2*e, 2*e), 120', 3, 2],
31: [31, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 2, '<0,0,0>', 'D3', 7, ['None'], 11, '<e,0.57735*e,f>', 'D6', 'P6322', 3, '(2*e, 2*e, 4*f), (90, 90, 120)', 4, 4],
32: [32, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<0,0,0>', 'D3', 7, ['None'], 4, '<e,e,e>', 'O', 'F4132', 3, '(8*e, 8*e, 8*e), (90, 90, 90)', 3, 3],
33: [33, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<0,2*e,0>', 'D3', 7, ['None'], 4, '<e,e,e>', 'O', 'I4132', 3, '(8*e, 8*e, 8*e), (90, 90, 90)', 3, 2],
34: [34, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 3, '<0,0,0>', 'D3', 7, ['None'], 4, '<e,e,e>', 'O', 'I432', 3, '(4*e, 4*e, 4*e), (90, 90, 90)', 3, 4],
35: [35, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 3, '<0,e,-2*e>', 'D3', 7, ['None'], 4, '<e,e,e>', 'O', 'I4132', 3, '(8*e, 8*e, 8*e), (90, 90, 90)', 3, 2],
36: [36, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 3, '<0,e,-2*e>', 'D3', 7, ['None'], 4, '<3*e,3*e,3*e>', 'O', 'P4132', 3, '(8*e, 8*e, 8*e), (90, 90, 90)', 3, 3],
37: [37, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<e,0,0>', 'D4', 8, ['None'], 1, '<0,0,0>', 'D4', 'p422', 2, '(2*e, 2*e), 90', 3, 2],
38: [38, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 2, '<0,e,0>', 'D4', 8, ['None'], 1, '<0,0,0>', 'D4', 'p422', 2, '(2*e, 2*e), 90', 3, 2],
39: [39, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 8, '<0,e,f>', 'D4', 8, ['None'], 1, '<0,0,0>', 'D4', 'I422', 3, '(2*e, 2*e, 4*f), (90, 90, 90)', 4, 4],
40: [40, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 3, '<0,0,0>', 'D4', 8, ['None'], 1, '<0,0,e>', 'O', 'P432', 3, '(2*e, 2*e, 2*e), (90, 90, 90)', 3, 3],
41: [41, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 3, '<2*e,e,0>', 'D4', 8, ['None'], 1, '<2*e,2*e,0>', 'O', 'I432', 3, '(4*e, 4*e, 4*e), (90, 90, 90)', 3, 2],
42: [42, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<e,0,0>', 'D6', 9, ['None'], 1, '<0,0,0>', 'D6', 'p622', 2, '(2*e, 2*e), 120', 3, 2],
43: [43, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 6, '<e,0,0>', 'D6', 9, ['None'], 1, '<0,0,0>', 'D6', 'p622', 2, '(2*e, 2*e), 120', 3, 2],
44: [44, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 6, '<e,0,f>', 'D6', 9, ['None'], 1, '<0,0,0>', 'D6', 'P622', 3, '(2*e, 2*e, 2*f), (90, 90, 120)', 4, 4],
45: [45, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<e,0,0>', 'T', 10, ['None'], 1, '<0,0,0>', 'T', 'P23', 3, '(2*e, 2*e, 2*e), (90, 90, 90)', 3, 2],
46: [46, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<e,e,0>', 'T', 10, ['None'], 1, '<0,0,0>', 'T', 'F23', 3, '(4*e, 4*e, 4*e), (90, 90, 90)', 3, 3],
47: [47, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 3, '<2*e,3*e,0>', 'T', 10, ['None'], 1, '<0,4*e,0>', 'O', 'F4132', 3, '(8*e, 8*e, 8*e), (90, 90, 90)', 3, 2],
48: [48, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<e,0,0>', 'O', 11, ['None'], 1, '<0,0,0>', 'O', 'P432', 3, '(2*e, 2*e, 2*e), (90, 90, 90)', 3, 2],
49: [49, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<e,e,0>', 'O', 11, ['None'], 1, '<0,0,0>', 'O', 'F432', 3, '(4*e, 4*e, 4*e), (90, 90, 90)', 3, 2],
50: [50, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 3, '<e,0,0>', 'O', 11, ['None'], 1, '<0,0,0>', 'O', 'F432', 3, '(2*e, 2*e, 2*e), (90, 90, 90)', 3, 2],
51: [51, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 3, '<0,e,0>', 'O', 11, ['None'], 1, '<0,0,0>', 'O', 'P432', 3, '(2*e, 2*e, 2*e), (90, 90, 90)', 3, 2],
52: [52, 'C2', 1, ['r:<0,0,1,a>', 't:<0,0,b>'], 3, '<-e,e,e>', 'O', 11, ['None'], 1, '<0,0,0>', 'O', 'I432', 3, '(4*e, 4*e, 4*e), (90, 90, 90)', 3, 2],
53: [53, 'C3', 2, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<0,0,0>', 'C3', 2, ['r:<0,0,1,c>'], 1, '<e,0.57735*e,0>', 'C3', 'p3', 2, '(2*e, 2*e), 120', 4, 3],
54: [54, 'C3', 2, ['r:<0,0,1,a>', 't:<0,0,b>'], 4, '<0,0,0>', 'C3', 2, ['r:<0,0,1,c>', 't:<0,0,d>'], 12, '<0,0,0>', 'T', 'T', 0, 'N/A', 4, 2],
55: [55, 'C3', 2, ['r:<0,0,1,a>', 't:<0,0,b>'], 4, '<0,0,0>', 'C3', 2, ['r:<0,0,1,c>', 't:<0,0,d>'], 12, '<e,0,0>', 'T', 'P213', 3, '(2*e, 2*e, 2*e), (90, 90, 90)', 5, 5],
56: [56, 'C3', 2, ['r:<0,0,1,a>', 't:<0,0,b>'], 4, '<0,0,0>', 'C4', 3, ['r:<0,0,1,c>', 't:<0,0,d>'], 1, '<0,0,0>', 'O', 'O', 0, 'N/A', 4, 2],
57: [57, 'C3', 2, ['r:<0,0,1,a>', 't:<0,0,b>'], 4, '<0,0,0>', 'C4', 3, ['r:<0,0,1,c>', 't:<0,0,d>'], 1, '<e,0,0>', 'O', 'F432', 3, '(2*e, 2*e, 2*e), (90, 90, 90)', 5, 6],
58: [58, 'C3', 2, ['r:<0,0,1,a>', 't:<0,0,b>'], 7, '<0,0,0>', 'C5', 4, ['r:<0,0,1,c>', 't:<0,0,d>'], 9, '<0,0,0>', 'I', 'I', 0, 'N/A', 4, 2],
59: [59, 'C3', 2, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<e,0.57735*e,0>', 'C6', 5, ['r:<0,0,1,c>'], 1, '<0,0,0>', 'C6', 'p6', 2, '(2*e, 2*e), 120', 4, 2],
60: [60, 'C3', 2, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<e,0.57735*e,0>', 'D2', 6, ['None'], 1, '<e,0,0>', 'D6', 'p622', 2, '(2*e, 2*e), 120', 3, 2],
61: [61, 'C3', 2, ['r:<0,0,1,a>', 't:<0,0,b>'], 4, '<0,0,0>', 'D2', 6, ['None'], 1, '<e,0,0>', 'T', 'P23', 3, '(2*e, 2*e, 2*e), (90, 90, 90)', 3, 3],
62: [62, 'C3', 2, ['r:<0,0,1,a>', 't:<0,0,b>'], 4, '<0,0,0>', 'D2', 6, ['None'], 3, '<e,0,e>', 'O', 'F432', 3, '(4*e, 4*e, 4*e), (90, 90, 90)', 3, 3],
63: [63, 'C3', 2, ['r:<0,0,1,a>', 't:<0,0,b>'], 4, '<0,0,0>', 'D2', 6, ['None'], 3, '<2*e,e,0>', 'O', 'I4132', 3, '(8*e,8*e, 8*e), (90, 90, 90)', 3, 2],
64: [64, 'C3', 2, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<e,0.57735*e,0>', 'D3', 7, ['None'], 11, '<0,0,0>', 'D3', 'p312', 2, '(2*e, 2*e), 120', 3, 2],
65: [65, 'C3', 2, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<e,0.57735*e,0>', 'D3', 7, ['None'], 1, '<0,0,0>', 'D3', 'p321', 2, '(2*e, 2*e), 120', 3, 2],
66: [66, 'C3', 2, ['r:<0,0,1,a>', 't:<0,0,b>'], 12, '<4*e,0,0>', 'D3', 7, ['None'], 4, '<3*e,3*e,3*e>', 'O', 'P4132', 3, '(8*e, 8*e, 8*e), (90, 90, 90)', 3, 4],
67: [67, 'C3', 2, ['r:<0,0,1,a>', 't:<0,0,b>'], 4, '<0,0,0>', 'D4', 8, ['None'], 1, '<0,0,e>', 'O', 'P432', 3, '(2*e, 2*e, 2*e), (90, 90, 90)', 3, 2],
68: [68, 'C3', 2, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<e,0.57735*e,0>', 'D6', 9, ['None'], 1, '<0,0,0>', 'D6', 'p622', 2, '(2*e, 2*e), 120', 3, 2],
69: [69, 'C3', 2, ['r:<0,0,1,a>', 't:<0,0,b>'], 4, '<e,0,0>', 'T', 10, ['None'], 1, '<0,0,0>', 'T', 'F23', 3, '(2*e, 2*e, 2*e), (90, 90, 90)', 3, 2],
70: [70, 'C3', 2, ['r:<0,0,1,a>', 't:<0,0,b>'], 4, '<e,0,0>', 'O', 11, ['None'], 1, '<0,0,0>', 'O', 'F432', 3, '(2*e, 2*e, 2*e), (90, 90, 90)', 3, 2],
71: [71, 'C4', 3, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<0,0,0>', 'C4', 3, ['r:<0,0,1,c>'], 1, '<e,e,0>', 'C4', 'p4', 2, '(2*e, 2*e), 90', 4, 2],
72: [72, 'C4', 3, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<0,0,0>', 'C4', 3, ['r:<0,0,1,c>', 't:<0,0,d>'], 2, '<0,e,e>', 'O', 'P432', 3, '(2*e, 2*e, 2*e), (90, 90, 90)', 5, 4],
73: [73, 'C4', 3, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<0,0,0>', 'D2', 6, ['None'], 1, '<e,0,0>', 'D4', 'p422', 2, '(2*e, 2*e), 90', 3, 2],
74: [74, 'C4', 3, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<e,0,0>', 'D2', 6, ['None'], 5, '<0,0,0>', 'D4', 'p4212', 2, '(2*e, 2*e), 90', 3, 2],
75: [75, 'C4', 3, ['r:<0,0,1,a>', 't:<0,0,b>'], 2, '<0,0,0>', 'D2', 6, ['None'], 3, '<2*e,e,0>', 'O', 'I432', 3, '(4*e, 4*e, 4*e), (90, 90, 90)', 3, 2],
76: [76, 'C4', 3, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<0,0,0>', 'D2', 6, ['None'], 3, '<e,0,e>', 'O', 'F432', 3, '(4*e, 4*e, 4*e), (90, 90, 90)', 3, 3],
77: [77, 'C4', 3, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<0,0,0>', 'D3', 7, ['None'], 4, '<e,e,e>', 'O', 'I432', 3, '(4*e, 4*e, 4*e), (90, 90, 90)', 3, 2],
78: [78, 'C4', 3, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<e,e,0>', 'D4', 8, ['None'], 1, '<0,0,0>', 'D4', 'p422', 2, '(2*e, 2*e), 90', 3, 2],
79: [79, 'C4', 3, ['r:<0,0,1,a>', 't:<0,0,b>'], 2, '<0,0,0>', 'D4', 8, ['None'], 1, '<e,e,0>', 'O', 'P432', 3, '(2*e, 2*e, 2*e), (90, 90, 90)', 3, 2],
80: [80, 'C4', 3, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<0,0,0>', 'T', 10, ['None'], 1, '<e,e,e>', 'O', 'F432', 3, '(4*e, 4*e, 4*e), (90, 90, 90)', 3, 2],
81: [81, 'C4', 3, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<e,e,0>', 'O', 11, ['None'], 1, '<0,0,0>', 'O', 'P432', 3, '(2*e, 2*e, 2*e), (90, 90, 90)', 3, 2],
82: [82, 'C6', 5, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<0,0,0>', 'D2', 6, ['None'], 1, '<e,0,0>', 'D6', 'p622', 2, '(2*e, 2*e), 120', 3, 2],
83: [83, 'C6', 5, ['r:<0,0,1,a>', 't:<0,0,b>'], 1, '<0,0,0>', 'D3', 7, ['None'], 11, '<e,0.57735*e,0>', 'D6', 'p622', 2, '(2*e, 2*e), 120', 2, 2],
84: [84, 'D2', 6, ['None'], 1, '<0,0,0>', 'D2', 6, ['None'], 1, '<e,f,0>', 'D2', 'p222', 2, '(2*e, 2*f), 90', 2, 2],
85: [85, 'D2', 6, ['None'], 1, '<0,0,0>', 'D2', 6, ['None'], 1, '<e,f,g>', 'D2', 'F222', 3, '(4*e, 4*f, 4*g), (90, 90, 90)', 3, 3],
86: [86, 'D2', 6, ['None'], 1, '<e,0,0>', 'D2', 6, ['None'], 5, '<0,0,f>', 'D4', 'P4222', 3, '(2*e, 2*e, 4*f), (90, 90, 90)', 2, 2],
87: [87, 'D2', 6, ['None'], 1, '<e,0,0>', 'D2', 6, ['None'], 13, '<0,0,-f>', 'D6', 'P6222', 3, '(2*e, 2*e, 6*f), (90, 90, 120)', 2, 2],
88: [88, 'D2', 6, ['None'], 3, '<0,e,2*e>', 'D2', 6, ['None'], 5, '<0,2*e,e>', 'O', 'P4232', 3, '(4*e, 4*e, 4*e), (90, 90, 90)', 1, 2],
89: [89, 'D2', 6, ['None'], 1, '<e,0,0>', 'D3', 7, ['None'], 11, '<e,0.57735*e,0>', 'D6', 'p622', 2, '(2*e, 2*e), 120', 1, 1],
90: [90, 'D2', 6, ['None'], 1, '<e,0,0>', 'D3', 7, ['None'], 11, '<e,0.57735*e,f>', 'D6', 'P622', 3, '(2*e, 2*e, 2*f), (90, 90, 120)', 2, 2],
91: [91, 'D2', 6, ['None'], 1, '<0,0,2*e>', 'D3', 7, ['None'], 4, '<e,e,e>', 'D6', 'P4232', 3, '(4*e, 4*e, 4*e), (90, 90, 90)', 1, 2],
92: [92, 'D2', 6, ['None'], 3, '<2*e,e,0>', 'D3', 7, ['None'], 4, '<e,e,e>', 'O', 'I4132', 3, '(8*e, 8*e, 8*e), (90, 90, 90)', 1, 1],
93: [93, 'D2', 6, ['None'], 1, '<e,0,0>', 'D4', 8, ['None'], 1, '<0,0,0>', 'D4', 'p422', 2, '(2*e, 2*e), 90', 1, 1],
94: [94, 'D2', 6, ['None'], 1, '<e,0,f>', 'D4', 8, ['None'], 1, '<0,0,0>', 'D4', 'P422', 3, '(2*e, 2*e, 2*f), (90, 90,90)', 2, 2],
95: [95, 'D2', 6, ['None'], 5, '<e,0,f>', 'D4', 8, ['None'], 1, '<0,0,0>', 'D4', 'I422', 3, '(2*e, 2*e, 4*f), (90, 90,90)', 2, 2],
96: [96, 'D2', 6, ['None'], 3, '<0,e,2*e>', 'D4', 8, ['None'], 1, '<0,0,2*e>', 'O', 'I432', 3, '(4*e, 4*e, 4*e), (90, 90, 90)', 1, 1],
97: [97, 'D2', 6, ['None'], 1, '<e,0,0>', 'D6', 9, ['None'], 1, '<0,0,0>', 'D6', 'p622', 2, '(2*e, 2*e), 120', 1, 1],
98: [98, 'D2', 6, ['None'], 1, '<e,0,f>', 'D6', 9, ['None'], 1, '<0,0,0>', 'D6', 'P622', 3, '(2*e, 2*e, 2*f), (90, 90, 120)', 2, 2],
99: [99, 'D2', 6, ['None'], 1, '<e,0,0>', 'T', 10, ['None'], 1, '<0,0,0>', 'T', 'P23', 3, '(2*e, 2*e, 2*e), (90, 90, 90)', 1, 1],
100: [100, 'D2', 6, ['None'], 1, '<e,e,0>', 'T', 10, ['None'], 1, '<0,0,0>', 'T', 'P23', 3, '(2*e, 2*e, 2*e), (90, 90, 90)', 1, 2],
101: [101, 'D2', 6, ['None'], 3, '<e,0,e>', 'T', 10, ['None'], 1, '<e,e,e>', 'O', 'F432', 3, '(4*e, 4*e, 4*e), (90, 90, 90)', 1, 1],
102: [102, 'D2', 6, ['None'], 3, '<2*e,e,0>', 'T', 10, ['None'], 1, '<0,0,0>', 'O', 'P4232', 3, '(4*e, 4*e, 4*e), (90, 90, 90)', 1, 2],
103: [103, 'D2', 6, ['None'], 3, '<e,0,e>', 'O', 11, ['None'], 1, '<0,0,0>', 'O', 'F432', 3, '(4*e, 4*e, 4*e), (90, 90, 90)', 1, 1],
104: [104, 'D2', 6, ['None'], 3, '<2*e,e,0>', 'O', 11, ['None'], 1, '<0,0,0>', 'O', 'I432', 3, '(4*e, 4*e, 4*e), (90, 90, 90)', 1, 2],
105: [105, 'D3', 7, ['None'], 11, '<0,0,0>', 'D3', 7, ['None'], 11, '<e,0.57735*e,0>', 'D3', 'p312', 2, '(2*e, 2*e), 120', 1, 1],
106: [106, 'D3', 7, ['None'], 11, '<0,0,0>', 'D3', 7, ['None'], 11, '<e,0.57735*e,f>', 'D3', 'P312', 3, '(2*e, 2*e, 2*f), (90, 90, 120)', 2, 2],
107: [107, 'D3', 7, ['None'], 1, '<0,0,0>', 'D3', 7, ['None'], 11, '<e,0.57735*e,f>', 'D6', 'P6322', 3, '(2*e, 2*e, 4*f), (90, 90, 120)', 2, 2],
108: [108, 'D3', 7, ['None'], 4, '<e,e,e>', 'D3', 7, ['None'], 12, '<e,3*e,e>', 'O', 'P4232', 3, '(4*e, 4*e, 4*e), (90, 90, 90)', 1, 2],
109: [109, 'D3', 7, ['None'], 4, '<3*e,3*e,3*e>', 'D3', 7, ['None'], 12, '<e,3*e,5*e>', 'O', 'P4132', 3, '(8*e, 8*e, 8*e), (90, 90, 90)', 1, 1],
110: [110, 'D3', 7, ['None'], 4, '<e,e,e>', 'D4', 8, ['None'], 1, '<0,0,2*e>', 'O', 'I432', 3, '(4*e, 4*e, 4*e), (90, 90, 90)', 1, 2],
111: [111, 'D3', 7, ['None'], 11, '<e,0.57735*e,0>', 'D6', 9, ['None'], 1, '<0,0,0>', 'D6', 'p622', 2, '(2*e, 2*e), 120', 1, 1],
112: [112, 'D3', 7, ['None'], 11, '<e,0.57735*e,f>', 'D6', 9, ['None'], 1, '<0,0,0>', 'D6', 'P622', 3, '(2*e, 2*e, 2*f), (90, 90, 120)', 2, 2],
113: [113, 'D3', 7, ['None'], 4, '<e,e,e>', 'T', 10, ['None'], 1, '<0,0,0>', 'O', 'F4132', 3, '(8*e, 8*e, 8*e), (90, 90, 90)', 1, 1],
114: [114, 'D3', 7, ['None'], 4, '<e,e,e>', 'O', 11, ['None'], 1, '<0,0,0>', 'O', 'I432', 3, '(4*e, 4*e, 4*e), (90, 90, 90)', 1, 1],
115: [115, 'D4', 8, ['None'], 1, '<0,0,0>', 'D4', 8, ['None'], 1, '<e,e,0>', 'D4', 'p422', 2, '(2*e, 2*e), 90', 1, 1],
116: [116, 'D4', 8, ['None'], 1, '<0,0,0>', 'D4', 8, ['None'], 1, '<e,e,f>', 'D4', 'P422', 3, '(2*e, 2*e, 2*f), (90, 90,90)', 2, 2],
117: [117, 'D4', 8, ['None'], 1, '<0,0,e>', 'D4', 8, ['None'], 2, '<0,e,e>', 'O', 'P432', 3, '(2*e, 2*e, 2*e), (90, 90, 90)', 1, 1],
118: [118, 'D4', 8, ['None'], 1, '<0,0,e>', 'O', 11, ['None'], 1, '<0,0,0>', 'O', 'P432', 3, '(2*e, 2*e, 2*e), (90, 90, 90)', 1, 1],
119: [119, 'D4', 8, ['None'], 1, '<e,e,0>', 'O', 11, ['None'], 1, '<0,0,0>', 'O', 'P432', 3, '(2*e, 2*e, 2*e), (90, 90, 90)', 1, 1],
120: [120, 'T', 10, ['None'], 1, '<0,0,0>', 'T', 10, ['None'], 1, '<e,e,e>', 'T', 'F23', 3, '(4*e, 4*e, 4*e), (90, 90, 90)', 1, 1],
121: [121, 'T', 10, ['None'], 1, '<0,0,0>', 'T', 10, ['None'], 1, '<e,0,0>', 'T', 'F23', 3, '(2*e, 2*e, 2*e), (90, 90, 90)', 1, 1],
122: [122, 'T', 10, ['None'], 1, '<e,e,e>', 'O', 11, ['None'], 1, '<0,0,0>', 'O', 'F432', 3, '(4*e, 4*e, 4*e), (90, 90, 90)', 1, 1],
123: [123, 'O', 11, ['None'], 1, '<0,0,0>', 'O', 11, ['None'], 1, '<e,e,e>', 'O', 'P432', 3, '(2*e, 2*e, 2*e), (90, 90, 90)', 1, 1],
124: [124, 'O', 11, ['None'], 1, '<0,0,0>', 'O', 11, ['None'], 1, '<e,0,0>', 'O', 'F432', 3, '(2*e, 2*e, 2*e), (90, 90, 90)', 1, 1]}
# ROTATION RANGE DEG
C2 = 180
C3 = 120
C4 = 90
C5 = 72
C6 = 60
RotRangeDict = {"C2": C2, "C3": C3, "C4": C4, "C5": C5, "C6": C6}
# ROTATION SETTING MATRICES
RotMat1 = [[1.0, 0.0, 0.0], [0.0, 1.0, 0.0], [0.0, 0.0, 1.0]]
RotMat2 = [[0.0, 0.0, 1.0], [0.0, 1.0, 0.0], [-1.0, 0.0, 0.0]]
RotMat3 = [[0.707107, 0.0, 0.707107], [0.0, 1.0, 0.0], [-0.707107, 0.0, 0.707107]]
RotMat4 = [[0.707107, 0.408248, 0.577350], [-0.707107, 0.408248, 0.577350], [0.0, -0.816497, 0.577350]]
RotMat5 = [[0.707107, 0.707107, 0.0], [-0.707107, 0.707107, 0.0], [0.0, 0.0, 1.0]]
RotMat6 = [[1.0, 0.0, 0.0], [0.0, 0.0, 1.0], [0.0, -1.0, 0.0]]
RotMat7 = [[1.0, 0.0, 0.0], [0.0, 0.934172, 0.356822], [0.0, -0.356822, 0.934172]]
RotMat8 = [[0.0, 0.707107, 0.707107], [0.0, -0.707107, 0.707107], [1.0, 0.0, 0.0]]
RotMat9 = [[0.850651, 0.0, 0.525732], [0.0, 1.0, 0.0], [-0.525732, 0.0, 0.850651]]
RotMat10 = [[0.0, 0.5, 0.866025], [0.0, -0.866025, 0.5], [1.0, 0.0, 0.0]]
RotMat11 = [[0.0, -1.0, 0.0], [1.0, 0.0, 0.0], [0.0, 0.0, 1.0]]
RotMat12 = [[0.707107, -0.408248, 0.577350], [0.707107, 0.408248, -0.577350], [0.0, 0.816497, 0.577350]]
RotMat13 = [[0.5, -0.866025, 0.0], [0.866025, 0.5, 0.0], [0.0, 0.0, 1.0]]
RotSetDict = {1: RotMat1,
2: RotMat2,
3: RotMat3,
4: RotMat4,
5: RotMat5,
6: RotMat6,
7: RotMat7,
8: RotMat8,
9: RotMat9,
10: RotMat10,
11: RotMat11,
12: RotMat12,
13: RotMat13}
class SymEntry:
    """One row of the symmetry-combination table (``sym_comb_dict``).

    Wraps the list stored under an entry number and exposes named accessors
    for the two component groups' symmetry, rotation settings, internal and
    reference-frame degrees of freedom, and the resulting design symmetry.
    """

    def __init__(self, entry):
        """Look up *entry* (an int in [1, 124]) in ``sym_comb_dict``.

        Raises:
            ValueError: if *entry* is not an int in the supported range.
        """
        # Idiomatic type/range check (was `type(entry) == int and entry in
        # range(1, 125)`); validate-and-raise first keeps the happy path flat.
        if not (isinstance(entry, int) and 1 <= entry <= 124):
            raise ValueError("\nINVALID SYMMETRY ENTRY. SUPPORTED VALUES ARE: 1 to 124\n")
        # GETTING ENTRY INFORMATION FROM sym_comb_dict
        self.entry_number = entry
        sym_comb_info = sym_comb_dict[self.entry_number]
        # ASSIGNING INSTANCE ATTRIBUTES (indices follow the table row layout)
        self.group1 = sym_comb_info[1]
        self.group1_indx = sym_comb_info[2]
        self.int_dof_group1 = sym_comb_info[3]
        self.rot_set_group1 = sym_comb_info[4]
        self.ref_frame_tx_dof_group1 = sym_comb_info[5]
        self.group2 = sym_comb_info[6]
        self.group2_indx = sym_comb_info[7]
        self.int_dof_group2 = sym_comb_info[8]
        self.rot_set_group2 = sym_comb_info[9]
        self.ref_frame_tx_dof_group2 = sym_comb_info[10]
        self.pt_grp = sym_comb_info[11]
        self.result = sym_comb_info[12]
        self.dim = sym_comb_info[13]
        self.unit_cell = sym_comb_info[14]
        self.tot_dof = sym_comb_info[15]
        self.cycle_size = sym_comb_info[16]

    def get_group1_sym(self):
        """Symmetry symbol of component group 1 (e.g. 'C2', 'T', 'O')."""
        return self.group1

    def get_group2_sym(self):
        """Symmetry symbol of component group 2."""
        return self.group2

    def get_pt_grp_sym(self):
        """Point-group symbol of the combined assembly."""
        return self.pt_grp

    def get_rot_range_deg_1(self):
        """Rotational sampling range (deg) for group 1; 0 if not a Cn group."""
        return RotRangeDict.get(self.group1, 0)

    def get_rot_range_deg_2(self):
        """Rotational sampling range (deg) for group 2; 0 if not a Cn group."""
        return RotRangeDict.get(self.group2, 0)

    def get_rot_set_mat_group1(self):
        """3x3 rotation-setting matrix for group 1."""
        return RotSetDict[self.rot_set_group1]

    def get_ref_frame_tx_dof_group1(self):
        """Reference-frame translation DOF string for group 1 (e.g. '<e,0,0>')."""
        return self.ref_frame_tx_dof_group1

    def get_rot_set_mat_group2(self):
        """3x3 rotation-setting matrix for group 2."""
        return RotSetDict[self.rot_set_group2]

    def get_ref_frame_tx_dof_group2(self):
        """Reference-frame translation DOF string for group 2."""
        return self.ref_frame_tx_dof_group2

    def get_result_design_sym(self):
        """Resulting design symmetry symbol (e.g. 'F23')."""
        return self.result

    def get_design_dim(self):
        """Dimension of the design: 0 (point), 2 (layer), or 3 (lattice)."""
        return self.dim

    def get_uc_spec_string(self):
        """Unit-cell specification string from the table."""
        return self.unit_cell

    def is_internal_tx1(self):
        """True if group 1 carries the internal z-translation DOF token."""
        return 't:<0,0,b>' in self.int_dof_group1

    def is_internal_tx2(self):
        """True if group 2 carries the internal z-translation DOF token."""
        return 't:<0,0,d>' in self.int_dof_group2

    def get_internal_tx1(self):
        """The internal-translation token for group 1, or None."""
        return 't:<0,0,b>' if 't:<0,0,b>' in self.int_dof_group1 else None

    def get_internal_tx2(self):
        """The internal-translation token for group 2, or None."""
        return 't:<0,0,d>' if 't:<0,0,d>' in self.int_dof_group2 else None

    def is_internal_rot1(self):
        """True if group 1 carries the internal z-rotation DOF token."""
        return 'r:<0,0,1,a>' in self.int_dof_group1

    def is_internal_rot2(self):
        """True if group 2 carries the internal z-rotation DOF token."""
        return 'r:<0,0,1,c>' in self.int_dof_group2

    def get_internal_rot1(self):
        """The internal-rotation token for group 1, or None."""
        return 'r:<0,0,1,a>' if 'r:<0,0,1,a>' in self.int_dof_group1 else None

    def get_internal_rot2(self):
        """The internal-rotation token for group 2, or None."""
        return 'r:<0,0,1,c>' if 'r:<0,0,1,c>' in self.int_dof_group2 else None

    def is_ref_frame_tx_dof1(self):
        """True if group 1 has any external (reference-frame) translation DOF."""
        return self.ref_frame_tx_dof_group1 != '<0,0,0>'

    def is_ref_frame_tx_dof2(self):
        """True if group 2 has any external (reference-frame) translation DOF."""
        return self.ref_frame_tx_dof_group2 != '<0,0,0>'
| 78.17377
| 176
| 0.388332
| 5,306
| 23,843
| 1.710516
| 0.054467
| 0.1342
| 0.061481
| 0.051565
| 0.777545
| 0.740084
| 0.68918
| 0.6645
| 0.632878
| 0.62186
| 0
| 0.251465
| 0.205763
| 23,843
| 304
| 177
| 78.430921
| 0.227808
| 0.009353
| 0
| 0.148289
| 0
| 0.003802
| 0.373682
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.087452
| false
| 0
| 0
| 0.038023
| 0.220532
| 0
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
fff328b31f46b00d87163c77e3f3150360ab5bb0
| 142
|
py
|
Python
|
py_tdlib/constructors/update_chat_pinned_message.py
|
Mr-TelegramBot/python-tdlib
|
2e2d21a742ebcd439971a32357f2d0abd0ce61eb
|
[
"MIT"
] | 24
|
2018-10-05T13:04:30.000Z
|
2020-05-12T08:45:34.000Z
|
py_tdlib/constructors/update_chat_pinned_message.py
|
MrMahdi313/python-tdlib
|
2e2d21a742ebcd439971a32357f2d0abd0ce61eb
|
[
"MIT"
] | 3
|
2019-06-26T07:20:20.000Z
|
2021-05-24T13:06:56.000Z
|
py_tdlib/constructors/update_chat_pinned_message.py
|
MrMahdi313/python-tdlib
|
2e2d21a742ebcd439971a32357f2d0abd0ce61eb
|
[
"MIT"
] | 5
|
2018-10-05T14:29:28.000Z
|
2020-08-11T15:04:10.000Z
|
from ..factory import Type
class updateChatPinnedMessage(Type):
    """TDLib update object: the pinned message of a chat was changed.

    Attributes are populated by the Type machinery from incoming updates.
    """
    # Identifier of the chat whose pinned message changed.
    chat_id = None # type: "int53"
    # Id of the newly pinned message (presumably 0 when unpinned — TODO confirm
    # against TDLib docs).
    pinned_message_id = None # type: "int53"
| 20.285714
| 42
| 0.725352
| 18
| 142
| 5.555556
| 0.666667
| 0.12
| 0.2
| 0.3
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033898
| 0.169014
| 142
| 6
| 43
| 23.666667
| 0.813559
| 0.190141
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.25
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
0830e7099955619ddc8169352e7452c17fbcbcc2
| 905
|
py
|
Python
|
src/func/_getfeatures.py
|
megemini/DataCastle2017
|
261134f760d8c1bbfc3e65e1362b7710e601947d
|
[
"MIT"
] | null | null | null |
src/func/_getfeatures.py
|
megemini/DataCastle2017
|
261134f760d8c1bbfc3e65e1362b7710e601947d
|
[
"MIT"
] | null | null | null |
src/func/_getfeatures.py
|
megemini/DataCastle2017
|
261134f760d8c1bbfc3e65e1362b7710e601947d
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
Generate features from pandas dataframe, with columns statistics.
-------------------
Return dataframe, columns=[by, col]
"""
def get_count(df, by):
    """Row count per group: Series indexed by *by* (taken from column 0)."""
    grouped = df.groupby(by=by)
    return grouped.count().iloc[:, 0]
def get_sum(df, col, by):
    """Per-group sum of column *col*, grouped by *by*."""
    grouped = df.groupby(by=by)
    return grouped.sum()[col]
def get_mean(df, col, by):
    """Per-group mean of column *col*, grouped by *by*."""
    grouped = df.groupby(by=by)
    return grouped.mean()[col]
def get_std(df, col, by):
    """Per-group sample standard deviation of column *col*, grouped by *by*."""
    grouped = df.groupby(by=by)
    return grouped.std()[col]
def get_max(df, col, by):
    """Per-group maximum of column *col*, grouped by *by*."""
    grouped = df.groupby(by=by)
    return grouped.max()[col]
def get_min(df, col, by):
    """Per-group minimum of column *col*, grouped by *by*."""
    grouped = df.groupby(by=by)
    return grouped.min()[col]
def get_dum_sum(df, col, prefix, by, drop_first=False):
    """One-hot encode columns *col* and sum the indicators per *by* group.

    *col* and *by* are lists of column names; *prefix* names the dummy columns.
    """
    dummies = pd.get_dummies(df[by + col], columns=col, prefix=prefix, drop_first=drop_first)
    return dummies.groupby(by).sum()
def get_dum_has(df, col, prefix, by, drop_first=False):
    """One-hot encode columns *col* and flag (max) presence per *by* group.

    *col* and *by* are lists of column names; *prefix* names the dummy columns.
    """
    dummies = pd.get_dummies(df[by + col], columns=col, prefix=prefix, drop_first=drop_first)
    return dummies.groupby(by).max()
| 21.547619
| 108
| 0.656354
| 152
| 905
| 3.789474
| 0.223684
| 0.083333
| 0.09375
| 0.177083
| 0.612847
| 0.612847
| 0.576389
| 0.576389
| 0.350694
| 0.350694
| 0
| 0.002587
| 0.145856
| 905
| 41
| 109
| 22.073171
| 0.742561
| 0.160221
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
08456e61183832940d20cc1f61e5f588b25adc65
| 196
|
py
|
Python
|
deep_table/nn/models/__init__.py
|
pfnet-research/deep-table
|
a19c0c3048484017d5f24806604c3b3470bcf550
|
[
"MIT"
] | 48
|
2021-09-30T08:14:26.000Z
|
2022-03-02T12:20:08.000Z
|
deep_table/nn/models/__init__.py
|
pfnet-research/deep-table
|
a19c0c3048484017d5f24806604c3b3470bcf550
|
[
"MIT"
] | 1
|
2021-11-08T11:41:49.000Z
|
2021-11-08T11:41:49.000Z
|
deep_table/nn/models/__init__.py
|
pfnet-research/deep-table
|
a19c0c3048484017d5f24806604c3b3470bcf550
|
[
"MIT"
] | 2
|
2021-12-31T03:43:48.000Z
|
2022-03-11T09:04:21.000Z
|
from .base import BaseModel
from .head import MLPHeadModel
from .pretraining import (
DenoisingPretrainModel,
SAINTPretrainModel,
TabTransformerPretrainModel,
VIMEPretrainModel,
)
| 21.777778
| 32
| 0.785714
| 15
| 196
| 10.266667
| 0.733333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.168367
| 196
| 8
| 33
| 24.5
| 0.944785
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.375
| 0
| 0.375
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
0858819ffb549ecdcb828627e28763561569b564
| 232
|
py
|
Python
|
autograd/numpy/__init__.py
|
gautam1858/autograd
|
8d7acaf79e33139b4ebfedf7da0602a965b47c63
|
[
"MIT"
] | 6,119
|
2015-03-10T03:55:58.000Z
|
2022-03-31T11:54:19.000Z
|
autograd/numpy/__init__.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 523
|
2015-03-10T11:59:23.000Z
|
2022-03-05T15:31:59.000Z
|
autograd/numpy/__init__.py
|
EnjoyLifeFund/Debian_py36_packages
|
1985d4c73fabd5f08f54b922e73a9306e09c77a5
|
[
"BSD-3-Clause",
"BSD-2-Clause",
"MIT"
] | 949
|
2015-03-11T20:04:20.000Z
|
2022-03-31T12:13:11.000Z
|
from __future__ import absolute_import
from .numpy_wrapper import *
from . import numpy_boxes
from . import numpy_vspaces
from . import numpy_vjps
from . import numpy_jvps
from . import linalg
from . import fft
from . import random
| 23.2
| 38
| 0.806034
| 34
| 232
| 5.205882
| 0.382353
| 0.39548
| 0.338983
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155172
| 232
| 9
| 39
| 25.777778
| 0.903061
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
085bb244fe65d92e28f64b6d3019c139b3443f2c
| 75
|
py
|
Python
|
src/dice_lib/fs/_davix.py
|
uobdic/dice-lib
|
8b9c1c90542270aec96a00b2c605b63fd60eaa0f
|
[
"BSD-3-Clause"
] | null | null | null |
src/dice_lib/fs/_davix.py
|
uobdic/dice-lib
|
8b9c1c90542270aec96a00b2c605b63fd60eaa0f
|
[
"BSD-3-Clause"
] | 1
|
2022-03-25T13:47:46.000Z
|
2022-03-25T13:47:46.000Z
|
src/dice_lib/fs/_davix.py
|
uobdic/dice-lib
|
8b9c1c90542270aec96a00b2c605b63fd60eaa0f
|
[
"BSD-3-Clause"
] | null | null | null |
from ._base import FileSystem
class DavixFileSystem(FileSystem):
    """Davix-backed FileSystem implementation.

    Currently a stub: it inherits all behavior from ``FileSystem`` and
    overrides nothing yet.
    """
    ...
| 12.5
| 34
| 0.733333
| 7
| 75
| 7.714286
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173333
| 75
| 5
| 35
| 15
| 0.870968
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
f2540c2045ee715942af354dc3b696809ae92561
| 286
|
py
|
Python
|
sources/t03/t03ej23.py
|
workready/pythonbasic
|
59bd82caf99244f5e711124e1f6f4dec8de22141
|
[
"MIT"
] | null | null | null |
sources/t03/t03ej23.py
|
workready/pythonbasic
|
59bd82caf99244f5e711124e1f6f4dec8de22141
|
[
"MIT"
] | null | null | null |
sources/t03/t03ej23.py
|
workready/pythonbasic
|
59bd82caf99244f5e711124e1f6f4dec8de22141
|
[
"MIT"
] | null | null | null |
# Teaching example: the three basic conditional forms (if, if/else, if/elif/else).
x,y = 8, 4
# Single-branch if: both prints run only when x > y.
if x > y:
    print("x es mayor que y")
    print("x es el doble de y")
# if/else: exactly one of the two branches runs.
if x > y:
    print("x es mayor que y")
else:
    print("x es menor o igual que y")
# if/elif/else: full three-way comparison of x and y.
if x < y:
    print("x es menor que y")
elif x == y:
    print("x es igual a y")
else:
    print("x es mayor que y")
| 16.823529
| 37
| 0.538462
| 63
| 286
| 2.444444
| 0.285714
| 0.272727
| 0.363636
| 0.292208
| 0.649351
| 0.474026
| 0.363636
| 0.272727
| 0.272727
| 0
| 0
| 0.010204
| 0.314685
| 286
| 17
| 38
| 16.823529
| 0.77551
| 0
| 0
| 0.5
| 0
| 0
| 0.418118
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
f257005bcac2e1253c3b4bd06999f06535f2ed11
| 12
|
py
|
Python
|
src/pythonnamespace/namespacepackage/project1/parent/child/one.py
|
sudeep0901/python
|
7a50af12e72d21ca4cad7f2afa4c6f929552043f
|
[
"MIT"
] | null | null | null |
src/pythonnamespace/namespacepackage/project1/parent/child/one.py
|
sudeep0901/python
|
7a50af12e72d21ca4cad7f2afa4c6f929552043f
|
[
"MIT"
] | 3
|
2019-12-26T05:13:55.000Z
|
2020-03-07T06:59:56.000Z
|
src/pythonnamespace/namespacepackage/project1/parent/child/one.py
|
sudeep0901/python
|
7a50af12e72d21ca4cad7f2afa4c6f929552043f
|
[
"MIT"
] | null | null | null |
# Namespace-package demo module: announces itself when imported/run.
print("One")
| 12
| 12
| 0.666667
| 2
| 12
| 4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 12
| 1
| 12
| 12
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0.230769
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
f265749efffffd88e6173a2b1ae94fc1a246433f
| 118
|
py
|
Python
|
setup.py
|
GreaterGoodest/implant
|
6def0add92d84934d03591e5da05611896f87b91
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
GreaterGoodest/implant
|
6def0add92d84934d03591e5da05611896f87b91
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
GreaterGoodest/implant
|
6def0add92d84934d03591e5da05611896f87b91
|
[
"Apache-2.0"
] | null | null | null |
from setuptools import setup, find_packages
# Package metadata for 'c2'; find_packages picks up the c2 package and all
# of its subpackages ('c2.*').
setup(name='c2', version='0.1', packages=find_packages(include=['c2.*']))
| 39.333333
| 73
| 0.745763
| 17
| 118
| 5.058824
| 0.705882
| 0.27907
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.036364
| 0.067797
| 118
| 3
| 73
| 39.333333
| 0.745455
| 0
| 0
| 0
| 0
| 0
| 0.07563
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
f28bcfafec5f0a9c9ac96f3ee11a535a10640f35
| 223
|
py
|
Python
|
src/home/models.py
|
pandeydivesh15/item_sharing_portal
|
c814d5cf0a7b34d73d8155e508a0cf4f334af199
|
[
"MIT"
] | 1
|
2019-11-04T16:45:27.000Z
|
2019-11-04T16:45:27.000Z
|
src/home/models.py
|
pandeydivesh15/item_sharing_portal
|
c814d5cf0a7b34d73d8155e508a0cf4f334af199
|
[
"MIT"
] | null | null | null |
src/home/models.py
|
pandeydivesh15/item_sharing_portal
|
c814d5cf0a7b34d73d8155e508a0cf4f334af199
|
[
"MIT"
] | null | null | null |
from __future__ import unicode_literals
from django.db import models
# Create your models here.
class feedback_data(models.Model):
    """One user feedback submission: suggested improvements and a complaint."""
    # Free-text fields, capped at 500 characters each.
    improvements=models.CharField(max_length=500)
    complain=models.CharField(max_length=500)
| 24.777778
| 46
| 0.829596
| 31
| 223
| 5.709677
| 0.677419
| 0.169492
| 0.20339
| 0.271186
| 0.305085
| 0
| 0
| 0
| 0
| 0
| 0
| 0.029703
| 0.09417
| 223
| 8
| 47
| 27.875
| 0.846535
| 0.107623
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.4
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
f2afbe41421b41ef9d2955137b83cd24ae2cff2d
| 64
|
py
|
Python
|
djcastor/__init__.py
|
panaceya/django-castor
|
fd9398b8385670c615d4d3ef3acea83b95a6131d
|
[
"Unlicense"
] | 8
|
2015-02-04T21:57:51.000Z
|
2017-09-07T01:50:06.000Z
|
djcastor/__init__.py
|
panaceya/django-castor
|
fd9398b8385670c615d4d3ef3acea83b95a6131d
|
[
"Unlicense"
] | 1
|
2015-10-08T14:46:58.000Z
|
2015-10-09T13:57:50.000Z
|
djcastor/__init__.py
|
panaceya/django-castor
|
fd9398b8385670c615d4d3ef3acea83b95a6131d
|
[
"Unlicense"
] | 4
|
2015-10-08T12:46:38.000Z
|
2021-06-03T13:47:27.000Z
|
# -*- coding: utf-8 -*-
from djcastor.storage import CAStorage
| 16
| 38
| 0.6875
| 8
| 64
| 5.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018519
| 0.15625
| 64
| 3
| 39
| 21.333333
| 0.796296
| 0.328125
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
4b5718fcf49b865c427889b60f43292f1dcb01b3
| 592
|
py
|
Python
|
tests/test_cases.py
|
Harry-Verspagen/2is50-2019-2020-homework-assignment-1-pair-50
|
5716e63ea543bc746c8a6d8d187bef05d22c7ccf
|
[
"MIT"
] | null | null | null |
tests/test_cases.py
|
Harry-Verspagen/2is50-2019-2020-homework-assignment-1-pair-50
|
5716e63ea543bc746c8a6d8d187bef05d22c7ccf
|
[
"MIT"
] | null | null | null |
tests/test_cases.py
|
Harry-Verspagen/2is50-2019-2020-homework-assignment-1-pair-50
|
5716e63ea543bc746c8a6d8d187bef05d22c7ccf
|
[
"MIT"
] | null | null | null |
"""Unit tests for the Mandelbrot software.
Author: Tom Verhoeff
Copyright (c) 2020 - Eindhoven University of Technology, The Netherlands
This software is made available under the terms of the MIT License.
* Contributor 1: ...
* TU/e ID number 1: ...
* Contributor 2: ...
* TU/e ID number 2: ...
* Date: ...
This software is made available under the terms of the MIT License.
"""
import mandel
# TODO: Provide (at least) four test cases (import what you need from
def test_dummy():
    """Placeholder test: a fresh mandel run yields an empty list.

    Replace with real test cases.
    """
    result = mandel.generate_mandel_nums()
    assert result == []
| 22.769231
| 72
| 0.695946
| 86
| 592
| 4.755814
| 0.674419
| 0.05868
| 0.06846
| 0.08802
| 0.268949
| 0.268949
| 0.268949
| 0.268949
| 0.268949
| 0.268949
| 0
| 0.016807
| 0.195946
| 592
| 25
| 73
| 23.68
| 0.842437
| 0.829392
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.04
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
4b6469ed242fe12c0965295dc2f79d0eb99a5e89
| 9,441
|
py
|
Python
|
server/src/shared_helpers/tests/services_tests.py
|
dashhudson/go-links
|
b0bff29be1d438db6fbbba527959f65a9382fa54
|
[
"Apache-2.0"
] | 176
|
2019-07-20T00:16:40.000Z
|
2022-03-29T08:44:11.000Z
|
server/src/shared_helpers/tests/services_tests.py
|
dashhudson/go-links
|
b0bff29be1d438db6fbbba527959f65a9382fa54
|
[
"Apache-2.0"
] | 45
|
2019-08-18T17:03:57.000Z
|
2022-03-21T14:47:43.000Z
|
server/src/shared_helpers/tests/services_tests.py
|
dashhudson/go-links
|
b0bff29be1d438db6fbbba527959f65a9382fa54
|
[
"Apache-2.0"
] | 44
|
2019-07-22T07:26:55.000Z
|
2022-03-30T19:55:39.000Z
|
import datetime
import unittest
from flask import Blueprint, request, jsonify
from freezegun import freeze_time
from mock import Mock, patch
import jwt
from requests.exceptions import HTTPError
from shared_helpers import services
from testing import TrottoTestCase, LIVE_APP_HOST
class TestFunctions(unittest.TestCase):
    """Unit tests for shared_helpers.services: internal-token creation,
    the authenticated GET helper, and internal-request validation."""

    @patch('shared_helpers.services.get_service_config', return_value={'signing_secret': 'so_secret'})
    def test__create_internal_token(self, mock_get_service_config):
        # Freeze the clock so the token's 'exp' claim is deterministic.
        now = datetime.datetime.now(datetime.timezone.utc)
        with freeze_time(now):
            token = services._create_internal_token('my_service', {'id': 1})
            # Token carries the payload plus a 30-second expiry.
            self.assertEqual({'exp': int(now.timestamp()) + 30,
                              'id': 1},
                             jwt.decode(token, 'so_secret', algorithms=['HS256']))
        # 40 seconds later the 30-second token must be rejected as expired.
        with freeze_time(now + datetime.timedelta(seconds=40)):
            with self.assertRaises(jwt.exceptions.ExpiredSignatureError):
                jwt.decode(token, 'so_secret', algorithms=['HS256'])
        mock_get_service_config.assert_called_once_with('my_service')

    @patch('shared_helpers.services.requests.get')
    @patch('shared_helpers.services._create_internal_token', return_value='internal_token')
    @patch('shared_helpers.services.get_service_config', return_value={'base_url': 'https://trot.to'})
    def test_get__basic(self, mock_get_service_config, mock_create_internal_token, mock_requests_get):
        mock_response = Mock()
        mock_response.json.return_value = {'id': 1}
        mock_requests_get.return_value = mock_response
        self.assertEqual({'id': 1},
                         services.get('my_service', 'api/users'))
        mock_get_service_config.assert_called_once_with('my_service')
        # Token is minted for the fully-resolved URL, then sent as X-Token.
        mock_create_internal_token.assert_called_once_with('my_service', {'url': 'https://trot.to/api/users'})
        mock_requests_get.assert_called_once_with('https://trot.to/api/users',
                                                  headers={'X-Token': 'internal_token'})

    @patch('shared_helpers.services.requests.get')
    @patch('shared_helpers.services._create_internal_token', return_value='internal_token')
    @patch('shared_helpers.services.get_service_config', return_value={'base_url': 'https://trot.to/'})
    def test_get__trailing_and_leading_slashes(self,
                                               mock_get_service_config, mock_create_internal_token, mock_requests_get):
        # base_url ends with '/' and the path starts with '/': the joined URL
        # must still contain a single slash.
        mock_response = Mock()
        mock_response.json.return_value = {'id': 1}
        mock_requests_get.return_value = mock_response
        self.assertEqual({'id': 1},
                         services.get('my_service', '/api/users'))
        mock_get_service_config.assert_called_once_with('my_service')
        mock_create_internal_token.assert_called_once_with('my_service', {'url': 'https://trot.to/api/users'})
        mock_requests_get.assert_called_once_with('https://trot.to/api/users',
                                                  headers={'X-Token': 'internal_token'})

    @patch('shared_helpers.services.requests.get')
    @patch('shared_helpers.services._create_internal_token', return_value='internal_token')
    @patch('shared_helpers.services.get_service_config', return_value={'base_url': 'https://trot.to'})
    def test_get__http_error(self, mock_get_service_config, mock_create_internal_token, mock_requests_get):
        # An HTTP error status propagates as requests' HTTPError.
        mock_response = Mock()
        mock_response.raise_for_status.side_effect = HTTPError
        mock_requests_get.return_value = mock_response
        with self.assertRaises(HTTPError):
            services.get('my_service', 'api/users')
        mock_get_service_config.assert_called_once_with('my_service')
        mock_create_internal_token.assert_called_once_with('my_service', {'url': 'https://trot.to/api/users'})
        mock_requests_get.assert_called_once_with('https://trot.to/api/users',
                                                  headers={'X-Token': 'internal_token'})

    def test_validate_internal_request__no_token(self):
        # Missing X-Token header is rejected outright.
        mock_request = Mock()
        mock_request.headers = {}
        with self.assertRaises(services.InvalidInternalToken) as cm:
            services.validate_internal_request(mock_request)
        self.assertEqual('no token',
                         str(cm.exception))

    @patch('shared_helpers.services.get_config_by_key_path', return_value='so_secret')
    def test_validate_internal_request__invalid_signature__wrong_secret(self, mock_get_config_by_key_path):
        # Token signed with a different secret than the configured one.
        token = jwt.encode({'exp': datetime.datetime.utcnow() + datetime.timedelta(seconds=30),
                            'url': 'https://trot.to/api/users'},
                           'a_secret',
                           algorithm='HS256')
        mock_request = Mock()
        mock_request.headers = {'X-Token': token}
        mock_request.url = 'https://trot.to/api/users'
        with self.assertRaises(services.InvalidInternalToken) as cm:
            services.validate_internal_request(mock_request)
        self.assertEqual('invalid signature',
                         str(cm.exception))
        mock_get_config_by_key_path.assert_called_once_with(['signing_secret'])

    @patch('shared_helpers.services.get_config_by_key_path', return_value='so_secret')
    def test_validate_internal_request__invalid_signature__no_exp(self, mock_get_config_by_key_path):
        # Tokens without an 'exp' claim are not acceptable.
        token = jwt.encode({'url': 'https://trot.to/api/users'},
                           'so_secret',
                           algorithm='HS256')
        mock_request = Mock()
        mock_request.headers = {'X-Token': token}
        mock_request.url = 'https://trot.to/api/users'
        with self.assertRaises(services.InvalidInternalToken) as cm:
            services.validate_internal_request(mock_request)
        self.assertEqual('missing exp',
                         str(cm.exception))
        mock_get_config_by_key_path.assert_called_once_with(['signing_secret'])

    @patch('shared_helpers.services.get_config_by_key_path', return_value='so_secret')
    def test_validate_internal_request__expired_token(self, mock_get_config_by_key_path):
        # 'exp' one second in the past -> rejected as expired.
        token = jwt.encode({'exp': datetime.datetime.utcnow() - datetime.timedelta(seconds=1),
                            'url': 'https://trot.to/api/users'},
                           'so_secret',
                           algorithm='HS256')
        mock_request = Mock()
        mock_request.headers = {'X-Token': token}
        mock_request.url = 'https://trot.to/api/users'
        with self.assertRaises(services.InvalidInternalToken) as cm:
            services.validate_internal_request(mock_request)
        self.assertEqual('expired',
                         str(cm.exception))
        mock_get_config_by_key_path.assert_called_once_with(['signing_secret'])

    @patch('shared_helpers.services.get_config_by_key_path', return_value='so_secret')
    def test_validate_internal_request__mismatched_url(self, mock_get_config_by_key_path):
        # Token bound to a different URL than the one being requested.
        token = jwt.encode({'exp': datetime.datetime.utcnow() + datetime.timedelta(seconds=30),
                            'url': 'https://trot.to/api/users/1'},
                           'so_secret',
                           algorithm='HS256')
        mock_request = Mock()
        mock_request.headers = {'X-Token': token}
        mock_request.url = 'https://trot.to/api/users'
        with self.assertRaises(services.InvalidInternalToken) as cm:
            services.validate_internal_request(mock_request)
        self.assertEqual('mismatched URL',
                         str(cm.exception))
        mock_get_config_by_key_path.assert_called_once_with(['signing_secret'])

    @patch('shared_helpers.services.get_config_by_key_path', return_value='so_secret')
    def test_validate_internal_request__valid_token(self, mock_get_config_by_key_path):
        # Well-formed, unexpired token for the matching URL validates.
        token = jwt.encode({'exp': datetime.datetime.utcnow() + datetime.timedelta(seconds=30),
                            'url': 'https://trot.to/api/users'},
                           'so_secret',
                           algorithm='HS256')
        mock_request = Mock()
        mock_request.headers = {'X-Token': token}
        mock_request.url = 'https://trot.to/api/users'
        self.assertEqual(True,
                         services.validate_internal_request(mock_request))
        mock_get_config_by_key_path.assert_called_once_with(['signing_secret'])
# Minimal Flask blueprint exercised by the integration tests below.
routes = Blueprint('test', __name__)


@routes.route('/_/api/users', methods=['GET'])
def get_users():
    # Raises (rejecting the request) unless a valid internal token is attached.
    services.validate_internal_request(request)
    return jsonify([{'id': 1}])
class TestIntegration(TrottoTestCase):
    """End-to-end check of the internal-token handshake against a live test app
    serving the blueprint above."""

    blueprints_under_test = [routes]
    start_live_app = True
    live_app_config = {'sessions_secret': 'a_sessions_secret',
                       'signing_secret': 'so_secret',
                       'postgres': {'url': 'postgresql://admin:testing@/testing_trotto_core'}}

    # Client signs with the same secret the live app validates with -> success.
    @patch('shared_helpers.config.get_config', return_value={'services': {'my_service': {'signing_secret': 'so_secret',
                                                                                         'base_url': LIVE_APP_HOST}}})
    def test_internal_request__real_handler__valid_token(self, _):
        self.assertEqual([{'id': 1}],
                         services.get('my_service', '/_/api/users'))

    # Client signs with a mismatched secret -> server rejects with a 500.
    @patch('shared_helpers.config.get_config', return_value={'services': {'my_service': {'signing_secret': 'a_secret',
                                                                                         'base_url': LIVE_APP_HOST}}})
    def test_internal_request__real_handler__invalid_token(self, _):
        with self.assertRaises(HTTPError) as cm:
            self.assertEqual([{'id': 1}],
                             services.get('my_service', '/_/api/users'))
        self.assertEqual(500,
                         cm.exception.response.status_code)
| 41.774336
| 117
| 0.679165
| 1,138
| 9,441
| 5.247803
| 0.112478
| 0.042364
| 0.034997
| 0.037508
| 0.796551
| 0.787508
| 0.77495
| 0.756196
| 0.756196
| 0.747321
| 0
| 0.005947
| 0.198496
| 9,441
| 225
| 118
| 41.96
| 0.783269
| 0
| 0
| 0.58125
| 0
| 0
| 0.220316
| 0.07997
| 0
| 0
| 0
| 0
| 0.21875
| 1
| 0.08125
| false
| 0
| 0.05625
| 0
| 0.175
| 0.01875
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
4b6e3ca6f68c53b9dcba74eac7c29fd0505f91a2
| 134
|
py
|
Python
|
content/widgets.py
|
tedor/home-blog
|
41e6cde964b9501864925f17d496ffea1fd0e770
|
[
"BSD-3-Clause"
] | null | null | null |
content/widgets.py
|
tedor/home-blog
|
41e6cde964b9501864925f17d496ffea1fd0e770
|
[
"BSD-3-Clause"
] | null | null | null |
content/widgets.py
|
tedor/home-blog
|
41e6cde964b9501864925f17d496ffea1fd0e770
|
[
"BSD-3-Clause"
] | null | null | null |
from django import forms
class PictureByCategory(forms.Widget):
    """Form widget intended to pick a picture by category.

    NOTE(review): render() is currently a stub — it ignores all arguments and
    always returns the literal string '1234'.
    """
    def render(self, name, value, attrs=None):
        return '1234'
| 22.333333
| 46
| 0.708955
| 17
| 134
| 5.588235
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.037037
| 0.19403
| 134
| 5
| 47
| 26.8
| 0.842593
| 0
| 0
| 0
| 0
| 0
| 0.029851
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0.25
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
4b80cd273867e57efb30815b3848bbfd5d9213aa
| 86
|
py
|
Python
|
connectfour/players/__init__.py
|
amwhalen/connectfour
|
4f01bc4a94a04ae729c66c0498fe64b1ce8585f6
|
[
"MIT"
] | 1
|
2017-10-12T05:20:02.000Z
|
2017-10-12T05:20:02.000Z
|
connectfour/players/__init__.py
|
amwhalen/connectfour
|
4f01bc4a94a04ae729c66c0498fe64b1ce8585f6
|
[
"MIT"
] | null | null | null |
connectfour/players/__init__.py
|
amwhalen/connectfour
|
4f01bc4a94a04ae729c66c0498fe64b1ce8585f6
|
[
"MIT"
] | null | null | null |
import player
import cover
import random
import human
import negamax
import negamaxabp
| 14.333333
| 17
| 0.872093
| 12
| 86
| 6.25
| 0.583333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.127907
| 86
| 6
| 17
| 14.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
299cba0734b71448179d7134a71d454811554afa
| 42
|
py
|
Python
|
src/VarDACAE/train/__init__.py
|
scheng1992/Data_Assimilation
|
b4d43895229205ee2cd16b15ee20beccb33b71d6
|
[
"MIT"
] | 1
|
2021-11-25T12:46:48.000Z
|
2021-11-25T12:46:48.000Z
|
src/VarDACAE/train/__init__.py
|
bugsuse/Data_Assimilation
|
2965ccf78951df11f8686282cd6814bae18afde5
|
[
"MIT"
] | null | null | null |
src/VarDACAE/train/__init__.py
|
bugsuse/Data_Assimilation
|
2965ccf78951df11f8686282cd6814bae18afde5
|
[
"MIT"
] | 2
|
2021-03-02T13:29:34.000Z
|
2022-03-12T11:01:08.000Z
|
from VarDACAE.train.trainer import TrainAE
| 42
| 42
| 0.880952
| 6
| 42
| 6.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.071429
| 42
| 1
| 42
| 42
| 0.948718
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
29a08bd98712bafea6ba2d7c27dd32ff727a4138
| 54
|
py
|
Python
|
keras_text_cls/conf/__init__.py
|
titicaca/keras-text-cls
|
de1d70e64946cae2da06bda46b9ebace2b0b4f00
|
[
"MIT"
] | 3
|
2019-03-01T15:50:12.000Z
|
2021-05-03T15:08:10.000Z
|
keras_text_cls/conf/__init__.py
|
titicaca/keras-text-cls
|
de1d70e64946cae2da06bda46b9ebace2b0b4f00
|
[
"MIT"
] | null | null | null |
keras_text_cls/conf/__init__.py
|
titicaca/keras-text-cls
|
de1d70e64946cae2da06bda46b9ebace2b0b4f00
|
[
"MIT"
] | 1
|
2020-08-08T02:53:56.000Z
|
2020-08-08T02:53:56.000Z
|
import os
from keras_text_cls.conf.config import *
| 9
| 40
| 0.777778
| 9
| 54
| 4.444444
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 54
| 5
| 41
| 10.8
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d9e92cfc003d77da245f528d4ed2f732bfcfd1e8
| 102
|
py
|
Python
|
count_parameters.py
|
catskillsresearch/openasr20
|
b9821c4ee6a51501e81103c1d6d4db0ea8aaa31e
|
[
"Apache-2.0"
] | null | null | null |
count_parameters.py
|
catskillsresearch/openasr20
|
b9821c4ee6a51501e81103c1d6d4db0ea8aaa31e
|
[
"Apache-2.0"
] | null | null | null |
count_parameters.py
|
catskillsresearch/openasr20
|
b9821c4ee6a51501e81103c1d6d4db0ea8aaa31e
|
[
"Apache-2.0"
] | 1
|
2021-07-28T02:13:21.000Z
|
2021-07-28T02:13:21.000Z
|
def count_parameters(model):
    """Return the number of trainable scalar parameters in *model*.

    Counts the elements of every parameter tensor with requires_grad set.
    """
    total = 0
    for param in model.parameters():
        if param.requires_grad:
            total += param.numel()
    return total
| 34
| 72
| 0.745098
| 17
| 102
| 4.352941
| 0.764706
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.137255
| 102
| 2
| 73
| 51
| 0.840909
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
d9fec8e045e09c2cabe41a6ab918ef9b101e704e
| 234
|
py
|
Python
|
widgets/AboutWidget.py
|
ICE-WR/chia-tools
|
2428bc7a0c8e4ca2c7ab24358b9b2d81400dc4ae
|
[
"Apache-2.0"
] | 6
|
2021-07-01T21:30:44.000Z
|
2022-03-25T01:35:41.000Z
|
widgets/AboutWidget.py
|
ICE-WR/chia-tools
|
2428bc7a0c8e4ca2c7ab24358b9b2d81400dc4ae
|
[
"Apache-2.0"
] | 1
|
2021-07-06T14:05:40.000Z
|
2021-07-06T14:05:40.000Z
|
widgets/AboutWidget.py
|
ICE-WR/chia-tools
|
2428bc7a0c8e4ca2c7ab24358b9b2d81400dc4ae
|
[
"Apache-2.0"
] | 3
|
2021-05-07T10:01:18.000Z
|
2021-05-21T08:38:45.000Z
|
from PyQt5.QtWidgets import QWidget
from ui.AboutWidget import Ui_AboutWidget
class AboutWidget(QWidget, Ui_AboutWidget):
    """'About' dialog widget: a QWidget wired to the Designer-generated
    Ui_AboutWidget layout."""

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        # Build the generated widget tree onto this instance.
        self.setupUi(self)
| 26
| 43
| 0.717949
| 28
| 234
| 5.642857
| 0.535714
| 0.246835
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.005155
| 0.17094
| 234
| 8
| 44
| 29.25
| 0.809278
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8a2fa7ea0d2aad5585f2dfd52d50ee66604e548e
| 107
|
py
|
Python
|
tests/SampleApps/python/django-react-boilerplate/exampleapp/models.py
|
samruddhikhandale/Oryx
|
9031b36c02967bb4000645950680572a8a76fa56
|
[
"MIT"
] | 403
|
2019-05-07T23:40:45.000Z
|
2022-03-31T11:14:07.000Z
|
tests/SampleApps/python/django-react-boilerplate/exampleapp/models.py
|
samruddhikhandale/Oryx
|
9031b36c02967bb4000645950680572a8a76fa56
|
[
"MIT"
] | 514
|
2019-05-07T17:00:14.000Z
|
2022-03-31T20:09:16.000Z
|
tests/SampleApps/python/django-react-boilerplate/exampleapp/models.py
|
samruddhikhandale/Oryx
|
9031b36c02967bb4000645950680572a8a76fa56
|
[
"MIT"
] | 108
|
2019-05-07T23:40:47.000Z
|
2022-03-30T00:15:19.000Z
|
from __future__ import unicode_literals
from django.db import models # noqa
# Create your models here.
| 15.285714
| 39
| 0.785047
| 15
| 107
| 5.266667
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.17757
| 107
| 6
| 40
| 17.833333
| 0.897727
| 0.271028
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8a5fa878ea718d44f27e4107399204fe4911fbff
| 178
|
py
|
Python
|
217. Contains Duplicate.py
|
rohitpatwa/leetcode
|
f4826763e8f154cac9134d53b154b8299acd39a8
|
[
"Xnet",
"X11",
"CECILL-B"
] | 1
|
2020-07-15T20:48:27.000Z
|
2020-07-15T20:48:27.000Z
|
217. Contains Duplicate.py
|
rohitpatwa/leetcode
|
f4826763e8f154cac9134d53b154b8299acd39a8
|
[
"Xnet",
"X11",
"CECILL-B"
] | null | null | null |
217. Contains Duplicate.py
|
rohitpatwa/leetcode
|
f4826763e8f154cac9134d53b154b8299acd39a8
|
[
"Xnet",
"X11",
"CECILL-B"
] | null | null | null |
# Create a set and check if element already exists.
class Solution:
def containsDuplicate(self, nums: List[int]) -> bool:
return len(nums) != len(set(nums))
| 29.666667
| 57
| 0.646067
| 24
| 178
| 4.791667
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.241573
| 178
| 6
| 58
| 29.666667
| 0.851852
| 0.275281
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
8a649d8f87bba929b19d0b53964b84da55f854d0
| 65
|
py
|
Python
|
piroq/command.py
|
christophevg/piroq
|
c87fdf398ae8d8172d720991e044950665d73d9d
|
[
"MIT"
] | 1
|
2020-12-11T01:10:57.000Z
|
2020-12-11T01:10:57.000Z
|
piroq/command.py
|
christophevg/piroq
|
c87fdf398ae8d8172d720991e044950665d73d9d
|
[
"MIT"
] | 1
|
2021-06-01T22:50:17.000Z
|
2021-06-01T22:50:17.000Z
|
piroq/command.py
|
christophevg/piroq
|
c87fdf398ae8d8172d720991e044950665d73d9d
|
[
"MIT"
] | 1
|
2021-05-03T01:40:26.000Z
|
2021-05-03T01:40:26.000Z
|
from piroq.service import Manager
def main():
Manager().run()
| 13
| 33
| 0.707692
| 9
| 65
| 5.111111
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 65
| 4
| 34
| 16.25
| 0.836364
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
8a880400b864d3bb276a41dc4bb2517314d31d08
| 6,796
|
py
|
Python
|
conflowgen/tests/container_flow_data_generation_process/test_truck_for_export_containers_manager.py
|
bbargstaedt/conflowgen
|
b5b5c0e2df8a605d23ef467aaa3e88aa463a34ee
|
[
"MIT"
] | null | null | null |
conflowgen/tests/container_flow_data_generation_process/test_truck_for_export_containers_manager.py
|
bbargstaedt/conflowgen
|
b5b5c0e2df8a605d23ef467aaa3e88aa463a34ee
|
[
"MIT"
] | null | null | null |
conflowgen/tests/container_flow_data_generation_process/test_truck_for_export_containers_manager.py
|
bbargstaedt/conflowgen
|
b5b5c0e2df8a605d23ef467aaa3e88aa463a34ee
|
[
"MIT"
] | null | null | null |
import datetime
import unittest
from collections import Counter
import matplotlib.pyplot as plt
import seaborn as sns
from conflowgen.domain_models.distribution_models.truck_arrival_distribution import TruckArrivalDistribution
from conflowgen.domain_models.distribution_seeders import truck_arrival_distribution_seeder
from conflowgen.container_flow_data_generation_process.truck_for_export_containers_manager import \
TruckForExportContainersManager
from conflowgen.tests.substitute_peewee_database import setup_sqlite_in_memory_db
class TestTruckForExportContainersManager(unittest.TestCase):
def setUp(self) -> None:
"""Create container database in memory"""
sqlite_db = setup_sqlite_in_memory_db()
sqlite_db.create_tables([
TruckArrivalDistribution
])
truck_arrival_distribution_seeder.seed()
# Enables visualisation, helpful for probability distributions
# However, this blocks the execution of tests.
self.debug = False
self.manager = TruckForExportContainersManager()
self.manager.reload_distribution(
minimum_dwell_time_in_hours=3, # after ship arrival, at least 3h pass
maximum_dwell_time_in_hours=(3 * 24) # 3 days after ship arrival the container must have left the yard
)
def test_delivery_time_in_required_time_range_weekday(self):
container_departure_time = datetime.datetime(
year=2021, month=7, day=30, hour=11, minute=55
)
earliest_container_delivery = datetime.datetime(
year=2021, month=7, day=27, hour=11, minute=55
)
delivery_times = []
for i in range(1000):
delivery_time = self.manager._get_container_delivery_time(container_departure_time)
self.assertGreaterEqual(delivery_time, earliest_container_delivery,
"container must not arrive earlier than three days before export, "
f"but here we had {delivery_time} in round {i + 1}")
self.assertLessEqual(delivery_time, container_departure_time,
"container must not arrive later than their departure time "
f"but here we had {delivery_time} in round {i + 1}")
self.assertTrue(delivery_time.weekday() != 6,
f"containers do not arrive on Sundays, but here we had {delivery_time} in round {i + 1}")
delivery_times.append(delivery_time)
if self.debug:
sns.kdeplot(delivery_times, bw=0.01)
plt.show(block=True)
def test_delivery_time_in_required_time_range_with_sunday(self):
container_departure_time = datetime.datetime(
year=2021, month=8, day=2, hour=11, minute=30 # 11:30 -3h dwell time = 08:30 latest arrival
)
earliest_container_delivery = datetime.datetime(
year=2021, month=7, day=30, hour=11, minute=30
)
delivery_times = []
for i in range(1000):
delivery_time = self.manager._get_container_delivery_time(container_departure_time)
delivery_times.append(delivery_time)
self.assertGreaterEqual(delivery_time, earliest_container_delivery,
"container must not arrive earlier than three days before export, "
f"but here we had {delivery_time} in round {i + 1}")
self.assertLessEqual(delivery_time, container_departure_time,
"container must not arrive later than their departure time "
f"but here we had {delivery_time} in round {i + 1}")
self.assertTrue(delivery_time.weekday() != 6,
f"containers do not arrive on Sundays, but here we had {delivery_time} in round {i + 1}")
weekday_counter = Counter([delivery_time.weekday() for delivery_time in delivery_times])
self.assertIn(4, weekday_counter.keys(), "Probability (out of 1000 repetitions): "
"At least once a Friday must be counted (30.07.2021)")
self.assertIn(5, weekday_counter.keys(), "Probability (out of 1000 repetitions): "
"At least once a Saturday must be counted (31.07.2021)")
self.assertIn(0, weekday_counter.keys(), "Probability (out of 1000 repetitions): "
"At least once a Monday must be counted (02.08.2021)")
if self.debug:
sns.kdeplot(delivery_times, bw=0.01)
plt.show(block=True)
def test_delivery_time_in_required_time_range_with_sunday_and_at_different_day_times(self):
container_departure_time = datetime.datetime(
year=2021, month=8, day=2, hour=11, minute=2
)
earliest_container_delivery = datetime.datetime(
year=2021, month=7, day=30, hour=5, minute=0
)
delivery_times = []
for i in range(1000):
delivery_time = self.manager._get_container_delivery_time(container_departure_time)
delivery_times.append(delivery_time)
self.assertGreaterEqual(delivery_time, earliest_container_delivery,
"container must not arrive earlier than three days before export, "
f"but here we had {delivery_time} in round {i + 1}")
self.assertLessEqual(delivery_time, container_departure_time,
"container must not arrive later than their departure time "
f"but here we had {delivery_time} in round {i + 1}")
self.assertNotEqual(delivery_time.weekday(), 6,
f"containers do not arrive on Sundays, "
f"but here we had {delivery_time} in round {i + 1}")
weekday_counter = Counter([delivery_time.weekday() for delivery_time in delivery_times])
self.assertIn(4, weekday_counter.keys(), "Probability (out of 1000 repetitions): "
"At least once a Friday must be counted (30.07.2021)")
self.assertIn(5, weekday_counter.keys(), "Probability (out of 1000 repetitions): "
"At least once a Saturday must be counted (31.07.2021)")
self.assertIn(0, weekday_counter.keys(), "Probability (out of 1000 repetitions): "
"At least once a Monday must be counted (02.08.2021)")
if self.debug:
sns.kdeplot(delivery_times, bw=0.01)
plt.show(block=True)
| 54.806452
| 117
| 0.621689
| 788
| 6,796
| 5.159898
| 0.201777
| 0.100344
| 0.048205
| 0.026562
| 0.769798
| 0.724791
| 0.724791
| 0.724791
| 0.715445
| 0.709051
| 0
| 0.039577
| 0.304738
| 6,796
| 123
| 118
| 55.252033
| 0.820952
| 0.042231
| 0
| 0.615385
| 0
| 0.019231
| 0.224034
| 0
| 0
| 0
| 0
| 0
| 0.144231
| 1
| 0.038462
| false
| 0
| 0.086538
| 0
| 0.134615
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
8a978d85947416d2a882d1811b20f816fe84fe71
| 41
|
py
|
Python
|
modules/2.79/bpy/types/NodeSocketFloatPercentage.py
|
cmbasnett/fake-bpy-module
|
acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55
|
[
"MIT"
] | null | null | null |
modules/2.79/bpy/types/NodeSocketFloatPercentage.py
|
cmbasnett/fake-bpy-module
|
acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55
|
[
"MIT"
] | null | null | null |
modules/2.79/bpy/types/NodeSocketFloatPercentage.py
|
cmbasnett/fake-bpy-module
|
acb8b0f102751a9563e5b5e5c7cd69a4e8aa2a55
|
[
"MIT"
] | null | null | null |
NodeSocketFloatPercentage.links = None
| 10.25
| 38
| 0.829268
| 3
| 41
| 11.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.121951
| 41
| 3
| 39
| 13.666667
| 0.944444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
8a99aca6fd7174155b587dbaeb76739c8a0d3ec0
| 1,441
|
py
|
Python
|
sdk/python/pulumi_azure_nextgen/media/latest/__init__.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/media/latest/__init__.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
sdk/python/pulumi_azure_nextgen/media/latest/__init__.py
|
test-wiz-sec/pulumi-azure-nextgen
|
20a695af0d020b34b0f1c336e1b69702755174cc
|
[
"Apache-2.0"
] | null | null | null |
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
# Export this package's modules as members:
from .account_filter import *
from .asset import *
from .asset_filter import *
from .content_key_policy import *
from .get_account_filter import *
from .get_asset import *
from .get_asset_encryption_key import *
from .get_asset_filter import *
from .get_content_key_policy import *
from .get_content_key_policy_properties_with_secrets import *
from .get_job import *
from .get_live_event import *
from .get_live_output import *
from .get_media_service import *
from .get_private_endpoint_connection import *
from .get_streaming_endpoint import *
from .get_streaming_locator import *
from .get_streaming_policy import *
from .get_transform import *
from .job import *
from .list_asset_container_sas import *
from .list_asset_streaming_locators import *
from .list_media_service_edge_policies import *
from .list_media_service_keys import *
from .list_streaming_locator_content_keys import *
from .list_streaming_locator_paths import *
from .live_event import *
from .live_output import *
from .media_service import *
from .private_endpoint_connection import *
from .streaming_endpoint import *
from .streaming_locator import *
from .streaming_policy import *
from .transform import *
from ._inputs import *
from . import outputs
| 34.309524
| 80
| 0.806384
| 209
| 1,441
| 5.239234
| 0.315789
| 0.319635
| 0.178082
| 0.052055
| 0.252968
| 0.141553
| 0
| 0
| 0
| 0
| 0
| 0.000796
| 0.127689
| 1,441
| 41
| 81
| 35.146341
| 0.870326
| 0.140874
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8ab6ed666cc2af4dc0b8cc0befb6cf6d30a561c6
| 579
|
py
|
Python
|
internal/images.py
|
yukimyg/black_hole_image
|
58e99bc120c0e8c2fec5a72a6fa0f88ec8de86db
|
[
"MIT"
] | 1
|
2022-02-21T01:22:59.000Z
|
2022-02-21T01:22:59.000Z
|
internal/images.py
|
yukimyg/black-hole-image
|
58e99bc120c0e8c2fec5a72a6fa0f88ec8de86db
|
[
"MIT"
] | 6
|
2022-02-16T03:17:29.000Z
|
2022-02-20T06:57:52.000Z
|
internal/images.py
|
yukimyg/black-hole-image
|
58e99bc120c0e8c2fec5a72a6fa0f88ec8de86db
|
[
"MIT"
] | null | null | null |
import dataclasses
@dataclasses.dataclass(frozen=True)
class Sd:
height: int = 480
width: int = 640
@dataclasses.dataclass(frozen=True)
class Hd:
height: int = 720
width: int = 1280
@dataclasses.dataclass(frozen=True)
class Fhd:
height: int = 1080
width: int = 1920
@dataclasses.dataclass(frozen=True)
class Qhd:
height: int = 1440
width: int = 2560
@dataclasses.dataclass(frozen=True)
class Uhd:
height: int = 2160
width: int = 3840
@dataclasses.dataclass(frozen=True)
class Fuhd:
height: int = 4320
width: int = 7680
| 15.236842
| 35
| 0.675302
| 74
| 579
| 5.283784
| 0.364865
| 0.306905
| 0.398977
| 0.460358
| 0.537084
| 0
| 0
| 0
| 0
| 0
| 0
| 0.099778
| 0.221071
| 579
| 37
| 36
| 15.648649
| 0.767184
| 0
| 0
| 0.24
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.04
| 0
| 0.76
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
8ad0ee5fe99b13ddc7e5cfe0e107b94bd4080f75
| 110
|
py
|
Python
|
kalliope/core/ConfigurationManager/__init__.py
|
G10DRAS/kalliope
|
4c6586bd4c5ff0ca2b51cbf02f042d9ed0c9742d
|
[
"MIT"
] | null | null | null |
kalliope/core/ConfigurationManager/__init__.py
|
G10DRAS/kalliope
|
4c6586bd4c5ff0ca2b51cbf02f042d9ed0c9742d
|
[
"MIT"
] | null | null | null |
kalliope/core/ConfigurationManager/__init__.py
|
G10DRAS/kalliope
|
4c6586bd4c5ff0ca2b51cbf02f042d9ed0c9742d
|
[
"MIT"
] | null | null | null |
from YAMLLoader import YAMLLoader
from SettingLoader import SettingLoader
from BrainLoader import BrainLoader
| 27.5
| 39
| 0.890909
| 12
| 110
| 8.166667
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.109091
| 110
| 3
| 40
| 36.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
76d3c2ff9cbe0a61898b5499169e04fc853812c0
| 80
|
py
|
Python
|
src/utils/__init__.py
|
mushroom-x/gamepad_python
|
fa1893c6f094521f769d8945ced699f39f102dbf
|
[
"MIT"
] | 2
|
2022-02-11T03:14:01.000Z
|
2022-02-11T03:17:42.000Z
|
src/utils/__init__.py
|
JACKDONG-blue/gamepad_python
|
22dc9f537bbee584f37eb3693ae81148a5d29c6a
|
[
"MIT"
] | 1
|
2022-02-10T18:49:25.000Z
|
2022-02-10T18:49:25.000Z
|
src/utils/__init__.py
|
JACKDONG-blue/gamepad_python
|
22dc9f537bbee584f37eb3693ae81148a5d29c6a
|
[
"MIT"
] | 1
|
2022-02-11T02:54:10.000Z
|
2022-02-11T02:54:10.000Z
|
from .thread import KillableThread
from .logger_interface import LoggerInterface
| 40
| 45
| 0.8875
| 9
| 80
| 7.777778
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0875
| 80
| 2
| 45
| 40
| 0.958904
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
76f6f878d497f2e2a6d41fce6f2d684f78247318
| 1,005
|
py
|
Python
|
tests/port_tests/boolean_tests/operation_tests/test_to_next_position.py
|
skrat/martinez
|
86db48324cb50ecb52be8ab2e4278a6d5cdd562b
|
[
"MIT"
] | 7
|
2020-05-07T08:13:44.000Z
|
2021-12-17T07:33:51.000Z
|
tests/port_tests/boolean_tests/operation_tests/test_to_next_position.py
|
skrat/martinez
|
86db48324cb50ecb52be8ab2e4278a6d5cdd562b
|
[
"MIT"
] | 17
|
2019-11-29T23:17:26.000Z
|
2020-12-20T15:47:17.000Z
|
tests/port_tests/boolean_tests/operation_tests/test_to_next_position.py
|
skrat/martinez
|
86db48324cb50ecb52be8ab2e4278a6d5cdd562b
|
[
"MIT"
] | 1
|
2020-12-17T22:44:21.000Z
|
2020-12-17T22:44:21.000Z
|
from typing import (List,
Tuple)
from hypothesis import given
from tests.port_tests.hints import (PortedOperation,
PortedSweepEvent)
from . import strategies
@given(strategies.non_empty_sweep_events_lists_with_indices_and_booleans_lists)
def test_basic(events_with_position_and_processed
: Tuple[List[PortedSweepEvent], int, List[bool]]) -> None:
events, position, processed = events_with_position_and_processed
result = PortedOperation.to_next_position(position, events, processed)
assert isinstance(result, int)
@given(strategies.non_empty_sweep_events_lists_with_indices_and_booleans_lists)
def test_properties(events_with_position_and_processed
: Tuple[List[PortedSweepEvent], int, List[bool]]) -> None:
events, position, processed = events_with_position_and_processed
result = PortedOperation.to_next_position(position, events, processed)
assert result in range(len(events))
| 34.655172
| 79
| 0.743284
| 116
| 1,005
| 6.086207
| 0.336207
| 0.056657
| 0.101983
| 0.11898
| 0.736544
| 0.736544
| 0.736544
| 0.736544
| 0.736544
| 0.736544
| 0
| 0
| 0.187065
| 1,005
| 28
| 80
| 35.892857
| 0.864137
| 0
| 0
| 0.444444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 1
| 0.111111
| false
| 0
| 0.222222
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0a1bbe66aa30c343fc9375adfe2f156262e8e0d4
| 57
|
py
|
Python
|
CodeWars/8 Kyu/Grasshopper - Debug sayHello.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
CodeWars/8 Kyu/Grasshopper - Debug sayHello.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
CodeWars/8 Kyu/Grasshopper - Debug sayHello.py
|
anubhab-code/Competitive-Programming
|
de28cb7d44044b9e7d8bdb475da61e37c018ac35
|
[
"MIT"
] | null | null | null |
def say_hello(name):
return "Hello, {0}".format(name)
| 28.5
| 36
| 0.666667
| 9
| 57
| 4.111111
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.020408
| 0.140351
| 57
| 2
| 36
| 28.5
| 0.734694
| 0
| 0
| 0
| 0
| 0
| 0.172414
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
0a31a2d814340698ffadffedf90d8edb861704f9
| 158
|
py
|
Python
|
Bronze/Bronze_III/2444.py
|
masterTyper/baekjoon_solved_ac
|
b9ce14d9bdaa5b5b06735ad075fb827de9f44b9c
|
[
"MIT"
] | null | null | null |
Bronze/Bronze_III/2444.py
|
masterTyper/baekjoon_solved_ac
|
b9ce14d9bdaa5b5b06735ad075fb827de9f44b9c
|
[
"MIT"
] | null | null | null |
Bronze/Bronze_III/2444.py
|
masterTyper/baekjoon_solved_ac
|
b9ce14d9bdaa5b5b06735ad075fb827de9f44b9c
|
[
"MIT"
] | null | null | null |
N = int(input())
for i in range(1, N):
print(' ' * (N - i) + '*' * ((i * 2) - 1))
for i in range(N, 0, -1):
print(' ' * (N - i) + '*' * ((i * 2) - 1))
| 31.6
| 46
| 0.35443
| 28
| 158
| 2
| 0.392857
| 0.142857
| 0.214286
| 0.392857
| 0.357143
| 0.357143
| 0
| 0
| 0
| 0
| 0
| 0.06422
| 0.310127
| 158
| 5
| 47
| 31.6
| 0.449541
| 0
| 0
| 0.4
| 0
| 0
| 0.025157
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.4
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6a621b8c75c0e3da85b2b6a03eece610c8094bf2
| 34,314
|
py
|
Python
|
python/paddle/fluid/tests/unittests/test_imperative_save_load.py
|
Sand3r-/Paddle
|
1217a521554d63caa1381b8716910d0268dfc22d
|
[
"Apache-2.0"
] | 2
|
2017-05-15T06:52:18.000Z
|
2017-06-13T11:55:11.000Z
|
python/paddle/fluid/tests/unittests/test_imperative_save_load.py
|
Sand3r-/Paddle
|
1217a521554d63caa1381b8716910d0268dfc22d
|
[
"Apache-2.0"
] | null | null | null |
python/paddle/fluid/tests/unittests/test_imperative_save_load.py
|
Sand3r-/Paddle
|
1217a521554d63caa1381b8716910d0268dfc22d
|
[
"Apache-2.0"
] | null | null | null |
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import os
import unittest
import paddle.fluid as fluid
import paddle.fluid.core as core
from paddle.fluid.dygraph.nn import Embedding, Linear
import paddle.fluid.framework as framework
from paddle.fluid.optimizer import Adam
from paddle.fluid.dygraph.base import to_variable
from paddle.fluid.dygraph.learning_rate_scheduler import LearningRateDecay
from test_imperative_base import new_program_scope
import numpy as np
import six
class SimpleLSTMRNN(fluid.Layer):
def __init__(self,
hidden_size,
num_steps,
num_layers=2,
init_scale=0.1,
dropout=None):
super(SimpleLSTMRNN, self).__init__()
self._hidden_size = hidden_size
self._num_layers = num_layers
self._init_scale = init_scale
self._dropout = dropout
self._input = None
self._num_steps = num_steps
self.cell_array = []
self.hidden_array = []
self.weight_1_arr = []
self.weight_2_arr = []
self.bias_arr = []
self.mask_array = []
for i in range(self._num_layers):
weight_1 = self.create_parameter(
attr=fluid.ParamAttr(
initializer=fluid.initializer.UniformInitializer(
low=-self._init_scale, high=self._init_scale)),
shape=[self._hidden_size * 2, self._hidden_size * 4],
dtype="float32",
default_initializer=fluid.initializer.UniformInitializer(
low=-self._init_scale, high=self._init_scale))
self.weight_1_arr.append(self.add_parameter('w_%d' % i, weight_1))
bias_1 = self.create_parameter(
attr=fluid.ParamAttr(
initializer=fluid.initializer.UniformInitializer(
low=-self._init_scale, high=self._init_scale)),
shape=[self._hidden_size * 4],
dtype="float32",
default_initializer=fluid.initializer.Constant(0.0))
self.bias_arr.append(self.add_parameter('b_%d' % i, bias_1))
def forward(self, input_embedding, init_hidden=None, init_cell=None):
self.cell_array = []
self.hidden_array = []
for i in range(self._num_layers):
pre_hidden = fluid.layers.slice(
init_hidden, axes=[0], starts=[i], ends=[i + 1])
pre_cell = fluid.layers.slice(
init_cell, axes=[0], starts=[i], ends=[i + 1])
pre_hidden = fluid.layers.reshape(
pre_hidden, shape=[-1, self._hidden_size])
pre_cell = fluid.layers.reshape(
pre_cell, shape=[-1, self._hidden_size])
self.hidden_array.append(pre_hidden)
self.cell_array.append(pre_cell)
res = []
for index in range(self._num_steps):
self._input = fluid.layers.slice(
input_embedding, axes=[1], starts=[index], ends=[index + 1])
self._input = fluid.layers.reshape(
self._input, shape=[-1, self._hidden_size])
for k in range(self._num_layers):
pre_hidden = self.hidden_array[k]
pre_cell = self.cell_array[k]
weight_1 = self.weight_1_arr[k]
bias = self.bias_arr[k]
nn = fluid.layers.concat([self._input, pre_hidden], 1)
gate_input = fluid.layers.matmul(x=nn, y=weight_1)
gate_input = fluid.layers.elementwise_add(gate_input, bias)
i, j, f, o = fluid.layers.split(
gate_input, num_or_sections=4, dim=-1)
c = pre_cell * fluid.layers.sigmoid(f) + fluid.layers.sigmoid(
i) * fluid.layers.tanh(j)
m = fluid.layers.tanh(c) * fluid.layers.sigmoid(o)
self.hidden_array[k] = m
self.cell_array[k] = c
self._input = m
if self._dropout is not None and self._dropout > 0.0:
self._input = fluid.layers.dropout(
self._input,
dropout_prob=self._dropout,
dropout_implementation='upscale_in_train')
res.append(
fluid.layers.reshape(
self._input, shape=[1, -1, self._hidden_size]))
real_res = fluid.layers.concat(res, 0)
real_res = fluid.layers.transpose(x=real_res, perm=[1, 0, 2])
last_hidden = fluid.layers.concat(self.hidden_array, 1)
last_hidden = fluid.layers.reshape(
last_hidden, shape=[-1, self._num_layers, self._hidden_size])
last_hidden = fluid.layers.transpose(x=last_hidden, perm=[1, 0, 2])
last_cell = fluid.layers.concat(self.cell_array, 1)
last_cell = fluid.layers.reshape(
last_cell, shape=[-1, self._num_layers, self._hidden_size])
last_cell = fluid.layers.transpose(x=last_cell, perm=[1, 0, 2])
return real_res, last_hidden, last_cell
class PtbModel(fluid.Layer):
def __init__(self,
hidden_size,
vocab_size,
num_layers=2,
num_steps=20,
init_scale=0.1,
dropout=None):
super(PtbModel, self).__init__()
self.hidden_size = hidden_size
self.vocab_size = vocab_size
self.init_scale = init_scale
self.num_layers = num_layers
self.num_steps = num_steps
self.dropout = dropout
self.simple_lstm_rnn = SimpleLSTMRNN(
hidden_size,
num_steps,
num_layers=num_layers,
init_scale=init_scale,
dropout=dropout)
self.embedding = Embedding(
size=[vocab_size, hidden_size],
dtype='float32',
is_sparse=False,
param_attr=fluid.ParamAttr(
name='embedding_para',
initializer=fluid.initializer.UniformInitializer(
low=-init_scale, high=init_scale)))
self.softmax_weight = self.create_parameter(
attr=fluid.ParamAttr(),
shape=[self.hidden_size, self.vocab_size],
dtype="float32",
default_initializer=fluid.initializer.UniformInitializer(
low=-self.init_scale, high=self.init_scale))
self.softmax_bias = self.create_parameter(
attr=fluid.ParamAttr(),
shape=[self.vocab_size],
dtype="float32",
default_initializer=fluid.initializer.UniformInitializer(
low=-self.init_scale, high=self.init_scale))
def forward(self, input, label, init_hidden, init_cell):
init_h = fluid.layers.reshape(
init_hidden, shape=[self.num_layers, -1, self.hidden_size])
init_c = fluid.layers.reshape(
init_cell, shape=[self.num_layers, -1, self.hidden_size])
x_emb = self.embedding(input)
x_emb = fluid.layers.reshape(
x_emb, shape=[-1, self.num_steps, self.hidden_size])
if self.dropout is not None and self.dropout > 0.0:
x_emb = fluid.layers.dropout(
x_emb,
dropout_prob=self.drop_out,
dropout_implementation='upscale_in_train')
rnn_out, last_hidden, last_cell = self.simple_lstm_rnn(x_emb, init_h,
init_c)
rnn_out = fluid.layers.reshape(
rnn_out, shape=[-1, self.num_steps, self.hidden_size])
projection = fluid.layers.matmul(rnn_out, self.softmax_weight)
projection = fluid.layers.elementwise_add(projection, self.softmax_bias)
projection = fluid.layers.reshape(
projection, shape=[-1, self.vocab_size])
loss = fluid.layers.softmax_with_cross_entropy(
logits=projection, label=label, soft_label=False)
loss = fluid.layers.reshape(loss, shape=[-1, self.num_steps])
loss = fluid.layers.reduce_mean(loss, dim=[0])
loss = fluid.layers.reduce_sum(loss)
return loss, last_hidden, last_cell
class TestDygraphPtbRnn(unittest.TestCase):
def setUp(self):
seed = 90
hidden_size = 10
vocab_size = 1000
num_layers = 1
num_steps = 3
init_scale = 0.1
batch_size = 4
batch_num = 200
with fluid.dygraph.guard():
fluid.default_startup_program().random_seed = seed
fluid.default_main_program().random_seed = seed
# TODO: marsyang1993 Change seed to
ptb_model = PtbModel(
hidden_size=hidden_size,
vocab_size=vocab_size,
num_layers=num_layers,
num_steps=num_steps,
init_scale=init_scale)
bd = []
lr_arr = [1.0]
# this a fake lr decay strategy
for i in range(1, 10):
bd.append(100 * i)
new_lr = 1.0
lr_arr.append(new_lr)
place = fluid.CPUPlace() if not core.is_compiled_with_cuda(
) else fluid.CUDAPlace(0)
adam = Adam(
learning_rate=fluid.layers.piecewise_decay(
boundaries=bd, values=lr_arr),
parameter_list=ptb_model.parameters())
dy_param_updated = dict()
dy_param_init = dict()
dy_loss = None
last_hidden = None
last_cell = None
for i in range(batch_num):
x_data = np.arange(12).reshape(4, 3).astype('int64')
y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
y_data = y_data.reshape((-1, 1))
init_hidden_data = np.zeros(
(num_layers, batch_size, hidden_size), dtype='float32')
init_cell_data = np.zeros(
(num_layers, batch_size, hidden_size), dtype='float32')
x = to_variable(x_data)
y = to_variable(y_data)
init_hidden = to_variable(init_hidden_data)
init_cell = to_variable(init_cell_data)
dy_loss, last_hidden, last_cell = ptb_model(x, y, init_hidden,
init_cell)
if i == 0:
for param in ptb_model.parameters():
dy_param_init[param.name] = param.numpy()
dy_loss.backward()
adam.minimize(dy_loss)
ptb_model.clear_gradients()
if i == batch_num - 1:
for param in ptb_model.parameters():
dy_param_updated[param.name] = param.numpy()
# check optimizer
self.opti_dict = adam.state_dict()
self.base_opti = {}
for k, v in self.opti_dict.items():
self.base_opti[v.name] = v.numpy()
self.assertTrue(np.sum(np.abs(v.numpy())) != 0)
fluid.save_dygraph(self.opti_dict, "./test_dy")
self.state_dict = ptb_model.state_dict()
self.model_base = {}
for k, v in self.state_dict.items():
np_t = v.numpy()
self.model_base[k] = np_t
fluid.save_dygraph(self.state_dict, "./test_dy")
def testLoadAndSetVarBase(self):
    """Round-trips optimizer and model state through fluid.load_dygraph.

    Trains a PtbModel for ``batch_num`` steps, zeroes every optimizer and
    parameter tensor in place, then restores both from the "./test_dy"
    checkpoint (presumably written by setUp — confirm against the sibling
    setUp code) and checks the restored values match the baselines captured
    in self.base_opti / self.model_base.
    """
    seed = 90
    hidden_size = 10
    vocab_size = 1000
    num_layers = 1
    num_steps = 3
    init_scale = 0.1
    batch_size = 4
    batch_num = 200
    with fluid.dygraph.guard():
        fluid.default_startup_program().random_seed = seed
        fluid.default_main_program().random_seed = seed
        # TODO: marsyang1993 Change seed to
        ptb_model = PtbModel(
            hidden_size=hidden_size,
            vocab_size=vocab_size,
            num_layers=num_layers,
            num_steps=num_steps,
            init_scale=init_scale)

        bd = []
        lr_arr = [1.0]
        # this a fake lr decay strategy: boundaries at 100, 200, ... 900
        # but every piece keeps lr == 1.0, so the decay is a no-op.
        for i in range(1, 10):
            bd.append(100 * i)
            new_lr = 1.0
            lr_arr.append(new_lr)

        place = fluid.CPUPlace() if not core.is_compiled_with_cuda(
        ) else fluid.CUDAPlace(0)
        adam = Adam(
            learning_rate=fluid.layers.piecewise_decay(
                boundaries=bd, values=lr_arr),
            parameter_list=ptb_model.parameters())
        dy_param_updated = dict()
        dy_param_init = dict()
        dy_loss = None
        last_hidden = None
        last_cell = None

        for i in range(batch_num):
            # Fixed synthetic batch (not random) so runs are reproducible.
            x_data = np.arange(12).reshape(4, 3).astype('int64')
            y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
            y_data = y_data.reshape((-1, 1))
            init_hidden_data = np.zeros(
                (num_layers, batch_size, hidden_size), dtype='float32')
            init_cell_data = np.zeros(
                (num_layers, batch_size, hidden_size), dtype='float32')
            x = to_variable(x_data)
            y = to_variable(y_data)
            init_hidden = to_variable(init_hidden_data)
            init_cell = to_variable(init_cell_data)
            dy_loss, last_hidden, last_cell = ptb_model(x, y, init_hidden,
                                                        init_cell)
            if i == 0:
                for param in ptb_model.parameters():
                    dy_param_init[param.name] = param.numpy()
            dy_loss.backward()
            adam.minimize(dy_loss)
            ptb_model.clear_gradients()
            if i == batch_num - 1:
                for param in ptb_model.parameters():
                    dy_param_updated[param.name] = param.numpy()

        # check optimizer: zero every optimizer tensor in place so a later
        # match with the baseline can only come from the restore.
        opti_dict = adam.state_dict()
        # set to zero
        for k, v in opti_dict.items():
            np_t = v.numpy()
            var = v.value().get_tensor()
            var.set(np.zeros_like(np_t), place)

            self.assertTrue(np.sum(np.abs(v.numpy())) == 0)

        if isinstance(adam._learning_rate, LearningRateDecay):
            adam._learning_rate.step_num = 0

        para_state_dict, opti_state_dict = fluid.load_dygraph("./test_dy")
        adam.set_dict(opti_state_dict)

        opti_dict = adam.state_dict()
        for k, v in opti_dict.items():
            self.assertTrue(
                np.array_equal(v.numpy(), self.base_opti[v.name]))

        # check parameter: same zero-then-restore pattern for model weights.
        state_dict = ptb_model.state_dict()
        for k, v in state_dict.items():
            np_t = v.numpy()
            var = v.value().get_tensor()

            var.set(np.zeros_like(np_t), place)

        ptb_model.set_dict(para_state_dict)

        state_dict = ptb_model.state_dict()

        for k, v in state_dict.items():
            new_t = v.numpy()

            base_t = self.model_base[k]

            self.assertTrue(np.array_equal(new_t, base_t))
def testSetVariable(self):
    """Restores optimizer/model state from in-memory VarBase dicts.

    Same zero-then-restore flow as testLoadAndSetVarBase, but instead of
    loading from disk it restores from self.opti_dict / self.state_dict
    (presumably captured by setUp — confirm against the sibling setUp code).
    """
    seed = 90
    hidden_size = 10
    vocab_size = 1000
    num_layers = 1
    num_steps = 3
    init_scale = 0.1
    batch_size = 4
    batch_num = 200
    with fluid.dygraph.guard():
        fluid.default_startup_program().random_seed = seed
        fluid.default_main_program().random_seed = seed
        # TODO: marsyang1993 Change seed to
        ptb_model = PtbModel(
            hidden_size=hidden_size,
            vocab_size=vocab_size,
            num_layers=num_layers,
            num_steps=num_steps,
            init_scale=init_scale)

        bd = []
        lr_arr = [1.0]
        # this a fake lr decay strategy (constant 1.0 across all pieces)
        for i in range(1, 10):
            bd.append(100 * i)
            new_lr = 1.0
            lr_arr.append(new_lr)

        place = fluid.CPUPlace() if not core.is_compiled_with_cuda(
        ) else fluid.CUDAPlace(0)
        adam = Adam(
            learning_rate=fluid.layers.piecewise_decay(
                boundaries=bd, values=lr_arr),
            parameter_list=ptb_model.parameters())
        dy_param_updated = dict()
        dy_param_init = dict()
        dy_loss = None
        last_hidden = None
        last_cell = None

        for i in range(batch_num):
            # Fixed synthetic batch for reproducibility.
            x_data = np.arange(12).reshape(4, 3).astype('int64')
            y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
            y_data = y_data.reshape((-1, 1))
            init_hidden_data = np.zeros(
                (num_layers, batch_size, hidden_size), dtype='float32')
            init_cell_data = np.zeros(
                (num_layers, batch_size, hidden_size), dtype='float32')
            x = to_variable(x_data)
            y = to_variable(y_data)
            init_hidden = to_variable(init_hidden_data)
            init_cell = to_variable(init_cell_data)
            dy_loss, last_hidden, last_cell = ptb_model(x, y, init_hidden,
                                                        init_cell)
            if i == 0:
                for param in ptb_model.parameters():
                    dy_param_init[param.name] = param.numpy()
            dy_loss.backward()
            adam.minimize(dy_loss)
            ptb_model.clear_gradients()
            if i == batch_num - 1:
                for param in ptb_model.parameters():
                    dy_param_updated[param.name] = param.numpy()

        # check optimizer: zero each tensor so only the restore can bring
        # the values back to the baseline.
        opti_dict = adam.state_dict()
        # set to zero
        for k, v in opti_dict.items():
            np_t = v.numpy()
            var = v.value().get_tensor()
            var.set(np.zeros_like(np_t), place)

            self.assertTrue(np.sum(np.abs(v.numpy())) == 0)

        if isinstance(adam._learning_rate, LearningRateDecay):
            adam._learning_rate.step_num = 0

        adam.set_dict(self.opti_dict)

        opti_dict = adam.state_dict()
        for k, v in opti_dict.items():
            self.assertTrue(
                np.array_equal(v.numpy(), self.base_opti[v.name]))

        # check parameter
        state_dict = ptb_model.state_dict()
        for k, v in state_dict.items():
            np_t = v.numpy()
            var = v.value().get_tensor()

            var.set(np.zeros_like(np_t), place)

        ptb_model.set_dict(self.state_dict)

        state_dict = ptb_model.state_dict()

        for k, v in state_dict.items():
            new_t = v.numpy()

            base_t = self.model_base[k]

            self.assertTrue(np.array_equal(new_t, base_t))
def testSetNumpy(self):
    """Restores optimizer/model state from plain-numpy dicts.

    Verifies set_dict also accepts dicts of numpy arrays (not only
    VarBase): numpy copies are captured just before zeroing the live
    tensors, then fed back through set_dict and compared to the
    baselines in self.base_opti / self.model_base.
    """
    seed = 90
    hidden_size = 10
    vocab_size = 1000
    num_layers = 1
    num_steps = 3
    init_scale = 0.1
    batch_size = 4
    batch_num = 200
    with fluid.dygraph.guard():
        fluid.default_startup_program().random_seed = seed
        fluid.default_main_program().random_seed = seed
        # TODO: marsyang1993 Change seed to
        ptb_model = PtbModel(
            hidden_size=hidden_size,
            vocab_size=vocab_size,
            num_layers=num_layers,
            num_steps=num_steps,
            init_scale=init_scale)

        bd = []
        lr_arr = [1.0]
        # this a fake lr decay strategy (constant 1.0 across all pieces)
        for i in range(1, 10):
            bd.append(100 * i)
            new_lr = 1.0
            lr_arr.append(new_lr)

        place = fluid.CPUPlace() if not core.is_compiled_with_cuda(
        ) else fluid.CUDAPlace(0)
        adam = Adam(
            learning_rate=fluid.layers.piecewise_decay(
                boundaries=bd, values=lr_arr),
            parameter_list=ptb_model.parameters())
        dy_param_updated = dict()
        dy_param_init = dict()
        dy_loss = None
        last_hidden = None
        last_cell = None

        for i in range(batch_num):
            # Fixed synthetic batch for reproducibility.
            x_data = np.arange(12).reshape(4, 3).astype('int64')
            y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
            y_data = y_data.reshape((-1, 1))
            init_hidden_data = np.zeros(
                (num_layers, batch_size, hidden_size), dtype='float32')
            init_cell_data = np.zeros(
                (num_layers, batch_size, hidden_size), dtype='float32')
            x = to_variable(x_data)
            y = to_variable(y_data)
            init_hidden = to_variable(init_hidden_data)
            init_cell = to_variable(init_cell_data)
            dy_loss, last_hidden, last_cell = ptb_model(x, y, init_hidden,
                                                        init_cell)
            if i == 0:
                for param in ptb_model.parameters():
                    dy_param_init[param.name] = param.numpy()
            dy_loss.backward()
            adam.minimize(dy_loss)
            ptb_model.clear_gradients()
            if i == batch_num - 1:
                for param in ptb_model.parameters():
                    dy_param_updated[param.name] = param.numpy()

        # check optimizer: snapshot numpy copies, then zero the live tensors.
        opti_dict = adam.state_dict()
        np_opti_dict = {}
        # set to zero
        for k, v in opti_dict.items():
            np_t = v.numpy()
            np_opti_dict[v.name] = np_t
            var = v.value().get_tensor()
            var.set(np.zeros_like(np_t), place)

            self.assertTrue(np.sum(np.abs(v.numpy())) == 0)

        if isinstance(adam._learning_rate, LearningRateDecay):
            adam._learning_rate.step_num = 0

        adam.set_dict(np_opti_dict)

        opti_dict = adam.state_dict()
        for k, v in opti_dict.items():
            self.assertTrue(
                np.array_equal(v.numpy(), self.base_opti[v.name]))

        # check parameter: same snapshot/zero/restore flow for weights.
        state_dict = ptb_model.state_dict()
        np_state_dict = {}
        for k, v in state_dict.items():
            np_t = v.numpy()
            np_state_dict[k] = np_t
            var = v.value().get_tensor()

            var.set(np.zeros_like(np_t), place)

        ptb_model.set_dict(np_state_dict)

        state_dict = ptb_model.state_dict()

        for k, v in state_dict.items():
            new_t = v.numpy()

            base_t = self.model_base[k]

            self.assertTrue(np.array_equal(new_t, base_t))
def testSetVariableBeforeTrain(self):
    """Checks optimizer state evolves correctly after a pre-train restore.

    Restores state from self.opti_dict / self.state_dict BEFORE any
    training, runs a single step with lr=0.0 (so parameters do not move),
    then checks: global_step advanced by one, the beta1/beta2 power
    accumulators were multiplied by beta1/beta2, and the parameters still
    equal the baseline in self.model_base.
    """
    seed = 90
    hidden_size = 10
    vocab_size = 1000
    num_layers = 1
    num_steps = 3
    init_scale = 0.1
    batch_size = 4
    batch_num = 200
    with fluid.dygraph.guard():
        fluid.default_startup_program().random_seed = seed
        fluid.default_main_program().random_seed = seed
        # TODO: marsyang1993 Change seed to
        ptb_model = PtbModel(
            hidden_size=hidden_size,
            vocab_size=vocab_size,
            num_layers=num_layers,
            num_steps=num_steps,
            init_scale=init_scale)

        place = fluid.CPUPlace() if not core.is_compiled_with_cuda(
        ) else fluid.CUDAPlace(0)
        # lr == 0.0 so the step below cannot change parameter values.
        adam = Adam(
            learning_rate=0.0,
            beta1=0.8,
            beta2=0.6,
            parameter_list=ptb_model.parameters())
        dy_param_updated = dict()
        dy_param_init = dict()
        dy_loss = None
        last_hidden = None
        last_cell = None

        adam.set_dict(self.opti_dict)
        ptb_model.set_dict(self.state_dict)
        for i in range(1):
            x_data = np.arange(12).reshape(4, 3).astype('int64')
            y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
            y_data = y_data.reshape((-1, 1))
            init_hidden_data = np.zeros(
                (num_layers, batch_size, hidden_size), dtype='float32')
            init_cell_data = np.zeros(
                (num_layers, batch_size, hidden_size), dtype='float32')
            x = to_variable(x_data)
            y = to_variable(y_data)
            init_hidden = to_variable(init_hidden_data)
            init_cell = to_variable(init_cell_data)
            dy_loss, last_hidden, last_cell = ptb_model(x, y, init_hidden,
                                                        init_cell)

            dy_loss.backward()
            adam.minimize(dy_loss)
            ptb_model.clear_gradients()

        opti_dict = adam.state_dict()
        for k, v in opti_dict.items():
            if k == "global_step":
                self.assertTrue(
                    np.array_equal(v.numpy(), self.base_opti[v.name] + 1))

            # NOTE(review): `.find(...) > 0` would miss a key that STARTS
            # with "beta1_pow_acc_0"; presumably keys carry a parameter-name
            # prefix so find() is always > 0 — TODO confirm.
            if k.find("beta1_pow_acc_0") > 0:
                self.assertTrue(
                    np.array_equal(v.numpy(), self.base_opti[v.name] *
                                   adam._beta1))
            if k.find("beta2_pow_acc_0") > 0:
                self.assertTrue(
                    np.array_equal(v.numpy(), self.base_opti[v.name] *
                                   adam._beta2))

        state_dict = ptb_model.state_dict()

        for k, v in state_dict.items():
            new_t = v.numpy()

            base_t = self.model_base[k]
            self.assertTrue(np.array_equal(new_t, base_t))
def testLoadAndSetVarBaseBeforeTrain(self):
    """Same as testSetVariableBeforeTrain but restoring from disk.

    Loads state via fluid.load_dygraph("./test_dy") before any training,
    then runs one lr=0.0 step and checks global_step / beta power
    accumulators / parameters as in testSetVariableBeforeTrain.
    """
    seed = 90
    hidden_size = 10
    vocab_size = 1000
    num_layers = 1
    num_steps = 3
    init_scale = 0.1
    batch_size = 4
    batch_num = 200
    with fluid.dygraph.guard():
        fluid.default_startup_program().random_seed = seed
        fluid.default_main_program().random_seed = seed
        # TODO: marsyang1993 Change seed to
        ptb_model = PtbModel(
            hidden_size=hidden_size,
            vocab_size=vocab_size,
            num_layers=num_layers,
            num_steps=num_steps,
            init_scale=init_scale)

        bd = []
        lr_arr = [0.0]
        # this a fake lr decay strategy
        for i in range(1, 10):
            bd.append(100 * i)
            # set lr to zero not update parameter
            new_lr = 0.0
            lr_arr.append(new_lr)

        place = fluid.CPUPlace() if not core.is_compiled_with_cuda(
        ) else fluid.CUDAPlace(0)
        # NOTE: lr_arr above is built but this Adam uses a plain 0.0 lr.
        adam = Adam(
            learning_rate=0.0,
            beta1=0.8,
            beta2=0.6,
            parameter_list=ptb_model.parameters())
        dy_param_updated = dict()
        dy_param_init = dict()
        dy_loss = None
        last_hidden = None
        last_cell = None

        state_dict, opti_dict = fluid.load_dygraph("./test_dy")
        adam.set_dict(opti_dict)
        ptb_model.set_dict(state_dict)
        for i in range(1):
            x_data = np.arange(12).reshape(4, 3).astype('int64')
            y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
            y_data = y_data.reshape((-1, 1))
            init_hidden_data = np.zeros(
                (num_layers, batch_size, hidden_size), dtype='float32')
            init_cell_data = np.zeros(
                (num_layers, batch_size, hidden_size), dtype='float32')
            x = to_variable(x_data)
            y = to_variable(y_data)
            init_hidden = to_variable(init_hidden_data)
            init_cell = to_variable(init_cell_data)
            dy_loss, last_hidden, last_cell = ptb_model(x, y, init_hidden,
                                                        init_cell)

            dy_loss.backward()
            adam.minimize(dy_loss)
            ptb_model.clear_gradients()

        opti_dict = adam.state_dict()
        for k, v in opti_dict.items():
            if k == "global_step":
                self.assertTrue(
                    np.array_equal(v.numpy(), self.base_opti[v.name] + 1))

            # NOTE(review): `.find(...) > 0` relies on keys being prefixed;
            # see note in testSetVariableBeforeTrain.
            if k.find("beta1_pow_acc_0") > 0:
                self.assertTrue(
                    np.array_equal(v.numpy(), self.base_opti[v.name] *
                                   adam._beta1))
            if k.find("beta2_pow_acc_0") > 0:
                self.assertTrue(
                    np.array_equal(v.numpy(), self.base_opti[v.name] *
                                   adam._beta2))

        # check parameter
        state_dict = ptb_model.state_dict()

        for k, v in state_dict.items():
            new_t = v.numpy()

            base_t = self.model_base[k]
            self.assertTrue(np.array_equal(new_t, base_t))
def testSetNumpyBeforeTrain(self):
    """Same pre-train restore check, feeding set_dict numpy dicts.

    Converts self.opti_dict / self.state_dict into plain-numpy dicts
    before restoring, runs one zero-lr step (piecewise decay with every
    value 0.0), and checks global_step / beta power accumulators /
    parameters against the baselines.
    """
    seed = 90
    hidden_size = 10
    vocab_size = 1000
    num_layers = 1
    num_steps = 3
    init_scale = 0.1
    batch_size = 4
    batch_num = 200
    with fluid.dygraph.guard():
        fluid.default_startup_program().random_seed = seed
        fluid.default_main_program().random_seed = seed
        # TODO: marsyang1993 Change seed to
        ptb_model = PtbModel(
            hidden_size=hidden_size,
            vocab_size=vocab_size,
            num_layers=num_layers,
            num_steps=num_steps,
            init_scale=init_scale)

        bd = []
        lr_arr = [0.0]
        # this a fake lr decay strategy
        for i in range(1, 10):
            bd.append(100 * i)
            # set lr to 0.0, not update parameter
            new_lr = 0.0
            lr_arr.append(new_lr)

        place = fluid.CPUPlace() if not core.is_compiled_with_cuda(
        ) else fluid.CUDAPlace(0)
        adam = Adam(
            learning_rate=fluid.layers.piecewise_decay(
                boundaries=bd, values=lr_arr),
            beta1=0.8,
            beta2=0.6,
            parameter_list=ptb_model.parameters())
        dy_param_updated = dict()
        dy_param_init = dict()
        dy_loss = None
        last_hidden = None
        last_cell = None

        # Convert the saved VarBase dicts to numpy before restoring, to
        # exercise the numpy path of set_dict.
        np_opti_dict = {}
        np_state_dict = {}

        for k, v in self.opti_dict.items():
            np_opti_dict[v.name] = v.numpy()

        for k, v in self.state_dict.items():
            np_state_dict[k] = v.numpy()

        adam.set_dict(np_opti_dict)
        ptb_model.set_dict(np_state_dict)
        for i in range(1):
            x_data = np.arange(12).reshape(4, 3).astype('int64')
            y_data = np.arange(1, 13).reshape(4, 3).astype('int64')
            y_data = y_data.reshape((-1, 1))
            init_hidden_data = np.zeros(
                (num_layers, batch_size, hidden_size), dtype='float32')
            init_cell_data = np.zeros(
                (num_layers, batch_size, hidden_size), dtype='float32')
            x = to_variable(x_data)
            y = to_variable(y_data)
            init_hidden = to_variable(init_hidden_data)
            init_cell = to_variable(init_cell_data)
            dy_loss, last_hidden, last_cell = ptb_model(x, y, init_hidden,
                                                        init_cell)

            dy_loss.backward()
            adam.minimize(dy_loss)
            ptb_model.clear_gradients()

        opti_dict = adam.state_dict()
        for k, v in opti_dict.items():
            if k == "global_step":
                self.assertTrue(
                    np.array_equal(v.numpy(), self.base_opti[v.name] + 1))

            # NOTE(review): `.find(...) > 0` relies on keys being prefixed;
            # see note in testSetVariableBeforeTrain.
            if k.find("beta1_pow_acc_0") > 0:
                self.assertTrue(
                    np.array_equal(v.numpy(), self.base_opti[v.name] *
                                   adam._beta1))
            if k.find("beta2_pow_acc_0") > 0:
                self.assertTrue(
                    np.array_equal(v.numpy(), self.base_opti[v.name] *
                                   adam._beta2))

        # check parameter
        state_dict = ptb_model.state_dict()

        for k, v in state_dict.items():
            new_t = v.numpy()

            base_t = self.model_base[k]
            self.assertTrue(np.array_equal(new_t, base_t))
def testOnlyLoadParams(self):
    """A checkpoint saved from a bare parameter dict has no optimizer state.

    Saves an Embedding's state_dict (parameters only) and verifies that
    load_dygraph returns None for the optimizer slot.
    """
    with fluid.dygraph.guard():
        emb = fluid.dygraph.Embedding([10, 10])
        state_dict = emb.state_dict()
        fluid.save_dygraph(state_dict, os.path.join('saved_dy', 'emb_dy'))

        para_state_dict, opti_state_dict = fluid.load_dygraph(
            os.path.join('saved_dy', 'emb_dy'))

        # assertIsNone instead of `== None`: identity is the actual contract
        # and the failure message is clearer.
        self.assertIsNone(opti_state_dict)
# Allow running this test module directly from the command line.
if __name__ == '__main__':
    unittest.main()
| 38.425532
| 80
| 0.526374
| 4,056
| 34,314
| 4.170365
| 0.071992
| 0.032988
| 0.019864
| 0.009104
| 0.796098
| 0.764588
| 0.745551
| 0.726811
| 0.699911
| 0.686728
| 0
| 0.02318
| 0.380195
| 34,314
| 892
| 81
| 38.46861
| 0.772146
| 0.036516
| 0
| 0.766162
| 0
| 0
| 0.01369
| 0
| 0
| 0
| 0
| 0.001121
| 0.031637
| 1
| 0.016506
| false
| 0
| 0.017882
| 0
| 0.041265
| 0.001376
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
6a8b72c99e17c286dab5988f49aa3ee67b9bac0f
| 79
|
py
|
Python
|
application/exceptions.py
|
jnhoang/generic-flask
|
89b24f68f394eb49b21ed80b8be6edc9b4104e40
|
[
"MIT"
] | null | null | null |
application/exceptions.py
|
jnhoang/generic-flask
|
89b24f68f394eb49b21ed80b8be6edc9b4104e40
|
[
"MIT"
] | null | null | null |
application/exceptions.py
|
jnhoang/generic-flask
|
89b24f68f394eb49b21ed80b8be6edc9b4104e40
|
[
"MIT"
] | null | null | null |
class MissingKeyError(Exception):
    """Raised when an expected key is absent from the input."""
class NoneError(Exception):
    """Raised when a value that must not be None is None."""
| 9.875
| 33
| 0.759494
| 8
| 79
| 7.5
| 0.625
| 0.433333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.164557
| 79
| 7
| 34
| 11.285714
| 0.909091
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
6adba30c182b5960f97d71bf8ea9d9f4fd1ea067
| 143
|
py
|
Python
|
funkcje/cw2.py
|
Wiktor-Wewe/zadaniaDoKolosaWewe
|
69519edae6b582bd81b5011871ce38f1e6a2447f
|
[
"MIT"
] | null | null | null |
funkcje/cw2.py
|
Wiktor-Wewe/zadaniaDoKolosaWewe
|
69519edae6b582bd81b5011871ce38f1e6a2447f
|
[
"MIT"
] | null | null | null |
funkcje/cw2.py
|
Wiktor-Wewe/zadaniaDoKolosaWewe
|
69519edae6b582bd81b5011871ce38f1e6a2447f
|
[
"MIT"
] | null | null | null |
"""
Zadeklaruj funkcje która jako argumenty przyjmuje dwie zmienne i zwraca ich sumę
"""
def f(a:int, b:int):
return a+b
print(f(12, 12))
| 17.875
| 80
| 0.692308
| 25
| 143
| 3.96
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034188
| 0.181818
| 143
| 7
| 81
| 20.428571
| 0.811966
| 0.559441
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 0.666667
| 0.333333
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
6ae61c59dc78d88567d355f56f8f160274fc89d9
| 348
|
py
|
Python
|
lessons/03/perm_missing_elem/test_challenge.py
|
jimlawton/codility
|
b286db80c7cfa6722b78c7eb8992e1a5934db8a0
|
[
"Apache-2.0"
] | null | null | null |
lessons/03/perm_missing_elem/test_challenge.py
|
jimlawton/codility
|
b286db80c7cfa6722b78c7eb8992e1a5934db8a0
|
[
"Apache-2.0"
] | 2
|
2021-03-25T21:32:16.000Z
|
2021-07-19T11:11:15.000Z
|
lessons/03/perm_missing_elem/test_challenge.py
|
jimlawton/codility
|
b286db80c7cfa6722b78c7eb8992e1a5934db8a0
|
[
"Apache-2.0"
] | null | null | null |
from challenge import solution
def test_challenge():
    """Verify solution() finds the element missing from a permutation.

    Cases still to add (see original TODO comments): empty array,
    single-entry array, and the answer at the start, middle, and end.
    """
    # Single entry slot: [2, 3, 1, 5] is 1..5 with 4 missing.
    assert solution([2, 3, 1, 5]) == 4
| 29
| 51
| 0.683908
| 54
| 348
| 4.388889
| 0.537037
| 0.101266
| 0.164557
| 0.21519
| 0.371308
| 0.371308
| 0.261603
| 0.261603
| 0
| 0
| 0
| 0.019231
| 0.252874
| 348
| 11
| 52
| 31.636364
| 0.892308
| 0.62069
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.333333
| 0
| 0.666667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
0a7853edea271b757fc110a6d6e7dbdbbfcc27c5
| 17
|
py
|
Python
|
thinkutils_plus/__init__.py
|
ThinkmanWang/thinkutils_plus
|
65d56a1a0cfce22dff08a4f0baea6b4eb08a2e35
|
[
"MIT"
] | null | null | null |
thinkutils_plus/__init__.py
|
ThinkmanWang/thinkutils_plus
|
65d56a1a0cfce22dff08a4f0baea6b4eb08a2e35
|
[
"MIT"
] | null | null | null |
thinkutils_plus/__init__.py
|
ThinkmanWang/thinkutils_plus
|
65d56a1a0cfce22dff08a4f0baea6b4eb08a2e35
|
[
"MIT"
] | null | null | null |
from log import *
| 17
| 17
| 0.764706
| 3
| 17
| 4.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.176471
| 17
| 1
| 17
| 17
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
0a813ff943afd19168e5e731db82f5d79729f7dd
| 24
|
py
|
Python
|
Tables-Python/print_table/__init__.py
|
abelandcain/simple-python-packages
|
9da63066c73065186540ead933a5c8c45eb3bc47
|
[
"MIT"
] | 2
|
2019-08-25T11:43:12.000Z
|
2020-03-27T00:34:18.000Z
|
tables/__init__.py
|
salt-die/tables
|
85c6b08e2dd7ca83852b044e97ac1cd51cd8f056
|
[
"Unlicense"
] | null | null | null |
tables/__init__.py
|
salt-die/tables
|
85c6b08e2dd7ca83852b044e97ac1cd51cd8f056
|
[
"Unlicense"
] | null | null | null |
from .table import Table
| 24
| 24
| 0.833333
| 4
| 24
| 5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 24
| 1
| 24
| 24
| 0.952381
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
0aa120b0a173258070c8b845051293bedd152cd1
| 32
|
py
|
Python
|
helper.py
|
himicakumar/cs3240-labdemo
|
3707ee6affed33bd450175f7995299956d218f50
|
[
"MIT"
] | null | null | null |
helper.py
|
himicakumar/cs3240-labdemo
|
3707ee6affed33bd450175f7995299956d218f50
|
[
"MIT"
] | null | null | null |
helper.py
|
himicakumar/cs3240-labdemo
|
3707ee6affed33bd450175f7995299956d218f50
|
[
"MIT"
] | null | null | null |
def greeting(msg):
    """Print *msg* to standard output; returns None."""
    print(msg)
| 10.666667
| 19
| 0.6875
| 5
| 32
| 4.4
| 0.8
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.15625
| 32
| 2
| 20
| 16
| 0.814815
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0
| 0.5
| 0.5
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
7c3652e514497bb8fef63f1e6eb079fc68eddce8
| 186
|
py
|
Python
|
corehq/apps/hqcase/models.py
|
johan--/commcare-hq
|
86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd
|
[
"BSD-3-Clause"
] | null | null | null |
corehq/apps/hqcase/models.py
|
johan--/commcare-hq
|
86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd
|
[
"BSD-3-Clause"
] | 1
|
2022-03-12T01:03:25.000Z
|
2022-03-12T01:03:25.000Z
|
corehq/apps/hqcase/models.py
|
johan--/commcare-hq
|
86ee99c54f55ee94e4c8f2f6f30fc44e10e69ebd
|
[
"BSD-3-Clause"
] | null | null | null |
# This file is only here so that django will recognize that
# this is a valid app and run the associated unit tests.
from dimagi.ext.couchdbkit import Document
class _(Document): pass
| 31
| 60
| 0.77957
| 31
| 186
| 4.645161
| 0.870968
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.177419
| 186
| 5
| 61
| 37.2
| 0.941176
| 0.607527
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0.5
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
7c5cf80965436c7d3784b305e291072e297d9ead
| 118,878
|
py
|
Python
|
core/domain/user_jobs_one_off_test.py
|
MohdImran001/oppia
|
ff7421ee424955fc86b1a96012965165cd41be12
|
[
"Apache-2.0"
] | 1
|
2021-04-08T03:04:21.000Z
|
2021-04-08T03:04:21.000Z
|
core/domain/user_jobs_one_off_test.py
|
MohdImran001/oppia
|
ff7421ee424955fc86b1a96012965165cd41be12
|
[
"Apache-2.0"
] | null | null | null |
core/domain/user_jobs_one_off_test.py
|
MohdImran001/oppia
|
ff7421ee424955fc86b1a96012965165cd41be12
|
[
"Apache-2.0"
] | null | null | null |
# coding: utf-8
#
# Copyright 2014 The Oppia Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for user-related one-off computations."""
from __future__ import absolute_import # pylint: disable=import-only-modules
from __future__ import unicode_literals # pylint: disable=import-only-modules
import ast
import datetime
import re
from core.domain import collection_domain
from core.domain import collection_services
from core.domain import event_services
from core.domain import exp_domain
from core.domain import exp_services
from core.domain import feedback_services
from core.domain import learner_progress_services
from core.domain import rating_services
from core.domain import rights_domain
from core.domain import rights_manager
from core.domain import subscription_services
from core.domain import taskqueue_services
from core.domain import user_jobs_continuous
from core.domain import user_jobs_one_off
from core.domain import user_services
from core.platform import models
from core.tests import test_utils
from core.tests.data import image_constants
import feconf
import python_utils
import utils
# Resolve the concrete storage models and services through the platform
# registry so these tests run against whichever backend is configured.
auth_models, user_models, feedback_models, exp_models = (
    models.Registry.import_models(
        [models.NAMES.auth, models.NAMES.user, models.NAMES.feedback,
         models.NAMES.exploration]))

datastore_services = models.Registry.import_datastore_services()
search_services = models.Registry.import_search_services()
class UserContributionsOneOffJobTests(test_utils.GenericTestBase):
    """Tests for the one-off dashboard subscriptions job."""

    EXP_ID_1 = 'exp_id_1'
    EXP_ID_2 = 'exp_id_2'
    USER_A_EMAIL = 'a@example.com'
    USER_A_USERNAME = 'a'
    USER_B_EMAIL = 'b@example.com'
    USER_B_USERNAME = 'b'
    USER_C_EMAIL = 'c@example.com'
    USER_C_USERNAME = 'c'
    USER_D_EMAIL = 'd@example.com'
    USER_D_USERNAME = 'd'

    def _run_one_off_job(self):
        """Runs the one-off MapReduce job and flushes its task queue."""
        job_id = user_jobs_one_off.UserContributionsOneOffJob.create_new()
        user_jobs_one_off.UserContributionsOneOffJob.enqueue(job_id)
        self.assertEqual(
            self.count_jobs_in_mapreduce_taskqueue(
                taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
        self.process_and_flush_pending_mapreduce_tasks()

    def setUp(self):
        """Signs up four users and creates/edits two explorations."""
        super(UserContributionsOneOffJobTests, self).setUp()
        # User A has no created or edited explorations.
        # User B has one created exploration.
        # User C has one edited exploration.
        # User D has created an exploration and then edited it.
        # (This is used to check that there are no duplicate
        # entries in the contribution lists).
        self.signup(self.USER_A_EMAIL, self.USER_A_USERNAME)
        self.user_a_id = self.get_user_id_from_email(self.USER_A_EMAIL)
        self.signup(self.USER_B_EMAIL, self.USER_B_USERNAME)
        self.user_b_id = self.get_user_id_from_email(self.USER_B_EMAIL)
        self.signup(self.USER_C_EMAIL, self.USER_C_USERNAME)
        self.user_c_id = self.get_user_id_from_email(self.USER_C_EMAIL)
        self.signup(self.USER_D_EMAIL, self.USER_D_USERNAME)
        self.user_d_id = self.get_user_id_from_email(self.USER_D_EMAIL)

        self.save_new_valid_exploration(
            self.EXP_ID_1, self.user_b_id, end_state_name='End')

        exp_services.update_exploration(
            self.user_c_id, self.EXP_ID_1, [exp_domain.ExplorationChange({
                'cmd': 'edit_exploration_property',
                'property_name': 'objective',
                'new_value': 'the objective'
            })], 'Test edit')

        self.save_new_valid_exploration(
            self.EXP_ID_2, self.user_d_id, end_state_name='End')

        exp_services.update_exploration(
            self.user_d_id, self.EXP_ID_2, [exp_domain.ExplorationChange({
                'cmd': 'edit_exploration_property',
                'property_name': 'objective',
                'new_value': 'the objective'
            })], 'Test edit')

    def test_null_case(self):
        """Tests the case where user has no created or edited explorations."""
        self._run_one_off_job()
        user_a_contributions_model = user_models.UserContributionsModel.get(
            self.user_a_id, strict=False)
        self.assertEqual(user_a_contributions_model.created_exploration_ids, [])
        self.assertEqual(user_a_contributions_model.edited_exploration_ids, [])

    def test_created_exp(self):
        """Tests the case where user has created (and therefore edited)
        an exploration.
        """
        self._run_one_off_job()
        user_b_contributions_model = user_models.UserContributionsModel.get(
            self.user_b_id)
        self.assertEqual(
            user_b_contributions_model.created_exploration_ids, [self.EXP_ID_1])
        self.assertEqual(
            user_b_contributions_model.edited_exploration_ids, [self.EXP_ID_1])

    def test_edited_exp(self):
        """Tests the case where user has an edited exploration."""
        self._run_one_off_job()
        user_c_contributions_model = user_models.UserContributionsModel.get(
            self.user_c_id)
        self.assertEqual(
            user_c_contributions_model.created_exploration_ids, [])
        self.assertEqual(
            user_c_contributions_model.edited_exploration_ids, [self.EXP_ID_1])

    def test_for_duplicates(self):
        """Tests the case where user has an edited exploration, and edits
        it again making sure it is not duplicated.
        """
        self._run_one_off_job()
        user_d_contributions_model = user_models.UserContributionsModel.get(
            self.user_d_id)
        self.assertEqual(
            user_d_contributions_model.edited_exploration_ids,
            [self.EXP_ID_2])
        self.assertEqual(
            user_d_contributions_model.created_exploration_ids,
            [self.EXP_ID_2])

    def test_no_new_user_contributions_model_get_created_with_existing_model(
            self):
        """An existing UserContributionsModel is not overwritten by the job."""
        model1 = exp_models.ExplorationSnapshotMetadataModel(
            id='exp_id-1', committer_id=self.user_a_id, commit_type='create')
        model1.update_timestamps()
        model1.put()
        user_models.UserContributionsModel(
            id=self.user_a_id,
            created_exploration_ids=['exp_id']
        ).put()

        user_contributions_model = user_models.UserContributionsModel.get(
            self.user_a_id)
        self.assertEqual(
            user_contributions_model.created_exploration_ids,
            ['exp_id'])

        self._run_one_off_job()

        # Unchanged after the job ran.
        user_contributions_model = user_models.UserContributionsModel.get(
            self.user_a_id)
        self.assertEqual(
            user_contributions_model.created_exploration_ids,
            ['exp_id'])

    def test_user_contributions_get_created_after_running_the_job(self):
        """The job creates a contributions model for a user lacking one."""
        model1 = exp_models.ExplorationSnapshotMetadataModel(
            id='exp_id-1', committer_id='new_user', commit_type='create')
        model1.update_timestamps()
        model1.put()

        user_contributions_model = user_models.UserContributionsModel.get(
            'new_user', strict=False)
        self.assertIsNone(user_contributions_model)

        self._run_one_off_job()

        user_contributions_model = user_models.UserContributionsModel.get(
            'new_user', strict=False)
        self.assertEqual(
            user_contributions_model.created_exploration_ids,
            ['exp_id'])
class UsernameLengthDistributionOneOffJobTests(test_utils.GenericTestBase):
    """Tests for the one-off username length distribution job."""

    USER_A_EMAIL = 'a@example.com'
    USER_A_USERNAME = 'a'
    USER_B_EMAIL = 'ab@example.com'
    USER_B_USERNAME = 'ab'
    USER_C_EMAIL = 'bc@example.com'
    USER_C_USERNAME = 'bc'
    USER_D_EMAIL = 'bcd@example.com'
    USER_D_USERNAME = 'bcd'

    def _run_one_off_job(self):
        """Runs the one-off MapReduce job.

        Returns a dict mapping username length (as a string) to the
        number of users with that length, parsed from the job output.
        """
        job_id = (
            user_jobs_one_off.UsernameLengthDistributionOneOffJob.create_new())
        user_jobs_one_off.UsernameLengthDistributionOneOffJob.enqueue(job_id)
        self.assertEqual(
            self.count_jobs_in_mapreduce_taskqueue(
                taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
        self.process_and_flush_pending_mapreduce_tasks()

        stringified_output = (
            user_jobs_one_off.UsernameLengthDistributionOneOffJob.get_output(
                job_id))
        output = {}
        for stringified_distribution in stringified_output:
            # Each output line contains two integers: length and count.
            value = re.findall(r'\d+', stringified_distribution)
            # The following is output['username length'] = number of users.
            output[value[0]] = int(value[1])

        return output

    def test_null_case(self):
        """Tests the case when there are no signed up users but there is one
        default user having the username - 'tmpsuperadm1n'.
        """
        output = self._run_one_off_job()
        # Number of users = 1.
        # length of usernames = 13 (tmpsuperadm1n).
        self.assertEqual(output['13'], 1)

    def test_single_user_case(self):
        """Tests the case when there is only one signed up user and a default
        user - 'tmpsuperadm1n'.
        """
        self.signup(self.USER_A_EMAIL, self.USER_A_USERNAME)
        output = self._run_one_off_job()
        # Number of users = 2.
        # length of usernames = 13 (tmpsuperadm1n), 1 (a).
        self.assertEqual(output['13'], 1)
        self.assertEqual(output['1'], 1)

    def test_multiple_users_case(self):
        """Tests the case when there are multiple signed up users and a
        default user - 'tmpsuperadm1n'.
        """
        self.signup(self.USER_A_EMAIL, self.USER_A_USERNAME)
        self.signup(self.USER_B_EMAIL, self.USER_B_USERNAME)
        output = self._run_one_off_job()
        # Number of users = 3
        # length of usernames = 13 (tmpsuperadm1n), 2 (ab), 1 (a).
        self.assertEqual(output['13'], 1)
        self.assertEqual(output['2'], 1)
        self.assertEqual(output['1'], 1)

        self.signup(self.USER_C_EMAIL, self.USER_C_USERNAME)
        self.signup(self.USER_D_EMAIL, self.USER_D_USERNAME)
        output = self._run_one_off_job()
        # Number of users = 5
        # length of usernames = 13 (tmpsuperadm1n), 3 (bcd), 2 (ab, bc), 1 (a).
        self.assertEqual(output['13'], 1)
        self.assertEqual(output['3'], 1)
        self.assertEqual(output['2'], 2)
        self.assertEqual(output['1'], 1)
class UsernameLengthAuditOneOffJobTests(test_utils.GenericTestBase):
    """Tests for the one-off username length limit job."""

    USER_1_EMAIL = '1@example.com'
    USER_1_USERNAME = '123456789123456789123'
    USER_2_EMAIL = '2@example.com'
    USER_2_USERNAME = '123456789123456789124'
    USER_3_EMAIL = '3@example.com'
    USER_3_USERNAME = 'a' * 30
    USER_4_EMAIL = '4@example.com'
    # Username 4 length is 20, so it shouldn't be in the output.
    USER_4_USERNAME = '12345678912345678912'

    def _run_one_off_job(self):
        """Runs the one-off MapReduce job and returns its raw output."""
        job_id = (
            user_jobs_one_off.UsernameLengthAuditOneOffJob.create_new())
        user_jobs_one_off.UsernameLengthAuditOneOffJob.enqueue(job_id)
        self.assertEqual(
            self.count_jobs_in_mapreduce_taskqueue(
                taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
        self.process_and_flush_pending_mapreduce_tasks()
        return user_jobs_one_off.UsernameLengthAuditOneOffJob.get_output(job_id)

    def test_username_length_limit(self):
        """Usernames of 21 and 30 chars are reported, grouped by length."""
        self.signup(self.USER_1_EMAIL, self.USER_1_USERNAME)
        self.signup(self.USER_2_EMAIL, self.USER_2_USERNAME)
        self.signup(self.USER_3_EMAIL, self.USER_3_USERNAME)

        expected_output = [u'[u\'Length: 21\', u"Usernames: [\'%s\', \'%s\']"]'
                           % (self.USER_1_USERNAME, self.USER_2_USERNAME),
                           u'[u\'Length: 30\', u"Usernames: [\'%s\']"]'
                           % self.USER_3_USERNAME]
        actual_output = self._run_one_off_job()
        self.assertEqual(actual_output, expected_output)
class LongUserBiosOneOffJobTests(test_utils.GenericTestBase):
    """Tests for the one-off long userbio length job."""

    USER_A_EMAIL = 'a@example.com'
    USER_A_USERNAME = 'a'
    USER_A_BIO = 'I am less than 500'
    USER_B_EMAIL = 'b@example.com'
    USER_B_USERNAME = 'b'
    USER_B_BIO = 'Long Bio' * 100
    USER_C_EMAIL = 'c@example.com'
    USER_C_USERNAME = 'c'
    USER_C_BIO = 'Same Bio' * 100
    USER_D_EMAIL = 'd@example.com'
    USER_D_USERNAME = 'd'
    USER_D_BIO = 'Diff Bio' * 300

    def _run_one_off_job(self):
        """Runs the one-off MapReduce job.

        Returns:
            list. A list of [bio_length, usernames] pairs parsed from the
            job's stringified output.
        """
        bios_job_cls = user_jobs_one_off.LongUserBiosOneOffJob
        job_id = bios_job_cls.create_new()
        bios_job_cls.enqueue(job_id)
        self.assertEqual(
            self.count_jobs_in_mapreduce_taskqueue(
                taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
        self.process_and_flush_pending_mapreduce_tasks()
        raw_output = bios_job_cls.get_output(job_id)
        parsed_items = (ast.literal_eval(item) for item in raw_output)
        return [[int(pair[0]), pair[1]] for pair in parsed_items]

    def _signup_user_with_bio(self, email, username, bio):
        """Signs up a new user and assigns the given bio to them."""
        self.signup(email, username)
        user_id = self.get_user_id_from_email(email)
        user_services.update_user_bio(user_id, bio)

    def test_no_userbio_returns_empty_list(self):
        """Tests the case when userbio is None."""
        self.signup(self.USER_C_EMAIL, self.USER_C_USERNAME)
        self.assertEqual(self._run_one_off_job(), [])

    def test_short_userbio_returns_empty_list(self):
        """Tests the case where the userbio is less than 500 characters."""
        self._signup_user_with_bio(
            self.USER_A_EMAIL, self.USER_A_USERNAME, self.USER_A_BIO)
        self.assertEqual(self._run_one_off_job(), [])

    def test_long_userbio_length(self):
        """Tests the case where the userbio is more than 500 characters."""
        self._signup_user_with_bio(
            self.USER_B_EMAIL, self.USER_B_USERNAME, self.USER_B_BIO)
        self.assertEqual(self._run_one_off_job(), [[800, ['b']]])

    def test_same_userbio_length(self):
        """Tests the case where two users have same userbio length."""
        self._signup_user_with_bio(
            self.USER_B_EMAIL, self.USER_B_USERNAME, self.USER_B_BIO)
        self._signup_user_with_bio(
            self.USER_C_EMAIL, self.USER_C_USERNAME, self.USER_C_BIO)
        result = self._run_one_off_job()
        # The two usernames are grouped under one length key; sort them so
        # the comparison is independent of MapReduce emission order.
        result[0][1].sort()
        self.assertEqual(result, [[800, ['b', 'c']]])

    def test_diff_userbio_length(self):
        """Tests the case where two users have different userbio lengths."""
        self._signup_user_with_bio(
            self.USER_C_EMAIL, self.USER_C_USERNAME, self.USER_C_BIO)
        self._signup_user_with_bio(
            self.USER_D_EMAIL, self.USER_D_USERNAME, self.USER_D_BIO)
        result = sorted(self._run_one_off_job(), key=lambda x: x[0])
        self.assertEqual(result, [[800, ['c']], [2400, ['d']]])

    def test_bio_length_for_users_with_no_bio(self):
        """Tests that a settings model without a bio yields no output."""
        self.signup(self.USER_A_EMAIL, self.USER_A_USERNAME)
        user_id_a = self.get_user_id_from_email(self.USER_A_EMAIL)
        settings_model = user_models.UserSettingsModel(
            id=user_id_a,
            email=self.USER_A_EMAIL)
        settings_model.update_timestamps()
        settings_model.put()
        self.assertEqual(self._run_one_off_job(), [])
class DashboardSubscriptionsOneOffJobTests(test_utils.GenericTestBase):
    """Tests for the one-off dashboard subscriptions job."""
    EXP_ID_1 = 'exp_id_1'
    EXP_ID_2 = 'exp_id_2'
    COLLECTION_ID_1 = 'col_id_1'
    COLLECTION_ID_2 = 'col_id_2'
    EXP_ID_FOR_COLLECTION_1 = 'id_of_exp_in_collection_1'
    USER_A_EMAIL = 'a@example.com'
    USER_A_USERNAME = 'a'
    USER_B_EMAIL = 'b@example.com'
    USER_B_USERNAME = 'b'
    USER_C_EMAIL = 'c@example.com'
    USER_C_USERNAME = 'c'
    def _run_one_off_job(self):
        """Runs the one-off MapReduce job."""
        job_id = user_jobs_one_off.DashboardSubscriptionsOneOffJob.create_new()
        user_jobs_one_off.DashboardSubscriptionsOneOffJob.enqueue(job_id)
        self.assertEqual(
            self.count_jobs_in_mapreduce_taskqueue(
                taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
        self.process_and_flush_pending_mapreduce_tasks()
    def _null_fn(self, *args, **kwargs):
        """A mock for functions of the form subscribe_to_*() to represent
        behavior prior to the implementation of subscriptions.
        """
        pass
    def setUp(self):
        """Signs up users A, B and C, and has user A create an exploration
        while the live subscribe_to_* services are mocked out, so that the
        one-off job is what creates the subscriptions.
        """
        super(DashboardSubscriptionsOneOffJobTests, self).setUp()
        self.signup(self.USER_A_EMAIL, self.USER_A_USERNAME)
        self.user_a_id = self.get_user_id_from_email(self.USER_A_EMAIL)
        self.signup(self.USER_B_EMAIL, self.USER_B_USERNAME)
        self.user_b_id = self.get_user_id_from_email(self.USER_B_EMAIL)
        self.signup(self.USER_C_EMAIL, self.USER_C_USERNAME)
        self.user_c_id = self.get_user_id_from_email(self.USER_C_EMAIL)
        self.user_a = user_services.get_user_actions_info(self.user_a_id)
        with self.swap(
            subscription_services, 'subscribe_to_thread', self._null_fn
        ), self.swap(
            subscription_services, 'subscribe_to_exploration', self._null_fn
        ):
            # User A creates and saves a new valid exploration.
            self.save_new_valid_exploration(
                self.EXP_ID_1, self.user_a_id, end_state_name='End')
    def test_null_case(self):
        """Checks that the job creates no subscriptions model for a user
        with no activity.
        """
        user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
            self.user_b_id, strict=False)
        self.assertEqual(user_b_subscriptions_model, None)
        self._run_one_off_job()
        user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
            self.user_b_id, strict=False)
        self.assertEqual(user_b_subscriptions_model, None)
    def test_feedback_thread_subscription(self):
        """Checks that the job subscribes both the thread creator and a
        later commenter to the feedback thread (but not to the exploration).
        """
        user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
            self.user_b_id, strict=False)
        user_c_subscriptions_model = user_models.UserSubscriptionsModel.get(
            self.user_c_id, strict=False)
        self.assertEqual(user_b_subscriptions_model, None)
        self.assertEqual(user_c_subscriptions_model, None)
        with self.swap(
            subscription_services, 'subscribe_to_thread', self._null_fn
        ), self.swap(
            subscription_services, 'subscribe_to_exploration', self._null_fn
        ):
            # User B starts a feedback thread.
            feedback_services.create_thread(
                'exploration', self.EXP_ID_1, self.user_b_id, 'subject', 'text')
            # User C adds to that thread.
            thread_id = feedback_services.get_all_threads(
                'exploration', self.EXP_ID_1, False)[0].id
            feedback_services.create_message(
                thread_id, self.user_c_id, None, None, 'more text')
        self._run_one_off_job()
        # Both users are subscribed to the feedback thread.
        user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
            self.user_b_id)
        user_c_subscriptions_model = user_models.UserSubscriptionsModel.get(
            self.user_c_id)
        self.assertEqual(user_b_subscriptions_model.exploration_ids, [])
        self.assertEqual(user_c_subscriptions_model.exploration_ids, [])
        self.assertEqual(
            user_b_subscriptions_model.general_feedback_thread_ids, [thread_id])
        self.assertEqual(
            user_c_subscriptions_model.general_feedback_thread_ids, [thread_id])
    def test_exploration_subscription(self):
        """Checks that owners and editors get subscribed to an exploration,
        while viewers do not.
        """
        with self.swap(
            subscription_services, 'subscribe_to_thread', self._null_fn
        ), self.swap(
            subscription_services, 'subscribe_to_exploration', self._null_fn
        ):
            # User A adds user B as an editor to the exploration.
            rights_manager.assign_role_for_exploration(
                self.user_a, self.EXP_ID_1, self.user_b_id,
                rights_domain.ROLE_EDITOR)
            # User A adds user C as a viewer of the exploration.
            rights_manager.assign_role_for_exploration(
                self.user_a, self.EXP_ID_1, self.user_c_id,
                rights_domain.ROLE_VIEWER)
        self._run_one_off_job()
        # Users A and B are subscribed to the exploration. User C is not.
        user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
            self.user_a_id)
        user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
            self.user_b_id)
        user_c_subscriptions_model = user_models.UserSubscriptionsModel.get(
            self.user_c_id, strict=False)
        self.assertEqual(
            user_a_subscriptions_model.exploration_ids, [self.EXP_ID_1])
        self.assertEqual(
            user_b_subscriptions_model.exploration_ids, [self.EXP_ID_1])
        self.assertEqual(user_c_subscriptions_model, None)
    def test_two_explorations(self):
        """Checks that a user who created two explorations is subscribed to
        both.
        """
        with self.swap(
            subscription_services, 'subscribe_to_thread', self._null_fn
        ), self.swap(
            subscription_services, 'subscribe_to_exploration', self._null_fn
        ):
            # User A creates and saves another valid exploration.
            self.save_new_valid_exploration(self.EXP_ID_2, self.user_a_id)
        self._run_one_off_job()
        # User A is subscribed to two explorations.
        user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
            self.user_a_id)
        self.assertEqual(
            sorted(user_a_subscriptions_model.exploration_ids),
            sorted([self.EXP_ID_1, self.EXP_ID_2]))
    def test_community_owned_exploration(self):
        """Checks that pre-release owners/editors stay subscribed to a
        community-owned exploration, but a post-release editor is not.
        """
        with self.swap(
            subscription_services, 'subscribe_to_thread', self._null_fn
        ), self.swap(
            subscription_services, 'subscribe_to_exploration', self._null_fn
        ):
            # User A adds user B as an editor to the exploration.
            rights_manager.assign_role_for_exploration(
                self.user_a, self.EXP_ID_1, self.user_b_id,
                rights_domain.ROLE_EDITOR)
            # The exploration becomes community-owned.
            rights_manager.publish_exploration(self.user_a, self.EXP_ID_1)
            rights_manager.release_ownership_of_exploration(
                self.user_a, self.EXP_ID_1)
            # User C edits the exploration.
            exp_services.update_exploration(
                self.user_c_id, self.EXP_ID_1, [], 'Update exploration')
        self._run_one_off_job()
        # User A and user B are subscribed to the exploration; user C is not.
        user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
            self.user_a_id)
        user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
            self.user_b_id)
        user_c_subscriptions_model = user_models.UserSubscriptionsModel.get(
            self.user_c_id, strict=False)
        self.assertEqual(
            user_a_subscriptions_model.exploration_ids, [self.EXP_ID_1])
        self.assertEqual(
            user_b_subscriptions_model.exploration_ids, [self.EXP_ID_1])
        self.assertEqual(user_c_subscriptions_model, None)
    def test_deleted_exploration(self):
        """Checks that no subscription is created for a deleted
        exploration.
        """
        with self.swap(
            subscription_services, 'subscribe_to_thread', self._null_fn
        ), self.swap(
            subscription_services, 'subscribe_to_exploration', self._null_fn
        ):
            # User A deletes the exploration.
            exp_services.delete_exploration(self.user_a_id, self.EXP_ID_1)
            self.process_and_flush_pending_mapreduce_tasks()
        self._run_one_off_job()
        # User A is not subscribed to the exploration.
        user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
            self.user_a_id, strict=False)
        self.assertEqual(user_a_subscriptions_model, None)
    def test_collection_subscription(self):
        """Checks that owners and editors get subscribed to a collection,
        while viewers do not; the collection creator is also subscribed to
        the exploration inside the collection.
        """
        with self.swap(
            subscription_services, 'subscribe_to_thread', self._null_fn
        ), self.swap(
            subscription_services, 'subscribe_to_exploration', self._null_fn
        ), self.swap(
            subscription_services, 'subscribe_to_collection', self._null_fn
        ):
            # User A creates and saves a new valid collection.
            self.save_new_valid_collection(
                self.COLLECTION_ID_1, self.user_a_id,
                exploration_id=self.EXP_ID_FOR_COLLECTION_1)
            # User A adds user B as an editor to the collection.
            rights_manager.assign_role_for_collection(
                self.user_a, self.COLLECTION_ID_1, self.user_b_id,
                rights_domain.ROLE_EDITOR)
            # User A adds user C as a viewer of the collection.
            rights_manager.assign_role_for_collection(
                self.user_a, self.COLLECTION_ID_1, self.user_c_id,
                rights_domain.ROLE_VIEWER)
        self._run_one_off_job()
        # Users A and B are subscribed to the collection. User C is not.
        user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
            self.user_a_id)
        user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
            self.user_b_id)
        user_c_subscriptions_model = user_models.UserSubscriptionsModel.get(
            self.user_c_id, strict=False)
        self.assertEqual(
            user_a_subscriptions_model.collection_ids, [self.COLLECTION_ID_1])
        # User A is also subscribed to the exploration within the collection
        # because they created both.
        self.assertEqual(
            sorted(user_a_subscriptions_model.exploration_ids), [
                self.EXP_ID_1, self.EXP_ID_FOR_COLLECTION_1])
        self.assertEqual(
            user_b_subscriptions_model.collection_ids, [self.COLLECTION_ID_1])
        self.assertEqual(user_c_subscriptions_model, None)
    def test_two_collections(self):
        """Checks that a user who created two collections is subscribed to
        both.
        """
        with self.swap(
            subscription_services, 'subscribe_to_thread', self._null_fn
        ), self.swap(
            subscription_services, 'subscribe_to_exploration', self._null_fn
        ), self.swap(
            subscription_services, 'subscribe_to_collection', self._null_fn
        ):
            # User A creates and saves a new valid collection.
            self.save_new_valid_collection(
                self.COLLECTION_ID_1, self.user_a_id,
                exploration_id=self.EXP_ID_FOR_COLLECTION_1)
            # User A creates and saves another valid collection.
            self.save_new_valid_collection(
                self.COLLECTION_ID_2, self.user_a_id,
                exploration_id=self.EXP_ID_FOR_COLLECTION_1)
        self._run_one_off_job()
        # User A is subscribed to two collections.
        user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
            self.user_a_id)
        self.assertEqual(
            sorted(user_a_subscriptions_model.collection_ids),
            sorted([self.COLLECTION_ID_1, self.COLLECTION_ID_2]))
    def test_deleted_collection(self):
        """Checks that no subscription is created for a deleted
        collection (or deleted exploration).
        """
        with self.swap(
            subscription_services, 'subscribe_to_thread', self._null_fn
        ), self.swap(
            subscription_services, 'subscribe_to_exploration', self._null_fn
        ), self.swap(
            subscription_services, 'subscribe_to_collection', self._null_fn
        ):
            # User A creates and saves a new collection.
            self.save_new_default_collection(
                self.COLLECTION_ID_1, self.user_a_id)
            # User A deletes the collection.
            collection_services.delete_collection(
                self.user_a_id, self.COLLECTION_ID_1)
            # User A deletes the exploration from earlier.
            exp_services.delete_exploration(self.user_a_id, self.EXP_ID_1)
            self.process_and_flush_pending_mapreduce_tasks()
        self._run_one_off_job()
        # User A is not subscribed to the collection.
        user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
            self.user_a_id, strict=False)
        self.assertEqual(user_a_subscriptions_model, None)
    def test_adding_exploration_to_collection(self):
        """Checks that adding another user's exploration to a collection
        subscribes the collection owner only to the collection and the
        exploration owner only to the exploration.
        """
        with self.swap(
            subscription_services, 'subscribe_to_thread', self._null_fn
        ), self.swap(
            subscription_services, 'subscribe_to_collection', self._null_fn
        ):
            # User B creates and saves a new collection.
            self.save_new_default_collection(
                self.COLLECTION_ID_1, self.user_b_id)
            # User B adds the exploration created by user A to the collection.
            collection_services.update_collection(
                self.user_b_id, self.COLLECTION_ID_1, [{
                    'cmd': collection_domain.CMD_ADD_COLLECTION_NODE,
                    'exploration_id': self.EXP_ID_1
                }], 'Add new exploration to collection.')
        # Users A and B have no subscriptions (to either explorations or
        # collections).
        user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
            self.user_a_id, strict=False)
        user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
            self.user_b_id, strict=False)
        self.assertEqual(user_a_subscriptions_model, None)
        self.assertEqual(user_b_subscriptions_model, None)
        self._run_one_off_job()
        user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
            self.user_a_id)
        user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
            self.user_b_id)
        # User B should be subscribed to the collection and user A to the
        # exploration.
        self.assertEqual(
            user_a_subscriptions_model.exploration_ids, [self.EXP_ID_1])
        self.assertEqual(
            user_a_subscriptions_model.collection_ids, [])
        self.assertEqual(
            user_b_subscriptions_model.exploration_ids, [])
        self.assertEqual(
            user_b_subscriptions_model.collection_ids, [self.COLLECTION_ID_1])
    def test_community_owned_collection(self):
        """Checks that pre-release owners/editors stay subscribed to a
        community-owned collection, but a post-release editor is not.
        """
        with self.swap(
            subscription_services, 'subscribe_to_thread', self._null_fn
        ), self.swap(
            subscription_services, 'subscribe_to_collection', self._null_fn
        ):
            rights_manager.publish_exploration(self.user_a, self.EXP_ID_1)
            # User A creates and saves a new valid collection.
            self.save_new_valid_collection(
                self.COLLECTION_ID_1, self.user_a_id,
                exploration_id=self.EXP_ID_1)
            # User A adds user B as an editor to the collection.
            rights_manager.assign_role_for_collection(
                self.user_a, self.COLLECTION_ID_1, self.user_b_id,
                rights_domain.ROLE_EDITOR)
            # The collection becomes community-owned.
            rights_manager.publish_collection(self.user_a, self.COLLECTION_ID_1)
            rights_manager.release_ownership_of_collection(
                self.user_a, self.COLLECTION_ID_1)
            # User C edits the collection.
            collection_services.update_collection(
                self.user_c_id, self.COLLECTION_ID_1, [{
                    'cmd': collection_domain.CMD_EDIT_COLLECTION_PROPERTY,
                    'property_name': (
                        collection_domain.COLLECTION_PROPERTY_TITLE),
                    'new_value': 'New title'
                }], 'Changed title.')
        self._run_one_off_job()
        # User A and user B are subscribed to the collection; user C is not.
        user_a_subscriptions_model = user_models.UserSubscriptionsModel.get(
            self.user_a_id)
        user_b_subscriptions_model = user_models.UserSubscriptionsModel.get(
            self.user_b_id)
        user_c_subscriptions_model = user_models.UserSubscriptionsModel.get(
            self.user_c_id, strict=False)
        self.assertEqual(
            user_a_subscriptions_model.collection_ids, [self.COLLECTION_ID_1])
        self.assertEqual(
            user_b_subscriptions_model.collection_ids, [self.COLLECTION_ID_1])
        self.assertEqual(user_c_subscriptions_model, None)
class MockUserStatsAggregator(
        user_jobs_continuous.UserStatsAggregator):
    """A modified UserStatsAggregator that does not start a new
    batch job when the previous one has finished.
    """
    @classmethod
    def _get_batch_job_manager_class(cls):
        # Point the aggregator at the mock batch job manager so the mock
        # aggregator/manager pair reference each other rather than the
        # production classes.
        return MockUserStatsMRJobManager
    @classmethod
    def _kickoff_batch_job_after_previous_one_ends(cls):
        # Deliberately a no-op: tests run exactly one batch computation
        # without the aggregator re-enqueueing itself.
        pass
class MockUserStatsMRJobManager(
        user_jobs_continuous.UserStatsMRJobManager):
    """A modified UserStatsMRJobManager that reports MockUserStatsAggregator
    as its continuous computation class, completing the mock pairing.
    """
    @classmethod
    def _get_continuous_computation_class(cls):
        return MockUserStatsAggregator
class DashboardStatsOneOffJobTests(test_utils.GenericTestBase):
    """Tests for the one-off dashboard stats job."""
    # Captured once at class-definition time so every test records stats
    # under the same deterministic date key.
    CURRENT_DATE_AS_STRING = user_services.get_current_date_as_string()
    DATE_AFTER_ONE_WEEK = (
        (datetime.datetime.utcnow() + datetime.timedelta(7)).strftime(
            feconf.DASHBOARD_STATS_DATETIME_STRING_FORMAT))
    USER_SESSION_ID = 'session1'
    EXP_ID_1 = 'exp_id_1'
    EXP_ID_2 = 'exp_id_2'
    EXP_VERSION = 1
    def _run_one_off_job(self):
        """Runs the one-off MapReduce job."""
        job_id = user_jobs_one_off.DashboardStatsOneOffJob.create_new()
        user_jobs_one_off.DashboardStatsOneOffJob.enqueue(job_id)
        self.assertEqual(
            self.count_jobs_in_mapreduce_taskqueue(
                taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
        self.process_and_flush_pending_mapreduce_tasks()
    def setUp(self):
        """Signs up an exploration owner for the tests to use."""
        super(DashboardStatsOneOffJobTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
    def mock_get_current_date_as_string(self):
        """Returns the fixed date string captured at class-definition time,
        used to swap out user_services.get_current_date_as_string.
        """
        return self.CURRENT_DATE_AS_STRING
    def _rate_exploration(self, user_id, exp_id, rating):
        """Assigns rating to the exploration corresponding to the given
        exploration id.

        Args:
            user_id: str. The user id.
            exp_id: str. The exploration id.
            rating: int. The rating to be assigned to the given exploration.
        """
        rating_services.assign_rating_to_exploration(user_id, exp_id, rating)
    def _record_play(self, exp_id, state):
        """Calls StartExplorationEventHandler and records the 'play' event
        corresponding to the given exploration id.

        Args:
            exp_id: str. The exploration id.
            state: dict(str, *). The state of the exploration corresponding to
                the given id.
        """
        event_services.StartExplorationEventHandler.record(
            exp_id, self.EXP_VERSION, state, self.USER_SESSION_ID, {},
            feconf.PLAY_TYPE_NORMAL)
    def test_weekly_stats_if_continuous_stats_job_has_not_been_run(self):
        """Checks that the one-off job records zeroed weekly stats when the
        continuous stats aggregator has never run.
        """
        exploration = self.save_new_valid_exploration(
            self.EXP_ID_1, self.owner_id)
        exp_id = exploration.id
        init_state_name = exploration.init_state_name
        self._record_play(exp_id, init_state_name)
        self._rate_exploration('user1', exp_id, 5)
        weekly_stats = user_services.get_weekly_dashboard_stats(self.owner_id)
        self.assertEqual(weekly_stats, None)
        self.assertEqual(
            user_services.get_last_week_dashboard_stats(self.owner_id), None)
        with self.swap(
            user_services,
            'get_current_date_as_string',
            self.mock_get_current_date_as_string):
            self._run_one_off_job()
        weekly_stats = user_services.get_weekly_dashboard_stats(self.owner_id)
        expected_results_list = [{
            self.mock_get_current_date_as_string(): {
                'num_ratings': 0,
                'average_ratings': None,
                'total_plays': 0
            }
        }]
        self.assertEqual(weekly_stats, expected_results_list)
        self.assertEqual(
            user_services.get_last_week_dashboard_stats(self.owner_id),
            expected_results_list[0])
    def test_weekly_stats_if_no_explorations(self):
        """Checks that a user with no explorations still gets a zeroed
        weekly stats entry.
        """
        MockUserStatsAggregator.start_computation()
        self.process_and_flush_pending_mapreduce_tasks()
        with self.swap(
            user_services,
            'get_current_date_as_string',
            self.mock_get_current_date_as_string):
            self._run_one_off_job()
        weekly_stats = user_services.get_weekly_dashboard_stats(self.owner_id)
        self.assertEqual(
            weekly_stats, [{
                self.mock_get_current_date_as_string(): {
                    'num_ratings': 0,
                    'average_ratings': None,
                    'total_plays': 0
                }
            }])
    def test_weekly_stats_for_single_exploration(self):
        """Checks the recorded weekly stats for one exploration with one
        play and one rating.
        """
        exploration = self.save_new_valid_exploration(
            self.EXP_ID_1, self.owner_id)
        exp_id = exploration.id
        init_state_name = exploration.init_state_name
        self._record_play(exp_id, init_state_name)
        self._rate_exploration('user1', exp_id, 5)
        event_services.StatsEventsHandler.record(
            self.EXP_ID_1, 1, {
                'num_starts': 1,
                'num_actual_starts': 0,
                'num_completions': 0,
                'state_stats_mapping': {}
            })
        self.process_and_flush_pending_tasks()
        MockUserStatsAggregator.start_computation()
        self.process_and_flush_pending_mapreduce_tasks()
        with self.swap(
            user_services,
            'get_current_date_as_string',
            self.mock_get_current_date_as_string):
            self._run_one_off_job()
        weekly_stats = user_services.get_weekly_dashboard_stats(self.owner_id)
        self.assertEqual(
            weekly_stats, [{
                self.mock_get_current_date_as_string(): {
                    'num_ratings': 1,
                    'average_ratings': 5.0,
                    'total_plays': 1
                }
            }])
    def test_weekly_stats_for_multiple_explorations(self):
        """Checks that stats are aggregated across a user's explorations
        (ratings from both explorations, plays from one).
        """
        exploration_1 = self.save_new_valid_exploration(
            self.EXP_ID_1, self.owner_id)
        exp_id_1 = exploration_1.id
        exploration_2 = self.save_new_valid_exploration(
            self.EXP_ID_2, self.owner_id)
        exp_id_2 = exploration_2.id
        init_state_name_1 = exploration_1.init_state_name
        self._record_play(exp_id_1, init_state_name_1)
        self._rate_exploration('user1', exp_id_1, 5)
        self._rate_exploration('user2', exp_id_2, 4)
        event_services.StatsEventsHandler.record(
            self.EXP_ID_1, 1, {
                'num_starts': 1,
                'num_actual_starts': 0,
                'num_completions': 0,
                'state_stats_mapping': {}
            })
        self.process_and_flush_pending_tasks()
        MockUserStatsAggregator.start_computation()
        self.process_and_flush_pending_mapreduce_tasks()
        with self.swap(
            user_services,
            'get_current_date_as_string',
            self.mock_get_current_date_as_string):
            self._run_one_off_job()
        weekly_stats = user_services.get_weekly_dashboard_stats(self.owner_id)
        self.assertEqual(
            weekly_stats, [{
                self.mock_get_current_date_as_string(): {
                    'num_ratings': 2,
                    'average_ratings': 4.5,
                    'total_plays': 1
                }
            }])
    def test_stats_for_multiple_weeks(self):
        """Checks that running the job in two different weeks appends a
        second weekly entry, and that the latest entry is reported as last
        week's stats.
        """
        exploration = self.save_new_valid_exploration(
            self.EXP_ID_1, self.owner_id)
        exp_id = exploration.id
        init_state_name = exploration.init_state_name
        self._rate_exploration('user1', exp_id, 4)
        self._record_play(exp_id, init_state_name)
        self._record_play(exp_id, init_state_name)
        event_services.StatsEventsHandler.record(
            self.EXP_ID_1, 1, {
                'num_starts': 2,
                'num_actual_starts': 0,
                'num_completions': 0,
                'state_stats_mapping': {}
            })
        self.process_and_flush_pending_tasks()
        MockUserStatsAggregator.start_computation()
        self.process_and_flush_pending_mapreduce_tasks()
        with self.swap(
            user_services,
            'get_current_date_as_string',
            self.mock_get_current_date_as_string):
            self._run_one_off_job()
        weekly_stats = user_services.get_weekly_dashboard_stats(self.owner_id)
        self.assertEqual(
            weekly_stats, [{
                self.mock_get_current_date_as_string(): {
                    'num_ratings': 1,
                    'average_ratings': 4.0,
                    'total_plays': 2
                }
            }])
        # Restart the aggregator so the new rating below is picked up.
        MockUserStatsAggregator.stop_computation(self.owner_id)
        self.process_and_flush_pending_mapreduce_tasks()
        self._rate_exploration('user2', exp_id, 2)
        MockUserStatsAggregator.start_computation()
        self.process_and_flush_pending_mapreduce_tasks()
        def _mock_get_date_after_one_week():
            """Returns the date of the next week."""
            return self.DATE_AFTER_ONE_WEEK
        with self.swap(
            user_services,
            'get_current_date_as_string',
            _mock_get_date_after_one_week):
            self._run_one_off_job()
        expected_results_list = [
            {
                self.mock_get_current_date_as_string(): {
                    'num_ratings': 1,
                    'average_ratings': 4.0,
                    'total_plays': 2
                }
            },
            {
                _mock_get_date_after_one_week(): {
                    'num_ratings': 2,
                    'average_ratings': 3.0,
                    'total_plays': 2
                }
            }
        ]
        weekly_stats = user_services.get_weekly_dashboard_stats(self.owner_id)
        self.assertEqual(weekly_stats, expected_results_list)
        self.assertEqual(
            user_services.get_last_week_dashboard_stats(self.owner_id),
            expected_results_list[1])
class UserFirstContributionMsecOneOffJobTests(test_utils.GenericTestBase):
    """Tests for the one-off job that backfills users'
    first_contribution_msec from their published explorations.
    """
    EXP_ID = 'test_exp'
    def setUp(self):
        """Signs up an admin, an owner and an editor."""
        super(UserFirstContributionMsecOneOffJobTests, self).setUp()
        self.signup(self.ADMIN_EMAIL, self.ADMIN_USERNAME)
        self.admin_id = self.get_user_id_from_email(self.ADMIN_EMAIL)
        self.set_admins([self.ADMIN_USERNAME])
        self.admin = user_services.get_user_actions_info(self.admin_id)
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.owner = user_services.get_user_actions_info(self.owner_id)
        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
        self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
    def test_contribution_msec_updates_on_published_explorations(self):
        """Checks that the job sets first_contribution_msec only once the
        exploration is published, for all contributing users.
        """
        exploration = self.save_new_valid_exploration(
            self.EXP_ID, self.admin_id, end_state_name='End')
        init_state_name = exploration.init_state_name
        # Test that no contribution time is set.
        job_id = (
            user_jobs_one_off.UserFirstContributionMsecOneOffJob.create_new())
        user_jobs_one_off.UserFirstContributionMsecOneOffJob.enqueue(job_id)
        self.process_and_flush_pending_mapreduce_tasks()
        self.assertIsNone(
            user_services.get_user_settings(
                self.admin_id).first_contribution_msec)
        # Test all owners and editors of exploration after publication have
        # updated times.
        exp_services.publish_exploration_and_update_user_profiles(
            self.admin, self.EXP_ID)
        rights_manager.release_ownership_of_exploration(
            self.admin, self.EXP_ID)
        exp_services.update_exploration(
            self.editor_id, self.EXP_ID, [exp_domain.ExplorationChange({
                'cmd': 'edit_state_property',
                'state_name': init_state_name,
                'property_name': 'widget_id',
                'new_value': 'MultipleChoiceInput'
            }), exp_domain.ExplorationChange({
                'cmd': 'edit_state_property',
                'state_name': init_state_name,
                'property_name': 'widget_customization_args',
                'new_value': {
                    'choices': {
                        'value': [{
                            'content_id': 'ca_choices_0',
                            'html': '<p>Choice 1</p>'
                        }]
                    },
                    'showChoicesInShuffledOrder': {'value': True}
                }
            })], 'commit')
        job_id = (
            user_jobs_one_off.UserFirstContributionMsecOneOffJob.create_new())
        user_jobs_one_off.UserFirstContributionMsecOneOffJob.enqueue(job_id)
        self.process_and_flush_pending_mapreduce_tasks()
        self.assertIsNotNone(user_services.get_user_settings(
            self.admin_id).first_contribution_msec)
        self.assertIsNotNone(user_services.get_user_settings(
            self.editor_id).first_contribution_msec)
    def test_contribution_msec_does_not_update_on_unpublished_explorations(
            self):
        """Checks that an unpublished exploration does not cause
        first_contribution_msec to be set.
        """
        self.save_new_valid_exploration(
            self.EXP_ID, self.owner_id, end_state_name='End')
        exp_services.publish_exploration_and_update_user_profiles(
            self.owner, self.EXP_ID)
        # We now manually reset the user's first_contribution_msec to None.
        # This is to test that the one off job skips over the unpublished
        # exploration and does not reset the user's first_contribution_msec.
        user_models.UserSettingsModel(
            id=self.owner_id,
            email='email@email.com',
            username='username',
            first_contribution_msec=None
        ).put()
        rights_manager.unpublish_exploration(self.admin, self.EXP_ID)
        # Test that first contribution time is not set for unpublished
        # explorations.
        job_id = (
            user_jobs_one_off.UserFirstContributionMsecOneOffJob.create_new())
        user_jobs_one_off.UserFirstContributionMsecOneOffJob.enqueue(job_id)
        self.process_and_flush_pending_mapreduce_tasks()
        self.assertIsNone(user_services.get_user_settings(
            self.owner_id).first_contribution_msec)
    def test_contribution_msec_is_not_generated_if_exploration_not_created(
            self):
        """Checks that a rights snapshot with no corresponding exploration
        does not produce a first_contribution_msec.
        """
        model1 = exp_models.ExplorationRightsSnapshotMetadataModel(
            id='exp_id-1', committer_id=self.owner_id, commit_type='create')
        model1.update_timestamps()
        model1.put()
        self.assertIsNone(user_services.get_user_settings(
            self.owner_id).first_contribution_msec)
        job_id = (
            user_jobs_one_off.UserFirstContributionMsecOneOffJob.create_new())
        user_jobs_one_off.UserFirstContributionMsecOneOffJob.enqueue(job_id)
        self.process_and_flush_pending_mapreduce_tasks()
        self.assertIsNone(user_services.get_user_settings(
            self.owner_id).first_contribution_msec)
class UserLastExplorationActivityOneOffJobTests(test_utils.GenericTestBase):
def setUp(self):
super(UserLastExplorationActivityOneOffJobTests, self).setUp()
self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
self.editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
self.exp_id = 'exp'
def _run_one_off_job(self):
"""Runs the one-off MapReduce job."""
job_id = (
user_jobs_one_off.UserLastExplorationActivityOneOffJob.create_new())
user_jobs_one_off.UserLastExplorationActivityOneOffJob.enqueue(job_id)
self.assertEqual(
self.count_jobs_in_mapreduce_taskqueue(
taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
self.process_and_flush_pending_mapreduce_tasks()
def test_that_last_created_time_is_updated(self):
self.login(self.OWNER_EMAIL)
self.save_new_valid_exploration(
self.exp_id, self.owner_id, end_state_name='End')
self.logout()
user_models.UserSettingsModel(
id=self.owner_id,
email=self.OWNER_EMAIL,
last_created_an_exploration=None
).put()
owner_settings = user_services.get_user_settings(self.owner_id)
self.assertIsNone(owner_settings.last_created_an_exploration)
self.assertIsNone(owner_settings.last_edited_an_exploration)
self._run_one_off_job()
owner_settings = user_services.get_user_settings(self.owner_id)
self.assertIsNotNone(owner_settings.last_created_an_exploration)
self.assertIsNotNone(owner_settings.last_edited_an_exploration)
def test_that_last_edited_time_is_updated(self):
self.login(self.OWNER_EMAIL)
self.save_new_valid_exploration(
self.exp_id, self.owner_id, end_state_name='End')
self.logout()
self.login(self.EDITOR_EMAIL)
exp_services.update_exploration(
self.editor_id, self.exp_id, [exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'objective',
'new_value': 'the objective'
})], 'Test edit')
self.logout()
user_models.UserSettingsModel(
id=self.editor_id,
email=self.EDITOR_EMAIL,
last_edited_an_exploration=None
).put()
editor_settings = user_services.get_user_settings(self.editor_id)
self.assertIsNone(editor_settings.last_created_an_exploration)
self.assertIsNone(editor_settings.last_edited_an_exploration)
self._run_one_off_job()
editor_settings = user_services.get_user_settings(self.editor_id)
self.assertIsNotNone(editor_settings.last_edited_an_exploration)
self.assertIsNone(editor_settings.last_created_an_exploration)
def test_that_last_edited_and_created_time_both_updated(self):
self.login(self.OWNER_EMAIL)
self.save_new_valid_exploration(
self.exp_id, self.owner_id, end_state_name='End')
exp_services.update_exploration(
self.owner_id, self.exp_id, [exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'objective',
'new_value': 'the objective'
})], 'Test edit')
self.logout()
self.login(self.EDITOR_EMAIL)
exp_services.update_exploration(
self.editor_id, self.exp_id, [exp_domain.ExplorationChange({
'cmd': 'edit_exploration_property',
'property_name': 'objective',
'new_value': 'new objective'
})], 'Test edit new')
self.logout()
user_models.UserSettingsModel(
id=self.owner_id,
email=self.OWNER_EMAIL,
last_created_an_exploration=None,
last_edited_an_exploration=None
).put()
user_models.UserSettingsModel(
id=self.editor_id,
email=self.EDITOR_EMAIL,
last_edited_an_exploration=None
).put()
owner_settings = user_services.get_user_settings(self.owner_id)
editor_settings = user_services.get_user_settings(self.editor_id)
self.assertIsNone(owner_settings.last_created_an_exploration)
self.assertIsNone(owner_settings.last_edited_an_exploration)
self.assertIsNone(editor_settings.last_created_an_exploration)
self.assertIsNone(editor_settings.last_edited_an_exploration)
self._run_one_off_job()
owner_settings = user_services.get_user_settings(self.owner_id)
editor_settings = user_services.get_user_settings(self.editor_id)
self.assertIsNotNone(owner_settings.last_edited_an_exploration)
self.assertIsNotNone(owner_settings.last_created_an_exploration)
self.assertIsNotNone(editor_settings.last_edited_an_exploration)
self.assertIsNone(editor_settings.last_created_an_exploration)
def test_that_last_edited_and_created_time_are_not_updated(self):
    """A user with no exploration activity keeps both timestamps unset
    after the job runs.
    """
    user_models.UserSettingsModel(
        id=self.owner_id,
        email=self.OWNER_EMAIL,
        last_created_an_exploration=None,
        last_edited_an_exploration=None
    ).put()

    def _assert_timestamps_unset():
        """Asserts that neither activity timestamp is set for the owner."""
        settings = user_services.get_user_settings(self.owner_id)
        self.assertIsNone(settings.last_created_an_exploration)
        self.assertIsNone(settings.last_edited_an_exploration)

    _assert_timestamps_unset()
    self._run_one_off_job()
    # No creations or edits exist, so the job must leave both unset.
    _assert_timestamps_unset()
class CleanupUserSubscriptionsModelUnitTests(test_utils.GenericTestBase):
    """Tests for the one-off job that removes ids of deleted explorations
    from UserSubscriptionsModel instances.
    """

    def setUp(self):
        super(CleanupUserSubscriptionsModelUnitTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup('user@email', 'user')
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.user_id = self.get_user_id_from_email('user@email')
        self.owner = user_services.get_user_actions_info(self.owner_id)

        # Three published explorations with ids '0', '1' and '2', each of
        # which the second user subscribes to.
        explorations = [
            exp_domain.Exploration.create_default_exploration(
                '%s' % i,
                title='title %d' % i,
                category='category%d' % i)
            for i in python_utils.RANGE(3)]
        for exploration in explorations:
            exp_services.save_new_exploration(self.owner_id, exploration)
            rights_manager.publish_exploration(self.owner, exploration.id)
        for exploration in explorations:
            subscription_services.subscribe_to_exploration(
                self.user_id, exploration.id)
        self.process_and_flush_pending_mapreduce_tasks()

    def test_standard_operation(self):
        """Deleting every exploration and running the job should empty
        exploration_ids on both users' subscription models.
        """
        for exp_id in python_utils.RANGE(3):
            exp_models.ExplorationModel.get('%s' % exp_id).delete(
                self.owner_id, 'deleted exploration')

        # Before the job runs, both models still reference all three
        # (now deleted) explorations.
        for user_id in (self.owner_id, self.user_id):
            subscriptions = user_models.UserSubscriptionsModel.get(user_id)
            self.assertEqual(len(subscriptions.exploration_ids), 3)

        job_class = (
            user_jobs_one_off
            .CleanupExplorationIdsFromUserSubscriptionsModelOneOffJob)
        job_id = job_class.create_new()
        job_class.enqueue(job_id)
        self.assertEqual(
            self.count_jobs_in_mapreduce_taskqueue(
                taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
        self.process_and_flush_pending_mapreduce_tasks()

        for user_id in (self.owner_id, self.user_id):
            subscriptions = user_models.UserSubscriptionsModel.get(user_id)
            self.assertEqual(len(subscriptions.exploration_ids), 0)

        actual_output = job_class.get_output(job_id)
        expected_output = [
            u'[u\'Successfully cleaned up UserSubscriptionsModel %s and '
            'removed explorations 0, 1, 2\', 1]' % user_id
            for user_id in (self.owner_id, self.user_id)]
        self.assertEqual(sorted(actual_output), sorted(expected_output))
class MockUserSettingsModelWithGaeUserId(user_models.UserSettingsModel):
    """Version of UserSettingsModel that re-declares the `gae_user_id`
    property so that tests can populate it.
    """

    gae_user_id = datastore_services.StringProperty(
        indexed=True, required=False)
class MockUserSettingsModelWithGaeId(user_models.UserSettingsModel):
    """Version of UserSettingsModel that re-declares the `gae_id`
    property so that tests can populate it.
    """

    gae_id = datastore_services.StringProperty(
        indexed=True, required=True)
class MockUserSubscriptionsModelWithActivityIDs(
        user_models.UserSubscriptionsModel):
    """Mock UserSubscriptionsModel so that it allows to set `activity_ids`."""

    activity_ids = (
        datastore_services.StringProperty(indexed=True, repeated=True))
class RemoveActivityIDsOneOffJobTests(test_utils.GenericTestBase):
    """Tests for the one-off job that deletes the deprecated
    `activity_ids` field from stored UserSubscriptionsModel entities.
    """

    def _run_one_off_job(self):
        """Runs the one-off MapReduce job."""
        job_id = (
            user_jobs_one_off.RemoveActivityIDsOneOffJob.create_new())
        user_jobs_one_off.RemoveActivityIDsOneOffJob.enqueue(job_id)
        self.assertEqual(
            self.count_jobs_in_mapreduce_taskqueue(
                taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
        self.process_and_flush_pending_mapreduce_tasks()
        stringified_output = (
            user_jobs_one_off.RemoveActivityIDsOneOffJob
            .get_output(job_id))
        eval_output = [ast.literal_eval(stringified_item) for
                       stringified_item in stringified_output]
        return eval_output

    def test_one_subscription_model_with_activity_ids(self):
        """The job removes `activity_ids` from a model that still has it,
        and must not change the model's last_updated time.
        """
        # The mock subclass is swapped in only while creating the entity,
        # since the real model class no longer declares `activity_ids`.
        with self.swap(
            user_models, 'UserSubscriptionsModel',
            MockUserSubscriptionsModelWithActivityIDs):
            original_subscription_model = (
                user_models.UserSubscriptionsModel(
                    id='id',
                    activity_ids=['exp_1', 'exp_2', 'exp_3']
                )
            )
            original_subscription_model.update_timestamps()
            original_subscription_model.put()
            self.assertIsNotNone(
                original_subscription_model.activity_ids)
            self.assertIn(
                'activity_ids', original_subscription_model._values)  # pylint: disable=protected-access
            self.assertIn(
                'activity_ids', original_subscription_model._properties)  # pylint: disable=protected-access

        output = self._run_one_off_job()
        self.assertItemsEqual(
            [['SUCCESS_REMOVED - UserSubscriptionsModel', 1]], output)

        migrated_subscription_model = (
            user_models.UserSubscriptionsModel.get_by_id('id'))

        # The stray field must be gone from both the entity's values and
        # its property map after the migration.
        self.assertNotIn(
            'activity_ids', migrated_subscription_model._values)  # pylint: disable=protected-access
        self.assertNotIn(
            'activity_ids', migrated_subscription_model._properties)  # pylint: disable=protected-access
        self.assertEqual(
            original_subscription_model.last_updated,
            migrated_subscription_model.last_updated)

    def test_one_subscription_model_without_activity_ids(self):
        """A model that never had `activity_ids` is reported as already
        clean and left untouched.
        """
        original_subscription_model = (
            user_models.UserSubscriptionsModel(
                id='id'
            )
        )
        original_subscription_model.update_timestamps()
        original_subscription_model.put()
        self.assertNotIn(
            'activity_ids', original_subscription_model._values)  # pylint: disable=protected-access
        self.assertNotIn(
            'activity_ids', original_subscription_model._properties)  # pylint: disable=protected-access

        output = self._run_one_off_job()
        self.assertItemsEqual(
            [['SUCCESS_ALREADY_REMOVED - UserSubscriptionsModel', 1]], output)

        migrated_subscription_model = (
            user_models.UserSubscriptionsModel.get_by_id('id'))
        self.assertNotIn(
            'activity_ids', migrated_subscription_model._values)  # pylint: disable=protected-access
        self.assertNotIn(
            'activity_ids', migrated_subscription_model._properties)  # pylint: disable=protected-access
        self.assertEqual(
            original_subscription_model.last_updated,
            migrated_subscription_model.last_updated)

    def test_rerun(self):
        """Running the job a second time is a no-op and keeps reporting
        the model as already clean.
        """
        original_subscription_model = (
            user_models.UserSubscriptionsModel(
                id='id'
            )
        )
        original_subscription_model.update_timestamps()
        original_subscription_model.put()
        self.assertNotIn(
            'activity_ids', original_subscription_model._values)  # pylint: disable=protected-access
        self.assertNotIn(
            'activity_ids', original_subscription_model._properties)  # pylint: disable=protected-access

        output = self._run_one_off_job()
        self.assertItemsEqual(
            [['SUCCESS_ALREADY_REMOVED - UserSubscriptionsModel', 1]], output)

        migrated_subscription_model = (
            user_models.UserSubscriptionsModel.get_by_id('id'))
        self.assertNotIn(
            'activity_ids', migrated_subscription_model._values)  # pylint: disable=protected-access
        self.assertNotIn(
            'activity_ids', migrated_subscription_model._properties)  # pylint: disable=protected-access
        self.assertEqual(
            original_subscription_model.last_updated,
            migrated_subscription_model.last_updated)

        # Second run: same output, same untouched model state.
        output = self._run_one_off_job()
        self.assertItemsEqual(
            [['SUCCESS_ALREADY_REMOVED - UserSubscriptionsModel', 1]], output)

        migrated_subscription_model = (
            user_models.UserSubscriptionsModel.get_by_id('id'))
        self.assertNotIn(
            'activity_ids', migrated_subscription_model._values)  # pylint: disable=protected-access
        self.assertNotIn(
            'activity_ids', migrated_subscription_model._properties)  # pylint: disable=protected-access
        self.assertEqual(
            original_subscription_model.last_updated,
            migrated_subscription_model.last_updated)
class MockUserSubscriptionsModelWithFeedbackThreadIDs(
        user_models.UserSubscriptionsModel):
    """Version of UserSubscriptionsModel that re-declares the
    `feedback_thread_ids` property so that tests can populate it.
    """

    feedback_thread_ids = datastore_services.StringProperty(
        indexed=True, repeated=True)
class RemoveFeedbackThreadIDsOneOffJobTests(test_utils.GenericTestBase):
    """Tests for the one-off job that deletes the deprecated
    `feedback_thread_ids` field from stored UserSubscriptionsModel entities.
    """

    def _run_one_off_job(self):
        """Runs the one-off MapReduce job."""
        job_id = (
            user_jobs_one_off.RemoveFeedbackThreadIDsOneOffJob.create_new())
        user_jobs_one_off.RemoveFeedbackThreadIDsOneOffJob.enqueue(job_id)
        self.assertEqual(
            self.count_jobs_in_mapreduce_taskqueue(
                taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
        self.process_and_flush_pending_mapreduce_tasks()
        stringified_output = (
            user_jobs_one_off.RemoveFeedbackThreadIDsOneOffJob
            .get_output(job_id))
        eval_output = [ast.literal_eval(stringified_item) for
                       stringified_item in stringified_output]
        return eval_output

    def test_one_subscription_model_with_feedback_thread_ids(self):
        """The job removes `feedback_thread_ids` from a model that still
        has it, and must not change the model's last_updated time.
        """
        # The mock subclass is swapped in only while creating the entity,
        # since the real model class no longer declares the field.
        with self.swap(
            user_models, 'UserSubscriptionsModel',
            MockUserSubscriptionsModelWithFeedbackThreadIDs):
            original_subscription_model = (
                user_models.UserSubscriptionsModel(
                    id='id',
                    feedback_thread_ids=['some_id']
                )
            )
            original_subscription_model.update_timestamps()
            original_subscription_model.put()
            self.assertIsNotNone(
                original_subscription_model.feedback_thread_ids)
            self.assertIn(
                'feedback_thread_ids', original_subscription_model._values)  # pylint: disable=protected-access
            self.assertIn(
                'feedback_thread_ids', original_subscription_model._properties)  # pylint: disable=protected-access

        output = self._run_one_off_job()
        self.assertItemsEqual(
            [['SUCCESS_REMOVED - UserSubscriptionsModel', 1]], output)

        migrated_subscription_model = (
            user_models.UserSubscriptionsModel.get_by_id('id'))

        # The stray field must be gone from both the entity's values and
        # its property map after the migration.
        self.assertNotIn(
            'feedback_thread_ids', migrated_subscription_model._values)  # pylint: disable=protected-access
        self.assertNotIn(
            'feedback_thread_ids', migrated_subscription_model._properties)  # pylint: disable=protected-access
        self.assertEqual(
            original_subscription_model.last_updated,
            migrated_subscription_model.last_updated)

    def test_one_subscription_model_without_feedback_thread_ids(self):
        """A model that never had `feedback_thread_ids` is reported as
        already clean and left untouched.
        """
        original_subscription_model = (
            user_models.UserSubscriptionsModel(
                id='id'
            )
        )
        original_subscription_model.update_timestamps()
        original_subscription_model.put()
        self.assertNotIn(
            'feedback_thread_ids', original_subscription_model._values)  # pylint: disable=protected-access
        self.assertNotIn(
            'feedback_thread_ids', original_subscription_model._properties)  # pylint: disable=protected-access

        output = self._run_one_off_job()
        self.assertItemsEqual(
            [['SUCCESS_ALREADY_REMOVED - UserSubscriptionsModel', 1]], output)

        migrated_subscription_model = (
            user_models.UserSubscriptionsModel.get_by_id('id'))
        self.assertNotIn(
            'feedback_thread_ids', migrated_subscription_model._values)  # pylint: disable=protected-access
        self.assertNotIn(
            'feedback_thread_ids', migrated_subscription_model._properties)  # pylint: disable=protected-access
        self.assertEqual(
            original_subscription_model.last_updated,
            migrated_subscription_model.last_updated)
class FixUserSettingsCreatedOnOneOffJobTests(test_utils.GenericTestBase):
    """Tests for the one-off job that repairs UserSettingsModel.created_on
    by resetting it to the earliest timestamp found across the user's
    various models, whenever created_on is later than that minimum by more
    than the job's allowed time delta.
    """

    AUTO_CREATE_DEFAULT_SUPERADMIN_USER = False
    USER_ID_1 = 'user_id'
    USER_ID_2 = 'user_id_2'
    EMAIL_1 = 'test@email.com'
    EMAIL_2 = 'test2@email.com'
    SKILL_ID_1 = 'skill_id_1'
    SKILL_ID_2 = 'skill_id_2'
    DEGREE_OF_MASTERY = 0.5
    EXPLORATION_IDS = ['exp_1', 'exp_2', 'exp_3']
    COLLECTION_IDS = ['col_1', 'col_2', 'col_3']
    EXP_ID_ONE = 'exp_id_one'
    EXP_ID_TWO = 'exp_id_two'
    EXP_ID_THREE = 'exp_id_three'

    def _run_one_off_job(self):
        """Runs the one-off MapReduce job."""
        job_id = (
            user_jobs_one_off.FixUserSettingsCreatedOnOneOffJob.create_new())
        user_jobs_one_off.FixUserSettingsCreatedOnOneOffJob.enqueue(job_id)
        self.assertEqual(
            self.count_jobs_in_mapreduce_taskqueue(
                taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
        self.process_and_flush_pending_mapreduce_tasks()
        stringified_output = (
            user_jobs_one_off.FixUserSettingsCreatedOnOneOffJob
            .get_output(job_id))
        eval_output = [ast.literal_eval(stringified_item) for
                       stringified_item in stringified_output]
        # Sort the user-id lists so that assertions do not depend on the
        # order in which the job emitted them.
        sorted_eval_output = []
        for key, values in eval_output:
            if key == 'ERROR_NOT_UP_TO_DATE_USER':
                values.sort()
            sorted_eval_output.append([key, values])
        return sorted_eval_output

    def test_update_user_model_using_all_user_settings_model_attributes(self):
        """When every datetime attribute on UserSettingsModel itself is
        populated, the job fixes created_on using the earliest of them.
        """
        user_settings_model = (
            user_models.UserSettingsModel(
                id=self.USER_ID_1,
                email=self.EMAIL_1,
            )
        )
        user_settings_model.update_timestamps()
        original_created_on_timestamp = user_settings_model.created_on
        # Once created_on is pushed 10 days forward below,
        # last_agreed_to_terms (original created_on + 2h) becomes the
        # earliest timestamp on the model, so the job should adopt it.
        user_settings_model.last_agreed_to_terms = (
            original_created_on_timestamp + datetime.timedelta(hours=2))
        final_created_on_timestamp = user_settings_model.last_agreed_to_terms
        user_settings_model.created_on = (
            final_created_on_timestamp + datetime.timedelta(days=10))
        user_settings_model.last_logged_in = (
            final_created_on_timestamp + datetime.timedelta(minutes=1))
        user_settings_model.last_started_state_editor_tutorial = (
            final_created_on_timestamp + datetime.timedelta(minutes=3))
        user_settings_model.last_updated = (
            final_created_on_timestamp + datetime.timedelta(hours=12))
        user_settings_model.last_started_state_translation_tutorial = (
            final_created_on_timestamp + datetime.timedelta(hours=14))
        user_settings_model.last_edited_an_exploration = (
            final_created_on_timestamp + datetime.timedelta(hours=15))
        user_settings_model.last_created_an_exploration = (
            final_created_on_timestamp + datetime.timedelta(hours=16))
        user_settings_model.first_contribution_msec = (
            utils.get_time_in_millisecs(
                final_created_on_timestamp + datetime.timedelta(hours=10))
        )
        user_settings_model.put()
        expected_output = [
            [
                'SUCCESS_UPDATED_USING_UserSettingsModel_last_agreed_to_terms',
                1
            ],
            ['ERROR_NOT_UP_TO_DATE_USER', [self.USER_ID_1]]
        ]
        self.assertLess(
            final_created_on_timestamp, user_settings_model.created_on)
        actual_output = self._run_one_off_job()
        self.assertItemsEqual(expected_output, actual_output)
        migrated_user_model = (
            user_models.UserSettingsModel.get_by_id(self.USER_ID_1))
        self.assertEqual(
            migrated_user_model.created_on, final_created_on_timestamp)

    def test_update_using_datetime_attributes_of_all_other_models(self):
        """The job also considers timestamps on the user's other models
        (subscriptions, exploration data, contributions, email prefs,
        stats) when determining the earliest timestamp.
        """
        user_subscriptions_model = user_models.UserSubscriptionsModel(
            id=self.USER_ID_1)
        user_subscriptions_model.update_timestamps()
        # We are sequentially creating the models, so the timestamps will
        # be in increasing order, and hence created_on attribute for
        # user_subscriptions_model will have the smallest timestamp value.
        final_created_on_timestamp = user_subscriptions_model.created_on
        user_subscriptions_model.last_updated = (
            final_created_on_timestamp + datetime.timedelta(hours=2)
        )
        user_subscriptions_model.last_checked = (
            final_created_on_timestamp + datetime.timedelta(hours=3)
        )
        user_subscriptions_model.put()
        user_settings_model = (
            user_models.UserSettingsModel(
                id=self.USER_ID_1,
                email=self.EMAIL_1,
            )
        )
        user_settings_model.update_timestamps()
        user_settings_model.created_on = (
            final_created_on_timestamp + datetime.timedelta(hours=10)
        )
        user_settings_model.last_updated = (
            final_created_on_timestamp + datetime.timedelta(hours=10)
        )
        user_settings_model.put()
        exploration_user_data_model = user_models.ExplorationUserDataModel(
            id='%s.%s' % (self.USER_ID_1, self.EXP_ID_ONE),
            user_id=self.USER_ID_1,
            exploration_id=self.EXP_ID_ONE,
            rating=2,
            rated_on=final_created_on_timestamp + datetime.timedelta(hours=1),
            draft_change_list={'new_content': {}},
            draft_change_list_last_updated=(
                final_created_on_timestamp + datetime.timedelta(hours=2)),
            draft_change_list_exp_version=3,
            draft_change_list_id=1
        )
        exploration_user_data_model.update_timestamps()
        exploration_user_data_model.created_on = (
            final_created_on_timestamp + datetime.timedelta(hours=5)
        )
        exploration_user_data_model.last_updated = (
            final_created_on_timestamp + datetime.timedelta(hours=5)
        )
        exploration_user_data_model.put()
        user_contributions_model = user_models.UserContributionsModel(
            id=self.USER_ID_1)
        user_contributions_model.update_timestamps()
        user_contributions_model.last_updated = (
            final_created_on_timestamp + datetime.timedelta(hours=5)
        )
        user_contributions_model.put()
        user_email_preferences_model = user_models.UserEmailPreferencesModel(
            id=self.USER_ID_1)
        user_email_preferences_model.update_timestamps()
        user_email_preferences_model.last_updated = (
            final_created_on_timestamp + datetime.timedelta(hours=6)
        )
        user_email_preferences_model.put()
        user_stats_model = user_models.UserStatsModel(
            id=self.USER_ID_1)
        user_stats_model.update_timestamps()
        user_stats_model.created_on = (
            final_created_on_timestamp + datetime.timedelta(hours=10)
        )
        user_stats_model.last_updated = (
            final_created_on_timestamp + datetime.timedelta(hours=10)
        )
        user_stats_model.put()
        # The earliest timestamp lives on UserSubscriptionsModel, so that
        # is the source the job is expected to report.
        expected_output = [
            [
                'SUCCESS_UPDATED_USING_UserSubscriptionsModel_created_on', 1
            ],
            ['ERROR_NOT_UP_TO_DATE_USER', [self.USER_ID_1]]
        ]
        self.assertLess(
            final_created_on_timestamp, user_settings_model.created_on)
        actual_output = self._run_one_off_job()
        self.assertItemsEqual(expected_output, actual_output)
        migrated_user_model = (
            user_models.UserSettingsModel.get_by_id(self.USER_ID_1))
        self.assertEqual(
            migrated_user_model.created_on, final_created_on_timestamp)

    def test_time_difference_less_than_time_delta_does_not_update(self):
        """created_on is left alone when it exceeds the minimum timestamp
        by less than the job's allowed time delta.
        """
        self.signup(self.NEW_USER_EMAIL, self.NEW_USER_USERNAME)
        user_id = self.get_user_id_from_email(self.NEW_USER_EMAIL)
        user_auth_details_model = (
            auth_models.UserAuthDetailsModel.get(user_id))
        user_auth_details_model.update_timestamps()
        user_auth_details_model.put()
        user_settings_model = (
            user_models.UserSettingsModel(
                id=user_id,
                email=self.NEW_USER_EMAIL,
            )
        )
        user_settings_model.update_timestamps()
        user_settings_model.put()
        # UserAuthDetails model was created before UserSettingsModel, but the
        # time difference is less than the time_delta required (will be less
        # than a second here), hence created_on will not be updated.
        self.assertLess(
            user_auth_details_model.created_on, user_settings_model.created_on)
        expected_output = [['SUCCESS_ALREADY_UP_TO_DATE', 1]]
        actual_output = self._run_one_off_job()
        self.assertItemsEqual(expected_output, actual_output)
        migrated_user_model = (
            user_models.UserSettingsModel.get_by_id(user_id))
        self.assertNotEqual(
            migrated_user_model.created_on, user_auth_details_model.created_on)

    def test_update_for_multiple_users_works_correctly(self):
        """Each user's created_on is fixed independently, possibly using a
        different source attribute per user.
        """
        user_settings_model_1 = (
            user_models.UserSettingsModel(
                id=self.USER_ID_1,
                email=self.EMAIL_1,
            )
        )
        user_settings_model_1.update_timestamps()
        user_settings_model_1.created_on += datetime.timedelta(hours=10)
        final_created_on_timestamp_1 = user_settings_model_1.last_updated
        user_settings_model_1.put()
        user_settings_model_2 = (
            user_models.UserSettingsModel(
                id=self.USER_ID_2,
                email=self.EMAIL_2,
            )
        )
        user_settings_model_2.update_timestamps()
        original_created_on_timestamp_2 = user_settings_model_2.created_on
        user_settings_model_2.created_on = (
            original_created_on_timestamp_2 + datetime.timedelta(hours=5))
        user_settings_model_2.last_updated = (
            original_created_on_timestamp_2 + datetime.timedelta(hours=6))
        user_settings_model_2.last_logged_in = (
            original_created_on_timestamp_2 + datetime.timedelta(hours=1))
        final_created_on_timestamp_2 = user_settings_model_2.last_logged_in
        user_settings_model_2.put()
        # User 1's minimum is last_updated; user 2's is last_logged_in.
        expected_output = [
            ['SUCCESS_UPDATED_USING_UserSettingsModel_last_updated', 1],
            ['SUCCESS_UPDATED_USING_UserSettingsModel_last_logged_in', 1],
            ['ERROR_NOT_UP_TO_DATE_USER', [self.USER_ID_1, self.USER_ID_2]]
        ]
        self.assertLess(
            final_created_on_timestamp_1, user_settings_model_1.created_on)
        self.assertLess(
            final_created_on_timestamp_2, user_settings_model_2.created_on)
        actual_output = self._run_one_off_job()
        self.assertItemsEqual(actual_output, expected_output)
        migrated_user_model_1 = (
            user_models.UserSettingsModel.get_by_id(self.USER_ID_1))
        migrated_user_model_2 = (
            user_models.UserSettingsModel.get_by_id(self.USER_ID_2))
        self.assertEqual(
            migrated_user_model_1.created_on, final_created_on_timestamp_1)
        self.assertEqual(
            migrated_user_model_2.created_on, final_created_on_timestamp_2)

    def test_multiple_runs_of_one_off_job_works_correctly(self):
        """A second run after everything has been fixed reports all users
        as already up to date and changes nothing.
        """
        user_settings_model_1 = (
            user_models.UserSettingsModel(
                id=self.USER_ID_1,
                email=self.EMAIL_1,
            )
        )
        user_settings_model_1.update_timestamps()
        user_settings_model_1.created_on += datetime.timedelta(hours=10)
        final_created_on_timestamp_1 = user_settings_model_1.last_updated
        user_settings_model_1.put()
        user_settings_model_2 = (
            user_models.UserSettingsModel(
                id=self.USER_ID_2,
                email=self.EMAIL_2,
            )
        )
        user_settings_model_2.update_timestamps()
        user_settings_model_2.created_on += datetime.timedelta(hours=5)
        final_created_on_timestamp_2 = user_settings_model_2.last_updated
        user_settings_model_2.put()
        expected_output = [['SUCCESS_ALREADY_UP_TO_DATE', 2]]
        self.assertLess(
            final_created_on_timestamp_1, user_settings_model_1.created_on)
        self.assertLess(
            final_created_on_timestamp_2, user_settings_model_2.created_on)
        # The first run performs the fixes; only the second run's output
        # is asserted on.
        actual_output = self._run_one_off_job()
        actual_output = self._run_one_off_job()
        self.assertItemsEqual(actual_output, expected_output)
        migrated_user_model_1 = (
            user_models.UserSettingsModel.get_by_id(self.USER_ID_1))
        migrated_user_model_2 = (
            user_models.UserSettingsModel.get_by_id(self.USER_ID_2))
        self.assertEqual(
            migrated_user_model_1.created_on, final_created_on_timestamp_1)
        self.assertEqual(
            migrated_user_model_2.created_on, final_created_on_timestamp_2)
class UserSettingsCreatedOnAuditOneOffJobTests(test_utils.GenericTestBase):
    """Tests for the audit job that reports (without fixing) users whose
    UserSettingsModel.created_on exceeds the earliest timestamp across
    their models by more than the allowed time delta.
    """

    AUTO_CREATE_DEFAULT_SUPERADMIN_USER = False
    USER_ID_1 = 'user_id'
    USER_ID_2 = 'user_id_2'
    EMAIL_1 = 'test@email.com'
    EMAIL_2 = 'test2@email.com'
    SKILL_ID_1 = 'skill_id_1'
    SKILL_ID_2 = 'skill_id_2'
    DEGREE_OF_MASTERY = 0.5
    EXPLORATION_IDS = ['exp_1', 'exp_2', 'exp_3']
    COLLECTION_IDS = ['col_1', 'col_2', 'col_3']
    EXP_ID_ONE = 'exp_id_one'
    EXP_ID_TWO = 'exp_id_two'
    EXP_ID_THREE = 'exp_id_three'

    def setUp(self):
        """Creates one user (USER_ID_1) with a full complement of models;
        self.lowest_timestamp records the earliest timestamp among them
        (the settings model's created_on).
        """
        super(UserSettingsCreatedOnAuditOneOffJobTests, self).setUp()
        self.user_settings_model = (
            user_models.UserSettingsModel(
                id=self.USER_ID_1,
                email=self.EMAIL_1,
            )
        )
        self.user_settings_model.update_timestamps()
        self.lowest_timestamp = self.user_settings_model.created_on
        # All other datetime attributes are strictly later than
        # created_on, so created_on stays the minimum.
        self.user_settings_model.last_agreed_to_terms = (
            self.lowest_timestamp + datetime.timedelta(hours=2))
        self.user_settings_model.last_logged_in = (
            self.lowest_timestamp + datetime.timedelta(minutes=1))
        self.user_settings_model.last_started_state_editor_tutorial = (
            self.lowest_timestamp + datetime.timedelta(minutes=3))
        self.user_settings_model.last_started_state_translation_tutorial = (
            self.lowest_timestamp + datetime.timedelta(hours=14))
        self.user_settings_model.last_edited_an_exploration = (
            self.lowest_timestamp + datetime.timedelta(hours=15))
        self.user_settings_model.last_created_an_exploration = (
            self.lowest_timestamp + datetime.timedelta(hours=16))
        self.user_settings_model.first_contribution_msec = (
            utils.get_time_in_millisecs(
                self.lowest_timestamp + datetime.timedelta(
                    hours=10)
            )
        )
        self.user_settings_model.put()
        self.user_subscriptions_model = user_models.UserSubscriptionsModel(
            id=self.USER_ID_1)
        self.user_subscriptions_model.update_timestamps()
        self.user_subscriptions_model.last_checked = (
            self.lowest_timestamp + datetime.timedelta(hours=1)
        )
        self.user_subscriptions_model.put()
        self.exploration_user_data_model = user_models.ExplorationUserDataModel(
            id='%s.%s' % (self.USER_ID_1, self.EXP_ID_ONE),
            user_id=self.USER_ID_1,
            exploration_id=self.EXP_ID_ONE,
            rating=2,
            rated_on=self.lowest_timestamp + datetime.timedelta(hours=1),
            draft_change_list={'new_content': {}},
            draft_change_list_last_updated=(
                self.lowest_timestamp + datetime.timedelta(hours=2)),
            draft_change_list_exp_version=3,
            draft_change_list_id=1
        )
        self.exploration_user_data_model.update_timestamps()
        self.exploration_user_data_model.put()
        self.user_contributions_model = user_models.UserContributionsModel(
            id=self.USER_ID_1)
        self.user_contributions_model.update_timestamps()
        self.user_contributions_model.put()
        self.user_email_preferences_model = (
            user_models.UserEmailPreferencesModel(id=self.USER_ID_1))
        self.user_email_preferences_model.update_timestamps()
        self.user_email_preferences_model.put()
        self.user_stats_model = user_models.UserStatsModel(
            id=self.USER_ID_1)
        self.user_stats_model.update_timestamps()
        self.user_stats_model.put()

    def _run_one_off_job(self):
        """Runs the one-off MapReduce job."""
        job_id = (
            user_jobs_one_off.UserSettingsCreatedOnAuditOneOffJob.create_new())
        user_jobs_one_off.UserSettingsCreatedOnAuditOneOffJob.enqueue(job_id)
        self.assertEqual(
            self.count_jobs_in_mapreduce_taskqueue(
                taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
        self.process_and_flush_pending_mapreduce_tasks()
        stringified_output = (
            user_jobs_one_off.UserSettingsCreatedOnAuditOneOffJob
            .get_output(job_id))
        eval_output = [ast.literal_eval(stringified_item) for
                       stringified_item in stringified_output]
        return eval_output

    def test_created_on_having_lowest_value_timestamp_yields_success(self):
        """A user whose created_on already is the minimum is up to date."""
        self.assertEqual(
            self.lowest_timestamp, self.user_settings_model.created_on)
        expected_output = [['SUCCESS_ALREADY_UP_TO_DATE', 1]]
        actual_output = self._run_one_off_job()
        self.assertItemsEqual(expected_output, actual_output)

    def test_created_on_within_delta_from_lowest_value_yields_success(self):
        """A created_on within the allowed delta (5 minutes here) of the
        minimum is still treated as up to date.
        """
        self.user_settings_model.update_timestamps(
            update_last_updated_time=False)
        self.user_settings_model.created_on += datetime.timedelta(minutes=5)
        self.user_settings_model.put()
        self.assertLess(
            self.lowest_timestamp, self.user_settings_model.created_on)
        expected_output = [['SUCCESS_ALREADY_UP_TO_DATE', 1]]
        actual_output = self._run_one_off_job()
        self.assertItemsEqual(expected_output, actual_output)

    def test_created_on_greater_than_delta_from_lowest_value_yields_error(self):
        """A created_on more than the allowed delta past the minimum is
        reported, naming the attribute holding the minimum.
        """
        self.user_settings_model.update_timestamps(
            update_last_updated_time=False)
        self.user_settings_model.created_on += datetime.timedelta(minutes=6)
        self.user_settings_model.put()
        # Since last_updated of user_settings_model was never changed, it
        # remains the lowest timestamp value among all attributes.
        self.lowest_timestamp = self.user_settings_model.last_updated
        self.assertLess(
            self.lowest_timestamp,
            self.user_settings_model.created_on - datetime.timedelta(minutes=5))
        expected_output = [
            [
                'ERROR_NEED_TO_UPDATE_USING_UserSettingsModel_last_updated',
                [self.USER_ID_1]
            ]]
        actual_output = self._run_one_off_job()
        self.assertItemsEqual(expected_output, actual_output)

    def test_update_for_multiple_users_works_correctly(self):
        """Users are audited independently: one up to date, one flagged."""
        user_settings_model_2 = (
            user_models.UserSettingsModel(
                id=self.USER_ID_2,
                email=self.EMAIL_2,
            )
        )
        user_settings_model_2.update_timestamps()
        user_settings_model_2.created_on += datetime.timedelta(hours=10)
        user_settings_model_2.put()
        expected_output = [
            ['SUCCESS_ALREADY_UP_TO_DATE', 1],
            [
                'ERROR_NEED_TO_UPDATE_USING_UserSettingsModel_last_updated',
                [self.USER_ID_2]
            ]
        ]
        actual_output = self._run_one_off_job()
        self.assertItemsEqual(actual_output, expected_output)
class CleanUpUserSubscribersModelOneOffJobTests(test_utils.GenericTestBase):
    """Tests for the one-off job that removes a user's own id from their
    UserSubscribersModel.subscriber_ids list.
    """

    def setUp(self):
        super(CleanUpUserSubscribersModelOneOffJobTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup('user@email', 'user')
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.user_id = self.get_user_id_from_email('user@email')
        subscription_services.subscribe_to_creator(self.user_id, self.owner_id)
        self.model_instance = user_models.UserSubscribersModel.get_by_id(
            self.owner_id)
        self.process_and_flush_pending_mapreduce_tasks()

    def _run_job_and_get_output(self):
        """Runs the cleanup job to completion and returns its raw output."""
        job_class = user_jobs_one_off.CleanUpUserSubscribersModelOneOffJob
        job_id = job_class.create_new()
        job_class.enqueue(job_id)
        self.process_and_flush_pending_mapreduce_tasks()
        return job_class.get_output(job_id)

    def test_standard_operation(self):
        """A healthy model produces no job output."""
        self.assertEqual(self._run_job_and_get_output(), [])

    def test_migration_job_skips_deleted_model(self):
        """A model marked deleted is ignored even if it is corrupted."""
        self.model_instance.subscriber_ids.append(self.owner_id)
        self.model_instance.deleted = True
        self.model_instance.update_timestamps()
        self.model_instance.put()
        self.assertEqual(self._run_job_and_get_output(), [])

    def test_job_removes_user_id_from_subscriber_ids(self):
        """A user listed among their own subscribers is removed; other
        subscribers are kept.
        """
        self.model_instance.subscriber_ids.append(self.owner_id)
        self.model_instance.update_timestamps()
        self.model_instance.put()
        output = self._run_job_and_get_output()
        self.assertEqual(
            output, [
                '[u\'Removed user from their own subscribers list\', '
                '[u\'%s\']]' % self.owner_id])
        self.model_instance = user_models.UserSubscribersModel.get_by_id(
            self.owner_id)
        self.assertTrue(self.user_id in self.model_instance.subscriber_ids)
        self.assertTrue(self.owner_id not in self.model_instance.subscriber_ids)
class CleanUpCollectionProgressModelOneOffJobTests(test_utils.GenericTestBase):
def setUp(self):
    """Creates a published collection 'col' containing explorations
    '0'-'2', plus a learner who has completed '0' and '1' both generally
    and within the collection context.
    """
    super(CleanUpCollectionProgressModelOneOffJobTests, self).setUp()
    self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
    self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
    self.set_admins([self.OWNER_USERNAME])
    self.owner = user_services.get_user_actions_info(self.owner_id)
    explorations = [exp_domain.Exploration.create_default_exploration(
        '%s' % i,
        title='title %d' % i,
        category='category%d' % i
    ) for i in python_utils.RANGE(3)]
    collection = collection_domain.Collection.create_default_collection(
        'col')
    for exp in explorations:
        exp_services.save_new_exploration(self.owner_id, exp)
        rights_manager.publish_exploration(self.owner, exp.id)
        collection.add_node(exp.id)
    collection_services.save_new_collection(self.owner_id, collection)
    rights_manager.publish_collection(self.owner, 'col')
    self.signup('user@email', 'user')
    self.user_id = self.get_user_id_from_email('user@email')
    # Completing an exploration and recording it in the collection
    # context populates both CompletedActivitiesModel and
    # CollectionProgressModel for the learner.
    learner_progress_services.mark_exploration_as_completed(
        self.user_id, '0')
    collection_services.record_played_exploration_in_collection_context(
        self.user_id, 'col', '0')
    learner_progress_services.mark_exploration_as_completed(
        self.user_id, '1')
    collection_services.record_played_exploration_in_collection_context(
        self.user_id, 'col', '1')
    self.model_instance = user_models.CollectionProgressModel.get_by_id(
        '%s.col' % self.user_id)
    self.process_and_flush_pending_mapreduce_tasks()
def test_standard_operation(self):
    """With consistent data the job emits no output and leaves the
    progress model untouched.
    """
    job_class = user_jobs_one_off.CleanUpCollectionProgressModelOneOffJob
    job_id = job_class.create_new()
    job_class.enqueue(job_id)
    self.process_and_flush_pending_mapreduce_tasks()
    self.assertEqual(job_class.get_output(job_id), [])
    self.assertEqual(
        self.model_instance.completed_explorations, ['0', '1'])
def test_migration_job_skips_deleted_model(self):
    """A model marked deleted is ignored even if it is corrupted."""
    self.model_instance.completed_explorations.append('3')
    self.model_instance.deleted = True
    self.model_instance.update_timestamps()
    self.model_instance.put()
    job_class = user_jobs_one_off.CleanUpCollectionProgressModelOneOffJob
    job_id = job_class.create_new()
    job_class.enqueue(job_id)
    self.process_and_flush_pending_mapreduce_tasks()
    self.assertEqual(job_class.get_output(job_id), [])
def test_job_cleans_up_exploration_ids_not_present_in_collection(self):
completed_activities_model = (
user_models.CompletedActivitiesModel.get_by_id(self.user_id))
self.assertEqual(
completed_activities_model.exploration_ids, ['0', '1'])
self.assertEqual(
self.model_instance.completed_explorations, ['0', '1'])
self.model_instance.completed_explorations.append('3')
self.model_instance.update_timestamps()
self.model_instance.put()
self.assertEqual(
self.model_instance.completed_explorations, ['0', '1', '3'])
job_id = (
user_jobs_one_off
.CleanUpCollectionProgressModelOneOffJob.create_new())
user_jobs_one_off.CleanUpCollectionProgressModelOneOffJob.enqueue(
job_id)
self.process_and_flush_pending_mapreduce_tasks()
output = (
user_jobs_one_off
.CleanUpCollectionProgressModelOneOffJob.get_output(job_id))
expected_output = [(
'[u\'Added missing exp ids in CompletedActivitiesModel\', '
'[u\'%s.col\']]' % self.user_id
), (
'[u\'Invalid Exploration IDs cleaned from '
'CollectionProgressModel\', '
'[u"Model id: %s.col, Collection id: col, Removed exploration ids: '
'[u\'3\']"]]' % self.user_id)]
self.assertEqual(output, expected_output)
self.model_instance = user_models.CollectionProgressModel.get_by_id(
'%s.col' % self.user_id)
self.assertEqual(
self.model_instance.completed_explorations, ['0', '1'])
completed_activities_model = (
user_models.CompletedActivitiesModel.get_by_id(self.user_id))
self.assertEqual(
completed_activities_model.exploration_ids, ['0', '1', '3'])
def test_job_creates_completed_activities_model_if_it_is_missing(self):
completed_activities_model = (
user_models.CompletedActivitiesModel.get_by_id(self.user_id))
self.assertEqual(
completed_activities_model.exploration_ids, ['0', '1'])
completed_activities_model.delete()
self.assertIsNone(
user_models.CompletedActivitiesModel.get_by_id(self.user_id))
self.assertEqual(
self.model_instance.completed_explorations, ['0', '1'])
job_id = (
user_jobs_one_off
.CleanUpCollectionProgressModelOneOffJob.create_new())
user_jobs_one_off.CleanUpCollectionProgressModelOneOffJob.enqueue(
job_id)
self.process_and_flush_pending_mapreduce_tasks()
output = (
user_jobs_one_off
.CleanUpCollectionProgressModelOneOffJob.get_output(job_id))
self.assertEqual(
output, [
'[u\'Regenerated Missing CompletedActivitiesModel\', '
'[u\'%s.col\']]' % self.user_id])
self.assertEqual(
self.model_instance.completed_explorations, ['0', '1'])
completed_activities_model = (
user_models.CompletedActivitiesModel.get_by_id(self.user_id))
self.assertEqual(
completed_activities_model.exploration_ids, ['0', '1'])
def test_job_updates_completed_activities_model_if_exp_ids_do_not_match(
self):
learner_progress_services.mark_exploration_as_completed(
self.user_id, '2')
completed_activities_model = (
user_models.CompletedActivitiesModel.get_by_id(self.user_id))
self.assertEqual(
completed_activities_model.exploration_ids, ['0', '1', '2'])
completed_activities_model.exploration_ids = ['0', '2']
completed_activities_model.update_timestamps()
completed_activities_model.put()
completed_activities_model = (
user_models.CompletedActivitiesModel.get_by_id(self.user_id))
self.assertEqual(
completed_activities_model.exploration_ids, ['0', '2'])
self.assertEqual(
self.model_instance.completed_explorations, ['0', '1'])
job_id = (
user_jobs_one_off
.CleanUpCollectionProgressModelOneOffJob.create_new())
user_jobs_one_off.CleanUpCollectionProgressModelOneOffJob.enqueue(
job_id)
self.process_and_flush_pending_mapreduce_tasks()
output = (
user_jobs_one_off
.CleanUpCollectionProgressModelOneOffJob.get_output(job_id))
self.assertEqual(
output, [
'[u\'Added missing exp ids in CompletedActivitiesModel\', '
'[u\'%s.col\']]' % self.user_id])
self.assertEqual(
self.model_instance.completed_explorations, ['0', '1'])
completed_activities_model = (
user_models.CompletedActivitiesModel.get_by_id(self.user_id))
self.assertEqual(
completed_activities_model.exploration_ids, ['0', '2', '1'])
class CleanUpUserContributionsModelOneOffJobTests(test_utils.GenericTestBase):
    """Tests for the one-off job that removes ids of deleted explorations
    from UserContributionsModel instances.
    """
    def setUp(self):
        super(CleanUpUserContributionsModelOneOffJobTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.signup('user@email', 'user')
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        self.user_id = self.get_user_id_from_email('user@email')
        self.owner = user_services.get_user_actions_info(self.owner_id)
        self.user = user_services.get_user_actions_info(self.user_id)
        # 'user' creates exp0; owner creates exp1 which 'user' then edits,
        # so 'user' has edited both explorations but created only exp0.
        self.save_new_valid_exploration(
            'exp0', self.user_id, end_state_name='End')
        self.save_new_valid_exploration(
            'exp1', self.owner_id, end_state_name='End')
        exp_services.update_exploration(
            self.user_id, 'exp1', [exp_domain.ExplorationChange({
                'cmd': 'edit_exploration_property',
                'property_name': 'objective',
                'new_value': 'the objective'
            })], 'Test edit')
        rights_manager.publish_exploration(self.user, 'exp0')
        rights_manager.publish_exploration(self.owner, 'exp1')
        self.process_and_flush_pending_mapreduce_tasks()
    def test_standard_operation(self):
        """With no deleted explorations the job emits no output and leaves
        both users' contribution models untouched.
        """
        job_id = (
            user_jobs_one_off
            .CleanUpUserContributionsModelOneOffJob.create_new())
        user_jobs_one_off.CleanUpUserContributionsModelOneOffJob.enqueue(job_id)
        self.process_and_flush_pending_mapreduce_tasks()
        output = (
            user_jobs_one_off.CleanUpUserContributionsModelOneOffJob.get_output(
                job_id))
        self.assertEqual(output, [])
        model_instance_1 = user_models.UserContributionsModel.get_by_id(
            self.user_id)
        self.assertEqual(model_instance_1.created_exploration_ids, ['exp0'])
        self.assertEqual(
            model_instance_1.edited_exploration_ids, ['exp0', 'exp1'])
        model_instance_2 = user_models.UserContributionsModel.get_by_id(
            self.owner_id)
        self.assertEqual(model_instance_2.created_exploration_ids, ['exp1'])
        self.assertEqual(
            model_instance_2.edited_exploration_ids, ['exp1'])
    def test_migration_job_skips_deleted_model(self):
        """Models marked deleted are skipped even when their exploration
        has been deleted too.
        """
        model_instance = user_models.UserContributionsModel.get_by_id(
            self.user_id)
        model_instance.deleted = True
        model_instance.update_timestamps()
        model_instance.put()
        exp_services.delete_exploration(self.user_id, 'exp0')
        job_id = (
            user_jobs_one_off
            .CleanUpUserContributionsModelOneOffJob.create_new())
        user_jobs_one_off.CleanUpUserContributionsModelOneOffJob.enqueue(job_id)
        self.process_and_flush_pending_mapreduce_tasks()
        output = (
            user_jobs_one_off.CleanUpUserContributionsModelOneOffJob.get_output(
                job_id))
        self.assertEqual(output, [])
    def test_job_removes_deleted_exp_from_created_explorations(self):
        """Deleting exp0 removes it from the creator's created and edited
        exploration id lists; the other user's model is untouched.
        """
        exp_services.delete_exploration(self.user_id, 'exp0')
        model_instance_1 = user_models.UserContributionsModel.get_by_id(
            self.user_id)
        self.assertEqual(model_instance_1.created_exploration_ids, ['exp0'])
        self.assertEqual(
            model_instance_1.edited_exploration_ids, ['exp0', 'exp1'])
        model_instance_2 = user_models.UserContributionsModel.get_by_id(
            self.owner_id)
        self.assertEqual(model_instance_2.created_exploration_ids, ['exp1'])
        self.assertEqual(
            model_instance_2.edited_exploration_ids, ['exp1'])
        job_id = (
            user_jobs_one_off
            .CleanUpUserContributionsModelOneOffJob.create_new())
        user_jobs_one_off.CleanUpUserContributionsModelOneOffJob.enqueue(job_id)
        self.process_and_flush_pending_mapreduce_tasks()
        output = (
            user_jobs_one_off.CleanUpUserContributionsModelOneOffJob.get_output(
                job_id))
        # 'exp0' shows up twice: once removed from created ids and once from
        # edited ids of the same model.
        self.assertEqual(
            output, [
                '[u\'Removed deleted exp ids from UserContributionsModel\', '
                '[u"Model id: %s, Removed exploration ids: [u\'exp0\', '
                'u\'exp0\']"]]' % self.user_id])
        model_instance_1 = user_models.UserContributionsModel.get_by_id(
            self.user_id)
        self.assertEqual(model_instance_1.created_exploration_ids, [])
        self.assertEqual(model_instance_1.edited_exploration_ids, ['exp1'])
        model_instance_2 = user_models.UserContributionsModel.get_by_id(
            self.owner_id)
        self.assertEqual(model_instance_2.created_exploration_ids, ['exp1'])
        self.assertEqual(
            model_instance_2.edited_exploration_ids, ['exp1'])
    def test_job_removes_deleted_exp_from_edited_explorations(self):
        """Deleting exp1 removes it from both users' models: from the
        owner's created and edited lists and from the editor's edited list.
        """
        exp_services.delete_exploration(self.owner_id, 'exp1')
        model_instance_1 = user_models.UserContributionsModel.get_by_id(
            self.user_id)
        self.assertEqual(model_instance_1.created_exploration_ids, ['exp0'])
        self.assertEqual(
            model_instance_1.edited_exploration_ids, ['exp0', 'exp1'])
        model_instance_2 = user_models.UserContributionsModel.get_by_id(
            self.owner_id)
        self.assertEqual(model_instance_2.created_exploration_ids, ['exp1'])
        self.assertEqual(
            model_instance_2.edited_exploration_ids, ['exp1'])
        job_id = (
            user_jobs_one_off
            .CleanUpUserContributionsModelOneOffJob.create_new())
        user_jobs_one_off.CleanUpUserContributionsModelOneOffJob.enqueue(job_id)
        self.process_and_flush_pending_mapreduce_tasks()
        output = (
            user_jobs_one_off.CleanUpUserContributionsModelOneOffJob.get_output(
                job_id))
        # Output ordering from the MapReduce job is not guaranteed, so sort
        # the per-model messages before comparing.
        removed_exp_list = [
            'Model id: %s, Removed exploration ids: '
            '[u\'exp1\', u\'exp1\']' % self.owner_id,
            'Model id: %s, Removed exploration ids: '
            '[u\'exp1\']' % self.user_id]
        removed_exp_list.sort()
        self.assertEqual(
            output, [
                '[u\'Removed deleted exp ids from UserContributionsModel\', '
                '[u"%s", u"%s"]]' % (removed_exp_list[0], removed_exp_list[1])])
        model_instance_1 = user_models.UserContributionsModel.get_by_id(
            self.user_id)
        self.assertEqual(model_instance_1.created_exploration_ids, ['exp0'])
        self.assertEqual(model_instance_1.edited_exploration_ids, ['exp0'])
        model_instance_2 = user_models.UserContributionsModel.get_by_id(
            self.owner_id)
        self.assertEqual(model_instance_2.created_exploration_ids, [])
        self.assertEqual(
            model_instance_2.edited_exploration_ids, [])
class ProfilePictureAuditOneOffJobTests(test_utils.GenericTestBase):
    """Tests for the one-off job that audits users' profile pictures."""
    # No superadmin is needed; tests create exactly the users they audit.
    AUTO_CREATE_DEFAULT_SUPERADMIN_USER = False
    def _run_one_off_job(self):
        """Runs the one-off MapReduce job.

        Returns:
            list. The de-stringified output of the audit job.
        """
        job_id = user_jobs_one_off.ProfilePictureAuditOneOffJob.create_new()
        user_jobs_one_off.ProfilePictureAuditOneOffJob.enqueue(job_id)
        self.assertEqual(
            self.count_jobs_in_mapreduce_taskqueue(
                taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
        self.process_and_flush_pending_mapreduce_tasks()
        stringified_output = (
            user_jobs_one_off.ProfilePictureAuditOneOffJob.get_output(job_id))
        eval_output = [ast.literal_eval(stringified_item) for
                       stringified_item in stringified_output]
        return eval_output
    def setUp(self):
        super(ProfilePictureAuditOneOffJobTests, self).setUp()
        self.signup(self.OWNER_EMAIL, self.OWNER_USERNAME)
        self.owner_id = self.get_user_id_from_email(self.OWNER_EMAIL)
        user_services.generate_initial_profile_picture(self.owner_id)
    def test_correct_profile_picture_has_success_value(self):
        """A standard generated picture is reported as SUCCESS."""
        user_services.generate_initial_profile_picture(self.owner_id)
        output = self._run_one_off_job()
        self.assertEqual(output, [['SUCCESS', 1]])
    def test_resized_image_has_profile_picture_non_standard_dimensions_error(
            self):
        """A picture with wrong dimensions is flagged with its size."""
        user_services.update_profile_picture_data_url(
            self.owner_id, image_constants.PNG_IMAGE_WRONG_DIMENSIONS_BASE64)
        output = self._run_one_off_job()
        self.assertEqual(
            output,
            [[
                'FAILURE - PROFILE PICTURE NON STANDARD DIMENSIONS - 150,160',
                [self.OWNER_USERNAME]
            ]]
        )
    def test_invalid_image_has_cannot_load_picture_error(self):
        """A corrupt PNG payload is reported as unloadable."""
        user_services.update_profile_picture_data_url(
            self.owner_id, image_constants.PNG_IMAGE_BROKEN_BASE64)
        output = self._run_one_off_job()
        self.assertEqual(
            output,
            [['FAILURE - CANNOT LOAD PROFILE PICTURE', [self.OWNER_USERNAME]]]
        )
    def test_non_png_image_has_profile_picture_not_png_error(self):
        """A JPEG picture is flagged as not being PNG."""
        user_services.update_profile_picture_data_url(
            self.owner_id, image_constants.JPG_IMAGE_BASE64)
        output = self._run_one_off_job()
        self.assertEqual(
            output,
            [['FAILURE - PROFILE PICTURE NOT PNG', [self.OWNER_USERNAME]]]
        )
    def test_broken_base64_data_url_has_invalid_profile_picture_data_url_error(
            self):
        """An undecodable data URL is flagged as invalid."""
        user_services.update_profile_picture_data_url(
            self.owner_id, image_constants.BROKEN_BASE64)
        output = self._run_one_off_job()
        self.assertEqual(
            output,
            [[
                'FAILURE - INVALID PROFILE PICTURE DATA URL',
                [self.OWNER_USERNAME]
            ]]
        )
    def test_user_without_profile_picture_has_missing_profile_picture_error(
            self):
        """A user with no picture at all is flagged as missing one."""
        user_services.update_profile_picture_data_url(self.owner_id, None)
        output = self._run_one_off_job()
        self.assertEqual(
            output,
            [['FAILURE - MISSING PROFILE PICTURE', [self.OWNER_USERNAME]]]
        )
    def test_not_registered_user_has_not_registered_value(self):
        """A user without a username counts as SUCCESS - NOT REGISTERED."""
        user_settings_model = (
            user_models.UserSettingsModel.get_by_id(self.owner_id))
        user_settings_model.username = None
        user_settings_model.update_timestamps()
        user_settings_model.put()
        output = self._run_one_off_job()
        self.assertEqual(output, [['SUCCESS - NOT REGISTERED', 1]])
    def test_deleted_user_has_deleted_value(self):
        """A deleted user counts as SUCCESS - DELETED."""
        user_settings_model = (
            user_models.UserSettingsModel.get_by_id(self.owner_id))
        user_settings_model.deleted = True
        user_settings_model.update_timestamps()
        user_settings_model.put()
        output = self._run_one_off_job()
        self.assertEqual(output, [['SUCCESS - DELETED', 1]])
    def test_zero_users_has_no_output(self):
        """With no user models at all the job produces no output."""
        user_models.UserSettingsModel.delete_by_id(self.owner_id)
        output = self._run_one_off_job()
        self.assertEqual(output, [])
    def test_multiple_users_have_correct_values(self):
        """Each of several users is bucketed independently."""
        self.signup(self.NEW_USER_EMAIL, self.NEW_USER_USERNAME)
        new_user_id = self.get_user_id_from_email(self.NEW_USER_EMAIL)
        self.signup(self.EDITOR_EMAIL, self.EDITOR_USERNAME)
        editor_id = self.get_user_id_from_email(self.EDITOR_EMAIL)
        self.signup(self.MODERATOR_EMAIL, self.MODERATOR_USERNAME)
        moderator_id = self.get_user_id_from_email(self.MODERATOR_EMAIL)
        user_services.update_profile_picture_data_url(
            new_user_id, image_constants.JPG_IMAGE_BASE64)
        user_services.update_profile_picture_data_url(editor_id, None)
        user_settings_model = (
            user_models.UserSettingsModel.get_by_id(moderator_id))
        user_settings_model.deleted = True
        user_settings_model.update_timestamps()
        user_settings_model.put()
        output = self._run_one_off_job()
        # Order of the buckets is not guaranteed.
        self.assertItemsEqual(
            output,
            [
                ['SUCCESS', 1],
                ['FAILURE - MISSING PROFILE PICTURE', [self.EDITOR_USERNAME]],
                ['SUCCESS - DELETED', 1],
                ['FAILURE - PROFILE PICTURE NOT PNG', [self.NEW_USER_USERNAME]]
            ]
        )
class UniqueHashedNormalizedUsernameAuditJobTests(test_utils.GenericTestBase):
    """Tests for the audit job that verifies hashed normalized usernames
    are unique across users.
    """
    # No superadmin is needed; tests create their user models directly.
    AUTO_CREATE_DEFAULT_SUPERADMIN_USER = False
    def _run_one_off_job(self):
        """Runs the one-off MapReduce job.

        Returns:
            list. The de-stringified job output, with each FAILURE bucket's
            username list sorted for deterministic comparison.
        """
        job_id = (
            user_jobs_one_off.UniqueHashedNormalizedUsernameAuditJob
            .create_new())
        user_jobs_one_off.UniqueHashedNormalizedUsernameAuditJob.enqueue(job_id)
        self.assertEqual(
            self.count_jobs_in_mapreduce_taskqueue(
                taskqueue_services.QUEUE_NAME_ONE_OFF_JOBS), 1)
        self.process_and_flush_pending_mapreduce_tasks()
        stringified_output = (
            user_jobs_one_off.UniqueHashedNormalizedUsernameAuditJob.get_output(
                job_id))
        eval_output = [ast.literal_eval(stringified_item) for
                       stringified_item in stringified_output]
        for item in eval_output:
            if item[0] == 'FAILURE':
                item[1] = sorted(item[1])
        return eval_output
    def test_audit_user_with_username_is_successful(self):
        """A user with no normalized username is counted under
        'SUCCESS USERNAME NONE'.
        """
        model = user_models.UserSettingsModel(id='id', email='email@email.com')
        model.update_timestamps()
        model.put()
        output = self._run_one_off_job()
        self.assertEqual(output, [['SUCCESS USERNAME NONE', 1]])
    def test_audit_users_with_different_usernames_is_successful(self):
        """Distinct usernames with distinct hashes produce no output."""
        # Generate 4 different users.
        for i in python_utils.RANGE(4):
            model = user_models.UserSettingsModel(
                id='id%s' % i,
                email='email%s@email.com' % i,
                normalized_username='username%s' % i
            )
            model.update_timestamps()
            model.put()
        output = self._run_one_off_job()
        self.assertEqual(output, [])
    def test_audit_users_with_different_usernames_all_hashes_same_fails(self):
        """If every username hashes identically, all are reported together."""
        # Generate 4 different users.
        for i in python_utils.RANGE(4):
            model = user_models.UserSettingsModel(
                id='id%s' % i,
                email='email%s@email.com' % i,
                normalized_username='username%s' % i
            )
            model.update_timestamps()
            model.put()
        def mock_convert_to_hash(*_):
            """Function that takes any number of arguments and returns the
            same hash for all inputs.
            """
            return 'hashhash'
        with self.swap(utils, 'convert_to_hash', mock_convert_to_hash):
            output = self._run_one_off_job()
        self.assertEqual(
            output,
            [['FAILURE', ['username%s' % i for i in python_utils.RANGE(4)]]])
    def test_audit_users_with_different_usernames_some_hashes_same_fails(self):
        """Only the usernames sharing a hash appear in the FAILURE bucket."""
        # Generate 5 different users.
        for i in python_utils.RANGE(5):
            model = user_models.UserSettingsModel(
                id='id%s' % i,
                email='email%s@email.com' % i,
                normalized_username='username%s' % i
            )
            model.update_timestamps()
            model.put()
        def mock_convert_to_hash(username, _):
            """Function that takes username and returns the same hash for some
            usernames and unique hash for others.
            """
            if username in ('username1', 'username2'):
                return 'hashhash'
            return hash(username)
        with self.swap(utils, 'convert_to_hash', mock_convert_to_hash):
            output = self._run_one_off_job()
        self.assertEqual(output, [['FAILURE', ['username1', 'username2']]])
class DiscardOldDraftsOneOffJobTests(test_utils.GenericTestBase):
    """Tests for the one-off job that discards stale exploration drafts."""
    # Fixed ids used by every test; the model id is '<user_id>.<exp_id>'.
    EXP_USER_DATA_MODEL_ID = 'user_id.exp_id'
    USER_ID = 'user_id'
    EXP_ID = 'exp_id'
    def setUp(self):
        super(DiscardOldDraftsOneOffJobTests, self).setUp()
        self.save_new_valid_exploration(self.EXP_ID, self.USER_ID)
    def _run_job_and_verify_output(self, expected_output):
        """Runs the DiscardOldDraftsOneOffJob and verifies that the output
        matches the expected output.

        Args:
            expected_output: list(str). The expected output from the one-off
                job.
        """
        job_id = user_jobs_one_off.DiscardOldDraftsOneOffJob.create_new()
        user_jobs_one_off.DiscardOldDraftsOneOffJob.enqueue(job_id)
        self.process_and_flush_pending_mapreduce_tasks()
        actual_output = user_jobs_one_off.DiscardOldDraftsOneOffJob.get_output(
            job_id)
        self.assertEqual(sorted(actual_output), sorted(expected_output))
    def _create_exp_user_data_model(self, draft_change_list, last_updated):
        """Creates a new ExplorationUserDataModel with the given parameters.

        Args:
            draft_change_list: list(dict)|None. The change list corresponding
                to the user's draft for this exploration, or None if there is
                no such draft.
            last_updated: datetime.datetime. When the draft was last updated.
        """
        user_models.ExplorationUserDataModel(
            id=self.EXP_USER_DATA_MODEL_ID,
            user_id=self.USER_ID,
            exploration_id=self.EXP_ID,
            rating=2,
            rated_on=datetime.datetime(2018, 1, 1),
            draft_change_list=draft_change_list,
            draft_change_list_last_updated=last_updated,
            draft_change_list_exp_version=3,
            draft_change_list_id=1
        ).put()
    def test_models_without_drafts_are_ignored(self):
        """A model with no draft produces no output and is left alone."""
        self._create_exp_user_data_model(None, None)
        self._run_job_and_verify_output([])
    def test_draft_left_alone_if_it_is_current(self):
        """A recently-updated draft is not discarded."""
        self._create_exp_user_data_model(
            {'new_content': {}}, datetime.datetime(2021, 1, 1))
        self._run_job_and_verify_output([])
    def test_draft_discarded_if_exploration_is_missing(self):
        """A draft for a deleted exploration is discarded and reported."""
        exp_services.delete_exploration(self.USER_ID, self.EXP_ID)
        self._create_exp_user_data_model(
            {'new_content': {}}, datetime.datetime(2021, 1, 1))
        old_model = user_models.ExplorationUserDataModel.get_by_id(
            self.EXP_USER_DATA_MODEL_ID)
        self.assertIsNotNone(old_model.draft_change_list)
        self.assertIsNotNone(old_model.draft_change_list_last_updated)
        self.assertIsNotNone(old_model.draft_change_list_exp_version)
        self._run_job_and_verify_output([
            '[u\'DISCARDED - Exploration is missing\', [u\'%s\']]' %
            self.EXP_USER_DATA_MODEL_ID,
            '[u\'SUCCESS - Discarded draft\', 1]'
        ])
        # The model itself survives; only its draft fields are cleared.
        new_model = user_models.ExplorationUserDataModel.get_by_id(
            self.EXP_USER_DATA_MODEL_ID)
        self.assertLess(old_model.last_updated, new_model.last_updated)
        self.assertIsNone(new_model.draft_change_list)
        self.assertIsNone(new_model.draft_change_list_last_updated)
        self.assertIsNone(new_model.draft_change_list_exp_version)
    def test_draft_discarded_if_it_is_too_old(self):
        """A draft last touched in 2017 is treated as stale and discarded."""
        self._create_exp_user_data_model(
            {'new_content': {}}, datetime.datetime(2017, 1, 1))
        old_model = user_models.ExplorationUserDataModel.get_by_id(
            self.EXP_USER_DATA_MODEL_ID)
        self.assertIsNotNone(old_model.draft_change_list)
        self.assertIsNotNone(old_model.draft_change_list_last_updated)
        self.assertIsNotNone(old_model.draft_change_list_exp_version)
        self._run_job_and_verify_output([
            '[u\'DISCARDED - Draft is old\', [u\'%s\']]' %
            self.EXP_USER_DATA_MODEL_ID,
            '[u\'SUCCESS - Discarded draft\', 1]'
        ])
        # The model itself survives; only its draft fields are cleared.
        new_model = user_models.ExplorationUserDataModel.get_by_id(
            self.EXP_USER_DATA_MODEL_ID)
        self.assertLess(old_model.last_updated, new_model.last_updated)
        self.assertIsNone(new_model.draft_change_list)
        self.assertIsNone(new_model.draft_change_list_last_updated)
        self.assertIsNone(new_model.draft_change_list_exp_version)
| 41.392061
| 115
| 0.670074
| 13,817
| 118,878
| 5.33741
| 0.047405
| 0.030591
| 0.020977
| 0.015946
| 0.842828
| 0.789497
| 0.757685
| 0.722755
| 0.684218
| 0.66084
| 0
| 0.00866
| 0.250088
| 118,878
| 2,871
| 116
| 41.406479
| 0.818583
| 0.069256
| 0
| 0.677876
| 0
| 0
| 0.051698
| 0.014662
| 0
| 0
| 0
| 0
| 0.113274
| 1
| 0.05354
| false
| 0.000885
| 0.012832
| 0.001327
| 0.124779
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
7c5d05cab5a5f5b6e0c695120149c5d859ce73dc
| 1,271
|
py
|
Python
|
cupyx/scipy/special/_bessel.py
|
prkhrsrvstv1/cupy
|
ea86c8225b575af9d2855fb77a306cf86fd098ea
|
[
"MIT"
] | 6,180
|
2016-11-01T14:22:30.000Z
|
2022-03-31T08:39:20.000Z
|
cupyx/scipy/special/_bessel.py
|
prkhrsrvstv1/cupy
|
ea86c8225b575af9d2855fb77a306cf86fd098ea
|
[
"MIT"
] | 6,281
|
2016-12-22T07:42:31.000Z
|
2022-03-31T19:57:02.000Z
|
cupyx/scipy/special/_bessel.py
|
prkhrsrvstv1/cupy
|
ea86c8225b575af9d2855fb77a306cf86fd098ea
|
[
"MIT"
] | 829
|
2017-02-23T05:46:12.000Z
|
2022-03-27T17:40:03.000Z
|
from cupy import _core
j0 = _core.create_ufunc(
'cupyx_scipy_special_j0', ('f->f', 'd->d'),
'out0 = j0(in0)',
doc='''Bessel function of the first kind of order 0.
.. seealso:: :meth:`scipy.special.j0`
''')
j1 = _core.create_ufunc(
'cupyx_scipy_special_j1', ('f->f', 'd->d'),
'out0 = j1(in0)',
doc='''Bessel function of the first kind of order 1.
.. seealso:: :meth:`scipy.special.j1`
''')
y0 = _core.create_ufunc(
'cupyx_scipy_special_y0', ('f->f', 'd->d'),
'out0 = y0(in0)',
doc='''Bessel function of the second kind of order 0.
.. seealso:: :meth:`scipy.special.y0`
''')
y1 = _core.create_ufunc(
'cupyx_scipy_special_y1', ('f->f', 'd->d'),
'out0 = y1(in0)',
doc='''Bessel function of the second kind of order 1.
.. seealso:: :meth:`scipy.special.y1`
''')
i0 = _core.create_ufunc(
'cupyx_scipy_special_i0', ('f->f', 'd->d'),
'out0 = cyl_bessel_i0(in0)',
doc='''Modified Bessel function of order 0.
.. seealso:: :meth:`scipy.special.i0`
''')
i1 = _core.create_ufunc(
'cupyx_scipy_special_i1', ('f->f', 'd->d'),
'out0 = cyl_bessel_i1(in0)',
doc='''Modified Bessel function of order 1.
.. seealso:: :meth:`scipy.special.i1`
''')
| 20.5
| 57
| 0.581432
| 182
| 1,271
| 3.868132
| 0.186813
| 0.204545
| 0.127841
| 0.170455
| 0.90625
| 0.860795
| 0.588068
| 0.372159
| 0.235795
| 0.235795
| 0
| 0.042126
| 0.215578
| 1,271
| 61
| 58
| 20.836066
| 0.663992
| 0
| 0
| 0.162162
| 0
| 0
| 0.656176
| 0.217152
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.027027
| 0
| 0.027027
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
7c7c2902240d7e1307b27f122f8a6f8a82ec3f97
| 53,062
|
py
|
Python
|
detecting-invisible-people/deep_sort/tracker_mask.py
|
lv1turtle/Occlusion-object-tracking
|
bda349332ce904f5f08b694ea25e3e79abc997bc
|
[
"MIT"
] | 26
|
2021-10-30T15:08:56.000Z
|
2022-03-31T14:10:13.000Z
|
detecting-invisible-people/deep_sort/tracker_mask.py
|
lv1turtle/Occlusion-object-tracking
|
bda349332ce904f5f08b694ea25e3e79abc997bc
|
[
"MIT"
] | null | null | null |
detecting-invisible-people/deep_sort/tracker_mask.py
|
lv1turtle/Occlusion-object-tracking
|
bda349332ce904f5f08b694ea25e3e79abc997bc
|
[
"MIT"
] | 4
|
2021-10-30T02:13:29.000Z
|
2022-03-24T14:54:16.000Z
|
# vim: expandtab:ts=4:sw=4
from __future__ import absolute_import
import numpy as np
from skimage.filters import threshold_otsu
import os
from . import kalman_filter
from . import linear_assignment
from . import iou_matching
from .track import Track
from pycocotools import mask as maskUtils
import cv2
from skimage.transform import resize
from PIL import Image
import argparse
import glob
import multiprocessing as mp
import os
import time
import cv2
import tqdm
import numpy as np
import torch
from detectron2.config import get_cfg
from detectron2.structures import Boxes, Instances
from detectron2.data.detection_utils import read_image
from detectron2.utils.logger import setup_logger
from predictor import VisualizationDemo
from pycocotools import mask as maskUtils
def setup_cfg(args):
    """Build a frozen detectron2 config for the Mask R-CNN demo model.

    Args:
        args: dict with 'config_file' (path to a detectron2 yaml config)
            and 'confidence_threshold' (float) keys.

    Returns:
        A frozen detectron2 CfgNode.
    """
    threshold = args['confidence_threshold']
    cfg = get_cfg()
    cfg.merge_from_file(args['config_file'])
    # Apply the same score threshold to every supported head type.
    cfg.MODEL.RETINANET.SCORE_THRESH_TEST = threshold
    cfg.MODEL.ROI_HEADS.SCORE_THRESH_TEST = threshold
    cfg.MODEL.PANOPTIC_FPN.COMBINE.INSTANCES_CONFIDENCE_THRESH = threshold
    cfg.MODEL.WEIGHTS = 'detectron2://COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x/138205316/model_final_a3ec72.pkl'
    cfg.freeze()
    return cfg
def get_parser():
    """Return the demo's hard-coded 'arguments' as a plain dict.

    Despite the name there is no argparse parser here; the dict stands in
    for parsed command-line options.
    """
    return {
        'config_file': '/home/tkhurana/CVPR/detectron2/configs/COCO-InstanceSegmentation/mask_rcnn_R_101_FPN_3x.yaml',
        'confidence_threshold': 0.5,
    }
def sort_to_detectron2(detections):
    """Wrap SORT-style detection boxes in a detectron2 Boxes structure.

    Args:
        detections: array-like of box coordinates.

    Returns:
        detectron2.structures.Boxes holding the detections as a tensor.
    """
    box_tensor = torch.from_numpy(np.asarray(detections))
    return Boxes(box_tensor)
class Tracker:
"""
This is the multi-target tracker.
Parameters
----------
metric : nn_matching.NearestNeighborDistanceMetric
A distance metric for measurement-to-track association.
max_age : int
Maximum number of missed misses before a track is deleted.
n_init : int
Number of consecutive detections before the track is confirmed. The
track state is set to `Deleted` if a miss occurs within the first
`n_init` frames.
Attributes
----------
metric : nn_matching.NearestNeighborDistanceMetric
The distance metric used for measurement to track association.
max_age : int
Maximum number of missed misses before a track is deleted.
n_init : int
Number of frames that a track remains in initialization phase.
kf : kalman_filter.KalmanFilter
A Kalman filter to filter target trajectories in image space.
tracks : List[Track]
The list of active tracks at the current time step.
"""
def __init__(self, metric, max_iou_distance=0.7, max_age=30, n_init=3):
self.metric = metric
self.max_iou_distance = max_iou_distance
self.max_age = max_age
self.n_init = n_init
self.frame_idx = -1
self.depth_map_path = ''
self.sequence_info = {}
self.max_height = -1
self.image = []
self.tn = -1
self.past_frame = []
self.current_frame = []
self.warp_matrix = -1
self.kf = kalman_filter.KalmanFilter()
self.tracks = []
self._next_id = 1
self.vicinity_x = 25
self.vicinity_y = 0
def get_masks(self):
bboxes = []
for track in self.tracks:
x, y, w, h = track.to_tlwh()
bboxes.append([x, y, x+w, y+h])
impath = os.path.join(
self.depth_map_path,
'img1',
'{:06d}.jpg'.format(self.frame_idx))
if len(bboxes) != 0:
self.masks = self.get_mask_for_bbox(bboxes, impath)
else:
self.masks = []
def get_mask_for_bbox(self, bboxes, path):
width, height = Image.open(path).size
j = 0
mask_array = []
while j < len(bboxes):
bbox_mask = np.zeros((height, width), dtype='uint8')
x1, y1, x2, y2 = bboxes[j]
bbox_mask[int(y1):int(y2), int(x1):int(x2)] = 1
mask_array.append(bbox_mask)
j += 1
return mask_array
def predict(self):
"""Propagate track state distributions one time step forward.
This function should be called once every time step, before `update`.
"""
# print("Len of tracks:", len(self.tracks))
for track in self.tracks:
track.predict(self.kf, self.max_height, tn=self.tn,
warp_matrix=self.warp_matrix)
def update(self, detections, occluded_factor=1.0,
filtering_factor=1.0):
"""Perform measurement update and track management.
Parameters
----------
detections : List[deep_sort.detection.Detection]
A list of detections at the current time step.
"""
# Run matching cascade.
matches, unmatched_tracks, unmatched_detections, newly_occluded_tracks, previously_occluded_tracks = \
self._match(detections, occluded_factor=occluded_factor, filtering_factor=filtering_factor)
# use this with only_filtering True and default_matching False to get just deepsort+
# extrapolate+depth; the filtered out boxes should be joined back to unmatched_tracks if
# this flag is true.
if only_extrapolate:
unmatched_tracks = unmatched_tracks + previously_occluded_tracks
previously_occluded_tracks = []
# for all the matched detection and track pairs, we are going to (conditionally) call
# these confirmed tracks and do the needful (as you can find in the update function in
# track.py in this folder).
for track_idx, detection_idx in matches:
self.tracks[track_idx].update(
self.kf, detections[detection_idx],
self.image, self.sequence_info,
temporal_noise=self.temporal_noise,
tn=self.tn)
# for all the newly_occluded_tracks, we are going to call these occluded if they
# were previously a confirmed track. if these tracks are still occluded and it has
# been > max_age then we are going to delete these tracks.
for track_idx in newly_occluded_tracks:
self.tracks[track_idx].mark_occluded()
# these are the tracks that got filtered due to freespace filtering so take a hard
# decision of deleting these.
for track_idx in previously_occluded_tracks:
self.tracks[track_idx].mark_deleted()
# for the tracks that were in confirmed state but which were left unmatched, delete
# them if it has been > max_age.
for track_idx in unmatched_tracks:
self.tracks[track_idx].mark_missed()
# for all unmatched detections in the current frame, start a new track.
for detection_idx in unmatched_detections:
self._initiate_track(detections[detection_idx],
temporal_noise=self.temporal_noise, tn=self.tn)
self.tracks = [t for t in self.tracks if not t.is_deleted()]
# Update distance metric.
active_targets = [t.track_id for t in self.tracks if t.is_confirmed() or t.is_occluded()]
features, targets = [], []
for track in self.tracks:
if not track.is_confirmed() and not track.is_occluded():
continue
features += track.features
targets += [track.track_id for _ in track.features]
track.features = []
self.metric.partial_fit(
np.asarray(features), np.asarray(targets), active_targets)
    def _match(self, detections, default_matching=False,
               freespace_filtering=True, occluded_factor=1.0,
               filtering_factor=1.0, extrapolated_iou_match=False,
               appearance_match=True, bugfix=False):
        """Associate current-frame detections with tracks.

        Flag-dependent pipeline: (1) appearance matching cascade over
        confirmed tracks, (2) a second cascade over occluded tracks,
        (3) IOU matching over the leftovers, then (4) mask/depth-based
        reasoning that decides which unmatched tracks became occluded and
        which occluded tracks got filtered by freespace reasoning.

        Parameters
        ----------
        detections : List[deep_sort.detection.Detection]
            Detections at the current time step.
        default_matching : bool
            If True, merge confirmed and occluded tracks and match them
            together (vanilla deepsort behaviour), re-segregating after.
        freespace_filtering : bool
            If True, run the mask-based reappearance filter on occluded
            tracks.
        occluded_factor : float
            Multiplier on a track's predicted depth in the occlusion test.
        filtering_factor : float
            Multiplier on the observed depth in the reappearance test.
        extrapolated_iou_match : bool
            If True, tracks missed for more than one frame still take part
            in IOU matching.
        appearance_match : bool
            If False, skip the appearance cascades and rely on IOU only.
        bugfix : bool
            With default_matching, also freespace-filter the plain
            unmatched tracks (not just the occluded ones).

        Returns
        -------
        tuple
            (matches, unmatched_tracks, unmatched_detections,
            occluded_tracks_, previously_occluded_tracks): `matches` holds
            (track_idx, detection_idx) pairs; the rest are index lists
            into self.tracks / detections.

        NOTE(review): only_filtering=True together with
        default_matching=True is an unsupported combination (some locals
        would be unbound) -- see the caveat comment near the bottom.
        """
        def gated_metric(tracks, dets, track_indices, detection_indices):
            # Appearance (feature) distance between tracks and detections,
            # gated by the Kalman filter's Mahalanobis distance so that
            # motion-implausible pairs are ruled out.
            features = np.array([dets[i].feature for i in detection_indices])
            targets = np.array([tracks[i].track_id for i in track_indices])
            cost_matrix = self.metric.distance(features, targets)
            cost_matrix = linear_assignment.gate_cost_matrix(
                self.kf, cost_matrix, tracks, dets, track_indices,
                detection_indices, temporal_noise=self.temporal_noise,
                tn=self.tn)
            return cost_matrix
        # Refresh the per-track segmentation masks used by *_mask reasoning.
        self.get_masks()
        # Split track set into confirmed, occluded and unconfirmed tracks.
        confirmed_tracks = [
            i for i, t in enumerate(self.tracks) if t.is_confirmed()]
        occluded_tracks = [
            i for i, t in enumerate(self.tracks) if t.is_occluded()]
        unconfirmed_tracks = [
            i for i, t in enumerate(self.tracks) if not t.is_confirmed() and not t.is_occluded()]
        # find all occluded tracks from the set of confirmed tracks and collectively
        # call them newly_occluded_tracks. the set of tracks that were not occluded will
        # still be in confirmed_tracks.
        if not self.only_filtering:
            newly_occluded_tracks, confirmed_tracks = self.reason_for_occlusions_mask(
                self.tracks,
                confirmed_tracks,
                occluded_factor)
            newly_occluded_tracks = newly_occluded_tracks + occluded_tracks
        # if using default matching, merge all kinds of tracks together into confirmed_tracks
        # and match these together based on appearance. later we will segregate them again
        if not self.only_filtering and default_matching and appearance_match:
            confirmed_tracks = confirmed_tracks + newly_occluded_tracks
            matches_a, unmatched_tracks_a, unmatched_detections = \
                linear_assignment.matching_cascade(
                    gated_metric, self.metric.matching_threshold, 0, self.max_age,
                    self.tracks, detections, confirmed_tracks)
        elif not self.only_filtering and default_matching and not appearance_match:
            # no appearance matching: everything stays unmatched and goes
            # to the IOU stage below.
            confirmed_tracks = confirmed_tracks + newly_occluded_tracks
            matches_a = []
            unmatched_tracks_a = confirmed_tracks
            unmatched_detections = [idx for idx, det in enumerate(detections)]
        # similar, except we dont match the confirmed and occluded tracks together now
        if not default_matching and appearance_match:
            matches_a, unmatched_tracks_a, unmatched_detections = \
                linear_assignment.matching_cascade(
                    gated_metric, self.metric.matching_threshold, 0, self.max_age,
                    self.tracks, detections, confirmed_tracks)
        elif not default_matching and not appearance_match:
            matches_a = []
            unmatched_tracks_a = confirmed_tracks
            unmatched_detections = [idx for idx, det in enumerate(detections)]
        # similar idea, above was for matching confirmed tracks, now we are matching the
        # occluded tracks. in this case, the occluded tracks that actually got matched to
        # a detection, we should call it a confirmed track now and the ones that didnt match
        # should still be in the occluded state.
        if not self.only_filtering and not default_matching and appearance_match:
            # print("matching c!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!")
            matches_c, newly_occluded_tracks, unmatched_detections = \
                linear_assignment.matching_cascade(
                    gated_metric, self.metric.matching_threshold, 0, self.max_age, # 0.15
                    self.tracks, detections, newly_occluded_tracks, unmatched_detections)
        elif not self.only_filtering and not default_matching and not appearance_match:
            matches_c = []
        # for track_idx, detection_idx in matches_a:
        #     if self.tracks[track_idx].track_id == 4:
        #         print("track was matched in a!!!!!!!!!!!!")
        # for track_idx, detection_idx in matches_c:
        #     if self.tracks[track_idx].track_id == 4:
        #         print("track was matched in c!!!!!!!!!!!!")
        # this is an original step in deepsort
        # Associate remaining tracks together with unconfirmed tracks using IOU.
        # extrapolated iou match debug
        # temp = [k for k in unmatched_tracks_a if
        #         self.tracks[k].time_since_update != 1 \
        #         and self.tracks[k].state == 4]
        # print("debug print", temp)
        if extrapolated_iou_match:
            # print("Extrapolated iou match was true")
            iou_track_candidates = unconfirmed_tracks + [
                k for k in unmatched_tracks_a]
            unmatched_tracks_a = []
        else:
            # only tracks seen in the immediately previous frame are good
            # IOU candidates; older misses stay in unmatched_tracks_a.
            iou_track_candidates = unconfirmed_tracks + [
                k for k in unmatched_tracks_a if
                self.tracks[k].time_since_update == 1]
            unmatched_tracks_a = [
                k for k in unmatched_tracks_a if
                self.tracks[k].time_since_update != 1]
        matches_b, unmatched_tracks_b, unmatched_detections = \
            linear_assignment.min_cost_matching(
                iou_matching.iou_cost, self.max_iou_distance, self.tracks,
                detections, iou_track_candidates, unmatched_detections)
        # extrapolated iou match debug
        # print("iou matches", matches_b)
        # very trivial, just takes care of whether we have three sets of matches till
        # now or only two
        if not self.only_filtering and not default_matching:
            matches = matches_a + matches_b + matches_c # + matches_d
        else:
            matches = matches_a + matches_b # + matches_c # + matches_d
        unmatched_tracks = list(set(unmatched_tracks_a + unmatched_tracks_b))
        # this step segregates the occluded tracks from the unmatched confirmed tracks
        # if you used default matching above, (because we merged both into one for default
        # matching)
        if default_matching:
            newly_occluded_tracks = [i for i in newly_occluded_tracks if i in unmatched_tracks]
            unmatched_tracks = [i for i in unmatched_tracks if i not in newly_occluded_tracks]
        # if we weren't using occluded state, then we havent formed any variable called
        # newly_occluded_tracks yet, so just call unmatched_tracks as this for the next step
        if self.only_filtering and not default_matching:
            newly_occluded_tracks = unmatched_tracks
        # either do freespace filtering or if we werent supposed to filter, then there is no
        # notion of previously_occluded_tracks (these are the set of tracks that were filtered
        # so they are going to be deleted if stored in this variable) and all newly_occluded_tracks
        # are still maintained in the occluded state
        if (freespace_filtering or self.only_filtering) and not default_matching:
            previously_occluded_tracks, occluded_tracks_ = self.reason_for_reappearances_mask(
                self.tracks,
                newly_occluded_tracks,
                filtering_factor)
        elif (freespace_filtering or self.only_filtering) and default_matching and bugfix:
            # print("Executing bugfix")
            pv1, occluded_tracks_ = self.reason_for_reappearances_mask(
                self.tracks,
                newly_occluded_tracks,
                filtering_factor)
            pv2, unmatched_tracks = self.reason_for_reappearances_mask(
                self.tracks,
                unmatched_tracks,
                filtering_factor)
            previously_occluded_tracks = pv1 + pv2
        elif (freespace_filtering or self.only_filtering) and default_matching and not bugfix:
            # print("Not executing bugfix")
            previously_occluded_tracks, occluded_tracks_ = self.reason_for_reappearances_mask(
                self.tracks,
                newly_occluded_tracks,
                filtering_factor)
        else:
            previously_occluded_tracks = []
            occluded_tracks_ = newly_occluded_tracks
        # if we were only filtering, then there was no notion of occluded_tracks_ and these are
        # actually the tracks that did not get filtered and so really, are still unmatched
        if self.only_filtering and not default_matching:
            unmatched_tracks = occluded_tracks_
            occluded_tracks_ = []
        # two caveats: one, some variables or if statements might be redundant, pls excuse my
        # coding, two, because of this reason, always have to take care that if only_filtering is
        # set to true then default_matching should be set to false for the code to execute properly
        # print("matches, unmatched tracks, unmatched detections, occluded_tracks_, previously_occluded_tracks",
        #       len(matches), len(unmatched_tracks), len(unmatched_detections),
        #       len(occluded_tracks_), len(previously_occluded_tracks))
        return matches, unmatched_tracks, unmatched_detections, occluded_tracks_, previously_occluded_tracks
# DO NOT TRUST THIS CODE
def _match_swap(self, detections, default_matching=False,
freespace_filtering=True, occluded_factor=1.0,
filtering_factor=1.0, extrapolated_iou_match=False,
appearance_match=True, bugfix=False):
def gated_metric(tracks, dets, track_indices, detection_indices):
features = np.array([dets[i].feature for i in detection_indices])
targets = np.array([tracks[i].track_id for i in track_indices])
print("detection indices", detection_indices)
print("track indices", track_indices)
cost_matrix = self.metric.distance(features, targets)
cost_matrix = linear_assignment.gate_cost_matrix(
self.kf, cost_matrix, tracks, dets, track_indices,
detection_indices, temporal_noise=self.temporal_noise,
tn=self.tn)
return cost_matrix
# Split track set into confirmed, occluded and unconfirmed tracks.
confirmed_tracks = [
i for i, t in enumerate(self.tracks) if t.is_confirmed()]
occluded_tracks = [
i for i, t in enumerate(self.tracks) if t.is_occluded()]
unconfirmed_tracks = [
i for i, t in enumerate(self.tracks) if not t.is_confirmed() and not t.is_occluded()]
# find all occluded tracks from the set of confirmed tracks and collectively
# call them newly_occluded_tracks. the set of tracks that were not occluded will
# still be in confirmed_tracks.
if not self.only_filtering:
newly_occluded_tracks, confirmed_tracks = self.reason_for_occlusions(
self.tracks,
confirmed_tracks,
occluded_factor)
newly_occluded_tracks = newly_occluded_tracks + occluded_tracks
# if using default matching, merge all kinds of tracks together into confirmed_tracks
# and match these together based on appearance. later we will segregate them again
if not self.only_filtering and default_matching: # and appearance_match:
confirmed_tracks = confirmed_tracks + newly_occluded_tracks + unconfirmed_tracks
matches_b, unmatched_tracks_b, unmatched_detections = \
linear_assignment.min_cost_matching(
iou_matching.iou_cost, self.max_iou_distance, self.tracks,
detections, confirmed_tracks)
# similar, except we dont match the confirmed and occluded tracks together now
if not default_matching: # and appearance_match:
matches_b, unmatched_tracks_b, unmatched_detections = \
linear_assignment.min_cost_matching(
iou_matching.iou_cost, self.max_iou_distance, self.tracks,
detections, confirmed_tracks)
# similar idea, above was for matching confirmed tracks, now we are matching the
# occluded tracks. in this case, the occluded tracks that actually got matched to
# a detection, we should call it a confirmed track now and the ones that didnt match
# should still be in the occluded state.
if not self.only_filtering and not default_matching: # and appearance_match:
matches_c, newly_occluded_tracks, unmatched_detections = \
linear_assignment.min_cost_matching(
iou_matching.iou_cost, self.max_iou_distance, self.tracks,
detections, newly_occluded_tracks, unmatched_detections)
iou_track_candidates = unmatched_tracks_b
unmatched_tracks_b = []
# print(len(iou_track_candidates), len(unmatched_detections))
matches_a, unmatched_tracks_a, unmatched_detections = \
linear_assignment.matching_cascade(
gated_metric, self.metric.matching_threshold, 0, self.max_age,
self.tracks, detections, iou_track_candidates, unmatched_detections)
# very trivial, just takes care of whether we have three sets of matches till
# now or only two
if not self.only_filtering and not default_matching:
matches = matches_a + matches_b + matches_c # + matches_d
else:
matches = matches_a + matches_b # + matches_c # + matches_d
unmatched_tracks = list(set(unmatched_tracks_a + unmatched_tracks_b))
# this step segregates the occluded tracks from the unmatched confirmed tracks
# if you used default matching above, (because we merged both into one for default
# matching)
if default_matching:
newly_occluded_tracks = [i for i in newly_occluded_tracks if i in unmatched_tracks]
unmatched_tracks = [i for i in unmatched_tracks if i not in newly_occluded_tracks]
# if we weren't using occluded state, then we havent formed any variable called
# newly_occluded_tracks yet, so just call unmatched_tracks as this for the next step
if self.only_filtering and not default_matching:
newly_occluded_tracks = unmatched_tracks
# either do freespace filtering or if we werent supposed to filter, then there is no
# notion of previously_occluded_tracks (these are the set of tracks that were filtered
# so they are going to be deleted if stored in this variable) and all newly_occluded_tracks
# are still maintained in the occluded state
if (freespace_filtering or self.only_filtering) and not default_matching:
previously_occluded_tracks, occluded_tracks_ = self.reason_for_reappearances(
self.tracks,
newly_occluded_tracks,
filtering_factor)
elif (freespace_filtering or self.only_filtering) and default_matching and bugfix:
# print("Executing bugfix")
pv1, occluded_tracks_ = self.reason_for_reappearances(
self.tracks,
newly_occluded_tracks,
filtering_factor)
pv2, unmatched_tracks = self.reason_for_reappearances(
self.tracks,
unmatched_tracks,
filtering_factor)
previously_occluded_tracks = pv1 + pv2
elif (freespace_filtering or self.only_filtering) and default_matching and not bugfix:
# print("Not executing bugfix")
previously_occluded_tracks, occluded_tracks_ = self.reason_for_reappearances(
self.tracks,
newly_occluded_tracks,
filtering_factor)
else:
previously_occluded_tracks = []
occluded_tracks_ = newly_occluded_tracks
# if we were only filtering, then there was no notion of occluded_tracks_ and these are
# actually the tracks that did not get filtered and so really, are still unmatched
if self.only_filtering and not default_matching:
unmatched_tracks = occluded_tracks_
occluded_tracks_ = []
# two caveats: one, some variables or if statements might be redundant, pls excuse my
# coding, two, because of this reason, always have to take care that if only_filtering is
# set to true then default_matching should be set to false for the code to execute properly
# print("matches, unmatched tracks, unmatched detections, occluded_tracks_, previously_occluded_tracks",
# len(matches), len(unmatched_tracks), len(unmatched_detections),
# len(occluded_tracks_), len(previously_occluded_tracks))
return matches, unmatched_tracks, unmatched_detections, occluded_tracks_, previously_occluded_tracks
def _initiate_track(self, detection, temporal_noise=True, tn=-1):
mean_depth = self.compute_mean_depth_from_mask(self.image, detection, self.sequence_info)
# print(mean_depth)
det = list(detection.to_xyah())
det = det + [mean_depth]
mean, covariance = self.kf.initiate(det, temporal_noise, tn)
self.tracks.append(Track(
mean, covariance, self._next_id,
self.n_init, self.max_age,
detection.feature))
self._next_id += 1
def compute_mean_depth(self, depth_map, detection, seq_info):
scale_x = seq_info["image_size"][1] / float(depth_map.shape[1])
scale_y = seq_info["image_size"][0] / float(depth_map.shape[0])
box = detection.tlwh.copy()
box[2:] += box[:2]
box = [box[0]/scale_x,
box[1]/scale_y,
box[2]/scale_x,
box[3]/scale_y]
box = [int(x) for x in box]
box = [max(0, box[0]), max(0, box[1]),
max(0, min(depth_map.shape[1], box[2])),
max(0, min(depth_map.shape[0], box[3]))]
if 0 in box[2:] \
or box[0] >= depth_map.shape[1] \
or box[1] >= depth_map.shape[0] \
or box[0] == box[2] \
or box[1] == box[2]:
return -1
box = depth_map[box[1]:box[3], box[0]:box[2]].copy()
return np.mean(box)
def compute_mean_depth_from_mask(self, depth_map, detection, seq_info, mask=None):
width = depth_map.shape[1]
height = depth_map.shape[0]
# print(detection.mask['counts'], detection.mask['size'])
if detection is not None:
m = detection.mask.copy()
elif mask is not None:
m = mask
else:
print("One of detection or mask has to be non-None")
exit(0)
m = resize(m, (height, width), order=1)
inter_mask = np.zeros((height, width), dtype=float)
inter_mask = np.where(m > 10e-6, depth_map, 0)
if 0 in np.nonzero(inter_mask)[0].shape:
return -1
return np.mean(inter_mask[np.nonzero(inter_mask)])
def align(self, im1_gray, im2_gray):
# maximal number of iterations (original 50)
number_of_iterations = 50 # 100
# Threshold increment between two iterations (original 0.001)
termination_eps = 0.001 # 0.00001
# Which warp mode to use (cv2.MOTION_EUCLIDEAN, cv2.MOTION_AFFINE, ...)
warp_mode = cv2.MOTION_EUCLIDEAN
# im1_gray = cv2.cvtColor(im1, cv2.COLOR_RGB2GRAY)
# im2_gray = cv2.cvtColor(im2, cv2.COLOR_RGB2GRAY)
warp_matrix = np.eye(2, 3, dtype=np.float32)
criteria = (cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT,
number_of_iterations,
termination_eps)
try:
cc, warp_matrix = cv2.findTransformECC(im1_gray,
im2_gray, warp_matrix,
warp_mode, criteria,
inputMask=None, gaussFiltSize=1)
except TypeError:
cc, warp_matrix = cv2.findTransformECC(im1_gray,
im2_gray, warp_matrix,
warp_mode, criteria)
# if self.do_reid:
# for t in self.inactive_tracks:
# t.pos = warp_pos(t.pos, warp_matrix)
# if self.motion_model_cfg['enabled']:
# for t in self.tracks:
# for i in range(len(t.last_pos)):
# t.last_pos[i] = warp_pos(t.last_pos[i], warp_matrix)
return warp_matrix
def update_metadata(self, idx, path, seq_info, max_height,
only_filtering=False, temporal_noise=True,
ah_velocity=False, velocity_weighting=True,
tn=-1, motion_aware=False):
self.frame_idx = idx
self.depth_map_path = path
self.sequence_info = seq_info
self.max_height = max_height
self.image = np.load(
os.path.join(
self.depth_map_path,
'img1Depth',
'{:06d}.npy'.format(self.frame_idx)))
if self.frame_idx != 1:
self.past_frame = cv2.imread(
os.path.join(
self.depth_map_path,
'img1',
'{:06d}.jpg'.format(self.frame_idx - 1)),
0
)
self.current_frame = cv2.imread(
os.path.join(
self.depth_map_path,
'img1',
'{:06d}.jpg'.format(self.frame_idx)),
0
)
self.only_filtering = only_filtering
self.temporal_noise = temporal_noise
self.ah_velocity = ah_velocity
self.velocity_weighting = velocity_weighting
self.tn = tn
if self.frame_idx != 1 and motion_aware:
# print("aligning ...")
warp_path = os.path.join(
self.depth_map_path,
'warpmatrix',
'{:06d}.npy'.format(self.frame_idx))
if os.path.exists(warp_path):
self.warp_matrix = np.load(warp_path)
else:
os.makedirs(os.path.dirname(warp_path), exist_ok=True)
self.warp_matrix = self.align(self.past_frame,
self.current_frame)
np.save(warp_path, self.warp_matrix)
self.motion_aware = motion_aware
def reason_for_occlusions(self, tracks, track_indices, occluded_factor=1.0):
if self.frame_idx == -1:
return [], track_indices
newly_occluded_tracks, unmatched_tracks = [], []
image = self.image.copy() # np.load(os.path.join(self.depth_map_path, 'img1Depth',
# '{:06d}.npy'.format(self.frame_idx)))
scale_x = self.sequence_info["image_size"][1] / float(image.shape[1])
scale_y = self.sequence_info["image_size"][0] / float(image.shape[0])
for idx in track_indices:
track = self.tracks[idx]
box = track.to_tlbr()
_, _, _, _, predicted_depth = track.to_tlwhz()
box = [box[0]/scale_x, box[1]/scale_y, box[2]/scale_x, box[3]/scale_y]
box = [int(x) for x in box]
box = [max(0, box[0]), max(0, box[1]),
max(min(image.shape[1], box[2]), 0),
max(min(image.shape[0], box[3]), 0)]
if 0 in box[2:] or box[0] >= image.shape[1] or box[1] >= image.shape[0] or box[0] == box[2] or box[1] == box[2]:
unmatched_tracks.append(idx)
continue
box = image[box[1]:box[3], box[0]:box[2]].copy()
if len(np.unique(box)) == 1:
unmatched_tracks.append(idx)
continue
if 0 in box.shape:
unmatched_tracks.append(idx)
continue
box_mean = np.mean(box[np.nonzero(box)])
# if track.track_id == 4:
# print("two depths are", predicted_depth, box_mean)
if predicted_depth * occluded_factor < box_mean:
newly_occluded_tracks.append(idx)
else:
unmatched_tracks.append(idx)
return newly_occluded_tracks, unmatched_tracks
def reason_for_reappearances(self, tracks, track_indices, filtering_factor=1.0):
if self.frame_idx == -1:
return [], track_indices
previously_occluded_tracks, occluded_tracks = [], []
image = self.image.copy() # np.load(os.path.join(self.depth_map_path, 'img1Depth',
# '{:06d}.npy'.format(self.frame_idx)))
scale_x = self.sequence_info["image_size"][1] / float(image.shape[1])
scale_y = self.sequence_info["image_size"][0] / float(image.shape[0])
for idx in track_indices:
track = self.tracks[idx]
box = track.to_tlbr()
_, _, _, _, predicted_depth = track.to_tlwhz()
box = [box[0]/scale_x, box[1]/scale_y, box[2]/scale_x, box[3]/scale_y]
box = [int(x) for x in box]
box = [max(0, box[0]), max(0, box[1]),
max(min(image.shape[1], box[2]), 0),
max(min(image.shape[0], box[3]), 0)]
if 0 in box[2:] or box[0] >= image.shape[1] or box[1] >= image.shape[0] or box[0] == box[2] or box[1] == box[2]:
occluded_tracks.append(idx)
continue
box = image[box[1]:box[3], box[0]:box[2]].copy()
if len(np.unique(box)) == 1:
occluded_tracks.append(idx)
continue
if 0 in box.shape:
occluded_tracks.append(idx)
continue
box_mean = np.mean(box[np.nonzero(box)])
# if track.track_id == 4:
# print("in filtering, two depths are", predicted_depth, box_mean)
if predicted_depth > box_mean * filtering_factor:
previously_occluded_tracks.append(idx)
else:
occluded_tracks.append(idx)
return previously_occluded_tracks, occluded_tracks
def reason_for_occlusions_mask(self, tracks, track_indices, occluded_factor=1.0):
if self.frame_idx == -1:
return [], track_indices
newly_occluded_tracks, unmatched_tracks = [], []
image = self.image.copy() # np.load(os.path.join(self.depth_map_path, 'img1Depth',
# '{:06d}.npy'.format(self.frame_idx)))
for idx in track_indices:
track = self.tracks[idx]
_, _, _, _, predicted_depth = track.to_tlwhz()
box_mean = self.compute_mean_depth_from_mask(
image, None, self.sequence_info, self.masks[idx])
if predicted_depth * occluded_factor < box_mean:
newly_occluded_tracks.append(idx)
else:
unmatched_tracks.append(idx)
return newly_occluded_tracks, unmatched_tracks
def reason_for_reappearances_mask(self, tracks, track_indices, filtering_factor=1.0):
if self.frame_idx == -1:
return [], track_indices
previously_occluded_tracks, occluded_tracks = [], []
image = self.image.copy() # np.load(os.path.join(self.depth_map_path, 'img1Depth',
# '{:06d}.npy'.format(self.frame_idx)))
for idx in track_indices:
track = self.tracks[idx]
_, _, _, _, predicted_depth = track.to_tlwhz()
box_mean = self.compute_mean_depth_from_mask(
image, None, self.sequence_info, self.masks[idx])
if predicted_depth > box_mean * filtering_factor:
previously_occluded_tracks.append(idx)
else:
occluded_tracks.append(idx)
return previously_occluded_tracks, occluded_tracks
############################################################################################################
############################################################################################################
############################################################################################################
def reason_for_occlusions_old(self, tracks, track_indices, noise=0.98):
# print(len(self.tracks))
if self.frame_idx == -1:
return [], track_indices
# Use depth to find potentially occluded tracks
newly_occluded_tracks, unmatched_tracks = [], []
image = self.image.copy() # np.load(os.path.join(self.depth_map_path, 'img1Depth',
# '{:06d}.npy'.format(self.frame_idx)))
scale_x = self.sequence_info["image_size"][1] / float(image.shape[1])
scale_y = self.sequence_info["image_size"][0] / float(image.shape[0])
for idx in track_indices:
track = self.tracks[idx]
# predicted, _ = track.predict(self.kf, self.max_height, update_age=False)
# ret = predicted[:4]
# ret[2] *= ret[3]
# ret[:2] -= ret[2:] / 2
# ret[2:] = ret[:2] + ret[2:]
# print("Doing track", track.track_id)
# if track.track_id == 14:
# print("Doing this track", idx)
img = image.copy() * 255
# crop out the original and extended boxes from the depth map
box = track.to_tlbr()
# print("box1", box)
box = [box[0]/scale_x, box[1]/scale_y, box[2]/scale_x, box[3]/scale_y]
# print("box2", box, scale_x, scale_y)
box = [int(x) for x in box]
box_vicinity = [box[0] - self.vicinity_x, box[1] - self.vicinity_y,
box[2] + self.vicinity_x, box[3] + self.vicinity_y]
box = [max(0, box[0]), max(0, box[1]),
max(min(image.shape[1], box[2]), 0),
max(min(image.shape[0], box[3]), 0)]
box_vicinity = [max(0, box_vicinity[0]), max(0, box_vicinity[1]),
max(0, min(image.shape[1], box_vicinity[2])),
max(0, min(image.shape[0], box_vicinity[3]))]
boxx = box
boxx_vicinity = box_vicinity
# print(box, box_vicinity, image.shape[1], image.shape[0])
if 0 in box[2:] or 0 in box_vicinity[2:] or box[0] >= image.shape[1] or box_vicinity[0] >= image.shape[1] or box[1] >= image.shape[0] or box_vicinity[1] >= image.shape[0] or box[0] == box[2] or box[1] == box[2]:
# print("Skipping ...", track.track_id)
# if track.track_id == 30:
# print(box, box_vicinity)
# print("Skipping from 1")
unmatched_tracks.append(idx)
continue
box = image[box[1]:box[3], box[0]:box[2]].copy()
box_vicinity = image[box_vicinity[1]:box_vicinity[3],
box_vicinity[0]:box_vicinity[2]].copy()
if len(np.unique(box)) == 1 or len(np.unique(box_vicinity)) == 1:
unmatched_tracks.append(idx)
# if track.track_id == 30:
# print("Skipping from 1")
continue
if 0 in box.shape or 0 in box_vicinity.shape:
unmatched_tracks.append(idx)
continue
# img = cv2.rectangle(img, (boxx[0], boxx[1]), (boxx[2], boxx[3]), (0, 0, 0), 1)
# cv2.rectangle(img, (boxx_vicinity[0], boxx_vicinity[1]), (boxx_vicinity[2], boxx_vicinity[3]), (0, 0, 0), 1)
# calculate the Otsu's threshold and get all important pixels above this threshold from
# both the original and the extended boxes so we can reason if there is an object closer
# than the current object represented by these important pixels
# if not os.path.exists('/data/tkhurana/tk/deep_sort/verificatio/{}/'.format(track.track_id)):
# os.makedirs('/data/tkhurana/tk/deep_sort/verificatio/{}/'.format(track.track_id))
# cv2.imwrite('/data/tkhurana/tk/deep_sort/verificatio/{}/{}_boxes.jpg'.format(track.track_id, self.frame_idx), img)
# cv2.imwrite('/data/tkhurana/tk/deep_sort/verification/{}/{}_box_vicinity.jpg'.format(track.track_id, self.frame_idx), box_vicinity * 255)
thresh = threshold_otsu(box)
box_pixels = box * (box > thresh)
# cv2.imwrite('/data/tkhurana/tk/deep_sort/verificatio/{}/{}_box_pixels.jpg'.format(track.track_id, self.frame_idx), box_pixels * 255)
box_vicinity_pixels = box_vicinity * (box_vicinity > thresh)
# cv2.imwrite('/data/tkhurana/tk/deep_sort/verificatio/{}/{}_box_vicinity_pixels.jpg'.format(track.track_id, self.frame_idx), box_vicinity_pixels * 255)
box_mean = np.mean(box_pixels[np.nonzero(box_pixels)])
box_vicinity_mean = np.mean(box_vicinity_pixels[np.nonzero(box_vicinity_pixels)])
# if track.track_id == 30:
# print(box_vicinity_mean, box_mean, box_mean * noise)
if box_vicinity_mean > box_mean * noise:
# if track.track_id == 8:
# print("was here", idx)
newly_occluded_tracks.append(idx)
else:
unmatched_tracks.append(idx)
return newly_occluded_tracks, unmatched_tracks
def reason_for_reappearances_old(self, tracks, track_indices, noise=0.75):
# print(len(self.tracks))
if self.frame_idx == -1:
return [], track_indices
# Use depth to find potentially occluded tracks
previously_occluded_tracks, unmatched_tracks = [], []
image = self.image.copy()
scale_x = self.sequence_info["image_size"][1] / float(image.shape[1])
scale_y = self.sequence_info["image_size"][0] / float(image.shape[0])
for idx in track_indices:
track = self.tracks[idx]
img = image.copy() * 255
# crop out the original and extended boxes from the depth map
box = track.to_tlbr()
box = [box[0]/scale_x, box[1]/scale_y, box[2]/scale_x, box[3]/scale_y]
box = [int(x) for x in box]
# box_vicinity = [box[0] - self.vicinity_x, box[1] - self.vicinity_y,
# box[2] + self.vicinity_x, box[3] + self.vicinity_y]
box = [max(0, box[0]), max(0, box[1]),
max(min(image.shape[1], box[2]), 0),
max(min(image.shape[0], box[3]), 0)]
# box_vicinity = [max(0, box_vicinity[0]), max(0, box_vicinity[1]),
# max(0, min(image.shape[1], box_vicinity[2])),
# max(0, min(image.shape[0], box_vicinity[3]))]
boxx = box
# boxx_vicinity = box_vicinity
# print(box, box_vicinity, image.shape[1], image.shape[0])
if 0 in box[2:] or box[0] >= image.shape[1] or box[1] >= image.shape[0] or box[0] == box[2] or box[1] == box[2]:
unmatched_tracks.append(idx)
continue
box = image[box[1]:box[3], box[0]:box[2]].copy()
# box_vicinity = image[box_vicinity[1]:box_vicinity[3],
# box_vicinity[0]:box_vicinity[2]].copy()
if len(np.unique(box)) == 1:
unmatched_tracks.append(idx)
continue
if 0 in box.shape:
unmatched_tracks.append(idx)
continue
# img = cv2.rectangle(img, (boxx[0], boxx[1]), (boxx[2], boxx[3]), (0, 0, 0), 1)
# cv2.rectangle(img, (boxx_vicinity[0], boxx_vicinity[1]), (boxx_vicinity[2], boxx_vicinity[3]), (0, 0, 0), 1)
# if not os.path.exists('/data/tkhurana/tk/deep_sort/verificatio/{}/'.format(track.track_id)):
# os.makedirs('/data/tkhurana/tk/deep_sort/verificatio/{}/'.format(track.track_id))
# cv2.imwrite('/data/tkhurana/tk/deep_sort/verificatio/{}/{}_boxes.jpg'.format(track.track_id, self.frame_idx), img)
# cv2.imwrite('/data/tkhurana/tk/deep_sort/verification/{}/{}_box_vicinity.jpg'.format(track.track_id, self.frame_idx), box_vicinity * 255)
thresh = threshold_otsu(box)
box_dominant_pixels = box * (box > thresh)
box_non_dominant_pixels = box * (box <= thresh)
cv2.imwrite('/data/tkhurana/tk/deep_sort/verificationn/{}/{}_box_dominant_pixels.jpg'.format(track.track_id, self.frame_idx), box_dominant_pixels * 255)
# box_vicinity_pixels = box_vicinity * (box_vicinity > thresh)
cv2.imwrite('/data/tkhurana/tk/deep_sort/verificationn/{}/{}_box_non_dominant_pixels.jpg'.format(track.track_id, self.frame_idx), box_non_dominant_pixels * 255)
box_dominant_mean = np.mean(box_dominant_pixels[np.nonzero(box_dominant_pixels)])
box_non_dominant_mean = np.mean(box_non_dominant_pixels[np.nonzero(box_non_dominant_pixels)])
if box_dominant_mean * noise > box_non_dominant_mean:
previously_occluded_tracks.append(idx)
else:
unmatched_tracks.append(idx)
return previously_occluded_tracks, unmatched_tracks
def update_old(self, detections):
"""Perform measurement update and track management.
Parameters
----------
detections : List[deep_sort.detection.Detection]
A list of detections at the current time step.
"""
# Run matching cascade.
matches, unmatched_tracks, unmatched_detections, newly_occluded_tracks, previously_occluded_tracks = \
self._match(detections)
# print(len(matches), len(unmatched_tracks),
# len(unmatched_detections), len(newly_occluded_tracks),
# len(previously_occluded_tracks))
# Update track set.
for track_idx, detection_idx in matches:
self.tracks[track_idx].update(
self.kf, detections[detection_idx], self.image, self.sequence_info)
# if len(self.tracks) > 13:
# if self.tracks[13].track_id == 14:
# print(self.tracks[3].state)
for track_idx in newly_occluded_tracks:
self.tracks[track_idx].mark_occluded()
for track_idx in previously_occluded_tracks:
self.tracks[track_idx].mark_tentative()
for track_idx in unmatched_tracks:
self.tracks[track_idx].mark_missed()
for detection_idx in unmatched_detections:
self._initiate_track(detections[detection_idx])
self.tracks = [t for t in self.tracks if not t.is_deleted()]
# Update distance metric.
active_targets = [t.track_id for t in self.tracks if t.is_confirmed() or t.is_occluded()]
features, targets = [], []
for track in self.tracks:
if not track.is_confirmed() and not track.is_occluded():
continue
features += track.features
targets += [track.track_id for _ in track.features]
track.features = []
self.metric.partial_fit(
np.asarray(features), np.asarray(targets), active_targets)
def _match_old(self, detections):
    """Associate detections with tracks via a three-stage cascade (legacy).

    Stage 1 matches confirmed tracks on appearance, stage 2 matches
    occluded tracks on appearance, stage 3 matches recently-missed plus
    unconfirmed tracks on IOU.

    Returns
    -------
    (matches, unmatched_tracks, unmatched_detections,
     newly_occluded_tracks, previously_occluded_tracks)
        Index-based results; `previously_occluded_tracks` is always empty
        here (the reappearance reasoning is disabled in this version).
    """
    def gated_metric(tracks, dets, track_indices, detection_indices):
        # Appearance cost between stored track features and detection
        # features, gated by the Kalman filter's Mahalanobis distance.
        features = np.array([dets[i].feature for i in detection_indices])
        targets = np.array([tracks[i].track_id for i in track_indices])
        cost_matrix = self.metric.distance(features, targets)
        cost_matrix = linear_assignment.gate_cost_matrix(
            self.kf, cost_matrix, tracks, dets, track_indices,
            detection_indices)
        return cost_matrix
    # Split track set into confirmed, occluded and unconfirmed tracks.
    confirmed_tracks = [
        i for i, t in enumerate(self.tracks) if t.is_confirmed()]
    occluded_tracks = [
        i for i, t in enumerate(self.tracks) if t.is_occluded()]
    unconfirmed_tracks = [
        i for i, t in enumerate(self.tracks) if not t.is_confirmed() and not t.is_occluded()]
    # There are two things to note here:
    # (1) A TrackState.Occluded track will only emerge from a
    # TrackState.Confirmed track.
    # (2) However, for those tracks that were already TrackState.Occluded,
    # we should let the TrackState.Confirmed tracks match first and
    # TrackState.Occluded tracks match second, as a TrackState.Occluded
    # track that is recovering from occlusion would be less certain of
    # encountering a corresponding detection as compared to
    # TrackState.Confirmed.
    # (1) is implemented here.
    newly_occluded_tracks, confirmed_tracks = self.reason_for_occlusions(
        self.tracks,
        confirmed_tracks)
    newly_occluded_tracks = newly_occluded_tracks + occluded_tracks
    # Associate confirmed tracks using appearance features.
    matches_a, unmatched_tracks_a, unmatched_detections = \
        linear_assignment.matching_cascade(
            gated_metric, self.metric.matching_threshold, 0, self.max_age,
            self.tracks, detections, confirmed_tracks)
    # (2) is implemented here: occluded tracks only compete for the
    # detections left over by the confirmed tracks.
    matches_c, newly_occluded_tracks, unmatched_detections = \
        linear_assignment.matching_cascade(
            gated_metric, self.metric.matching_threshold, 0, self.max_age,  # 0.15
            self.tracks, detections, newly_occluded_tracks, unmatched_detections)
    previously_occluded_tracks = []
    # Associate remaining tracks together with unconfirmed tracks using IOU.
    # Only tracks missed for exactly one frame are eligible for the IOU
    # stage; tracks missed for longer stay unmatched.
    iou_track_candidates = unconfirmed_tracks + [
        k for k in unmatched_tracks_a if
        self.tracks[k].time_since_update == 1]
    unmatched_tracks_a = [
        k for k in unmatched_tracks_a if
        self.tracks[k].time_since_update != 1]
    matches_b, unmatched_tracks_b, unmatched_detections = \
        linear_assignment.min_cost_matching(
            iou_matching.iou_cost, self.max_iou_distance, self.tracks,
            detections, iou_track_candidates, unmatched_detections)
    matches = matches_a + matches_b + matches_c
    unmatched_tracks = list(set(unmatched_tracks_a + unmatched_tracks_b))
    return matches, unmatched_tracks, unmatched_detections, newly_occluded_tracks, previously_occluded_tracks
| 46.221254
| 223
| 0.598941
| 6,447
| 53,062
| 4.707461
| 0.082364
| 0.067811
| 0.039441
| 0.015651
| 0.779828
| 0.753402
| 0.734027
| 0.722429
| 0.702989
| 0.691127
| 0
| 0.015375
| 0.307471
| 53,062
| 1,147
| 224
| 46.261552
| 0.810515
| 0.273868
| 0
| 0.630792
| 0
| 0
| 0.018362
| 0.008797
| 0
| 0
| 0
| 0
| 0
| 1
| 0.038864
| false
| 0
| 0.040359
| 0
| 0.121076
| 0.004484
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
7c86ee21a3a34af5dea24161f33a0ce426aff14b
| 42
|
py
|
Python
|
diptych/exceptext.py
|
bansan85/diptych
|
297e6b291893a6e7abaab16025dc04d7d397a493
|
[
"Apache-2.0"
] | null | null | null |
diptych/exceptext.py
|
bansan85/diptych
|
297e6b291893a6e7abaab16025dc04d7d397a493
|
[
"Apache-2.0"
] | 3
|
2021-04-06T18:25:28.000Z
|
2021-05-12T12:13:47.000Z
|
diptych/exceptext.py
|
bansan85/diptych
|
297e6b291893a6e7abaab16025dc04d7d397a493
|
[
"Apache-2.0"
] | null | null | null |
class NotMyException(Exception):
    """Custom exception marker; adds no behavior beyond ``Exception``."""
| 14
| 32
| 0.761905
| 4
| 42
| 8
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.166667
| 42
| 2
| 33
| 21
| 0.914286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
7c9fe9d2ceeebde3df86f720f458b0d2f3757823
| 37,950
|
py
|
Python
|
instances/passenger_demand/pas-20210421-2109-int2e-1/40.py
|
LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure
|
bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11
|
[
"BSD-3-Clause"
] | null | null | null |
instances/passenger_demand/pas-20210421-2109-int2e-1/40.py
|
LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure
|
bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11
|
[
"BSD-3-Clause"
] | null | null | null |
instances/passenger_demand/pas-20210421-2109-int2e-1/40.py
|
LHcau/scheduling-shared-passenger-and-freight-transport-on-a-fixed-infrastructure
|
bba1e6af5bc8d9deaa2dc3b83f6fe9ddf15d2a11
|
[
"BSD-3-Clause"
] | null | null | null |
"""
PASSENGERS
"""
numPassengers = 435
# Passenger arrival counts per time step: row t (0..59) holds 12 integers,
# presumably one per station/direction pair — verify against the scheduler
# that consumes this instance. Generated data; do not edit by hand.
passenger_arriving = (
    (2, 1, 1, 0, 1, 0, 0, 1, 2, 1, 1, 0),  # 0
    (1, 5, 1, 0, 0, 0, 2, 2, 0, 1, 0, 0),  # 1
    (1, 1, 1, 1, 1, 0, 0, 1, 0, 0, 0, 0),  # 2
    (0, 2, 1, 1, 1, 0, 3, 2, 1, 0, 0, 0),  # 3
    (0, 1, 1, 1, 0, 0, 4, 1, 0, 0, 0, 0),  # 4
    (1, 0, 0, 1, 0, 0, 1, 3, 0, 1, 0, 0),  # 5
    (2, 2, 1, 0, 0, 0, 2, 1, 1, 0, 0, 0),  # 6
    (1, 2, 1, 1, 0, 0, 1, 1, 2, 1, 1, 0),  # 7
    (1, 1, 0, 2, 0, 0, 1, 0, 1, 0, 1, 0),  # 8
    (0, 3, 0, 0, 0, 0, 0, 4, 0, 0, 0, 0),  # 9
    (1, 1, 0, 0, 1, 0, 5, 0, 0, 0, 0, 0),  # 10
    (2, 2, 3, 2, 0, 0, 0, 1, 0, 1, 0, 0),  # 11
    (2, 1, 0, 0, 1, 0, 0, 4, 1, 0, 0, 0),  # 12
    (0, 0, 0, 0, 1, 0, 1, 2, 3, 0, 0, 0),  # 13
    (0, 1, 1, 1, 0, 0, 1, 1, 3, 0, 0, 0),  # 14
    (0, 2, 2, 1, 1, 0, 1, 1, 1, 0, 0, 0),  # 15
    (0, 1, 0, 0, 0, 0, 0, 0, 1, 1, 0, 0),  # 16
    (1, 1, 4, 0, 0, 0, 1, 2, 3, 1, 0, 0),  # 17
    (0, 0, 0, 0, 0, 0, 1, 2, 2, 0, 0, 0),  # 18
    (0, 0, 0, 0, 0, 0, 1, 1, 0, 1, 0, 0),  # 19
    (0, 1, 2, 0, 0, 0, 1, 0, 0, 0, 0, 0),  # 20
    (0, 0, 0, 1, 0, 0, 1, 2, 0, 1, 0, 0),  # 21
    (0, 0, 0, 0, 0, 0, 1, 1, 3, 2, 1, 0),  # 22
    (1, 1, 2, 0, 0, 0, 1, 1, 1, 1, 0, 0),  # 23
    (0, 1, 1, 0, 0, 0, 1, 0, 0, 0, 0, 0),  # 24
    (0, 2, 3, 1, 0, 0, 0, 2, 2, 1, 0, 0),  # 25
    (0, 1, 0, 0, 0, 0, 0, 0, 2, 1, 1, 0),  # 26
    (0, 2, 2, 0, 1, 0, 0, 5, 0, 1, 1, 0),  # 27
    (1, 4, 0, 0, 0, 0, 0, 2, 0, 0, 0, 0),  # 28
    (1, 1, 1, 1, 0, 0, 3, 1, 0, 1, 0, 0),  # 29
    (0, 1, 0, 1, 0, 0, 0, 3, 1, 0, 0, 0),  # 30
    (0, 0, 2, 1, 0, 0, 1, 0, 2, 1, 0, 0),  # 31
    (0, 3, 1, 1, 0, 0, 0, 1, 1, 0, 0, 0),  # 32
    (1, 2, 0, 0, 0, 0, 1, 1, 1, 0, 0, 0),  # 33
    (0, 1, 1, 0, 0, 0, 0, 2, 0, 1, 1, 0),  # 34
    (1, 1, 0, 0, 0, 0, 1, 3, 0, 0, 0, 0),  # 35
    (1, 2, 0, 1, 0, 0, 1, 0, 2, 2, 0, 0),  # 36
    (1, 2, 0, 0, 0, 0, 0, 3, 1, 2, 1, 0),  # 37
    (2, 0, 1, 1, 0, 0, 2, 1, 2, 1, 0, 0),  # 38
    (0, 1, 1, 0, 0, 0, 0, 2, 2, 1, 0, 0),  # 39
    (0, 0, 0, 2, 0, 0, 5, 2, 2, 1, 0, 0),  # 40
    (0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 1, 0),  # 41
    (1, 1, 0, 2, 0, 0, 0, 1, 1, 1, 1, 0),  # 42
    (0, 2, 3, 1, 1, 0, 0, 1, 0, 0, 0, 0),  # 43
    (1, 0, 0, 0, 0, 0, 0, 0, 2, 0, 0, 0),  # 44
    (0, 1, 1, 1, 0, 0, 0, 2, 1, 0, 1, 0),  # 45
    (0, 2, 1, 0, 0, 0, 0, 1, 0, 0, 1, 0),  # 46
    (1, 3, 1, 0, 0, 0, 0, 1, 1, 2, 0, 0),  # 47
    (0, 0, 1, 0, 0, 0, 2, 1, 1, 1, 0, 0),  # 48
    (0, 0, 1, 0, 0, 0, 1, 2, 0, 0, 0, 0),  # 49
    (0, 0, 2, 1, 0, 0, 0, 0, 0, 2, 0, 0),  # 50
    (1, 1, 1, 0, 1, 0, 0, 2, 0, 0, 0, 0),  # 51
    (0, 0, 2, 0, 0, 0, 0, 1, 0, 1, 2, 0),  # 52
    (1, 2, 0, 1, 0, 0, 3, 1, 1, 0, 0, 0),  # 53
    (1, 1, 0, 0, 0, 0, 1, 2, 0, 0, 0, 0),  # 54
    (1, 3, 3, 0, 0, 0, 0, 0, 2, 1, 0, 0),  # 55
    (1, 3, 1, 0, 1, 0, 2, 1, 1, 1, 0, 0),  # 56
    (1, 0, 2, 2, 0, 0, 0, 0, 1, 0, 0, 0),  # 57
    (0, 0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0),  # 58
    (0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0),  # 59
)
# Continuous arrival intensity per time step: row t (0..59) holds 6 floats,
# presumably one per station — confirm against the consuming model. The
# final row is all zeros (no arrivals after the horizon). Generated data;
# do not edit by hand.
station_arriving_intensity = (
    (0.5299303116769096, 1.3592921401515152, 1.59884720437018, 1.2672554347826086, 1.4286057692307692, 0.951358695652174),  # 0
    (0.53490440200956, 1.3744083197425647, 1.607483107504999, 1.274312877415459, 1.4393133012820511, 0.9510344278381644),  # 1
    (0.5398216954443468, 1.3893002805836139, 1.6159140245644101, 1.2812149758454108, 1.4497948717948719, 0.9507002415458937),  # 2
    (0.544678017998244, 1.4039519531250002, 1.6241337965938305, 1.2879558423913042, 1.4600408653846157, 0.9503561820652175),  # 3
    (0.5494691956882256, 1.4183472678170597, 1.6321362646386746, 1.2945295893719808, 1.470041666666667, 0.9500022946859903),  # 4
    (0.5541910545312653, 1.4324701551101293, 1.6399152697443586, 1.3009303291062801, 1.4797876602564102, 0.9496386246980676),  # 5
    (0.5588394205443372, 1.4463045454545456, 1.6474646529562986, 1.3071521739130436, 1.4892692307692308, 0.9492652173913044),  # 6
    (0.5634101197444152, 1.4598343693006453, 1.6547782553199086, 1.3131892361111113, 1.4984767628205131, 0.9488821180555557),  # 7
    (0.5678989781484733, 1.4730435570987652, 1.6618499178806059, 1.3190356280193236, 1.507400641025641, 0.9484893719806764),  # 8
    (0.5723018217734855, 1.4859160392992425, 1.668673481683805, 1.3246854619565218, 1.51603125, 0.9480870244565218),  # 9
    (0.5766144766364257, 1.4984357463524132, 1.6752427877749214, 1.3301328502415461, 1.5243589743589745, 0.9476751207729468),  # 10
    (0.5808327687542679, 1.5105866087086142, 1.6815516771993715, 1.335371905193237, 1.5323741987179487, 0.9472537062198069),  # 11
    (0.584952524143986, 1.5223525568181817, 1.6875939910025708, 1.3403967391304348, 1.5400673076923077, 0.9468228260869564),  # 12
    (0.5889695688225538, 1.5337175211314538, 1.6933635702299341, 1.3452014643719807, 1.5474286858974362, 0.9463825256642512),  # 13
    (0.5928797288069457, 1.5446654320987658, 1.6988542559268778, 1.3497801932367148, 1.5544487179487179, 0.9459328502415458),  # 14
    (0.5966788301141351, 1.5551802201704543, 1.7040598891388172, 1.3541270380434782, 1.5611177884615386, 0.9454738451086957),  # 15
    (0.6003626987610965, 1.5652458157968576, 1.7089743109111684, 1.3582361111111112, 1.567426282051282, 0.9450055555555557),  # 16
    (0.6039271607648035, 1.5748461494283108, 1.713591362289346, 1.3621015247584543, 1.5733645833333334, 0.9445280268719808),  # 17
    (0.6073680421422301, 1.5839651515151516, 1.7179048843187663, 1.365717391304348, 1.5789230769230773, 0.9440413043478261),  # 18
    (0.6106811689103502, 1.592586752507716, 1.7219087180448445, 1.3690778230676328, 1.5840921474358975, 0.9435454332729469),  # 19
    (0.613862367086138, 1.600694882856341, 1.725596704512996, 1.37217693236715, 1.5888621794871796, 0.943040458937198),  # 20
    (0.6169074626865673, 1.6082734730113633, 1.728962684768638, 1.3750088315217392, 1.5932235576923075, 0.9425264266304348),  # 21
    (0.6198122817286118, 1.6153064534231203, 1.7320004998571836, 1.3775676328502415, 1.5971666666666664, 0.9420033816425122),  # 22
    (0.6225726502292459, 1.6217777545419474, 1.7347039908240505, 1.3798474486714978, 1.600681891025641, 0.9414713692632852),  # 23
    (0.6251843942054434, 1.6276713068181818, 1.7370669987146528, 1.3818423913043478, 1.6037596153846154, 0.9409304347826087),  # 24
    (0.6276433396741781, 1.6329710407021605, 1.7390833645744075, 1.383546573067633, 1.6063902243589743, 0.9403806234903382),  # 25
    (0.6299453126524241, 1.6376608866442197, 1.740746929448729, 1.384954106280193, 1.6085641025641024, 0.9398219806763285),  # 26
    (0.6320861391571554, 1.6417247750946968, 1.7420515343830332, 1.3860591032608698, 1.610271634615385, 0.9392545516304349),  # 27
    (0.6340616452053459, 1.6451466365039282, 1.7429910204227366, 1.3868556763285025, 1.611503205128205, 0.9386783816425122),  # 28
    (0.6358676568139694, 1.6479104013222505, 1.7435592286132533, 1.3873379378019326, 1.6122491987179488, 0.9380935160024155),  # 29
    (0.6375000000000001, 1.6500000000000001, 1.7437500000000001, 1.3875000000000002, 1.6125, 0.9375),  # 30
    (0.6390274056905372, 1.6517357599431817, 1.7436069897342994, 1.3874707312091505, 1.6124087322695038, 0.9366752519573547),  # 31
    (0.6405218350383632, 1.6534485795454548, 1.743182004830918, 1.387383496732026, 1.612136879432624, 0.9354049516908214),  # 32
    (0.6419839593989769, 1.6551382457386365, 1.742481114130435, 1.3872391544117648, 1.6116873670212766, 0.933701536731634),  # 33
    (0.6434144501278772, 1.6568045454545457, 1.74151038647343, 1.3870385620915036, 1.611063120567376, 0.9315774446110279),  # 34
    (0.6448139785805627, 1.6584472656249998, 1.7402758907004832, 1.3867825776143792, 1.610267065602837, 0.9290451128602366),  # 35
    (0.646183216112532, 1.6600661931818186, 1.7387836956521738, 1.3864720588235295, 1.6093021276595747, 0.9261169790104948),  # 36
    (0.6475228340792839, 1.6616611150568183, 1.7370398701690821, 1.3861078635620916, 1.6081712322695034, 0.9228054805930368),  # 37
    (0.6488335038363171, 1.6632318181818182, 1.7350504830917874, 1.3856908496732026, 1.606877304964539, 0.919123055139097),  # 38
    (0.6501158967391305, 1.6647780894886364, 1.7328216032608694, 1.385221875, 1.6054232712765957, 0.91508214017991),  # 39
    (0.6513706841432225, 1.666299715909091, 1.7303592995169084, 1.384701797385621, 1.6038120567375884, 0.9106951732467099),  # 40
    (0.6525985374040921, 1.6677964843749997, 1.7276696407004832, 1.3841314746732027, 1.6020465868794327, 0.9059745918707315),  # 41
    (0.6538001278772378, 1.6692681818181823, 1.724758695652174, 1.3835117647058823, 1.6001297872340428, 0.9009328335832084),  # 42
    (0.6549761269181587, 1.6707145951704545, 1.7216325332125604, 1.3828435253267974, 1.5980645833333333, 0.8955823359153756),  # 43
    (0.656127205882353, 1.6721355113636365, 1.7182972222222224, 1.382127614379085, 1.59585390070922, 0.8899355363984673),  # 44
    (0.6572540361253196, 1.6735307173295455, 1.7147588315217395, 1.3813648897058823, 1.5935006648936172, 0.8840048725637182),  # 45
    (0.6583572890025576, 1.6749000000000003, 1.711023429951691, 1.3805562091503267, 1.59100780141844, 0.8778027819423623),  # 46
    (0.6594376358695653, 1.6762431463068184, 1.707097086352657, 1.3797024305555556, 1.5883782358156031, 0.8713417020656339),  # 47
    (0.6604957480818415, 1.6775599431818184, 1.7029858695652174, 1.3788044117647058, 1.5856148936170213, 0.8646340704647678),  # 48
    (0.6615322969948849, 1.6788501775568179, 1.698695848429952, 1.3778630106209153, 1.58272070035461, 0.8576923246709978),  # 49
    (0.6625479539641944, 1.680113636363636, 1.6942330917874397, 1.3768790849673205, 1.5796985815602838, 0.8505289022155589),  # 50
    (0.6635433903452687, 1.681350106534091, 1.689603668478261, 1.375853492647059, 1.5765514627659574, 0.8431562406296852),  # 51
    (0.6645192774936062, 1.682559375, 1.684813647342995, 1.374787091503268, 1.5732822695035462, 0.8355867774446111),  # 52
    (0.6654762867647059, 1.683741228693182, 1.6798690972222223, 1.373680739379085, 1.5698939273049648, 0.8278329501915709),  # 53
    (0.6664150895140666, 1.6848954545454544, 1.6747760869565216, 1.3725352941176472, 1.5663893617021278, 0.8199071964017991),  # 54
    (0.6673363570971866, 1.6860218394886364, 1.6695406853864734, 1.3713516135620916, 1.5627714982269505, 0.8118219536065301),  # 55
    (0.6682407608695652, 1.6871201704545453, 1.6641689613526571, 1.3701305555555556, 1.5590432624113477, 0.8035896593369982),  # 56
    (0.6691289721867009, 1.6881902343750004, 1.6586669836956522, 1.3688729779411766, 1.555207579787234, 0.7952227511244377),  # 57
    (0.6700016624040921, 1.689231818181818, 1.6530408212560386, 1.3675797385620916, 1.5512673758865247, 0.7867336665000834),  # 58
    (0.0, 0.0, 0.0, 0.0, 0.0, 0.0),  # 59
)
# Cumulative form of `passenger_arriving`: row t equals the column-wise sum
# of `passenger_arriving` rows 0..t (verifiable: row 0 matches, row 1 is
# row 0 + row 1, and rows 58-59 are identical because step 59 is all zeros).
# Generated data; do not edit by hand.
passenger_arriving_acc = (
    (2, 1, 1, 0, 1, 0, 0, 1, 2, 1, 1, 0),  # 0
    (3, 6, 2, 0, 1, 0, 2, 3, 2, 2, 1, 0),  # 1
    (4, 7, 3, 1, 2, 0, 2, 4, 2, 2, 1, 0),  # 2
    (4, 9, 4, 2, 3, 0, 5, 6, 3, 2, 1, 0),  # 3
    (4, 10, 5, 3, 3, 0, 9, 7, 3, 2, 1, 0),  # 4
    (5, 10, 5, 4, 3, 0, 10, 10, 3, 3, 1, 0),  # 5
    (7, 12, 6, 4, 3, 0, 12, 11, 4, 3, 1, 0),  # 6
    (8, 14, 7, 5, 3, 0, 13, 12, 6, 4, 2, 0),  # 7
    (9, 15, 7, 7, 3, 0, 14, 12, 7, 4, 3, 0),  # 8
    (9, 18, 7, 7, 3, 0, 14, 16, 7, 4, 3, 0),  # 9
    (10, 19, 7, 7, 4, 0, 19, 16, 7, 4, 3, 0),  # 10
    (12, 21, 10, 9, 4, 0, 19, 17, 7, 5, 3, 0),  # 11
    (14, 22, 10, 9, 5, 0, 19, 21, 8, 5, 3, 0),  # 12
    (14, 22, 10, 9, 6, 0, 20, 23, 11, 5, 3, 0),  # 13
    (14, 23, 11, 10, 6, 0, 21, 24, 14, 5, 3, 0),  # 14
    (14, 25, 13, 11, 7, 0, 22, 25, 15, 5, 3, 0),  # 15
    (14, 26, 13, 11, 7, 0, 22, 25, 16, 6, 3, 0),  # 16
    (15, 27, 17, 11, 7, 0, 23, 27, 19, 7, 3, 0),  # 17
    (15, 27, 17, 11, 7, 0, 24, 29, 21, 7, 3, 0),  # 18
    (15, 27, 17, 11, 7, 0, 25, 30, 21, 8, 3, 0),  # 19
    (15, 28, 19, 11, 7, 0, 26, 30, 21, 8, 3, 0),  # 20
    (15, 28, 19, 12, 7, 0, 27, 32, 21, 9, 3, 0),  # 21
    (15, 28, 19, 12, 7, 0, 28, 33, 24, 11, 4, 0),  # 22
    (16, 29, 21, 12, 7, 0, 29, 34, 25, 12, 4, 0),  # 23
    (16, 30, 22, 12, 7, 0, 30, 34, 25, 12, 4, 0),  # 24
    (16, 32, 25, 13, 7, 0, 30, 36, 27, 13, 4, 0),  # 25
    (16, 33, 25, 13, 7, 0, 30, 36, 29, 14, 5, 0),  # 26
    (16, 35, 27, 13, 8, 0, 30, 41, 29, 15, 6, 0),  # 27
    (17, 39, 27, 13, 8, 0, 30, 43, 29, 15, 6, 0),  # 28
    (18, 40, 28, 14, 8, 0, 33, 44, 29, 16, 6, 0),  # 29
    (18, 41, 28, 15, 8, 0, 33, 47, 30, 16, 6, 0),  # 30
    (18, 41, 30, 16, 8, 0, 34, 47, 32, 17, 6, 0),  # 31
    (18, 44, 31, 17, 8, 0, 34, 48, 33, 17, 6, 0),  # 32
    (19, 46, 31, 17, 8, 0, 35, 49, 34, 17, 6, 0),  # 33
    (19, 47, 32, 17, 8, 0, 35, 51, 34, 18, 7, 0),  # 34
    (20, 48, 32, 17, 8, 0, 36, 54, 34, 18, 7, 0),  # 35
    (21, 50, 32, 18, 8, 0, 37, 54, 36, 20, 7, 0),  # 36
    (22, 52, 32, 18, 8, 0, 37, 57, 37, 22, 8, 0),  # 37
    (24, 52, 33, 19, 8, 0, 39, 58, 39, 23, 8, 0),  # 38
    (24, 53, 34, 19, 8, 0, 39, 60, 41, 24, 8, 0),  # 39
    (24, 53, 34, 21, 8, 0, 44, 62, 43, 25, 8, 0),  # 40
    (24, 53, 36, 21, 8, 0, 44, 62, 43, 25, 9, 0),  # 41
    (25, 54, 36, 23, 8, 0, 44, 63, 44, 26, 10, 0),  # 42
    (25, 56, 39, 24, 9, 0, 44, 64, 44, 26, 10, 0),  # 43
    (26, 56, 39, 24, 9, 0, 44, 64, 46, 26, 10, 0),  # 44
    (26, 57, 40, 25, 9, 0, 44, 66, 47, 26, 11, 0),  # 45
    (26, 59, 41, 25, 9, 0, 44, 67, 47, 26, 12, 0),  # 46
    (27, 62, 42, 25, 9, 0, 44, 68, 48, 28, 12, 0),  # 47
    (27, 62, 43, 25, 9, 0, 46, 69, 49, 29, 12, 0),  # 48
    (27, 62, 44, 25, 9, 0, 47, 71, 49, 29, 12, 0),  # 49
    (27, 62, 46, 26, 9, 0, 47, 71, 49, 31, 12, 0),  # 50
    (28, 63, 47, 26, 10, 0, 47, 73, 49, 31, 12, 0),  # 51
    (28, 63, 49, 26, 10, 0, 47, 74, 49, 32, 14, 0),  # 52
    (29, 65, 49, 27, 10, 0, 50, 75, 50, 32, 14, 0),  # 53
    (30, 66, 49, 27, 10, 0, 51, 77, 50, 32, 14, 0),  # 54
    (31, 69, 52, 27, 10, 0, 51, 77, 52, 33, 14, 0),  # 55
    (32, 72, 53, 27, 11, 0, 53, 78, 53, 34, 14, 0),  # 56
    (33, 72, 55, 29, 11, 0, 53, 78, 54, 34, 14, 0),  # 57
    (33, 72, 57, 29, 11, 0, 53, 78, 54, 34, 14, 0),  # 58
    (33, 72, 57, 29, 11, 0, 53, 78, 54, 34, 14, 0),  # 59
)
# Per-time-step arrival rates: row t (0..59) holds 12 floats. Column 0 of
# each row coincides with column 0 of `station_arriving_intensity`, so the
# 12 columns presumably expand the 6 station intensities over destinations
# or directions — confirm against the consuming model. Generated data; do
# not edit by hand.
passenger_arriving_rate = (
    (0.5299303116769096, 1.087433712121212, 0.959308322622108, 0.5069021739130434, 0.2857211538461538, 0.0, 0.951358695652174, 1.1428846153846153, 0.7603532608695651, 0.6395388817480719, 0.271858428030303, 0.0),  # 0
    (0.53490440200956, 1.0995266557940517, 0.9644898645029993, 0.5097251509661835, 0.2878626602564102, 0.0, 0.9510344278381644, 1.1514506410256409, 0.7645877264492754, 0.6429932430019996, 0.27488166394851293, 0.0),  # 1
    (0.5398216954443468, 1.111440224466891, 0.9695484147386461, 0.5124859903381642, 0.28995897435897433, 0.0, 0.9507002415458937, 1.1598358974358973, 0.7687289855072464, 0.646365609825764, 0.27786005611672276, 0.0),  # 2
    (0.544678017998244, 1.1231615625, 0.9744802779562982, 0.5151823369565216, 0.2920081730769231, 0.0, 0.9503561820652175, 1.1680326923076925, 0.7727735054347825, 0.6496535186375322, 0.280790390625, 0.0),  # 3
    (0.5494691956882256, 1.1346778142536476, 0.9792817587832047, 0.5178118357487923, 0.29400833333333337, 0.0, 0.9500022946859903, 1.1760333333333335, 0.7767177536231885, 0.6528545058554698, 0.2836694535634119, 0.0),  # 4
    (0.5541910545312653, 1.1459761240881035, 0.9839491618466152, 0.5203721316425121, 0.295957532051282, 0.0, 0.9496386246980676, 1.183830128205128, 0.7805581974637681, 0.6559661078977435, 0.28649403102202586, 0.0),  # 5
    (0.5588394205443372, 1.1570436363636363, 0.9884787917737792, 0.5228608695652174, 0.29785384615384614, 0.0, 0.9492652173913044, 1.1914153846153845, 0.7842913043478261, 0.6589858611825195, 0.28926090909090907, 0.0),  # 6
    (0.5634101197444152, 1.1678674954405162, 0.9928669531919452, 0.5252756944444444, 0.2996953525641026, 0.0, 0.9488821180555557, 1.1987814102564105, 0.7879135416666667, 0.6619113021279635, 0.29196687386012904, 0.0),  # 7
    (0.5678989781484733, 1.1784348456790121, 0.9971099507283635, 0.5276142512077294, 0.3014801282051282, 0.0, 0.9484893719806764, 1.2059205128205128, 0.7914213768115942, 0.6647399671522423, 0.29460871141975303, 0.0),  # 8
    (0.5723018217734855, 1.188732831439394, 1.001204089010283, 0.5298741847826087, 0.30320624999999995, 0.0, 0.9480870244565218, 1.2128249999999998, 0.7948112771739131, 0.6674693926735219, 0.2971832078598485, 0.0),  # 9
    (0.5766144766364257, 1.1987485970819305, 1.0051456726649528, 0.5320531400966184, 0.3048717948717949, 0.0, 0.9476751207729468, 1.2194871794871796, 0.7980797101449276, 0.6700971151099685, 0.2996871492704826, 0.0),  # 10
    (0.5808327687542679, 1.2084692869668912, 1.0089310063196228, 0.5341487620772947, 0.3064748397435897, 0.0, 0.9472537062198069, 1.2258993589743588, 0.8012231431159421, 0.6726206708797485, 0.3021173217417228, 0.0),  # 11
    (0.584952524143986, 1.2178820454545454, 1.0125563946015423, 0.5361586956521739, 0.30801346153846154, 0.0, 0.9468228260869564, 1.2320538461538462, 0.8042380434782609, 0.6750375964010282, 0.30447051136363634, 0.0),  # 12
    (0.5889695688225538, 1.2269740169051628, 1.0160181421379604, 0.5380805857487923, 0.30948573717948724, 0.0, 0.9463825256642512, 1.237942948717949, 0.8071208786231884, 0.6773454280919736, 0.3067435042262907, 0.0),  # 13
    (0.5928797288069457, 1.2357323456790126, 1.0193125535561267, 0.5399120772946859, 0.31088974358974353, 0.0, 0.9459328502415458, 1.2435589743589741, 0.8098681159420289, 0.6795417023707511, 0.30893308641975314, 0.0),  # 14
    (0.5966788301141351, 1.2441441761363634, 1.0224359334832902, 0.5416508152173912, 0.3122235576923077, 0.0, 0.9454738451086957, 1.2488942307692308, 0.8124762228260869, 0.6816239556555268, 0.31103604403409085, 0.0),  # 15
    (0.6003626987610965, 1.252196652637486, 1.025384586546701, 0.5432944444444444, 0.3134852564102564, 0.0, 0.9450055555555557, 1.2539410256410255, 0.8149416666666667, 0.6835897243644673, 0.3130491631593715, 0.0),  # 16
    (0.6039271607648035, 1.2598769195426485, 1.0281548173736075, 0.5448406099033817, 0.31467291666666664, 0.0, 0.9445280268719808, 1.2586916666666665, 0.8172609148550726, 0.6854365449157384, 0.3149692298856621, 0.0),  # 17
    (0.6073680421422301, 1.2671721212121212, 1.0307429305912597, 0.5462869565217392, 0.31578461538461544, 0.0, 0.9440413043478261, 1.2631384615384618, 0.8194304347826088, 0.6871619537275064, 0.3167930303030303, 0.0),  # 18
    (0.6106811689103502, 1.2740694020061727, 1.0331452308269067, 0.547631129227053, 0.3168184294871795, 0.0, 0.9435454332729469, 1.267273717948718, 0.8214466938405797, 0.6887634872179377, 0.3185173505015432, 0.0),  # 19
    (0.613862367086138, 1.2805559062850727, 1.0353580227077976, 0.5488707729468599, 0.31777243589743587, 0.0, 0.943040458937198, 1.2710897435897435, 0.82330615942029, 0.6902386818051983, 0.32013897657126816, 0.0),  # 20
    (0.6169074626865673, 1.2866187784090906, 1.0373776108611827, 0.5500035326086956, 0.31864471153846147, 0.0, 0.9425264266304348, 1.2745788461538459, 0.8250052989130435, 0.691585073907455, 0.32165469460227264, 0.0),  # 21
    (0.6198122817286118, 1.292245162738496, 1.0392002999143102, 0.5510270531400966, 0.31943333333333324, 0.0, 0.9420033816425122, 1.277733333333333, 0.8265405797101449, 0.6928001999428733, 0.323061290684624, 0.0),  # 22
    (0.6225726502292459, 1.2974222036335579, 1.0408223944944301, 0.551938979468599, 0.3201363782051282, 0.0, 0.9414713692632852, 1.2805455128205128, 0.8279084692028986, 0.6938815963296201, 0.32435555090838947, 0.0),  # 23
    (0.6251843942054434, 1.3021370454545453, 1.0422401992287915, 0.552736956521739, 0.3207519230769231, 0.0, 0.9409304347826087, 1.2830076923076923, 0.8291054347826087, 0.694826799485861, 0.32553426136363633, 0.0),  # 24
    (0.6276433396741781, 1.3063768325617282, 1.0434500187446445, 0.5534186292270532, 0.32127804487179484, 0.0, 0.9403806234903382, 1.2851121794871794, 0.8301279438405799, 0.6956333458297629, 0.32659420814043205, 0.0),  # 25
    (0.6299453126524241, 1.3101287093153757, 1.0444481576692373, 0.5539816425120772, 0.32171282051282046, 0.0, 0.9398219806763285, 1.2868512820512819, 0.8309724637681158, 0.6962987717794915, 0.32753217732884393, 0.0),  # 26
    (0.6320861391571554, 1.3133798200757574, 1.0452309206298198, 0.5544236413043478, 0.32205432692307695, 0.0, 0.9392545516304349, 1.2882173076923078, 0.8316354619565218, 0.6968206137532132, 0.32834495501893934, 0.0),  # 27
    (0.6340616452053459, 1.3161173092031424, 1.045794612253642, 0.5547422705314009, 0.322300641025641, 0.0, 0.9386783816425122, 1.289202564102564, 0.8321134057971015, 0.6971964081690946, 0.3290293273007856, 0.0),  # 28
    (0.6358676568139694, 1.3183283210578003, 1.046135537167952, 0.554935175120773, 0.32244983974358976, 0.0, 0.9380935160024155, 1.289799358974359, 0.8324027626811595, 0.6974236914453013, 0.3295820802644501, 0.0),  # 29
    (0.6375000000000001, 1.32, 1.0462500000000001, 0.555, 0.3225, 0.0, 0.9375, 1.29, 0.8325000000000001, 0.6975, 0.33, 0.0),  # 30
    (0.6390274056905372, 1.3213886079545452, 1.0461641938405797, 0.5549882924836602, 0.3224817464539007, 0.0, 0.9366752519573547, 1.2899269858156028, 0.8324824387254903, 0.6974427958937197, 0.3303471519886363, 0.0),  # 31
    (0.6405218350383632, 1.3227588636363636, 1.0459092028985506, 0.5549533986928104, 0.3224273758865248, 0.0, 0.9354049516908214, 1.2897095035460993, 0.8324300980392155, 0.6972728019323671, 0.3306897159090909, 0.0),  # 32
    (0.6419839593989769, 1.3241105965909092, 1.045488668478261, 0.5548956617647058, 0.3223374734042553, 0.0, 0.933701536731634, 1.2893498936170211, 0.8323434926470589, 0.696992445652174, 0.3310276491477273, 0.0),  # 33
    (0.6434144501278772, 1.3254436363636364, 1.044906231884058, 0.5548154248366014, 0.3222126241134752, 0.0, 0.9315774446110279, 1.2888504964539007, 0.8322231372549022, 0.696604154589372, 0.3313609090909091, 0.0),  # 34
    (0.6448139785805627, 1.3267578124999997, 1.0441655344202898, 0.5547130310457516, 0.3220534131205674, 0.0, 0.9290451128602366, 1.2882136524822696, 0.8320695465686275, 0.6961103562801932, 0.3316894531249999, 0.0),  # 35
    (0.646183216112532, 1.3280529545454547, 1.0432702173913042, 0.5545888235294117, 0.3218604255319149, 0.0, 0.9261169790104948, 1.2874417021276596, 0.8318832352941177, 0.6955134782608695, 0.3320132386363637, 0.0),  # 36
    (0.6475228340792839, 1.3293288920454547, 1.0422239221014493, 0.5544431454248366, 0.32163424645390065, 0.0, 0.9228054805930368, 1.2865369858156026, 0.831664718137255, 0.6948159480676328, 0.33233222301136367, 0.0),  # 37
    (0.6488335038363171, 1.3305854545454545, 1.0410302898550725, 0.5542763398692809, 0.3213754609929078, 0.0, 0.919123055139097, 1.2855018439716313, 0.8314145098039215, 0.6940201932367149, 0.33264636363636363, 0.0),  # 38
    (0.6501158967391305, 1.331822471590909, 1.0396929619565216, 0.55408875, 0.3210846542553191, 0.0, 0.91508214017991, 1.2843386170212765, 0.831133125, 0.6931286413043477, 0.33295561789772726, 0.0),  # 39
    (0.6513706841432225, 1.3330397727272727, 1.0382155797101449, 0.5538807189542484, 0.32076241134751765, 0.0, 0.9106951732467099, 1.2830496453900706, 0.8308210784313727, 0.6921437198067633, 0.3332599431818182, 0.0),  # 40
    (0.6525985374040921, 1.3342371874999996, 1.03660178442029, 0.553652589869281, 0.3204093173758865, 0.0, 0.9059745918707315, 1.281637269503546, 0.8304788848039216, 0.6910678562801932, 0.3335592968749999, 0.0),  # 41
    (0.6538001278772378, 1.3354145454545456, 1.0348552173913044, 0.5534047058823529, 0.3200259574468085, 0.0, 0.9009328335832084, 1.280103829787234, 0.8301070588235294, 0.6899034782608696, 0.3338536363636364, 0.0),  # 42
    (0.6549761269181587, 1.3365716761363635, 1.0329795199275362, 0.5531374101307189, 0.31961291666666664, 0.0, 0.8955823359153756, 1.2784516666666665, 0.8297061151960784, 0.688653013285024, 0.3341429190340909, 0.0),  # 43
    (0.656127205882353, 1.337708409090909, 1.0309783333333333, 0.552851045751634, 0.31917078014184397, 0.0, 0.8899355363984673, 1.2766831205673759, 0.829276568627451, 0.6873188888888889, 0.33442710227272726, 0.0),  # 44
    (0.6572540361253196, 1.3388245738636362, 1.0288552989130437, 0.5525459558823529, 0.3187001329787234, 0.0, 0.8840048725637182, 1.2748005319148936, 0.8288189338235293, 0.6859035326086957, 0.33470614346590905, 0.0),  # 45
    (0.6583572890025576, 1.33992, 1.0266140579710146, 0.5522224836601306, 0.31820156028368796, 0.0, 0.8778027819423623, 1.2728062411347518, 0.828333725490196, 0.6844093719806764, 0.33498, 0.0),  # 46
    (0.6594376358695653, 1.3409945170454545, 1.0242582518115941, 0.5518809722222222, 0.3176756471631206, 0.0, 0.8713417020656339, 1.2707025886524823, 0.8278214583333333, 0.6828388345410626, 0.3352486292613636, 0.0),  # 47
    (0.6604957480818415, 1.3420479545454547, 1.0217915217391305, 0.5515217647058823, 0.3171229787234042, 0.0, 0.8646340704647678, 1.268491914893617, 0.8272826470588235, 0.6811943478260869, 0.33551198863636367, 0.0),  # 48
    (0.6615322969948849, 1.3430801420454541, 1.0192175090579711, 0.5511452042483661, 0.31654414007092196, 0.0, 0.8576923246709978, 1.2661765602836879, 0.8267178063725492, 0.6794783393719807, 0.33577003551136353, 0.0),  # 49
    (0.6625479539641944, 1.3440909090909086, 1.0165398550724638, 0.5507516339869282, 0.31593971631205675, 0.0, 0.8505289022155589, 1.263758865248227, 0.8261274509803923, 0.6776932367149758, 0.33602272727272714, 0.0),  # 50
    (0.6635433903452687, 1.3450800852272726, 1.0137622010869565, 0.5503413970588236, 0.31531029255319143, 0.0, 0.8431562406296852, 1.2612411702127657, 0.8255120955882354, 0.6758414673913044, 0.33627002130681816, 0.0),  # 51
    (0.6645192774936062, 1.3460474999999998, 1.010888188405797, 0.5499148366013071, 0.31465645390070923, 0.0, 0.8355867774446111, 1.258625815602837, 0.8248722549019608, 0.673925458937198, 0.33651187499999996, 0.0),  # 52
    (0.6654762867647059, 1.3469929829545455, 1.0079214583333334, 0.549472295751634, 0.31397878546099295, 0.0, 0.8278329501915709, 1.2559151418439718, 0.824208443627451, 0.6719476388888889, 0.3367482457386364, 0.0),  # 53
    (0.6664150895140666, 1.3479163636363634, 1.004865652173913, 0.5490141176470589, 0.31327787234042553, 0.0, 0.8199071964017991, 1.2531114893617021, 0.8235211764705883, 0.6699104347826086, 0.33697909090909084, 0.0),  # 54
    (0.6673363570971866, 1.348817471590909, 1.001724411231884, 0.5485406454248366, 0.31255429964539005, 0.0, 0.8118219536065301, 1.2502171985815602, 0.822810968137255, 0.6678162741545893, 0.33720436789772723, 0.0),  # 55
    (0.6682407608695652, 1.3496961363636362, 0.9985013768115942, 0.5480522222222222, 0.3118086524822695, 0.0, 0.8035896593369982, 1.247234609929078, 0.8220783333333334, 0.6656675845410628, 0.33742403409090904, 0.0),  # 56
    (0.6691289721867009, 1.3505521875000002, 0.9952001902173913, 0.5475491911764706, 0.3110415159574468, 0.0, 0.7952227511244377, 1.2441660638297871, 0.821323786764706, 0.6634667934782609, 0.33763804687500004, 0.0),  # 57
    (0.6700016624040921, 1.3513854545454542, 0.991824492753623, 0.5470318954248365, 0.31025347517730495, 0.0, 0.7867336665000834, 1.2410139007092198, 0.8205478431372549, 0.6612163285024154, 0.33784636363636356, 0.0),  # 58
    (0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0, 0.0),  # 59
)
# Per-stop passenger alighting rates: 60 time steps x 12 columns.
# Every row is identical: columns 0 and 6 are 0, columns 5 and 11 are 1,
# and the remaining columns are 1/6 (== 0.16666666666666666 exactly).
passenger_allighting_rate = tuple(
    (0, 1 / 6, 1 / 6, 1 / 6, 1 / 6, 1, 0, 1 / 6, 1 / 6, 1 / 6, 1 / 6, 1)
    for _ in range(60)
)
"""
parameters for reproducibiliy. More information: https://numpy.org/doc/stable/reference/random/parallel.html
"""
#initial entropy
# NOTE(review): recorded entropy of a numpy SeedSequence so a prior run's
# random streams can be reproduced — see the numpy parallel-RNG docs above.
entropy = 258194110137029475889902652135037600173
#index for seed sequence child
# Indices of the spawned SeedSequence children used by this run.
child_seed_index = (
    1, # 0
    39, # 1
)
| 113.283582
| 220
| 0.729539
| 5,147
| 37,950
| 5.376919
| 0.180882
| 0.042638
| 0.247154
| 0.468293
| 0.360506
| 0.352304
| 0.342837
| 0.335321
| 0.331635
| 0.327154
| 0
| 0.819356
| 0.118946
| 37,950
| 334
| 221
| 113.622754
| 0.008344
| 0.03191
| 0
| 0.202532
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.015823
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
7cabc55935c88d685ec61b8ac3dd9a65ef851192
| 320
|
py
|
Python
|
subscribers/apps.py
|
elegant-solutions/django-webstore
|
2c53189ea075a1d60a4d1e20a69ec8e831894068
|
[
"MIT"
] | 1
|
2020-10-24T08:45:32.000Z
|
2020-10-24T08:45:32.000Z
|
subscribers/apps.py
|
elegant-solutions/django-webstore
|
2c53189ea075a1d60a4d1e20a69ec8e831894068
|
[
"MIT"
] | 14
|
2016-09-22T17:06:38.000Z
|
2016-10-12T18:25:39.000Z
|
subscribers/apps.py
|
elegant-solutions/django-webstore
|
2c53189ea075a1d60a4d1e20a69ec8e831894068
|
[
"MIT"
] | 3
|
2016-10-07T12:03:35.000Z
|
2021-04-17T09:24:21.000Z
|
from __future__ import unicode_literals
from django.apps import AppConfig
# =========================================================================
# ----- Custom AppConfig Class
# =========================================================================
class SubscribersConfig(AppConfig):
    """Django application configuration for the ``subscribers`` app."""

    # Dotted module path Django uses to register this application.
    name = 'subscribers'
| 32
| 75
| 0.409375
| 18
| 320
| 7
| 0.722222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.0875
| 320
| 9
| 76
| 35.555556
| 0.431507
| 0.55
| 0
| 0
| 0
| 0
| 0.078571
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
7cf9ac978652cb4f1b7d030865d55d28754f0392
| 3,338
|
py
|
Python
|
cgatpipelines/tools/pipeline_docs/pipeline_rnaseqdiffexpression/trackers/Genelists.py
|
kevinrue/cgat-flow
|
02b5a1867253c2f6fd6b4f3763e0299115378913
|
[
"MIT"
] | 49
|
2015-04-13T16:49:25.000Z
|
2022-03-29T10:29:14.000Z
|
cgatpipelines/tools/pipeline_docs/pipeline_rnaseqdiffexpression/trackers/Genelists.py
|
kevinrue/cgat-flow
|
02b5a1867253c2f6fd6b4f3763e0299115378913
|
[
"MIT"
] | 252
|
2015-04-08T13:23:34.000Z
|
2019-03-18T21:51:29.000Z
|
cgatpipelines/tools/pipeline_docs/pipeline_rnaseqdiffexpression/trackers/Genelists.py
|
kevinrue/cgat-flow
|
02b5a1867253c2f6fd6b4f3763e0299115378913
|
[
"MIT"
] | 22
|
2015-05-21T00:37:52.000Z
|
2019-09-25T05:04:27.000Z
|
from RnaseqDiffExpressionReport import ProjectTracker
from RnaseqDiffExpressionReport import linkToEnsembl, linkToUCSC
class TopDifferentiallyExpressedGenes(ProjectTracker):
    '''output differentially expressed genes.'''

    # Maximum number of genes reported per track.
    limit = 10
    # Table-name pattern the tracker framework uses to enumerate tracks.
    pattern = '(.*)_gene_diff'
    # ORDER BY expression; empty here, overridden by ranking subclasses.
    sort = ''

    def __call__(self, track, slice=None):
        # Preferred query: join the per-track differential-expression table
        # with the annotations database to attach gene names, biotypes and
        # genomic coordinates.
        statement = '''SELECT DISTINCT a.gene_name,
        a.gene_id,
        a.gene_biotype,
        t.l2fold,
        t.treatment_mean,
        t.control_mean,
        t.pvalue,
        t.qvalue,
        s.contig, s.start, s.end
        FROM %(track)s_gene_diff as t,
        annotations.transcript_info as a,
        annotations.gene_stats as s
        WHERE a.gene_id = t.test_id AND
        s.gene_id = t.test_id AND
        t.significant
        ORDER BY %(sort)s
        LIMIT %(limit)i'''
        data = self.getAll(statement)
        if data:
            # Replace raw identifiers/coordinates with report hyperlinks.
            data['gene_id'] = [linkToEnsembl(x) for x in data["gene_id"]]
            data["locus"] = [linkToUCSC(*x) for x in zip(
                data["contig"],
                data["start"],
                data["end"])]
            return data
        # Fallback when the annotated query returns nothing — presumably no
        # annotations tables are attached; report the bare test results.
        statement = '''SELECT DISTINCT t.test_id,
        t.l2fold,
        t.treatment_mean,
        t.control_mean,
        t.pvalue,
        t.qvalue
        FROM %(track)s_gene_diff as t
        WHERE
        t.significant
        ORDER BY %(sort)s
        LIMIT %(limit)i'''
        return self.getAll(statement)
class TopUpRegulatedGenes(TopDifferentiallyExpressedGenes):
    """Top genes ranked by log2 fold change, largest increases first."""

    sort = 't.l2fold DESC'
class TopDownRegulatedGenes(TopDifferentiallyExpressedGenes):
    """Top genes ranked by log2 fold change ascending (strongest decreases first)."""

    # SQL keywords are case-insensitive, so 'Asc' behaves like 'ASC'.
    sort = 't.l2fold Asc'
class AllDifferentiallyExpressedGenes(ProjectTracker):
    '''output differentially expressed genes.'''

    # Hard cap on the number of genes reported per track.
    limit = 1000
    # Table-name pattern the tracker framework uses to enumerate tracks.
    pattern = '(.*)_gene_diff'

    def __call__(self, track, slice=None):
        # Preferred query: decorate significant results with annotations
        # (gene names, biotypes, coordinates), ranked by fold change.
        statement = '''SELECT DISTINCT a.gene_name,
        a.gene_id,
        a.gene_biotype,
        t.l2fold,
        t.treatment_mean,
        t.control_mean,
        t.pvalue,
        t.qvalue,
        s.contig, s.start, s.end
        FROM %(track)s_gene_diff as t,
        annotations.transcript_info as a,
        annotations.gene_stats as s
        WHERE a.gene_id = t.test_id AND
        s.gene_id = t.test_id AND
        t.significant
        ORDER BY t.l2fold DESC LIMIT %(limit)i'''
        data = self.getAll(statement)
        if data:
            # Replace raw identifiers/coordinates with report hyperlinks.
            data['gene_id'] = [linkToEnsembl(x) for x in data["gene_id"]]
            data["locus"] = [linkToUCSC(*x) for x in zip(
                data["contig"],
                data["start"],
                data["end"])]
            return data
        # Fallback when the annotated query returns nothing — presumably no
        # annotations tables are attached; report the bare test results.
        statement = '''SELECT DISTINCT t.test_id,
        t.l2fold,
        t.treatment_mean,
        t.control_mean,
        t.pvalue,
        t.qvalue
        FROM %(track)s_gene_diff as t
        WHERE
        t.significant
        ORDER BY t.l2fold DESC LIMIT %(limit)i'''
        return self.getAll(statement)
| 31.490566
| 73
| 0.508688
| 348
| 3,338
| 4.729885
| 0.198276
| 0.036452
| 0.025516
| 0.041312
| 0.780073
| 0.780073
| 0.715674
| 0.685298
| 0.685298
| 0.672539
| 0
| 0.006979
| 0.399041
| 3,338
| 105
| 74
| 31.790476
| 0.813559
| 0.023068
| 0
| 0.873563
| 0
| 0
| 0.600923
| 0.030154
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022989
| false
| 0
| 0.022989
| 0
| 0.218391
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
7cfaa7c645b13e6f64a64916e2ac369a0acab6e9
| 106
|
py
|
Python
|
natch/decorators/lte.py
|
ertgl/natch
|
5729725c6eed1596071ac984e3ddfdc21a15af0a
|
[
"MIT"
] | 2
|
2020-05-24T23:41:00.000Z
|
2020-05-25T09:18:08.000Z
|
natch/decorators/lte.py
|
ertgl/natch
|
5729725c6eed1596071ac984e3ddfdc21a15af0a
|
[
"MIT"
] | null | null | null |
natch/decorators/lte.py
|
ertgl/natch
|
5729725c6eed1596071ac984e3ddfdc21a15af0a
|
[
"MIT"
] | null | null | null |
from natch.core import Decoration
from natch.rules import Lte

# Decorator applying the Lte ("less than or equal") validation rule.
lte = Decoration.make_rule_decorator(Lte)
| 17.666667
| 41
| 0.820755
| 16
| 106
| 5.3125
| 0.625
| 0.211765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122642
| 106
| 5
| 42
| 21.2
| 0.913978
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
6b2ccaecd88b85529a804616a0cd60f1e60840d0
| 372
|
py
|
Python
|
qrcode/image/styles/moduledrawers/__init__.py
|
xamronpc/python-qrcode
|
49060c484ce6def1adbc13e3b14e71dcef266eb2
|
[
"BSD-3-Clause"
] | null | null | null |
qrcode/image/styles/moduledrawers/__init__.py
|
xamronpc/python-qrcode
|
49060c484ce6def1adbc13e3b14e71dcef266eb2
|
[
"BSD-3-Clause"
] | null | null | null |
qrcode/image/styles/moduledrawers/__init__.py
|
xamronpc/python-qrcode
|
49060c484ce6def1adbc13e3b14e71dcef266eb2
|
[
"BSD-3-Clause"
] | null | null | null |
# For backwards compatibility, importing the PIL drawers here.
from .pil import CircleModuleDrawer # noqa: F401
from .pil import GappedSquareModuleDrawer # noqa: F401
from .pil import HorizontalBarsDrawer # noqa: F401
from .pil import RoundedModuleDrawer # noqa: F401
from .pil import SquareModuleDrawer # noqa: F401
from .pil import VerticalBarsDrawer # noqa: F401
| 46.5
| 62
| 0.790323
| 44
| 372
| 6.681818
| 0.409091
| 0.142857
| 0.265306
| 0.255102
| 0.357143
| 0
| 0
| 0
| 0
| 0
| 0
| 0.057143
| 0.153226
| 372
| 7
| 63
| 53.142857
| 0.87619
| 0.33871
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
863d20f0e9c79b22f955635c479e18b8bcf235de
| 234
|
py
|
Python
|
tests/functionals/functionals/__init__.py
|
MCOfficer/cabot
|
80add22dd9721f22f19e5afbfb363df7084082ce
|
[
"BSD-3-Clause"
] | 21
|
2019-02-16T01:55:50.000Z
|
2021-11-25T00:00:43.000Z
|
tests/functionals/functionals/__init__.py
|
MCOfficer/cabot
|
80add22dd9721f22f19e5afbfb363df7084082ce
|
[
"BSD-3-Clause"
] | 7
|
2017-11-09T18:35:16.000Z
|
2020-09-29T09:44:09.000Z
|
tests/functionals/functionals/__init__.py
|
MCOfficer/cabot
|
80add22dd9721f22f19e5afbfb363df7084082ce
|
[
"BSD-3-Clause"
] | 4
|
2017-08-10T22:22:08.000Z
|
2020-09-29T08:59:42.000Z
|
"""Implemement behave steps."""
from behave_pytest.hook import install_pytest_asserts
install_pytest_asserts()
#from pytest import register_assert_rewrite
from . import given
from . import help
from . import then
from . import when
| 21.272727
| 53
| 0.807692
| 32
| 234
| 5.6875
| 0.5
| 0.21978
| 0.21978
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128205
| 234
| 10
| 54
| 23.4
| 0.892157
| 0.290598
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0
| true
| 0
| 0.833333
| 0
| 0.833333
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8651b07d458fef91fd6d8fbe2238f7ceda3eab55
| 170
|
py
|
Python
|
doc/workflow/examples/driver1.py
|
PyUtilib/PyUtilib
|
d99406f2af1fb62268c34453a2fbe6bd4a7348f0
|
[
"BSD-3-Clause"
] | 24
|
2016-04-02T10:00:02.000Z
|
2021-03-02T16:40:18.000Z
|
doc/workflow/examples/driver1.py
|
PyUtilib/PyUtilib
|
d99406f2af1fb62268c34453a2fbe6bd4a7348f0
|
[
"BSD-3-Clause"
] | 105
|
2015-10-29T03:29:58.000Z
|
2021-12-30T22:00:45.000Z
|
doc/workflow/examples/driver1.py
|
PyUtilib/PyUtilib
|
d99406f2af1fb62268c34453a2fbe6bd4a7348f0
|
[
"BSD-3-Clause"
] | 22
|
2016-01-21T15:35:25.000Z
|
2021-05-15T20:17:44.000Z
|
import pyutilib.workflow
import tasks_yz

# Build a task driver and register the two task classes (defined in
# tasks_yz) so they can be selected by name from the command line.
driver = pyutilib.workflow.TaskDriver()
driver.register_task('TaskZ')
driver.register_task('TaskY')
# Parse command-line arguments and print the result.
print(driver.parse_args())
| 18.888889
| 39
| 0.805882
| 22
| 170
| 6.045455
| 0.636364
| 0.240602
| 0.270677
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.070588
| 170
| 8
| 40
| 21.25
| 0.841772
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0.166667
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
8674d5db162f181d6b47162a8d8c16526725907b
| 119
|
py
|
Python
|
contact/admin.py
|
85599/my-first-contact-app
|
dda8c12cd9232ee6f962d11e18c397d9c5a2f251
|
[
"MIT"
] | null | null | null |
contact/admin.py
|
85599/my-first-contact-app
|
dda8c12cd9232ee6f962d11e18c397d9c5a2f251
|
[
"MIT"
] | null | null | null |
contact/admin.py
|
85599/my-first-contact-app
|
dda8c12cd9232ee6f962d11e18c397d9c5a2f251
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
from .models import Person

# Expose the Person model in the Django admin site.
admin.site.register(Person)
# Register your models here.
| 17
| 32
| 0.798319
| 17
| 119
| 5.588235
| 0.647059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.134454
| 119
| 6
| 33
| 19.833333
| 0.92233
| 0.218487
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.666667
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8104ce758c73d2f12abaea87ce872c7633f68a13
| 54
|
py
|
Python
|
venv/Lib/site-packages/pandas/io/sas/__init__.py
|
arnoyu-hub/COMP0016miemie
|
59af664dcf190eab4f93cefb8471908717415fea
|
[
"MIT"
] | 1
|
2021-02-06T21:00:00.000Z
|
2021-02-06T21:00:00.000Z
|
venv/Lib/site-packages/pandas/io/sas/__init__.py
|
arnoyu-hub/COMP0016miemie
|
59af664dcf190eab4f93cefb8471908717415fea
|
[
"MIT"
] | null | null | null |
venv/Lib/site-packages/pandas/io/sas/__init__.py
|
arnoyu-hub/COMP0016miemie
|
59af664dcf190eab4f93cefb8471908717415fea
|
[
"MIT"
] | 1
|
2021-04-26T22:41:56.000Z
|
2021-04-26T22:41:56.000Z
|
from pandas.io.sas.sasreader import read_sas # noqa
| 27
| 53
| 0.777778
| 9
| 54
| 4.555556
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.148148
| 54
| 1
| 54
| 54
| 0.891304
| 0.074074
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
8130b6c4d6dabb9248dae29af867f8c0044fd04a
| 219
|
py
|
Python
|
WQ_tools/_init_.py
|
lkschn/WQ_tools
|
15c9a290794f00dc2b10b7b261a744bb11cdb9cb
|
[
"MIT"
] | null | null | null |
WQ_tools/_init_.py
|
lkschn/WQ_tools
|
15c9a290794f00dc2b10b7b261a744bb11cdb9cb
|
[
"MIT"
] | null | null | null |
WQ_tools/_init_.py
|
lkschn/WQ_tools
|
15c9a290794f00dc2b10b7b261a744bb11cdb9cb
|
[
"MIT"
] | null | null | null |
from .dicts_modelNWDM import varDict, plot_locs, columnsNWDM
from .plotFunctions import plotTS_modelNWDM, dotplot_modelNWDM
from .nwdmFunctions import wfsbuild, readUrl
from .dwaqFunctions import get_modkey, get_modTime
| 54.75
| 62
| 0.86758
| 27
| 219
| 6.814815
| 0.666667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.091324
| 219
| 4
| 63
| 54.75
| 0.924623
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
d491190d111a25187dcd91999bf7ba7e64a259a5
| 43
|
py
|
Python
|
tests/components/upc_connect/__init__.py
|
zalke/home-assistant
|
a31e49c857722c0723dc5297cd83cbce0f8716f6
|
[
"Apache-2.0"
] | 4
|
2019-07-03T22:36:57.000Z
|
2019-08-10T15:33:25.000Z
|
tests/components/upc_connect/__init__.py
|
zalke/home-assistant
|
a31e49c857722c0723dc5297cd83cbce0f8716f6
|
[
"Apache-2.0"
] | 39
|
2016-12-16T12:40:34.000Z
|
2017-02-13T17:53:42.000Z
|
tests/components/upc_connect/__init__.py
|
zalke/home-assistant
|
a31e49c857722c0723dc5297cd83cbce0f8716f6
|
[
"Apache-2.0"
] | 3
|
2020-03-03T18:14:10.000Z
|
2020-10-04T06:52:45.000Z
|
"""Tests for the upc_connect component."""
| 21.5
| 42
| 0.72093
| 6
| 43
| 5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116279
| 43
| 1
| 43
| 43
| 0.789474
| 0.837209
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d4996450a80f623e6369948e86ffbcdc6cc23d6d
| 21
|
py
|
Python
|
ua_parser/__init__.py
|
kaltura/uap-python
|
bb3c5dc7820f1a42dc4e1f619451f6156925c5a7
|
[
"Apache-2.0"
] | 1
|
2021-12-10T03:19:39.000Z
|
2021-12-10T03:19:39.000Z
|
ua_parser/__init__.py
|
kaltura/uap-python
|
bb3c5dc7820f1a42dc4e1f619451f6156925c5a7
|
[
"Apache-2.0"
] | null | null | null |
ua_parser/__init__.py
|
kaltura/uap-python
|
bb3c5dc7820f1a42dc4e1f619451f6156925c5a7
|
[
"Apache-2.0"
] | null | null | null |
# Package version as a (major, minor, patch) tuple.
VERSION = (0, 15, 0)
| 10.5
| 20
| 0.52381
| 4
| 21
| 2.75
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.25
| 0.238095
| 21
| 1
| 21
| 21
| 0.4375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d4b8d96802b5a03dad082bf1f9bc6ea0d7a0cc82
| 5,758
|
py
|
Python
|
tests/unit/dataactvalidator/test_b14_object_class_program_activity.py
|
RonSherfey/data-act-broker-backend
|
d287abda2cac06dd479ecf0127e789cb8e59387d
|
[
"CC0-1.0"
] | null | null | null |
tests/unit/dataactvalidator/test_b14_object_class_program_activity.py
|
RonSherfey/data-act-broker-backend
|
d287abda2cac06dd479ecf0127e789cb8e59387d
|
[
"CC0-1.0"
] | 3
|
2021-08-22T11:47:45.000Z
|
2022-03-29T22:06:49.000Z
|
tests/unit/dataactvalidator/test_b14_object_class_program_activity.py
|
RonSherfey/data-act-broker-backend
|
d287abda2cac06dd479ecf0127e789cb8e59387d
|
[
"CC0-1.0"
] | null | null | null |
from dataactcore.models.stagingModels import ObjectClassProgramActivity
from dataactcore.models.domainModels import SF133
from tests.unit.dataactvalidator.utils import number_of_errors, query_columns
# Identifier of the SQL validation rule under test and the TAS prefix used
# to build per-test TAS strings.
_FILE = 'b14_object_class_program_activity'
_TAS = 'b14_object_class_program_activity_tas'
def test_column_headers(database):
    """The rule's report must expose every expected output column."""
    required = {'row_number', 'ussgl480100_undelivered_or_cpe_sum', 'ussgl480100_undelivered_or_fyb_sum',
                'ussgl480200_undelivered_or_cpe_sum', 'ussgl480200_undelivered_or_fyb_sum',
                'ussgl488100_upward_adjustm_cpe_sum', 'ussgl488200_upward_adjustm_cpe_sum',
                'ussgl490100_delivered_orde_cpe_sum', 'ussgl490100_delivered_orde_fyb_sum',
                'ussgl490200_delivered_orde_cpe_sum', 'ussgl490800_authority_outl_cpe_sum',
                'ussgl490800_authority_outl_fyb_sum', 'ussgl498100_upward_adjustm_cpe_sum',
                'ussgl498200_upward_adjustm_cpe_sum', 'expected_value_GTAS SF133 Line 2004',
                'difference', 'uniqueid_TAS', 'uniqueid_DisasterEmergencyFundCode'}
    observed = set(query_columns(_FILE, database))
    # Subset test is equivalent to (observed & required) == required.
    assert required <= observed
def test_success(database):
    """Rule passes when the SF 133 line 2004 amount matches the summed
    object class USSGL fields for the fiscal year/period and TAS/DEFC
    combination (DEFC matching is case-insensitive: 'C' vs 'c').
    """
    tas = "".join([_TAS, "_success"])
    # This uses the default submission created in utils for 10/2015 which is period 1 of FY 2016
    sf = SF133(line=2004, tas=tas, period=1, fiscal_year=2016, amount=-15, agency_identifier="sys",
               main_account_code="000", sub_account_code="000", disaster_emergency_fund_code='C')

    def make_op(row, val, defc):
        # One object-class row with every relevant USSGL field set to `val`.
        return ObjectClassProgramActivity(
            job_id=1, row_number=row, tas=tas, by_direct_reimbursable_fun='d',
            ussgl480100_undelivered_or_cpe=val, ussgl480100_undelivered_or_fyb=val,
            ussgl480200_undelivered_or_cpe=val, ussgl480200_undelivered_or_fyb=val,
            ussgl488100_upward_adjustm_cpe=val, ussgl488200_upward_adjustm_cpe=val,
            ussgl490100_delivered_orde_cpe=val, ussgl490100_delivered_orde_fyb=val,
            ussgl490200_delivered_orde_cpe=val, ussgl490800_authority_outl_cpe=val,
            ussgl490800_authority_outl_fyb=val, ussgl498100_upward_adjustm_cpe=val,
            ussgl498200_upward_adjustm_cpe=val, disaster_emergency_fund_code=defc)

    op = make_op(1, 1, 'C')
    op2 = make_op(2, 2, 'c')

    assert number_of_errors(_FILE, database, models=[sf, op, op2]) == 0
def test_failure(database):
    """ Tests that SF 133 amount sum for line 2004 does not match the calculation from Appropriation based on
        the fields below for the specified fiscal year and period and TAS/DEFC combination.
    """
    tas = "".join([_TAS, "_failure"])
    # SF133 amount (5) is chosen to disagree with the summed USSGL fields
    # of op and op2 below, so the rule must report exactly one error.
    sf = SF133(line=2004, tas=tas, period=1, fiscal_year=2016, amount=5, agency_identifier="sys",
               main_account_code="000", sub_account_code="000", disaster_emergency_fund_code='D')
    # First object-class row: every relevant USSGL field set to 1.
    op = ObjectClassProgramActivity(job_id=1, row_number=1, tas=tas, by_direct_reimbursable_fun='d',
                                    ussgl480100_undelivered_or_cpe=1, ussgl480100_undelivered_or_fyb=1,
                                    ussgl480200_undelivered_or_cpe=1, ussgl480200_undelivered_or_fyb=1,
                                    ussgl488100_upward_adjustm_cpe=1, ussgl488200_upward_adjustm_cpe=1,
                                    ussgl490100_delivered_orde_cpe=1, ussgl490100_delivered_orde_fyb=1,
                                    ussgl490200_delivered_orde_cpe=1, ussgl490800_authority_outl_cpe=1,
                                    ussgl490800_authority_outl_fyb=1, ussgl498100_upward_adjustm_cpe=1,
                                    ussgl498200_upward_adjustm_cpe=1, disaster_emergency_fund_code='D')
    # Second row: same fields set to 2; same DEFC so both rows aggregate.
    op2 = ObjectClassProgramActivity(job_id=1, row_number=2, tas=tas, by_direct_reimbursable_fun='d',
                                     ussgl480100_undelivered_or_cpe=2, ussgl480100_undelivered_or_fyb=2,
                                     ussgl480200_undelivered_or_cpe=2, ussgl480200_undelivered_or_fyb=2,
                                     ussgl488100_upward_adjustm_cpe=2, ussgl488200_upward_adjustm_cpe=2,
                                     ussgl490100_delivered_orde_cpe=2, ussgl490100_delivered_orde_fyb=2,
                                     ussgl490200_delivered_orde_cpe=2, ussgl490800_authority_outl_cpe=2,
                                     ussgl490800_authority_outl_fyb=2, ussgl498100_upward_adjustm_cpe=2,
                                     ussgl498200_upward_adjustm_cpe=2, disaster_emergency_fund_code='D')
    assert number_of_errors(_FILE, database, models=[sf, op, op2]) == 1
| 71.08642
| 119
| 0.659778
| 649
| 5,758
| 5.397535
| 0.194145
| 0.074222
| 0.09135
| 0.038824
| 0.772481
| 0.736512
| 0.718242
| 0.718242
| 0.718242
| 0.718242
| 0
| 0.127874
| 0.274748
| 5,758
| 80
| 120
| 71.975
| 0.710967
| 0.079194
| 0
| 0.482759
| 0
| 0
| 0.124834
| 0.103743
| 0
| 0
| 0
| 0
| 0.051724
| 1
| 0.051724
| false
| 0
| 0.051724
| 0
| 0.103448
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d4c699095bab30ac430c91f63838de2026aced19
| 6,860
|
py
|
Python
|
tests/tagulous_tests_migration/south_migrations_expected/0003_tree.py
|
marxide/django-tagulous
|
80c057c5dd2dce85f4bb531b25d3b4982bd03e8f
|
[
"Apache-2.0"
] | null | null | null |
tests/tagulous_tests_migration/south_migrations_expected/0003_tree.py
|
marxide/django-tagulous
|
80c057c5dd2dce85f4bb531b25d3b4982bd03e8f
|
[
"Apache-2.0"
] | null | null | null |
tests/tagulous_tests_migration/south_migrations_expected/0003_tree.py
|
marxide/django-tagulous
|
80c057c5dd2dce85f4bb531b25d3b4982bd03e8f
|
[
"Apache-2.0"
] | null | null | null |
# -*- coding: utf-8 -*-
from south.db import db
from south.v2 import SchemaMigration
class Migration(SchemaMigration):
    """South schema migration 0003: convert the flat tag model into a tree.

    Adds the tree bookkeeping fields (``parent``, ``label``, ``level``,
    ``path``) to the auto-generated ``Tagulous_MigrationTestModel_tags``
    table and replaces the unique constraint on ``('slug',)`` with one on
    ``('slug', 'parent')`` so slugs only need to be unique per branch.
    """

    def forwards(self, orm):
        """Apply: add the tree fields and the ('slug', 'parent') constraint."""
        # Removing unique constraint on 'Tagulous_MigrationTestModel_tags', fields ['slug']
        db.delete_unique(
            u"tagulous_tests_migration_tagulous_migrationtestmodel_tags", ["slug"]
        )
        # Adding field 'Tagulous_MigrationTestModel_tags.parent'
        # Self-referential FK: each tag may point at its parent tag.
        db.add_column(
            u"tagulous_tests_migration_tagulous_migrationtestmodel_tags",
            "parent",
            self.gf("django.db.models.fields.related.ForeignKey")(
                blank=True,
                related_name="children",
                null=True,
                to=orm["tagulous_tests_migration.Tagulous_MigrationTestModel_tags"],
            ),
            keep_default=False,
        )
        # Adding field 'Tagulous_MigrationTestModel_tags.label'
        db.add_column(
            u"tagulous_tests_migration_tagulous_migrationtestmodel_tags",
            "label",
            self.gf("django.db.models.fields.CharField")(default=".", max_length=191),
            keep_default=False,
        )
        # Adding field 'Tagulous_MigrationTestModel_tags.level'
        db.add_column(
            u"tagulous_tests_migration_tagulous_migrationtestmodel_tags",
            "level",
            self.gf("django.db.models.fields.IntegerField")(default=1),
            keep_default=False,
        )
        # Adding field 'Tagulous_MigrationTestModel_tags.path'
        # path is unique, so it goes through tagulous' helper, which
        # back-fills each existing row (here: with its pk) before the
        # unique constraint is applied.
        from tagulous.models.migrations import add_unique_column
        add_unique_column(
            self,
            db,
            orm["tagulous_tests_migration.Tagulous_MigrationTestModel_tags"],
            "path",
            lambda obj: setattr(obj, "path", str(obj.pk)),
            "django.db.models.fields.TextField",
        )
        # Adding unique constraint on 'Tagulous_MigrationTestModel_tags', fields ['slug', 'parent']
        db.create_unique(
            u"tagulous_tests_migration_tagulous_migrationtestmodel_tags",
            ["slug", "parent_id"],
        )

    def backwards(self, orm):
        """Reverse: drop the tree fields and restore the ('slug',) constraint."""
        # Removing unique constraint on 'Tagulous_MigrationTestModel_tags', fields ['slug', 'parent']
        db.delete_unique(
            u"tagulous_tests_migration_tagulous_migrationtestmodel_tags",
            ["slug", "parent_id"],
        )
        # Deleting field 'Tagulous_MigrationTestModel_tags.parent'
        db.delete_column(
            u"tagulous_tests_migration_tagulous_migrationtestmodel_tags", "parent_id"
        )
        # Deleting field 'Tagulous_MigrationTestModel_tags.path'
        db.delete_column(
            u"tagulous_tests_migration_tagulous_migrationtestmodel_tags", "path"
        )
        # Deleting field 'Tagulous_MigrationTestModel_tags.label'
        # (the generated comment said 'BookmarkTag.label' -- stale copy/paste)
        db.delete_column(
            u"tagulous_tests_migration_tagulous_migrationtestmodel_tags", "label"
        )
        # Deleting field 'Tagulous_MigrationTestModel_tags.level'
        # (the generated comment said 'BookmarkTag.level' -- stale copy/paste)
        db.delete_column(
            u"tagulous_tests_migration_tagulous_migrationtestmodel_tags", "level"
        )
        # Adding unique constraint on 'Tagulous_MigrationTestModel_tags', fields ['slug']
        db.create_unique(
            u"tagulous_tests_migration_tagulous_migrationtestmodel_tags", ["slug"]
        )

    # Frozen ORM definitions South uses to build the ``orm`` accessor passed
    # to forwards()/backwards().  Generated; do not hand-edit field specs.
    models = {
        u"tagulous_tests_migration.tagulous_migrationtestmodel_singletag": {
            "Meta": {
                "ordering": "('name',)",
                "unique_together": "(('slug',),)",
                "object_name": "Tagulous_MigrationTestModel_singletag",
                "_bases": ["tagulous.models.BaseTagModel"],
            },
            "count": ("django.db.models.fields.IntegerField", [], {"default": "0"}),
            u"id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
            "name": (
                "django.db.models.fields.CharField",
                [],
                {"unique": "True", "max_length": "191"},
            ),
            "protected": (
                "django.db.models.fields.BooleanField",
                [],
                {"default": "False"},
            ),
            "slug": ("django.db.models.fields.SlugField", [], {"max_length": "50"}),
        },
        u"tagulous_tests_migration.tagulous_migrationtestmodel_tags": {
            "Meta": {
                "ordering": "('name',)",
                "unique_together": "(('slug', 'parent'),)",
                "object_name": "Tagulous_MigrationTestModel_tags",
                "_bases": ["tagulous.models.BaseTagTreeModel"],
            },
            "count": ("django.db.models.fields.IntegerField", [], {"default": "0"}),
            u"id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
            "label": ("django.db.models.fields.CharField", [], {"max_length": "191"}),
            "level": ("django.db.models.fields.IntegerField", [], {"default": "1"}),
            "name": (
                "django.db.models.fields.CharField",
                [],
                {"unique": "True", "max_length": "191"},
            ),
            "parent": (
                "django.db.models.fields.related.ForeignKey",
                [],
                {
                    "blank": "True",
                    "related_name": "'children'",
                    "null": "True",
                    "to": u"orm['tagulous_tests_migration.Tagulous_MigrationTestModel_tags']",
                },
            ),
            "path": ("django.db.models.fields.TextField", [], {"unique": "True"}),
            "protected": (
                "django.db.models.fields.BooleanField",
                [],
                {"default": "False"},
            ),
            "slug": ("django.db.models.fields.SlugField", [], {"max_length": "50"}),
        },
        u"tagulous_tests_migration.migrationtestmodel": {
            "Meta": {"object_name": "MigrationTestModel"},
            u"id": ("django.db.models.fields.AutoField", [], {"primary_key": "True"}),
            "name": ("django.db.models.fields.CharField", [], {"max_length": "10"}),
            "singletag": (
                "tagulous.models.fields.SingleTagField",
                [],
                {
                    "_set_tag_meta": "True",
                    "blank": "True",
                    "to": u"orm['tagulous_tests_migration.Tagulous_MigrationTestModel_singletag']",
                    "null": "True",
                },
            ),
            "tags": (
                "tagulous.models.fields.TagField",
                [],
                {
                    "to": u"orm['tagulous_tests_migration.Tagulous_MigrationTestModel_tags']",
                    "tree": "True",
                    "_set_tag_meta": "True",
                },
            ),
        },
    }

    complete_apps = ["tagulous_tests_migration"]
| 39.425287
| 101
| 0.540962
| 579
| 6,860
| 6.136442
| 0.158895
| 0.219533
| 0.227976
| 0.112581
| 0.802139
| 0.779341
| 0.717422
| 0.63186
| 0.566001
| 0.488883
| 0
| 0.005168
| 0.323032
| 6,860
| 173
| 102
| 39.653179
| 0.759905
| 0.111224
| 0
| 0.475862
| 0
| 0
| 0.438599
| 0.33158
| 0
| 0
| 0
| 0
| 0
| 1
| 0.013793
| false
| 0
| 0.02069
| 0
| 0.055172
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d4c8fe78c6ae1996f8397aab862344b484c7173d
| 22,801
|
py
|
Python
|
prob4.py
|
reschly/cryptopals
|
8cdb5e909a6385b0b6b5b1a9c1cde1b277d8e5a2
|
[
"Apache-2.0"
] | 43
|
2015-03-16T17:14:51.000Z
|
2021-02-18T17:23:28.000Z
|
prob4.py
|
HMY626/CryptoChallenge
|
9100d6ba227a22aba5016f480ec8d361cfdf3779
|
[
"Apache-2.0"
] | null | null | null |
prob4.py
|
HMY626/CryptoChallenge
|
9100d6ba227a22aba5016f480ec8d361cfdf3779
|
[
"Apache-2.0"
] | 13
|
2015-03-16T17:14:55.000Z
|
2019-11-14T21:38:32.000Z
|
#!/usr/bin/env python
# Written against python 3.3.1
# Matasano Problem 4
# Detect single-character XOR
# One of the 60-character strings at:
#
# https://gist.github.com/3132713
# has been encrypted by single-character XOR. Find it. (Your code from
# problem 3 should help.)
from prob3 import tryKey
from prob1 import rawToHexLUT, hexToRaw
cipher_strings = [
b'0e3647e8592d35514a081243582536ed3de6734059001e3f535ce6271032',
b'334b041de124f73c18011a50e608097ac308ecee501337ec3e100854201d',
b'40e127f51c10031d0133590b1e490f3514e05a54143d08222c2a4071e351',
b'45440b171d5c1b21342e021c3a0eee7373215c4024f0eb733cf006e2040c',
b'22015e420b07ef21164d5935e82338452f42282c1836e42536284c450de3',
b'043b452e0268e7eb005a080b360f0642e6e342005217ef04a42f3e43113d',
b'581e0829214202063d70030845e5301f5a5212ed0818e22f120b211b171b',
b'ea0b342957394717132307133f143a1357e9ed1f5023034147465c052616',
b'0c300b355c2051373a051851ee154a023723414c023a08171e1b4f17595e',
b'550c3e13e80246320b0bec09362542243be42d1d5d060e203e1a0c66ef48',
b'e159464a582a6a0c50471310084f6b1703221d2e7a54502b2b205c433afa',
b'ec58ea200e3005090e1725005739eda7342aed311001383fff7c58ef1f11',
b'01305424231c0d2c41f105057f74510d335440332f1038ec17275f5814e1',
b'05f12f380720ea2b19e24a07e53c142128354e2827f25a08fb401c3126a6',
b'0d17272f53063954163d050a541b1f1144305ae37d4932431b1f33140b1b',
b'0b4f070f071fe92c200e1fa05e4b272e50201b5d493110e429482c100730',
b'100a3148080f227fe60a132f0c10174fe3f63d1a5d38eb414ca8e82f2b05',
b'0a19e83c58400a023b13234572e6e4272bf67434331631e63b5e0f00175c',
b'54520c2ceb45530e0f78111d0b0707e01e4bf43b0606073854324421e6f9',
b'09e7585353ee4a34190de1354e481c373a1b2b0a136127383e271212191f',
b'0f060d09fb4f2d5024022c5ff6463c390c2b5f1a5532071a31f33503fcea',
b'371d39121605584f48217235ee1e0602445c162e4942254c071954321d29',
b'4a0900e63e5f161e15554045f3594c2a6a77e4e52711602beaf53ae53bed',
b'29011616565d2a372a605bee39eced31183fe068185c3b445b391fe53232',
b'e4102337000303452a1e2f2b29493f54ed5a037b3e08311b625cfd005009',
b'2d560d4b0618203249312a310d5f541f295c3f0f25235c2b20037d1600f3',
b'2c245155e8253708391a7ceb0d05005c3e080f3f0f0e5a16583b111f4448',
b'493804044d262eec3759594f212d562420105d6a39e70a0f3957f347070c',
b'e72d1d1f103807590f4339575e00381074485d2d580249f744052605e11d',
b'e131570ae95307143a71131729552d001057a4540a1f425b190b572dee34',
b'2c1655342f02581c202b0a5c17a358291e1506f325550f05365e165c1c5f',
b'e318164df80b043e5406296e5359271d152f552e155a43eda81f23231d1c',
b'001de0413e174e18192c061e4b3d1b5626f90e3e1429544a20ee150d0c20',
b'32e902193219033c58191302441a5c1b584825ea140c290927aaea53e23c',
b'3a36363a732e32ea3f0e430508204b332c382a19292d5b291122e123446a',
b'1804115614031f5f571f2b143c5d3c1b257a4b37350f18445a3e08341c3d',
b'21f2fb250b2e55151e77253a3f0e5f4b2030370a4155e720e73914e35a4a',
b'510a55583a3c491221397c123a2b14a8305b3b09e71b241d0e51202e1a32',
b'1b51202f4917232b512a141d6812f03c455df05e5a1c2cee14390b3b593a',
b'5f5731e5203116ee131a4a4b24112cef5d0822f035e6547d3a0014462f26',
b'0028fb522104f771501a555d3f581e30e9ec3e49e3e63123432f07794145',
b'1459f6312f000e5a1373e346e40f211e1b0b0e17000f391f170552150500',
b'7e301e18325717e3412e022f087be30e5641080151357714e0e0eee15e11',
b'533258e9360f513b083aa51d2824222f40200a470537ecec392d31070b38',
b'07e32c180dfa56496a461627542115132a4c284050495b23e2245b093159',
b'2d3c230a1e5a300f6c3e26ed0d1709434950fd6f1e121335054129e4e4ec',
b'ef22fa2112311b11584ce43434f46f521a215433f9514fe33d313a3e0838',
b'34e7f336270c08010f2f544f0f1c1e235c0222644c2632efec061de2115f',
b'121a42395d4c560d213b0c0a26a7e4f4382718153d5e511158a10b2c021e',
b'e05d414dfa40222f0c382a03235f4d0d04372d4b7855105e26e44f2e0555',
b'7f3a4f1351f85b0344223e1177e14707190c0e311f4ca633f5f3e9352372',
b'01424d5d1a322a0d381717130e181d07240c2c19ecee750b1a37085d014c',
b'16012c5de55a0314a8260e2759e439123ca0c81c321d454e4e0ee14f4c1d',
b'0b1415512f38580e4e2a227def242643183c224f0ea146443403022fe9fd',
b'43eb2b1078322a02192d5b5e0c360d584d0b5e2c13072912ee32f03f4155',
b'002a52553e08361b0be0074b573e201c164c093a5c0f0159333b59770d5b',
b'38e63c1c5244301a5a01f26930321256143e1ae05e1120a9eaf20a192d58',
b'7d54140a152ef4035f09083ded531ee04df55848020656a1342e502649eb',
b'0c211dfe101702015516341136252f3f06f73247133113f5642d083a3417',
b'015e3d51433f3c003e5e28030b1d413eee186824504b241e0f0d32373e2b',
b'2d465040ec130c5c0e2704aa17010c40095207223669110f22f45ea155f7',
b'14552e2b341e5ce0195351066a23e3283e0ee935444b255a1c5c3cef7614',
b'372b453d5a357c05142be65b3c17f92d2b134853390a312bf92a531b513d',
b'5658265f4c0ce4440a20322f591a413034292b312206a01be6453a512d21',
b'1c585c19f31f785324f8583d1ee02620342b10a236263f105011ee5b0e14',
b'0f522b550818591a752e5fea0e033322ee5e280a4a1b244f5a2b35341255',
b'39093c1ced331b264127173f1312e2455fa33b31012c1f4d073c553f5d5e',
b'18f82d5d07e2430b3b3c1b5b49effb0313173f5d4a2e5c134555ff6b1d1a',
b'550a20234202726341190311295254f4064205aa515ae0145a23071c4e18',
b'3f2047024e3ce4555a1b39fa145455012c3afb0f2d11134846182e3c575b',
b'e3e456571937762828065443153b51152e262f09c937024405284f236432',
b'012f580c3536ec5c021574541d5c41123a4e661d5f0f5f344a083e3a5e4c',
b'4216252d01eb0a2a4623621b48360d312c29f33e380650447617124b3e71',
b'54141e59323606390204e95f1206520e5c084510034d30171c5e744f335d',
b'1e30061401600b342e171059526d1949431a3f412f56594c183711ea4837',
b'3131254f11e76f550e1e4d26f1391f44363b151c31281ff45259351da0e6',
b'5def250d0f3505385f22e9f4112633005d272d092e0138275851f943e90e',
b'0939165718303b445210095c16390cf04f19450e06f4545c0a0c320e3e23',
b'1e0b0b1f573f3d0fe05d43090fa8482242300819313142325b1f4b19365b',
b'0d3b2a5d271e463d2203765245065d5d684a051e5815265b52f3171d3004',
b'6af423303817a43324394af15a5c482e3b16f5a46f1e0b5c1201214b5fe4',
b'4030544f3f51151e436e04203a5e3b287ee303490a43fb3b28042f36504e',
b'1a2d5a03fc0e2c04384046242e2b5e1548101825eb2f285f1a210f022141',
b'122355e90122281deeed3ba05636003826525d5551572d07030d4935201f',
b'2a3c484a15410d3b16375d4665271b5c4ce7ee37083d3e512b45204f17f6',
b'03222801255c2c211a7aeb1e042b4e38e8f1293143203139fb202c325f2b',
b'06542a28041956350e292bf3fe5c32133a2a171b3a3e4e4e3101381529e3',
b'4a5209ef24e5f3225e503b143d0e5747323fe7ee3d5b1b5110395619e65a',
b'1fee0a3945563d2b5703701817584b5f5b54702522f5031b561929ea2d1e',
b'e7271935100e3c31211b23113a3a5524e02241181a251d521ff52f3c5a76',
b'144a0efee02f0f5f1d353a1c112e1909234f032953ec591e0a58e55d2cf4',
b'efee0cf00d0955500210015311467543544708eb590d113d30443d080c1e',
b'1a562c1f7e2b0030094f051c03e30f4d501a0fe22a2817edfc5e470c3843',
b'1c3df1135321a8e9241a5607f8305d571aa546001e3254555a11511924',
b'eb1d3f54ec0fea341a097c502ff1111524e24f5b553e49e8576b5b0e1e33',
b'72413e2f5329e332ec563b5e65185efefd2c3b4e5f0b5133246d214a401d',
b'352a0ae632183d200a162e5346110552131514e0553e51003e220d47424b',
b'1d005c58135f3c1b53300c3b49263928f55625454f3be259361ded1f0834',
b'2d2457524a1e1204255934174d442a1a7d130f350a123c4a075f5be73e30',
b'0c0518582d131f39575925e0231833370c482b270e183810415d5aec1900',
b'453b181df1572735380b0446097f00111f1425070b2e1958102ceb592928',
b'010a4a2d0b0926082d2f1525562d1d070a7a08152f5b4438a4150b132e20',
b'2b395d0d5d015d41335d21250de33e3d42152d3f557d1e44e4ee22255d2d',
b'4a1b5c272d0d1c45072639362e402dee2853e51311262b17aa72eb390410',
b'e7015f0215352030574b4108e44d0e1a204418e62325ff7f34052f234b2d',
b'1d563c13202346071d39e34055402b0b392c27f552222d3deb3843ee2c16',
b'29332a521f3c1b0811e33e1a25520e323e75e01c17473f55071226120d3d',
b'210b35ee1a0a5335222e35033905170c4f3104eb032d425058367d5a2bf2',
b'1e553809415efb1c460f2f0ffafaec491e4d4e49510452e8245a366a4106',
b'e1f92cee0e10142514e7ec13155c412fe901092f1f0fa738280c5eee5e04',
b'3526291e0b2a5f486a3051041f4c16372f5402e6f70b31a03525190b161a',
b'260e5e1f0c2e4d7528ef11552fefe247201e4752085c1da903563c162a4b',
b'2a14ff2e3265e604075e523b24455c364a7f284f3a43051d52152f1119e8',
b'5f02e55a4b1300063640ef10151002565f0b0c010033a1cbef5d3634484a',
b'1b121c585b495a5e033a09037f2d1754072c2d49084055172a3c220bed4f',
b'1613400e1632435c0018482aa55b363d26290ae4405ded280f2b0c271536',
b'4011250ce02119464a1de43113170356342c272d1d3355555e5706245e0a',
b'16272d5e545953002e10020875e223010719555410f91ce518420e382456',
b'0d4037320345f945241a1d090a545a310142442131464f4d10562ae4f05a',
b'07ee4d4ae12e571e313c1636313134233e495459e548317708563c2c1b2f',
b'e75803294b36565225552c3406304f0201e43323291b5e0e2159025c2f25',
b'5e63194411490c44494232237e1b323108573d3f391d1f3537e4165a2b35',
b'51000a3a264c503b5852072a5636f04f5cea58a42838f5fca876415c3521',
b'3c14130be511275932055a30aa2d03470c51060009f210543002585f5713',
b'10f0370c5823115200e5015d083e2f1a5df91d68065c1b03f0080855e529',
b'02ec00f1462d034123151ba6fc07eb3d5e54e85a3f3ee532fb41791a060b',
b'0c29274232f93efb3d465544e45e491b042ced245100e3f05c14134c254b',
b'5741235f051e080401a8013c065627e8ee5432205114243d54320e133f2d',
b'4a4d181635411f5d084e31ed230c16506d5125415e060e4dcd0e5f3708e3',
b'2d531c3e22065a5eee07310c145305131800063e4a20094b2006ea131240',
b'e7335c1c4308160be6aa551a0f5a58243e0b10ee470047683c345e1c5b0c',
b'5434505ee22a18110d20342e4b53062c4d79042a0a02422e225b2523e95a',
b'3252212407115c07e15eee06391d0519e9271b641330011f383410281f0e',
b'2cee2b355233292b595d1c69592f483b54584f7154fd4928560752e333a1',
b'17272b272f110df5e91c560a39104510240b5c4b0c1c570871e422351927',
b'c32550ec3f132c0c2458503ae5241d3c0d7911480a073826315620403615',
b'16e11c270d2b010650145de2290b0beb1e120a3a354b2104064f3b533c4e',
b'505746313d4d2e3455290a281ee81d50007e1148252528025237715a342a',
b'1c0a13163e404e40242142061d34185421160220fa031f7a423a08f2e01a',
b'101d303802f51b0c08ef461259315b553823e622a12d565509e23c624139',
b'0a3d1309e4384c0eed383846545a035a41ee1771513b090a031e15f45159',
b'2d4944092a1965542507003b23195758403e175a0a450c5c38114de21141',
b'eb100fe63a031c4b35eb591845e428441c0d5b0037131f5c160a31243619',
b'c155ef0d19143e24392507a202581a25491b135c27571d5c5b35250f0bef',
b'0e1d510556485e39557e044e2cf10457523016473f500b1e36370c17591c',
b'7e5a19250a5e152b46f5130a094cef08e84704ef10197324464b0114017a',
b'3b56f126390008343d3c400232ed201667211f0b1a1413080202530b08e2',
b'4912321b61c90a0cf6ef0a0a0c0f17fa62eb385e2616194526701aff5fe6',
b'2c57114b0400152d4f2aeb18ed41386c2e3a023a281d1a311eefe750ebab',
b'3a4353282114593b3e36446d2c5e1e582e335337022930331f211604576a',
b'295f3bfae9271ae8065a3b4417545c3e5b0df11a53351c78530915392d2e',
b'074a122ee01b17131e4e124e2322a9560ce4120e37582b24e1036fe93f30',
b'3c08290121090ef72f25e4f220323444532d3fe71f34553c7b2726131009',
b'12e84a3308590357a719e74c4f2133690a20031a0b045af63551325b1219',
b'0e3d4fe03f56523cf40f29e4353455120e3a4f2f26f6a30a2b3e0c5b085a',
b'57f3315c33e41c0f523426232d0651395c1525274e314d0219163b5f181f',
b'53471622182739e9e25b473d74e1e7023d095a3134e62d1366563004120e',
b'230a06431935391d5e0b5543223a3bed2b4358f555401e1b3b5c36470d11',
b'22100330e03b4812e6120f163b1ef6abebe6f602545ef9a459e33d334c2a',
b'463405faa655563a43532cfe154bec32fe3345eb2c2700340811213e5006',
b'14241340112b2916017c270a0652732ee8121132385a6c020c040e2be15b',
b'251119225c573b105d5c0a371c3d421ef23e22377fee334e0228561b2d15',
b'2e4c2e373b434b0d0b1b340c300e4b195614130ea03c234c292e14530c46',
b'0d2c3f08560ee32e5a5b6413355215384442563e69ec294a0eef561e3053',
b'193c100c0b24231c012273e10d2e12552723586120020b02e45632265e5f',
b'2c175a11553d4b0b16025e2534180964245b125e5d6e595d1d2a0710580b',
b'213a175ff30855e4001b305000263f5a5c3c5100163cee00114e3518f33a',
b'10ed33e65b003012e7131e161d5e2e270b4645f358394118330f5a5b241b',
b'33e80130f45708395457573406422a3b0d03e6e5053d0d2d151c083337a2',
b'551be2082b1563c4ec2247140400124d4b6508041b5a472256093aea1847',
b'7b5a4215415d544115415d5015455447414c155c46155f4058455c5b523f',
b'0864eb4935144c501103a71851370719301bec57093a0929ea3f18060e55',
b'2d395e57143359e80efffb13330633ea19e323077b4814571e5a3de73a1f',
b'52e73c1d53330846243c422d3e1b374b5209543903e3195c041c251b7c04',
b'2f3c2c28273a12520b482f18340d565d1fe84735474f4a012e1a13502523',
b'23340f39064e306a08194d544647522e1443041d5ee81f5a18415e34a45f',
b'475a392637565757730a0c4a517b2821040e1709e028071558021f164c54',
b'100b2135190505264254005618f51152136125370eef27383e45350118ed',
b'3947452914e0223f1d040943313c193f295b221e573e1b5723391d090d1f',
b'2c33141859392b04155e3d4e393b322526ee3e581d1b3d6817374d0c085b',
b'c2ea5821200f1b755b2d13130f04e26625ea3a5b1e37144d3e473c24030d',
b'ee15025d2019f757305e3f010e2a453a205f1919391e1a04e86d1a350119',
b'1a5beb4946180fe0002a031a050b41e5164c58795021e1e45c59e2495c20',
b'1121394f1e381c3647005b7326250514272b55250a49183be5454ba518eb',
b'1ee55936102a465d5004371f2e382f1d03144f170d2b0eed042ee341eb19',
b'ec1014ef3ff1272c3408220a41163708140b2e340e505c560c1e4cf82704',
b'274b341a454a27a0263408292e362c201c0401462049523b2d55e5132d54',
b'e259032c444b091e2e4920023f1a7ce40908255228e36f0f2424394b3c48',
b'34130cf8223f23084813e745e006531a1e464b005e0e1ee405413fe22b4e',
b'4af201080c0928420c2d491f6e5121e451223b070dee54244b3efc470a0e',
b'771c161f795df81c22101408465ae7ef0c0604733ee03a20560c1512f217',
b'2f3a142c4155073a200f04166c565634020a59ea04244ff7413c4bc10858',
b'240d4752e5fa5a4e1ce255505602e55d4c575e2b59f52b4e0c0a0b464019',
b'21341927f3380232396707232ae424ea123f5b371d4f65e2471dfbede611',
b'e10e1c3b1d4d28085c091f135b585709332c56134e4844552f45eb41172a',
b'3f1b5a343f034832193b153c482f1705392f021f5f0953290c4c43312b36',
b'3810161aea7001fb5d502b285945255d4ef80131572d2c2e59730e2c3035',
b'4d59052e1f2242403d440a13263e1d2dea0612125e16033b180834030829',
b'022917180d07474c295f793e42274b0e1e16581036225c1211e41e04042f',
b'ec2b41054f2a5f56065e5e0e1f56e13e0a702e1b2f2137020e363a2ae2a4',
b'53085a3b34e75a1caa2e5d031f261f5f044350312f37455d493f131f3746',
b'0c295f1724e90b001a4e015d27091a0b3256302c303d51a05956e6331531',
b'e42b315ce21f0def38144d20242845fa3f3b3b0ce8f4fb2d31ed1d54134b',
b'2957023141335d35372813263b46581af6535a16404d0b4ff12a207648ec',
b'e4421e301de25c43010c504e0f562f2018421ce137443b41134b5f542047',
b'0c5600294e085c1d3622292c480d261213e05c1334385108c145f3090612',
b'062d2e02267404241f4966e6e010052d3224e72856100b1d22f65a30e863',
b'324950394700e11a01201a0564525706f1013f353319076b4c0d015a2e24',
b'2a1be80e2013571522483b1e20321a4e03285d211a444d113924e8f41a1f',
b'27193ae2302208e73010eaa1292001045737013e10e4745aed2c105b25fb',
b'1b135d46eaef103e1d330a14337a2a4302441c1631ed07e7100c743a0e35',
b'1a0957115c293b1c0de853245b5b18e2e12d28421b3230245d7b4a55f355',
b'e7360e2b3846202a2926fa495e3302ed064d127a17343a1f11032b40e8f5',
b'06e8f90a3118381c5414157d1434050210363e30500511a00a3d56e10438',
b'30021931f7193e25a0540ef52658350929380974fb035b1a5d2c042959c7',
b'151b0c24052d0e56025404390e5a3909edec0d03070f040cff710825363e',
b'2a2328120b2203320810134a0c0a0ef30b25460bec011c1e26e913575a51',
b'e12d0948ed3c511416151d1c54082b3e385d14f838510bec4e4b5f585321',
b'1559305c3a49192a010f04ec11001a3d5a5621e5535358353206521f013f',
b'172c2c155a3a322009505c290516a2c4e4405a1e0a1e353b6e1a5a4e2f09',
b'552c34e2432b0df1132b130841000d4007232339a2092a593f142b0a0117',
b'0931432e452d3aea1d02587d3a3e56ed2a3050e2f9363df366331e421947',
b'0250094823545b20163f1d0a36a92228ed25564d1a304deae8035c32370d',
b'4314380e264e2359e6a412504a424328e84434ff30236649353315344a00',
b'25e33540550d3c15135b0eed451cfd1812eaf2063f085d6e214d121c342f',
b'37513b2d0a4e3e5211372a3a01334c5d51030c46463e3756290c0d0e1222',
b'132f175e4c4af1120138e1f2085a3804471f5824555d083de6123f533123',
b'0de11936062d3d2f12193e135f38ff5e1a531d1426523746004e2c063a27',
b'49241aee1802311611a50de9592009e936270108214a0c4213a01f09545f',
b'02e14d2babee204a5c4337135821360d021b7831305963ee0737072f0deb',
b'1512371119050c0c1142245a004f033650481830230a1925085c1a172726',
b'3be62f230a4b50526ec9345100252aa729eafa59221b3fa517304e500a15',
b'5e57f231333c3d0c470a47551733511031362a3bed0f334a3f3136104230',
b'eb24015d051a151f245905061a37ea273d2239fe02463a5e314d565f0457',
b'23025f415d290a594e3b5940313347a11c5e41531ff15a385a183829780a',
b'51e0035f2deb3b163eabe8550e2e0414491f573b5419234a28183044e112',
b'1d54e8390b26585f3aef5f14206672240c4a5e5d31e01b4d406e351401fa',
b'e555173e242c753b275d4ee50b2f26501402a71b1b5733ec19ee34284aed',
b'2ee8f023401c09383b084d623ef324ee5a33065a6d5e365b092c5d0d4501',
b'3f4e024d4b161e144d5e3b140d1e2944465b491d265603a705373c231240',
b'544f0d4ea6091e00e62d3e130d4f005139f339001a3b480c221b730be75e',
b'5f1f4f3e0a0dec3b5128e32960e42d0fee02275528154b10e65c36555a2e',
b'ea3e311b5b0f5f220b1f1b2914f12111f41213e06232224df5ec0114470d',
b'51203f1e01e5563851284013514a565e53125223052f47100e5011100201',
b'3f5bee2305217838582be55958a00245265b0308ec56525b5c114c2d5407',
b'e6e74818e53602160e45372029eb4de72754ec3f49290d2f5901014c0e7f',
b'08e715e612380a5c1908285a1222073a023c562907384e4f470444483f34',
b'1110382b5225343ba6092133483e2d683e1e280227084a1e405e3a341513',
b'415f240f0c53e3f7196e2252fb0105347f345e531f535a344bf439220916',
b'5722e7f7fa2f4c2e057e2a025e2dec31413439aa12265f5a3458f81a4b15',
b'135839401856f337a72fec475a060de239a650163a55392a5b303f051415',
b'56090f18023a2b16e2364407050d48e1541408281d3aa3e84c5b264c1f33',
b'1725f9540aec5e10ed293e4e5a5a2d2125f053251a55395d1c2044022231',
b'292d523ff86a180620075f325e02566659f30423525a053a01f0087f4b3b',
b'17fe493808f25309251e1325596ce32b42311e5d0c2f58652640582a4b17',
b'67381a5afb7128150a0043e45b173d2111155c49092d2635370a3a201826',
b'e62d021d36e03b205d5f1f295c094608342a412122583f3bfc34190be62c',
b'393a055f59060d454a235326e844243a30285c14e316272524f4f0444f51',
b'352c3c5b2b5845244f55494940194721f80b120f07392b7c2c5a0508111e',
b'2f1219430151e60f11150b101e295736361b1e053e4d08f83f230e2c383a',
b'ef5b1d492610e834330f5cf3a2485d324f2822084f41111f582957191b19',
b'1e3e223704fe1d2e1f592753e5550f15170b231b4234e945301f5605a670',
b'300d322759ea0337015c662a0e073809543f2741104835512d0624551751',
b'373727ef1f41084d0b5c0c0137283b1337026aea1c5ae115064ffa183402',
b'09152b11e1233e5a0e302a521c5a33181e180026463744a82c024b4bf04e',
b'1df61df1263fee59135c13400950153d3c5c59183b020b1d2d2c492f4968',
b'e2000c405a01ede30c4c082e2537443c120f38fc57c43651423e5c3beb1d',
b'1922182420191b293e163d58020b005f454a0621051a38e80b090a463ee9',
b'39513f2d47042c0fe5134419ec48490f150f323a5ee7a7e0201e193a5e1b',
b'2037200a2b1013567b35fb4a0f322c2f49435d091920521c302b413f5f35',
b'775d1a345b483b35a02a4c3e17ee3a3d5a5b57153613264f23041922432f',
b'35125b3e0a1d2257eb002a26455e1a2f042e1545e92f0b3408032c4f3551',
b'2d4c392321300a18ed4f3e2c314d20500052aa3917e55d0d29500754282e',
b'381b2e263758f63c474a1c23110c2d5f1c220412e91043580656080c0427',
b'081ce1e5350b6a3535f0e6592e5b543432340e38f008e0324102e45a3f25',
b'30040c181615362e4d1016160a4a5c006eeb1d2422355a3f1028ff192a07',
b'53f6354d4b5d121974245c14f0225713331f2e381810101428571725e432',
b'1a2c06372d5b1419742150042d25003c2650512834ef16e51d183f0f0508',
b'3d191107251100ee2e4125405a44174f061e0e1e5959e606530e06ed245e',
b'3f592d47512dec5922500e460e1de7183b4c3c2e583942255a0c5d4d2305',
b'3438001e482a002d56113a1fe13bed542d3508e22f4e22221431121c1539',
b'ed445a5d28415073eb18022ef836274d573a48090f2a663058194901405d',
b'215b143954fc313c1e28584b51e729ef31013b232bfb4c52e2322a2d4557',
b'5244102e1c3d304450ee01761924e62ff2173305e15809102b2125284dfc',
b'171a3f010f3639056f2be71c2047581de32e05a20833e1221b0e25362459',
b'2958280de238084f5a1c292e005be71f3b311e1f415809383d3862260238',
b'361f56ecee120156375862eb3627185c2519545149e2e50b1f3b0c4e3352',
b'e6115f440634e4005d273611e41c5d383c3814537b3d23362b084024345b',
b'10370656372e0236eb4f3303e216505f0e465228383729394faa2f205f34',
b'2e125b2f2c1d0f1f170e0c51331f0c06291610345c0603791f33253f0e0c',
b'1c2b080526133aeb3e23571d4cfa1e48057a2a010a490a50391b09514f2e',
b'59383ae11237e5450029162d2e1d3e09221a160e42ea06ea0ca7c7ecf4ea',
b'3d3024f34d5c07464bea3b185e110d3a10395d3b2632343cf30ca2e6065a',
b'262f111c0e15441a4825111b185f1e5756243206125f4603e97e79582d27',
b'2d5801ee2654113e2da00b58e9260d643c10423e1d1f42093b0d0f7d5102',
b'3649211f210456051e290f1b4c584d0749220c280b2a50531f262901503e',
b'52053e3e152b5b2b4415580fec57ef5c08e5ed43cc2d2e5b40355d0d2017',
b'6d3917263f030c4b55f0025d501e57504a122729293c4c5819680d3001ed',
b'1e313323324e5e177b171cf70c371541395c0e2b7726e42505483014362e',
b'1910e4f7253f0a012057e03b1e3b4201362b224ff60e0b3a1d115b043957',
b'200c1e0b242e5e3b4755f61e3be05c040908f1234358e55562711d2efa0f',
b'0737e0160b1d13132044080d2325f1f0ee2f00354f2106471131020a5d0b',
b'3f21060de62c052a17576e2ce729242b3e3621300627f01e52580a480050',
b'1b381a11351f4f5d22040c3c4b3e7d263714e8e61a571d107a34260a4a51',
b'edf52314e111207c0b23eb482f441d211f306137152407040e08530a783e',
b'3c054e2d4e2905275e640220f74f1a193f54e1ed5b4e2a290eab27a55147',
b'33522817335316ea2f3df957e25e02030601514f09f74c2fedee102d3114',
b'5d05231d03313826164156110c44e4111f4658005e115e300f413b430300',
b'380bf53a4331f74627492c133fe8eb3141ee39040def040c1a0ae914e3ed',
b'5b00f0211f0a091e05582e22f05a5d262e0ce352251d25100b102b11e339',
b'36053935f051f959093252411e2d5af81f360c0fa15d0b373b1d26323b77',
b'501424184202206215e05944505c4817514540445b0207025de05b050932',
b'0a5a114515536f553a352c513f0b12f700345fa51d5efb28222676e559ea',
b'561b0557403f5f534a574638411e2d3b3c133f79555c333215e6f5f9e7ec',
b'6658f7210218110f00062752e305f21601442c5310162445ed4d175630f3',
b'0e2154253c4a22f02e1b0933351314071b521513235031250c18120024a1',
b'e03555453d1e31775f37331823164c341c09e310463438481019fb0b12fa',
b'37eee654410e4007501f2c0e42faf50125075b2b46164f165a1003097f08',
b'2a5332145851553926523965582e5b2f530d5d1e292046344feaed461517',
b'583d2b06251f551d2f5451110911e6034147481a05166e1f241a5817015b',
b'1f2d3f5c310c315402200010e24135592435f71b4640540a041012ee1b3f',
b'5b2010060e2f5a4d045e0b36192f79181b0732183b4a261038340032f434',
b'3a5557340be6f5315c35112912393503320f54065f0e275a3b5853352008',
b'1c595d183539220eec123478535337110424f90a355af44c267be848173f',
b'41053f5cef5f6f56e4f5410a5407281600200b2649460a2e3a3c38492a0c',
b'4c071a57e9356ee415103c5c53e254063f2019340969e30a2e381d5b2555',
b'32042f46431d2c44607934ed180c1028136a5f2b26092e3b2c4e2930585a',];
def _to_text(value):
    """Return *value* as plain text without a bytes repr wrapper.

    Bytes are decoded directly (latin-1 maps every byte, so decoding never
    fails).  For anything else, a literal ``b'...'`` repr wrapper is removed
    if present.  The previous code used ``str(v).lstrip("b'")``, which strips
    a *character set* -- any run of leading ``b`` / ``'`` characters -- and
    therefore corrupted strings whose hex happens to begin with the digit
    ``b``.
    """
    if isinstance(value, bytes):
        return value.decode("latin-1")
    text = str(value)
    if text.startswith("b'") and text.endswith("'"):
        return text[2:-1]
    return text


def findSingleCharXOR(ciphers):
    """Report which candidate ciphertexts look like single-byte XOR.

    For each hex-encoded ciphertext in *ciphers*, every one of the 256
    possible key bytes is tried via ``tryKey``; candidates whose score
    exceeds the 0.050 threshold are printed (key, cipher hex, plain hex,
    and the decoded plaintext).

    ciphers -- iterable of hex-encoded ciphertexts (bytes objects).
    Returns None; all results go to stdout.
    """
    for cip in ciphers:
        for i in range(256):
            mg, plain = tryKey(cip, rawToHexLUT[i])
            if mg > 0.050:
                cip_hex = _to_text(cip)
                plain_hex = _to_text(plain)
                print("potential key: 0x" + rawToHexLUT[i])
                print("potential hex(cipher): " + cip_hex)
                print("potential hex(plain): " + plain_hex)
                print("potential plaintext: " + _to_text(hexToRaw(plain_hex)))
if __name__ == "__main__":
    # Brute-force every candidate ciphertext against all 256 key bytes.
    findSingleCharXOR(cipher_strings)
    # Known result from a previous run of this program: key 0x58.
    # print("key: 0x58")
    # print("plaintext: " + str(hexToRaw(hex_xor(cip, '58585858585858585858585858585858585858585858585858585858585858585858'))))
| 63.160665
| 129
| 0.907986
| 795
| 22,801
| 26.027673
| 0.509434
| 0.002706
| 0.002513
| 0.00174
| 0.004011
| 0
| 0
| 0
| 0
| 0
| 0
| 0.682658
| 0.041007
| 22,801
| 360
| 130
| 63.336111
| 0.263651
| 0.020131
| 0
| 0
| 0
| 0
| 0.897715
| 0.893026
| 0
| 0
| 0
| 0
| 0
| 1
| 0.002933
| false
| 0
| 0.005865
| 0
| 0.008798
| 0.01173
| 0
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
d4ce7130dd9278bd13b0e97a2ccbdb8eb4381baa
| 124
|
py
|
Python
|
protogen/stalk_proto/models_grpc.py
|
peake100/stalkreports-py
|
4da5d11cd7dc27523c29948386ffc3da90b7588a
|
[
"MIT"
] | null | null | null |
protogen/stalk_proto/models_grpc.py
|
peake100/stalkreports-py
|
4da5d11cd7dc27523c29948386ffc3da90b7588a
|
[
"MIT"
] | 12
|
2020-04-25T22:13:57.000Z
|
2020-05-24T16:24:59.000Z
|
protogen/stalk_proto/models_grpc.py
|
peake100/stalkbroker-py
|
95bed6e6d89dc00b183b71d5d3fce7908c554ed9
|
[
"MIT"
] | null | null | null |
# Generated by the Protocol Buffers compiler. DO NOT EDIT!
# source: stalk_proto/models.proto
# plugin: grpclib.plugin.main
| 31
| 58
| 0.782258
| 18
| 124
| 5.333333
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.129032
| 124
| 3
| 59
| 41.333333
| 0.888889
| 0.943548
| 0
| null | 1
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.