hexsha
string | size
int64 | ext
string | lang
string | max_stars_repo_path
string | max_stars_repo_name
string | max_stars_repo_head_hexsha
string | max_stars_repo_licenses
list | max_stars_count
int64 | max_stars_repo_stars_event_min_datetime
string | max_stars_repo_stars_event_max_datetime
string | max_issues_repo_path
string | max_issues_repo_name
string | max_issues_repo_head_hexsha
string | max_issues_repo_licenses
list | max_issues_count
int64 | max_issues_repo_issues_event_min_datetime
string | max_issues_repo_issues_event_max_datetime
string | max_forks_repo_path
string | max_forks_repo_name
string | max_forks_repo_head_hexsha
string | max_forks_repo_licenses
list | max_forks_count
int64 | max_forks_repo_forks_event_min_datetime
string | max_forks_repo_forks_event_max_datetime
string | content
string | avg_line_length
float64 | max_line_length
int64 | alphanum_fraction
float64 | qsc_code_num_words_quality_signal
int64 | qsc_code_num_chars_quality_signal
float64 | qsc_code_mean_word_length_quality_signal
float64 | qsc_code_frac_words_unique_quality_signal
float64 | qsc_code_frac_chars_top_2grams_quality_signal
float64 | qsc_code_frac_chars_top_3grams_quality_signal
float64 | qsc_code_frac_chars_top_4grams_quality_signal
float64 | qsc_code_frac_chars_dupe_5grams_quality_signal
float64 | qsc_code_frac_chars_dupe_6grams_quality_signal
float64 | qsc_code_frac_chars_dupe_7grams_quality_signal
float64 | qsc_code_frac_chars_dupe_8grams_quality_signal
float64 | qsc_code_frac_chars_dupe_9grams_quality_signal
float64 | qsc_code_frac_chars_dupe_10grams_quality_signal
float64 | qsc_code_frac_chars_replacement_symbols_quality_signal
float64 | qsc_code_frac_chars_digital_quality_signal
float64 | qsc_code_frac_chars_whitespace_quality_signal
float64 | qsc_code_size_file_byte_quality_signal
float64 | qsc_code_num_lines_quality_signal
float64 | qsc_code_num_chars_line_max_quality_signal
float64 | qsc_code_num_chars_line_mean_quality_signal
float64 | qsc_code_frac_chars_alphabet_quality_signal
float64 | qsc_code_frac_chars_comments_quality_signal
float64 | qsc_code_cate_xml_start_quality_signal
float64 | qsc_code_frac_lines_dupe_lines_quality_signal
float64 | qsc_code_cate_autogen_quality_signal
float64 | qsc_code_frac_lines_long_string_quality_signal
float64 | qsc_code_frac_chars_string_length_quality_signal
float64 | qsc_code_frac_chars_long_word_length_quality_signal
float64 | qsc_code_frac_lines_string_concat_quality_signal
float64 | qsc_code_cate_encoded_data_quality_signal
float64 | qsc_code_frac_chars_hex_words_quality_signal
float64 | qsc_code_frac_lines_prompt_comments_quality_signal
float64 | qsc_code_frac_lines_assert_quality_signal
float64 | qsc_codepython_cate_ast_quality_signal
float64 | qsc_codepython_frac_lines_func_ratio_quality_signal
float64 | qsc_codepython_cate_var_zero_quality_signal
bool | qsc_codepython_frac_lines_pass_quality_signal
float64 | qsc_codepython_frac_lines_import_quality_signal
float64 | qsc_codepython_frac_lines_simplefunc_quality_signal
float64 | qsc_codepython_score_lines_no_logic_quality_signal
float64 | qsc_codepython_frac_lines_print_quality_signal
float64 | qsc_code_num_words
int64 | qsc_code_num_chars
int64 | qsc_code_mean_word_length
int64 | qsc_code_frac_words_unique
null | qsc_code_frac_chars_top_2grams
int64 | qsc_code_frac_chars_top_3grams
int64 | qsc_code_frac_chars_top_4grams
int64 | qsc_code_frac_chars_dupe_5grams
int64 | qsc_code_frac_chars_dupe_6grams
int64 | qsc_code_frac_chars_dupe_7grams
int64 | qsc_code_frac_chars_dupe_8grams
int64 | qsc_code_frac_chars_dupe_9grams
int64 | qsc_code_frac_chars_dupe_10grams
int64 | qsc_code_frac_chars_replacement_symbols
int64 | qsc_code_frac_chars_digital
int64 | qsc_code_frac_chars_whitespace
int64 | qsc_code_size_file_byte
int64 | qsc_code_num_lines
int64 | qsc_code_num_chars_line_max
int64 | qsc_code_num_chars_line_mean
int64 | qsc_code_frac_chars_alphabet
int64 | qsc_code_frac_chars_comments
int64 | qsc_code_cate_xml_start
int64 | qsc_code_frac_lines_dupe_lines
int64 | qsc_code_cate_autogen
int64 | qsc_code_frac_lines_long_string
int64 | qsc_code_frac_chars_string_length
int64 | qsc_code_frac_chars_long_word_length
int64 | qsc_code_frac_lines_string_concat
null | qsc_code_cate_encoded_data
int64 | qsc_code_frac_chars_hex_words
int64 | qsc_code_frac_lines_prompt_comments
int64 | qsc_code_frac_lines_assert
int64 | qsc_codepython_cate_ast
int64 | qsc_codepython_frac_lines_func_ratio
int64 | qsc_codepython_cate_var_zero
int64 | qsc_codepython_frac_lines_pass
int64 | qsc_codepython_frac_lines_import
int64 | qsc_codepython_frac_lines_simplefunc
int64 | qsc_codepython_score_lines_no_logic
int64 | qsc_codepython_frac_lines_print
int64 | effective
string | hits
int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
37b79a42787ab39d8a353cbbf500ada9ea93a8d8
| 78
|
py
|
Python
|
oe_site/app/admin.py
|
WsinGithub/ChemECar_web
|
aad7d8b98ac82830ddff0ec9adb99efef66e40f8
|
[
"MIT"
] | null | null | null |
oe_site/app/admin.py
|
WsinGithub/ChemECar_web
|
aad7d8b98ac82830ddff0ec9adb99efef66e40f8
|
[
"MIT"
] | null | null | null |
oe_site/app/admin.py
|
WsinGithub/ChemECar_web
|
aad7d8b98ac82830ddff0ec9adb99efef66e40f8
|
[
"MIT"
] | 1
|
2021-07-30T04:31:43.000Z
|
2021-07-30T04:31:43.000Z
|
from django.contrib import admin
# django框架默认文件
# Register your models here.
| 15.6
| 32
| 0.794872
| 10
| 78
| 6.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 78
| 4
| 33
| 19.5
| 0.939394
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
80a5bf8e4fc4367f119468df4b768b89e6482019
| 382
|
py
|
Python
|
src/pyglue/DocStrings/GroupTransform.py
|
omenos/OpenColorIO
|
7316c3be20752278924dd3f213bff297ffb63a14
|
[
"BSD-3-Clause"
] | 7
|
2015-07-01T03:19:43.000Z
|
2021-03-27T11:02:16.000Z
|
src/pyglue/DocStrings/GroupTransform.py
|
dictoon/OpenColorIO
|
64adcad300adfd166280d2e7b1fb5c3ce7dca482
|
[
"BSD-3-Clause"
] | null | null | null |
src/pyglue/DocStrings/GroupTransform.py
|
dictoon/OpenColorIO
|
64adcad300adfd166280d2e7b1fb5c3ce7dca482
|
[
"BSD-3-Clause"
] | 2
|
2019-03-05T20:43:59.000Z
|
2019-11-11T20:35:55.000Z
|
class GroupTransform:
"""
GroupTransform
"""
def __init__(self):
pass
def getTransform(self):
pass
def getTransforms(self):
pass
def setTransforms(self, transforms):
pass
def size(self):
pass
def push_back(self, transform):
pass
def clear(self):
pass
def empty(self):
pass
| 17.363636
| 40
| 0.54712
| 38
| 382
| 5.368421
| 0.421053
| 0.240196
| 0.269608
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.366492
| 382
| 21
| 41
| 18.190476
| 0.842975
| 0.036649
| 0
| 0.470588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.470588
| false
| 0.470588
| 0
| 0
| 0.529412
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
80bf37aa9aad7edddb690a9919912a42c6115218
| 2,742
|
py
|
Python
|
pymonad/maybe/maybe_test.py
|
Wildhoney/Pymonad
|
177989b3d0f362c3bf3af962d89306309ff000c3
|
[
"MIT"
] | null | null | null |
pymonad/maybe/maybe_test.py
|
Wildhoney/Pymonad
|
177989b3d0f362c3bf3af962d89306309ff000c3
|
[
"MIT"
] | null | null | null |
pymonad/maybe/maybe_test.py
|
Wildhoney/Pymonad
|
177989b3d0f362c3bf3af962d89306309ff000c3
|
[
"MIT"
] | null | null | null |
import unittest
from . import Nothing, Just
a = Just('Adam')
b = Nothing()
def lower(x): return x.lower()
def reverse(x): return x[::-1]
def shout(x): return '%s!' % x
def capitalise(x): return x.capitalize()
class TestJust(unittest.TestCase):
def test_is_just(self):
self.assertEqual(a.is_just(), True)
self.assertEqual(b.is_just(), False)
def test_is_nothing(self):
self.assertEqual(a.is_nothing(), False)
self.assertEqual(b.is_nothing(), True)
def test_map(self):
c = a.map(lower).map(reverse).map(shout).map(capitalise)
self.assertEqual(str(c), 'Just (Mada!)')
d = a.map(lower).map(lambda x: Just(
reverse(x))).map(shout).map(capitalise)
self.assertEqual(str(d), 'Just (Mada!)')
e = a.map(lower).map(lambda x: Nothing()).map(shout).map(capitalise)
self.assertEqual(str(e), 'Nothing')
f = b.map(lower).map(reverse).map(shout).map(capitalise)
self.assertEqual(str(f), 'Nothing')
g = b.map(lower).map(lambda x: Just(
reverse(x))).map(shout).map(capitalise)
self.assertEqual(str(g), 'Nothing')
def test_map_shorthand(self):
c = a >> lower >> reverse >> shout >> capitalise
self.assertEqual(str(c), 'Just (Mada!)')
d = b >> lower >> reverse >> shout >> capitalise
self.assertEqual(str(d), 'Nothing')
def test_get(self):
c = a >> lower >> reverse >> shout >> capitalise
self.assertEqual(c.get(), 'Mada!')
d = b >> lower >> reverse >> shout >> capitalise
self.assertEqual(d.get('Unknown'), 'Unknown')
def test_get_shorthand(self):
c = a >> lower >> reverse >> shout >> capitalise
self.assertEqual(c | 'Unknown', 'Mada!')
d = b >> lower >> reverse >> shout >> capitalise
self.assertEqual(d | 'Unknown', 'Unknown')
def test_equals(self):
self.assertEqual(str(Just('Adam') == Just('Adam')), 'Just (True)')
self.assertEqual(str(Just('Adam') == Just('Imogen')), 'Just (False)')
self.assertEqual(str(Just('Maria') == Just('Imogen')), 'Just (False)')
self.assertEqual(str(Nothing() == Nothing()), 'Nothing')
self.assertEqual(str(Nothing() == Just('Imogen')), 'Nothing')
def test_not_equals(self):
self.assertEqual(str(Just('Adam') != Just('Adam')), 'Just (False)')
self.assertEqual(str(Just('Adam') != Just('Imogen')), 'Just (True)')
self.assertEqual(str(Just('Maria') != Just('Imogen')), 'Just (True)')
self.assertEqual(str(Just('Adam') == Nothing()), 'Nothing')
self.assertEqual(str(Nothing() == Nothing()), 'Nothing')
self.assertEqual(str(Nothing() == Just('Imogen')), 'Nothing')
| 37.054054
| 78
| 0.591174
| 342
| 2,742
| 4.690058
| 0.125731
| 0.243142
| 0.201995
| 0.122195
| 0.732544
| 0.703865
| 0.679551
| 0.642145
| 0.511222
| 0.481297
| 0
| 0.000466
| 0.21663
| 2,742
| 73
| 79
| 37.561644
| 0.746276
| 0
| 0
| 0.25
| 0
| 0
| 0.107221
| 0
| 0
| 0
| 0
| 0
| 0.464286
| 1
| 0.214286
| false
| 0
| 0.035714
| 0.071429
| 0.267857
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0391350ff5403c977fa0fbdb326f594770fc8943
| 53
|
py
|
Python
|
catcher_rl/__main__.py
|
sohnryang/catcher-rl
|
8a45080f2be528be8abb94c3a4eea0dc700ab505
|
[
"MIT"
] | null | null | null |
catcher_rl/__main__.py
|
sohnryang/catcher-rl
|
8a45080f2be528be8abb94c3a4eea0dc700ab505
|
[
"MIT"
] | null | null | null |
catcher_rl/__main__.py
|
sohnryang/catcher-rl
|
8a45080f2be528be8abb94c3a4eea0dc700ab505
|
[
"MIT"
] | null | null | null |
"""__main__.py"""
import catcher_rl
catcher_rl.main()
| 17.666667
| 17
| 0.754717
| 8
| 53
| 4.25
| 0.625
| 0.529412
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.056604
| 53
| 3
| 18
| 17.666667
| 0.68
| 0.207547
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
039de2956a93e9fe91351642653c24e863f9b4ef
| 219
|
py
|
Python
|
py-data/plugin.video.arteplussept/problems/api-related/1/correct-usages/get_last7days.py
|
ualberta-smr/NFBugs
|
65d9ef603e9527b3d83f53af0606b1ae240513f1
|
[
"MIT"
] | 3
|
2019-10-01T19:58:24.000Z
|
2021-09-17T04:03:21.000Z
|
py-data/plugin.video.arteplussept/problems/api-related/1/correct-usages/get_last7days.py
|
senseconcordia/NFBugsExtended
|
60058ccbd64107018a92ede73056d08ecbdaaed2
|
[
"MIT"
] | 22
|
2018-08-23T15:15:37.000Z
|
2019-03-15T17:09:41.000Z
|
py-data/plugin.video.arteplussept/problems/api-related/1/correct-usages/get_last7days.py
|
senseconcordia/NFBugsExtended
|
60058ccbd64107018a92ede73056d08ecbdaaed2
|
[
"MIT"
] | 1
|
2019-02-11T18:26:36.000Z
|
2019-02-11T18:26:36.000Z
|
from xbmcswift2 import Plugin
from xbmcswift2 import actions
import requests
import os
import urllib2
import time
import datetime
def get_last7days():
return flatten([get_day(date) for (date, _) in get_dates()])
| 19.909091
| 64
| 0.780822
| 31
| 219
| 5.387097
| 0.645161
| 0.167665
| 0.239521
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021622
| 0.155251
| 219
| 10
| 65
| 21.9
| 0.881081
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| true
| 0
| 0.777778
| 0.111111
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
03f575511edc87fbaa0168ce74fe3d45c2492f5f
| 4,158
|
py
|
Python
|
src/contactapp/migrations/0001_initial.py
|
robertsmoto/sodavault
|
200e843be7abe6cc447647bba55c7c1309092e5e
|
[
"BSD-3-Clause"
] | null | null | null |
src/contactapp/migrations/0001_initial.py
|
robertsmoto/sodavault
|
200e843be7abe6cc447647bba55c7c1309092e5e
|
[
"BSD-3-Clause"
] | null | null | null |
src/contactapp/migrations/0001_initial.py
|
robertsmoto/sodavault
|
200e843be7abe6cc447647bba55c7c1309092e5e
|
[
"BSD-3-Clause"
] | null | null | null |
# Generated by Django 3.2.3 on 2021-08-23 17:30
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Company',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('company_type', models.CharField(blank=True, choices=[('LOCA', 'Location'), ('SUPP', 'Suppplier'), ('CUST', 'Customer')], max_length=4)),
('name', models.CharField(blank=True, max_length=200)),
('phone', models.CharField(blank=True, max_length=200)),
('website', models.CharField(blank=True, max_length=200)),
('address_01', models.CharField(blank=True, max_length=200)),
('address_02', models.CharField(blank=True, max_length=200)),
('city', models.CharField(blank=True, max_length=200)),
('state', models.CharField(blank=True, max_length=200)),
('zipcode', models.CharField(blank=True, max_length=200)),
('ship_address_01', models.CharField(blank=True, max_length=200)),
('ship_address_02', models.CharField(blank=True, max_length=200)),
('ship_city', models.CharField(blank=True, max_length=200)),
('ship_state', models.CharField(blank=True, max_length=200)),
('ship_zipcode', models.CharField(blank=True, max_length=200)),
],
options={
'verbose_name_plural': 'companies',
},
),
migrations.CreateModel(
name='Person',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('person_type', models.CharField(blank=True, choices=[('CUST', 'Customer'), ('SUPP', 'Suppplier')], max_length=4)),
('firstname', models.CharField(blank=True, max_length=200)),
('lastname', models.CharField(blank=True, max_length=200)),
('nickname', models.CharField(blank=True, max_length=200)),
('phone', models.CharField(blank=True, max_length=200)),
('mobile', models.CharField(blank=True, max_length=200)),
('email', models.CharField(blank=True, max_length=200)),
('website', models.CharField(blank=True, max_length=200)),
('address_01', models.CharField(blank=True, max_length=200)),
('address_02', models.CharField(blank=True, max_length=200)),
('city', models.CharField(blank=True, max_length=200)),
('state', models.CharField(blank=True, max_length=200)),
('zipcode', models.CharField(blank=True, max_length=200)),
('ship_address_01', models.CharField(blank=True, max_length=200)),
('ship_address_02', models.CharField(blank=True, max_length=200)),
('ship_city', models.CharField(blank=True, max_length=200)),
('ship_state', models.CharField(blank=True, max_length=200)),
('shop_zipcode', models.CharField(blank=True, max_length=200)),
('company', models.ForeignKey(blank=True, null=True, on_delete=django.db.models.deletion.CASCADE, to='contactapp.company')),
],
options={
'verbose_name_plural': 'people',
},
),
migrations.CreateModel(
name='Location',
fields=[
],
options={
'proxy': True,
'indexes': [],
'constraints': [],
},
bases=('contactapp.company',),
),
migrations.CreateModel(
name='Supplier',
fields=[
],
options={
'proxy': True,
'indexes': [],
'constraints': [],
},
bases=('contactapp.company',),
),
]
| 46.719101
| 154
| 0.549784
| 410
| 4,158
| 5.421951
| 0.197561
| 0.133603
| 0.287899
| 0.345479
| 0.735043
| 0.735043
| 0.703554
| 0.654971
| 0.614485
| 0.558704
| 0
| 0.042109
| 0.297499
| 4,158
| 88
| 155
| 47.25
| 0.718932
| 0.010823
| 0
| 0.641975
| 1
| 0
| 0.131355
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.024691
| 0
| 0.074074
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
03fcdf32087c44ef545515c06175fa5dfd2d8041
| 122
|
py
|
Python
|
rmepy/robot_modules/__init__.py
|
233a344a455/RobomasterEPlib
|
d0497d06d107c482e7b4c80c54c7c05c0bf62e21
|
[
"MIT"
] | 3
|
2020-04-23T14:19:59.000Z
|
2020-10-06T17:02:12.000Z
|
rmepy/robot_modules/__init__.py
|
233a344a455/RobomasterEPlib
|
d0497d06d107c482e7b4c80c54c7c05c0bf62e21
|
[
"MIT"
] | null | null | null |
rmepy/robot_modules/__init__.py
|
233a344a455/RobomasterEPlib
|
d0497d06d107c482e7b4c80c54c7c05c0bf62e21
|
[
"MIT"
] | 2
|
2020-05-13T08:15:16.000Z
|
2020-05-13T08:55:51.000Z
|
from .basic_ctrl import BasicCtrl
from .chassis import Chassis
from .gimbal import Gimbal
from .blaster import Blaster
| 30.5
| 34
| 0.811475
| 17
| 122
| 5.764706
| 0.470588
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155738
| 122
| 4
| 35
| 30.5
| 0.951456
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ff0b467f1ad7ee9bf9ae9be6fd164aa964a5004d
| 289
|
py
|
Python
|
tests/test_checks_interface.py
|
ployt0/server_monitor
|
835e48ed317b4b069ebd66675ca2d1b3120770c0
|
[
"MIT"
] | null | null | null |
tests/test_checks_interface.py
|
ployt0/server_monitor
|
835e48ed317b4b069ebd66675ca2d1b3120770c0
|
[
"MIT"
] | null | null | null |
tests/test_checks_interface.py
|
ployt0/server_monitor
|
835e48ed317b4b069ebd66675ca2d1b3120770c0
|
[
"MIT"
] | null | null | null |
from checks_interface import deserialise_simple_csv
def test_deserialise_simple_csv():
csv_list = deserialise_simple_csv("yolo,barry white,george soros,tilda swinton,None,bill gates")
assert csv_list == ['yolo', 'barrywhite', 'george soros', 'tilda swinton', None, 'bill gates']
| 41.285714
| 100
| 0.768166
| 39
| 289
| 5.435897
| 0.564103
| 0.240566
| 0.283019
| 0.216981
| 0.339623
| 0.339623
| 0.339623
| 0
| 0
| 0
| 0
| 0
| 0.121107
| 289
| 6
| 101
| 48.166667
| 0.834646
| 0
| 0
| 0
| 0
| 0
| 0.373702
| 0
| 0
| 0
| 0
| 0
| 0.25
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
205c895c1f60cd3b978288b5cd1339799a85f756
| 3,267
|
py
|
Python
|
tests/data/expected_tabulated.py
|
CozyDoomer/pypistats
|
39e4415c736d025d16aa0131d2107756d0f127fa
|
[
"MIT"
] | 1
|
2020-09-13T14:18:09.000Z
|
2020-09-13T14:18:09.000Z
|
tests/data/expected_tabulated.py
|
CozyDoomer/pypistats
|
39e4415c736d025d16aa0131d2107756d0f127fa
|
[
"MIT"
] | 5
|
2020-09-13T14:18:30.000Z
|
2020-09-13T14:33:37.000Z
|
tests/data/expected_tabulated.py
|
Smirenost/pypistats
|
431201080061ecd41d58b12ad4837de6883d66ae
|
[
"MIT"
] | null | null | null |
EXPECTED_TABULATED_HTML = """
<table>
<thead>
<tr>
<th>category</th>
<th>date</th>
<th>downloads</th>
</tr>
</thead>
<tbody>
<tr>
<td align="left">2.6</td>
<td align="left">2018-08-15</td>
<td align="right">51</td>
</tr>
<tr>
<td align="left">2.7</td>
<td align="left">2018-08-15</td>
<td align="right">63,749</td>
</tr>
<tr>
<td align="left">3.2</td>
<td align="left">2018-08-15</td>
<td align="right">2</td>
</tr>
<tr>
<td align="left">3.3</td>
<td align="left">2018-08-15</td>
<td align="right">40</td>
</tr>
<tr>
<td align="left">3.4</td>
<td align="left">2018-08-15</td>
<td align="right">6,095</td>
</tr>
<tr>
<td align="left">3.5</td>
<td align="left">2018-08-15</td>
<td align="right">20,358</td>
</tr>
<tr>
<td align="left">3.6</td>
<td align="left">2018-08-15</td>
<td align="right">35,274</td>
</tr>
<tr>
<td align="left">3.7</td>
<td align="left">2018-08-15</td>
<td align="right">6,595</td>
</tr>
<tr>
<td align="left">3.8</td>
<td align="left">2018-08-15</td>
<td align="right">3</td>
</tr>
<tr>
<td align="left">null</td>
<td align="left">2018-08-15</td>
<td align="right">1,019</td>
</tr>
</tbody>
</table>
"""
EXPECTED_TABULATED_MD = """
| category | date | downloads |
|----------|------------|----------:|
| 2.6 | 2018-08-15 | 51 |
| 2.7 | 2018-08-15 | 63,749 |
| 3.2 | 2018-08-15 | 2 |
| 3.3 | 2018-08-15 | 40 |
| 3.4 | 2018-08-15 | 6,095 |
| 3.5 | 2018-08-15 | 20,358 |
| 3.6 | 2018-08-15 | 35,274 |
| 3.7 | 2018-08-15 | 6,595 |
| 3.8 | 2018-08-15 | 3 |
| null | 2018-08-15 | 1,019 |
"""
EXPECTED_TABULATED_RST = """
.. table::
========== ============ ===========
category date downloads
========== ============ ===========
2.6 2018-08-15 51
2.7 2018-08-15 63,749
3.2 2018-08-15 2
3.3 2018-08-15 40
3.4 2018-08-15 6,095
3.5 2018-08-15 20,358
3.6 2018-08-15 35,274
3.7 2018-08-15 6,595
3.8 2018-08-15 3
null 2018-08-15 1,019
========== ============ ===========
""" # noqa: W291
EXPECTED_TABULATED_TSV = """
"category" \t "date" \t "downloads"
"2.6" \t "2018-08-15" \t 51
"2.7" \t "2018-08-15" \t 63,749
"3.2" \t "2018-08-15" \t 2
"3.3" \t "2018-08-15" \t 40
"3.4" \t "2018-08-15" \t 6,095
"3.5" \t "2018-08-15" \t 20,358
"3.6" \t "2018-08-15" \t 35,274
"3.7" \t "2018-08-15" \t 6,595
"3.8" \t "2018-08-15" \t 3
"null" \t "2018-08-15" \t 1,019
""" # noqa: W291
| 28.911504
| 44
| 0.379859
| 465
| 3,267
| 2.651613
| 0.094624
| 0.194647
| 0.25953
| 0.105434
| 0.769667
| 0.716951
| 0.643958
| 0.541768
| 0.541768
| 0.541768
| 0
| 0.266332
| 0.390878
| 3,267
| 112
| 45
| 29.169643
| 0.353266
| 0.006428
| 0
| 0.364486
| 0
| 0
| 0.958064
| 0.221092
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
207a4de3d61bc090e22bce94c09268f291db401d
| 395
|
py
|
Python
|
users/models.py
|
lizooo/webpage
|
4a203ad04991a4ae54d6bd1179054715b56095aa
|
[
"MIT"
] | 1
|
2021-12-16T15:56:35.000Z
|
2021-12-16T15:56:35.000Z
|
users/models.py
|
Na11a/webpage
|
29ba3ecee7c122a7ce92c6053077f00056e6ce28
|
[
"MIT"
] | 6
|
2020-04-25T17:43:43.000Z
|
2021-11-04T20:02:46.000Z
|
users/models.py
|
Na11a/webpage
|
29ba3ecee7c122a7ce92c6053077f00056e6ce28
|
[
"MIT"
] | 10
|
2020-10-05T12:55:54.000Z
|
2021-11-21T12:03:30.000Z
|
from django.db import models
# Create your models here.
from django.db import models
from datetime import datetime
class User(models.Model):
name = models.CharField(max_length=100)
surname = models.CharField(max_length=100)
email = models.EmailField(unique=True)
password = models.CharField(max_length=128)
created = models.DateTimeField('Created', default=datetime.now)
| 26.333333
| 67
| 0.756962
| 52
| 395
| 5.692308
| 0.538462
| 0.152027
| 0.182432
| 0.243243
| 0.344595
| 0
| 0
| 0
| 0
| 0
| 0
| 0.026786
| 0.149367
| 395
| 14
| 68
| 28.214286
| 0.854167
| 0.060759
| 0
| 0.222222
| 0
| 0
| 0.01897
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0.111111
| 0.333333
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
208a944865cc3b2efa6d830a3ced85fa06ed73c4
| 3,369
|
py
|
Python
|
myutils/pandas_util.py
|
stas00/fastai-misc
|
e7e8c18ed798f91b2e026c667f795f45992608b8
|
[
"Apache-2.0"
] | 1
|
2018-06-01T17:39:59.000Z
|
2018-06-01T17:39:59.000Z
|
myutils/pandas_util.py
|
stas00/fastai-misc
|
e7e8c18ed798f91b2e026c667f795f45992608b8
|
[
"Apache-2.0"
] | null | null | null |
myutils/pandas_util.py
|
stas00/fastai-misc
|
e7e8c18ed798f91b2e026c667f795f45992608b8
|
[
"Apache-2.0"
] | null | null | null |
# from https://github.com/ohmeow/pandas_examples
# import sys
# sys.path.append('/home/stas/fast.ai')
# from myutils.pandas_util import advanced_describe
import pandas as pd
######################### Data Examination ############################
# - made changes to unique_vals to show a small sample, regardless how many there are
def advanced_describe(df):
# get descriptive stats for dataframe for 'all' column dtypes
desc = df.describe(include='all').T
desc.drop(['top', 'freq', 'unique'], axis=1, inplace=True)
# update column counts (df.describe() returns NaN for non-numeric cols)
counts = pd.Series({ col: df[col].count() for col in df.columns })
desc.update(counts.to_frame('count'))
# add missing count/%
missings = df.isnull().sum()
desc = pd.concat([desc, missings.to_frame('missing')], axis=1)
desc['missing%'] = (desc['missing'] / len(desc)).round(2)
# add unique counts/%
uniques = pd.Series({ col: len(df[col].unique()) for col in df.columns })
desc = pd.concat([desc, uniques.to_frame('unique')], axis=1)
desc['unique%'] = (desc['unique'] / len(desc)).round(2)
unique_vals = pd.Series({ col: df[col].unique() if len(df[col].unique()) < 10 else [*df[col].unique()[0:10],"..."] for col in df.columns })
desc = pd.concat([desc, unique_vals.to_frame('unique_values')], axis=1, sort=True)
# add col dtype
dtypes = pd.Series({ col: df[col].dtype for col in df.columns })
desc = pd.concat([desc, dtypes.to_frame('dtype')], axis=1, sort=True)
return desc
# same as advanced_describe but with fever attributes to avoid
# horizontal scrolling
def advanced_describe_short(df):
# get descriptive stats for dataframe for 'all' column dtypes
desc = df.describe(include='all').T
desc.drop(['top', 'freq', 'unique', '25%', '50%', '75%'], axis=1, inplace=True)
# update column counts (df.describe() returns NaN for non-numeric cols)
counts = pd.Series({ col: df[col].count() for col in df.columns })
desc.update(counts.to_frame('count'))
# add missing count/%
missings = df.isnull().sum()
desc = pd.concat([desc, missings.to_frame('missing')], axis=1)
#desc['missing%'] = (desc['missing'] / len(desc)).round(2)
# add unique counts/%
uniques = pd.Series({ col: len(df[col].unique()) for col in df.columns })
desc = pd.concat([desc, uniques.to_frame('unique')], axis=1)
#desc['unique%'] = (desc['unique'] / len(desc)).round(2)
unique_vals = pd.Series({ col: df[col].unique() if len(df[col].unique()) < 10 else [*df[col].unique()[0:10],"..."] for col in df.columns })
desc = pd.concat([desc, unique_vals.to_frame('unique_values')], axis=1, sort=True)
# add col dtype
dtypes = pd.Series({ col: df[col].dtype for col in df.columns })
desc = pd.concat([desc, dtypes.to_frame('dtype')], axis=1, sort=True)
return desc
######################### Data Cleaning and Preparation ###############
def fillna_by_group(df, target_col, group_cols, agg='median'):
df[target_col] = df.groupby(group_cols)[target_col].transform(lambda x: x.fillna(eval(f'x.{agg}()')))
######################### Feature Engineering #########################
def add_by_regex(df, target_col, new_col, regex):
df[new_col] = df[target_col].str.extract(regex, expand=False)
| 41.592593
| 143
| 0.623034
| 484
| 3,369
| 4.260331
| 0.254132
| 0.029098
| 0.042677
| 0.038797
| 0.722599
| 0.722599
| 0.722599
| 0.722599
| 0.722599
| 0.722599
| 0
| 0.010722
| 0.169486
| 3,369
| 80
| 144
| 42.1125
| 0.726233
| 0.255565
| 0
| 0.685714
| 0
| 0
| 0.069142
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.114286
| false
| 0
| 0.028571
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
20aa2b45072a77da6b8b4f3ae6b02d4f7ca10fa0
| 61
|
py
|
Python
|
django_libretto/__init__.py
|
ze-phyr-us/django-libretto
|
b19d8aa21b9579ee91e81967a44d1c40f5588b17
|
[
"MIT"
] | null | null | null |
django_libretto/__init__.py
|
ze-phyr-us/django-libretto
|
b19d8aa21b9579ee91e81967a44d1c40f5588b17
|
[
"MIT"
] | null | null | null |
django_libretto/__init__.py
|
ze-phyr-us/django-libretto
|
b19d8aa21b9579ee91e81967a44d1c40f5588b17
|
[
"MIT"
] | null | null | null |
from . import decorators, forms, http, models, template, url
| 30.5
| 60
| 0.754098
| 8
| 61
| 5.75
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.147541
| 61
| 1
| 61
| 61
| 0.884615
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
20cf365aa273b16e874ac91e02b64fabdf860834
| 148
|
py
|
Python
|
backend/app/db/base.py
|
jnana-cetana/XMeme
|
cb7d5e31c455dc3c7e751dff9e7c8e067090936b
|
[
"MIT"
] | 19
|
2021-02-15T19:55:25.000Z
|
2022-02-01T09:05:07.000Z
|
backend/app/db/base.py
|
jnana-cetana/XMeme
|
cb7d5e31c455dc3c7e751dff9e7c8e067090936b
|
[
"MIT"
] | null | null | null |
backend/app/db/base.py
|
jnana-cetana/XMeme
|
cb7d5e31c455dc3c7e751dff9e7c8e067090936b
|
[
"MIT"
] | null | null | null |
# Import all the models, so that Base has them before being imported by Alembic
from app.db.base_class import Base
from app.models.meme import Meme
| 37
| 79
| 0.804054
| 27
| 148
| 4.37037
| 0.703704
| 0.118644
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.155405
| 148
| 3
| 80
| 49.333333
| 0.944
| 0.52027
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
455ec713befd7532c83b45583b4e08a6cd8de1bc
| 70
|
py
|
Python
|
src/prodis/packets/handshaking/__init__.py
|
blubberdiblub/prodis
|
c70d31b7df0358edd8969e9a94341b3771ee2e0f
|
[
"MIT"
] | null | null | null |
src/prodis/packets/handshaking/__init__.py
|
blubberdiblub/prodis
|
c70d31b7df0358edd8969e9a94341b3771ee2e0f
|
[
"MIT"
] | null | null | null |
src/prodis/packets/handshaking/__init__.py
|
blubberdiblub/prodis
|
c70d31b7df0358edd8969e9a94341b3771ee2e0f
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
from .serverbound import Packet as ServerBound
| 17.5
| 46
| 0.785714
| 10
| 70
| 5.5
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.128571
| 70
| 3
| 47
| 23.333333
| 0.901639
| 0.285714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
4592ee2a9205ee3dd55eed2050ff0223402372a9
| 52
|
py
|
Python
|
src/core/sessions/buffers/gui/configuration/twitter/__init__.py
|
Oire/TheQube
|
fcfd8a68b15948e0740642d635db24adef8cc314
|
[
"MIT"
] | 21
|
2015-08-02T21:26:14.000Z
|
2019-12-27T09:57:44.000Z
|
src/core/sessions/buffers/gui/configuration/twitter/__init__.py
|
Oire/TheQube
|
fcfd8a68b15948e0740642d635db24adef8cc314
|
[
"MIT"
] | 34
|
2015-01-12T00:38:14.000Z
|
2020-08-31T11:19:37.000Z
|
src/core/sessions/buffers/gui/configuration/twitter/__init__.py
|
Oire/TheQube
|
fcfd8a68b15948e0740642d635db24adef8cc314
|
[
"MIT"
] | 15
|
2015-03-24T15:42:30.000Z
|
2020-09-24T20:26:42.000Z
|
from main import BufferConfigDialog
import panels
| 17.333333
| 36
| 0.846154
| 6
| 52
| 7.333333
| 0.833333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.153846
| 52
| 2
| 37
| 26
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
45b6462ff728650517df81b64394ea4eb748f3d3
| 160
|
py
|
Python
|
snek/exts/syncer/__init__.py
|
Snek-Network/snek
|
5f443c00d701c985ef9362d0d98d2ac07b1c56e0
|
[
"MIT"
] | null | null | null |
snek/exts/syncer/__init__.py
|
Snek-Network/snek
|
5f443c00d701c985ef9362d0d98d2ac07b1c56e0
|
[
"MIT"
] | 20
|
2020-07-25T17:16:46.000Z
|
2020-10-01T19:05:55.000Z
|
snek/exts/syncer/__init__.py
|
Snek-Network/snek
|
5f443c00d701c985ef9362d0d98d2ac07b1c56e0
|
[
"MIT"
] | 3
|
2020-08-02T20:15:58.000Z
|
2020-12-29T08:48:12.000Z
|
from snek.bot import Snek
from snek.exts.syncer.cog import Syncer
def setup(bot: Snek) -> None:
"""Load the `Syncer` cog."""
bot.add_cog(Syncer(bot))
| 20
| 39
| 0.675
| 26
| 160
| 4.115385
| 0.5
| 0.149533
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.175
| 160
| 7
| 40
| 22.857143
| 0.810606
| 0.1375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.5
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
45c1caa23ecd364d3384d99dfa0f6a2683ce53fe
| 951
|
py
|
Python
|
virtualisation/triplestore/triplestoreadapter.py
|
CityPulse/CP_Resourcemanagement
|
aa670fa89d5e086a98ade3ccc152518be55abf2e
|
[
"MIT"
] | 2
|
2016-11-03T14:57:45.000Z
|
2019-05-13T13:21:08.000Z
|
virtualisation/triplestore/triplestoreadapter.py
|
CityPulse/CP_Resourcemanagement
|
aa670fa89d5e086a98ade3ccc152518be55abf2e
|
[
"MIT"
] | null | null | null |
virtualisation/triplestore/triplestoreadapter.py
|
CityPulse/CP_Resourcemanagement
|
aa670fa89d5e086a98ade3ccc152518be55abf2e
|
[
"MIT"
] | 1
|
2020-07-23T11:27:15.000Z
|
2020-07-23T11:27:15.000Z
|
from abc import abstractmethod
from abc import ABCMeta
__author__ = 'Marten Fischer (m.fischer@hs-osnabrueck.de)'
class TripleStoreAdapter:
__metaclass__ = ABCMeta
@abstractmethod
def graphExists(self, graphName):
pass
@abstractmethod
def createGraph(self, graphName):
pass
@abstractmethod
def saveTriple(self, graphName, subject, predicate, object):
pass
@abstractmethod
def saveGraph(self, graph, graphName):
pass
@abstractmethod
def saveMultipleGraphs(self, serialisedGraph, graphName):
pass
@abstractmethod
def getObservationGraph(self, graphName, sensor, start, end, asGraph):
pass
@abstractmethod
def deleteGraph(self, graphName):
pass
@abstractmethod
def getLastQoIData_List(self, graphName, sensorName):
pass
@abstractmethod
def getStreamMinMaxDate(self, graphName, sensorName):
pass
| 21.613636
| 74
| 0.684543
| 86
| 951
| 7.465116
| 0.453488
| 0.238318
| 0.261682
| 0.233645
| 0.158879
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.245005
| 951
| 44
| 75
| 21.613636
| 0.89415
| 0
| 0
| 0.5625
| 0
| 0
| 0.045168
| 0.029412
| 0
| 0
| 0
| 0
| 0
| 1
| 0.28125
| false
| 0.28125
| 0.0625
| 0
| 0.40625
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
45e62668ada234da171d4601a142283296ac3f75
| 181
|
py
|
Python
|
carmcmc/__init__.py
|
metegenez/WAVEPAL
|
fa2bb91e2c7e63681ae4592929215c96bc523597
|
[
"MIT"
] | 39
|
2015-01-25T19:24:09.000Z
|
2022-02-28T11:55:28.000Z
|
carmcmc/__init__.py
|
metegenez/WAVEPAL
|
fa2bb91e2c7e63681ae4592929215c96bc523597
|
[
"MIT"
] | 13
|
2015-04-29T12:37:45.000Z
|
2021-11-28T23:31:29.000Z
|
carmcmc/__init__.py
|
metegenez/WAVEPAL
|
fa2bb91e2c7e63681ae4592929215c96bc523597
|
[
"MIT"
] | 19
|
2015-09-15T00:41:28.000Z
|
2021-07-28T07:28:47.000Z
|
from _carmcmc import *
from carma_pack import CarmaModel, CarmaSample, Car1Sample, power_spectrum, carma_variance, \
carma_process, get_ar_roots
from samplers import MCMCSample
| 36.2
| 93
| 0.828729
| 23
| 181
| 6.217391
| 0.73913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006329
| 0.127072
| 181
| 4
| 94
| 45.25
| 0.898734
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.75
| 0
| 0.75
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
aff709693fd92476b3e892543ca1fbdd4a69c8c9
| 139
|
py
|
Python
|
pyeccodes/defs/grib2/dimensionType_table.py
|
ecmwf/pyeccodes
|
dce2c72d3adcc0cb801731366be53327ce13a00b
|
[
"Apache-2.0"
] | 7
|
2020-04-14T09:41:17.000Z
|
2021-08-06T09:38:19.000Z
|
pyeccodes/defs/grib2/dimensionType_table.py
|
ecmwf/pyeccodes
|
dce2c72d3adcc0cb801731366be53327ce13a00b
|
[
"Apache-2.0"
] | null | null | null |
pyeccodes/defs/grib2/dimensionType_table.py
|
ecmwf/pyeccodes
|
dce2c72d3adcc0cb801731366be53327ce13a00b
|
[
"Apache-2.0"
] | 3
|
2020-04-30T12:44:48.000Z
|
2020-12-15T08:40:26.000Z
|
def load(h):
return ({'abbr': 'layer', 'code': 0, 'title': 'layer'},
{'abbr': 'missing', 'code': 255, 'title': 'missing'})
| 34.75
| 65
| 0.489209
| 16
| 139
| 4.25
| 0.6875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.037037
| 0.223022
| 139
| 3
| 66
| 46.333333
| 0.592593
| 0
| 0
| 0
| 0
| 0
| 0.359712
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
b3100d65adacd7054c8de611ff54f81327ab317b
| 5,201
|
py
|
Python
|
grab_closest_rmsd.py
|
Miro-Astore/mdanalysis_scripts
|
faf59c7b3b63ab103a709941e5cc2e5d7c1d0b23
|
[
"MIT"
] | 1
|
2021-06-16T11:34:29.000Z
|
2021-06-16T11:34:29.000Z
|
grab_closest_rmsd.py
|
Miro-Astore/mdanalysis_scripts
|
faf59c7b3b63ab103a709941e5cc2e5d7c1d0b23
|
[
"MIT"
] | null | null | null |
grab_closest_rmsd.py
|
Miro-Astore/mdanalysis_scripts
|
faf59c7b3b63ab103a709941e5cc2e5d7c1d0b23
|
[
"MIT"
] | 1
|
2021-06-16T11:34:31.000Z
|
2021-06-16T11:34:31.000Z
|
import MDAnalysis as mda
import MDAnalysis.analysis.rms
import numpy as np
ref = mda.Universe ('./pca_2_ref.pdb')
traj_u = mda.Universe ('ionized.psf','sum.xtc')
ref_sel = "name CA and resid 46 47 48 49 50 51 52 53 54 55 56 57 58 59 60 61 62 63 64 65 66 67 68 69 70 71 72 73 74 75 76 77 78 79 80 81 82 83 84 85 86 87 88 89 90 91 92 93 94 95 96 97 98 99 100 101 102 103 104 105 106 107 108 109 110 111 112 113 114 115 116 117 118 119 120 121 122 123 124 125 126 127 128 129 130 131 132 133 134 135 136 137 138 139 140 141 142 143 144 145 146 147 148 149 150 151 152 153 154 155 156 157 158 159 160 161 162 163 164 165 166 167 168 169 170 171 172 173 174 175 176 177 178 179 180 181 182 183 184 185 186 187 188 189 190 191 192 193 194 195 196 197 198 199 200 201 202 203 204 205 206 207 208 209 210 211 212 213 214 215 216 217 218 219 220 221 222 223 224 225 226 227 228 229 230 231 232 233 234 235 236 237 238 239 240 241 242 243 244 245 246 247 248 249 250 251 252 253 254 255 256 257 258 259 260 261 262 263 264 265 266 267 268 269 270 271 272 273 274 275 276 277 278 279 280 281 282 283 284 285 286 287 288 289 290 291 292 293 294 295 296 297 298 299 300 301 302 303 304 305 306 307 308 309 310 311 312 313 314 315 316 317 318 319 320 321 322 323 324 325 326 327 328 329 330 331 332 333 334 335 336 337 338 339 340 341 342 343 344 345 346 347 348 349 350 351 352 353 354 355 356 357 358 359 360 361 362 363 364 365 366 367 368 369 370 371 372 373 374 375 376 377 391 392 393 394 395 396 397 398 399 400 440 441 442 443 444 445 446 447 448 449 450 451 452 453 454 455 456 457 458 459 460 461 462 463 464 465 466 467 468 469 470 471 472 473 474 475 476 477 478 479 480 481 482 483 484 485 486 487 488 489 490 491 492 493 494 495 496 497 498 499 500 501 502 503 504 505 506 507 508 509 510 511 512 513 514 515 516 517 518 519 520 521 522 523 524 525 526 527 528 529 530 531 532 533 534 535 536 537 538 539 540 541 542 543 544 545 546 547 548 549 550 551 552 553 554 555 556 557 558 559 560 561 562 563 564 565 566 567 568 569 570 571 572 573 574 575 576 577 578 579 580 581 582 583 584 585 586 587 588 589 590 591 592 593 594 595 596 597 598 599 600 601 602 603 
604 605 606 607 608 609 610 611 612 613 614 615 616 617 618 619 620 621 622 623 624 625 626 627 628 629 630 631 632 633 634 635 636 844 845 846 847 848 849 850 851 852 853 854 855 856 857 858 859 860 861 862 863 864 865 866 867 868 869 870 871 872 873 874 875 876 877 878 879 880 881 882 883 884 885 911 912 913 914 915 916 917 918 919 920 921 922 923 924 925 926 927 928 929 930 931 932 933 934 935 936 937 938 939 940 941 942 943 944 945 946 947 948 949 950 951 952 953 954 955 956 957 958 959 960 961 962 963 964 965 966 967 968 969 970 971 972 973 974 975 976 977 978 979 980 981 982 983 984 985 986 987 988 989 990 991 992 993 994 995 996 997 998 999 1000 1001 1002 1003 1004 1005 1006 1007 1008 1009 1010 1011 1012 1013 1014 1015 1016 1017 1018 1019 1020 1021 1022 1023 1024 1025 1026 1027 1028 1029 1030 1031 1032 1033 1034 1035 1036 1037 1038 1039 1040 1041 1042 1043 1044 1045 1046 1047 1048 1049 1050 1051 1052 1053 1054 1055 1056 1057 1058 1059 1060 1061 1062 1063 1064 1065 1066 1067 1068 1069 1070 1071 1072 1073 1074 1075 1076 1077 1078 1079 1080 1081 1082 1083 1084 1085 1086 1087 1088 1089 1090 1091 1092 1093 1094 1095 1096 1097 1098 1099 1100 1101 1102 1103 1104 1105 1106 1107 1108 1109 1110 1111 1112 1113 1114 1115 1116 1117 1118 1119 1120 1121 1122 1123 1124 1125 1126 1127 1128 1129 1130 1131 1132 1133 1134 1135 1136 1137 1138 1139 1140 1141 1142 1143 1144 1145 1146 1147 1148 1149 1150 1151 1152 1153 1154 1155 1156 1157 1158 1159 1160 1161 1162 1163 1164 1165 1166 1167 1168 1169 1210 1211 1212 1213 1214 1215 1216 1217 1218 1219 1220 1221 1222 1223 1224 1225 1226 1227 1228 1229 1230 1231 1232 1233 1234 1235 1236 1237 1238 1239 1240 1241 1242 1243 1244 1245 1246 1247 1248 1249 1250 1251 1252 1253 1254 1255 1256 1257 1258 1259 1260 1261 1262 1263 1264 1265 1266 1267 1268 1269 1270 1271 1272 1273 1274 1275 1276 1277 1278 1279 1280 1281 1282 1283 1284 1285 1286 1287 1288 1289 1290 1291 1292 1293 1294 1295 1296 1297 1298 1299 1300 1301 1302 1303 1304 1305 1306 1307 
1308 1309 1310 1311 1312 1313 1314 1315 1316 1317 1318 1319 1320 1321 1322 1323 1324 1325 1326 1327 1328 1329 1330 1331 1332 1333 1334 1335 1336 1337 1338 1339 1340 1341 1342 1343 1344 1345 1346 1347 1348 1349 1350 1351 1352 1353 1354 1355 1356 1357 1358 1359 1360 1361 1362 1363 1364 1365 1366 1367 1368 1369 1370 1371 1372 1373 1374 1375 1376 1377 1378 1379 1380 1381 1382 1383 1384 1385 1386 1387 1388 1389 1390 1391 1392 1393 1394 1395 1396 1397 1398 1399 1400 1401 1402 1403 1404 1405 1406 1407 1408 1409 1410 1411 1412 1413 1414 1415 1416 1417 1418 1419 1420 1421 1422 1423 1424 1425 1426 1427 1428 1429 1430 1431 1432 1433 1434 1435 1436 1437 1438 1439 1440 1441 1442 1443 1444 1445 1446 1447 1448 1449"
R = MDAnalysis.analysis.rms.RMSD(traj_u,ref,select=ref_sel)
R.run()
closest_frame=np.argmin(R.rmsd[:,-1])
print ("closest frame to reference is " + str(closest_frame) + " with an RMSD of " + str(R.rmsd[closest_frame] ))
traj_u.trajectory[closest_frame]
write_sel=traj_u.select_atoms('all')
write_sel.write ('closest_to_ref.pdb')
| 305.941176
| 4,705
| 0.766583
| 1,170
| 5,201
| 3.393162
| 0.960684
| 0.015113
| 0.010579
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.885117
| 0.21842
| 5,201
| 16
| 4,706
| 325.0625
| 0.091513
| 0
| 0
| 0
| 0
| 0.076923
| 0.921923
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.230769
| 0
| 0.230769
| 0.076923
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b3240e96d503aecba5fa6738ed7f723f65cd5c2c
| 77
|
py
|
Python
|
src/fate_of_dice/system/call_of_cthulhu/__init__.py
|
bonczeq/FateOfDice
|
ce1704ac490f55bc600c0963958d4175104e85e5
|
[
"MIT"
] | null | null | null |
src/fate_of_dice/system/call_of_cthulhu/__init__.py
|
bonczeq/FateOfDice
|
ce1704ac490f55bc600c0963958d4175104e85e5
|
[
"MIT"
] | null | null | null |
src/fate_of_dice/system/call_of_cthulhu/__init__.py
|
bonczeq/FateOfDice
|
ce1704ac490f55bc600c0963958d4175104e85e5
|
[
"MIT"
] | null | null | null |
from .skill_check import check_skill, SkillCheckResult, SkillCheckResultType
| 38.5
| 76
| 0.883117
| 8
| 77
| 8.25
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.077922
| 77
| 1
| 77
| 77
| 0.929577
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b3532df5397a8d5aa65b3ed84d50aae7151c36c0
| 95,382
|
py
|
Python
|
sdk/python/pulumi_aws_native/emr/outputs.py
|
AaronFriel/pulumi-aws-native
|
5621690373ac44accdbd20b11bae3be1baf022d1
|
[
"Apache-2.0"
] | 29
|
2021-09-30T19:32:07.000Z
|
2022-03-22T21:06:08.000Z
|
sdk/python/pulumi_aws_native/emr/outputs.py
|
AaronFriel/pulumi-aws-native
|
5621690373ac44accdbd20b11bae3be1baf022d1
|
[
"Apache-2.0"
] | 232
|
2021-09-30T19:26:26.000Z
|
2022-03-31T23:22:06.000Z
|
sdk/python/pulumi_aws_native/emr/outputs.py
|
AaronFriel/pulumi-aws-native
|
5621690373ac44accdbd20b11bae3be1baf022d1
|
[
"Apache-2.0"
] | 4
|
2021-11-10T19:42:01.000Z
|
2022-02-05T10:15:49.000Z
|
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
__all__ = [
'ClusterApplication',
'ClusterAutoScalingPolicy',
'ClusterBootstrapActionConfig',
'ClusterCloudWatchAlarmDefinition',
'ClusterComputeLimits',
'ClusterConfiguration',
'ClusterEbsBlockDeviceConfig',
'ClusterEbsConfiguration',
'ClusterHadoopJarStepConfig',
'ClusterInstanceFleetConfig',
'ClusterInstanceFleetProvisioningSpecifications',
'ClusterInstanceGroupConfig',
'ClusterInstanceTypeConfig',
'ClusterJobFlowInstancesConfig',
'ClusterKerberosAttributes',
'ClusterKeyValue',
'ClusterManagedScalingPolicy',
'ClusterMetricDimension',
'ClusterOnDemandProvisioningSpecification',
'ClusterPlacementType',
'ClusterScalingAction',
'ClusterScalingConstraints',
'ClusterScalingRule',
'ClusterScalingTrigger',
'ClusterScriptBootstrapActionConfig',
'ClusterSimpleScalingPolicyConfiguration',
'ClusterSpotProvisioningSpecification',
'ClusterStepConfig',
'ClusterTag',
'ClusterVolumeSpecification',
'InstanceFleetConfigConfiguration',
'InstanceFleetConfigEbsBlockDeviceConfig',
'InstanceFleetConfigEbsConfiguration',
'InstanceFleetConfigInstanceFleetProvisioningSpecifications',
'InstanceFleetConfigInstanceTypeConfig',
'InstanceFleetConfigOnDemandProvisioningSpecification',
'InstanceFleetConfigSpotProvisioningSpecification',
'InstanceFleetConfigVolumeSpecification',
'InstanceGroupConfigAutoScalingPolicy',
'InstanceGroupConfigCloudWatchAlarmDefinition',
'InstanceGroupConfigConfiguration',
'InstanceGroupConfigEbsBlockDeviceConfig',
'InstanceGroupConfigEbsConfiguration',
'InstanceGroupConfigMetricDimension',
'InstanceGroupConfigScalingAction',
'InstanceGroupConfigScalingConstraints',
'InstanceGroupConfigScalingRule',
'InstanceGroupConfigScalingTrigger',
'InstanceGroupConfigSimpleScalingPolicyConfiguration',
'InstanceGroupConfigVolumeSpecification',
'StepHadoopJarStepConfig',
'StepKeyValue',
'StudioTag',
]
@pulumi.output_type
class ClusterApplication(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "additionalInfo":
suggest = "additional_info"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ClusterApplication. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ClusterApplication.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ClusterApplication.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
additional_info: Optional[Any] = None,
args: Optional[Sequence[str]] = None,
name: Optional[str] = None,
version: Optional[str] = None):
if additional_info is not None:
pulumi.set(__self__, "additional_info", additional_info)
if args is not None:
pulumi.set(__self__, "args", args)
if name is not None:
pulumi.set(__self__, "name", name)
if version is not None:
pulumi.set(__self__, "version", version)
@property
@pulumi.getter(name="additionalInfo")
def additional_info(self) -> Optional[Any]:
return pulumi.get(self, "additional_info")
@property
@pulumi.getter
def args(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "args")
@property
@pulumi.getter
def name(self) -> Optional[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter
def version(self) -> Optional[str]:
return pulumi.get(self, "version")
@pulumi.output_type
class ClusterAutoScalingPolicy(dict):
def __init__(__self__, *,
constraints: 'outputs.ClusterScalingConstraints',
rules: Sequence['outputs.ClusterScalingRule']):
pulumi.set(__self__, "constraints", constraints)
pulumi.set(__self__, "rules", rules)
@property
@pulumi.getter
def constraints(self) -> 'outputs.ClusterScalingConstraints':
return pulumi.get(self, "constraints")
@property
@pulumi.getter
def rules(self) -> Sequence['outputs.ClusterScalingRule']:
return pulumi.get(self, "rules")
@pulumi.output_type
class ClusterBootstrapActionConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "scriptBootstrapAction":
suggest = "script_bootstrap_action"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ClusterBootstrapActionConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ClusterBootstrapActionConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ClusterBootstrapActionConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
name: str,
script_bootstrap_action: 'outputs.ClusterScriptBootstrapActionConfig'):
pulumi.set(__self__, "name", name)
pulumi.set(__self__, "script_bootstrap_action", script_bootstrap_action)
@property
@pulumi.getter
def name(self) -> str:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="scriptBootstrapAction")
def script_bootstrap_action(self) -> 'outputs.ClusterScriptBootstrapActionConfig':
return pulumi.get(self, "script_bootstrap_action")
@pulumi.output_type
class ClusterCloudWatchAlarmDefinition(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "comparisonOperator":
suggest = "comparison_operator"
elif key == "metricName":
suggest = "metric_name"
elif key == "evaluationPeriods":
suggest = "evaluation_periods"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ClusterCloudWatchAlarmDefinition. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ClusterCloudWatchAlarmDefinition.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ClusterCloudWatchAlarmDefinition.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
comparison_operator: str,
metric_name: str,
period: int,
threshold: float,
dimensions: Optional[Sequence['outputs.ClusterMetricDimension']] = None,
evaluation_periods: Optional[int] = None,
namespace: Optional[str] = None,
statistic: Optional[str] = None,
unit: Optional[str] = None):
pulumi.set(__self__, "comparison_operator", comparison_operator)
pulumi.set(__self__, "metric_name", metric_name)
pulumi.set(__self__, "period", period)
pulumi.set(__self__, "threshold", threshold)
if dimensions is not None:
pulumi.set(__self__, "dimensions", dimensions)
if evaluation_periods is not None:
pulumi.set(__self__, "evaluation_periods", evaluation_periods)
if namespace is not None:
pulumi.set(__self__, "namespace", namespace)
if statistic is not None:
pulumi.set(__self__, "statistic", statistic)
if unit is not None:
pulumi.set(__self__, "unit", unit)
@property
@pulumi.getter(name="comparisonOperator")
def comparison_operator(self) -> str:
return pulumi.get(self, "comparison_operator")
@property
@pulumi.getter(name="metricName")
def metric_name(self) -> str:
return pulumi.get(self, "metric_name")
@property
@pulumi.getter
def period(self) -> int:
return pulumi.get(self, "period")
@property
@pulumi.getter
def threshold(self) -> float:
return pulumi.get(self, "threshold")
@property
@pulumi.getter
def dimensions(self) -> Optional[Sequence['outputs.ClusterMetricDimension']]:
return pulumi.get(self, "dimensions")
@property
@pulumi.getter(name="evaluationPeriods")
def evaluation_periods(self) -> Optional[int]:
return pulumi.get(self, "evaluation_periods")
@property
@pulumi.getter
def namespace(self) -> Optional[str]:
return pulumi.get(self, "namespace")
@property
@pulumi.getter
def statistic(self) -> Optional[str]:
return pulumi.get(self, "statistic")
@property
@pulumi.getter
def unit(self) -> Optional[str]:
return pulumi.get(self, "unit")
@pulumi.output_type
class ClusterComputeLimits(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "maximumCapacityUnits":
suggest = "maximum_capacity_units"
elif key == "minimumCapacityUnits":
suggest = "minimum_capacity_units"
elif key == "unitType":
suggest = "unit_type"
elif key == "maximumCoreCapacityUnits":
suggest = "maximum_core_capacity_units"
elif key == "maximumOnDemandCapacityUnits":
suggest = "maximum_on_demand_capacity_units"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ClusterComputeLimits. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ClusterComputeLimits.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ClusterComputeLimits.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
maximum_capacity_units: int,
minimum_capacity_units: int,
unit_type: str,
maximum_core_capacity_units: Optional[int] = None,
maximum_on_demand_capacity_units: Optional[int] = None):
pulumi.set(__self__, "maximum_capacity_units", maximum_capacity_units)
pulumi.set(__self__, "minimum_capacity_units", minimum_capacity_units)
pulumi.set(__self__, "unit_type", unit_type)
if maximum_core_capacity_units is not None:
pulumi.set(__self__, "maximum_core_capacity_units", maximum_core_capacity_units)
if maximum_on_demand_capacity_units is not None:
pulumi.set(__self__, "maximum_on_demand_capacity_units", maximum_on_demand_capacity_units)
@property
@pulumi.getter(name="maximumCapacityUnits")
def maximum_capacity_units(self) -> int:
return pulumi.get(self, "maximum_capacity_units")
@property
@pulumi.getter(name="minimumCapacityUnits")
def minimum_capacity_units(self) -> int:
return pulumi.get(self, "minimum_capacity_units")
@property
@pulumi.getter(name="unitType")
def unit_type(self) -> str:
return pulumi.get(self, "unit_type")
@property
@pulumi.getter(name="maximumCoreCapacityUnits")
def maximum_core_capacity_units(self) -> Optional[int]:
return pulumi.get(self, "maximum_core_capacity_units")
@property
@pulumi.getter(name="maximumOnDemandCapacityUnits")
def maximum_on_demand_capacity_units(self) -> Optional[int]:
return pulumi.get(self, "maximum_on_demand_capacity_units")
@pulumi.output_type
class ClusterConfiguration(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "configurationProperties":
suggest = "configuration_properties"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ClusterConfiguration. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ClusterConfiguration.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ClusterConfiguration.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
classification: Optional[str] = None,
configuration_properties: Optional[Any] = None,
configurations: Optional[Sequence['outputs.ClusterConfiguration']] = None):
if classification is not None:
pulumi.set(__self__, "classification", classification)
if configuration_properties is not None:
pulumi.set(__self__, "configuration_properties", configuration_properties)
if configurations is not None:
pulumi.set(__self__, "configurations", configurations)
@property
@pulumi.getter
def classification(self) -> Optional[str]:
return pulumi.get(self, "classification")
@property
@pulumi.getter(name="configurationProperties")
def configuration_properties(self) -> Optional[Any]:
return pulumi.get(self, "configuration_properties")
@property
@pulumi.getter
def configurations(self) -> Optional[Sequence['outputs.ClusterConfiguration']]:
return pulumi.get(self, "configurations")
@pulumi.output_type
class ClusterEbsBlockDeviceConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "volumeSpecification":
suggest = "volume_specification"
elif key == "volumesPerInstance":
suggest = "volumes_per_instance"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ClusterEbsBlockDeviceConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ClusterEbsBlockDeviceConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ClusterEbsBlockDeviceConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
volume_specification: 'outputs.ClusterVolumeSpecification',
volumes_per_instance: Optional[int] = None):
pulumi.set(__self__, "volume_specification", volume_specification)
if volumes_per_instance is not None:
pulumi.set(__self__, "volumes_per_instance", volumes_per_instance)
@property
@pulumi.getter(name="volumeSpecification")
def volume_specification(self) -> 'outputs.ClusterVolumeSpecification':
return pulumi.get(self, "volume_specification")
@property
@pulumi.getter(name="volumesPerInstance")
def volumes_per_instance(self) -> Optional[int]:
return pulumi.get(self, "volumes_per_instance")
@pulumi.output_type
class ClusterEbsConfiguration(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "ebsBlockDeviceConfigs":
suggest = "ebs_block_device_configs"
elif key == "ebsOptimized":
suggest = "ebs_optimized"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ClusterEbsConfiguration. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ClusterEbsConfiguration.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ClusterEbsConfiguration.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
ebs_block_device_configs: Optional[Sequence['outputs.ClusterEbsBlockDeviceConfig']] = None,
ebs_optimized: Optional[bool] = None):
if ebs_block_device_configs is not None:
pulumi.set(__self__, "ebs_block_device_configs", ebs_block_device_configs)
if ebs_optimized is not None:
pulumi.set(__self__, "ebs_optimized", ebs_optimized)
@property
@pulumi.getter(name="ebsBlockDeviceConfigs")
def ebs_block_device_configs(self) -> Optional[Sequence['outputs.ClusterEbsBlockDeviceConfig']]:
return pulumi.get(self, "ebs_block_device_configs")
@property
@pulumi.getter(name="ebsOptimized")
def ebs_optimized(self) -> Optional[bool]:
return pulumi.get(self, "ebs_optimized")
@pulumi.output_type
class ClusterHadoopJarStepConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "mainClass":
suggest = "main_class"
elif key == "stepProperties":
suggest = "step_properties"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ClusterHadoopJarStepConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ClusterHadoopJarStepConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ClusterHadoopJarStepConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
jar: str,
args: Optional[Sequence[str]] = None,
main_class: Optional[str] = None,
step_properties: Optional[Sequence['outputs.ClusterKeyValue']] = None):
pulumi.set(__self__, "jar", jar)
if args is not None:
pulumi.set(__self__, "args", args)
if main_class is not None:
pulumi.set(__self__, "main_class", main_class)
if step_properties is not None:
pulumi.set(__self__, "step_properties", step_properties)
@property
@pulumi.getter
def jar(self) -> str:
return pulumi.get(self, "jar")
@property
@pulumi.getter
def args(self) -> Optional[Sequence[str]]:
return pulumi.get(self, "args")
@property
@pulumi.getter(name="mainClass")
def main_class(self) -> Optional[str]:
return pulumi.get(self, "main_class")
@property
@pulumi.getter(name="stepProperties")
def step_properties(self) -> Optional[Sequence['outputs.ClusterKeyValue']]:
return pulumi.get(self, "step_properties")
@pulumi.output_type
class ClusterInstanceFleetConfig(dict):
@staticmethod
def __key_warning(key: str):
suggest = None
if key == "instanceTypeConfigs":
suggest = "instance_type_configs"
elif key == "launchSpecifications":
suggest = "launch_specifications"
elif key == "targetOnDemandCapacity":
suggest = "target_on_demand_capacity"
elif key == "targetSpotCapacity":
suggest = "target_spot_capacity"
if suggest:
pulumi.log.warn(f"Key '{key}' not found in ClusterInstanceFleetConfig. Access the value via the '{suggest}' property getter instead.")
def __getitem__(self, key: str) -> Any:
ClusterInstanceFleetConfig.__key_warning(key)
return super().__getitem__(key)
def get(self, key: str, default = None) -> Any:
ClusterInstanceFleetConfig.__key_warning(key)
return super().get(key, default)
def __init__(__self__, *,
instance_type_configs: Optional[Sequence['outputs.ClusterInstanceTypeConfig']] = None,
launch_specifications: Optional['outputs.ClusterInstanceFleetProvisioningSpecifications'] = None,
name: Optional[str] = None,
target_on_demand_capacity: Optional[int] = None,
target_spot_capacity: Optional[int] = None):
if instance_type_configs is not None:
pulumi.set(__self__, "instance_type_configs", instance_type_configs)
if launch_specifications is not None:
pulumi.set(__self__, "launch_specifications", launch_specifications)
if name is not None:
pulumi.set(__self__, "name", name)
if target_on_demand_capacity is not None:
pulumi.set(__self__, "target_on_demand_capacity", target_on_demand_capacity)
if target_spot_capacity is not None:
pulumi.set(__self__, "target_spot_capacity", target_spot_capacity)
@property
@pulumi.getter(name="instanceTypeConfigs")
def instance_type_configs(self) -> Optional[Sequence['outputs.ClusterInstanceTypeConfig']]:
return pulumi.get(self, "instance_type_configs")
@property
@pulumi.getter(name="launchSpecifications")
def launch_specifications(self) -> Optional['outputs.ClusterInstanceFleetProvisioningSpecifications']:
return pulumi.get(self, "launch_specifications")
@property
@pulumi.getter
def name(self) -> Optional[str]:
return pulumi.get(self, "name")
@property
@pulumi.getter(name="targetOnDemandCapacity")
def target_on_demand_capacity(self) -> Optional[int]:
return pulumi.get(self, "target_on_demand_capacity")
@property
@pulumi.getter(name="targetSpotCapacity")
def target_spot_capacity(self) -> Optional[int]:
return pulumi.get(self, "target_spot_capacity")
@pulumi.output_type
class ClusterInstanceFleetProvisioningSpecifications(dict):
    """Dict-backed output type; camelCase keys are exposed via snake_case property getters."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name.
        suggest = {
            "onDemandSpecification": "on_demand_specification",
            "spotSpecification": "spot_specification",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterInstanceFleetProvisioningSpecifications. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 on_demand_specification: Optional['outputs.ClusterOnDemandProvisioningSpecification'] = None,
                 spot_specification: Optional['outputs.ClusterSpotProvisioningSpecification'] = None):
        # Only store keys whose values were actually provided.
        for prop, val in (
            ("on_demand_specification", on_demand_specification),
            ("spot_specification", spot_specification),
        ):
            if val is not None:
                pulumi.set(__self__, prop, val)

    @property
    @pulumi.getter(name="onDemandSpecification")
    def on_demand_specification(self) -> Optional['outputs.ClusterOnDemandProvisioningSpecification']:
        """Value stored under ``on_demand_specification``, if set."""
        return pulumi.get(self, "on_demand_specification")

    @property
    @pulumi.getter(name="spotSpecification")
    def spot_specification(self) -> Optional['outputs.ClusterSpotProvisioningSpecification']:
        """Value stored under ``spot_specification``, if set."""
        return pulumi.get(self, "spot_specification")
@pulumi.output_type
class ClusterInstanceGroupConfig(dict):
    """Dict-backed output type; camelCase keys are exposed via snake_case property getters."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name.
        suggest = {
            "instanceCount": "instance_count",
            "instanceType": "instance_type",
            "autoScalingPolicy": "auto_scaling_policy",
            "bidPrice": "bid_price",
            "customAmiId": "custom_ami_id",
            "ebsConfiguration": "ebs_configuration",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterInstanceGroupConfig. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 instance_count: int,
                 instance_type: str,
                 auto_scaling_policy: Optional['outputs.ClusterAutoScalingPolicy'] = None,
                 bid_price: Optional[str] = None,
                 configurations: Optional[Sequence['outputs.ClusterConfiguration']] = None,
                 custom_ami_id: Optional[str] = None,
                 ebs_configuration: Optional['outputs.ClusterEbsConfiguration'] = None,
                 market: Optional[str] = None,
                 name: Optional[str] = None):
        # Required keys are always stored; optional keys only when provided.
        pulumi.set(__self__, "instance_count", instance_count)
        pulumi.set(__self__, "instance_type", instance_type)
        for prop, val in (
            ("auto_scaling_policy", auto_scaling_policy),
            ("bid_price", bid_price),
            ("configurations", configurations),
            ("custom_ami_id", custom_ami_id),
            ("ebs_configuration", ebs_configuration),
            ("market", market),
            ("name", name),
        ):
            if val is not None:
                pulumi.set(__self__, prop, val)

    @property
    @pulumi.getter(name="instanceCount")
    def instance_count(self) -> int:
        """Value stored under ``instance_count``."""
        return pulumi.get(self, "instance_count")

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        """Value stored under ``instance_type``."""
        return pulumi.get(self, "instance_type")

    @property
    @pulumi.getter(name="autoScalingPolicy")
    def auto_scaling_policy(self) -> Optional['outputs.ClusterAutoScalingPolicy']:
        """Value stored under ``auto_scaling_policy``, if set."""
        return pulumi.get(self, "auto_scaling_policy")

    @property
    @pulumi.getter(name="bidPrice")
    def bid_price(self) -> Optional[str]:
        """Value stored under ``bid_price``, if set."""
        return pulumi.get(self, "bid_price")

    @property
    @pulumi.getter
    def configurations(self) -> Optional[Sequence['outputs.ClusterConfiguration']]:
        """Value stored under ``configurations``, if set."""
        return pulumi.get(self, "configurations")

    @property
    @pulumi.getter(name="customAmiId")
    def custom_ami_id(self) -> Optional[str]:
        """Value stored under ``custom_ami_id``, if set."""
        return pulumi.get(self, "custom_ami_id")

    @property
    @pulumi.getter(name="ebsConfiguration")
    def ebs_configuration(self) -> Optional['outputs.ClusterEbsConfiguration']:
        """Value stored under ``ebs_configuration``, if set."""
        return pulumi.get(self, "ebs_configuration")

    @property
    @pulumi.getter
    def market(self) -> Optional[str]:
        """Value stored under ``market``, if set."""
        return pulumi.get(self, "market")

    @property
    @pulumi.getter
    def name(self) -> Optional[str]:
        """Value stored under ``name``, if set."""
        return pulumi.get(self, "name")
@pulumi.output_type
class ClusterInstanceTypeConfig(dict):
    """Dict-backed output type; camelCase keys are exposed via snake_case property getters."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name.
        suggest = {
            "instanceType": "instance_type",
            "bidPrice": "bid_price",
            "bidPriceAsPercentageOfOnDemandPrice": "bid_price_as_percentage_of_on_demand_price",
            "customAmiId": "custom_ami_id",
            "ebsConfiguration": "ebs_configuration",
            "weightedCapacity": "weighted_capacity",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterInstanceTypeConfig. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 instance_type: str,
                 bid_price: Optional[str] = None,
                 bid_price_as_percentage_of_on_demand_price: Optional[float] = None,
                 configurations: Optional[Sequence['outputs.ClusterConfiguration']] = None,
                 custom_ami_id: Optional[str] = None,
                 ebs_configuration: Optional['outputs.ClusterEbsConfiguration'] = None,
                 weighted_capacity: Optional[int] = None):
        # Required key is always stored; optional keys only when provided.
        pulumi.set(__self__, "instance_type", instance_type)
        for prop, val in (
            ("bid_price", bid_price),
            ("bid_price_as_percentage_of_on_demand_price", bid_price_as_percentage_of_on_demand_price),
            ("configurations", configurations),
            ("custom_ami_id", custom_ami_id),
            ("ebs_configuration", ebs_configuration),
            ("weighted_capacity", weighted_capacity),
        ):
            if val is not None:
                pulumi.set(__self__, prop, val)

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        """Value stored under ``instance_type``."""
        return pulumi.get(self, "instance_type")

    @property
    @pulumi.getter(name="bidPrice")
    def bid_price(self) -> Optional[str]:
        """Value stored under ``bid_price``, if set."""
        return pulumi.get(self, "bid_price")

    @property
    @pulumi.getter(name="bidPriceAsPercentageOfOnDemandPrice")
    def bid_price_as_percentage_of_on_demand_price(self) -> Optional[float]:
        """Value stored under ``bid_price_as_percentage_of_on_demand_price``, if set."""
        return pulumi.get(self, "bid_price_as_percentage_of_on_demand_price")

    @property
    @pulumi.getter
    def configurations(self) -> Optional[Sequence['outputs.ClusterConfiguration']]:
        """Value stored under ``configurations``, if set."""
        return pulumi.get(self, "configurations")

    @property
    @pulumi.getter(name="customAmiId")
    def custom_ami_id(self) -> Optional[str]:
        """Value stored under ``custom_ami_id``, if set."""
        return pulumi.get(self, "custom_ami_id")

    @property
    @pulumi.getter(name="ebsConfiguration")
    def ebs_configuration(self) -> Optional['outputs.ClusterEbsConfiguration']:
        """Value stored under ``ebs_configuration``, if set."""
        return pulumi.get(self, "ebs_configuration")

    @property
    @pulumi.getter(name="weightedCapacity")
    def weighted_capacity(self) -> Optional[int]:
        """Value stored under ``weighted_capacity``, if set."""
        return pulumi.get(self, "weighted_capacity")
@pulumi.output_type
class ClusterJobFlowInstancesConfig(dict):
    """Dict-backed output type; camelCase keys are exposed via snake_case property getters."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name.
        suggest = {
            "additionalMasterSecurityGroups": "additional_master_security_groups",
            "additionalSlaveSecurityGroups": "additional_slave_security_groups",
            "coreInstanceFleet": "core_instance_fleet",
            "coreInstanceGroup": "core_instance_group",
            "ec2KeyName": "ec2_key_name",
            "ec2SubnetId": "ec2_subnet_id",
            "ec2SubnetIds": "ec2_subnet_ids",
            "emrManagedMasterSecurityGroup": "emr_managed_master_security_group",
            "emrManagedSlaveSecurityGroup": "emr_managed_slave_security_group",
            "hadoopVersion": "hadoop_version",
            "keepJobFlowAliveWhenNoSteps": "keep_job_flow_alive_when_no_steps",
            "masterInstanceFleet": "master_instance_fleet",
            "masterInstanceGroup": "master_instance_group",
            "serviceAccessSecurityGroup": "service_access_security_group",
            "terminationProtected": "termination_protected",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterJobFlowInstancesConfig. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 additional_master_security_groups: Optional[Sequence[str]] = None,
                 additional_slave_security_groups: Optional[Sequence[str]] = None,
                 core_instance_fleet: Optional['outputs.ClusterInstanceFleetConfig'] = None,
                 core_instance_group: Optional['outputs.ClusterInstanceGroupConfig'] = None,
                 ec2_key_name: Optional[str] = None,
                 ec2_subnet_id: Optional[str] = None,
                 ec2_subnet_ids: Optional[Sequence[str]] = None,
                 emr_managed_master_security_group: Optional[str] = None,
                 emr_managed_slave_security_group: Optional[str] = None,
                 hadoop_version: Optional[str] = None,
                 keep_job_flow_alive_when_no_steps: Optional[bool] = None,
                 master_instance_fleet: Optional['outputs.ClusterInstanceFleetConfig'] = None,
                 master_instance_group: Optional['outputs.ClusterInstanceGroupConfig'] = None,
                 placement: Optional['outputs.ClusterPlacementType'] = None,
                 service_access_security_group: Optional[str] = None,
                 termination_protected: Optional[bool] = None):
        # Only store keys whose values were actually provided.
        for prop, val in (
            ("additional_master_security_groups", additional_master_security_groups),
            ("additional_slave_security_groups", additional_slave_security_groups),
            ("core_instance_fleet", core_instance_fleet),
            ("core_instance_group", core_instance_group),
            ("ec2_key_name", ec2_key_name),
            ("ec2_subnet_id", ec2_subnet_id),
            ("ec2_subnet_ids", ec2_subnet_ids),
            ("emr_managed_master_security_group", emr_managed_master_security_group),
            ("emr_managed_slave_security_group", emr_managed_slave_security_group),
            ("hadoop_version", hadoop_version),
            ("keep_job_flow_alive_when_no_steps", keep_job_flow_alive_when_no_steps),
            ("master_instance_fleet", master_instance_fleet),
            ("master_instance_group", master_instance_group),
            ("placement", placement),
            ("service_access_security_group", service_access_security_group),
            ("termination_protected", termination_protected),
        ):
            if val is not None:
                pulumi.set(__self__, prop, val)

    @property
    @pulumi.getter(name="additionalMasterSecurityGroups")
    def additional_master_security_groups(self) -> Optional[Sequence[str]]:
        """Value stored under ``additional_master_security_groups``, if set."""
        return pulumi.get(self, "additional_master_security_groups")

    @property
    @pulumi.getter(name="additionalSlaveSecurityGroups")
    def additional_slave_security_groups(self) -> Optional[Sequence[str]]:
        """Value stored under ``additional_slave_security_groups``, if set."""
        return pulumi.get(self, "additional_slave_security_groups")

    @property
    @pulumi.getter(name="coreInstanceFleet")
    def core_instance_fleet(self) -> Optional['outputs.ClusterInstanceFleetConfig']:
        """Value stored under ``core_instance_fleet``, if set."""
        return pulumi.get(self, "core_instance_fleet")

    @property
    @pulumi.getter(name="coreInstanceGroup")
    def core_instance_group(self) -> Optional['outputs.ClusterInstanceGroupConfig']:
        """Value stored under ``core_instance_group``, if set."""
        return pulumi.get(self, "core_instance_group")

    @property
    @pulumi.getter(name="ec2KeyName")
    def ec2_key_name(self) -> Optional[str]:
        """Value stored under ``ec2_key_name``, if set."""
        return pulumi.get(self, "ec2_key_name")

    @property
    @pulumi.getter(name="ec2SubnetId")
    def ec2_subnet_id(self) -> Optional[str]:
        """Value stored under ``ec2_subnet_id``, if set."""
        return pulumi.get(self, "ec2_subnet_id")

    @property
    @pulumi.getter(name="ec2SubnetIds")
    def ec2_subnet_ids(self) -> Optional[Sequence[str]]:
        """Value stored under ``ec2_subnet_ids``, if set."""
        return pulumi.get(self, "ec2_subnet_ids")

    @property
    @pulumi.getter(name="emrManagedMasterSecurityGroup")
    def emr_managed_master_security_group(self) -> Optional[str]:
        """Value stored under ``emr_managed_master_security_group``, if set."""
        return pulumi.get(self, "emr_managed_master_security_group")

    @property
    @pulumi.getter(name="emrManagedSlaveSecurityGroup")
    def emr_managed_slave_security_group(self) -> Optional[str]:
        """Value stored under ``emr_managed_slave_security_group``, if set."""
        return pulumi.get(self, "emr_managed_slave_security_group")

    @property
    @pulumi.getter(name="hadoopVersion")
    def hadoop_version(self) -> Optional[str]:
        """Value stored under ``hadoop_version``, if set."""
        return pulumi.get(self, "hadoop_version")

    @property
    @pulumi.getter(name="keepJobFlowAliveWhenNoSteps")
    def keep_job_flow_alive_when_no_steps(self) -> Optional[bool]:
        """Value stored under ``keep_job_flow_alive_when_no_steps``, if set."""
        return pulumi.get(self, "keep_job_flow_alive_when_no_steps")

    @property
    @pulumi.getter(name="masterInstanceFleet")
    def master_instance_fleet(self) -> Optional['outputs.ClusterInstanceFleetConfig']:
        """Value stored under ``master_instance_fleet``, if set."""
        return pulumi.get(self, "master_instance_fleet")

    @property
    @pulumi.getter(name="masterInstanceGroup")
    def master_instance_group(self) -> Optional['outputs.ClusterInstanceGroupConfig']:
        """Value stored under ``master_instance_group``, if set."""
        return pulumi.get(self, "master_instance_group")

    @property
    @pulumi.getter
    def placement(self) -> Optional['outputs.ClusterPlacementType']:
        """Value stored under ``placement``, if set."""
        return pulumi.get(self, "placement")

    @property
    @pulumi.getter(name="serviceAccessSecurityGroup")
    def service_access_security_group(self) -> Optional[str]:
        """Value stored under ``service_access_security_group``, if set."""
        return pulumi.get(self, "service_access_security_group")

    @property
    @pulumi.getter(name="terminationProtected")
    def termination_protected(self) -> Optional[bool]:
        """Value stored under ``termination_protected``, if set."""
        return pulumi.get(self, "termination_protected")
@pulumi.output_type
class ClusterKerberosAttributes(dict):
    """Dict-backed output type; camelCase keys are exposed via snake_case property getters."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name.
        suggest = {
            "kdcAdminPassword": "kdc_admin_password",
            "aDDomainJoinPassword": "a_d_domain_join_password",
            "aDDomainJoinUser": "a_d_domain_join_user",
            "crossRealmTrustPrincipalPassword": "cross_realm_trust_principal_password",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterKerberosAttributes. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 kdc_admin_password: str,
                 realm: str,
                 a_d_domain_join_password: Optional[str] = None,
                 a_d_domain_join_user: Optional[str] = None,
                 cross_realm_trust_principal_password: Optional[str] = None):
        # Required keys are always stored; optional keys only when provided.
        pulumi.set(__self__, "kdc_admin_password", kdc_admin_password)
        pulumi.set(__self__, "realm", realm)
        for prop, val in (
            ("a_d_domain_join_password", a_d_domain_join_password),
            ("a_d_domain_join_user", a_d_domain_join_user),
            ("cross_realm_trust_principal_password", cross_realm_trust_principal_password),
        ):
            if val is not None:
                pulumi.set(__self__, prop, val)

    @property
    @pulumi.getter(name="kdcAdminPassword")
    def kdc_admin_password(self) -> str:
        """Value stored under ``kdc_admin_password``."""
        return pulumi.get(self, "kdc_admin_password")

    @property
    @pulumi.getter
    def realm(self) -> str:
        """Value stored under ``realm``."""
        return pulumi.get(self, "realm")

    @property
    @pulumi.getter(name="aDDomainJoinPassword")
    def a_d_domain_join_password(self) -> Optional[str]:
        """Value stored under ``a_d_domain_join_password``, if set."""
        return pulumi.get(self, "a_d_domain_join_password")

    @property
    @pulumi.getter(name="aDDomainJoinUser")
    def a_d_domain_join_user(self) -> Optional[str]:
        """Value stored under ``a_d_domain_join_user``, if set."""
        return pulumi.get(self, "a_d_domain_join_user")

    @property
    @pulumi.getter(name="crossRealmTrustPrincipalPassword")
    def cross_realm_trust_principal_password(self) -> Optional[str]:
        """Value stored under ``cross_realm_trust_principal_password``, if set."""
        return pulumi.get(self, "cross_realm_trust_principal_password")
@pulumi.output_type
class ClusterKeyValue(dict):
    """Dict-backed output type holding an optional key/value pair."""

    def __init__(__self__, *,
                 key: Optional[str] = None,
                 value: Optional[str] = None):
        # Only store keys whose values were actually provided.
        for prop, val in (("key", key), ("value", value)):
            if val is not None:
                pulumi.set(__self__, prop, val)

    @property
    @pulumi.getter
    def key(self) -> Optional[str]:
        """Value stored under ``key``, if set."""
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def value(self) -> Optional[str]:
        """Value stored under ``value``, if set."""
        return pulumi.get(self, "value")
@pulumi.output_type
class ClusterManagedScalingPolicy(dict):
    """Dict-backed output type; camelCase keys are exposed via snake_case property getters."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name.
        suggest = {"computeLimits": "compute_limits"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterManagedScalingPolicy. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 compute_limits: Optional['outputs.ClusterComputeLimits'] = None):
        # Only store the key when a value was actually provided.
        if compute_limits is not None:
            pulumi.set(__self__, "compute_limits", compute_limits)

    @property
    @pulumi.getter(name="computeLimits")
    def compute_limits(self) -> Optional['outputs.ClusterComputeLimits']:
        """Value stored under ``compute_limits``, if set."""
        return pulumi.get(self, "compute_limits")
@pulumi.output_type
class ClusterMetricDimension(dict):
    """Dict-backed output type holding a required key/value pair."""

    def __init__(__self__, *,
                 key: str,
                 value: str):
        # Both keys are required and always stored.
        for prop, val in (("key", key), ("value", value)):
            pulumi.set(__self__, prop, val)

    @property
    @pulumi.getter
    def key(self) -> str:
        """Value stored under ``key``."""
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def value(self) -> str:
        """Value stored under ``value``."""
        return pulumi.get(self, "value")
@pulumi.output_type
class ClusterOnDemandProvisioningSpecification(dict):
    """Dict-backed output type; camelCase keys are exposed via snake_case property getters."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name.
        suggest = {"allocationStrategy": "allocation_strategy"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterOnDemandProvisioningSpecification. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 allocation_strategy: str):
        pulumi.set(__self__, "allocation_strategy", allocation_strategy)

    @property
    @pulumi.getter(name="allocationStrategy")
    def allocation_strategy(self) -> str:
        """Value stored under ``allocation_strategy``."""
        return pulumi.get(self, "allocation_strategy")
@pulumi.output_type
class ClusterPlacementType(dict):
    """Dict-backed output type; camelCase keys are exposed via snake_case property getters."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name.
        suggest = {"availabilityZone": "availability_zone"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterPlacementType. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 availability_zone: str):
        pulumi.set(__self__, "availability_zone", availability_zone)

    @property
    @pulumi.getter(name="availabilityZone")
    def availability_zone(self) -> str:
        """Value stored under ``availability_zone``."""
        return pulumi.get(self, "availability_zone")
@pulumi.output_type
class ClusterScalingAction(dict):
    """Dict-backed output type; camelCase keys are exposed via snake_case property getters."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name.
        suggest = {"simpleScalingPolicyConfiguration": "simple_scaling_policy_configuration"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterScalingAction. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 simple_scaling_policy_configuration: 'outputs.ClusterSimpleScalingPolicyConfiguration',
                 market: Optional[str] = None):
        # Required key is always stored; the optional key only when provided.
        pulumi.set(__self__, "simple_scaling_policy_configuration", simple_scaling_policy_configuration)
        if market is not None:
            pulumi.set(__self__, "market", market)

    @property
    @pulumi.getter(name="simpleScalingPolicyConfiguration")
    def simple_scaling_policy_configuration(self) -> 'outputs.ClusterSimpleScalingPolicyConfiguration':
        """Value stored under ``simple_scaling_policy_configuration``."""
        return pulumi.get(self, "simple_scaling_policy_configuration")

    @property
    @pulumi.getter
    def market(self) -> Optional[str]:
        """Value stored under ``market``, if set."""
        return pulumi.get(self, "market")
@pulumi.output_type
class ClusterScalingConstraints(dict):
    """Dict-backed output type; camelCase keys are exposed via snake_case property getters."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name.
        suggest = {
            "maxCapacity": "max_capacity",
            "minCapacity": "min_capacity",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterScalingConstraints. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 max_capacity: int,
                 min_capacity: int):
        # Both keys are required and always stored.
        for prop, val in (("max_capacity", max_capacity), ("min_capacity", min_capacity)):
            pulumi.set(__self__, prop, val)

    @property
    @pulumi.getter(name="maxCapacity")
    def max_capacity(self) -> int:
        """Value stored under ``max_capacity``."""
        return pulumi.get(self, "max_capacity")

    @property
    @pulumi.getter(name="minCapacity")
    def min_capacity(self) -> int:
        """Value stored under ``min_capacity``."""
        return pulumi.get(self, "min_capacity")
@pulumi.output_type
class ClusterScalingRule(dict):
    """Dict-backed output type holding a scaling rule's action, name, trigger and description."""

    def __init__(__self__, *,
                 action: 'outputs.ClusterScalingAction',
                 name: str,
                 trigger: 'outputs.ClusterScalingTrigger',
                 description: Optional[str] = None):
        # Required keys are always stored; the optional key only when provided.
        for prop, val in (("action", action), ("name", name), ("trigger", trigger)):
            pulumi.set(__self__, prop, val)
        if description is not None:
            pulumi.set(__self__, "description", description)

    @property
    @pulumi.getter
    def action(self) -> 'outputs.ClusterScalingAction':
        """Value stored under ``action``."""
        return pulumi.get(self, "action")

    @property
    @pulumi.getter
    def name(self) -> str:
        """Value stored under ``name``."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def trigger(self) -> 'outputs.ClusterScalingTrigger':
        """Value stored under ``trigger``."""
        return pulumi.get(self, "trigger")

    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        """Value stored under ``description``, if set."""
        return pulumi.get(self, "description")
@pulumi.output_type
class ClusterScalingTrigger(dict):
    """Dict-backed output type; camelCase keys are exposed via snake_case property getters."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name.
        suggest = {"cloudWatchAlarmDefinition": "cloud_watch_alarm_definition"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterScalingTrigger. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 cloud_watch_alarm_definition: 'outputs.ClusterCloudWatchAlarmDefinition'):
        pulumi.set(__self__, "cloud_watch_alarm_definition", cloud_watch_alarm_definition)

    @property
    @pulumi.getter(name="cloudWatchAlarmDefinition")
    def cloud_watch_alarm_definition(self) -> 'outputs.ClusterCloudWatchAlarmDefinition':
        """Value stored under ``cloud_watch_alarm_definition``."""
        return pulumi.get(self, "cloud_watch_alarm_definition")
@pulumi.output_type
class ClusterScriptBootstrapActionConfig(dict):
    """Dict-backed output type holding a script path and optional argument list."""

    def __init__(__self__, *,
                 path: str,
                 args: Optional[Sequence[str]] = None):
        # Required key is always stored; the optional key only when provided.
        pulumi.set(__self__, "path", path)
        if args is not None:
            pulumi.set(__self__, "args", args)

    @property
    @pulumi.getter
    def path(self) -> str:
        """Value stored under ``path``."""
        return pulumi.get(self, "path")

    @property
    @pulumi.getter
    def args(self) -> Optional[Sequence[str]]:
        """Value stored under ``args``, if set."""
        return pulumi.get(self, "args")
@pulumi.output_type
class ClusterSimpleScalingPolicyConfiguration(dict):
    """Dict-backed output type; camelCase keys are exposed via snake_case property getters."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name.
        suggest = {
            "scalingAdjustment": "scaling_adjustment",
            "adjustmentType": "adjustment_type",
            "coolDown": "cool_down",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterSimpleScalingPolicyConfiguration. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 scaling_adjustment: int,
                 adjustment_type: Optional[str] = None,
                 cool_down: Optional[int] = None):
        # Required key is always stored; optional keys only when provided.
        pulumi.set(__self__, "scaling_adjustment", scaling_adjustment)
        for prop, val in (("adjustment_type", adjustment_type), ("cool_down", cool_down)):
            if val is not None:
                pulumi.set(__self__, prop, val)

    @property
    @pulumi.getter(name="scalingAdjustment")
    def scaling_adjustment(self) -> int:
        """Value stored under ``scaling_adjustment``."""
        return pulumi.get(self, "scaling_adjustment")

    @property
    @pulumi.getter(name="adjustmentType")
    def adjustment_type(self) -> Optional[str]:
        """Value stored under ``adjustment_type``, if set."""
        return pulumi.get(self, "adjustment_type")

    @property
    @pulumi.getter(name="coolDown")
    def cool_down(self) -> Optional[int]:
        """Value stored under ``cool_down``, if set."""
        return pulumi.get(self, "cool_down")
@pulumi.output_type
class ClusterSpotProvisioningSpecification(dict):
    """Dict-backed output type; camelCase keys are exposed via snake_case property getters."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name.
        suggest = {
            "timeoutAction": "timeout_action",
            "timeoutDurationMinutes": "timeout_duration_minutes",
            "allocationStrategy": "allocation_strategy",
            "blockDurationMinutes": "block_duration_minutes",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterSpotProvisioningSpecification. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 timeout_action: str,
                 timeout_duration_minutes: int,
                 allocation_strategy: Optional[str] = None,
                 block_duration_minutes: Optional[int] = None):
        # Required keys are always stored; optional keys only when provided.
        pulumi.set(__self__, "timeout_action", timeout_action)
        pulumi.set(__self__, "timeout_duration_minutes", timeout_duration_minutes)
        for prop, val in (
            ("allocation_strategy", allocation_strategy),
            ("block_duration_minutes", block_duration_minutes),
        ):
            if val is not None:
                pulumi.set(__self__, prop, val)

    @property
    @pulumi.getter(name="timeoutAction")
    def timeout_action(self) -> str:
        """Value stored under ``timeout_action``."""
        return pulumi.get(self, "timeout_action")

    @property
    @pulumi.getter(name="timeoutDurationMinutes")
    def timeout_duration_minutes(self) -> int:
        """Value stored under ``timeout_duration_minutes``."""
        return pulumi.get(self, "timeout_duration_minutes")

    @property
    @pulumi.getter(name="allocationStrategy")
    def allocation_strategy(self) -> Optional[str]:
        """Value stored under ``allocation_strategy``, if set."""
        return pulumi.get(self, "allocation_strategy")

    @property
    @pulumi.getter(name="blockDurationMinutes")
    def block_duration_minutes(self) -> Optional[int]:
        """Value stored under ``block_duration_minutes``, if set."""
        return pulumi.get(self, "block_duration_minutes")
@pulumi.output_type
class ClusterStepConfig(dict):
    """Dict-backed output type; camelCase keys are exposed via snake_case property getters."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name.
        suggest = {
            "hadoopJarStep": "hadoop_jar_step",
            "actionOnFailure": "action_on_failure",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterStepConfig. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 hadoop_jar_step: 'outputs.ClusterHadoopJarStepConfig',
                 name: str,
                 action_on_failure: Optional[str] = None):
        # Required keys are always stored; the optional key only when provided.
        pulumi.set(__self__, "hadoop_jar_step", hadoop_jar_step)
        pulumi.set(__self__, "name", name)
        if action_on_failure is not None:
            pulumi.set(__self__, "action_on_failure", action_on_failure)

    @property
    @pulumi.getter(name="hadoopJarStep")
    def hadoop_jar_step(self) -> 'outputs.ClusterHadoopJarStepConfig':
        """Value stored under ``hadoop_jar_step``."""
        return pulumi.get(self, "hadoop_jar_step")

    @property
    @pulumi.getter
    def name(self) -> str:
        """Value stored under ``name``."""
        return pulumi.get(self, "name")

    @property
    @pulumi.getter(name="actionOnFailure")
    def action_on_failure(self) -> Optional[str]:
        """Value stored under ``action_on_failure``, if set."""
        return pulumi.get(self, "action_on_failure")
@pulumi.output_type
class ClusterTag(dict):
    """Dict-backed output type holding a required key/value tag pair."""

    def __init__(__self__, *,
                 key: str,
                 value: str):
        # Both keys are required and always stored.
        for prop, val in (("key", key), ("value", value)):
            pulumi.set(__self__, prop, val)

    @property
    @pulumi.getter
    def key(self) -> str:
        """Value stored under ``key``."""
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def value(self) -> str:
        """Value stored under ``value``."""
        return pulumi.get(self, "value")
@pulumi.output_type
class ClusterVolumeSpecification(dict):
    """Dict-backed output type; camelCase keys are exposed via snake_case property getters."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name.
        suggest = {
            "sizeInGB": "size_in_gb",
            "volumeType": "volume_type",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in ClusterVolumeSpecification. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 size_in_gb: int,
                 volume_type: str,
                 iops: Optional[int] = None):
        # Required keys are always stored; the optional key only when provided.
        pulumi.set(__self__, "size_in_gb", size_in_gb)
        pulumi.set(__self__, "volume_type", volume_type)
        if iops is not None:
            pulumi.set(__self__, "iops", iops)

    @property
    @pulumi.getter(name="sizeInGB")
    def size_in_gb(self) -> int:
        """Value stored under ``size_in_gb``."""
        return pulumi.get(self, "size_in_gb")

    @property
    @pulumi.getter(name="volumeType")
    def volume_type(self) -> str:
        """Value stored under ``volume_type``."""
        return pulumi.get(self, "volume_type")

    @property
    @pulumi.getter
    def iops(self) -> Optional[int]:
        """Value stored under ``iops``, if set."""
        return pulumi.get(self, "iops")
@pulumi.output_type
class InstanceFleetConfigConfiguration(dict):
    """Dict-backed output type; camelCase keys are exposed via snake_case property getters."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name.
        suggest = {"configurationProperties": "configuration_properties"}.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceFleetConfigConfiguration. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 classification: Optional[str] = None,
                 configuration_properties: Optional[Any] = None,
                 configurations: Optional[Sequence['outputs.InstanceFleetConfigConfiguration']] = None):
        # Only store keys whose values were actually provided.
        for prop, val in (
            ("classification", classification),
            ("configuration_properties", configuration_properties),
            ("configurations", configurations),
        ):
            if val is not None:
                pulumi.set(__self__, prop, val)

    @property
    @pulumi.getter
    def classification(self) -> Optional[str]:
        """Value stored under ``classification``, if set."""
        return pulumi.get(self, "classification")

    @property
    @pulumi.getter(name="configurationProperties")
    def configuration_properties(self) -> Optional[Any]:
        """Value stored under ``configuration_properties``, if set."""
        return pulumi.get(self, "configuration_properties")

    @property
    @pulumi.getter
    def configurations(self) -> Optional[Sequence['outputs.InstanceFleetConfigConfiguration']]:
        """Value stored under ``configurations``, if set."""
        return pulumi.get(self, "configurations")
@pulumi.output_type
class InstanceFleetConfigEbsBlockDeviceConfig(dict):
    """Dict-backed output type; camelCase keys are exposed via snake_case property getters."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name.
        suggest = {
            "volumeSpecification": "volume_specification",
            "volumesPerInstance": "volumes_per_instance",
        }.get(key)
        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceFleetConfigEbsBlockDeviceConfig. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        self.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default=None) -> Any:
        self.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 volume_specification: 'outputs.InstanceFleetConfigVolumeSpecification',
                 volumes_per_instance: Optional[int] = None):
        # Required key is always stored; the optional key only when provided.
        pulumi.set(__self__, "volume_specification", volume_specification)
        if volumes_per_instance is not None:
            pulumi.set(__self__, "volumes_per_instance", volumes_per_instance)

    @property
    @pulumi.getter(name="volumeSpecification")
    def volume_specification(self) -> 'outputs.InstanceFleetConfigVolumeSpecification':
        """Value stored under ``volume_specification``."""
        return pulumi.get(self, "volume_specification")

    @property
    @pulumi.getter(name="volumesPerInstance")
    def volumes_per_instance(self) -> Optional[int]:
        """Value stored under ``volumes_per_instance``, if set."""
        return pulumi.get(self, "volumes_per_instance")
@pulumi.output_type
class InstanceFleetConfigEbsConfiguration(dict):
    """Pulumi output type: EBS settings for an instance fleet — optional
    block-device configs and an optional EBS-optimized flag."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name for the warning below.
        suggest = None
        if key == "ebsBlockDeviceConfigs":
            suggest = "ebs_block_device_configs"
        elif key == "ebsOptimized":
            suggest = "ebs_optimized"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceFleetConfigEbsConfiguration. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        InstanceFleetConfigEbsConfiguration.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        InstanceFleetConfigEbsConfiguration.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 ebs_block_device_configs: Optional[Sequence['outputs.InstanceFleetConfigEbsBlockDeviceConfig']] = None,
                 ebs_optimized: Optional[bool] = None):
        # Both fields are optional; only provided values are stored.
        if ebs_block_device_configs is not None:
            pulumi.set(__self__, "ebs_block_device_configs", ebs_block_device_configs)
        if ebs_optimized is not None:
            pulumi.set(__self__, "ebs_optimized", ebs_optimized)

    @property
    @pulumi.getter(name="ebsBlockDeviceConfigs")
    def ebs_block_device_configs(self) -> Optional[Sequence['outputs.InstanceFleetConfigEbsBlockDeviceConfig']]:
        return pulumi.get(self, "ebs_block_device_configs")

    @property
    @pulumi.getter(name="ebsOptimized")
    def ebs_optimized(self) -> Optional[bool]:
        return pulumi.get(self, "ebs_optimized")
@pulumi.output_type
class InstanceFleetConfigInstanceFleetProvisioningSpecifications(dict):
    """Pulumi output type: provisioning specifications for an instance
    fleet — optional on-demand and spot specifications."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name for the warning below.
        suggest = None
        if key == "onDemandSpecification":
            suggest = "on_demand_specification"
        elif key == "spotSpecification":
            suggest = "spot_specification"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceFleetConfigInstanceFleetProvisioningSpecifications. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        InstanceFleetConfigInstanceFleetProvisioningSpecifications.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        InstanceFleetConfigInstanceFleetProvisioningSpecifications.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 on_demand_specification: Optional['outputs.InstanceFleetConfigOnDemandProvisioningSpecification'] = None,
                 spot_specification: Optional['outputs.InstanceFleetConfigSpotProvisioningSpecification'] = None):
        # Both specifications are optional; only provided values are stored.
        if on_demand_specification is not None:
            pulumi.set(__self__, "on_demand_specification", on_demand_specification)
        if spot_specification is not None:
            pulumi.set(__self__, "spot_specification", spot_specification)

    @property
    @pulumi.getter(name="onDemandSpecification")
    def on_demand_specification(self) -> Optional['outputs.InstanceFleetConfigOnDemandProvisioningSpecification']:
        return pulumi.get(self, "on_demand_specification")

    @property
    @pulumi.getter(name="spotSpecification")
    def spot_specification(self) -> Optional['outputs.InstanceFleetConfigSpotProvisioningSpecification']:
        return pulumi.get(self, "spot_specification")
@pulumi.output_type
class InstanceFleetConfigInstanceTypeConfig(dict):
    """Pulumi output type: per-instance-type settings for an instance
    fleet — a required instance type plus optional bid price (absolute or
    as a percentage of the on-demand price), configurations, custom AMI,
    EBS configuration, and weighted capacity."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name for the warning below.
        suggest = None
        if key == "instanceType":
            suggest = "instance_type"
        elif key == "bidPrice":
            suggest = "bid_price"
        elif key == "bidPriceAsPercentageOfOnDemandPrice":
            suggest = "bid_price_as_percentage_of_on_demand_price"
        elif key == "customAmiId":
            suggest = "custom_ami_id"
        elif key == "ebsConfiguration":
            suggest = "ebs_configuration"
        elif key == "weightedCapacity":
            suggest = "weighted_capacity"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceFleetConfigInstanceTypeConfig. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        InstanceFleetConfigInstanceTypeConfig.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        InstanceFleetConfigInstanceTypeConfig.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 instance_type: str,
                 bid_price: Optional[str] = None,
                 bid_price_as_percentage_of_on_demand_price: Optional[float] = None,
                 configurations: Optional[Sequence['outputs.InstanceFleetConfigConfiguration']] = None,
                 custom_ami_id: Optional[str] = None,
                 ebs_configuration: Optional['outputs.InstanceFleetConfigEbsConfiguration'] = None,
                 weighted_capacity: Optional[int] = None):
        # instance_type is required; every other field is stored only when
        # provided.
        pulumi.set(__self__, "instance_type", instance_type)
        if bid_price is not None:
            pulumi.set(__self__, "bid_price", bid_price)
        if bid_price_as_percentage_of_on_demand_price is not None:
            pulumi.set(__self__, "bid_price_as_percentage_of_on_demand_price", bid_price_as_percentage_of_on_demand_price)
        if configurations is not None:
            pulumi.set(__self__, "configurations", configurations)
        if custom_ami_id is not None:
            pulumi.set(__self__, "custom_ami_id", custom_ami_id)
        if ebs_configuration is not None:
            pulumi.set(__self__, "ebs_configuration", ebs_configuration)
        if weighted_capacity is not None:
            pulumi.set(__self__, "weighted_capacity", weighted_capacity)

    @property
    @pulumi.getter(name="instanceType")
    def instance_type(self) -> str:
        return pulumi.get(self, "instance_type")

    @property
    @pulumi.getter(name="bidPrice")
    def bid_price(self) -> Optional[str]:
        return pulumi.get(self, "bid_price")

    @property
    @pulumi.getter(name="bidPriceAsPercentageOfOnDemandPrice")
    def bid_price_as_percentage_of_on_demand_price(self) -> Optional[float]:
        return pulumi.get(self, "bid_price_as_percentage_of_on_demand_price")

    @property
    @pulumi.getter
    def configurations(self) -> Optional[Sequence['outputs.InstanceFleetConfigConfiguration']]:
        return pulumi.get(self, "configurations")

    @property
    @pulumi.getter(name="customAmiId")
    def custom_ami_id(self) -> Optional[str]:
        return pulumi.get(self, "custom_ami_id")

    @property
    @pulumi.getter(name="ebsConfiguration")
    def ebs_configuration(self) -> Optional['outputs.InstanceFleetConfigEbsConfiguration']:
        return pulumi.get(self, "ebs_configuration")

    @property
    @pulumi.getter(name="weightedCapacity")
    def weighted_capacity(self) -> Optional[int]:
        return pulumi.get(self, "weighted_capacity")
@pulumi.output_type
class InstanceFleetConfigOnDemandProvisioningSpecification(dict):
    """Pulumi output type: on-demand provisioning specification — a single
    required allocation strategy."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name for the warning below.
        suggest = None
        if key == "allocationStrategy":
            suggest = "allocation_strategy"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceFleetConfigOnDemandProvisioningSpecification. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        InstanceFleetConfigOnDemandProvisioningSpecification.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        InstanceFleetConfigOnDemandProvisioningSpecification.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 allocation_strategy: str):
        pulumi.set(__self__, "allocation_strategy", allocation_strategy)

    @property
    @pulumi.getter(name="allocationStrategy")
    def allocation_strategy(self) -> str:
        return pulumi.get(self, "allocation_strategy")
@pulumi.output_type
class InstanceFleetConfigSpotProvisioningSpecification(dict):
    """Pulumi output type: spot provisioning specification — required
    timeout action and duration plus optional allocation strategy and
    block duration."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name for the warning below.
        suggest = None
        if key == "timeoutAction":
            suggest = "timeout_action"
        elif key == "timeoutDurationMinutes":
            suggest = "timeout_duration_minutes"
        elif key == "allocationStrategy":
            suggest = "allocation_strategy"
        elif key == "blockDurationMinutes":
            suggest = "block_duration_minutes"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceFleetConfigSpotProvisioningSpecification. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        InstanceFleetConfigSpotProvisioningSpecification.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        InstanceFleetConfigSpotProvisioningSpecification.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 timeout_action: str,
                 timeout_duration_minutes: int,
                 allocation_strategy: Optional[str] = None,
                 block_duration_minutes: Optional[int] = None):
        # timeout_action/timeout_duration_minutes are required; the other
        # two fields are stored only when provided.
        pulumi.set(__self__, "timeout_action", timeout_action)
        pulumi.set(__self__, "timeout_duration_minutes", timeout_duration_minutes)
        if allocation_strategy is not None:
            pulumi.set(__self__, "allocation_strategy", allocation_strategy)
        if block_duration_minutes is not None:
            pulumi.set(__self__, "block_duration_minutes", block_duration_minutes)

    @property
    @pulumi.getter(name="timeoutAction")
    def timeout_action(self) -> str:
        return pulumi.get(self, "timeout_action")

    @property
    @pulumi.getter(name="timeoutDurationMinutes")
    def timeout_duration_minutes(self) -> int:
        return pulumi.get(self, "timeout_duration_minutes")

    @property
    @pulumi.getter(name="allocationStrategy")
    def allocation_strategy(self) -> Optional[str]:
        return pulumi.get(self, "allocation_strategy")

    @property
    @pulumi.getter(name="blockDurationMinutes")
    def block_duration_minutes(self) -> Optional[int]:
        return pulumi.get(self, "block_duration_minutes")
@pulumi.output_type
class InstanceFleetConfigVolumeSpecification(dict):
    """Pulumi output type: EBS volume specification — required size (GB)
    and volume type plus optional IOPS."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name for the warning below.
        suggest = None
        if key == "sizeInGB":
            suggest = "size_in_gb"
        elif key == "volumeType":
            suggest = "volume_type"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceFleetConfigVolumeSpecification. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        InstanceFleetConfigVolumeSpecification.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        InstanceFleetConfigVolumeSpecification.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 size_in_gb: int,
                 volume_type: str,
                 iops: Optional[int] = None):
        # size_in_gb and volume_type are required; iops is stored only when
        # provided.
        pulumi.set(__self__, "size_in_gb", size_in_gb)
        pulumi.set(__self__, "volume_type", volume_type)
        if iops is not None:
            pulumi.set(__self__, "iops", iops)

    @property
    @pulumi.getter(name="sizeInGB")
    def size_in_gb(self) -> int:
        return pulumi.get(self, "size_in_gb")

    @property
    @pulumi.getter(name="volumeType")
    def volume_type(self) -> str:
        return pulumi.get(self, "volume_type")

    @property
    @pulumi.getter
    def iops(self) -> Optional[int]:
        return pulumi.get(self, "iops")
@pulumi.output_type
class InstanceGroupConfigAutoScalingPolicy(dict):
    """Pulumi output type: auto-scaling policy for an instance group —
    required scaling constraints and a required list of scaling rules.
    All keys here are already snake_case, so no key-warning shim is needed."""

    def __init__(__self__, *,
                 constraints: 'outputs.InstanceGroupConfigScalingConstraints',
                 rules: Sequence['outputs.InstanceGroupConfigScalingRule']):
        pulumi.set(__self__, "constraints", constraints)
        pulumi.set(__self__, "rules", rules)

    @property
    @pulumi.getter
    def constraints(self) -> 'outputs.InstanceGroupConfigScalingConstraints':
        return pulumi.get(self, "constraints")

    @property
    @pulumi.getter
    def rules(self) -> Sequence['outputs.InstanceGroupConfigScalingRule']:
        return pulumi.get(self, "rules")
@pulumi.output_type
class InstanceGroupConfigCloudWatchAlarmDefinition(dict):
    """Pulumi output type: a CloudWatch alarm definition used to trigger
    scaling — required comparison operator, metric name, period, and
    threshold, plus optional dimensions, evaluation periods, namespace,
    statistic, and unit."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name for the warning below.
        suggest = None
        if key == "comparisonOperator":
            suggest = "comparison_operator"
        elif key == "metricName":
            suggest = "metric_name"
        elif key == "evaluationPeriods":
            suggest = "evaluation_periods"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceGroupConfigCloudWatchAlarmDefinition. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        InstanceGroupConfigCloudWatchAlarmDefinition.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        InstanceGroupConfigCloudWatchAlarmDefinition.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 comparison_operator: str,
                 metric_name: str,
                 period: int,
                 threshold: float,
                 dimensions: Optional[Sequence['outputs.InstanceGroupConfigMetricDimension']] = None,
                 evaluation_periods: Optional[int] = None,
                 namespace: Optional[str] = None,
                 statistic: Optional[str] = None,
                 unit: Optional[str] = None):
        # First four fields are required; the rest are stored only when
        # provided.
        pulumi.set(__self__, "comparison_operator", comparison_operator)
        pulumi.set(__self__, "metric_name", metric_name)
        pulumi.set(__self__, "period", period)
        pulumi.set(__self__, "threshold", threshold)
        if dimensions is not None:
            pulumi.set(__self__, "dimensions", dimensions)
        if evaluation_periods is not None:
            pulumi.set(__self__, "evaluation_periods", evaluation_periods)
        if namespace is not None:
            pulumi.set(__self__, "namespace", namespace)
        if statistic is not None:
            pulumi.set(__self__, "statistic", statistic)
        if unit is not None:
            pulumi.set(__self__, "unit", unit)

    @property
    @pulumi.getter(name="comparisonOperator")
    def comparison_operator(self) -> str:
        return pulumi.get(self, "comparison_operator")

    @property
    @pulumi.getter(name="metricName")
    def metric_name(self) -> str:
        return pulumi.get(self, "metric_name")

    @property
    @pulumi.getter
    def period(self) -> int:
        return pulumi.get(self, "period")

    @property
    @pulumi.getter
    def threshold(self) -> float:
        return pulumi.get(self, "threshold")

    @property
    @pulumi.getter
    def dimensions(self) -> Optional[Sequence['outputs.InstanceGroupConfigMetricDimension']]:
        return pulumi.get(self, "dimensions")

    @property
    @pulumi.getter(name="evaluationPeriods")
    def evaluation_periods(self) -> Optional[int]:
        return pulumi.get(self, "evaluation_periods")

    @property
    @pulumi.getter
    def namespace(self) -> Optional[str]:
        return pulumi.get(self, "namespace")

    @property
    @pulumi.getter
    def statistic(self) -> Optional[str]:
        return pulumi.get(self, "statistic")

    @property
    @pulumi.getter
    def unit(self) -> Optional[str]:
        return pulumi.get(self, "unit")
@pulumi.output_type
class InstanceGroupConfigConfiguration(dict):
    """Pulumi output type: one configuration entry for an instance group —
    a classification, free-form configuration properties, and optional
    nested configurations of the same type."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name for the warning below.
        suggest = None
        if key == "configurationProperties":
            suggest = "configuration_properties"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceGroupConfigConfiguration. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        InstanceGroupConfigConfiguration.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        InstanceGroupConfigConfiguration.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 classification: Optional[str] = None,
                 configuration_properties: Optional[Any] = None,
                 configurations: Optional[Sequence['outputs.InstanceGroupConfigConfiguration']] = None):
        # All fields are optional; only provided values are stored.
        if classification is not None:
            pulumi.set(__self__, "classification", classification)
        if configuration_properties is not None:
            pulumi.set(__self__, "configuration_properties", configuration_properties)
        if configurations is not None:
            pulumi.set(__self__, "configurations", configurations)

    @property
    @pulumi.getter
    def classification(self) -> Optional[str]:
        return pulumi.get(self, "classification")

    @property
    @pulumi.getter(name="configurationProperties")
    def configuration_properties(self) -> Optional[Any]:
        return pulumi.get(self, "configuration_properties")

    @property
    @pulumi.getter
    def configurations(self) -> Optional[Sequence['outputs.InstanceGroupConfigConfiguration']]:
        return pulumi.get(self, "configurations")
@pulumi.output_type
class InstanceGroupConfigEbsBlockDeviceConfig(dict):
    """Pulumi output type: an EBS block-device config for an instance
    group — a required volume specification plus an optional per-instance
    volume count."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name for the warning below.
        suggest = None
        if key == "volumeSpecification":
            suggest = "volume_specification"
        elif key == "volumesPerInstance":
            suggest = "volumes_per_instance"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceGroupConfigEbsBlockDeviceConfig. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        InstanceGroupConfigEbsBlockDeviceConfig.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        InstanceGroupConfigEbsBlockDeviceConfig.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 volume_specification: 'outputs.InstanceGroupConfigVolumeSpecification',
                 volumes_per_instance: Optional[int] = None):
        # volume_specification is required; volumes_per_instance is stored
        # only when provided.
        pulumi.set(__self__, "volume_specification", volume_specification)
        if volumes_per_instance is not None:
            pulumi.set(__self__, "volumes_per_instance", volumes_per_instance)

    @property
    @pulumi.getter(name="volumeSpecification")
    def volume_specification(self) -> 'outputs.InstanceGroupConfigVolumeSpecification':
        return pulumi.get(self, "volume_specification")

    @property
    @pulumi.getter(name="volumesPerInstance")
    def volumes_per_instance(self) -> Optional[int]:
        return pulumi.get(self, "volumes_per_instance")
@pulumi.output_type
class InstanceGroupConfigEbsConfiguration(dict):
    """Pulumi output type: EBS settings for an instance group — optional
    block-device configs and an optional EBS-optimized flag."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name for the warning below.
        suggest = None
        if key == "ebsBlockDeviceConfigs":
            suggest = "ebs_block_device_configs"
        elif key == "ebsOptimized":
            suggest = "ebs_optimized"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceGroupConfigEbsConfiguration. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        InstanceGroupConfigEbsConfiguration.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        InstanceGroupConfigEbsConfiguration.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 ebs_block_device_configs: Optional[Sequence['outputs.InstanceGroupConfigEbsBlockDeviceConfig']] = None,
                 ebs_optimized: Optional[bool] = None):
        # Both fields are optional; only provided values are stored.
        if ebs_block_device_configs is not None:
            pulumi.set(__self__, "ebs_block_device_configs", ebs_block_device_configs)
        if ebs_optimized is not None:
            pulumi.set(__self__, "ebs_optimized", ebs_optimized)

    @property
    @pulumi.getter(name="ebsBlockDeviceConfigs")
    def ebs_block_device_configs(self) -> Optional[Sequence['outputs.InstanceGroupConfigEbsBlockDeviceConfig']]:
        return pulumi.get(self, "ebs_block_device_configs")

    @property
    @pulumi.getter(name="ebsOptimized")
    def ebs_optimized(self) -> Optional[bool]:
        return pulumi.get(self, "ebs_optimized")
@pulumi.output_type
class InstanceGroupConfigMetricDimension(dict):
    """Pulumi output type: a CloudWatch metric dimension — a required
    key/value pair. Keys are already snake_case, so no key-warning shim
    is needed."""

    def __init__(__self__, *,
                 key: str,
                 value: str):
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def key(self) -> str:
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def value(self) -> str:
        return pulumi.get(self, "value")
@pulumi.output_type
class InstanceGroupConfigScalingAction(dict):
    """Pulumi output type: a scaling action — a required simple scaling
    policy configuration plus an optional market."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name for the warning below.
        suggest = None
        if key == "simpleScalingPolicyConfiguration":
            suggest = "simple_scaling_policy_configuration"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceGroupConfigScalingAction. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        InstanceGroupConfigScalingAction.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        InstanceGroupConfigScalingAction.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 simple_scaling_policy_configuration: 'outputs.InstanceGroupConfigSimpleScalingPolicyConfiguration',
                 market: Optional[str] = None):
        # The policy configuration is required; market is stored only when
        # provided.
        pulumi.set(__self__, "simple_scaling_policy_configuration", simple_scaling_policy_configuration)
        if market is not None:
            pulumi.set(__self__, "market", market)

    @property
    @pulumi.getter(name="simpleScalingPolicyConfiguration")
    def simple_scaling_policy_configuration(self) -> 'outputs.InstanceGroupConfigSimpleScalingPolicyConfiguration':
        return pulumi.get(self, "simple_scaling_policy_configuration")

    @property
    @pulumi.getter
    def market(self) -> Optional[str]:
        return pulumi.get(self, "market")
@pulumi.output_type
class InstanceGroupConfigScalingConstraints(dict):
    """Pulumi output type: capacity bounds for automatic scaling —
    required max and min capacity."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name for the warning below.
        suggest = None
        if key == "maxCapacity":
            suggest = "max_capacity"
        elif key == "minCapacity":
            suggest = "min_capacity"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceGroupConfigScalingConstraints. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        InstanceGroupConfigScalingConstraints.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        InstanceGroupConfigScalingConstraints.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 max_capacity: int,
                 min_capacity: int):
        pulumi.set(__self__, "max_capacity", max_capacity)
        pulumi.set(__self__, "min_capacity", min_capacity)

    @property
    @pulumi.getter(name="maxCapacity")
    def max_capacity(self) -> int:
        return pulumi.get(self, "max_capacity")

    @property
    @pulumi.getter(name="minCapacity")
    def min_capacity(self) -> int:
        return pulumi.get(self, "min_capacity")
@pulumi.output_type
class InstanceGroupConfigScalingRule(dict):
    """Pulumi output type: a scaling rule — required action, name, and
    trigger plus an optional description. Keys are already snake_case,
    so no key-warning shim is needed."""

    def __init__(__self__, *,
                 action: 'outputs.InstanceGroupConfigScalingAction',
                 name: str,
                 trigger: 'outputs.InstanceGroupConfigScalingTrigger',
                 description: Optional[str] = None):
        # description is stored only when provided.
        pulumi.set(__self__, "action", action)
        pulumi.set(__self__, "name", name)
        pulumi.set(__self__, "trigger", trigger)
        if description is not None:
            pulumi.set(__self__, "description", description)

    @property
    @pulumi.getter
    def action(self) -> 'outputs.InstanceGroupConfigScalingAction':
        return pulumi.get(self, "action")

    @property
    @pulumi.getter
    def name(self) -> str:
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def trigger(self) -> 'outputs.InstanceGroupConfigScalingTrigger':
        return pulumi.get(self, "trigger")

    @property
    @pulumi.getter
    def description(self) -> Optional[str]:
        return pulumi.get(self, "description")
@pulumi.output_type
class InstanceGroupConfigScalingTrigger(dict):
    """Pulumi output type: a scaling trigger — wraps a required CloudWatch
    alarm definition."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name for the warning below.
        suggest = None
        if key == "cloudWatchAlarmDefinition":
            suggest = "cloud_watch_alarm_definition"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceGroupConfigScalingTrigger. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        InstanceGroupConfigScalingTrigger.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        InstanceGroupConfigScalingTrigger.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 cloud_watch_alarm_definition: 'outputs.InstanceGroupConfigCloudWatchAlarmDefinition'):
        pulumi.set(__self__, "cloud_watch_alarm_definition", cloud_watch_alarm_definition)

    @property
    @pulumi.getter(name="cloudWatchAlarmDefinition")
    def cloud_watch_alarm_definition(self) -> 'outputs.InstanceGroupConfigCloudWatchAlarmDefinition':
        return pulumi.get(self, "cloud_watch_alarm_definition")
@pulumi.output_type
class InstanceGroupConfigSimpleScalingPolicyConfiguration(dict):
    """Pulumi output type: simple scaling policy — a required scaling
    adjustment plus optional adjustment type and cool-down period."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name for the warning below.
        suggest = None
        if key == "scalingAdjustment":
            suggest = "scaling_adjustment"
        elif key == "adjustmentType":
            suggest = "adjustment_type"
        elif key == "coolDown":
            suggest = "cool_down"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceGroupConfigSimpleScalingPolicyConfiguration. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        InstanceGroupConfigSimpleScalingPolicyConfiguration.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        InstanceGroupConfigSimpleScalingPolicyConfiguration.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 scaling_adjustment: int,
                 adjustment_type: Optional[str] = None,
                 cool_down: Optional[int] = None):
        # scaling_adjustment is required; the rest are stored only when
        # provided.
        pulumi.set(__self__, "scaling_adjustment", scaling_adjustment)
        if adjustment_type is not None:
            pulumi.set(__self__, "adjustment_type", adjustment_type)
        if cool_down is not None:
            pulumi.set(__self__, "cool_down", cool_down)

    @property
    @pulumi.getter(name="scalingAdjustment")
    def scaling_adjustment(self) -> int:
        return pulumi.get(self, "scaling_adjustment")

    @property
    @pulumi.getter(name="adjustmentType")
    def adjustment_type(self) -> Optional[str]:
        return pulumi.get(self, "adjustment_type")

    @property
    @pulumi.getter(name="coolDown")
    def cool_down(self) -> Optional[int]:
        return pulumi.get(self, "cool_down")
@pulumi.output_type
class InstanceGroupConfigVolumeSpecification(dict):
    """Pulumi output type: EBS volume specification for an instance
    group — required size (GB) and volume type plus optional IOPS."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name for the warning below.
        suggest = None
        if key == "sizeInGB":
            suggest = "size_in_gb"
        elif key == "volumeType":
            suggest = "volume_type"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in InstanceGroupConfigVolumeSpecification. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        InstanceGroupConfigVolumeSpecification.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        InstanceGroupConfigVolumeSpecification.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 size_in_gb: int,
                 volume_type: str,
                 iops: Optional[int] = None):
        # size_in_gb and volume_type are required; iops is stored only when
        # provided.
        pulumi.set(__self__, "size_in_gb", size_in_gb)
        pulumi.set(__self__, "volume_type", volume_type)
        if iops is not None:
            pulumi.set(__self__, "iops", iops)

    @property
    @pulumi.getter(name="sizeInGB")
    def size_in_gb(self) -> int:
        return pulumi.get(self, "size_in_gb")

    @property
    @pulumi.getter(name="volumeType")
    def volume_type(self) -> str:
        return pulumi.get(self, "volume_type")

    @property
    @pulumi.getter
    def iops(self) -> Optional[int]:
        return pulumi.get(self, "iops")
@pulumi.output_type
class StepHadoopJarStepConfig(dict):
    """Pulumi output type: Hadoop JAR step configuration — a required jar
    plus optional args, main class, and step properties."""

    @staticmethod
    def __key_warning(key: str):
        # camelCase wire key -> snake_case property name for the warning below.
        suggest = None
        if key == "mainClass":
            suggest = "main_class"
        elif key == "stepProperties":
            suggest = "step_properties"

        if suggest:
            pulumi.log.warn(f"Key '{key}' not found in StepHadoopJarStepConfig. Access the value via the '{suggest}' property getter instead.")

    def __getitem__(self, key: str) -> Any:
        StepHadoopJarStepConfig.__key_warning(key)
        return super().__getitem__(key)

    def get(self, key: str, default = None) -> Any:
        StepHadoopJarStepConfig.__key_warning(key)
        return super().get(key, default)

    def __init__(__self__, *,
                 jar: str,
                 args: Optional[Sequence[str]] = None,
                 main_class: Optional[str] = None,
                 step_properties: Optional[Sequence['outputs.StepKeyValue']] = None):
        # jar is required; every other field is stored only when provided.
        pulumi.set(__self__, "jar", jar)
        if args is not None:
            pulumi.set(__self__, "args", args)
        if main_class is not None:
            pulumi.set(__self__, "main_class", main_class)
        if step_properties is not None:
            pulumi.set(__self__, "step_properties", step_properties)

    @property
    @pulumi.getter
    def jar(self) -> str:
        return pulumi.get(self, "jar")

    @property
    @pulumi.getter
    def args(self) -> Optional[Sequence[str]]:
        return pulumi.get(self, "args")

    @property
    @pulumi.getter(name="mainClass")
    def main_class(self) -> Optional[str]:
        return pulumi.get(self, "main_class")

    @property
    @pulumi.getter(name="stepProperties")
    def step_properties(self) -> Optional[Sequence['outputs.StepKeyValue']]:
        return pulumi.get(self, "step_properties")
@pulumi.output_type
class StepKeyValue(dict):
    """Pulumi output type: an optional key/value pair used in step
    properties. Keys are already snake_case, so no key-warning shim is
    needed."""

    def __init__(__self__, *,
                 key: Optional[str] = None,
                 value: Optional[str] = None):
        # Both fields are optional; only provided values are stored.
        if key is not None:
            pulumi.set(__self__, "key", key)
        if value is not None:
            pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def key(self) -> Optional[str]:
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def value(self) -> Optional[str]:
        return pulumi.get(self, "value")
@pulumi.output_type
class StudioTag(dict):
    """
    An arbitrary set of tags (key-value pairs) for this EMR Studio.
    """

    def __init__(__self__, *,
                 key: str,
                 value: str):
        """
        An arbitrary set of tags (key-value pairs) for this EMR Studio.
        :param str key: The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        :param str value: The value for the tag. You can specify a value that is 0 to 255 Unicode characters in length. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        """
        # Both fields are required, so they are always stored (no None check
        # as in the optional-field output types above in this module).
        pulumi.set(__self__, "key", key)
        pulumi.set(__self__, "value", value)

    @property
    @pulumi.getter
    def key(self) -> str:
        """
        The key name of the tag. You can specify a value that is 1 to 127 Unicode characters in length and cannot be prefixed with aws:. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        """
        return pulumi.get(self, "key")

    @property
    @pulumi.getter
    def value(self) -> str:
        """
        The value for the tag. You can specify a value that is 0 to 255 Unicode characters in length. You can use any of the following characters: the set of Unicode letters, digits, whitespace, _, ., /, =, +, and -.
        """
        return pulumi.get(self, "value")
| 37.434066
| 268
| 0.664685
| 9,787
| 95,382
| 6.144988
| 0.03566
| 0.02549
| 0.03826
| 0.055919
| 0.791324
| 0.763339
| 0.736952
| 0.681382
| 0.667199
| 0.658802
| 0
| 0.000602
| 0.23343
| 95,382
| 2,547
| 269
| 37.448763
| 0.821929
| 0.012937
| 0
| 0.750482
| 1
| 0.020231
| 0.224716
| 0.112927
| 0
| 0
| 0
| 0
| 0
| 1
| 0.171484
| false
| 0.011079
| 0.003372
| 0.084297
| 0.326108
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b3539ffc66bede449c55231a9b0e75f5780ee2ee
| 128
|
py
|
Python
|
434-number-of-segments-in-a-string/434-number-of-segments-in-a-string.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | 2
|
2021-12-05T14:29:06.000Z
|
2022-01-01T05:46:13.000Z
|
434-number-of-segments-in-a-string/434-number-of-segments-in-a-string.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | null | null | null |
434-number-of-segments-in-a-string/434-number-of-segments-in-a-string.py
|
hyeseonko/LeetCode
|
48dfc93f1638e13041d8ce1420517a886abbdc77
|
[
"MIT"
] | null | null | null |
class Solution:
    def countSegments(self, s: str) -> int:
        """Return the number of segments in *s*.

        A segment is a maximal run of non-space characters. Splitting on
        single spaces and discarding the empty tokens produced by runs of
        spaces counts exactly those segments; the leading strip keeps
        surrounding whitespace from producing a spurious token.
        """
        # Generator expression instead of a materialized list: sum()
        # consumes it lazily with no throwaway allocation.
        return sum(1 for token in s.strip().split(" ") if token)
| 42.666667
| 68
| 0.625
| 19
| 128
| 4.210526
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.01
| 0.21875
| 128
| 3
| 68
| 42.666667
| 0.79
| 0
| 0
| 0
| 0
| 0
| 0.007752
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
2fa1d59d738f6050541565b7625bf68ab9c9b3ab
| 43
|
py
|
Python
|
demo_zinnia_wordpress/__init__.py
|
django-blog-zinnia/wordpress2zinnia
|
656df6d431418a660f0e590d2226af5e6dd7a3e6
|
[
"BSD-3-Clause"
] | 7
|
2015-08-16T18:50:52.000Z
|
2021-05-23T11:28:22.000Z
|
demo_zinnia_wordpress/__init__.py
|
django-blog-zinnia/wordpress2zinnia
|
656df6d431418a660f0e590d2226af5e6dd7a3e6
|
[
"BSD-3-Clause"
] | 5
|
2015-06-20T07:04:01.000Z
|
2018-08-02T14:12:41.000Z
|
demo_zinnia_wordpress/__init__.py
|
django-blog-zinnia/wordpress2zinnia
|
656df6d431418a660f0e590d2226af5e6dd7a3e6
|
[
"BSD-3-Clause"
] | 7
|
2015-04-17T14:57:37.000Z
|
2020-10-17T04:32:02.000Z
|
"""Demo of Zinnia with wordpress import"""
| 21.5
| 42
| 0.72093
| 6
| 43
| 5.166667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139535
| 43
| 1
| 43
| 43
| 0.837838
| 0.837209
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
2fcd9f0a4668f30c4930f01c38dd7a96779a0ba3
| 195
|
py
|
Python
|
snippets/urls.py
|
rudra012/django_rest
|
bbfc0535cefcf20d1b788aab0336c090d58c506d
|
[
"MIT"
] | null | null | null |
snippets/urls.py
|
rudra012/django_rest
|
bbfc0535cefcf20d1b788aab0336c090d58c506d
|
[
"MIT"
] | null | null | null |
snippets/urls.py
|
rudra012/django_rest
|
bbfc0535cefcf20d1b788aab0336c090d58c506d
|
[
"MIT"
] | null | null | null |
from django.conf.urls import url
from api.snippets import snippets_api
urlpatterns = [
url(r'^$', snippets_api.snippet_list),
url(r'^(?P<pk>[0-9]+)/$', snippets_api.snippet_detail),
]
| 19.5
| 59
| 0.687179
| 29
| 195
| 4.448276
| 0.586207
| 0.255814
| 0.27907
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011905
| 0.138462
| 195
| 9
| 60
| 21.666667
| 0.755952
| 0
| 0
| 0
| 0
| 0
| 0.097938
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.333333
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
2fd2f559dbfd281655f2be8fc7d8f814570f9fcc
| 76
|
py
|
Python
|
Fundamentos/variables.py
|
ijchavez/python
|
bccd94a9bee90125e2be27b0355bdaedb0ae9d19
|
[
"Unlicense"
] | null | null | null |
Fundamentos/variables.py
|
ijchavez/python
|
bccd94a9bee90125e2be27b0355bdaedb0ae9d19
|
[
"Unlicense"
] | null | null | null |
Fundamentos/variables.py
|
ijchavez/python
|
bccd94a9bee90125e2be27b0355bdaedb0ae9d19
|
[
"Unlicense"
] | null | null | null |
x = 5
y = 3
z = x + y
print(x)
print(y)
print(x + y)
print(z)
w = z
print(w)
| 8.444444
| 12
| 0.526316
| 20
| 76
| 2
| 0.35
| 0.45
| 0.35
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.035714
| 0.263158
| 76
| 9
| 13
| 8.444444
| 0.678571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.555556
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
641e664d10bd57eb59a8d00d6a45b411c67df279
| 128
|
py
|
Python
|
whereToGo/src/sensors/_conf_sensors.py
|
k323r/whereToGo
|
5ec94a49e818c6a2acfb08c66755100c26c42f22
|
[
"MIT"
] | null | null | null |
whereToGo/src/sensors/_conf_sensors.py
|
k323r/whereToGo
|
5ec94a49e818c6a2acfb08c66755100c26c42f22
|
[
"MIT"
] | null | null | null |
whereToGo/src/sensors/_conf_sensors.py
|
k323r/whereToGo
|
5ec94a49e818c6a2acfb08c66755100c26c42f22
|
[
"MIT"
] | null | null | null |
import os.path
ROOT = os.path.dirname(
os.path.dirname(
os.path.dirname(
os.path.abspath(__file__)
)))
| 16
| 37
| 0.59375
| 17
| 128
| 4.235294
| 0.411765
| 0.416667
| 0.541667
| 0.625
| 0.625
| 0.625
| 0.625
| 0.625
| 0
| 0
| 0
| 0
| 0.265625
| 128
| 7
| 38
| 18.285714
| 0.765957
| 0
| 0
| 0.333333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.166667
| 0
| 0.166667
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
641e9e0dce5e1416f0e6f8b2e384e21e197b3478
| 160
|
py
|
Python
|
Codigo_EngDados/exe03.py
|
SouzaMarcel0/Exercicios_Python
|
d0c75292b7c725faeaf49db161b7d1a0f71a9418
|
[
"MIT"
] | null | null | null |
Codigo_EngDados/exe03.py
|
SouzaMarcel0/Exercicios_Python
|
d0c75292b7c725faeaf49db161b7d1a0f71a9418
|
[
"MIT"
] | null | null | null |
Codigo_EngDados/exe03.py
|
SouzaMarcel0/Exercicios_Python
|
d0c75292b7c725faeaf49db161b7d1a0f71a9418
|
[
"MIT"
] | null | null | null |
client = pymongo.MongoClient("mongodb+srv://usermgs:udUnpg6aJnCp9d2W@cluster0.2ivys.mongodb.net/baseteste?retryWrites=true&w=majority")
db = client.test
| 16
| 135
| 0.78125
| 19
| 160
| 6.578947
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.034014
| 0.08125
| 160
| 9
| 136
| 17.777778
| 0.816327
| 0
| 0
| 0
| 0
| 0.5
| 0.668831
| 0.668831
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
643115619018886865b6c08a6d8862db0370f968
| 103
|
py
|
Python
|
pluribus/poker/evaluation/__init__.py
|
keithlee96/pluribus-poker-AI
|
15e52fe73dd09570e782dd0e7b9069865eb5823d
|
[
"MIT"
] | 113
|
2020-08-06T15:03:18.000Z
|
2022-03-31T01:56:34.000Z
|
poker_ai/poker/evaluation/__init__.py
|
fedden/pluribus
|
73fb394b26623c897459ffa3e66d7a5cb47e9962
|
[
"MIT"
] | null | null | null |
poker_ai/poker/evaluation/__init__.py
|
fedden/pluribus
|
73fb394b26623c897459ffa3e66d7a5cb47e9962
|
[
"MIT"
] | 42
|
2020-08-17T15:51:30.000Z
|
2022-03-31T17:10:44.000Z
|
from .eval_card import EvaluationCard
from .evaluator import Evaluator
from .lookup import LookupTable
| 25.75
| 37
| 0.854369
| 13
| 103
| 6.692308
| 0.615385
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.116505
| 103
| 3
| 38
| 34.333333
| 0.956044
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ff5309da667685d4448f0288ca00089d68c14df3
| 316
|
py
|
Python
|
learn_pyblish/myplugins/myplugin2.py
|
kingmax/py
|
4cbc20d21c249e22e8f6b68d3c761a66a981e38f
|
[
"MIT"
] | null | null | null |
learn_pyblish/myplugins/myplugin2.py
|
kingmax/py
|
4cbc20d21c249e22e8f6b68d3c761a66a981e38f
|
[
"MIT"
] | null | null | null |
learn_pyblish/myplugins/myplugin2.py
|
kingmax/py
|
4cbc20d21c249e22e8f6b68d3c761a66a981e38f
|
[
"MIT"
] | null | null | null |
import pyblish.api
########################################################################
class MyPlugin2(pyblish.api.ContextPlugin):
""""""
#----------------------------------------------------------------------
def process(self, context):
print('hello from plugin2')
| 24.307692
| 75
| 0.300633
| 16
| 316
| 5.9375
| 0.875
| 0.210526
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007752
| 0.183544
| 316
| 12
| 76
| 26.333333
| 0.360465
| 0.221519
| 0
| 0
| 0
| 0
| 0.107784
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0.25
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
ff8ffce3211e8d216d8eb1a826f3e6a2b3bf5022
| 110
|
py
|
Python
|
synth/__init__.py
|
lummax/switching-lattice-synth
|
47cf9e64c900cb179c392b46a392049e99dfebab
|
[
"MIT"
] | null | null | null |
synth/__init__.py
|
lummax/switching-lattice-synth
|
47cf9e64c900cb179c392b46a392049e99dfebab
|
[
"MIT"
] | null | null | null |
synth/__init__.py
|
lummax/switching-lattice-synth
|
47cf9e64c900cb179c392b46a392049e99dfebab
|
[
"MIT"
] | null | null | null |
import synth.timer
from synth.base import Function
from synth.dp_construction import DualProductConstruction
| 22
| 57
| 0.872727
| 14
| 110
| 6.785714
| 0.642857
| 0.189474
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 110
| 4
| 58
| 27.5
| 0.959596
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ffa7ba43fb77d122b3328d05f195ebbbdbe93ef0
| 7,577
|
py
|
Python
|
kuryr_kubernetes/tests/unit/controller/drivers/test_public_ip.py
|
BoringWenn/kuryr-kubernetes
|
625ddb0a39ab5e8752b83565e6b9689c2ab1775f
|
[
"Apache-2.0"
] | null | null | null |
kuryr_kubernetes/tests/unit/controller/drivers/test_public_ip.py
|
BoringWenn/kuryr-kubernetes
|
625ddb0a39ab5e8752b83565e6b9689c2ab1775f
|
[
"Apache-2.0"
] | null | null | null |
kuryr_kubernetes/tests/unit/controller/drivers/test_public_ip.py
|
BoringWenn/kuryr-kubernetes
|
625ddb0a39ab5e8752b83565e6b9689c2ab1775f
|
[
"Apache-2.0"
] | 1
|
2018-08-01T13:41:55.000Z
|
2018-08-01T13:41:55.000Z
|
# Copyright (c) 2017 RedHat, Inc.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from neutronclient.common import exceptions as n_exc
from kuryr_kubernetes.controller.drivers import public_ip\
as d_public_ip
from kuryr_kubernetes.tests import base as test_base
from kuryr_kubernetes.tests.unit import kuryr_fixtures as k_fix
class TestFipPubIpDriver(test_base.TestCase):
def test_is_ip_available_none_param(self):
cls = d_public_ip.FipPubIpDriver
m_driver = mock.Mock(spec=cls)
fip_ip_addr = None
fip_id = cls.is_ip_available(m_driver, fip_ip_addr)
self.assertIsNone(fip_id)
def test_is_ip_available_empty_param(self):
cls = d_public_ip.FipPubIpDriver
m_driver = mock.Mock(spec=cls)
fip_ip_addr = None
fip_id = cls.is_ip_available(m_driver, fip_ip_addr)
self.assertIsNone(fip_id)
def test_is_ip_available_ip_not_exist(self):
cls = d_public_ip.FipPubIpDriver
m_driver = mock.Mock(spec=cls)
neutron = self.useFixture(k_fix.MockNeutronClient()).client
floating_ip = {'floating_ip_address': '1.2.3.4', 'port_id': None,
'id': 'a2a62ea7-e3bf-40df-8c09-aa0c29876a6b'}
neutron.list_floatingips.return_value = {'floatingips': [floating_ip]}
fip_ip_addr = '1.1.1.1'
fip_id = cls.is_ip_available(m_driver, fip_ip_addr)
self.assertIsNone(fip_id)
def test_is_ip_available_empty_fip_list(self):
cls = d_public_ip.FipPubIpDriver
m_driver = mock.Mock(spec=cls)
neutron = self.useFixture(k_fix.MockNeutronClient()).client
floating_ip = None
neutron.list_floatingips.return_value = {'floatingips': [floating_ip]}
fip_ip_addr = '1.1.1.1'
fip_id = cls.is_ip_available(m_driver, fip_ip_addr)
self.assertIsNone(fip_id)
def test_is_ip_available_occupied_fip(self):
cls = d_public_ip.FipPubIpDriver
m_driver = mock.Mock(spec=cls)
neutron = self.useFixture(k_fix.MockNeutronClient()).client
floating_ip = {'floating_ip_address': '1.2.3.4',
'port_id': 'ec29d641-fec4-4f67-928a-124a76b3a8e6'}
neutron.list_floatingips.return_value = {'floatingips': [floating_ip]}
fip_ip_addr = '1.2.3.4'
fip_id = cls.is_ip_available(m_driver, fip_ip_addr)
self.assertIsNone(fip_id)
def test_is_ip_available_ip_exist_and_available(self):
cls = d_public_ip.FipPubIpDriver
m_driver = mock.Mock(spec=cls)
neutron = self.useFixture(k_fix.MockNeutronClient()).client
floating_ip = {'floating_ip_address': '1.2.3.4', 'port_id': None,
'id': 'a2a62ea7-e3bf-40df-8c09-aa0c29876a6b'}
neutron.list_floatingips.return_value = {'floatingips': [floating_ip]}
fip_ip_addr = '1.2.3.4'
fip_id = cls.is_ip_available(m_driver, fip_ip_addr)
self.assertEqual(fip_id, 'a2a62ea7-e3bf-40df-8c09-aa0c29876a6b')
def test_allocate_ip_all_green(self):
cls = d_public_ip.FipPubIpDriver
m_driver = mock.Mock(spec=cls)
pub_net_id = mock.sentinel.pub_net_id
pub_subnet_id = mock.sentinel.pub_subnet_id
project_id = mock.sentinel.project_id
description = mock.sentinel.description
neutron = self.useFixture(k_fix.MockNeutronClient()).client
floating_ip = {'floating_ip_address': '1.2.3.5',
'id': 'ec29d641-fec4-4f67-928a-124a76b3a888'}
neutron.create_floatingip.return_value = {'floatingip': floating_ip}
fip_id, fip_addr = cls.allocate_ip(
m_driver, pub_net_id, project_id, pub_subnet_id, description)
self.assertEqual(fip_id, floating_ip['id'])
self.assertEqual(fip_addr, floating_ip['floating_ip_address'])
def test_allocate_ip_neutron_exception(self):
cls = d_public_ip.FipPubIpDriver
m_driver = mock.Mock(spec=cls)
pub_net_id = mock.sentinel.pub_net_id
pub_subnet_id = mock.sentinel.pub_subnet_id
project_id = mock.sentinel.project_id
description = mock.sentinel.description
neutron = self.useFixture(k_fix.MockNeutronClient()).client
neutron.create_floatingip.side_effect = n_exc.NeutronClientException
self.assertRaises(
n_exc.NeutronClientException, cls.allocate_ip,
m_driver, pub_net_id, project_id, pub_subnet_id, description)
def test_free_ip_neutron_exception(self):
cls = d_public_ip.FipPubIpDriver
m_driver = mock.Mock(spec=cls)
res_id = mock.sentinel.res_id
neutron = self.useFixture(k_fix.MockNeutronClient()).client
neutron.delete_floatingip.side_effect = n_exc.NeutronClientException
rc = cls.free_ip(m_driver, res_id)
self.assertEqual(rc, False)
def test_free_ip_succeeded(self):
cls = d_public_ip.FipPubIpDriver
m_driver = mock.Mock(spec=cls)
res_id = mock.sentinel.res_id
neutron = self.useFixture(k_fix.MockNeutronClient()).client
neutron.delete_floatingip.return_value = None
rc = cls.free_ip(m_driver, res_id)
self.assertEqual(rc, True)
# try:
# cls.free_ip(m_driver, res_id)
# except Exception:
# self.fail("Encountered an unexpected exception.")
def test_associate_neutron_exception(self):
cls = d_public_ip.FipPubIpDriver
m_driver = mock.Mock(spec=cls)
res_id = mock.sentinel.res_id
vip_port_id = mock.sentinel.vip_port_id
neutron = self.useFixture(k_fix.MockNeutronClient()).client
neutron.update_floatingip.side_effect = n_exc.NeutronClientException
retcode = cls.associate(m_driver, res_id, vip_port_id)
self.assertIsNone(retcode)
def test_associate_succeeded(self):
cls = d_public_ip.FipPubIpDriver
m_driver = mock.Mock(spec=cls)
res_id = mock.sentinel.res_id
vip_port_id = mock.sentinel.vip_port_id
neutron = self.useFixture(k_fix.MockNeutronClient()).client
neutron.update_floatingip.return_value = None
retcode = cls.associate(m_driver, res_id, vip_port_id)
self.assertIsNone(retcode)
def test_disassociate_neutron_exception(self):
cls = d_public_ip.FipPubIpDriver
m_driver = mock.Mock(spec=cls)
res_id = mock.sentinel.res_id
neutron = self.useFixture(k_fix.MockNeutronClient()).client
neutron.update_floatingip.side_effect = n_exc.NeutronClientException
self.assertIsNone(cls.disassociate
(m_driver, res_id))
def test_disassociate_succeeded(self):
cls = d_public_ip.FipPubIpDriver
m_driver = mock.Mock(spec=cls)
res_id = mock.sentinel.res_id
neutron = self.useFixture(k_fix.MockNeutronClient()).client
neutron.update_floatingip.return_value = None
self.assertIsNone(cls.disassociate
(m_driver, res_id))
| 39.463542
| 78
| 0.685628
| 1,010
| 7,577
| 4.835644
| 0.165347
| 0.041564
| 0.027641
| 0.040131
| 0.761671
| 0.743448
| 0.729115
| 0.714578
| 0.695536
| 0.695536
| 0
| 0.022441
| 0.223703
| 7,577
| 191
| 79
| 39.670157
| 0.807888
| 0.097796
| 0
| 0.718519
| 0
| 0
| 0.060731
| 0.026405
| 0
| 0
| 0
| 0
| 0.111111
| 1
| 0.103704
| false
| 0
| 0.037037
| 0
| 0.148148
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
927e356264be10470f903d4aeb2be8373a316cd6
| 61
|
py
|
Python
|
up/utils/model/optim/__init__.py
|
ModelTC/EOD
|
164bff80486e9ae6a095a97667b365c46ceabd86
|
[
"Apache-2.0"
] | 196
|
2021-10-30T05:15:36.000Z
|
2022-03-30T18:43:40.000Z
|
up/utils/model/optim/__init__.py
|
ModelTC/EOD
|
164bff80486e9ae6a095a97667b365c46ceabd86
|
[
"Apache-2.0"
] | 12
|
2021-10-30T11:33:28.000Z
|
2022-03-31T14:22:58.000Z
|
up/utils/model/optim/__init__.py
|
ModelTC/EOD
|
164bff80486e9ae6a095a97667b365c46ceabd86
|
[
"Apache-2.0"
] | 23
|
2021-11-01T07:26:17.000Z
|
2022-03-27T05:55:37.000Z
|
from .lars import LARS # noqa
from .lamb import LAMB # noqa
| 30.5
| 30
| 0.721311
| 10
| 61
| 4.4
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.213115
| 61
| 2
| 31
| 30.5
| 0.916667
| 0.147541
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
92afbe514a83c1f439be100159e8dac9eed5d374
| 752
|
py
|
Python
|
bip_utils/bip/__init__.py
|
djmuratb/bip_utils
|
18de9a7067ca2cc3bc4727dc9e39e75db0456e1b
|
[
"MIT"
] | 1
|
2021-06-13T11:45:23.000Z
|
2021-06-13T11:45:23.000Z
|
bip_utils/bip/__init__.py
|
djmuratb/bip_utils
|
18de9a7067ca2cc3bc4727dc9e39e75db0456e1b
|
[
"MIT"
] | null | null | null |
bip_utils/bip/__init__.py
|
djmuratb/bip_utils
|
18de9a7067ca2cc3bc4727dc9e39e75db0456e1b
|
[
"MIT"
] | null | null | null |
# BIP39
from bip_utils.bip.bip39_ex import Bip39InvalidFileError, Bip39ChecksumError
from bip_utils.bip.bip39 import (
Bip39WordsNum, Bip39EntropyBitLen,
Bip39EntropyGenerator, Bip39MnemonicGenerator, Bip39MnemonicValidator, Bip39SeedGenerator
)
# BIP32
from bip_utils.bip.bip32_ex import Bip32KeyError, Bip32PathError
from bip_utils.bip.bip32_utils import Bip32Utils
from bip_utils.bip.bip32_path import Bip32PathParser
from bip_utils.bip.bip32 import Bip32
# BIP44/49/84
from bip_utils.bip.bip44_base_ex import Bip44DepthError, Bip44CoinNotAllowedError
from bip_utils.bip.bip44_base import Bip44Changes, Bip44Coins, Bip44Levels
from bip_utils.bip.bip44 import Bip44
from bip_utils.bip.bip49 import Bip49
from bip_utils.bip.bip84 import Bip84
| 41.777778
| 93
| 0.855053
| 99
| 752
| 6.313131
| 0.313131
| 0.1232
| 0.2112
| 0.264
| 0.3008
| 0.0768
| 0
| 0
| 0
| 0
| 0
| 0.108504
| 0.093085
| 752
| 17
| 94
| 44.235294
| 0.807918
| 0.030585
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.785714
| 0
| 0.785714
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
2b88c5258ed3a32b0f868be74797639d674d4da1
| 256
|
py
|
Python
|
Part_3_advanced/m08_abstract_protocol/abstract_class/homework_1_start/new_movies/rental_directory.py
|
Mikma03/InfoShareacademy_Python_Courses
|
3df1008c8c92831bebf1625f960f25b39d6987e6
|
[
"MIT"
] | null | null | null |
Part_3_advanced/m08_abstract_protocol/abstract_class/homework_1_start/new_movies/rental_directory.py
|
Mikma03/InfoShareacademy_Python_Courses
|
3df1008c8c92831bebf1625f960f25b39d6987e6
|
[
"MIT"
] | null | null | null |
Part_3_advanced/m08_abstract_protocol/abstract_class/homework_1_start/new_movies/rental_directory.py
|
Mikma03/InfoShareacademy_Python_Courses
|
3df1008c8c92831bebf1625f960f25b39d6987e6
|
[
"MIT"
] | null | null | null |
from new_movies.random_data_utility import random_generator
available_movies = random_generator.generate_random_movies(movies_number=15)
available_games = random_generator.generate_random_games()
def add_movie(movie):
available_movies.append(movie)
| 28.444444
| 76
| 0.859375
| 34
| 256
| 6.029412
| 0.5
| 0.219512
| 0.22439
| 0.282927
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.008475
| 0.078125
| 256
| 8
| 77
| 32
| 0.860169
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.2
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
2bafdb3d0198af653fa5dcfa1e0681df79a2f730
| 174
|
py
|
Python
|
tests/iter_version_dev/V1_0_0/demo.py
|
liguodongIOT/nlp-app-samples
|
e0cc747e88c7b5c701b5099462d2dd6277c23381
|
[
"Apache-2.0"
] | 1
|
2021-09-30T08:16:21.000Z
|
2021-09-30T08:16:21.000Z
|
tests/iter_version_dev/V1_0_0/demo.py
|
liguodongIOT/nlp-app-samples
|
e0cc747e88c7b5c701b5099462d2dd6277c23381
|
[
"Apache-2.0"
] | null | null | null |
tests/iter_version_dev/V1_0_0/demo.py
|
liguodongIOT/nlp-app-samples
|
e0cc747e88c7b5c701b5099462d2dd6277c23381
|
[
"Apache-2.0"
] | 1
|
2021-11-24T06:24:44.000Z
|
2021-11-24T06:24:44.000Z
|
from nlp_app_samples.constants import APP_NAME
from tests.iter_version_dev.V1_0_0.classification_lr import TASK_DICT
print(APP_NAME)
print(TASK_DICT)
print("over....")
| 14.5
| 69
| 0.810345
| 29
| 174
| 4.482759
| 0.655172
| 0.107692
| 0.2
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019108
| 0.097701
| 174
| 11
| 70
| 15.818182
| 0.808917
| 0
| 0
| 0
| 0
| 0
| 0.047337
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.4
| 0
| 0.4
| 0.6
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
2bc366f88045d8e0fb26fb566dafc8ab43dfdefc
| 146
|
py
|
Python
|
eventi/core/admin.py
|
klebercode/lionsclub
|
60db85d44214561d20f85673e8f6c047fab07ee9
|
[
"MIT"
] | 1
|
2022-02-28T00:07:14.000Z
|
2022-02-28T00:07:14.000Z
|
eventi/core/admin.py
|
klebercode/lionsclub
|
60db85d44214561d20f85673e8f6c047fab07ee9
|
[
"MIT"
] | null | null | null |
eventi/core/admin.py
|
klebercode/lionsclub
|
60db85d44214561d20f85673e8f6c047fab07ee9
|
[
"MIT"
] | null | null | null |
# coding: utf-8
from django.contrib import admin
from eventi.core.models import Club, Info
admin.site.register(Club)
admin.site.register(Info)
| 16.222222
| 41
| 0.780822
| 23
| 146
| 4.956522
| 0.652174
| 0.157895
| 0.298246
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007752
| 0.116438
| 146
| 8
| 42
| 18.25
| 0.875969
| 0.089041
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
2bcf3f2068544439d767a23babe47f9de75492b6
| 286
|
py
|
Python
|
pypy/jit/backend/x86/test/test_quasiimmut.py
|
benoitc/pypy
|
a3e1b12d1d01dc29056b7badc051ffc034297658
|
[
"MIT"
] | 1
|
2020-01-21T11:10:51.000Z
|
2020-01-21T11:10:51.000Z
|
pypy/jit/backend/x86/test/test_quasiimmut.py
|
benoitc/pypy
|
a3e1b12d1d01dc29056b7badc051ffc034297658
|
[
"MIT"
] | null | null | null |
pypy/jit/backend/x86/test/test_quasiimmut.py
|
benoitc/pypy
|
a3e1b12d1d01dc29056b7badc051ffc034297658
|
[
"MIT"
] | null | null | null |
import py
from pypy.jit.backend.x86.test.test_basic import Jit386Mixin
from pypy.jit.metainterp.test import test_quasiimmut
class TestLoopSpec(Jit386Mixin, test_quasiimmut.QuasiImmutTests):
# for the individual tests see
# ====> ../../../metainterp/test/test_loop.py
pass
| 28.6
| 65
| 0.758741
| 37
| 286
| 5.756757
| 0.594595
| 0.075117
| 0.103286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.032129
| 0.129371
| 286
| 9
| 66
| 31.777778
| 0.823293
| 0.251748
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.2
| 0.6
| 0
| 0.8
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
2bd347c1734d1e220233fe8467a010ceab1c41ab
| 120
|
py
|
Python
|
parallel/__init__.py
|
MSU-MLSys-Lab/CATE
|
654c393d7df888d2c3f3b90f9e6752faa061157e
|
[
"Apache-2.0"
] | 15
|
2021-06-09T00:50:53.000Z
|
2022-03-15T07:01:43.000Z
|
parallel/__init__.py
|
MSU-MLSys-Lab/CATE
|
654c393d7df888d2c3f3b90f9e6752faa061157e
|
[
"Apache-2.0"
] | null | null | null |
parallel/__init__.py
|
MSU-MLSys-Lab/CATE
|
654c393d7df888d2c3f3b90f9e6752faa061157e
|
[
"Apache-2.0"
] | 4
|
2021-06-09T01:01:43.000Z
|
2021-11-03T06:16:50.000Z
|
from .parallel import DataParallelModel, DataParallelCriterion
__all__ = ["DataParallelModel", "DataParallelCriterion"]
| 40
| 62
| 0.841667
| 8
| 120
| 12.125
| 0.75
| 0.783505
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.075
| 120
| 3
| 63
| 40
| 0.873874
| 0
| 0
| 0
| 0
| 0
| 0.31405
| 0.173554
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
2bdc74e67c215c83e64d2416221be3804da9d5cb
| 883
|
py
|
Python
|
files/carinha.py
|
joaovpassos/USP-Programs
|
09ddb8aed238df1f1a2e80afdc202ac4538daf41
|
[
"MIT"
] | 2
|
2021-05-26T19:14:16.000Z
|
2021-05-27T21:14:24.000Z
|
files/carinha.py
|
joaovpassos/USP-Programs
|
09ddb8aed238df1f1a2e80afdc202ac4538daf41
|
[
"MIT"
] | null | null | null |
files/carinha.py
|
joaovpassos/USP-Programs
|
09ddb8aed238df1f1a2e80afdc202ac4538daf41
|
[
"MIT"
] | null | null | null |
x = float(input("Digite x: "))
y = float(input("Digite y: "))
if 0 <= x <= 8 and 0 <= y <= 8:
if (0 <= x < 1 or 7 < x <= 8) and (0 <= y < 2): #pescoço
print("branco")
elif 3.5 <= x <= 4.5 and 3.5 <= y <= 4.5: #nariz
print("branco")
elif (1 <= x <= 3 or 5 <= x <= 7) and (7.25 <= y <= 7.75): #sobrancelha
print("branco")
elif (((x-2)**2 + (y-6)**2 <= 1**2) and not ((x-2)**2 + (y-6)**2 <= 0.5**2)): #olho esquerdo
print("branco")
elif (((x-6)**2 + (y-6)**2 <= 1**2) and not ((x-6)**2 + (y-6)**2 <= 0.5**2)): #olho direito
print("branco")
elif 3 < x < 5 and 1.5 < y < 2.5: #boca
print("branco")
elif ((x-3)**2 + (y-2)**2 < 0.5**2): #boca esquerda
print("branco")
elif ((x-5)**2 + (y-2)**2 < 0.5**2): #boca direita
print("branco")
else:
print("azul")
else:
print("branco")
| 35.32
| 96
| 0.437146
| 160
| 883
| 2.4125
| 0.2125
| 0.256477
| 0.272021
| 0.165803
| 0.233161
| 0.196891
| 0.181347
| 0.181347
| 0.067358
| 0
| 0
| 0.118211
| 0.291053
| 883
| 24
| 97
| 36.791667
| 0.498403
| 0.087203
| 0
| 0.478261
| 0
| 0
| 0.097744
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.434783
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
920f5ebefa994608aeedc97909ba734c752a93a4
| 3,950
|
py
|
Python
|
tests/vision/metrics/vqa_test.py
|
shunk031/allennlp-models
|
d37c5fadeef9326808a84dda0bcfd210a078d6b1
|
[
"Apache-2.0"
] | 402
|
2020-03-11T22:58:35.000Z
|
2022-03-29T09:05:27.000Z
|
tests/vision/metrics/vqa_test.py
|
staceywhitmore-inl/allennlp-models
|
1e89d5e51cb45f3e77a48d4983bf980088334fac
|
[
"Apache-2.0"
] | 116
|
2020-03-11T01:26:57.000Z
|
2022-03-25T13:03:56.000Z
|
tests/vision/metrics/vqa_test.py
|
staceywhitmore-inl/allennlp-models
|
1e89d5e51cb45f3e77a48d4983bf980088334fac
|
[
"Apache-2.0"
] | 140
|
2020-03-11T00:51:35.000Z
|
2022-03-29T09:05:36.000Z
|
from typing import Any, Dict, List, Tuple, Union
import pytest
import torch
from allennlp.common.testing import (
AllenNlpTestCase,
multi_device,
global_distributed_metric,
run_distributed_test,
)
from allennlp_models.vision import VqaMeasure
class VqaMeasureTest(AllenNlpTestCase):
@multi_device
def test_vqa(self, device: str):
vqa = VqaMeasure()
logits = torch.tensor(
[[0.35, 0.25, 0.1, 0.1, 0.2], [0.1, 0.6, 0.1, 0.2, 0.0]], device=device
)
labels = torch.tensor([[0], [3]], device=device)
label_weights = torch.tensor([[1 / 3], [2 / 3]], device=device)
vqa(logits, labels, label_weights)
vqa_score = vqa.get_metric()["score"]
assert vqa_score == pytest.approx((1 / 3) / 2)
@multi_device
def test_vqa_accumulates_and_resets_correctly(self, device: str):
vqa = VqaMeasure()
logits = torch.tensor(
[[0.35, 0.25, 0.1, 0.1, 0.2], [0.1, 0.6, 0.1, 0.2, 0.0]], device=device
)
labels = torch.tensor([[0], [3]], device=device)
labels2 = torch.tensor([[4], [4]], device=device)
label_weights = torch.tensor([[1 / 3], [2 / 3]], device=device)
vqa(logits, labels, label_weights)
vqa(logits, labels, label_weights)
vqa(logits, labels2, label_weights)
vqa(logits, labels2, label_weights)
vqa_score = vqa.get_metric(reset=True)["score"]
assert vqa_score == pytest.approx((1 / 3 + 1 / 3 + 0 + 0) / 8)
vqa(logits, labels, label_weights)
vqa_score = vqa.get_metric(reset=True)["score"]
assert vqa_score == pytest.approx((1 / 3) / 2)
@multi_device
def test_does_not_divide_by_zero_with_no_count(self, device: str):
vqa = VqaMeasure()
assert vqa.get_metric()["score"] == pytest.approx(0.0)
def test_distributed_accuracy(self):
logits = [
torch.tensor([[0.35, 0.25, 0.1, 0.1, 0.2]]),
torch.tensor([[0.1, 0.6, 0.1, 0.2, 0.0]]),
]
labels = [torch.tensor([[0]]), torch.tensor([[3]])]
label_weights = [torch.tensor([[1 / 3]]), torch.tensor([[2 / 3]])]
metric_kwargs = {"logits": logits, "labels": labels, "label_weights": label_weights}
desired_accuracy = {"score": (1 / 3) / 2}
run_distributed_test(
[-1, -1],
global_distributed_metric,
VqaMeasure(),
metric_kwargs,
desired_accuracy,
exact=False,
)
def test_distributed_accuracy_unequal_batches(self):
logits = [
torch.tensor([[0.35, 0.25, 0.1, 0.1, 0.2], [0.35, 0.25, 0.1, 0.1, 0.2]]),
torch.tensor([[0.1, 0.6, 0.1, 0.2, 0.0]]),
]
labels = [torch.tensor([[0], [0]]), torch.tensor([[3]])]
label_weights = [torch.tensor([[1], [1]]), torch.tensor([[1 / 3]])]
metric_kwargs = {"logits": logits, "labels": labels, "label_weights": label_weights}
desired_accuracy = {"score": (1 + 1 + 0) / 3}
run_distributed_test(
[-1, -1],
global_distributed_metric,
VqaMeasure(),
metric_kwargs,
desired_accuracy,
exact=False,
)
def test_multiple_distributed_runs(self):
logits = [
torch.tensor([[0.35, 0.25, 0.1, 0.1, 0.2]]),
torch.tensor([[0.1, 0.6, 0.1, 0.2, 0.0]]),
]
labels = [torch.tensor([[0]]), torch.tensor([[3]])]
label_weights = [torch.tensor([[1 / 3]]), torch.tensor([[2 / 3]])]
metric_kwargs = {"logits": logits, "labels": labels, "label_weights": label_weights}
desired_accuracy = {"score": (1 / 3) / 2}
run_distributed_test(
[-1, -1],
global_distributed_metric,
VqaMeasure(),
metric_kwargs,
desired_accuracy,
exact=True,
number_of_runs=200,
)
| 36.238532
| 92
| 0.552911
| 509
| 3,950
| 4.123772
| 0.147348
| 0.131015
| 0.031444
| 0.020962
| 0.775131
| 0.75131
| 0.75131
| 0.745593
| 0.718914
| 0.701286
| 0
| 0.067941
| 0.284557
| 3,950
| 108
| 93
| 36.574074
| 0.674805
| 0
| 0
| 0.645833
| 0
| 0
| 0.027848
| 0
| 0
| 0
| 0
| 0
| 0.041667
| 1
| 0.0625
| false
| 0
| 0.052083
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
9216100e8c2e43d2e0d3473e279433eb80f0be8a
| 66
|
py
|
Python
|
splearn/utils/__init__.py
|
Treers/spark-learn
|
ba814b51041c7dbff33c687e7d601e5c51f31c3d
|
[
"MIT"
] | null | null | null |
splearn/utils/__init__.py
|
Treers/spark-learn
|
ba814b51041c7dbff33c687e7d601e5c51f31c3d
|
[
"MIT"
] | null | null | null |
splearn/utils/__init__.py
|
Treers/spark-learn
|
ba814b51041c7dbff33c687e7d601e5c51f31c3d
|
[
"MIT"
] | null | null | null |
#coding: utf-8
'''
@Time: 2019/4/25 11:15
@Author: fangyoucai
'''
| 11
| 22
| 0.621212
| 11
| 66
| 3.727273
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.210526
| 0.136364
| 66
| 5
| 23
| 13.2
| 0.508772
| 0.848485
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
9218ee2ecb9207604449852e8a31eab96059f1e2
| 106
|
py
|
Python
|
build_gpcr/management/commands/build_text.py
|
pszgaspar/protwis
|
4989a67175ef3c95047d795c843cf6b9cf4141fa
|
[
"Apache-2.0"
] | 21
|
2016-01-20T09:33:14.000Z
|
2021-12-20T19:19:45.000Z
|
build_gpcr/management/commands/build_text.py
|
pszgaspar/protwis
|
4989a67175ef3c95047d795c843cf6b9cf4141fa
|
[
"Apache-2.0"
] | 75
|
2016-02-26T16:29:58.000Z
|
2022-03-21T12:35:13.000Z
|
build_gpcr/management/commands/build_text.py
|
pszgaspar/protwis
|
4989a67175ef3c95047d795c843cf6b9cf4141fa
|
[
"Apache-2.0"
] | 77
|
2016-01-22T08:44:26.000Z
|
2022-02-01T15:54:56.000Z
|
from build.management.commands.build_text import Command as BuildText
class Command(BuildText):
pass
| 21.2
| 69
| 0.811321
| 14
| 106
| 6.071429
| 0.785714
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.132075
| 106
| 5
| 70
| 21.2
| 0.923913
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.333333
| 0.333333
| 0
| 0.666667
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
|
0
| 5
|
921cf7335f41595219af07b2dffd5f74a9cfc15d
| 40
|
py
|
Python
|
satori/sysinfo/__init__.py
|
mgeisler/satori
|
dea382bae1cd043189589c0f7d4c20b4b6725ab5
|
[
"Apache-2.0"
] | 1
|
2015-01-18T19:56:28.000Z
|
2015-01-18T19:56:28.000Z
|
satori/sysinfo/__init__.py
|
samstav/satori
|
239fa1e3c7aac78599145c670576f0ac76a41a89
|
[
"Apache-2.0"
] | null | null | null |
satori/sysinfo/__init__.py
|
samstav/satori
|
239fa1e3c7aac78599145c670576f0ac76a41a89
|
[
"Apache-2.0"
] | null | null | null |
"""Modules for Data Plane Discovery."""
| 20
| 39
| 0.7
| 5
| 40
| 5.6
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 40
| 1
| 40
| 40
| 0.8
| 0.825
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
a6192df53d0f19acab254e85fa3cfaa6abba3386
| 28
|
py
|
Python
|
models/__init__.py
|
Rozi1/MobileNetV2_CIFAR10
|
5e3a7cac963c9b84d8efc60984bda6956ba8ec26
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
Rozi1/MobileNetV2_CIFAR10
|
5e3a7cac963c9b84d8efc60984bda6956ba8ec26
|
[
"MIT"
] | null | null | null |
models/__init__.py
|
Rozi1/MobileNetV2_CIFAR10
|
5e3a7cac963c9b84d8efc60984bda6956ba8ec26
|
[
"MIT"
] | null | null | null |
from .mobilenetv2 import *
| 9.333333
| 26
| 0.75
| 3
| 28
| 7
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.043478
| 0.178571
| 28
| 2
| 27
| 14
| 0.869565
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
a63318a3fdd93456eccbc73216923e5d8710b3ba
| 2,574
|
py
|
Python
|
spring_cloud/commons/client/loadbalancer/supplier/base.py
|
haribo0915/Spring-Cloud-in-Python
|
0bcd7093869c797df14428bf2d1b0a779f96e573
|
[
"Apache-2.0"
] | 5
|
2020-10-06T09:48:23.000Z
|
2020-10-07T13:19:46.000Z
|
spring_cloud/commons/client/loadbalancer/supplier/base.py
|
haribo0915/Spring-Cloud-in-Python
|
0bcd7093869c797df14428bf2d1b0a779f96e573
|
[
"Apache-2.0"
] | 5
|
2020-10-05T09:57:01.000Z
|
2020-10-12T19:52:48.000Z
|
spring_cloud/commons/client/loadbalancer/supplier/base.py
|
haribo0915/Spring-Cloud-in-Python
|
0bcd7093869c797df14428bf2d1b0a779f96e573
|
[
"Apache-2.0"
] | 8
|
2020-10-05T06:34:49.000Z
|
2020-10-07T13:19:46.000Z
|
# -*- coding: utf-8 -*-
"""
Since the load-balancer is responsible for choosing one instance
per service request from a list of instances. We need a ServiceInstanceListSupplier for
each service to decouple the source of the instances from load-balancers.
"""
# standard library
from abc import ABC, abstractmethod
from typing import List
# scip plugin
from spring_cloud.commons.client import ServiceInstance
from spring_cloud.commons.client.discovery import DiscoveryClient
__author__ = "Waterball (johnny850807@gmail.com)"
__license__ = "Apache 2.0"
class ServiceInstanceListSupplier(ABC):
"""
Non-Reactive version of ServiceInstanceListSupplier.
(Spring Cloud implement the supplier in the reactive way, means that
its supplier returns an Observable which broadcasts the instances on every change.)
We may consider to adopt reactive programming in the future.
"""
@property
@abstractmethod
def service_id(self) -> str:
"""
:return: (str) the service's id
"""
pass
@abstractmethod
def get(self, request=None) -> List[ServiceInstance]:
"""
:param request (opt) TODO not sure will we need this,
this extension was designed by spring-cloud.
:return: (*ServiceInstance) a list of instances
"""
pass
class FixedServiceInstanceListSupplier(ServiceInstanceListSupplier):
"""
A supplier that is initialized with fixed instances. (i.e. they won't be changed)
"""
def __init__(self, service_id: str, instances: List[ServiceInstance]):
"""
:param service_id: (str)
:param instances: (*ServiceInstance)
"""
self._service_id = service_id
self._instances = instances
def get(self, request=None) -> List[ServiceInstance]:
return self._instances
@property
def service_id(self) -> str:
return self._service_id
class DiscoveryClientServiceInstanceListSupplier(ServiceInstanceListSupplier):
"""
The adapter delegating to discovery client for querying instances
"""
def __init__(self, service_id: str, discovery_client: DiscoveryClient):
"""
:param service_id: (str)
:param discovery_client: (DiscoveryClient)
"""
self.__service_id = service_id
self.__delegate = discovery_client
@property
def service_id(self) -> str:
return self.__service_id
def get(self, request=None) -> List[ServiceInstance]:
return self.__delegate.get_instances(self.service_id)
| 29.930233
| 87
| 0.685315
| 287
| 2,574
| 5.979094
| 0.400697
| 0.073427
| 0.05303
| 0.027972
| 0.265152
| 0.206876
| 0.135198
| 0.111888
| 0.111888
| 0.053613
| 0
| 0.00455
| 0.231546
| 2,574
| 85
| 88
| 30.282353
| 0.862993
| 0.395882
| 0
| 0.40625
| 0
| 0
| 0.032258
| 0.017595
| 0
| 0
| 0
| 0.011765
| 0
| 1
| 0.25
| false
| 0.0625
| 0.125
| 0.125
| 0.59375
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
|
0
| 5
|
a6815c7893a0d94c3e1ad438c9c59e228b6c752c
| 522
|
py
|
Python
|
InquirerPy/prompts/__init__.py
|
jfilipedias/InquirerPy
|
9f67125f808cbe6a73ab3cb652f35faba3f3443e
|
[
"MIT"
] | null | null | null |
InquirerPy/prompts/__init__.py
|
jfilipedias/InquirerPy
|
9f67125f808cbe6a73ab3cb652f35faba3f3443e
|
[
"MIT"
] | null | null | null |
InquirerPy/prompts/__init__.py
|
jfilipedias/InquirerPy
|
9f67125f808cbe6a73ab3cb652f35faba3f3443e
|
[
"MIT"
] | null | null | null |
"""Module contains import of all prompts classes."""
from InquirerPy.prompts.checkbox import CheckboxPrompt
from InquirerPy.prompts.confirm import ConfirmPrompt
from InquirerPy.prompts.expand import ExpandPrompt
from InquirerPy.prompts.filepath import FilePathPrompt
from InquirerPy.prompts.fuzzy.fuzzy import FuzzyPrompt
from InquirerPy.prompts.input import InputPrompt
from InquirerPy.prompts.list import ListPrompt
from InquirerPy.prompts.rawlist import RawlistPrompt
from InquirerPy.prompts.secret import SecretPrompt
| 47.454545
| 54
| 0.867816
| 62
| 522
| 7.306452
| 0.435484
| 0.278146
| 0.417219
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.082375
| 522
| 10
| 55
| 52.2
| 0.94572
| 0.088123
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
a6957adb4b1b1df4357ebf29509b3e8d10fca9bb
| 357
|
py
|
Python
|
lib/core/errors.py
|
h3ar7dump/Zeus-Scanner
|
e5ac6316d351851883c7a895999b92846380392c
|
[
"RSA-MD"
] | 1
|
2019-11-29T10:11:01.000Z
|
2019-11-29T10:11:01.000Z
|
lib/core/errors.py
|
h3ar7dump/Zeus-Scanner
|
e5ac6316d351851883c7a895999b92846380392c
|
[
"RSA-MD"
] | null | null | null |
lib/core/errors.py
|
h3ar7dump/Zeus-Scanner
|
e5ac6316d351851883c7a895999b92846380392c
|
[
"RSA-MD"
] | null | null | null |
class InvalidProxyType(Exception): pass
class ApiConnectionError(Exception): pass
class ApplicationNotFound(Exception): pass
class SqlmapFailedStart(Exception): pass
class SpiderTestFailure(Exception): pass
class InvalidInputProvided(Exception): pass
class InvalidTamperProvided(Exception): pass
class PortScanTimeOutException(Exception): pass
| 16.227273
| 47
| 0.829132
| 32
| 357
| 9.25
| 0.34375
| 0.351351
| 0.425676
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103641
| 357
| 22
| 47
| 16.227273
| 0.925
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
|
0
| 5
|
a6e4911a102a56bec265217b599cb065b431fc4f
| 106
|
py
|
Python
|
srv/service/__init__.py
|
mantou22/SC_system
|
0c048c1ba678e378e62bb046b39c1a0f7792adee
|
[
"MulanPSL-1.0"
] | null | null | null |
srv/service/__init__.py
|
mantou22/SC_system
|
0c048c1ba678e378e62bb046b39c1a0f7792adee
|
[
"MulanPSL-1.0"
] | 1
|
2021-09-01T03:28:39.000Z
|
2021-09-01T03:28:39.000Z
|
srv/service/__init__.py
|
mantou22/SC_system
|
0c048c1ba678e378e62bb046b39c1a0f7792adee
|
[
"MulanPSL-1.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: UTF-8 -*-
"""
@author:MT
@file:__init__.py.py
@time:2021/8/21 23:02
"""
| 15.142857
| 24
| 0.575472
| 18
| 106
| 3.166667
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.130435
| 0.132075
| 106
| 7
| 25
| 15.142857
| 0.48913
| 0.896226
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
47037400ff4e76fc857f6ee41c232c1a45585226
| 148
|
py
|
Python
|
graphgallery/nn/models/pytorch/autoencoder/__init__.py
|
TobiasSchmidtDE/GraphGallery
|
e627e4f454e0ce3813171305a524f5190a6e6f45
|
[
"MIT"
] | null | null | null |
graphgallery/nn/models/pytorch/autoencoder/__init__.py
|
TobiasSchmidtDE/GraphGallery
|
e627e4f454e0ce3813171305a524f5190a6e6f45
|
[
"MIT"
] | null | null | null |
graphgallery/nn/models/pytorch/autoencoder/__init__.py
|
TobiasSchmidtDE/GraphGallery
|
e627e4f454e0ce3813171305a524f5190a6e6f45
|
[
"MIT"
] | null | null | null |
from .decoder import InnerProductDecoder
from .autoencoder import AutoEncoder
from .loss import BCELoss
from .gae import GAE
from .vgae import VGAE
| 24.666667
| 40
| 0.831081
| 20
| 148
| 6.15
| 0.45
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.135135
| 148
| 5
| 41
| 29.6
| 0.960938
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
5b2d3bb33aaf00064a4a81aefce63447edda3498
| 100
|
py
|
Python
|
Python-Hackerrank/Eye and Identity.py
|
nihalkhan2810/Data-Structures-Algorithms
|
3224fba18dcdbb1c6cc3d7f7a16df4ed0e0a321d
|
[
"MIT"
] | 10
|
2020-05-02T14:42:15.000Z
|
2021-01-26T16:51:47.000Z
|
Python-Hackerrank/Eye and Identity.py
|
nihalkhan2810/Data-Structures-Algorithms
|
3224fba18dcdbb1c6cc3d7f7a16df4ed0e0a321d
|
[
"MIT"
] | null | null | null |
Python-Hackerrank/Eye and Identity.py
|
nihalkhan2810/Data-Structures-Algorithms
|
3224fba18dcdbb1c6cc3d7f7a16df4ed0e0a321d
|
[
"MIT"
] | 13
|
2020-03-05T13:31:11.000Z
|
2021-01-29T08:14:26.000Z
|
import numpy
print(str(numpy.eye(*map(int,input().split()))).replace('1',' 1').replace('0',' 0'))
| 20
| 84
| 0.61
| 16
| 100
| 3.8125
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.043011
| 0.07
| 100
| 4
| 85
| 25
| 0.612903
| 0
| 0
| 0
| 0
| 0
| 0.061224
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
5b695471ab29b797681a7c5ba85992682e02db15
| 137
|
py
|
Python
|
users/admin.py
|
rossm6/accounts
|
74633ce4038806222048d85ef9dfe97a957a6a71
|
[
"MIT"
] | 11
|
2021-01-23T01:09:54.000Z
|
2021-01-25T07:16:30.000Z
|
users/admin.py
|
rossm6/accounts
|
74633ce4038806222048d85ef9dfe97a957a6a71
|
[
"MIT"
] | 7
|
2021-04-06T18:19:10.000Z
|
2021-09-22T19:45:03.000Z
|
users/admin.py
|
rossm6/accounts
|
74633ce4038806222048d85ef9dfe97a957a6a71
|
[
"MIT"
] | 3
|
2021-01-23T18:55:32.000Z
|
2021-02-16T17:47:59.000Z
|
from django.contrib import admin
from users.models import Lock, UserSession
admin.site.register(Lock)
admin.site.register(UserSession)
| 19.571429
| 42
| 0.824818
| 19
| 137
| 5.947368
| 0.578947
| 0.159292
| 0.300885
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094891
| 137
| 6
| 43
| 22.833333
| 0.91129
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
5b6fbfdce0aeea51fda85018f63eb008c4350c82
| 174
|
py
|
Python
|
gluoncv/data/video_custom/__init__.py
|
Kh4L/gluon-cv
|
849411ed56632cd854850b07142087d599f97dcb
|
[
"Apache-2.0"
] | 5,447
|
2018-04-25T18:02:51.000Z
|
2022-03-31T00:59:49.000Z
|
gluoncv/data/video_custom/__init__.py
|
Kh4L/gluon-cv
|
849411ed56632cd854850b07142087d599f97dcb
|
[
"Apache-2.0"
] | 1,566
|
2018-04-25T21:14:04.000Z
|
2022-03-31T06:42:42.000Z
|
gluoncv/data/video_custom/__init__.py
|
Kh4L/gluon-cv
|
849411ed56632cd854850b07142087d599f97dcb
|
[
"Apache-2.0"
] | 1,345
|
2018-04-25T18:44:13.000Z
|
2022-03-30T19:32:53.000Z
|
# pylint: disable=wildcard-import
"""
Customized data loader for video classification related tasks.
"""
from __future__ import absolute_import
from .classification import *
| 24.857143
| 62
| 0.804598
| 20
| 174
| 6.75
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12069
| 174
| 6
| 63
| 29
| 0.882353
| 0.545977
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
751f7bec046094aa0251500c529606eae0d8aaaa
| 62
|
py
|
Python
|
projects/thesis/continuous/custom/evaluation/__init__.py
|
cpark90/rrrcnn
|
ba66cc391265be76fa3896b66459ff7241b47972
|
[
"Apache-2.0"
] | null | null | null |
projects/thesis/continuous/custom/evaluation/__init__.py
|
cpark90/rrrcnn
|
ba66cc391265be76fa3896b66459ff7241b47972
|
[
"Apache-2.0"
] | null | null | null |
projects/thesis/continuous/custom/evaluation/__init__.py
|
cpark90/rrrcnn
|
ba66cc391265be76fa3896b66459ff7241b47972
|
[
"Apache-2.0"
] | null | null | null |
from .classification_evaluation import ClassificationEvaluator
| 62
| 62
| 0.935484
| 5
| 62
| 11.4
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.048387
| 62
| 1
| 62
| 62
| 0.966102
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
75217fe93a80109a6e221e14b5e96a11ae8b35fe
| 89
|
py
|
Python
|
api/src/app/settings.py
|
mdcg/fastapi-processes-microservices
|
cfa2309e0cb6b811525925cfa447c6f6dff37b3a
|
[
"MIT"
] | null | null | null |
api/src/app/settings.py
|
mdcg/fastapi-processes-microservices
|
cfa2309e0cb6b811525925cfa447c6f6dff37b3a
|
[
"MIT"
] | null | null | null |
api/src/app/settings.py
|
mdcg/fastapi-processes-microservices
|
cfa2309e0cb6b811525925cfa447c6f6dff37b3a
|
[
"MIT"
] | null | null | null |
from decouple import config
MEDIA_PATH = config("MEDIA_PATH", "/home/mdcg/Documents/")
| 17.8
| 58
| 0.752809
| 12
| 89
| 5.416667
| 0.75
| 0.338462
| 0.461538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.11236
| 89
| 4
| 59
| 22.25
| 0.822785
| 0
| 0
| 0
| 0
| 0
| 0.348315
| 0.235955
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
7544963ba7d28d1172e33eda501c69199e904e17
| 86
|
py
|
Python
|
g3_metaconfig/__init__.py
|
BigGreenDelta/g3-metaconfig
|
5a75b63986fa42e70cdc7863b1d7436e20e1c4f9
|
[
"MIT"
] | null | null | null |
g3_metaconfig/__init__.py
|
BigGreenDelta/g3-metaconfig
|
5a75b63986fa42e70cdc7863b1d7436e20e1c4f9
|
[
"MIT"
] | 2
|
2022-01-17T08:51:38.000Z
|
2022-02-24T21:42:36.000Z
|
g3_metaconfig/__init__.py
|
BigGreenDelta/g3-metaconfig
|
5a75b63986fa42e70cdc7863b1d7436e20e1c4f9
|
[
"MIT"
] | null | null | null |
from .metaclass import G3ConfigMeta, Config, ArgParserConfig
from .param import Param
| 28.666667
| 60
| 0.837209
| 10
| 86
| 7.2
| 0.7
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013158
| 0.116279
| 86
| 2
| 61
| 43
| 0.934211
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
7554f9acad191edf71d5e0947b588d081ddd4ae3
| 32
|
py
|
Python
|
augmenty/lang/ru/__init__.py
|
koaning/augmenty
|
13dbdbb5fd56b36c97678ae48d1e0d869987f6dd
|
[
"MIT"
] | null | null | null |
augmenty/lang/ru/__init__.py
|
koaning/augmenty
|
13dbdbb5fd56b36c97678ae48d1e0d869987f6dd
|
[
"MIT"
] | 1
|
2022-03-12T02:25:00.000Z
|
2022-03-12T02:26:01.000Z
|
augmenty/lang/ru/__init__.py
|
HishamKhdair/augmenty
|
a65a7beac410f53706bb7838026f2bac9b89d544
|
[
"MIT"
] | null | null | null |
from .keyboard import create_ru
| 16
| 31
| 0.84375
| 5
| 32
| 5.2
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.125
| 32
| 1
| 32
| 32
| 0.928571
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
340b77b6abbc62dbe070ea6034ab0b693a686510
| 5,189
|
py
|
Python
|
API/conftest.py
|
BahrmaLe/otus_python_homework
|
510a4f1971b35048d760fcc45098e511b81bea31
|
[
"MIT"
] | 1
|
2021-02-25T15:37:21.000Z
|
2021-02-25T15:37:21.000Z
|
API/conftest.py
|
BahrmaLe/otus_python_homework
|
510a4f1971b35048d760fcc45098e511b81bea31
|
[
"MIT"
] | null | null | null |
API/conftest.py
|
BahrmaLe/otus_python_homework
|
510a4f1971b35048d760fcc45098e511b81bea31
|
[
"MIT"
] | null | null | null |
"""Fixtures for tests.py (Dogs API testing)"""
import pytest
import requests
URLS = ["https://dog.ceo/dog-api/",
"https://dog.ceo/api/breeds/list/all",
"https://dog.ceo/api/breeds/image/random",
"https://dog.ceo/api/breeds/image/random/3",
"https://dog.ceo/api/breed/hound/images",
"https://dog.ceo/api/breed/hound/images/random",
"https://dog.ceo/api/breed/hound/images/random/3",
"https://dog.ceo/api/breed/hound/list",
"https://dog.ceo/api/breed/hound/afghan/images",
"https://dog.ceo/api/breed/hound/afghan/images/random",
"https://dog.ceo/api/breed/hound/afghan/images/random/3", ]
"""List general ULRS with Dogs API"""
HEADERS = [{"Content-type": "application/json"}, {"Content-type": "text/html"}, {}]
PAIRS = [(url, header) for url in URLS for header in HEADERS]
@pytest.fixture(params=PAIRS)
def pairs_of_response(request):
"""pairwise testing for content-type, headers in responses for all urls """
response = requests.get(request.param[0], headers=request.param[1])
print(request.param[0])
print(request.param[1])
return response
@pytest.fixture()
def listallbreeds():
"""GET Request to https://dog.ceo/api/breeds/list/all and return Json data"""
response = requests.get(URLS[1])
json_data = response.json()
return json_data
@pytest.fixture()
def randomimage():
"""GET Request to "https://dog.ceo/api/breeds/image/random/3"
and return Json data with random image"""
class Randomimage():
""""I don't know why it here, I am google it"""
@staticmethod
def get_random_image():
"""Function for class"""
response = requests.get(URLS[2])
json_data = response.json()
return json_data
return Randomimage()
@pytest.fixture()
def randomthreeimage():
"""GET Request to "https://dog.ceo/api/breeds/image/random"
and return Json data with three random image"""
class Randomimage():
""""I don't know why it here, I am google it"""
@staticmethod
def get_random_three_image():
"""Function for class"""
response = requests.get(URLS[3])
json_data = response.json()
return json_data
return Randomimage()
@pytest.fixture()
def list_of_breed():
"""GET Request to "https://dog.ceo/api/breed/hound/images"
and return Json data with list а all images by breed
"hound" """
response = requests.get(URLS[4])
json_data = response.json()
return json_data
@pytest.fixture()
def get_random_image_by_breed():
"""GET Request to "https://dog.ceo/api/breed/hound/images/random"
and return Json data with random image by breed"""
class Randomimage():
""""I don't know why it here, I am google it"""
@staticmethod
def get_random_image():
"""Function for class"""
response = requests.get(URLS[5])
json_data = response.json()
return json_data
return Randomimage()
@pytest.fixture()
def get_random_three_image_by_breed():
"""GET Request to "https://dog.ceo/api/breed/hound/images/random/3"
and return Json data with random three image
by breed """
class Randomimage():
""""I don't know why it here, I am google it"""
@staticmethod
def get_random_three_image():
"""Function for class"""
response = requests.get(URLS[6])
json_data = response.json()
return json_data
return Randomimage()
@pytest.fixture()
def listallsubbreeds():
"""GET Request to "https://dog.ceo/api/breed/hound/images"
and return Json data with list а all images by sub-breeds
for "hound" """
response = requests.get(URLS[7])
json_data = response.json()
return json_data
@pytest.fixture()
def list_of_subbreed():
"""GET Request to "https://dog.ceo/api/breed/hound/afghan/images"
and return Json data with list а all images by
sub-breed "afghan" """
response = requests.get(URLS[8])
json_data = response.json()
return json_data
@pytest.fixture()
def get_random_image_by_subbreed():
"""GET Request to "https://dog.ceo/api/breed/hound/afghan/images/random"
and return Json data with random image
by sub-breed "afghan" """
class Randomimage():
""""I don't know why it here, I am google it"""
@staticmethod
def get_random_image():
"""Function for class"""
response = requests.get(URLS[9])
json_data = response.json()
return json_data
return Randomimage()
@pytest.fixture()
def get_random_three_image_by_subbreed():
"""GET Request to "https://dog.ceo/api/breed/hound/afghan/images/random/3"
and return Json data with three random
image by sub-breed "afghan" """
class Randomimage():
""""I don't know why it here, I am google it"""
@staticmethod
def get_random_three_image():
"""Function for class"""
response = requests.get(URLS[10])
json_data = response.json()
return json_data
return Randomimage()
| 31.259036
| 83
| 0.631143
| 696
| 5,189
| 4.62069
| 0.12069
| 0.074627
| 0.071828
| 0.087065
| 0.818408
| 0.795709
| 0.795709
| 0.773632
| 0.685012
| 0.644279
| 0
| 0.005266
| 0.231451
| 5,189
| 165
| 84
| 31.448485
| 0.801153
| 0.313355
| 0
| 0.568421
| 0
| 0
| 0.150972
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.178947
| false
| 0
| 0.021053
| 0
| 0.442105
| 0.021053
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
340fcc7ec477179690e4471488c43b411756338d
| 184
|
py
|
Python
|
fastapi-transformer-baseline/app/main.py
|
DeDeckerThomas/NLPiP
|
37b4d146f5d9760f779f724c4a0698930f59d6d1
|
[
"Apache-2.0"
] | 3
|
2022-03-10T13:33:24.000Z
|
2022-03-31T13:31:30.000Z
|
fastapi-transformer-baseline/app/main.py
|
DeDeckerThomas/NLPiP
|
37b4d146f5d9760f779f724c4a0698930f59d6d1
|
[
"Apache-2.0"
] | null | null | null |
fastapi-transformer-baseline/app/main.py
|
DeDeckerThomas/NLPiP
|
37b4d146f5d9760f779f724c4a0698930f59d6d1
|
[
"Apache-2.0"
] | null | null | null |
from fastapi import FastAPI
from routers.api_router import api_router
from core.config import settings
app: FastAPI = FastAPI(title=settings.APP_NAME)
app.include_router(api_router)
| 23
| 47
| 0.836957
| 28
| 184
| 5.321429
| 0.464286
| 0.181208
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.103261
| 184
| 7
| 48
| 26.285714
| 0.90303
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
347d886b920493c8af185f7166a5e1089d5bdb0f
| 23
|
py
|
Python
|
test/example/__init__.py
|
fieldOfView/izzyPythonPlugin
|
deb0dca2a4294cd74aa18ab1228de10eceba1266
|
[
"MIT"
] | 6
|
2019-11-06T00:37:43.000Z
|
2021-12-22T02:39:45.000Z
|
example/__init__.py
|
Submanifold/cmake-cpp-pybind11
|
f22893e87977de8c619d6033b7ca5bb240451f3d
|
[
"MIT"
] | null | null | null |
example/__init__.py
|
Submanifold/cmake-cpp-pybind11
|
f22893e87977de8c619d6033b7ca5bb240451f3d
|
[
"MIT"
] | null | null | null |
from .example import *
| 11.5
| 22
| 0.73913
| 3
| 23
| 5.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.173913
| 23
| 1
| 23
| 23
| 0.894737
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
caf9fdaea0cfc21fcb1eeef6a8902847ee871447
| 177
|
py
|
Python
|
sdap_ingest_manager/history_manager/__init__.py
|
tloubrieu-jpl/incubator-sdap-nexus-ingestion-manager
|
1fedc94265056ea9f9f96e9851bfe885959893fd
|
[
"Apache-2.0"
] | null | null | null |
sdap_ingest_manager/history_manager/__init__.py
|
tloubrieu-jpl/incubator-sdap-nexus-ingestion-manager
|
1fedc94265056ea9f9f96e9851bfe885959893fd
|
[
"Apache-2.0"
] | 1
|
2020-06-08T18:12:42.000Z
|
2020-06-09T02:47:47.000Z
|
sdap_ingest_manager/history_manager/__init__.py
|
tloubrieu-jpl/incubator-sdap-nexus-ingestion-manager
|
1fedc94265056ea9f9f96e9851bfe885959893fd
|
[
"Apache-2.0"
] | null | null | null |
from .util import md5sum_from_filepath
from .datasetingestionhistorysolr import DatasetIngestionHistorySolr
from .datasetingestionhistoryfile import DatasetIngestionHistoryFile
| 44.25
| 68
| 0.915254
| 14
| 177
| 11.428571
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006061
| 0.067797
| 177
| 3
| 69
| 59
| 0.963636
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
1b0b98eac31f986426335fe4a1b639f5a2b189be
| 52
|
py
|
Python
|
sat_datetime/__init__.py
|
junhg0211/sat-datetime
|
519700011c8d17e33fd844b53a6e17563bb2abae
|
[
"MIT"
] | null | null | null |
sat_datetime/__init__.py
|
junhg0211/sat-datetime
|
519700011c8d17e33fd844b53a6e17563bb2abae
|
[
"MIT"
] | null | null | null |
sat_datetime/__init__.py
|
junhg0211/sat-datetime
|
519700011c8d17e33fd844b53a6e17563bb2abae
|
[
"MIT"
] | null | null | null |
from .sat_datetime import SatDatetime, SatTimedelta
| 26
| 51
| 0.865385
| 6
| 52
| 7.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.096154
| 52
| 1
| 52
| 52
| 0.93617
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
1b18107e731c97c78274e71e34a4e53422066da2
| 54
|
py
|
Python
|
tests/mac-logs/examples/tag/tag.py
|
andrewp-as-is/mac-logs.py
|
13f39b9541e775ef886fc3501dd04b2d06f4aa04
|
[
"Unlicense"
] | 1
|
2019-01-14T14:34:59.000Z
|
2019-01-14T14:34:59.000Z
|
tests/mac-logs/examples/tag/tag.py
|
looking-for-a-job/mac-logs.py
|
13f39b9541e775ef886fc3501dd04b2d06f4aa04
|
[
"Unlicense"
] | null | null | null |
tests/mac-logs/examples/tag/tag.py
|
looking-for-a-job/mac-logs.py
|
13f39b9541e775ef886fc3501dd04b2d06f4aa04
|
[
"Unlicense"
] | null | null | null |
#!/usr/bin/env python
import mac_logs
mac_logs.tag()
| 10.8
| 21
| 0.740741
| 10
| 54
| 3.8
| 0.8
| 0.368421
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 54
| 4
| 22
| 13.5
| 0.791667
| 0.37037
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
1b1acb3da87ff1fab8416f66740940a5554cf27f
| 5,899
|
py
|
Python
|
ProgrammingAssignments/pyretic/pyretic/tutorial/of_tutorial.py
|
Mahdi-Asaly/Coursera-SDN-Assignments
|
aac5d62f40c5283e296a0f87b7ec2de8986a8efc
|
[
"Intel"
] | null | null | null |
ProgrammingAssignments/pyretic/pyretic/tutorial/of_tutorial.py
|
Mahdi-Asaly/Coursera-SDN-Assignments
|
aac5d62f40c5283e296a0f87b7ec2de8986a8efc
|
[
"Intel"
] | null | null | null |
ProgrammingAssignments/pyretic/pyretic/tutorial/of_tutorial.py
|
Mahdi-Asaly/Coursera-SDN-Assignments
|
aac5d62f40c5283e296a0f87b7ec2de8986a8efc
|
[
"Intel"
] | null | null | null |
################################################################################
# The Pyretic Project #
# frenetic-lang.org/pyretic #
# author: Joshua Reich (jreich@cs.princeton.edu) #
################################################################################
# Licensed to the Pyretic Project by one or more contributors. See the #
# NOTICES file distributed with this work for additional information #
# regarding copyright and ownership. The Pyretic Project licenses this #
# file to you under the following license. #
# #
# Redistribution and use in source and binary forms, with or without #
# modification, are permitted provided the following conditions are met: #
# - Redistributions of source code must retain the above copyright #
# notice, this list of conditions and the following disclaimer. #
# - Redistributions in binary form must reproduce the above copyright #
# notice, this list of conditions and the following disclaimer in #
# the documentation or other materials provided with the distribution. #
# - The names of the copyright holds and contributors may not be used to #
# endorse or promote products derived from this work without specific #
# prior written permission. #
# #
# Unless required by applicable law or agreed to in writing, software #
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT #
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the #
# LICENSE file distributed with this work for specific language governing #
# permissions and limitations under the License. #
################################################################################
################################################################################
# SETUP #
# ------------------------------------------------------------------- #
# mininet: mininet.sh --topo clique,4,4 (or other single subnet) #
################################################################################
from pyretic.lib.corelib import *
from pyretic.lib.std import *
from pyretic.lib.query import *
def act_like_hub():
"""Implement hub-like behavior --- send all packets to all ports on a network
minimum spanning tree, except for the input port"""
return flood() # Return the policy flood
# we create a new dynamic policy class with the name "act_like_switch"
class act_like_switch(DynamicPolicy):
"""
Implement switch-like behavior.
"""
""" # DELETE BOTH THIS LINE AND THE ONE BELOW TO START WORKING ON THE TUTORIAL #
# Here's some psuedocode to start you off implementing a learning
# switch. You'll need to rewrite it as real Python code using Pyretic predicates
# and policies - all of which are defined and documented in pyretic/core/language.py
def __init__(self):
# Set up the initial forwarding behavior for your mac learning switch
# Tip: set up a separate variable to track this
self.forward = <some policy here>
# hint, mac learning switches start off by flooding all packets
# Set up a query that will receive new incoming packets
self.query = <a packets query for the first packet w/ a given (srcmac,switch) pair>
# Write a function to take each new packet p and update the forwarding policy
# so subsequent incoming packets on this switch whose dstmac matches p's srcmac
# (accessed like in a dictionary p['srcmac']), those packets will be forwarded out
# p's inport (pyretic packets are located, so we access this value just like srcmac
# - i.e., p['inport'])
def learn_from_a_packet(pkt):
# perhaps we want to print the incoming packet so we can see it
print pkt
# and we will need to set the forwarding policy
self.forward = <....> # hint use the 'match' policy and either
# if_(f,p1,p2) or
# a combination of parallel and sequential composition
# let's print the forwarding policy to see if it looks right
print self.forward
# and don't forget to update the dynamic policy to forward and query
# (each dynamic policy has a member 'policy'
# whenever this member is assigned, the dynamic policy updates itself)
self.policy = <forwarding and query policies composed in parallel>
# hint: 'P1 + P2' is shorthand for parallel composition of P1 and P2
# 'P1 >> P2' is shorthand for sequential composition of P1 and P2
# we need to make sure learn_from_a_packet is called back
# every time our query sees a new packet
self.query.register_callback(learn_from_a_packet)
# finally, we initialize our dynamic policy
super(act_like_switch,self).__init__(<the first value 'self.policy' should take>)
""" # DELETE BOTH THIS LINE AND THE ONE ABOVE TO START WORKING ON THE TUTORIAL #
def main():
## The main method returns the policy that will be run
## To try your code, comment the first return line and uncomment the second
### Part 0 - hub ###
return act_like_hub()
### Part 1 - write a basic mac learning module ###
# return act_like_switch()
| 55.650943
| 91
| 0.561282
| 682
| 5,899
| 4.809384
| 0.409091
| 0.012805
| 0.015854
| 0.014634
| 0.115854
| 0.092683
| 0.057927
| 0.041463
| 0.041463
| 0.041463
| 0
| 0.00344
| 0.310053
| 5,899
| 105
| 92
| 56.180952
| 0.802457
| 0.448042
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0.375
| 0.125
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 1
| 1
| 0
|
0
| 5
|
1b5728fb13835525abdbb3be08ce7804a39a37e6
| 134
|
py
|
Python
|
TemplateEngineForRESP-F3T/RoofFunctions/trigonometric_function.py
|
riku-sakamoto/RESP-ProgramTips
|
45be5afb90283e56ddf5d32681c58dec17986ba8
|
[
"MIT"
] | 1
|
2021-10-13T02:07:17.000Z
|
2021-10-13T02:07:17.000Z
|
TemplateEngineForRESP-F3T/RoofFunctions/trigonometric_function.py
|
riku-sakamoto/RESP-ProgramTips
|
45be5afb90283e56ddf5d32681c58dec17986ba8
|
[
"MIT"
] | null | null | null |
TemplateEngineForRESP-F3T/RoofFunctions/trigonometric_function.py
|
riku-sakamoto/RESP-ProgramTips
|
45be5afb90283e56ddf5d32681c58dec17986ba8
|
[
"MIT"
] | null | null | null |
# -*- coding:utf-8 -*-
import numpy as np
def sin_sin(x,y):
return 1000*abs(np.sin(x/2000*np.pi) + np.sin(y/2000.0*np.pi))+100
| 13.4
| 68
| 0.61194
| 29
| 134
| 2.793103
| 0.62069
| 0.098765
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.150442
| 0.156716
| 134
| 9
| 69
| 14.888889
| 0.566372
| 0.149254
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| false
| 0
| 0.333333
| 0.333333
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
|
0
| 5
|
1b5f5814e99811388d8791f4808e71fca930b211
| 651
|
py
|
Python
|
chapter9/msfrpc/msfrpc_connect.py
|
abbbhucho/Mastering-Python-for-Networking-and-Security
|
f4fb1131253e9daad8da501c297758fdcedfbac3
|
[
"MIT"
] | 98
|
2018-05-13T20:41:43.000Z
|
2022-03-31T00:24:01.000Z
|
chapter9/msfrpc/msfrpc_connect.py
|
Cyb3rid10ts/Mastering-Python-for-Networking-and-Security
|
4cf04d1758f17ae378b5e3422404e5b7a174a243
|
[
"MIT"
] | null | null | null |
chapter9/msfrpc/msfrpc_connect.py
|
Cyb3rid10ts/Mastering-Python-for-Networking-and-Security
|
4cf04d1758f17ae378b5e3422404e5b7a174a243
|
[
"MIT"
] | 62
|
2018-06-19T13:46:34.000Z
|
2022-02-11T05:47:24.000Z
|
# -*- encoding: utf-8 -*-
import msfrpc
client = msfrpc.Msfrpc({'uri':'/msfrpc', 'port':'5553', 'host':'127.0.0.1', 'ssl': True})
auth = client.login('msf','password')
if auth:
print str(client.call('core.version'))+'\n'
print str(client.call('core.thread_list', []))+'\n'
print str(client.call('job.list', []))+'\n'
print str(client.call('module.exploits', []))+'\n'
print str(client.call('module.auxiliary', []))+'\n'
print str(client.call('module.post', []))+'\n'
print str(client.call('module.payloads', []))+'\n'
print str(client.call('module.encoders', []))+'\n'
print str(client.call('module.nops', []))+'\n'
| 46.5
| 89
| 0.597542
| 90
| 651
| 4.311111
| 0.4
| 0.185567
| 0.324742
| 0.417526
| 0.572165
| 0.456186
| 0
| 0
| 0
| 0
| 0
| 0.019264
| 0.122888
| 651
| 14
| 90
| 46.5
| 0.660245
| 0.03533
| 0
| 0
| 0
| 0
| 0.290271
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.076923
| 0.076923
| null | null | 0.692308
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
|
0
| 5
|
1b941a731326acc3ecffebccafaa639df1c778bf
| 66,776
|
py
|
Python
|
pyboto3/sqs.py
|
thecraftman/pyboto3
|
653a0db2b00b06708334431da8f169d1f7c7734f
|
[
"MIT"
] | null | null | null |
pyboto3/sqs.py
|
thecraftman/pyboto3
|
653a0db2b00b06708334431da8f169d1f7c7734f
|
[
"MIT"
] | null | null | null |
pyboto3/sqs.py
|
thecraftman/pyboto3
|
653a0db2b00b06708334431da8f169d1f7c7734f
|
[
"MIT"
] | null | null | null |
'''
The MIT License (MIT)
Copyright (c) 2016 WavyCloud
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
'''
def add_permission(QueueUrl=None, Label=None, AWSAccountIds=None, Actions=None):
"""
Adds a permission to a queue for a specific principal . This allows sharing access to the queue.
When you create a queue, you have full control access rights for the queue. Only you, the owner of the queue, can grant or deny permissions to the queue. For more information about these permissions, see Shared Queues in the Amazon SQS Developer Guide .
See also: AWS API Documentation
:example: response = client.add_permission(
QueueUrl='string',
Label='string',
AWSAccountIds=[
'string',
],
Actions=[
'string',
]
)
:type QueueUrl: string
:param QueueUrl: [REQUIRED]
The URL of the Amazon SQS queue to which permissions are added.
Queue URLs are case-sensitive.
:type Label: string
:param Label: [REQUIRED]
The unique identification of the permission you're setting (for example, AliceSendMessage ). Maximum 80 characters. Allowed characters include alphanumeric characters, hyphens (- ), and underscores (_ ).
:type AWSAccountIds: list
:param AWSAccountIds: [REQUIRED]
The AWS account number of the principal who is given permission. The principal must have an AWS account, but does not need to be signed up for Amazon SQS. For information about locating the AWS account identification, see Your AWS Identifiers in the Amazon SQS Developer Guide .
(string) --
:type Actions: list
:param Actions: [REQUIRED]
The action the client wants to allow for the specified principal. The following values are valid:
*
ChangeMessageVisibility
DeleteMessage
GetQueueAttributes
GetQueueUrl
ReceiveMessage
SendMessage
For more information about these actions, see Understanding Permissions in the Amazon SQS Developer Guide .
Specifying SendMessage , DeleteMessage , or ChangeMessageVisibility for ActionName.n also grants permissions for the corresponding batch versions of those actions: SendMessageBatch , DeleteMessageBatch , and ChangeMessageVisibilityBatch .
(string) --
"""
pass
def can_paginate(operation_name=None):
"""
Check if an operation can be paginated.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is create_foo, and you'd normally invoke the
operation as client.create_foo(**kwargs), if the
create_foo operation can be paginated, you can use the
call client.get_paginator('create_foo').
"""
pass
def change_message_visibility(QueueUrl=None, ReceiptHandle=None, VisibilityTimeout=None):
"""
Changes the visibility timeout of a specified message in a queue to a new value. The maximum allowed timeout value is 12 hours. Thus, you can't extend the timeout of a message in an existing queue to more than a total visibility timeout of 12 hours. For more information, see Visibility Timeout in the Amazon SQS Developer Guide .
For example, you have a message and with the default visibility timeout of 5 minutes. After 3 minutes, you call ChangeMessageVisiblity with a timeout of 10 minutes. At that time, the timeout for the message is extended by 10 minutes beyond the time of the ChangeMessageVisibility action. This results in a total visibility timeout of 13 minutes. You can continue to call the ChangeMessageVisibility to extend the visibility timeout to a maximum of 12 hours. If you try to extend the visibility timeout beyond 12 hours, your request is rejected.
A message is considered to be in flight after it's received from a queue by a consumer, but not yet deleted from the queue.
For standard queues, there can be a maximum of 120,000 inflight messages per queue. If you reach this limit, Amazon SQS returns the OverLimit error message. To avoid reaching the limit, you should delete messages from the queue after they're processed. You can also increase the number of queues you use to process your messages.
For FIFO queues, there can be a maximum of 20,000 inflight messages per queue. If you reach this limit, Amazon SQS returns no error messages.
See also: AWS API Documentation
:example: response = client.change_message_visibility(
QueueUrl='string',
ReceiptHandle='string',
VisibilityTimeout=123
)
:type QueueUrl: string
:param QueueUrl: [REQUIRED]
The URL of the Amazon SQS queue whose message's visibility is changed.
Queue URLs are case-sensitive.
:type ReceiptHandle: string
:param ReceiptHandle: [REQUIRED]
The receipt handle associated with the message whose visibility timeout is changed. This parameter is returned by the `` ReceiveMessage `` action.
:type VisibilityTimeout: integer
:param VisibilityTimeout: [REQUIRED]
The new value for the message's visibility timeout (in seconds). Values values: 0 to 43200 . Maximum: 12 hours.
"""
pass
def change_message_visibility_batch(QueueUrl=None, Entries=None):
"""
Changes the visibility timeout of multiple messages. This is a batch version of `` ChangeMessageVisibility .`` The result of the action on each message is reported individually in the response. You can send up to 10 `` ChangeMessageVisibility `` requests with each ChangeMessageVisibilityBatch action.
See also: AWS API Documentation
:example: response = client.change_message_visibility_batch(
QueueUrl='string',
Entries=[
{
'Id': 'string',
'ReceiptHandle': 'string',
'VisibilityTimeout': 123
},
]
)
:type QueueUrl: string
:param QueueUrl: [REQUIRED]
The URL of the Amazon SQS queue whose messages' visibility is changed.
Queue URLs are case-sensitive.
:type Entries: list
:param Entries: [REQUIRED]
A list of receipt handles of the messages for which the visibility timeout must be changed.
(dict) --Encloses a receipt handle and an entry id for each message in `` ChangeMessageVisibilityBatch .``
Warning
All of the following list parameters must be prefixed with ChangeMessageVisibilityBatchRequestEntry.n , where n is an integer value starting with 1 . For example, a parameter list for this action might look like this:
amp;ChangeMessageVisibilityBatchRequestEntry.1.Id=change_visibility_msg_2
amp;ChangeMessageVisibilityBatchRequestEntry.1.ReceiptHandle=replaceableYour_Receipt_Handle/replaceable
amp;ChangeMessageVisibilityBatchRequestEntry.1.VisibilityTimeout=45
Id (string) -- [REQUIRED]An identifier for this particular receipt handle used to communicate the result.
Note
The Id s of a batch request need to be unique within a request
ReceiptHandle (string) -- [REQUIRED]A receipt handle.
VisibilityTimeout (integer) --The new value (in seconds) for the message's visibility timeout.
:rtype: dict
:return: {
'Successful': [
{
'Id': 'string'
},
],
'Failed': [
{
'Id': 'string',
'SenderFault': True|False,
'Code': 'string',
'Message': 'string'
},
]
}
"""
pass
def create_queue(QueueName=None, Attributes=None):
"""
Creates a new standard or FIFO queue. You can pass one or more attributes in the request. Keep the following caveats in mind:
To successfully create a new queue, you must provide a queue name that adheres to the limits related to queues and is unique within the scope of your queues.
To get the queue URL, use the `` GetQueueUrl `` action. `` GetQueueUrl `` requires only the QueueName parameter. be aware of existing queue names:
See also: AWS API Documentation
Examples
The following operation creates an SQS queue named MyQueue.
Expected Output:
:example: response = client.create_queue(
QueueName='string',
Attributes={
'string': 'string'
}
)
:type QueueName: string
:param QueueName: [REQUIRED]
The name of the new queue. The following limits apply to this name:
A queue name can have up to 80 characters.
Valid values: alphanumeric characters, hyphens (- ), and underscores (_ ).
A FIFO queue name must end with the .fifo suffix.
Queue names are case-sensitive.
:type Attributes: dict
:param Attributes: A map of attributes with their corresponding values.
The following lists the names, descriptions, and values of the special request parameters that the CreateQueue action uses:
DelaySeconds - The length of time, in seconds, for which the delivery of all messages in the queue is delayed. Valid values: An integer from 0 to 900 seconds (15 minutes). The default is 0 (zero).
MaximumMessageSize - The limit of how many bytes a message can contain before Amazon SQS rejects it. Valid values: An integer from 1,024 bytes (1 KiB) to 262,144 bytes (256 KiB). The default is 262,144 (256 KiB).
MessageRetentionPeriod - The length of time, in seconds, for which Amazon SQS retains a message. Valid values: An integer from 60 seconds (1 minute) to 1,209,600 seconds (14 days). The default is 345,600 (4 days).
Policy - The queue's policy. A valid AWS policy. For more information about policy structure, see Overview of AWS IAM Policies in the Amazon IAM User Guide .
ReceiveMessageWaitTimeSeconds - The length of time, in seconds, for which a `` ReceiveMessage `` action waits for a message to arrive. Valid values: An integer from 0 to 20 (seconds). The default is 0 (zero).
RedrivePolicy - The parameters for the dead letter queue functionality of the source queue. For more information about the redrive policy and dead letter queues, see Using Amazon SQS Dead Letter Queues in the Amazon SQS Developer Guide .
Note
The dead letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead letter queue of a standard queue must also be a standard queue.
VisibilityTimeout - The visibility timeout for the queue. Valid values: An integer from 0 to 43,200 (12 hours). The default is 30. For more information about the visibility timeout, see Visibility Timeout in the Amazon SQS Developer Guide .
The following attributes apply only to server-side-encryption :
KmsMasterKeyId - The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms . While the alias of the AWS-managed CMK for Amazon SQS is always alias/aws/sqs , the alias of a custom CMK can, for example, be alias/aws/sqs . For more examples, see KeyId in the AWS Key Management Service API Reference .
KmsDataKeyReusePeriodSeconds - The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again. An integer representing seconds, between 60 seconds (1 minute) and 86,400 seconds (24 hours). The default is 300 (5 minutes). A shorter time period provides better security but results in more calls to KMS which incur charges after Free Tier. For more information, see How Does the Data Key Reuse Period Work? .
The following attributes apply only to FIFO (first-in-first-out) queues :
FifoQueue - Designates a queue as FIFO. Valid values: true , false . You can provide this attribute only during queue creation. You can't change it for an existing queue. When you set this attribute, you must also provide the MessageGroupId for your messages explicitly. For more information, see FIFO Queue Logic in the Amazon SQS Developer Guide .
ContentBasedDeduplication - Enables content-based deduplication. Valid values: true , false . For more information, see Exactly-Once Processing in the Amazon SQS Developer Guide .
Every message must have a unique MessageDeduplicationId ,
You may provide a MessageDeduplicationId explicitly.
If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message).
If you don't provide a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication set, the action fails with an error.
If the queue has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one.
When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered.
If you send one message with ContentBasedDeduplication enabled and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId , the two messages are treated as duplicates and only one copy of the message is delivered.
Any other valid special request parameters (such as the following) are ignored:
ApproximateNumberOfMessages
ApproximateNumberOfMessagesDelayed
ApproximateNumberOfMessagesNotVisible
CreatedTimestamp
LastModifiedTimestamp
QueueArn
(string) --
(string) --
:rtype: dict
:return: {
'QueueUrl': 'string'
}
:returns:
If you don't provide a value for an attribute, the queue is created with the default value for the attribute.
If you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.
"""
pass
def delete_message(QueueUrl=None, ReceiptHandle=None):
"""
Deletes the specified message from the specified queue. You specify the message by using the message's receipt handle and not the MessageId you receive when you send the message. Even if the message is locked by another reader due to the visibility timeout setting, it is still deleted from the queue. If you leave a message in the queue for longer than the queue's configured retention period, Amazon SQS automatically deletes the message.
See also: AWS API Documentation
:example: response = client.delete_message(
QueueUrl='string',
ReceiptHandle='string'
)
:type QueueUrl: string
:param QueueUrl: [REQUIRED]
The URL of the Amazon SQS queue from which messages are deleted.
Queue URLs are case-sensitive.
:type ReceiptHandle: string
:param ReceiptHandle: [REQUIRED]
The receipt handle associated with the message to delete.
"""
pass
def delete_message_batch(QueueUrl=None, Entries=None):
"""
Deletes up to ten messages from the specified queue. This is a batch version of `` DeleteMessage .`` The result of the action on each message is reported individually in the response.
See also: AWS API Documentation
:example: response = client.delete_message_batch(
QueueUrl='string',
Entries=[
{
'Id': 'string',
'ReceiptHandle': 'string'
},
]
)
:type QueueUrl: string
:param QueueUrl: [REQUIRED]
The URL of the Amazon SQS queue from which messages are deleted.
Queue URLs are case-sensitive.
:type Entries: list
:param Entries: [REQUIRED]
A list of receipt handles for the messages to be deleted.
(dict) --Encloses a receipt handle and an identifier for it.
Id (string) -- [REQUIRED]An identifier for this particular receipt handle. This is used to communicate the result.
Note
The Id s of a batch request need to be unique within a request
ReceiptHandle (string) -- [REQUIRED]A receipt handle.
:rtype: dict
:return: {
'Successful': [
{
'Id': 'string'
},
],
'Failed': [
{
'Id': 'string',
'SenderFault': True|False,
'Code': 'string',
'Message': 'string'
},
]
}
"""
pass
def delete_queue(QueueUrl=None):
"""
Deletes the queue specified by the QueueUrl , even if the queue is empty. If the specified queue doesn't exist, Amazon SQS returns a successful response.
When you delete a queue, the deletion process takes up to 60 seconds. Requests you send involving that queue during the 60 seconds might succeed. For example, a `` SendMessage `` request might succeed, but after 60 seconds the queue and the message you sent no longer exist.
When you delete a queue, you must wait at least 60 seconds before creating a queue with the same name.
See also: AWS API Documentation
:example: response = client.delete_queue(
QueueUrl='string'
)
:type QueueUrl: string
:param QueueUrl: [REQUIRED]
The URL of the Amazon SQS queue to delete.
Queue URLs are case-sensitive.
"""
pass
def generate_presigned_url(ClientMethod=None, Params=None, ExpiresIn=None, HttpMethod=None):
"""
Generate a presigned url given a client, its method, and arguments
:type ClientMethod: string
:param ClientMethod: The client method to presign for
:type Params: dict
:param Params: The parameters normally passed to
ClientMethod.
:type ExpiresIn: int
:param ExpiresIn: The number of seconds the presigned url is valid
for. By default it expires in an hour (3600 seconds)
:type HttpMethod: string
:param HttpMethod: The http method to use on the generated url. By
default, the http method is whatever is used in the method's model.
"""
pass
def get_paginator(operation_name=None):
"""
Create a paginator for an operation.
:type operation_name: string
:param operation_name: The operation name. This is the same name
as the method name on the client. For example, if the
method name is create_foo, and you'd normally invoke the
operation as client.create_foo(**kwargs), if the
create_foo operation can be paginated, you can use the
call client.get_paginator('create_foo').
:rtype: L{botocore.paginate.Paginator}
"""
pass
def get_queue_attributes(QueueUrl=None, AttributeNames=None):
"""
Gets attributes for the specified queue.
See also: AWS API Documentation
:example: response = client.get_queue_attributes(
QueueUrl='string',
AttributeNames=[
'All'|'Policy'|'VisibilityTimeout'|'MaximumMessageSize'|'MessageRetentionPeriod'|'ApproximateNumberOfMessages'|'ApproximateNumberOfMessagesNotVisible'|'CreatedTimestamp'|'LastModifiedTimestamp'|'QueueArn'|'ApproximateNumberOfMessagesDelayed'|'DelaySeconds'|'ReceiveMessageWaitTimeSeconds'|'RedrivePolicy'|'FifoQueue'|'ContentBasedDeduplication'|'KmsMasterKeyId'|'KmsDataKeyReusePeriodSeconds',
]
)
:type QueueUrl: string
:param QueueUrl: [REQUIRED]
The URL of the Amazon SQS queue whose attribute information is retrieved.
Queue URLs are case-sensitive.
:type AttributeNames: list
:param AttributeNames: A list of attributes for which to retrieve information.
Note
In the future, new attributes might be added. If you write code that calls this action, we recommend that you structure your code so that it can handle new attributes gracefully.
The following attributes are supported:
All - Returns all values.
ApproximateNumberOfMessages - Returns the approximate number of visible messages in a queue. For more information, see Resources Required to Process Messages in the Amazon SQS Developer Guide .
ApproximateNumberOfMessagesDelayed - Returns the approximate number of messages that are waiting to be added to the queue.
ApproximateNumberOfMessagesNotVisible - Returns the approximate number of messages that have not timed-out and aren't deleted. For more information, see Resources Required to Process Messages in the Amazon SQS Developer Guide .
CreatedTimestamp - Returns the time when the queue was created in seconds (epoch time ).
DelaySeconds - Returns the default delay on the queue in seconds.
LastModifiedTimestamp - Returns the time when the queue was last changed in seconds (epoch time ).
MaximumMessageSize - Returns the limit of how many bytes a message can contain before Amazon SQS rejects it.
MessageRetentionPeriod - Returns the length of time, in seconds, for which Amazon SQS retains a message.
Policy - Returns the policy of the queue.
QueueArn - Returns the Amazon resource name (ARN) of the queue.
ReceiveMessageWaitTimeSeconds - Returns the length of time, in seconds, for which the ReceiveMessage action waits for a message to arrive.
RedrivePolicy - Returns the parameters for dead letter queue functionality of the source queue. For more information about the redrive policy and dead letter queues, see Using Amazon SQS Dead Letter Queues in the Amazon SQS Developer Guide .
VisibilityTimeout - Returns the visibility timeout for the queue. For more information about the visibility timeout, see Visibility Timeout in the Amazon SQS Developer Guide .
The following attributes apply only to server-side-encryption :
KmsMasterKeyId - Returns the ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms .
KmsDataKeyReusePeriodSeconds - Returns the length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again.
The following attributes apply only to FIFO (first-in-first-out) queues :
FifoQueue - Returns whether the queue is FIFO. For more information, see FIFO Queue Logic in the Amazon SQS Developer Guide .
Note
To determine whether a queue is FIFO , you can check whether QueueName ends with the .fifo suffix.
ContentBasedDeduplication - Returns whether content-based deduplication is enabled for the queue. For more information, see Exactly-Once Processing in the Amazon SQS Developer Guide .
(string) --
:rtype: dict
:return: {
'Attributes': {
'string': 'string'
}
}
:returns:
(string) --
(string) --
"""
pass
def get_queue_url(QueueName=None, QueueOwnerAWSAccountId=None):
"""
Returns the URL of an existing queue. This action provides a simple way to retrieve the URL of an Amazon SQS queue.
To access a queue that belongs to another AWS account, use the QueueOwnerAWSAccountId parameter to specify the account ID of the queue's owner. The queue's owner must grant you permission to access the queue. For more information about shared queue access, see `` AddPermission `` or see Shared Queues in the Amazon SQS Developer Guide .
See also: AWS API Documentation
Examples
The following example retrieves the queue ARN.
Expected Output:
:example: response = client.get_queue_url(
QueueName='string',
QueueOwnerAWSAccountId='string'
)
:type QueueName: string
:param QueueName: [REQUIRED]
The name of the queue whose URL must be fetched. Maximum 80 characters. Valid values: alphanumeric characters, hyphens (- ), and underscores (_ ).
Queue names are case-sensitive.
:type QueueOwnerAWSAccountId: string
:param QueueOwnerAWSAccountId: The AWS account ID of the account that created the queue.
:rtype: dict
:return: {
'QueueUrl': 'string'
}
"""
pass
def get_waiter():
"""
"""
pass
def list_dead_letter_source_queues(QueueUrl=None):
"""
Returns a list of your queues that have the RedrivePolicy queue attribute configured with a dead letter queue.
For more information about using dead letter queues, see Using Amazon SQS Dead Letter Queues in the Amazon SQS Developer Guide .
See also: AWS API Documentation
:example: response = client.list_dead_letter_source_queues(
QueueUrl='string'
)
:type QueueUrl: string
:param QueueUrl: [REQUIRED]
The URL of a dead letter queue.
Queue URLs are case-sensitive.
:rtype: dict
:return: {
'queueUrls': [
'string',
]
}
"""
pass
def list_queues(QueueNamePrefix=None):
"""
Returns a list of your queues. The maximum number of queues that can be returned is 1,000. If you specify a value for the optional QueueNamePrefix parameter, only queues with a name that begins with the specified value are returned.
See also: AWS API Documentation
:example: response = client.list_queues(
QueueNamePrefix='string'
)
:type QueueNamePrefix: string
:param QueueNamePrefix: A string to use for filtering the list results. Only those queues whose name begins with the specified string are returned.
Queue names are case-sensitive.
:rtype: dict
:return: {
'QueueUrls': [
'string',
]
}
"""
pass
def purge_queue(QueueUrl=None):
"""
Deletes the messages in a queue specified by the QueueURL parameter.
When you purge a queue, the message deletion process takes up to 60 seconds. All messages sent to the queue before calling the PurgeQueue action are deleted. Messages sent to the queue while it is being purged might be deleted. While the queue is being purged, messages sent to the queue before PurgeQueue is called might be received, but are deleted within the next minute.
See also: AWS API Documentation
:example: response = client.purge_queue(
QueueUrl='string'
)
:type QueueUrl: string
:param QueueUrl: [REQUIRED]
The URL of the queue from which the PurgeQueue action deletes messages.
Queue URLs are case-sensitive.
"""
pass
def receive_message(QueueUrl=None, AttributeNames=None, MessageAttributeNames=None, MaxNumberOfMessages=None, VisibilityTimeout=None, WaitTimeSeconds=None, ReceiveRequestAttemptId=None):
    """
    Retrieves one or more messages (up to 10), from the specified queue. Using the WaitTimeSeconds parameter enables long-poll support. For more information, see Amazon SQS Long Polling in the Amazon SQS Developer Guide .
    Short poll is the default behavior where a weighted random set of machines is sampled on a ReceiveMessage call. Thus, only the messages on the sampled machines are returned. If the number of messages in the queue is small (fewer than 1,000), you most likely get fewer messages than you requested per ReceiveMessage call. If the number of messages in the queue is extremely small, you might not receive any messages in a particular ReceiveMessage response. If this happens, repeat the request.
    For each message returned, the response includes the following:
    The receipt handle is the identifier you must provide when deleting the message. For more information, see Queue and Message Identifiers in the Amazon SQS Developer Guide .
    You can provide the VisibilityTimeout parameter in your request. The parameter is applied to the messages that Amazon SQS returns in the response. If you don't include the parameter, the overall visibility timeout for the queue is used for the returned messages. For more information, see Visibility Timeout in the Amazon SQS Developer Guide .
    A message that isn't deleted or a message whose visibility isn't extended before the visibility timeout expires counts as a failed receive. Depending on the configuration of the queue, the message might be sent to the dead letter queue.
    See also: AWS API Documentation

    :example: response = client.receive_message(
        QueueUrl='string',
        AttributeNames=[
            'All'|'Policy'|'VisibilityTimeout'|'MaximumMessageSize'|'MessageRetentionPeriod'|'ApproximateNumberOfMessages'|'ApproximateNumberOfMessagesNotVisible'|'CreatedTimestamp'|'LastModifiedTimestamp'|'QueueArn'|'ApproximateNumberOfMessagesDelayed'|'DelaySeconds'|'ReceiveMessageWaitTimeSeconds'|'RedrivePolicy'|'FifoQueue'|'ContentBasedDeduplication'|'KmsMasterKeyId'|'KmsDataKeyReusePeriodSeconds',
        ],
        MessageAttributeNames=[
            'string',
        ],
        MaxNumberOfMessages=123,
        VisibilityTimeout=123,
        WaitTimeSeconds=123,
        ReceiveRequestAttemptId='string'
    )
    :type QueueUrl: string
    :param QueueUrl: [REQUIRED]
        The URL of the Amazon SQS queue from which messages are received.
        Queue URLs are case-sensitive.
    :type AttributeNames: list
    :param AttributeNames: A list of attributes that need to be returned along with each message. These attributes include:
        All - Returns all values.
        ApproximateFirstReceiveTimestamp - Returns the time the message was first received from the queue (epoch time in milliseconds).
        ApproximateReceiveCount - Returns the number of times a message has been received from the queue but not deleted.
        SenderId
        For an IAM user, returns the IAM user ID, for example ABCDEFGHI1JKLMNOPQ23R .
        For an IAM role, returns the IAM role ID, for example ABCDE1F2GH3I4JK5LMNOP:i-a123b456 .
        SentTimestamp - Returns the time the message was sent to the queue (epoch time in milliseconds).
        MessageDeduplicationId - Returns the value provided by the sender that calls the `` SendMessage `` action.
        MessageGroupId - Returns the value provided by the sender that calls the `` SendMessage `` action. Messages with the same MessageGroupId are returned in sequence.
        SequenceNumber - Returns the value provided by Amazon SQS.
        Any other valid special request parameters (such as the following) are ignored:
        ApproximateNumberOfMessages
        ApproximateNumberOfMessagesDelayed
        ApproximateNumberOfMessagesNotVisible
        CreatedTimestamp
        ContentBasedDeduplication
        DelaySeconds
        FifoQueue
        LastModifiedTimestamp
        MaximumMessageSize
        MessageRetentionPeriod
        Policy
        QueueArn ,
        ReceiveMessageWaitTimeSeconds
        RedrivePolicy
        VisibilityTimeout
        (string) --
    :type MessageAttributeNames: list
    :param MessageAttributeNames: The name of the message attribute, where N is the index.
        The name can contain alphanumeric characters and the underscore (_ ), hyphen (- ), and period (. ).
        The name is case-sensitive and must be unique among all attribute names for the message.
        The name must not start with AWS-reserved prefixes such as AWS. or Amazon. (or any casing variants).
        The name must not start or end with a period (. ), and it should not have periods in succession (.. ).
        The name can be up to 256 characters long.
        When using ReceiveMessage , you can send a list of attribute names to receive, or you can return all of the attributes by specifying All or .* in your request. You can also use all message attributes starting with a prefix, for example bar.* .
        (string) --
    :type MaxNumberOfMessages: integer
    :param MaxNumberOfMessages: The maximum number of messages to return. Amazon SQS never returns more messages than this value (however, fewer messages might be returned). Valid values are 1 to 10. Default is 1.
    :type VisibilityTimeout: integer
    :param VisibilityTimeout: The duration (in seconds) that the received messages are hidden from subsequent retrieve requests after being retrieved by a ReceiveMessage request.
    :type WaitTimeSeconds: integer
    :param WaitTimeSeconds: The duration (in seconds) for which the call waits for a message to arrive in the queue before returning. If a message is available, the call returns sooner than WaitTimeSeconds .
    :type ReceiveRequestAttemptId: string
    :param ReceiveRequestAttemptId: This parameter applies only to FIFO (first-in-first-out) queues.
        The token used for deduplication of ReceiveMessage calls. If a networking issue occurs after a ReceiveMessage action, and instead of a response you receive a generic error, you can retry the same action with an identical ReceiveRequestAttemptId to retrieve the same set of messages, even if their visibility timeout has not yet expired.
        You can use ReceiveRequestAttemptId only for 5 minutes after a ReceiveMessage action.
        When you set FifoQueue , a caller of the ReceiveMessage action can provide a ReceiveRequestAttemptId explicitly.
        If a caller of the ReceiveMessage action doesn't provide a ReceiveRequestAttemptId , Amazon SQS generates a ReceiveRequestAttemptId .
        You can retry the ReceiveMessage action with the same ReceiveRequestAttemptId if none of the messages have been modified (deleted or had their visibility changed).
        During a visibility timeout, subsequent calls with the same ReceiveRequestAttemptId return the same messages and receipt handles. If a retry occurs within the deduplication interval, it resets the visibility timeout. For more information, see Visibility Timeout in the Amazon Simple Queue Service Developer Guide .
        Warning
        If a caller of the ReceiveMessage action is still processing messages when the visibility timeout expires and messages become visible, another worker reading from the same queue can receive the same messages and therefore process duplicates. Also, if a reader whose message processing time is longer than the visibility timeout tries to delete the processed messages, the action fails with an error. To mitigate this effect, ensure that your application observes a safe threshold before the visibility timeout expires and extend the visibility timeout as necessary.
        While messages with a particular MessageGroupId are invisible, no more messages belonging to the same MessageGroupId are returned until the visibility timeout expires. You can still receive messages with another MessageGroupId as long as it is also visible.
        If a caller of ReceiveMessage can't track the ReceiveRequestAttemptId , no retries work until the original visibility timeout expires. As a result, delays might occur but the messages in the queue remain in a strict order.
        The length of ReceiveRequestAttemptId is 128 characters. ReceiveRequestAttemptId can contain alphanumeric characters (a-z , A-Z , 0-9 ) and punctuation (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~ ).
        For best practices of using ReceiveRequestAttemptId , see Using the ReceiveRequestAttemptId Request Parameter in the Amazon Simple Queue Service Developer Guide .
    :rtype: dict
    :return: {
        'Messages': [
            {
                'MessageId': 'string',
                'ReceiptHandle': 'string',
                'MD5OfBody': 'string',
                'Body': 'string',
                'Attributes': {
                    'string': 'string'
                },
                'MD5OfMessageAttributes': 'string',
                'MessageAttributes': {
                    'string': {
                        'StringValue': 'string',
                        'BinaryValue': b'bytes',
                        'StringListValues': [
                            'string',
                        ],
                        'BinaryListValues': [
                            b'bytes',
                        ],
                        'DataType': 'string'
                    }
                }
            },
        ]
    }
    :returns:
        QueueUrl (string) -- [REQUIRED]
        The URL of the Amazon SQS queue from which messages are received.
        Queue URLs are case-sensitive.
        AttributeNames (list) -- A list of attributes that need to be returned along with each message. These attributes include:
        All - Returns all values.
        ApproximateFirstReceiveTimestamp - Returns the time the message was first received from the queue (epoch time in milliseconds).
        ApproximateReceiveCount - Returns the number of times a message has been received from the queue but not deleted.
        SenderId
        For an IAM user, returns the IAM user ID, for example ABCDEFGHI1JKLMNOPQ23R .
        For an IAM role, returns the IAM role ID, for example ABCDE1F2GH3I4JK5LMNOP:i-a123b456 .
        SentTimestamp - Returns the time the message was sent to the queue (epoch time in milliseconds).
        MessageDeduplicationId - Returns the value provided by the sender that calls the `` SendMessage `` action.
        MessageGroupId - Returns the value provided by the sender that calls the `` SendMessage `` action. Messages with the same MessageGroupId are returned in sequence.
        SequenceNumber - Returns the value provided by Amazon SQS.
        Any other valid special request parameters (such as the following) are ignored:
        ApproximateNumberOfMessages
        ApproximateNumberOfMessagesDelayed
        ApproximateNumberOfMessagesNotVisible
        CreatedTimestamp
        ContentBasedDeduplication
        DelaySeconds
        FifoQueue
        LastModifiedTimestamp
        MaximumMessageSize
        MessageRetentionPeriod
        Policy
        QueueArn ,
        ReceiveMessageWaitTimeSeconds
        RedrivePolicy
        VisibilityTimeout
        (string) --
        MessageAttributeNames (list) -- The name of the message attribute, where N is the index.
        The name can contain alphanumeric characters and the underscore (_ ), hyphen (- ), and period (. ).
        The name is case-sensitive and must be unique among all attribute names for the message.
        The name must not start with AWS-reserved prefixes such as AWS. or Amazon. (or any casing variants).
        The name must not start or end with a period (. ), and it should not have periods in succession (.. ).
        The name can be up to 256 characters long.
        When using ReceiveMessage , you can send a list of attribute names to receive, or you can return all of the attributes by specifying All or .* in your request. You can also use all message attributes starting with a prefix, for example bar.* .
        (string) --
        MaxNumberOfMessages (integer) -- The maximum number of messages to return. Amazon SQS never returns more messages than this value (however, fewer messages might be returned). Valid values are 1 to 10. Default is 1.
        VisibilityTimeout (integer) -- The duration (in seconds) that the received messages are hidden from subsequent retrieve requests after being retrieved by a ReceiveMessage request.
        WaitTimeSeconds (integer) -- The duration (in seconds) for which the call waits for a message to arrive in the queue before returning. If a message is available, the call returns sooner than WaitTimeSeconds .
        ReceiveRequestAttemptId (string) -- This parameter applies only to FIFO (first-in-first-out) queues.
        The token used for deduplication of ReceiveMessage calls. If a networking issue occurs after a ReceiveMessage action, and instead of a response you receive a generic error, you can retry the same action with an identical ReceiveRequestAttemptId to retrieve the same set of messages, even if their visibility timeout has not yet expired.
        You can use ReceiveRequestAttemptId only for 5 minutes after a ReceiveMessage action.
        When you set FifoQueue , a caller of the ReceiveMessage action can provide a ReceiveRequestAttemptId explicitly.
        If a caller of the ReceiveMessage action doesn't provide a ReceiveRequestAttemptId , Amazon SQS generates a ReceiveRequestAttemptId .
        You can retry the ReceiveMessage action with the same ReceiveRequestAttemptId if none of the messages have been modified (deleted or had their visibility changed).
        During a visibility timeout, subsequent calls with the same ReceiveRequestAttemptId return the same messages and receipt handles. If a retry occurs within the deduplication interval, it resets the visibility timeout. For more information, see Visibility Timeout in the Amazon Simple Queue Service Developer Guide .
        Warning
        If a caller of the ReceiveMessage action is still processing messages when the visibility timeout expires and messages become visible, another worker reading from the same queue can receive the same messages and therefore process duplicates. Also, if a reader whose message processing time is longer than the visibility timeout tries to delete the processed messages, the action fails with an error. To mitigate this effect, ensure that your application observes a safe threshold before the visibility timeout expires and extend the visibility timeout as necessary.
        While messages with a particular MessageGroupId are invisible, no more messages belonging to the same MessageGroupId are returned until the visibility timeout expires. You can still receive messages with another MessageGroupId as long as it is also visible.
        If a caller of ReceiveMessage can't track the ReceiveRequestAttemptId , no retries work until the original visibility timeout expires. As a result, delays might occur but the messages in the queue remain in a strict order.
        The length of ReceiveRequestAttemptId is 128 characters. ReceiveRequestAttemptId can contain alphanumeric characters (a-z , A-Z , 0-9 ) and punctuation (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~ ).
        For best practices of using ReceiveRequestAttemptId , see Using the ReceiveRequestAttemptId Request Parameter in the Amazon Simple Queue Service Developer Guide .
    """
    pass
def remove_permission(QueueUrl=None, Label=None):
    """
    Revokes any permissions in the queue policy that matches the specified Label parameter. Only the owner of the queue can remove permissions.
    See also: AWS API Documentation

    :example: response = client.remove_permission(
        QueueUrl='string',
        Label='string'
    )
    :type QueueUrl: string
    :param QueueUrl: [REQUIRED]
        The URL of the Amazon SQS queue from which permissions are removed.
        Queue URLs are case-sensitive.
    :type Label: string
    :param Label: [REQUIRED]
        The identification of the permission to remove. This is the label added using the `` AddPermission `` action.
    """
    pass
def send_message(QueueUrl=None, MessageBody=None, DelaySeconds=None, MessageAttributes=None, MessageDeduplicationId=None, MessageGroupId=None):
    """
    Delivers a message to the specified queue.
    See also: AWS API Documentation

    :example: response = client.send_message(
        QueueUrl='string',
        MessageBody='string',
        DelaySeconds=123,
        MessageAttributes={
            'string': {
                'StringValue': 'string',
                'BinaryValue': b'bytes',
                'StringListValues': [
                    'string',
                ],
                'BinaryListValues': [
                    b'bytes',
                ],
                'DataType': 'string'
            }
        },
        MessageDeduplicationId='string',
        MessageGroupId='string'
    )
    :type QueueUrl: string
    :param QueueUrl: [REQUIRED]
        The URL of the Amazon SQS queue to which a message is sent.
        Queue URLs are case-sensitive.
    :type MessageBody: string
    :param MessageBody: [REQUIRED]
        The message to send. The maximum string size is 256 KB.
        Warning
        A message can include only XML, JSON, and unformatted text. The following Unicode characters are allowed:
        #x9 | #xA | #xD | #x20 to #xD7FF | #xE000 to #xFFFD | #x10000 to #x10FFFF
        Any characters not included in this list will be rejected. For more information, see the W3C specification for characters .
    :type DelaySeconds: integer
    :param DelaySeconds: The length of time, in seconds, for which to delay a specific message. Valid values: 0 to 900. Maximum: 15 minutes. Messages with a positive DelaySeconds value become available for processing after the delay period is finished. If you don't specify a value, the default value for the queue applies.
        Note
        When you set FifoQueue , you can't set DelaySeconds per message. You can set this parameter only on a queue level.
    :type MessageAttributes: dict
    :param MessageAttributes: Each message attribute consists of a Name , Type , and Value . For more information, see Message Attribute Items and Validation in the Amazon SQS Developer Guide .
        (string) --
        (dict) --The user-specified message attribute value. For string data types, the Value attribute has the same restrictions on the content as the message body. For more information, see `` SendMessage .``
        Name , type , value and the message body must not be empty or null. All parts of the message attribute, including Name , Type , and Value , are part of the message size restriction (256 KB or 262,144 bytes).
        StringValue (string) --Strings are Unicode with UTF-8 binary encoding. For a list of code values, see ASCII Printable Characters .
        BinaryValue (bytes) --Binary type attributes can store any binary data, such as compressed data, encrypted data, or images.
        StringListValues (list) --Not implemented. Reserved for future use.
        (string) --
        BinaryListValues (list) --Not implemented. Reserved for future use.
        (bytes) --
        DataType (string) -- [REQUIRED]Amazon SQS supports the following logical data types: String , Number , and Binary . For the Number data type, you must use StringValue .
        You can also append custom labels. For more information, see Message Attribute Data Types and Validation in the Amazon SQS Developer Guide .
    :type MessageDeduplicationId: string
    :param MessageDeduplicationId: This parameter applies only to FIFO (first-in-first-out) queues.
        The token used for deduplication of sent messages. If a message with a particular MessageDeduplicationId is sent successfully, any messages sent with the same MessageDeduplicationId are accepted successfully but aren't delivered during the 5-minute deduplication interval. For more information, see Exactly-Once Processing in the Amazon SQS Developer Guide .
        Every message must have a unique MessageDeduplicationId ,
        You may provide a MessageDeduplicationId explicitly.
        If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message).
        If you don't provide a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication set, the action fails with an error.
        If the queue has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one.
        When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered.
        If you send one message with ContentBasedDeduplication enabled and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId , the two messages are treated as duplicates and only one copy of the message is delivered.
        Note
        The MessageDeduplicationId is available to the recipient of the message (this can be useful for troubleshooting delivery issues).
        If a message is sent successfully but the acknowledgement is lost and the message is resent with the same MessageDeduplicationId after the deduplication interval, Amazon SQS can't detect duplicate messages.
        The length of MessageDeduplicationId is 128 characters. MessageDeduplicationId can contain alphanumeric characters (a-z , A-Z , 0-9 ) and punctuation (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~ ).
        For best practices of using MessageDeduplicationId , see Using the MessageDeduplicationId Property in the Amazon Simple Queue Service Developer Guide .
    :type MessageGroupId: string
    :param MessageGroupId: This parameter applies only to FIFO (first-in-first-out) queues.
        The tag that specifies that a message belongs to a specific message group. Messages that belong to the same message group are processed in a FIFO manner (however, messages in different message groups might be processed out of order). To interleave multiple ordered streams within a single queue, use MessageGroupId values (for example, session data for multiple users). In this scenario, multiple readers can process the queue, but the session data of each user is processed in a FIFO fashion.
        You must associate a non-empty MessageGroupId with a message. If you don't provide a MessageGroupId , the action fails.
        ReceiveMessage might return messages with multiple MessageGroupId values. For each MessageGroupId , the messages are sorted by time sent. The caller can't specify a MessageGroupId .
        The length of MessageGroupId is 128 characters. Valid values are alphanumeric characters and punctuation (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~) .
        For best practices of using MessageGroupId , see Using the MessageGroupId Property in the Amazon Simple Queue Service Developer Guide .
        Warning
        MessageGroupId is required for FIFO queues. You can't use it for Standard queues.
    :rtype: dict
    :return: {
        'MD5OfMessageBody': 'string',
        'MD5OfMessageAttributes': 'string',
        'MessageId': 'string',
        'SequenceNumber': 'string'
    }
    """
    pass
def send_message_batch(QueueUrl=None, Entries=None):
    """
    Delivers up to ten messages to the specified queue. This is a batch version of `` SendMessage .`` For a FIFO queue, multiple messages within a single batch are enqueued in the order they are sent.
    The result of sending each message is reported individually in the response. Because the batch request can result in a combination of successful and unsuccessful actions, you should check for batch errors even when the call returns an HTTP status code of 200 .
    The maximum allowed individual message size and the maximum total payload size (the sum of the individual lengths of all of the batched messages) are both 256 KB (262,144 bytes).
    If you don't specify the DelaySeconds parameter for an entry, Amazon SQS uses the default value for the queue.
    See also: AWS API Documentation

    :example: response = client.send_message_batch(
        QueueUrl='string',
        Entries=[
            {
                'Id': 'string',
                'MessageBody': 'string',
                'DelaySeconds': 123,
                'MessageAttributes': {
                    'string': {
                        'StringValue': 'string',
                        'BinaryValue': b'bytes',
                        'StringListValues': [
                            'string',
                        ],
                        'BinaryListValues': [
                            b'bytes',
                        ],
                        'DataType': 'string'
                    }
                },
                'MessageDeduplicationId': 'string',
                'MessageGroupId': 'string'
            },
        ]
    )
    :type QueueUrl: string
    :param QueueUrl: [REQUIRED]
        The URL of the Amazon SQS queue to which batched messages are sent.
        Queue URLs are case-sensitive.
    :type Entries: list
    :param Entries: [REQUIRED]
        A list of `` SendMessageBatchRequestEntry `` items.
        (dict) --Contains the details of a single Amazon SQS message along with an Id .
        Id (string) -- [REQUIRED]An identifier for a message in this batch used to communicate the result.
        Note
        The Id s of a batch request need to be unique within a request
        MessageBody (string) -- [REQUIRED]The body of the message.
        DelaySeconds (integer) --The length of time, in seconds, for which a specific message is delayed. Valid values: 0 to 900. Maximum: 15 minutes. Messages with a positive DelaySeconds value become available for processing after the delay period is finished. If you don't specify a value, the default value for the queue is applied.
        Note
        When you set FifoQueue , you can't set DelaySeconds per message. You can set this parameter only on a queue level.
        MessageAttributes (dict) --Each message attribute consists of a Name , Type , and Value . For more information, see Message Attribute Items and Validation in the Amazon SQS Developer Guide .
        (string) --
        (dict) --The user-specified message attribute value. For string data types, the Value attribute has the same restrictions on the content as the message body. For more information, see `` SendMessage .``
        Name , type , value and the message body must not be empty or null. All parts of the message attribute, including Name , Type , and Value , are part of the message size restriction (256 KB or 262,144 bytes).
        StringValue (string) --Strings are Unicode with UTF-8 binary encoding. For a list of code values, see ASCII Printable Characters .
        BinaryValue (bytes) --Binary type attributes can store any binary data, such as compressed data, encrypted data, or images.
        StringListValues (list) --Not implemented. Reserved for future use.
        (string) --
        BinaryListValues (list) --Not implemented. Reserved for future use.
        (bytes) --
        DataType (string) -- [REQUIRED]Amazon SQS supports the following logical data types: String , Number , and Binary . For the Number data type, you must use StringValue .
        You can also append custom labels. For more information, see Message Attribute Data Types and Validation in the Amazon SQS Developer Guide .
        MessageDeduplicationId (string) --This parameter applies only to FIFO (first-in-first-out) queues.
        The token used for deduplication of messages within a 5-minute minimum deduplication interval. If a message with a particular MessageDeduplicationId is sent successfully, subsequent messages with the same MessageDeduplicationId are accepted successfully but aren't delivered. For more information, see Exactly-Once Processing in the Amazon SQS Developer Guide .
        Every message must have a unique MessageDeduplicationId ,
        You may provide a MessageDeduplicationId explicitly.
        If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message).
        If you don't provide a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication set, the action fails with an error.
        If the queue has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one.
        When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered.
        If you send one message with ContentBasedDeduplication enabled and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId , the two messages are treated as duplicates and only one copy of the message is delivered.
        Note
        The MessageDeduplicationId is available to the recipient of the message (this can be useful for troubleshooting delivery issues).
        If a message is sent successfully but the acknowledgement is lost and the message is resent with the same MessageDeduplicationId after the deduplication interval, Amazon SQS can't detect duplicate messages.
        The length of MessageDeduplicationId is 128 characters. MessageDeduplicationId can contain alphanumeric characters (a-z , A-Z , 0-9 ) and punctuation (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~ ).
        For best practices of using MessageDeduplicationId , see Using the MessageDeduplicationId Property in the Amazon Simple Queue Service Developer Guide .
        MessageGroupId (string) --This parameter applies only to FIFO (first-in-first-out) queues.
        The tag that specifies that a message belongs to a specific message group. Messages that belong to the same message group are processed in a FIFO manner (however, messages in different message groups might be processed out of order). To interleave multiple ordered streams within a single queue, use MessageGroupId values (for example, session data for multiple users). In this scenario, multiple readers can process the queue, but the session data of each user is processed in a FIFO fashion.
        You must associate a non-empty MessageGroupId with a message. If you don't provide a MessageGroupId , the action fails.
        ReceiveMessage might return messages with multiple MessageGroupId values. For each MessageGroupId , the messages are sorted by time sent. The caller can't specify a MessageGroupId .
        The length of MessageGroupId is 128 characters. Valid values are alphanumeric characters and punctuation (!"#$%&'()*+,-./:;<=>?@[\]^_`{|}~) .
        For best practices of using MessageGroupId , see Using the MessageGroupId Property in the Amazon Simple Queue Service Developer Guide .
        Warning
        MessageGroupId is required for FIFO queues. You can't use it for Standard queues.
    :rtype: dict
    :return: {
        'Successful': [
            {
                'Id': 'string',
                'MessageId': 'string',
                'MD5OfMessageBody': 'string',
                'MD5OfMessageAttributes': 'string',
                'SequenceNumber': 'string'
            },
        ],
        'Failed': [
            {
                'Id': 'string',
                'SenderFault': True|False,
                'Code': 'string',
                'Message': 'string'
            },
        ]
    }
    """
    pass
def set_queue_attributes(QueueUrl=None, Attributes=None):
"""
Sets the value of one or more queue attributes. When you change a queue's attributes, the change can take up to 60 seconds for most of the attributes to propagate throughout the Amazon SQS system. Changes made to the MessageRetentionPeriod attribute can take up to 15 minutes.
See also: AWS API Documentation
:example: response = client.set_queue_attributes(
QueueUrl='string',
Attributes={
'string': 'string'
}
)
:type QueueUrl: string
:param QueueUrl: [REQUIRED]
The URL of the Amazon SQS queue whose attributes are set.
Queue URLs are case-sensitive.
:type Attributes: dict
:param Attributes: [REQUIRED]
A map of attributes to set.
The following lists the names, descriptions, and values of the special request parameters that the SetQueueAttributes action uses:
DelaySeconds - The length of time, in seconds, for which the delivery of all messages in the queue is delayed. Valid values: An integer from 0 to 900 (15 minutes). The default is 0 (zero).
MaximumMessageSize - The limit of how many bytes a message can contain before Amazon SQS rejects it. Valid values: An integer from 1,024 bytes (1 KiB) up to 262,144 bytes (256 KiB). The default is 262,144 (256 KiB).
MessageRetentionPeriod - The length of time, in seconds, for which Amazon SQS retains a message. Valid values: An integer representing seconds, from 60 (1 minute) to 1,209,600 (14 days). The default is 345,600 (4 days).
Policy - The queue's policy. A valid AWS policy. For more information about policy structure, see Overview of AWS IAM Policies in the Amazon IAM User Guide .
ReceiveMessageWaitTimeSeconds - The length of time, in seconds, for which a `` ReceiveMessage `` action waits for a message to arrive. Valid values: an integer from 0 to 20 (seconds). The default is 0.
RedrivePolicy - The parameters for the dead letter queue functionality of the source queue. For more information about the redrive policy and dead letter queues, see Using Amazon SQS Dead Letter Queues in the Amazon SQS Developer Guide .
Note
The dead letter queue of a FIFO queue must also be a FIFO queue. Similarly, the dead letter queue of a standard queue must also be a standard queue.
VisibilityTimeout - The visibility timeout for the queue. Valid values: an integer from 0 to 43,200 (12 hours). The default is 30. For more information about the visibility timeout, see Visibility Timeout in the Amazon SQS Developer Guide .
The following attributes apply only to server-side-encryption :
KmsMasterKeyId - The ID of an AWS-managed customer master key (CMK) for Amazon SQS or a custom CMK. For more information, see Key Terms . While the alias of the AWS-managed CMK for Amazon SQS is always alias/aws/sqs , the alias of a custom CMK can, for example, be alias/aws/sqs . For more examples, see KeyId in the AWS Key Management Service API Reference .
KmsDataKeyReusePeriodSeconds - The length of time, in seconds, for which Amazon SQS can reuse a data key to encrypt or decrypt messages before calling AWS KMS again. An integer representing seconds, between 60 seconds (1 minute) and 86,400 seconds (24 hours). The default is 300 (5 minutes). A shorter time period provides better security but results in more calls to KMS which incur charges after Free Tier. For more information, see How Does the Data Key Reuse Period Work? .
The following attribute applies only to FIFO (first-in-first-out) queues :
ContentBasedDeduplication - Enables content-based deduplication. For more information, see Exactly-Once Processing in the Amazon SQS Developer Guide .
Every message must have a unique MessageDeduplicationId ,
You may provide a MessageDeduplicationId explicitly.
If you aren't able to provide a MessageDeduplicationId and you enable ContentBasedDeduplication for your queue, Amazon SQS uses a SHA-256 hash to generate the MessageDeduplicationId using the body of the message (but not the attributes of the message).
If you don't provide a MessageDeduplicationId and the queue doesn't have ContentBasedDeduplication set, the action fails with an error.
If the queue has ContentBasedDeduplication set, your MessageDeduplicationId overrides the generated one.
When ContentBasedDeduplication is in effect, messages with identical content sent within the deduplication interval are treated as duplicates and only one copy of the message is delivered.
If you send one message with ContentBasedDeduplication enabled and then another message with a MessageDeduplicationId that is the same as the one generated for the first MessageDeduplicationId , the two messages are treated as duplicates and only one copy of the message is delivered.
Any other valid special request parameters (such as the following) are ignored:
ApproximateNumberOfMessages
ApproximateNumberOfMessagesDelayed
ApproximateNumberOfMessagesNotVisible
CreatedTimestamp
LastModifiedTimestamp
QueueArn
(string) --
(string) --
"""
pass
| 59.996406
| 577
| 0.694561
| 8,395
| 66,776
| 5.513639
| 0.098035
| 0.016333
| 0.010889
| 0.008469
| 0.729643
| 0.704431
| 0.689286
| 0.676345
| 0.666602
| 0.653013
| 0
| 0.008267
| 0.255511
| 66,776
| 1,112
| 578
| 60.05036
| 0.922798
| 0.915074
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.5
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
1bb4d37b6f5910fd85a5d687f9ba95669ab512ff
| 223
|
py
|
Python
|
src/backend/tests/__init__.py
|
mrzzy/memento
|
a83db7dd769c949d9924f5ef29930d818b105ef4
|
[
"MIT"
] | 1
|
2019-11-18T04:30:32.000Z
|
2019-11-18T04:30:32.000Z
|
src/backend/tests/__init__.py
|
mrzzy/NP-Portfolio-2
|
a83db7dd769c949d9924f5ef29930d818b105ef4
|
[
"MIT"
] | 1
|
2021-03-10T06:04:20.000Z
|
2021-03-10T06:04:20.000Z
|
src/backend/tests/__init__.py
|
mrzzy/NP-Portfolio-2
|
a83db7dd769c949d9924f5ef29930d818b105ef4
|
[
"MIT"
] | null | null | null |
#
# Memento
# Backend
# Tests
#
from .models.identity import *
from .models.assignment import *
from .models.notification import *
from .ops.identity import *
from .ops.assignment import *
from .ops.notification import *
| 15.928571
| 34
| 0.744395
| 27
| 223
| 6.148148
| 0.37037
| 0.301205
| 0.23494
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.152466
| 223
| 13
| 35
| 17.153846
| 0.878307
| 0.09417
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
941da2ff708ac753a5e6740d5eb4d1133bc1a2b2
| 120
|
py
|
Python
|
python/coursera_python/WESLEYAN/week1/1.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 16
|
2018-11-26T08:39:42.000Z
|
2019-05-08T10:09:52.000Z
|
python/coursera_python/WESLEYAN/week1/1.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 8
|
2020-05-04T06:29:26.000Z
|
2022-02-12T05:33:16.000Z
|
python/coursera_python/WESLEYAN/week1/1.py
|
SayanGhoshBDA/code-backup
|
8b6135facc0e598e9686b2e8eb2d69dd68198b80
|
[
"MIT"
] | 5
|
2020-02-11T16:02:21.000Z
|
2021-02-05T07:48:30.000Z
|
def problem1_1():
print("Problem Set 1")
pass # replace this pass (a do-nothing) statement with your code
| 20
| 68
| 0.658333
| 18
| 120
| 4.333333
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033333
| 0.25
| 120
| 5
| 69
| 24
| 0.833333
| 0.475
| 0
| 0
| 0
| 0
| 0.213115
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.333333
| true
| 0.333333
| 0
| 0
| 0.333333
| 0.333333
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
846a34f97a5d99256a90a1f11c153e6cfb2340e9
| 52
|
py
|
Python
|
utils.py
|
whoamins/Ctftime-TelegramBot
|
e2d309ecdcc4c2e3a484a735771e2edf9cdb2554
|
[
"MIT"
] | 2
|
2021-12-17T21:37:45.000Z
|
2022-02-05T18:30:55.000Z
|
utils.py
|
whoamins/Ctftime-parser
|
2760355b5ea41202c50cf4218ff06d79c8ca1d59
|
[
"MIT"
] | null | null | null |
utils.py
|
whoamins/Ctftime-parser
|
2760355b5ea41202c50cf4218ff06d79c8ca1d59
|
[
"MIT"
] | 2
|
2021-12-18T14:30:28.000Z
|
2021-12-18T20:12:02.000Z
|
def extract_arg(arg):
return arg.split()[1:][0]
| 17.333333
| 29
| 0.634615
| 9
| 52
| 3.555556
| 0.777778
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.045455
| 0.153846
| 52
| 2
| 30
| 26
| 0.681818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0
| 0
| 0.5
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 1
| 0
|
0
| 5
|
84841b824ca105976b7c170ca16678142365540b
| 218
|
py
|
Python
|
preprocessing/utils.py
|
nazariinyzhnyk/nlp-beatles-lyrics-modeling
|
d341b6c2a1fe60a3ca6cae03052775b443f8cedb
|
[
"MIT"
] | null | null | null |
preprocessing/utils.py
|
nazariinyzhnyk/nlp-beatles-lyrics-modeling
|
d341b6c2a1fe60a3ca6cae03052775b443f8cedb
|
[
"MIT"
] | null | null | null |
preprocessing/utils.py
|
nazariinyzhnyk/nlp-beatles-lyrics-modeling
|
d341b6c2a1fe60a3ca6cae03052775b443f8cedb
|
[
"MIT"
] | null | null | null |
import random
import numpy as np
def set_seed(random_state: int = 42) -> None:
"""Function fixes random state to ensure results are reproducible"""
np.random.seed(random_state)
random.seed(random_state)
| 21.8
| 72
| 0.729358
| 32
| 218
| 4.84375
| 0.59375
| 0.283871
| 0.290323
| 0.270968
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011173
| 0.178899
| 218
| 9
| 73
| 24.222222
| 0.854749
| 0.284404
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.4
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
84da2222e9165d8c0a637fe44385107549d65db3
| 395
|
py
|
Python
|
tests/bvlapi/guid/test_club.py
|
alanverresen/bvl-api
|
8db768c4393f054bdf314cbcbd47eb408647f877
|
[
"MIT"
] | 1
|
2021-09-27T14:54:39.000Z
|
2021-09-27T14:54:39.000Z
|
tests/bvlapi/guid/test_club.py
|
alanverresen/bvl-api
|
8db768c4393f054bdf314cbcbd47eb408647f877
|
[
"MIT"
] | null | null | null |
tests/bvlapi/guid/test_club.py
|
alanverresen/bvl-api
|
8db768c4393f054bdf314cbcbd47eb408647f877
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# Contains tests for validating club GUIDs.
from bvlapi.guid.club import is_club_guid
def test_is_club_guid():
""" Test that a valid club GUID is recognized.
"""
assert is_club_guid("BVBL1328")
def test_is_club_guid__false():
""" Test that an invalid club GUID is recognized.
"""
assert not is_club_guid("not a guid")
| 20.789474
| 53
| 0.683544
| 61
| 395
| 4.196721
| 0.491803
| 0.21875
| 0.195313
| 0.101563
| 0.335938
| 0
| 0
| 0
| 0
| 0
| 0
| 0.018987
| 0.2
| 395
| 18
| 54
| 21.944444
| 0.791139
| 0.465823
| 0
| 0
| 0
| 0
| 0.091837
| 0
| 0
| 0
| 0
| 0
| 0.4
| 1
| 0.4
| true
| 0
| 0.2
| 0
| 0.6
| 0
| 0
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 1
| 0
|
0
| 5
|
ca3af843c611433f6ffbca54bba8965c06ce88b0
| 362
|
py
|
Python
|
app/oauth_office365/tests.py
|
ricardojba/PwnAuth
|
875c4689bb0e35abdfa5ddda7fe89d15d1e52c1f
|
[
"Apache-2.0"
] | 304
|
2018-05-21T17:28:34.000Z
|
2021-09-11T21:36:05.000Z
|
app/oauth_office365/tests.py
|
ricardojba/PwnAuth
|
875c4689bb0e35abdfa5ddda7fe89d15d1e52c1f
|
[
"Apache-2.0"
] | 9
|
2018-05-22T17:08:51.000Z
|
2021-06-10T20:17:36.000Z
|
app/oauth_office365/tests.py
|
lunarobliq/PwnAuth
|
2074e0236fa83c4d57ac9a1eed64921b7b99fca2
|
[
"Apache-2.0"
] | 76
|
2018-05-21T18:19:20.000Z
|
2021-06-04T05:13:42.000Z
|
from django.test import TestCase
# Create your tests here.
# TODO add test to validate application scopes are enforced when creating
# TODO add test to validate that application redirect and refresh URL match the site's base url
# TODO add unicode handling tests
# TODO test large attachments
# TODO basic tests for getting and deleting messages, attachments
| 32.909091
| 95
| 0.801105
| 55
| 362
| 5.272727
| 0.690909
| 0.072414
| 0.075862
| 0.089655
| 0.144828
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.171271
| 362
| 10
| 96
| 36.2
| 0.966667
| 0.864641
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.1
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
ca4dac22e817435c011945da10bee5efd497b22e
| 55
|
py
|
Python
|
plugins/speechhandler/gmail/__init__.py
|
narfman0/jeeves
|
a88035846d6ec95c9ff8559d12b3b5ef4bb1c2d6
|
[
"MIT"
] | 4
|
2019-01-18T00:32:33.000Z
|
2022-03-06T06:00:44.000Z
|
plugins/speechhandler/gmail/__init__.py
|
narfman0/jeeves
|
a88035846d6ec95c9ff8559d12b3b5ef4bb1c2d6
|
[
"MIT"
] | 2
|
2018-06-17T04:38:54.000Z
|
2018-07-25T15:00:54.000Z
|
plugins/speechhandler/gmail/__init__.py
|
narfman0/jeeves
|
a88035846d6ec95c9ff8559d12b3b5ef4bb1c2d6
|
[
"MIT"
] | 5
|
2017-12-29T12:34:08.000Z
|
2019-04-26T19:21:32.000Z
|
# -*- coding: utf-8 -*-
from .gmail import GmailPlugin
| 18.333333
| 30
| 0.654545
| 7
| 55
| 5.142857
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.021739
| 0.163636
| 55
| 2
| 31
| 27.5
| 0.76087
| 0.381818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
04737b29349fe094e4753116c73ba39614a30afc
| 477
|
py
|
Python
|
python/anyascii/_data/_30c.py
|
casept/anyascii
|
d4f426b91751254b68eaa84c6cd23099edd668e6
|
[
"ISC"
] | null | null | null |
python/anyascii/_data/_30c.py
|
casept/anyascii
|
d4f426b91751254b68eaa84c6cd23099edd668e6
|
[
"ISC"
] | null | null | null |
python/anyascii/_data/_30c.py
|
casept/anyascii
|
d4f426b91751254b68eaa84c6cd23099edd668e6
|
[
"ISC"
] | null | null | null |
b=' Tan Shuang Zhou Dai Lou Xu Tang Qiu Lun Kui Dui Shi Qi Sha Za Fu Liu She Ke Gong Jian Yong Meng Ung Na Fen Xu Jian Zhong Xiang Fen Fu Lu Lu Bei Ci Zhi Lai Tui Teng Lei Ceoi Wu Zhan Wu Che Qian Zong Lun Gui Zhan Long Ze Xing E Yi Guo Da Ji Jian Chen Fei Ying Wei Long Yi Dui Yan Nong Daat Shi'
| 477
| 477
| 0.465409
| 72
| 477
| 3.083333
| 0.819444
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.528302
| 477
| 1
| 477
| 477
| 0.986667
| 0
| 0
| 0
| 0
| 1
| 0.98954
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
0474b3b615ffa27e5b2d59490038a87b1d507a7f
| 2,650
|
py
|
Python
|
Collections-a-installer/community-general-2.4.0/tests/unit/plugins/become/test_doas.py
|
d-amien-b/simple-getwordpress
|
da90d515a0aa837b633d50db4d91d22b031c04a2
|
[
"MIT"
] | 22
|
2021-07-16T08:11:22.000Z
|
2022-03-31T07:15:34.000Z
|
Collections-a-installer/community-general-2.4.0/tests/unit/plugins/become/test_doas.py
|
d-amien-b/simple-getwordpress
|
da90d515a0aa837b633d50db4d91d22b031c04a2
|
[
"MIT"
] | 12
|
2020-02-21T07:24:52.000Z
|
2020-04-14T09:54:32.000Z
|
Collections-a-installer/community-general-2.4.0/tests/unit/plugins/become/test_doas.py
|
d-amien-b/simple-getwordpress
|
da90d515a0aa837b633d50db4d91d22b031c04a2
|
[
"MIT"
] | 39
|
2021-07-05T02:31:42.000Z
|
2022-03-31T02:46:03.000Z
|
# (c) 2012-2014, Michael DeHaan <michael.dehaan@gmail.com>
# (c) 2020 Ansible Project
#
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
# Make coding more python3-ish
from __future__ import (absolute_import, division, print_function)
__metaclass__ = type
import re
from ansible import context
from .helper import call_become_plugin
def test_doas_basic(mocker, parser, reset_cli_args):
options = parser.parse_args([])
context._init_global_context(options)
default_cmd = "/bin/foo"
default_exe = "/bin/bash"
doas_exe = 'doas'
doas_flags = '-n'
success = 'BECOME-SUCCESS-.+?'
task = {
'become_method': 'community.general.doas',
}
var_options = {}
cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe)
print(cmd)
assert (re.match("""%s %s %s -c 'echo %s; %s'""" % (doas_exe, doas_flags, default_exe, success,
default_cmd), cmd) is not None)
def test_doas(mocker, parser, reset_cli_args):
options = parser.parse_args([])
context._init_global_context(options)
default_cmd = "/bin/foo"
default_exe = "/bin/bash"
doas_exe = 'doas'
doas_flags = '-n'
success = 'BECOME-SUCCESS-.+?'
task = {
'become_user': 'foo',
'become_method': 'community.general.doas',
'become_flags': doas_flags,
}
var_options = {}
cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe)
print(cmd)
assert (re.match("""%s %s -u %s %s -c 'echo %s; %s'""" % (doas_exe, doas_flags, task['become_user'], default_exe, success,
default_cmd), cmd) is not None)
def test_doas_varoptions(mocker, parser, reset_cli_args):
options = parser.parse_args([])
context._init_global_context(options)
default_cmd = "/bin/foo"
default_exe = "/bin/bash"
doas_exe = 'doas'
doas_flags = '-n'
success = 'BECOME-SUCCESS-.+?'
task = {
'become_user': 'foo',
'become_method': 'community.general.doas',
'become_flags': 'xxx',
}
var_options = {
'ansible_become_user': 'bar',
'ansible_become_flags': doas_flags,
}
cmd = call_become_plugin(task, var_options, cmd=default_cmd, executable=default_exe)
print(cmd)
assert (re.match("""%s %s -u %s %s -c 'echo %s; %s'""" % (doas_exe, doas_flags, var_options['ansible_become_user'], default_exe, success,
default_cmd), cmd) is not None)
| 31.176471
| 141
| 0.612453
| 334
| 2,650
| 4.577844
| 0.254491
| 0.058862
| 0.043165
| 0.039241
| 0.768476
| 0.722695
| 0.722695
| 0.722695
| 0.722695
| 0.722695
| 0
| 0.00859
| 0.253208
| 2,650
| 84
| 142
| 31.547619
| 0.764022
| 0.075849
| 0
| 0.666667
| 0
| 0
| 0.181744
| 0.027016
| 0
| 0
| 0
| 0
| 0.05
| 1
| 0.05
| false
| 0
| 0.066667
| 0
| 0.116667
| 0.066667
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
04a96fdf4942d571969168bae0c819b41a089652
| 98
|
py
|
Python
|
alyBlog/apps/news/templatetags/__init__.py
|
Hx-someone/aly-blog
|
e0205777d2ff1642fde5741a5b5c1b06ad675001
|
[
"WTFPL"
] | 1
|
2020-04-17T02:15:45.000Z
|
2020-04-17T02:15:45.000Z
|
alyBlog/apps/news/templatetags/__init__.py
|
Hx-someone/aly-blog
|
e0205777d2ff1642fde5741a5b5c1b06ad675001
|
[
"WTFPL"
] | null | null | null |
alyBlog/apps/news/templatetags/__init__.py
|
Hx-someone/aly-blog
|
e0205777d2ff1642fde5741a5b5c1b06ad675001
|
[
"WTFPL"
] | null | null | null |
# -*- coding: utf-8 -*-
"""
@Time : 2020/3/2 14:38
@Author : 半纸梁
@File : __init__.py.py
"""
| 16.333333
| 25
| 0.5
| 15
| 98
| 3
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.146667
| 0.234694
| 98
| 6
| 26
| 16.333333
| 0.453333
| 0.908163
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
04af3be462be49c77cf685b5d006bc72dfdb498a
| 800
|
py
|
Python
|
week6/w6e1.py
|
melphick/pybasics
|
68508d10b7509943b629b3c627252de60b6a5744
|
[
"Apache-2.0"
] | null | null | null |
week6/w6e1.py
|
melphick/pybasics
|
68508d10b7509943b629b3c627252de60b6a5744
|
[
"Apache-2.0"
] | null | null | null |
week6/w6e1.py
|
melphick/pybasics
|
68508d10b7509943b629b3c627252de60b6a5744
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
"""
A function that returns the multiplication product of three parameters--x, y,
and z has a default value of 1.
a. Call the function with all positional arguments.
b. Call the function with all named arguments.
c. Call the function with a mix of positional and named arguments.
d. Call the function with only two arguments and use the default value for z.
"""
def multiply(x,y,z=1):
return x*y*z
print "Call the function with all positional arguments."
print multiply(10,5,2)
print "Call the function with all named arguments."
print multiply(x=10,y=5,z=2)
print "Call the function with a mix of positional and named arguments."
print multiply(5,y=2,z=10)
print "Call the function with only two arguments and use the default value for z."
print multiply(x=10,y=5)
| 26.666667
| 82
| 0.7475
| 145
| 800
| 4.124138
| 0.289655
| 0.093645
| 0.200669
| 0.254181
| 0.729097
| 0.720736
| 0.632107
| 0.374582
| 0.374582
| 0.374582
| 0
| 0.025602
| 0.17
| 800
| 29
| 83
| 27.586207
| 0.875
| 0.02
| 0
| 0
| 0
| 0
| 0.542857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0.8
| 0
| 0
| 0
| null | 0
| 1
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
04b40feb5f75992b5a145a00b4bdb91155e08e55
| 55
|
py
|
Python
|
currency/__init__.py
|
p4l1ly/currency
|
787dfa0bad0ba8c8663631bc18ee57d0b74552c4
|
[
"MIT"
] | null | null | null |
currency/__init__.py
|
p4l1ly/currency
|
787dfa0bad0ba8c8663631bc18ee57d0b74552c4
|
[
"MIT"
] | null | null | null |
currency/__init__.py
|
p4l1ly/currency
|
787dfa0bad0ba8c8663631bc18ee57d0b74552c4
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from .fetcher import from_all
| 13.75
| 29
| 0.636364
| 8
| 55
| 4.25
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.022222
| 0.181818
| 55
| 3
| 30
| 18.333333
| 0.733333
| 0.381818
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
04d28a5a1f76c44608d5fef3ad3b9f6620990a82
| 669
|
py
|
Python
|
renovation_core/install/after_migrate.py
|
Abadulrehman/renovation_core
|
2cb015ec1832ceb6076e20914f504a1049d7a736
|
[
"MIT"
] | 18
|
2020-04-12T20:40:41.000Z
|
2022-03-09T13:50:59.000Z
|
renovation_core/install/after_migrate.py
|
Abadulrehman/renovation_core
|
2cb015ec1832ceb6076e20914f504a1049d7a736
|
[
"MIT"
] | 28
|
2020-04-21T13:24:28.000Z
|
2021-11-03T12:23:01.000Z
|
renovation_core/install/after_migrate.py
|
Abadulrehman/renovation_core
|
2cb015ec1832ceb6076e20914f504a1049d7a736
|
[
"MIT"
] | 16
|
2020-04-12T20:31:50.000Z
|
2022-01-30T12:19:45.000Z
|
import frappe
def after_migrate():
set_default_otp_template()
def set_default_otp_template():
if not frappe.db.get_value("System Settings", None, "email_otp_template"):
if frappe.db.exists("Email Template", "Default Email OTP Template"):
# should exists via fixtures
frappe.db.set_value("System Settings", None, "email_otp_template", "Default Email OTP Template")
if not frappe.db.get_value("System Settings", None, "sms_otp_template"):
if frappe.db.exists("SMS Template", "Default SMS OTP Template"):
# should exists via fixtures
frappe.db.set_value("System Settings", None, "sms_otp_template", "Default SMS OTP Template")
| 41.8125
| 102
| 0.727952
| 94
| 669
| 4.978723
| 0.244681
| 0.235043
| 0.111111
| 0.196581
| 0.852564
| 0.700855
| 0.632479
| 0.504274
| 0.504274
| 0.504274
| 0
| 0
| 0.155456
| 669
| 16
| 103
| 41.8125
| 0.828319
| 0.079223
| 0
| 0
| 0
| 0
| 0.413681
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| true
| 0
| 0.1
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 1
| 0
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b6cc7f99fc4e29918f1da13bf49a17e58ad30b28
| 134
|
py
|
Python
|
supermario/supermario 1118/mygame.py
|
Kimmiryeong/2DGP_GameProject
|
ad3fb197aab27227fc92fd404b2c310f8d0827ca
|
[
"MIT"
] | null | null | null |
supermario/supermario 1118/mygame.py
|
Kimmiryeong/2DGP_GameProject
|
ad3fb197aab27227fc92fd404b2c310f8d0827ca
|
[
"MIT"
] | null | null | null |
supermario/supermario 1118/mygame.py
|
Kimmiryeong/2DGP_GameProject
|
ad3fb197aab27227fc92fd404b2c310f8d0827ca
|
[
"MIT"
] | null | null | null |
import game_framework
import pico2d
import start_state
pico2d.open_canvas()
game_framework.run(start_state)
pico2d.close_canvas()
| 13.4
| 31
| 0.835821
| 19
| 134
| 5.578947
| 0.526316
| 0.245283
| 0.301887
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.024793
| 0.097015
| 134
| 10
| 32
| 13.4
| 0.85124
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.5
| 0
| 0.5
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
8e193b0bde2f443a74d2f503a6ddb21056e794e0
| 20
|
py
|
Python
|
tests/acceptance/test_acceptance/__init__.py
|
WolffunGame/experiment-agent
|
5fbf3fb4ef2defc4c81ecafb2335bb9ac31c1aac
|
[
"Apache-2.0"
] | 23
|
2020-01-14T11:28:10.000Z
|
2022-02-06T09:05:34.000Z
|
tests/acceptance/test_acceptance/__init__.py
|
WolffunGame/experiment-agent
|
5fbf3fb4ef2defc4c81ecafb2335bb9ac31c1aac
|
[
"Apache-2.0"
] | 146
|
2020-01-13T19:33:09.000Z
|
2022-03-30T22:24:38.000Z
|
tests/acceptance/test_acceptance/__init__.py
|
WolffunGame/experiment-agent
|
5fbf3fb4ef2defc4c81ecafb2335bb9ac31c1aac
|
[
"Apache-2.0"
] | 22
|
2020-02-17T22:48:55.000Z
|
2022-02-22T09:54:57.000Z
|
# __init__ is empty
| 10
| 19
| 0.75
| 3
| 20
| 3.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.2
| 20
| 1
| 20
| 20
| 0.6875
| 0.85
| 0
| null | 0
| null | 0
| 0
| null | 0
| 0
| 0
| null | 1
| null | true
| 0
| 0
| null | null | null | 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
8e4e4c47e3e55c6c03e22f0df229a7bc75c3388a
| 7,995
|
py
|
Python
|
backend/tests/test_verify.py
|
uwplse/stng
|
ce12c2c079516df873382a5aa3c18c407833d130
|
[
"MIT"
] | 14
|
2017-03-07T00:14:33.000Z
|
2022-02-09T00:59:22.000Z
|
backend/tests/test_verify.py
|
uwplse/stng
|
ce12c2c079516df873382a5aa3c18c407833d130
|
[
"MIT"
] | 11
|
2016-11-22T13:14:55.000Z
|
2021-12-14T00:56:51.000Z
|
backend/tests/test_verify.py
|
uwplse/stng
|
ce12c2c079516df873382a5aa3c18c407833d130
|
[
"MIT"
] | 6
|
2016-11-07T13:38:45.000Z
|
2021-04-04T12:13:31.000Z
|
import unittest
from grammar import *
from verify import *
import assertion_grammar as ag
class TestVerification(unittest.TestCase):
def test_while(self):
# Verifier(tst_ast)
pass
class TestWPC(unittest.TestCase):
def test_trivial(self):
tst_ast = NumNode(4)
self.assertTrue(WeakestPrecondition(tst_ast, True, []).get())
tst_ast = VarNode("i")
self.assertTrue(WeakestPrecondition(tst_ast, True, []).get())
def test_assignment(self):
tst_ast = AssignExp(VarNode("i"), NumNode(3))
q = ag.BinExp(ag.VarNode("i"), '<', ag.NumNode(10))
self.assertEqual("(3 < 10)", ag.tree_to_str(WeakestPrecondition(tst_ast, q, []).get()))
def test_assignment_sequence(self):
tst_ast = Block([AssignExp(VarNode("i"), NumNode(3)),
AssignExp(VarNode("b"), VarNode("i"))])
q = ag.BinExp(ag.VarNode("b"), '<', ag.NumNode(10))
self.assertEqual("(3 < 10)", ag.tree_to_str(WeakestPrecondition(tst_ast, q, []).get()))
def test_array_assignment(self):
tst_ast = AssignExp(ArrExp(VarNode("i"), NumNode(0)), NumNode(3))
q = ag.BinExp(ag.ArrExp(ag.VarNode("i"), ag.NumNode(0)), '<', ag.NumNode(10))
self.assertEqual("(i{0:=3}[0] < 10)", ag.tree_to_str(WeakestPrecondition(tst_ast, q, []).get()))
def test_complicated_array_assignment(self):
tst_ast = AssignExp(ArrExp(VarNode("x"), NumNode(0)), ArrExp(VarNode("x"), NumNode(3)))
q = ag.BinExp(ag.ArrExp(ag.VarNode("x"), ag.NumNode(0)), '<', ag.NumNode(10))
self.assertEqual("(x{0:=x[3]}[0] < 10)", ag.tree_to_str(WeakestPrecondition(tst_ast, q, []).get()))
tst_ast = AssignExp(ArrExp(VarNode("x"), VarNode("i")), ArrExp(VarNode("x"), NumNode(3)))
q = ag.BinExp(ag.ArrExp(ag.VarNode("x"), ag.NumNode(0)), '<', ag.NumNode(10))
self.assertEqual("(x{i:=x[3]}[0] < 10)", ag.tree_to_str(WeakestPrecondition(tst_ast, q, []).get()))
tst_ast = AssignExp(ArrExp(VarNode("x"), VarNode("i")), ArrExp(VarNode("y"), VarNode("i")))
q = ag.BinExp(ag.ArrExp(ag.VarNode("x"), ag.NumNode(0)), '<', ag.NumNode(10))
self.assertEqual("(x{i:=y[i]}[0] < 10)", ag.tree_to_str(WeakestPrecondition(tst_ast, q, []).get()))
def test_multiple_array_assignment(self):
tst_ast = Block([AssignExp(ArrExp(VarNode("x"), NumNode(0)), NumNode(3)),
AssignExp(ArrExp(VarNode("x"), VarNode("i")), NumNode(4))])
q = ag.BinExp(ag.ArrExp(ag.VarNode("x"), ag.NumNode(0)), '<', ag.NumNode(10))
self.assertEqual("(x{0:=3}{i:=4}[0] < 10)", ag.tree_to_str(WeakestPrecondition(tst_ast, q, []).get()))
def test_sequence_array_assignment(self):
tst_ast = Block([AssignExp(ArrExp(VarNode("x"), NumNode(0)), NumNode(3)),
AssignExp(ArrExp(VarNode("y"), VarNode("i")), ArrExp(VarNode("x"), NumNode(0)))])
q = ag.BinExp(ag.ArrExp(ag.VarNode("y"), ag.NumNode(0)), '<', ag.NumNode(10))
self.assertEqual("(y{i:=x{0:=3}[0]}[0] < 10)", ag.tree_to_str(WeakestPrecondition(tst_ast, q, []).get()))
def test_conditional(self):
tst_ast = IfExp(BinExp(VarNode("i"), '<', NumNode(1)),
Block([AssignExp(VarNode("i"), NumNode(3))]),
Block([NumNode(1)]))
q = ag.BinExp(ag.VarNode("i"), '<', ag.NumNode(10))
wp = WeakestPrecondition(tst_ast, q, [])
allcond = ag.tree_to_str(wp.get_with_additional_conditions())
print "COND", allcond
self.assertEqual("((!((i < 1))) -> ((i < 10)) && ((i < 1)) -> ((3 < 10)))", allcond)
def test_block(self):
tst_ast = Block(body=[AssignExp(VarNode("i"), NumNode(3)),
VarNode("i")])
q = ag.BinExp(ag.VarNode("i"), '<', ag.NumNode(10))
self.assertEqual("(3 < 10)", ag.tree_to_str(WeakestPrecondition(tst_ast, q, []).get()))
#def test_while(self):
#tst_ast = Block([AssignExp(VarNode("i"), NumNode(1)),
#WhileLoop(VarNode("i"), BinExp(VarNode("i"), '<', NumNode(3)),
#Block([AssignExp(VarNode("i"), BinExp(VarNode("i"), '+', NumNode(1)))]))])
#q = ag.BinExp(ag.VarNode("i"), '==', ag.NumNode(3))
## print tree_to_str(tst_ast)
#precon = WeakestPrecondition(tst_ast, q)
#answer = precon.get_with_additional_conditions()
## print ag.tree_to_str(answer)
#self.assertEqual("(I(i) && i < 3) -> (I(i + 1)) && (I(i) && !(i < 3)) -> (i == 3) && I(1)",
#ag.tree_to_str(answer))
class TestComplicatedLoop(unittest.TestCase):
def test_loop(self):
print "in test_loop."
tst_ast = Block([AssignExp(VarNode("i"), NumNode(1)),
WhileLoop(VarNode("i"), BinExp(VarNode("i"), '<', NumNode(3)),
Block([AssignExp(ArrExp(VarNode("a"), VarNode("i")),
ArrExp(VarNode("a"), BinExp(VarNode("i"), '+', NumNode(1)))),
AssignExp(VarNode("i"), BinExp(VarNode("i"), '+', NumNode(1)))]))])
#print tree_to_str(tst_ast)
q = ag.CallExp(ag.VarNode("postcondition"), [ag.VarNode("a")])
precon = WeakestPrecondition(tst_ast, q, [])
print "HMMM:", precon.additional_conditions
print "====="
print ag.tree_to_str(precon.get_with_additional_conditions())
print "====="
def test_2d_loop(self):
print "in test_2d_loop."
tst_ast2 = Block([AssignExp(VarNode("i"), NumNode(1)),
WhileLoop(VarNode("i"), BinExp(VarNode("i"), '<', NumNode(3)),
Block([AssignExp(VarNode("j"), NumNode(1)),
WhileLoop(VarNode("j"), BinExp(VarNode("j"), '<', NumNode(4)),
Block([AssignExp(ArrExp(VarNode("a"), BinExp(VarNode("i"), '+',
BinExp(VarNode("j"), '*', VarNode("N")))),
ArrExp(VarNode("b"), BinExp(BinExp(VarNode("i"), '+', NumNode(1)),
"+", BinExp(VarNode("j"), '*', VarNode("N"))))),
AssignExp(VarNode("j"), BinExp(VarNode("j"), '+', NumNode(1)))])),
AssignExp(VarNode("i"), BinExp(VarNode("i"), '+', NumNode(1)))]))])
print tree_to_str(tst_ast2)
q = ag.CallExp(ag.VarNode("postcondition"), [ag.VarNode("a")])
precon = WeakestPrecondition(tst_ast2, q, [])
print "HMMM:", precon.additional_conditions
print "----"
print ag.tree_to_str(precon.get_with_additional_conditions())
print "----"
def test_conditional_in_loop(self):
tst_ast = Block([AssignExp(VarNode("i"), NumNode(1)),
WhileLoop(VarNode("i"), BinExp(VarNode("i"), '<', NumNode(3)),
Block([IfExp(BinExp(VarNode("i"), '<', NumNode(2)),
Block([AssignExp(ArrExp(VarNode("a"), VarNode("i")),
ArrExp(VarNode("b"), BinExp(VarNode("i"), '+', NumNode(1))))]),
Block([AssignExp(ArrExp(VarNode("a"), VarNode("i")),
ArrExp(VarNode("b"), BinExp(VarNode("i"), '-', NumNode(1))))])),
AssignExp(VarNode("i"), BinExp(VarNode("i"), '+', NumNode(1)))]))])
q = ag.BinExp(ag.VarNode("i"), '<', ag.NumNode(10))
wp = WeakestPrecondition(tst_ast, q, [])
allcond = ag.tree_to_str(wp.get_with_additional_conditions())
print tree_to_str(tst_ast)
print "BIGCOND", allcond
if __name__ == '__main__':
unittest.main()
| 51.25
| 129
| 0.524578
| 921
| 7,995
| 4.413681
| 0.085776
| 0.096433
| 0.088561
| 0.04059
| 0.845756
| 0.772202
| 0.714145
| 0.667897
| 0.617466
| 0.600492
| 0
| 0.021053
| 0.275172
| 7,995
| 155
| 130
| 51.580645
| 0.680414
| 0.072045
| 0
| 0.366972
| 0
| 0.009174
| 0.056262
| 0
| 0
| 0
| 0
| 0
| 0.119266
| 0
| null | null | 0.009174
| 0.036697
| null | null | 0.12844
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
f3cd242daceaf22c1da4707d844a7564b1c6d5c1
| 66
|
py
|
Python
|
test.py
|
insertinterestingnamehere/cython_overload_except
|
00d76ad8020fcb21948545de8161da65f7f4acd8
|
[
"BSD-2-Clause"
] | null | null | null |
test.py
|
insertinterestingnamehere/cython_overload_except
|
00d76ad8020fcb21948545de8161da65f7f4acd8
|
[
"BSD-2-Clause"
] | null | null | null |
test.py
|
insertinterestingnamehere/cython_overload_except
|
00d76ad8020fcb21948545de8161da65f7f4acd8
|
[
"BSD-2-Clause"
] | null | null | null |
import pyadd as pa
print "running test"
pa.test()
print "passed!"
| 13.2
| 20
| 0.727273
| 11
| 66
| 4.363636
| 0.727273
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.151515
| 66
| 4
| 21
| 16.5
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0.287879
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0.25
| 0.25
| null | null | 0.5
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 1
|
0
| 5
|
ed94af084e037eae274541880d7fd4eaccace6d8
| 21
|
py
|
Python
|
detection/models/__init__.py
|
DoomsdayT/Raspberry-Pi-Fall-Detection
|
ca1216e1e23ccf3484ea8365fac0b10e5bc8eb2a
|
[
"MIT"
] | 1
|
2020-09-08T08:08:56.000Z
|
2020-09-08T08:08:56.000Z
|
detection/models/__init__.py
|
DoomsdayT/Raspberry-Pi-Fall-Detection
|
ca1216e1e23ccf3484ea8365fac0b10e5bc8eb2a
|
[
"MIT"
] | null | null | null |
detection/models/__init__.py
|
DoomsdayT/Raspberry-Pi-Fall-Detection
|
ca1216e1e23ccf3484ea8365fac0b10e5bc8eb2a
|
[
"MIT"
] | 1
|
2022-01-24T06:03:54.000Z
|
2022-01-24T06:03:54.000Z
|
from . import expert
| 10.5
| 20
| 0.761905
| 3
| 21
| 5.333333
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190476
| 21
| 1
| 21
| 21
| 0.941176
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 5
|
edb57933d6da6c82d084bb9cc110fbefc034a51c
| 2,113
|
py
|
Python
|
test_query.py
|
kosugi/alfred.y-transit
|
62d6eacef9dbe1dbfeeda56aa0ebadbd4d261460
|
[
"BSD-2-Clause"
] | 2
|
2015-10-13T22:19:28.000Z
|
2016-08-16T05:33:00.000Z
|
test_query.py
|
kosugi/alfred.y-transit
|
62d6eacef9dbe1dbfeeda56aa0ebadbd4d261460
|
[
"BSD-2-Clause"
] | null | null | null |
test_query.py
|
kosugi/alfred.y-transit
|
62d6eacef9dbe1dbfeeda56aa0ebadbd4d261460
|
[
"BSD-2-Clause"
] | null | null | null |
# -*- coding: utf-8 -*-
import unittest
import re
from query import *
def squeeze(value):
value = value.replace('\r', '')
value = value.replace('\n', '')
return value
class QueryTestCase(unittest.TestCase):
def test_parse_names(self):
self.assertEqual(None, parse_names(u''))
self.assertEqual(None, parse_names(u' '))
self.assertEqual(None, parse_names(u'\t'))
self.assertEqual(None, parse_names(u'\r'))
self.assertEqual(None, parse_names(u'\n'))
self.assertEqual(None, parse_names(u'a'))
self.assertEqual(None, parse_names(u' a'))
self.assertEqual(None, parse_names(u' a\t'))
self.assertEqual(None, parse_names(u' a\t '))
self.assertEqual((u'a', u'b'), parse_names(u' a b'))
self.assertEqual((u'a', u'b'), parse_names(u' a b '))
self.assertEqual((u'a', u'b'), parse_names(u' a b '))
self.assertEqual((u'a', u'b'), parse_names(u'a-b'))
self.assertEqual((u'a', u'b'), parse_names(u'a - b'))
self.assertEqual((u'a', u'b'), parse_names(u'a〜b'))
self.assertEqual((u'a', u'b'), parse_names(u'a~b'))
self.assertEqual((u'a', u'b'), parse_names(u'a-b'))
self.assertEqual((u'a', u'b'), parse_names(u'a - b'))
self.assertEqual(None, parse_names(u' a b c'))
def test_do(self):
self.maxDiff = None
xml = re.sub(ur'>\s*<', u'><', do(u''))
self.assertEqual(u'<?xml version="1.0"?><items><item uid="result" arg="" valid="no"><title>type “from” and “to” station names</title></item></items>', xml)
xml = re.sub(ur'>\s*<', u'><', do(u' a '))
self.assertEqual(u'<?xml version="1.0"?><items><item uid="result" arg="" valid="no"><title>type “from” and “to” station names</title></item></items>', xml)
xml = re.sub(ur'>\s*<', u'><', do(u' a b '))
self.assertEqual(u'<?xml version="1.0"?><items><item uid="result" arg="http://transit.yahoo.co.jp/search/result?from=a&to=b" valid="yes"><title>Query routes from a to b</title></item></items>', xml)
if __name__ == '__main__':
unittest.main()
| 43.122449
| 210
| 0.58637
| 330
| 2,113
| 3.666667
| 0.190909
| 0.041322
| 0.172727
| 0.138843
| 0.743802
| 0.742975
| 0.717355
| 0.678512
| 0.666116
| 0.666116
| 0
| 0.004086
| 0.189304
| 2,113
| 48
| 211
| 44.020833
| 0.701693
| 0.009938
| 0
| 0.394737
| 0
| 0.078947
| 0.265072
| 0.109569
| 0
| 0
| 0
| 0
| 0.578947
| 0
| null | null | 0
| 0.078947
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
edc1a87e4f10eaf7cffcc4f3d179b740642df13d
| 585
|
py
|
Python
|
accountsplus/tests/test_admin.py
|
GhalebKhaled/django-users-plus
|
467f6cb528672a1eafc336640d2c7d0f06c378c6
|
[
"MIT"
] | 3
|
2016-05-26T13:25:19.000Z
|
2020-12-30T07:40:02.000Z
|
accountsplus/tests/test_admin.py
|
GhalebKhaled/django-users-plus
|
467f6cb528672a1eafc336640d2c7d0f06c378c6
|
[
"MIT"
] | 31
|
2016-05-26T13:20:48.000Z
|
2021-06-10T19:57:19.000Z
|
accountsplus/tests/test_admin.py
|
GhalebKhaled/django-users-plus
|
467f6cb528672a1eafc336640d2c7d0f06c378c6
|
[
"MIT"
] | 1
|
2018-05-24T13:01:40.000Z
|
2018-05-24T13:01:40.000Z
|
from __future__ import unicode_literals
import django.test
import django.contrib.admin
import logging
from .. import admin
from test_models import (UnitTestCompany, UnitTestUser, UnitTestAuditLogEvent, )
logging.disable(logging.CRITICAL)
@django.contrib.admin.register(UnitTestCompany)
class UnitTestCompanyAdmin(admin.BaseCompanyAdmin):
pass
@django.contrib.admin.register(UnitTestUser)
class UnitTestUserAdmin(admin.BaseUserAdmin):
pass
@django.contrib.admin.register(UnitTestAuditLogEvent)
class UnitTestAuditLogEventAdmin(admin.BaseAuditLogEventAdmin):
pass
| 20.892857
| 80
| 0.82735
| 58
| 585
| 8.241379
| 0.431034
| 0.108787
| 0.150628
| 0.16318
| 0.125523
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097436
| 585
| 27
| 81
| 21.666667
| 0.905303
| 0
| 0
| 0.1875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.1875
| 0.375
| 0
| 0.5625
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
edc713e6ea136dc5638a1ccfbdea7741a2100dd6
| 187
|
py
|
Python
|
gwcs/tests/setup_package.py
|
cshanahan1/gwcs
|
de203e5953d648baf359c50d3e6a479bcb97b83b
|
[
"BSD-3-Clause"
] | null | null | null |
gwcs/tests/setup_package.py
|
cshanahan1/gwcs
|
de203e5953d648baf359c50d3e6a479bcb97b83b
|
[
"BSD-3-Clause"
] | null | null | null |
gwcs/tests/setup_package.py
|
cshanahan1/gwcs
|
de203e5953d648baf359c50d3e6a479bcb97b83b
|
[
"BSD-3-Clause"
] | null | null | null |
# Licensed under a 3-clause BSD style license - see LICENSE.rst
def get_package_data():
return {
_ASTROPY_PACKAGE_NAME_ + '.tests': ['coveragerc', 'data/*.hdr'] # noqa
}
| 26.714286
| 78
| 0.652406
| 24
| 187
| 4.833333
| 0.875
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.006803
| 0.213904
| 187
| 6
| 79
| 31.166667
| 0.782313
| 0.352941
| 0
| 0
| 0
| 0
| 0.220339
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| true
| 0
| 0
| 0.25
| 0.5
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 1
| 0
| 0
|
0
| 5
|
edd3adc383a5336977f9cd8a5a463386702c9be3
| 203
|
py
|
Python
|
content/usage/lst/feedback/reset-feedback.py
|
Kumortas/osdoc_eyelogic
|
5688c1b85135aaa0fcfb37c4889b9c3dd82d7821
|
[
"CC-BY-3.0"
] | null | null | null |
content/usage/lst/feedback/reset-feedback.py
|
Kumortas/osdoc_eyelogic
|
5688c1b85135aaa0fcfb37c4889b9c3dd82d7821
|
[
"CC-BY-3.0"
] | null | null | null |
content/usage/lst/feedback/reset-feedback.py
|
Kumortas/osdoc_eyelogic
|
5688c1b85135aaa0fcfb37c4889b9c3dd82d7821
|
[
"CC-BY-3.0"
] | null | null | null |
exp.set('total_responses', 0)
exp.set('total_correct', 0)
exp.set('total_response_time', 0)
exp.set('average_response_time', 'NA')
exp.set('avg_rt', 'NA')
exp.set('accuracy', 'NA')
exp.set('acc', 'NA')
| 22.555556
| 38
| 0.679803
| 35
| 203
| 3.742857
| 0.4
| 0.320611
| 0.251908
| 0.183206
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.015957
| 0.073892
| 203
| 8
| 39
| 25.375
| 0.680851
| 0
| 0
| 0
| 0
| 0
| 0.460396
| 0.10396
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
edd69b1a90da038c91ea71a633b13eca4b101cd7
| 142
|
py
|
Python
|
edera/storages/__init__.py
|
thoughteer/edera
|
c4ddb5d8a25906c3bd773c91afb3260fc0b704f2
|
[
"MIT"
] | 3
|
2018-11-27T15:45:19.000Z
|
2018-12-21T20:32:10.000Z
|
edera/storages/__init__.py
|
thoughteer/edera
|
c4ddb5d8a25906c3bd773c91afb3260fc0b704f2
|
[
"MIT"
] | 18
|
2018-12-02T18:38:59.000Z
|
2020-02-05T22:09:37.000Z
|
edera/storages/__init__.py
|
thoughteer/edera
|
c4ddb5d8a25906c3bd773c91afb3260fc0b704f2
|
[
"MIT"
] | null | null | null |
from .embedded import EmbeddedStorage
from .inmemory import InMemoryStorage
from .mongo import MongoStorage
from .sqlite import SQLiteStorage
| 28.4
| 37
| 0.859155
| 16
| 142
| 7.625
| 0.625
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.112676
| 142
| 4
| 38
| 35.5
| 0.968254
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
edfcddda3ea05037bf1033582ae547265a688780
| 188
|
py
|
Python
|
tests/__init__.py
|
benlachman/marshmallow-mongoengine
|
72787fdb5e9d598821df614c676642842ec44b0d
|
[
"MIT"
] | 77
|
2017-01-11T11:30:43.000Z
|
2022-03-17T19:57:03.000Z
|
tests/__init__.py
|
benlachman/marshmallow-mongoengine
|
72787fdb5e9d598821df614c676642842ec44b0d
|
[
"MIT"
] | 13
|
2017-02-03T06:42:40.000Z
|
2021-07-07T17:09:37.000Z
|
tests/__init__.py
|
benlachman/marshmallow-mongoengine
|
72787fdb5e9d598821df614c676642842ec44b0d
|
[
"MIT"
] | 31
|
2017-02-03T06:18:55.000Z
|
2022-03-26T09:30:36.000Z
|
def exception_test(assert_check_fn):
try:
def exception_wrapper(*arg, **kwarg):
assert_check_fn(*arg, **kwarg)
except Exception as e:
pytest.failed(e)
| 23.5
| 45
| 0.62234
| 24
| 188
| 4.625
| 0.625
| 0.216216
| 0.234234
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.265957
| 188
| 8
| 46
| 23.5
| 0.804348
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| false
| 0
| 0
| 0
| 0.333333
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
b61ff905ef96f1da25a74ccd8401c4192ef5cd5b
| 290
|
py
|
Python
|
api/admin.py
|
EstherWaweru/JuniorDeveloperTest
|
351c3f31a26dde86f650e674b8e7d79bde985e2d
|
[
"MIT"
] | 1
|
2021-01-31T07:11:25.000Z
|
2021-01-31T07:11:25.000Z
|
api/admin.py
|
EstherWaweru/JuniorDeveloperTest
|
351c3f31a26dde86f650e674b8e7d79bde985e2d
|
[
"MIT"
] | null | null | null |
api/admin.py
|
EstherWaweru/JuniorDeveloperTest
|
351c3f31a26dde86f650e674b8e7d79bde985e2d
|
[
"MIT"
] | null | null | null |
from django.contrib import admin
# Register your models here.
from django.contrib.auth.admin import UserAdmin
# Register your models here.
from api.models import CustomUser,Company
class UserModel(UserAdmin):
pass
admin.site.register(CustomUser,UserModel)
admin.site.register(Company)
| 26.363636
| 47
| 0.813793
| 39
| 290
| 6.051282
| 0.461538
| 0.084746
| 0.144068
| 0.186441
| 0.220339
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.110345
| 290
| 10
| 48
| 29
| 0.914729
| 0.182759
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0.142857
| 0.428571
| 0
| 0.571429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| 0
| 1
| 0
|
0
| 5
|
b65163308efaba4b8a2fdb15bae5e4d1caf0c914
| 55
|
py
|
Python
|
01-values-and-types.py
|
leogithubid/python3-programming
|
cec559040f80e559000378489eb0ef7a8c8b61fa
|
[
"MIT"
] | null | null | null |
01-values-and-types.py
|
leogithubid/python3-programming
|
cec559040f80e559000378489eb0ef7a8c8b61fa
|
[
"MIT"
] | null | null | null |
01-values-and-types.py
|
leogithubid/python3-programming
|
cec559040f80e559000378489eb0ef7a8c8b61fa
|
[
"MIT"
] | null | null | null |
print('hello world')
print(type(100))
print(type(3.14))
| 18.333333
| 20
| 0.709091
| 10
| 55
| 3.9
| 0.7
| 0.461538
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.115385
| 0.054545
| 55
| 3
| 21
| 18.333333
| 0.634615
| 0
| 0
| 0
| 0
| 0
| 0.196429
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
|
0
| 5
|
b67d0855a8e02deaf2a8667f3239a5f520bd4458
| 90
|
py
|
Python
|
Monke/__init__.py
|
Duck-sri/Monke
|
7539929496d4ddb134f5d449e9d090c271457113
|
[
"MIT"
] | null | null | null |
Monke/__init__.py
|
Duck-sri/Monke
|
7539929496d4ddb134f5d449e9d090c271457113
|
[
"MIT"
] | 1
|
2021-11-08T04:56:05.000Z
|
2021-11-08T04:56:05.000Z
|
Monke/__init__.py
|
Duck-sri/Monke
|
7539929496d4ddb134f5d449e9d090c271457113
|
[
"MIT"
] | null | null | null |
from .block import Block
from .transaction import Transaction
from .account import Account
| 30
| 36
| 0.844444
| 12
| 90
| 6.333333
| 0.416667
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.122222
| 90
| 3
| 37
| 30
| 0.962025
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
b68d51d8887f49f000ef91611f35d1225cb82b19
| 43
|
py
|
Python
|
array/fourSum.py
|
ZeddShi/alg-py
|
f491dbf92bf7ddf0ac159d1ccfa1f716e458699f
|
[
"MIT"
] | null | null | null |
array/fourSum.py
|
ZeddShi/alg-py
|
f491dbf92bf7ddf0ac159d1ccfa1f716e458699f
|
[
"MIT"
] | null | null | null |
array/fourSum.py
|
ZeddShi/alg-py
|
f491dbf92bf7ddf0ac159d1ccfa1f716e458699f
|
[
"MIT"
] | null | null | null |
# 四数之和
def fourSum(nums, target):
pass
| 10.75
| 26
| 0.651163
| 6
| 43
| 4.666667
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.232558
| 43
| 4
| 27
| 10.75
| 0.848485
| 0.093023
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.5
| false
| 0.5
| 0
| 0
| 0.5
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
|
0
| 5
|
b6a41c2e1b6a583c2000ea9bbd29c7b7001a79b0
| 148,811
|
py
|
Python
|
suncasa/suncasatasks/ptclean6.py
|
wyq24/suncasa
|
e6ed6d8b9bd2186c4af6d0354d03af5fff9aef7a
|
[
"BSD-2-Clause"
] | 2
|
2018-02-12T09:34:23.000Z
|
2019-07-16T18:25:12.000Z
|
suncasa/suncasatasks/ptclean6.py
|
wulinhui1/suncasa-src
|
1f94aaabaf6a3911fa532648ec6676a221553436
|
[
"BSD-2-Clause"
] | 26
|
2016-11-09T17:11:45.000Z
|
2021-08-20T13:41:50.000Z
|
suncasa/suncasatasks/ptclean6.py
|
wulinhui1/suncasa-src
|
1f94aaabaf6a3911fa532648ec6676a221553436
|
[
"BSD-2-Clause"
] | 17
|
2016-10-27T18:35:46.000Z
|
2021-08-03T05:33:57.000Z
|
##################### generated by xml-casa (v2) from ptclean6.xml ##################
##################### 6a89d05724a14fedd7b8ceb75d841936 ##############################
from __future__ import absolute_import
import numpy
from casatools.typecheck import CasaValidator as _val_ctor
_pc = _val_ctor( )
from casatools.coercetype import coerce as _coerce
from .private.task_ptclean6 import ptclean6 as _ptclean6_t
from casatasks.private.task_logging import start_log as _start_log
from casatasks.private.task_logging import end_log as _end_log
class _ptclean6:
"""
ptclean6 ---- Parallelized tclean in consecutive time steps
Parallelized clean in consecutive time steps. Packed over CASA 6 tclean.
--------- parameter descriptions ---------------------------------------------
vis Name(s) of input visibility file(s)
default: none;
example: vis='ngc5921.ms'
vis=['ngc5921a.ms','ngc5921b.ms']; multiple MSes
imageprefix Prefix of output image names (usually useful in defining the output path)
imagesuffix Suffix of output image names (usually useful in specifyting the image type, version, etc.)
ncpu Number of cpu cores to use
twidth Number of time pixels to average
doreg True if use vla_prep to register the image
usephacenter True if use the phacenter information from the measurement set (e.g., VLA); False to assume the phase center is at the solar disk center (EOVSA)
reftime Reference time of the J2000 coordinates associated with the ephemeris target. e.g., "2012/03/03/12:00". This is used for helioimage2fits.py to find the solar x y offset in order to register the image. If not set, use the actual timerange of the image (default)
toTb True if convert to brightness temperature
sclfactor scale the brightness temperature up by its value
subregion The name of a CASA region string
The name of a CASA image or region file or region string. Only locations within the region will
output to the fits file.
If regions specified fall completely outside of the image, ptclean6 will throw an error.
Manual mask options/examples :
subregion='box[[224pix,224pix],[288pix,288pix]]' : A CASA region string.
docompress True if compress the output fits files
overwrite True if overwrite the image
selectdata Enable data selection parameters.
field to image or mosaic. Use field id(s) or name(s).
['go listobs' to obtain the list id's or names]
default: ''= all fields
If field string is a non-negative integer, it is assumed to
be a field index otherwise, it is assumed to be a
field name
field='0~2'; field ids 0,1,2
field='0,4,5~7'; field ids 0,4,5,6,7
field='3C286,3C295'; field named 3C286 and 3C295
field = '3,4C*'; field id 3, all names starting with 4C
For multiple MS input, a list of field strings can be used:
field = ['0~2','0~4']; field ids 0-2 for the first MS and 0-4
for the second
field = '0~2'; field ids 0-2 for all input MSes
spw l window/channels
NOTE: channels de-selected here will contain all zeros if
selected by the parameter mode subparameters.
default: ''=all spectral windows and channels
spw='0~2,4'; spectral windows 0,1,2,4 (all channels)
spw='0:5~61'; spw 0, channels 5 to 61
spw='<2'; spectral windows less than 2 (i.e. 0,1)
spw='0,10,3:3~45'; spw 0,10 all channels, spw 3,
channels 3 to 45.
spw='0~2:2~6'; spw 0,1,2 with channels 2 through 6 in each.
For multiple MS input, a list of spw strings can be used:
spw=['0','0~3']; spw ids 0 for the first MS and 0-3 for the second
spw='0~3' spw ids 0-3 for all input MS
spw='3:10~20;50~60' for multiple channel ranges within spw id 3
spw='3:10~20;50~60,4:0~30' for different channel ranges for spw ids 3 and 4
spw='0:0~10,1:20~30,2:1;2;3'; spw 0, channels 0-10,
spw 1, channels 20-30, and spw 2, channels, 1,2 and 3
spw='1~4;6:15~48' for channels 15 through 48 for spw ids 1,2,3,4 and 6
timerange Range of time to select from data
default: '' (all); examples,
timerange = 'YYYY/MM/DD/hh:mm:ss~YYYY/MM/DD/hh:mm:ss'
Note: if YYYY/MM/DD is missing date defaults to first
day in data set
timerange='09:14:0~09:54:0' picks 40 min on first day
timerange='25:00:00~27:30:00' picks 1 hr to 3 hr
30min on NEXT day
timerange='09:44:00' pick data within one integration
of time
timerange='> 10:24:00' data after this time
For multiple MS input, a list of timerange strings can be
used:
timerange=['09:14:0~09:54:0','> 10:24:00']
timerange='09:14:0~09:54:0''; apply the same timerange for
all input MSes
uvrange Select data within uvrange (default unit is meters)
default: '' (all); example:
uvrange='0~1000klambda'; uvrange from 0-1000 kilo-lambda
uvrange='> 4klambda';uvranges greater than 4 kilo lambda
For multiple MS input, a list of uvrange strings can be
used:
uvrange=['0~1000klambda','100~1000klamda']
uvrange='0~1000klambda'; apply 0-1000 kilo-lambda for all
input MSes
antenna Select data based on antenna/baseline
default: '' (all)
If antenna string is a non-negative integer, it is
assumed to be an antenna index, otherwise, it is
considered an antenna name.
antenna='5\&6'; baseline between antenna index 5 and
index 6.
antenna='VA05\&VA06'; baseline between VLA antenna 5
and 6.
antenna='5\&6;7\&8'; baselines 5-6 and 7-8
antenna='5'; all baselines with antenna index 5
antenna='05'; all baselines with antenna number 05
(VLA old name)
antenna='5,6,9'; all baselines with antennas 5,6,9
index number
For multiple MS input, a list of antenna strings can be
used:
antenna=['5','5\&6'];
antenna='5'; antenna index 5 for all input MSes
antenna='!DV14'; use all antennas except DV14
scan Scan number range
default: '' (all)
example: scan='1~5'
For multiple MS input, a list of scan strings can be used:
scan=['0~100','10~200']
scan='0~100; scan ids 0-100 for all input MSes
observation Observation ID range
default: '' (all)
example: observation='1~5'
intent Scan Intent(s)
default: '' (all)
example: intent='TARGET_SOURCE'
example: intent='TARGET_SOURCE1,TARGET_SOURCE2'
example: intent='TARGET_POINTING*'
datacolumn Data column to image (data or observed, corrected)
default:'corrected'
( If 'corrected' does not exist, it will use 'data' instead )
imagename Pre-name of output images
example : imagename='try'
Output images will be (a subset of) :
try.psf - Point spread function
try.residual - Residual image
try.image - Restored image
try.model - Model image (contains only flux components)
try.sumwt - Single pixel image containing sum-of-weights.
(for natural weighting, sensitivity=1/sqrt(sumwt))
try.pb - Primary beam model (values depend on the gridder used)
Widefield projection algorithms (gridder=mosaic,awproject) will
compute the following images too.
try.weight - FT of gridded weights or the
un-normalized sum of PB-square (for all pointings)
Here, PB = sqrt(weight) normalized to a maximum of 1.0
For multi-term wideband imaging, all relevant images above will
have additional .tt0,.tt1, etc suffixes to indicate Taylor terms,
plus the following extra output images.
try.alpha - spectral index
try.alpha.error - estimate of error on spectral index
try.beta - spectral curvature (if nterms \> 2)
Tip : Include a directory name in 'imagename' for all
output images to be sent there instead of the
current working directory : imagename='mydir/try'
Tip : Restarting an imaging run without changing 'imagename'
implies continuation from the existing model image on disk.
- If 'startmodel' was initially specified it needs to be set to ""
for the restart run (or tclean will exit with an error message).
- By default, the residual image and psf will be recomputed
but if no changes were made to relevant parameters between
the runs, set calcres=False, calcpsf=False to resume directly from
the minor cycle without the (unnecessary) first major cycle.
To automatically change 'imagename' with a numerical
increment, set restart=False (see tclean docs for 'restart').
Note : All imaging runs will by default produce restored images.
For a niter=0 run, this will be redundant and can optionally
be turned off via the 'restoration=T/F' parameter.
imsize Number of pixels
example : imsize = [350,250]
imsize = 500 is equivalent to [500,500]
To take proper advantage of internal optimized FFT routines, the
number of pixels must be even and factorizable by 2,3,5,7 only.
cell Cell size
example: cell=['0.5arcsec,'0.5arcsec'] or
cell=['1arcmin', '1arcmin']
cell = '1arcsec' is equivalent to ['1arcsec','1arcsec']
phasecenter Phase center of the image (string or field id); if the phasecenter is the name known major solar system object ('MERCURY', 'VENUS', 'MARS', 'JUPITER', 'SATURN', 'URANUS', 'NEPTUNE', 'PLUTO', 'SUN', 'MOON') or is an ephemerides table then that source is tracked and the background sources get smeared. There is a special case, when phasecenter='TRACKFIELD', which will use the ephemerides or polynomial phasecenter in the FIELD table of the MS's as the source center to track.
example: phasecenter=6
phasecenter='J2000 19h30m00 -40d00m00'
phasecenter='J2000 292.5deg -40.0deg'
phasecenter='J2000 5.105rad -0.698rad'
phasecenter='ICRS 13:05:27.2780 -049.28.04.458'
phasecenter='myComet_ephem.tab'
phasecenter='MOON'
phasecenter='TRACKFIELD'
stokes Stokes Planes to make
default='I'; example: stokes='IQUV';
Options: 'I','Q','U','V','IV','QU','IQ','UV','IQUV','RR','LL','XX','YY','RRLL','XXYY','pseudoI'
Note : Due to current internal code constraints, if any correlation pair
is flagged, by default, no data for that row in the MS will be used.
So, in an MS with XX,YY, if only YY is flagged, neither a
Stokes I image nor an XX image can be made from those data points.
In such a situation, please split out only the unflagged correlation into
a separate MS.
Note : The 'pseudoI' option is a partial solution, allowing Stokes I imaging
when either of the parallel-hand correlations are unflagged.
The remaining constraints shall be removed (where logical) in a future release.
projection Coordinate projection
Examples : SIN, NCP
A list of supported (but untested) projections can be found here :
http://casa.nrao.edu/active/docs/doxygen/html/classcasa_1_1Projection.html#a3d5f9ec787e4eabdce57ab5edaf7c0cd
startmodel Name of starting model image
The contents of the supplied starting model image will be
copied to the imagename.model before the run begins.
example : startmodel = 'singledish.im'
For deconvolver='mtmfs', one image per Taylor term must be provided.
example : startmodel = ['try.model.tt0', 'try.model.tt1']
startmodel = ['try.model.tt0'] will use a starting model only
for the zeroth order term.
startmodel = ['','try.model.tt1'] will use a starting model only
for the first order term.
This starting model can be of a different image shape and size from
what is currently being imaged. If so, an image regrid is first triggered
to resample the input image onto the target coordinate system.
A common usage is to set this parameter equal to a single dish image
Negative components in the model image will be included as is.
[ Note : If an error occurs during image resampling/regridding,
please try using task imregrid to resample the starting model
image onto a CASA image with the target shape and
coordinate system before supplying it via startmodel ]
specmode Spectral definition mode (mfs,cube,cubedata, cubesource)
mode='mfs' : Continuum imaging with only one output image channel.
(mode='cont' can also be used here)
mode='cube' : Spectral line imaging with one or more channels
Parameters start, width,and nchan define the spectral
coordinate system and can be specified either in terms
of channel numbers, frequency or velocity in whatever
spectral frame is specified in 'outframe'.
All internal and output images are made with outframe as the
base spectral frame. However imaging code internally uses the fixed
spectral frame, LSRK for automatic internal software
Doppler tracking so that a spectral line observed over an
extended time range will line up appropriately.
Therefore the output images have additional spectral frame conversion
layer in LSRK on the top the base frame.
(Note : Even if the input parameters are specified in a frame
other than LSRK, the viewer still displays spectral
axis in LSRK by default because of the conversion frame
layer mentioned above. The viewer can be used to relabel
the spectral axis in any desired frame - via the spectral
reference option under axis label properties in the
data display options window.)
mode='cubedata' : Spectral line imaging with one or more channels
There is no internal software Doppler tracking so
a spectral line observed over an extended time range
may be smeared out in frequency. There is strictly
no valid spectral frame with which to label the output
images, but they will list the frame defined in the MS.
mode='cubesource': Spectral line imaging while
tracking moving source (near field or solar system
objects). The velocity of the source is accounted
and the frequency reported is in the source frame.
As there is not SOURCE frame defined,
the frame reported will be REST (as it may not be
in the rest frame emission region may be
moving w.r.t the systemic velocity frame)
reffreq Reference frequency of the output image coordinate system
Example : reffreq='1.5GHz' as a string with units.
By default, it is calculated as the middle of the selected frequency range.
For deconvolver='mtmfs' the Taylor expansion is also done about
this specified reference frequency.
nchan Number of channels in the output image
For default (=-1), the number of channels will be automatically determined
based on data selected by 'spw' with 'start' and 'width'.
It is often easiest to leave nchan at the default value.
example: nchan=100
start First channel (e.g. start=3,start=\'1.1GHz\',start=\'15343km/s\')
of output cube images specified by data channel number (integer),
velocity (string with a unit), or frequency (string with a unit).
Default:''; The first channel is automatically determined based on
the 'spw' channel selection and 'width'.
When the channel number is used along with the channel selection
in 'spw' (e.g. spw='0:6~100'),
'start' channel number is RELATIVE (zero-based) to the selected
channels in 'spw'. So for the above example,
start=1 means that the first image channel is the second selected
data channel, which is channel 7.
For specmode='cube', when velocity or frequency is used it is
interpreted with the frame defined in outframe. [The parameters of
the desired output cube can be estimated by using the 'transform'
functionality of 'plotms']
examples: start='5.0km/s'; 1st channel, 5.0km/s in outframe
start='22.3GHz'; 1st channel, 22.3GHz in outframe
width Channel width (e.g. width=2,width=\'0.1MHz\',width=\'10km/s\') of output cube images
specified by data channel number (integer), velocity (string with a unit), or
or frequency (string with a unit).
Default:''; data channel width
The sign of width defines the direction of the channels to be incremented.
For width specified in velocity or frequency with '-' in front gives image channels in
decreasing velocity or frequency, respectively.
For specmode='cube', when velocity or frequency is used it is interpreted with
the reference frame defined in outframe.
examples: width='2.0km/s'; results in channels with increasing velocity
width='-2.0km/s'; results in channels with decreasing velocity
width='40kHz'; results in channels with increasing frequency
width=-2; results in channels averaged of 2 data channels incremented from
high to low channel numbers
outframe Spectral reference frame in which to interpret \'start\' and \'width\'
Options: '','LSRK','LSRD','BARY','GEO','TOPO','GALACTO','LGROUP','CMB'
example: outframe='bary' for Barycentric frame
REST -- Rest frequency
LSRD -- Local Standard of Rest (J2000)
-- as the dynamical definition (IAU, [9,12,7] km/s in galactic coordinates)
LSRK -- LSR as a kinematical (radio) definition
-- 20.0 km/s in direction ra,dec = [270,+30] deg (B1900.0)
BARY -- Barycentric (J2000)
GEO --- Geocentric
TOPO -- Topocentric
GALACTO -- Galacto centric (with rotation of 220 km/s in direction l,b = [90,0] deg.
LGROUP -- Local group velocity -- 308km/s towards l,b = [105,-7] deg (F. Ghigo)
CMB -- CMB velocity -- 369.5km/s towards l,b = [264.4, 48.4] deg (F. Ghigo)
DEFAULT = LSRK
veltype Velocity type (radio, z, ratio, beta, gamma, optical)
For start and/or width specified in velocity, specifies the velocity definition
Options: 'radio','optical','z','beta','gamma','optical'
NOTE: the viewer always defaults to displaying the 'radio' frame,
but that can be changed in the position tracking pull down.
The different types (with F = f/f0, the frequency ratio), are:
Z = (-1 + 1/F)
RATIO = (F) *
RADIO = (1 - F)
OPTICAL == Z
BETA = ((1 - F2)/(1 + F2))
GAMMA = ((1 + F2)/2F) *
RELATIVISTIC == BETA (== v/c)
DEFAULT == RADIO
Note that the ones with an '*' have no real interpretation
(although the calculation will proceed) if given as a velocity.
restfreq List of rest frequencies or a rest frequency in a string.
Specify rest frequency to use for output image.
*Currently it uses the first rest frequency in the list for translation of
velocities. The list will be stored in the output images.
Default: []; look for the rest frequency stored in the MS, if not available,
use center frequency of the selected channels
examples: restfreq=['1.42GHz']
restfreq='1.42GHz'
interpolation Spectral interpolation (nearest,linear,cubic)
Interpolation rules to use when binning data channels onto image channels
and evaluating visibility values at the centers of image channels.
Note : 'linear' and 'cubic' interpolation requires data points on both sides of
each image frequency. Errors are therefore possible at edge channels, or near
flagged data channels. When image channel width is much larger than the data
channel width there is nothing much to be gained using linear or cubic thus
not worth the extra computation involved.
perchanweightdensity When calculating weight density for Briggs
style weighting in a cube, this parameter
determines whether to calculate the weight
density for each channel independently
(the default, True)
or a common weight density for all of the selected
data. This parameter has no
meaning for continuum (specmode='mfs') imaging
or for natural and radial weighting schemes.
For cube imaging
perchanweightdensity=True is a recommended
option that provides more uniform
sensitivity per channel for cubes, but with
generally larger psfs than the
perchanweightdensity=False (prior behavior)
option. When using Briggs style weight with
perchanweightdensity=True, the imaging weight
density calculations use only the weights of
data that contribute specifically to that
channel. On the other hand, when
perchanweightdensity=False, the imaging
weight density calculations sum all of the
weights from all of the data channels
selected whose (u,v) falls in a given uv cell
on the weight density grid. Since the
aggregated weights, in any given uv cell,
will change depending on the number of
channels included when imaging, the psf
calculated for a given frequency channel will
also necessarily change, resulting in
variability in the psf for a given frequency
channel when perchanweightdensity=False. In
general, perchanweightdensity=False results
in smaller psfs for the same value of
robustness compared to
perchanweightdensity=True, but the rms noise
as a function of channel varies and increases
toward the edge channels;
perchanweightdensity=True provides more
uniform sensitivity per channel for
cubes. This may make it harder to find
estimates of continuum when
perchanweightdensity=False. If you intend to
image a large cube in many smaller subcubes
and subsequently concatenate, it is advisable
to use perchanweightdensity=True to avoid
surprisingly varying sensitivity and psfs
across the concatenated cube.
gridder Gridding options (standard, wproject, widefield, mosaic, awproject)
The following options choose different gridding convolution
functions for the process of convolutional resampling of the measured
visibilities onto a regular uv-grid prior to an inverse FFT.
Model prediction (degridding) also uses these same functions.
Several wide-field effects can be accounted for via careful choices of
convolution functions. Gridding (degridding) runtime will rise in
proportion to the support size of these convolution functions (in uv-pixels).
standard : Prolate Spheroid with 7x7 uv pixel support size
[ This mode can also be invoked using 'ft' or 'gridft' ]
wproject : W-Projection algorithm to correct for the widefield
non-coplanar baseline effect. [Cornwell et.al 2008]
wprojplanes is the number of distinct w-values at
which to compute and use different gridding convolution
functions (see help for wprojplanes).
Convolution function support size can range
from 5x5 to few 100 x few 100.
[ This mode can also be invoked using 'wprojectft' ]
widefield : Facetted imaging with or without W-Projection per facet.
A set of facets x facets subregions of the specified image
are gridded separately using their respective phase centers
(to minimize max W). Deconvolution is done on the joint
full size image, using a PSF from the first subregion.
wprojplanes=1 : standard prolate spheroid gridder per facet.
wprojplanes > 1 : W-Projection gridder per facet.
nfacets=1, wprojplanes > 1 : Pure W-Projection and no facetting
nfacets=1, wprojplanes=1 : Same as standard,ft,gridft
A combination of facetting and W-Projection is relevant only for
very large fields of view. (In our current version of tclean, this
combination runs only with parallel=False.)
mosaic : A-Projection with azimuthally symmetric beams without
sidelobes, beam rotation or squint correction.
Gridding convolution functions per visibility are computed
from FTs of PB models per antenna.
This gridder can be run on single fields as well as mosaics.
VLA : PB polynomial fit model (Napier and Rots, 1982)
EVLA : PB polynomial fit model (Perley, 2015)
ALMA : Airy disks for a 10.7m dish (for 12m dishes) and
6.25m dish (for 7m dishes) each with 0.75m
blockages (Hunter/Brogan 2011). Joint mosaic
imaging supports heterogeneous arrays for ALMA.
Typical gridding convolution function support sizes are
between 7 and 50 depending on the desired
accuracy (given by the uv cell size or image field of view).
[ This mode can also be invoked using 'mosaicft' or 'ftmosaic' ]
awproject : A-Projection with azimuthally asymmetric beams and
including beam rotation, squint correction,
conjugate frequency beams and W-projection.
[Bhatnagar et.al, 2008]
Gridding convolution functions are computed from
aperture illumination models per antenna and optionally
combined with W-Projection kernels and a prolate spheroid.
This gridder can be run on single fields as well as mosaics.
VLA : Uses ray traced model (VLA and EVLA) including feed
leg and subreflector shadows, off-axis feed location
(for beam squint and other polarization effects), and
a Gaussian fit for the feed beams (Ref: Brisken 2009)
ALMA : Similar ray-traced model as above (but the correctness
of its polarization properties remains un-verified).
Typical gridding convolution function support sizes are
between 7 and 50 depending on the desired
accuracy (given by the uv cell size or image field of view).
When combined with W-Projection they can be significantly larger.
[ This mode can also be invoked using 'awprojectft' ]
imagemosaic : (untested implementation)
Grid and iFT each pointing separately and combine the
images as a linear mosaic (weighted by a PB model) in
the image domain before a joint minor cycle.
VLA/ALMA PB models are same as for gridder='mosaicft'
------ Notes on PB models :
(1) Several different sources of PB models are used in the modes
listed above. This is partly for reasons of algorithmic flexibility
and partly due to the current lack of a common beam model
repository or consensus on what beam models are most appropriate.
(2) For ALMA and gridder='mosaic', ray-traced (TICRA) beams
are also available via the vpmanager tool.
For example, call the following before the tclean run.
vp.setpbimage(telescope="ALMA",
compleximage='/home/casa/data/trunk/alma/responses/ALMA_0_DV__0_0_360_0_45_90_348.5_373_373_GHz_ticra2007_VP.im',
antnames=['DV'+'%02d'%k for k in range(25)])
vp.saveastable('mypb.tab')
Then, supply vptable='mypb.tab' to tclean.
( Currently this will work only for non-parallel runs )
------ Note on PB masks :
In tclean, A-Projection gridders (mosaic and awproject) produce a
.pb image and use the 'pblimit' subparameter to decide normalization
cutoffs and construct an internal T/F mask in the .pb and .image images.
However, this T/F mask cannot directly be used during deconvolution
(which needs a 1/0 mask). There are two options for making a pb based
deconvolution mask.
-- Run tclean with niter=0 to produce the .pb, construct a 1/0 image
with the desired threshold (using ia.open('newmask.im');
ia.calc('iif("xxx.pb">0.3,1.0,0.0)');ia.close() for example),
and supply it via the 'mask' parameter in a subsequent run
(with calcres=F and calcpsf=F to restart directly from the minor cycle).
-- Run tclean with usemask='pb' for it to automatically construct
a 1/0 mask from the internal T/F mask from .pb at a fixed 0.2 threshold.
----- Making PBs for gridders other than mosaic,awproject
After the PSF generation, a PB is constructed using the same
models used in gridder='mosaic' but just evaluated in the image
domain without consideration to weights.
facets Number of facets on a side
A set of (facets x facets) subregions of the specified image
are gridded separately using their respective phase centers
(to minimize max W). Deconvolution is done on the joint
full size image, using a PSF from the first subregion/facet.
In our current version of tclean, facets>1 may be used only
with parallel=False.
psfphasecenter For mosaic use psf centered on this
optional direction. You may need to use
this if for example the mosaic does not
have any pointing in the center of the
image. Another reason; as the psf is
approximate for a mosaic, this may help
to deconvolve a non central bright source
well and quickly.
example:
psfphasecenter=6 #center psf on field 6
psfphasecenter='J2000 19h30m00 -40d00m00'
psfphasecenter='J2000 292.5deg -40.0deg'
psfphasecenter='J2000 5.105rad -0.698rad'
psfphasecenter='ICRS 13:05:27.2780 -049.28.04.458'
wprojplanes Number of distinct w-values at which to compute and use different
gridding convolution functions for W-Projection
An appropriate value of wprojplanes depends on the presence/absence
of a bright source far from the phase center, the desired dynamic
range of an image in the presence of a bright far out source,
the maximum w-value in the measurements, and the desired trade off
between accuracy and computing cost.
As a (rough) guide, VLA L-Band D-config may require a
value of 128 for a source 30arcmin away from the phase
center. A-config may require 1024 or more. To converge to an
appropriate value, try starting with 128 and then increasing
it if artifacts persist. W-term artifacts (for the VLA) typically look
like arc-shaped smears in a synthesis image or a shift in source
position between images made at different times. These artifacts
are more pronounced the further the source is from the phase center.
There is no harm in simply always choosing a large value (say, 1024)
but there will be a significant performance cost to doing so, especially
for gridder='awproject' where it is combined with A-Projection.
wprojplanes=-1 is an option for gridder='widefield' or 'wproject'
in which the number of planes is automatically computed.
vptable vpmanager
vptable="" : Choose default beams for different telescopes
ALMA : Airy disks
EVLA : old VLA models.
Other primary beam models can be chosen via the vpmanager tool.
Step 1 : Set up the vpmanager tool and save its state in a table
vp.setpbpoly(telescope='EVLA', coeff=[1.0, -1.529e-3, 8.69e-7, -1.88e-10])
vp.saveastable('myvp.tab')
Step 2 : Supply the name of that table in tclean.
tclean(....., vptable='myvp.tab',....)
Please see the documentation for the vpmanager for more details on how to
choose different beam models. Work is in progress to update the defaults
for EVLA and ALMA.
Note : AWProjection currently does not use this mechanism to choose
beam models. It instead uses ray-traced beams computed from
parameterized aperture illumination functions, which are not
available via the vpmanager. So, gridder='awproject' does not allow
the user to set this parameter.
mosweight When doing Brigg's style weighting (including uniform) to perform the weight density calculation for each field independently if True. If False the weight density is calculated from the average uv distribution of all the fields.
aterm Use aperture illumination functions during gridding
This parameter turns on the A-term of the AW-Projection gridder.
Gridding convolution functions are constructed from aperture illumination
function models of each antenna.
psterm Include the Prolate Spheroidal (PS) function as the anti-aliasing
operator in the gridding convolution functions used for gridding.
Setting this parameter to true is necessary when aterm is set to
false. It can be set to false when aterm is set to true, though
with this setting effects of aliasing may be there in the image,
particularly near the edges.
When set to true, the .pb images will contain the Fourier transform
of the PS function. The table below enumerates the functional
effects of the psterm, aterm and wprojplanes settings. PB refers to
the Primary Beam and FT() refers to the Fourier transform operation.
Operation aterm psterm wprojplanes Contents of the .pb image
----------------------------------------------------------------------
AW-Projection True True >1 FT(PS) x PB
False PB
A-Projection True True 1 FT(PS) x PB
False PB
W-Projection False True >1 FT(PS)
Standard False True 1 FT(PS)
wbawp Use frequency dependent A-terms
Scale aperture illumination functions appropriately with frequency
when gridding and combining data from multiple channels.
conjbeams Use conjugate frequency for wideband A-terms
While gridding data from one frequency channel, choose a convolution
function from a 'conjugate' frequency such that the resulting baseline
primary beam is approximately constant across frequency. For a system in
which the primary beam scales with frequency, this step will eliminate
instrumental spectral structure from the measured data and leave only the
sky spectrum for the minor cycle to model and reconstruct [Bhatnagar et al., ApJ, 2013].
As a rough guideline for when this is relevant, a source at the half power
point of the PB at the center frequency will see an artificial spectral
index of -1.4 due to the frequency dependence of the PB [Sault and Wieringa, 1994].
If left uncorrected during gridding, this spectral structure must be modeled
in the minor cycle (using the mtmfs algorithm) to avoid dynamic range limits
(of a few hundred for a 2:1 bandwidth).
This works for specmode='mfs' and its value is ignored for cubes
cfcache Convolution function cache directory name
Name of a directory in which to store gridding convolution functions.
This cache is filled at the beginning of an imaging run. This step can be time
consuming but the cache can be reused across multiple imaging runs that
use the same image parameters (cell size, image size , spectral data
selections, wprojplanes, wbawp, psterm, aterm). The effect of the wbawp,
psterm and aterm settings is frozen-in in the cfcache. Using an existing cfcache
made with a different setting of these parameters will not reflect the current
settings.
In a parallel execution, the construction of the cfcache is also parallelized
and the time to compute scales close to linearly with the number of compute
cores used. With the re-computation of Convolution Functions (CF) due to PA
rotation turned-off (the computepastep parameter), the total number of CFs in the
cfcache can be computed as [No. of wprojplanes x No. of selected spectral windows x 4]
By default, cfcache = imagename + '.cf'
usepointing The usepointing flag informs the gridder that it should utilize the pointing table
to use the correct direction in which the antenna is pointing with respect to the pointing phasecenter.
computepastep Parallactic angle interval after the AIFs are recomputed (deg)
This parameter controls the accuracy of the aperture illumination function
used with AProjection for alt-az mount dishes where the AIF rotates on the
sky as the synthesis image is built up. Once the PA in the data changes by
the given interval, AIFs are re-computed at the new PA.
A value of 360.0 deg (the default) implies no re-computation due to PA rotation.
AIFs are computed for the PA value of the first valid data received and used for
all of the data.
rotatepastep Parallactic angle interval after which the nearest AIF is rotated (deg)
Instead of recomputing the AIF for every timestep's parallactic angle,
the nearest existing AIF is used and rotated
after the PA changed by rotatepastep value.
A value of 360.0 deg (the default) disables rotation of the AIF.
For example, computepastep=360.0 and rotatepastep=5.0 will compute
the AIFs at only the starting parallactic angle and all other timesteps will
use a rotated version of that AIF at the nearest 5.0 degree point.
pointingoffsetsigdev Corrections for heterogeneous and time-dependent pointing
offsets via AWProjection are controlled by this parameter.
It is a vector of 2 ints or doubles each of which is interpreted
in units of arcsec. Based on the first threshold, a clustering
algorithm is applied to entries from the POINTING subtable
of the MS to determine the distinct antenna groups for which
the pointing offset must be computed separately. The second
number controls how much a pointing change across time can
be ignored and after which an antenna rebinning is required.
Note : The default value of this parameter is [], due to a programmatic constraint.
If run with this value, it will internally pick [600,600] and exercise the
option of using large tolerances (10arcmin) on both axes. Please choose
a setting explicitly for runs that need to use this parameter.
Note : This option is available only for gridder='awproject' and usepointing=True,
and has been validated primarily with VLASS on-the-fly mosaic data
where POINTING subtables have been modified after the data are recorded.
Examples of parameter usage :
[100.0,100.0] : Pointing offsets of 100 arcsec or less are considered
small enough to be ignored. Using large values for both
indicates a homogeneous array.
[10.0, 100.0] : Based on entries in the POINTING subtable, antennas
are grouped into clusters based on a 10arcsec bin size.
All antennas in a bin are given a pointing offset calculated
as the average of the offsets of all antennas in the bin.
On the time axis, offset changes up to 100 arcsec will be ignored.
[10.0,10.0] : Calculate separate pointing offsets for each antenna group
(with a 10 arcsec bin size). As a function of time, recalculate
the antenna binning if the POINTING table entries change by
more than 10 arcsec w.r.to the previously computed binning.
[1.0, 1.0] : Tight tolerances will imply a fully heterogeneous situation where
each antenna gets its own pointing offset. Also, time-dependent
offset changes greater than 1 arcsec will trigger recomputes of
the phase gradients. This is the most general situation and is also
the most expensive option as it constructs and uses separate
phase gradients for all baselines and timesteps.
For VLASS 1.1 data with two kinds of pointing offsets, the recommended
setting is [ 30.0, 30.0 ].
For VLASS 1.2 data with only the time-dependent pointing offsets, the
recommended setting is [ 300.0, 30.0 ] to turn off the antenna grouping
but to retain the time dependent corrections required from one timestep
to the next.
pblimit PB gain level at which to cut off normalizations
Divisions by .pb during normalizations have a cut off at a .pb gain
level given by pblimit. Outside this limit, image values are set to zero.
Additionally, by default, an internal T/F mask is applied to the .pb, .image and
.residual images to mask out (T) all invalid pixels outside the pblimit area.
Note : This internal T/F mask cannot be used as a deconvolution mask.
To do so, please follow the steps listed above in the Notes for the
'gridder' parameter.
Note : To prevent the internal T/F mask from appearing in anything other
than the .pb and .image.pbcor images, 'pblimit' can be set to a
negative number. The absolute value will still be used as a valid 'pblimit'.
A tclean restart using existing output images on disk that already
have this T/F mask in the .residual and .image but only pblimit set
to a negative value, will remove this mask after the next major cycle.
normtype Normalization type (flatnoise, flatsky, pbsquare)
Gridded (and FT'd) images represent the PB-weighted sky image.
Qualitatively it can be approximated as two instances of the PB
applied to the sky image (one naturally present in the data
and one introduced during gridding via the convolution functions).
xxx.weight : Weight image approximately equal to sum ( square ( pb ) )
xxx.pb : Primary beam calculated as sqrt ( xxx.weight )
normtype='flatnoise' : Divide the raw image by sqrt(.weight) so that
the input to the minor cycle represents the
product of the sky and PB. The noise is 'flat'
across the region covered by each PB.
normtype='flatsky' : Divide the raw image by .weight so that the input
to the minor cycle represents only the sky.
The noise is higher in the outer regions of the
primary beam where the sensitivity is low.
normtype='pbsquare' : No normalization after gridding and FFT.
The minor cycle sees the sky times pb square
deconvolver Name of minor cycle algorithm (hogbom,clark,multiscale,mtmfs,mem,clarkstokes)
Each of the following algorithms operate on residual images and psfs
from the gridder and produce output model and restored images.
Minor cycles stop and a major cycle is triggered when cyclethreshold
or cycleniter are reached. For all methods, components are picked from
the entire extent of the image or (if specified) within a mask.
hogbom : An adapted version of Hogbom Clean [Hogbom, 1974]
- Find the location of the peak residual
- Add this delta function component to the model image
- Subtract a scaled and shifted PSF of the same size as the image
from regions of the residual image where the two overlap.
- Repeat
clark : An adapted version of Clark Clean [Clark, 1980]
- Find the location of max(I^2+Q^2+U^2+V^2)
- Add delta functions to each stokes plane of the model image
- Subtract a scaled and shifted PSF within a small patch size
from regions of the residual image where the two overlap.
- After several iterations trigger a Clark major cycle to subtract
components from the visibility domain, but without de-gridding.
- Repeat
( Note : 'clark' maps to imagermode='' in the old clean task.
'clark_exp' is another implementation that maps to
imagermode='mosaic' or 'csclean' in the old clean task
but the behavior is not identical. For now, please
use deconvolver='hogbom' if you encounter problems. )
clarkstokes : Clark Clean operating separately per Stokes plane
(Note : 'clarkstokes_exp' is an alternate version. See above.)
multiscale : MultiScale Clean [Cornwell, 2008]
- Smooth the residual image to multiple scale sizes
- Find the location and scale at which the peak occurs
- Add this multiscale component to the model image
- Subtract a scaled,smoothed,shifted PSF (within a small
patch size per scale) from all residual images
- Repeat from step 2
mtmfs : Multi-term (Multi Scale) Multi-Frequency Synthesis [Rau and Cornwell, 2011]
- Smooth each Taylor residual image to multiple scale sizes
- Solve a NTxNT system of equations per scale size to compute
Taylor coefficients for components at all locations
- Compute gradient chi-square and pick the Taylor coefficients
and scale size at the location with maximum reduction in
chi-square
- Add multi-scale components to each Taylor-coefficient
model image
- Subtract scaled,smoothed,shifted PSF (within a small patch size
per scale) from all smoothed Taylor residual images
- Repeat from step 2
mem : Maximum Entropy Method [Cornwell and Evans, 1985]
- Iteratively solve for values at all individual pixels via the
MEM method. It minimizes an objective function of
chi-square plus entropy (here, a measure of difference
between the current model and a flat prior model).
(Note : This MEM implementation is not very robust.
Improvements will be made in the future.)
scales List of scale sizes (in pixels) for multi-scale and mtmfs algorithms.
--> scales=[0,6,20]
This set of scale sizes should represent the sizes
(diameters in units of number of pixels)
of dominant features in the image being reconstructed.
The smallest scale size is recommended to be 0 (point source),
the second the size of the synthesized beam and the third 3-5
times the synthesized beam, etc. For example, if the synthesized
beam is 10" FWHM and cell=2", try scales = [0,5,15].
For numerical stability, the largest scale must be
smaller than the image (or mask) size and smaller than or
comparable to the scale corresponding to the lowest measured
spatial frequency (as a scale size much larger than what the
instrument is sensitive to is unconstrained by the data making
it harder to recover from errors during the minor cycle).
nterms Number of Taylor coefficients in the spectral model
- nterms=1 : Assume flat spectrum source
- nterms=2 : Spectrum is a straight line with a slope
- nterms=N : A polynomial of order N-1
From a Taylor expansion of the expression of a power law, the
spectral index is derived as alpha = taylorcoeff_1 / taylorcoeff_0
Spectral curvature is similarly derived when possible.
The optimal number of Taylor terms depends on the available
signal to noise ratio, bandwidth ratio, and spectral shape of the
source as seen by the telescope (sky spectrum x PB spectrum).
nterms=2 is a good starting point for wideband EVLA imaging
and the lower frequency bands of ALMA (when fractional bandwidth
is greater than 10%) and if there is at least one bright source for
which a dynamic range of greater than few 100 is desired.
Spectral artifacts for the VLA often look like spokes radiating out from
a bright source (i.e. in the image made with standard mfs imaging).
If increasing the number of terms does not eliminate these artifacts,
check the data for inadequate bandpass calibration. If the source is away
from the pointing center, consider including wide-field corrections too.
(Note : In addition to output Taylor coefficient images .tt0,.tt1,etc
images of spectral index (.alpha), an estimate of error on
spectral index (.alpha.error) and spectral curvature (.beta,
if nterms is greater than 2) are produced.
- These alpha, alpha.error and beta images contain
internal T/F masks based on a threshold computed
as peakresidual/10. Additional masking based on
.alpha/.alpha.error may be desirable.
- .alpha.error is a purely empirical estimate derived
from the propagation of error during the division of
two noisy numbers (alpha = xx.tt1/xx.tt0) where the
'error' on tt1 and tt0 are simply the values picked from
the corresponding residual images. The absolute value
of the error is not always accurate and it is best to interpret
the errors across the image only in a relative sense.)
smallscalebias A numerical control to bias the scales when using multi-scale or mtmfs algorithms.
The peak from each scale's smoothed residual is
multiplied by ( 1 - smallscalebias * scale/maxscale )
to increase or decrease the amplitude relative to other scales,
before the scale with the largest peak is chosen.
Smallscalebias can be varied between -1.0 and 1.0.
A score of 0.0 gives all scales equal weight (default).
A score larger than 0.0 will bias the solution towards smaller scales.
A score smaller than 0.0 will bias the solution towards larger scales.
The effect of smallscalebias is more pronounced when using multi-scale relative to mtmfs.
restoration Restore the model image.
Construct a restored image : imagename.image by convolving the model
image with a clean beam and adding the residual image to the result.
If a restoringbeam is specified, the residual image is also
smoothed to that target resolution before adding it in.
If a .model does not exist, it will make an empty one and create
the restored image from the residuals ( with additional smoothing if needed ).
With algorithm='mtmfs', this will construct Taylor coefficient maps from
the residuals and compute .alpha and .alpha.error.
restoringbeam Restoring beam shape/size to use.
- restoringbeam='' or ['']
A Gaussian fitted to the PSF main lobe (separately per image plane).
- restoringbeam='10.0arcsec'
Use a circular Gaussian of this width for all planes
- restoringbeam=['8.0arcsec','10.0arcsec','45deg']
Use this elliptical Gaussian for all planes
- restoringbeam='common'
Automatically estimate a common beam shape/size appropriate for
all planes.
Note : For any restoring beam different from the native resolution
the model image is convolved with the beam and added to
residuals that have been convolved to the same target resolution.
pbcor Apply PB correction on the output restored image.
A new image with extension .image.pbcor will be created from
the evaluation of .image / .pb for all pixels above the specified pblimit.
Note : Stand-alone PB-correction can be triggered by re-running
tclean with the appropriate imagename and with
niter=0, calcpsf=False, calcres=False, pbcor=True, vptable='vp.tab'
( where vp.tab is the name of the vpmanager file.
See the inline help for the 'vptable' parameter )
Note : Multi-term PB correction that includes a correction for the
spectral index of the PB has not been enabled for the 4.7 release.
Please use the widebandpbcor task instead.
( Wideband PB corrections are required when the amplitude of the
brightest source is known accurately enough to be sensitive
to the difference in the PB gain between the upper and lower
end of the band at its location. As a guideline, the artificial spectral
index due to the PB is -1.4 at the 0.5 gain level and less than -0.2
at the 0.9 gain level at the middle frequency )
outlierfile Name of outlier-field image definitions
A text file containing sets of parameter=value pairs,
one set per outlier field.
Example : outlierfile='outs.txt'
Contents of outs.txt :
imagename=tst1
nchan=1
imsize=[80,80]
cell=[8.0arcsec,8.0arcsec]
phasecenter=J2000 19:58:40.895 +40.55.58.543
mask=circle[[40pix,40pix],10pix]
imagename=tst2
nchan=1
imsize=[100,100]
cell=[8.0arcsec,8.0arcsec]
phasecenter=J2000 19:58:40.895 +40.56.00.000
mask=circle[[60pix,60pix],20pix]
The following parameters are currently allowed to be different between
the main field and the outlier fields (i.e. they will be recognized if found
in the outlier text file). If a parameter is not listed, the value is picked from
what is defined in the main task input.
imagename, imsize, cell, phasecenter, startmodel, mask
specmode, nchan, start, width, nterms, reffreq,
gridder, deconvolver, wprojplanes
Note : 'specmode' is an option, so combinations of mfs and cube
for different image fields, for example, are supported.
'deconvolver' and 'gridder' are also options that allow different
imaging or deconvolution algorithm per image field.
For example, multiscale with wprojection and 16 w-term planes
on the main field and mtmfs with nterms=3 and wprojection
with 64 planes on a bright outlier source for which the frequency
dependence of the primary beam produces a strong effect that
must be modeled. The traditional alternative to this approach is
to first image the outlier, subtract it out of the data (uvsub) and
then image the main field.
Note : If you encounter a use-case where some other parameter needs
to be allowed in the outlier file (and it is logical to do so), please
send us feedback. The above is an initial list.
weighting Weighting scheme (natural,uniform,briggs,superuniform,radial, briggsabs, briggsbwtaper)
During gridding of the dirty or residual image, each visibility value is
multiplied by a weight before it is accumulated on the uv-grid.
The PSF's uv-grid is generated by gridding only the weights (weightgrid).
weighting='natural' : Gridding weights are identical to the data weights
from the MS. For visibilities with similar data weights,
the weightgrid will follow the sample density
pattern on the uv-plane. This weighting scheme
provides the maximum imaging sensitivity at the
expense of a possibly fat PSF with high sidelobes.
It is most appropriate for detection experiments
where sensitivity is most important.
weighting='uniform' : Gridding weights per visibility data point are the
original data weights divided by the total weight of
all data points that map to the same uv grid cell :
' data_weight / total_wt_per_cell '.
The weightgrid is as close to flat as possible resulting
in a PSF with a narrow main lobe and suppressed
sidelobes. However, since heavily sampled areas of
the uv-plane get down-weighted, the imaging
sensitivity is not as high as with natural weighting.
It is most appropriate for imaging experiments where
a well behaved PSF can help the reconstruction.
weighting='briggs' : Gridding weights per visibility data point are given by
'data_weight / ( A *total_wt_per_cell + B ) ' where
A and B vary according to the 'robust' parameter.
robust = -2.0 maps to A=1,B=0 or uniform weighting.
robust = +2.0 maps to natural weighting.
(robust=0.5 is equivalent to robust=0.0 in AIPS IMAGR.)
Robust/Briggs weighting generates a PSF that can
vary smoothly between 'natural' and 'uniform' and
allow customized trade-offs between PSF shape and
imaging sensitivity.
weighting='briggsabs' : Experimental option.
Same as Briggs except the formula is different A=
robust*robust and B is dependent on the
noise per visibility estimated. Giving noise='0Jy'
is not a reasonable option.
In this mode (or formula) robust values
from -2.0 to 0.0 only make sense (2.0 and
-2.0 will get the same weighting)
weighting='superuniform' : This is similar to uniform weighting except that
the total_wt_per_cell is replaced by the
total_wt_within_NxN_cells around the uv cell of
interest. ( N = subparameter 'npixels' )
This method tends to give a PSF with inner
sidelobes that are suppressed as in uniform
weighting but with far-out sidelobes closer to
natural weighting. The peak sensitivity is also
closer to natural weighting.
weighting='radial' : Gridding weights are given by ' data_weight * uvdistance '
This method approximately minimizes rms sidelobes
for an east-west synthesis array.
weighting='briggsbwtaper' : A modified version of Briggs weighting for cubes where an inverse uv taper,
which is proportional to the fractional bandwidth of the entire cube,
is applied per channel. The objective is to modify cube (perchanweightdensity = True)
imaging weights to have a similar density to that of the continuum imaging weights.
This is currently an experimental weighting scheme being developed for ALMA.
For more details on weighting please see Chapter 3
of Dan Briggs' thesis (http://www.aoc.nrao.edu/dissertations/dbriggs)
robust Robustness parameter for Briggs weighting.
robust = -2.0 maps to uniform weighting.
robust = +2.0 maps to natural weighting.
(robust=0.5 is equivalent to robust=0.0 in AIPS IMAGR.)
noise noise parameter for briggs abs mode weighting
npixels Number of pixels to determine uv-cell size for super-uniform weighting
(0 defaults to -/+ 3 pixels)
npixels -- uv-box used for weight calculation
a box going from -npixel/2 to +npixel/2 on each side
around a point is used to calculate weight density.
npixels=2 goes from -1 to +1 and covers 3 pixels on a side.
npixels=0 implies a single pixel, which does not make sense for
superuniform weighting. Therefore, if npixels=0 it will
be forced to 6 (or a box of -3pixels to +3pixels) to cover
7 pixels on a side.
uvtaper uv-taper on outer baselines in uv-plane
Apply a Gaussian taper in addition to the weighting scheme specified
via the 'weighting' parameter. Higher spatial frequencies are weighted
down relative to lower spatial frequencies to suppress artifacts
arising from poorly sampled areas of the uv-plane. It is equivalent to
smoothing the PSF obtained by other weighting schemes and can be
specified either as a Gaussian in uv-space (eg. units of lambda)
or as a Gaussian in the image domain (eg. angular units like arcsec).
uvtaper = [bmaj, bmin, bpa]
NOTE: the on-sky FWHM in arcsec is roughly the uv taper/200 (klambda).
default: uvtaper=[]; no Gaussian taper applied
example: uvtaper=['5klambda'] circular taper
FWHM=5 kilo-lambda
uvtaper=['5klambda','3klambda','45.0deg']
uvtaper=['10arcsec'] on-sky FWHM 10 arcseconds
uvtaper=['300.0'] default units are lambda
in aperture plane
niter Maximum number of iterations
A stopping criterion based on total iteration count.
Currently the parameter type is defined as an integer therefore the integer value
larger than 2147483647 will not be set properly as it causes an overflow.
Iterations are typically defined as selecting one flux component
and partially subtracting it out from the residual image.
niter=0 : Do only the initial major cycle (make dirty image, psf, pb, etc)
niter larger than zero : Run major and minor cycles.
Note : Global stopping criteria vs major-cycle triggers
In addition to global stopping criteria, the following rules are
used to determine when to terminate a set of minor cycle iterations
and trigger major cycles [derived from Cotton-Schwab Clean, 1984]
'cycleniter' : controls the maximum number of iterations per image
plane before triggering a major cycle.
'cyclethreshold' : Automatically computed threshold related to the
max sidelobe level of the PSF and peak residual.
Divergence, detected as an increase of 10% in peak residual from the
minimum so far (during minor cycle iterations)
The first criterion to be satisfied takes precedence.
Note : Iteration counts for cubes or multi-field images :
For images with multiple planes (or image fields) on which the
deconvolver operates in sequence, iterations are counted across
all planes (or image fields). The iteration count is compared with
'niter' only after all channels/planes/fields have completed their
minor cycles and exited either due to 'cycleniter' or 'cyclethreshold'.
Therefore, the actual number of iterations reported in the logger
can sometimes be larger than the user specified value in 'niter'.
For example, with niter=100, cycleniter=20,nchan=10,threshold=0,
a total of 200 iterations will be done in the first set of minor cycles
before the total is compared with niter=100 and it exits.
Note : Additional global stopping criteria include
- no change in peak residual across two major cycles
- a 50% or more increase in peak residual across one major cycle
gain Loop gain
Fraction of the source flux to subtract out of the residual image
for the CLEAN algorithm and its variants.
A low value (0.2 or less) is recommended when the sky brightness
distribution is not well represented by the basis functions used by
the chosen deconvolution algorithm. A higher value can be tried when
there is a good match between the true sky brightness structure and
the basis function shapes. For example, for extended emission,
multiscale clean with an appropriate set of scale sizes will tolerate
a higher loop gain than Clark clean (for example).
threshold Stopping threshold (number in units of Jy, or string)
A global stopping threshold that the peak residual (within clean mask)
across all image planes is compared to.
threshold = 0.005 : 5mJy
threshold = '5.0mJy'
Note : A 'cyclethreshold' is internally computed and used as a major cycle
trigger. It is related to what fraction of the PSF can be reliably
used during minor cycle updates of the residual image. By default
the minor cycle iterations terminate once the peak residual reaches
the first sidelobe level of the brightest source.
'cyclethreshold' is computed as follows using the settings in
parameters 'cyclefactor','minpsffraction','maxpsffraction','threshold' :
psf_fraction = max_psf_sidelobe_level * 'cyclefactor'
psf_fraction = max(psf_fraction, 'minpsffraction');
psf_fraction = min(psf_fraction, 'maxpsffraction');
cyclethreshold = peak_residual * psf_fraction
cyclethreshold = max( cyclethreshold, 'threshold' )
If nsigma is set (>0.0), the N-sigma threshold is calculated (see
the description under nsigma), then cyclethreshold is further modified as,
cyclethreshold = max( cyclethreshold, nsigma_threshold )
'cyclethreshold' is made visible and editable only in the
interactive GUI when tclean is run with interactive=True.
nsigma Multiplicative factor for rms-based threshold stopping
N-sigma threshold is calculated as nsigma * rms value per image plane determined
from a robust statistics. For nsigma > 0.0, in a minor cycle, a maximum of the two values,
the N-sigma threshold and cyclethreshold, is used to trigger a major cycle
(see also the description under 'threshold').
Set nsigma=0.0 to preserve the previous tclean behavior without this feature.
The top level parameter, fastnoise is relevant for the rms noise calculation which is used
to determine the threshold.
The parameter 'nsigma' may be an int, float, or a double.
cycleniter Maximum number of minor-cycle iterations (per plane) before triggering
a major cycle
For example, for a single plane image, if niter=100 and cycleniter=20,
there will be 5 major cycles after the initial one (assuming there is no
threshold based stopping criterion). At each major cycle boundary, if
the number of iterations left over (to reach niter) is less than cycleniter,
it is set to the difference.
Note : cycleniter applies per image plane, even if cycleniter x nplanes
gives a total number of iterations greater than 'niter'. This is to
preserve consistency across image planes within one set of minor
cycle iterations.
cyclefactor Scaling on PSF sidelobe level to compute the minor-cycle stopping threshold.
Please refer to the Note under the documentation for 'threshold' that
discusses the calculation of 'cyclethreshold'.
cyclefactor=1.0 results in a cyclethreshold at the first sidelobe level of
the brightest source in the residual image before the minor cycle starts.
cyclefactor=0.5 allows the minor cycle to go deeper.
cyclefactor=2.0 triggers a major cycle sooner.
minpsffraction PSF fraction that marks the max depth of cleaning in the minor cycle
Please refer to the Note under the documentation for 'threshold' that
discusses the calculation of 'cyclethreshold'.
For example, minpsffraction=0.5 will stop cleaning at half the height of
the peak residual and trigger a major cycle earlier.
maxpsffraction PSF fraction that marks the minimum depth of cleaning in the minor cycle
Please refer to the Note under the documentation for 'threshold' that
discusses the calculation of 'cyclethreshold'.
For example, maxpsffraction=0.8 will ensure that at least the top 20
percent of the source will be subtracted out in the minor cycle even if
the first PSF sidelobe is at the 0.9 level (an extreme example), or if the
cyclefactor is set too high for anything to get cleaned.
interactive Modify masks and parameters at runtime
interactive=True will trigger an interactive GUI at every major cycle
boundary (after the major cycle and before the minor cycle).
The interactive mode is currently not available for parallel cube imaging (please also
refer to the Note under the documentation for 'parallel' below).
Options for runtime parameter modification are :
Interactive clean mask : Draw a 1/0 mask (appears as a contour) by hand.
If a mask is supplied at the task interface or if
automasking is invoked, the current mask is
displayed in the GUI and is available for manual
editing.
Note : If a mask contour is not visible, please
check the cursor display at the bottom of
GUI to see which parts of the mask image
have ones and zeros. If the entire mask=1
no contours will be visible.
Operation buttons : -- Stop execution now (restore current model and exit)
-- Continue on until global stopping criteria are reached
without stopping for any more interaction
-- Continue with minor cycles and return for interaction
after the next major cycle.
Iteration control : -- max cycleniter : Trigger for the next major cycle
The display begins with
[ min( cycleniter, niter - itercount ) ]
and can be edited by hand.
-- iterations left : The display begins with [niter-itercount ]
and can be edited to increase or
decrease the total allowed niter.
-- threshold : Edit global stopping threshold
-- cyclethreshold : The display begins with the
automatically computed value
(see Note in help for 'threshold'),
and can be edited by hand.
All edits will be reflected in the log messages that appear
once minor cycles begin.
[ For scripting purposes, replacing True/False with 1/0 will get tclean to
return an imaging summary dictionary to python ]
usemask Type of mask(s) to be used for deconvolution
user: (default) mask image(s) or user specified region file(s) or string CRTF expression(s)
subparameters: mask, pbmask
pb: primary beam mask
subparameter: pbmask
Example: usemask="pb", pbmask=0.2
Construct a mask at the 0.2 pb gain level.
(Currently, this option will work only with
gridders that produce .pb (i.e. mosaic and awproject)
or if an externally produced .pb image exists on disk)
auto-multithresh : auto-masking by multiple thresholds for deconvolution
subparameters : sidelobethreshold, noisethreshold, lownoisethreshold, negativethrehsold, smoothfactor,
minbeamfrac, cutthreshold, pbmask, growiterations, dogrowprune, minpercentchange, verbose
Additional top level parameter relevant to auto-multithresh: fastnoise
if pbmask is >0.0, the region outside the specified pb gain level is excluded from
image statistics in determination of the threshold.
Note: By default the intermediate mask generated by automask at each deconvolution cycle
is over-written in the next cycle but one can save them by setting
the environment variable, SAVE_ALL_AUTOMASKS="true".
(e.g. in the CASA prompt, os.environ['SAVE_ALL_AUTOMASKS']="true" )
The saved CASA mask image name will be imagename.mask.autothresh#, where
# is the iteration cycle number.
mask Mask (a list of image name(s) or region file(s) or region string(s)
The name of a CASA image or region file or region string that specifies
a 1/0 mask to be used for deconvolution. Only locations with value 1 will
be considered for the centers of flux components in the minor cycle.
If regions specified fall completely outside of the image, tclean will throw an error.
Manual mask options/examples :
mask='xxx.mask' : Use this CASA image named xxx.mask and containing
ones and zeros as the mask.
If the mask is only different in spatial coordinates from what is being made
it will be resampled to the target coordinate system before being used.
The mask has to have the same shape in velocity and Stokes planes
as the output image. Exceptions are single velocity and/or single
Stokes plane masks. They will be expanded to cover all velocity and/or
Stokes planes of the output cube.
[ Note : If an error occurs during image resampling or
if the expected mask does not appear, please try
using tasks 'imregrid' or 'makemask' to resample
the mask image onto a CASA image with the target
shape and coordinates and supply it via the 'mask'
parameter. ]
mask='xxx.crtf' : A text file with region strings and the following on the first line
( #CRTFv0 CASA Region Text Format version 0 )
This is the format of a file created via the viewer's region
tool when saved in CASA region file format.
mask='circle[[40pix,40pix],10pix]' : A CASA region string.
mask=['xxx.mask','xxx.crtf', 'circle[[40pix,40pix],10pix]'] : a list of masks
Note : Mask images for deconvolution must contain 1 or 0 in each pixel.
Such a mask is different from an internal T/F mask that can be
held within each CASA image. These two types of masks are not
automatically interchangeable, so please use the makemask task
to copy between them if you need to construct a 1/0 based mask
from a T/F one.
Note : Work is in progress to generate more flexible masking options and
enable more controls.
pbmask Sub-parameter for usemask='auto-multithresh': primary beam mask
Examples : pbmask=0.0 (default, no pb mask)
pbmask=0.2 (construct a mask at the 0.2 pb gain level)
sidelobethreshold Sub-parameter for "auto-multithresh": mask threshold based on sidelobe levels: sidelobethreshold * max_sidelobe_level * peak residual
noisethreshold Sub-parameter for "auto-multithresh": mask threshold based on the noise level: noisethreshold * rms + location (=median)
The rms is calculated from MAD with rms = 1.4826*MAD.
lownoisethreshold Sub-parameter for "auto-multithresh": mask threshold to grow previously masked regions via binary dilation: lownoisethreshold * rms in residual image + location (=median)
The rms is calculated from MAD with rms = 1.4826*MAD.
negativethreshold Sub-parameter for "auto-multithresh": mask threshold for negative features: -1.0* negativethreshold * rms + location(=median)
The rms is calculated from MAD with rms = 1.4826*MAD.
smoothfactor Sub-parameter for "auto-multithresh": smoothing factor in a unit of the beam
minbeamfrac Sub-parameter for "auto-multithresh": minimum beam fraction in size to prune masks smaller than minbeamfrac * beam
<=0.0 : No pruning
cutthreshold Sub-parameter for "auto-multithresh": threshold to cut the smoothed mask to create a final mask: cutthreshold * peak of the smoothed mask
growiterations Sub-parameter for "auto-multithresh": Maximum number of iterations to perform using binary dilation for growing the mask
dogrowprune Experimental sub-parameter for "auto-multithresh": Do pruning on the grow mask
minpercentchange If the change in the mask size in a particular channel is less than minpercentchange, stop masking that channel in subsequent cycles. This check is only applied when noise based threshold is used and when the previous clean major cycle had a cyclethreshold value equal to the clean threshold. Values equal to -1.0 (or any value less than 0.0) will turn off this check (the default). Automask will still stop masking if the current channel mask is an empty mask and the noise threshold was used to determine the mask.
verbose The summary of automasking at the end of each automasking process
is printed in the logger. Following information per channel will be listed in the summary.
chan: channel number
masking?: F - stop updating automask for the subsequent iteration cycles
RMS: robust rms noise
peak: peak in residual image
thresh_type: type of threshold used (noise or sidelobe)
thresh_value: the value of threshold used
N_reg: number of the automask regions
N_pruned: number of the automask regions removed by pruning
N_grow: number of the grow mask regions
N_grow_pruned: number of the grow mask regions removed by pruning
N_neg_pix: number of pixels for negative mask regions
Note that for a large cube, extra logging may slow down the process.
fastnoise Only relevant when automask (usemask='auto-multithresh') and/or the n-sigma stopping threshold (nsigma>0.0) are/is used. If it is set to True, a simpler but faster noise calculation is used.
In this case, the threshold values are determined based on classic statistics (using all
unmasked pixels for the calculations).
If it is set to False, the new noise calculation
method is used based on pre-existing mask.
Case 1: no existing mask
Calculate image statistics using Chauvenet algorithm
Case 2: there is an existing mask
Calculate image statistics by classical method on the region
outside the mask and inside the primary beam mask.
In all cases above RMS noise is calculated from MAD.
restart Re-use existing images (and start from an existing model image)
or automatically increment the image name and make a new image set.
True : Re-use existing images. If imagename.model exists the subsequent
run will start from this model (i.e. predicting it using current gridder
settings and starting from the residual image). Care must be taken
when combining this option with startmodel. Currently, only one or
the other can be used.
startmodel='', imagename.model exists :
- Start from imagename.model
startmodel='xxx', imagename.model does not exist :
- Start from startmodel
startmodel='xxx', imagename.model exists :
- Exit with an error message requesting the user to pick
only one model. This situation can arise when doing one
run with startmodel='xxx' to produce an output
imagename.model that includes the content of startmodel,
and wanting to restart a second run to continue deconvolution.
Startmodel should be set to '' before continuing.
If any change in the shape or coordinate system of the image is
desired during the restart, please change the image name and
use the startmodel (and mask) parameter(s) so that the old model
(and mask) can be regridded to the new coordinate system before starting.
False : A convenience feature to increment imagename with '_1', '_2',
etc as suffixes so that all runs of tclean are fresh starts (without
having to change the imagename parameter or delete images).
This mode will search the current directory for all existing
imagename extensions, pick the maximum, and adds 1.
For imagename='try' it will make try.psf, try_2.psf, try_3.psf, etc.
This also works if you specify a directory name in the path :
imagename='outdir/try'. If './outdir' does not exist, it will create it.
Then it will search for existing filenames inside that directory.
If outlier fields are specified, the incrementing happens for each
of them (since each has its own 'imagename'). The counters are
synchronized across imagefields, to make it easier to match up sets
of output images. It adds 1 to the 'max id' from all outlier names
on disk. So, if you do two runs with only the main field
(imagename='try'), and in the third run you add an outlier with
imagename='outtry', you will get the following image names
for the third run : 'try_3' and 'outtry_3' even though
'outtry' and 'outtry_2' have not been used.
savemodel Options to save model visibilities (none, virtual, modelcolumn)
Often, model visibilities must be created and saved in the MS
to be later used for self-calibration (or to just plot and view them).
none : Do not save any model visibilities in the MS. The MS is opened
in readonly mode.
Model visibilities can be predicted in a separate step by
restarting tclean with niter=0,savemodel=virtual or modelcolumn
and not changing any image names so that it finds the .model on
disk (or by changing imagename and setting startmodel to the
original imagename).
virtual : In the last major cycle, save the image model and state of the
gridder used during imaging within the SOURCE subtable of the
MS. Images required for de-gridding will also be stored internally.
All future references to model visibilities will activate the
(de)gridder to compute them on-the-fly. This mode is useful
when the dataset is large enough that an additional model data
column on disk may be too much extra disk I/O, when the
gridder is simple enough that on-the-fly recomputing of the
model visibilities is quicker than disk I/O.
For example, gridder='awproject' does not support the virtual model.
modelcolumn : In the last major cycle, save predicted model visibilities
in the MODEL_DATA column of the MS. This mode is useful when
the de-gridding cost to produce the model visibilities is higher
than the I/O required to read the model visibilities from disk.
This mode is currently required for gridder='awproject'.
This mode is also required for the ability to later pull out
model visibilities from the MS into a python array for custom
processing.
Note 1 : The imagename.model image on disk will always be constructed
if the minor cycle runs. This savemodel parameter applies only to
model visibilities created by de-gridding the model image.
Note 2 : It is possible for an MS to have both a virtual model
as well as a model_data column, but under normal operation,
the last used mode will get triggered. Use the delmod task to
clear out existing models from an MS if confusion arises.
Note 3: when parallel=True, use savemodel='none'; Other options are not yet ready
for use in parallel. If model visibilities need to be saved (virtual or modelcolumn):
please run tclean in serial mode with niter=0 after the parallel run.
calcres Calculate initial residual image
This parameter controls what the first major cycle does.
calcres=False with niter greater than 0 will assume that
a .residual image already exists and that the minor cycle can
begin without recomputing it.
calcres=False with niter=0 implies that only the PSF will be made
and no data will be gridded.
calcres=True requires that calcpsf=True or that the .psf and .sumwt
images already exist on disk (for normalization purposes).
Usage example : For large runs (or a pipeline scripts) it may be
useful to first run tclean with niter=0 to create
an initial .residual to look at and perhaps make
a custom mask for. Imaging can be resumed
without recomputing it.
calcpsf Calculate PSF
This parameter controls what the first major cycle does.
calcpsf=False will assume that a .psf image already exists
and that the minor cycle can begin without recomputing it.
psfcutoff When the .psf image is created a 2 dimensional Gaussian is fit to the main lobe of the PSF.
Which pixels in the PSF are fitted is determined by psfcutoff.
The default value of psfcutoff is 0.35 and can be varied from 0.01 to 0.99.
Fitting algorithm:
- A region of 41 x 41 pixels around the peak of the PSF is compared against the psfcutoff.
Sidelobes are ignored by radially searching from the PSF peak.
- Calculate the bottom left corner (blc) and top right corner (trc) from the points. Expand blc and trc with a number of pixels (5).
- Create a new sub-matrix from blc and trc.
- Interpolate matrix to a target number of points (3001) using CUBIC spline.
- All the non-sidelobe points, in the interpolated matrix, that are above the psfcutoff are used to fit a Gaussian.
A Levenberg-Marquardt algorithm is used.
- If the fitting fails the algorithm is repeated with the psfcutoff decreased (psfcutoff=psfcutoff/1.5).
A message in the log will appear if the fitting fails, along with the new value of psfcutoff.
This will be done up to 50 times if fitting fails.
This Gaussian beam is defined by a major axis, minor axis, and position angle.
During the restoration process, this Gaussian beam is used as the Clean beam.
Varying psfcutoff might be useful for producing a better fit for highly non-Gaussian PSFs, however, the resulting fits should be carefully checked.
This parameter should rarely be changed.
(This is not the support size for clark clean.)
parallel Run major cycles in parallel (this feature is experimental)
Parallel tclean will run only if casa has already been started using mpirun.
Please refer to HPC documentation for details on how to start this on your system.
Example : mpirun -n 3 -xterm 0 `which casa`
Continuum Imaging :
- Data are partitioned (in time) into NProc pieces
- Gridding/iFT is done separately per partition
- Images (and weights) are gathered and then normalized
- One non-parallel minor cycle is run
- Model image is scattered to all processes
- Major cycle is done in parallel per partition
Cube Imaging :
- Data and Image coordinates are partitioned (in freq) into NProc pieces
- Each partition is processed independently (major and minor cycles)
- All processes are synchronized at major cycle boundaries for convergence checks
- At the end, cubes from all partitions are concatenated along the spectral axis
Note 1 : Iteration control for cube imaging is independent per partition.
- There is currently no communication between them to synchronize
information such as peak residual and cyclethreshold. Therefore,
different chunks may trigger major cycles at different levels.
- For cube imaging in parallel, there is currently no interactive masking.
(Proper synchronization of iteration control is work in progress.)
RETURNS void
--------- examples -----------------------------------------------------------
This is the first release of our refactored imager code. Although most features have
been used and validated, there are many details that have not been thoroughly tested.
Feedback will be much appreciated.
Usage Examples :
-----------------------
(A) A suite of test programs that demo all usable modes of tclean on small test datasets
https://svn.cv.nrao.edu/svn/casa/branches/release-4_5/gcwrap/python/scripts/tests/test_refimager.py
(B) A set of demo examples for ALMA imaging
https://casaguides.nrao.edu/index.php/TCLEAN_and_ALMA
"""
# Task metadata consumed by the CASA task-interface layer (not by users directly).
_info_group_ = """imaging"""  # category group this task is listed under
_info_desc_ = """Parallelized tclean in consecutive time steps"""  # one-line task summary
def __call__( self, vis='', imageprefix='', imagesuffix='', ncpu=int(8), twidth=int(1), doreg=False, usephacenter=True, reftime='', toTb=False, sclfactor=float(1.0), subregion='', docompress=False, overwrite=False, selectdata=True, field='', spw='', timerange='', uvrange='', antenna='', scan='', observation='', intent='', datacolumn='corrected', imagename='', imsize=[ int(100) ], cell=[ ], phasecenter='', stokes='I', projection='SIN', startmodel='', specmode='mfs', reffreq='', nchan=int(-1), start='', width='', outframe='LSRK', veltype='radio', restfreq=[ ], interpolation='linear', perchanweightdensity=True, gridder='standard', facets=int(1), psfphasecenter='', wprojplanes=int(1), vptable='', mosweight=True, aterm=True, psterm=False, wbawp=True, conjbeams=False, cfcache='', usepointing=False, computepastep=float(360.0), rotatepastep=float(360.0), pointingoffsetsigdev=[ ], pblimit=float(0.2), normtype='flatnoise', deconvolver='hogbom', scales=[ ], nterms=int(2), smallscalebias=float(0.0), restoration=True, restoringbeam=[ ], pbcor=False, outlierfile='', weighting='natural', robust=float(0.5), noise='1.0Jy', npixels=int(0), uvtaper=[ '' ], niter=int(0), gain=float(0.1), threshold=float(0.0), nsigma=float(0.0), cycleniter=int(-1), cyclefactor=float(1.0), minpsffraction=float(0.05), maxpsffraction=float(0.8), interactive=False, usemask='user', mask='', pbmask=float(0.0), sidelobethreshold=float(3.0), noisethreshold=float(5.0), lownoisethreshold=float(1.5), negativethreshold=float(0.0), smoothfactor=float(1.0), minbeamfrac=float(0.3), cutthreshold=float(0.01), growiterations=int(75), dogrowprune=True, minpercentchange=float(-1.0), verbose=False, fastnoise=True, restart=True, savemodel='none', calcres=True, calcpsf=True, psfcutoff=float(0.35), parallel=False ):
schema = {'vis': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'imageprefix': {'type': 'cStr', 'coerce': _coerce.to_str}, 'imagesuffix': {'type': 'cStr', 'coerce': _coerce.to_str}, 'ncpu': {'type': 'cInt'}, 'twidth': {'type': 'cInt'}, 'doreg': {'type': 'cBool'}, 'usephacenter': {'type': 'cBool'}, 'reftime': {'type': 'cStr', 'coerce': _coerce.to_str}, 'toTb': {'type': 'cBool'}, 'sclfactor': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'subregion': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'docompress': {'type': 'cBool'}, 'overwrite': {'type': 'cBool'}, 'selectdata': {'type': 'cBool'}, 'field': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'spw': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'timerange': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'uvrange': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'antenna': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'scan': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'observation': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cInt'}]}, 'intent': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'datacolumn': {'type': 'cStr', 'coerce': _coerce.to_str}, 'imagename': {'anyof': [{'type': 'cInt'}, {'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': 
[_coerce.to_list,_coerce.to_strvec]}]}, 'imsize': {'anyof': [{'type': 'cInt'}, {'type': 'cIntVec', 'coerce': [_coerce.to_list,_coerce.to_intvec]}]}, 'cell': {'anyof': [{'type': 'cIntVec', 'coerce': [_coerce.to_list,_coerce.to_intvec]}, {'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cFloat', 'coerce': _coerce.to_float}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}, {'type': 'cInt'}, {'type': 'cFloatVec', 'coerce': [_coerce.to_list,_coerce.to_floatvec]}]}, 'phasecenter': {'anyof': [{'type': 'cInt'}, {'type': 'cStr', 'coerce': _coerce.to_str}]}, 'stokes': {'type': 'cStr', 'coerce': _coerce.to_str, 'allowed': [ 'I', 'IQUV', 'UV', 'RRLL', 'IQ', 'V', 'pseudoI', 'QU', 'YY', 'RR', 'Q', 'U', 'IV', 'XX', 'XXYY', 'LL' ]}, 'projection': {'type': 'cStr', 'coerce': _coerce.to_str}, 'startmodel': {'type': 'cVariant', 'coerce': [_coerce.to_variant]}, 'specmode': {'type': 'cVariant', 'coerce': [_coerce.to_variant] # <allowed> IS NOT ALLOWED FOR A PARAMETER OF TYPE any
}, 'reffreq': {'type': 'cVariant', 'coerce': [_coerce.to_variant]}, 'nchan': {'type': 'cInt'}, 'start': {'type': 'cVariant', 'coerce': [_coerce.to_variant]}, 'width': {'type': 'cVariant', 'coerce': [_coerce.to_variant]}, 'outframe': {'type': 'cStr', 'coerce': _coerce.to_str}, 'veltype': {'type': 'cStr', 'coerce': _coerce.to_str}, 'restfreq': {'type': 'cVariant', 'coerce': [_coerce.to_variant]}, 'interpolation': {'type': 'cStr', 'coerce': _coerce.to_str, 'allowed': [ 'nearest', 'linear', 'cubic' ]}, 'perchanweightdensity': {'type': 'cBool'}, 'gridder': {'type': 'cStr', 'coerce': _coerce.to_str, 'allowed': [ 'widefield', 'wproject', 'imagemosaic', 'standard', 'awproject', 'wprojectft', 'mosaicft', 'ft', 'ftmosaic', 'mosaic', 'awprojectft', 'gridft' ]}, 'facets': {'type': 'cInt'}, 'psfphasecenter': {'anyof': [{'type': 'cInt'}, {'type': 'cStr', 'coerce': _coerce.to_str}]}, 'wprojplanes': {'type': 'cInt'}, 'vptable': {'type': 'cStr', 'coerce': _coerce.to_str}, 'mosweight': {'type': 'cBool'}, 'aterm': {'type': 'cBool'}, 'psterm': {'type': 'cBool'}, 'wbawp': {'type': 'cBool'}, 'conjbeams': {'type': 'cBool'}, 'cfcache': {'type': 'cStr', 'coerce': _coerce.to_str}, 'usepointing': {'type': 'cBool'}, 'computepastep': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'rotatepastep': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'pointingoffsetsigdev': {'anyof': [{'type': 'cIntVec', 'coerce': [_coerce.to_list,_coerce.to_intvec]}, {'type': 'cFloatVec', 'coerce': [_coerce.to_list,_coerce.to_floatvec]}]}, 'pblimit': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'normtype': {'type': 'cStr', 'coerce': _coerce.to_str}, 'deconvolver': {'type': 'cStr', 'coerce': _coerce.to_str, 'allowed': [ 'clarkstokes_exp', 'mtmfs', 'mem', 'clarkstokes', 'hogbom', 'clark_exp', 'clark', 'multiscale' ]}, 'scales': {'anyof': [{'type': 'cIntVec', 'coerce': [_coerce.to_list,_coerce.to_intvec]}, {'type': 'cFloatVec', 'coerce': [_coerce.to_list,_coerce.to_floatvec]}]}, 'nterms': {'type': 'cInt'}, 
'smallscalebias': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'restoration': {'type': 'cBool'}, 'restoringbeam': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'pbcor': {'type': 'cBool'}, 'outlierfile': {'type': 'cStr', 'coerce': _coerce.to_str}, 'weighting': {'type': 'cStr', 'coerce': _coerce.to_str, 'allowed': [ 'briggsabs', 'briggs', 'briggsbwtaper', 'natural', 'radial', 'superuniform', 'uniform' ]}, 'robust': {'type': 'cFloat', 'coerce': _coerce.to_float, 'min': -2.0, 'max': 2.0}, 'noise': {'type': 'cVariant', 'coerce': [_coerce.to_variant]}, 'npixels': {'type': 'cInt'}, 'uvtaper': {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}, 'niter': {'type': 'cInt'}, 'gain': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'threshold': {'type': 'cVariant', 'coerce': [_coerce.to_variant]}, 'nsigma': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'cycleniter': {'type': 'cInt'}, 'cyclefactor': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'minpsffraction': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'maxpsffraction': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'interactive': {'anyof': [{'type': 'cBool'}, {'type': 'cInt'}]}, 'usemask': {'type': 'cStr', 'coerce': _coerce.to_str, 'allowed': [ 'user', 'pb', 'auto-multithresh' ]}, 'mask': {'anyof': [{'type': 'cStr', 'coerce': _coerce.to_str}, {'type': 'cStrVec', 'coerce': [_coerce.to_list,_coerce.to_strvec]}]}, 'pbmask': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'sidelobethreshold': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'noisethreshold': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'lownoisethreshold': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'negativethreshold': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'smoothfactor': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'minbeamfrac': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'cutthreshold': {'type': 'cFloat', 'coerce': 
_coerce.to_float}, 'growiterations': {'type': 'cInt'}, 'dogrowprune': {'type': 'cBool'}, 'minpercentchange': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'verbose': {'type': 'cBool'}, 'fastnoise': {'type': 'cBool'}, 'restart': {'type': 'cBool'}, 'savemodel': {'type': 'cStr', 'coerce': _coerce.to_str, 'allowed': [ 'none', 'virtual', 'modelcolumn' ]}, 'calcres': {'type': 'cBool'}, 'calcpsf': {'type': 'cBool'}, 'psfcutoff': {'type': 'cFloat', 'coerce': _coerce.to_float}, 'parallel': {'type': 'cBool'}}
doc = {'vis': vis, 'imageprefix': imageprefix, 'imagesuffix': imagesuffix, 'ncpu': ncpu, 'twidth': twidth, 'doreg': doreg, 'usephacenter': usephacenter, 'reftime': reftime, 'toTb': toTb, 'sclfactor': sclfactor, 'subregion': subregion, 'docompress': docompress, 'overwrite': overwrite, 'selectdata': selectdata, 'field': field, 'spw': spw, 'timerange': timerange, 'uvrange': uvrange, 'antenna': antenna, 'scan': scan, 'observation': observation, 'intent': intent, 'datacolumn': datacolumn, 'imagename': imagename, 'imsize': imsize, 'cell': cell, 'phasecenter': phasecenter, 'stokes': stokes, 'projection': projection, 'startmodel': startmodel, 'specmode': specmode, 'reffreq': reffreq, 'nchan': nchan, 'start': start, 'width': width, 'outframe': outframe, 'veltype': veltype, 'restfreq': restfreq, 'interpolation': interpolation, 'perchanweightdensity': perchanweightdensity, 'gridder': gridder, 'facets': facets, 'psfphasecenter': psfphasecenter, 'wprojplanes': wprojplanes, 'vptable': vptable, 'mosweight': mosweight, 'aterm': aterm, 'psterm': psterm, 'wbawp': wbawp, 'conjbeams': conjbeams, 'cfcache': cfcache, 'usepointing': usepointing, 'computepastep': computepastep, 'rotatepastep': rotatepastep, 'pointingoffsetsigdev': pointingoffsetsigdev, 'pblimit': pblimit, 'normtype': normtype, 'deconvolver': deconvolver, 'scales': scales, 'nterms': nterms, 'smallscalebias': smallscalebias, 'restoration': restoration, 'restoringbeam': restoringbeam, 'pbcor': pbcor, 'outlierfile': outlierfile, 'weighting': weighting, 'robust': robust, 'noise': noise, 'npixels': npixels, 'uvtaper': uvtaper, 'niter': niter, 'gain': gain, 'threshold': threshold, 'nsigma': nsigma, 'cycleniter': cycleniter, 'cyclefactor': cyclefactor, 'minpsffraction': minpsffraction, 'maxpsffraction': maxpsffraction, 'interactive': interactive, 'usemask': usemask, 'mask': mask, 'pbmask': pbmask, 'sidelobethreshold': sidelobethreshold, 'noisethreshold': noisethreshold, 'lownoisethreshold': lownoisethreshold, 'negativethreshold': 
negativethreshold, 'smoothfactor': smoothfactor, 'minbeamfrac': minbeamfrac, 'cutthreshold': cutthreshold, 'growiterations': growiterations, 'dogrowprune': dogrowprune, 'minpercentchange': minpercentchange, 'verbose': verbose, 'fastnoise': fastnoise, 'restart': restart, 'savemodel': savemodel, 'calcres': calcres, 'calcpsf': calcpsf, 'psfcutoff': psfcutoff, 'parallel': parallel}
assert _pc.validate(doc,schema), str(_pc.errors)
_logging_state_ = _start_log( 'ptclean6', [ 'vis=' + repr(_pc.document['vis']), 'imageprefix=' + repr(_pc.document['imageprefix']), 'imagesuffix=' + repr(_pc.document['imagesuffix']), 'ncpu=' + repr(_pc.document['ncpu']), 'twidth=' + repr(_pc.document['twidth']), 'doreg=' + repr(_pc.document['doreg']), 'usephacenter=' + repr(_pc.document['usephacenter']), 'reftime=' + repr(_pc.document['reftime']), 'toTb=' + repr(_pc.document['toTb']), 'sclfactor=' + repr(_pc.document['sclfactor']), 'subregion=' + repr(_pc.document['subregion']), 'docompress=' + repr(_pc.document['docompress']), 'overwrite=' + repr(_pc.document['overwrite']), 'selectdata=' + repr(_pc.document['selectdata']), 'field=' + repr(_pc.document['field']), 'spw=' + repr(_pc.document['spw']), 'timerange=' + repr(_pc.document['timerange']), 'uvrange=' + repr(_pc.document['uvrange']), 'antenna=' + repr(_pc.document['antenna']), 'scan=' + repr(_pc.document['scan']), 'observation=' + repr(_pc.document['observation']), 'intent=' + repr(_pc.document['intent']), 'datacolumn=' + repr(_pc.document['datacolumn']), 'imagename=' + repr(_pc.document['imagename']), 'imsize=' + repr(_pc.document['imsize']), 'cell=' + repr(_pc.document['cell']), 'phasecenter=' + repr(_pc.document['phasecenter']), 'stokes=' + repr(_pc.document['stokes']), 'projection=' + repr(_pc.document['projection']), 'startmodel=' + repr(_pc.document['startmodel']), 'specmode=' + repr(_pc.document['specmode']), 'reffreq=' + repr(_pc.document['reffreq']), 'nchan=' + repr(_pc.document['nchan']), 'start=' + repr(_pc.document['start']), 'width=' + repr(_pc.document['width']), 'outframe=' + repr(_pc.document['outframe']), 'veltype=' + repr(_pc.document['veltype']), 'restfreq=' + repr(_pc.document['restfreq']), 'interpolation=' + repr(_pc.document['interpolation']), 'perchanweightdensity=' + repr(_pc.document['perchanweightdensity']), 'gridder=' + repr(_pc.document['gridder']), 'facets=' + repr(_pc.document['facets']), 'psfphasecenter=' + 
repr(_pc.document['psfphasecenter']), 'wprojplanes=' + repr(_pc.document['wprojplanes']), 'vptable=' + repr(_pc.document['vptable']), 'mosweight=' + repr(_pc.document['mosweight']), 'aterm=' + repr(_pc.document['aterm']), 'psterm=' + repr(_pc.document['psterm']), 'wbawp=' + repr(_pc.document['wbawp']), 'conjbeams=' + repr(_pc.document['conjbeams']), 'cfcache=' + repr(_pc.document['cfcache']), 'usepointing=' + repr(_pc.document['usepointing']), 'computepastep=' + repr(_pc.document['computepastep']), 'rotatepastep=' + repr(_pc.document['rotatepastep']), 'pointingoffsetsigdev=' + repr(_pc.document['pointingoffsetsigdev']), 'pblimit=' + repr(_pc.document['pblimit']), 'normtype=' + repr(_pc.document['normtype']), 'deconvolver=' + repr(_pc.document['deconvolver']), 'scales=' + repr(_pc.document['scales']), 'nterms=' + repr(_pc.document['nterms']), 'smallscalebias=' + repr(_pc.document['smallscalebias']), 'restoration=' + repr(_pc.document['restoration']), 'restoringbeam=' + repr(_pc.document['restoringbeam']), 'pbcor=' + repr(_pc.document['pbcor']), 'outlierfile=' + repr(_pc.document['outlierfile']), 'weighting=' + repr(_pc.document['weighting']), 'robust=' + repr(_pc.document['robust']), 'noise=' + repr(_pc.document['noise']), 'npixels=' + repr(_pc.document['npixels']), 'uvtaper=' + repr(_pc.document['uvtaper']), 'niter=' + repr(_pc.document['niter']), 'gain=' + repr(_pc.document['gain']), 'threshold=' + repr(_pc.document['threshold']), 'nsigma=' + repr(_pc.document['nsigma']), 'cycleniter=' + repr(_pc.document['cycleniter']), 'cyclefactor=' + repr(_pc.document['cyclefactor']), 'minpsffraction=' + repr(_pc.document['minpsffraction']), 'maxpsffraction=' + repr(_pc.document['maxpsffraction']), 'interactive=' + repr(_pc.document['interactive']), 'usemask=' + repr(_pc.document['usemask']), 'mask=' + repr(_pc.document['mask']), 'pbmask=' + repr(_pc.document['pbmask']), 'sidelobethreshold=' + repr(_pc.document['sidelobethreshold']), 'noisethreshold=' + 
repr(_pc.document['noisethreshold']), 'lownoisethreshold=' + repr(_pc.document['lownoisethreshold']), 'negativethreshold=' + repr(_pc.document['negativethreshold']), 'smoothfactor=' + repr(_pc.document['smoothfactor']), 'minbeamfrac=' + repr(_pc.document['minbeamfrac']), 'cutthreshold=' + repr(_pc.document['cutthreshold']), 'growiterations=' + repr(_pc.document['growiterations']), 'dogrowprune=' + repr(_pc.document['dogrowprune']), 'minpercentchange=' + repr(_pc.document['minpercentchange']), 'verbose=' + repr(_pc.document['verbose']), 'fastnoise=' + repr(_pc.document['fastnoise']), 'restart=' + repr(_pc.document['restart']), 'savemodel=' + repr(_pc.document['savemodel']), 'calcres=' + repr(_pc.document['calcres']), 'calcpsf=' + repr(_pc.document['calcpsf']), 'psfcutoff=' + repr(_pc.document['psfcutoff']), 'parallel=' + repr(_pc.document['parallel']) ] )
return _end_log( _logging_state_, 'ptclean6', _ptclean6_t( _pc.document['vis'], _pc.document['imageprefix'], _pc.document['imagesuffix'], _pc.document['ncpu'], _pc.document['twidth'], _pc.document['doreg'], _pc.document['usephacenter'], _pc.document['reftime'], _pc.document['toTb'], _pc.document['sclfactor'], _pc.document['subregion'], _pc.document['docompress'], _pc.document['overwrite'], _pc.document['selectdata'], _pc.document['field'], _pc.document['spw'], _pc.document['timerange'], _pc.document['uvrange'], _pc.document['antenna'], _pc.document['scan'], _pc.document['observation'], _pc.document['intent'], _pc.document['datacolumn'], _pc.document['imagename'], _pc.document['imsize'], _pc.document['cell'], _pc.document['phasecenter'], _pc.document['stokes'], _pc.document['projection'], _pc.document['startmodel'], _pc.document['specmode'], _pc.document['reffreq'], _pc.document['nchan'], _pc.document['start'], _pc.document['width'], _pc.document['outframe'], _pc.document['veltype'], _pc.document['restfreq'], _pc.document['interpolation'], _pc.document['perchanweightdensity'], _pc.document['gridder'], _pc.document['facets'], _pc.document['psfphasecenter'], _pc.document['wprojplanes'], _pc.document['vptable'], _pc.document['mosweight'], _pc.document['aterm'], _pc.document['psterm'], _pc.document['wbawp'], _pc.document['conjbeams'], _pc.document['cfcache'], _pc.document['usepointing'], _pc.document['computepastep'], _pc.document['rotatepastep'], _pc.document['pointingoffsetsigdev'], _pc.document['pblimit'], _pc.document['normtype'], _pc.document['deconvolver'], _pc.document['scales'], _pc.document['nterms'], _pc.document['smallscalebias'], _pc.document['restoration'], _pc.document['restoringbeam'], _pc.document['pbcor'], _pc.document['outlierfile'], _pc.document['weighting'], _pc.document['robust'], _pc.document['noise'], _pc.document['npixels'], _pc.document['uvtaper'], _pc.document['niter'], _pc.document['gain'], _pc.document['threshold'], _pc.document['nsigma'], 
_pc.document['cycleniter'], _pc.document['cyclefactor'], _pc.document['minpsffraction'], _pc.document['maxpsffraction'], _pc.document['interactive'], _pc.document['usemask'], _pc.document['mask'], _pc.document['pbmask'], _pc.document['sidelobethreshold'], _pc.document['noisethreshold'], _pc.document['lownoisethreshold'], _pc.document['negativethreshold'], _pc.document['smoothfactor'], _pc.document['minbeamfrac'], _pc.document['cutthreshold'], _pc.document['growiterations'], _pc.document['dogrowprune'], _pc.document['minpercentchange'], _pc.document['verbose'], _pc.document['fastnoise'], _pc.document['restart'], _pc.document['savemodel'], _pc.document['calcres'], _pc.document['calcpsf'], _pc.document['psfcutoff'], _pc.document['parallel'] ) )
ptclean6 = _ptclean6( )
| 88.053846
| 4,828
| 0.493774
| 14,720
| 148,811
| 4.954959
| 0.135666
| 0.027421
| 0.019195
| 0.009323
| 0.141163
| 0.118842
| 0.088624
| 0.075243
| 0.064631
| 0.054006
| 0
| 0.017111
| 0.445088
| 148,811
| 1,689
| 4,829
| 88.10598
| 0.866125
| 0.822923
| 0
| 0
| 1
| 0
| 0.345961
| 0.002127
| 0
| 0
| 0
| 0
| 0.052632
| 1
| 0.052632
| false
| 0
| 0.368421
| 0
| 0.631579
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
fcb7a2f5a8239727ba1c609b61067df5c6acae03
| 201
|
py
|
Python
|
SimCalorimetry/EcalSelectiveReadoutProducers/python/ecalDigis_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 852
|
2015-01-11T21:03:51.000Z
|
2022-03-25T21:14:00.000Z
|
SimCalorimetry/EcalSelectiveReadoutProducers/python/ecalDigis_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 30,371
|
2015-01-02T00:14:40.000Z
|
2022-03-31T23:26:05.000Z
|
SimCalorimetry/EcalSelectiveReadoutProducers/python/ecalDigis_cff.py
|
ckamtsikis/cmssw
|
ea19fe642bb7537cbf58451dcf73aa5fd1b66250
|
[
"Apache-2.0"
] | 3,240
|
2015-01-02T05:53:18.000Z
|
2022-03-31T17:24:21.000Z
|
import FWCore.ParameterSet.Config as cms
# Define EcalSelectiveReadoutProducer module as "simEcalDigis" with default settings
from SimCalorimetry.EcalSelectiveReadoutProducers.ecalDigis_cfi import *
| 40.2
| 85
| 0.865672
| 20
| 201
| 8.65
| 0.9
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.094527
| 201
| 4
| 86
| 50.25
| 0.950549
| 0.40796
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
1e26f3ae4b816561d50777eda1291f820556c297
| 198
|
py
|
Python
|
Darlington/phase1/python Basic 1/day 16 solution/qtn6.py
|
CodedLadiesInnovateTech/-python-challenge-solutions
|
430cd3eb84a2905a286819eef384ee484d8eb9e7
|
[
"MIT"
] | 6
|
2020-05-23T19:53:25.000Z
|
2021-05-08T20:21:30.000Z
|
Darlington/phase1/python Basic 1/day 16 solution/qtn6.py
|
CodedLadiesInnovateTech/-python-challenge-solutions
|
430cd3eb84a2905a286819eef384ee484d8eb9e7
|
[
"MIT"
] | 8
|
2020-05-14T18:53:12.000Z
|
2020-07-03T00:06:20.000Z
|
Darlington/phase1/python Basic 1/day 16 solution/qtn6.py
|
CodedLadiesInnovateTech/-python-challenge-solutions
|
430cd3eb84a2905a286819eef384ee484d8eb9e7
|
[
"MIT"
] | 39
|
2020-05-10T20:55:02.000Z
|
2020-09-12T17:40:59.000Z
|
#program to find the location of Python module sources.
import sys
print("\nList of directories in sys module:")
print(sys.path)
print("\nList of directories in os module:")
import os
print(os.path)
| 28.285714
| 55
| 0.767677
| 33
| 198
| 4.606061
| 0.515152
| 0.131579
| 0.157895
| 0.302632
| 0.328947
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.131313
| 198
| 7
| 56
| 28.285714
| 0.883721
| 0.272727
| 0
| 0
| 0
| 0
| 0.493056
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.333333
| 0
| 0.333333
| 0.666667
| 0
| 0
| 0
| null | 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 5
|
1e33c5a421e868d7a2be981bc1e0819633898250
| 6,715
|
py
|
Python
|
anet-video-captioning/model/modules.py
|
chihyaoma/cyclical-visual-captioning
|
337eee17a8789e58d7bf687ad9ecf27c8da44f14
|
[
"MIT"
] | 43
|
2020-05-14T21:25:23.000Z
|
2022-03-24T06:17:19.000Z
|
anet-video-captioning/model/modules.py
|
chihyaoma/cyclical-visual-captioning
|
337eee17a8789e58d7bf687ad9ecf27c8da44f14
|
[
"MIT"
] | 3
|
2020-06-09T16:07:17.000Z
|
2020-11-18T07:11:47.000Z
|
anet-video-captioning/model/modules.py
|
chihyaoma/cyclical-visual-captioning
|
337eee17a8789e58d7bf687ad9ecf27c8da44f14
|
[
"MIT"
] | 3
|
2020-06-02T21:31:40.000Z
|
2021-09-16T02:37:32.000Z
|
import torch
import torch.nn as nn
import torch.nn.functional as F
class SoftAttention(nn.Module):
"""
Soft Attention module
"""
def __init__(self, rnn_hidden_size, attn_hidden_size, temp=1):
super(SoftAttention, self).__init__()
self.softmax = nn.Softmax(dim=1)
self.h2attn = nn.Linear(rnn_hidden_size, attn_hidden_size)
self.temp = temp
# this min_value is used to prevent in the case that,
# when the mask is all empty, softmax will result in NaN
self.min_value = -1e8
def forward(self, h, proj_context, context=None, mask=None, proposal_frame_mask=None, with_sentinel=False):
"""Propagate h through the network.
h: batch x dim (concat(img, action))
context: batch x seq_len x dim
mask: batch x seq_len indices to be masked
"""
attn_h = self.h2attn(h)
# Get attention
attn = torch.bmm(proj_context, attn_h.unsqueeze(2)
).squeeze(2) # batch x seq_len
attn = attn / self.temp
if mask is not None:
if with_sentinel:
attn.data.masked_fill_(mask.data, -float('inf'))
else:
# without sentinel, we need to use a very small value for masking,
# because there are corner cases where a image has no ROI proposals
# masking with -inf will thus result in NaN.
attn.data.masked_fill_(mask.data, self.min_value)
if proposal_frame_mask is not None:
# this `frame_masked_attn` is only used to computing (supervised) attention loss
# since our proposed method does not rely on supervision, we will not update the model
# based on this loss
frame_masked_attn = attn.clone()
if with_sentinel:
frame_masked_attn.data.masked_fill_(
proposal_frame_mask.data, -float('inf'))
else:
# without sentinel, we need to use a very small value for masking,
# because there are corner cases where a image has no ROI proposals
# masking with -inf will thus result in NaN.
frame_masked_attn.data.masked_fill_(
proposal_frame_mask.data, self.min_value)
attn = self.softmax(attn)
attn3 = attn.view(attn.size(0), 1, attn.size(1)) # batch x 1 x seq_len
if context is not None:
weighted_context = torch.bmm(
attn3, context).squeeze(1) # batch x dim
else:
weighted_context = torch.bmm(
attn3, proj_context).squeeze(1) # batch x dim
if proposal_frame_mask is not None:
return weighted_context, attn, frame_masked_attn
else:
return weighted_context, attn, None
class AdditiveSoftAttention(nn.Module):
"""
Soft Attention module
"""
def __init__(self, rnn_hidden_size, attn_hidden_size, temp=1):
super(AdditiveSoftAttention, self).__init__()
self.softmax = nn.Softmax(dim=1)
self.rnn_size = rnn_hidden_size
self.att_hid_size = attn_hidden_size
self.h2attn = nn.Linear(rnn_hidden_size, attn_hidden_size)
self.alpha_net = nn.Linear(attn_hidden_size, 1)
self.temp = temp
# this min_value is used to prevent in the case that,
# when the mask is all empty, softmax will result in NaN
self.min_value = -1e8
def forward(self, h, proj_context, context=None, mask=None, proposal_frame_mask=None, with_sentinel=False):
"""Propagate h through the network.
h: batch x dim (concat(img, action))
context: batch x seq_len x dim
mask: batch x seq_len indices to be masked
"""
attn_size = proj_context.size(1)
attn_h = self.h2attn(h)
attn_h = attn_h.unsqueeze(1)
dot = proj_context + attn_h
dot = torch.tanh(dot)
dot = dot.view(-1, self.att_hid_size)
dot = self.alpha_net(dot)
attn = dot.view(-1, attn_size)
# Get attention
# attn = torch.bmm(proj_context, attn_h.unsqueeze(2)).squeeze(2) # batch x seq_len
# attn = attn / self.temp
if mask is not None:
if with_sentinel:
attn.data.masked_fill_(mask.data, -float('inf'))
else:
# without sentinel, we need to use a very small value for masking,
# because there are corner cases where a image has no ROI proposals
# masking with -inf will thus result in NaN.
attn.data.masked_fill_(mask.data, self.min_value)
if proposal_frame_mask is not None:
# this `frame_masked_attn` is only used to computing (supervised) attention loss
# since our proposed method does not rely on supervision, we will not update the model
# based on this loss
frame_masked_attn = attn.clone()
if with_sentinel:
frame_masked_attn.data.masked_fill_(
proposal_frame_mask.data, -float('inf'))
else:
# without sentinel, we need to use a very small value for masking,
# because there are corner cases where a image has no ROI proposals
# masking with -inf will thus result in NaN.
frame_masked_attn.data.masked_fill_(
proposal_frame_mask.data, self.min_value)
attn = self.softmax(attn)
attn3 = attn.unsqueeze(1) # batch x 1 x seq_len
if context is not None:
weighted_context = torch.bmm(
attn3, context).squeeze(1) # batch x dim
else:
weighted_context = torch.bmm(
attn3, proj_context).squeeze(1) # batch x dim
if proposal_frame_mask is not None:
return weighted_context, attn, frame_masked_attn
else:
return weighted_context, attn, None
def proj_masking(feat, projector, mask=None):
"""Universal projector and masking"""
proj_feat = projector(feat.view(-1, feat.size(2)))
proj_feat = proj_feat.view(feat.size(0), feat.size(1), -1)
if mask is not None:
# check that there are at least one element not masked for each sample (row),
# this is more strict but won't work for features from NBT because
# some images do not have any regional proposal features
# assert 0 not in mask.sum(1)
assert mask.sum() != 0 # check that not all the elements across all samples are set to 0
return proj_feat * mask.unsqueeze(2).expand_as(proj_feat)
else:
return proj_feat
| 37.937853
| 111
| 0.610127
| 918
| 6,715
| 4.293028
| 0.176471
| 0.021314
| 0.043136
| 0.036539
| 0.796498
| 0.78102
| 0.78102
| 0.78102
| 0.78102
| 0.763766
| 0
| 0.010211
| 0.31452
| 6,715
| 176
| 112
| 38.153409
| 0.84597
| 0.32271
| 0
| 0.695652
| 0
| 0
| 0.002729
| 0
| 0
| 0
| 0
| 0
| 0.01087
| 1
| 0.054348
| false
| 0
| 0.032609
| 0
| 0.173913
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 5
|
1e46eb8e3ba2d18143073b54d12710fbb6774894
| 204
|
py
|
Python
|
rex/rechunk_h5/__init__.py
|
psusmars/rex
|
1891e1e41f899e5f479545ff8d8e9fc76a1929e6
|
[
"BSD-3-Clause"
] | 8
|
2020-11-18T19:42:55.000Z
|
2022-01-12T04:24:05.000Z
|
rex/rechunk_h5/__init__.py
|
psusmars/rex
|
1891e1e41f899e5f479545ff8d8e9fc76a1929e6
|
[
"BSD-3-Clause"
] | 57
|
2020-05-01T14:33:34.000Z
|
2022-02-18T17:52:49.000Z
|
rex/rechunk_h5/__init__.py
|
psusmars/rex
|
1891e1e41f899e5f479545ff8d8e9fc76a1929e6
|
[
"BSD-3-Clause"
] | 4
|
2020-08-20T16:46:15.000Z
|
2022-02-22T19:46:23.000Z
|
# -*- coding: utf-8 -*-
"""
.h5 rechunking tool
"""
from .chunk_size import ArrayChunkSize, TimeseriesChunkSize
from .combine_h5 import CombineH5
from .rechunk_h5 import RechunkH5, get_dataset_attributes
| 25.5
| 59
| 0.779412
| 25
| 204
| 6.16
| 0.76
| 0.103896
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.033333
| 0.117647
| 204
| 7
| 60
| 29.142857
| 0.822222
| 0.205882
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 5
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.